VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/if.c@ 13738

Last change on this file since 13738 was 13738, checked in by vboxsync, 16 years ago

removing extra if/ifndefs
introduced deferred socket removal, to prevent deletion of a socket that is still locked

  • Property svn:eol-style set to native
File size: 10.9 KB
Line 
1/*
2 * Copyright (c) 1995 Danny Gasparovski.
3 *
4 * Please read the file COPYRIGHT for the
5 * terms and conditions of the copyright.
6 */
7
8#include <slirp.h>
9
10
/* Make ifm a singleton per-session (ifs) ring: both links point back at itself.
 * NOTE: evaluates its argument three times — only pass side-effect-free lvalues. */
#define ifs_init(ifm) ((ifm)->ifs_next = (ifm)->ifs_prev = (ifm))
12
13static void ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
14{
15 ifm->ifs_next = ifmhead->ifs_next;
16 ifmhead->ifs_next = ifm;
17 ifm->ifs_prev = ifmhead;
18 ifm->ifs_next->ifs_prev = ifm;
19}
20
21static void ifs_remque(struct mbuf *ifm)
22{
23 ifm->ifs_prev->ifs_next = ifm->ifs_next;
24 ifm->ifs_next->ifs_prev = ifm->ifs_prev;
25}
26
27void
28if_init(PNATState pData)
29{
30#if 0
31 /*
32 * Set if_maxlinkhdr to 48 because it's 40 bytes for TCP/IP,
33 * and 8 bytes for PPP, but need to have it on an 8byte boundary
34 */
35#ifdef USE_PPP
36 if_maxlinkhdr = 48;
37#else
38 if_maxlinkhdr = 40;
39#endif
40#else
41 /* 2 for alignment, 14 for ethernet, 40 for TCP/IP */
42 if_maxlinkhdr = 2 + 14 + 40;
43#endif
44 if_queued = 0;
45 if_thresh = 10;
46 if_mtu = 1500;
47 if_mru = 1500;
48 if_comp = IF_AUTOCOMP;
49 if_fastq.ifq_next = if_fastq.ifq_prev = &if_fastq;
50
51 VBOX_SLIRP_LOCK_CREATE(&pData->if_fastq_mutex);
52 VBOX_SLIRP_LOCK_CREATE(&if_fastq.m_mutex);
53
54 if_batchq.ifq_next = if_batchq.ifq_prev = &if_batchq;
55
56 VBOX_SLIRP_LOCK_CREATE(&pData->if_batchq_mutex);
57 VBOX_SLIRP_LOCK_CREATE(&if_batchq.m_mutex);
58
59 /* sl_compress_init(&comp_s); */
60 next_m = &if_batchq;
61}
62
/* NOTE(review): everything inside this '#if 0' is dead legacy SLiRP code
 * (blocking-modem/tty handling) that is never compiled in VirtualBox.
 * Kept only for historical reference; consider deleting it outright. */
#if 0
/*
 * This shouldn't be needed since the modem is blocking and
 * we don't expect any signals, but what the hell..
 */
inline int
writen(fd, bptr, n)
    int fd;
    char *bptr;
    int n;
{
    int ret;
    int total;

    /* This should succeed most of the time */
    ret = send(fd, bptr, n,0);
    if (ret == n || ret <= 0)
        return ret;

    /* Didn't write everything, go into the loop */
    total = ret;
    while (n > total) {
        ret = send(fd, bptr+total, n-total,0);
        if (ret <= 0)
            return ret;
        total += ret;
    }
    return total;
}

/*
 * if_input - read() the tty, do "top level" processing (ie: check for any escapes),
 * and pass onto (*ttyp->if_input)
 *
 * XXXXX Any zeros arriving by themselves are NOT placed into the arriving packet.
 */
#define INBUFF_SIZE 2048 /* XXX */
void
if_input(ttyp)
    struct ttys *ttyp;
{
    u_char if_inbuff[INBUFF_SIZE];
    int if_n;

    DEBUG_CALL("if_input");
    DEBUG_ARG("ttyp = %lx", (long)ttyp);

    if_n = recv(ttyp->fd, (char *)if_inbuff, INBUFF_SIZE,0);

    DEBUG_MISC((dfd, " read %d bytes\n", if_n));

    if (if_n <= 0) {
        if (if_n == 0 || (errno != EINTR && errno != EAGAIN)) {
            if (ttyp->up)
                link_up--;
            tty_detached(ttyp, 0);
        }
        return;
    }
    if (if_n == 1) {
        if (*if_inbuff == '0') {
            ttyp->ones = 0;
            if (++ttyp->zeros >= 5)
                slirp_exit(0);
            return;
        }
        if (*if_inbuff == '1') {
            ttyp->zeros = 0;
            if (++ttyp->ones >= 5)
                tty_detached(ttyp, 0);
            return;
        }
    }
    ttyp->ones = ttyp->zeros = 0;

    (*ttyp->if_input)(ttyp, if_inbuff, if_n);
}
#endif
141
142/*
143 * if_output: Queue packet into an output queue.
144 * There are 2 output queue's, if_fastq and if_batchq.
145 * Each output queue is a doubly linked list of double linked lists
146 * of mbufs, each list belonging to one "session" (socket). This
147 * way, we can output packets fairly by sending one packet from each
148 * session, instead of all the packets from one session, then all packets
149 * from the next session, etc. Packets on the if_fastq get absolute
150 * priority, but if one session hogs the link, it gets "downgraded"
151 * to the batchq until it runs out of packets, then it'll return
152 * to the fastq (eg. if the user does an ls -alR in a telnet session,
153 * it'll temporarily get downgraded to the batchq)
154 */
155void
156if_output(PNATState pData, struct socket *so, struct mbuf *ifm)
157{
158 struct mbuf *ifq;
159#ifdef VBOX_WITH_SYNC_SLIRP
160 struct mbuf *ifqprev;
161#endif
162 int on_fastq = 1;
163
164 DEBUG_CALL("if_output");
165 DEBUG_ARG("so = %lx", (long)so);
166 DEBUG_ARG("ifm = %lx", (long)ifm);
167
168
169 /*
170 * First remove the mbuf from m_usedlist,
171 * since we're gonna use m_next and m_prev ourselves
172 * XXX Shouldn't need this, gotta change dtom() etc.
173 */
174 VBOX_SLIRP_LOCK(pData->m_usedlist_mutex);
175 VBOX_SLIRP_LOCK(ifm->m_mutex);
176
177 if (ifm->m_flags & M_USEDLIST) {
178 remque(pData, ifm);
179 ifm->m_flags &= ~M_USEDLIST;
180 }
181 VBOX_SLIRP_UNLOCK(pData->m_usedlist_mutex);
182
183 /*
184 * See if there's already a batchq list for this session.
185 * This can include an interactive session, which should go on fastq,
186 * but gets too greedy... hence it'll be downgraded from fastq to batchq.
187 * We mustn't put this packet back on the fastq (or we'll send it out of order)
188 * XXX add cache here?
189 */
190 VBOX_SLIRP_LOCK(pData->if_batchq_mutex);
191#ifndef VBOX_WITH_SYNC_SLIRP
192 for (ifq = if_batchq.ifq_prev; ifq != &if_batchq; ifq = ifq->ifq_prev) {
193#else
194 ifq = if_batchq.ifq_prev;
195 while(1){
196 if (ifq == &if_batchq) {
197 VBOX_SLIRP_UNLOCK(pData->if_batchq_mutex);
198 break;
199 }
200 ifqprev = ifq->ifq_prev;
201#endif
202 VBOX_SLIRP_LOCK(ifq->m_mutex);
203 VBOX_SLIRP_UNLOCK(pData->if_batchq_mutex);
204 if (so == ifq->ifq_so) {
205 /* A match! */
206 ifm->ifq_so = so;
207 ifs_insque(ifm, ifq->ifs_prev);
208 goto diddit;
209 }
210 VBOX_SLIRP_UNLOCK(ifq->m_mutex);
211 VBOX_SLIRP_LOCK(pData->if_batchq_mutex);
212#ifdef VBOX_WITH_SYNC_SLIRP
213 ifq = ifqprev;
214#endif
215 }
216
217 /* No match, check which queue to put it on */
218 if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
219 VBOX_SLIRP_LOCK(pData->if_fastq_mutex);
220 ifq = if_fastq.ifq_prev;
221 VBOX_SLIRP_LOCK(ifq->m_mutex);
222 VBOX_SLIRP_UNLOCK(pData->if_fastq_mutex);
223 on_fastq = 1;
224 /*
225 * Check if this packet is a part of the last
226 * packet's session
227 */
228 if (ifq->ifq_so == so) {
229 ifm->ifq_so = so;
230 ifs_insque(ifm, ifq->ifs_prev);
231 goto diddit;
232 }
233 }
234 else {
235 VBOX_SLIRP_LOCK(pData->if_batchq_mutex);
236 ifq = if_batchq.ifq_prev;
237 if (ifq != &if_batchq) {
238 VBOX_SLIRP_LOCK(ifq->m_mutex);
239 }
240 VBOX_SLIRP_UNLOCK(pData->if_batchq_mutex);
241 }
242
243 /* Create a new doubly linked list for this session */
244 ifm->ifq_so = so;
245 ifs_init(ifm);
246 insque(pData, ifm, ifq);
247
248diddit:
249 VBOX_SLIRP_LOCK(pData->if_queued_mutex);
250
251 ++if_queued;
252
253 VBOX_SLIRP_UNLOCK(pData->if_queued_mutex);
254
255 if (so) {
256 /* Update *_queued */
257 VBOX_SLIRP_LOCK(so->so_mutex);
258 so->so_queued++;
259 so->so_nqueued++;
260 /*
261 * Check if the interactive session should be downgraded to
262 * the batchq. A session is downgraded if it has queued 6
263 * packets without pausing, and at least 3 of those packets
264 * have been sent over the link
265 * (XXX These are arbitrary numbers, probably not optimal..)
266 */
267 if (on_fastq && ((so->so_nqueued >= 6) &&
268 (so->so_nqueued - so->so_queued) >= 3)) {
269
270 VBOX_SLIRP_LOCK(pData->if_fastq_mutex);
271 /* Remove from current queue... */
272 remque(pData, ifm->ifs_next);
273
274 VBOX_SLIRP_UNLOCK(pData->if_fastq_mutex);
275 VBOX_SLIRP_LOCK(pData->if_batchq_mutex);
276
277 /* ...And insert in the new. That'll teach ya! */
278 insque(pData, ifm->ifs_next, &if_batchq);
279 VBOX_SLIRP_UNLOCK(pData->if_batchq_mutex);
280 }
281 VBOX_SLIRP_UNLOCK(so->so_mutex);
282 }
283 VBOX_SLIRP_UNLOCK(ifq->m_mutex);
284 VBOX_SLIRP_UNLOCK(ifm->m_mutex);
285
286#ifndef FULL_BOLT
287 /*
288 * This prevents us from malloc()ing too many mbufs
289 */
290 if (link_up) {
291 /* if_start will check towrite */
292 if_start(pData);
293 }
294#endif
295}
296
297/*
298 * Send a packet
299 * We choose a packet based on it's position in the output queues;
300 * If there are packets on the fastq, they are sent FIFO, before
301 * everything else. Otherwise we choose the first packet from the
302 * batchq and send it. the next packet chosen will be from the session
303 * after this one, then the session after that one, and so on.. So,
304 * for example, if there are 3 ftp session's fighting for bandwidth,
305 * one packet will be sent from the first session, then one packet
306 * from the second session, then one packet from the third, then back
307 * to the first, etc. etc.
308 */
309void
310if_start(PNATState pData)
311{
312 struct mbuf *ifm, *ifqt;
313#ifdef VBOX_WITH_SYNC_SLIRP
314 int on_fast = 0; /*required for correctness */
315 struct mbuf *ifm_prev;
316#endif
317
318 DEBUG_CALL("if_start");
319
320 VBOX_SLIRP_LOCK(pData->if_queued_mutex);
321 if (if_queued <= 0) {
322 VBOX_SLIRP_UNLOCK(pData->if_queued_mutex);
323 return; /* Nothing to do */
324 }
325
326 again:
327 VBOX_SLIRP_UNLOCK(pData->if_queued_mutex);
328
329 /* check if we can really output */
330 if (!slirp_can_output(pData->pvUser))
331 return;
332
333 /*
334 * See which queue to get next packet from
335 * If there's something in the fastq, select it immediately
336 */
337 VBOX_SLIRP_LOCK(pData->if_fastq_mutex);
338 if (if_fastq.ifq_next != &if_fastq) {
339 ifm = if_fastq.ifq_next;
340#ifdef VBOX_WITH_SYNC_SLIRP
341 on_fast = 1;
342#endif
343 VBOX_SLIRP_LOCK(ifm->m_mutex);
344 } else {
345 VBOX_SLIRP_UNLOCK(pData->if_fastq_mutex);
346
347 VBOX_SLIRP_LOCK(pData->if_batchq_mutex);
348 VBOX_SLIRP_LOCK(pData->next_m_mutex);
349 /* Nothing on fastq, see if next_m is valid */
350 if (next_m != &if_batchq)
351 ifm = next_m;
352 else
353 ifm = if_batchq.ifq_next;
354
355 /* Set which packet to send on next iteration */
356 next_m = ifm->ifq_next;
357 VBOX_SLIRP_UNLOCK(pData->next_m_mutex);
358 }
359#ifdef VBOX_WITH_SYNC_SLIRP
360 VBOX_SLIRP_LOCK(ifm->m_mutex);
361 VBOX_SLIRP_LOCK(pData->if_queued_mutex);
362 if (if_queued == 0) {
363 if (on_fast) {
364 VBOX_SLIRP_UNLOCK(pData->if_fastq_mutex);
365 }else {
366 VBOX_SLIRP_UNLOCK(pData->if_batchq_mutex);
367 }
368 goto done;
369 }
370#endif
371 /* Remove it from the queue */
372 ifqt = ifm->ifq_prev;
373 remque(pData, ifm);
374
375 --if_queued;
376#ifdef VBOX_WITH_SYNC_SLIRP
377 VBOX_SLIRP_UNLOCK(pData->if_queued_mutex);
378 if (on_fast == 1) {
379 VBOX_SLIRP_UNLOCK(pData->if_fastq_mutex);
380 }
381 else {
382 VBOX_SLIRP_UNLOCK(pData->if_batchq_mutex);
383 }
384#endif
385
386 /* If there are more packets for this session, re-queue them */
387 if (ifm->ifs_next != /* ifm->ifs_prev != */ ifm) {
388 insque(pData, ifm->ifs_next, ifqt);
389 ifs_remque(ifm);
390 }
391
392 /* Update so_queued */
393 if (ifm->ifq_so) {
394 VBOX_SLIRP_LOCK(ifm->ifq_so->so_mutex);
395 if (--ifm->ifq_so->so_queued == 0)
396 /* If there's no more queued, reset nqueued */
397 ifm->ifq_so->so_nqueued = 0;
398 VBOX_SLIRP_UNLOCK(ifm->ifq_so->so_mutex);
399 }
400
401 /* Encapsulate the packet for sending */
402 if_encap(pData, (const uint8_t *)ifm->m_data, ifm->m_len);
403
404 m_free(pData, ifm);
405
406 if (ifm != NULL)VBOX_SLIRP_UNLOCK(ifm->m_mutex);
407 VBOX_SLIRP_LOCK(pData->if_queued_mutex);
408 /*We release if_queued_mutex after again label and before return*/
409
410 if (if_queued > 0)
411 goto again;
412 done:
413 VBOX_SLIRP_UNLOCK(pData->if_queued_mutex);
414}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette