Changeset 530 in vbox for trunk/src/VBox
- Timestamp: Feb 2, 2007 1:39:09 AM (18 years ago)
- Location: trunk/src/VBox/Devices/Network
- Files: 9 edited
Legend:
- unmarked lines: unmodified context
- lines prefixed with "+": added in r530
- lines prefixed with "-": removed
- "…": unchanged code omitted between hunks
trunk/src/VBox/Devices/Network/DrvNAT.cpp
(r1 → r530)
  {
      LogFlow(("drvNATDestruct:\n"));
+ #if ARCH_BITS == 64
+     LogRel(("NAT: g_cpvHashUsed=%RU32 g_cpvHashCollisions=%RU32 g_cpvHashInserts=%RU64 g_cpvHashDone=%RU64\n",
+             g_cpvHashUsed, g_cpvHashCollisions, g_cpvHashInserts, g_cpvHashDone));
+ #endif
      slirp_term();
      g_pDrv = NULL;
trunk/src/VBox/Devices/Network/slirp/ip_input.c
(r1 → r530)
  ip_init()
  {
-     ipq.next = ipq.prev = (ipqp_32)&ipq;
+     ipq.next = ipq.prev = ptr_to_u32(&ipq);
      ip_id = tt.tv_sec & 0xffff;
      udp_init();
…
       * of this datagram.
       */
-     for (fp = (struct ipq *) ipq.next; fp != &ipq;
-          fp = (struct ipq *) fp->next)
+     for (fp = u32_to_ptr(ipq.next, struct ipq *); fp != &ipq;
+          fp = u32_to_ptr(fp->next, struct ipq *))
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
…
      fp->ipq_p = ip->ip_p;
      fp->ipq_id = ip->ip_id;
-     fp->ipq_next = fp->ipq_prev = (ipasfragp_32)fp;
+     fp->ipq_next = fp->ipq_prev = ptr_to_u32((struct ipasfrag *)fp);
      fp->ipq_src = ((struct ip *)ip)->ip_src;
      fp->ipq_dst = ((struct ip *)ip)->ip_dst;
…
       * Find a segment which begins after this one does.
       */
-     for (q = (struct ipasfrag *)fp->ipq_next; q != (struct ipasfrag *)fp;
-          q = (struct ipasfrag *)q->ipf_next)
+     for (q = u32_to_ptr(fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
+          q = u32_to_ptr(q->ipf_next, struct ipasfrag *))
        if (q->ip_off > ip->ip_off)
            break;
…
       * segment.  If it provides all of our data, drop us.
       */
-     if (q->ipf_prev != (ipasfragp_32)fp) {
-         i = ((struct ipasfrag *)(q->ipf_prev))->ip_off +
-             ((struct ipasfrag *)(q->ipf_prev))->ip_len - ip->ip_off;
+     if (u32_to_ptr(q->ipf_prev, struct ipq *) != fp) {
+         i = (u32_to_ptr(q->ipf_prev, struct ipasfrag *))->ip_off +
+             (u32_to_ptr(q->ipf_prev, struct ipasfrag *))->ip_len - ip->ip_off;
          if (i > 0) {
              if (i >= ip->ip_len)
…
              break;
          }
-         q = (struct ipasfrag *) q->ipf_next;
-         m_freem(dtom((struct ipasfrag *) q->ipf_prev));
-         ip_deq((struct ipasfrag *) q->ipf_prev);
+         q = u32_to_ptr(q->ipf_next, struct ipasfrag *);
+         m_freem(dtom(u32_to_ptr(q->ipf_prev, struct ipasfrag *)));
+         ip_deq(u32_to_ptr(q->ipf_prev, struct ipasfrag *));
      }
…
       * check for complete reassembly.
       */
-     ip_enq(ip, (struct ipasfrag *) q->ipf_prev);
+     ip_enq(ip, u32_to_ptr(q->ipf_prev, struct ipasfrag *));
      next = 0;
-     for (q = (struct ipasfrag *) fp->ipq_next; q != (struct ipasfrag *)fp;
-          q = (struct ipasfrag *) q->ipf_next) {
+     for (q = u32_to_ptr(fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
+          q = u32_to_ptr(q->ipf_next, struct ipasfrag *)) {
          if (q->ip_off != next)
              return (0);
          next += q->ip_len;
      }
-     if (((struct ipasfrag *)(q->ipf_prev))->ipf_mff & 1)
+     if (u32_to_ptr(q->ipf_prev, struct ipasfrag *)->ipf_mff & 1)
          return (0);

…
       * Reassembly is complete; concatenate fragments.
       */
-     q = (struct ipasfrag *) fp->ipq_next;
+     q = u32_to_ptr(fp->ipq_next, struct ipasfrag *);
      m = dtom(q);

-     q = (struct ipasfrag *) q->ipf_next;
+     q = u32_to_ptr(q->ipf_next, struct ipasfrag *);
      while (q != (struct ipasfrag *)fp) {
          struct mbuf *t;
          t = dtom(q);
-         q = (struct ipasfrag *) q->ipf_next;
+         q = u32_to_ptr(q->ipf_next, struct ipasfrag *);
          m_cat(m, t);
      }
…
       * Make header visible.
       */
-     ip = (struct ipasfrag *) fp->ipq_next;
+     ip = u32_to_ptr(fp->ipq_next, struct ipasfrag *);

      /*
…
      register struct ipasfrag *q, *p;

-     for (q = (struct ipasfrag *) fp->ipq_next; q != (struct ipasfrag *)fp;
+     for (q = u32_to_ptr(fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
           q = p) {
-         p = (struct ipasfrag *) q->ipf_next;
+         p = u32_to_ptr(q->ipf_next, struct ipasfrag *);
          ip_deq(q);
          m_freem(dtom(q));
…
      DEBUG_CALL("ip_enq");
      DEBUG_ARG("prev = %lx", (long)prev);
-     p->ipf_prev = (ipasfragp_32) prev;
+     p->ipf_prev = ptr_to_u32(prev);
      p->ipf_next = prev->ipf_next;
-     ((struct ipasfrag *)(prev->ipf_next))->ipf_prev = (ipasfragp_32) p;
-     prev->ipf_next = (ipasfragp_32) p;
+     u32_to_ptr(prev->ipf_next, struct ipasfrag *)->ipf_prev = ptr_to_u32(p);
+     prev->ipf_next = ptr_to_u32(p);
  }
…
      register struct ipasfrag *p;
  {
-     ((struct ipasfrag *)(p->ipf_prev))->ipf_next = p->ipf_next;
-     ((struct ipasfrag *)(p->ipf_next))->ipf_prev = p->ipf_prev;
+     struct ipasfrag *prev = u32_to_ptr(p->ipf_prev, struct ipasfrag *);
+     struct ipasfrag *next = u32_to_ptr(p->ipf_next, struct ipasfrag *);
+     u32ptr_done(prev->ipf_next, p);
+     prev->ipf_next = p->ipf_next;
+     next->ipf_prev = p->ipf_prev;
  }

…
      DEBUG_CALL("ip_slowtimo");

-     fp = (struct ipq *) ipq.next;
+     fp = u32_to_ptr(ipq.next, struct ipq *);
      if (fp == 0)
          return;
…
      while (fp != &ipq) {
          --fp->ipq_ttl;
-         fp = (struct ipq *) fp->next;
-         if (((struct ipq *)(fp->prev))->ipq_ttl == 0) {
+         fp = u32_to_ptr(fp->next, struct ipq *);
+         if (u32_to_ptr(fp->prev, struct ipq *)->ipq_ttl == 0) {
              ipstat.ips_fragtimeout++;
-             ip_freef((struct ipq *) fp->prev);
+             ip_freef(u32_to_ptr(fp->prev, struct ipq *));
          }
      }
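The pattern applied throughout ip_input.c (and mirrored in tcp_input.c and misc.c below) is mechanical: wherever the BSD code cast a 32-bit next/prev field straight back to a pointer, the field is now read through u32_to_ptr() and written through ptr_to_u32(), which on 64-bit hosts round-trip via a pointer hash (see tcp_var.h and tcp_subr.c further down). Condensed from the hunks above, purely for illustration:

    /* before: only correct when a pointer fits in 32 bits */
    for (fp = (struct ipq *) ipq.next; fp != &ipq;
         fp = (struct ipq *) fp->next)
        /* ... */;

    /* after: the 32-bit field now holds a hash index, so this also works on 64-bit hosts */
    for (fp = u32_to_ptr(ipq.next, struct ipq *); fp != &ipq;
         fp = u32_to_ptr(fp->next, struct ipq *))
        /* ... */;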
trunk/src/VBox/Devices/Network/slirp/libslirp.h
(r1 → r530)
  void slirp_link_up(void);
  void slirp_link_down(void);
+ # if ARCH_BITS == 64
+ extern uint32_t g_cpvHashUsed;
+ extern uint32_t g_cpvHashCollisions;
+ extern uint64_t g_cpvHashInserts;
+ extern uint64_t g_cpvHashDone;
+ # endif
  #endif /* VBOX */
trunk/src/VBox/Devices/Network/slirp/mbuf.c
(r1 → r530)
       */
      if (m->m_flags & M_DOFREE) {
+         u32ptr_done(ptr_to_u32(ptr), m);
          free(m);
          mbuf_alloced--;
      } else if ((m->m_flags & M_FREELIST) == 0) {
+ #if DEBUG_bird
+         /* u32ptr_done(ptr_to_u32(ptr), m);*/
+ #endif
          insque(m,&m_freelist);
          m->m_flags = M_FREELIST; /* Clobber other flags */
trunk/src/VBox/Devices/Network/slirp/misc.c
(r1 → r530)
      register struct quehead_32 *element = (struct quehead_32 *) a;
      register struct quehead_32 *head = (struct quehead_32 *) b;
+     struct quehead_32 *link = u32_to_ptr(head->qh_link, struct quehead_32 *);
+
      element->qh_link = head->qh_link;
-     head->qh_link = (u_int32_t)element;
-     element->qh_rlink = (u_int32_t)head;
-     ((struct quehead_32 *)(element->qh_link))->qh_rlink
-         = (u_int32_t)element;
+     element->qh_rlink = ptr_to_u32(head);
+     Assert(link->qh_rlink == element->qh_rlink);
+     link->qh_rlink = head->qh_link = ptr_to_u32(element);
  }

…
  {
      register struct quehead_32 *element = (struct quehead_32 *) a;
-     ((struct quehead_32 *)(element->qh_link))->qh_rlink = element->qh_rlink;
-     ((struct quehead_32 *)(element->qh_rlink))->qh_link = element->qh_link;
+     struct quehead_32 *link = u32_to_ptr(element->qh_link, struct quehead_32 *);
+     struct quehead_32 *rlink = u32_to_ptr(element->qh_rlink, struct quehead_32 *);
+
+     u32ptr_done(link->qh_rlink, element);
+     link->qh_rlink = element->qh_rlink;
+     rlink->qh_link = element->qh_link;
      element->qh_rlink = 0;
  }
trunk/src/VBox/Devices/Network/slirp/slirp.c
(r1 → r530)
       */
      do_slowtimo = ((tcb.so_next != &tcb) ||
-                    ((struct ipasfrag *)&ipq != (struct ipasfrag *)ipq.next));
+                    ((struct ipasfrag *)&ipq != u32_to_ptr(ipq.next, struct ipasfrag *)));

      for (so = tcb.so_next; so != &tcb; so = so_next) {
trunk/src/VBox/Devices/Network/slirp/tcp_input.c
(r1 → r530)
  #define TCP_REASS(tp, ti, m, so, flags) {\
       if ((ti)->ti_seq == (tp)->rcv_nxt && \
-          (tp)->seg_next == (tcpiphdrp_32)(tp) && \
+          u32_to_ptr((tp)->seg_next, struct tcpcb *) == (tp) && \
           (tp)->t_state == TCPS_ESTABLISHED) {\
               if (ti->ti_flags & TH_PUSH) \
…
  #define TCP_REASS(tp, ti, m, so, flags) { \
       if ((ti)->ti_seq == (tp)->rcv_nxt && \
-          (tp)->seg_next == (tcpiphdrp_32)(tp) && \
+          u32_to_ptr((tp)->seg_next, struct tcpcb *) == (tp) && \
           (tp)->t_state == TCPS_ESTABLISHED) { \
               tp->t_flags |= TF_DELACK; \
…
       * Find a segment which begins after this one does.
       */
-     for (q = (struct tcpiphdr *)tp->seg_next; q != (struct tcpiphdr *)tp;
-          q = (struct tcpiphdr *)q->ti_next)
+     for (q = u32_to_ptr(tp->seg_next, struct tcpiphdr *); q != (struct tcpiphdr *)tp;
+          q = u32_to_ptr(q->ti_next, struct tcpiphdr *))
          if (SEQ_GT(q->ti_seq, ti->ti_seq))
              break;
…
       * segment.  If it provides all of our data, drop us.
       */
-     if ((struct tcpiphdr *)q->ti_prev != (struct tcpiphdr *)tp) {
+     if (u32_to_ptr(q->ti_prev, struct tcpiphdr *) != (struct tcpiphdr *)tp) {
          register int i;
-         q = (struct tcpiphdr *)q->ti_prev;
+         q = u32_to_ptr(q->ti_prev, struct tcpiphdr *);
          /* conversion to int (in i) handles seq wraparound */
          i = q->ti_seq + q->ti_len - ti->ti_seq;
…
              ti->ti_seq += i;
          }
-         q = (struct tcpiphdr *)(q->ti_next);
+         q = u32_to_ptr(q->ti_next, struct tcpiphdr *);
      }

      tcpstat.tcps_rcvoopack++;
      tcpstat.tcps_rcvoobyte += ti->ti_len;
-     REASS_MBUF(ti) = (mbufp_32) m; /* XXX */
+     REASS_MBUF_SET(ti, m); /* XXX */

      /*
…
          q->ti_seq += i;
          q->ti_len -= i;
-         m_adj((struct mbuf *) REASS_MBUF(q), i);
+         m_adj(REASS_MBUF_GET(q), i);
          break;
      }
-     q = (struct tcpiphdr *)q->ti_next;
-     m = (struct mbuf *) REASS_MBUF((struct tcpiphdr *)q->ti_prev);
-     remque_32((void *)(q->ti_prev));
+     q = u32_to_ptr(q->ti_next, struct tcpiphdr *);
+     m = REASS_MBUF_GET(u32_to_ptr(q->ti_prev, struct tcpiphdr *));
+     remque_32(u32_to_ptr(q->ti_prev, struct tcpiphdr *));
      m_freem(m);
  }
…
   * Stick new segment in its place.
   */
- insque_32(ti, (void *)(q->ti_prev));
+ insque_32(ti, u32_to_ptr(q->ti_prev, struct tcpiphdr *));

  present:
…
  if (!TCPS_HAVEESTABLISHED(tp->t_state))
      return (0);
- ti = (struct tcpiphdr *) tp->seg_next;
+ ti = u32_to_ptr(tp->seg_next, struct tcpiphdr *);
  if (ti == (struct tcpiphdr *)tp || ti->ti_seq != tp->rcv_nxt)
      return (0);
…
  flags = ti->ti_flags & TH_FIN;
  remque_32(ti);
- m = (struct mbuf *) REASS_MBUF(ti); /* XXX */
- ti = (struct tcpiphdr *)ti->ti_next;
+ m = REASS_MBUF_GET(ti); /* XXX */
+ ti = u32_to_ptr(ti->ti_next, struct tcpiphdr *);
  /* if (so->so_state & SS_FCANTRCVMORE) */
  if (so->so_state & SS_FCANTSENDMORE)
…
      }
  } else if (ti->ti_ack == tp->snd_una &&
-     tp->seg_next == (tcpiphdrp_32)tp &&
+     u32_to_ptr(tp->seg_next, struct tcpcb *) == tp &&
      ti->ti_len <= sbspace(&so->so_rcv)) {
      /*
trunk/src/VBox/Devices/Network/slirp/tcp_subr.c
(r478 → r530)

      memset((char *) tp, 0, sizeof(struct tcpcb));
-     tp->seg_next = tp->seg_prev = (tcpiphdrp_32)tp;
+     tp->seg_next = tp->seg_prev = ptr_to_u32((struct tcpiphdr *)tp);
      tp->t_maxseg = tcp_mssdflt;

…

      /* free the reassembly queue, if any */
-     t = (struct tcpiphdr *) tp->seg_next;
+     t = u32_to_ptr(tp->seg_next, struct tcpiphdr *);
      while (t != (struct tcpiphdr *)tp) {
-         t = (struct tcpiphdr *)t->ti_next;
-         m = (struct mbuf *) REASS_MBUF((struct tcpiphdr *)t->ti_prev);
-         remque_32((struct tcpiphdr *) t->ti_prev);
+         t = u32_to_ptr(t->ti_next, struct tcpiphdr *);
+         m = REASS_MBUF_GET(u32_to_ptr(t->ti_prev, struct tcpiphdr *));
+         remque_32(u32_to_ptr(t->ti_prev, struct tcpiphdr *));
          m_freem(m);
      }
…
       */
      /* free(tp, M_PCB); */
+     u32ptr_done(ptr_to_u32(ptr), tp);
      free(tp);
      so->so_tcpcb = 0;
…
      }
  }
+
+ #if defined(VBOX) && SIZEOF_CHAR_P != 4
+ /** Hash table used for translating pointers to unique uint32_t entries.
+  * The 0 entry is reserved for NULL pointers. */
+ void *g_apvHash[16384];
+ /** The number of currently used pointer hash entries. */
+ uint32_t g_cpvHashUsed = 1;
+ /** The number of insert collisions. */
+ uint32_t g_cpvHashCollisions = 0;
+ /** The number of hash inserts. */
+ uint64_t g_cpvHashInserts = 0;
+ /** The number of done calls. */
+ uint64_t g_cpvHashDone = 0;
+
+ /**
+  * Slow pointer hashing that deals with automatic inserting and collisions.
+  */
+ uint32_t VBoxU32PtrHashSlow(void *pv)
+ {
+     uint32_t i;
+     if (pv == NULL)
+         i = 0;
+     else
+     {
+         const uint32_t i1 = ((uintptr_t)pv >> 3) % RT_ELEMENTS(g_apvHash);
+         if (g_apvHash[i1] == pv)
+             i = i1;
+         else
+         {
+             /*
+              * Try up to 10 times then assume it's an insertion.
+              * If we didn't find a free entry by then, try another 100 times.
+              * If that fails, give up.
+              */
+             const uint32_t i2 = ((uintptr_t)pv >> 2) % 7867;
+             uint32_t i1stFree = g_apvHash[i1] ? 0 : i1;
+             int cTries = 10;
+             int cTries2 = 100;
+
+             i = i1;
+             for (;;)
+             {
+                 /* check if we should give in.*/
+                 if (--cTries > 0)
+                 {
+                     if (i1stFree != 0)
+                     {
+                         i = i1stFree;
+                         g_apvHash[i] = pv;
+                         g_cpvHashUsed++;
+                         if (i != i1)
+                             g_cpvHashCollisions++;
+                         g_cpvHashInserts++;
+                         break;
+                     }
+                     if (!cTries2)
+                     {
+                         AssertReleaseMsgFailed(("NAT pointer hash error. pv=%p g_cpvHashUsed=%d g_cpvHashCollisions=%u\n",
+                                                 pv, g_cpvHashUsed, g_cpvHashCollisions));
+                         i = 0;
+                         break;
+                     }
+                     cTries = cTries2;
+                     cTries2 = 0;
+                 }
+
+                 /* advance to the next hash entry and test it. */
+                 i = (i + i2) % RT_ELEMENTS(g_apvHash);
+                 while (RT_UNLIKELY(!i))
+                     i = (i + i2) % RT_ELEMENTS(g_apvHash);
+                 if (g_apvHash[i] == pv)
+                     break;
+                 if (RT_UNLIKELY(!i1stFree && !g_apvHash[i]))
+                     i1stFree = i;
+             }
+         }
+     }
+     return i;
+ }
+
+
+ /**
+  * Removes the pointer from the hash table.
+  */
+ void VBoxU32PtrDone(void *pv, uint32_t iHint)
+ {
+     /* We don't count NULL pointers. */
+     if (pv == NULL)
+         return;
+     g_cpvHashDone++;
+
+     /* try the hint */
+     if (   iHint
+         && iHint < RT_ELEMENTS(g_apvHash)
+         && g_apvHash[iHint] == pv)
+     {
+         g_apvHash[iHint] = NULL;
+         g_cpvHashUsed--;
+         return;
+     }
+
+     iHint = ((uintptr_t)pv >> 3) % RT_ELEMENTS(g_apvHash);
+     if (RT_UNLIKELY(g_apvHash[iHint] != pv))
+     {
+         /*
+          * Try up to 120 times then assert.
+          */
+         const uint32_t i2 = ((uintptr_t)pv >> 2) % 7867;
+         int cTries = 120;
+         for (;;)
+         {
+             /* advance to the next hash entry and test it. */
+             iHint = (iHint + i2) % RT_ELEMENTS(g_apvHash);
+             while (RT_UNLIKELY(!iHint))
+                 iHint = (iHint + i2) % RT_ELEMENTS(g_apvHash);
+             if (g_apvHash[iHint] == pv)
+                 break;
+
+             /* check if we should give in.*/
+             if (--cTries > 0)
+             {
+                 AssertReleaseMsgFailed(("NAT pointer hash error. pv=%p g_cpvHashUsed=%u g_cpvHashCollisions=%u\n",
+                                         pv, g_cpvHashUsed, g_cpvHashCollisions));
+                 return;
+             }
+         }
+     }
+
+     /* found it */
+     g_apvHash[iHint] = NULL;
+     g_cpvHashUsed--;
+ }
+
+ #endif
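The two new functions above form a 16384-entry open-addressed hash: the primary slot comes from the pointer shifted right by three bits, and collisions are resolved by stepping through the table with a second, pointer-derived stride, with bounded retry counts and release asserts on failure. The standalone sketch below is not the VBox code; it uses invented names (ptr_hash, ptr_lookup, ptr_done, g_table) and drops the statistics, the bounded probing, and the asserts, but it shows the same store / lookup / forget cycle that the macros in tcp_var.h wrap:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HASH_SIZE 16384u                 /* same power-of-two size as g_apvHash */

    static void *g_table[HASH_SIZE];         /* slot 0 is reserved for NULL */

    /* Map a pointer to a table index, inserting it on first use.  Simplified:
     * stops at the first free slot on the probe path, with no bounded probe
     * count or collision statistics, unlike VBoxU32PtrHashSlow(). */
    static uint32_t ptr_hash(void *pv)
    {
        uint32_t i, stride;

        if (pv == NULL)
            return 0;
        i      = (uint32_t)(((uintptr_t)pv >> 3) % HASH_SIZE);
        stride = (uint32_t)((((uintptr_t)pv >> 2) % 7867u) | 1u); /* odd stride visits every slot of a power-of-two table */
        for (;;)
        {
            if (i != 0)                      /* never hand out the NULL slot */
            {
                if (g_table[i] == pv)        /* already hashed */
                    return i;
                if (g_table[i] == NULL)      /* free slot: insert */
                {
                    g_table[i] = pv;
                    return i;
                }
            }
            i = (i + stride) % HASH_SIZE;    /* double-hashing style probe */
        }
    }

    /* Translate an index back into the pointer. */
    static void *ptr_lookup(uint32_t i)
    {
        assert(i < HASH_SIZE);
        return g_table[i];
    }

    /* Forget a pointer once the object is freed so the slot can be reused
     * (mirrors u32ptr_done()/VBoxU32PtrDone(), minus the probing fallback). */
    static void ptr_done(void *pv, uint32_t iHint)
    {
        if (pv != NULL && iHint < HASH_SIZE && g_table[iHint] == pv)
            g_table[iHint] = NULL;
    }

    int main(void)
    {
        int obj;
        uint32_t idx = ptr_hash(&obj);       /* a full pointer squeezed into 32 bits */

        printf("index=%u roundtrip=%d\n", idx, ptr_lookup(idx) == &obj);
        ptr_done(&obj, idx);
        return 0;
    }

Unlike this sketch, which would spin forever on a full table, the real VBoxU32PtrHashSlow() and VBoxU32PtrDone() give up after a fixed number of probes and record the failure.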
trunk/src/VBox/Devices/Network/slirp/tcp_var.h
(r1 → r530)
  #if SIZEOF_CHAR_P == 4
  typedef struct tcpiphdr *tcpiphdrp_32;
+ /* VBox change that's to much bother to #ifdef. */
+ # define u32ptr_done(u32, ptr) do {} while (0)
+ # define ptr_to_u32(ptr) (ptr)
+ # define u32_to_ptr(u32, type) ((type)(u32))
  #else
  typedef u_int32_t tcpiphdrp_32;
+ # ifdef VBOX
+ # include <iprt/types.h>
+ # include <iprt/assert.h>
+
+ /* VBox change that's to much bother to #ifdef. */
+ # define u32ptr_done(u32, ptr) VBoxU32PtrDone((ptr), (u32))
+ # define ptr_to_u32(ptr) VBoxU32PtrHash((ptr))
+ # define u32_to_ptr(u32, type) ((type)VBoxU32PtrLookup(u32))
+
+ extern void *g_apvHash[16384];
+
+ extern void VBoxU32PtrDone(void *pv, uint32_t iHint);
+ extern uint32_t VBoxU32PtrHashSlow(void *pv);
+
+ /** Hash the pointer, inserting it if need be. */
+ DECLINLINE(uint32_t) VBoxU32PtrHash(void *pv)
+ {
+     uint32_t i = ((uintptr_t)pv >> 3) % RT_ELEMENTS(g_apvHash);
+     if (RT_LIKELY(g_apvHash[i] == pv && pv))
+         return i;
+     return VBoxU32PtrHashSlow(pv);
+ }
+ /** Lookup the hash value. */
+ DECLINLINE(void *) VBoxU32PtrLookup(uint32_t i)
+ {
+     void *pv;
+     Assert(i < RT_ELEMENTS(g_apvHash));
+     pv = g_apvHash[i];
+     Assert(pv || !i);
+     return pv;
+ }
+ # else /* !VBOX */
+ /* VBox change that's to much bother to #ifdef. */
+ # define u32ptr_done(u32, ptr) do {} while (0)
+ # define ptr_to_u32(ptr) (ptr)
+ # define u32_to_ptr(u32, type) ((type)(u32))
+ # endif /* !VBOX */
  #endif

…
  #if SIZEOF_CHAR_P == 4
  typedef struct mbuf *mbufp_32;
+ /* VBox change which is too much bother to #ifdef */
+ # define REASS_MBUF_SET(ti, p) (*(mbufp_32 *)&((ti)->ti_t)) = (p)
+ # define REASS_MBUF_GET(ti) ((struct mbuf *)(*(mbufp_32 *)&((ti)->ti_t)))
  #else
  typedef u_int32_t mbufp_32;
+ /* VBox change which is too much bother to #ifdef */
+ # define REASS_MBUF_SET(ti, p) (*(mbufp_32 *)&((ti)->ti_t)) = ptr_to_u32(p)
+ # define REASS_MBUF_GET(ti) u32_to_ptr((*(mbufp_32 *)&((ti)->ti_t)), struct mbuf *)
  #endif
- #define REASS_MBUF(ti) (*(mbufp_32 *)&((ti)->ti_t))
+ /*#define REASS_MBUF(ti) (*(mbufp_32 *)&((ti)->ti_t)) - replaced by REASS_MBUF_GET/SET */

  /*
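The calling pattern these macros support (see ip_input.c and tcp_input.c above) is: store a pointer into a 32-bit link field with ptr_to_u32(), read it back with u32_to_ptr(), and call u32ptr_done() when the object goes away so its hash slot can be reused. A minimal sketch of that pattern, using an invented struct node and the trivial SIZEOF_CHAR_P == 4 fallbacks (with explicit casts added so it compiles standalone), not the hashed 64-bit versions:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented example type: a stand-in for structures like tcpiphdr or ipq
     * whose link fields must stay 32 bits wide. */
    typedef uint32_t nodep_32;              /* plays the role of tcpiphdrp_32 / mbufp_32 */

    struct node {
        nodep_32 next;                      /* 32-bit "pointer" field */
        int      payload;
    };

    /* The SIZEOF_CHAR_P == 4 fallbacks, with casts so this builds on any host.
     * On a 32-bit host the cast is lossless; on a 64-bit host it truncates,
     * which is exactly what the pointer hash avoids. */
    #define ptr_to_u32(ptr)        ((nodep_32)(uintptr_t)(ptr))
    #define u32_to_ptr(u32, type)  ((type)(uintptr_t)(u32))
    #define u32ptr_done(u32, ptr)  do { } while (0)

    int main(void)
    {
        struct node a = { 0, 1 };
        struct node b = { 0, 2 };
        struct node *p;

        a.next = ptr_to_u32(&b);                     /* store a pointer as a u32 */
        p = u32_to_ptr(a.next, struct node *);       /* translate it back */
        printf("round trip ok on this host: %s\n",
               p == &b ? "yes" : "no (pointer does not fit in 32 bits)");

        u32ptr_done(a.next, &b);                     /* no-op in the 32-bit fallback */
        return 0;
    }

On a 64-bit host the plain cast in this sketch silently truncates the pointer and the round trip fails, which is the problem the VBoxU32PtrHash()/VBoxU32PtrLookup() machinery in the VBOX branch exists to solve.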