Changeset 16720 in vbox
- Timestamp:
- Feb 13, 2009 12:32:05 AM (16 years ago)
- Location:
- trunk/src/VBox/Runtime/r0drv/solaris/vbi
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/Runtime/r0drv/solaris/vbi/i86pc/os/vbi.c
r13323 r16720 55 55 #include <sys/sunddi.h> 56 56 #include <sys/modctl.h> 57 58 #include <sys/vbi.h> 59 60 /* 61 * If we are running on an old version of Solaris, then 62 * we have to use dl_lookup to find contig_free().57 #include <sys/machparam.h> 58 59 #include "vbi.h" 60 61 /* 62 * We have to use dl_lookup to find contig_free(). 63 63 */ 64 64 extern void *contig_alloc(size_t, ddi_dma_attr_t *, uintptr_t, int); … … 68 68 69 69 /* 70 * Workarounds for running on old versions of solaris with lower NCPU. 71 * If we detect this, the assumption is that NCPU was such that a cpuset_t 72 * is just a ulong_t 73 */ 74 static int use_old_xc_call = 0; 70 * Workarounds for running on old versions of solaris with different cross call 71 * interfaces. If we find xc_init_cpu() in the kernel, then just use the defined 72 * interfaces for xc_call() from the include file where the xc_call() 73 * interfaces just takes a pointer to a ulong_t array. The array must be long 74 * enough to hold "ncpus" bits at runtime. 75 76 * The reason for the hacks is that using the type "cpuset_t" is pretty much 77 * impossible from code built outside the Solaris source repository that wants 78 * to run on multiple releases of Solaris. 79 * 80 * For old style xc_call()s, 32 bit solaris and older 64 bit versions use 81 * "ulong_t" as cpuset_t. 82 * 83 * Later versions of 64 bit Solaris used: struct {ulong_t words[x];} 84 * where "x" depends on NCPU. 85 * 86 * We detect the difference in 64 bit support by checking the kernel value of 87 * max_cpuid, which always holds the compiled value of NCPU - 1. 88 * 89 * If Solaris increases NCPU to more than 256, this module will continue 90 * to work on all versions of Solaris as long as the number of installed 91 * CPUs in the machine is <= VBI_NCPU. 
If VBI_NCPU is increased, this code 92 * has to be re-written some to provide compatibility with older Solaris which 93 * expects cpuset_t to be based on NCPU==256 -- or we discontinue support 94 * of old Nevada/S10. 95 */ 96 static int use_old = 0; 97 static int use_old_with_ulong = 0; 75 98 static void (*p_xc_call)() = (void (*)())xc_call; 76 #pragma weak cpuset_all 77 #pragma weak cpuset_all_but 78 #pragma weak cpuset_only 79 99 100 #define VBI_NCPU 256 101 #define VBI_SET_WORDS (VBI_NCPU / (sizeof (ulong_t) * 8)) 102 typedef struct vbi_cpuset { 103 ulong_t words[VBI_SET_WORDS]; 104 } vbi_cpuset_t; 105 #define X_CALL_HIPRI (2) /* for old Solaris interface */ 106 107 /* 108 * module linkage stuff 109 */ 80 110 static struct modlmisc vbi_modlmisc = { 81 111 &mod_miscops, "VirtualBox Interfaces V3" … … 86 116 }; 87 117 118 extern uintptr_t kernelbase; 88 119 #define IS_KERNEL(v) ((uintptr_t)(v) >= kernelbase) 120 121 static int vbi_verbose = 0; 122 123 #define VBI_VERBOSE(msg) {if (vbi_verbose) cmn_err(CE_WARN, msg);} 89 124 90 125 int … … 95 130 /* 96 131 * Check to see if this version of virtualbox interface module will work 97 * with the kernel. The sizeof (cpuset_t) is problematic, as it changed 98 * with the change to NCPU in nevada build 87 and S10U6. 132 * with the kernel. 99 133 */ 100 if (max_cpuid + 1 != NCPU) 101 use_old_xc_call = 1; 134 if (kobj_getsymvalue("xc_init_cpu", 1) != NULL) { 135 /* 136 * Our bit vector storage needs to be large enough for the 137 * actual number of CPUs running in the system. 
138 */ 139 if (ncpus > VBI_NCPU) 140 return (EINVAL); 141 } else { 142 use_old = 1; 143 if (max_cpuid + 1 == sizeof(ulong_t) * 8) 144 use_old_with_ulong = 1; 145 else if (max_cpuid + 1 != VBI_NCPU) 146 return (EINVAL); /* cpuset_t size mismatch */ 147 } 102 148 103 149 /* … … 141 187 (uint64_t)0, /* high limit */ 142 188 (uint64_t)0xffffffff, /* counter limit */ 143 (uint64_t) MMU_PAGESIZE, /*alignment */144 (uint64_t) MMU_PAGESIZE, /*burst size */145 (uint64_t) MMU_PAGESIZE, /*effective DMA size */189 (uint64_t)PAGESIZE, /* pagesize alignment */ 190 (uint64_t)PAGESIZE, /* pagesize burst size */ 191 (uint64_t)PAGESIZE, /* pagesize effective DMA size */ 146 192 (uint64_t)0xffffffff, /* max DMA xfer size */ 147 193 (uint64_t)0xffffffff, /* segment boundary */ … … 158 204 void *ptr; 159 205 160 if ((size & MMU_PAGEOFFSET) != 0)206 if ((size & PAGEOFFSET) != 0) 161 207 return (NULL); 162 208 163 209 attr = base_attr; 164 210 attr.dma_attr_addr_hi = *phys; 165 ptr = contig_alloc(size, &attr, MMU_PAGESIZE, 1); 166 167 if (ptr == NULL) 211 ptr = contig_alloc(size, &attr, PAGESIZE, 1); 212 213 if (ptr == NULL) { 214 VBI_VERBOSE("vbi_contig_alloc() failure"); 168 215 return (NULL); 216 } 169 217 170 218 pfn = hat_getpfnum(kas.a_hat, (caddr_t)ptr); 171 219 if (pfn == PFN_INVALID) 172 220 panic("vbi_contig_alloc(): hat_getpfnum() failed\n"); 173 *phys = (uint64_t)pfn << MMU_PAGESHIFT;221 *phys = (uint64_t)pfn << PAGESHIFT; 174 222 return (ptr); 175 223 } … … 186 234 caddr_t va; 187 235 188 if ((pa & MMU_PAGEOFFSET) || (size & MMU_PAGEOFFSET)) 236 if ((pa & PAGEOFFSET) || (size & PAGEOFFSET)) { 237 VBI_VERBOSE("vbi_kernel_map() bad pa or size"); 189 238 return (NULL); 239 } 190 240 191 241 va = vmem_alloc(heap_arena, size, VM_SLEEP); 192 242 193 hat_devload(kas.a_hat, va, size, (pfn_t)(pa >> MMU_PAGESHIFT),243 hat_devload(kas.a_hat, va, size, (pfn_t)(pa >> PAGESHIFT), 194 244 prot, HAT_LOAD | HAT_LOAD_LOCK | HAT_UNORDERED_OK); 195 245 … … 379 429 vbi_max_cpu_id(void) 380 430 { 
381 return ( NCPU - 1);431 return (max_cpuid); 382 432 } 383 433 … … 385 435 vbi_cpu_maxcount(void) 386 436 { 387 return ( NCPU);437 return (max_cpuid + 1); 388 438 } 389 439 … … 420 470 vbi_execute_on_all(void *func, void *arg) 421 471 { 422 cpuset_t set; 423 ulong_t hack_set; 472 vbi_cpuset_t set; 424 473 int i; 425 474 475 for (i = 0; i < VBI_SET_WORDS; ++i) 476 set.words[i] = (ulong_t)-1L; 477 if (use_old) { 478 if (use_old_with_ulong) { 479 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, 480 set.words[0], (xc_func_t)func); 481 } else { 482 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, 483 set, (xc_func_t)func); 484 } 485 } else { 486 xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func); 487 } 488 } 489 490 void 491 vbi_execute_on_others(void *func, void *arg) 492 { 493 vbi_cpuset_t set; 494 int i; 495 496 for (i = 0; i < VBI_SET_WORDS; ++i) 497 set.words[i] = (ulong_t)-1L; 498 BT_CLEAR(set.words, vbi_cpu_id()); 499 if (use_old) { 500 if (use_old_with_ulong) { 501 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, 502 set.words[0], (xc_func_t)func); 503 } else { 504 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, 505 set, (xc_func_t)func); 506 } 507 } else { 508 xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func); 509 } 510 } 511 512 void 513 vbi_execute_on_one(void *func, void *arg, int c) 514 { 515 vbi_cpuset_t set; 516 int i; 517 518 for (i = 0; i < VBI_SET_WORDS; ++i) 519 set.words[i] = 0; 520 BT_SET(set.words, vbi_cpu_id()); 521 if (use_old) { 522 if (use_old_with_ulong) { 523 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, 524 set.words[0], (xc_func_t)func); 525 } else { 526 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, 527 set, (xc_func_t)func); 528 } 529 } else { 530 xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func); 531 } 532 } 533 534 int 535 vbi_lock_va(void *addr, size_t len, void **handle) 536 { 537 faultcode_t err; 538 426 539 /* 427 * hack for a kernel compiled with the different NCPU than this module540 * kernel mappings on x86 are 
always locked, so only handle user. 428 541 */ 429 ASSERT(curthread->t_preempt >= 1); 430 if (use_old_xc_call) { 431 hack_set = 0; 432 for (i = 0; i < ncpus; ++i) 433 hack_set |= 1ul << i; 434 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, hack_set, 435 (xc_func_t)func); 436 } else { 437 CPUSET_ALL(set); 438 xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, set, 439 (xc_func_t)func); 440 } 441 } 442 443 void 444 vbi_execute_on_others(void *func, void *arg) 445 { 446 cpuset_t set; 447 ulong_t hack_set; 448 int i; 449 450 /* 451 * hack for a kernel compiled with the different NCPU than this module 452 */ 453 ASSERT(curthread->t_preempt >= 1); 454 if (use_old_xc_call) { 455 hack_set = 0; 456 for (i = 0; i < ncpus; ++i) { 457 if (i != CPU->cpu_id) 458 hack_set |= 1ul << i; 542 *handle = NULL; 543 if (!IS_KERNEL(addr)) { 544 err = as_fault(curproc->p_as->a_hat, curproc->p_as, 545 (caddr_t)addr, len, F_SOFTLOCK, S_WRITE); 546 if (err != 0) { 547 VBI_VERBOSE("vbi_lock_va() failed to lock"); 548 return (-1); 459 549 } 460 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, hack_set, 461 (xc_func_t)func); 462 } else { 463 CPUSET_ALL_BUT(set, CPU->cpu_id); 464 xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, set, 465 (xc_func_t)func); 466 } 467 } 468 469 void 470 vbi_execute_on_one(void *func, void *arg, int c) 471 { 472 cpuset_t set; 473 ulong_t hack_set; 474 475 /* 476 * hack for a kernel compiled with the different NCPU than this module 477 */ 478 ASSERT(curthread->t_preempt >= 1); 479 if (use_old_xc_call) { 480 hack_set = 1ul << c; 481 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, hack_set, 482 (xc_func_t)func); 483 } else { 484 CPUSET_ONLY(set, c); 485 xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, set, 486 (xc_func_t)func); 487 } 488 } 489 490 int 491 vbi_lock_va(void *addr, size_t len, void **handle) 492 { 493 page_t **ppl; 494 int rc = 0; 495 496 if (IS_KERNEL(addr)) { 497 /* kernel mappings on x86 are always locked */ 498 *handle = NULL; 499 } else { 500 rc = as_pagelock(curproc->p_as, &ppl, 
(caddr_t)addr, len, 501 S_WRITE); 502 if (rc != 0) 503 return (rc); 504 *handle = (void *)ppl; 505 } 506 return (rc); 507 } 508 550 } 551 return (0); 552 } 553 554 /*ARGSUSED*/ 509 555 void 510 556 vbi_unlock_va(void *addr, size_t len, void *handle) 511 557 { 512 page_t **ppl = (page_t **)handle; 513 514 if (IS_KERNEL(addr)) 515 ASSERT(handle == NULL); 516 else 517 as_pageunlock(curproc->p_as, ppl, (caddr_t)addr, len, S_WRITE); 558 if (!IS_KERNEL(addr)) 559 as_fault(curproc->p_as->a_hat, curproc->p_as, (caddr_t)addr, 560 len, F_SOFTUNLOCK, S_WRITE); 518 561 } 519 562 … … 529 572 else 530 573 hat = curproc->p_as->a_hat; 531 pfn = hat_getpfnum(hat, (caddr_t)(v & MMU_PAGEMASK));574 pfn = hat_getpfnum(hat, (caddr_t)(v & PAGEMASK)); 532 575 if (pfn == PFN_INVALID) 533 576 return (-(uint64_t)1); 534 return (((uint64_t)pfn << MMU_PAGESHIFT) | (v & MMU_PAGEOFFSET));577 return (((uint64_t)pfn << PAGESHIFT) | (v & PAGEOFFSET)); 535 578 } 536 579 … … 569 612 */ 570 613 va = seg->s_base; 571 ASSERT(((uintptr_t)va & MMU_PAGEOFFSET) == 0); 572 pgcnt = seg->s_size >> MMU_PAGESHIFT; 573 for (p = 0; p < pgcnt; ++p, va += MMU_PAGESIZE) { 574 ASSERT((a->palist[p] & MMU_PAGEOFFSET) == 0); 614 pgcnt = seg->s_size >> PAGESHIFT; 615 for (p = 0; p < pgcnt; ++p, va += PAGESIZE) { 575 616 hat_devload(as->a_hat, va, 576 MMU_PAGESIZE, a->palist[p] >> MMU_PAGESHIFT,617 PAGESIZE, a->palist[p] >> PAGESHIFT, 577 618 data->prot | HAT_UNORDERED_OK, HAT_LOAD | HAT_LOAD_LOCK); 578 619 } … … 598 639 } 599 640 600 /*ARGSUSED*/601 641 static int 602 642 segvbi_unmap(struct seg *seg, caddr_t addr, size_t len) 603 643 { 604 644 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size || 605 (len & MMU_PAGEOFFSET) || ((uintptr_t)addr & MMU_PAGEOFFSET))645 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) 606 646 panic("segvbi_unmap"); 607 647 … … 626 666 * We never demand-fault for seg_vbi. 
627 667 */ 628 /*ARGSUSED*/629 668 static int 630 669 segvbi_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, … … 634 673 } 635 674 636 /*ARGSUSED*/637 675 static int 638 676 segvbi_faulta(struct seg *seg, caddr_t addr) … … 641 679 } 642 680 643 /*ARGSUSED*/644 681 static int 645 682 segvbi_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) … … 648 685 } 649 686 650 /*ARGSUSED*/651 687 static int 652 688 segvbi_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) … … 655 691 } 656 692 657 /*ARGSUSED*/658 693 static int 659 694 segvbi_kluster(struct seg *seg, caddr_t addr, ssize_t delta) … … 662 697 } 663 698 664 /*ARGSUSED*/665 699 static int 666 700 segvbi_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) … … 669 703 } 670 704 671 /*ARGSUSED*/672 705 static size_t 673 706 segvbi_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) … … 675 708 size_t v; 676 709 677 for (v = 0, len = (len + MMU_PAGEOFFSET) & MMU_PAGEMASK; len;678 len -= MMU_PAGESIZE, v += MMU_PAGESIZE)710 for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len; 711 len -= PAGESIZE, v += PAGESIZE) 679 712 *vec++ = 1; 680 713 return (v); 681 714 } 682 715 683 /*ARGSUSED*/684 716 static int 685 717 segvbi_lockop(struct seg *seg, caddr_t addr, … … 689 721 } 690 722 691 /*ARGSUSED*/692 723 static int 693 724 segvbi_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) … … 703 734 } 704 735 705 /*ARGSUSED*/706 736 static int 707 737 segvbi_gettype(struct seg *seg, caddr_t addr) … … 712 742 static vnode_t vbivp; 713 743 714 /*ARGSUSED*/715 744 static int 716 745 segvbi_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) … … 720 749 } 721 750 722 /*ARGSUSED*/723 751 static int 724 752 segvbi_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) … … 727 755 } 728 756 729 /*ARGSUSED*/730 757 static void 731 758 segvbi_dump(struct seg *seg) 732 759 {} 733 760 734 /*ARGSUSED*/735 761 static int 736 762 
segvbi_pagelock(struct seg *seg, caddr_t addr, size_t len, … … 740 766 } 741 767 742 /*ARGSUSED*/743 768 static int 744 769 segvbi_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) … … 747 772 } 748 773 749 /*ARGSUSED*/750 774 static int 751 775 segvbi_getmemid(struct seg *seg, caddr_t addr, memid_t *memid) … … 754 778 } 755 779 756 /*ARGSUSED*/757 780 static lgrp_mem_policy_info_t * 758 781 segvbi_getpolicy(struct seg *seg, caddr_t addr) … … 761 784 } 762 785 763 /*ARGSUSED*/764 786 static int 765 787 segvbi_capable(struct seg *seg, segcapability_t capability) … … 811 833 as_rangelock(as); 812 834 map_addr(va, len, 0, 0, MAP_SHARED); 813 ASSERT(((uintptr_t)*va & MMU_PAGEOFFSET) == 0);814 ASSERT((len & MMU_PAGEOFFSET) == 0);815 ASSERT(len != 0);816 835 if (*va != NULL) 817 836 error = as_map(as, *va, len, segvbi_create, &args); 818 837 else 819 838 error = ENOMEM; 839 if (error) 840 VBI_VERBOSE("vbi_user_map() failed"); 820 841 as_rangeunlock(as); 821 842 return (error); … … 906 927 { 907 928 vbi_stimer_t *t = kmem_zalloc(sizeof (*t), KM_SLEEP); 908 909 ASSERT(when < INT64_MAX);910 ASSERT(interval < INT64_MAX);911 ASSERT(interval + when < INT64_MAX);912 ASSERT(on_cpu == VBI_ANY_CPU || on_cpu < ncpus);913 929 914 930 t->s_handler.cyh_func = vbi_stimer_func; … … 942 958 vbi_stimer_end(vbi_stimer_t *t) 943 959 { 944 ASSERT(t->s_cyclic != CYCLIC_NONE);945 960 mutex_enter(&cpu_lock); 946 961 cyclic_remove(t->s_cyclic); … … 966 981 { 967 982 vbi_gtimer_t *t = arg; 968 t->g_func(t->g_arg, ++t->g_counters[ CPU->cpu_id]);983 t->g_func(t->g_arg, ++t->g_counters[vbi_cpu_id()]); 969 984 } 970 985 … … 1007 1022 return (NULL); 1008 1023 1009 ASSERT(when < INT64_MAX);1010 ASSERT(interval < INT64_MAX);1011 ASSERT(interval + when < INT64_MAX);1012 1013 1024 t = kmem_zalloc(sizeof (*t), KM_SLEEP); 1014 1025 t->g_counters = kmem_zalloc(ncpus * sizeof (uint64_t), KM_SLEEP); … … 1032 1043 vbi_gtimer_end(vbi_gtimer_t *t) 1033 1044 { 1034 ASSERT(t->g_cyclic != 
CYCLIC_NONE);1035 1045 mutex_enter(&cpu_lock); 1036 1046 cyclic_remove(t->g_cyclic); -
trunk/src/VBox/Runtime/r0drv/solaris/vbi/i86pc/sys/vbi.h
r12945 r16720 37 37 * to hypervisor boundary. (void *) is for handles and function and other 38 38 * pointers. uint64 for physical addresses, size_t and int elsewhere. 39 * The goal is for this module to eventually be part of OpenSolaris once 40 * interfaces have become more stable. 39 41 */ 40 42
Note:
See TracChangeset
for help on using the changeset viewer.