VirtualBox

Changeset 16720 in vbox


Ignore:
Timestamp:
Feb 13, 2009 12:32:05 AM (16 years ago)
Author:
vboxsync
Message:

handle upcoming xc_call() change and can build as part of VirtualBox

Location:
trunk/src/VBox/Runtime/r0drv/solaris/vbi
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Runtime/r0drv/solaris/vbi/i86pc/os/vbi.c

    r13323 r16720  
    5555#include <sys/sunddi.h>
    5656#include <sys/modctl.h>
    57 
    58 #include <sys/vbi.h>
    59 
    60 /*
    61  * If we are running on an old version of Solaris, then
    62  * we have to use dl_lookup to find contig_free().
     57#include <sys/machparam.h>
     58
     59#include "vbi.h"
     60
     61/*
     62 * We have to use dl_lookup to find contig_free().
    6363 */
    6464extern void *contig_alloc(size_t, ddi_dma_attr_t *, uintptr_t, int);
     
    6868
    6969/*
    70  * Workarounds for running on old versions of solaris with lower NCPU.
    71  * If we detect this, the assumption is that NCPU was such that a cpuset_t
    72  * is just a ulong_t
    73  */
    74 static int use_old_xc_call = 0;
     70 * Workarounds for running on old versions of solaris with different cross call
     71 * interfaces. If we find xc_init_cpu() in the kernel, then just use the defined
     72 * interfaces for xc_call() from the include file where the xc_call()
     73 * interfaces just takes a pointer to a ulong_t array. The array must be long
     74 * enough to hold "ncpus" bits at runtime.
     75
     76 * The reason for the hacks is that using the type "cpuset_t" is pretty much
     77 * impossible from code built outside the Solaris source repository that wants
     78 * to run on multiple releases of Solaris.
     79 *
     80 * For old style xc_call()s, 32 bit solaris and older 64 bit versions use
     81 * "ulong_t" as cpuset_t.
     82 *
     83 * Later versions of 64 bit Solaris used: struct {ulong_t words[x];}
     84 * where "x" depends on NCPU.
     85 *
     86 * We detect the difference in 64 bit support by checking the kernel value of
     87 * max_cpuid, which always holds the compiled value of NCPU - 1.
     88 *
     89 * If Solaris increases NCPU to more than 256, this module will continue
     90 * to work on all versions of Solaris as long as the number of installed
     91 * CPUs in the machine is <= VBI_NCPU. If VBI_NCPU is increased, this code
     92 * has to be re-written some to provide compatibility with older Solaris which
     93 * expects cpuset_t to be based on NCPU==256 -- or we discontinue support
     94 * of old Nevada/S10.
     95 */
     96static int use_old = 0;
     97static int use_old_with_ulong = 0;
    7598static void (*p_xc_call)() = (void (*)())xc_call;
    76 #pragma weak cpuset_all
    77 #pragma weak cpuset_all_but
    78 #pragma weak cpuset_only
    79 
     99
     100#define VBI_NCPU        256
     101#define VBI_SET_WORDS   (VBI_NCPU / (sizeof (ulong_t) * 8))
     102typedef struct vbi_cpuset {
     103        ulong_t words[VBI_SET_WORDS];
     104} vbi_cpuset_t;
     105#define X_CALL_HIPRI    (2)     /* for old Solaris interface */
     106
     107/*
     108 * module linkage stuff
     109 */
    80110static struct modlmisc vbi_modlmisc = {
    81111        &mod_miscops, "VirtualBox Interfaces V3"
     
    86116};
    87117
     118extern uintptr_t kernelbase;
    88119#define IS_KERNEL(v)    ((uintptr_t)(v) >= kernelbase)
     120
     121static int vbi_verbose = 0;
     122
     123#define VBI_VERBOSE(msg) {if (vbi_verbose) cmn_err(CE_WARN, msg);}
    89124
    90125int
     
    95130        /*
    96131         * Check to see if this version of virtualbox interface module will work
    97          * with the kernel. The sizeof (cpuset_t) is problematic, as it changed
    98          * with the change to NCPU in nevada build 87 and S10U6.
     132         * with the kernel.
    99133         */
    100         if (max_cpuid + 1 != NCPU)
    101                 use_old_xc_call = 1;
     134        if (kobj_getsymvalue("xc_init_cpu", 1) != NULL) {
     135                /*
     136                 * Our bit vector storage needs to be large enough for the
     137                 * actual number of CPUs running in the sytem.
     138                 */
     139                if (ncpus > VBI_NCPU)
     140                        return (EINVAL);
     141        } else {
     142                use_old = 1;
     143                if (max_cpuid + 1 == sizeof(ulong_t) * 8)
     144                        use_old_with_ulong = 1;
     145                else if (max_cpuid + 1 != VBI_NCPU)
     146                        return (EINVAL);        /* cpuset_t size mismatch */
     147        }
    102148
    103149        /*
     
    141187        (uint64_t)0,            /* high limit */
    142188        (uint64_t)0xffffffff,   /* counter limit */
    143         (uint64_t)MMU_PAGESIZE, /* alignment */
    144         (uint64_t)MMU_PAGESIZE, /* burst size */
    145         (uint64_t)MMU_PAGESIZE, /* effective DMA size */
     189        (uint64_t)PAGESIZE,     /* pagesize alignment */
     190        (uint64_t)PAGESIZE,     /* pagesize burst size */
     191        (uint64_t)PAGESIZE,     /* pagesize effective DMA size */
    146192        (uint64_t)0xffffffff,   /* max DMA xfer size */
    147193        (uint64_t)0xffffffff,   /* segment boundary */
     
    158204        void *ptr;
    159205
    160         if ((size & MMU_PAGEOFFSET) != 0)
     206        if ((size & PAGEOFFSET) != 0)
    161207                return (NULL);
    162208
    163209        attr = base_attr;
    164210        attr.dma_attr_addr_hi = *phys;
    165         ptr = contig_alloc(size, &attr, MMU_PAGESIZE, 1);
    166 
    167         if (ptr == NULL)
     211        ptr = contig_alloc(size, &attr, PAGESIZE, 1);
     212
     213        if (ptr == NULL) {
     214                VBI_VERBOSE("vbi_contig_alloc() failure");
    168215                return (NULL);
     216        }
    169217
    170218        pfn = hat_getpfnum(kas.a_hat, (caddr_t)ptr);
    171219        if (pfn == PFN_INVALID)
    172220                panic("vbi_contig_alloc(): hat_getpfnum() failed\n");
    173         *phys = (uint64_t)pfn << MMU_PAGESHIFT;
     221        *phys = (uint64_t)pfn << PAGESHIFT;
    174222        return (ptr);
    175223}
     
    186234        caddr_t va;
    187235
    188         if ((pa & MMU_PAGEOFFSET) || (size & MMU_PAGEOFFSET))
     236        if ((pa & PAGEOFFSET) || (size & PAGEOFFSET)) {
     237                VBI_VERBOSE("vbi_kernel_map() bad pa or size");
    189238                return (NULL);
     239        }
    190240
    191241        va = vmem_alloc(heap_arena, size, VM_SLEEP);
    192242
    193         hat_devload(kas.a_hat, va, size, (pfn_t)(pa >> MMU_PAGESHIFT),
     243        hat_devload(kas.a_hat, va, size, (pfn_t)(pa >> PAGESHIFT),
    194244            prot, HAT_LOAD | HAT_LOAD_LOCK | HAT_UNORDERED_OK);
    195245
     
    379429vbi_max_cpu_id(void)
    380430{
    381         return (NCPU - 1);
     431        return (max_cpuid);
    382432}
    383433
     
    385435vbi_cpu_maxcount(void)
    386436{
    387         return (NCPU);
     437        return (max_cpuid + 1);
    388438}
    389439
     
    420470vbi_execute_on_all(void *func, void *arg)
    421471{
    422         cpuset_t set;
    423         ulong_t hack_set;
     472        vbi_cpuset_t set;
    424473        int i;
    425474
     475        for (i = 0; i < VBI_SET_WORDS; ++i)
     476                set.words[i] = (ulong_t)-1L;
     477        if (use_old) {
     478                if (use_old_with_ulong) {
     479                        p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
     480                            set.words[0], (xc_func_t)func);
     481                } else {
     482                        p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
     483                            set, (xc_func_t)func);
     484                }
     485        } else {
     486                xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
     487        }
     488}
     489
     490void
     491vbi_execute_on_others(void *func, void *arg)
     492{
     493        vbi_cpuset_t set;
     494        int i;
     495
     496        for (i = 0; i < VBI_SET_WORDS; ++i)
     497                set.words[i] = (ulong_t)-1L;
     498        BT_CLEAR(set.words, vbi_cpu_id());
     499        if (use_old) {
     500                if (use_old_with_ulong) {
     501                        p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
     502                            set.words[0], (xc_func_t)func);
     503                } else {
     504                        p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
     505                            set, (xc_func_t)func);
     506                }
     507        } else {
     508                xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
     509        }
     510}
     511
     512void
     513vbi_execute_on_one(void *func, void *arg, int c)
     514{
     515        vbi_cpuset_t set;
     516        int i;
     517
     518        for (i = 0; i < VBI_SET_WORDS; ++i)
     519                set.words[i] = 0;
     520        BT_SET(set.words, vbi_cpu_id());
     521        if (use_old) {
     522                if (use_old_with_ulong) {
     523                        p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
     524                            set.words[0], (xc_func_t)func);
     525                } else {
     526                        p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
     527                            set, (xc_func_t)func);
     528                }
     529        } else {
     530                xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
     531        }
     532}
     533
     534int
     535vbi_lock_va(void *addr, size_t len, void **handle)
     536{
     537        faultcode_t err;
     538
    426539        /*
    427          * hack for a kernel compiled with the different NCPU than this module
     540         * kernel mappings on x86 are always locked, so only handle user.
    428541         */
    429         ASSERT(curthread->t_preempt >= 1);
    430         if (use_old_xc_call) {
    431                 hack_set = 0;
    432                 for (i = 0; i < ncpus; ++i)
    433                         hack_set |= 1ul << i;
    434                 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, hack_set,
    435                     (xc_func_t)func);
    436         } else {
    437                 CPUSET_ALL(set);
    438                 xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, set,
    439                     (xc_func_t)func);
    440         }
    441 }
    442 
    443 void
    444 vbi_execute_on_others(void *func, void *arg)
    445 {
    446         cpuset_t set;
    447         ulong_t hack_set;
    448         int i;
    449 
    450         /*
    451          * hack for a kernel compiled with the different NCPU than this module
    452          */
    453         ASSERT(curthread->t_preempt >= 1);
    454         if (use_old_xc_call) {
    455                 hack_set = 0;
    456                 for (i = 0; i < ncpus; ++i) {
    457                         if (i != CPU->cpu_id)
    458                                 hack_set |= 1ul << i;
     542        *handle = NULL;
     543        if (!IS_KERNEL(addr)) {
     544                err = as_fault(curproc->p_as->a_hat, curproc->p_as,
     545                    (caddr_t)addr, len, F_SOFTLOCK, S_WRITE);
     546                if (err != 0) {
     547                        VBI_VERBOSE("vbi_lock_va() failed to lock");
     548                        return (-1);
    459549                }
    460                 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, hack_set,
    461                     (xc_func_t)func);
    462         } else {
    463                 CPUSET_ALL_BUT(set, CPU->cpu_id);
    464                 xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, set,
    465                     (xc_func_t)func);
    466         }
    467 }
    468 
    469 void
    470 vbi_execute_on_one(void *func, void *arg, int c)
    471 {
    472         cpuset_t set;
    473         ulong_t hack_set;
    474 
    475         /*
    476          * hack for a kernel compiled with the different NCPU than this module
    477          */
    478         ASSERT(curthread->t_preempt >= 1);
    479         if (use_old_xc_call) {
    480                 hack_set = 1ul << c;
    481                 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, hack_set,
    482                     (xc_func_t)func);
    483         } else {
    484                 CPUSET_ONLY(set, c);
    485                 xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, set,
    486                     (xc_func_t)func);
    487         }
    488 }
    489 
    490 int
    491 vbi_lock_va(void *addr, size_t len, void **handle)
    492 {
    493         page_t **ppl;
    494         int rc = 0;
    495 
    496         if (IS_KERNEL(addr)) {
    497                 /* kernel mappings on x86 are always locked */
    498                 *handle = NULL;
    499         } else {
    500                 rc = as_pagelock(curproc->p_as, &ppl, (caddr_t)addr, len,
    501                     S_WRITE);
    502                 if (rc != 0)
    503                         return (rc);
    504                 *handle = (void *)ppl;
    505         }
    506         return (rc);
    507 }
    508 
     550        }
     551        return (0);
     552}
     553
     554/*ARGSUSED*/
    509555void
    510556vbi_unlock_va(void *addr, size_t len, void *handle)
    511557{
    512         page_t **ppl = (page_t **)handle;
    513 
    514         if (IS_KERNEL(addr))
    515                 ASSERT(handle == NULL);
    516         else
    517                 as_pageunlock(curproc->p_as, ppl, (caddr_t)addr, len, S_WRITE);
     558        if (!IS_KERNEL(addr))
     559                as_fault(curproc->p_as->a_hat, curproc->p_as, (caddr_t)addr,
     560                    len, F_SOFTUNLOCK, S_WRITE);
    518561}
    519562
     
    529572        else
    530573                hat = curproc->p_as->a_hat;
    531         pfn = hat_getpfnum(hat, (caddr_t)(v & MMU_PAGEMASK));
     574        pfn = hat_getpfnum(hat, (caddr_t)(v & PAGEMASK));
    532575        if (pfn == PFN_INVALID)
    533576                return (-(uint64_t)1);
    534         return (((uint64_t)pfn << MMU_PAGESHIFT) | (v & MMU_PAGEOFFSET));
     577        return (((uint64_t)pfn << PAGESHIFT) | (v & PAGEOFFSET));
    535578}
    536579
     
    569612         */
    570613        va = seg->s_base;
    571         ASSERT(((uintptr_t)va & MMU_PAGEOFFSET) == 0);
    572         pgcnt = seg->s_size >> MMU_PAGESHIFT;
    573         for (p = 0; p < pgcnt; ++p, va += MMU_PAGESIZE) {
    574                 ASSERT((a->palist[p] & MMU_PAGEOFFSET) == 0);
     614        pgcnt = seg->s_size >> PAGESHIFT;
     615        for (p = 0; p < pgcnt; ++p, va += PAGESIZE) {
    575616                hat_devload(as->a_hat, va,
    576                     MMU_PAGESIZE, a->palist[p] >> MMU_PAGESHIFT,
     617                    PAGESIZE, a->palist[p] >> PAGESHIFT,
    577618                    data->prot | HAT_UNORDERED_OK, HAT_LOAD | HAT_LOAD_LOCK);
    578619        }
     
    598639}
    599640
    600 /*ARGSUSED*/
    601641static int
    602642segvbi_unmap(struct seg *seg, caddr_t addr, size_t len)
    603643{
    604644        if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
    605             (len & MMU_PAGEOFFSET) || ((uintptr_t)addr & MMU_PAGEOFFSET))
     645            (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
    606646                panic("segvbi_unmap");
    607647
     
    626666 * We never demand-fault for seg_vbi.
    627667 */
    628 /*ARGSUSED*/
    629668static int
    630669segvbi_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
     
    634673}
    635674
    636 /*ARGSUSED*/
    637675static int
    638676segvbi_faulta(struct seg *seg, caddr_t addr)
     
    641679}
    642680
    643 /*ARGSUSED*/
    644681static int
    645682segvbi_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
     
    648685}
    649686
    650 /*ARGSUSED*/
    651687static int
    652688segvbi_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
     
    655691}
    656692
    657 /*ARGSUSED*/
    658693static int
    659694segvbi_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
     
    662697}
    663698
    664 /*ARGSUSED*/
    665699static int
    666700segvbi_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
     
    669703}
    670704
    671 /*ARGSUSED*/
    672705static size_t
    673706segvbi_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
     
    675708        size_t v;
    676709
    677         for (v = 0, len = (len + MMU_PAGEOFFSET) & MMU_PAGEMASK; len;
    678             len -= MMU_PAGESIZE, v += MMU_PAGESIZE)
     710        for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len;
     711            len -= PAGESIZE, v += PAGESIZE)
    679712                *vec++ = 1;
    680713        return (v);
    681714}
    682715
    683 /*ARGSUSED*/
    684716static int
    685717segvbi_lockop(struct seg *seg, caddr_t addr,
     
    689721}
    690722
    691 /*ARGSUSED*/
    692723static int
    693724segvbi_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
     
    703734}
    704735
    705 /*ARGSUSED*/
    706736static int
    707737segvbi_gettype(struct seg *seg, caddr_t addr)
     
    712742static vnode_t vbivp;
    713743
    714 /*ARGSUSED*/
    715744static int
    716745segvbi_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
     
    720749}
    721750
    722 /*ARGSUSED*/
    723751static int
    724752segvbi_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
     
    727755}
    728756
    729 /*ARGSUSED*/
    730757static void
    731758segvbi_dump(struct seg *seg)
    732759{}
    733760
    734 /*ARGSUSED*/
    735761static int
    736762segvbi_pagelock(struct seg *seg, caddr_t addr, size_t len,
     
    740766}
    741767
    742 /*ARGSUSED*/
    743768static int
    744769segvbi_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
     
    747772}
    748773
    749 /*ARGSUSED*/
    750774static int
    751775segvbi_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
     
    754778}
    755779
    756 /*ARGSUSED*/
    757780static lgrp_mem_policy_info_t *
    758781segvbi_getpolicy(struct seg *seg, caddr_t addr)
     
    761784}
    762785
    763 /*ARGSUSED*/
    764786static int
    765787segvbi_capable(struct seg *seg, segcapability_t capability)
     
    811833        as_rangelock(as);
    812834        map_addr(va, len, 0, 0, MAP_SHARED);
    813         ASSERT(((uintptr_t)*va & MMU_PAGEOFFSET) == 0);
    814         ASSERT((len & MMU_PAGEOFFSET) == 0);
    815         ASSERT(len != 0);
    816835        if (*va != NULL)
    817836                error = as_map(as, *va, len, segvbi_create, &args);
    818837        else
    819838                error = ENOMEM;
     839        if (error)
     840                VBI_VERBOSE("vbi_user_map() failed");
    820841        as_rangeunlock(as);
    821842        return (error);
     
    906927{
    907928        vbi_stimer_t *t = kmem_zalloc(sizeof (*t), KM_SLEEP);
    908 
    909         ASSERT(when < INT64_MAX);
    910         ASSERT(interval < INT64_MAX);
    911         ASSERT(interval + when < INT64_MAX);
    912         ASSERT(on_cpu == VBI_ANY_CPU || on_cpu < ncpus);
    913929
    914930        t->s_handler.cyh_func = vbi_stimer_func;
     
    942958vbi_stimer_end(vbi_stimer_t *t)
    943959{
    944         ASSERT(t->s_cyclic != CYCLIC_NONE);
    945960        mutex_enter(&cpu_lock);
    946961        cyclic_remove(t->s_cyclic);
     
    966981{
    967982        vbi_gtimer_t *t = arg;
    968         t->g_func(t->g_arg, ++t->g_counters[CPU->cpu_id]);
     983        t->g_func(t->g_arg, ++t->g_counters[vbi_cpu_id()]);
    969984}
    970985
     
    10071022                return (NULL);
    10081023
    1009         ASSERT(when < INT64_MAX);
    1010         ASSERT(interval < INT64_MAX);
    1011         ASSERT(interval + when < INT64_MAX);
    1012 
    10131024        t = kmem_zalloc(sizeof (*t), KM_SLEEP);
    10141025        t->g_counters = kmem_zalloc(ncpus * sizeof (uint64_t), KM_SLEEP);
     
    10321043vbi_gtimer_end(vbi_gtimer_t *t)
    10331044{
    1034         ASSERT(t->g_cyclic != CYCLIC_NONE);
    10351045        mutex_enter(&cpu_lock);
    10361046        cyclic_remove(t->g_cyclic);
  • trunk/src/VBox/Runtime/r0drv/solaris/vbi/i86pc/sys/vbi.h

    r12945 r16720  
    3737 * to hypervisor boundary. (void *) is for handles and function and other
    3838 * pointers. uint64 for physical addresses, size_t and int elsewhere.
     39 * The goal is for this module to eventually be part of OpenSolaris once
     40 * interfaces have become more stable.
    3941 */
    4042
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette