VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/vbi/i86pc/os/vbi.c@ 19998

Last change on this file since 19998 was 19998, checked in by vboxsync, 16 years ago

Solaris/vbi: vbi_execute_on_one fix. New vbi binaries.

  • Property svn:eol-style set to native
File size: 21.3 KB
Line 
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26/*
27 * Private interfaces for VirtualBox access to Solaris kernel internal
28 * facilities.
29 *
30 * See sys/vbi.h for what each function does.
31 */
32
33#include <sys/kmem.h>
34#include <sys/types.h>
35#include <sys/mman.h>
36#include <sys/thread.h>
37#include <sys/mutex.h>
38#include <sys/condvar.h>
39#include <sys/sdt.h>
40#include <sys/schedctl.h>
41#include <sys/time.h>
42#include <sys/sysmacros.h>
43#include <sys/cmn_err.h>
44#include <sys/vmsystm.h>
45#include <sys/cyclic.h>
46#include <sys/class.h>
47#include <sys/cpuvar.h>
48#include <sys/kobj.h>
49#include <sys/x_call.h>
50#include <sys/x86_archext.h>
51#include <vm/hat.h>
52#include <vm/seg_vn.h>
53#include <vm/seg_kmem.h>
54#include <sys/ddi.h>
55#include <sys/sunddi.h>
56#include <sys/modctl.h>
57#include <sys/machparam.h>
58
59#include "vbi.h"
60
/* The current process, as a proc_t pointer (vbi_proc() returns void *). */
#define VBIPROC() ((proc_t *)vbi_proc())

/*
 * We have to use dl_lookup to find contig_free().
 */
extern void *contig_alloc(size_t, ddi_dma_attr_t *, uintptr_t, int);
extern void contig_free(void *, size_t);
#pragma weak contig_free
/* Resolved at load; NULL here means the weak symbol was absent from this kernel. */
static void (*p_contig_free)(void *, size_t) = contig_free;
70
71/*
 * Workarounds for running on old versions of solaris with different cross call
 * interfaces. If we find xc_init_cpu() in the kernel, then just use the defined
 * interfaces for xc_call() from the include file where the xc_call()
 * interfaces just takes a pointer to a ulong_t array. The array must be long
 * enough to hold "ncpus" bits at runtime.
 *
 * The reason for the hacks is that using the type "cpuset_t" is pretty much
79 * impossible from code built outside the Solaris source repository that wants
80 * to run on multiple releases of Solaris.
81 *
82 * For old style xc_call()s, 32 bit solaris and older 64 bit versions use
83 * "ulong_t" as cpuset_t.
84 *
85 * Later versions of 64 bit Solaris used: struct {ulong_t words[x];}
86 * where "x" depends on NCPU.
87 *
88 * We detect the difference in 64 bit support by checking the kernel value of
89 * max_cpuid, which always holds the compiled value of NCPU - 1.
90 *
91 * If Solaris increases NCPU to more than 256, this module will continue
92 * to work on all versions of Solaris as long as the number of installed
93 * CPUs in the machine is <= VBI_NCPU. If VBI_NCPU is increased, this code
94 * has to be re-written some to provide compatibility with older Solaris which
95 * expects cpuset_t to be based on NCPU==256 -- or we discontinue support
96 * of old Nevada/S10.
97 */
/* Nonzero when the kernel lacks xc_init_cpu() (old cross-call interface). */
static int use_old = 0;
/* Nonzero when the old interface's cpuset_t is a plain ulong_t. */
static int use_old_with_ulong = 0;
/* Unprototyped alias of xc_call() so either calling convention can be used. */
static void (*p_xc_call)() = (void (*)())xc_call;

#define VBI_NCPU 256	/* max CPUs this module's bit vectors can describe */
#define VBI_SET_WORDS (VBI_NCPU / (sizeof (ulong_t) * 8))
/* Local stand-in for the kernel's cpuset_t; see the comment block above. */
typedef struct vbi_cpuset {
	ulong_t words[VBI_SET_WORDS];
} vbi_cpuset_t;
#define X_CALL_HIPRI (2) /* for old Solaris interface */
108
109/*
110 * module linkage stuff
111 */
/*
 * module linkage stuff
 */
static struct modlmisc vbi_modlmisc = {
	&mod_miscops, "VirtualBox Interfaces V4"
};

static struct modlinkage vbi_modlinkage = {
	MODREV_1, (void *)&vbi_modlmisc, NULL
};

/* Lowest kernel virtual address; used to classify pointers as kernel/user. */
extern uintptr_t kernelbase;
#define IS_KERNEL(v) ((uintptr_t)(v) >= kernelbase)

/* Set nonzero (e.g. via mdb) to get warnings logged on failures. */
static int vbi_verbose = 0;

#define VBI_VERBOSE(msg) {if (vbi_verbose) cmn_err(CE_WARN, msg);}
126
/*
 * Module load entry point: verify this binary matches the running kernel's
 * cross-call interface, resolve contig_free() if it is not exported, then
 * install the module.  Returns 0 or an errno value.
 */
int
_init(void)
{
	int err;

	/*
	 * Check to see if this version of virtualbox interface module will work
	 * with the kernel.
	 */
	if (kobj_getsymvalue("xc_init_cpu", 1) != NULL) {
		/*
		 * Our bit vector storage needs to be large enough for the
		 * actual number of CPUs running in the system.
		 */
		if (ncpus > VBI_NCPU)
			return (EINVAL);
	} else {
		use_old = 1;
		/* max_cpuid + 1 equals the kernel's compiled-in NCPU */
		if (max_cpuid + 1 == sizeof(ulong_t) * 8)
			use_old_with_ulong = 1;
		else if (max_cpuid + 1 != VBI_NCPU)
			return (EINVAL); /* cpuset_t size mismatch */
	}

	/*
	 * In older versions of Solaris contig_free() is a static routine.
	 */
	if (p_contig_free == NULL) {
		p_contig_free = (void (*)(void *, size_t))
		    kobj_getsymvalue("contig_free", 1);
		if (p_contig_free == NULL) {
			cmn_err(CE_NOTE, " contig_free() not found in kernel");
			return (EINVAL);
		}
	}

	err = mod_install(&vbi_modlinkage);
	if (err != 0)
		return (err);

	return (0);
}
169
170int
171_fini(void)
172{
173 int err = mod_remove(&vbi_modlinkage);
174 if (err != 0)
175 return (err);
176
177 return (0);
178}
179
/* Report module information to modinfo(1M). */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&vbi_modlinkage, modinfop));
}

/*
 * Template DMA attributes for contiguous page-aligned allocations; the
 * high address limit is patched per-call in vbi_contig_alloc().
 */
static ddi_dma_attr_t base_attr = {
	DMA_ATTR_V0,		/* Version Number */
	(uint64_t)0,		/* lower limit */
	(uint64_t)0,		/* high limit */
	(uint64_t)0xffffffff,	/* counter limit */
	(uint64_t)PAGESIZE,	/* pagesize alignment */
	(uint64_t)PAGESIZE,	/* pagesize burst size */
	(uint64_t)PAGESIZE,	/* pagesize effective DMA size */
	(uint64_t)0xffffffff,	/* max DMA xfer size */
	(uint64_t)0xffffffff,	/* segment boundary */
	1,			/* list length (1 for contiguous) */
	1,			/* device granularity */
	0			/* bus-specific flags */
};
200
/*
 * Allocate "size" bytes of physically contiguous, page-aligned memory with
 * a physical address no higher than the value initially in *phys.  On
 * success *phys is overwritten with the allocation's physical address.
 * Returns the kernel virtual address, or NULL on failure/unaligned size.
 */
void *
vbi_contig_alloc(uint64_t *phys, size_t size)
{
	ddi_dma_attr_t attr;
	pfn_t pfn;
	void *ptr;

	if ((size & PAGEOFFSET) != 0)
		return (NULL);

	attr = base_attr;
	attr.dma_attr_addr_hi = *phys;	/* caller's physical upper bound */
	ptr = contig_alloc(size, &attr, PAGESIZE, 1);

	if (ptr == NULL) {
		VBI_VERBOSE("vbi_contig_alloc() failure");
		return (NULL);
	}

	/* a fresh contig allocation must have a valid translation */
	pfn = hat_getpfnum(kas.a_hat, (caddr_t)ptr);
	if (pfn == PFN_INVALID)
		panic("vbi_contig_alloc(): hat_getpfnum() failed\n");
	*phys = (uint64_t)pfn << PAGESHIFT;
	return (ptr);
}

/* Free memory obtained from vbi_contig_alloc() via the resolved contig_free(). */
void
vbi_contig_free(void *va, size_t size)
{
	p_contig_free(va, size);
}

/*
 * Map "size" bytes of physical address space starting at "pa" into the
 * kernel heap with the given protections.  pa and size must be page
 * aligned.  Returns the kernel virtual address (undo with vbi_unmap()),
 * or NULL on bad arguments.
 */
void *
vbi_kernel_map(uint64_t pa, size_t size, uint_t prot)
{
	caddr_t va;

	if ((pa & PAGEOFFSET) || (size & PAGEOFFSET)) {
		VBI_VERBOSE("vbi_kernel_map() bad pa or size");
		return (NULL);
	}

	va = vmem_alloc(heap_arena, size, VM_SLEEP);

	/* locked device-style load; no demand faulting on this range */
	hat_devload(kas.a_hat, va, size, (pfn_t)(pa >> PAGESHIFT),
	    prot, HAT_LOAD | HAT_LOAD_LOCK | HAT_UNORDERED_OK);

	return (va);
}
250
/*
 * Undo a vbi_kernel_map() (kernel addresses) or tear down a user mapping
 * created by vbi_user_map() (addresses below kernelbase, in the current
 * process's address space).
 */
void
vbi_unmap(void *va, size_t size)
{
	if (IS_KERNEL(va)) {
		hat_unload(kas.a_hat, va, size, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
		vmem_free(heap_arena, va, size);
	} else {
		struct as *as = VBIPROC()->p_as;

		as_rangelock(as);
		(void) as_unmap(as, va, size);
		as_rangeunlock(as);
	}
}
265
266void *
267vbi_curthread(void)
268{
269 return (curthread);
270}
271
272int
273vbi_yield(void)
274{
275 int rv = 0;
276
277 kpreempt_disable();
278 if (curthread->t_preempt == 1 && CPU->cpu_kprunrun)
279 rv = 1;
280 kpreempt_enable();
281 return (rv);
282}
283
/* Finest interval the cyclic-backed timers honor: one clock tick, in ns. */
uint64_t
vbi_timer_granularity(void)
{
	return (nsec_per_tick);
}

/* State for one cyclic-based timer created by vbi_timer_create(). */
typedef struct vbi_timer {
	cyc_handler_t	vbi_handler;	/* cyclic callback descriptor */
	cyclic_id_t	vbi_cyclic;	/* CYCLIC_NONE when not armed */
	uint64_t	vbi_interval;	/* period in ns; 0 means one-shot */
	void		(*vbi_func)();	/* client callback */
	void		*vbi_arg1;
	void		*vbi_arg2;
} vbi_timer_t;

/*
 * Cyclic handler: for one-shot timers tear the cyclic down first so the
 * client callback observes a disarmed timer.
 */
static void
vbi_timer_callback(void *arg)
{
	vbi_timer_t *t = arg;

	if (t->vbi_interval == 0)
		vbi_timer_stop(arg);
	t->vbi_func(t->vbi_arg1, t->vbi_arg2);
}
308
309void *
310vbi_timer_create(void *callback, void *arg1, void *arg2, uint64_t interval)
311{
312 vbi_timer_t *t = kmem_zalloc(sizeof (*t), KM_SLEEP);
313
314 t->vbi_func = (void (*)())callback;
315 t->vbi_arg1 = arg1;
316 t->vbi_arg2 = arg2;
317 t->vbi_handler.cyh_func = vbi_timer_callback;
318 t->vbi_handler.cyh_arg = (void *)t;
319 t->vbi_handler.cyh_level = CY_LOCK_LEVEL;
320 t->vbi_cyclic = CYCLIC_NONE;
321 t->vbi_interval = interval;
322 return (t);
323}
324
325void
326vbi_timer_destroy(void *timer)
327{
328 vbi_timer_t *t = timer;
329 if (t != NULL) {
330 vbi_timer_stop(timer);
331 kmem_free(t, sizeof (*t));
332 }
333}
334
/*
 * Arm the timer to first fire "when" ns from now, then periodically at
 * the interval given at create time.  For one-shot timers (interval 0)
 * the cyclic still requires a nonzero period, so the absolute fire time
 * doubles as a huge dummy interval; the callback removes the cyclic on
 * its first firing.
 */
void
vbi_timer_start(void *timer, uint64_t when)
{
	vbi_timer_t *t = timer;
	cyc_time_t fire_time;
	uint64_t interval = t->vbi_interval;

	/* cyclic_add() must be called with cpu_lock held */
	mutex_enter(&cpu_lock);
	when += gethrtime();
	fire_time.cyt_when = when;
	if (interval == 0)
		fire_time.cyt_interval = when;
	else
		fire_time.cyt_interval = interval;
	t->vbi_cyclic = cyclic_add(&t->vbi_handler, &fire_time);
	mutex_exit(&cpu_lock);
}

/*
 * Disarm the timer if it is armed.  The unlocked early check is an
 * optimization; the state is re-checked under cpu_lock before removal.
 */
void
vbi_timer_stop(void *timer)
{
	vbi_timer_t *t = timer;

	if (t->vbi_cyclic == CYCLIC_NONE)
		return;
	mutex_enter(&cpu_lock);
	if (t->vbi_cyclic != CYCLIC_NONE) {
		cyclic_remove(t->vbi_cyclic);
		t->vbi_cyclic = CYCLIC_NONE;
	}
	mutex_exit(&cpu_lock);
}
367
368uint64_t
369vbi_tod(void)
370{
371 timestruc_t ts;
372
373 mutex_enter(&tod_lock);
374 ts = tod_get();
375 mutex_exit(&tod_lock);
376 return ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec);
377}
378
379
380void *
381vbi_proc(void)
382{
383 proc_t *p;
384 drv_getparm(UPROCP, &p);
385 return (p);
386}
387
/* Change the scheduling priority of a kernel thread. */
void
vbi_set_priority(void *thread, int priority)
{
	kthread_t *t = thread;

	thread_lock(t);
	(void) thread_change_pri(t, priority, 0);
	thread_unlock(t);
}

/*
 * Create and start a kernel thread running func(arg) with the given
 * stack length and priority, owned by the current process.
 */
void *
vbi_thread_create(void *func, void *arg, size_t len, int priority)
{
	kthread_t *t;

	t = thread_create(NULL, NULL, (void (*)())func, arg, len,
	    VBIPROC(), TS_RUN, priority);
	return (t);
}

/* Terminate the calling kernel thread; does not return. */
void
vbi_thread_exit(void)
{
	thread_exit();
}

/* Allocate executable kernel memory from the text arena. */
void *
vbi_text_alloc(size_t size)
{
	return (segkmem_alloc(heaptext_arena, size, KM_SLEEP));
}

/* Free memory obtained from vbi_text_alloc(). */
void
vbi_text_free(void *va, size_t size)
{
	segkmem_free(heaptext_arena, va, size);
}

/* CPU id the caller is running on (may change at any time unless bound). */
int
vbi_cpu_id(void)
{
	return (CPU->cpu_id);
}

/* Largest possible CPU id in this kernel (compiled NCPU - 1). */
int
vbi_max_cpu_id(void)
{
	return (max_cpuid);
}

/* Maximum number of CPUs this kernel supports. */
int
vbi_cpu_maxcount(void)
{
	return (max_cpuid + 1);
}

/* Number of CPUs currently configured. */
int
vbi_cpu_count(void)
{
	return (ncpus);
}
449
450int
451vbi_cpu_online(int c)
452{
453 int x;
454
455 mutex_enter(&cpu_lock);
456 x = cpu_is_online(cpu[c]);
457 mutex_exit(&cpu_lock);
458 return (x);
459}
460
/* Disable kernel preemption for the calling thread (calls may nest). */
void
vbi_preempt_disable(void)
{
	kpreempt_disable();
}

/* Re-enable kernel preemption; the caller may be preempted immediately. */
void
vbi_preempt_enable(void)
{
	kpreempt_enable();
}
472
/*
 * Run func(arg) on every CPU via a high-priority cross call.  The three
 * cases differ only in how the CPU set is passed:
 *  - new kernels take a pointer to the bit vector,
 *  - old kernels whose cpuset_t is a ulong_t take the one word by value,
 *  - other old kernels take the whole struct by value on the varargs
 *    stack (which is why p_xc_call is an unprototyped function pointer).
 */
void
vbi_execute_on_all(void *func, void *arg)
{
	vbi_cpuset_t set;
	int i;

	for (i = 0; i < VBI_SET_WORDS; ++i)
		set.words[i] = (ulong_t)-1L;
	if (use_old) {
		if (use_old_with_ulong) {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set.words[0], (xc_func_t)func);
		} else {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set, (xc_func_t)func);
		}
	} else {
		xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
	}
}

/*
 * Like vbi_execute_on_all(), but with the calling CPU's bit cleared
 * from the set.
 */
void
vbi_execute_on_others(void *func, void *arg)
{
	vbi_cpuset_t set;
	int i;

	for (i = 0; i < VBI_SET_WORDS; ++i)
		set.words[i] = (ulong_t)-1L;
	BT_CLEAR(set.words, vbi_cpu_id());
	if (use_old) {
		if (use_old_with_ulong) {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set.words[0], (xc_func_t)func);
		} else {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set, (xc_func_t)func);
		}
	} else {
		xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
	}
}

/*
 * Run func(arg) on exactly CPU "c": the set starts empty and only that
 * CPU's bit is set.
 */
void
vbi_execute_on_one(void *func, void *arg, int c)
{
	vbi_cpuset_t set;
	int i;

	for (i = 0; i < VBI_SET_WORDS; ++i)
		set.words[i] = 0;
	BT_SET(set.words, c);
	if (use_old) {
		if (use_old_with_ulong) {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set.words[0], (xc_func_t)func);
		} else {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set, (xc_func_t)func);
		}
	} else {
		xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
	}
}
537
/*
 * Lock down the user pages backing [addr, addr + len) so they cannot be
 * paged out.  Returns 0 on success, -1 on failure.  The handle output is
 * always NULL; it exists only for interface symmetry with vbi_unlock_va().
 */
int
vbi_lock_va(void *addr, size_t len, void **handle)
{
	faultcode_t err;

	/*
	 * kernel mappings on x86 are always locked, so only handle user.
	 */
	*handle = NULL;
	if (!IS_KERNEL(addr)) {
		err = as_fault(VBIPROC()->p_as->a_hat, VBIPROC()->p_as,
		    (caddr_t)addr, len, F_SOFTLOCK, S_WRITE);
		if (err != 0) {
			VBI_VERBOSE("vbi_lock_va() failed to lock");
			return (-1);
		}
	}
	return (0);
}

/* Undo vbi_lock_va(); the F_SOFTUNLOCK result is intentionally ignored. */
/*ARGSUSED*/
void
vbi_unlock_va(void *addr, size_t len, void *handle)
{
	if (!IS_KERNEL(addr))
		as_fault(VBIPROC()->p_as->a_hat, VBIPROC()->p_as,
		    (caddr_t)addr, len, F_SOFTUNLOCK, S_WRITE);
}

/*
 * Translate a kernel or current-process user virtual address to a
 * physical address.  Returns -(uint64_t)1 when no mapping exists.
 */
uint64_t
vbi_va_to_pa(void *addr)
{
	struct hat *hat;
	pfn_t pfn;
	uintptr_t v = (uintptr_t)addr;

	if (IS_KERNEL(v))
		hat = kas.a_hat;
	else
		hat = VBIPROC()->p_as->a_hat;
	pfn = hat_getpfnum(hat, (caddr_t)(v & PAGEMASK));
	if (pfn == PFN_INVALID)
		return (-(uint64_t)1);
	return (((uint64_t)pfn << PAGESHIFT) | (v & PAGEOFFSET));
}
583
584
/* Arguments handed from vbi_user_map() through as_map() to segvbi_create(). */
struct segvbi_crargs {
	uint64_t *palist;	/* physical address of each page to map */
	uint_t prot;		/* protections to apply */
};

/* Per-segment private data for a segvbi segment. */
struct segvbi_data {
	uint_t prot;
};

static struct seg_ops segvbi_ops;

/*
 * as_map() callback: initialize a segvbi segment and eagerly install
 * locked translations to the caller-supplied physical pages.
 */
static int
segvbi_create(struct seg *seg, void *args)
{
	struct segvbi_crargs *a = args;
	struct segvbi_data *data;
	struct as *as = seg->s_as;
	int error = 0;
	caddr_t va;
	ulong_t pgcnt;
	ulong_t p;

	hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->prot = a->prot | PROT_USER;

	seg->s_ops = &segvbi_ops;
	seg->s_data = data;

	/*
	 * now load locked mappings to the pages
	 */
	va = seg->s_base;
	pgcnt = seg->s_size >> PAGESHIFT;
	for (p = 0; p < pgcnt; ++p, va += PAGESIZE) {
		hat_devload(as->a_hat, va,
		    PAGESIZE, a->palist[p] >> PAGESHIFT,
		    data->prot | HAT_UNORDERED_OK, HAT_LOAD | HAT_LOAD_LOCK);
	}

	return (error);
}
627
628/*
629 * Duplicate a seg and return new segment in newseg.
630 */
631static int
632segvbi_dup(struct seg *seg, struct seg *newseg)
633{
634 struct segvbi_data *data = seg->s_data;
635 struct segvbi_data *ndata;
636
637 ndata = kmem_zalloc(sizeof (*data), KM_SLEEP);
638 ndata->prot = data->prot;
639 newseg->s_ops = &segvbi_ops;
640 newseg->s_data = ndata;
641
642 return (0);
643}
644
/*
 * Unmap a segvbi segment.  Only whole-segment, page-aligned unmaps are
 * supported; partial unmaps get ENOTSUP and malformed requests panic.
 */
static int
segvbi_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
		panic("segvbi_unmap");

	if (addr != seg->s_base || len != seg->s_size)
		return (ENOTSUP);

	hat_unload(seg->s_as->a_hat, addr, len,
	    HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);

	seg_free(seg);
	return (0);
}

/* Release the per-segment private data. */
static void
segvbi_free(struct seg *seg)
{
	struct segvbi_data *data = seg->s_data;
	kmem_free(data, sizeof (*data));
}

/*
 * We never demand-fault for seg_vbi.
 */
static int
segvbi_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
	return (FC_MAKE_ERR(EFAULT));
}

/* Fault-ahead is a no-op: all translations are preloaded and locked. */
static int
segvbi_faulta(struct seg *seg, caddr_t addr)
{
	return (0);
}

/* Protection changes are not allowed on these mappings. */
static int
segvbi_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	return (EACCES);
}

/* Protection checks are always denied. */
static int
segvbi_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	return (EINVAL);
}

/* No read-ahead clustering. */
static int
segvbi_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (-1);
}

/* Nothing to sync: the pages are locked device-style mappings. */
static int
segvbi_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
	return (0);
}

/* Every page is always in core; report the full page-rounded length. */
static size_t
segvbi_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	size_t v;

	for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len;
	    len -= PAGESIZE, v += PAGESIZE)
		*vec++ = 1;
	return (v);
}

/* Lock operations are no-ops: the mappings are permanently locked. */
static int
segvbi_lockop(struct seg *seg, caddr_t addr,
    size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
{
	return (0);
}
726
727static int
728segvbi_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
729{
730 struct segvbi_data *data = seg->s_data;
731 return (data->prot);
732}
733
/* Offset of addr within the segment (the segments are anonymous). */
static u_offset_t
segvbi_getoffset(struct seg *seg, caddr_t addr)
{
	return ((uintptr_t)addr - (uintptr_t)seg->s_base);
}

/* These mappings behave like MAP_SHARED mappings. */
static int
segvbi_gettype(struct seg *seg, caddr_t addr)
{
	return (MAP_SHARED);
}

/* Dummy vnode shared by all segvbi segments (see segvbi_getvp()). */
static vnode_t vbivp;

/* Hand out the shared dummy vnode. */
static int
segvbi_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	*vpp = &vbivp;
	return (0);
}

/* madvise() hints are accepted and ignored. */
static int
segvbi_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	return (0);
}

/* Nothing is contributed to crash dumps. */
static void
segvbi_dump(struct seg *seg)
{}

/* I/O page locking is not supported on these segments. */
static int
segvbi_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/* Large pages are not supported. */
static int
segvbi_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}

/* No memid support. */
static int
segvbi_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
{
	return (ENODEV);
}

/* No lgroup memory placement policy. */
static lgrp_mem_policy_info_t *
segvbi_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/* No optional segment capabilities. */
static int
segvbi_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

/* Ops vector; entries must stay in the order struct seg_ops declares. */
static struct seg_ops segvbi_ops = {
	segvbi_dup,
	segvbi_unmap,
	segvbi_free,
	segvbi_fault,
	segvbi_faulta,
	segvbi_setprot,
	segvbi_checkprot,
	(int (*)())segvbi_kluster,
	(size_t (*)(struct seg *))NULL, /* swapout */
	segvbi_sync,
	segvbi_incore,
	segvbi_lockop,
	segvbi_getprot,
	segvbi_getoffset,
	segvbi_gettype,
	segvbi_getvp,
	segvbi_advise,
	segvbi_dump,
	segvbi_pagelock,
	segvbi_setpagesize,
	segvbi_getmemid,
	segvbi_getpolicy,
	segvbi_capable
};
821
822
823
/*
 * Interfaces to inject physical pages into user address space
 * and later remove them.
 */
int
vbi_user_map(caddr_t *va, uint_t prot, uint64_t *palist, size_t len)
{
	struct as *as = VBIPROC()->p_as;
	struct segvbi_crargs args;
	int error = 0;

	args.palist = palist;
	args.prot = prot;
	as_rangelock(as);
	/* choose a user virtual address if the caller did not supply one */
	map_addr(va, len, 0, 0, MAP_SHARED);
	if (*va != NULL)
		error = as_map(as, *va, len, segvbi_create, &args);
	else
		error = ENOMEM;
	if (error)
		VBI_VERBOSE("vbi_user_map() failed");
	as_rangeunlock(as);
	return (error);
}
848
849
/*
 * This is revision 2 of the interface.
 */

/* Client callback and argument for a CPU online/offline watcher. */
struct vbi_cpu_watch {
	void (*vbi_cpu_func)();
	void *vbi_cpu_arg;
};

/*
 * cpu_setup callback: forward CPU_ON/CPU_OFF transitions to the client;
 * all other state changes are ignored.
 */
static int
vbi_watcher(cpu_setup_t state, int cpu, void *arg)
{
	vbi_cpu_watch_t *w = arg;
	int online;

	if (state == CPU_ON)
		online = 1;
	else if (state == CPU_OFF)
		online = 0;
	else
		return (0);
	w->vbi_cpu_func(w->vbi_cpu_arg, cpu, online);
	return (0);
}
874
875vbi_cpu_watch_t *
876vbi_watch_cpus(void (*func)(), void *arg, int current_too)
877{
878 int c;
879 vbi_cpu_watch_t *w;
880
881 w = kmem_alloc(sizeof (*w), KM_SLEEP);
882 w->vbi_cpu_func = func;
883 w->vbi_cpu_arg = arg;
884 mutex_enter(&cpu_lock);
885 register_cpu_setup_func(vbi_watcher, w);
886 if (current_too) {
887 for (c = 0; c < ncpus; ++c) {
888 if (cpu_is_online(cpu[c]))
889 func(arg, c, 1);
890 }
891 }
892 mutex_exit(&cpu_lock);
893 return (w);
894}
895
/* Unregister a watcher installed by vbi_watch_cpus() and free it. */
void
vbi_ignore_cpus(vbi_cpu_watch_t *w)
{
	mutex_enter(&cpu_lock);
	unregister_cpu_setup_func(vbi_watcher, w);
	mutex_exit(&cpu_lock);
	kmem_free(w, sizeof (*w));
}
904
/*
 * Simple timers are pretty much a pass through to the cyclic subsystem.
 */
struct vbi_stimer {
	cyc_handler_t	s_handler;	/* cyclic callback descriptor */
	cyc_time_t	s_fire_time;	/* first fire time and period */
	cyclic_id_t	s_cyclic;
	uint64_t	s_tick;		/* count of firings so far */
	void		(*s_func)(void *, uint64_t);
	void		*s_arg;
};

/* Cyclic handler: hand the client a monotonically increasing tick count. */
static void
vbi_stimer_func(void *arg)
{
	vbi_stimer_t *t = arg;
	t->s_func(t->s_arg, ++t->s_tick);
}
923
924extern vbi_stimer_t *
925vbi_stimer_begin(
926 void (*func)(void *, uint64_t),
927 void *arg,
928 uint64_t when,
929 uint64_t interval,
930 int on_cpu)
931{
932 vbi_stimer_t *t = kmem_zalloc(sizeof (*t), KM_SLEEP);
933
934 t->s_handler.cyh_func = vbi_stimer_func;
935 t->s_handler.cyh_arg = t;
936 t->s_handler.cyh_level = CY_LOCK_LEVEL;
937 t->s_tick = 0;
938 t->s_func = func;
939 t->s_arg = arg;
940
941 mutex_enter(&cpu_lock);
942 if (on_cpu != VBI_ANY_CPU && !cpu_is_online(cpu[on_cpu])) {
943 t = NULL;
944 goto done;
945 }
946
947 when += gethrtime();
948 t->s_fire_time.cyt_when = when;
949 if (interval == 0)
950 t->s_fire_time.cyt_interval = INT64_MAX - when;
951 else
952 t->s_fire_time.cyt_interval = interval;
953 t->s_cyclic = cyclic_add(&t->s_handler, &t->s_fire_time);
954 if (on_cpu != VBI_ANY_CPU)
955 cyclic_bind(t->s_cyclic, cpu[on_cpu], NULL);
956done:
957 mutex_exit(&cpu_lock);
958 return (t);
959}
960
/* Tear down a simple timer and free it. */
extern void
vbi_stimer_end(vbi_stimer_t *t)
{
	mutex_enter(&cpu_lock);
	cyclic_remove(t->s_cyclic);
	mutex_exit(&cpu_lock);
	kmem_free(t, sizeof (*t));
}

/*
 * Global timers are more complicated. They include a counter on the callback,
 * that indicates the first call on a given cpu.
 */
struct vbi_gtimer {
	/*
	 * Per-CPU firing counts, indexed by cpu id.
	 * NOTE(review): the array is allocated with ncpus entries but
	 * indexed by cpu id; with sparse CPU ids (cpu id > ncpus - 1)
	 * this could index out of bounds — confirm against supported
	 * configurations.
	 */
	uint64_t *g_counters;
	void (*g_func)(void *, uint64_t);
	void *g_arg;
	uint64_t g_when;	/* absolute first fire time */
	uint64_t g_interval;	/* period in ns (never 0, see begin) */
	cyclic_id_t g_cyclic;
};

/* Omni-cyclic handler: report this CPU's own firing count to the client. */
static void
vbi_gtimer_func(void *arg)
{
	vbi_gtimer_t *t = arg;
	t->g_func(t->g_arg, ++t->g_counters[vbi_cpu_id()]);
}
989
/*
 * Whenever a cpu is onlined, need to reset the g_counters[] for it to zero.
 */
static void
vbi_gtimer_online(void *arg, cpu_t *cpu, cyc_handler_t *h, cyc_time_t *ct)
{
	vbi_gtimer_t *t = arg;
	hrtime_t now;

	t->g_counters[cpu->cpu_id] = 0;
	h->cyh_func = vbi_gtimer_func;
	h->cyh_arg = t;
	h->cyh_level = CY_LOCK_LEVEL;
	now = gethrtime();
	/* if the original start time already passed, restart half a period out */
	if (t->g_when < now)
		ct->cyt_when = now + t->g_interval / 2;
	else
		ct->cyt_when = t->g_when;
	ct->cyt_interval = t->g_interval;
}


/*
 * Start a periodic timer that fires on every online CPU, implemented as
 * an omni-cyclic so newly onlined CPUs pick it up automatically.
 * One-shot operation (interval == 0) is not supported; returns NULL then.
 */
vbi_gtimer_t *
vbi_gtimer_begin(
	void (*func)(void *, uint64_t),
	void *arg,
	uint64_t when,
	uint64_t interval)
{
	vbi_gtimer_t *t;
	cyc_omni_handler_t omni;

	/*
	 * one shot global timer is not supported yet.
	 */
	if (interval == 0)
		return (NULL);

	t = kmem_zalloc(sizeof (*t), KM_SLEEP);
	t->g_counters = kmem_zalloc(ncpus * sizeof (uint64_t), KM_SLEEP);
	t->g_when = when + gethrtime();
	t->g_interval = interval;
	t->g_arg = arg;
	t->g_func = func;
	t->g_cyclic = CYCLIC_NONE;

	omni.cyo_online = (void (*)())vbi_gtimer_online;
	omni.cyo_offline = NULL;
	omni.cyo_arg = t;

	/* cyclic_add_omni() requires cpu_lock */
	mutex_enter(&cpu_lock);
	t->g_cyclic = cyclic_add_omni(&omni);
	mutex_exit(&cpu_lock);
	return (t);
}
1045
/* Tear down a global timer and free it with its per-CPU counters. */
extern void
vbi_gtimer_end(vbi_gtimer_t *t)
{
	mutex_enter(&cpu_lock);
	cyclic_remove(t->g_cyclic);
	mutex_exit(&cpu_lock);
	/* must match the allocation size used in vbi_gtimer_begin() */
	kmem_free(t->g_counters, ncpus * sizeof (uint64_t));
	kmem_free(t, sizeof (*t));
}

/* Nonzero when the calling thread can currently be preempted. */
int
vbi_is_preempt_enabled(void)
{
	return (curthread->t_preempt == 0);
}

/*
 * This is revision 4 of the interface. As more functions are added,
 * they should go after this point in the file and the revision level
 * increased. Also change vbi_modlmisc at the top of the file.
 */
uint_t vbi_revision_level = 4;
1068
1069void
1070vbi_poke_cpu(int c)
1071{
1072 if (c < ncpus)
1073 poke_cpu(c);
1074}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette