VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/vbi/i86pc/os/vbi.c@ 20471

Last change on this file since 20471 was 20471, checked in by vboxsync, 15 years ago

xtracker 3945 - allow lowmem allocs to be non-contig

  • Property svn:eol-style set to native
File size: 21.7 KB
Line 
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26/*
27 * Private interfaces for VirtualBox access to Solaris kernel internal
28 * facilities.
29 *
30 * See sys/vbi.h for what each function does.
31 */
32
33#include <sys/kmem.h>
34#include <sys/types.h>
35#include <sys/mman.h>
36#include <sys/thread.h>
37#include <sys/mutex.h>
38#include <sys/condvar.h>
39#include <sys/sdt.h>
40#include <sys/schedctl.h>
41#include <sys/time.h>
42#include <sys/sysmacros.h>
43#include <sys/cmn_err.h>
44#include <sys/vmsystm.h>
45#include <sys/cyclic.h>
46#include <sys/class.h>
47#include <sys/cpuvar.h>
48#include <sys/kobj.h>
49#include <sys/x_call.h>
50#include <sys/x86_archext.h>
51#include <vm/hat.h>
52#include <vm/seg_vn.h>
53#include <vm/seg_kmem.h>
54#include <sys/ddi.h>
55#include <sys/sunddi.h>
56#include <sys/modctl.h>
57#include <sys/machparam.h>
58
59#include "vbi.h"
60
#define	VBIPROC()	((proc_t *)vbi_proc())

/*
 * We have to use dl_lookup to find contig_free().
 */
extern void *contig_alloc(size_t, ddi_dma_attr_t *, uintptr_t, int);
extern void contig_free(void *, size_t);
#pragma weak contig_free
/* NULL after load if the running kernel does not export contig_free(). */
static void (*p_contig_free)(void *, size_t) = contig_free;

/*
 * Workarounds for running on old versions of solaris with different cross call
 * interfaces. If we find xc_init_cpu() in the kernel, then just use the defined
 * interfaces for xc_call() from the include file where the xc_call()
 * interfaces just takes a pointer to a ulong_t array. The array must be long
 * enough to hold "ncpus" bits at runtime.
 *
 * The reason for the hacks is that using the type "cpuset_t" is pretty much
 * impossible from code built outside the Solaris source repository that wants
 * to run on multiple releases of Solaris.
 *
 * For old style xc_call()s, 32 bit solaris and older 64 bit versions use
 * "ulong_t" as cpuset_t.
 *
 * Later versions of 64 bit Solaris used: struct {ulong_t words[x];}
 * where "x" depends on NCPU.
 *
 * We detect the difference in 64 bit support by checking the kernel value of
 * max_cpuid, which always holds the compiled value of NCPU - 1.
 *
 * If Solaris increases NCPU to more than 256, this module will continue
 * to work on all versions of Solaris as long as the number of installed
 * CPUs in the machine is <= VBI_NCPU. If VBI_NCPU is increased, this code
 * has to be re-written some to provide compatibility with older Solaris which
 * expects cpuset_t to be based on NCPU==256 -- or we discontinue support
 * of old Nevada/S10.
 */
static int use_old = 0;			/* non-zero: pre-xc_init_cpu() kernel */
static int use_old_with_ulong = 0;	/* non-zero: old kernel, cpuset_t == ulong_t */
static void (*p_xc_call)() = (void (*)())xc_call;

#define	VBI_NCPU	256
#define	VBI_SET_WORDS	(VBI_NCPU / (sizeof (ulong_t) * 8))
/* Fixed-size stand-in for the kernel's cpuset_t: one bit per CPU id. */
typedef struct vbi_cpuset {
	ulong_t words[VBI_SET_WORDS];
} vbi_cpuset_t;
#define	X_CALL_HIPRI	(2)	/* for old Solaris interface */

/*
 * module linkage stuff
 */
static struct modlmisc vbi_modlmisc = {
	&mod_miscops, "VirtualBox Interfaces V5"
};

static struct modlinkage vbi_modlinkage = {
	MODREV_1, (void *)&vbi_modlmisc, NULL
};

/* Lowest kernel virtual address; used to tell kernel from user addresses. */
extern uintptr_t kernelbase;
#define	IS_KERNEL(v)	((uintptr_t)(v) >= kernelbase)

static int vbi_verbose = 0;	/* set non-zero to get cmn_err() diagnostics */

#define	VBI_VERBOSE(msg) {if (vbi_verbose) cmn_err(CE_WARN, msg);}
126
/*
 * _init() - module load entry point.  Verifies that this build of the
 * interface module is compatible with the running kernel (cross-call ABI
 * and contig_free() availability) before installing the module.
 * Returns 0 on success or an errno value on incompatibility/failure.
 */
int
_init(void)
{
	int err;

	/*
	 * Check to see if this version of virtualbox interface module will work
	 * with the kernel.
	 */
	if (kobj_getsymvalue("xc_init_cpu", 1) != NULL) {
		/*
		 * New cross-call interface present.  Our bit vector storage
		 * needs to be large enough for the actual number of CPUs
		 * running in the system.
		 */
		if (ncpus > VBI_NCPU)
			return (EINVAL);
	} else {
		/* old cross-call interface; figure out which cpuset_t shape */
		use_old = 1;
		if (max_cpuid + 1 == sizeof(ulong_t) * 8)
			use_old_with_ulong = 1;
		else if (max_cpuid + 1 != VBI_NCPU)
			return (EINVAL);	/* cpuset_t size mismatch */
	}

	/*
	 * In older versions of Solaris contig_free() is a static routine.
	 */
	if (p_contig_free == NULL) {
		p_contig_free = (void (*)(void *, size_t))
		    kobj_getsymvalue("contig_free", 1);
		if (p_contig_free == NULL) {
			cmn_err(CE_NOTE, " contig_free() not found in kernel");
			return (EINVAL);
		}
	}

	err = mod_install(&vbi_modlinkage);
	if (err != 0)
		return (err);

	return (0);
}

/*
 * _fini() - module unload entry point; fails if the module is still busy.
 */
int
_fini(void)
{
	int err = mod_remove(&vbi_modlinkage);
	if (err != 0)
		return (err);

	return (0);
}

/*
 * _info() - report module information to modinfo(1M).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&vbi_modlinkage, modinfop));
}
185
/*
 * Template DMA attributes for page-aligned physical memory allocations.
 * vbi_internal_alloc() copies this and adjusts the high address limit and
 * the scatter/gather list length per allocation.
 */
static ddi_dma_attr_t base_attr = {
	DMA_ATTR_V0,		/* Version Number */
	(uint64_t)0,		/* lower limit */
	(uint64_t)0,		/* high limit */
	(uint64_t)0xffffffff,	/* counter limit */
	(uint64_t)PAGESIZE,	/* pagesize alignment */
	(uint64_t)PAGESIZE,	/* pagesize burst size */
	(uint64_t)PAGESIZE,	/* pagesize effective DMA size */
	(uint64_t)0xffffffff,	/* max DMA xfer size */
	(uint64_t)0xffffffff,	/* segment boundary */
	1,			/* list length (1 for contiguous) */
	1,			/* device granularity */
	0			/* bus-specific flags */
};
200
201static void *
202vbi_internal_alloc(uint64_t *phys, size_t size, int contig)
203{
204 ddi_dma_attr_t attr;
205 pfn_t pfn;
206 void *ptr;
207 uint_t npages;
208
209 if ((size & PAGEOFFSET) != 0)
210 return (NULL);
211 npages = size >> PAGESHIFT;
212 if (npages == 0)
213 return (NULL);
214
215 attr = base_attr;
216 attr.dma_attr_addr_hi = *phys;
217 if (!contig)
218 attr.dma_attr_sgllen = npages;
219 ptr = contig_alloc(size, &attr, PAGESIZE, 1);
220
221 if (ptr == NULL) {
222 VBI_VERBOSE("vbi_internal_alloc() failure");
223 return (NULL);
224 }
225
226 pfn = hat_getpfnum(kas.a_hat, (caddr_t)ptr);
227 if (pfn == PFN_INVALID)
228 panic("vbi_contig_alloc(): hat_getpfnum() failed\n");
229 *phys = (uint64_t)pfn << PAGESHIFT;
230 return (ptr);
231}
232
/*
 * vbi_contig_alloc() - allocate physically contiguous, page-aligned memory
 * below the physical address limit *phys; *phys returns the physical
 * address of the allocation.
 */
void *
vbi_contig_alloc(uint64_t *phys, size_t size)
{
	return (vbi_internal_alloc(phys, size, 1));
}

/*
 * vbi_contig_free() - free memory obtained from vbi_contig_alloc().
 */
void
vbi_contig_free(void *va, size_t size)
{
	p_contig_free(va, size);
}

/*
 * vbi_kernel_map() - map the physical range [pa, pa+size) into kernel
 * virtual address space with the given protection, as a locked mapping.
 * Both pa and size must be page aligned.  Returns the kernel VA or NULL.
 */
void *
vbi_kernel_map(uint64_t pa, size_t size, uint_t prot)
{
	caddr_t va;

	if ((pa & PAGEOFFSET) || (size & PAGEOFFSET)) {
		VBI_VERBOSE("vbi_kernel_map() bad pa or size");
		return (NULL);
	}

	/* VM_SLEEP: blocks until virtual space is available */
	va = vmem_alloc(heap_arena, size, VM_SLEEP);

	hat_devload(kas.a_hat, va, size, (pfn_t)(pa >> PAGESHIFT),
	    prot, HAT_LOAD | HAT_LOAD_LOCK | HAT_UNORDERED_OK);

	return (va);
}
262
/*
 * vbi_unmap() - undo a mapping.  Kernel addresses (from vbi_kernel_map())
 * are unloaded from the HAT and the virtual space returned to heap_arena;
 * user addresses are unmapped from the current process's address space.
 */
void
vbi_unmap(void *va, size_t size)
{
	if (IS_KERNEL(va)) {
		hat_unload(kas.a_hat, va, size, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
		vmem_free(heap_arena, va, size);
	} else {
		struct as *as = VBIPROC()->p_as;

		as_rangelock(as);
		(void) as_unmap(as, va, size);
		as_rangeunlock(as);
	}
}

/*
 * vbi_curthread() - return an opaque handle for the current kernel thread.
 */
void *
vbi_curthread(void)
{
	return (curthread);
}

/*
 * vbi_yield() - report whether a kernel preemption is pending for this
 * CPU.  Returns 1 when the only preempt-disable count is our own
 * kpreempt_disable() and the CPU has kprunrun set, else 0.
 */
int
vbi_yield(void)
{
	int rv = 0;

	kpreempt_disable();
	if (curthread->t_preempt == 1 && CPU->cpu_kprunrun)
		rv = 1;
	kpreempt_enable();
	return (rv);
}

/*
 * vbi_timer_granularity() - timer resolution in nanoseconds (one tick).
 */
uint64_t
vbi_timer_granularity(void)
{
	return (nsec_per_tick);
}
301
/* Per-timer state wrapping a cyclic subsystem handler. */
typedef struct vbi_timer {
	cyc_handler_t	vbi_handler;	/* cyclic callback descriptor */
	cyclic_id_t	vbi_cyclic;	/* CYCLIC_NONE when not armed */
	uint64_t	vbi_interval;	/* 0 means one-shot */
	void		(*vbi_func)();	/* client callback */
	void		*vbi_arg1;
	void		*vbi_arg2;
} vbi_timer_t;

/*
 * vbi_timer_callback() - cyclic handler; stops one-shot timers before
 * invoking the client callback.
 */
static void
vbi_timer_callback(void *arg)
{
	vbi_timer_t *t = arg;

	if (t->vbi_interval == 0)
		vbi_timer_stop(arg);
	t->vbi_func(t->vbi_arg1, t->vbi_arg2);
}

/*
 * vbi_timer_create() - allocate and initialize a timer (not yet armed).
 * interval == 0 creates a one-shot timer; start it with vbi_timer_start().
 */
void *
vbi_timer_create(void *callback, void *arg1, void *arg2, uint64_t interval)
{
	vbi_timer_t *t = kmem_zalloc(sizeof (*t), KM_SLEEP);

	t->vbi_func = (void (*)())callback;
	t->vbi_arg1 = arg1;
	t->vbi_arg2 = arg2;
	t->vbi_handler.cyh_func = vbi_timer_callback;
	t->vbi_handler.cyh_arg = (void *)t;
	t->vbi_handler.cyh_level = CY_LOCK_LEVEL;
	t->vbi_cyclic = CYCLIC_NONE;
	t->vbi_interval = interval;
	return (t);
}
336
/*
 * vbi_timer_destroy() - stop (if armed) and free a timer.  NULL is a no-op.
 */
void
vbi_timer_destroy(void *timer)
{
	vbi_timer_t *t = timer;
	if (t != NULL) {
		vbi_timer_stop(timer);
		kmem_free(t, sizeof (*t));
	}
}

/*
 * vbi_timer_start() - arm the timer to first fire "when" nanoseconds from
 * now.  cpu_lock is required by cyclic_add().
 */
void
vbi_timer_start(void *timer, uint64_t when)
{
	vbi_timer_t *t = timer;
	cyc_time_t fire_time;
	uint64_t interval = t->vbi_interval;

	mutex_enter(&cpu_lock);
	when += gethrtime();
	fire_time.cyt_when = when;
	/*
	 * cyclics have no one-shot mode; for interval == 0 use the (large)
	 * absolute fire time as the interval and rely on the callback
	 * stopping the timer after the first expiry.
	 */
	if (interval == 0)
		fire_time.cyt_interval = when;
	else
		fire_time.cyt_interval = interval;
	t->vbi_cyclic = cyclic_add(&t->vbi_handler, &fire_time);
	mutex_exit(&cpu_lock);
}

/*
 * vbi_timer_stop() - disarm the timer if armed.  The vbi_cyclic field is
 * re-checked under cpu_lock since the unlocked first check can race.
 */
void
vbi_timer_stop(void *timer)
{
	vbi_timer_t *t = timer;

	if (t->vbi_cyclic == CYCLIC_NONE)
		return;
	mutex_enter(&cpu_lock);
	if (t->vbi_cyclic != CYCLIC_NONE) {
		cyclic_remove(t->vbi_cyclic);
		t->vbi_cyclic = CYCLIC_NONE;
	}
	mutex_exit(&cpu_lock);
}
379
/*
 * vbi_tod() - current time of day in nanoseconds since the epoch.
 */
uint64_t
vbi_tod(void)
{
	timestruc_t ts;

	mutex_enter(&tod_lock);	/* tod_get() requires tod_lock */
	ts = tod_get();
	mutex_exit(&tod_lock);
	return ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec);
}


/*
 * vbi_proc() - return the proc_t of the current process (opaque handle).
 */
void *
vbi_proc(void)
{
	proc_t *p;
	drv_getparm(UPROCP, &p);
	return (p);
}

/*
 * vbi_set_priority() - change the scheduling priority of a kernel thread.
 */
void
vbi_set_priority(void *thread, int priority)
{
	kthread_t *t = thread;

	thread_lock(t);
	(void) thread_change_pri(t, priority, 0);
	thread_unlock(t);
}

/*
 * vbi_thread_create() - create and start a kernel thread in the current
 * process at the given priority.  Returns the kthread_t handle.
 */
void *
vbi_thread_create(void *func, void *arg, size_t len, int priority)
{
	kthread_t *t;

	t = thread_create(NULL, NULL, (void (*)())func, arg, len,
	    VBIPROC(), TS_RUN, priority);
	return (t);
}

/*
 * vbi_thread_exit() - terminate the calling kernel thread; does not return.
 */
void
vbi_thread_exit(void)
{
	thread_exit();
}
425
/*
 * vbi_text_alloc() - allocate executable kernel memory (text arena).
 */
void *
vbi_text_alloc(size_t size)
{
	return (segkmem_alloc(heaptext_arena, size, KM_SLEEP));
}

/*
 * vbi_text_free() - free memory from vbi_text_alloc().
 */
void
vbi_text_free(void *va, size_t size)
{
	segkmem_free(heaptext_arena, va, size);
}

/*
 * vbi_cpu_id() - id of the CPU the caller is currently running on.
 */
int
vbi_cpu_id(void)
{
	return (CPU->cpu_id);
}

/*
 * vbi_max_cpu_id() - largest possible CPU id (NCPU - 1).
 */
int
vbi_max_cpu_id(void)
{
	return (max_cpuid);
}

/*
 * vbi_cpu_maxcount() - maximum number of CPUs the kernel supports.
 */
int
vbi_cpu_maxcount(void)
{
	return (max_cpuid + 1);
}

/*
 * vbi_cpu_count() - number of CPUs configured in the system.
 */
int
vbi_cpu_count(void)
{
	return (ncpus);
}

/*
 * vbi_cpu_online() - non-zero if CPU "c" is online.  Caller must pass a
 * valid CPU id.
 */
int
vbi_cpu_online(int c)
{
	int x;

	mutex_enter(&cpu_lock);	/* stabilizes the online state */
	x = cpu_is_online(cpu[c]);
	mutex_exit(&cpu_lock);
	return (x);
}

/*
 * vbi_preempt_disable() - prevent kernel preemption of the caller.
 */
void
vbi_preempt_disable(void)
{
	kpreempt_disable();
}

/*
 * vbi_preempt_enable() - re-allow kernel preemption.
 */
void
vbi_preempt_enable(void)
{
	kpreempt_enable();
}
484
/*
 * vbi_execute_on_all() - run func(arg) on every CPU via a high priority
 * cross call, handling the three cross-call ABIs described at the top of
 * this file.
 */
void
vbi_execute_on_all(void *func, void *arg)
{
	vbi_cpuset_t set;
	int i;

	/* all bits set == every CPU */
	for (i = 0; i < VBI_SET_WORDS; ++i)
		set.words[i] = (ulong_t)-1L;
	if (use_old) {
		if (use_old_with_ulong) {
			/* old kernel where cpuset_t is a plain ulong_t */
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set.words[0], (xc_func_t)func);
		} else {
			/* old kernel where cpuset_t is a struct by value */
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set, (xc_func_t)func);
		}
	} else {
		/* new interface takes a pointer to the bit vector */
		xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
	}
}

/*
 * vbi_execute_on_others() - run func(arg) on every CPU except the caller's.
 */
void
vbi_execute_on_others(void *func, void *arg)
{
	vbi_cpuset_t set;
	int i;

	/* all CPUs, then clear our own bit */
	for (i = 0; i < VBI_SET_WORDS; ++i)
		set.words[i] = (ulong_t)-1L;
	BT_CLEAR(set.words, vbi_cpu_id());
	if (use_old) {
		if (use_old_with_ulong) {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set.words[0], (xc_func_t)func);
		} else {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set, (xc_func_t)func);
		}
	} else {
		xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
	}
}

/*
 * vbi_execute_on_one() - run func(arg) on CPU "c" only.
 */
void
vbi_execute_on_one(void *func, void *arg, int c)
{
	vbi_cpuset_t set;
	int i;

	/* empty set, then set only CPU c's bit */
	for (i = 0; i < VBI_SET_WORDS; ++i)
		set.words[i] = 0;
	BT_SET(set.words, c);
	if (use_old) {
		if (use_old_with_ulong) {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set.words[0], (xc_func_t)func);
		} else {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set, (xc_func_t)func);
		}
	} else {
		xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
	}
}
549
/*
 * vbi_lock_va() - lock the pages backing [addr, addr+len) in memory.
 * Returns 0 on success, -1 on failure.  *handle is always set to NULL;
 * it exists only for interface symmetry with vbi_unlock_va().
 */
int
vbi_lock_va(void *addr, size_t len, void **handle)
{
	faultcode_t err;

	/*
	 * kernel mappings on x86 are always locked, so only handle user.
	 */
	*handle = NULL;
	if (!IS_KERNEL(addr)) {
		/* F_SOFTLOCK faults in and locks the pages for writing */
		err = as_fault(VBIPROC()->p_as->a_hat, VBIPROC()->p_as,
		    (caddr_t)addr, len, F_SOFTLOCK, S_WRITE);
		if (err != 0) {
			VBI_VERBOSE("vbi_lock_va() failed to lock");
			return (-1);
		}
	}
	return (0);
}

/*
 * vbi_unlock_va() - undo vbi_lock_va().  "handle" is unused.
 */
/*ARGSUSED*/
void
vbi_unlock_va(void *addr, size_t len, void *handle)
{
	if (!IS_KERNEL(addr))
		as_fault(VBIPROC()->p_as->a_hat, VBIPROC()->p_as,
		    (caddr_t)addr, len, F_SOFTUNLOCK, S_WRITE);
}

/*
 * vbi_va_to_pa() - translate a kernel or current-process user virtual
 * address to its physical address.  Returns (uint64_t)-1 if the address
 * is not mapped.
 */
uint64_t
vbi_va_to_pa(void *addr)
{
	struct hat *hat;
	pfn_t pfn;
	uintptr_t v = (uintptr_t)addr;

	if (IS_KERNEL(v))
		hat = kas.a_hat;
	else
		hat = VBIPROC()->p_as->a_hat;
	pfn = hat_getpfnum(hat, (caddr_t)(v & PAGEMASK));
	if (pfn == PFN_INVALID)
		return (-(uint64_t)1);
	/* re-attach the intra-page offset */
	return (((uint64_t)pfn << PAGESHIFT) | (v & PAGEOFFSET));
}
595
596
597struct segvbi_crargs {
598 uint64_t *palist;
599 uint_t prot;
600};
601
602struct segvbi_data {
603 uint_t prot;
604};
605
606static struct seg_ops segvbi_ops;
607
608static int
609segvbi_create(struct seg *seg, void *args)
610{
611 struct segvbi_crargs *a = args;
612 struct segvbi_data *data;
613 struct as *as = seg->s_as;
614 int error = 0;
615 caddr_t va;
616 ulong_t pgcnt;
617 ulong_t p;
618
619 hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
620 data = kmem_zalloc(sizeof (*data), KM_SLEEP);
621 data->prot = a->prot | PROT_USER;
622
623 seg->s_ops = &segvbi_ops;
624 seg->s_data = data;
625
626 /*
627 * now load locked mappings to the pages
628 */
629 va = seg->s_base;
630 pgcnt = seg->s_size >> PAGESHIFT;
631 for (p = 0; p < pgcnt; ++p, va += PAGESIZE) {
632 hat_devload(as->a_hat, va,
633 PAGESIZE, a->palist[p] >> PAGESHIFT,
634 data->prot | HAT_UNORDERED_OK, HAT_LOAD | HAT_LOAD_LOCK);
635 }
636
637 return (error);
638}
639
640/*
641 * Duplicate a seg and return new segment in newseg.
642 */
643static int
644segvbi_dup(struct seg *seg, struct seg *newseg)
645{
646 struct segvbi_data *data = seg->s_data;
647 struct segvbi_data *ndata;
648
649 ndata = kmem_zalloc(sizeof (*data), KM_SLEEP);
650 ndata->prot = data->prot;
651 newseg->s_ops = &segvbi_ops;
652 newseg->s_data = ndata;
653
654 return (0);
655}
656
/*
 * segvbi_unmap() - unmap part or all of a vbi segment.  Only whole-segment
 * unmaps are supported (ENOTSUP otherwise); misaligned or out-of-range
 * requests are a fatal driver error.
 */
static int
segvbi_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
		panic("segvbi_unmap");

	if (addr != seg->s_base || len != seg->s_size)
		return (ENOTSUP);

	hat_unload(seg->s_as->a_hat, addr, len,
	    HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);

	seg_free(seg);
	return (0);
}

/*
 * segvbi_free() - release the segment's private data.
 */
static void
segvbi_free(struct seg *seg)
{
	struct segvbi_data *data = seg->s_data;
	kmem_free(data, sizeof (*data));
}
680
/*
 * We never demand-fault for seg_vbi: all mappings are pre-loaded and
 * locked at segment creation, so any fault here is a real access error.
 */
static int
segvbi_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
	return (FC_MAKE_ERR(EFAULT));
}

/* Fault-ahead: nothing to do, pages are already mapped. */
static int
segvbi_faulta(struct seg *seg, caddr_t addr)
{
	return (0);
}

/* Protection changes are not permitted on a vbi segment. */
static int
segvbi_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	return (EACCES);
}

static int
segvbi_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	return (EINVAL);
}

/* No read-ahead clustering. */
static int
segvbi_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (-1);
}

/* Nothing to sync; these pages are never paged out. */
static int
segvbi_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
	return (0);
}

/* Every page of a vbi segment is always resident; mark each in vec[]. */
static size_t
segvbi_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	size_t v;

	for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len;
	    len -= PAGESIZE, v += PAGESIZE)
		*vec++ = 1;
	return (v);
}

/* Lock operations are no-ops; mappings are locked at creation time. */
static int
segvbi_lockop(struct seg *seg, caddr_t addr,
    size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
{
	return (0);
}

/*
 * NOTE(review): this returns the protection flags themselves rather than
 * filling protv[] and returning 0 as seg drivers conventionally do --
 * confirm callers expect this.
 */
static int
segvbi_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct segvbi_data *data = seg->s_data;
	return (data->prot);
}

/* Offset of addr from the start of the segment. */
static u_offset_t
segvbi_getoffset(struct seg *seg, caddr_t addr)
{
	return ((uintptr_t)addr - (uintptr_t)seg->s_base);
}

static int
segvbi_gettype(struct seg *seg, caddr_t addr)
{
	return (MAP_SHARED);
}

/* Dummy vnode handed out for every vbi segment. */
static vnode_t vbivp;

static int
segvbi_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	*vpp = &vbivp;
	return (0);
}

/* madvise() hints are accepted and ignored. */
static int
segvbi_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	return (0);
}

/* Excluded from crash dumps. */
static void
segvbi_dump(struct seg *seg)
{}

static int
segvbi_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

static int
segvbi_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}

static int
segvbi_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
{
	return (ENODEV);
}

/* No lgroup memory placement policy. */
static lgrp_mem_policy_info_t *
segvbi_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/* No optional segment capabilities. */
static int
segvbi_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
807
/*
 * seg_ops vector for vbi segments; entry order must match struct seg_ops.
 */
static struct seg_ops segvbi_ops = {
	segvbi_dup,
	segvbi_unmap,
	segvbi_free,
	segvbi_fault,
	segvbi_faulta,
	segvbi_setprot,
	segvbi_checkprot,
	(int (*)())segvbi_kluster,
	(size_t (*)(struct seg *))NULL, /* swapout */
	segvbi_sync,
	segvbi_incore,
	segvbi_lockop,
	segvbi_getprot,
	segvbi_getoffset,
	segvbi_gettype,
	segvbi_getvp,
	segvbi_advise,
	segvbi_dump,
	segvbi_pagelock,
	segvbi_setpagesize,
	segvbi_getmemid,
	segvbi_getpolicy,
	segvbi_capable
};
833
834
835
/*
 * Interfaces to inject physical pages into user address space
 * and later remove them.
 *
 * vbi_user_map() - map the pages in palist[] (one physical address per
 * page, len bytes total) into the current process at a kernel-chosen
 * address returned through *va.  Returns 0 or an errno value.
 */
int
vbi_user_map(caddr_t *va, uint_t prot, uint64_t *palist, size_t len)
{
	struct as *as = VBIPROC()->p_as;
	struct segvbi_crargs args;
	int error = 0;

	args.palist = palist;
	args.prot = prot;
	as_rangelock(as);
	/* let the VM system pick an address hole of the right size */
	map_addr(va, len, 0, 0, MAP_SHARED);
	if (*va != NULL)
		error = as_map(as, *va, len, segvbi_create, &args);
	else
		error = ENOMEM;
	if (error)
		VBI_VERBOSE("vbi_user_map() failed");
	as_rangeunlock(as);
	return (error);
}
860
861
/*
 * This is revision 2 of the interface.
 */

/* Registration record for a CPU online/offline watcher. */
struct vbi_cpu_watch {
	void (*vbi_cpu_func)();	/* client callback: (arg, cpu, online) */
	void *vbi_cpu_arg;
};

/*
 * vbi_watcher() - cpu_setup_func_t trampoline; forwards CPU_ON/CPU_OFF
 * events to the client callback and ignores all other state changes.
 */
static int
vbi_watcher(cpu_setup_t state, int cpu, void *arg)
{
	vbi_cpu_watch_t *w = arg;
	int online;

	if (state == CPU_ON)
		online = 1;
	else if (state == CPU_OFF)
		online = 0;
	else
		return (0);
	w->vbi_cpu_func(w->vbi_cpu_arg, cpu, online);
	return (0);
}
886
887vbi_cpu_watch_t *
888vbi_watch_cpus(void (*func)(), void *arg, int current_too)
889{
890 int c;
891 vbi_cpu_watch_t *w;
892
893 w = kmem_alloc(sizeof (*w), KM_SLEEP);
894 w->vbi_cpu_func = func;
895 w->vbi_cpu_arg = arg;
896 mutex_enter(&cpu_lock);
897 register_cpu_setup_func(vbi_watcher, w);
898 if (current_too) {
899 for (c = 0; c < ncpus; ++c) {
900 if (cpu_is_online(cpu[c]))
901 func(arg, c, 1);
902 }
903 }
904 mutex_exit(&cpu_lock);
905 return (w);
906}
907
/*
 * vbi_ignore_cpus() - unregister a watcher from vbi_watch_cpus() and free
 * its registration record.
 */
void
vbi_ignore_cpus(vbi_cpu_watch_t *w)
{
	mutex_enter(&cpu_lock);
	unregister_cpu_setup_func(vbi_watcher, w);
	mutex_exit(&cpu_lock);
	kmem_free(w, sizeof (*w));
}

/*
 * Simple timers are pretty much a pass through to the cyclic subsystem.
 */
struct vbi_stimer {
	cyc_handler_t	s_handler;	/* cyclic callback descriptor */
	cyc_time_t	s_fire_time;	/* first fire time and interval */
	cyclic_id_t	s_cyclic;
	uint64_t	s_tick;		/* expiry counter passed to s_func */
	void		(*s_func)(void *, uint64_t);
	void		*s_arg;
};

/*
 * vbi_stimer_func() - cyclic handler; invokes the client callback with a
 * monotonically increasing tick count.
 */
static void
vbi_stimer_func(void *arg)
{
	vbi_stimer_t *t = arg;
	t->s_func(t->s_arg, ++t->s_tick);
}
935
936extern vbi_stimer_t *
937vbi_stimer_begin(
938 void (*func)(void *, uint64_t),
939 void *arg,
940 uint64_t when,
941 uint64_t interval,
942 int on_cpu)
943{
944 vbi_stimer_t *t = kmem_zalloc(sizeof (*t), KM_SLEEP);
945
946 t->s_handler.cyh_func = vbi_stimer_func;
947 t->s_handler.cyh_arg = t;
948 t->s_handler.cyh_level = CY_LOCK_LEVEL;
949 t->s_tick = 0;
950 t->s_func = func;
951 t->s_arg = arg;
952
953 mutex_enter(&cpu_lock);
954 if (on_cpu != VBI_ANY_CPU && !cpu_is_online(cpu[on_cpu])) {
955 t = NULL;
956 goto done;
957 }
958
959 when += gethrtime();
960 t->s_fire_time.cyt_when = when;
961 if (interval == 0)
962 t->s_fire_time.cyt_interval = INT64_MAX - when;
963 else
964 t->s_fire_time.cyt_interval = interval;
965 t->s_cyclic = cyclic_add(&t->s_handler, &t->s_fire_time);
966 if (on_cpu != VBI_ANY_CPU)
967 cyclic_bind(t->s_cyclic, cpu[on_cpu], NULL);
968done:
969 mutex_exit(&cpu_lock);
970 return (t);
971}
972
/*
 * vbi_stimer_end() - stop and free a timer from vbi_stimer_begin().
 */
extern void
vbi_stimer_end(vbi_stimer_t *t)
{
	mutex_enter(&cpu_lock);	/* required by cyclic_remove() */
	cyclic_remove(t->s_cyclic);
	mutex_exit(&cpu_lock);
	kmem_free(t, sizeof (*t));
}

/*
 * Global timers are more complicated. They include a counter on the callback,
 * that indicates the first call on a given cpu.
 */
struct vbi_gtimer {
	uint64_t	*g_counters;	/* per-CPU expiry counts, by cpu id */
	void		(*g_func)(void *, uint64_t);
	void		*g_arg;
	uint64_t	g_when;		/* absolute first fire time */
	uint64_t	g_interval;
	cyclic_id_t	g_cyclic;
};

/*
 * vbi_gtimer_func() - per-CPU cyclic handler; invokes the client callback
 * with this CPU's incremented expiry count.
 */
static void
vbi_gtimer_func(void *arg)
{
	vbi_gtimer_t *t = arg;
	t->g_func(t->g_arg, ++t->g_counters[vbi_cpu_id()]);
}
1001
/*
 * Whenever a cpu is onlined, need to reset the g_counters[] for it to zero.
 * This is the omni-cyclic online callback; it also fills in the handler
 * and firing time for the new CPU's per-CPU cyclic.
 */
static void
vbi_gtimer_online(void *arg, cpu_t *cpu, cyc_handler_t *h, cyc_time_t *ct)
{
	vbi_gtimer_t *t = arg;
	hrtime_t now;

	t->g_counters[cpu->cpu_id] = 0;
	h->cyh_func = vbi_gtimer_func;
	h->cyh_arg = t;
	h->cyh_level = CY_LOCK_LEVEL;
	now = gethrtime();
	/*
	 * If the original start time is already in the past, start half an
	 * interval from now to stay out of phase with the other CPUs.
	 */
	if (t->g_when < now)
		ct->cyt_when = now + t->g_interval / 2;
	else
		ct->cyt_when = t->g_when;
	ct->cyt_interval = t->g_interval;
}
1022
1023
1024vbi_gtimer_t *
1025vbi_gtimer_begin(
1026 void (*func)(void *, uint64_t),
1027 void *arg,
1028 uint64_t when,
1029 uint64_t interval)
1030{
1031 vbi_gtimer_t *t;
1032 cyc_omni_handler_t omni;
1033
1034 /*
1035 * one shot global timer is not supported yet.
1036 */
1037 if (interval == 0)
1038 return (NULL);
1039
1040 t = kmem_zalloc(sizeof (*t), KM_SLEEP);
1041 t->g_counters = kmem_zalloc(ncpus * sizeof (uint64_t), KM_SLEEP);
1042 t->g_when = when + gethrtime();
1043 t->g_interval = interval;
1044 t->g_arg = arg;
1045 t->g_func = func;
1046 t->g_cyclic = CYCLIC_NONE;
1047
1048 omni.cyo_online = (void (*)())vbi_gtimer_online;
1049 omni.cyo_offline = NULL;
1050 omni.cyo_arg = t;
1051
1052 mutex_enter(&cpu_lock);
1053 t->g_cyclic = cyclic_add_omni(&omni);
1054 mutex_exit(&cpu_lock);
1055 return (t);
1056}
1057
1058extern void
1059vbi_gtimer_end(vbi_gtimer_t *t)
1060{
1061 mutex_enter(&cpu_lock);
1062 cyclic_remove(t->g_cyclic);
1063 mutex_exit(&cpu_lock);
1064 kmem_free(t->g_counters, ncpus * sizeof (uint64_t));
1065 kmem_free(t, sizeof (*t));
1066}
1067
1068int
1069vbi_is_preempt_enabled(void)
1070{
1071 return (curthread->t_preempt == 0);
1072}
1073
1074void
1075vbi_poke_cpu(int c)
1076{
1077 if (c < ncpus)
1078 poke_cpu(c);
1079}
1080
/*
 * This is revision 5 of the interface. As more functions are added,
 * they should go after this point in the file and the revision level
 * increased. Also change vbi_modlmisc at the top of the file.
 */
uint_t vbi_revision_level = 5;

/*
 * vbi_lowmem_alloc() - allocate page-aligned memory whose physical
 * addresses lie below "phys".  The memory need not be physically
 * contiguous.  Note that "phys" is passed by value, so the caller does
 * not receive the resulting physical address; use vbi_va_to_pa() per page.
 */
void *
vbi_lowmem_alloc(uint64_t phys, size_t size)
{
	return (vbi_internal_alloc(&phys, size, 0));
}

/*
 * vbi_lowmem_free() - free memory from vbi_lowmem_alloc().
 */
void
vbi_lowmem_free(void *va, size_t size)
{
	p_contig_free(va, size);
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette