VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/vbi/i86pc/os/vbi.c@ 27010

Last change on this file since 27010 was 27010, checked in by vboxsync, 15 years ago

solaris/vbi/vbi.c: spaces to tabs.

  • Property svn:eol-style set to native
File size: 24.9 KB
Line 
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26/*
27 * Private interfaces for VirtualBox access to Solaris kernel internal
28 * facilities.
29 *
30 * See sys/vbi.h for what each function does.
31 */
32
33#include <sys/kmem.h>
34#include <sys/types.h>
35#include <sys/mman.h>
36#include <sys/thread.h>
37#include <sys/mutex.h>
38#include <sys/condvar.h>
39#include <sys/sdt.h>
40#include <sys/schedctl.h>
41#include <sys/time.h>
42#include <sys/sysmacros.h>
43#include <sys/cmn_err.h>
44#include <sys/vmsystm.h>
45#include <sys/cyclic.h>
46#include <sys/class.h>
47#include <sys/cpuvar.h>
48#include <sys/kobj.h>
49#include <sys/x_call.h>
50#include <sys/x86_archext.h>
51#include <vm/hat.h>
52#include <vm/seg_vn.h>
53#include <vm/seg_kmem.h>
54#include <sys/ddi.h>
55#include <sys/sunddi.h>
56#include <sys/modctl.h>
57#include <sys/machparam.h>
58#include <sys/utsname.h>
59
60#include "vbi.h"
61
/* Shorthand for the current process, via the exported vbi_proc(). */
#define	VBIPROC() ((proc_t *)vbi_proc())

/*
 * contig_free() is not exported by every kernel build.  The weak reference
 * below resolves to NULL where the symbol is static, and vbi_init() then
 * locates it at runtime with kobj_getsymvalue().
 */
extern void *contig_alloc(size_t, ddi_dma_attr_t *, uintptr_t, int);
extern void contig_free(void *, size_t);
#pragma weak contig_free
static void (*p_contig_free)(void *, size_t) = contig_free;
71
72/*
73 * Workarounds for running on old versions of solaris with different cross call
74 * interfaces. If we find xc_init_cpu() in the kernel, then just use the defined
75 * interfaces for xc_call() from the include file where the xc_call()
76 * interfaces just takes a pointer to a ulong_t array. The array must be long
77 * enough to hold "ncpus" bits at runtime.
78 *
79 * The reason for the hacks is that using the type "cpuset_t" is pretty much
80 * impossible from code built outside the Solaris source repository that wants
81 * to run on multiple releases of Solaris.
82 *
83 * For old style xc_call()s, 32 bit solaris and older 64 bit versions use
84 * "ulong_t" as cpuset_t.
85 *
86 * Later versions of 64 bit Solaris used: struct {ulong_t words[x];}
87 * where "x" depends on NCPU.
88 *
89 * We detect the difference in 64 bit support by checking the kernel value of
90 * max_cpuid, which always holds the compiled value of NCPU - 1.
91 *
92 * If Solaris increases NCPU to more than 256, this module will continue
93 * to work on all versions of Solaris as long as the number of installed
94 * CPUs in the machine is <= VBI_NCPU. If VBI_NCPU is increased, this code
95 * has to be re-written some to provide compatibility with older Solaris which
96 * expects cpuset_t to be based on NCPU==256 -- or we discontinue support
97 * of old Nevada/S10.
98 */
static int use_old = 0;			/* kernel only has the old xc_call() */
static int use_old_with_ulong = 0;	/* ... and its cpuset_t is a ulong_t */
/* Unprototyped so it can be invoked with either calling convention. */
static void (*p_xc_call)() = (void (*)())xc_call;

#define	VBI_NCPU	256	/* matches NCPU of the old kernels we support */
#define	VBI_SET_WORDS	(VBI_NCPU / (sizeof (ulong_t) * 8))
/* Local stand-in for cpuset_t: one bit per possible CPU. */
typedef struct vbi_cpuset {
	ulong_t words[VBI_SET_WORDS];
} vbi_cpuset_t;
#define	X_CALL_HIPRI	(2)	/* for old Solaris interface */
109
110/*
111 * module linkage stuff
112 */
113#if 0
114static struct modlmisc vbi_modlmisc = {
115 &mod_miscops, "VirtualBox Interfaces V6"
116};
117
118static struct modlinkage vbi_modlinkage = {
119 MODREV_1, { (void *)&vbi_modlmisc, NULL }
120};
121#endif
122
123extern uintptr_t kernelbase;
124#define IS_KERNEL(v) ((uintptr_t)(v) >= kernelbase)
125
126static int vbi_verbose = 0;
127
128#define VBI_VERBOSE(msg) {if (vbi_verbose) cmn_err(CE_WARN, msg);}
129
/* Introduced in v6 */
static int vbi_is_nevada = 0;	/* 1 when running on Nevada/OpenSolaris */

/*
 * Hard-coded byte offsets of the scheduler fields poked directly below,
 * because the cpu_t/kthread_t layouts differ between Solaris releases.
 * vbi_init() selects the matching set and sanity checks it.
 */
#ifdef _LP64
/* 64-bit Solaris 10 offsets */
/* CPU */
static int off_s10_cpu_runrun = 232;
static int off_s10_cpu_kprunrun = 233;
/* kthread_t */
static int off_s10_t_preempt = 42;

/* 64-bit Solaris 11 (Nevada/OpenSolaris) offsets */
/* CPU */
static int off_s11_cpu_runrun = 216;
static int off_s11_cpu_kprunrun = 217;
/* kthread_t */
static int off_s11_t_preempt = 42;
#else
/* 32-bit Solaris 10 offsets */
/* CPU */
static int off_s10_cpu_runrun = 124;
static int off_s10_cpu_kprunrun = 125;
/* kthread_t */
static int off_s10_t_preempt = 26;

/* 32-bit Solaris 11 (Nevada/OpenSolaris) offsets */
/* CPU */
static int off_s11_cpu_runrun = 112;
static int off_s11_cpu_kprunrun = 113;
/* kthread_t */
static int off_s11_t_preempt = 26;
#endif


/* Which offsets will be used */
static int off_cpu_runrun = -1;
static int off_cpu_kprunrun = -1;
static int off_t_preempt = -1;

/* Field accessors built on the offsets chosen by vbi_init(). */
#define	VBI_T_PREEMPT		(*((char *)curthread + off_t_preempt))
#define	VBI_CPU_KPRUNRUN	(*((char *)CPU + off_cpu_kprunrun))
#define	VBI_CPU_RUNRUN		(*((char *)CPU + off_cpu_runrun))

#undef kpreempt_disable
#undef kpreempt_enable

/*
 * Open-coded kpreempt_disable()/kpreempt_enable() using the offsets above.
 * Wrapped in do/while (0) so each expands to a single statement that is
 * safe inside unbraced if/else; the original bare brace blocks were not.
 */
#define	VBI_PREEMPT_DISABLE()			\
	do {					\
		VBI_T_PREEMPT++;		\
		ASSERT(VBI_T_PREEMPT >= 1);	\
	} while (0)
#define	VBI_PREEMPT_ENABLE()			\
	do {					\
		ASSERT(VBI_T_PREEMPT >= 1);	\
		if (--VBI_T_PREEMPT == 0 &&	\
		    VBI_CPU_RUNRUN)		\
			kpreempt(KPREEMPT_SYNC); \
	} while (0)

/* End of v6 intro */
190
#if 0
/*
 * Module load entry point -- compiled out together with the modlinkage
 * definitions above.
 */
int
_init(void)
{
	int err = vbi_init();
	if (!err)
		err = mod_install(&vbi_modlinkage);
	return (err);
}
#endif
201
202int
203vbi_init(void)
204{
205 /*
206 * Check to see if this version of virtualbox interface module will work
207 * with the kernel.
208 */
209 if (kobj_getsymvalue("xc_init_cpu", 1) != NULL) {
210 /*
211 * Our bit vector storage needs to be large enough for the
212 * actual number of CPUs running in the sytem.
213 */
214 if (ncpus > VBI_NCPU)
215 return (EINVAL);
216 } else {
217 use_old = 1;
218 if (max_cpuid + 1 == sizeof(ulong_t) * 8)
219 use_old_with_ulong = 1;
220 else if (max_cpuid + 1 != VBI_NCPU)
221 return (EINVAL); /* cpuset_t size mismatch */
222 }
223
224 /*
225 * In older versions of Solaris contig_free() is a static routine.
226 */
227 if (p_contig_free == NULL) {
228 p_contig_free = (void (*)(void *, size_t))
229 kobj_getsymvalue("contig_free", 1);
230 if (p_contig_free == NULL) {
231 cmn_err(CE_NOTE, " contig_free() not found in kernel");
232 return (EINVAL);
233 }
234 }
235
236 /*
237 * Check if this is S10 or Nevada
238 */
239 if (!strncmp(utsname.release, "5.11", sizeof("5.11") - 1))
240 {
241 /* Nevada detected... */
242 vbi_is_nevada = 1;
243
244 off_cpu_runrun = off_s11_cpu_runrun;
245 off_cpu_kprunrun = off_s11_cpu_kprunrun;
246 off_t_preempt = off_s11_t_preempt;
247 }
248 else
249 {
250 /* Solaris 10 detected... */
251 vbi_is_nevada = 0;
252
253 off_cpu_runrun = off_s10_cpu_runrun;
254 off_cpu_kprunrun = off_s10_cpu_kprunrun;
255 off_t_preempt = off_s10_t_preempt;
256 }
257
258 /*
259 * Sanity checking...
260 */
261 /* CPU */
262 char crr = VBI_CPU_RUNRUN;
263 char krr = VBI_CPU_KPRUNRUN;
264 if ( (crr < 0 || crr > 1)
265 || (krr < 0 || krr > 1))
266 {
267 cmn_err(CE_NOTE, ":CPU structure sanity check failed! OS version mismatch.\n");
268 return EINVAL;
269 }
270
271 /* Thread */
272 char t_preempt = VBI_T_PREEMPT;
273 if (t_preempt < 0 || t_preempt > 32)
274 {
275 cmn_err(CE_NOTE, ":Thread structure sanity check failed! OS version mismatch.\n");
276 return EINVAL;
277 }
278 return (0);
279}
280
#if 0
/*
 * Module unload entry point -- compiled out together with the modlinkage
 * definitions above.
 */
int
_fini(void)
{
	int err = mod_remove(&vbi_modlinkage);
	if (err != 0)
		return (err);

	return (0);
}

/* Module info entry point -- compiled out. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&vbi_modlinkage, modinfop));
}
#endif
298
299
/*
 * Template DMA attributes used by vbi_internal_alloc().  The address
 * limit, alignment and scatter/gather length are overwritten per request;
 * the rest stays fixed.
 */
static ddi_dma_attr_t base_attr = {
	DMA_ATTR_V0,		/* Version Number */
	(uint64_t)0,		/* lower limit */
	(uint64_t)0,		/* high limit (set per allocation) */
	(uint64_t)0xffffffff,	/* counter limit */
	(uint64_t)PAGESIZE,	/* pagesize alignment (overridden per request) */
	(uint64_t)PAGESIZE,	/* pagesize burst size */
	(uint64_t)PAGESIZE,	/* pagesize effective DMA size */
	(uint64_t)0xffffffff,	/* max DMA xfer size */
	(uint64_t)0xffffffff,	/* segment boundary */
	1,			/* list length (1 for contiguous) */
	1,			/* device granularity */
	0			/* bus-specific flags */
};
314
315static void *
316vbi_internal_alloc(uint64_t *phys, size_t size, uint64_t alignment, int contig)
317{
318 ddi_dma_attr_t attr;
319 pfn_t pfn;
320 void *ptr;
321 uint_t npages;
322
323 if ((size & PAGEOFFSET) != 0)
324 return (NULL);
325 npages = (size + PAGESIZE - 1) >> PAGESHIFT;
326 if (npages == 0)
327 return (NULL);
328
329 attr = base_attr;
330 attr.dma_attr_addr_hi = *phys;
331 attr.dma_attr_align = alignment;
332 if (!contig)
333 attr.dma_attr_sgllen = npages;
334 ptr = contig_alloc(size, &attr, PAGESIZE, 1);
335
336 if (ptr == NULL) {
337 VBI_VERBOSE("vbi_internal_alloc() failure");
338 return (NULL);
339 }
340
341 pfn = hat_getpfnum(kas.a_hat, (caddr_t)ptr);
342 if (pfn == PFN_INVALID)
343 panic("vbi_contig_alloc(): hat_getpfnum() failed\n");
344 *phys = (uint64_t)pfn << PAGESHIFT;
345 return (ptr);
346}
347
348void *
349vbi_contig_alloc(uint64_t *phys, size_t size)
350{
351 /* Obsolete */
352 return (vbi_internal_alloc(phys, size, PAGESIZE /* alignment */, 1 /* contiguous */));
353}
354
355void
356vbi_contig_free(void *va, size_t size)
357{
358 /* Obsolete */
359 p_contig_free(va, size);
360}
361
362void *
363vbi_kernel_map(uint64_t pa, size_t size, uint_t prot)
364{
365 caddr_t va;
366
367 if ((pa & PAGEOFFSET) || (size & PAGEOFFSET)) {
368 VBI_VERBOSE("vbi_kernel_map() bad pa or size");
369 return (NULL);
370 }
371
372 va = vmem_alloc(heap_arena, size, VM_SLEEP);
373
374 hat_devload(kas.a_hat, va, size, (pfn_t)(pa >> PAGESHIFT),
375 prot, HAT_LOAD | HAT_LOAD_LOCK | HAT_UNORDERED_OK);
376
377 return (va);
378}
379
380void
381vbi_unmap(void *va, size_t size)
382{
383 if (IS_KERNEL(va)) {
384 hat_unload(kas.a_hat, va, size, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
385 vmem_free(heap_arena, va, size);
386 } else {
387 struct as *as = VBIPROC()->p_as;
388
389 as_rangelock(as);
390 (void) as_unmap(as, va, size);
391 as_rangeunlock(as);
392 }
393}
394
395void *
396vbi_curthread(void)
397{
398 return (curthread);
399}
400
401int
402vbi_yield(void)
403{
404 int rv = 0;
405
406 vbi_preempt_disable();
407
408 char tpr = VBI_T_PREEMPT;
409 char kpr = VBI_CPU_KPRUNRUN;
410 if (tpr == 1 && kpr)
411 rv = 1;
412
413 vbi_preempt_enable();
414 return (rv);
415}
416
417uint64_t
418vbi_timer_granularity(void)
419{
420 return (nsec_per_tick);
421}
422
/* State for the original (v1) timer interface, backed by a cyclic. */
typedef struct vbi_timer {
	cyc_handler_t	vbi_handler;	/* cyclic subsystem handler */
	cyclic_id_t	vbi_cyclic;	/* CYCLIC_NONE while disarmed */
	uint64_t	vbi_interval;	/* 0 means one-shot */
	void		(*vbi_func)();	/* user callback */
	void		*vbi_arg1;	/* passed to vbi_func */
	void		*vbi_arg2;	/* passed to vbi_func */
} vbi_timer_t;

/*
 * Cyclic handler: disarm one-shot timers, then invoke the user callback.
 * NOTE(review): vbi_timer_stop() ends up calling cyclic_remove() from
 * cyclic handler context here -- confirm that is legal at CY_LOCK_LEVEL.
 */
static void
vbi_timer_callback(void *arg)
{
	vbi_timer_t *t = arg;

	if (t->vbi_interval == 0)
		vbi_timer_stop(arg);
	t->vbi_func(t->vbi_arg1, t->vbi_arg2);
}
441
442void *
443vbi_timer_create(void *callback, void *arg1, void *arg2, uint64_t interval)
444{
445 vbi_timer_t *t = kmem_zalloc(sizeof (*t), KM_SLEEP);
446
447 t->vbi_func = (void (*)())callback;
448 t->vbi_arg1 = arg1;
449 t->vbi_arg2 = arg2;
450 t->vbi_handler.cyh_func = vbi_timer_callback;
451 t->vbi_handler.cyh_arg = (void *)t;
452 t->vbi_handler.cyh_level = CY_LOCK_LEVEL;
453 t->vbi_cyclic = CYCLIC_NONE;
454 t->vbi_interval = interval;
455 return (t);
456}
457
458void
459vbi_timer_destroy(void *timer)
460{
461 vbi_timer_t *t = timer;
462 if (t != NULL) {
463 vbi_timer_stop(timer);
464 kmem_free(t, sizeof (*t));
465 }
466}
467
468void
469vbi_timer_start(void *timer, uint64_t when)
470{
471 vbi_timer_t *t = timer;
472 cyc_time_t fire_time;
473 uint64_t interval = t->vbi_interval;
474
475 mutex_enter(&cpu_lock);
476 when += gethrtime();
477 fire_time.cyt_when = when;
478 if (interval == 0)
479 fire_time.cyt_interval = when;
480 else
481 fire_time.cyt_interval = interval;
482 t->vbi_cyclic = cyclic_add(&t->vbi_handler, &fire_time);
483 mutex_exit(&cpu_lock);
484}
485
486void
487vbi_timer_stop(void *timer)
488{
489 vbi_timer_t *t = timer;
490
491 if (t->vbi_cyclic == CYCLIC_NONE)
492 return;
493 mutex_enter(&cpu_lock);
494 if (t->vbi_cyclic != CYCLIC_NONE) {
495 cyclic_remove(t->vbi_cyclic);
496 t->vbi_cyclic = CYCLIC_NONE;
497 }
498 mutex_exit(&cpu_lock);
499}
500
501uint64_t
502vbi_tod(void)
503{
504 timestruc_t ts;
505
506 mutex_enter(&tod_lock);
507 ts = tod_get();
508 mutex_exit(&tod_lock);
509 return ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec);
510}
511
512
513void *
514vbi_proc(void)
515{
516 proc_t *p;
517 drv_getparm(UPROCP, &p);
518 return (p);
519}
520
521void
522vbi_set_priority(void *thread, int priority)
523{
524 kthread_t *t = thread;
525
526 thread_lock(t);
527 (void) thread_change_pri(t, priority, 0);
528 thread_unlock(t);
529}
530
531void *
532vbi_thread_create(void *func, void *arg, size_t len, int priority)
533{
534 kthread_t *t;
535
536 t = thread_create(NULL, NULL, (void (*)())func, arg, len,
537 VBIPROC(), TS_RUN, priority);
538 return (t);
539}
540
541void
542vbi_thread_exit(void)
543{
544 thread_exit();
545}
546
547void *
548vbi_text_alloc(size_t size)
549{
550 return (segkmem_alloc(heaptext_arena, size, KM_SLEEP));
551}
552
553void
554vbi_text_free(void *va, size_t size)
555{
556 segkmem_free(heaptext_arena, va, size);
557}
558
559int
560vbi_cpu_id(void)
561{
562 return (CPU->cpu_id);
563}
564
565int
566vbi_max_cpu_id(void)
567{
568 return (max_cpuid);
569}
570
571int
572vbi_cpu_maxcount(void)
573{
574 return (max_cpuid + 1);
575}
576
577int
578vbi_cpu_count(void)
579{
580 return (ncpus);
581}
582
583int
584vbi_cpu_online(int c)
585{
586 int x;
587
588 mutex_enter(&cpu_lock);
589 x = cpu_is_online(cpu[c]);
590 mutex_exit(&cpu_lock);
591 return (x);
592}
593
/* Disable kernel preemption for the calling thread (nestable). */
void
vbi_preempt_disable(void)
{
	VBI_PREEMPT_DISABLE();
}

/*
 * Re-enable kernel preemption; on the outermost enable, yield to a
 * pending preemption if one was requested.
 */
void
vbi_preempt_enable(void)
{
	VBI_PREEMPT_ENABLE();
}
605
606void
607vbi_execute_on_all(void *func, void *arg)
608{
609 vbi_cpuset_t set;
610 int i;
611
612 for (i = 0; i < VBI_SET_WORDS; ++i)
613 set.words[i] = (ulong_t)-1L;
614 if (use_old) {
615 if (use_old_with_ulong) {
616 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
617 set.words[0], (xc_func_t)func);
618 } else {
619 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
620 set, (xc_func_t)func);
621 }
622 } else {
623 xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
624 }
625}
626
627void
628vbi_execute_on_others(void *func, void *arg)
629{
630 vbi_cpuset_t set;
631 int i;
632
633 for (i = 0; i < VBI_SET_WORDS; ++i)
634 set.words[i] = (ulong_t)-1L;
635 BT_CLEAR(set.words, vbi_cpu_id());
636 if (use_old) {
637 if (use_old_with_ulong) {
638 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
639 set.words[0], (xc_func_t)func);
640 } else {
641 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
642 set, (xc_func_t)func);
643 }
644 } else {
645 xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
646 }
647}
648
649void
650vbi_execute_on_one(void *func, void *arg, int c)
651{
652 vbi_cpuset_t set;
653 int i;
654
655 for (i = 0; i < VBI_SET_WORDS; ++i)
656 set.words[i] = 0;
657 BT_SET(set.words, c);
658 if (use_old) {
659 if (use_old_with_ulong) {
660 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
661 set.words[0], (xc_func_t)func);
662 } else {
663 p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
664 set, (xc_func_t)func);
665 }
666 } else {
667 xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
668 }
669}
670
671int
672vbi_lock_va(void *addr, size_t len, int access, void **handle)
673{
674 faultcode_t err;
675
676 /*
677 * kernel mappings on x86 are always locked, so only handle user.
678 */
679 *handle = NULL;
680 if (!IS_KERNEL(addr)) {
681 err = as_fault(VBIPROC()->p_as->a_hat, VBIPROC()->p_as,
682 (caddr_t)addr, len, F_SOFTLOCK, access);
683 if (err != 0) {
684 VBI_VERBOSE("vbi_lock_va() failed to lock");
685 return (-1);
686 }
687 }
688 return (0);
689}
690
691/*ARGSUSED*/
692void
693vbi_unlock_va(void *addr, size_t len, int access, void *handle)
694{
695 if (!IS_KERNEL(addr))
696 as_fault(VBIPROC()->p_as->a_hat, VBIPROC()->p_as,
697 (caddr_t)addr, len, F_SOFTUNLOCK, access);
698}
699
700uint64_t
701vbi_va_to_pa(void *addr)
702{
703 struct hat *hat;
704 pfn_t pfn;
705 uintptr_t v = (uintptr_t)addr;
706
707 if (IS_KERNEL(v))
708 hat = kas.a_hat;
709 else
710 hat = VBIPROC()->p_as->a_hat;
711 pfn = hat_getpfnum(hat, (caddr_t)(v & PAGEMASK));
712 if (pfn == PFN_INVALID)
713 return (-(uint64_t)1);
714 return (((uint64_t)pfn << PAGESHIFT) | (v & PAGEOFFSET));
715}
716
717
/* Arguments handed from vbi_user_map() to segvbi_create(). */
struct segvbi_crargs {
	uint64_t *palist;	/* physical addresses, one per page */
	uint_t prot;		/* protection bits for the mapping */
};

/* Per-segment private data: just the effective protections. */
struct segvbi_data {
	uint_t prot;
};

static struct seg_ops segvbi_ops;
728
/*
 * Segment create callback for as_map(): set up the segvbi driver data
 * and eagerly load locked translations for every page in the physical
 * address list, so the segment never demand-faults.
 */
static int
segvbi_create(struct seg *seg, void *args)
{
	struct segvbi_crargs *a = args;
	struct segvbi_data *data;
	struct as *as = seg->s_as;
	int error = 0;
	caddr_t va;
	ulong_t pgcnt;
	ulong_t p;

	hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->prot = a->prot | PROT_USER;	/* user mapping: force PROT_USER */

	seg->s_ops = &segvbi_ops;
	seg->s_data = data;

	/*
	 * now load locked mappings to the pages
	 */
	va = seg->s_base;
	pgcnt = (seg->s_size + PAGESIZE - 1) >> PAGESHIFT;
	for (p = 0; p < pgcnt; ++p, va += PAGESIZE) {
		hat_devload(as->a_hat, va,
		    PAGESIZE, a->palist[p] >> PAGESHIFT,
		    data->prot | HAT_UNORDERED_OK, HAT_LOAD | HAT_LOAD_LOCK);
	}

	return (error);
}
760
761/*
762 * Duplicate a seg and return new segment in newseg.
763 */
764static int
765segvbi_dup(struct seg *seg, struct seg *newseg)
766{
767 struct segvbi_data *data = seg->s_data;
768 struct segvbi_data *ndata;
769
770 ndata = kmem_zalloc(sizeof (*data), KM_SLEEP);
771 ndata->prot = data->prot;
772 newseg->s_ops = &segvbi_ops;
773 newseg->s_data = ndata;
774
775 return (0);
776}
777
778static int
779segvbi_unmap(struct seg *seg, caddr_t addr, size_t len)
780{
781 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
782 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
783 panic("segvbi_unmap");
784
785 if (addr != seg->s_base || len != seg->s_size)
786 return (ENOTSUP);
787
788 hat_unload(seg->s_as->a_hat, addr, len,
789 HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);
790
791 seg_free(seg);
792 return (0);
793}
794
795static void
796segvbi_free(struct seg *seg)
797{
798 struct segvbi_data *data = seg->s_data;
799 kmem_free(data, sizeof (*data));
800}
801
802/*
803 * We never demand-fault for seg_vbi.
804 */
805static int
806segvbi_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
807 enum fault_type type, enum seg_rw rw)
808{
809 return (FC_MAKE_ERR(EFAULT));
810}
811
812static int
813segvbi_faulta(struct seg *seg, caddr_t addr)
814{
815 return (0);
816}
817
818static int
819segvbi_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
820{
821 return (EACCES);
822}
823
824static int
825segvbi_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
826{
827 return (EINVAL);
828}
829
830static int
831segvbi_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
832{
833 return (-1);
834}
835
836static int
837segvbi_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
838{
839 return (0);
840}
841
842static size_t
843segvbi_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
844{
845 size_t v;
846
847 for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len;
848 len -= PAGESIZE, v += PAGESIZE)
849 *vec++ = 1;
850 return (v);
851}
852
853static int
854segvbi_lockop(struct seg *seg, caddr_t addr,
855 size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
856{
857 return (0);
858}
859
860static int
861segvbi_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
862{
863 struct segvbi_data *data = seg->s_data;
864 return (data->prot);
865}
866
867static u_offset_t
868segvbi_getoffset(struct seg *seg, caddr_t addr)
869{
870 return ((uintptr_t)addr - (uintptr_t)seg->s_base);
871}
872
873static int
874segvbi_gettype(struct seg *seg, caddr_t addr)
875{
876 return (MAP_SHARED);
877}
878
879static vnode_t vbivp;
880
881static int
882segvbi_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
883{
884 *vpp = &vbivp;
885 return (0);
886}
887
888static int
889segvbi_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
890{
891 return (0);
892}
893
894static void
895segvbi_dump(struct seg *seg)
896{}
897
898static int
899segvbi_pagelock(struct seg *seg, caddr_t addr, size_t len,
900 struct page ***ppp, enum lock_type type, enum seg_rw rw)
901{
902 return (ENOTSUP);
903}
904
905static int
906segvbi_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
907{
908 return (ENOTSUP);
909}
910
911static int
912segvbi_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
913{
914 return (ENODEV);
915}
916
917static lgrp_mem_policy_info_t *
918segvbi_getpolicy(struct seg *seg, caddr_t addr)
919{
920 return (NULL);
921}
922
923static int
924segvbi_capable(struct seg *seg, segcapability_t capability)
925{
926 return (0);
927}
928
/*
 * Segment driver operations vector; entries follow the struct seg_ops
 * member order.  swapout is unsupported and left NULL.
 */
static struct seg_ops segvbi_ops = {
	segvbi_dup,
	segvbi_unmap,
	segvbi_free,
	segvbi_fault,
	segvbi_faulta,
	segvbi_setprot,
	segvbi_checkprot,
	(int (*)())segvbi_kluster,
	(size_t (*)(struct seg *))NULL, /* swapout */
	segvbi_sync,
	segvbi_incore,
	segvbi_lockop,
	segvbi_getprot,
	segvbi_getoffset,
	segvbi_gettype,
	segvbi_getvp,
	segvbi_advise,
	segvbi_dump,
	segvbi_pagelock,
	segvbi_setpagesize,
	segvbi_getmemid,
	segvbi_getpolicy,
	segvbi_capable
};
954
955
956
957/*
958 * Interfaces to inject physical pages into user address space
959 * and later remove them.
960 */
961int
962vbi_user_map(caddr_t *va, uint_t prot, uint64_t *palist, size_t len)
963{
964 struct as *as = VBIPROC()->p_as;
965 struct segvbi_crargs args;
966 int error = 0;
967
968 args.palist = palist;
969 args.prot = prot;
970 as_rangelock(as);
971 map_addr(va, len, 0, 0, MAP_SHARED);
972 if (*va != NULL)
973 {
974 error = as_map(as, *va, len, segvbi_create, &args);
975 }
976 else
977 error = ENOMEM;
978 if (error)
979 VBI_VERBOSE("vbi_user_map() failed");
980 as_rangeunlock(as);
981 return (error);
982}
983
984
985/*
986 * This is revision 2 of the interface.
987 */
988
989struct vbi_cpu_watch {
990 void (*vbi_cpu_func)();
991 void *vbi_cpu_arg;
992};
993
994static int
995vbi_watcher(cpu_setup_t state, int icpu, void *arg)
996{
997 vbi_cpu_watch_t *w = arg;
998 int online;
999
1000 if (state == CPU_ON)
1001 online = 1;
1002 else if (state == CPU_OFF)
1003 online = 0;
1004 else
1005 return (0);
1006 w->vbi_cpu_func(w->vbi_cpu_arg, icpu, online);
1007 return (0);
1008}
1009
1010vbi_cpu_watch_t *
1011vbi_watch_cpus(void (*func)(), void *arg, int current_too)
1012{
1013 int c;
1014 vbi_cpu_watch_t *w;
1015
1016 w = kmem_alloc(sizeof (*w), KM_SLEEP);
1017 w->vbi_cpu_func = func;
1018 w->vbi_cpu_arg = arg;
1019 mutex_enter(&cpu_lock);
1020 register_cpu_setup_func(vbi_watcher, w);
1021 if (current_too) {
1022 for (c = 0; c < ncpus; ++c) {
1023 if (cpu_is_online(cpu[c]))
1024 func(arg, c, 1);
1025 }
1026 }
1027 mutex_exit(&cpu_lock);
1028 return (w);
1029}
1030
1031void
1032vbi_ignore_cpus(vbi_cpu_watch_t *w)
1033{
1034 mutex_enter(&cpu_lock);
1035 unregister_cpu_setup_func(vbi_watcher, w);
1036 mutex_exit(&cpu_lock);
1037 kmem_free(w, sizeof (*w));
1038}
1039
/*
 * Simple timers are pretty much a pass through to the cyclic subsystem.
 */
struct vbi_stimer {
	cyc_handler_t	s_handler;	/* cyclic handler for this timer */
	cyc_time_t	s_fire_time;	/* first fire time and interval */
	cyclic_id_t	s_cyclic;	/* id used to remove the cyclic */
	uint64_t	s_tick;		/* count of firings so far */
	void		(*s_func)(void *, uint64_t);	/* user callback */
	void		*s_arg;		/* user callback argument */
};

/* Cyclic handler: invoke the callback with the incremented tick count. */
static void
vbi_stimer_func(void *arg)
{
	vbi_stimer_t *t = arg;
	t->s_func(t->s_arg, ++t->s_tick);
}
1058
1059extern vbi_stimer_t *
1060vbi_stimer_begin(
1061 void (*func)(void *, uint64_t),
1062 void *arg,
1063 uint64_t when,
1064 uint64_t interval,
1065 int on_cpu)
1066{
1067 vbi_stimer_t *t = kmem_zalloc(sizeof (*t), KM_SLEEP);
1068
1069 t->s_handler.cyh_func = vbi_stimer_func;
1070 t->s_handler.cyh_arg = t;
1071 t->s_handler.cyh_level = CY_LOCK_LEVEL;
1072 t->s_tick = 0;
1073 t->s_func = func;
1074 t->s_arg = arg;
1075
1076 mutex_enter(&cpu_lock);
1077 if (on_cpu != VBI_ANY_CPU && !cpu_is_online(cpu[on_cpu])) {
1078 t = NULL;
1079 goto done;
1080 }
1081
1082 when += gethrtime();
1083 t->s_fire_time.cyt_when = when;
1084 if (interval == 0)
1085 t->s_fire_time.cyt_interval = INT64_MAX - when;
1086 else
1087 t->s_fire_time.cyt_interval = interval;
1088 t->s_cyclic = cyclic_add(&t->s_handler, &t->s_fire_time);
1089 if (on_cpu != VBI_ANY_CPU)
1090 cyclic_bind(t->s_cyclic, cpu[on_cpu], NULL);
1091done:
1092 mutex_exit(&cpu_lock);
1093 return (t);
1094}
1095
1096extern void
1097vbi_stimer_end(vbi_stimer_t *t)
1098{
1099 mutex_enter(&cpu_lock);
1100 cyclic_remove(t->s_cyclic);
1101 mutex_exit(&cpu_lock);
1102 kmem_free(t, sizeof (*t));
1103}
1104
/*
 * Global timers are more complicated. They include a counter on the callback,
 * that indicates the first call on a given cpu.
 */
struct vbi_gtimer {
	uint64_t	*g_counters;	/* per-cpu fire counts, indexed by cpu id */
	void		(*g_func)(void *, uint64_t);	/* user callback */
	void		*g_arg;		/* user callback argument */
	uint64_t	g_when;		/* absolute first fire time */
	uint64_t	g_interval;	/* fire interval, nanoseconds */
	cyclic_id_t	g_cyclic;	/* omni cyclic id */
};

/* Omni-cyclic handler: deliver this CPU's incremented tick count. */
static void
vbi_gtimer_func(void *arg)
{
	vbi_gtimer_t *t = arg;
	t->g_func(t->g_arg, ++t->g_counters[vbi_cpu_id()]);
}

/*
 * Whenever a cpu is onlined, need to reset the g_counters[] for it to zero.
 * NOTE(review): g_counters is sized by ncpus but indexed by cpu_id, which
 * can exceed ncpus-1 on sparse cpu id configurations -- confirm.
 */
static void
vbi_gtimer_online(void *arg, cpu_t *pcpu, cyc_handler_t *h, cyc_time_t *ct)
{
	vbi_gtimer_t *t = arg;
	hrtime_t now;

	t->g_counters[pcpu->cpu_id] = 0;
	h->cyh_func = vbi_gtimer_func;
	h->cyh_arg = t;
	h->cyh_level = CY_LOCK_LEVEL;
	now = gethrtime();
	/* if the start time has already passed, fire half an interval out */
	if (t->g_when < now)
		ct->cyt_when = now + t->g_interval / 2;
	else
		ct->cyt_when = t->g_when;
	ct->cyt_interval = t->g_interval;
}
1145
1146
1147vbi_gtimer_t *
1148vbi_gtimer_begin(
1149 void (*func)(void *, uint64_t),
1150 void *arg,
1151 uint64_t when,
1152 uint64_t interval)
1153{
1154 vbi_gtimer_t *t;
1155 cyc_omni_handler_t omni;
1156
1157 /*
1158 * one shot global timer is not supported yet.
1159 */
1160 if (interval == 0)
1161 return (NULL);
1162
1163 t = kmem_zalloc(sizeof (*t), KM_SLEEP);
1164 t->g_counters = kmem_zalloc(ncpus * sizeof (uint64_t), KM_SLEEP);
1165 t->g_when = when + gethrtime();
1166 t->g_interval = interval;
1167 t->g_arg = arg;
1168 t->g_func = func;
1169 t->g_cyclic = CYCLIC_NONE;
1170
1171 omni.cyo_online = (void (*)())vbi_gtimer_online;
1172 omni.cyo_offline = NULL;
1173 omni.cyo_arg = t;
1174
1175 mutex_enter(&cpu_lock);
1176 t->g_cyclic = cyclic_add_omni(&omni);
1177 mutex_exit(&cpu_lock);
1178 return (t);
1179}
1180
1181extern void
1182vbi_gtimer_end(vbi_gtimer_t *t)
1183{
1184 mutex_enter(&cpu_lock);
1185 cyclic_remove(t->g_cyclic);
1186 mutex_exit(&cpu_lock);
1187 kmem_free(t->g_counters, ncpus * sizeof (uint64_t));
1188 kmem_free(t, sizeof (*t));
1189}
1190
1191int
1192vbi_is_preempt_enabled(void)
1193{
1194 char tpr = VBI_T_PREEMPT;
1195 return (tpr == 0);
1196}
1197
1198void
1199vbi_poke_cpu(int c)
1200{
1201 if (c < ncpus)
1202 poke_cpu(c);
1203}
1204
1205/*
1206 * This is revision 5 of the interface. As more functions are added,
1207 * they should go after this point in the file and the revision level
1208 * increased. Also change vbi_modlmisc at the top of the file.
1209 */
1210uint_t vbi_revision_level = 7;
1211
1212void *
1213vbi_lowmem_alloc(uint64_t phys, size_t size)
1214{
1215 return (vbi_internal_alloc(&phys, size, PAGESIZE /* alignment */, 0 /* non-contiguous */));
1216}
1217
1218void
1219vbi_lowmem_free(void *va, size_t size)
1220{
1221 p_contig_free(va, size);
1222}
1223
1224/*
1225 * This is revision 6 of the interface.
1226 */
1227
1228int
1229vbi_is_preempt_pending(void)
1230{
1231 char crr = VBI_CPU_RUNRUN;
1232 char krr = VBI_CPU_KPRUNRUN;
1233 return crr != 0 || krr != 0;
1234}
1235
1236/*
1237 * This is revision 7 of the interface.
1238 */
1239
1240void *
1241vbi_phys_alloc(uint64_t *phys, size_t size, uint64_t alignment, int contig)
1242{
1243 return (vbi_internal_alloc(phys, size, alignment, contig));
1244}
1245
1246void
1247vbi_phys_free(void *va, size_t size)
1248{
1249 p_contig_free(va, size);
1250}
1251
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette