VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/vbi/i86pc/os/vbi.c@ 10954

Last change on this file since 10954 was 10954, checked in by vboxsync, 17 years ago

IPRT: Integrated vbi v2 changes (r0drv/solaris/vbi).

  • Property svn:eol-style set to native
File size: 20.0 KB
Line 
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#pragma ident "%Z%%M% %I% %E% SMI"
27
28/*
29 * Private interfaces for VirtualBox access to Solaris kernel internal
30 * facilities.
31 *
32 * See sys/vbi.h for what each function does.
33 */
34
35#include <sys/kmem.h>
36#include <sys/types.h>
37#include <sys/mman.h>
38#include <sys/thread.h>
39#include <sys/mutex.h>
40#include <sys/condvar.h>
41#include <sys/sdt.h>
42#include <sys/schedctl.h>
43#include <sys/time.h>
44#include <sys/sysmacros.h>
45#include <sys/cmn_err.h>
46#include <sys/vmsystm.h>
47#include <sys/cyclic.h>
48#include <sys/class.h>
49#include <sys/cpuvar.h>
50#include <sys/kobj.h>
51#include <sys/x_call.h>
52#include <sys/x86_archext.h>
53#include <vm/hat.h>
54#include <vm/seg_vn.h>
55#include <vm/seg_kmem.h>
56#include <sys/ddi.h>
57#include <sys/sunddi.h>
58#include <sys/modctl.h>
59
60#include <sys/vbi.h>
61
/*
 * If we are running on an old version of Solaris, then
 * we have to use dl_lookup to find contig_free().
 */
extern void *contig_alloc(size_t, ddi_dma_attr_t *, uintptr_t, int);
extern void contig_free(void *, size_t);
#pragma weak contig_free
/*
 * NULL when the running kernel does not export contig_free(); in that
 * case _init() resolves it with kobj_getsymvalue().
 */
static void (*p_contig_free)(void *, size_t) = contig_free;

/*
 * Workarounds for running on old versions of solaris with lower NCPU.
 * If we detect this, the assumption is that NCPU was such that a cpuset_t
 * is just a ulong_t
 */
static int use_old_xc_call = 0;
/* untyped alias of xc_call() so it can be passed a ulong_t CPU mask */
static void (*p_xc_call)() = (void (*)())xc_call;
#pragma weak cpuset_all
#pragma weak cpuset_all_but
#pragma weak cpuset_only

/* loadable module linkage: misc module, no device entry points */
static struct modlmisc vbi_modlmisc = {
	&mod_miscops, "Vbox Interfaces Ver 1"
};

static struct modlinkage vbi_modlinkage = {
	MODREV_1, (void *)&vbi_modlmisc, NULL
};

/* true when the address lies in the kernel's part of the address space */
#define	IS_KERNEL(v)	((uintptr_t)(v) >= kernelbase)
91
/*
 * Module load entry point.  Detects a kernel/module NCPU mismatch and
 * resolves contig_free() before registering with the module framework.
 * Returns 0 on success or an errno value.
 */
int
_init(void)
{
	int err;

	/*
	 * Check to see if this version of virtualbox interface module will work
	 * with the kernel. The sizeof (cpuset_t) is problematic, as it changed
	 * with the change to NCPU in nevada build 87 and S10U6.
	 */
	if (max_cpuid + 1 != NCPU)
		use_old_xc_call = 1;

	/*
	 * In older versions of Solaris contig_free() is a static routine.
	 */
	if (p_contig_free == NULL) {
		/* weak symbol did not resolve at load time; look it up by name */
		p_contig_free = (void (*)(void *, size_t))
		    kobj_getsymvalue("contig_free", 1);
		if (p_contig_free == NULL) {
			cmn_err(CE_NOTE, " contig_free() not found in kernel");
			return (EINVAL);
		}
	}

	err = mod_install(&vbi_modlinkage);
	if (err != 0)
		return (err);

	return (0);
}
123
/*
 * Module unload entry point; fails if the module is still in use.
 */
int
_fini(void)
{
	int err = mod_remove(&vbi_modlinkage);
	if (err != 0)
		return (err);

	return (0);
}

/*
 * Module information entry point.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&vbi_modlinkage, modinfop));
}
139
/*
 * Template DMA attributes for vbi_contig_alloc(); the caller's upper
 * physical address bound is patched into a copy for each allocation.
 */
static ddi_dma_attr_t base_attr = {
	DMA_ATTR_V0,		/* Version Number */
	(uint64_t)0,		/* lower limit */
	(uint64_t)0,		/* high limit */
	(uint64_t)0xffffffff,	/* counter limit */
	(uint64_t)MMU_PAGESIZE,	/* alignment */
	(uint64_t)MMU_PAGESIZE,	/* burst size */
	(uint64_t)MMU_PAGESIZE,	/* effective DMA size */
	(uint64_t)0xffffffff,	/* max DMA xfer size */
	(uint64_t)0xffffffff,	/* segment boundary */
	1,			/* list length (1 for contiguous) */
	1,			/* device granularity */
	0			/* bus-specific flags */
};
154
/*
 * Allocate physically contiguous, page-aligned memory.
 *
 *	*phys	in: highest acceptable physical address
 *		out: physical address of the allocation
 *	size	byte count; must be a multiple of the page size
 *
 * Returns the kernel virtual address, or NULL on failure.
 */
void *
vbi_contig_alloc(uint64_t *phys, size_t size)
{
	ddi_dma_attr_t attr;
	pfn_t pfn;
	void *ptr;

	/* only whole pages can be allocated */
	if ((size & MMU_PAGEOFFSET) != 0)
		return (NULL);

	attr = base_attr;
	attr.dma_attr_addr_hi = *phys;	/* caller's physical upper bound */
	ptr = contig_alloc(size, &attr, MMU_PAGESIZE, 1);

	if (ptr == NULL)
		return (NULL);

	/* report the physical address of the memory back to the caller */
	pfn = hat_getpfnum(kas.a_hat, (caddr_t)ptr);
	if (pfn == PFN_INVALID)
		panic("vbi_contig_alloc(): hat_getpfnum() failed\n");
	*phys = (uint64_t)pfn << MMU_PAGESHIFT;
	return (ptr);
}

/*
 * Free memory obtained from vbi_contig_alloc().  Goes through the
 * p_contig_free function pointer because contig_free() may have been
 * resolved at runtime (see _init()).
 */
void
vbi_contig_free(void *va, size_t size)
{
	p_contig_free(va, size);
}
184
/*
 * Map a physically contiguous range into kernel heap space with the
 * given protections.  pa and size must be page aligned.  Returns the
 * kernel virtual address, or NULL on bad alignment.
 */
void *
vbi_kernel_map(uint64_t pa, size_t size, uint_t prot)
{
	caddr_t va;

	if ((pa & MMU_PAGEOFFSET) || (size & MMU_PAGEOFFSET))
		return (NULL);

	va = vmem_alloc(heap_arena, size, VM_SLEEP);

	/* load locked translations so the mapping can't be torn down */
	hat_devload(kas.a_hat, va, size, (pfn_t)(pa >> MMU_PAGESHIFT),
	    prot, HAT_LOAD | HAT_LOAD_LOCK | HAT_UNORDERED_OK);

	return (va);
}

/*
 * Undo either a vbi_kernel_map() (kernel address) or a vbi_user_map()
 * (user address in the current process).
 */
void
vbi_unmap(void *va, size_t size)
{
	if (IS_KERNEL(va)) {
		hat_unload(kas.a_hat, va, size, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
		vmem_free(heap_arena, va, size);
	} else {
		struct as *as = curproc->p_as;

		as_rangelock(as);
		(void) as_unmap(as, va, size);
		as_rangeunlock(as);
	}
}
215
/*
 * Opaque handle for the current kernel thread.
 */
void *
vbi_curthread(void)
{
	return (curthread);
}

/*
 * Report whether the caller should voluntarily let go of the CPU.
 * Returns 1 when the CPU wants to preempt and our own temporary
 * kpreempt_disable() below is the only outstanding preemption hold
 * (t_preempt == 1), i.e. the caller itself had preemption enabled.
 */
int
vbi_yield(void)
{
	int rv = 0;

	kpreempt_disable();
	if (curthread->t_preempt == 1 && CPU->cpu_kprunrun)
		rv = 1;
	kpreempt_enable();
	return (rv);
}

/*
 * Granularity of timers: one clock tick, in nanoseconds.
 */
uint64_t
vbi_timer_granularity(void)
{
	return (nsec_per_tick);
}
239
/*
 * One-shot and repeating timers, built on the cyclic subsystem.
 */
typedef struct vbi_timer {
	cyc_handler_t	vbi_handler;	/* cyclic handler for this timer */
	cyclic_id_t	vbi_cyclic;	/* cyclic id, CYCLIC_NONE when idle */
	uint64_t	vbi_interval;	/* nanoseconds; 0 means one-shot */
	void		(*vbi_func)();	/* client callback */
	void		*vbi_arg1;	/* first argument to vbi_func */
	void		*vbi_arg2;	/* second argument to vbi_func */
} vbi_timer_t;

/*
 * Cyclic handler: disarm one-shot timers, then deliver the callback.
 * NOTE(review): vbi_timer_stop() acquires cpu_lock with mutex_enter()
 * and this runs from a CY_LOCK_LEVEL cyclic handler -- confirm that is
 * a legal context for taking cpu_lock.
 */
static void
vbi_timer_callback(void *arg)
{
	vbi_timer_t *t = arg;

	if (t->vbi_interval == 0)
		vbi_timer_stop(arg);
	t->vbi_func(t->vbi_arg1, t->vbi_arg2);
}

/*
 * Allocate and initialize a timer; interval of 0 makes it one-shot.
 * The timer is not armed until vbi_timer_start().  Returns an opaque
 * handle for the other vbi_timer_*() routines.
 */
void *
vbi_timer_create(void *callback, void *arg1, void *arg2, uint64_t interval)
{
	vbi_timer_t *t = kmem_zalloc(sizeof (*t), KM_SLEEP);

	t->vbi_func = (void (*)())callback;
	t->vbi_arg1 = arg1;
	t->vbi_arg2 = arg2;
	t->vbi_handler.cyh_func = vbi_timer_callback;
	t->vbi_handler.cyh_arg = (void *)t;
	t->vbi_handler.cyh_level = CY_LOCK_LEVEL;
	t->vbi_cyclic = CYCLIC_NONE;	/* not armed yet */
	t->vbi_interval = interval;
	return (t);
}
274
275void
276vbi_timer_destroy(void *timer)
277{
278 vbi_timer_t *t = timer;
279 if (t != NULL) {
280 vbi_timer_stop(timer);
281 kmem_free(t, sizeof (*t));
282 }
283}
284
/*
 * Arm the timer to first fire "when" nanoseconds from now, then every
 * vbi_interval nanoseconds thereafter.
 */
void
vbi_timer_start(void *timer, uint64_t when)
{
	vbi_timer_t *t = timer;
	cyc_time_t fire_time;
	uint64_t interval = t->vbi_interval;

	mutex_enter(&cpu_lock);		/* cyclic_add() requires cpu_lock */
	when += gethrtime();		/* convert relative to absolute time */
	fire_time.cyt_when = when;
	if (interval == 0)
		/* one-shot: push the next firing far into the future */
		fire_time.cyt_interval = when;
	else
		fire_time.cyt_interval = interval;
	t->vbi_cyclic = cyclic_add(&t->vbi_handler, &fire_time);
	mutex_exit(&cpu_lock);
}

/*
 * Disarm the timer; safe to call when it is not armed.
 */
void
vbi_timer_stop(void *timer)
{
	vbi_timer_t *t = timer;

	/* unlocked fast path; the state is re-checked under cpu_lock */
	if (t->vbi_cyclic == CYCLIC_NONE)
		return;
	mutex_enter(&cpu_lock);
	if (t->vbi_cyclic != CYCLIC_NONE) {
		cyclic_remove(t->vbi_cyclic);
		t->vbi_cyclic = CYCLIC_NONE;
	}
	mutex_exit(&cpu_lock);
}
317
/*
 * Time of day, in nanoseconds since the epoch.
 */
uint64_t
vbi_tod(void)
{
	timestruc_t ts;

	mutex_enter(&tod_lock);		/* tod_get() requires tod_lock */
	ts = tod_get();
	mutex_exit(&tod_lock);
	return ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec);
}


/*
 * Opaque handle for the current process.
 */
void *
vbi_proc(void)
{
	return (curproc);
}
335
/*
 * Change the scheduling priority of the given kernel thread.
 */
void
vbi_set_priority(void *thread, int priority)
{
	kthread_t *t = thread;

	thread_lock(t);
	(void) thread_change_pri(t, priority, 0);
	thread_unlock(t);
}

/*
 * Create a kernel thread in the current process, running func(arg)
 * with the given stack length and priority.  Returns an opaque thread
 * handle.
 */
void *
vbi_thread_create(void *func, void *arg, size_t len, int priority)
{
	kthread_t *t;

	t = thread_create(NULL, NULL, (void (*)())func, arg, len,
	    curproc, LMS_USER, priority);
	return (t);
}

/*
 * Terminate the calling thread; does not return.
 */
void
vbi_thread_exit(void)
{
	thread_exit();
}
361
/*
 * Allocate executable kernel memory from the heaptext arena.
 */
void *
vbi_text_alloc(size_t size)
{
	return (segkmem_alloc(heaptext_arena, size, KM_SLEEP));
}

/*
 * Free memory obtained from vbi_text_alloc().
 */
void
vbi_text_free(void *va, size_t size)
{
	segkmem_free(heaptext_arena, va, size);
}

/*
 * Id of the CPU the caller is currently running on.
 */
int
vbi_cpu_id(void)
{
	return (CPU->cpu_id);
}

/*
 * Largest possible CPU id for this kernel.
 */
int
vbi_max_cpu_id(void)
{
	return (NCPU - 1);
}

/*
 * Maximum number of CPUs the kernel supports.
 */
int
vbi_cpu_maxcount(void)
{
	return (NCPU);
}

/*
 * Number of CPUs currently present.
 */
int
vbi_cpu_count(void)
{
	return (ncpus);
}
397
/*
 * Returns non-zero if CPU c is online.
 */
int
vbi_cpu_online(int c)
{
	int x;

	mutex_enter(&cpu_lock);		/* cpu[] state is cpu_lock protected */
	x = cpu_is_online(cpu[c]);
	mutex_exit(&cpu_lock);
	return (x);
}

/*
 * Disable kernel preemption for the calling thread (nestable).
 */
void
vbi_preempt_disable(void)
{
	kpreempt_disable();
}

/*
 * Re-enable kernel preemption; may immediately preempt the caller.
 */
void
vbi_preempt_enable(void)
{
	kpreempt_enable();
}
420
/*
 * Run func(arg) on every CPU via a high priority cross call.  The
 * caller must have preemption disabled (see the ASSERT).
 */
void
vbi_execute_on_all(void *func, void *arg)
{
	cpuset_t set;
	ulong_t hack_set;
	int i;

	/*
	 * hack for a kernel compiled with the different NCPU than this module
	 */
	ASSERT(curthread->t_preempt >= 1);
	if (use_old_xc_call) {
		/* old kernel: a cpuset_t is just a ulong_t bit mask */
		hack_set = 0;
		for (i = 0; i < ncpus; ++i)
			hack_set |= 1ul << i;
		p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, hack_set,
		    (xc_func_t)func);
	} else {
		CPUSET_ALL(set);
		xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, set,
		    (xc_func_t)func);
	}
}

/*
 * Run func(arg) on every CPU except the caller's.  The caller must
 * have preemption disabled.
 */
void
vbi_execute_on_others(void *func, void *arg)
{
	cpuset_t set;
	ulong_t hack_set;
	int i;

	/*
	 * hack for a kernel compiled with the different NCPU than this module
	 */
	ASSERT(curthread->t_preempt >= 1);
	if (use_old_xc_call) {
		/* old kernel: build the ulong_t mask, skipping this CPU */
		hack_set = 0;
		for (i = 0; i < ncpus; ++i) {
			if (i != CPU->cpu_id)
				hack_set |= 1ul << i;
		}
		p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, hack_set,
		    (xc_func_t)func);
	} else {
		CPUSET_ALL_BUT(set, CPU->cpu_id);
		xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, set,
		    (xc_func_t)func);
	}
}

/*
 * Run func(arg) on CPU c only.  The caller must have preemption
 * disabled.
 */
void
vbi_execute_on_one(void *func, void *arg, int c)
{
	cpuset_t set;
	ulong_t hack_set;

	/*
	 * hack for a kernel compiled with the different NCPU than this module
	 */
	ASSERT(curthread->t_preempt >= 1);
	if (use_old_xc_call) {
		hack_set = 1ul << c;
		p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, hack_set,
		    (xc_func_t)func);
	} else {
		CPUSET_ONLY(set, c);
		xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI, set,
		    (xc_func_t)func);
	}
}
491
/*
 * Lock the pages backing [addr, addr + len) in memory; *handle
 * receives a cookie to pass to vbi_unlock_va().  Kernel addresses
 * need no locking and get a NULL cookie.  Returns 0 on success or an
 * as_pagelock() error code.
 */
int
vbi_lock_va(void *addr, size_t len, void **handle)
{
	page_t **ppl;
	int rc = 0;

	if (IS_KERNEL(addr)) {
		/* kernel mappings on x86 are always locked */
		*handle = NULL;
	} else {
		rc = as_pagelock(curproc->p_as, &ppl, (caddr_t)addr, len,
		    S_WRITE);
		if (rc != 0)
			return (rc);
		*handle = (void *)ppl;
	}
	return (rc);
}

/*
 * Undo a vbi_lock_va(); handle is the cookie it returned.
 */
void
vbi_unlock_va(void *addr, size_t len, void *handle)
{
	page_t **ppl = (page_t **)handle;

	if (IS_KERNEL(addr))
		ASSERT(handle == NULL);
	else
		as_pageunlock(curproc->p_as, ppl, (caddr_t)addr, len, S_WRITE);
}

/*
 * Translate a kernel or current-process user virtual address to its
 * physical address.  Returns -1 (all ones) if there is no mapping.
 */
uint64_t
vbi_va_to_pa(void *addr)
{
	struct hat *hat;
	pfn_t pfn;
	uintptr_t v = (uintptr_t)addr;

	if (IS_KERNEL(v))
		hat = kas.a_hat;
	else
		hat = curproc->p_as->a_hat;
	pfn = hat_getpfnum(hat, (caddr_t)(v & MMU_PAGEMASK));
	if (pfn == PFN_INVALID)
		return (-(uint64_t)1);
	return (((uint64_t)pfn << MMU_PAGESHIFT) | (v & MMU_PAGEOFFSET));
}
538
539
/*
 * Private segment driver ("segvbi") used to map a list of physical
 * pages into a user address space; see vbi_user_map().
 */
struct segvbi_crargs {
	uint64_t *palist;	/* physical address of each page to map */
	uint_t prot;		/* protections for the mapping */
};

struct segvbi_data {
	uint_t prot;		/* protections established at create time */
};

static struct seg_ops segvbi_ops;
550
/*
 * Create a segvbi segment: load locked translations to the physical
 * pages given in the create args.  Invoked via as_map() with the
 * address space write-locked.
 */
static int
segvbi_create(struct seg *seg, void *args)
{
	struct segvbi_crargs *a = args;
	struct segvbi_data *data;
	struct as *as = seg->s_as;
	int error = 0;
	caddr_t va;
	ulong_t pgcnt;
	ulong_t p;

	hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->prot = a->prot | PROT_USER;	/* user mappings need PROT_USER */

	seg->s_ops = &segvbi_ops;
	seg->s_data = data;

	/*
	 * now load locked mappings to the pages
	 */
	va = seg->s_base;
	ASSERT(((uintptr_t)va & MMU_PAGEOFFSET) == 0);
	pgcnt = seg->s_size >> MMU_PAGESHIFT;
	for (p = 0; p < pgcnt; ++p, va += MMU_PAGESIZE) {
		ASSERT((a->palist[p] & MMU_PAGEOFFSET) == 0);
		hat_devload(as->a_hat, va,
		    MMU_PAGESIZE, a->palist[p] >> MMU_PAGESHIFT,
		    data->prot | HAT_UNORDERED_OK, HAT_LOAD | HAT_LOAD_LOCK);
	}

	return (error);
}
584
585/*
586 * Duplicate a seg and return new segment in newseg.
587 */
588static int
589segvbi_dup(struct seg *seg, struct seg *newseg)
590{
591 struct segvbi_data *data = seg->s_data;
592 struct segvbi_data *ndata;
593
594 ndata = kmem_zalloc(sizeof (*data), KM_SLEEP);
595 ndata->prot = data->prot;
596 newseg->s_ops = &segvbi_ops;
597 newseg->s_data = ndata;
598
599 return (0);
600}
601
/*
 * Unmap a segvbi segment; only unmapping the entire segment at once
 * is supported.
 */
/*ARGSUSED*/
static int
segvbi_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	/* the range must be page aligned and lie within the segment */
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & MMU_PAGEOFFSET) || ((uintptr_t)addr & MMU_PAGEOFFSET))
		panic("segvbi_unmap");

	if (addr != seg->s_base || len != seg->s_size)
		return (ENOTSUP);

	hat_unload(seg->s_as->a_hat, addr, len,
	    HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);

	seg_free(seg);
	return (0);
}

/*
 * Free the per-segment private data.
 */
static void
segvbi_free(struct seg *seg)
{
	struct segvbi_data *data = seg->s_data;
	kmem_free(data, sizeof (*data));
}
626
/*
 * We never demand-fault for seg_vbi.
 */
/*ARGSUSED*/
static int
segvbi_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
	return (FC_MAKE_ERR(EFAULT));
}

/* Fault-ahead: nothing to do, the mappings are preloaded and locked. */
/*ARGSUSED*/
static int
segvbi_faulta(struct seg *seg, caddr_t addr)
{
	return (0);
}

/* Changing protections after creation is not allowed. */
/*ARGSUSED*/
static int
segvbi_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	return (EACCES);
}

/* Protection checks are not supported. */
/*ARGSUSED*/
static int
segvbi_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	return (EINVAL);
}

/* No fault clustering. */
/*ARGSUSED*/
static int
segvbi_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (-1);
}

/* Nothing to write back; always succeeds. */
/*ARGSUSED*/
static int
segvbi_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
	return (0);
}

/* Every page of a segvbi segment is always in core. */
/*ARGSUSED*/
static size_t
segvbi_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	size_t v;

	/* round len up to whole pages, marking one vec entry per page */
	for (v = 0, len = (len + MMU_PAGEOFFSET) & MMU_PAGEMASK; len;
	    len -= MMU_PAGESIZE, v += MMU_PAGESIZE)
		*vec++ = 1;
	return (v);
}

/* Lock operations are no-ops: pages are locked at create time. */
/*ARGSUSED*/
static int
segvbi_lockop(struct seg *seg, caddr_t addr,
    size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
{
	return (0);
}
692
/*
 * Report segment protections.
 * NOTE(review): seg_ops getprot conventionally fills protv[] per page
 * and returns 0; this returns the prot value itself and never writes
 * protv -- confirm callers expect that.
 */
/*ARGSUSED*/
static int
segvbi_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct segvbi_data *data = seg->s_data;
	return (data->prot);
}

/* Offset of addr from the start of the segment. */
static u_offset_t
segvbi_getoffset(struct seg *seg, caddr_t addr)
{
	return ((uintptr_t)addr - (uintptr_t)seg->s_base);
}

/* segvbi mappings behave like shared mappings. */
/*ARGSUSED*/
static int
segvbi_gettype(struct seg *seg, caddr_t addr)
{
	return (MAP_SHARED);
}

/* dummy vnode handed out for all segvbi mappings */
static vnode_t vbivp;

/*ARGSUSED*/
static int
segvbi_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	*vpp = &vbivp;
	return (0);
}
723
/* Memory-use advice is ignored. */
/*ARGSUSED*/
static int
segvbi_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	return (0);
}

/* Nothing to contribute to a crash dump. */
/*ARGSUSED*/
static void
segvbi_dump(struct seg *seg)
{}

/* Segment-level page locking is not supported. */
/*ARGSUSED*/
static int
segvbi_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/* Large pages are not supported. */
/*ARGSUSED*/
static int
segvbi_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}

/* Memory ids are not supported. */
/*ARGSUSED*/
static int
segvbi_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
{
	return (ENODEV);
}

/* No lgroup memory placement policy. */
/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segvbi_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/* No optional segment capabilities. */
/*ARGSUSED*/
static int
segvbi_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
771
/*
 * Operations vector for the segvbi segment driver; entry order is
 * dictated by struct seg_ops.
 */
static struct seg_ops segvbi_ops = {
	segvbi_dup,
	segvbi_unmap,
	segvbi_free,
	segvbi_fault,
	segvbi_faulta,
	segvbi_setprot,
	segvbi_checkprot,
	(int (*)())segvbi_kluster,
	(size_t (*)(struct seg *))NULL, /* swapout */
	segvbi_sync,
	segvbi_incore,
	segvbi_lockop,
	segvbi_getprot,
	segvbi_getoffset,
	segvbi_gettype,
	segvbi_getvp,
	segvbi_advise,
	segvbi_dump,
	segvbi_pagelock,
	segvbi_setpagesize,
	segvbi_getmemid,
	segvbi_getpolicy,
	segvbi_capable
};
797
798
799
/*
 * Interfaces to inject physical pages into user address space
 * and later remove them.
 *
 * Maps the pages in palist into the current process at an address
 * chosen by map_addr(); *va returns the chosen address.  Returns 0 on
 * success or an errno value.
 */
int
vbi_user_map(caddr_t *va, uint_t prot, uint64_t *palist, size_t len)
{
	struct as *as = curproc->p_as;
	struct segvbi_crargs args;
	int error = 0;

	args.palist = palist;
	args.prot = prot;
	as_rangelock(as);
	/* pick a free virtual address range; *va is NULL if none found */
	map_addr(va, len, 0, 0, MAP_SHARED);
	ASSERT(((uintptr_t)*va & MMU_PAGEOFFSET) == 0);
	ASSERT((len & MMU_PAGEOFFSET) == 0);
	ASSERT(len != 0);
	if (*va != NULL)
		error = as_map(as, *va, len, segvbi_create, &args);
	else
		error = ENOMEM;
	as_rangeunlock(as);
	return (error);
}
825
/*
 * This is revision 2 of the interface. As more functions are added,
 * they should go after this point in the file and the revision level
 * increased.
 */
uint_t vbi_revision_level = 2;

/*
 * Registration record for a CPU online/offline watcher; see
 * vbi_watch_cpus() and vbi_ignore_cpus().
 */
struct vbi_cpu_watch {
	void (*vbi_cpu_func)();	/* client callback: (arg, cpu, online) */
	void *vbi_cpu_arg;	/* client-supplied argument */
};
837
838static int
839vbi_watcher(cpu_setup_t state, int cpu, void *arg)
840{
841 vbi_cpu_watch_t *w = arg;
842 int online;
843
844 if (state == CPU_ON)
845 online = 1;
846 else if (state == CPU_OFF)
847 online = 0;
848 else
849 return (0);
850 w->vbi_cpu_func(w->vbi_cpu_arg, cpu, online);
851 return (0);
852}
853
/*
 * Register func(arg, cpu, online) to be called on CPU online/offline
 * transitions.  If current_too is set, the callback is also invoked
 * immediately for every CPU that is already online.  Returns a handle
 * for vbi_ignore_cpus().
 */
vbi_cpu_watch_t *
vbi_watch_cpus(void (*func)(), void *arg, int current_too)
{
	int c;
	vbi_cpu_watch_t *w;

	w = kmem_alloc(sizeof (*w), KM_SLEEP);
	w->vbi_cpu_func = func;
	w->vbi_cpu_arg = arg;
	mutex_enter(&cpu_lock);	/* register_cpu_setup_func() requires it */
	register_cpu_setup_func(vbi_watcher, w);
	if (current_too) {
		for (c = 0; c < ncpus; ++c) {
			if (cpu_is_online(cpu[c]))
				func(arg, c, 1);
		}
	}
	mutex_exit(&cpu_lock);
	return (w);
}

/*
 * Unregister and free a watcher created by vbi_watch_cpus().
 */
void
vbi_ignore_cpus(vbi_cpu_watch_t *w)
{
	mutex_enter(&cpu_lock);
	unregister_cpu_setup_func(vbi_watcher, w);
	mutex_exit(&cpu_lock);
	kmem_free(w, sizeof (*w));
}
883
/*
 * Simple timers are pretty much a pass through to the cyclic subsystem.
 */
struct vbi_stimer {
	cyc_handler_t	s_handler;	/* cyclic handler */
	cyc_time_t	s_fire_time;	/* first firing time and interval */
	cyclic_id_t	s_cyclic;	/* id of the underlying cyclic */
	uint64_t	s_tick;		/* count of firings so far */
	void		(*s_func)(void *, uint64_t);	/* client callback */
	void		*s_arg;		/* argument passed to s_func */
};

/*
 * Cyclic handler: deliver the client callback with a running tick
 * count.
 */
static void
vbi_stimer_func(void *arg)
{
	vbi_stimer_t *t = arg;
	t->s_func(t->s_arg, t->s_tick++);
}
902
903extern vbi_stimer_t *
904vbi_stimer_begin(
905 void (*func)(void *, uint64_t),
906 void *arg,
907 uint64_t when,
908 uint64_t interval,
909 int on_cpu)
910{
911 vbi_stimer_t *t = kmem_zalloc(sizeof (*t), KM_SLEEP);
912
913 ASSERT(when < INT64_MAX);
914 ASSERT(interval < INT64_MAX);
915 ASSERT(interval + when < INT64_MAX);
916
917 t->s_handler.cyh_func = vbi_stimer_func;
918 t->s_handler.cyh_arg = t;
919 t->s_handler.cyh_level = CY_LOCK_LEVEL;
920 t->s_tick = 0;
921 t->s_func = func;
922 t->s_arg = arg;
923
924 mutex_enter(&cpu_lock);
925 if (on_cpu != VBI_ANY_CPU && !vbi_cpu_online(on_cpu)) {
926 t = NULL;
927 goto done;
928 }
929
930 when += gethrtime();
931 t->s_fire_time.cyt_when = when;
932 if (interval == 0)
933 t->s_fire_time.cyt_interval = INT64_MAX - when;
934 else
935 t->s_fire_time.cyt_interval = interval;
936 t->s_cyclic = cyclic_add(&t->s_handler, &t->s_fire_time);
937 if (on_cpu != VBI_ANY_CPU)
938 cyclic_bind(t->s_cyclic, cpu[on_cpu], NULL);
939done:
940 mutex_exit(&cpu_lock);
941 return (t);
942}
943
/*
 * Stop and free a timer started with vbi_stimer_begin().
 */
extern void
vbi_stimer_end(vbi_stimer_t *t)
{
	ASSERT(t->s_cyclic != CYCLIC_NONE);
	mutex_enter(&cpu_lock);	/* cyclic_remove() requires cpu_lock */
	cyclic_remove(t->s_cyclic);
	mutex_exit(&cpu_lock);
	kmem_free(t, sizeof (*t));
}
953
/*
 * Global timers are more complicated. They include a counter on the callback,
 * that indicates the first call on a given cpu.
 */
struct vbi_gtimer {
	uint64_t	*g_counters;	/* per-CPU counters, ncpus entries */
	void		(*g_func)(void *, uint64_t);	/* client callback */
	void		*g_arg;		/* argument passed to g_func */
	uint64_t	g_when;		/* absolute first firing time */
	uint64_t	g_interval;	/* firing interval, nanoseconds */
	cyclic_id_t	g_cyclic;	/* underlying omnipresent cyclic */
};

/*
 * Per-CPU cyclic handler: deliver the callback with this CPU's count.
 * NOTE(review): g_counters[] is zeroed in vbi_gtimer_online() but never
 * incremented anywhere in this file, so the callback always sees 0 --
 * confirm whether callers rely on a running count.
 */
static void
vbi_gtimer_func(void *arg)
{
	vbi_gtimer_t *t = arg;
	t->g_func(t->g_arg, t->g_counters[CPU->cpu_id]);
}
973
/*
 * Whenever a cpu is onlined, need to reset the g_counters[] for it to zero.
 * This is the omni-cyclic online callback; it also supplies the
 * per-CPU handler and firing times to the cyclic subsystem.
 */
static void
vbi_gtimer_online(void *arg, cpu_t *cpu, cyc_handler_t *h, cyc_time_t *ct)
{
	vbi_gtimer_t *t = arg;
	hrtime_t now;

	t->g_counters[cpu->cpu_id] = 0;
	h->cyh_func = vbi_gtimer_func;
	h->cyh_arg = t;
	h->cyh_level = CY_LOCK_LEVEL;
	now = gethrtime();
	/* if the start time has already passed, fire half an interval out */
	if (t->g_when < now)
		ct->cyt_when = now + t->g_interval / 2;
	else
		ct->cyt_when = t->g_when;
	ct->cyt_interval = t->g_interval;
}
994
995
/*
 * Start a timer that fires on every online CPU: first "when"
 * nanoseconds from now, then every "interval" nanoseconds.  Returns a
 * handle for vbi_gtimer_end(), or NULL for the unsupported one-shot
 * case.
 */
vbi_gtimer_t *
vbi_gtimer_begin(
	void (*func)(void *, uint64_t),
	void *arg,
	uint64_t when,
	uint64_t interval)
{
	vbi_gtimer_t *t;
	cyc_omni_handler_t omni;

	/*
	 * one shot global timer is not supported yet.
	 */
	if (interval == 0)
		return (NULL);

	ASSERT(when < INT64_MAX);
	ASSERT(interval < INT64_MAX);
	ASSERT(interval + when < INT64_MAX);

	t = kmem_zalloc(sizeof (*t), KM_SLEEP);
	t->g_counters = kmem_zalloc(ncpus * sizeof (uint64_t), KM_SLEEP);
	t->g_when = when + gethrtime();	/* convert relative to absolute */
	t->g_interval = interval;
	t->g_arg = arg;
	t->g_func = func;
	t->g_cyclic = CYCLIC_NONE;

	/* omnipresent cyclic: vbi_gtimer_online() runs per onlined CPU */
	omni.cyo_online = (void (*)())vbi_gtimer_online;
	omni.cyo_offline = NULL;
	omni.cyo_arg = t;

	mutex_enter(&cpu_lock);	/* cyclic_add_omni() requires cpu_lock */
	t->g_cyclic = cyclic_add_omni(&omni);
	mutex_exit(&cpu_lock);
	return (t);
}
1033
/*
 * Stop and free a timer started with vbi_gtimer_begin().
 */
extern void
vbi_gtimer_end(vbi_gtimer_t *t)
{
	ASSERT(t->g_cyclic != CYCLIC_NONE);
	mutex_enter(&cpu_lock);	/* cyclic_remove() requires cpu_lock */
	cyclic_remove(t->g_cyclic);
	mutex_exit(&cpu_lock);
	kmem_free(t->g_counters, ncpus * sizeof (uint64_t));
	kmem_free(t, sizeof (*t));
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette