VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/thread-r0drv-linux.c@53517

Last change on this file since 53517 was 48935, checked in by vboxsync, 11 years ago

Runtime: Whitespace and svn:keyword cleanups by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 6.3 KB
/* $Id: thread-r0drv-linux.c 48935 2013-10-07 21:19:37Z vboxsync $ */
/** @file
 * IPRT - Threads, Ring-0 Driver, Linux.
 */

/*
 * Copyright (C) 2006-2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-linux-kernel.h"
#include "internal/iprt.h"
#include <iprt/thread.h>

#include <iprt/asm.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/mp.h>


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifndef CONFIG_PREEMPT
/** Per-cpu preemption counters. */
static int32_t volatile g_acPreemptDisabled[NR_CPUS];
#endif


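/**
 * Gets the native thread handle of the calling thread.
 *
 * On Linux this is simply the kernel's 'current' task pointer.
 */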
RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
{
    return (RTNATIVETHREAD)current;
}
RT_EXPORT_SYMBOL(RTThreadNativeSelf);


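/**
 * Common worker for RTThreadSleep and RTThreadSleepNoLog.
 *
 * schedule_timeout returns the number of jiffies left on the timeout; zero
 * means the requested interval fully elapsed, anything else means a signal
 * woke the thread early.
 *
 * @returns VINF_SUCCESS if the sleep completed, VERR_INTERRUPTED otherwise.
 * @param   cMillies    The number of milliseconds to sleep.
 */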
static int rtR0ThreadLnxSleepCommon(RTMSINTERVAL cMillies)
{
    long cJiffies = msecs_to_jiffies(cMillies);
    set_current_state(TASK_INTERRUPTIBLE);
    cJiffies = schedule_timeout(cJiffies);
    if (!cJiffies)
        return VINF_SUCCESS;
    return VERR_INTERRUPTED;
}


RTDECL(int) RTThreadSleep(RTMSINTERVAL cMillies)
{
    return rtR0ThreadLnxSleepCommon(cMillies);
}
RT_EXPORT_SYMBOL(RTThreadSleep);


RTDECL(int) RTThreadSleepNoLog(RTMSINTERVAL cMillies)
{
    return rtR0ThreadLnxSleepCommon(cMillies);
}
RT_EXPORT_SYMBOL(RTThreadSleepNoLog);


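/**
 * Yields the CPU to other runnable threads.
 *
 * Always returns true on Linux; the kernel gives no indication of whether
 * another thread actually got to run.
 */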
RTDECL(bool) RTThreadYield(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 20)
    yield();
#else
    /** @todo r=ramshankar: Can we use cond_resched() instead? */
    set_current_state(TASK_RUNNING);
    sys_sched_yield();
    schedule();
#endif
    return true;
}
RT_EXPORT_SYMBOL(RTThreadYield);


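/**
 * Checks whether preemption is currently enabled for the calling thread.
 *
 * On CONFIG_PREEMPT kernels the kernel's own state is queried (via
 * preemptible() where available); otherwise the per-CPU counters maintained
 * by RTThreadPreemptDisable below are consulted, together with in_atomic()
 * and the interrupt flag where the kernel version provides them.
 */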
RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
{
#ifdef CONFIG_PREEMPT
    Assert(hThread == NIL_RTTHREAD);
# ifdef preemptible
    return preemptible();
# else
    return preempt_count() == 0 && !in_atomic() && !irqs_disabled();
# endif
#else
    int32_t c;

    Assert(hThread == NIL_RTTHREAD);
    c = g_acPreemptDisabled[smp_processor_id()];
    AssertMsg(c >= 0 && c < 32, ("%d\n", c));
    if (c != 0)
        return false;
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
    if (in_atomic())
        return false;
# endif
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 28)
    if (irqs_disabled())
        return false;
# else
    if (!ASMIntAreEnabled())
        return false;
# endif
    return true;
#endif
}
RT_EXPORT_SYMBOL(RTThreadPreemptIsEnabled);


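/**
 * Checks whether the scheduler has flagged the calling thread for
 * rescheduling (TIF_NEED_RESCHED or its equivalent on older kernels).
 */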
RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 4)
    return !!test_tsk_thread_flag(current, TIF_NEED_RESCHED);

#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 20)
    return !!need_resched();

#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 110)
    return current->need_resched != 0;

#else
    return need_resched != 0;
#endif
}
RT_EXPORT_SYMBOL(RTThreadPreemptIsPending);


RTDECL(bool) RTThreadPreemptIsPendingTrusty(void)
{
    /* Yes, RTThreadPreemptIsPending is reliable. */
    return true;
}
RT_EXPORT_SYMBOL(RTThreadPreemptIsPendingTrusty);


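/**
 * Checks whether kernel preemption is possible at all on this kernel,
 * i.e. whether it was built with CONFIG_PREEMPT.
 */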
RTDECL(bool) RTThreadPreemptIsPossible(void)
{
    /** @todo r=ramshankar: What about CONFIG_PREEMPT_VOLUNTARY? That can preempt
     *        too, but does so voluntarily at explicit preemption points. */
#ifdef CONFIG_PREEMPT
    return true;    /* Yes, kernel preemption is possible. */
#else
    return false;   /* No kernel preemption. */
#endif
}
RT_EXPORT_SYMBOL(RTThreadPreemptIsPossible);


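/**
 * Disables preemption and records enough state for RTThreadPreemptRestore
 * to undo it.
 *
 * Without CONFIG_PREEMPT the kernel will not preempt us anyway, so only the
 * per-CPU nesting counter consulted by RTThreadPreemptIsEnabled is updated.
 */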
RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
{
#ifdef CONFIG_PREEMPT
    AssertPtr(pState);
    Assert(pState->u32Reserved == 0);
    pState->u32Reserved = 42;
    /* This ASSUMES that CONFIG_PREEMPT_COUNT is always defined with CONFIG_PREEMPT. */
    preempt_disable();
    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);

#else  /* !CONFIG_PREEMPT */
    int32_t c;
    AssertPtr(pState);
    Assert(pState->u32Reserved == 0);

    /* Do our own accounting. */
    c = ASMAtomicIncS32(&g_acPreemptDisabled[smp_processor_id()]);
    AssertMsg(c > 0 && c < 32, ("%d\n", c));
    pState->u32Reserved = c;
    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
#endif
}
RT_EXPORT_SYMBOL(RTThreadPreemptDisable);


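/**
 * Counterpart to RTThreadPreemptDisable: re-enables preemption, or on
 * non-preemptible kernels unwinds one level of the per-CPU nesting counter.
 */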
RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
{
#ifdef CONFIG_PREEMPT
    AssertPtr(pState);
    Assert(pState->u32Reserved == 42);
    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
    preempt_enable();

#else
    int32_t volatile *pc;
    AssertPtr(pState);
    AssertMsg(pState->u32Reserved > 0 && pState->u32Reserved < 32, ("%d\n", pState->u32Reserved));
    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);

    /* Do our own accounting. */
    pc = &g_acPreemptDisabled[smp_processor_id()];
    AssertMsg(pState->u32Reserved == (uint32_t)*pc, ("u32Reserved=%d *pc=%d\n", pState->u32Reserved, *pc));
    ASMAtomicUoWriteS32(pc, pState->u32Reserved - 1);
#endif
    pState->u32Reserved = 0;
}
RT_EXPORT_SYMBOL(RTThreadPreemptRestore);


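/**
 * Checks whether the calling thread is running in interrupt context.
 */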
RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD); NOREF(hThread);

    return in_interrupt() != 0;
}
RT_EXPORT_SYMBOL(RTThreadIsInInterrupt);
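
For context, a caller of the preemption API above would typically bracket a short per-CPU critical section with the disable/restore pair. A minimal sketch, assuming the RTTHREADPREEMPTSTATE_INITIALIZER macro from iprt/thread.h and a hypothetical doPerCpuWork() user function (not part of this file):

#include <iprt/thread.h>

static void doPerCpuWork(void)
{
    /* The initializer zeroes u32Reserved, which RTThreadPreemptDisable asserts. */
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    /* ... access per-CPU data here; until the state is restored, the thread
       is neither preempted nor migrated to another CPU ... */

    RTThreadPreemptRestore(&PreemptState);
}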