
#include "Python.h"
#include "pycore_atomic.h"        // _Py_atomic_int
#include "pycore_ceval.h"         // _PyEval_SignalReceived()
#include "pycore_pyerrors.h"      // _PyErr_GetRaisedException()
#include "pycore_pylifecycle.h"   // _PyErr_Print()
#include "pycore_initconfig.h"    // _PyStatus_OK()
#include "pycore_interp.h"        // _Py_RunGC()
#include "pycore_pymem.h"         // _PyMem_IsPtrFreed()

/*
Notes about the implementation:

- The GIL is just a boolean variable (locked) whose access is protected
  by a mutex (gil_mutex), and whose changes are signalled by a condition
  variable (gil_cond). gil_mutex is taken for short periods of time,
  and therefore mostly uncontended.

- In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be
  able to release the GIL on demand when another thread requests it. A
  volatile boolean variable (gil_drop_request) is used for that purpose,
  and is checked at every turn of the eval loop. That variable is set
  after a wait of `interval` microseconds on `gil_cond` has timed out.

  [Actually, another volatile boolean variable (eval_breaker) is used
  which ORs several conditions into one. Volatile booleans are
  sufficient as inter-thread signalling means since Python is run
  on cache-coherent architectures only.]

- A thread wanting to take the GIL will first let pass a given amount of
  time (`interval` microseconds) before setting gil_drop_request. This
  encourages a defined switching period, but doesn't enforce it since
  opcodes can take an arbitrary time to execute.

  The `interval` value is available for the user to read and modify
  using the Python API `sys.{get,set}switchinterval()`.

- When a thread releases the GIL and gil_drop_request is set, that thread
  ensures that another GIL-awaiting thread gets scheduled. It does so by
  waiting on a condition variable (switch_cond) until the value of
  last_holder is changed to something other than its own thread state
  pointer, indicating that another thread was able to take the GIL.

  This is meant to prohibit the latency-adverse behaviour on multi-core
  machines where one thread would speculatively release the GIL, but still
  run and end up being the first to re-acquire it, making the "timeslices"
  much longer than expected.
  (Note: this mechanism is enabled with FORCE_SWITCHING.)
*/
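
/* Illustrative sketch of the handoff protocol described above, reduced to
   plain pthreads. This is NOT part of CPython: the names (toy_gil,
   toy_take_gil, toy_drop_gil) are hypothetical, error handling and the
   switch_cond/last_holder handshake are omitted, and the timespec
   arithmetic is simplified. Kept under `#if 0` so it is never compiled. */
#if 0
#include <errno.h>
#include <pthread.h>
#include <time.h>

struct toy_gil {
    pthread_mutex_t mutex;   /* protects the fields below */
    pthread_cond_t cond;     /* signalled when `locked` changes */
    int locked;              /* the "GIL" itself: just a boolean */
    int drop_request;        /* polled by the holder in its eval loop */
};

static void
toy_take_gil(struct toy_gil *g, long interval_us)
{
    pthread_mutex_lock(&g->mutex);
    while (g->locked) {
        struct timespec deadline;
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_nsec += interval_us * 1000;  /* no ns-overflow handling */
        if (pthread_cond_timedwait(&g->cond, &g->mutex, &deadline)
                == ETIMEDOUT && g->locked) {
            /* No switch happened within the interval: ask for a drop.
               The holder checks this flag at every turn of its loop. */
            g->drop_request = 1;
        }
    }
    g->locked = 1;
    g->drop_request = 0;
    pthread_mutex_unlock(&g->mutex);
}

static void
toy_drop_gil(struct toy_gil *g)
{
    pthread_mutex_lock(&g->mutex);
    g->locked = 0;
    pthread_cond_signal(&g->cond);  /* wake one waiter */
    pthread_mutex_unlock(&g->mutex);
}
#endif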

// GH-89279: Force inlining by using a macro.
#if defined(_MSC_VER) && SIZEOF_INT == 4
#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) (assert(sizeof((ATOMIC_VAL)->_value) == 4), *((volatile int*)&((ATOMIC_VAL)->_value)))
#else
#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) _Py_atomic_load_relaxed(ATOMIC_VAL)
#endif

/* This can set eval_breaker to 0 even though gil_drop_request became
   1. We believe this is all right because the eval loop will release
   the GIL eventually anyway. */
static inline void
COMPUTE_EVAL_BREAKER(PyInterpreterState *interp,
                     struct _ceval_runtime_state *ceval,
                     struct _ceval_state *ceval2)
{
    _Py_atomic_store_relaxed(&ceval2->eval_breaker,
        _Py_atomic_load_relaxed_int32(&ceval2->gil_drop_request)
        | (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)
           && _Py_ThreadCanHandleSignals(interp))
        | (_Py_atomic_load_relaxed_int32(&ceval2->pending.calls_to_do)
           && _Py_ThreadCanHandlePendingCalls())
        | ceval2->pending.async_exc
        | _Py_atomic_load_relaxed_int32(&ceval2->gc_scheduled));
}


static inline void
SET_GIL_DROP_REQUEST(PyInterpreterState *interp)
{
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 1);
    _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
}


static inline void
RESET_GIL_DROP_REQUEST(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 0);
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}


static inline void
SIGNAL_PENDING_CALLS(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 1);
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}


static inline void
UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0);
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}


static inline void
SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp, int force)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval->signals_pending, 1);
    if (force) {
        _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
    }
    else {
        /* eval_breaker is not set to 1 if _Py_ThreadCanHandleSignals() is false */
        COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
    }
}


static inline void
UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval->signals_pending, 0);
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}


static inline void
SIGNAL_ASYNC_EXC(PyInterpreterState *interp)
{
    struct _ceval_state *ceval2 = &interp->ceval;
    ceval2->pending.async_exc = 1;
    _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
}


static inline void
UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    ceval2->pending.async_exc = 0;
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}

#ifndef NDEBUG
/* Ensure that tstate is valid */
static int
is_tstate_valid(PyThreadState *tstate)
{
    assert(!_PyMem_IsPtrFreed(tstate));
    assert(!_PyMem_IsPtrFreed(tstate->interp));
    return 1;
}
#endif

/*
 * Implementation of the Global Interpreter Lock (GIL).
 */

#include <stdlib.h>
#include <errno.h>

#include "pycore_atomic.h"


#include "condvar.h"

#define MUTEX_INIT(mut) \
    if (PyMUTEX_INIT(&(mut))) { \
        Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); };
#define MUTEX_FINI(mut) \
    if (PyMUTEX_FINI(&(mut))) { \
        Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); };
#define MUTEX_LOCK(mut) \
    if (PyMUTEX_LOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); };
#define MUTEX_UNLOCK(mut) \
    if (PyMUTEX_UNLOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); };

#define COND_INIT(cond) \
    if (PyCOND_INIT(&(cond))) { \
        Py_FatalError("PyCOND_INIT(" #cond ") failed"); };
#define COND_FINI(cond) \
    if (PyCOND_FINI(&(cond))) { \
        Py_FatalError("PyCOND_FINI(" #cond ") failed"); };
#define COND_SIGNAL(cond) \
    if (PyCOND_SIGNAL(&(cond))) { \
        Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); };
#define COND_WAIT(cond, mut) \
    if (PyCOND_WAIT(&(cond), &(mut))) { \
        Py_FatalError("PyCOND_WAIT(" #cond ") failed"); };
#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \
    { \
        int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \
        if (r < 0) \
            Py_FatalError("PyCOND_TIMEDWAIT(" #cond ") failed"); \
        if (r) /* 1 == timeout, 2 == impl. can't say, so assume timeout */ \
            timeout_result = 1; \
        else \
            timeout_result = 0; \
    } \


#define DEFAULT_INTERVAL 5000
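/* The interval is expressed in microseconds: 5000 matches the documented
   default of sys.getswitchinterval(), 0.005 seconds. */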

static void _gil_initialize(struct _gil_runtime_state *gil)
{
    _Py_atomic_int uninitialized = {-1};
    gil->locked = uninitialized;
    gil->interval = DEFAULT_INTERVAL;
}

static int gil_created(struct _gil_runtime_state *gil)
{
    return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
}

static void create_gil(struct _gil_runtime_state *gil)
{
    MUTEX_INIT(gil->mutex);
#ifdef FORCE_SWITCHING
    MUTEX_INIT(gil->switch_mutex);
#endif
    COND_INIT(gil->cond);
#ifdef FORCE_SWITCHING
    COND_INIT(gil->switch_cond);
#endif
    _Py_atomic_store_relaxed(&gil->last_holder, 0);
    _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
    _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
}

static void destroy_gil(struct _gil_runtime_state *gil)
{
    /* some pthread-like implementations tie the mutex to the cond
     * and must have the cond destroyed first.
     */
    COND_FINI(gil->cond);
    MUTEX_FINI(gil->mutex);
#ifdef FORCE_SWITCHING
    COND_FINI(gil->switch_cond);
    MUTEX_FINI(gil->switch_mutex);
#endif
    _Py_atomic_store_explicit(&gil->locked, -1,
                              _Py_memory_order_release);
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
}

#ifdef HAVE_FORK
static void recreate_gil(struct _gil_runtime_state *gil)
{
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
    /* XXX should we destroy the old OS resources here? */
    create_gil(gil);
}
#endif

static void
drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2,
         PyThreadState *tstate)
{
    struct _gil_runtime_state *gil = &ceval->gil;
    if (!_Py_atomic_load_relaxed(&gil->locked)) {
        Py_FatalError("drop_gil: GIL is not locked");
    }

    /* tstate is allowed to be NULL (early interpreter init) */
    if (tstate != NULL) {
        /* Sub-interpreter support: threads might have been switched
           under our feet using PyThreadState_Swap(). Fix the GIL last
           holder variable so that our heuristics work. */
        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
    }

    MUTEX_LOCK(gil->mutex);
    _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
    _Py_atomic_store_relaxed(&gil->locked, 0);
    COND_SIGNAL(gil->cond);
    MUTEX_UNLOCK(gil->mutex);

#ifdef FORCE_SWITCHING
    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request) && tstate != NULL) {
        MUTEX_LOCK(gil->switch_mutex);
        /* Not switched yet => wait */
        if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
        {
            assert(is_tstate_valid(tstate));
            RESET_GIL_DROP_REQUEST(tstate->interp);
            /* NOTE: if COND_WAIT does not atomically start waiting when
               releasing the mutex, another thread can run through, take
               the GIL and drop it again, and reset the condition
               before we even had a chance to wait for it. */
            COND_WAIT(gil->switch_cond, gil->switch_mutex);
        }
        MUTEX_UNLOCK(gil->switch_mutex);
    }
#endif
}


/* Check if a Python thread must exit immediately, rather than taking
   the GIL, because Py_Finalize() has been called.

   When this function is called by a daemon thread after Py_Finalize() has
   been called, the GIL no longer exists.

   tstate must be non-NULL. */
static inline int
tstate_must_exit(PyThreadState *tstate)
{
    /* bpo-39877: Access _PyRuntime directly rather than using
       tstate->interp->runtime to support calls from Python daemon threads.
       After Py_Finalize() has been called, tstate can be a dangling pointer,
       pointing to freed PyThreadState memory. */
    PyThreadState *finalizing = _PyRuntimeState_GetFinalizing(&_PyRuntime);
    return (finalizing != NULL && finalizing != tstate);
}


/* Take the GIL.

   The function saves errno at entry and restores its value at exit.

   tstate must be non-NULL. */
static void
take_gil(PyThreadState *tstate)
{
    int err = errno;

    assert(tstate != NULL);

    if (tstate_must_exit(tstate)) {
        /* bpo-39877: If Py_Finalize() has been called and tstate is not the
           thread which called Py_Finalize(), exit the thread immediately.

           This code path can be reached by a daemon thread after Py_Finalize()
           completes. In this case, tstate is a dangling pointer: it points to
           freed PyThreadState memory. */
        PyThread_exit_thread();
    }

    assert(is_tstate_valid(tstate));
    PyInterpreterState *interp = tstate->interp;
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    struct _gil_runtime_state *gil = &ceval->gil;

    /* Check that _PyEval_InitThreads() was called to create the lock */
    assert(gil_created(gil));

    MUTEX_LOCK(gil->mutex);

    if (!_Py_atomic_load_relaxed(&gil->locked)) {
        goto _ready;
    }

    int drop_requested = 0;
    while (_Py_atomic_load_relaxed(&gil->locked)) {
        unsigned long saved_switchnum = gil->switch_number;

        unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
        int timed_out = 0;
        COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);

        /* If we timed out and no switch occurred in the meantime, it is time
           to ask the GIL-holding thread to drop it. */
        if (timed_out &&
            _Py_atomic_load_relaxed(&gil->locked) &&
            gil->switch_number == saved_switchnum)
        {
            if (tstate_must_exit(tstate)) {
                MUTEX_UNLOCK(gil->mutex);
                // gh-96387: If this loop set a drop request in a previous
                // iteration, reset the request. Otherwise, drop_gil() can
                // block forever waiting for the thread which exited. Drop
                // requests made by other threads are also reset: these threads
                // may have to issue their drop request again (iterating one
                // more time).
                if (drop_requested) {
                    RESET_GIL_DROP_REQUEST(interp);
                }
                PyThread_exit_thread();
            }
            assert(is_tstate_valid(tstate));

            SET_GIL_DROP_REQUEST(interp);
            drop_requested = 1;
        }
    }

_ready:
#ifdef FORCE_SWITCHING
    /* This mutex must be taken before modifying gil->last_holder:
       see drop_gil(). */
    MUTEX_LOCK(gil->switch_mutex);
#endif
    /* We now hold the GIL */
    _Py_atomic_store_relaxed(&gil->locked, 1);
    _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);

    if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) {
        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
        ++gil->switch_number;
    }

#ifdef FORCE_SWITCHING
    COND_SIGNAL(gil->switch_cond);
    MUTEX_UNLOCK(gil->switch_mutex);
#endif

    if (tstate_must_exit(tstate)) {
        /* bpo-36475: If Py_Finalize() has been called and tstate is not
           the thread which called Py_Finalize(), exit the thread
           immediately.

           This code path can be reached by a daemon thread which was waiting
           in take_gil() while the main thread called
           wait_for_thread_shutdown() from Py_Finalize(). */
        MUTEX_UNLOCK(gil->mutex);
        drop_gil(ceval, ceval2, tstate);
        PyThread_exit_thread();
    }
    assert(is_tstate_valid(tstate));

    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
        RESET_GIL_DROP_REQUEST(interp);
    }
    else {
        /* bpo-40010: eval_breaker should be recomputed so that it is set to 1
           if there is a pending signal: a signal received by another thread
           that cannot handle signals.

           Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
        COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
    }

    /* Don't access tstate if the thread must exit */
    if (tstate->async_exc != NULL) {
        _PyEval_SignalAsyncExc(tstate->interp);
    }

    MUTEX_UNLOCK(gil->mutex);

    errno = err;
}

void _PyEval_SetSwitchInterval(unsigned long microseconds)
{
    struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil;
    gil->interval = microseconds;
}

unsigned long _PyEval_GetSwitchInterval(void)
{
    struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil;
    return gil->interval;
}
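/* For context, a sketch of the Python-level callers (assuming the sysmodule
   glue is unchanged): sys.setswitchinterval(seconds) converts seconds to
   microseconds before reaching the setter above, roughly

       _PyEval_SetSwitchInterval((unsigned long)(1e6 * interval));

   and sys.getswitchinterval() divides by 1e6 on the way out. */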


int
_PyEval_ThreadsInitialized(_PyRuntimeState *runtime)
{
    return gil_created(&runtime->ceval.gil);
}

int
PyEval_ThreadsInitialized(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    return _PyEval_ThreadsInitialized(runtime);
}

PyStatus
_PyEval_InitGIL(PyThreadState *tstate)
{
    if (!_Py_IsMainInterpreter(tstate->interp)) {
        /* Currently, the GIL is shared by all interpreters,
           and only the main interpreter is responsible for creating
           and destroying it. */
        return _PyStatus_OK();
    }

    struct _gil_runtime_state *gil = &tstate->interp->runtime->ceval.gil;
    assert(!gil_created(gil));

    PyThread_init_thread();
    create_gil(gil);

    take_gil(tstate);

    assert(gil_created(gil));
    return _PyStatus_OK();
}

void
_PyEval_FiniGIL(PyInterpreterState *interp)
{
    if (!_Py_IsMainInterpreter(interp)) {
        /* Currently, the GIL is shared by all interpreters,
           and only the main interpreter is responsible for creating
           and destroying it. */
        return;
    }

    struct _gil_runtime_state *gil = &interp->runtime->ceval.gil;
    if (!gil_created(gil)) {
        /* First Py_InitializeFromConfig() call: the GIL doesn't exist
           yet: do nothing. */
        return;
    }

    destroy_gil(gil);
    assert(!gil_created(gil));
}

void
PyEval_InitThreads(void)
{
    /* Do nothing: kept for backward compatibility */
}

void
_PyEval_Fini(void)
{
#ifdef Py_STATS
    _Py_PrintSpecializationStats(1);
#endif
}

void
PyEval_AcquireLock(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
    _Py_EnsureTstateNotNULL(tstate);

    take_gil(tstate);
}

void
PyEval_ReleaseLock(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
    /* This function must succeed when the current thread state is NULL.
       We therefore avoid PyThreadState_Get() which dumps a fatal error
       in debug mode. */
    struct _ceval_runtime_state *ceval = &runtime->ceval;
    struct _ceval_state *ceval2 = &tstate->interp->ceval;
    drop_gil(ceval, ceval2, tstate);
}

void
_PyEval_ReleaseLock(PyThreadState *tstate)
{
    struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval;
    struct _ceval_state *ceval2 = &tstate->interp->ceval;
    drop_gil(ceval, ceval2, tstate);
}

void
PyEval_AcquireThread(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);

    take_gil(tstate);

    if (_PyThreadState_Swap(tstate->interp->runtime, tstate) != NULL) {
        Py_FatalError("non-NULL old thread state");
    }
}

void
PyEval_ReleaseThread(PyThreadState *tstate)
{
    assert(is_tstate_valid(tstate));

    _PyRuntimeState *runtime = tstate->interp->runtime;
    PyThreadState *new_tstate = _PyThreadState_Swap(runtime, NULL);
    if (new_tstate != tstate) {
        Py_FatalError("wrong thread state");
    }
    struct _ceval_runtime_state *ceval = &runtime->ceval;
    struct _ceval_state *ceval2 = &tstate->interp->ceval;
    drop_gil(ceval, ceval2, tstate);
}

#ifdef HAVE_FORK
/* This function is called from PyOS_AfterFork_Child to destroy all threads
   which are not running in the child process, and clear internal locks
   which might be held by those threads. */
PyStatus
_PyEval_ReInitThreads(PyThreadState *tstate)
{
    _PyRuntimeState *runtime = tstate->interp->runtime;

    struct _gil_runtime_state *gil = &runtime->ceval.gil;
    if (!gil_created(gil)) {
        return _PyStatus_OK();
    }
    recreate_gil(gil);

    take_gil(tstate);

    struct _pending_calls *pending = &tstate->interp->ceval.pending;
    if (_PyThread_at_fork_reinit(&pending->lock) < 0) {
        return _PyStatus_ERR("Can't reinitialize pending calls lock");
    }

    /* Destroy all threads except the current one */
    _PyThreadState_DeleteExcept(tstate);
    return _PyStatus_OK();
}
#endif

/* This function is used to signal that async exceptions are waiting to be
   raised. */

void
_PyEval_SignalAsyncExc(PyInterpreterState *interp)
{
    SIGNAL_ASYNC_EXC(interp);
}

PyThreadState *
PyEval_SaveThread(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL);
    _Py_EnsureTstateNotNULL(tstate);

    struct _ceval_runtime_state *ceval = &runtime->ceval;
    struct _ceval_state *ceval2 = &tstate->interp->ceval;
    assert(gil_created(&ceval->gil));
    drop_gil(ceval, ceval2, tstate);
    return tstate;
}

void
PyEval_RestoreThread(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);

    take_gil(tstate);

    _PyThreadState_Swap(tstate->interp->runtime, tstate);
}

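/* For context: the canonical drivers of PyEval_SaveThread() and
   PyEval_RestoreThread() are the Py_BEGIN/END_ALLOW_THREADS macros from
   the public headers, whose documented expansion is roughly:

       Py_BEGIN_ALLOW_THREADS  =>  { PyThreadState *_save;
                                     _save = PyEval_SaveThread();
       Py_END_ALLOW_THREADS    =>    PyEval_RestoreThread(_save); }
*/
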
/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
   signal handlers or Mac I/O completion routines) can schedule calls
   to a function to be called synchronously.
   The synchronous function is called with one void* argument.
   It should return 0 for success or -1 for failure -- failure should
   be accompanied by an exception.

   If registration succeeds, the registration function returns 0; if it
   fails (e.g. due to too many pending calls) it returns -1 (without
   setting an exception condition).

   Note that because registration may occur from within signal handlers,
   or other asynchronous events, calling malloc() is unsafe!

   Any thread can schedule pending calls, but only the main thread
   will execute them.
   There is no facility to schedule calls to a particular thread, but
   that should be easy to change, should that ever be required. In
   that case, the static variables here should go into the python
   threadstate.
*/
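
/* Illustrative usage sketch (not part of this file): scheduling a callback
   from an arbitrary C thread via the public API. The helper names
   (report_progress, schedule_progress_report) are hypothetical. Kept under
   `#if 0` so it is never compiled. */
#if 0
#include <stdint.h>
#include <stdio.h>

static int
report_progress(void *arg)
{
    /* Runs later in the main thread, with the GIL held. Return 0 on
       success; on failure, set an exception and return -1. */
    long n = (long)(intptr_t)arg;
    printf("progress: %ld\n", n);
    return 0;
}

static void
schedule_progress_report(long n)
{
    /* Py_AddPendingCall() may fail (-1) if the queue of NPENDINGCALLS
       slots is full; no exception is set in that case, so callers retry
       (a real caller would back off instead of spinning). */
    while (Py_AddPendingCall(report_progress, (void *)(intptr_t)n) < 0) {
        /* queue full: retry */
    }
}
#endif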

void
_PyEval_SignalReceived(PyInterpreterState *interp)
{
#ifdef MS_WINDOWS
    // bpo-42296: On Windows, _PyEval_SignalReceived() is called from a signal
    // handler which can run in a thread different than the Python thread, in
    // which case _Py_ThreadCanHandleSignals() is wrong. Ignore
    // _Py_ThreadCanHandleSignals() and always set eval_breaker to 1.
    //
    // The next eval_frame_handle_pending() call will call
    // _Py_ThreadCanHandleSignals() to recompute eval_breaker.
    int force = 1;
#else
    int force = 0;
#endif
    /* bpo-30703: Function called when the C signal handler of Python gets a
       signal. We cannot queue a callback using _PyEval_AddPendingCall() since
       that function is not async-signal-safe. */
    SIGNAL_PENDING_SIGNALS(interp, force);
}

/* Push one item onto the queue while holding the lock. */
static int
_push_pending_call(struct _pending_calls *pending,
                   int (*func)(void *), void *arg)
{
    int i = pending->last;
    int j = (i + 1) % NPENDINGCALLS;
    if (j == pending->first) {
        return -1; /* Queue full */
    }
    pending->calls[i].func = func;
    pending->calls[i].arg = arg;
    pending->last = j;
    return 0;
}

/* Pop one item off the queue while holding the lock. */
static void
_pop_pending_call(struct _pending_calls *pending,
                  int (**func)(void *), void **arg)
{
    int i = pending->first;
    if (i == pending->last) {
        return; /* Queue empty */
    }

    *func = pending->calls[i].func;
    *arg = pending->calls[i].arg;
    pending->first = (i + 1) % NPENDINGCALLS;
}

/* This implementation is thread-safe. It allows calls to be scheduled
   from any thread, and even from an executing callback.
 */

int
_PyEval_AddPendingCall(PyInterpreterState *interp,
                       int (*func)(void *), void *arg)
{
    struct _pending_calls *pending = &interp->ceval.pending;
    /* Ensure that _PyEval_InitState() was called
       and that _PyEval_FiniState() is not called yet. */
    assert(pending->lock != NULL);

    PyThread_acquire_lock(pending->lock, WAIT_LOCK);
    int result = _push_pending_call(pending, func, arg);
    PyThread_release_lock(pending->lock);

    /* signal main loop */
    SIGNAL_PENDING_CALLS(interp);
    return result;
}

int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
    /* Best-effort to support subinterpreters and calls with the GIL released.

       First attempt _PyThreadState_GET() since it supports subinterpreters.

       If the GIL is released, _PyThreadState_GET() returns NULL. In this
       case, use PyGILState_GetThisThreadState() which works even if the GIL
       is released.

       Sadly, PyGILState_GetThisThreadState() doesn't support subinterpreters:
       see bpo-10915 and bpo-15751.

       Py_AddPendingCall() doesn't require the caller to hold the GIL. */
    PyThreadState *tstate = _PyThreadState_GET();
    if (tstate == NULL) {
        tstate = PyGILState_GetThisThreadState();
    }

    PyInterpreterState *interp;
    if (tstate != NULL) {
        interp = tstate->interp;
    }
    else {
        /* Last resort: use the main interpreter */
        interp = _PyInterpreterState_Main();
    }
    return _PyEval_AddPendingCall(interp, func, arg);
}

static int
handle_signals(PyThreadState *tstate)
{
    assert(is_tstate_valid(tstate));
    if (!_Py_ThreadCanHandleSignals(tstate->interp)) {
        return 0;
    }

    UNSIGNAL_PENDING_SIGNALS(tstate->interp);
    if (_PyErr_CheckSignalsTstate(tstate) < 0) {
        /* On failure, re-schedule a call to handle_signals(). */
        SIGNAL_PENDING_SIGNALS(tstate->interp, 0);
        return -1;
    }
    return 0;
}

static int
make_pending_calls(PyInterpreterState *interp)
{
    /* only execute pending calls on main thread */
    if (!_Py_ThreadCanHandlePendingCalls()) {
        return 0;
    }

    /* don't perform recursive pending calls */
    if (interp->ceval.pending.busy) {
        return 0;
    }
    interp->ceval.pending.busy = 1;

    /* unsignal before starting to call callbacks, so that any callback
       added in-between re-signals */
    UNSIGNAL_PENDING_CALLS(interp);
    int res = 0;

    /* perform a bounded number of calls, in case of recursion */
    struct _pending_calls *pending = &interp->ceval.pending;
    for (int i=0; i<NPENDINGCALLS; i++) {
        int (*func)(void *) = NULL;
        void *arg = NULL;

        /* pop one item off the queue while holding the lock */
        PyThread_acquire_lock(pending->lock, WAIT_LOCK);
        _pop_pending_call(pending, &func, &arg);
        PyThread_release_lock(pending->lock);

        /* having released the lock, perform the callback */
        if (func == NULL) {
            break;
        }
        res = func(arg);
        if (res) {
            goto error;
        }
    }

    interp->ceval.pending.busy = 0;
    return res;

error:
    interp->ceval.pending.busy = 0;
    SIGNAL_PENDING_CALLS(interp);
    return res;
}

void
_Py_FinishPendingCalls(PyThreadState *tstate)
{
    assert(PyGILState_Check());
    assert(is_tstate_valid(tstate));

    struct _pending_calls *pending = &tstate->interp->ceval.pending;

    if (!_Py_atomic_load_relaxed_int32(&(pending->calls_to_do))) {
        return;
    }

    if (make_pending_calls(tstate->interp) < 0) {
        PyObject *exc = _PyErr_GetRaisedException(tstate);
        PyErr_BadInternalCall();
        _PyErr_ChainExceptions1(exc);
        _PyErr_Print(tstate);
    }
}

/* Py_MakePendingCalls() is a simple wrapper for the sake
   of backward compatibility. */
int
Py_MakePendingCalls(void)
{
    assert(PyGILState_Check());

    PyThreadState *tstate = _PyThreadState_GET();
    assert(is_tstate_valid(tstate));

    /* Python signal handler doesn't really queue a callback: it only signals
       that a signal was received, see _PyEval_SignalReceived(). */
    int res = handle_signals(tstate);
    if (res != 0) {
        return res;
    }

    res = make_pending_calls(tstate->interp);
    if (res != 0) {
        return res;
    }

    return 0;
}

/* Initialization and finalization of the ceval state */

void
_PyEval_InitRuntimeState(struct _ceval_runtime_state *ceval)
{
    _gil_initialize(&ceval->gil);
}

void
_PyEval_InitState(struct _ceval_state *ceval, PyThread_type_lock pending_lock)
{
    struct _pending_calls *pending = &ceval->pending;
    assert(pending->lock == NULL);

    pending->lock = pending_lock;
}

void
_PyEval_FiniState(struct _ceval_state *ceval)
{
    struct _pending_calls *pending = &ceval->pending;
    if (pending->lock != NULL) {
        PyThread_free_lock(pending->lock);
        pending->lock = NULL;
    }
}

/* Handle signals, pending calls, GIL drop request
   and asynchronous exception */
int
_Py_HandlePending(PyThreadState *tstate)
{
    _PyRuntimeState * const runtime = &_PyRuntime;
    struct _ceval_runtime_state *ceval = &runtime->ceval;
    struct _ceval_state *interp_ceval_state = &tstate->interp->ceval;

    /* Pending signals */
    if (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)) {
        if (handle_signals(tstate) != 0) {
            return -1;
        }
    }

    /* Pending calls */
    if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->pending.calls_to_do)) {
        if (make_pending_calls(tstate->interp) != 0) {
            return -1;
        }
    }

    /* GC scheduled to run */
    if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gc_scheduled)) {
        _Py_atomic_store_relaxed(&interp_ceval_state->gc_scheduled, 0);
        COMPUTE_EVAL_BREAKER(tstate->interp, ceval, interp_ceval_state);
        _Py_RunGC(tstate);
    }

    /* GIL drop request */
    if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gil_drop_request)) {
        /* Give another thread a chance */
        if (_PyThreadState_Swap(runtime, NULL) != tstate) {
            Py_FatalError("tstate mix-up");
        }
        drop_gil(ceval, interp_ceval_state, tstate);

        /* Other threads may run now */

        take_gil(tstate);

        if (_PyThreadState_Swap(runtime, tstate) != NULL) {
            Py_FatalError("orphan tstate");
        }
    }

    /* Check for asynchronous exception. */
    if (tstate->async_exc != NULL) {
        PyObject *exc = tstate->async_exc;
        tstate->async_exc = NULL;
        UNSIGNAL_ASYNC_EXC(tstate->interp);
        _PyErr_SetNone(tstate, exc);
        Py_DECREF(exc);
        return -1;
    }

    // It is possible that some of the conditions that trigger the eval
    // breaker are set in a different thread than the Python thread. An
    // example of this is bpo-42296: on Windows, _PyEval_SignalReceived()
    // can be called in a different thread than the Python thread, in which
    // case _Py_ThreadCanHandleSignals() is wrong. Recompute eval_breaker in
    // the current Python thread with the correct _Py_ThreadCanHandleSignals()
    // value. This prevents interrupting the eval loop at every instruction
    // if the current Python thread cannot handle signals (i.e. if
    // _Py_ThreadCanHandleSignals() is false).
    COMPUTE_EVAL_BREAKER(tstate->interp, ceval, interp_ceval_state);

    return 0;
}