python 2.7 -> python 3.14
This commit is contained in:
565
extern/include/python/internal/mimalloc/mimalloc.h
vendored
Normal file
565
extern/include/python/internal/mimalloc/mimalloc.h
vendored
Normal file
@@ -0,0 +1,565 @@
|
||||
/* ----------------------------------------------------------------------------
|
||||
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
|
||||
This is free software; you can redistribute it and/or modify it under the
|
||||
terms of the MIT license. A copy of the license can be found in the file
|
||||
"LICENSE" at the root of this distribution.
|
||||
-----------------------------------------------------------------------------*/
|
||||
#pragma once
|
||||
#ifndef MIMALLOC_H
|
||||
#define MIMALLOC_H
|
||||
|
||||
#define MI_MALLOC_VERSION 212 // major + 2 digits minor
|
||||
|
||||
// ------------------------------------------------------
|
||||
// Compiler specific attributes
|
||||
// ------------------------------------------------------
|
||||
|
||||
// 'noexcept' specification: C++11 `noexcept` when available, `throw()` on
// older C++, and nothing in plain C (which has no exception specifications).
#ifdef __cplusplus
  #if (__cplusplus >= 201103L) || (_MSC_VER > 1900)  // C++11
  #define mi_attr_noexcept   noexcept
  #else
  #define mi_attr_noexcept   throw()
  #endif
#else
  #define mi_attr_noexcept
#endif

// Warn when the result of an allocation function is discarded.
#if defined(__cplusplus) && (__cplusplus >= 201703)
  #define mi_decl_nodiscard    [[nodiscard]]
#elif (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)  // includes clang, icc, and clang-cl
  #define mi_decl_nodiscard    __attribute__((warn_unused_result))
#elif defined(_HAS_NODISCARD)
  #define mi_decl_nodiscard    _NODISCARD
#elif (_MSC_VER >= 1700)
  #define mi_decl_nodiscard    _Check_return_
#else
  #define mi_decl_nodiscard
#endif

// Per-compiler definitions of:
//   mi_decl_export      -- shared-library export/import
//   mi_decl_restrict    -- "returns a pointer that does not alias" hint
//   mi_attr_malloc      -- malloc-like attribute for alias analysis
//   mi_attr_alloc_size* -- which argument(s) give the allocation size
//   mi_attr_alloc_align -- which argument gives the alignment
//   mi_cdecl            -- calling convention (only meaningful on MSVC)
#if defined(_MSC_VER) || defined(__MINGW32__)
  #if !defined(MI_SHARED_LIB)
  #define mi_decl_export
  #elif defined(MI_SHARED_LIB_EXPORT)
  #define mi_decl_export              __declspec(dllexport)
  #else
  #define mi_decl_export              __declspec(dllimport)
  #endif
  #if defined(__MINGW32__)
  #define mi_decl_restrict
  #define mi_attr_malloc              __attribute__((malloc))
  #else
  // __declspec(allocator) enables heap-profiling support in VS2015+; it is
  // not understood by the EDG front-end (e.g. some IntelliSense/ICC modes).
  #if (_MSC_VER >= 1900) && !defined(__EDG__)
  #define mi_decl_restrict            __declspec(allocator) __declspec(restrict)
  #else
  #define mi_decl_restrict            __declspec(restrict)
  #endif
  #define mi_attr_malloc
  #endif
  #define mi_cdecl                    __cdecl
  #define mi_attr_alloc_size(s)
  #define mi_attr_alloc_size2(s1,s2)
  #define mi_attr_alloc_align(p)
#elif defined(__GNUC__)               // includes clang and icc
  #if defined(MI_SHARED_LIB) && defined(MI_SHARED_LIB_EXPORT)
  #define mi_decl_export              __attribute__((visibility("default")))
  #else
  #define mi_decl_export
  #endif
  #define mi_cdecl                    // leads to warnings... __attribute__((cdecl))
  #define mi_decl_restrict
  #define mi_attr_malloc              __attribute__((malloc))
  // alloc_size is only reliable on gcc >= 5 / clang >= 4; icc lacks alloc_align.
  #if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5)
  #define mi_attr_alloc_size(s)
  #define mi_attr_alloc_size2(s1,s2)
  #define mi_attr_alloc_align(p)
  #elif defined(__INTEL_COMPILER)
  #define mi_attr_alloc_size(s)       __attribute__((alloc_size(s)))
  #define mi_attr_alloc_size2(s1,s2)  __attribute__((alloc_size(s1,s2)))
  #define mi_attr_alloc_align(p)
  #else
  #define mi_attr_alloc_size(s)       __attribute__((alloc_size(s)))
  #define mi_attr_alloc_size2(s1,s2)  __attribute__((alloc_size(s1,s2)))
  #define mi_attr_alloc_align(p)      __attribute__((alloc_align(p)))
  #endif
#else
  // Unknown compiler: all attributes expand to nothing.
  #define mi_cdecl
  #define mi_decl_export
  #define mi_decl_restrict
  #define mi_attr_malloc
  #define mi_attr_alloc_size(s)
  #define mi_attr_alloc_size2(s1,s2)
  #define mi_attr_alloc_align(p)
#endif
|
||||
|
||||
// ------------------------------------------------------
|
||||
// Includes
|
||||
// ------------------------------------------------------
|
||||
|
||||
#include <stddef.h> // size_t
|
||||
#include <stdbool.h> // bool
|
||||
#include <stdint.h> // INTPTR_MAX
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// ------------------------------------------------------
|
||||
// Standard malloc interface
|
||||
// ------------------------------------------------------
|
||||
|
||||
// Drop-in equivalents of the standard C allocation entry points.
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc(size_t size)               mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_realloc(void* p, size_t newsize)                   mi_attr_noexcept mi_attr_alloc_size(2);
// NOTE(review): presumably resizes in place like MSVC `_expand` (no move);
// confirm against the mimalloc documentation.
mi_decl_export void* mi_expand(void* p, size_t newsize)                                      mi_attr_noexcept mi_attr_alloc_size(2);

mi_decl_export void mi_free(void* p) mi_attr_noexcept;
// Allocating string helpers; the caller owns (and must mi_free) the result.
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc;
|
||||
|
||||
// ------------------------------------------------------
|
||||
// Extended functionality
|
||||
// ------------------------------------------------------
|
||||
// Upper bound for the small-object fast path, in machine words and in bytes.
#define MI_SMALL_WSIZE_MAX  (128)
#define MI_SMALL_SIZE_MAX   (MI_SMALL_WSIZE_MAX*sizeof(void*))

// *_small variants: fast path for sizes up to MI_SMALL_SIZE_MAX
// (NOTE(review): behavior for larger sizes not visible here -- confirm).
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
// zalloc: zero-initialized allocation.
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc(size_t size)       mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_reallocn(void* p, size_t count, size_t size)        mi_attr_noexcept mi_attr_alloc_size2(2,3);
// NOTE(review): 'f' suffix presumably means "free on failure" (BSD reallocf
// convention) -- confirm against mimalloc docs.
mi_decl_nodiscard mi_decl_export void* mi_reallocf(void* p, size_t newsize)                   mi_attr_noexcept mi_attr_alloc_size(2);

mi_decl_nodiscard mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size)     mi_attr_noexcept;
|
||||
|
||||
|
||||
// ------------------------------------------------------
|
||||
// Internals
|
||||
// ------------------------------------------------------
|
||||
|
||||
// Callback invoked on allocator "heartbeats" so the application can release
// cached memory; registered with mi_register_deferred_free.
typedef void (mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg);
mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept;

// Output callback used for statistics and messages (instead of stderr).
typedef void (mi_cdecl mi_output_fun)(const char* msg, void* arg);
mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept;

// Error callback; `err` is an integer error code
// (NOTE(review): presumably errno-style -- confirm).
typedef void (mi_cdecl mi_error_fun)(int err, void* arg);
mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg);

mi_decl_export void mi_collect(bool force)    mi_attr_noexcept;
// Returns MI_MALLOC_VERSION of the linked library.
mi_decl_export int  mi_version(void)          mi_attr_noexcept;
mi_decl_export void mi_stats_reset(void)      mi_attr_noexcept;
mi_decl_export void mi_stats_merge(void)      mi_attr_noexcept;
mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept;  // backward compatibility: `out` is ignored and should be NULL
mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept;

// Explicit process/thread lifecycle hooks.
mi_decl_export void mi_process_init(void)     mi_attr_noexcept;
mi_decl_export void mi_thread_init(void)      mi_attr_noexcept;
mi_decl_export void mi_thread_done(void)      mi_attr_noexcept;
mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept;

// Process statistics (times in milliseconds, sizes in bytes).
// NOTE(review): out-parameters look optional (NULL to skip) -- confirm.
mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs,
                                    size_t* current_rss, size_t* peak_rss,
                                    size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept;
|
||||
|
||||
// -------------------------------------------------------------------------------------
|
||||
// Aligned allocation
|
||||
// Note that `alignment` always follows `size` for consistency with unaligned
|
||||
// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`.
|
||||
// -------------------------------------------------------------------------------------
|
||||
|
||||
// Aligned allocation; the `_at` variants align `(address + offset)` rather
// than the block start. `alignment` follows `size` (see the note above).
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2);
|
||||
|
||||
|
||||
// -------------------------------------------------------------------------------------
|
||||
// Heaps: first-class, but can only allocate from the same thread that created it.
|
||||
// -------------------------------------------------------------------------------------
|
||||
|
||||
// Opaque first-class heap handle; only usable from the creating thread
// (see the section banner above).
struct mi_heap_s;
typedef struct mi_heap_s mi_heap_t;

mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void);
// NOTE(review): delete vs destroy semantics (whether live blocks migrate or
// are freed outright) are not visible in this header -- confirm in the docs.
mi_decl_export void       mi_heap_delete(mi_heap_t* heap);
mi_decl_export void       mi_heap_destroy(mi_heap_t* heap);
// Swap the calling thread's default heap; returns the previous default.
mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap);
mi_decl_export mi_heap_t* mi_heap_get_default(void);
mi_decl_export mi_heap_t* mi_heap_get_backing(void);
mi_decl_export void       mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept;

// Per-heap variants of the basic allocation API.
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);

mi_decl_nodiscard mi_decl_export void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4);
mi_decl_nodiscard mi_decl_export void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);

// Per-heap string helpers.
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc;

// Per-heap aligned variants.
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3);
|
||||
|
||||
|
||||
// --------------------------------------------------------------------------------
|
||||
// Zero initialized re-allocation.
|
||||
// Only valid on memory that was originally allocated with zero initialization too.
|
||||
// e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc.
|
||||
// see <https://github.com/microsoft/mimalloc/issues/63#issuecomment-508272992>
|
||||
// --------------------------------------------------------------------------------
|
||||
|
||||
// Zero-initialized re-allocation (only valid on memory that was itself
// zero-initialized -- see the section banner above).
mi_decl_nodiscard mi_decl_export void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);

mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(2,3);

// Per-heap variants.
mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4);

mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(3,4);
|
||||
|
||||
|
||||
// ------------------------------------------------------
|
||||
// Analysis
|
||||
// ------------------------------------------------------
|
||||
|
||||
// Ownership/containment queries for debugging and introspection.
mi_decl_export bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
mi_decl_export bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
mi_decl_export bool mi_check_owned(const void* p);

// An area of heap space contains blocks of a single size.
typedef struct mi_heap_area_s {
  void*  blocks;           // start of the area containing heap blocks
  size_t reserved;         // bytes reserved for this area (virtual)
  size_t committed;        // current available bytes for this area
  size_t used;             // number of allocated blocks
  size_t block_size;       // size in bytes of each block
  size_t full_block_size;  // size in bytes of a full block including padding and metadata.
} mi_heap_area_t;

// Per-block visitor callback
// (NOTE(review): presumably return false to stop iteration -- confirm).
typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);

mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
|
||||
|
||||
// Experimental
|
||||
mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept;

// Reserve huge OS pages (1GiB) spread over NUMA nodes, or at one node.
mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept;
mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept;

// Reserve OS memory up front, or hand an externally obtained region to mimalloc.
mi_decl_export int  mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept;
mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept;

mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept;

// Experimental: heaps associated with specific memory arena's
typedef int mi_arena_id_t;
mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size);
// `_ex` variants additionally return the arena id and can make the arena
// exclusive to heaps created in it.
mi_decl_export int  mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
mi_decl_export int  mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;

#if MI_MALLOC_VERSION >= 182
// Create a heap that only allocates in the specified arena
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id);
#endif

// deprecated
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
|
||||
|
||||
|
||||
// ------------------------------------------------------
|
||||
// Convenience
|
||||
// ------------------------------------------------------
|
||||
|
||||
// Typed convenience wrappers: allocate (arrays of) `tp` and return `tp*`,
// avoiding explicit sizeof/cast at every call site.
#define mi_malloc_tp(tp)          ((tp*)mi_malloc(sizeof(tp)))
#define mi_zalloc_tp(tp)          ((tp*)mi_zalloc(sizeof(tp)))
#define mi_calloc_tp(tp,n)        ((tp*)mi_calloc(n,sizeof(tp)))
#define mi_mallocn_tp(tp,n)       ((tp*)mi_mallocn(n,sizeof(tp)))
#define mi_reallocn_tp(p,tp,n)    ((tp*)mi_reallocn(p,n,sizeof(tp)))
#define mi_recalloc_tp(p,tp,n)    ((tp*)mi_recalloc(p,n,sizeof(tp)))

// Per-heap versions of the above.
#define mi_heap_malloc_tp(hp,tp)        ((tp*)mi_heap_malloc(hp,sizeof(tp)))
#define mi_heap_zalloc_tp(hp,tp)        ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
#define mi_heap_calloc_tp(hp,tp,n)      ((tp*)mi_heap_calloc(hp,n,sizeof(tp)))
#define mi_heap_mallocn_tp(hp,tp,n)     ((tp*)mi_heap_mallocn(hp,n,sizeof(tp)))
#define mi_heap_reallocn_tp(hp,p,tp,n)  ((tp*)mi_heap_reallocn(hp,p,n,sizeof(tp)))
#define mi_heap_recalloc_tp(hp,p,tp,n)  ((tp*)mi_heap_recalloc(hp,p,n,sizeof(tp)))
|
||||
|
||||
|
||||
// ------------------------------------------------------
|
||||
// Options
|
||||
// ------------------------------------------------------
|
||||
|
||||
// Runtime options; settable via the accessors below
// (NOTE(review): presumably also via MIMALLOC_* environment variables, as is
// conventional for mimalloc -- confirm in the docs).
typedef enum mi_option_e {
  // stable options
  mi_option_show_errors,               // print error messages
  mi_option_show_stats,                // print statistics on termination
  mi_option_verbose,                   // print verbose messages
  // the following options are experimental (see src/options.h)
  mi_option_eager_commit,              // eager commit segments? (after `eager_commit_delay` segments) (=1)
  mi_option_arena_eager_commit,        // eager commit arenas? Use 2 to enable just on overcommit systems (=2)
  mi_option_purge_decommits,           // should a memory purge decommit (or only reset) (=1)
  mi_option_allow_large_os_pages,      // allow large (2MiB) OS pages, implies eager commit
  mi_option_reserve_huge_os_pages,     // reserve N huge OS pages (1GiB/page) at startup
  mi_option_reserve_huge_os_pages_at,  // reserve huge OS pages at a specific NUMA node
  mi_option_reserve_os_memory,         // reserve specified amount of OS memory in an arena at startup
  mi_option_deprecated_segment_cache,
  mi_option_deprecated_page_reset,
  mi_option_abandoned_page_purge,      // immediately purge delayed purges on thread termination
  mi_option_deprecated_segment_reset,
  mi_option_eager_commit_delay,
  mi_option_purge_delay,               // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all.
  mi_option_use_numa_nodes,            // 0 = use all available numa nodes, otherwise use at most N nodes.
  mi_option_limit_os_alloc,            // 1 = do not use OS memory for allocation (but only programmatically reserved arenas)
  mi_option_os_tag,                    // tag used for OS logging (macOS only for now)
  mi_option_max_errors,                // issue at most N error messages
  mi_option_max_warnings,              // issue at most N warning messages
  mi_option_max_segment_reclaim,
  mi_option_destroy_on_exit,           // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe.
  mi_option_arena_reserve,             // initial memory size in KiB for arena reservation (1GiB on 64-bit)
  mi_option_arena_purge_mult,
  mi_option_purge_extend_delay,
  _mi_option_last,                     // sentinel: number of options
  // legacy option names
  mi_option_large_os_pages = mi_option_allow_large_os_pages,
  mi_option_eager_region_commit = mi_option_arena_eager_commit,
  mi_option_reset_decommits = mi_option_purge_decommits,
  mi_option_reset_delay = mi_option_purge_delay,
  mi_option_abandoned_page_reset = mi_option_abandoned_page_purge
} mi_option_t;


// Boolean option accessors.
mi_decl_nodiscard mi_decl_export bool mi_option_is_enabled(mi_option_t option);
mi_decl_export void mi_option_enable(mi_option_t option);
mi_decl_export void mi_option_disable(mi_option_t option);
mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable);
mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable);

// Integer option accessors; `_default` only applies if not already set.
mi_decl_nodiscard mi_decl_export long   mi_option_get(mi_option_t option);
mi_decl_nodiscard mi_decl_export long   mi_option_get_clamp(mi_option_t option, long min, long max);
mi_decl_nodiscard mi_decl_export size_t mi_option_get_size(mi_option_t option);
mi_decl_export void mi_option_set(mi_option_t option, long value);
mi_decl_export void mi_option_set_default(mi_option_t option, long value);
|
||||
|
||||
|
||||
// -------------------------------------------------------------------------------------------------------
|
||||
// "mi" prefixed implementations of various posix, Unix, Windows, and C++ allocation functions.
|
||||
// (This can be convenient when providing overrides of these functions as done in `mimalloc-override.h`.)
|
||||
// note: we use `mi_cfree` as "checked free" and it checks if the pointer is in our heap before free-ing.
|
||||
// -------------------------------------------------------------------------------------------------------
|
||||
|
||||
// "checked free": only frees if `p` lies in a mimalloc heap (see banner above).
mi_decl_export void  mi_cfree(void* p) mi_attr_noexcept;
mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void* p)        mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_good_size(size_t size)     mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept;

// POSIX-style aligned allocation (note: here `alignment` precedes `size`,
// matching the POSIX/C11 originals, unlike the mi_*_aligned functions above).
mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_valloc(size_t size)  mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);

mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export int   mi_reallocarr(void* p, size_t count, size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept;

// Windows-style wide/multibyte duplication and environment helpers
// (`unsigned short` stands in for wchar_t to keep this header C-portable).
mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned char*  mi_mbsdup(const unsigned char* s)  mi_attr_noexcept mi_attr_malloc;
mi_decl_export int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept;
mi_decl_export int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept;

// Sized/aligned free variants (C++ sized-deallocation counterparts).
mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept;
mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept;

// The `mi_new` wrappers implement C++ semantics on out-of-memory instead of directly returning `NULL`.
// (and call `std::get_new_handler` and potentially raise a `std::bad_alloc` exception).
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new(size_t size)   mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2);
mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3);

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
// ---------------------------------------------------------------------------------------------
|
||||
// Implement the C++ std::allocator interface for use in STL containers.
|
||||
// (note: see `mimalloc-new-delete.h` for overriding the new/delete operators globally)
|
||||
// ---------------------------------------------------------------------------------------------
|
||||
#ifdef __cplusplus
|
||||
|
||||
#include <cstddef> // std::size_t
|
||||
#include <cstdint> // PTRDIFF_MAX
|
||||
#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11
|
||||
#include <type_traits> // std::true_type
|
||||
#include <utility> // std::forward
|
||||
#endif
|
||||
|
||||
// Shared base for the mimalloc STL allocators: the classic (pre-C++17)
// allocator member typedefs plus construct/destroy helpers, gated on the
// detected C++ standard.
template<class T> struct _mi_stl_allocator_common {
  typedef T                 value_type;
  typedef std::size_t       size_type;
  typedef std::ptrdiff_t    difference_type;
  typedef value_type&       reference;
  typedef value_type const& const_reference;
  typedef value_type*       pointer;
  typedef value_type const* const_pointer;

  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900))  // C++11
  using propagate_on_container_copy_assignment = std::true_type;
  using propagate_on_container_move_assignment = std::true_type;
  using propagate_on_container_swap            = std::true_type;
  // Perfect-forwarding construct; placement-new into allocator storage.
  template <class U, class ...Args> void construct(U* p, Args&& ...args) { ::new(p) U(std::forward<Args>(args)...); }
  template <class U> void destroy(U* p) mi_attr_noexcept { p->~U(); }
  #else
  // C++98 fallback: copy-construct only.
  void construct(pointer p, value_type const& val) { ::new(p) value_type(val); }
  void destroy(pointer p) { p->~value_type(); }
  #endif

  // Largest allocatable element count (measured in elements, not bytes).
  size_type     max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); }
  pointer       address(reference x) const        { return &x; }
  const_pointer address(const_reference x) const  { return &x; }
};
|
||||
|
||||
// STL allocator that allocates from the default mimalloc heap.
// Stateless: every instance is interchangeable (see is_always_equal below).
template<class T> struct mi_stl_allocator : public _mi_stl_allocator_common<T> {
  using typename _mi_stl_allocator_common<T>::size_type;
  using typename _mi_stl_allocator_common<T>::value_type;
  using typename _mi_stl_allocator_common<T>::pointer;
  template <class U> struct rebind { typedef mi_stl_allocator<U> other; };

  mi_stl_allocator()                                             mi_attr_noexcept = default;
  mi_stl_allocator(const mi_stl_allocator&)                      mi_attr_noexcept = default;
  template<class U> mi_stl_allocator(const mi_stl_allocator<U>&) mi_attr_noexcept { }
  mi_stl_allocator  select_on_container_copy_construction() const { return *this; }
  void              deallocate(T* p, size_type) { mi_free(p); }

  #if (__cplusplus >= 201703L)  // C++17
  // NOTE(review): mi_new_n presumably follows operator-new semantics
  // (throws std::bad_alloc on failure, checks count*size overflow) -- confirm.
  mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_new_n(count, sizeof(T))); }
  mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
  #else
  mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_new_n(count, sizeof(value_type))); }
  #endif

  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
  // All instances use the same (default) heap, so they always compare equal.
  using is_always_equal = std::true_type;
  #endif
};
|
||||
|
||||
// Stateless allocator: all instances are interchangeable, so equality is constant.
template<class T1,class T2> bool operator==(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return true; }
template<class T1,class T2> bool operator!=(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return false; }
|
||||
|
||||
|
||||
#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) // C++11
|
||||
#define MI_HAS_HEAP_STL_ALLOCATOR 1
|
||||
|
||||
#include <memory> // std::shared_ptr
|
||||
|
||||
// Common base class for STL allocators in a specific heap.
// The heap is shared between allocator copies via std::shared_ptr; when
// default-constructed, a fresh heap is created and deleted (or destroyed
// when `_mi_destroy` is true) once the last allocator referencing it dies.
template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common<T> {
  using typename _mi_stl_allocator_common<T>::size_type;
  using typename _mi_stl_allocator_common<T>::value_type;
  using typename _mi_stl_allocator_common<T>::pointer;

  _mi_heap_stl_allocator_common(mi_heap_t* hp) : heap(hp) { }  /* will not delete nor destroy the passed in heap */

  #if (__cplusplus >= 201703L)  // C++17
  mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); }
  mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
  #else
  mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); }
  #endif

  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
  // Allocators are equal only when they share the same heap (see is_equal).
  using is_always_equal = std::false_type;
  #endif

  // Collect free memory in this allocator's heap.
  void collect(bool force) { mi_heap_collect(this->heap.get(), force); }
  // Two allocators are equal iff they share the same underlying heap.
  template<class U> bool is_equal(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) const { return (this->heap == x.heap); }

protected:
  std::shared_ptr<mi_heap_t> heap;
  template<class U, bool D> friend struct _mi_heap_stl_allocator_common;

  _mi_heap_stl_allocator_common() {
    mi_heap_t* hp = mi_heap_new();
    this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete));  /* calls heap_delete/destroy when the refcount drops to zero */
  }
  _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { }
  template<class U> _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) mi_attr_noexcept : heap(x.heap) { }

private:
  // shared_ptr deleters: chosen at construction depending on `_mi_destroy`.
  static void heap_delete(mi_heap_t* hp)  { if (hp != NULL) { mi_heap_delete(hp); } }
  static void heap_destroy(mi_heap_t* hp) { if (hp != NULL) { mi_heap_destroy(hp); } }
};
|
||||
|
||||
// STL allocator allocation in a specific heap.
// Blocks are freed individually with mi_free; the heap itself is deleted
// (remaining blocks migrate) when the last allocator sharing it goes away.
template<class T> struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common<T, false> {
  using typename _mi_heap_stl_allocator_common<T, false>::size_type;
  mi_heap_stl_allocator() : _mi_heap_stl_allocator_common<T, false>() { }  // creates fresh heap that is deleted when the destructor is called
  mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { }  // no delete nor destroy on the passed in heap
  template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, false>(x) { }

  mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
  void deallocate(T* p, size_type) { mi_free(p); }
  template<class U> struct rebind { typedef mi_heap_stl_allocator<U> other; };
};
|
||||
|
||||
// Heap allocators compare equal exactly when they share the same heap.
template<class T1, class T2> bool operator==(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }
|
||||
|
||||
|
||||
// STL allocator allocation in a specific heap, where `free` does nothing and
// the heap is destroyed in one go on destruction -- use with care!
template<class T> struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common<T, true> {
  using typename _mi_heap_stl_allocator_common<T, true>::size_type;
  mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common<T, true>() { } // creates fresh heap that is destroyed when the destructor is called
  mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { } // no delete nor destroy on the passed in heap
  template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, true>(x) { }

  mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
  void deallocate(T*, size_type) { /* do nothing as we destroy the heap on destruct. */ }
  template<class U> struct rebind { typedef mi_heap_destroy_stl_allocator<U> other; };
};
|
||||
|
||||
// Destroy-heap allocators compare equal exactly when they share the same heap.
template<class T1, class T2> bool operator==(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
template<class T1, class T2> bool operator!=(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }
|
||||
|
||||
#endif // C++11
|
||||
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif
|
||||
392
extern/include/python/internal/mimalloc/mimalloc/atomic.h
vendored
Normal file
392
extern/include/python/internal/mimalloc/mimalloc/atomic.h
vendored
Normal file
@@ -0,0 +1,392 @@
|
||||
/* ----------------------------------------------------------------------------
|
||||
Copyright (c) 2018-2023 Microsoft Research, Daan Leijen
|
||||
This is free software; you can redistribute it and/or modify it under the
|
||||
terms of the MIT license. A copy of the license can be found in the file
|
||||
"LICENSE" at the root of this distribution.
|
||||
-----------------------------------------------------------------------------*/
|
||||
#pragma once
|
||||
#ifndef MIMALLOC_ATOMIC_H
|
||||
#define MIMALLOC_ATOMIC_H
|
||||
|
||||
// --------------------------------------------------------------------------------------------
|
||||
// Atomics
|
||||
// We need to be portable between C, C++, and MSVC.
|
||||
// We base the primitives on the C/C++ atomics and create a minimal wrapper for MSVC in C compilation mode.
|
||||
// This is why we try to use only `uintptr_t` and `<type>*` as atomic types.
|
||||
// To gain better insight in the range of used atomics, we use explicitly named memory order operations
|
||||
// instead of passing the memory order as a parameter.
|
||||
// -----------------------------------------------------------------------------------------------
|
||||
|
||||
#if defined(__cplusplus)
|
||||
// Use C++ atomics
|
||||
#include <atomic>
|
||||
#define _Atomic(tp) std::atomic<tp>
|
||||
#define mi_atomic(name) std::atomic_##name
|
||||
#define mi_memory_order(name) std::memory_order_##name
|
||||
#if (__cplusplus >= 202002L) // c++20, see issue #571
|
||||
#define MI_ATOMIC_VAR_INIT(x) x
|
||||
#elif !defined(ATOMIC_VAR_INIT)
|
||||
#define MI_ATOMIC_VAR_INIT(x) x
|
||||
#else
|
||||
#define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
|
||||
#endif
|
||||
#elif defined(_MSC_VER)
|
||||
// Use MSVC C wrapper for C11 atomics
|
||||
#define _Atomic(tp) tp
|
||||
#define MI_ATOMIC_VAR_INIT(x) x
|
||||
#define mi_atomic(name) mi_atomic_##name
|
||||
#define mi_memory_order(name) mi_memory_order_##name
|
||||
#else
|
||||
// Use C11 atomics
|
||||
#include <stdatomic.h>
|
||||
#define mi_atomic(name) atomic_##name
|
||||
#define mi_memory_order(name) memory_order_##name
|
||||
#if (__STDC_VERSION__ >= 201710L) // c17, see issue #735
|
||||
#define MI_ATOMIC_VAR_INIT(x) x
|
||||
#elif !defined(ATOMIC_VAR_INIT)
|
||||
#define MI_ATOMIC_VAR_INIT(x) x
|
||||
#else
|
||||
#define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// Various defines for all used memory orders in mimalloc
|
||||
#define mi_atomic_cas_weak(p,expected,desired,mem_success,mem_fail) \
|
||||
mi_atomic(compare_exchange_weak_explicit)(p,expected,desired,mem_success,mem_fail)
|
||||
|
||||
#define mi_atomic_cas_strong(p,expected,desired,mem_success,mem_fail) \
|
||||
mi_atomic(compare_exchange_strong_explicit)(p,expected,desired,mem_success,mem_fail)
|
||||
|
||||
#define mi_atomic_load_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire))
|
||||
#define mi_atomic_load_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
|
||||
#define mi_atomic_store_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release))
|
||||
#define mi_atomic_store_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
|
||||
#define mi_atomic_exchange_release(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(release))
|
||||
#define mi_atomic_exchange_acq_rel(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(acq_rel))
|
||||
#define mi_atomic_cas_weak_release(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
|
||||
#define mi_atomic_cas_weak_acq_rel(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire))
|
||||
#define mi_atomic_cas_strong_release(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
|
||||
#define mi_atomic_cas_strong_acq_rel(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire))
|
||||
|
||||
#define mi_atomic_add_relaxed(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(relaxed))
|
||||
#define mi_atomic_sub_relaxed(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(relaxed))
|
||||
#define mi_atomic_add_acq_rel(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(acq_rel))
|
||||
#define mi_atomic_sub_acq_rel(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(acq_rel))
|
||||
#define mi_atomic_and_acq_rel(p,x) mi_atomic(fetch_and_explicit)(p,x,mi_memory_order(acq_rel))
|
||||
#define mi_atomic_or_acq_rel(p,x) mi_atomic(fetch_or_explicit)(p,x,mi_memory_order(acq_rel))
|
||||
|
||||
#define mi_atomic_increment_relaxed(p) mi_atomic_add_relaxed(p,(uintptr_t)1)
|
||||
#define mi_atomic_decrement_relaxed(p) mi_atomic_sub_relaxed(p,(uintptr_t)1)
|
||||
#define mi_atomic_increment_acq_rel(p) mi_atomic_add_acq_rel(p,(uintptr_t)1)
|
||||
#define mi_atomic_decrement_acq_rel(p) mi_atomic_sub_acq_rel(p,(uintptr_t)1)
|
||||
|
||||
static inline void mi_atomic_yield(void);
|
||||
static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add);
|
||||
static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);
|
||||
|
||||
|
||||
#if defined(__cplusplus) || !defined(_MSC_VER)
|
||||
|
||||
// In C++/C11 atomics we have polymorphic atomics so can use the typed `ptr` variants (where `tp` is the type of atomic value)
|
||||
// We use these macros so we can provide a typed wrapper in MSVC in C compilation mode as well
|
||||
#define mi_atomic_load_ptr_acquire(tp,p) mi_atomic_load_acquire(p)
|
||||
#define mi_atomic_load_ptr_relaxed(tp,p) mi_atomic_load_relaxed(p)
|
||||
|
||||
// In C++ we need to add casts to help resolve templates if NULL is passed
|
||||
#if defined(__cplusplus)
|
||||
#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release(p,(tp*)x)
|
||||
#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed(p,(tp*)x)
|
||||
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,(tp*)des)
|
||||
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des)
|
||||
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,(tp*)des)
|
||||
#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,(tp*)x)
|
||||
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,(tp*)x)
|
||||
#else
|
||||
#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release(p,x)
|
||||
#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed(p,x)
|
||||
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,des)
|
||||
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,des)
|
||||
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,des)
|
||||
#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,x)
|
||||
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,x)
|
||||
#endif
|
||||
|
||||
// These are used by the statistics
// Atomically add to a 64-bit counter; returns the previous value.
static inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) {
  return mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed));
}
// Atomically raise `*p` to at least `x` (monotonic maximum) via a CAS loop.
static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) {
  int64_t current = mi_atomic_load_relaxed((_Atomic(int64_t)*)p);
  // on CAS failure `current` is refreshed with the observed value
  while (current < x && !mi_atomic_cas_weak_release((_Atomic(int64_t)*)p, &current, x)) { /* nothing */ };
}
|
||||
|
||||
// Used by timers
|
||||
#define mi_atomic_loadi64_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire))
|
||||
#define mi_atomic_loadi64_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
|
||||
#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release))
|
||||
#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
|
||||
|
||||
#define mi_atomic_casi64_strong_acq_rel(p,e,d) mi_atomic_cas_strong_acq_rel(p,e,d)
|
||||
#define mi_atomic_addi64_acq_rel(p,i) mi_atomic_add_acq_rel(p,i)
|
||||
|
||||
|
||||
#elif defined(_MSC_VER)
|
||||
|
||||
// MSVC C compilation wrapper that uses Interlocked operations to model C11 atomics.
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
#include <intrin.h>
|
||||
#ifdef _WIN64
|
||||
typedef LONG64 msc_intptr_t;
|
||||
#define MI_64(f) f##64
|
||||
#else
|
||||
typedef LONG msc_intptr_t;
|
||||
#define MI_64(f) f
|
||||
#endif
|
||||
|
||||
typedef enum mi_memory_order_e {
|
||||
mi_memory_order_relaxed,
|
||||
mi_memory_order_consume,
|
||||
mi_memory_order_acquire,
|
||||
mi_memory_order_release,
|
||||
mi_memory_order_acq_rel,
|
||||
mi_memory_order_seq_cst
|
||||
} mi_memory_order;
|
||||
|
||||
// Atomic read-modify-write primitives modeled with MSVC Interlocked intrinsics.
// The memory-order parameter is accepted for API compatibility but ignored:
// the Interlocked intrinsics act as full barriers.
static inline uintptr_t mi_atomic_fetch_add_explicit(_Atomic(uintptr_t)*p, uintptr_t add, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add);
}
static inline uintptr_t mi_atomic_fetch_sub_explicit(_Atomic(uintptr_t)*p, uintptr_t sub, mi_memory_order mo) {
  (void)(mo);
  // subtraction expressed as adding the negated value
  return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, -((msc_intptr_t)sub));
}
static inline uintptr_t mi_atomic_fetch_and_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
}
static inline uintptr_t mi_atomic_fetch_or_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedOr)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
}
|
||||
// Strong CAS: on failure `*expected` is updated with the observed value,
// matching C11 atomic_compare_exchange_* semantics.
static inline bool mi_atomic_compare_exchange_strong_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
  (void)(mo1); (void)(mo2);
  uintptr_t read = (uintptr_t)MI_64(_InterlockedCompareExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)desired, (msc_intptr_t)(*expected));
  if (read == *expected) {
    return true;
  }
  else {
    *expected = read;
    return false;
  }
}
// The Interlocked CAS never fails spuriously, so "weak" maps to "strong".
static inline bool mi_atomic_compare_exchange_weak_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
  return mi_atomic_compare_exchange_strong_explicit(p, expected, desired, mo1, mo2);
}
// Atomic swap; returns the previous value.
static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)*p, uintptr_t exchange, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)exchange);
}
|
||||
// Full memory fence, emulated with an atomic exchange on a stack dummy.
// NOTE(review): the dummy is never shared; this relies on the intrinsic's
// full-barrier side effect rather than the object itself -- confirm this
// suffices for all targeted MSVC platforms.
static inline void mi_atomic_thread_fence(mi_memory_order mo) {
  (void)(mo);
  _Atomic(uintptr_t) x = 0;
  mi_atomic_exchange_explicit(&x, 1, mo);
}
|
||||
// Atomic pointer-sized load.
static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) {
  (void)(mo);
  #if defined(_M_IX86) || defined(_M_X64)
  // x86/x64: an aligned pointer-sized load is already atomic
  return *p;
  #else
  uintptr_t x = *p;
  if (mo > mi_memory_order_relaxed) {
    // force an ordered load by CAS-ing the observed value with itself
    while (!mi_atomic_compare_exchange_weak_explicit((_Atomic(uintptr_t)*)p, &x, x, mo, mi_memory_order_relaxed)) { /* nothing */ };
  }
  return x;
  #endif
}
// Atomic pointer-sized store.
static inline void mi_atomic_store_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
  (void)(mo);
  #if defined(_M_IX86) || defined(_M_X64)
  // x86/x64: an aligned pointer-sized store is already atomic
  *p = x;
  #else
  mi_atomic_exchange_explicit(p, x, mo);
  #endif
}
|
||||
// Atomic 64-bit load.
static inline int64_t mi_atomic_loadi64_explicit(_Atomic(int64_t)*p, mi_memory_order mo) {
  (void)(mo);
  #if defined(_M_X64)
  // x64: an aligned 64-bit load is already atomic
  return *p;
  #else
  // 32-bit: read atomically by CAS-ing the observed value with itself
  int64_t old = *p;
  int64_t x = old;
  while ((old = InterlockedCompareExchange64(p, x, old)) != x) {
    x = old;
  }
  return x;
  #endif
}
// Atomic 64-bit store.
static inline void mi_atomic_storei64_explicit(_Atomic(int64_t)*p, int64_t x, mi_memory_order mo) {
  (void)(mo);
  #if defined(x_M_IX86) || defined(_M_X64)
  // NOTE(review): `x_M_IX86` never matches, so 32-bit x86 always takes the
  // InterlockedExchange64 path below. Since a plain 64-bit store is NOT
  // atomic on IX86 this looks deliberate -- confirm before "fixing" the name.
  *p = x;
  #else
  InterlockedExchange64(p, x);
  #endif
}
|
||||
|
||||
// These are used by the statistics
// Atomically add to a 64-bit counter; returns the previous value.
static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int64_t add) {
  #ifdef _WIN64
  // 64-bit Windows: intptr_t == int64_t, so reuse the pointer-sized add
  return (int64_t)mi_atomic_addi((int64_t*)p, add);
  #else
  // 32-bit: emulate 64-bit fetch-add with a CAS loop
  int64_t current;
  int64_t sum;
  do {
    current = *p;
    sum = current + add;
  } while (_InterlockedCompareExchange64(p, sum, current) != current);
  return current;
  #endif
}
// Atomically raise `*p` to at least `x` (monotonic maximum) via a CAS loop.
static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) {
  int64_t current;
  do {
    current = *p;
  } while (current < x && _InterlockedCompareExchange64(p, x, current) != current);
}
|
||||
|
||||
// Add to a 64-bit counter with acquire-release semantics.
// (The Interlocked intrinsics used by the relaxed variant are full
// barriers on MSVC, so delegating is sufficient.)
// Consistency fix: the parameter was declared `_Atomic(int64_t*)p` -- the `*`
// misplaced inside the macro argument, which only compiles here because the
// MSVC wrapper defines `_Atomic(tp)` as `tp`. Normalized to `_Atomic(int64_t)*`
// to match every other declaration in this section; the macro expansion
// (`int64_t*`) is identical.
static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t)* p, int64_t i) {
  mi_atomic_addi64_relaxed(p, i);
}

// Strong 64-bit compare-and-swap; on failure `*exp` receives the observed
// value (matching C11 atomic_compare_exchange semantics).
static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t)* p, int64_t* exp, int64_t des) {
  int64_t read = _InterlockedCompareExchange64(p, des, *exp);
  if (read == *exp) {
    return true;
  }
  else {
    *exp = read;
    return false;
  }
}
|
||||
|
||||
// The pointer macros cast to `uintptr_t`.
|
||||
#define mi_atomic_load_ptr_acquire(tp,p) (tp*)mi_atomic_load_acquire((_Atomic(uintptr_t)*)(p))
|
||||
#define mi_atomic_load_ptr_relaxed(tp,p) (tp*)mi_atomic_load_relaxed((_Atomic(uintptr_t)*)(p))
|
||||
#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release((_Atomic(uintptr_t)*)(p),(uintptr_t)(x))
|
||||
#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)(x))
|
||||
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
|
||||
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
|
||||
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
|
||||
#define mi_atomic_exchange_ptr_release(tp,p,x) (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
|
||||
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
|
||||
|
||||
#define mi_atomic_loadi64_acquire(p) mi_atomic(loadi64_explicit)(p,mi_memory_order(acquire))
|
||||
#define mi_atomic_loadi64_relaxed(p) mi_atomic(loadi64_explicit)(p,mi_memory_order(relaxed))
|
||||
#define mi_atomic_storei64_release(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(release))
|
||||
#define mi_atomic_storei64_relaxed(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(relaxed))
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
// Atomically add a signed value; returns the previous value.
// (Implemented via the unsigned add -- two's-complement makes this exact.)
static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add) {
  return (intptr_t)mi_atomic_add_acq_rel((_Atomic(uintptr_t)*)p, (uintptr_t)add);
}
|
||||
|
||||
// Atomically subtract a signed value; returns the previous value.
static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) {
  return (intptr_t)mi_atomic_addi(p, -sub);
}
|
||||
|
||||
// One-time-initialization flag: 0 = not yet run, 1 = already run.
typedef _Atomic(uintptr_t) mi_atomic_once_t;

// Returns true only on the first invocation
static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
  if (mi_atomic_load_relaxed(once) != 0) return false;  // quick test
  uintptr_t expected = 0;
  // exactly one thread wins the 0 -> 1 transition
  return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1);  // try to set to 1
}
|
||||
|
||||
// Mutual-exclusion guard flag: 0 = free, 1 = held.
typedef _Atomic(uintptr_t) mi_atomic_guard_t;

// Allows only one thread to execute at a time
// (a for-loop in disguise: the body runs only if the CAS 0 -> 1 succeeds,
// and the increment clause releases the guard after the body finishes;
// threads that lose the CAS simply skip the body).
#define mi_atomic_guard(guard) \
  uintptr_t _mi_guard_expected = 0; \
  for(bool _mi_guard_once = true; \
      _mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,(uintptr_t)1); \
      (mi_atomic_store_release(guard,(uintptr_t)0), _mi_guard_once = false) )
|
||||
|
||||
|
||||
|
||||
// Yield
// Relax the processor inside spin loops; the implementation is chosen per
// language / platform / architecture in the preprocessor ladder below.
#if defined(__cplusplus)
#include <thread>
static inline void mi_atomic_yield(void) {
  std::this_thread::yield();
}
#elif defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
static inline void mi_atomic_yield(void) {
  YieldProcessor();
}
#elif defined(__SSE2__)
#include <emmintrin.h>
static inline void mi_atomic_yield(void) {
  _mm_pause();
}
#elif (defined(__GNUC__) || defined(__clang__)) && \
      (defined(__x86_64__) || defined(__i386__) || \
       defined(__aarch64__) || defined(__arm__) || \
       defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__))
#if defined(__x86_64__) || defined(__i386__)
static inline void mi_atomic_yield(void) {
  __asm__ volatile ("pause" ::: "memory");
}
#elif defined(__aarch64__)
static inline void mi_atomic_yield(void) {
  __asm__ volatile("wfe");
}
#elif defined(__arm__)
#if __ARM_ARCH >= 7
static inline void mi_atomic_yield(void) {
  __asm__ volatile("yield" ::: "memory");
}
#else
// older ARM has no yield hint; a nop with a compiler barrier
static inline void mi_atomic_yield(void) {
  __asm__ volatile ("nop" ::: "memory");
}
#endif
#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__)
// PowerPC: "or rN,rN,rN" is the SMT priority-hint nop
#ifdef __APPLE__
static inline void mi_atomic_yield(void) {
  __asm__ volatile ("or r27,r27,r27" ::: "memory");
}
#else
static inline void mi_atomic_yield(void) {
  __asm__ __volatile__ ("or 27,27,27" ::: "memory");
}
#endif
#endif
#elif defined(__sun)
// Solaris/illumos
#include <synch.h>
static inline void mi_atomic_yield(void) {
  smt_pause();
}
#elif defined(__wasi__)
#include <sched.h>
static inline void mi_atomic_yield(void) {
  sched_yield();
}
#else
// generic POSIX fallback
#include <unistd.h>
static inline void mi_atomic_yield(void) {
  sleep(0);
}
#endif
|
||||
|
||||
|
||||
#endif // MIMALLOC_ATOMIC_H
|
||||
969
extern/include/python/internal/mimalloc/mimalloc/internal.h
vendored
Normal file
969
extern/include/python/internal/mimalloc/mimalloc/internal.h
vendored
Normal file
@@ -0,0 +1,969 @@
|
||||
/* ----------------------------------------------------------------------------
|
||||
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
|
||||
This is free software; you can redistribute it and/or modify it under the
|
||||
terms of the MIT license. A copy of the license can be found in the file
|
||||
"LICENSE" at the root of this distribution.
|
||||
-----------------------------------------------------------------------------*/
|
||||
#pragma once
|
||||
#ifndef MIMALLOC_INTERNAL_H
|
||||
#define MIMALLOC_INTERNAL_H
|
||||
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// This file contains the internal API's of mimalloc and various utility
|
||||
// functions and macros.
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
#include "types.h"
|
||||
#include "track.h"
|
||||
|
||||
#if (MI_DEBUG>0)
|
||||
#define mi_trace_message(...) _mi_trace_message(__VA_ARGS__)
|
||||
#else
|
||||
#define mi_trace_message(...)
|
||||
#endif
|
||||
|
||||
#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
|
||||
#define __wasi__
|
||||
#endif
|
||||
|
||||
#if defined(__cplusplus)
|
||||
#define mi_decl_externc extern "C"
|
||||
#else
|
||||
#define mi_decl_externc
|
||||
#endif
|
||||
|
||||
// pthreads
|
||||
#if !defined(_WIN32) && !defined(__wasi__)
|
||||
#define MI_USE_PTHREADS
|
||||
#include <pthread.h>
|
||||
#endif
|
||||
|
||||
// "options.c"
|
||||
void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
|
||||
void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
|
||||
void _mi_warning_message(const char* fmt, ...);
|
||||
void _mi_verbose_message(const char* fmt, ...);
|
||||
void _mi_trace_message(const char* fmt, ...);
|
||||
void _mi_options_init(void);
|
||||
void _mi_error_message(int err, const char* fmt, ...);
|
||||
|
||||
// random.c
|
||||
void _mi_random_init(mi_random_ctx_t* ctx);
|
||||
void _mi_random_init_weak(mi_random_ctx_t* ctx);
|
||||
void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx);
|
||||
void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
|
||||
uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
|
||||
uintptr_t _mi_heap_random_next(mi_heap_t* heap);
|
||||
uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
|
||||
static inline uintptr_t _mi_random_shuffle(uintptr_t x);
|
||||
|
||||
// init.c
|
||||
extern mi_decl_cache_align mi_stats_t _mi_stats_main;
|
||||
extern mi_decl_cache_align const mi_page_t _mi_page_empty;
|
||||
bool _mi_is_main_thread(void);
|
||||
size_t _mi_current_thread_count(void);
|
||||
bool _mi_preloading(void); // true while the C runtime is not initialized yet
|
||||
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
|
||||
mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
|
||||
void _mi_thread_done(mi_heap_t* heap);
|
||||
void _mi_thread_data_collect(void);
|
||||
void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);
|
||||
|
||||
// os.c
|
||||
void _mi_os_init(void); // called from process init
|
||||
void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
|
||||
void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
|
||||
void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);
|
||||
|
||||
size_t _mi_os_page_size(void);
|
||||
size_t _mi_os_good_alloc_size(size_t size);
|
||||
bool _mi_os_has_overcommit(void);
|
||||
bool _mi_os_has_virtual_reserve(void);
|
||||
|
||||
bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
|
||||
bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
|
||||
bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
|
||||
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
|
||||
bool _mi_os_protect(void* addr, size_t size);
|
||||
bool _mi_os_unprotect(void* addr, size_t size);
|
||||
bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
|
||||
bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats);
|
||||
|
||||
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats);
|
||||
void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats);
|
||||
|
||||
void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
|
||||
bool _mi_os_use_large_page(size_t size, size_t alignment);
|
||||
size_t _mi_os_large_page_size(void);
|
||||
|
||||
void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);
|
||||
|
||||
// arena.c
|
||||
mi_arena_id_t _mi_arena_id_none(void);
|
||||
void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats);
|
||||
void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
|
||||
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
|
||||
bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
|
||||
bool _mi_arena_contains(const void* p);
|
||||
void _mi_arena_collect(bool force_purge, mi_stats_t* stats);
|
||||
void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);
|
||||
|
||||
// "segment-map.c"
|
||||
void _mi_segment_map_allocated_at(const mi_segment_t* segment);
|
||||
void _mi_segment_map_freed_at(const mi_segment_t* segment);
|
||||
|
||||
// "segment.c"
|
||||
extern mi_abandoned_pool_t _mi_abandoned_default; // global abandoned pool
|
||||
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
|
||||
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
|
||||
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
|
||||
bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);
|
||||
void _mi_segment_thread_collect(mi_segments_tld_t* tld);
|
||||
bool _mi_abandoned_pool_visit_blocks(mi_abandoned_pool_t* pool, uint8_t page_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
|
||||
|
||||
|
||||
#if MI_HUGE_PAGE_ABANDON
|
||||
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
|
||||
#else
|
||||
void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
|
||||
#endif
|
||||
|
||||
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
|
||||
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
|
||||
void _mi_abandoned_await_readers(mi_abandoned_pool_t *pool);
|
||||
void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld);
|
||||
|
||||
// "page.c"
|
||||
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
|
||||
|
||||
void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
|
||||
void _mi_page_unfull(mi_page_t* page);
|
||||
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
|
||||
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
|
||||
void _mi_heap_delayed_free_all(mi_heap_t* heap);
|
||||
bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
|
||||
void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
|
||||
|
||||
void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
|
||||
bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
|
||||
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
|
||||
void _mi_deferred_free(mi_heap_t* heap, bool force);
|
||||
|
||||
void _mi_page_free_collect(mi_page_t* page,bool force);
|
||||
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments
|
||||
|
||||
size_t _mi_bin_size(uint8_t bin); // for stats
|
||||
uint8_t _mi_bin(size_t size); // for stats
|
||||
|
||||
// "heap.c"
|
||||
void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool no_reclaim, uint8_t tag);
|
||||
void _mi_heap_destroy_pages(mi_heap_t* heap);
|
||||
void _mi_heap_collect_abandon(mi_heap_t* heap);
|
||||
void _mi_heap_set_default_direct(mi_heap_t* heap);
|
||||
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
|
||||
void _mi_heap_unsafe_destroy_all(void);
|
||||
void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
|
||||
bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t *page, mi_block_visit_fun* visitor, void* arg);
|
||||
|
||||
// "stats.c"
|
||||
void _mi_stats_done(mi_stats_t* stats);
|
||||
mi_msecs_t _mi_clock_now(void);
|
||||
mi_msecs_t _mi_clock_end(mi_msecs_t start);
|
||||
mi_msecs_t _mi_clock_start(void);
|
||||
|
||||
// "alloc.c"
|
||||
void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic`
|
||||
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
|
||||
void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
|
||||
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
|
||||
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
|
||||
bool _mi_free_delayed_block(mi_block_t* block);
|
||||
void _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
|
||||
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
|
||||
|
||||
// option.c, c primitives
|
||||
char _mi_toupper(char c);
|
||||
int _mi_strnicmp(const char* s, const char* t, size_t n);
|
||||
void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
|
||||
void _mi_strlcat(char* dest, const char* src, size_t dest_size);
|
||||
size_t _mi_strlen(const char* s);
|
||||
size_t _mi_strnlen(const char* s, size_t max_len);
|
||||
|
||||
|
||||
#if MI_DEBUG>1
|
||||
bool _mi_page_is_valid(mi_page_t* page);
|
||||
#endif
|
||||
|
||||
|
||||
// ------------------------------------------------------
|
||||
// Branches
|
||||
// ------------------------------------------------------
|
||||
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#define mi_unlikely(x) (__builtin_expect(!!(x),false))
|
||||
#define mi_likely(x) (__builtin_expect(!!(x),true))
|
||||
#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
|
||||
#define mi_unlikely(x) (x) [[unlikely]]
|
||||
#define mi_likely(x) (x) [[likely]]
|
||||
#else
|
||||
#define mi_unlikely(x) (x)
|
||||
#define mi_likely(x) (x)
|
||||
#endif
|
||||
|
||||
#ifndef __has_builtin
|
||||
#define __has_builtin(x) 0
|
||||
#endif
|
||||
|
||||
|
||||
/* -----------------------------------------------------------
|
||||
Error codes passed to `_mi_fatal_error`
|
||||
All are recoverable but EFAULT is a serious error and aborts by default in secure mode.
|
||||
For portability define undefined error codes using common Unix codes:
|
||||
<https://www-numi.fnal.gov/offline_software/srt_public_context/WebDocs/Errors/unix_system_errors.html>
|
||||
----------------------------------------------------------- */
|
||||
#include <errno.h>
|
||||
#ifndef EAGAIN // double free
|
||||
#define EAGAIN (11)
|
||||
#endif
|
||||
#ifndef ENOMEM // out of memory
|
||||
#define ENOMEM (12)
|
||||
#endif
|
||||
#ifndef EFAULT // corrupted free-list or meta-data
|
||||
#define EFAULT (14)
|
||||
#endif
|
||||
#ifndef EINVAL // trying to free an invalid pointer
|
||||
#define EINVAL (22)
|
||||
#endif
|
||||
#ifndef EOVERFLOW // count*size overflow
|
||||
#define EOVERFLOW (75)
|
||||
#endif
|
||||
|
||||
|
||||
/* -----------------------------------------------------------
|
||||
Inlined definitions
|
||||
----------------------------------------------------------- */
|
||||
#define MI_UNUSED(x) (void)(x)
|
||||
#if (MI_DEBUG>0)
|
||||
#define MI_UNUSED_RELEASE(x)
|
||||
#else
|
||||
#define MI_UNUSED_RELEASE(x) MI_UNUSED(x)
|
||||
#endif
|
||||
|
||||
#define MI_INIT4(x) x(),x(),x(),x()
|
||||
#define MI_INIT8(x) MI_INIT4(x),MI_INIT4(x)
|
||||
#define MI_INIT16(x) MI_INIT8(x),MI_INIT8(x)
|
||||
#define MI_INIT32(x) MI_INIT16(x),MI_INIT16(x)
|
||||
#define MI_INIT64(x) MI_INIT32(x),MI_INIT32(x)
|
||||
#define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x)
|
||||
#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x)
|
||||
|
||||
|
||||
#include <string.h>
|
||||
// initialize a local variable to zero; use memset as compilers optimize constant sized memset's
|
||||
#define _mi_memzero_var(x) memset(&x,0,sizeof(x))
|
||||
|
||||
// Is `x` a power of two? (0 is considered a power of two)
|
||||
// Is `x` a power of two? (0 is considered a power of two)
static inline bool _mi_is_power_of_two(uintptr_t x) {
  const uintptr_t cleared = x & (x - 1);  // clears the lowest set bit
  return (cleared == 0);
}
|
||||
|
||||
// Is a pointer aligned?
|
||||
// Is pointer `p` a multiple of `alignment`? (`alignment` must be non-zero)
static inline bool _mi_is_aligned(void* p, size_t alignment) {
  mi_assert_internal(alignment != 0);
  const uintptr_t addr = (uintptr_t)p;
  return ((addr % alignment) == 0);
}
|
||||
|
||||
// Align upwards
|
||||
// Round `sz` up to a multiple of `alignment` (must be non-zero).
// Power-of-two alignments use a mask; others fall back to a division.
static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
  mi_assert_internal(alignment != 0);
  const uintptr_t mask = alignment - 1;
  if ((alignment & mask) != 0) {           // not a power of two?
    return (((sz + mask)/alignment)*alignment);
  }
  return ((sz + mask) & ~mask);            // power of two: mask off low bits
}
|
||||
|
||||
// Align downwards
|
||||
// Round `sz` down to a multiple of `alignment` (must be non-zero).
static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
  mi_assert_internal(alignment != 0);
  const uintptr_t mask = alignment - 1;
  if ((alignment & mask) != 0) {           // not a power of two?
    return ((sz / alignment) * alignment);
  }
  return (sz & ~mask);                     // power of two: clear low bits
}
|
||||
|
||||
// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
|
||||
// Ceiling division: `s <= _mi_divide_up(s,d)*d < s+d`.
// The `divider==0` branch is defensive only; callers must pass a non-zero divider.
static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
  mi_assert_internal(divider != 0);
  if (divider == 0) return size;  // avoid division by zero in release builds
  return ((size + divider - 1) / divider);
}
|
||||
|
||||
// Is memory zero initialized?
|
||||
// Is the memory range `[p, p+size)` all zero?
// Fix: read through a `const uint8_t*` instead of casting away the
// parameter's const-ness on every iteration.
static inline bool mi_mem_is_zero(const void* p, size_t size) {
  const uint8_t* bytes = (const uint8_t*)p;  // const-correct byte view
  for (size_t i = 0; i < size; i++) {
    if (bytes[i] != 0) return false;
  }
  return true;  // vacuously true for size == 0
}
|
||||
|
||||
|
||||
// Align a byte size to a size in _machine words_,
|
||||
// i.e. byte size == `wsize*sizeof(void*)`.
|
||||
// Align a byte size to a size in _machine words_ (rounding up),
// i.e. byte size == `wsize*sizeof(void*)`.
static inline size_t _mi_wsize_from_size(size_t size) {
  mi_assert_internal(size <= SIZE_MAX - sizeof(uintptr_t));
  const size_t word = sizeof(uintptr_t);
  return (size + word - 1) / word;
}
|
||||
|
||||
// Overflow detecting multiply
|
||||
// Overflow detecting multiply: sets `*total = count*size` and returns
// `true` when the multiplication overflowed.
#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5))
#include <limits.h>   // UINT_MAX, ULONG_MAX
#if defined(_CLOCK_T) // for Illumos
#undef _CLOCK_T
#endif
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
  // select the builtin whose operand width matches `size_t`
  #if (SIZE_MAX == ULONG_MAX)
  return __builtin_umull_overflow(count, size, (unsigned long *)total);
  #elif (SIZE_MAX == UINT_MAX)
  return __builtin_umul_overflow(count, size, (unsigned int *)total);
  #else
  return __builtin_umulll_overflow(count, size, (unsigned long long *)total);
  #endif
}
#else /* __builtin_umul_overflow is unavailable */
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
  #define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t)))  // sqrt(SIZE_MAX)
  *total = count * size;
  // note: gcc/clang optimize this to directly check the overflow flag
  return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW)
          && size > 0 && (SIZE_MAX / size) < count);
}
#endif
|
||||
|
||||
// Safe multiply `count*size` into `total`; return `true` on overflow.
|
||||
static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) {
|
||||
if (count==1) { // quick check for the case where count is one (common for C++ allocators)
|
||||
*total = size;
|
||||
return false;
|
||||
}
|
||||
else if mi_unlikely(mi_mul_overflow(count, size, total)) {
|
||||
#if MI_DEBUG > 0
|
||||
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size);
|
||||
#endif
|
||||
*total = SIZE_MAX;
|
||||
return true;
|
||||
}
|
||||
else return false;
|
||||
}
|
||||
|
||||
|
||||
/*----------------------------------------------------------------------------------------
|
||||
Heap functions
|
||||
------------------------------------------------------------------------------------------- */
|
||||
|
||||
extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap
|
||||
|
||||
static inline bool mi_heap_is_backing(const mi_heap_t* heap) {
|
||||
return (heap->tld->heap_backing == heap);
|
||||
}
|
||||
|
||||
static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
|
||||
mi_assert_internal(heap != NULL);
|
||||
return (heap != &_mi_heap_empty);
|
||||
}
|
||||
|
||||
static inline uintptr_t _mi_ptr_cookie(const void* p) {
|
||||
extern mi_heap_t _mi_heap_main;
|
||||
mi_assert_internal(_mi_heap_main.cookie != 0);
|
||||
return ((uintptr_t)p ^ _mi_heap_main.cookie);
|
||||
}
|
||||
|
||||
/* -----------------------------------------------------------
|
||||
Pages
|
||||
----------------------------------------------------------- */
|
||||
|
||||
// Direct-mapped lookup of a page with free small blocks for `size`
// (size includes MI_PADDING_SIZE; indexed by word count).
static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
  mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
  const size_t widx = _mi_wsize_from_size(size);
  mi_assert_internal(widx < MI_PAGES_DIRECT);
  return heap->pages_free_direct[widx];
}
|
||||
|
||||
// Segment that contains the pointer
|
||||
// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE),
|
||||
// and we need align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it;
|
||||
// therefore we align one byte before `p`.
|
||||
static inline mi_segment_t* _mi_ptr_segment(const void* p) {
|
||||
mi_assert_internal(p != NULL);
|
||||
return (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK);
|
||||
}
|
||||
|
||||
static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) {
|
||||
mi_assert_internal(s->slice_offset== 0 && s->slice_count > 0);
|
||||
return (mi_page_t*)(s);
|
||||
}
|
||||
|
||||
static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
|
||||
mi_assert_internal(p->slice_offset== 0 && p->slice_count > 0);
|
||||
return (mi_slice_t*)(p);
|
||||
}
|
||||
|
||||
// Segment belonging to a page
|
||||
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
|
||||
mi_segment_t* segment = _mi_ptr_segment(page);
|
||||
mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
|
||||
return segment;
|
||||
}
|
||||
|
||||
static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
|
||||
mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset);
|
||||
mi_assert_internal(start >= _mi_ptr_segment(slice)->slices);
|
||||
mi_assert_internal(start->slice_offset == 0);
|
||||
mi_assert_internal(start + start->slice_count > slice);
|
||||
return start;
|
||||
}
|
||||
|
||||
// Get the page containing the pointer (performance critical as it is called in mi_free)
|
||||
static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
|
||||
mi_assert_internal(p > (void*)segment);
|
||||
ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
|
||||
mi_assert_internal(diff > 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE);
|
||||
size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT;
|
||||
mi_assert_internal(idx <= segment->slice_entries);
|
||||
mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
|
||||
mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data
|
||||
mi_assert_internal(slice->slice_offset == 0);
|
||||
mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries);
|
||||
return mi_slice_to_page(slice);
|
||||
}
|
||||
|
||||
// Quick page start for initialized pages
|
||||
// Quick page start for initialized pages.
static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
  return _mi_segment_page_start(segment, page, page_size);
}

// Get the page containing the pointer.
static inline mi_page_t* _mi_ptr_page(void* p) {
  mi_segment_t* const segment = _mi_ptr_segment(p);
  return _mi_segment_page_of(segment, p);
}
|
||||
|
||||
// Get the block size of a page (special case for huge objects)
|
||||
static inline size_t mi_page_block_size(const mi_page_t* page) {
|
||||
const size_t bsize = page->xblock_size;
|
||||
mi_assert_internal(bsize > 0);
|
||||
if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
|
||||
return bsize;
|
||||
}
|
||||
else {
|
||||
size_t psize;
|
||||
_mi_segment_page_start(_mi_page_segment(page), page, &psize);
|
||||
return psize;
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool mi_page_is_huge(const mi_page_t* page) {
|
||||
return (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
|
||||
}
|
||||
|
||||
// Get the usable block size of a page without fixed padding.
|
||||
// This may still include internal padding due to alignment and rounding up size classes.
|
||||
static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
|
||||
return mi_page_block_size(page) - MI_PADDING_SIZE;
|
||||
}
|
||||
|
||||
// size of a segment
|
||||
// Total size of a segment in bytes.
static inline size_t mi_segment_size(mi_segment_t* segment) {
  return segment->segment_slices * MI_SEGMENT_SLICE_SIZE;
}

// One-past-the-end address of a segment.
static inline uint8_t* mi_segment_end(mi_segment_t* segment) {
  uint8_t* const base = (uint8_t*)segment;
  return base + mi_segment_size(segment);
}
|
||||
|
||||
// Thread free access
|
||||
// Thread free access: `xthread_free` packs the list head pointer with a
// 2-bit `mi_delayed_t` flag in the low bits (see the `& ~3` / `& 3` masks).

// Head of the atomic thread-free list (flag bits masked off).
static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) {
  return (mi_block_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & ~3);
}

// Delayed-free flag stored in the low 2 bits of `xthread_free`.
static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) {
  return (mi_delayed_t)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & 3);
}

// Heap access
// Owning heap of a page; read with a relaxed atomic load.
static inline mi_heap_t* mi_page_heap(const mi_page_t* page) {
  return (mi_heap_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xheap));
}

// Set the owning heap; uses a release store (presumably so a thread reading
// `xheap` afterwards sees prior page initialization — NOTE(review): confirm).
static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
  mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING);
  mi_atomic_store_release(&page->xheap,(uintptr_t)heap);
}
|
||||
|
||||
// Thread free flag helpers
|
||||
static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) {
|
||||
return (mi_block_t*)(tf & ~0x03);
|
||||
}
|
||||
static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) {
|
||||
return (mi_delayed_t)(tf & 0x03);
|
||||
}
|
||||
static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) {
|
||||
return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed);
|
||||
}
|
||||
static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) {
|
||||
return mi_tf_make(mi_tf_block(tf),delayed);
|
||||
}
|
||||
static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) {
|
||||
return mi_tf_make(block, mi_tf_delayed(tf));
|
||||
}
|
||||
|
||||
// are all blocks in a page freed?
|
||||
// note: needs up-to-date used count, (as the `xthread_free` list may not be empty). see `_mi_page_collect_free`.
|
||||
static inline bool mi_page_all_free(const mi_page_t* page) {
|
||||
mi_assert_internal(page != NULL);
|
||||
return (page->used == 0);
|
||||
}
|
||||
|
||||
// are there any available blocks?
|
||||
static inline bool mi_page_has_any_available(const mi_page_t* page) {
|
||||
mi_assert_internal(page != NULL && page->reserved > 0);
|
||||
return (page->used < page->reserved || (mi_page_thread_free(page) != NULL));
|
||||
}
|
||||
|
||||
// are there immediately available blocks, i.e. blocks available on the free list.
|
||||
static inline bool mi_page_immediate_available(const mi_page_t* page) {
|
||||
mi_assert_internal(page != NULL);
|
||||
return (page->free != NULL);
|
||||
}
|
||||
|
||||
// is more than 7/8th of a page in use?
|
||||
static inline bool mi_page_mostly_used(const mi_page_t* page) {
|
||||
if (page==NULL) return true;
|
||||
uint16_t frac = page->reserved / 8U;
|
||||
return (page->reserved - page->used <= frac);
|
||||
}
|
||||
|
||||
static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) {
|
||||
return &((mi_heap_t*)heap)->pages[_mi_bin(size)];
|
||||
}
|
||||
|
||||
|
||||
|
||||
//-----------------------------------------------------------
|
||||
// Page flags
|
||||
//-----------------------------------------------------------
|
||||
// Read the `in_full` page flag.
static inline bool mi_page_is_in_full(const mi_page_t* page) {
  return page->flags.x.in_full;
}

// Set/clear the `in_full` page flag.
static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) {
  page->flags.x.in_full = in_full;
}

// Read the `has_aligned` page flag.
static inline bool mi_page_has_aligned(const mi_page_t* page) {
  return page->flags.x.has_aligned;
}

// Set/clear the `has_aligned` page flag.
static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
  page->flags.x.has_aligned = has_aligned;
}
|
||||
|
||||
|
||||
/* -------------------------------------------------------------------
|
||||
Encoding/Decoding the free list next pointers
|
||||
|
||||
This is to protect against buffer overflow exploits where the
|
||||
free list is mutated. Many hardened allocators xor the next pointer `p`
|
||||
with a secret key `k1`, as `p^k1`. This prevents overwriting with known
|
||||
values but might be still too weak: if the attacker can guess
|
||||
the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
|
||||
Moreover, if multiple blocks can be read as well, the attacker can
|
||||
xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot
|
||||
about the pointers (and subsequently `k1`).
|
||||
|
||||
Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<<k1)+k1`.
|
||||
Since these operations are not associative, the above approaches do not
|
||||
work so well any more even if the `p` can be guesstimated. For example,
|
||||
for the read case we can subtract two entries to discard the `+k1` term,
|
||||
but that leads to `((p1^k2)<<<k1) - ((p2^k2)<<<k1)` at best.
|
||||
We include the left-rotation since xor and addition are otherwise linear
|
||||
in the lowest bit. Finally, both keys are unique per page which reduces
|
||||
the re-use of keys by a large factor.
|
||||
|
||||
We also pass a separate `null` value to be used as `NULL` or otherwise
|
||||
`(k2<<<k1)+k1` would appear (too) often as a sentinel value.
|
||||
------------------------------------------------------------------- */
|
||||
|
||||
static inline bool mi_is_in_same_segment(const void* p, const void* q) {
|
||||
return (_mi_ptr_segment(p) == _mi_ptr_segment(q));
|
||||
}
|
||||
|
||||
static inline bool mi_is_in_same_page(const void* p, const void* q) {
|
||||
mi_segment_t* segment = _mi_ptr_segment(p);
|
||||
if (_mi_ptr_segment(q) != segment) return false;
|
||||
// assume q may be invalid // return (_mi_segment_page_of(segment, p) == _mi_segment_page_of(segment, q));
|
||||
mi_page_t* page = _mi_segment_page_of(segment, p);
|
||||
size_t psize;
|
||||
uint8_t* start = _mi_segment_page_start(segment, page, &psize);
|
||||
return (start <= (uint8_t*)q && (uint8_t*)q < start + psize);
|
||||
}
|
||||
|
||||
static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
|
||||
shift %= MI_INTPTR_BITS;
|
||||
return (shift==0 ? x : ((x << shift) | (x >> (MI_INTPTR_BITS - shift))));
|
||||
}
|
||||
static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
|
||||
shift %= MI_INTPTR_BITS;
|
||||
return (shift==0 ? x : ((x >> shift) | (x << (MI_INTPTR_BITS - shift))));
|
||||
}
|
||||
|
||||
// Decode a free-list pointer: inverse of `mi_ptr_encode`; the per-page
// sentinel `null` maps back to NULL.
static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
  void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
  return (p == null ? NULL : p);
}

// Encode a free-list pointer as `((p^k2)<<<k1)+k1`; NULL is stored as the
// sentinel `null` so the encoding of NULL is not a fixed pattern.
static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
  const uintptr_t x = (uintptr_t)(p == NULL ? null : p);
  return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
}
|
||||
|
||||
// Read the `next` field of a free-list block, decoding it when the free
// list is encoded (MI_ENCODE_FREELIST). The memory-tracker annotations
// temporarily mark the block accessible around the relaxed atomic load.
static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) {
  mi_track_mem_defined(block,sizeof(mi_block_t));
  mi_block_t* next;
#ifdef MI_ENCODE_FREELIST
  next = (mi_block_t*)mi_ptr_decode(null, mi_atomic_load_relaxed((_Atomic(mi_encoded_t)*)&block->next), keys);
#else
  MI_UNUSED(keys); MI_UNUSED(null);
  next = (mi_block_t*)mi_atomic_load_relaxed((_Atomic(mi_encoded_t)*)&block->next);
#endif
  mi_track_mem_noaccess(block,sizeof(mi_block_t));
  return next;
}

// Store the `next` field of a free-list block, encoding it when enabled.
static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) {
  mi_track_mem_undefined(block,sizeof(mi_block_t));
#ifdef MI_ENCODE_FREELIST
  mi_atomic_store_relaxed(&block->next, mi_ptr_encode(null, next, keys));
#else
  MI_UNUSED(keys); MI_UNUSED(null);
  mi_atomic_store_relaxed(&block->next, (mi_encoded_t)next);
#endif
  mi_track_mem_noaccess(block,sizeof(mi_block_t));
}
|
||||
|
||||
// Next pointer of `block` on `page`'s free list. With an encoded free list,
// a decoded pointer outside the page signals corruption and yields NULL
// (after reporting an EFAULT error).
static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
#ifdef MI_ENCODE_FREELIST
  mi_block_t* next = mi_block_nextx(page,block,page->keys);
  // check for free list corruption: is `next` at least in the same page?
  // TODO: check if `next` is `page->block_size` aligned?
  if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) {
    _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
    next = NULL;
  }
  return next;
#else
  MI_UNUSED(page);
  return mi_block_nextx(page,block,NULL);
#endif
}

// Set the next pointer of `block`, using the page keys when encoding is on.
static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
#ifdef MI_ENCODE_FREELIST
  mi_block_set_nextx(page,block,next, page->keys);
#else
  MI_UNUSED(page);
  mi_block_set_nextx(page,block,next,NULL);
#endif
}
|
||||
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// commit mask
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) {
|
||||
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
|
||||
cm->mask[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) {
|
||||
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
|
||||
cm->mask[i] = ~((size_t)0);
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) {
|
||||
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
|
||||
if (cm->mask[i] != 0) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) {
|
||||
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
|
||||
if (cm->mask[i] != ~((size_t)0)) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// defined in `segment.c`:
|
||||
size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total);
|
||||
size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx);
|
||||
|
||||
#define mi_commit_mask_foreach(cm,idx,count) \
|
||||
idx = 0; \
|
||||
while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) {
|
||||
|
||||
#define mi_commit_mask_foreach_end() \
|
||||
idx += count; \
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* -----------------------------------------------------------
|
||||
memory id's
|
||||
----------------------------------------------------------- */
|
||||
|
||||
static inline mi_memid_t _mi_memid_create(mi_memkind_t memkind) {
|
||||
mi_memid_t memid;
|
||||
_mi_memzero_var(memid);
|
||||
memid.memkind = memkind;
|
||||
return memid;
|
||||
}
|
||||
|
||||
static inline mi_memid_t _mi_memid_none(void) {
|
||||
return _mi_memid_create(MI_MEM_NONE);
|
||||
}
|
||||
|
||||
static inline mi_memid_t _mi_memid_create_os(bool committed, bool is_zero, bool is_large) {
|
||||
mi_memid_t memid = _mi_memid_create(MI_MEM_OS);
|
||||
memid.initially_committed = committed;
|
||||
memid.initially_zero = is_zero;
|
||||
memid.is_pinned = is_large;
|
||||
return memid;
|
||||
}
|
||||
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// Fast "random" shuffle
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// Fast "random" shuffle of `x` (not cryptographic).
static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
  if (x == 0) { x = 17; }  // ensure we don't get stuck in generating zeros
#if (MI_INTPTR_SIZE==8)
  // by Sebastiano Vigna, see: <http://xoshiro.di.unimi.it/splitmix64.c>
  x ^= x >> 30;  x *= 0xbf58476d1ce4e5b9UL;
  x ^= x >> 27;  x *= 0x94d049bb133111ebUL;
  x ^= x >> 31;
#elif (MI_INTPTR_SIZE==4)
  // by Chris Wellons, see: <https://nullprogram.com/blog/2018/07/31/>
  x ^= x >> 16;  x *= 0x7feb352dUL;
  x ^= x >> 15;  x *= 0x846ca68bUL;
  x ^= x >> 16;
#endif
  return x;
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// Optimize numa node access for the common case (= one node)
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
int _mi_os_numa_node_get(mi_os_tld_t* tld);
|
||||
size_t _mi_os_numa_node_count_get(void);
|
||||
|
||||
extern _Atomic(size_t) _mi_numa_node_count;
|
||||
static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
|
||||
if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
|
||||
else return _mi_os_numa_node_get(tld);
|
||||
}
|
||||
static inline size_t _mi_os_numa_node_count(void) {
|
||||
const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
|
||||
if mi_likely(count > 0) { return count; }
|
||||
else return _mi_os_numa_node_count_get();
|
||||
}
|
||||
|
||||
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero)
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
#if defined(__GNUC__)
|
||||
|
||||
#include <limits.h> // LONG_MAX
|
||||
#define MI_HAVE_FAST_BITSCAN
|
||||
static inline size_t mi_clz(uintptr_t x) {
|
||||
if (x==0) return MI_INTPTR_BITS;
|
||||
#if (INTPTR_MAX == LONG_MAX)
|
||||
return __builtin_clzl(x);
|
||||
#else
|
||||
return __builtin_clzll(x);
|
||||
#endif
|
||||
}
|
||||
static inline size_t mi_ctz(uintptr_t x) {
|
||||
if (x==0) return MI_INTPTR_BITS;
|
||||
#if (INTPTR_MAX == LONG_MAX)
|
||||
return __builtin_ctzl(x);
|
||||
#else
|
||||
return __builtin_ctzll(x);
|
||||
#endif
|
||||
}
|
||||
|
||||
#elif defined(_MSC_VER)
|
||||
|
||||
#include <limits.h> // LONG_MAX
|
||||
#include <intrin.h> // BitScanReverse64
|
||||
#define MI_HAVE_FAST_BITSCAN
|
||||
static inline size_t mi_clz(uintptr_t x) {
|
||||
if (x==0) return MI_INTPTR_BITS;
|
||||
unsigned long idx;
|
||||
#if (INTPTR_MAX == LONG_MAX)
|
||||
_BitScanReverse(&idx, x);
|
||||
#else
|
||||
_BitScanReverse64(&idx, x);
|
||||
#endif
|
||||
return ((MI_INTPTR_BITS - 1) - idx);
|
||||
}
|
||||
static inline size_t mi_ctz(uintptr_t x) {
|
||||
if (x==0) return MI_INTPTR_BITS;
|
||||
unsigned long idx;
|
||||
#if (INTPTR_MAX == LONG_MAX)
|
||||
_BitScanForward(&idx, x);
|
||||
#else
|
||||
_BitScanForward64(&idx, x);
|
||||
#endif
|
||||
return idx;
|
||||
}
|
||||
|
||||
#else
|
||||
static inline size_t mi_ctz32(uint32_t x) {
|
||||
// de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
|
||||
static const unsigned char debruijn[32] = {
|
||||
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
|
||||
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
|
||||
};
|
||||
if (x==0) return 32;
|
||||
return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27];
|
||||
}
|
||||
static inline size_t mi_clz32(uint32_t x) {
|
||||
// de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
|
||||
static const uint8_t debruijn[32] = {
|
||||
31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1,
|
||||
23, 19, 11, 3, 16, 14, 7, 24, 12, 4, 8, 25, 5, 26, 27, 0
|
||||
};
|
||||
if (x==0) return 32;
|
||||
x |= x >> 1;
|
||||
x |= x >> 2;
|
||||
x |= x >> 4;
|
||||
x |= x >> 8;
|
||||
x |= x >> 16;
|
||||
return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27];
|
||||
}
|
||||
|
||||
static inline size_t mi_clz(uintptr_t x) {
|
||||
if (x==0) return MI_INTPTR_BITS;
|
||||
#if (MI_INTPTR_BITS <= 32)
|
||||
return mi_clz32((uint32_t)x);
|
||||
#else
|
||||
size_t count = mi_clz32((uint32_t)(x >> 32));
|
||||
if (count < 32) return count;
|
||||
return (32 + mi_clz32((uint32_t)x));
|
||||
#endif
|
||||
}
|
||||
static inline size_t mi_ctz(uintptr_t x) {
|
||||
if (x==0) return MI_INTPTR_BITS;
|
||||
#if (MI_INTPTR_BITS <= 32)
|
||||
return mi_ctz32((uint32_t)x);
|
||||
#else
|
||||
size_t count = mi_ctz32((uint32_t)x);
|
||||
if (count < 32) return count;
|
||||
return (32 + mi_ctz32((uint32_t)(x>>32)));
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
// "bit scan reverse": Return index of the highest bit (or MI_INTPTR_BITS if `x` is zero)
|
||||
static inline size_t mi_bsr(uintptr_t x) {
|
||||
return (x==0 ? MI_INTPTR_BITS : MI_INTPTR_BITS - 1 - mi_clz(x));
|
||||
}
|
||||
|
||||
|
||||
// ---------------------------------------------------------------------------------
|
||||
// Provide our own `_mi_memcpy` for potential performance optimizations.
|
||||
//
|
||||
// For now, only on Windows with msvc/clang-cl we optimize to `rep movsb` if
|
||||
// we happen to run on x86/x64 cpu's that have "fast short rep movsb" (FSRM) support
|
||||
// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017). See also issue #201 and pr #253.
|
||||
// ---------------------------------------------------------------------------------
|
||||
|
||||
#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
|
||||
#include <intrin.h>
|
||||
extern bool _mi_cpu_has_fsrm;
|
||||
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
|
||||
if (_mi_cpu_has_fsrm) {
|
||||
__movsb((unsigned char*)dst, (const unsigned char*)src, n);
|
||||
}
|
||||
else {
|
||||
memcpy(dst, src, n);
|
||||
}
|
||||
}
|
||||
static inline void _mi_memzero(void* dst, size_t n) {
|
||||
if (_mi_cpu_has_fsrm) {
|
||||
__stosb((unsigned char*)dst, 0, n);
|
||||
}
|
||||
else {
|
||||
memset(dst, 0, n);
|
||||
}
|
||||
}
|
||||
#else
|
||||
// Portable copy: defer to the standard `memcpy`
// (regions must not overlap, as with `memcpy`).
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
  memcpy(dst, src, n);
}
|
||||
// Portable zeroing: defer to the standard `memset`.
static inline void _mi_memzero(void* dst, size_t n) {
  memset(dst, 0, n);
}
|
||||
#endif
|
||||
|
||||
// -------------------------------------------------------------------------------
|
||||
// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned
|
||||
// This is used for example in `mi_realloc`.
|
||||
// -------------------------------------------------------------------------------
|
||||
|
||||
#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
|
||||
// On GCC/CLang we provide a hint that the pointers are word aligned.
|
||||
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
|
||||
mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
|
||||
void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
|
||||
const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE);
|
||||
_mi_memcpy(adst, asrc, n);
|
||||
}
|
||||
|
||||
static inline void _mi_memzero_aligned(void* dst, size_t n) {
|
||||
mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
|
||||
void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
|
||||
_mi_memzero(adst, n);
|
||||
}
|
||||
#else
|
||||
// Default fallback on `_mi_memcpy`
|
||||
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
|
||||
mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
|
||||
_mi_memcpy(dst, src, n);
|
||||
}
|
||||
|
||||
static inline void _mi_memzero_aligned(void* dst, size_t n) {
|
||||
mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
|
||||
_mi_memzero(dst, n);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#endif
|
||||
329
extern/include/python/internal/mimalloc/mimalloc/prim.h
vendored
Normal file
329
extern/include/python/internal/mimalloc/mimalloc/prim.h
vendored
Normal file
@@ -0,0 +1,329 @@
|
||||
/* ----------------------------------------------------------------------------
|
||||
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
|
||||
This is free software; you can redistribute it and/or modify it under the
|
||||
terms of the MIT license. A copy of the license can be found in the file
|
||||
"LICENSE" at the root of this distribution.
|
||||
-----------------------------------------------------------------------------*/
|
||||
#pragma once
|
||||
#ifndef MIMALLOC_PRIM_H
|
||||
#define MIMALLOC_PRIM_H
|
||||
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// This file specifies the primitive portability API.
|
||||
// Each OS/host needs to implement these primitives, see `src/prim`
|
||||
// for implementations on Windows, macOS, WASI, and Linux/Unix.
|
||||
//
|
||||
// note: on all primitive functions, we always have result parameters != NULL, and:
|
||||
// addr != NULL and page aligned
|
||||
// size > 0 and page aligned
|
||||
// the return value is an error code: an int where 0 means success.
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// OS memory configuration
|
||||
// OS memory configuration, filled in by the platform's `_mi_prim_mem_init`.
typedef struct mi_os_mem_config_s {
  size_t page_size;            // OS page size (e.g. 4KiB)
  size_t large_page_size;      // large/huge OS page size (e.g. 2MiB)
  size_t alloc_granularity;    // smallest allocation size (on Windows 64KiB)
  bool has_overcommit;         // can we reserve more memory than can be actually committed?
  bool must_free_whole;        // must allocated blocks be freed as a whole (false for mmap, true for VirtualAlloc)
  bool has_virtual_reserve;    // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)
} mi_os_mem_config_t;
|
||||
|
||||
// Initialize
|
||||
void _mi_prim_mem_init( mi_os_mem_config_t* config );
|
||||
|
||||
// Free OS memory
|
||||
int _mi_prim_free(void* addr, size_t size );
|
||||
|
||||
// Allocate OS memory. Return NULL on error.
|
||||
// The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
|
||||
// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
|
||||
// which will later be committed explicitly using `_mi_prim_commit`.
|
||||
// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
|
||||
// pre: !commit => !allow_large
|
||||
// try_alignment >= _mi_os_page_size() and a power of 2
|
||||
int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr);
|
||||
|
||||
// Commit memory. Returns error code or 0 on success.
|
||||
// For example, on Linux this would make the memory PROT_READ|PROT_WRITE.
|
||||
// `is_zero` is set to true if the memory was zero initialized (e.g. on Windows)
|
||||
int _mi_prim_commit(void* addr, size_t size, bool* is_zero);
|
||||
|
||||
// Decommit memory. Returns error code or 0 on success. The `needs_recommit` result is true
|
||||
// if the memory would need to be re-committed. For example, on Windows this is always true,
|
||||
// but on Linux we could use MADV_DONTNEED to decommit which does not need a recommit.
|
||||
// pre: needs_recommit != NULL
|
||||
int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit);
|
||||
|
||||
// Reset memory. The range keeps being accessible but the content might be reset.
|
||||
// Returns error code or 0 on success.
|
||||
int _mi_prim_reset(void* addr, size_t size);
|
||||
|
||||
// Protect memory. Returns error code or 0 on success.
|
||||
int _mi_prim_protect(void* addr, size_t size, bool protect);
|
||||
|
||||
// Allocate huge (1GiB) pages possibly associated with a NUMA node.
|
||||
// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
|
||||
// pre: size > 0 and a multiple of 1GiB.
|
||||
// numa_node is either negative (don't care), or a numa node number.
|
||||
int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr);
|
||||
|
||||
// Return the current NUMA node
|
||||
size_t _mi_prim_numa_node(void);
|
||||
|
||||
// Return the number of logical NUMA nodes
|
||||
size_t _mi_prim_numa_node_count(void);
|
||||
|
||||
// Clock ticks
|
||||
mi_msecs_t _mi_prim_clock_now(void);
|
||||
|
||||
// Return process information (only for statistics)
|
||||
// Process statistics as returned by `_mi_prim_process_info`
// (used only for statistics reporting).
typedef struct mi_process_info_s {
  mi_msecs_t elapsed;        // elapsed wall-clock time
  mi_msecs_t utime;          // user time
  mi_msecs_t stime;          // system time
  size_t current_rss;        // current resident set size
  size_t peak_rss;           // peak resident set size
  size_t current_commit;     // current committed memory
  size_t peak_commit;        // peak committed memory
  size_t page_faults;        // page fault count
} mi_process_info_t;
|
||||
|
||||
void _mi_prim_process_info(mi_process_info_t* pinfo);
|
||||
|
||||
// Default stderr output. (only for warnings etc. with verbose enabled)
|
||||
// msg != NULL && _mi_strlen(msg) > 0
|
||||
void _mi_prim_out_stderr( const char* msg );
|
||||
|
||||
// Get an environment variable. (only for options)
|
||||
// name != NULL, result != NULL, result_size >= 64
|
||||
bool _mi_prim_getenv(const char* name, char* result, size_t result_size);
|
||||
|
||||
|
||||
// Fill a buffer with strong randomness; return `false` on error or if
|
||||
// there is no strong randomization available.
|
||||
bool _mi_prim_random_buf(void* buf, size_t buf_len);
|
||||
|
||||
// Called on the first thread start, and should ensure `_mi_thread_done` is called on thread termination.
|
||||
void _mi_prim_thread_init_auto_done(void);
|
||||
|
||||
// Called on process exit and may take action to clean up resources associated with the thread auto done.
|
||||
void _mi_prim_thread_done_auto_done(void);
|
||||
|
||||
// Called when the default heap for a thread changes
|
||||
void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
|
||||
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Thread id: `_mi_prim_thread_id()`
|
||||
//
|
||||
// Getting the thread id should be performant as it is called in the
|
||||
// fast path of `_mi_free` and we specialize for various platforms as
|
||||
// inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
|
||||
// We only require _mi_prim_thread_id() to return a unique id
|
||||
// for each thread (unequal to zero).
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
// defined in `init.c`; do not use these directly
|
||||
extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from
|
||||
extern bool _mi_process_is_initialized; // has mi_process_init been called?
|
||||
|
||||
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept;
|
||||
|
||||
#ifdef MI_PRIM_THREAD_ID
|
||||
|
||||
// User override: delegate the thread id to the MI_PRIM_THREAD_ID macro.
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
  return MI_PRIM_THREAD_ID();
}
|
||||
|
||||
#elif defined(_WIN32)
|
||||
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
// Use the TEB (thread environment block) address as the unique thread id.
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
  // Windows: works on Intel and ARM in both 32- and 64-bit
  return (uintptr_t)NtCurrentTeb();
}
|
||||
|
||||
// We use assembly for a fast thread id on the main platforms. The TLS layout depends on
|
||||
// both the OS and libc implementation so we use specific tests for each main platform.
|
||||
// If you test on another platform and it works please send a PR :-)
|
||||
// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
|
||||
#elif defined(__GNUC__) && ( \
|
||||
(defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \
|
||||
|| (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__))) \
|
||||
|| (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \
|
||||
|| (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
|
||||
|| (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
|
||||
)
|
||||
|
||||
// Read entry `slot` of the thread control block (TCB) directly via the
// platform's TLS register, avoiding any libc TLS machinery (which may
// itself call malloc). Only compiled for the OS/libc/arch combinations
// selected by the enclosing #elif condition.
static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept {
  void* res;
  const size_t ofs = (slot*sizeof(void*));  // byte offset of the slot
  #if defined(__i386__)
  __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86 32-bit always uses GS
  #elif defined(__APPLE__) && defined(__x86_64__)
  __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86_64 macOSX uses GS
  #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
  __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x32 ABI
  #elif defined(__x86_64__)
  __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86_64 Linux, BSD uses FS
  #elif defined(__arm__)
  void** tcb; MI_UNUSED(ofs);
  // read the TLS base from CP15 and mask off the low (status) bits
  __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
  res = tcb[slot];
  #elif defined(__aarch64__)
  void** tcb; MI_UNUSED(ofs);
  #if defined(__APPLE__) // M1, issue #343
  __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
  #else
  __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
  #endif
  res = tcb[slot];
  #endif
  return res;
}
|
||||
|
||||
// setting a tls slot is only used on macOS for now
|
||||
// Write `value` into entry `slot` of the thread control block (TCB),
// mirroring `mi_prim_tls_slot`. Setting a TLS slot is only used on
// macOS for now.
static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
  const size_t ofs = (slot*sizeof(void*));  // byte offset of the slot
  #if defined(__i386__)
  __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // 32-bit always uses GS
  #elif defined(__APPLE__) && defined(__x86_64__)
  __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x86_64 macOS uses GS
  #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
  __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x32 ABI
  #elif defined(__x86_64__)
  __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x86_64 Linux, BSD uses FS
  #elif defined(__arm__)
  void** tcb; MI_UNUSED(ofs);
  // read the TLS base from CP15 and mask off the low (status) bits
  __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
  tcb[slot] = value;
  #elif defined(__aarch64__)
  void** tcb; MI_UNUSED(ofs);
  #if defined(__APPLE__) // M1, issue #343
  __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
  #else
  __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
  #endif
  tcb[slot] = value;
  #endif
}
|
||||
|
||||
// Return a unique non-zero thread id by reading the thread pointer
// from a well-known TLS slot.
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
  #if defined(__BIONIC__)
  // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id
  // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86
  return (uintptr_t)mi_prim_tls_slot(1);
  #else
  // in all our other targets, slot 0 is the thread id
  // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h
  // apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36
  return (uintptr_t)mi_prim_tls_slot(0);
  #endif
}
|
||||
|
||||
#else
|
||||
|
||||
// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms).
|
||||
// Portable fallback: the address of a thread-local variable is unique
// (and non-zero) per thread; still very fast on most platforms.
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
  return (uintptr_t)&_mi_heap_default;
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
/* ----------------------------------------------------------------------------------------
|
||||
The thread local default heap: `_mi_prim_get_default_heap()`
|
||||
This is inlined here as it is on the fast path for allocation functions.
|
||||
|
||||
On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a
|
||||
__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures
|
||||
that the storage will always be available (allocated on the thread stacks).
|
||||
|
||||
On some platforms though we cannot use that when overriding `malloc` since the underlying
|
||||
TLS implementation (or the loader) will call itself `malloc` on a first access and recurse.
|
||||
We try to circumvent this in an efficient way:
|
||||
- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the
|
||||
loader itself calls `malloc` even before the modules are initialized.
|
||||
- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
|
||||
- DragonFly: defaults are working but seem slow compared to freeBSD (see PR #323)
|
||||
------------------------------------------------------------------------------------------- */
|
||||
|
||||
static inline mi_heap_t* mi_prim_get_default_heap(void);
|
||||
|
||||
#if defined(MI_MALLOC_OVERRIDE)
|
||||
#if defined(__APPLE__) // macOS
|
||||
#define MI_TLS_SLOT 89 // seems unused?
|
||||
// #define MI_TLS_RECURSE_GUARD 1
|
||||
// other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
|
||||
// see <https://github.com/rweichler/substrate/blob/master/include/pthread_machdep.h>
|
||||
#elif defined(__OpenBSD__)
|
||||
// use end bytes of a name; goes wrong if anyone uses names > 23 characters (pthread specifies 16)
|
||||
// see <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371>
|
||||
#define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24)
|
||||
// #elif defined(__DragonFly__)
|
||||
// #warning "mimalloc is not working correctly on DragonFly yet."
|
||||
// #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
|
||||
#elif defined(__ANDROID__)
|
||||
// See issue #381
|
||||
#define MI_TLS_PTHREAD
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(MI_TLS_SLOT)
|
||||
|
||||
// Default heap from a raw TLS slot (macOS); never returns NULL —
// an uninitialized slot maps to the static `_mi_heap_empty`.
static inline mi_heap_t* mi_prim_get_default_heap(void) {
  mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT);
  if mi_unlikely(heap == NULL) {
    #ifdef __GNUC__
    __asm(""); // prevent conditional load of the address of _mi_heap_empty
    #endif
    heap = (mi_heap_t*)&_mi_heap_empty;
  }
  return heap;
}
|
||||
|
||||
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
|
||||
|
||||
static inline mi_heap_t** mi_prim_tls_pthread_heap_slot(void) {
|
||||
pthread_t self = pthread_self();
|
||||
#if defined(__DragonFly__)
|
||||
if (self==NULL) return NULL;
|
||||
#endif
|
||||
return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
|
||||
}
|
||||
|
||||
static inline mi_heap_t* mi_prim_get_default_heap(void) {
|
||||
mi_heap_t** pheap = mi_prim_tls_pthread_heap_slot();
|
||||
if mi_unlikely(pheap == NULL) return _mi_heap_main_get();
|
||||
mi_heap_t* heap = *pheap;
|
||||
if mi_unlikely(heap == NULL) return (mi_heap_t*)&_mi_heap_empty;
|
||||
return heap;
|
||||
}
|
||||
|
||||
#elif defined(MI_TLS_PTHREAD)
|
||||
|
||||
extern pthread_key_t _mi_heap_default_key;
|
||||
static inline mi_heap_t* mi_prim_get_default_heap(void) {
|
||||
mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key));
|
||||
return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
|
||||
}
|
||||
|
||||
#else // default using a thread local variable; used on most platforms.
|
||||
|
||||
// Default: read the `_mi_heap_default` thread-local variable directly;
// used on most platforms.
static inline mi_heap_t* mi_prim_get_default_heap(void) {
  #if defined(MI_TLS_RECURSE_GUARD)
  // avoid touching thread-local storage before process initialization
  // (the TLS access itself could recurse into malloc)
  if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get();
  #endif
  return _mi_heap_default;
}
|
||||
|
||||
#endif // mi_prim_get_default_heap()
|
||||
|
||||
|
||||
|
||||
#endif // MIMALLOC_PRIM_H
|
||||
147
extern/include/python/internal/mimalloc/mimalloc/track.h
vendored
Normal file
147
extern/include/python/internal/mimalloc/mimalloc/track.h
vendored
Normal file
@@ -0,0 +1,147 @@
|
||||
/* ----------------------------------------------------------------------------
|
||||
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
|
||||
This is free software; you can redistribute it and/or modify it under the
|
||||
terms of the MIT license. A copy of the license can be found in the file
|
||||
"LICENSE" at the root of this distribution.
|
||||
-----------------------------------------------------------------------------*/
|
||||
#pragma once
|
||||
#ifndef MIMALLOC_TRACK_H
|
||||
#define MIMALLOC_TRACK_H
|
||||
|
||||
/* ------------------------------------------------------------------------------------------------------
|
||||
Track memory ranges with macros for tools like Valgrind address sanitizer, or other memory checkers.
|
||||
These can be defined for tracking allocation:
|
||||
|
||||
#define mi_track_malloc_size(p,reqsize,size,zero)
|
||||
#define mi_track_free_size(p,_size)
|
||||
|
||||
The macros are set up such that the size passed to `mi_track_free_size`
|
||||
always matches the size of `mi_track_malloc_size`. (currently, `size == mi_usable_size(p)`).
|
||||
The `reqsize` is what the user requested, and `size >= reqsize`.
|
||||
The `size` is either byte precise (and `size==reqsize`) if `MI_PADDING` is enabled,
|
||||
or otherwise it is the usable block size which may be larger than the original request.
|
||||
Use `_mi_block_size_of(void* p)` to get the full block size that was allocated (including padding etc).
|
||||
The `zero` parameter is `true` if the allocated block is zero initialized.
|
||||
|
||||
Optional:
|
||||
|
||||
#define mi_track_align(p,alignedp,offset,size)
|
||||
#define mi_track_resize(p,oldsize,newsize)
|
||||
#define mi_track_init()
|
||||
|
||||
The `mi_track_align` is called right after a `mi_track_malloc` for aligned pointers in a block.
|
||||
The corresponding `mi_track_free` still uses the block start pointer and original size (corresponding to the `mi_track_malloc`).
|
||||
The `mi_track_resize` is currently unused but could be called on reallocations within a block.
|
||||
`mi_track_init` is called at program start.
|
||||
|
||||
The following macros are for tools like asan and valgrind to track whether memory is
|
||||
defined, undefined, or not accessible at all:
|
||||
|
||||
#define mi_track_mem_defined(p,size)
|
||||
#define mi_track_mem_undefined(p,size)
|
||||
#define mi_track_mem_noaccess(p,size)
|
||||
|
||||
-------------------------------------------------------------------------------------------------------*/
|
||||
|
||||
#if MI_TRACK_VALGRIND
|
||||
// valgrind tool
|
||||
|
||||
#define MI_TRACK_ENABLED 1
|
||||
#define MI_TRACK_HEAP_DESTROY 1 // track free of individual blocks on heap_destroy
|
||||
#define MI_TRACK_TOOL "valgrind"
|
||||
|
||||
#include <valgrind/valgrind.h>
|
||||
#include <valgrind/memcheck.h>
|
||||
|
||||
#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero)
|
||||
#define mi_track_free_size(p,_size) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
|
||||
#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/)
|
||||
#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size)
|
||||
#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size)
|
||||
#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size)
|
||||
|
||||
#elif MI_TRACK_ASAN
|
||||
// address sanitizer
|
||||
|
||||
#define MI_TRACK_ENABLED 1
|
||||
#define MI_TRACK_HEAP_DESTROY 0
|
||||
#define MI_TRACK_TOOL "asan"
|
||||
|
||||
#include <sanitizer/asan_interface.h>
|
||||
|
||||
#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size)
|
||||
#define mi_track_free_size(p,size) ASAN_POISON_MEMORY_REGION(p,size)
|
||||
#define mi_track_mem_defined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
|
||||
#define mi_track_mem_undefined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
|
||||
#define mi_track_mem_noaccess(p,size) ASAN_POISON_MEMORY_REGION(p,size)
|
||||
|
||||
#elif MI_TRACK_ETW
|
||||
// windows event tracing
|
||||
|
||||
#define MI_TRACK_ENABLED 1
|
||||
#define MI_TRACK_HEAP_DESTROY 1
|
||||
#define MI_TRACK_TOOL "ETW"
|
||||
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
#include "../src/prim/windows/etw.h"
|
||||
|
||||
#define mi_track_init() EventRegistermicrosoft_windows_mimalloc();
|
||||
#define mi_track_malloc_size(p,reqsize,size,zero) EventWriteETW_MI_ALLOC((UINT64)(p), size)
|
||||
#define mi_track_free_size(p,size) EventWriteETW_MI_FREE((UINT64)(p), size)
|
||||
|
||||
#else
|
||||
// no tracking
|
||||
|
||||
#define MI_TRACK_ENABLED 0
|
||||
#define MI_TRACK_HEAP_DESTROY 0
|
||||
#define MI_TRACK_TOOL "none"
|
||||
|
||||
#define mi_track_malloc_size(p,reqsize,size,zero)
|
||||
#define mi_track_free_size(p,_size)
|
||||
|
||||
#endif
|
||||
|
||||
// -------------------
|
||||
// Utility definitions
|
||||
|
||||
#ifndef mi_track_resize
|
||||
#define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false)
|
||||
#endif
|
||||
|
||||
#ifndef mi_track_align
|
||||
#define mi_track_align(p,alignedp,offset,size) mi_track_mem_noaccess(p,offset)
|
||||
#endif
|
||||
|
||||
#ifndef mi_track_init
|
||||
#define mi_track_init()
|
||||
#endif
|
||||
|
||||
#ifndef mi_track_mem_defined
|
||||
#define mi_track_mem_defined(p,size)
|
||||
#endif
|
||||
|
||||
#ifndef mi_track_mem_undefined
|
||||
#define mi_track_mem_undefined(p,size)
|
||||
#endif
|
||||
|
||||
#ifndef mi_track_mem_noaccess
|
||||
#define mi_track_mem_noaccess(p,size)
|
||||
#endif
|
||||
|
||||
|
||||
#if MI_PADDING
|
||||
#define mi_track_malloc(p,reqsize,zero) \
|
||||
if ((p)!=NULL) { \
|
||||
mi_assert_internal(mi_usable_size(p)==(reqsize)); \
|
||||
mi_track_malloc_size(p,reqsize,reqsize,zero); \
|
||||
}
|
||||
#else
|
||||
#define mi_track_malloc(p,reqsize,zero) \
|
||||
if ((p)!=NULL) { \
|
||||
mi_assert_internal(mi_usable_size(p)>=(reqsize)); \
|
||||
mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
721
extern/include/python/internal/mimalloc/mimalloc/types.h
vendored
Normal file
721
extern/include/python/internal/mimalloc/mimalloc/types.h
vendored
Normal file
@@ -0,0 +1,721 @@
|
||||
/* ----------------------------------------------------------------------------
|
||||
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
|
||||
This is free software; you can redistribute it and/or modify it under the
|
||||
terms of the MIT license. A copy of the license can be found in the file
|
||||
"LICENSE" at the root of this distribution.
|
||||
-----------------------------------------------------------------------------*/
|
||||
#pragma once
|
||||
#ifndef MIMALLOC_TYPES_H
|
||||
#define MIMALLOC_TYPES_H
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// This file contains the main type definitions for mimalloc:
|
||||
// mi_heap_t : all data for a thread-local heap, contains
|
||||
// lists of all managed heap pages.
|
||||
// mi_segment_t : a larger chunk of memory (32GiB) from where pages
|
||||
// are allocated.
|
||||
// mi_page_t : a mimalloc page (usually 64KiB or 512KiB) from
|
||||
// where objects are allocated.
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
|
||||
#include <stddef.h> // ptrdiff_t
|
||||
#include <stdint.h> // uintptr_t, uint16_t, etc
|
||||
#include "atomic.h" // _Atomic
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning(disable:4214) // bitfield is not int
|
||||
#endif
|
||||
|
||||
// Minimal alignment necessary. On most platforms 16 bytes are needed
|
||||
// due to SSE registers for example. This must be at least `sizeof(void*)`
|
||||
#ifndef MI_MAX_ALIGN_SIZE
|
||||
#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t)
|
||||
#endif
|
||||
|
||||
#define MI_CACHE_LINE 64
|
||||
#if defined(_MSC_VER)
|
||||
#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths)
|
||||
#pragma warning(disable:26812) // unscoped enum warning
|
||||
#define mi_decl_noinline __declspec(noinline)
|
||||
#define mi_decl_thread __declspec(thread)
|
||||
#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE))
|
||||
#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc
|
||||
#define mi_decl_noinline __attribute__((noinline))
|
||||
#define mi_decl_thread __thread
|
||||
#define mi_decl_cache_align __attribute__((aligned(MI_CACHE_LINE)))
|
||||
#else
|
||||
#define mi_decl_noinline
|
||||
#define mi_decl_thread __thread // hope for the best :-)
|
||||
#define mi_decl_cache_align
|
||||
#endif
|
||||
|
||||
// ------------------------------------------------------
|
||||
// Variants
|
||||
// ------------------------------------------------------
|
||||
|
||||
// Define NDEBUG in the release version to disable assertions.
|
||||
// #define NDEBUG
|
||||
|
||||
// Define MI_TRACK_<tool> to enable tracking support
|
||||
// #define MI_TRACK_VALGRIND 1
|
||||
// #define MI_TRACK_ASAN 1
|
||||
// #define MI_TRACK_ETW 1
|
||||
|
||||
// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
|
||||
// #define MI_STAT 1
|
||||
|
||||
// Define MI_SECURE to enable security mitigations
|
||||
// Compile-time hardening level; higher values add protection at some cost:
// #define MI_SECURE 1  // guard page around metadata
// #define MI_SECURE 2  // guard page around each mimalloc page
// #define MI_SECURE 3  // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
// #define MI_SECURE 4  // checks for double free. (may be more expensive)

#if !defined(MI_SECURE)
#define MI_SECURE 0
#endif

// Define MI_DEBUG for debug mode
// #define MI_DEBUG 1  // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free.
// #define MI_DEBUG 2  // + internal assertion checks
// #define MI_DEBUG 3  // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON)
#if !defined(MI_DEBUG)
// default: debug checks on in non-release builds, off otherwise
#if !defined(NDEBUG) || defined(_DEBUG)
#define MI_DEBUG 2
#else
#define MI_DEBUG 0
#endif
#endif

// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
// The padding can detect buffer overflow on free.
#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))
#define MI_PADDING  1
#endif

// Check padding bytes; allows byte-precise buffer overflow detection
#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_PADDING_CHECK 1
#endif

// Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows, modify after free, and double `free`s.
#if (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_ENCODE_FREELIST  1
#endif

// We used to abandon huge pages but to eagerly deallocate if freed from another thread,
// but that makes it not possible to visit them during a heap walk or include them in a
// `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks if freed from
// another thread so most memory is available until it gets properly freed by the owning thread.
// #define MI_HUGE_PAGE_ABANDON 1
// ------------------------------------------------------
// Platform specific values
// ------------------------------------------------------

// ------------------------------------------------------
// Size of a pointer.
// We assume that `sizeof(void*)==sizeof(intptr_t)`
// and it holds for all platforms we know of.
//
// However, the C standard only requires that:
//  p == (void*)((intptr_t)p))
// but we also need:
//  i == (intptr_t)((void*)i)
// or otherwise one might define an intptr_t type that is larger than a pointer...
// ------------------------------------------------------

// MI_INTPTR_SHIFT: log2 of the pointer size in bytes, derived from <stdint.h> limits.
#if INTPTR_MAX > INT64_MAX
# define MI_INTPTR_SHIFT (4)  // assume 128-bit  (as on arm CHERI for example)
#elif INTPTR_MAX == INT64_MAX
# define MI_INTPTR_SHIFT (3)
#elif INTPTR_MAX == INT32_MAX
# define MI_INTPTR_SHIFT (2)
#else
#error platform pointers must be 32, 64, or 128 bits
#endif

// MI_SIZE_SHIFT: log2 of sizeof(size_t); mi_ssize_t is the signed counterpart of size_t.
#if SIZE_MAX == UINT64_MAX
# define MI_SIZE_SHIFT (3)
typedef int64_t  mi_ssize_t;
#elif SIZE_MAX == UINT32_MAX
# define MI_SIZE_SHIFT (2)
typedef int32_t  mi_ssize_t;
#else
#error platform objects must be 32 or 64 bits
#endif

// MI_ZU/MI_ZI: suffix integer literals so they have the width of size_t/mi_ssize_t.
#if (SIZE_MAX/2) > LONG_MAX
# define MI_ZU(x)  x##ULL
# define MI_ZI(x)  x##LL
#else
# define MI_ZU(x)  x##UL
# define MI_ZI(x)  x##L
#endif

#define MI_INTPTR_SIZE  (1<<MI_INTPTR_SHIFT)
#define MI_INTPTR_BITS  (MI_INTPTR_SIZE*8)

#define MI_SIZE_SIZE  (1<<MI_SIZE_SHIFT)
#define MI_SIZE_BITS  (MI_SIZE_SIZE*8)

// Size units as size_t-typed constants.
#define MI_KiB     (MI_ZU(1024))
#define MI_MiB     (MI_KiB*MI_KiB)
#define MI_GiB     (MI_MiB*MI_KiB)
// ------------------------------------------------------
// Main internal data-structures
// ------------------------------------------------------

// Main tuning parameters for segment and page sizes
// Sizes for 64-bit (usually divide by two for 32-bit)
#define MI_SEGMENT_SLICE_SHIFT            (13 + MI_INTPTR_SHIFT)         // 64KiB  (32KiB on 32-bit)

#if MI_INTPTR_SIZE > 4
#define MI_SEGMENT_SHIFT                  ( 9 + MI_SEGMENT_SLICE_SHIFT)  // 32MiB
#else
#define MI_SEGMENT_SHIFT                  ( 7 + MI_SEGMENT_SLICE_SHIFT)  // 4MiB on 32-bit
#endif

#define MI_SMALL_PAGE_SHIFT               (MI_SEGMENT_SLICE_SHIFT)       // 64KiB
#define MI_MEDIUM_PAGE_SHIFT              ( 3 + MI_SMALL_PAGE_SHIFT)     // 512KiB

// Derived constants
#define MI_SEGMENT_SIZE                   (MI_ZU(1)<<MI_SEGMENT_SHIFT)
#define MI_SEGMENT_ALIGN                  MI_SEGMENT_SIZE
#define MI_SEGMENT_MASK                   ((uintptr_t)(MI_SEGMENT_ALIGN - 1))
#define MI_SEGMENT_SLICE_SIZE             (MI_ZU(1)<< MI_SEGMENT_SLICE_SHIFT)
#define MI_SLICES_PER_SEGMENT             (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE) // 1024

#define MI_SMALL_PAGE_SIZE                (MI_ZU(1)<<MI_SMALL_PAGE_SHIFT)
#define MI_MEDIUM_PAGE_SIZE               (MI_ZU(1)<<MI_MEDIUM_PAGE_SHIFT)

#define MI_SMALL_OBJ_SIZE_MAX             (MI_SMALL_PAGE_SIZE/4)   // 8KiB on 64-bit
#define MI_MEDIUM_OBJ_SIZE_MAX            (MI_MEDIUM_PAGE_SIZE/4)  // 128KiB on 64-bit
#define MI_MEDIUM_OBJ_WSIZE_MAX           (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
#define MI_LARGE_OBJ_SIZE_MAX             (MI_SEGMENT_SIZE/2)      // 32MiB on 64-bit
#define MI_LARGE_OBJ_WSIZE_MAX            (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)

// Maximum number of size classes. (spaced exponentially in 12.5% increments)
#define MI_BIN_HUGE  (73U)

#if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
#error "mimalloc internal: define more bins"
#endif

// Maximum slice offset (15)
#define MI_MAX_SLICE_OFFSET               ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)

// Used as a special value to encode block sizes in 32 bits.
#define MI_HUGE_BLOCK_SIZE                ((uint32_t)(2*MI_GiB))

// blocks up to this size are always allocated aligned
#define MI_MAX_ALIGN_GUARANTEE            (8*MI_MAX_ALIGN_SIZE)

// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
#define MI_ALIGNMENT_MAX                  (MI_SEGMENT_SIZE >> 1)
// ------------------------------------------------------
// Mimalloc pages contain allocated blocks
// ------------------------------------------------------

// The free lists use encoded next fields
// (Only actually encodes when MI_ENCODED_FREELIST is defined.)
typedef uintptr_t mi_encoded_t;

// thread id's
typedef size_t mi_threadid_t;

// free lists contain blocks; a block's only field is the (possibly encoded)
// link to the next free block
typedef struct mi_block_s {
  _Atomic(mi_encoded_t) next;
} mi_block_t;

// The delayed flags are used for efficient multi-threaded free-ing
typedef enum mi_delayed_e {
  MI_USE_DELAYED_FREE   = 0, // push on the owning heap thread delayed list
  MI_DELAYED_FREEING    = 1, // temporary: another thread is accessing the owning heap
  MI_NO_DELAYED_FREE    = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
  MI_NEVER_DELAYED_FREE = 3  // sticky, only resets on page reclaim
} mi_delayed_t;

// The `in_full` and `has_aligned` page flags are put in a union to efficiently
// test if both are false (`full_aligned == 0`) in the `mi_free` routine.
#if !MI_TSAN
typedef union mi_page_flags_s {
  uint8_t full_aligned;
  struct {
    uint8_t in_full : 1;
    uint8_t has_aligned : 1;
  } x;
} mi_page_flags_t;
#else
// under thread sanitizer, use a byte for each flag to suppress warning, issue #130
typedef union mi_page_flags_s {
  uint16_t full_aligned;
  struct {
    uint8_t in_full;
    uint8_t has_aligned;
  } x;
} mi_page_flags_t;
#endif

// Thread free list.
// We use the bottom 2 bits of the pointer for mi_delayed_t flags
typedef uintptr_t mi_thread_free_t;
// A page contains blocks of one specific size (`block_size`).
// Each page has three list of free blocks:
// `free` for blocks that can be allocated,
// `local_free` for freed blocks that are not yet available to `mi_malloc`
// `thread_free` for freed blocks by other threads
// The `local_free` and `thread_free` lists are migrated to the `free` list
// when it is exhausted. The separate `local_free` list is necessary to
// implement a monotonic heartbeat. The `thread_free` list is needed for
// avoiding atomic operations in the common case.
//
//
// `used - |thread_free|` == actual blocks that are in use (alive)
// `used - |thread_free| + |free| + |local_free| == capacity`
//
// We don't count `freed` (as |free|) but use `used` to reduce
// the number of memory accesses in the `mi_page_all_free` function(s).
//
// Notes:
// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`)
// - Using `uint16_t` does not seem to slow things down
// - The size is 8 words on 64-bit which helps the page index calculations
//   (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
//    and 12 are still good for address calculation)
// - To limit the structure size, the `xblock_size` is 32-bits only; for
//   blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size
// - `thread_free` uses the bottom bits as a delayed-free flags to optimize
//   concurrent frees where only the first concurrent free adds to the owning
//   heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`).
//   The invariant is that no-delayed-free is only set if there is
//   at least one block that will be added, or as already been added, to
//   the owning heap `thread_delayed_free` list. This guarantees that pages
//   will be freed correctly even if only other threads free blocks.
//
// NOTE(review): field order is layout-sensitive (see the `capacity` comment and
// the size notes above) — do not reorder fields.
typedef struct mi_page_s {
  // "owned" by the segment
  uint32_t              slice_count;       // slices in this page (0 if not a page)
  uint32_t              slice_offset;      // distance from the actual page data slice (0 if a page)
  uint8_t               is_committed : 1;  // `true` if the page virtual memory is committed
  uint8_t               is_zero_init : 1;  // `true` if the page was initially zero initialized
  uint8_t               use_qsbr : 1;      // delay page freeing using qsbr
  uint8_t               tag : 4;           // tag from the owning heap
  uint8_t               debug_offset;      // number of bytes to preserve when filling freed or uninitialized memory

  // layout like this to optimize access in `mi_malloc` and `mi_free`
  uint16_t              capacity;          // number of blocks committed, must be the first field, see `segment.c:page_clear`
  uint16_t              reserved;          // number of blocks reserved in memory
  mi_page_flags_t       flags;             // `in_full` and `has_aligned` flags (8 bits)
  uint8_t               free_is_zero : 1;  // `true` if the blocks in the free list are zero initialized
  uint8_t               retire_expire : 7; // expiration count for retired blocks

  mi_block_t*           free;              // list of available free blocks (`malloc` allocates from this list)
  uint32_t              used;              // number of blocks in use (including blocks in `local_free` and `thread_free`)
  uint32_t              xblock_size;       // size available in each block (always `>0`)
  mi_block_t*           local_free;        // list of deferred free blocks by this thread (migrates to `free`)

  #if (MI_ENCODE_FREELIST || MI_PADDING)
  uintptr_t             keys[2];           // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
  #endif

  _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads
  _Atomic(uintptr_t)        xheap;

  struct mi_page_s*     next;              // next page owned by this thread with the same `block_size`
  struct mi_page_s*     prev;              // previous page owned by this thread with the same `block_size`

  #ifdef Py_GIL_DISABLED
  struct llist_node     qsbr_node;
  uint64_t              qsbr_goal;
  #endif

  // 64-bit 9 words, 32-bit 12 words, (+2 for secure)
  #if MI_INTPTR_SIZE==8 && !defined(Py_GIL_DISABLED)
  uintptr_t padding[1];
  #endif
} mi_page_t;
// ------------------------------------------------------
// Mimalloc segments contain mimalloc pages
// ------------------------------------------------------

typedef enum mi_page_kind_e {
  MI_PAGE_SMALL,    // small blocks go into 64KiB pages inside a segment
  MI_PAGE_MEDIUM,   // medium blocks go into medium pages inside a segment
  MI_PAGE_LARGE,    // larger blocks go into a page of just one block
  MI_PAGE_HUGE,     // huge blocks (> 16 MiB) are put into a single page in a single segment.
} mi_page_kind_t;

typedef enum mi_segment_kind_e {
  MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
  MI_SEGMENT_HUGE,   // > MI_LARGE_SIZE_MAX segment with just one huge page inside.
} mi_segment_kind_t;

// ------------------------------------------------------
// A segment holds a commit mask where a bit is set if
// the corresponding MI_COMMIT_SIZE area is committed.
// The MI_COMMIT_SIZE must be a multiple of the slice
// size. If it is equal we have the most fine grained
// decommit (but setting it higher can be more efficient).
// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
// be committed in one go which can be set higher than
// MI_COMMIT_SIZE for efficiency (while the decommit mask
// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
// ------------------------------------------------------

#define MI_MINIMAL_COMMIT_SIZE      (1*MI_SEGMENT_SLICE_SIZE)
#define MI_COMMIT_SIZE              (MI_SEGMENT_SLICE_SIZE)              // 64KiB
#define MI_COMMIT_MASK_BITS         (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
#define MI_COMMIT_MASK_FIELD_BITS    MI_SIZE_BITS
#define MI_COMMIT_MASK_FIELD_COUNT  (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)

#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS))
#error "the segment size must be exactly divisible by the (commit size * size_t bits)"
#endif

// One bit per MI_COMMIT_SIZE area, packed into size_t words.
typedef struct mi_commit_mask_s {
  size_t mask[MI_COMMIT_MASK_FIELD_COUNT];
} mi_commit_mask_t;

// A slice is represented by the page structure of its first slice.
typedef mi_page_t  mi_slice_t;
typedef int64_t    mi_msecs_t;
// Memory can reside in arena's, direct OS allocated, or statically allocated. The memid keeps track of this.
typedef enum mi_memkind_e {
  MI_MEM_NONE,      // not allocated
  MI_MEM_EXTERNAL,  // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
  MI_MEM_STATIC,    // allocated in a static area and should not be freed (for arena meta data for example)
  MI_MEM_OS,        // allocated from the OS
  MI_MEM_OS_HUGE,   // allocated as huge os pages
  MI_MEM_OS_REMAP,  // allocated in a remapable area (i.e. using `mremap`)
  MI_MEM_ARENA      // allocated from an arena (the usual case)
} mi_memkind_t;

// Is this memory kind one of the OS-backed variants
// (MI_MEM_OS, MI_MEM_OS_HUGE, or MI_MEM_OS_REMAP)?
static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
  switch (memkind) {
    case MI_MEM_OS:
    case MI_MEM_OS_HUGE:
    case MI_MEM_OS_REMAP:
      return true;
    default:
      return false;
  }
}
// Extra info kept when memory was allocated directly from the OS.
typedef struct mi_memid_os_info {
  void*         base;               // actual base address of the block (used for offset aligned allocations)
  size_t        alignment;          // alignment at allocation
} mi_memid_os_info_t;

// Extra info kept when memory was allocated from an arena.
typedef struct mi_memid_arena_info {
  size_t        block_index;        // index in the arena
  mi_arena_id_t id;                 // arena id (>= 1)
  bool          is_exclusive;       // the arena can only be used for specific arena allocations
} mi_memid_arena_info_t;

// Tracks where a piece of memory came from (see mi_memkind_t) and how it
// was allocated, so it can be returned to the right place on free.
typedef struct mi_memid_s {
  union {
    mi_memid_os_info_t    os;       // only used for MI_MEM_OS
    mi_memid_arena_info_t arena;    // only used for MI_MEM_ARENA
  } mem;
  bool          is_pinned;          // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large OS pages)
  bool          initially_committed;// `true` if the memory was originally allocated as committed
  bool          initially_zero;     // `true` if the memory was originally zero initialized
  mi_memkind_t  memkind;
} mi_memid_t;
// Segments are large allocated memory blocks (8mb on 64 bit) from
// the OS. Inside segments we allocated fixed size _pages_ that
// contain blocks.
typedef struct mi_segment_s {
  // constant fields
  mi_memid_t        memid;              // memory id for arena allocation
  bool              allow_decommit;
  bool              allow_purge;
  size_t            segment_size;

  // segment fields
  mi_msecs_t        purge_expire;
  mi_commit_mask_t  purge_mask;
  mi_commit_mask_t  commit_mask;

  _Atomic(struct mi_segment_s*) abandoned_next;

  // from here is zero initialized
  struct mi_segment_s* next;            // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)

  size_t            abandoned;          // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
  size_t            abandoned_visits;   // count how often this segment is visited in the abandoned list (to force reclaim it it is too long)
  size_t            used;               // count of pages in use
  uintptr_t         cookie;             // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`

  size_t            segment_slices;      // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
  size_t            segment_info_slices; // initial slices we are using segment info and possible guard pages.

  // layout like this to optimize access in `mi_free`
  mi_segment_kind_t kind;
  size_t            slice_entries;       // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
  _Atomic(mi_threadid_t) thread_id;      // unique id of the thread owning this segment

  mi_slice_t        slices[MI_SLICES_PER_SEGMENT+1];  // one more for huge blocks with large alignment
} mi_segment_t;

// Abandoned-list entry: a segment pointer tagged (in low bits) to support
// an ABA-safe pop.
typedef uintptr_t mi_tagged_segment_t;

// Segments unowned by any thread are put in a shared pool
typedef struct mi_abandoned_pool_s {
  // This is a list of visited abandoned pages that were full at the time.
  // this list migrates to `abandoned` when that becomes NULL. The use of
  // this list reduces contention and the rate at which segments are visited.
  mi_decl_cache_align _Atomic(mi_segment_t*)       abandoned_visited; // = NULL

  // The abandoned page list (tagged as it supports pop)
  mi_decl_cache_align _Atomic(mi_tagged_segment_t) abandoned;         // = NULL

  // Maintain these for debug purposes (these counts may be a bit off)
  mi_decl_cache_align _Atomic(size_t)              abandoned_count;
  mi_decl_cache_align _Atomic(size_t)              abandoned_visited_count;

  // We also maintain a count of current readers of the abandoned list
  // in order to prevent resetting/decommitting segment memory if it might
  // still be read.
  mi_decl_cache_align _Atomic(size_t)              abandoned_readers; // = 0
} mi_abandoned_pool_t;
// ------------------------------------------------------
// Heaps
// Provide first-class heaps to allocate from.
// A heap just owns a set of pages for allocation and
// can only be allocate/reallocate from the thread that created it.
// Freeing blocks can be done from any thread though.
// Per thread, the segments are shared among its heaps.
// Per thread, there is always a default heap that is
// used for allocation; it is initialized to statically
// point to an empty heap to avoid initialization checks
// in the fast path.
// ------------------------------------------------------

// Thread local data
typedef struct mi_tld_s mi_tld_t;

// Pages of a certain block size are held in a queue.
typedef struct mi_page_queue_s {
  mi_page_t* first;
  mi_page_t* last;
  size_t     block_size;
} mi_page_queue_t;

#define MI_BIN_FULL  (MI_BIN_HUGE+1)

// Random context (chacha-style state; used to derive heap cookies and keys)
typedef struct mi_random_cxt_s {
  uint32_t input[16];
  uint32_t output[16];
  int      output_available;
  bool     weak;
} mi_random_ctx_t;

// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
#if (MI_PADDING)
typedef struct mi_padding_s {
  uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
  uint32_t delta;  // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
} mi_padding_t;
#define MI_PADDING_SIZE   (sizeof(mi_padding_t))
#define MI_PADDING_WSIZE  ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
#else
#define MI_PADDING_SIZE   0
#define MI_PADDING_WSIZE  0
#endif

// Number of entries in the small-size direct-page lookup table.
#define MI_PAGES_DIRECT   (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
// A heap owns a set of pages.
struct mi_heap_s {
  mi_tld_t*             tld;
  mi_page_t*            pages_free_direct[MI_PAGES_DIRECT];  // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
  mi_page_queue_t       pages[MI_BIN_FULL + 1];              // queue of pages for each size class (or "bin")
  _Atomic(mi_block_t*)  thread_delayed_free;
  mi_threadid_t         thread_id;                           // thread this heap belongs too
  mi_arena_id_t         arena_id;                            // arena id if the heap belongs to a specific arena (or 0)
  uintptr_t             cookie;                              // random cookie to verify pointers (see `_mi_ptr_cookie`)
  uintptr_t             keys[2];                             // two random keys used to encode the `thread_delayed_free` list
  mi_random_ctx_t       random;                              // random number context used for secure allocation
  size_t                page_count;                          // total number of pages in the `pages` queues.
  size_t                page_retired_min;                    // smallest retired index (retired pages are fully free, but still in the page queues)
  size_t                page_retired_max;                    // largest retired index into the `pages` array.
  mi_heap_t*            next;                                // list of heaps per thread
  bool                  no_reclaim;                          // `true` if this heap should not reclaim abandoned pages
  uint8_t               tag;                                 // custom identifier for this heap
  uint8_t               debug_offset;                        // number of bytes to preserve when filling freed or uninitialized memory
  bool                  page_use_qsbr;                       // should freeing pages be delayed using QSBR
};
// ------------------------------------------------------
// Debug
// ------------------------------------------------------

// Byte patterns used to fill uninitialized, freed, and padding memory in debug mode.
#if !defined(MI_DEBUG_UNINIT)
#define MI_DEBUG_UNINIT     (0xD0)
#endif
#if !defined(MI_DEBUG_FREED)
#define MI_DEBUG_FREED      (0xDF)
#endif
#if !defined(MI_DEBUG_PADDING)
#define MI_DEBUG_PADDING    (0xDE)
#endif

#if (MI_DEBUG)
// use our own assertion to print without memory allocation
void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func );
#define mi_assert(expr)     ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__))
#else
#define mi_assert(x)
#endif

// mi_assert_internal: only active at MI_DEBUG>1 (internal invariant checks)
#if (MI_DEBUG>1)
#define mi_assert_internal    mi_assert
#else
#define mi_assert_internal(x)
#endif

// mi_assert_expensive: only active at MI_DEBUG>2 (extensive invariant checks)
#if (MI_DEBUG>2)
#define mi_assert_expensive   mi_assert
#else
#define mi_assert_expensive(x)
#endif
// ------------------------------------------------------
// Statistics
// ------------------------------------------------------

// MI_STAT: 0 = off, >0 = collect statistics (defaults on in debug builds)
#ifndef MI_STAT
#if (MI_DEBUG>0)
#define MI_STAT 2
#else
#define MI_STAT 0
#endif
#endif

// Running count with peak tracking.
typedef struct mi_stat_count_s {
  int64_t allocated;
  int64_t freed;
  int64_t peak;
  int64_t current;
} mi_stat_count_t;

// Monotonic event counter.
typedef struct mi_stat_counter_s {
  int64_t total;
  int64_t count;
} mi_stat_counter_t;

typedef struct mi_stats_s {
  mi_stat_count_t segments;
  mi_stat_count_t pages;
  mi_stat_count_t reserved;
  mi_stat_count_t committed;
  mi_stat_count_t reset;
  mi_stat_count_t purged;
  mi_stat_count_t page_committed;
  mi_stat_count_t segments_abandoned;
  mi_stat_count_t pages_abandoned;
  mi_stat_count_t threads;
  mi_stat_count_t normal;
  mi_stat_count_t huge;
  mi_stat_count_t large;
  mi_stat_count_t malloc;
  mi_stat_count_t segments_cache;
  mi_stat_counter_t pages_extended;
  mi_stat_counter_t mmap_calls;
  mi_stat_counter_t commit_calls;
  mi_stat_counter_t reset_calls;
  mi_stat_counter_t purge_calls;
  mi_stat_counter_t page_no_retire;
  mi_stat_counter_t searches;
  mi_stat_counter_t normal_count;
  mi_stat_counter_t huge_count;
  mi_stat_counter_t large_count;
#if MI_STAT>1
  mi_stat_count_t normal_bins[MI_BIN_HUGE+1];
#endif
} mi_stats_t;

void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);

// Statistics macros compile to no-ops when MI_STAT is 0.
#if (MI_STAT)
#define mi_stat_increase(stat,amount)         _mi_stat_increase( &(stat), amount)
#define mi_stat_decrease(stat,amount)         _mi_stat_decrease( &(stat), amount)
#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount)
#else
#define mi_stat_increase(stat,amount)         (void)0
#define mi_stat_decrease(stat,amount)         (void)0
#define mi_stat_counter_increase(stat,amount) (void)0
#endif

#define mi_heap_stat_counter_increase(heap,stat,amount)  mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount)  mi_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount)  mi_stat_decrease( (heap)->tld->stats.stat, amount)
// ------------------------------------------------------
// Thread Local data
// ------------------------------------------------------

// A "span" is an available range of slices. The span queues keep
// track of slice spans of at most the given `slice_count` (but more than the previous size class).
typedef struct mi_span_queue_s {
  mi_slice_t* first;
  mi_slice_t* last;
  size_t      slice_count;
} mi_span_queue_t;

#define MI_SEGMENT_BIN_MAX (35)     // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)

// OS thread local data
typedef struct mi_os_tld_s {
  size_t                region_idx;   // start point for next allocation
  mi_stats_t*           stats;        // points to tld stats
} mi_os_tld_t;

// Segments thread local data
typedef struct mi_segments_tld_s {
  mi_span_queue_t     spans[MI_SEGMENT_BIN_MAX+1];  // free slice spans inside segments
  size_t              count;        // current number of segments;
  size_t              peak_count;   // peak number of segments
  size_t              current_size; // current size of all segments
  size_t              peak_size;    // peak size of all segments
  mi_stats_t*         stats;        // points to tld stats
  mi_os_tld_t*        os;           // points to os stats
  mi_abandoned_pool_t* abandoned;   // pool of abandoned segments
} mi_segments_tld_t;

// Thread local data
struct mi_tld_s {
  unsigned long long  heartbeat;     // monotonic heartbeat count
  bool                recurse;       // true if deferred was called; used to prevent infinite recursion.
  mi_heap_t*          heap_backing;  // backing heap of this thread (cannot be deleted)
  mi_heap_t*          heaps;         // list of heaps in this thread (so we can abandon all when the thread terminates)
  mi_segments_tld_t   segments;      // segment tld
  mi_os_tld_t         os;            // os tld
  mi_stats_t          stats;         // statistics
};

#endif
|
||||
61
extern/include/python/internal/pycore_abstract.h
vendored
Normal file
61
extern/include/python/internal/pycore_abstract.h
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
#ifndef Py_INTERNAL_ABSTRACT_H
#define Py_INTERNAL_ABSTRACT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

// Fast inlined version of PyIndex_Check():
// true iff the object's type provides the nb_index number slot.
static inline int
_PyIndex_Check(PyObject *obj)
{
    PyNumberMethods *tp_as_number = Py_TYPE(obj)->tp_as_number;
    return (tp_as_number != NULL && tp_as_number->nb_index != NULL);
}

// Power operations without the optional modulus argument.
PyObject *_PyNumber_PowerNoMod(PyObject *lhs, PyObject *rhs);
PyObject *_PyNumber_InPlacePowerNoMod(PyObject *lhs, PyObject *rhs);

extern int _PyObject_HasLen(PyObject *o);

/* === Sequence protocol ================================================ */

#define PY_ITERSEARCH_COUNT    1
#define PY_ITERSEARCH_INDEX    2
#define PY_ITERSEARCH_CONTAINS 3

/* Iterate over seq.

   Result depends on the operation:

   PY_ITERSEARCH_COUNT:  return # of times obj appears in seq; -1 if
     error.
   PY_ITERSEARCH_INDEX:  return 0-based index of first occurrence of
     obj in seq; set ValueError and return -1 if none found;
     also return -1 on error.
   PY_ITERSEARCH_CONTAINS:  return 1 if obj in seq, else 0; -1 on
     error. */
extern Py_ssize_t _PySequence_IterSearch(PyObject *seq,
                                         PyObject *obj, int operation);

/* === Mapping protocol ================================================= */

extern int _PyObject_RealIsInstance(PyObject *inst, PyObject *cls);

extern int _PyObject_RealIsSubclass(PyObject *derived, PyObject *cls);

// Convert Python int to Py_ssize_t. Do nothing if the argument is None.
// Export for '_bisect' shared extension.
PyAPI_FUNC(int) _Py_convert_optional_to_ssize_t(PyObject *, void *);

// Same as PyNumber_Index() but can return an instance of a subclass of int.
// Export for 'math' shared extension.
PyAPI_FUNC(PyObject*) _PyNumber_Index(PyObject *o);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_ABSTRACT_H */
|
||||
112
extern/include/python/internal/pycore_asdl.h
vendored
Normal file
112
extern/include/python/internal/pycore_asdl.h
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
#ifndef Py_INTERNAL_ASDL_H
#define Py_INTERNAL_ASDL_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_pyarena.h"       // _PyArena_Malloc()

typedef PyObject * identifier;
typedef PyObject * string;
typedef PyObject * object;
typedef PyObject * constant;

/* It would be nice if the code generated by asdl_c.py was completely
   independent of Python, but it is a goal the requires too much work
   at this stage.  So, for example, I'll represent identifiers as
   interned Python strings.
*/

// Common header shared by all asdl sequence types: element count plus a
// type-erased view of the trailing `typed_elements` array.
#define _ASDL_SEQ_HEAD \
    Py_ssize_t size;   \
    void **elements;

typedef struct {
    _ASDL_SEQ_HEAD
} asdl_seq;

typedef struct {
    _ASDL_SEQ_HEAD
    void *typed_elements[1];
} asdl_generic_seq;

typedef struct {
    _ASDL_SEQ_HEAD
    PyObject *typed_elements[1];
} asdl_identifier_seq;

typedef struct {
    _ASDL_SEQ_HEAD
    int typed_elements[1];
} asdl_int_seq;

asdl_generic_seq *_Py_asdl_generic_seq_new(Py_ssize_t size, PyArena *arena);
asdl_identifier_seq *_Py_asdl_identifier_seq_new(Py_ssize_t size, PyArena *arena);
asdl_int_seq *_Py_asdl_int_seq_new(Py_ssize_t size, PyArena *arena);

// Expands to the definition of `_Py_asdl_<NAME>_seq_new`: allocates a
// zeroed asdl_<NAME>_seq of `size` elements from `arena`, with overflow
// checks on the computed allocation size. Returns NULL (with MemoryError
// set) on bad size or allocation failure.
#define GENERATE_ASDL_SEQ_CONSTRUCTOR(NAME, TYPE) \
asdl_ ## NAME ## _seq *_Py_asdl_ ## NAME ## _seq_new(Py_ssize_t size, PyArena *arena) \
{ \
    asdl_ ## NAME ## _seq *seq = NULL; \
    size_t n; \
    /* check size is sane */ \
    if (size < 0 || \
        (size && (((size_t)size - 1) > (SIZE_MAX / sizeof(void *))))) { \
        PyErr_NoMemory(); \
        return NULL; \
    } \
    n = (size ? (sizeof(TYPE *) * (size - 1)) : 0); \
    /* check if size can be added safely */ \
    if (n > SIZE_MAX - sizeof(asdl_ ## NAME ## _seq)) { \
        PyErr_NoMemory(); \
        return NULL; \
    } \
    n += sizeof(asdl_ ## NAME ## _seq); \
    seq = (asdl_ ## NAME ## _seq *)_PyArena_Malloc(arena, n); \
    if (!seq) { \
        PyErr_NoMemory(); \
        return NULL; \
    } \
    memset(seq, 0, n); \
    seq->size = size; \
    seq->elements = (void**)seq->typed_elements; \
    return seq; \
}

#define asdl_seq_GET_UNTYPED(S, I) _Py_RVALUE((S)->elements[(I)])
#define asdl_seq_GET(S, I) _Py_RVALUE((S)->typed_elements[(I)])
#define asdl_seq_LEN(S) _Py_RVALUE(((S) == NULL ? 0 : (S)->size))

// In debug builds the setters bounds-check the index once (single evaluation of I).
#ifdef Py_DEBUG
#  define asdl_seq_SET(S, I, V) \
    do { \
        Py_ssize_t _asdl_i = (I); \
        assert((S) != NULL); \
        assert(0 <= _asdl_i && _asdl_i < (S)->size); \
        (S)->typed_elements[_asdl_i] = (V); \
    } while (0)
#else
#  define asdl_seq_SET(S, I, V) _Py_RVALUE((S)->typed_elements[(I)] = (V))
#endif

#ifdef Py_DEBUG
#  define asdl_seq_SET_UNTYPED(S, I, V) \
    do { \
        Py_ssize_t _asdl_i = (I); \
        assert((S) != NULL); \
        assert(0 <= _asdl_i && _asdl_i < (S)->size); \
        (S)->elements[_asdl_i] = (V); \
    } while (0)
#else
#  define asdl_seq_SET_UNTYPED(S, I, V) _Py_RVALUE((S)->elements[(I)] = (V))
#endif

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_ASDL_H */
|
||||
945
extern/include/python/internal/pycore_ast.h
vendored
Normal file
945
extern/include/python/internal/pycore_ast.h
vendored
Normal file
@@ -0,0 +1,945 @@
|
||||
// File automatically generated by Parser/asdl_c.py.
|
||||
|
||||
#ifndef Py_INTERNAL_AST_H
|
||||
#define Py_INTERNAL_AST_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_asdl.h" // _ASDL_SEQ_HEAD
|
||||
|
||||
typedef struct _mod *mod_ty;
|
||||
|
||||
typedef struct _stmt *stmt_ty;
|
||||
|
||||
typedef struct _expr *expr_ty;
|
||||
|
||||
typedef enum _expr_context { Load=1, Store=2, Del=3 } expr_context_ty;
|
||||
|
||||
typedef enum _boolop { And=1, Or=2 } boolop_ty;
|
||||
|
||||
typedef enum _operator { Add=1, Sub=2, Mult=3, MatMult=4, Div=5, Mod=6, Pow=7,
|
||||
LShift=8, RShift=9, BitOr=10, BitXor=11, BitAnd=12,
|
||||
FloorDiv=13 } operator_ty;
|
||||
|
||||
typedef enum _unaryop { Invert=1, Not=2, UAdd=3, USub=4 } unaryop_ty;
|
||||
|
||||
typedef enum _cmpop { Eq=1, NotEq=2, Lt=3, LtE=4, Gt=5, GtE=6, Is=7, IsNot=8,
|
||||
In=9, NotIn=10 } cmpop_ty;
|
||||
|
||||
typedef struct _comprehension *comprehension_ty;
|
||||
|
||||
typedef struct _excepthandler *excepthandler_ty;
|
||||
|
||||
typedef struct _arguments *arguments_ty;
|
||||
|
||||
typedef struct _arg *arg_ty;
|
||||
|
||||
typedef struct _keyword *keyword_ty;
|
||||
|
||||
typedef struct _alias *alias_ty;
|
||||
|
||||
typedef struct _withitem *withitem_ty;
|
||||
|
||||
typedef struct _match_case *match_case_ty;
|
||||
|
||||
typedef struct _pattern *pattern_ty;
|
||||
|
||||
typedef struct _type_ignore *type_ignore_ty;
|
||||
|
||||
typedef struct _type_param *type_param_ty;
|
||||
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
mod_ty typed_elements[1];
|
||||
} asdl_mod_seq;
|
||||
|
||||
asdl_mod_seq *_Py_asdl_mod_seq_new(Py_ssize_t size, PyArena *arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
stmt_ty typed_elements[1];
|
||||
} asdl_stmt_seq;
|
||||
|
||||
asdl_stmt_seq *_Py_asdl_stmt_seq_new(Py_ssize_t size, PyArena *arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
expr_ty typed_elements[1];
|
||||
} asdl_expr_seq;
|
||||
|
||||
asdl_expr_seq *_Py_asdl_expr_seq_new(Py_ssize_t size, PyArena *arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
comprehension_ty typed_elements[1];
|
||||
} asdl_comprehension_seq;
|
||||
|
||||
asdl_comprehension_seq *_Py_asdl_comprehension_seq_new(Py_ssize_t size, PyArena
|
||||
*arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
excepthandler_ty typed_elements[1];
|
||||
} asdl_excepthandler_seq;
|
||||
|
||||
asdl_excepthandler_seq *_Py_asdl_excepthandler_seq_new(Py_ssize_t size, PyArena
|
||||
*arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
arguments_ty typed_elements[1];
|
||||
} asdl_arguments_seq;
|
||||
|
||||
asdl_arguments_seq *_Py_asdl_arguments_seq_new(Py_ssize_t size, PyArena *arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
arg_ty typed_elements[1];
|
||||
} asdl_arg_seq;
|
||||
|
||||
asdl_arg_seq *_Py_asdl_arg_seq_new(Py_ssize_t size, PyArena *arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
keyword_ty typed_elements[1];
|
||||
} asdl_keyword_seq;
|
||||
|
||||
asdl_keyword_seq *_Py_asdl_keyword_seq_new(Py_ssize_t size, PyArena *arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
alias_ty typed_elements[1];
|
||||
} asdl_alias_seq;
|
||||
|
||||
asdl_alias_seq *_Py_asdl_alias_seq_new(Py_ssize_t size, PyArena *arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
withitem_ty typed_elements[1];
|
||||
} asdl_withitem_seq;
|
||||
|
||||
asdl_withitem_seq *_Py_asdl_withitem_seq_new(Py_ssize_t size, PyArena *arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
match_case_ty typed_elements[1];
|
||||
} asdl_match_case_seq;
|
||||
|
||||
asdl_match_case_seq *_Py_asdl_match_case_seq_new(Py_ssize_t size, PyArena
|
||||
*arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
pattern_ty typed_elements[1];
|
||||
} asdl_pattern_seq;
|
||||
|
||||
asdl_pattern_seq *_Py_asdl_pattern_seq_new(Py_ssize_t size, PyArena *arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
type_ignore_ty typed_elements[1];
|
||||
} asdl_type_ignore_seq;
|
||||
|
||||
asdl_type_ignore_seq *_Py_asdl_type_ignore_seq_new(Py_ssize_t size, PyArena
|
||||
*arena);
|
||||
|
||||
typedef struct {
|
||||
_ASDL_SEQ_HEAD
|
||||
type_param_ty typed_elements[1];
|
||||
} asdl_type_param_seq;
|
||||
|
||||
asdl_type_param_seq *_Py_asdl_type_param_seq_new(Py_ssize_t size, PyArena
|
||||
*arena);
|
||||
|
||||
|
||||
enum _mod_kind {Module_kind=1, Interactive_kind=2, Expression_kind=3,
|
||||
FunctionType_kind=4};
|
||||
struct _mod {
|
||||
enum _mod_kind kind;
|
||||
union {
|
||||
struct {
|
||||
asdl_stmt_seq *body;
|
||||
asdl_type_ignore_seq *type_ignores;
|
||||
} Module;
|
||||
|
||||
struct {
|
||||
asdl_stmt_seq *body;
|
||||
} Interactive;
|
||||
|
||||
struct {
|
||||
expr_ty body;
|
||||
} Expression;
|
||||
|
||||
struct {
|
||||
asdl_expr_seq *argtypes;
|
||||
expr_ty returns;
|
||||
} FunctionType;
|
||||
|
||||
} v;
|
||||
};
|
||||
|
||||
enum _stmt_kind {FunctionDef_kind=1, AsyncFunctionDef_kind=2, ClassDef_kind=3,
|
||||
Return_kind=4, Delete_kind=5, Assign_kind=6,
|
||||
TypeAlias_kind=7, AugAssign_kind=8, AnnAssign_kind=9,
|
||||
For_kind=10, AsyncFor_kind=11, While_kind=12, If_kind=13,
|
||||
With_kind=14, AsyncWith_kind=15, Match_kind=16,
|
||||
Raise_kind=17, Try_kind=18, TryStar_kind=19, Assert_kind=20,
|
||||
Import_kind=21, ImportFrom_kind=22, Global_kind=23,
|
||||
Nonlocal_kind=24, Expr_kind=25, Pass_kind=26, Break_kind=27,
|
||||
Continue_kind=28};
|
||||
struct _stmt {
|
||||
enum _stmt_kind kind;
|
||||
union {
|
||||
struct {
|
||||
identifier name;
|
||||
arguments_ty args;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_expr_seq *decorator_list;
|
||||
expr_ty returns;
|
||||
string type_comment;
|
||||
asdl_type_param_seq *type_params;
|
||||
} FunctionDef;
|
||||
|
||||
struct {
|
||||
identifier name;
|
||||
arguments_ty args;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_expr_seq *decorator_list;
|
||||
expr_ty returns;
|
||||
string type_comment;
|
||||
asdl_type_param_seq *type_params;
|
||||
} AsyncFunctionDef;
|
||||
|
||||
struct {
|
||||
identifier name;
|
||||
asdl_expr_seq *bases;
|
||||
asdl_keyword_seq *keywords;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_expr_seq *decorator_list;
|
||||
asdl_type_param_seq *type_params;
|
||||
} ClassDef;
|
||||
|
||||
struct {
|
||||
expr_ty value;
|
||||
} Return;
|
||||
|
||||
struct {
|
||||
asdl_expr_seq *targets;
|
||||
} Delete;
|
||||
|
||||
struct {
|
||||
asdl_expr_seq *targets;
|
||||
expr_ty value;
|
||||
string type_comment;
|
||||
} Assign;
|
||||
|
||||
struct {
|
||||
expr_ty name;
|
||||
asdl_type_param_seq *type_params;
|
||||
expr_ty value;
|
||||
} TypeAlias;
|
||||
|
||||
struct {
|
||||
expr_ty target;
|
||||
operator_ty op;
|
||||
expr_ty value;
|
||||
} AugAssign;
|
||||
|
||||
struct {
|
||||
expr_ty target;
|
||||
expr_ty annotation;
|
||||
expr_ty value;
|
||||
int simple;
|
||||
} AnnAssign;
|
||||
|
||||
struct {
|
||||
expr_ty target;
|
||||
expr_ty iter;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_stmt_seq *orelse;
|
||||
string type_comment;
|
||||
} For;
|
||||
|
||||
struct {
|
||||
expr_ty target;
|
||||
expr_ty iter;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_stmt_seq *orelse;
|
||||
string type_comment;
|
||||
} AsyncFor;
|
||||
|
||||
struct {
|
||||
expr_ty test;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_stmt_seq *orelse;
|
||||
} While;
|
||||
|
||||
struct {
|
||||
expr_ty test;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_stmt_seq *orelse;
|
||||
} If;
|
||||
|
||||
struct {
|
||||
asdl_withitem_seq *items;
|
||||
asdl_stmt_seq *body;
|
||||
string type_comment;
|
||||
} With;
|
||||
|
||||
struct {
|
||||
asdl_withitem_seq *items;
|
||||
asdl_stmt_seq *body;
|
||||
string type_comment;
|
||||
} AsyncWith;
|
||||
|
||||
struct {
|
||||
expr_ty subject;
|
||||
asdl_match_case_seq *cases;
|
||||
} Match;
|
||||
|
||||
struct {
|
||||
expr_ty exc;
|
||||
expr_ty cause;
|
||||
} Raise;
|
||||
|
||||
struct {
|
||||
asdl_stmt_seq *body;
|
||||
asdl_excepthandler_seq *handlers;
|
||||
asdl_stmt_seq *orelse;
|
||||
asdl_stmt_seq *finalbody;
|
||||
} Try;
|
||||
|
||||
struct {
|
||||
asdl_stmt_seq *body;
|
||||
asdl_excepthandler_seq *handlers;
|
||||
asdl_stmt_seq *orelse;
|
||||
asdl_stmt_seq *finalbody;
|
||||
} TryStar;
|
||||
|
||||
struct {
|
||||
expr_ty test;
|
||||
expr_ty msg;
|
||||
} Assert;
|
||||
|
||||
struct {
|
||||
asdl_alias_seq *names;
|
||||
} Import;
|
||||
|
||||
struct {
|
||||
identifier module;
|
||||
asdl_alias_seq *names;
|
||||
int level;
|
||||
} ImportFrom;
|
||||
|
||||
struct {
|
||||
asdl_identifier_seq *names;
|
||||
} Global;
|
||||
|
||||
struct {
|
||||
asdl_identifier_seq *names;
|
||||
} Nonlocal;
|
||||
|
||||
struct {
|
||||
expr_ty value;
|
||||
} Expr;
|
||||
|
||||
} v;
|
||||
int lineno;
|
||||
int col_offset;
|
||||
int end_lineno;
|
||||
int end_col_offset;
|
||||
};
|
||||
|
||||
enum _expr_kind {BoolOp_kind=1, NamedExpr_kind=2, BinOp_kind=3, UnaryOp_kind=4,
|
||||
Lambda_kind=5, IfExp_kind=6, Dict_kind=7, Set_kind=8,
|
||||
ListComp_kind=9, SetComp_kind=10, DictComp_kind=11,
|
||||
GeneratorExp_kind=12, Await_kind=13, Yield_kind=14,
|
||||
YieldFrom_kind=15, Compare_kind=16, Call_kind=17,
|
||||
FormattedValue_kind=18, Interpolation_kind=19,
|
||||
JoinedStr_kind=20, TemplateStr_kind=21, Constant_kind=22,
|
||||
Attribute_kind=23, Subscript_kind=24, Starred_kind=25,
|
||||
Name_kind=26, List_kind=27, Tuple_kind=28, Slice_kind=29};
|
||||
struct _expr {
|
||||
enum _expr_kind kind;
|
||||
union {
|
||||
struct {
|
||||
boolop_ty op;
|
||||
asdl_expr_seq *values;
|
||||
} BoolOp;
|
||||
|
||||
struct {
|
||||
expr_ty target;
|
||||
expr_ty value;
|
||||
} NamedExpr;
|
||||
|
||||
struct {
|
||||
expr_ty left;
|
||||
operator_ty op;
|
||||
expr_ty right;
|
||||
} BinOp;
|
||||
|
||||
struct {
|
||||
unaryop_ty op;
|
||||
expr_ty operand;
|
||||
} UnaryOp;
|
||||
|
||||
struct {
|
||||
arguments_ty args;
|
||||
expr_ty body;
|
||||
} Lambda;
|
||||
|
||||
struct {
|
||||
expr_ty test;
|
||||
expr_ty body;
|
||||
expr_ty orelse;
|
||||
} IfExp;
|
||||
|
||||
struct {
|
||||
asdl_expr_seq *keys;
|
||||
asdl_expr_seq *values;
|
||||
} Dict;
|
||||
|
||||
struct {
|
||||
asdl_expr_seq *elts;
|
||||
} Set;
|
||||
|
||||
struct {
|
||||
expr_ty elt;
|
||||
asdl_comprehension_seq *generators;
|
||||
} ListComp;
|
||||
|
||||
struct {
|
||||
expr_ty elt;
|
||||
asdl_comprehension_seq *generators;
|
||||
} SetComp;
|
||||
|
||||
struct {
|
||||
expr_ty key;
|
||||
expr_ty value;
|
||||
asdl_comprehension_seq *generators;
|
||||
} DictComp;
|
||||
|
||||
struct {
|
||||
expr_ty elt;
|
||||
asdl_comprehension_seq *generators;
|
||||
} GeneratorExp;
|
||||
|
||||
struct {
|
||||
expr_ty value;
|
||||
} Await;
|
||||
|
||||
struct {
|
||||
expr_ty value;
|
||||
} Yield;
|
||||
|
||||
struct {
|
||||
expr_ty value;
|
||||
} YieldFrom;
|
||||
|
||||
struct {
|
||||
expr_ty left;
|
||||
asdl_int_seq *ops;
|
||||
asdl_expr_seq *comparators;
|
||||
} Compare;
|
||||
|
||||
struct {
|
||||
expr_ty func;
|
||||
asdl_expr_seq *args;
|
||||
asdl_keyword_seq *keywords;
|
||||
} Call;
|
||||
|
||||
struct {
|
||||
expr_ty value;
|
||||
int conversion;
|
||||
expr_ty format_spec;
|
||||
} FormattedValue;
|
||||
|
||||
struct {
|
||||
expr_ty value;
|
||||
constant str;
|
||||
int conversion;
|
||||
expr_ty format_spec;
|
||||
} Interpolation;
|
||||
|
||||
struct {
|
||||
asdl_expr_seq *values;
|
||||
} JoinedStr;
|
||||
|
||||
struct {
|
||||
asdl_expr_seq *values;
|
||||
} TemplateStr;
|
||||
|
||||
struct {
|
||||
constant value;
|
||||
string kind;
|
||||
} Constant;
|
||||
|
||||
struct {
|
||||
expr_ty value;
|
||||
identifier attr;
|
||||
expr_context_ty ctx;
|
||||
} Attribute;
|
||||
|
||||
struct {
|
||||
expr_ty value;
|
||||
expr_ty slice;
|
||||
expr_context_ty ctx;
|
||||
} Subscript;
|
||||
|
||||
struct {
|
||||
expr_ty value;
|
||||
expr_context_ty ctx;
|
||||
} Starred;
|
||||
|
||||
struct {
|
||||
identifier id;
|
||||
expr_context_ty ctx;
|
||||
} Name;
|
||||
|
||||
struct {
|
||||
asdl_expr_seq *elts;
|
||||
expr_context_ty ctx;
|
||||
} List;
|
||||
|
||||
struct {
|
||||
asdl_expr_seq *elts;
|
||||
expr_context_ty ctx;
|
||||
} Tuple;
|
||||
|
||||
struct {
|
||||
expr_ty lower;
|
||||
expr_ty upper;
|
||||
expr_ty step;
|
||||
} Slice;
|
||||
|
||||
} v;
|
||||
int lineno;
|
||||
int col_offset;
|
||||
int end_lineno;
|
||||
int end_col_offset;
|
||||
};
|
||||
|
||||
struct _comprehension {
|
||||
expr_ty target;
|
||||
expr_ty iter;
|
||||
asdl_expr_seq *ifs;
|
||||
int is_async;
|
||||
};
|
||||
|
||||
enum _excepthandler_kind {ExceptHandler_kind=1};
|
||||
struct _excepthandler {
|
||||
enum _excepthandler_kind kind;
|
||||
union {
|
||||
struct {
|
||||
expr_ty type;
|
||||
identifier name;
|
||||
asdl_stmt_seq *body;
|
||||
} ExceptHandler;
|
||||
|
||||
} v;
|
||||
int lineno;
|
||||
int col_offset;
|
||||
int end_lineno;
|
||||
int end_col_offset;
|
||||
};
|
||||
|
||||
struct _arguments {
|
||||
asdl_arg_seq *posonlyargs;
|
||||
asdl_arg_seq *args;
|
||||
arg_ty vararg;
|
||||
asdl_arg_seq *kwonlyargs;
|
||||
asdl_expr_seq *kw_defaults;
|
||||
arg_ty kwarg;
|
||||
asdl_expr_seq *defaults;
|
||||
};
|
||||
|
||||
struct _arg {
|
||||
identifier arg;
|
||||
expr_ty annotation;
|
||||
string type_comment;
|
||||
int lineno;
|
||||
int col_offset;
|
||||
int end_lineno;
|
||||
int end_col_offset;
|
||||
};
|
||||
|
||||
struct _keyword {
|
||||
identifier arg;
|
||||
expr_ty value;
|
||||
int lineno;
|
||||
int col_offset;
|
||||
int end_lineno;
|
||||
int end_col_offset;
|
||||
};
|
||||
|
||||
struct _alias {
|
||||
identifier name;
|
||||
identifier asname;
|
||||
int lineno;
|
||||
int col_offset;
|
||||
int end_lineno;
|
||||
int end_col_offset;
|
||||
};
|
||||
|
||||
struct _withitem {
|
||||
expr_ty context_expr;
|
||||
expr_ty optional_vars;
|
||||
};
|
||||
|
||||
struct _match_case {
|
||||
pattern_ty pattern;
|
||||
expr_ty guard;
|
||||
asdl_stmt_seq *body;
|
||||
};
|
||||
|
||||
enum _pattern_kind {MatchValue_kind=1, MatchSingleton_kind=2,
|
||||
MatchSequence_kind=3, MatchMapping_kind=4,
|
||||
MatchClass_kind=5, MatchStar_kind=6, MatchAs_kind=7,
|
||||
MatchOr_kind=8};
|
||||
struct _pattern {
|
||||
enum _pattern_kind kind;
|
||||
union {
|
||||
struct {
|
||||
expr_ty value;
|
||||
} MatchValue;
|
||||
|
||||
struct {
|
||||
constant value;
|
||||
} MatchSingleton;
|
||||
|
||||
struct {
|
||||
asdl_pattern_seq *patterns;
|
||||
} MatchSequence;
|
||||
|
||||
struct {
|
||||
asdl_expr_seq *keys;
|
||||
asdl_pattern_seq *patterns;
|
||||
identifier rest;
|
||||
} MatchMapping;
|
||||
|
||||
struct {
|
||||
expr_ty cls;
|
||||
asdl_pattern_seq *patterns;
|
||||
asdl_identifier_seq *kwd_attrs;
|
||||
asdl_pattern_seq *kwd_patterns;
|
||||
} MatchClass;
|
||||
|
||||
struct {
|
||||
identifier name;
|
||||
} MatchStar;
|
||||
|
||||
struct {
|
||||
pattern_ty pattern;
|
||||
identifier name;
|
||||
} MatchAs;
|
||||
|
||||
struct {
|
||||
asdl_pattern_seq *patterns;
|
||||
} MatchOr;
|
||||
|
||||
} v;
|
||||
int lineno;
|
||||
int col_offset;
|
||||
int end_lineno;
|
||||
int end_col_offset;
|
||||
};
|
||||
|
||||
enum _type_ignore_kind {TypeIgnore_kind=1};
|
||||
struct _type_ignore {
|
||||
enum _type_ignore_kind kind;
|
||||
union {
|
||||
struct {
|
||||
int lineno;
|
||||
string tag;
|
||||
} TypeIgnore;
|
||||
|
||||
} v;
|
||||
};
|
||||
|
||||
enum _type_param_kind {TypeVar_kind=1, ParamSpec_kind=2, TypeVarTuple_kind=3};
|
||||
struct _type_param {
|
||||
enum _type_param_kind kind;
|
||||
union {
|
||||
struct {
|
||||
identifier name;
|
||||
expr_ty bound;
|
||||
expr_ty default_value;
|
||||
} TypeVar;
|
||||
|
||||
struct {
|
||||
identifier name;
|
||||
expr_ty default_value;
|
||||
} ParamSpec;
|
||||
|
||||
struct {
|
||||
identifier name;
|
||||
expr_ty default_value;
|
||||
} TypeVarTuple;
|
||||
|
||||
} v;
|
||||
int lineno;
|
||||
int col_offset;
|
||||
int end_lineno;
|
||||
int end_col_offset;
|
||||
};
|
||||
|
||||
|
||||
// Note: these macros affect function definitions, not only call sites.
|
||||
mod_ty _PyAST_Module(asdl_stmt_seq * body, asdl_type_ignore_seq * type_ignores,
|
||||
PyArena *arena);
|
||||
mod_ty _PyAST_Interactive(asdl_stmt_seq * body, PyArena *arena);
|
||||
mod_ty _PyAST_Expression(expr_ty body, PyArena *arena);
|
||||
mod_ty _PyAST_FunctionType(asdl_expr_seq * argtypes, expr_ty returns, PyArena
|
||||
*arena);
|
||||
stmt_ty _PyAST_FunctionDef(identifier name, arguments_ty args, asdl_stmt_seq *
|
||||
body, asdl_expr_seq * decorator_list, expr_ty
|
||||
returns, string type_comment, asdl_type_param_seq *
|
||||
type_params, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_AsyncFunctionDef(identifier name, arguments_ty args,
|
||||
asdl_stmt_seq * body, asdl_expr_seq *
|
||||
decorator_list, expr_ty returns, string
|
||||
type_comment, asdl_type_param_seq *
|
||||
type_params, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_ClassDef(identifier name, asdl_expr_seq * bases,
|
||||
asdl_keyword_seq * keywords, asdl_stmt_seq * body,
|
||||
asdl_expr_seq * decorator_list, asdl_type_param_seq *
|
||||
type_params, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_Return(expr_ty value, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_Delete(asdl_expr_seq * targets, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_Assign(asdl_expr_seq * targets, expr_ty value, string
|
||||
type_comment, int lineno, int col_offset, int end_lineno,
|
||||
int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_TypeAlias(expr_ty name, asdl_type_param_seq * type_params,
|
||||
expr_ty value, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_AugAssign(expr_ty target, operator_ty op, expr_ty value, int
|
||||
lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_AnnAssign(expr_ty target, expr_ty annotation, expr_ty value, int
|
||||
simple, int lineno, int col_offset, int end_lineno,
|
||||
int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_For(expr_ty target, expr_ty iter, asdl_stmt_seq * body,
|
||||
asdl_stmt_seq * orelse, string type_comment, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
stmt_ty _PyAST_AsyncFor(expr_ty target, expr_ty iter, asdl_stmt_seq * body,
|
||||
asdl_stmt_seq * orelse, string type_comment, int
|
||||
lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_While(expr_ty test, asdl_stmt_seq * body, asdl_stmt_seq *
|
||||
orelse, int lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_If(expr_ty test, asdl_stmt_seq * body, asdl_stmt_seq * orelse,
|
||||
int lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_With(asdl_withitem_seq * items, asdl_stmt_seq * body, string
|
||||
type_comment, int lineno, int col_offset, int end_lineno,
|
||||
int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_AsyncWith(asdl_withitem_seq * items, asdl_stmt_seq * body,
|
||||
string type_comment, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_Match(expr_ty subject, asdl_match_case_seq * cases, int lineno,
|
||||
int col_offset, int end_lineno, int end_col_offset,
|
||||
PyArena *arena);
|
||||
stmt_ty _PyAST_Raise(expr_ty exc, expr_ty cause, int lineno, int col_offset,
|
||||
int end_lineno, int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_Try(asdl_stmt_seq * body, asdl_excepthandler_seq * handlers,
|
||||
asdl_stmt_seq * orelse, asdl_stmt_seq * finalbody, int
|
||||
lineno, int col_offset, int end_lineno, int end_col_offset,
|
||||
PyArena *arena);
|
||||
stmt_ty _PyAST_TryStar(asdl_stmt_seq * body, asdl_excepthandler_seq * handlers,
|
||||
asdl_stmt_seq * orelse, asdl_stmt_seq * finalbody, int
|
||||
lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_Assert(expr_ty test, expr_ty msg, int lineno, int col_offset,
|
||||
int end_lineno, int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_Import(asdl_alias_seq * names, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_ImportFrom(identifier module, asdl_alias_seq * names, int level,
|
||||
int lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_Global(asdl_identifier_seq * names, int lineno, int col_offset,
|
||||
int end_lineno, int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_Nonlocal(asdl_identifier_seq * names, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
stmt_ty _PyAST_Expr(expr_ty value, int lineno, int col_offset, int end_lineno,
|
||||
int end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_Pass(int lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_Break(int lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
stmt_ty _PyAST_Continue(int lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_BoolOp(boolop_ty op, asdl_expr_seq * values, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
expr_ty _PyAST_NamedExpr(expr_ty target, expr_ty value, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset,
|
||||
PyArena *arena);
|
||||
expr_ty _PyAST_BinOp(expr_ty left, operator_ty op, expr_ty right, int lineno,
|
||||
int col_offset, int end_lineno, int end_col_offset,
|
||||
PyArena *arena);
|
||||
expr_ty _PyAST_UnaryOp(unaryop_ty op, expr_ty operand, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
expr_ty _PyAST_Lambda(arguments_ty args, expr_ty body, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
expr_ty _PyAST_IfExp(expr_ty test, expr_ty body, expr_ty orelse, int lineno,
|
||||
int col_offset, int end_lineno, int end_col_offset,
|
||||
PyArena *arena);
|
||||
expr_ty _PyAST_Dict(asdl_expr_seq * keys, asdl_expr_seq * values, int lineno,
|
||||
int col_offset, int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
expr_ty _PyAST_Set(asdl_expr_seq * elts, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_ListComp(expr_ty elt, asdl_comprehension_seq * generators, int
|
||||
lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_SetComp(expr_ty elt, asdl_comprehension_seq * generators, int
|
||||
lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_DictComp(expr_ty key, expr_ty value, asdl_comprehension_seq *
|
||||
generators, int lineno, int col_offset, int end_lineno,
|
||||
int end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_GeneratorExp(expr_ty elt, asdl_comprehension_seq * generators,
|
||||
int lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_Await(expr_ty value, int lineno, int col_offset, int end_lineno,
|
||||
int end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_Yield(expr_ty value, int lineno, int col_offset, int end_lineno,
|
||||
int end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_YieldFrom(expr_ty value, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_Compare(expr_ty left, asdl_int_seq * ops, asdl_expr_seq *
|
||||
comparators, int lineno, int col_offset, int end_lineno,
|
||||
int end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_Call(expr_ty func, asdl_expr_seq * args, asdl_keyword_seq *
|
||||
keywords, int lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_FormattedValue(expr_ty value, int conversion, expr_ty
|
||||
format_spec, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_Interpolation(expr_ty value, constant str, int conversion,
|
||||
expr_ty format_spec, int lineno, int col_offset,
|
||||
int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
expr_ty _PyAST_JoinedStr(asdl_expr_seq * values, int lineno, int col_offset,
|
||||
int end_lineno, int end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_TemplateStr(asdl_expr_seq * values, int lineno, int col_offset,
|
||||
int end_lineno, int end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_Constant(constant value, string kind, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
expr_ty _PyAST_Attribute(expr_ty value, identifier attr, expr_context_ty ctx,
|
||||
int lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_Subscript(expr_ty value, expr_ty slice, expr_context_ty ctx, int
|
||||
lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
expr_ty _PyAST_Starred(expr_ty value, expr_context_ty ctx, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
expr_ty _PyAST_Name(identifier id, expr_context_ty ctx, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
expr_ty _PyAST_List(asdl_expr_seq * elts, expr_context_ty ctx, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
expr_ty _PyAST_Tuple(asdl_expr_seq * elts, expr_context_ty ctx, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
expr_ty _PyAST_Slice(expr_ty lower, expr_ty upper, expr_ty step, int lineno,
|
||||
int col_offset, int end_lineno, int end_col_offset,
|
||||
PyArena *arena);
|
||||
comprehension_ty _PyAST_comprehension(expr_ty target, expr_ty iter,
|
||||
asdl_expr_seq * ifs, int is_async,
|
||||
PyArena *arena);
|
||||
excepthandler_ty _PyAST_ExceptHandler(expr_ty type, identifier name,
|
||||
asdl_stmt_seq * body, int lineno, int
|
||||
col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
arguments_ty _PyAST_arguments(asdl_arg_seq * posonlyargs, asdl_arg_seq * args,
|
||||
arg_ty vararg, asdl_arg_seq * kwonlyargs,
|
||||
asdl_expr_seq * kw_defaults, arg_ty kwarg,
|
||||
asdl_expr_seq * defaults, PyArena *arena);
|
||||
arg_ty _PyAST_arg(identifier arg, expr_ty annotation, string type_comment, int
|
||||
lineno, int col_offset, int end_lineno, int end_col_offset,
|
||||
PyArena *arena);
|
||||
keyword_ty _PyAST_keyword(identifier arg, expr_ty value, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset,
|
||||
PyArena *arena);
|
||||
alias_ty _PyAST_alias(identifier name, identifier asname, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
withitem_ty _PyAST_withitem(expr_ty context_expr, expr_ty optional_vars,
|
||||
PyArena *arena);
|
||||
match_case_ty _PyAST_match_case(pattern_ty pattern, expr_ty guard,
|
||||
asdl_stmt_seq * body, PyArena *arena);
|
||||
pattern_ty _PyAST_MatchValue(expr_ty value, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
pattern_ty _PyAST_MatchSingleton(constant value, int lineno, int col_offset,
|
||||
int end_lineno, int end_col_offset, PyArena
|
||||
*arena);
|
||||
pattern_ty _PyAST_MatchSequence(asdl_pattern_seq * patterns, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset,
|
||||
PyArena *arena);
|
||||
pattern_ty _PyAST_MatchMapping(asdl_expr_seq * keys, asdl_pattern_seq *
|
||||
patterns, identifier rest, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset,
|
||||
PyArena *arena);
|
||||
pattern_ty _PyAST_MatchClass(expr_ty cls, asdl_pattern_seq * patterns,
|
||||
asdl_identifier_seq * kwd_attrs, asdl_pattern_seq
|
||||
* kwd_patterns, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
pattern_ty _PyAST_MatchStar(identifier name, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
pattern_ty _PyAST_MatchAs(pattern_ty pattern, identifier name, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset,
|
||||
PyArena *arena);
|
||||
pattern_ty _PyAST_MatchOr(asdl_pattern_seq * patterns, int lineno, int
|
||||
col_offset, int end_lineno, int end_col_offset,
|
||||
PyArena *arena);
|
||||
type_ignore_ty _PyAST_TypeIgnore(int lineno, string tag, PyArena *arena);
|
||||
type_param_ty _PyAST_TypeVar(identifier name, expr_ty bound, expr_ty
|
||||
default_value, int lineno, int col_offset, int
|
||||
end_lineno, int end_col_offset, PyArena *arena);
|
||||
type_param_ty _PyAST_ParamSpec(identifier name, expr_ty default_value, int
|
||||
lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
type_param_ty _PyAST_TypeVarTuple(identifier name, expr_ty default_value, int
|
||||
lineno, int col_offset, int end_lineno, int
|
||||
end_col_offset, PyArena *arena);
|
||||
|
||||
|
||||
PyObject* PyAST_mod2obj(mod_ty t);
|
||||
int PyAst_CheckMode(PyObject *ast, int mode);
|
||||
mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode);
|
||||
int PyAST_Check(PyObject* obj);
|
||||
|
||||
extern int _PyAST_Validate(mod_ty);
|
||||
|
||||
/* _PyAST_ExprAsUnicode is defined in ast_unparse.c */
|
||||
extern PyObject* _PyAST_ExprAsUnicode(expr_ty);
|
||||
|
||||
/* Return the borrowed reference to the first literal string in the
|
||||
sequence of statements or NULL if it doesn't start from a literal string.
|
||||
Doesn't set exception. */
|
||||
extern PyObject* _PyAST_GetDocString(asdl_stmt_seq *);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_AST_H */
|
||||
271
extern/include/python/internal/pycore_ast_state.h
vendored
Normal file
271
extern/include/python/internal/pycore_ast_state.h
vendored
Normal file
@@ -0,0 +1,271 @@
|
||||
// File automatically generated by Parser/asdl_c.py.
|
||||
|
||||
#ifndef Py_INTERNAL_AST_STATE_H
|
||||
#define Py_INTERNAL_AST_STATE_H
|
||||
|
||||
#include "pycore_lock.h" // _PyOnceFlag
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
/* Per-interpreter cache of every Python object the _ast module needs:
   the type object for each AST node class, cached singleton instances for
   parameter-free nodes (operators, expression contexts, ...), and interned
   strings for AST field/attribute names.
   NOTE: this struct is generated by Parser/asdl_c.py — do not edit the
   field list or order by hand; regenerate instead. */
struct ast_state {
    _PyOnceFlag once;   // guards one-time initialization of this state
    int finalized;      // non-zero once the state has been torn down
    /* AST node type objects (ast.AST and one *_type per node class), plus
       *_singleton caches for nodes that take no constructor arguments. */
    PyObject *AST_type;
    PyObject *Add_singleton;
    PyObject *Add_type;
    PyObject *And_singleton;
    PyObject *And_type;
    PyObject *AnnAssign_type;
    PyObject *Assert_type;
    PyObject *Assign_type;
    PyObject *AsyncFor_type;
    PyObject *AsyncFunctionDef_type;
    PyObject *AsyncWith_type;
    PyObject *Attribute_type;
    PyObject *AugAssign_type;
    PyObject *Await_type;
    PyObject *BinOp_type;
    PyObject *BitAnd_singleton;
    PyObject *BitAnd_type;
    PyObject *BitOr_singleton;
    PyObject *BitOr_type;
    PyObject *BitXor_singleton;
    PyObject *BitXor_type;
    PyObject *BoolOp_type;
    PyObject *Break_type;
    PyObject *Call_type;
    PyObject *ClassDef_type;
    PyObject *Compare_type;
    PyObject *Constant_type;
    PyObject *Continue_type;
    PyObject *Del_singleton;
    PyObject *Del_type;
    PyObject *Delete_type;
    PyObject *DictComp_type;
    PyObject *Dict_type;
    PyObject *Div_singleton;
    PyObject *Div_type;
    PyObject *Eq_singleton;
    PyObject *Eq_type;
    PyObject *ExceptHandler_type;
    PyObject *Expr_type;
    PyObject *Expression_type;
    PyObject *FloorDiv_singleton;
    PyObject *FloorDiv_type;
    PyObject *For_type;
    PyObject *FormattedValue_type;
    PyObject *FunctionDef_type;
    PyObject *FunctionType_type;
    PyObject *GeneratorExp_type;
    PyObject *Global_type;
    PyObject *GtE_singleton;
    PyObject *GtE_type;
    PyObject *Gt_singleton;
    PyObject *Gt_type;
    PyObject *IfExp_type;
    PyObject *If_type;
    PyObject *ImportFrom_type;
    PyObject *Import_type;
    PyObject *In_singleton;
    PyObject *In_type;
    PyObject *Interactive_type;
    PyObject *Interpolation_type;
    PyObject *Invert_singleton;
    PyObject *Invert_type;
    PyObject *IsNot_singleton;
    PyObject *IsNot_type;
    PyObject *Is_singleton;
    PyObject *Is_type;
    PyObject *JoinedStr_type;
    PyObject *LShift_singleton;
    PyObject *LShift_type;
    PyObject *Lambda_type;
    PyObject *ListComp_type;
    PyObject *List_type;
    PyObject *Load_singleton;
    PyObject *Load_type;
    PyObject *LtE_singleton;
    PyObject *LtE_type;
    PyObject *Lt_singleton;
    PyObject *Lt_type;
    PyObject *MatMult_singleton;
    PyObject *MatMult_type;
    PyObject *MatchAs_type;
    PyObject *MatchClass_type;
    PyObject *MatchMapping_type;
    PyObject *MatchOr_type;
    PyObject *MatchSequence_type;
    PyObject *MatchSingleton_type;
    PyObject *MatchStar_type;
    PyObject *MatchValue_type;
    PyObject *Match_type;
    PyObject *Mod_singleton;
    PyObject *Mod_type;
    PyObject *Module_type;
    PyObject *Mult_singleton;
    PyObject *Mult_type;
    PyObject *Name_type;
    PyObject *NamedExpr_type;
    PyObject *Nonlocal_type;
    PyObject *NotEq_singleton;
    PyObject *NotEq_type;
    PyObject *NotIn_singleton;
    PyObject *NotIn_type;
    PyObject *Not_singleton;
    PyObject *Not_type;
    PyObject *Or_singleton;
    PyObject *Or_type;
    PyObject *ParamSpec_type;
    PyObject *Pass_type;
    PyObject *Pow_singleton;
    PyObject *Pow_type;
    PyObject *RShift_singleton;
    PyObject *RShift_type;
    PyObject *Raise_type;
    PyObject *Return_type;
    PyObject *SetComp_type;
    PyObject *Set_type;
    PyObject *Slice_type;
    PyObject *Starred_type;
    PyObject *Store_singleton;
    PyObject *Store_type;
    PyObject *Sub_singleton;
    PyObject *Sub_type;
    PyObject *Subscript_type;
    PyObject *TemplateStr_type;
    PyObject *TryStar_type;
    PyObject *Try_type;
    PyObject *Tuple_type;
    PyObject *TypeAlias_type;
    PyObject *TypeIgnore_type;
    PyObject *TypeVarTuple_type;
    PyObject *TypeVar_type;
    PyObject *UAdd_singleton;
    PyObject *UAdd_type;
    PyObject *USub_singleton;
    PyObject *USub_type;
    PyObject *UnaryOp_type;
    PyObject *While_type;
    PyObject *With_type;
    PyObject *YieldFrom_type;
    PyObject *Yield_type;
    /* Interned strings (dunder names, field/attribute names) and base
       node-category types (expr_type, stmt_type, ...). */
    PyObject *__dict__;
    PyObject *__doc__;
    PyObject *__match_args__;
    PyObject *__module__;
    PyObject *_attributes;
    PyObject *_fields;
    PyObject *alias_type;
    PyObject *annotation;
    PyObject *arg;
    PyObject *arg_type;
    PyObject *args;
    PyObject *argtypes;
    PyObject *arguments_type;
    PyObject *asname;
    PyObject *ast;
    PyObject *attr;
    PyObject *bases;
    PyObject *body;
    PyObject *boolop_type;
    PyObject *bound;
    PyObject *cases;
    PyObject *cause;
    PyObject *cls;
    PyObject *cmpop_type;
    PyObject *col_offset;
    PyObject *comparators;
    PyObject *comprehension_type;
    PyObject *context_expr;
    PyObject *conversion;
    PyObject *ctx;
    PyObject *decorator_list;
    PyObject *default_value;
    PyObject *defaults;
    PyObject *elt;
    PyObject *elts;
    PyObject *end_col_offset;
    PyObject *end_lineno;
    PyObject *exc;
    PyObject *excepthandler_type;
    PyObject *expr_context_type;
    PyObject *expr_type;
    PyObject *finalbody;
    PyObject *format_spec;
    PyObject *func;
    PyObject *generators;
    PyObject *guard;
    PyObject *handlers;
    PyObject *id;
    PyObject *ifs;
    PyObject *is_async;
    PyObject *items;
    PyObject *iter;
    PyObject *key;
    PyObject *keys;
    PyObject *keyword_type;
    PyObject *keywords;
    PyObject *kind;
    PyObject *kw_defaults;
    PyObject *kwarg;
    PyObject *kwd_attrs;
    PyObject *kwd_patterns;
    PyObject *kwonlyargs;
    PyObject *left;
    PyObject *level;
    PyObject *lineno;
    PyObject *lower;
    PyObject *match_case_type;
    PyObject *mod_type;
    PyObject *module;
    PyObject *msg;
    PyObject *name;
    PyObject *names;
    PyObject *op;
    PyObject *operand;
    PyObject *operator_type;
    PyObject *ops;
    PyObject *optional_vars;
    PyObject *orelse;
    PyObject *pattern;
    PyObject *pattern_type;
    PyObject *patterns;
    PyObject *posonlyargs;
    PyObject *rest;
    PyObject *returns;
    PyObject *right;
    PyObject *simple;
    PyObject *slice;
    PyObject *step;
    PyObject *stmt_type;
    PyObject *str;
    PyObject *subject;
    PyObject *tag;
    PyObject *target;
    PyObject *targets;
    PyObject *test;
    PyObject *type;
    PyObject *type_comment;
    PyObject *type_ignore_type;
    PyObject *type_ignores;
    PyObject *type_param_type;
    PyObject *type_params;
    PyObject *unaryop_type;
    PyObject *upper;
    PyObject *value;
    PyObject *values;
    PyObject *vararg;
    PyObject *withitem_type;
};
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_AST_STATE_H */
|
||||
|
||||
31
extern/include/python/internal/pycore_atexit.h
vendored
Normal file
31
extern/include/python/internal/pycore_atexit.h
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
#ifndef Py_INTERNAL_ATEXIT_H
|
||||
#define Py_INTERNAL_ATEXIT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
# define _PyAtExit_LockCallbacks(state) PyMutex_Lock(&state->ll_callbacks_lock);
|
||||
# define _PyAtExit_UnlockCallbacks(state) PyMutex_Unlock(&state->ll_callbacks_lock);
|
||||
#else
|
||||
# define _PyAtExit_LockCallbacks(state)
|
||||
# define _PyAtExit_UnlockCallbacks(state)
|
||||
#endif
|
||||
|
||||
// Export for '_interpchannels' shared extension
|
||||
PyAPI_FUNC(int) _Py_AtExit(
|
||||
PyInterpreterState *interp,
|
||||
atexit_datacallbackfunc func,
|
||||
void *data);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_ATEXIT_H */
|
||||
35
extern/include/python/internal/pycore_audit.h
vendored
Normal file
35
extern/include/python/internal/pycore_audit.h
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
#ifndef Py_INTERNAL_AUDIT_H
|
||||
#define Py_INTERNAL_AUDIT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
/* Runtime audit hook state */
|
||||
|
||||
/* One entry in the runtime's singly-linked list of installed audit hooks. */
typedef struct _Py_AuditHookEntry {
    struct _Py_AuditHookEntry *next;     // next hook in the list, NULL at the tail
    Py_AuditHookFunction hookCFunction;  // the C callback to invoke
    void *userData;                      // opaque pointer handed back to the callback
} _Py_AuditHookEntry;
|
||||
|
||||
|
||||
extern int _PySys_Audit(
|
||||
PyThreadState *tstate,
|
||||
const char *event,
|
||||
const char *argFormat,
|
||||
...);
|
||||
|
||||
// _PySys_ClearAuditHooks() must not be exported: use extern rather than
|
||||
// PyAPI_FUNC(). We want minimal exposure of this function.
|
||||
extern void _PySys_ClearAuditHooks(PyThreadState *tstate);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_AUDIT_H */
|
||||
133
extern/include/python/internal/pycore_backoff.h
vendored
Normal file
133
extern/include/python/internal/pycore_backoff.h
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
|
||||
#ifndef Py_INTERNAL_BACKOFF_H
|
||||
#define Py_INTERNAL_BACKOFF_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include <assert.h>
|
||||
#include <stdbool.h>
|
||||
#include "pycore_structs.h" // _Py_BackoffCounter
|
||||
|
||||
/* 16-bit countdown counters using exponential backoff.
|
||||
|
||||
These are used by the adaptive specializer to count down until
|
||||
it is time to specialize an instruction. If specialization fails
|
||||
the counter is reset using exponential backoff.
|
||||
|
||||
Another use is for the Tier 2 optimizer to decide when to create
|
||||
a new Tier 2 trace (executor). Again, exponential backoff is used.
|
||||
|
||||
The 16-bit counter is structured as a 12-bit unsigned 'value'
|
||||
and a 4-bit 'backoff' field. When resetting the counter, the
|
||||
backoff field is incremented (until it reaches a limit) and the
|
||||
value is set to a bit mask representing the value 2**backoff - 1.
|
||||
The maximum backoff is 12 (the number of bits in the value).
|
||||
|
||||
There is an exceptional value which must not be updated, 0xFFFF.
|
||||
*/
|
||||
|
||||
#define BACKOFF_BITS 4
|
||||
#define MAX_BACKOFF 12
|
||||
#define UNREACHABLE_BACKOFF 15
|
||||
|
||||
static inline bool
|
||||
is_unreachable_backoff_counter(_Py_BackoffCounter counter)
|
||||
{
|
||||
return counter.value_and_backoff == UNREACHABLE_BACKOFF;
|
||||
}
|
||||
|
||||
static inline _Py_BackoffCounter
|
||||
make_backoff_counter(uint16_t value, uint16_t backoff)
|
||||
{
|
||||
assert(backoff <= 15);
|
||||
assert(value <= 0xFFF);
|
||||
_Py_BackoffCounter result;
|
||||
result.value_and_backoff = (value << BACKOFF_BITS) | backoff;
|
||||
return result;
|
||||
}
|
||||
|
||||
static inline _Py_BackoffCounter
|
||||
forge_backoff_counter(uint16_t counter)
|
||||
{
|
||||
_Py_BackoffCounter result;
|
||||
result.value_and_backoff = counter;
|
||||
return result;
|
||||
}
|
||||
|
||||
static inline _Py_BackoffCounter
|
||||
restart_backoff_counter(_Py_BackoffCounter counter)
|
||||
{
|
||||
assert(!is_unreachable_backoff_counter(counter));
|
||||
int backoff = counter.value_and_backoff & 15;
|
||||
if (backoff < MAX_BACKOFF) {
|
||||
return make_backoff_counter((1 << (backoff + 1)) - 1, backoff + 1);
|
||||
}
|
||||
else {
|
||||
return make_backoff_counter((1 << MAX_BACKOFF) - 1, MAX_BACKOFF);
|
||||
}
|
||||
}
|
||||
|
||||
static inline _Py_BackoffCounter
|
||||
pause_backoff_counter(_Py_BackoffCounter counter)
|
||||
{
|
||||
_Py_BackoffCounter result;
|
||||
result.value_and_backoff = counter.value_and_backoff | (1 << BACKOFF_BITS);
|
||||
return result;
|
||||
}
|
||||
|
||||
static inline _Py_BackoffCounter
|
||||
advance_backoff_counter(_Py_BackoffCounter counter)
|
||||
{
|
||||
_Py_BackoffCounter result;
|
||||
result.value_and_backoff = counter.value_and_backoff - (1 << BACKOFF_BITS);
|
||||
return result;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
backoff_counter_triggers(_Py_BackoffCounter counter)
|
||||
{
|
||||
/* Test whether the value is zero and the backoff is not UNREACHABLE_BACKOFF */
|
||||
return counter.value_and_backoff < UNREACHABLE_BACKOFF;
|
||||
}
|
||||
|
||||
/* Initial JUMP_BACKWARD counter.
|
||||
* This determines when we create a trace for a loop. */
|
||||
#define JUMP_BACKWARD_INITIAL_VALUE 4095
|
||||
#define JUMP_BACKWARD_INITIAL_BACKOFF 12
|
||||
static inline _Py_BackoffCounter
|
||||
initial_jump_backoff_counter(void)
|
||||
{
|
||||
return make_backoff_counter(JUMP_BACKWARD_INITIAL_VALUE,
|
||||
JUMP_BACKWARD_INITIAL_BACKOFF);
|
||||
}
|
||||
|
||||
/* Initial exit temperature.
|
||||
* Must be larger than ADAPTIVE_COOLDOWN_VALUE,
|
||||
* otherwise when a side exit warms up we may construct
|
||||
* a new trace before the Tier 1 code has properly re-specialized. */
|
||||
#define SIDE_EXIT_INITIAL_VALUE 4095
|
||||
#define SIDE_EXIT_INITIAL_BACKOFF 12
|
||||
|
||||
static inline _Py_BackoffCounter
|
||||
initial_temperature_backoff_counter(void)
|
||||
{
|
||||
return make_backoff_counter(SIDE_EXIT_INITIAL_VALUE,
|
||||
SIDE_EXIT_INITIAL_BACKOFF);
|
||||
}
|
||||
|
||||
/* Unreachable backoff counter. */
|
||||
static inline _Py_BackoffCounter
|
||||
initial_unreachable_backoff_counter(void)
|
||||
{
|
||||
return make_backoff_counter(0, UNREACHABLE_BACKOFF);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_BACKOFF_H */
|
||||
186
extern/include/python/internal/pycore_bitutils.h
vendored
Normal file
186
extern/include/python/internal/pycore_bitutils.h
vendored
Normal file
@@ -0,0 +1,186 @@
|
||||
/* Bit and bytes utilities.
|
||||
|
||||
Bytes swap functions, reverse order of bytes:
|
||||
|
||||
- _Py_bswap16(uint16_t)
|
||||
- _Py_bswap32(uint32_t)
|
||||
- _Py_bswap64(uint64_t)
|
||||
*/
|
||||
|
||||
#ifndef Py_INTERNAL_BITUTILS_H
|
||||
#define Py_INTERNAL_BITUTILS_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#if defined(__GNUC__) \
|
||||
&& ((__GNUC__ >= 5) || (__GNUC__ == 4) && (__GNUC_MINOR__ >= 8))
|
||||
/* __builtin_bswap16() is available since GCC 4.8,
|
||||
__builtin_bswap32() is available since GCC 4.3,
|
||||
__builtin_bswap64() is available since GCC 4.3. */
|
||||
# define _PY_HAVE_BUILTIN_BSWAP
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
# include <intrin.h> // _byteswap_uint64()
|
||||
#endif
|
||||
|
||||
|
||||
/* Reverse the byte order of a 16-bit unsigned integer.
   Uses the compiler builtin when available (_PY_HAVE_BUILTIN_BSWAP is set
   above for GCC >= 4.8; _Py__has_builtin covers clang), the MSVC intrinsic
   otherwise, with a portable mask-and-shift fallback. */
static inline uint16_t
_Py_bswap16(uint16_t word)
{
#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap16)
    return __builtin_bswap16(word);
#elif defined(_MSC_VER)
    // The intrinsic operates on 'unsigned short'; assert the sizes agree.
    Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned short));
    return _byteswap_ushort(word);
#else
    // Portable implementation which doesn't rely on circular bit shift
    return ( ((word & UINT16_C(0x00FF)) << 8)
           | ((word & UINT16_C(0xFF00)) >> 8));
#endif
}
|
||||
|
||||
/* Reverse the byte order of a 32-bit unsigned integer.
   Same strategy as _Py_bswap16(): compiler builtin, MSVC intrinsic, or a
   portable mask-and-shift fallback. */
static inline uint32_t
_Py_bswap32(uint32_t word)
{
#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap32)
    return __builtin_bswap32(word);
#elif defined(_MSC_VER)
    // The intrinsic operates on 'unsigned long'; assert the sizes agree.
    Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned long));
    return _byteswap_ulong(word);
#else
    // Portable implementation which doesn't rely on circular bit shift
    return ( ((word & UINT32_C(0x000000FF)) << 24)
           | ((word & UINT32_C(0x0000FF00)) <<  8)
           | ((word & UINT32_C(0x00FF0000)) >>  8)
           | ((word & UINT32_C(0xFF000000)) >> 24));
#endif
}
|
||||
|
||||
/* Reverse the byte order of a 64-bit unsigned integer.
   Same strategy as _Py_bswap16()/_Py_bswap32(). */
static inline uint64_t
_Py_bswap64(uint64_t word)
{
#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap64)
    return __builtin_bswap64(word);
#elif defined(_MSC_VER)
    return _byteswap_uint64(word);
#else
    // Portable implementation which doesn't rely on circular bit shift
    return ( ((word & UINT64_C(0x00000000000000FF)) << 56)
           | ((word & UINT64_C(0x000000000000FF00)) << 40)
           | ((word & UINT64_C(0x0000000000FF0000)) << 24)
           | ((word & UINT64_C(0x00000000FF000000)) <<  8)
           | ((word & UINT64_C(0x000000FF00000000)) >>  8)
           | ((word & UINT64_C(0x0000FF0000000000)) >> 24)
           | ((word & UINT64_C(0x00FF000000000000)) >> 40)
           | ((word & UINT64_C(0xFF00000000000000)) >> 56));
#endif
}
|
||||
|
||||
|
||||
// Population count: count the number of 1's in 'x'
// (number of bits set to 1), also known as the Hamming weight.
//
// Implementation note: CPUID is not used to test whether the x86 POPCNT
// instruction can be used, to keep the implementation simple. For example,
// Visual Studio's __popcnt() is not used for this reason. The clang and GCC
// builtin function can use the x86 POPCNT instruction if the target
// architecture has SSE4a or newer.
static inline int
_Py_popcount32(uint32_t x)
{
#if (defined(__clang__) || defined(__GNUC__))

#if SIZEOF_INT >= 4
    Py_BUILD_ASSERT(sizeof(x) <= sizeof(unsigned int));
    return __builtin_popcount(x);
#else
    // The C standard guarantees that unsigned long will always be big enough
    // to hold a uint32_t value without losing information.
    Py_BUILD_ASSERT(sizeof(x) <= sizeof(unsigned long));
    return __builtin_popcountl(x);
#endif

#else
    // 32-bit SWAR (SIMD Within A Register) popcount

    // Binary: 0 1 0 1 ...
    const uint32_t M1 = 0x55555555;
    // Binary: 00 11 00 11 ...
    const uint32_t M2 = 0x33333333;
    // Binary: 0000 1111 0000 1111 ...
    const uint32_t M4 = 0x0F0F0F0F;

    // Put count of each 2 bits into those 2 bits
    x = x - ((x >> 1) & M1);
    // Put count of each 4 bits into those 4 bits
    x = (x & M2) + ((x >> 2) & M2);
    // Put count of each 8 bits into those 8 bits
    x = (x + (x >> 4)) & M4;
    // Sum of the 4 byte counts.
    // Take care when considering changes to the next line. Portability and
    // correctness are delicate here, thanks to C's "integer promotions" (C99
    // §6.3.1.1p2). On machines where the `int` type has width greater than 32
    // bits, `x` will be promoted to an `int`, and following C's "usual
    // arithmetic conversions" (C99 §6.3.1.8), the multiplication will be
    // performed as a multiplication of two `unsigned int` operands. In this
    // case it's critical that we cast back to `uint32_t` in order to keep only
    // the least significant 32 bits. On machines where the `int` type has
    // width no greater than 32, the multiplication is of two 32-bit unsigned
    // integer types, and the (uint32_t) cast is a no-op. In both cases, we
    // avoid the risk of undefined behaviour due to overflow of a
    // multiplication of signed integer types.
    return (uint32_t)(x * 0x01010101U) >> 24;
#endif
}
|
||||
|
||||
|
||||
// Return the index of the most significant 1 bit in 'x': the smallest
// integer k such that x < 2**k. Equivalent to floor(log2(x)) + 1 for x != 0,
// and 0 for x == 0.
static inline int
_Py_bit_length(unsigned long x)
{
#if (defined(__clang__) || defined(__GNUC__))
    if (x == 0) {
        // __builtin_clzl() is undefined for 0, so handle it up front.
        return 0;
    }
    // __builtin_clzl() is available since GCC 3.4.
    return (int)sizeof(unsigned long) * 8 - __builtin_clzl(x);
#elif defined(_MSC_VER)
    // _BitScanReverse() is documented to search 32 bits.
    Py_BUILD_ASSERT(sizeof(unsigned long) <= 4);
    unsigned long msb;
    if (!_BitScanReverse(&msb, x)) {
        return 0;  // no bit set
    }
    return (int)msb + 1;
#else
    // Portable fallback: strip 6 bits at a time, then finish with a
    // lookup table covering bit lengths of 0..31.
    const int small_lengths[32] = {
        0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
        5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
    };
    int length = 0;
    while (x >= 32) {
        length += 6;
        x >>= 6;
    }
    return length + small_lengths[x];
#endif
}
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_BITUTILS_H */
|
||||
321
extern/include/python/internal/pycore_blocks_output_buffer.h
vendored
Normal file
321
extern/include/python/internal/pycore_blocks_output_buffer.h
vendored
Normal file
@@ -0,0 +1,321 @@
|
||||
/*
|
||||
_BlocksOutputBuffer is used to maintain an output buffer
|
||||
that has unpredictable size. Suitable for compression/decompression
|
||||
API (bz2/lzma/zlib) that has stream->next_out and stream->avail_out:
|
||||
|
||||
stream->next_out: point to the next output position.
|
||||
stream->avail_out: the number of available bytes left in the buffer.
|
||||
|
||||
It maintains a list of bytes object, so there is no overhead of resizing
|
||||
the buffer.
|
||||
|
||||
Usage:
|
||||
|
||||
1, Initialize the struct instance like this:
|
||||
_BlocksOutputBuffer buffer = {.list = NULL};
|
||||
Set .list to NULL for _BlocksOutputBuffer_OnError()
|
||||
|
||||
2, Initialize the buffer use one of these functions:
|
||||
_BlocksOutputBuffer_InitAndGrow()
|
||||
_BlocksOutputBuffer_InitWithSize()
|
||||
|
||||
3, If (avail_out == 0), grow the buffer:
|
||||
_BlocksOutputBuffer_Grow()
|
||||
|
||||
4, Get the current outputted data size:
|
||||
_BlocksOutputBuffer_GetDataSize()
|
||||
|
||||
5, Finish the buffer, and return a bytes object:
|
||||
_BlocksOutputBuffer_Finish()
|
||||
|
||||
6, Clean up the buffer when an error occurred:
|
||||
_BlocksOutputBuffer_OnError()
|
||||
*/
|
||||
|
||||
#ifndef Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H
|
||||
#define Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "Python.h"
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
typedef struct {
    // Python list holding one bytes object per allocated block.
    PyObject *list;
    // Total number of bytes allocated across all blocks.
    Py_ssize_t allocated;
    // Max length of the buffer; a negative number means unlimited length.
    Py_ssize_t max_length;
} _BlocksOutputBuffer;
|
||||
|
||||
static const char unable_allocate_msg[] = "Unable to allocate output buffer.";
|
||||
|
||||
/* In 32-bit build, the max block size should <= INT32_MAX. */
|
||||
#define OUTPUT_BUFFER_MAX_BLOCK_SIZE (256*1024*1024)
|
||||
|
||||
/* Block size sequence */
|
||||
#define KB (1024)
|
||||
#define MB (1024*1024)
|
||||
static const Py_ssize_t BUFFER_BLOCK_SIZE[] =
|
||||
{ 32*KB, 64*KB, 256*KB, 1*MB, 4*MB, 8*MB, 16*MB, 16*MB,
|
||||
32*MB, 32*MB, 32*MB, 32*MB, 64*MB, 64*MB, 128*MB, 128*MB,
|
||||
OUTPUT_BUFFER_MAX_BLOCK_SIZE };
|
||||
#undef KB
|
||||
#undef MB
|
||||
|
||||
/* According to the block sizes defined by BUFFER_BLOCK_SIZE, the whole
|
||||
allocated size growth step is:
|
||||
1 32 KB +32 KB
|
||||
2 96 KB +64 KB
|
||||
3 352 KB +256 KB
|
||||
4 1.34 MB +1 MB
|
||||
5 5.34 MB +4 MB
|
||||
6 13.34 MB +8 MB
|
||||
7 29.34 MB +16 MB
|
||||
8 45.34 MB +16 MB
|
||||
9 77.34 MB +32 MB
|
||||
10 109.34 MB +32 MB
|
||||
11 141.34 MB +32 MB
|
||||
12 173.34 MB +32 MB
|
||||
13 237.34 MB +64 MB
|
||||
14 301.34 MB +64 MB
|
||||
15 429.34 MB +128 MB
|
||||
16 557.34 MB +128 MB
|
||||
17 813.34 MB +256 MB
|
||||
18 1069.34 MB +256 MB
|
||||
19 1325.34 MB +256 MB
|
||||
20 1581.34 MB +256 MB
|
||||
21 1837.34 MB +256 MB
|
||||
22 2093.34 MB +256 MB
|
||||
...
|
||||
*/
|
||||
|
||||
/* Initialize the buffer and allocate its first block.

   buffer:     must have .list set to NULL beforehand (see usage notes above).
   max_length: max length of the buffer, -1 for unlimited length; the first
               block is shrunk to max_length when that is smaller than the
               default first-block size.
   next_out:   on success, set to the writable start of the first block.

   On success, return the allocated size (>= 0).
   On failure, return -1 (note: unlike _BlocksOutputBuffer_InitWithSize(),
   no exception message is set here by this function itself).
*/
static inline Py_ssize_t
_BlocksOutputBuffer_InitAndGrow(_BlocksOutputBuffer *buffer,
                                const Py_ssize_t max_length,
                                void **next_out)
{
    PyObject *b;
    Py_ssize_t block_size;

    // ensure .list was set to NULL
    assert(buffer->list == NULL);

    // get block size
    if (0 <= max_length && max_length < BUFFER_BLOCK_SIZE[0]) {
        block_size = max_length;
    } else {
        block_size = BUFFER_BLOCK_SIZE[0];
    }

    // the first block
    b = PyBytes_FromStringAndSize(NULL, block_size);
    if (b == NULL) {
        return -1;
    }

    // create the list
    buffer->list = PyList_New(1);
    if (buffer->list == NULL) {
        Py_DECREF(b);
        return -1;
    }
    // PyList_SET_ITEM steals the reference to b.
    PyList_SET_ITEM(buffer->list, 0, b);

    // set variables
    buffer->allocated = block_size;
    buffer->max_length = max_length;

    *next_out = PyBytes_AS_STRING(b);
    return block_size;
}
|
||||
|
||||
/* Initialize the buffer with a caller-chosen first-block size.

   Check the block size limit in the outer wrapper function. For example, some
   libs accept UINT32_MAX as the maximum block size, then init_size should be
   <= it.

   buffer:    must have .list set to NULL beforehand.
   init_size: size of the first block; the buffer's max_length is set to -1
              (unlimited).
   next_out:  on success, set to the writable start of the first block.

   On success, return the allocated size (>= 0).
   On failure, return -1 with a MemoryError set (allocation failure only).
*/
static inline Py_ssize_t
_BlocksOutputBuffer_InitWithSize(_BlocksOutputBuffer *buffer,
                                 const Py_ssize_t init_size,
                                 void **next_out)
{
    PyObject *b;

    // ensure .list was set to NULL
    assert(buffer->list == NULL);

    // the first block
    b = PyBytes_FromStringAndSize(NULL, init_size);
    if (b == NULL) {
        PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
        return -1;
    }

    // create the list
    buffer->list = PyList_New(1);
    if (buffer->list == NULL) {
        Py_DECREF(b);
        return -1;
    }
    // PyList_SET_ITEM steals the reference to b.
    PyList_SET_ITEM(buffer->list, 0, b);

    // set variables
    buffer->allocated = init_size;
    buffer->max_length = -1;

    *next_out = PyBytes_AS_STRING(b);
    return init_size;
}
|
||||
|
||||
/* Append a new block to the buffer. avail_out must be 0 — check it before
   calling; a non-zero value would leave a gap of uninitialized bytes in the
   output, so it is rejected with SystemError.

   The new block's size follows the BUFFER_BLOCK_SIZE progression and is
   clipped so that the total allocation never exceeds max_length (when set)
   or PY_SSIZE_T_MAX.

   On success, return the size of the new block (>= 0) and point *next_out
   at its writable start.
   On failure, return -1 with an exception set.
*/
static inline Py_ssize_t
_BlocksOutputBuffer_Grow(_BlocksOutputBuffer *buffer,
                         void **next_out,
                         const Py_ssize_t avail_out)
{
    PyObject *b;
    const Py_ssize_t list_len = Py_SIZE(buffer->list);
    Py_ssize_t block_size;

    // ensure no gaps in the data
    if (avail_out != 0) {
        PyErr_SetString(PyExc_SystemError,
                        "avail_out is non-zero in _BlocksOutputBuffer_Grow().");
        return -1;
    }

    // get block size: follow the progression until its last (largest) entry
    if (list_len < (Py_ssize_t) Py_ARRAY_LENGTH(BUFFER_BLOCK_SIZE)) {
        block_size = BUFFER_BLOCK_SIZE[list_len];
    } else {
        block_size = BUFFER_BLOCK_SIZE[Py_ARRAY_LENGTH(BUFFER_BLOCK_SIZE) - 1];
    }

    // check max_length
    if (buffer->max_length >= 0) {
        // if (rest == 0), should not grow the buffer.
        Py_ssize_t rest = buffer->max_length - buffer->allocated;
        assert(rest > 0);

        // block_size of the last block
        if (block_size > rest) {
            block_size = rest;
        }
    }

    // check buffer->allocated overflow
    if (block_size > PY_SSIZE_T_MAX - buffer->allocated) {
        PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
        return -1;
    }

    // create the block
    b = PyBytes_FromStringAndSize(NULL, block_size);
    if (b == NULL) {
        PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
        return -1;
    }
    // PyList_Append adds its own reference; drop ours afterwards.
    if (PyList_Append(buffer->list, b) < 0) {
        Py_DECREF(b);
        return -1;
    }
    Py_DECREF(b);

    // set variables
    buffer->allocated += block_size;

    *next_out = PyBytes_AS_STRING(b);
    return block_size;
}
|
||||
|
||||
/* Return the current outputted data size. */
|
||||
static inline Py_ssize_t
|
||||
_BlocksOutputBuffer_GetDataSize(_BlocksOutputBuffer *buffer,
|
||||
const Py_ssize_t avail_out)
|
||||
{
|
||||
return buffer->allocated - avail_out;
|
||||
}
|
||||
|
||||
/* Finish the buffer: concatenate all blocks (minus the unused tail of
   avail_out bytes in the last block) into a single bytes object and release
   the internal block list.

   Return a new reference to a bytes object on success.
   Return NULL on failure with a MemoryError set; the block list is kept so
   the caller can still use _BlocksOutputBuffer_OnError().
*/
static inline PyObject *
_BlocksOutputBuffer_Finish(_BlocksOutputBuffer *buffer,
                           const Py_ssize_t avail_out)
{
    PyObject *result, *block;
    const Py_ssize_t list_len = Py_SIZE(buffer->list);

    // fast path for single block: either the only block is completely full,
    // or the second block is completely unused — return the first block
    // directly without copying.
    if ((list_len == 1 && avail_out == 0) ||
        (list_len == 2 && Py_SIZE(PyList_GET_ITEM(buffer->list, 1)) == avail_out))
    {
        block = PyList_GET_ITEM(buffer->list, 0);
        Py_INCREF(block);

        Py_CLEAR(buffer->list);
        return block;
    }

    // final bytes object
    result = PyBytes_FromStringAndSize(NULL, buffer->allocated - avail_out);
    if (result == NULL) {
        PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
        return NULL;
    }

    // memory copy
    if (list_len > 0) {
        char *posi = PyBytes_AS_STRING(result);

        // blocks except the last one are copied in full
        Py_ssize_t i = 0;
        for (; i < list_len-1; i++) {
            block = PyList_GET_ITEM(buffer->list, i);
            memcpy(posi, PyBytes_AS_STRING(block), Py_SIZE(block));
            posi += Py_SIZE(block);
        }
        // the last block: skip its unused tail of avail_out bytes
        block = PyList_GET_ITEM(buffer->list, i);
        memcpy(posi, PyBytes_AS_STRING(block), Py_SIZE(block) - avail_out);
    } else {
        assert(Py_SIZE(result) == 0);
    }

    Py_CLEAR(buffer->list);
    return result;
}
|
||||
|
||||
/* Clean up the buffer when an error occurred: drop the block list.
   Safe to call when .list is already NULL (Py_CLEAR handles NULL). */
static inline void
_BlocksOutputBuffer_OnError(_BlocksOutputBuffer *buffer)
{
    Py_CLEAR(buffer->list);
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H */
|
||||
73
extern/include/python/internal/pycore_brc.h
vendored
Normal file
73
extern/include/python/internal/pycore_brc.h
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
#ifndef Py_INTERNAL_BRC_H
|
||||
#define Py_INTERNAL_BRC_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include "pycore_llist.h" // struct llist_node
|
||||
#include "pycore_object_stack.h" // _PyObjectStack
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
|
||||
// Prime number to avoid correlations with memory addresses.
|
||||
#define _Py_BRC_NUM_BUCKETS 257
|
||||
|
||||
// Hash table bucket
|
||||
struct _brc_bucket {
|
||||
// Mutex protects both the bucket and thread state queues in this bucket.
|
||||
PyMutex mutex;
|
||||
|
||||
// Linked list of _PyThreadStateImpl objects hashed to this bucket.
|
||||
struct llist_node root;
|
||||
};
|
||||
|
||||
// Per-interpreter biased reference counting state
|
||||
struct _brc_state {
|
||||
// Hash table of thread states by thread-id. Thread states within a bucket
|
||||
// are chained using a doubly-linked list.
|
||||
struct _brc_bucket table[_Py_BRC_NUM_BUCKETS];
|
||||
};
|
||||
|
||||
// Per-thread biased reference counting state
|
||||
struct _brc_thread_state {
|
||||
// Linked-list of thread states per hash bucket
|
||||
struct llist_node bucket_node;
|
||||
|
||||
// Thread-id as determined by _PyThread_Id()
|
||||
uintptr_t tid;
|
||||
|
||||
// Objects with refcounts to be merged (protected by bucket mutex)
|
||||
_PyObjectStack objects_to_merge;
|
||||
|
||||
// Local stack of objects to be merged (not accessed by other threads)
|
||||
_PyObjectStack local_objects_to_merge;
|
||||
};
|
||||
|
||||
// Initialize/finalize the per-thread biased reference counting state
|
||||
void _Py_brc_init_thread(PyThreadState *tstate);
|
||||
void _Py_brc_remove_thread(PyThreadState *tstate);
|
||||
|
||||
// Initialize per-interpreter state
|
||||
void _Py_brc_init_state(PyInterpreterState *interp);
|
||||
|
||||
void _Py_brc_after_fork(PyInterpreterState *interp);
|
||||
|
||||
// Enqueues an object to be merged by it's owning thread (tid). This
|
||||
// steals a reference to the object.
|
||||
void _Py_brc_queue_object(PyObject *ob);
|
||||
|
||||
// Merge the refcounts of queued objects for the current thread.
|
||||
void _Py_brc_merge_refcounts(PyThreadState *tstate);
|
||||
|
||||
#endif /* Py_GIL_DISABLED */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_BRC_H */
|
||||
82
extern/include/python/internal/pycore_bytes_methods.h
vendored
Normal file
82
extern/include/python/internal/pycore_bytes_methods.h
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
#ifndef Py_LIMITED_API
|
||||
#ifndef Py_BYTES_CTYPE_H
|
||||
#define Py_BYTES_CTYPE_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The internal implementation behind PyBytes (bytes) and PyByteArray (bytearray)
|
||||
* methods of the given names, they operate on ASCII byte strings.
|
||||
*/
|
||||
extern PyObject* _Py_bytes_isspace(const char *cptr, Py_ssize_t len);
|
||||
extern PyObject* _Py_bytes_isalpha(const char *cptr, Py_ssize_t len);
|
||||
extern PyObject* _Py_bytes_isalnum(const char *cptr, Py_ssize_t len);
|
||||
extern PyObject* _Py_bytes_isascii(const char *cptr, Py_ssize_t len);
|
||||
extern PyObject* _Py_bytes_isdigit(const char *cptr, Py_ssize_t len);
|
||||
extern PyObject* _Py_bytes_islower(const char *cptr, Py_ssize_t len);
|
||||
extern PyObject* _Py_bytes_isupper(const char *cptr, Py_ssize_t len);
|
||||
extern PyObject* _Py_bytes_istitle(const char *cptr, Py_ssize_t len);
|
||||
|
||||
/* These store their len sized answer in the given preallocated *result arg. */
|
||||
extern void _Py_bytes_lower(char *result, const char *cptr, Py_ssize_t len);
|
||||
extern void _Py_bytes_upper(char *result, const char *cptr, Py_ssize_t len);
|
||||
extern void _Py_bytes_title(char *result, const char *s, Py_ssize_t len);
|
||||
extern void _Py_bytes_capitalize(char *result, const char *s, Py_ssize_t len);
|
||||
extern void _Py_bytes_swapcase(char *result, const char *s, Py_ssize_t len);
|
||||
|
||||
extern PyObject *_Py_bytes_find(const char *str, Py_ssize_t len, PyObject *sub,
|
||||
Py_ssize_t start, Py_ssize_t end);
|
||||
extern PyObject *_Py_bytes_index(const char *str, Py_ssize_t len, PyObject *sub,
|
||||
Py_ssize_t start, Py_ssize_t end);
|
||||
extern PyObject *_Py_bytes_rfind(const char *str, Py_ssize_t len, PyObject *sub,
|
||||
Py_ssize_t start, Py_ssize_t end);
|
||||
extern PyObject *_Py_bytes_rindex(const char *str, Py_ssize_t len, PyObject *sub,
|
||||
Py_ssize_t start, Py_ssize_t end);
|
||||
extern PyObject *_Py_bytes_count(const char *str, Py_ssize_t len, PyObject *sub,
|
||||
Py_ssize_t start, Py_ssize_t end);
|
||||
extern int _Py_bytes_contains(const char *str, Py_ssize_t len, PyObject *arg);
|
||||
extern PyObject *_Py_bytes_startswith(const char *str, Py_ssize_t len,
|
||||
PyObject *subobj, Py_ssize_t start,
|
||||
Py_ssize_t end);
|
||||
extern PyObject *_Py_bytes_endswith(const char *str, Py_ssize_t len,
|
||||
PyObject *subobj, Py_ssize_t start,
|
||||
Py_ssize_t end);
|
||||
|
||||
/* The maketrans() static method. */
|
||||
extern PyObject* _Py_bytes_maketrans(Py_buffer *frm, Py_buffer *to);
|
||||
|
||||
/* Shared __doc__ strings. */
|
||||
extern const char _Py_isspace__doc__[];
|
||||
extern const char _Py_isalpha__doc__[];
|
||||
extern const char _Py_isalnum__doc__[];
|
||||
extern const char _Py_isascii__doc__[];
|
||||
extern const char _Py_isdigit__doc__[];
|
||||
extern const char _Py_islower__doc__[];
|
||||
extern const char _Py_isupper__doc__[];
|
||||
extern const char _Py_istitle__doc__[];
|
||||
extern const char _Py_lower__doc__[];
|
||||
extern const char _Py_upper__doc__[];
|
||||
extern const char _Py_title__doc__[];
|
||||
extern const char _Py_capitalize__doc__[];
|
||||
extern const char _Py_swapcase__doc__[];
|
||||
extern const char _Py_count__doc__[];
|
||||
extern const char _Py_find__doc__[];
|
||||
extern const char _Py_index__doc__[];
|
||||
extern const char _Py_rfind__doc__[];
|
||||
extern const char _Py_rindex__doc__[];
|
||||
extern const char _Py_startswith__doc__[];
|
||||
extern const char _Py_endswith__doc__[];
|
||||
extern const char _Py_maketrans__doc__[];
|
||||
extern const char _Py_expandtabs__doc__[];
|
||||
extern const char _Py_ljust__doc__[];
|
||||
extern const char _Py_rjust__doc__[];
|
||||
extern const char _Py_center__doc__[];
|
||||
extern const char _Py_zfill__doc__[];
|
||||
|
||||
/* this is needed because some docs are shared from the .o, not static */
|
||||
#define PyDoc_STRVAR_shared(name,str) const char name[] = PyDoc_STR(str)
|
||||
|
||||
#endif /* !Py_BYTES_CTYPE_H */
|
||||
#endif /* !Py_LIMITED_API */
|
||||
149
extern/include/python/internal/pycore_bytesobject.h
vendored
Normal file
149
extern/include/python/internal/pycore_bytesobject.h
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
#ifndef Py_INTERNAL_BYTESOBJECT_H
|
||||
#define Py_INTERNAL_BYTESOBJECT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
extern PyObject* _PyBytes_FormatEx(
|
||||
const char *format,
|
||||
Py_ssize_t format_len,
|
||||
PyObject *args,
|
||||
int use_bytearray);
|
||||
|
||||
extern PyObject* _PyBytes_FromHex(
|
||||
PyObject *string,
|
||||
int use_bytearray);
|
||||
|
||||
// Helper for PyBytes_DecodeEscape that detects invalid escape chars.
|
||||
// Export for test_peg_generator.
|
||||
PyAPI_FUNC(PyObject*) _PyBytes_DecodeEscape2(const char *, Py_ssize_t,
|
||||
const char *,
|
||||
int *, const char **);
|
||||
|
||||
|
||||
// Substring Search.
|
||||
//
|
||||
// Returns the index of the first occurrence of
|
||||
// a substring ("needle") in a larger text ("haystack").
|
||||
// If the needle is not found, return -1.
|
||||
// If the needle is found, add offset to the index.
|
||||
//
|
||||
// Export for 'mmap' shared extension.
|
||||
PyAPI_FUNC(Py_ssize_t)
|
||||
_PyBytes_Find(const char *haystack, Py_ssize_t len_haystack,
|
||||
const char *needle, Py_ssize_t len_needle,
|
||||
Py_ssize_t offset);
|
||||
|
||||
// Same as above, but search right-to-left.
|
||||
// Export for 'mmap' shared extension.
|
||||
PyAPI_FUNC(Py_ssize_t)
|
||||
_PyBytes_ReverseFind(const char *haystack, Py_ssize_t len_haystack,
|
||||
const char *needle, Py_ssize_t len_needle,
|
||||
Py_ssize_t offset);
|
||||
|
||||
|
||||
// Helper function to implement the repeat and inplace repeat methods on a
|
||||
// buffer.
|
||||
//
|
||||
// len_dest is assumed to be an integer multiple of len_src.
|
||||
// If src equals dest, then assume the operation is inplace.
|
||||
//
|
||||
// This method repeately doubles the number of bytes copied to reduce
|
||||
// the number of invocations of memcpy.
|
||||
//
|
||||
// Export for 'array' shared extension.
|
||||
PyAPI_FUNC(void)
|
||||
_PyBytes_Repeat(char* dest, Py_ssize_t len_dest,
|
||||
const char* src, Py_ssize_t len_src);
|
||||
|
||||
/* --- _PyBytesWriter ----------------------------------------------------- */
|
||||
|
||||
/* The _PyBytesWriter structure is big: it contains an embedded "stack buffer".
|
||||
A _PyBytesWriter variable must be declared at the end of variables in a
|
||||
function to optimize the memory allocation on the stack. */
|
||||
typedef struct {
|
||||
/* bytes, bytearray or NULL (when the small buffer is used) */
|
||||
PyObject *buffer;
|
||||
|
||||
/* Number of allocated size. */
|
||||
Py_ssize_t allocated;
|
||||
|
||||
/* Minimum number of allocated bytes,
|
||||
incremented by _PyBytesWriter_Prepare() */
|
||||
Py_ssize_t min_size;
|
||||
|
||||
/* If non-zero, use a bytearray instead of a bytes object for buffer. */
|
||||
int use_bytearray;
|
||||
|
||||
/* If non-zero, overallocate the buffer (default: 0).
|
||||
This flag must be zero if use_bytearray is non-zero. */
|
||||
int overallocate;
|
||||
|
||||
/* Stack buffer */
|
||||
int use_small_buffer;
|
||||
char small_buffer[512];
|
||||
} _PyBytesWriter;
|
||||
|
||||
/* Initialize a bytes writer
|
||||
|
||||
By default, the overallocation is disabled. Set the overallocate attribute
|
||||
to control the allocation of the buffer.
|
||||
|
||||
Export _PyBytesWriter API for '_pickle' shared extension. */
|
||||
PyAPI_FUNC(void) _PyBytesWriter_Init(_PyBytesWriter *writer);
|
||||
|
||||
/* Get the buffer content and reset the writer.
|
||||
Return a bytes object, or a bytearray object if use_bytearray is non-zero.
|
||||
Raise an exception and return NULL on error. */
|
||||
PyAPI_FUNC(PyObject *) _PyBytesWriter_Finish(_PyBytesWriter *writer,
|
||||
void *str);
|
||||
|
||||
/* Deallocate memory of a writer (clear its internal buffer). */
|
||||
PyAPI_FUNC(void) _PyBytesWriter_Dealloc(_PyBytesWriter *writer);
|
||||
|
||||
/* Allocate the buffer to write size bytes.
|
||||
Return the pointer to the beginning of buffer data.
|
||||
Raise an exception and return NULL on error. */
|
||||
PyAPI_FUNC(void*) _PyBytesWriter_Alloc(_PyBytesWriter *writer,
|
||||
Py_ssize_t size);
|
||||
|
||||
/* Ensure that the buffer is large enough to write *size* bytes.
|
||||
Add size to the writer minimum size (min_size attribute).
|
||||
|
||||
str is the current pointer inside the buffer.
|
||||
Return the updated current pointer inside the buffer.
|
||||
Raise an exception and return NULL on error. */
|
||||
PyAPI_FUNC(void*) _PyBytesWriter_Prepare(_PyBytesWriter *writer,
|
||||
void *str,
|
||||
Py_ssize_t size);
|
||||
|
||||
/* Resize the buffer to make it larger.
|
||||
The new buffer may be larger than size bytes because of overallocation.
|
||||
Return the updated current pointer inside the buffer.
|
||||
Raise an exception and return NULL on error.
|
||||
|
||||
Note: size must be greater than the number of allocated bytes in the writer.
|
||||
|
||||
This function doesn't use the writer minimum size (min_size attribute).
|
||||
|
||||
See also _PyBytesWriter_Prepare().
|
||||
*/
|
||||
PyAPI_FUNC(void*) _PyBytesWriter_Resize(_PyBytesWriter *writer,
|
||||
void *str,
|
||||
Py_ssize_t size);
|
||||
|
||||
/* Write bytes.
|
||||
Raise an exception and return NULL on error. */
|
||||
PyAPI_FUNC(void*) _PyBytesWriter_WriteBytes(_PyBytesWriter *writer,
|
||||
void *str,
|
||||
const void *bytes,
|
||||
Py_ssize_t size);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_BYTESOBJECT_H */
|
||||
39
extern/include/python/internal/pycore_c_array.h
vendored
Normal file
39
extern/include/python/internal/pycore_c_array.h
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
#ifndef Py_INTERNAL_C_ARRAY_H
|
||||
#define Py_INTERNAL_C_ARRAY_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
/* Utility for a number of growing arrays */
|
||||
|
||||
typedef struct {
|
||||
void *array; /* pointer to the array */
|
||||
int allocated_entries; /* pointer to the capacity of the array */
|
||||
size_t item_size; /* size of each element */
|
||||
int initial_num_entries; /* initial allocation size */
|
||||
} _Py_c_array_t;
|
||||
|
||||
|
||||
int _Py_CArray_Init(_Py_c_array_t* array, int item_size, int initial_num_entries);
|
||||
void _Py_CArray_Fini(_Py_c_array_t* array);
|
||||
|
||||
/* If idx is out of bounds:
|
||||
* If arr->array is NULL, allocate arr->initial_num_entries slots.
|
||||
* Otherwise, double its size.
|
||||
*
|
||||
* Return 0 if successful and -1 (with exception set) otherwise.
|
||||
*/
|
||||
int _Py_CArray_EnsureCapacity(_Py_c_array_t *c_array, int idx);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* !Py_INTERNAL_C_ARRAY_H */
|
||||
206
extern/include/python/internal/pycore_call.h
vendored
Normal file
206
extern/include/python/internal/pycore_call.h
vendored
Normal file
@@ -0,0 +1,206 @@
|
||||
#ifndef Py_INTERNAL_CALL_H
|
||||
#define Py_INTERNAL_CALL_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_code.h" // EVAL_CALL_STAT_INC_IF_FUNCTION()
|
||||
#include "pycore_pystate.h" // _PyThreadState_GET()
|
||||
#include "pycore_stats.h"
|
||||
|
||||
/* Suggested size (number of positional arguments) for arrays of PyObject*
|
||||
allocated on a C stack to avoid allocating memory on the heap memory. Such
|
||||
array is used to pass positional arguments to call functions of the
|
||||
PyObject_Vectorcall() family.
|
||||
|
||||
The size is chosen to not abuse the C stack and so limit the risk of stack
|
||||
overflow. The size is also chosen to allow using the small stack for most
|
||||
function calls of the Python standard library. On 64-bit CPU, it allocates
|
||||
40 bytes on the stack. */
|
||||
#define _PY_FASTCALL_SMALL_STACK 5
|
||||
|
||||
|
||||
// Export for 'math' shared extension, used via _PyObject_VectorcallTstate()
|
||||
// static inline function.
|
||||
PyAPI_FUNC(PyObject*) _Py_CheckFunctionResult(
|
||||
PyThreadState *tstate,
|
||||
PyObject *callable,
|
||||
PyObject *result,
|
||||
const char *where);
|
||||
|
||||
extern PyObject* _PyObject_Call_Prepend(
|
||||
PyThreadState *tstate,
|
||||
PyObject *callable,
|
||||
PyObject *obj,
|
||||
PyObject *args,
|
||||
PyObject *kwargs);
|
||||
|
||||
extern PyObject* _PyObject_VectorcallDictTstate(
|
||||
PyThreadState *tstate,
|
||||
PyObject *callable,
|
||||
PyObject *const *args,
|
||||
size_t nargsf,
|
||||
PyObject *kwargs);
|
||||
|
||||
extern PyObject* _PyObject_Call(
|
||||
PyThreadState *tstate,
|
||||
PyObject *callable,
|
||||
PyObject *args,
|
||||
PyObject *kwargs);
|
||||
|
||||
extern PyObject * _PyObject_CallMethodFormat(
|
||||
PyThreadState *tstate,
|
||||
PyObject *callable,
|
||||
const char *format,
|
||||
...);
|
||||
|
||||
// Export for 'array' shared extension
|
||||
PyAPI_FUNC(PyObject*) _PyObject_CallMethod(
|
||||
PyObject *obj,
|
||||
PyObject *name,
|
||||
const char *format, ...);
|
||||
|
||||
extern PyObject* _PyObject_CallMethodIdObjArgs(
|
||||
PyObject *obj,
|
||||
_Py_Identifier *name,
|
||||
...);
|
||||
|
||||
static inline PyObject *
|
||||
_PyObject_VectorcallMethodId(
|
||||
_Py_Identifier *name, PyObject *const *args,
|
||||
size_t nargsf, PyObject *kwnames)
|
||||
{
|
||||
PyObject *oname = _PyUnicode_FromId(name); /* borrowed */
|
||||
if (!oname) {
|
||||
return _Py_NULL;
|
||||
}
|
||||
return PyObject_VectorcallMethod(oname, args, nargsf, kwnames);
|
||||
}
|
||||
|
||||
static inline PyObject *
|
||||
_PyObject_CallMethodIdNoArgs(PyObject *self, _Py_Identifier *name)
|
||||
{
|
||||
size_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET;
|
||||
return _PyObject_VectorcallMethodId(name, &self, nargsf, _Py_NULL);
|
||||
}
|
||||
|
||||
static inline PyObject *
|
||||
_PyObject_CallMethodIdOneArg(PyObject *self, _Py_Identifier *name, PyObject *arg)
|
||||
{
|
||||
PyObject *args[2] = {self, arg};
|
||||
size_t nargsf = 2 | PY_VECTORCALL_ARGUMENTS_OFFSET;
|
||||
assert(arg != NULL);
|
||||
return _PyObject_VectorcallMethodId(name, args, nargsf, _Py_NULL);
|
||||
}
|
||||
|
||||
|
||||
/* === Vectorcall protocol (PEP 590) ============================= */
|
||||
|
||||
// Call callable using tp_call. Arguments are like PyObject_Vectorcall(),
|
||||
// except that nargs is plainly the number of arguments without flags.
|
||||
//
|
||||
// Export for 'math' shared extension, used via _PyObject_VectorcallTstate()
|
||||
// static inline function.
|
||||
PyAPI_FUNC(PyObject*) _PyObject_MakeTpCall(
|
||||
PyThreadState *tstate,
|
||||
PyObject *callable,
|
||||
PyObject *const *args, Py_ssize_t nargs,
|
||||
PyObject *keywords);
|
||||
|
||||
// Static inline variant of public PyVectorcall_Function().
|
||||
static inline vectorcallfunc
|
||||
_PyVectorcall_FunctionInline(PyObject *callable)
|
||||
{
|
||||
assert(callable != NULL);
|
||||
|
||||
PyTypeObject *tp = Py_TYPE(callable);
|
||||
if (!PyType_HasFeature(tp, Py_TPFLAGS_HAVE_VECTORCALL)) {
|
||||
return NULL;
|
||||
}
|
||||
assert(PyCallable_Check(callable));
|
||||
|
||||
Py_ssize_t offset = tp->tp_vectorcall_offset;
|
||||
assert(offset > 0);
|
||||
|
||||
vectorcallfunc ptr;
|
||||
memcpy(&ptr, (char *) callable + offset, sizeof(ptr));
|
||||
return ptr;
|
||||
}
|
||||
|
||||
|
||||
/* Call the callable object 'callable' with the "vectorcall" calling
|
||||
convention.
|
||||
|
||||
args is a C array for positional arguments.
|
||||
|
||||
nargsf is the number of positional arguments plus optionally the flag
|
||||
PY_VECTORCALL_ARGUMENTS_OFFSET which means that the caller is allowed to
|
||||
modify args[-1].
|
||||
|
||||
kwnames is a tuple of keyword names. The values of the keyword arguments
|
||||
are stored in "args" after the positional arguments (note that the number
|
||||
of keyword arguments does not change nargsf). kwnames can also be NULL if
|
||||
there are no keyword arguments.
|
||||
|
||||
keywords must only contain strings and all keys must be unique.
|
||||
|
||||
Return the result on success. Raise an exception and return NULL on
|
||||
error. */
|
||||
static inline PyObject *
|
||||
_PyObject_VectorcallTstate(PyThreadState *tstate, PyObject *callable,
|
||||
PyObject *const *args, size_t nargsf,
|
||||
PyObject *kwnames)
|
||||
{
|
||||
vectorcallfunc func;
|
||||
PyObject *res;
|
||||
|
||||
assert(kwnames == NULL || PyTuple_Check(kwnames));
|
||||
assert(args != NULL || PyVectorcall_NARGS(nargsf) == 0);
|
||||
|
||||
func = _PyVectorcall_FunctionInline(callable);
|
||||
if (func == NULL) {
|
||||
Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
|
||||
return _PyObject_MakeTpCall(tstate, callable, args, nargs, kwnames);
|
||||
}
|
||||
res = func(callable, args, nargsf, kwnames);
|
||||
return _Py_CheckFunctionResult(tstate, callable, res, NULL);
|
||||
}
|
||||
|
||||
|
||||
static inline PyObject *
|
||||
_PyObject_CallNoArgsTstate(PyThreadState *tstate, PyObject *func) {
|
||||
return _PyObject_VectorcallTstate(tstate, func, NULL, 0, NULL);
|
||||
}
|
||||
|
||||
|
||||
// Private static inline function variant of public PyObject_CallNoArgs()
|
||||
static inline PyObject *
|
||||
_PyObject_CallNoArgs(PyObject *func) {
|
||||
EVAL_CALL_STAT_INC_IF_FUNCTION(EVAL_CALL_API, func);
|
||||
PyThreadState *tstate = _PyThreadState_GET();
|
||||
return _PyObject_VectorcallTstate(tstate, func, NULL, 0, NULL);
|
||||
}
|
||||
|
||||
|
||||
extern PyObject *const *
|
||||
_PyStack_UnpackDict(PyThreadState *tstate,
|
||||
PyObject *const *args, Py_ssize_t nargs,
|
||||
PyObject *kwargs, PyObject **p_kwnames);
|
||||
|
||||
extern void _PyStack_UnpackDict_Free(
|
||||
PyObject *const *stack,
|
||||
Py_ssize_t nargs,
|
||||
PyObject *kwnames);
|
||||
|
||||
extern void _PyStack_UnpackDict_FreeNoDecRef(
|
||||
PyObject *const *stack,
|
||||
PyObject *kwnames);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CALL_H */
|
||||
17
extern/include/python/internal/pycore_capsule.h
vendored
Normal file
17
extern/include/python/internal/pycore_capsule.h
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
#ifndef Py_INTERNAL_PYCAPSULE_H
|
||||
#define Py_INTERNAL_PYCAPSULE_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
// Export for '_socket' shared extension
|
||||
PyAPI_FUNC(int) _PyCapsule_SetTraverse(PyObject *op, traverseproc traverse_func, inquiry clear_func);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_PYCAPSULE_H */
|
||||
75
extern/include/python/internal/pycore_cell.h
vendored
Normal file
75
extern/include/python/internal/pycore_cell.h
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
#ifndef Py_INTERNAL_CELL_H
|
||||
#define Py_INTERNAL_CELL_H
|
||||
|
||||
#include "pycore_critical_section.h"
|
||||
#include "pycore_object.h"
|
||||
#include "pycore_stackref.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
// Sets the cell contents to `value` and return previous contents. Steals a
|
||||
// reference to `value`.
|
||||
static inline PyObject *
|
||||
PyCell_SwapTakeRef(PyCellObject *cell, PyObject *value)
|
||||
{
|
||||
PyObject *old_value;
|
||||
Py_BEGIN_CRITICAL_SECTION(cell);
|
||||
old_value = cell->ob_ref;
|
||||
FT_ATOMIC_STORE_PTR_RELEASE(cell->ob_ref, value);
|
||||
Py_END_CRITICAL_SECTION();
|
||||
return old_value;
|
||||
}
|
||||
|
||||
static inline void
|
||||
PyCell_SetTakeRef(PyCellObject *cell, PyObject *value)
|
||||
{
|
||||
PyObject *old_value = PyCell_SwapTakeRef(cell, value);
|
||||
Py_XDECREF(old_value);
|
||||
}
|
||||
|
||||
// Gets the cell contents. Returns a new reference.
|
||||
static inline PyObject *
|
||||
PyCell_GetRef(PyCellObject *cell)
|
||||
{
|
||||
PyObject *res;
|
||||
Py_BEGIN_CRITICAL_SECTION(cell);
|
||||
#ifdef Py_GIL_DISABLED
|
||||
res = _Py_XNewRefWithLock(cell->ob_ref);
|
||||
#else
|
||||
res = Py_XNewRef(cell->ob_ref);
|
||||
#endif
|
||||
Py_END_CRITICAL_SECTION();
|
||||
return res;
|
||||
}
|
||||
|
||||
static inline _PyStackRef
|
||||
_PyCell_GetStackRef(PyCellObject *cell)
|
||||
{
|
||||
PyObject *value;
|
||||
#ifdef Py_GIL_DISABLED
|
||||
value = _Py_atomic_load_ptr(&cell->ob_ref);
|
||||
if (value == NULL) {
|
||||
return PyStackRef_NULL;
|
||||
}
|
||||
_PyStackRef ref;
|
||||
if (_Py_TryIncrefCompareStackRef(&cell->ob_ref, value, &ref)) {
|
||||
return ref;
|
||||
}
|
||||
#endif
|
||||
value = PyCell_GetRef(cell);
|
||||
if (value == NULL) {
|
||||
return PyStackRef_NULL;
|
||||
}
|
||||
return PyStackRef_FromPyObjectSteal(value);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CELL_H */
|
||||
389
extern/include/python/internal/pycore_ceval.h
vendored
Normal file
389
extern/include/python/internal/pycore_ceval.h
vendored
Normal file
@@ -0,0 +1,389 @@
|
||||
#ifndef Py_INTERNAL_CEVAL_H
|
||||
#define Py_INTERNAL_CEVAL_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "dynamic_annotations.h" // _Py_ANNOTATE_RWLOCK_CREATE
|
||||
|
||||
#include "pycore_code.h" // _PyCode_GetTLBCFast()
|
||||
#include "pycore_interp.h" // PyInterpreterState.eval_frame
|
||||
#include "pycore_pystate.h" // _PyThreadState_GET()
|
||||
#include "pycore_stats.h" // EVAL_CALL_STAT_INC()
|
||||
#include "pycore_typedefs.h" // _PyInterpreterFrame
|
||||
|
||||
|
||||
/* Forward declarations */
|
||||
struct _ceval_runtime_state;
|
||||
|
||||
// Export for '_lsprof' shared extension
|
||||
PyAPI_FUNC(int) _PyEval_SetProfile(PyThreadState *tstate, Py_tracefunc func, PyObject *arg);
|
||||
extern int _PyEval_SetProfileAllThreads(PyInterpreterState *interp, Py_tracefunc func, PyObject *arg);
|
||||
|
||||
extern int _PyEval_SetTrace(PyThreadState *tstate, Py_tracefunc func, PyObject *arg);
|
||||
extern int _PyEval_SetTraceAllThreads(PyInterpreterState *interp, Py_tracefunc func, PyObject *arg);
|
||||
|
||||
extern int _PyEval_SetOpcodeTrace(PyFrameObject *f, bool enable);
|
||||
|
||||
// Helper to look up a builtin object
|
||||
// Export for 'array' shared extension
|
||||
PyAPI_FUNC(PyObject*) _PyEval_GetBuiltin(PyObject *);
|
||||
|
||||
extern PyObject* _PyEval_GetBuiltinId(_Py_Identifier *);
|
||||
|
||||
extern void _PyEval_SetSwitchInterval(unsigned long microseconds);
|
||||
extern unsigned long _PyEval_GetSwitchInterval(void);
|
||||
|
||||
// Export for '_queue' shared extension
|
||||
PyAPI_FUNC(int) _PyEval_MakePendingCalls(PyThreadState *);
|
||||
|
||||
#ifndef Py_DEFAULT_RECURSION_LIMIT
|
||||
# define Py_DEFAULT_RECURSION_LIMIT 1000
|
||||
#endif
|
||||
|
||||
extern void _Py_FinishPendingCalls(PyThreadState *tstate);
|
||||
extern void _PyEval_InitState(PyInterpreterState *);
|
||||
extern void _PyEval_SignalReceived(void);
|
||||
|
||||
// bitwise flags:
|
||||
#define _Py_PENDING_MAINTHREADONLY 1
|
||||
#define _Py_PENDING_RAWFREE 2
|
||||
|
||||
typedef int _Py_add_pending_call_result;
|
||||
#define _Py_ADD_PENDING_SUCCESS 0
|
||||
#define _Py_ADD_PENDING_FULL -1
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(_Py_add_pending_call_result) _PyEval_AddPendingCall(
|
||||
PyInterpreterState *interp,
|
||||
_Py_pending_call_func func,
|
||||
void *arg,
|
||||
int flags);
|
||||
|
||||
#ifdef HAVE_FORK
|
||||
extern PyStatus _PyEval_ReInitThreads(PyThreadState *tstate);
|
||||
#endif
|
||||
|
||||
// Used by sys.call_tracing()
|
||||
extern PyObject* _PyEval_CallTracing(PyObject *func, PyObject *args);
|
||||
|
||||
// Used by sys.get_asyncgen_hooks()
|
||||
extern PyObject* _PyEval_GetAsyncGenFirstiter(void);
|
||||
extern PyObject* _PyEval_GetAsyncGenFinalizer(void);
|
||||
|
||||
// Used by sys.set_asyncgen_hooks()
|
||||
extern int _PyEval_SetAsyncGenFirstiter(PyObject *);
|
||||
extern int _PyEval_SetAsyncGenFinalizer(PyObject *);
|
||||
|
||||
// Used by sys.get_coroutine_origin_tracking_depth()
|
||||
// and sys.set_coroutine_origin_tracking_depth()
|
||||
extern int _PyEval_GetCoroutineOriginTrackingDepth(void);
|
||||
extern int _PyEval_SetCoroutineOriginTrackingDepth(int depth);
|
||||
|
||||
extern void _PyEval_Fini(void);
|
||||
|
||||
|
||||
extern PyObject* _PyEval_GetBuiltins(PyThreadState *tstate);
|
||||
|
||||
// Trampoline API
|
||||
|
||||
typedef struct {
|
||||
// Callback to initialize the trampoline state
|
||||
void* (*init_state)(void);
|
||||
// Callback to register every trampoline being created
|
||||
void (*write_state)(void* state, const void *code_addr,
|
||||
unsigned int code_size, PyCodeObject* code);
|
||||
// Callback to free the trampoline state
|
||||
int (*free_state)(void* state);
|
||||
} _PyPerf_Callbacks;
|
||||
|
||||
extern int _PyPerfTrampoline_SetCallbacks(_PyPerf_Callbacks *);
|
||||
extern void _PyPerfTrampoline_GetCallbacks(_PyPerf_Callbacks *);
|
||||
extern int _PyPerfTrampoline_Init(int activate);
|
||||
extern int _PyPerfTrampoline_Fini(void);
|
||||
extern int _PyIsPerfTrampolineActive(void);
|
||||
extern PyStatus _PyPerfTrampoline_AfterFork_Child(void);
|
||||
#ifdef PY_HAVE_PERF_TRAMPOLINE
|
||||
extern _PyPerf_Callbacks _Py_perfmap_callbacks;
|
||||
extern _PyPerf_Callbacks _Py_perfmap_jit_callbacks;
|
||||
#endif
|
||||
|
||||
static inline PyObject*
|
||||
_PyEval_EvalFrame(PyThreadState *tstate, _PyInterpreterFrame *frame, int throwflag)
|
||||
{
|
||||
EVAL_CALL_STAT_INC(EVAL_CALL_TOTAL);
|
||||
if (tstate->interp->eval_frame == NULL) {
|
||||
return _PyEval_EvalFrameDefault(tstate, frame, throwflag);
|
||||
}
|
||||
return tstate->interp->eval_frame(tstate, frame, throwflag);
|
||||
}
|
||||
|
||||
extern PyObject*
|
||||
_PyEval_Vector(PyThreadState *tstate,
|
||||
PyFunctionObject *func, PyObject *locals,
|
||||
PyObject* const* args, size_t argcount,
|
||||
PyObject *kwnames);
|
||||
|
||||
extern int _PyEval_ThreadsInitialized(void);
|
||||
extern void _PyEval_InitGIL(PyThreadState *tstate, int own_gil);
|
||||
extern void _PyEval_FiniGIL(PyInterpreterState *interp);
|
||||
|
||||
extern void _PyEval_AcquireLock(PyThreadState *tstate);
|
||||
|
||||
extern void _PyEval_ReleaseLock(PyInterpreterState *, PyThreadState *,
|
||||
int final_release);
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
// Returns 0 or 1 if the GIL for the given thread's interpreter is disabled or
|
||||
// enabled, respectively.
|
||||
//
|
||||
// The enabled state of the GIL will not change while one or more threads are
|
||||
// attached.
|
||||
static inline int
|
||||
_PyEval_IsGILEnabled(PyThreadState *tstate)
|
||||
{
|
||||
struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
|
||||
return _Py_atomic_load_int_relaxed(&gil->enabled) != 0;
|
||||
}
|
||||
|
||||
// Enable or disable the GIL used by the interpreter that owns tstate, which
|
||||
// must be the current thread. This may affect other interpreters, if the GIL
|
||||
// is shared. All three functions will be no-ops (and return 0) if the
|
||||
// interpreter's `enable_gil' config is not _PyConfig_GIL_DEFAULT.
|
||||
//
|
||||
// Every call to _PyEval_EnableGILTransient() must be paired with exactly one
|
||||
// call to either _PyEval_EnableGILPermanent() or
|
||||
// _PyEval_DisableGIL(). _PyEval_EnableGILPermanent() and _PyEval_DisableGIL()
|
||||
// must only be called while the GIL is enabled from a call to
|
||||
// _PyEval_EnableGILTransient().
|
||||
//
|
||||
// _PyEval_EnableGILTransient() returns 1 if it enabled the GIL, or 0 if the
|
||||
// GIL was already enabled, whether transiently or permanently. The caller will
|
||||
// hold the GIL upon return.
|
||||
//
|
||||
// _PyEval_EnableGILPermanent() returns 1 if it permanently enabled the GIL
|
||||
// (which must already be enabled), or 0 if it was already permanently
|
||||
// enabled. Once _PyEval_EnableGILPermanent() has been called once, all
|
||||
// subsequent calls to any of the three functions will be no-ops.
|
||||
//
|
||||
// _PyEval_DisableGIL() returns 1 if it disabled the GIL, or 0 if the GIL was
|
||||
// kept enabled because of another request, whether transient or permanent.
|
||||
//
|
||||
// All three functions must be called by an attached thread (this implies that
|
||||
// if the GIL is enabled, the current thread must hold it).
|
||||
extern int _PyEval_EnableGILTransient(PyThreadState *tstate);
|
||||
extern int _PyEval_EnableGILPermanent(PyThreadState *tstate);
|
||||
extern int _PyEval_DisableGIL(PyThreadState *state);
|
||||
|
||||
|
||||
static inline _Py_CODEUNIT *
|
||||
_PyEval_GetExecutableCode(PyThreadState *tstate, PyCodeObject *co)
|
||||
{
|
||||
_Py_CODEUNIT *bc = _PyCode_GetTLBCFast(tstate, co);
|
||||
if (bc != NULL) {
|
||||
return bc;
|
||||
}
|
||||
return _PyCode_GetTLBC(co);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
extern void _PyEval_DeactivateOpCache(void);
|
||||
|
||||
|
||||
/* --- _Py_EnterRecursiveCall() ----------------------------------------- */
|
||||
|
||||
// Return non-zero when the machine stack pointer has passed the soft stack
// limit but is still within 2 * _PyOS_STACK_MARGIN_BYTES of it — i.e. the
// caller should run the slower _Py_CheckRecursiveCall() path.  Direction of
// the comparison depends on which way the C stack grows on this platform.
static inline int _Py_MakeRecCheck(PyThreadState *tstate) {
    uintptr_t here_addr = _Py_get_machine_stack_pointer();
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    // Overflow if stack pointer is between soft limit and the base of the hardware stack.
    // If it is below the hardware stack base, assume that we have the wrong stack limits, and do nothing.
    // We could have the wrong stack limits because of limited platform support, or user-space threads.
#if _Py_STACK_GROWS_DOWN
    return here_addr < _tstate->c_stack_soft_limit && here_addr >= _tstate->c_stack_soft_limit - 2 * _PyOS_STACK_MARGIN_BYTES;
#else
    return here_addr > _tstate->c_stack_soft_limit && here_addr <= _tstate->c_stack_soft_limit + 2 * _PyOS_STACK_MARGIN_BYTES;
#endif
}
|
||||
|
||||
// Export for '_json' shared extension, used via _Py_EnterRecursiveCall()
|
||||
// static inline function.
|
||||
PyAPI_FUNC(int) _Py_CheckRecursiveCall(
|
||||
PyThreadState *tstate,
|
||||
const char *where);
|
||||
|
||||
int _Py_CheckRecursiveCallPy(
|
||||
PyThreadState *tstate);
|
||||
|
||||
// Enter a recursive C call on `tstate`.  Returns non-zero if the call must
// be aborted because the C stack is (close to) exhausted; in that case
// _Py_CheckRecursiveCall() has already set an exception whose message
// incorporates `where`.  Returns 0 when it is safe to proceed.
static inline int _Py_EnterRecursiveCallTstate(PyThreadState *tstate,
                                               const char *where) {
    if (!_Py_MakeRecCheck(tstate)) {
        // Fast path: stack pointer is comfortably inside the limits.
        return 0;
    }
    return _Py_CheckRecursiveCall(tstate, where);
}
|
||||
|
||||
// Convenience wrapper around _Py_EnterRecursiveCallTstate() that uses the
// currently attached thread state.
static inline int _Py_EnterRecursiveCall(const char *where) {
    return _Py_EnterRecursiveCallTstate(_PyThreadState_GET(), where);
}
|
||||
|
||||
// Leave a recursive call entered with _Py_EnterRecursiveCallTstate().
// The stack-pointer-based recursion check keeps no per-call state, so this
// is a no-op; it exists so enter/leave call sites stay paired.
static inline void _Py_LeaveRecursiveCallTstate(PyThreadState *tstate) {
    (void)tstate;  // unused: nothing to unwind
}
|
||||
|
||||
PyAPI_FUNC(void) _Py_InitializeRecursionLimits(PyThreadState *tstate);
|
||||
|
||||
// Return non-zero if the machine stack pointer has crossed tstate's soft
// stack limit.  Requires that the stack limits were initialized (see
// _Py_InitializeRecursionLimits()); the assert checks the hard limit as a
// proxy for that.
static inline int _Py_ReachedRecursionLimit(PyThreadState *tstate) {
    uintptr_t here_addr = _Py_get_machine_stack_pointer();
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    assert(_tstate->c_stack_hard_limit != 0);
#if _Py_STACK_GROWS_DOWN
    return here_addr <= _tstate->c_stack_soft_limit;
#else
    return here_addr >= _tstate->c_stack_soft_limit;
#endif
}
|
||||
|
||||
// Counterpart of _Py_EnterRecursiveCall().  A no-op: the recursion check is
// based on the machine stack pointer, so there is no counter to decrement.
static inline void _Py_LeaveRecursiveCall(void) {
}
|
||||
|
||||
extern _PyInterpreterFrame* _PyEval_GetFrame(void);
|
||||
|
||||
extern PyObject * _PyEval_GetGlobalsFromRunningMain(PyThreadState *);
|
||||
extern int _PyEval_EnsureBuiltins(
|
||||
PyThreadState *,
|
||||
PyObject *,
|
||||
PyObject **p_builtins);
|
||||
extern int _PyEval_EnsureBuiltinsWithModule(
|
||||
PyThreadState *,
|
||||
PyObject *,
|
||||
PyObject **p_builtins);
|
||||
|
||||
PyAPI_FUNC(PyObject *)_Py_MakeCoro(PyFunctionObject *func);
|
||||
|
||||
/* Handle signals, pending calls, GIL drop request
|
||||
and asynchronous exception */
|
||||
PyAPI_FUNC(int) _Py_HandlePending(PyThreadState *tstate);
|
||||
|
||||
extern PyObject * _PyEval_GetFrameLocals(void);
|
||||
|
||||
typedef PyObject *(*conversion_func)(PyObject *);
|
||||
|
||||
PyAPI_DATA(const binaryfunc) _PyEval_BinaryOps[];
|
||||
PyAPI_DATA(const conversion_func) _PyEval_ConversionFuncs[];
|
||||
|
||||
typedef struct _special_method {
|
||||
PyObject *name;
|
||||
const char *error;
|
||||
const char *error_suggestion; // improved optional suggestion
|
||||
} _Py_SpecialMethod;
|
||||
|
||||
PyAPI_DATA(const _Py_SpecialMethod) _Py_SpecialMethods[];
|
||||
PyAPI_DATA(const size_t) _Py_FunctionAttributeOffsets[];
|
||||
|
||||
PyAPI_FUNC(int) _PyEval_CheckExceptStarTypeValid(PyThreadState *tstate, PyObject* right);
|
||||
PyAPI_FUNC(int) _PyEval_CheckExceptTypeValid(PyThreadState *tstate, PyObject* right);
|
||||
PyAPI_FUNC(int) _PyEval_ExceptionGroupMatch(_PyInterpreterFrame *, PyObject* exc_value, PyObject *match_type, PyObject **match, PyObject **rest);
|
||||
PyAPI_FUNC(void) _PyEval_FormatAwaitableError(PyThreadState *tstate, PyTypeObject *type, int oparg);
|
||||
PyAPI_FUNC(void) _PyEval_FormatExcCheckArg(PyThreadState *tstate, PyObject *exc, const char *format_str, PyObject *obj);
|
||||
PyAPI_FUNC(void) _PyEval_FormatExcUnbound(PyThreadState *tstate, PyCodeObject *co, int oparg);
|
||||
PyAPI_FUNC(void) _PyEval_FormatKwargsError(PyThreadState *tstate, PyObject *func, PyObject *kwargs);
|
||||
PyAPI_FUNC(PyObject *) _PyEval_ImportFrom(PyThreadState *, PyObject *, PyObject *);
|
||||
PyAPI_FUNC(PyObject *) _PyEval_ImportName(PyThreadState *, _PyInterpreterFrame *, PyObject *, PyObject *, PyObject *);
|
||||
PyAPI_FUNC(PyObject *)_PyEval_MatchClass(PyThreadState *tstate, PyObject *subject, PyObject *type, Py_ssize_t nargs, PyObject *kwargs);
|
||||
PyAPI_FUNC(PyObject *)_PyEval_MatchKeys(PyThreadState *tstate, PyObject *map, PyObject *keys);
|
||||
PyAPI_FUNC(void) _PyEval_MonitorRaise(PyThreadState *tstate, _PyInterpreterFrame *frame, _Py_CODEUNIT *instr);
|
||||
PyAPI_FUNC(bool) _PyEval_NoToolsForUnwind(PyThreadState *tstate);
|
||||
PyAPI_FUNC(int) _PyEval_UnpackIterableStackRef(PyThreadState *tstate, PyObject *v, int argcnt, int argcntafter, _PyStackRef *sp);
|
||||
PyAPI_FUNC(void) _PyEval_FrameClearAndPop(PyThreadState *tstate, _PyInterpreterFrame *frame);
|
||||
PyAPI_FUNC(PyObject **) _PyObjectArray_FromStackRefArray(_PyStackRef *input, Py_ssize_t nargs, PyObject **scratch);
|
||||
|
||||
PyAPI_FUNC(void) _PyObjectArray_Free(PyObject **array, PyObject **scratch);
|
||||
|
||||
PyAPI_FUNC(PyObject *) _PyEval_GetANext(PyObject *aiter);
|
||||
PyAPI_FUNC(void) _PyEval_LoadGlobalStackRef(PyObject *globals, PyObject *builtins, PyObject *name, _PyStackRef *writeto);
|
||||
PyAPI_FUNC(PyObject *) _PyEval_GetAwaitable(PyObject *iterable, int oparg);
|
||||
PyAPI_FUNC(PyObject *) _PyEval_LoadName(PyThreadState *tstate, _PyInterpreterFrame *frame, PyObject *name);
|
||||
PyAPI_FUNC(int)
|
||||
_Py_Check_ArgsIterable(PyThreadState *tstate, PyObject *func, PyObject *args);
|
||||
|
||||
/*
|
||||
* Indicate whether a special method of given 'oparg' can use the (improved)
|
||||
* alternative error message instead. Only methods loaded by LOAD_SPECIAL
|
||||
* support alternative error messages.
|
||||
*
|
||||
* Symbol is exported for the JIT (see discussion on GH-132218).
|
||||
*/
|
||||
PyAPI_FUNC(int)
|
||||
_PyEval_SpecialMethodCanSuggest(PyObject *self, int oparg);
|
||||
|
||||
/* Bits that can be set in PyThreadState.eval_breaker */
|
||||
#define _PY_GIL_DROP_REQUEST_BIT (1U << 0)
|
||||
#define _PY_SIGNALS_PENDING_BIT (1U << 1)
|
||||
#define _PY_CALLS_TO_DO_BIT (1U << 2)
|
||||
#define _PY_ASYNC_EXCEPTION_BIT (1U << 3)
|
||||
#define _PY_GC_SCHEDULED_BIT (1U << 4)
|
||||
#define _PY_EVAL_PLEASE_STOP_BIT (1U << 5)
|
||||
#define _PY_EVAL_EXPLICIT_MERGE_BIT (1U << 6)
|
||||
#define _PY_EVAL_JIT_INVALIDATE_COLD_BIT (1U << 7)
|
||||
|
||||
/* Reserve a few bits for future use */
|
||||
#define _PY_EVAL_EVENTS_BITS 8
|
||||
#define _PY_EVAL_EVENTS_MASK ((1 << _PY_EVAL_EVENTS_BITS)-1)
|
||||
|
||||
// Atomically set `bit` in tstate->eval_breaker, asking the eval loop of
// that thread to act on the corresponding request (see the _PY_*_BIT
// definitions above) at its next eval-breaker check.
static inline void
_Py_set_eval_breaker_bit(PyThreadState *tstate, uintptr_t bit)
{
    _Py_atomic_or_uintptr(&tstate->eval_breaker, bit);
}
|
||||
|
||||
// Atomically clear `bit` in tstate->eval_breaker, withdrawing a request
// previously made with _Py_set_eval_breaker_bit().
static inline void
_Py_unset_eval_breaker_bit(PyThreadState *tstate, uintptr_t bit)
{
    _Py_atomic_and_uintptr(&tstate->eval_breaker, ~bit);
}
|
||||
|
||||
static inline int
|
||||
_Py_eval_breaker_bit_is_set(PyThreadState *tstate, uintptr_t bit)
|
||||
{
|
||||
uintptr_t b = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);
|
||||
return (b & bit) != 0;
|
||||
}
|
||||
|
||||
// Free-threaded builds use these functions to set or unset a bit on all
|
||||
// threads in the given interpreter.
|
||||
void _Py_set_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit);
|
||||
void _Py_unset_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit);
|
||||
|
||||
PyAPI_FUNC(_PyStackRef) _PyFloat_FromDouble_ConsumeInputs(_PyStackRef left, _PyStackRef right, double value);
|
||||
|
||||
#ifndef Py_SUPPORTS_REMOTE_DEBUG
|
||||
#if defined(__APPLE__)
|
||||
#include <TargetConditionals.h>
|
||||
# if !defined(TARGET_OS_OSX)
|
||||
// Older macOS SDKs do not define TARGET_OS_OSX
|
||||
# define TARGET_OS_OSX 1
|
||||
# endif
|
||||
#endif
|
||||
#if ((defined(__APPLE__) && TARGET_OS_OSX) || defined(MS_WINDOWS) || (defined(__linux__) && HAVE_PROCESS_VM_READV))
|
||||
# define Py_SUPPORTS_REMOTE_DEBUG 1
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(Py_REMOTE_DEBUG) && defined(Py_SUPPORTS_REMOTE_DEBUG)
|
||||
extern int _PyRunRemoteDebugger(PyThreadState *tstate);
|
||||
#endif
|
||||
|
||||
/* Special methods used by LOAD_SPECIAL */
|
||||
#define SPECIAL___ENTER__ 0
|
||||
#define SPECIAL___EXIT__ 1
|
||||
#define SPECIAL___AENTER__ 2
|
||||
#define SPECIAL___AEXIT__ 3
|
||||
#define SPECIAL_MAX 3
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CEVAL_H */
|
||||
48
extern/include/python/internal/pycore_ceval_state.h
vendored
Normal file
48
extern/include/python/internal/pycore_ceval_state.h
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
#ifndef Py_INTERNAL_CEVAL_STATE_H
|
||||
#define Py_INTERNAL_CEVAL_STATE_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_gil.h" // struct _gil_runtime_state
|
||||
|
||||
|
||||
#define MAXPENDINGCALLS PENDINGCALLSARRAYSIZE
|
||||
/* For interpreter-level pending calls, we want to avoid spending too
|
||||
much time on pending calls in any one thread, so we apply a limit. */
|
||||
#if MAXPENDINGCALLS > 100
|
||||
# define MAXPENDINGCALLSLOOP 100
|
||||
#else
|
||||
# define MAXPENDINGCALLSLOOP MAXPENDINGCALLS
|
||||
#endif
|
||||
|
||||
/* We keep the number small to preserve as much compatibility
|
||||
as possible with earlier versions. */
|
||||
#define MAXPENDINGCALLS_MAIN 32
|
||||
/* For the main thread, we want to make sure all pending calls are
|
||||
run at once, for the sake of prompt signal handling. This is
|
||||
unlikely to cause any problems since there should be very few
|
||||
pending calls for the main thread. */
|
||||
#define MAXPENDINGCALLSLOOP_MAIN 0
|
||||
|
||||
|
||||
#ifdef PY_HAVE_PERF_TRAMPOLINE
|
||||
# define _PyEval_RUNTIME_PERF_INIT \
|
||||
{ \
|
||||
.status = PERF_STATUS_NO_INIT, \
|
||||
.extra_code_index = -1, \
|
||||
.persist_after_fork = 0, \
|
||||
}
|
||||
#else
|
||||
# define _PyEval_RUNTIME_PERF_INIT {0}
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CEVAL_STATE_H */
|
||||
671
extern/include/python/internal/pycore_code.h
vendored
Normal file
671
extern/include/python/internal/pycore_code.h
vendored
Normal file
@@ -0,0 +1,671 @@
|
||||
#ifndef Py_INTERNAL_CODE_H
|
||||
#define Py_INTERNAL_CODE_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_backoff.h" // _Py_BackoffCounter
|
||||
#include "pycore_structs.h" // _Py_CODEUNIT
|
||||
#include "pycore_tstate.h" // _PyThreadStateImpl
|
||||
|
||||
|
||||
#define _PyCode_CODE(CO) _Py_RVALUE((_Py_CODEUNIT *)(CO)->co_code_adaptive)
|
||||
#define _PyCode_NBYTES(CO) (Py_SIZE(CO) * (Py_ssize_t)sizeof(_Py_CODEUNIT))
|
||||
|
||||
|
||||
/* These macros only remain defined for compatibility. */
|
||||
#define _Py_OPCODE(word) ((word).op.code)
|
||||
#define _Py_OPARG(word) ((word).op.arg)
|
||||
|
||||
static inline _Py_CODEUNIT
|
||||
_py_make_codeunit(uint8_t opcode, uint8_t oparg)
|
||||
{
|
||||
// No designated initialisers because of C++ compat
|
||||
_Py_CODEUNIT word;
|
||||
word.op.code = opcode;
|
||||
word.op.arg = oparg;
|
||||
return word;
|
||||
}
|
||||
|
||||
// Overwrite the opcode of `word` in place, leaving its oparg untouched.
static inline void
_py_set_opcode(_Py_CODEUNIT *word, uint8_t opcode)
{
    word->op.code = opcode;
}
|
||||
|
||||
#define _Py_MAKE_CODEUNIT(opcode, oparg) _py_make_codeunit((opcode), (oparg))
|
||||
#define _Py_SET_OPCODE(word, opcode) _py_set_opcode(&(word), (opcode))
|
||||
|
||||
|
||||
// We hide some of the newer PyCodeObject fields behind macros.
|
||||
// This helps with backporting certain changes to 3.12.
|
||||
#define _PyCode_HAS_EXECUTORS(CODE) \
|
||||
(CODE->co_executors != NULL)
|
||||
#define _PyCode_HAS_INSTRUMENTATION(CODE) \
|
||||
(CODE->_co_instrumentation_version > 0)
|
||||
|
||||
|
||||
extern PyStatus _PyCode_Init(PyInterpreterState *interp);
|
||||
extern void _PyCode_Fini(PyInterpreterState *interp);
|
||||
|
||||
|
||||
/* PEP 659
|
||||
* Specialization and quickening structs and helper functions
|
||||
*/
|
||||
|
||||
|
||||
// Inline caches. If you change the number of cache entries for an instruction,
|
||||
// you must *also* update the number of cache entries in Lib/opcode.py and bump
|
||||
// the magic number in Lib/importlib/_bootstrap_external.py!
|
||||
|
||||
#define CACHE_ENTRIES(cache) (sizeof(cache)/sizeof(_Py_CODEUNIT))
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
uint16_t module_keys_version;
|
||||
uint16_t builtin_keys_version;
|
||||
uint16_t index;
|
||||
} _PyLoadGlobalCache;
|
||||
|
||||
#define INLINE_CACHE_ENTRIES_LOAD_GLOBAL CACHE_ENTRIES(_PyLoadGlobalCache)
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
uint16_t external_cache[4];
|
||||
} _PyBinaryOpCache;
|
||||
|
||||
#define INLINE_CACHE_ENTRIES_BINARY_OP CACHE_ENTRIES(_PyBinaryOpCache)
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
} _PyUnpackSequenceCache;
|
||||
|
||||
#define INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE \
|
||||
CACHE_ENTRIES(_PyUnpackSequenceCache)
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
} _PyCompareOpCache;
|
||||
|
||||
#define INLINE_CACHE_ENTRIES_COMPARE_OP CACHE_ENTRIES(_PyCompareOpCache)
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
} _PySuperAttrCache;
|
||||
|
||||
#define INLINE_CACHE_ENTRIES_LOAD_SUPER_ATTR CACHE_ENTRIES(_PySuperAttrCache)
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
uint16_t version[2];
|
||||
uint16_t index;
|
||||
} _PyAttrCache;
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
uint16_t type_version[2];
|
||||
union {
|
||||
uint16_t keys_version[2];
|
||||
uint16_t dict_offset;
|
||||
};
|
||||
uint16_t descr[4];
|
||||
} _PyLoadMethodCache;
|
||||
|
||||
|
||||
// MUST be the max(_PyAttrCache, _PyLoadMethodCache)
|
||||
#define INLINE_CACHE_ENTRIES_LOAD_ATTR CACHE_ENTRIES(_PyLoadMethodCache)
|
||||
|
||||
#define INLINE_CACHE_ENTRIES_STORE_ATTR CACHE_ENTRIES(_PyAttrCache)
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
uint16_t func_version[2];
|
||||
} _PyCallCache;
|
||||
|
||||
#define INLINE_CACHE_ENTRIES_CALL CACHE_ENTRIES(_PyCallCache)
|
||||
#define INLINE_CACHE_ENTRIES_CALL_KW CACHE_ENTRIES(_PyCallCache)
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
} _PyStoreSubscrCache;
|
||||
|
||||
#define INLINE_CACHE_ENTRIES_STORE_SUBSCR CACHE_ENTRIES(_PyStoreSubscrCache)
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
} _PyForIterCache;
|
||||
|
||||
#define INLINE_CACHE_ENTRIES_FOR_ITER CACHE_ENTRIES(_PyForIterCache)
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
} _PySendCache;
|
||||
|
||||
#define INLINE_CACHE_ENTRIES_SEND CACHE_ENTRIES(_PySendCache)
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
uint16_t version[2];
|
||||
} _PyToBoolCache;
|
||||
|
||||
#define INLINE_CACHE_ENTRIES_TO_BOOL CACHE_ENTRIES(_PyToBoolCache)
|
||||
|
||||
typedef struct {
|
||||
_Py_BackoffCounter counter;
|
||||
} _PyContainsOpCache;
|
||||
|
||||
#define INLINE_CACHE_ENTRIES_CONTAINS_OP CACHE_ENTRIES(_PyContainsOpCache)
|
||||
|
||||
/* "Locals plus" for a code object is the set of locals + cell vars +
|
||||
* free vars. This relates to variable names as well as offsets into
|
||||
* the "fast locals" storage array of execution frames. The compiler
|
||||
* builds the list of names, their offsets, and the corresponding
|
||||
* kind of local.
|
||||
*
|
||||
* Those kinds represent the source of the initial value and the
|
||||
* variable's scope (as related to closures). A "local" is an
|
||||
* argument or other variable defined in the current scope. A "free"
|
||||
* variable is one that is defined in an outer scope and comes from
|
||||
* the function's closure. A "cell" variable is a local that escapes
|
||||
* into an inner function as part of a closure, and thus must be
|
||||
* wrapped in a cell. Any "local" can also be a "cell", but the
|
||||
* "free" kind is mutually exclusive with both.
|
||||
*/
|
||||
|
||||
// Note that these all fit within a byte, as do combinations.
|
||||
#define CO_FAST_ARG_POS (0x02) // pos-only, pos-or-kw, varargs
|
||||
#define CO_FAST_ARG_KW (0x04) // kw-only, pos-or-kw, varkwargs
|
||||
#define CO_FAST_ARG_VAR (0x08) // varargs, varkwargs
|
||||
#define CO_FAST_ARG (CO_FAST_ARG_POS | CO_FAST_ARG_KW | CO_FAST_ARG_VAR)
|
||||
#define CO_FAST_HIDDEN (0x10)
|
||||
#define CO_FAST_LOCAL (0x20)
|
||||
#define CO_FAST_CELL (0x40)
|
||||
#define CO_FAST_FREE (0x80)
|
||||
|
||||
typedef unsigned char _PyLocals_Kind;
|
||||
|
||||
// Read the _PyLocals_Kind byte at index `i` of `kinds`, a bytes object
// holding one kind byte per "locals plus" variable (CO_FAST_* flags).
static inline _PyLocals_Kind
_PyLocals_GetKind(PyObject *kinds, int i)
{
    assert(PyBytes_Check(kinds));
    assert(0 <= i && i < PyBytes_GET_SIZE(kinds));
    char *ptr = PyBytes_AS_STRING(kinds);
    return (_PyLocals_Kind)(ptr[i]);
}
|
||||
|
||||
// Store `kind` at index `i` of `kinds` (a bytes object of CO_FAST_* kind
// bytes).  NOTE(review): this mutates the interior of a bytes object, so it
// is presumably only called while the object is under construction —
// confirm at the call sites.
static inline void
_PyLocals_SetKind(PyObject *kinds, int i, _PyLocals_Kind kind)
{
    assert(PyBytes_Check(kinds));
    assert(0 <= i && i < PyBytes_GET_SIZE(kinds));
    char *ptr = PyBytes_AS_STRING(kinds);
    ptr[i] = (char) kind;
}
|
||||
|
||||
|
||||
struct _PyCodeConstructor {
|
||||
/* metadata */
|
||||
PyObject *filename;
|
||||
PyObject *name;
|
||||
PyObject *qualname;
|
||||
int flags;
|
||||
|
||||
/* the code */
|
||||
PyObject *code;
|
||||
int firstlineno;
|
||||
PyObject *linetable;
|
||||
|
||||
/* used by the code */
|
||||
PyObject *consts;
|
||||
PyObject *names;
|
||||
|
||||
/* mapping frame offsets to information */
|
||||
PyObject *localsplusnames; // Tuple of strings
|
||||
PyObject *localspluskinds; // Bytes object, one byte per variable
|
||||
|
||||
/* args (within varnames) */
|
||||
int argcount;
|
||||
int posonlyargcount;
|
||||
// XXX Replace argcount with posorkwargcount (argcount - posonlyargcount).
|
||||
int kwonlyargcount;
|
||||
|
||||
/* needed to create the frame */
|
||||
int stacksize;
|
||||
|
||||
/* used by the eval loop */
|
||||
PyObject *exceptiontable;
|
||||
};
|
||||
|
||||
// Using an "arguments struct" like this is helpful for maintainability
|
||||
// in a case such as this with many parameters. It does bear a risk:
|
||||
// if the struct changes and callers are not updated properly then the
|
||||
// compiler will not catch problems (like a missing argument). This can
|
||||
// cause hard-to-debug problems. The risk is mitigated by the use of
|
||||
// check_code() in codeobject.c. However, we may decide to switch
|
||||
// back to a regular function signature. Regardless, this approach
|
||||
// wouldn't be appropriate if this weren't a strictly internal API.
|
||||
// (See the comments in https://github.com/python/cpython/pull/26258.)
|
||||
extern int _PyCode_Validate(struct _PyCodeConstructor *);
|
||||
extern PyCodeObject* _PyCode_New(struct _PyCodeConstructor *);
|
||||
|
||||
|
||||
/* Private API */
|
||||
|
||||
/* Getters for internal PyCodeObject data. */
|
||||
extern PyObject* _PyCode_GetVarnames(PyCodeObject *);
|
||||
extern PyObject* _PyCode_GetCellvars(PyCodeObject *);
|
||||
extern PyObject* _PyCode_GetFreevars(PyCodeObject *);
|
||||
extern PyObject* _PyCode_GetCode(PyCodeObject *);
|
||||
|
||||
/** API for initializing the line number tables. */
|
||||
extern int _PyCode_InitAddressRange(PyCodeObject* co, PyCodeAddressRange *bounds);
|
||||
|
||||
/** Out of process API for initializing the location table. */
|
||||
extern void _PyLineTable_InitAddressRange(
|
||||
const char *linetable,
|
||||
Py_ssize_t length,
|
||||
int firstlineno,
|
||||
PyCodeAddressRange *range);
|
||||
|
||||
/** API for traversing the line number table. */
|
||||
extern int _PyLineTable_NextAddressRange(PyCodeAddressRange *range);
|
||||
extern int _PyLineTable_PreviousAddressRange(PyCodeAddressRange *range);
|
||||
|
||||
// Similar to PyCode_Addr2Line(), but return -1 if the code object is invalid
|
||||
// and can be called without an attached tstate. Used by dump_frame() in
|
||||
// Python/traceback.c. The function uses heuristics to detect freed memory,
|
||||
// it's not 100% reliable.
|
||||
extern int _PyCode_SafeAddr2Line(PyCodeObject *co, int addr);
|
||||
|
||||
|
||||
/** API for executors */
|
||||
extern void _PyCode_Clear_Executors(PyCodeObject *code);
|
||||
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
// gh-115999 tracks progress on addressing this.
|
||||
#define ENABLE_SPECIALIZATION 0
|
||||
// Use this to enable specialization families once they are thread-safe. All
|
||||
// uses will be replaced with ENABLE_SPECIALIZATION once all families are
|
||||
// thread-safe.
|
||||
#define ENABLE_SPECIALIZATION_FT 1
|
||||
#else
|
||||
#define ENABLE_SPECIALIZATION 1
|
||||
#define ENABLE_SPECIALIZATION_FT ENABLE_SPECIALIZATION
|
||||
#endif
|
||||
|
||||
/* Specialization functions */
|
||||
|
||||
extern void _Py_Specialize_LoadSuperAttr(_PyStackRef global_super, _PyStackRef cls,
|
||||
_Py_CODEUNIT *instr, int load_method);
|
||||
extern void _Py_Specialize_LoadAttr(_PyStackRef owner, _Py_CODEUNIT *instr,
|
||||
PyObject *name);
|
||||
extern void _Py_Specialize_StoreAttr(_PyStackRef owner, _Py_CODEUNIT *instr,
|
||||
PyObject *name);
|
||||
extern void _Py_Specialize_LoadGlobal(PyObject *globals, PyObject *builtins,
|
||||
_Py_CODEUNIT *instr, PyObject *name);
|
||||
extern void _Py_Specialize_StoreSubscr(_PyStackRef container, _PyStackRef sub,
|
||||
_Py_CODEUNIT *instr);
|
||||
extern void _Py_Specialize_Call(_PyStackRef callable, _Py_CODEUNIT *instr,
|
||||
int nargs);
|
||||
extern void _Py_Specialize_CallKw(_PyStackRef callable, _Py_CODEUNIT *instr,
|
||||
int nargs);
|
||||
extern void _Py_Specialize_BinaryOp(_PyStackRef lhs, _PyStackRef rhs, _Py_CODEUNIT *instr,
|
||||
int oparg, _PyStackRef *locals);
|
||||
extern void _Py_Specialize_CompareOp(_PyStackRef lhs, _PyStackRef rhs,
|
||||
_Py_CODEUNIT *instr, int oparg);
|
||||
extern void _Py_Specialize_UnpackSequence(_PyStackRef seq, _Py_CODEUNIT *instr,
|
||||
int oparg);
|
||||
extern void _Py_Specialize_ForIter(_PyStackRef iter, _Py_CODEUNIT *instr, int oparg);
|
||||
extern void _Py_Specialize_Send(_PyStackRef receiver, _Py_CODEUNIT *instr);
|
||||
extern void _Py_Specialize_ToBool(_PyStackRef value, _Py_CODEUNIT *instr);
|
||||
extern void _Py_Specialize_ContainsOp(_PyStackRef value, _Py_CODEUNIT *instr);
|
||||
extern void _Py_GatherStats_GetIter(_PyStackRef iterable);
|
||||
|
||||
// Utility functions for reading/writing 32/64-bit values in the inline caches.
|
||||
// Great care should be taken to ensure that these functions remain correct and
|
||||
// performant! They should compile to just "move" instructions on all supported
|
||||
// compilers and platforms.
|
||||
|
||||
// We use memcpy to let the C compiler handle unaligned accesses and endianness
|
||||
// issues for us. It also seems to produce better code than manual copying for
|
||||
// most compilers (see https://blog.regehr.org/archives/959 for more info).
|
||||
|
||||
// Store a 32-bit value into an inline cache at `p`.  memcpy lets the
// compiler handle unaligned access and endianness; on supported compilers
// it lowers to a single store (see the note above the cache helpers).
static inline void
write_u32(uint16_t *p, uint32_t val)
{
    memcpy(p, &val, sizeof(uint32_t));
}
|
||||
|
||||
// Store a 64-bit value into an inline cache at `p` (memcpy handles
// alignment and endianness; compiles to a plain store on mainstream
// targets).
static inline void
write_u64(uint16_t *p, uint64_t val)
{
    memcpy(p, &val, sizeof(uint64_t));
}
|
||||
|
||||
// Store a pointer value into an inline cache at `p` (memcpy avoids
// alignment issues on platforms where `p` is only 2-byte aligned).
static inline void
write_ptr(uint16_t *p, void *val)
{
    memcpy(p, &val, sizeof(void *));
}
|
||||
|
||||
// Load the 16-bit inline-cache entry at `p`.  `p` is correctly aligned for
// uint16_t, so a direct read suffices.
static inline uint16_t
read_u16(uint16_t *p)
{
    return p[0];
}
|
||||
|
||||
// Load a 32-bit value from an inline cache at `p` (memcpy handles the
// potentially misaligned access; lowers to a single load on supported
// compilers).
static inline uint32_t
read_u32(uint16_t *p)
{
    uint32_t result;
    memcpy(&result, p, sizeof result);
    return result;
}
|
||||
|
||||
// Load a 64-bit value from an inline cache at `p` (memcpy handles the
// potentially misaligned access).
static inline uint64_t
read_u64(uint16_t *p)
{
    uint64_t result;
    memcpy(&result, p, sizeof result);
    return result;
}
|
||||
|
||||
// Load a PyObject pointer previously stored in an inline cache with
// write_ptr() (memcpy handles the potentially misaligned access).
static inline PyObject *
read_obj(uint16_t *p)
{
    PyObject *val;
    memcpy(&val, p, sizeof(val));
    return val;
}
|
||||
|
||||
/* See InternalDocs/exception_handling.md for details.
|
||||
*/
|
||||
// Decode one varint starting at `p`: 6 payload bits per byte, most
// significant group first, with bit 0x40 marking "more bytes follow".
// Stores the decoded value in *result and returns the position just past
// the last byte consumed.
static inline unsigned char *
parse_varint(unsigned char *p, int *result) {
    int value = *p & 63;
    while (*p & 64) {
        p++;
        value = (value << 6) | (*p & 63);
    }
    *result = value;
    return p + 1;
}
|
||||
|
||||
// Encode `val` as a varint at `ptr`: 6 payload bits per byte, least
// significant group first, with bit 0x40 set on every byte except the
// last.  Returns the number of bytes written.
static inline int
write_varint(uint8_t *ptr, unsigned int val)
{
    int count = 1;
    for (; val >= 64; val >>= 6, count++) {
        *ptr++ = (uint8_t)(64 | (val & 63));
    }
    *ptr = (uint8_t)val;
    return count;
}
|
||||
|
||||
// Encode a signed value as a varint by folding the sign into the low bit
// (zig-zag style), so small magnitudes stay short.  Returns the number of
// bytes written.
static inline int
write_signed_varint(uint8_t *ptr, int val)
{
    unsigned int zigzag;
    if (val >= 0) {
        zigzag = (unsigned int)val << 1;
    }
    else {
        // Negate in unsigned arithmetic: (unsigned int)(-val) would be
        // undefined behavior for INT_MIN.
        zigzag = ((0 - (unsigned int)val) << 1) | 1;
    }
    return write_varint(ptr, zigzag);
}
|
||||
|
||||
// Write the one-byte header of a location-table entry: bit 7 set, the
// 4-bit entry `code` in bits 3-6, and (length - 1) in the low 3 bits.
// Returns the number of bytes written (always 1).
static inline int
write_location_entry_start(uint8_t *ptr, int code, int length)
{
    assert((code & 15) == code);
    uint8_t header = 128 | (uint8_t)(code << 3) | (uint8_t)(length - 1);
    *ptr = header;
    return 1;
}
|
||||
|
||||
|
||||
/** Counters
|
||||
* The first 16-bit value in each inline cache is a counter.
|
||||
*
|
||||
* When counting executions until the next specialization attempt,
|
||||
* exponential backoff is used to reduce the number of specialization failures.
|
||||
* See pycore_backoff.h for more details.
|
||||
* On a specialization failure, the backoff counter is restarted.
|
||||
*/
|
||||
|
||||
// A value of 1 means that we attempt to specialize the *second* time each
|
||||
// instruction is executed. Executing twice is a much better indicator of
|
||||
// "hotness" than executing once, but additional warmup delays only prevent
|
||||
// specialization. Most types stabilize by the second execution, too:
|
||||
#define ADAPTIVE_WARMUP_VALUE 1
|
||||
#define ADAPTIVE_WARMUP_BACKOFF 1
|
||||
|
||||
// A value of 52 means that we attempt to re-specialize after 53 misses (a prime
|
||||
// number, useful for avoiding artifacts if every nth value is a different type
|
||||
// or something). Setting the backoff to 0 means that the counter is reset to
|
||||
// the same state as a warming-up instruction (value == 1, backoff == 1) after
|
||||
// deoptimization. This isn't strictly necessary, but it is bit easier to reason
|
||||
// about when thinking about the opcode transitions as a state machine:
|
||||
#define ADAPTIVE_COOLDOWN_VALUE 52
|
||||
#define ADAPTIVE_COOLDOWN_BACKOFF 0
|
||||
|
||||
// Can't assert this in pycore_backoff.h because of header order dependencies
|
||||
#if SIDE_EXIT_INITIAL_VALUE <= ADAPTIVE_COOLDOWN_VALUE
|
||||
# error "Cold exit value should be larger than adaptive cooldown value"
|
||||
#endif
|
||||
|
||||
// Build a backoff counter from a raw (value, backoff) pair; thin wrapper
// around make_backoff_counter() (see pycore_backoff.h).
static inline _Py_BackoffCounter
adaptive_counter_bits(uint16_t value, uint16_t backoff) {
    return make_backoff_counter(value, backoff);
}
|
||||
|
||||
// Counter for a freshly (re)set instruction: specialization is attempted
// after ADAPTIVE_WARMUP_VALUE executions (see the comment above that
// constant).
static inline _Py_BackoffCounter
adaptive_counter_warmup(void) {
    return adaptive_counter_bits(ADAPTIVE_WARMUP_VALUE,
                                 ADAPTIVE_WARMUP_BACKOFF);
}
|
||||
|
||||
// Counter installed after a successful specialization: re-specialization
// is attempted only after ADAPTIVE_COOLDOWN_VALUE misses (see the comment
// above that constant).
static inline _Py_BackoffCounter
adaptive_counter_cooldown(void) {
    return adaptive_counter_bits(ADAPTIVE_COOLDOWN_VALUE,
                                 ADAPTIVE_COOLDOWN_BACKOFF);
}
|
||||
|
||||
// Restart the counter after a specialization failure, applying exponential
// backoff (see restart_backoff_counter() in pycore_backoff.h).
static inline _Py_BackoffCounter
adaptive_counter_backoff(_Py_BackoffCounter counter) {
    return restart_backoff_counter(counter);
}
|
||||
|
||||
/* Specialization Extensions */
|
||||
|
||||
/* callbacks for an external specialization */
|
||||
typedef int (*binaryopguardfunc)(PyObject *lhs, PyObject *rhs);
|
||||
typedef PyObject *(*binaryopactionfunc)(PyObject *lhs, PyObject *rhs);
|
||||
|
||||
typedef struct {
|
||||
int oparg;
|
||||
binaryopguardfunc guard;
|
||||
binaryopactionfunc action;
|
||||
} _PyBinaryOpSpecializationDescr;
|
||||
|
||||
/* Comparison bit masks. */
|
||||
|
||||
/* Note this evaluates its arguments twice each */
|
||||
#define COMPARISON_BIT(x, y) (1 << (2 * ((x) >= (y)) + ((x) <= (y))))
|
||||
|
||||
/*
|
||||
* The following bits are chosen so that the value of
|
||||
* COMPARSION_BIT(left, right)
|
||||
* masked by the values below will be non-zero if the
|
||||
* comparison is true, and zero if it is false */
|
||||
|
||||
/* This is for values that are unordered, ie. NaN, not types that are unordered, e.g. sets */
|
||||
#define COMPARISON_UNORDERED 1
|
||||
|
||||
#define COMPARISON_LESS_THAN 2
|
||||
#define COMPARISON_GREATER_THAN 4
|
||||
#define COMPARISON_EQUALS 8
|
||||
|
||||
#define COMPARISON_NOT_EQUALS (COMPARISON_UNORDERED | COMPARISON_LESS_THAN | COMPARISON_GREATER_THAN)
|
||||
|
||||
extern int _Py_Instrument(PyCodeObject *co, PyInterpreterState *interp);
|
||||
|
||||
extern _Py_CODEUNIT _Py_GetBaseCodeUnit(PyCodeObject *code, int offset);
|
||||
|
||||
extern int _PyInstruction_GetLength(PyCodeObject *code, int offset);
|
||||
|
||||
extern PyObject *_PyInstrumentation_BranchesIterator(PyCodeObject *code);
|
||||
|
||||
struct _PyCode8 _PyCode_DEF(8);
|
||||
|
||||
PyAPI_DATA(const struct _PyCode8) _Py_InitCleanup;
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
|
||||
// Load co->co_tlbc (the per-thread bytecode array) with acquire ordering,
// so entries published by another thread with a matching release store are
// fully visible to the caller.
static inline _PyCodeArray *
_PyCode_GetTLBCArray(PyCodeObject *co)
{
    return _Py_STATIC_CAST(_PyCodeArray *,
                           _Py_atomic_load_ptr_acquire(&co->co_tlbc));
}
|
||||
|
||||
// Return a pointer to the thread-local bytecode for the current thread, if it
|
||||
// exists.
|
||||
// Return a pointer to the thread-local bytecode for the current thread, if it
// exists; NULL when this thread's copy has not been created yet (callers
// then fall back to _PyCode_GetTLBC(), which allocates it).
static inline _Py_CODEUNIT *
_PyCode_GetTLBCFast(PyThreadState *tstate, PyCodeObject *co)
{
    _PyCodeArray *code = _PyCode_GetTLBCArray(co);
    // Each thread reserves an index into the array (_Py_ReserveTLBCIndex).
    int32_t idx = ((_PyThreadStateImpl*) tstate)->tlbc_index;
    if (idx < code->size && code->entries[idx] != NULL) {
        return (_Py_CODEUNIT *) code->entries[idx];
    }
    return NULL;
}
|
||||
|
||||
// Return a pointer to the thread-local bytecode for the current thread,
|
||||
// creating it if necessary.
|
||||
extern _Py_CODEUNIT *_PyCode_GetTLBC(PyCodeObject *co);
|
||||
|
||||
// Reserve an index for the current thread into thread-local bytecode
|
||||
// arrays
|
||||
//
|
||||
// Returns the reserved index or -1 on error.
|
||||
extern int32_t _Py_ReserveTLBCIndex(PyInterpreterState *interp);
|
||||
|
||||
// Release the current thread's index into thread-local bytecode arrays
|
||||
extern void _Py_ClearTLBCIndex(_PyThreadStateImpl *tstate);
|
||||
|
||||
// Free all TLBC copies not associated with live threads.
|
||||
//
|
||||
// Returns 0 on success or -1 on error.
|
||||
extern int _Py_ClearUnusedTLBC(PyInterpreterState *interp);
|
||||
#endif
|
||||
|
||||
|
||||
typedef struct {
|
||||
int total;
|
||||
struct co_locals_counts {
|
||||
int total;
|
||||
struct {
|
||||
int total;
|
||||
int numposonly;
|
||||
int numposorkw;
|
||||
int numkwonly;
|
||||
int varargs;
|
||||
int varkwargs;
|
||||
} args;
|
||||
int numpure;
|
||||
struct {
|
||||
int total;
|
||||
// numargs does not contribute to locals.total.
|
||||
int numargs;
|
||||
int numothers;
|
||||
} cells;
|
||||
struct {
|
||||
int total;
|
||||
int numpure;
|
||||
int numcells;
|
||||
} hidden;
|
||||
} locals;
|
||||
int numfree; // nonlocal
|
||||
struct co_unbound_counts {
|
||||
int total;
|
||||
struct {
|
||||
int total;
|
||||
int numglobal;
|
||||
int numbuiltin;
|
||||
int numunknown;
|
||||
} globals;
|
||||
int numattrs;
|
||||
int numunknown;
|
||||
} unbound;
|
||||
} _PyCode_var_counts_t;
|
||||
|
||||
PyAPI_FUNC(void) _PyCode_GetVarCounts(
|
||||
PyCodeObject *,
|
||||
_PyCode_var_counts_t *);
|
||||
PyAPI_FUNC(int) _PyCode_SetUnboundVarCounts(
|
||||
PyThreadState *,
|
||||
PyCodeObject *,
|
||||
_PyCode_var_counts_t *,
|
||||
PyObject *globalnames,
|
||||
PyObject *attrnames,
|
||||
PyObject *globalsns,
|
||||
PyObject *builtinsns);
|
||||
|
||||
|
||||
/* "Stateless" code is a function or code object which does not rely on
|
||||
* external state or internal state. It may rely on arguments and
|
||||
* builtins, but not globals or a closure. Thus it does not rely
|
||||
* on __globals__ or __closure__, and a stateless function
|
||||
* is equivalent to its code object.
|
||||
*
|
||||
* Stateless code also does not keep any persistent state
|
||||
* of its own, so it can't have any executors, monitoring,
|
||||
* instrumentation, or "extras" (i.e. co_extra).
|
||||
*
|
||||
* Stateless code may create nested functions, including closures.
|
||||
* However, nested functions must themselves be stateless, except they
|
||||
* *can* close on the enclosing locals.
|
||||
*
|
||||
* Stateless code may return any value, including nested functions and closures.
|
||||
*
|
||||
* Stateless code that takes no arguments and doesn't return anything
|
||||
* may be treated like a script.
|
||||
*
|
||||
* We consider stateless code to be "portable" if it does not return
|
||||
* any object that holds a reference to any of the code's locals. Thus
|
||||
* generators and coroutines are not portable. Likewise a function
|
||||
* that returns a closure is not portable. The concept of
|
||||
* portability is useful in cases where the code is run
|
||||
* in a different execution context than where
|
||||
* the return value will be used. */
|
||||
|
||||
PyAPI_FUNC(int) _PyCode_CheckNoInternalState(PyCodeObject *, const char **);
|
||||
PyAPI_FUNC(int) _PyCode_CheckNoExternalState(
|
||||
PyCodeObject *,
|
||||
_PyCode_var_counts_t *,
|
||||
const char **);
|
||||
PyAPI_FUNC(int) _PyCode_VerifyStateless(
|
||||
PyThreadState *,
|
||||
PyCodeObject *,
|
||||
PyObject *globalnames,
|
||||
PyObject *globalsns,
|
||||
PyObject *builtinsns);
|
||||
|
||||
PyAPI_FUNC(int) _PyCode_CheckPureFunction(PyCodeObject *, const char **);
|
||||
PyAPI_FUNC(int) _PyCode_ReturnsOnlyNone(PyCodeObject *);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CODE_H */
|
||||
76
extern/include/python/internal/pycore_codecs.h
vendored
Normal file
76
extern/include/python/internal/pycore_codecs.h
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
#ifndef Py_INTERNAL_CODECS_H
|
||||
#define Py_INTERNAL_CODECS_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_interp_structs.h" // struct codecs_state
|
||||
|
||||
/* Initialize codecs-related state for the given interpreter, including
|
||||
registering the first codec search function. Must be called before any other
|
||||
PyCodec-related functions, and while only one thread is active. */
|
||||
extern PyStatus _PyCodec_InitRegistry(PyInterpreterState *interp);
|
||||
|
||||
/* Finalize codecs-related state for the given interpreter. No PyCodec-related
|
||||
functions other than PyCodec_Unregister() may be called after this. */
|
||||
extern void _PyCodec_Fini(PyInterpreterState *interp);
|
||||
|
||||
extern PyObject* _PyCodec_Lookup(const char *encoding);
|
||||
|
||||
/*
|
||||
* Un-register the error handling callback function registered under
|
||||
* the given 'name'. Only custom error handlers can be un-registered.
|
||||
*
|
||||
* - Return -1 and set an exception if 'name' refers to a built-in
|
||||
* error handling name (e.g., 'strict'), or if an error occurred.
|
||||
* - Return 0 if no custom error handler can be found for 'name'.
|
||||
* - Return 1 if the custom error handler was successfully removed.
|
||||
*/
|
||||
extern int _PyCodec_UnregisterError(const char *name);
|
||||
|
||||
/* Text codec specific encoding and decoding API.
|
||||
|
||||
Checks the encoding against a list of codecs which do not
|
||||
implement a str<->bytes encoding before attempting the
|
||||
operation.
|
||||
|
||||
Please note that these APIs are internal and should not
|
||||
be used in Python C extensions.
|
||||
|
||||
XXX (ncoghlan): should we make these, or something like them, public
|
||||
in Python 3.5+?
|
||||
|
||||
*/
|
||||
extern PyObject* _PyCodec_LookupTextEncoding(
|
||||
const char *encoding,
|
||||
const char *alternate_command);
|
||||
|
||||
extern PyObject* _PyCodec_EncodeText(
|
||||
PyObject *object,
|
||||
const char *encoding,
|
||||
const char *errors);
|
||||
|
||||
extern PyObject* _PyCodec_DecodeText(
|
||||
PyObject *object,
|
||||
const char *encoding,
|
||||
const char *errors);
|
||||
|
||||
/* These two aren't actually text encoding specific, but _io.TextIOWrapper
|
||||
* is the only current API consumer.
|
||||
*/
|
||||
extern PyObject* _PyCodecInfo_GetIncrementalDecoder(
|
||||
PyObject *codec_info,
|
||||
const char *errors);
|
||||
|
||||
extern PyObject* _PyCodecInfo_GetIncrementalEncoder(
|
||||
PyObject *codec_info,
|
||||
const char *errors);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CODECS_H */
|
||||
230
extern/include/python/internal/pycore_compile.h
vendored
Normal file
230
extern/include/python/internal/pycore_compile.h
vendored
Normal file
@@ -0,0 +1,230 @@
|
||||
#ifndef Py_INTERNAL_COMPILE_H
|
||||
#define Py_INTERNAL_COMPILE_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include <stdbool.h>
|
||||
|
||||
#include "pycore_ast.h" // mod_ty
|
||||
#include "pycore_symtable.h" // _Py_SourceLocation
|
||||
#include "pycore_instruction_sequence.h"
|
||||
|
||||
/* A soft limit for stack use, to avoid excessive
|
||||
* memory use for large constants, etc.
|
||||
*
|
||||
* The value 30 is plucked out of thin air.
|
||||
* Code that could use more stack than this is
|
||||
* rare, so the exact value is unimportant.
|
||||
*/
|
||||
#define _PY_STACK_USE_GUIDELINE 30
|
||||
|
||||
struct _arena; // Type defined in pycore_pyarena.h
|
||||
struct _mod; // Type defined in pycore_ast.h
|
||||
|
||||
// Export for 'test_peg_generator' shared extension
|
||||
PyAPI_FUNC(PyCodeObject*) _PyAST_Compile(
|
||||
struct _mod *mod,
|
||||
PyObject *filename,
|
||||
PyCompilerFlags *flags,
|
||||
int optimize,
|
||||
struct _arena *arena);
|
||||
|
||||
/* AST preprocessing */
|
||||
extern int _PyCompile_AstPreprocess(
|
||||
struct _mod *mod,
|
||||
PyObject *filename,
|
||||
PyCompilerFlags *flags,
|
||||
int optimize,
|
||||
struct _arena *arena,
|
||||
int syntax_check_only);
|
||||
|
||||
extern int _PyAST_Preprocess(
|
||||
struct _mod *,
|
||||
struct _arena *arena,
|
||||
PyObject *filename,
|
||||
int optimize,
|
||||
int ff_features,
|
||||
int syntax_check_only,
|
||||
int enable_warnings);
|
||||
|
||||
|
||||
typedef struct {
|
||||
PyObject *u_name;
|
||||
PyObject *u_qualname; /* dot-separated qualified name (lazy) */
|
||||
|
||||
/* The following fields are dicts that map objects to
|
||||
the index of them in co_XXX. The index is used as
|
||||
the argument for opcodes that refer to those collections.
|
||||
*/
|
||||
PyObject *u_consts; /* all constants */
|
||||
PyObject *u_names; /* all names */
|
||||
PyObject *u_varnames; /* local variables */
|
||||
PyObject *u_cellvars; /* cell variables */
|
||||
PyObject *u_freevars; /* free variables */
|
||||
PyObject *u_fasthidden; /* dict; keys are names that are fast-locals only
|
||||
temporarily within an inlined comprehension. When
|
||||
value is True, treat as fast-local. */
|
||||
|
||||
Py_ssize_t u_argcount; /* number of arguments for block */
|
||||
Py_ssize_t u_posonlyargcount; /* number of positional only arguments for block */
|
||||
Py_ssize_t u_kwonlyargcount; /* number of keyword only arguments for block */
|
||||
|
||||
int u_firstlineno; /* the first lineno of the block */
|
||||
} _PyCompile_CodeUnitMetadata;
|
||||
|
||||
struct _PyCompiler;
|
||||
|
||||
typedef enum {
|
||||
COMPILE_OP_FAST,
|
||||
COMPILE_OP_GLOBAL,
|
||||
COMPILE_OP_DEREF,
|
||||
COMPILE_OP_NAME,
|
||||
} _PyCompile_optype;
|
||||
|
||||
/* _PyCompile_FBlockInfo tracks the current frame block.
|
||||
*
|
||||
* A frame block is used to handle loops, try/except, and try/finally.
|
||||
* It's called a frame block to distinguish it from a basic block in the
|
||||
* compiler IR.
|
||||
*/
|
||||
|
||||
enum _PyCompile_FBlockType {
|
||||
COMPILE_FBLOCK_WHILE_LOOP,
|
||||
COMPILE_FBLOCK_FOR_LOOP,
|
||||
COMPILE_FBLOCK_TRY_EXCEPT,
|
||||
COMPILE_FBLOCK_FINALLY_TRY,
|
||||
COMPILE_FBLOCK_FINALLY_END,
|
||||
COMPILE_FBLOCK_WITH,
|
||||
COMPILE_FBLOCK_ASYNC_WITH,
|
||||
COMPILE_FBLOCK_HANDLER_CLEANUP,
|
||||
COMPILE_FBLOCK_POP_VALUE,
|
||||
COMPILE_FBLOCK_EXCEPTION_HANDLER,
|
||||
COMPILE_FBLOCK_EXCEPTION_GROUP_HANDLER,
|
||||
COMPILE_FBLOCK_ASYNC_COMPREHENSION_GENERATOR,
|
||||
COMPILE_FBLOCK_STOP_ITERATION,
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
enum _PyCompile_FBlockType fb_type;
|
||||
_PyJumpTargetLabel fb_block;
|
||||
_Py_SourceLocation fb_loc;
|
||||
/* (optional) type-specific exit or cleanup block */
|
||||
_PyJumpTargetLabel fb_exit;
|
||||
/* (optional) additional information required for unwinding */
|
||||
void *fb_datum;
|
||||
} _PyCompile_FBlockInfo;
|
||||
|
||||
|
||||
int _PyCompile_PushFBlock(struct _PyCompiler *c, _Py_SourceLocation loc,
|
||||
enum _PyCompile_FBlockType t,
|
||||
_PyJumpTargetLabel block_label,
|
||||
_PyJumpTargetLabel exit, void *datum);
|
||||
void _PyCompile_PopFBlock(struct _PyCompiler *c, enum _PyCompile_FBlockType t,
|
||||
_PyJumpTargetLabel block_label);
|
||||
_PyCompile_FBlockInfo *_PyCompile_TopFBlock(struct _PyCompiler *c);
|
||||
|
||||
int _PyCompile_EnterScope(struct _PyCompiler *c, identifier name, int scope_type,
|
||||
void *key, int lineno, PyObject *private,
|
||||
_PyCompile_CodeUnitMetadata *umd);
|
||||
void _PyCompile_ExitScope(struct _PyCompiler *c);
|
||||
Py_ssize_t _PyCompile_AddConst(struct _PyCompiler *c, PyObject *o);
|
||||
_PyInstructionSequence *_PyCompile_InstrSequence(struct _PyCompiler *c);
|
||||
int _PyCompile_StartAnnotationSetup(struct _PyCompiler *c);
|
||||
int _PyCompile_EndAnnotationSetup(struct _PyCompiler *c);
|
||||
int _PyCompile_FutureFeatures(struct _PyCompiler *c);
|
||||
void _PyCompile_DeferredAnnotations(
|
||||
struct _PyCompiler *c, PyObject **deferred_annotations,
|
||||
PyObject **conditional_annotation_indices);
|
||||
PyObject *_PyCompile_Mangle(struct _PyCompiler *c, PyObject *name);
|
||||
PyObject *_PyCompile_MaybeMangle(struct _PyCompiler *c, PyObject *name);
|
||||
int _PyCompile_MaybeAddStaticAttributeToClass(struct _PyCompiler *c, expr_ty e);
|
||||
int _PyCompile_GetRefType(struct _PyCompiler *c, PyObject *name);
|
||||
int _PyCompile_LookupCellvar(struct _PyCompiler *c, PyObject *name);
|
||||
int _PyCompile_ResolveNameop(struct _PyCompiler *c, PyObject *mangled, int scope,
|
||||
_PyCompile_optype *optype, Py_ssize_t *arg);
|
||||
|
||||
int _PyCompile_IsInteractiveTopLevel(struct _PyCompiler *c);
|
||||
int _PyCompile_IsInInlinedComp(struct _PyCompiler *c);
|
||||
int _PyCompile_ScopeType(struct _PyCompiler *c);
|
||||
int _PyCompile_OptimizationLevel(struct _PyCompiler *c);
|
||||
int _PyCompile_LookupArg(struct _PyCompiler *c, PyCodeObject *co, PyObject *name);
|
||||
PyObject *_PyCompile_Qualname(struct _PyCompiler *c);
|
||||
_PyCompile_CodeUnitMetadata *_PyCompile_Metadata(struct _PyCompiler *c);
|
||||
PyObject *_PyCompile_StaticAttributesAsTuple(struct _PyCompiler *c);
|
||||
|
||||
struct symtable *_PyCompile_Symtable(struct _PyCompiler *c);
|
||||
PySTEntryObject *_PyCompile_SymtableEntry(struct _PyCompiler *c);
|
||||
|
||||
enum {
|
||||
COMPILE_SCOPE_MODULE,
|
||||
COMPILE_SCOPE_CLASS,
|
||||
COMPILE_SCOPE_FUNCTION,
|
||||
COMPILE_SCOPE_ASYNC_FUNCTION,
|
||||
COMPILE_SCOPE_LAMBDA,
|
||||
COMPILE_SCOPE_COMPREHENSION,
|
||||
COMPILE_SCOPE_ANNOTATIONS,
|
||||
};
|
||||
|
||||
|
||||
typedef struct {
|
||||
PyObject *pushed_locals;
|
||||
PyObject *temp_symbols;
|
||||
PyObject *fast_hidden;
|
||||
_PyJumpTargetLabel cleanup;
|
||||
} _PyCompile_InlinedComprehensionState;
|
||||
|
||||
int _PyCompile_TweakInlinedComprehensionScopes(struct _PyCompiler *c, _Py_SourceLocation loc,
|
||||
PySTEntryObject *entry,
|
||||
_PyCompile_InlinedComprehensionState *state);
|
||||
int _PyCompile_RevertInlinedComprehensionScopes(struct _PyCompiler *c, _Py_SourceLocation loc,
|
||||
_PyCompile_InlinedComprehensionState *state);
|
||||
int _PyCompile_AddDeferredAnnotation(struct _PyCompiler *c, stmt_ty s,
|
||||
PyObject **conditional_annotation_index);
|
||||
void _PyCompile_EnterConditionalBlock(struct _PyCompiler *c);
|
||||
void _PyCompile_LeaveConditionalBlock(struct _PyCompiler *c);
|
||||
|
||||
int _PyCodegen_AddReturnAtEnd(struct _PyCompiler *c, int addNone);
|
||||
int _PyCodegen_EnterAnonymousScope(struct _PyCompiler* c, mod_ty mod);
|
||||
int _PyCodegen_Expression(struct _PyCompiler *c, expr_ty e);
|
||||
int _PyCodegen_Module(struct _PyCompiler *c, _Py_SourceLocation loc, asdl_stmt_seq *stmts,
|
||||
bool is_interactive);
|
||||
|
||||
int _PyCompile_ConstCacheMergeOne(PyObject *const_cache, PyObject **obj);
|
||||
|
||||
PyCodeObject *_PyCompile_OptimizeAndAssemble(struct _PyCompiler *c, int addNone);
|
||||
|
||||
Py_ssize_t _PyCompile_DictAddObj(PyObject *dict, PyObject *o);
|
||||
int _PyCompile_Error(struct _PyCompiler *c, _Py_SourceLocation loc, const char *format, ...);
|
||||
int _PyCompile_Warn(struct _PyCompiler *c, _Py_SourceLocation loc, const char *format, ...);
|
||||
|
||||
// Export for '_opcode' extension module
|
||||
PyAPI_FUNC(PyObject*) _PyCompile_GetUnaryIntrinsicName(int index);
|
||||
PyAPI_FUNC(PyObject*) _PyCompile_GetBinaryIntrinsicName(int index);
|
||||
|
||||
/* Access compiler internals for unit testing */
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(PyObject*) _PyCompile_CleanDoc(PyObject *doc);
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(PyObject*) _PyCompile_CodeGen(
|
||||
PyObject *ast,
|
||||
PyObject *filename,
|
||||
PyCompilerFlags *flags,
|
||||
int optimize,
|
||||
int compile_mode);
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(PyCodeObject*)
|
||||
_PyCompile_Assemble(_PyCompile_CodeUnitMetadata *umd, PyObject *filename,
|
||||
PyObject *instructions);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_COMPILE_H */
|
||||
34
extern/include/python/internal/pycore_complexobject.h
vendored
Normal file
34
extern/include/python/internal/pycore_complexobject.h
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
#ifndef Py_INTERNAL_COMPLEXOBJECT_H
|
||||
#define Py_INTERNAL_COMPLEXOBJECT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_unicodeobject.h" // _PyUnicodeWriter
|
||||
|
||||
/* Format the object based on the format_spec, as defined in PEP 3101
|
||||
(Advanced String Formatting). */
|
||||
extern int _PyComplex_FormatAdvancedWriter(
|
||||
_PyUnicodeWriter *writer,
|
||||
PyObject *obj,
|
||||
PyObject *format_spec,
|
||||
Py_ssize_t start,
|
||||
Py_ssize_t end);
|
||||
|
||||
// Operations on complex numbers.
|
||||
PyAPI_FUNC(Py_complex) _Py_cr_sum(Py_complex, double);
|
||||
PyAPI_FUNC(Py_complex) _Py_cr_diff(Py_complex, double);
|
||||
PyAPI_FUNC(Py_complex) _Py_rc_diff(double, Py_complex);
|
||||
PyAPI_FUNC(Py_complex) _Py_cr_prod(Py_complex, double);
|
||||
PyAPI_FUNC(Py_complex) _Py_cr_quot(Py_complex, double);
|
||||
PyAPI_FUNC(Py_complex) _Py_rc_quot(double, Py_complex);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // !Py_INTERNAL_COMPLEXOBJECT_H
|
||||
93
extern/include/python/internal/pycore_condvar.h
vendored
Normal file
93
extern/include/python/internal/pycore_condvar.h
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
#ifndef Py_INTERNAL_CONDVAR_H
|
||||
#define Py_INTERNAL_CONDVAR_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_pythread.h" // _POSIX_THREADS
|
||||
|
||||
|
||||
#ifdef _POSIX_THREADS
|
||||
/*
|
||||
* POSIX support
|
||||
*/
|
||||
#define Py_HAVE_CONDVAR
|
||||
|
||||
#ifdef HAVE_PTHREAD_H
|
||||
# include <pthread.h> // pthread_mutex_t
|
||||
#endif
|
||||
|
||||
#define PyMUTEX_T pthread_mutex_t
|
||||
#define PyCOND_T pthread_cond_t
|
||||
|
||||
#elif defined(NT_THREADS)
|
||||
/*
|
||||
* Windows (XP, 2003 server and later, as well as (hopefully) CE) support
|
||||
*
|
||||
* Emulated condition variables ones that work with XP and later, plus
|
||||
* example native support on VISTA and onwards.
|
||||
*/
|
||||
#define Py_HAVE_CONDVAR
|
||||
|
||||
/* include windows if it hasn't been done before */
|
||||
#ifndef WIN32_LEAN_AND_MEAN
|
||||
# define WIN32_LEAN_AND_MEAN
|
||||
#endif
|
||||
#include <windows.h> // CRITICAL_SECTION
|
||||
|
||||
/* options */
|
||||
/* emulated condition variables are provided for those that want
|
||||
* to target Windows XP or earlier. Modify this macro to enable them.
|
||||
*/
|
||||
#ifndef _PY_EMULATED_WIN_CV
|
||||
#define _PY_EMULATED_WIN_CV 0 /* use non-emulated condition variables */
|
||||
#endif
|
||||
|
||||
/* fall back to emulation if targeting earlier than Vista */
|
||||
#if !defined NTDDI_VISTA || NTDDI_VERSION < NTDDI_VISTA
|
||||
#undef _PY_EMULATED_WIN_CV
|
||||
#define _PY_EMULATED_WIN_CV 1
|
||||
#endif
|
||||
|
||||
#if _PY_EMULATED_WIN_CV
|
||||
|
||||
typedef CRITICAL_SECTION PyMUTEX_T;
|
||||
|
||||
/* The ConditionVariable object. From XP onwards it is easily emulated
|
||||
with a Semaphore.
|
||||
Semaphores are available on Windows XP (2003 server) and later.
|
||||
We use a Semaphore rather than an auto-reset event, because although
|
||||
an auto-reset event might appear to solve the lost-wakeup bug (race
|
||||
condition between releasing the outer lock and waiting) because it
|
||||
maintains state even though a wait hasn't happened, there is still
|
||||
a lost wakeup problem if more than one thread are interrupted in the
|
||||
critical place. A semaphore solves that, because its state is
|
||||
counted, not Boolean.
|
||||
Because it is ok to signal a condition variable with no one
|
||||
waiting, we need to keep track of the number of
|
||||
waiting threads. Otherwise, the semaphore's state could rise
|
||||
without bound. This also helps reduce the number of "spurious wakeups"
|
||||
that would otherwise happen.
|
||||
*/
|
||||
|
||||
typedef struct _PyCOND_T
|
||||
{
|
||||
HANDLE sem;
|
||||
int waiting; /* to allow PyCOND_SIGNAL to be a no-op */
|
||||
} PyCOND_T;
|
||||
|
||||
#else /* !_PY_EMULATED_WIN_CV */
|
||||
|
||||
/* Use native Windows primitives if build target is Vista or higher */
|
||||
|
||||
/* SRWLOCK is faster and better than CriticalSection */
|
||||
typedef SRWLOCK PyMUTEX_T;
|
||||
|
||||
typedef CONDITION_VARIABLE PyCOND_T;
|
||||
|
||||
#endif /* _PY_EMULATED_WIN_CV */
|
||||
|
||||
#endif /* _POSIX_THREADS, NT_THREADS */
|
||||
|
||||
#endif /* Py_INTERNAL_CONDVAR_H */
|
||||
59
extern/include/python/internal/pycore_context.h
vendored
Normal file
59
extern/include/python/internal/pycore_context.h
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
#ifndef Py_INTERNAL_CONTEXT_H
|
||||
#define Py_INTERNAL_CONTEXT_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_structs.h"
|
||||
|
||||
extern PyTypeObject _PyContextTokenMissing_Type;
|
||||
|
||||
/* runtime lifecycle */
|
||||
|
||||
PyStatus _PyContext_Init(PyInterpreterState *);
|
||||
|
||||
|
||||
/* other API */
|
||||
|
||||
typedef struct {
|
||||
PyObject_HEAD
|
||||
} _PyContextTokenMissing;
|
||||
|
||||
struct _pycontextobject {
|
||||
PyObject_HEAD
|
||||
PyContext *ctx_prev;
|
||||
PyHamtObject *ctx_vars;
|
||||
PyObject *ctx_weakreflist;
|
||||
int ctx_entered;
|
||||
};
|
||||
|
||||
|
||||
struct _pycontextvarobject {
|
||||
PyObject_HEAD
|
||||
PyObject *var_name;
|
||||
PyObject *var_default;
|
||||
#ifndef Py_GIL_DISABLED
|
||||
PyObject *var_cached;
|
||||
uint64_t var_cached_tsid;
|
||||
uint64_t var_cached_tsver;
|
||||
#endif
|
||||
Py_hash_t var_hash;
|
||||
};
|
||||
|
||||
|
||||
struct _pycontexttokenobject {
|
||||
PyObject_HEAD
|
||||
PyContext *tok_ctx;
|
||||
PyContextVar *tok_var;
|
||||
PyObject *tok_oldval;
|
||||
int tok_used;
|
||||
};
|
||||
|
||||
|
||||
// _testinternalcapi.hamt() used by tests.
|
||||
// Export for '_testcapi' shared extension
|
||||
PyAPI_FUNC(PyObject*) _PyContext_NewHamtForTests(void);
|
||||
|
||||
|
||||
#endif /* !Py_INTERNAL_CONTEXT_H */
|
||||
237
extern/include/python/internal/pycore_critical_section.h
vendored
Normal file
237
extern/include/python/internal/pycore_critical_section.h
vendored
Normal file
@@ -0,0 +1,237 @@
|
||||
#ifndef Py_INTERNAL_CRITICAL_SECTION_H
|
||||
#define Py_INTERNAL_CRITICAL_SECTION_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_lock.h" // PyMutex_LockFast()
|
||||
#include "pycore_pystate.h" // _PyThreadState_GET()
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Tagged pointers to critical sections use the two least significant bits to
|
||||
// mark if the pointed-to critical section is inactive and whether it is a
|
||||
// PyCriticalSection2 object.
|
||||
#define _Py_CRITICAL_SECTION_INACTIVE 0x1
|
||||
#define _Py_CRITICAL_SECTION_TWO_MUTEXES 0x2
|
||||
#define _Py_CRITICAL_SECTION_MASK 0x3
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
// Specialized version of critical section locking to safely use
|
||||
// PySequence_Fast APIs without the GIL. For performance, the argument *to*
|
||||
// PySequence_Fast() is provided to the macro, not the *result* of
|
||||
// PySequence_Fast(), which would require an extra test to determine if the
|
||||
// lock must be acquired.
|
||||
# define Py_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) \
|
||||
{ \
|
||||
PyObject *_orig_seq = _PyObject_CAST(original); \
|
||||
const bool _should_lock_cs = PyList_CheckExact(_orig_seq); \
|
||||
PyCriticalSection _cs; \
|
||||
if (_should_lock_cs) { \
|
||||
_PyCriticalSection_Begin(&_cs, _orig_seq); \
|
||||
}
|
||||
|
||||
# define Py_END_CRITICAL_SECTION_SEQUENCE_FAST() \
|
||||
if (_should_lock_cs) { \
|
||||
PyCriticalSection_End(&_cs); \
|
||||
} \
|
||||
}
|
||||
|
||||
// Asserts that the mutex is locked. The mutex must be held by the
|
||||
// top-most critical section otherwise there's the possibility
|
||||
// that the mutex would be swalled out in some code paths.
|
||||
#define _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(mutex) \
|
||||
_PyCriticalSection_AssertHeld(mutex)
|
||||
|
||||
// Asserts that the mutex for the given object is locked. The mutex must
|
||||
// be held by the top-most critical section otherwise there's the
|
||||
// possibility that the mutex would be swalled out in some code paths.
|
||||
#ifdef Py_DEBUG
|
||||
|
||||
# define _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op) \
|
||||
if (Py_REFCNT(op) != 1) { \
|
||||
_Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(&_PyObject_CAST(op)->ob_mutex); \
|
||||
}
|
||||
|
||||
#else /* Py_DEBUG */
|
||||
|
||||
# define _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op)
|
||||
|
||||
#endif /* Py_DEBUG */
|
||||
|
||||
#else /* !Py_GIL_DISABLED */
|
||||
// The critical section APIs are no-ops with the GIL.
|
||||
# define Py_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) {
|
||||
# define Py_END_CRITICAL_SECTION_SEQUENCE_FAST() }
|
||||
# define _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(mutex)
|
||||
# define _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op)
|
||||
#endif /* !Py_GIL_DISABLED */
|
||||
|
||||
// Resumes the top-most critical section.
|
||||
PyAPI_FUNC(void)
|
||||
_PyCriticalSection_Resume(PyThreadState *tstate);
|
||||
|
||||
// (private) slow path for locking the mutex
|
||||
PyAPI_FUNC(void)
|
||||
_PyCriticalSection_BeginSlow(PyCriticalSection *c, PyMutex *m);
|
||||
|
||||
PyAPI_FUNC(void)
|
||||
_PyCriticalSection2_BeginSlow(PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2,
|
||||
int is_m1_locked);
|
||||
|
||||
PyAPI_FUNC(void)
|
||||
_PyCriticalSection_SuspendAll(PyThreadState *tstate);
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
|
||||
static inline int
|
||||
_PyCriticalSection_IsActive(uintptr_t tag)
|
||||
{
|
||||
return tag != 0 && (tag & _Py_CRITICAL_SECTION_INACTIVE) == 0;
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection_BeginMutex(PyCriticalSection *c, PyMutex *m)
|
||||
{
|
||||
if (PyMutex_LockFast(m)) {
|
||||
PyThreadState *tstate = _PyThreadState_GET();
|
||||
c->_cs_mutex = m;
|
||||
c->_cs_prev = tstate->critical_section;
|
||||
tstate->critical_section = (uintptr_t)c;
|
||||
}
|
||||
else {
|
||||
_PyCriticalSection_BeginSlow(c, m);
|
||||
}
|
||||
}
|
||||
#define PyCriticalSection_BeginMutex _PyCriticalSection_BeginMutex
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection_Begin(PyCriticalSection *c, PyObject *op)
|
||||
{
|
||||
_PyCriticalSection_BeginMutex(c, &op->ob_mutex);
|
||||
}
|
||||
#define PyCriticalSection_Begin _PyCriticalSection_Begin
|
||||
|
||||
// Removes the top-most critical section from the thread's stack of critical
|
||||
// sections. If the new top-most critical section is inactive, then it is
|
||||
// resumed.
|
||||
static inline void
|
||||
_PyCriticalSection_Pop(PyCriticalSection *c)
|
||||
{
|
||||
PyThreadState *tstate = _PyThreadState_GET();
|
||||
uintptr_t prev = c->_cs_prev;
|
||||
tstate->critical_section = prev;
|
||||
|
||||
if ((prev & _Py_CRITICAL_SECTION_INACTIVE) != 0) {
|
||||
_PyCriticalSection_Resume(tstate);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection_End(PyCriticalSection *c)
|
||||
{
|
||||
// If the mutex is NULL, we used the fast path in
|
||||
// _PyCriticalSection_BeginSlow for locks already held in the top-most
|
||||
// critical section, and we shouldn't unlock or pop this critical section.
|
||||
if (c->_cs_mutex == NULL) {
|
||||
return;
|
||||
}
|
||||
PyMutex_Unlock(c->_cs_mutex);
|
||||
_PyCriticalSection_Pop(c);
|
||||
}
|
||||
#define PyCriticalSection_End _PyCriticalSection_End
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection2_BeginMutex(PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2)
|
||||
{
|
||||
if (m1 == m2) {
|
||||
// If the two mutex arguments are the same, treat this as a critical
|
||||
// section with a single mutex.
|
||||
c->_cs_mutex2 = NULL;
|
||||
_PyCriticalSection_BeginMutex(&c->_cs_base, m1);
|
||||
return;
|
||||
}
|
||||
|
||||
if ((uintptr_t)m2 < (uintptr_t)m1) {
|
||||
// Sort the mutexes so that the lower address is locked first.
|
||||
// The exact order does not matter, but we need to acquire the mutexes
|
||||
// in a consistent order to avoid lock ordering deadlocks.
|
||||
PyMutex *tmp = m1;
|
||||
m1 = m2;
|
||||
m2 = tmp;
|
||||
}
|
||||
|
||||
if (PyMutex_LockFast(m1)) {
|
||||
if (PyMutex_LockFast(m2)) {
|
||||
PyThreadState *tstate = _PyThreadState_GET();
|
||||
c->_cs_base._cs_mutex = m1;
|
||||
c->_cs_mutex2 = m2;
|
||||
c->_cs_base._cs_prev = tstate->critical_section;
|
||||
|
||||
uintptr_t p = (uintptr_t)c | _Py_CRITICAL_SECTION_TWO_MUTEXES;
|
||||
tstate->critical_section = p;
|
||||
}
|
||||
else {
|
||||
_PyCriticalSection2_BeginSlow(c, m1, m2, 1);
|
||||
}
|
||||
}
|
||||
else {
|
||||
_PyCriticalSection2_BeginSlow(c, m1, m2, 0);
|
||||
}
|
||||
}
|
||||
#define PyCriticalSection2_BeginMutex _PyCriticalSection2_BeginMutex
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection2_Begin(PyCriticalSection2 *c, PyObject *a, PyObject *b)
|
||||
{
|
||||
_PyCriticalSection2_BeginMutex(c, &a->ob_mutex, &b->ob_mutex);
|
||||
}
|
||||
#define PyCriticalSection2_Begin _PyCriticalSection2_Begin
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection2_End(PyCriticalSection2 *c)
|
||||
{
|
||||
// if mutex1 is NULL, we used the fast path in
|
||||
// _PyCriticalSection_BeginSlow for mutexes that are already held,
|
||||
// which should only happen when mutex1 and mutex2 were the same mutex,
|
||||
// and mutex2 should also be NULL.
|
||||
if (c->_cs_base._cs_mutex == NULL) {
|
||||
assert(c->_cs_mutex2 == NULL);
|
||||
return;
|
||||
}
|
||||
if (c->_cs_mutex2) {
|
||||
PyMutex_Unlock(c->_cs_mutex2);
|
||||
}
|
||||
PyMutex_Unlock(c->_cs_base._cs_mutex);
|
||||
_PyCriticalSection_Pop(&c->_cs_base);
|
||||
}
|
||||
#define PyCriticalSection2_End _PyCriticalSection2_End
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection_AssertHeld(PyMutex *mutex)
|
||||
{
|
||||
#ifdef Py_DEBUG
|
||||
PyThreadState *tstate = _PyThreadState_GET();
|
||||
uintptr_t prev = tstate->critical_section;
|
||||
if (prev & _Py_CRITICAL_SECTION_TWO_MUTEXES) {
|
||||
PyCriticalSection2 *cs = (PyCriticalSection2 *)(prev & ~_Py_CRITICAL_SECTION_MASK);
|
||||
assert(cs != NULL && (cs->_cs_base._cs_mutex == mutex || cs->_cs_mutex2 == mutex));
|
||||
}
|
||||
else {
|
||||
PyCriticalSection *cs = (PyCriticalSection *)(tstate->critical_section & ~_Py_CRITICAL_SECTION_MASK);
|
||||
assert(cs != NULL && cs->_cs_mutex == mutex);
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif /* Py_GIL_DISABLED */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CRITICAL_SECTION_H */
|
||||
406
extern/include/python/internal/pycore_crossinterp.h
vendored
Normal file
406
extern/include/python/internal/pycore_crossinterp.h
vendored
Normal file
@@ -0,0 +1,406 @@
|
||||
#ifndef Py_INTERNAL_CROSSINTERP_H
|
||||
#define Py_INTERNAL_CROSSINTERP_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_pyerrors.h"
|
||||
|
||||
|
||||
/**************/
|
||||
/* exceptions */
|
||||
/**************/
|
||||
|
||||
PyAPI_DATA(PyObject *) PyExc_InterpreterError;
|
||||
PyAPI_DATA(PyObject *) PyExc_InterpreterNotFoundError;
|
||||
|
||||
|
||||
/***************************/
|
||||
/* cross-interpreter calls */
|
||||
/***************************/
|
||||
|
||||
typedef int (*_Py_simple_func)(void *);
|
||||
extern int _Py_CallInInterpreter(
|
||||
PyInterpreterState *interp,
|
||||
_Py_simple_func func,
|
||||
void *arg);
|
||||
extern int _Py_CallInInterpreterAndRawFree(
|
||||
PyInterpreterState *interp,
|
||||
_Py_simple_func func,
|
||||
void *arg);
|
||||
|
||||
|
||||
/**************************/
|
||||
/* cross-interpreter data */
|
||||
/**************************/
|
||||
|
||||
typedef struct _xidata _PyXIData_t;
|
||||
typedef PyObject *(*xid_newobjfunc)(_PyXIData_t *);
|
||||
typedef void (*xid_freefunc)(void *);
|
||||
|
||||
// _PyXIData_t is similar to Py_buffer as an effectively
|
||||
// opaque struct that holds data outside the object machinery. This
|
||||
// is necessary to pass safely between interpreters in the same process.
|
||||
struct _xidata {
|
||||
// data is the cross-interpreter-safe derivation of a Python object
|
||||
// (see _PyObject_GetXIData). It will be NULL if the
|
||||
// new_object func (below) encodes the data.
|
||||
void *data;
|
||||
// obj is the Python object from which the data was derived. This
|
||||
// is non-NULL only if the data remains bound to the object in some
|
||||
// way, such that the object must be "released" (via a decref) when
|
||||
// the data is released. In that case the code that sets the field,
|
||||
// likely a registered "xidatafunc", is responsible for
|
||||
// ensuring it owns the reference (i.e. incref).
|
||||
PyObject *obj;
|
||||
// interpid is the ID of the owning interpreter of the original
|
||||
// object. It corresponds to the active interpreter when
|
||||
// _PyObject_GetXIData() was called. This should only
|
||||
// be set by the cross-interpreter machinery.
|
||||
//
|
||||
// We use the ID rather than the PyInterpreterState to avoid issues
|
||||
// with deleted interpreters. Note that IDs are never re-used, so
|
||||
// each one will always correspond to a specific interpreter
|
||||
// (whether still alive or not).
|
||||
int64_t interpid;
|
||||
// new_object is a function that returns a new object in the current
|
||||
// interpreter given the data. The resulting object (a new
|
||||
// reference) will be equivalent to the original object. This field
|
||||
// is required.
|
||||
xid_newobjfunc new_object;
|
||||
// free is called when the data is released. If it is NULL then
|
||||
// nothing will be done to free the data. For some types this is
|
||||
// okay (e.g. bytes) and for those types this field should be set
|
||||
// to NULL. However, for most the data was allocated just for
|
||||
// cross-interpreter use, so it must be freed when
|
||||
// _PyXIData_Release is called or the memory will
|
||||
// leak. In that case, at the very least this field should be set
|
||||
// to PyMem_RawFree (the default if not explicitly set to NULL).
|
||||
// The call will happen with the original interpreter activated.
|
||||
xid_freefunc free;
|
||||
};
|
||||
|
||||
PyAPI_FUNC(_PyXIData_t *) _PyXIData_New(void);
|
||||
PyAPI_FUNC(void) _PyXIData_Free(_PyXIData_t *data);
|
||||
|
||||
#define _PyXIData_DATA(DATA) ((DATA)->data)
|
||||
#define _PyXIData_OBJ(DATA) ((DATA)->obj)
|
||||
#define _PyXIData_INTERPID(DATA) ((DATA)->interpid)
|
||||
// Users should not need getters for "new_object" or "free".
|
||||
|
||||
|
||||
/* defining cross-interpreter data */
|
||||
|
||||
PyAPI_FUNC(void) _PyXIData_Init(
|
||||
_PyXIData_t *data,
|
||||
PyInterpreterState *interp, void *shared, PyObject *obj,
|
||||
xid_newobjfunc new_object);
|
||||
PyAPI_FUNC(int) _PyXIData_InitWithSize(
|
||||
_PyXIData_t *,
|
||||
PyInterpreterState *interp, const size_t, PyObject *,
|
||||
xid_newobjfunc);
|
||||
PyAPI_FUNC(void) _PyXIData_Clear(PyInterpreterState *, _PyXIData_t *);
|
||||
|
||||
// Normally the Init* functions are sufficient. The only time
|
||||
// additional initialization might be needed is to set the "free" func,
|
||||
// though that should be infrequent.
|
||||
#define _PyXIData_SET_FREE(DATA, FUNC) \
|
||||
do { \
|
||||
(DATA)->free = (FUNC); \
|
||||
} while (0)
|
||||
#define _PyXIData_CHECK_FREE(DATA, FUNC) \
|
||||
((DATA)->free == (FUNC))
|
||||
// Additionally, some shareable types are essentially light wrappers
|
||||
// around other shareable types. The xidatafunc of the wrapper
|
||||
// can often be implemented by calling the wrapped object's
|
||||
// xidatafunc and then changing the "new_object" function.
|
||||
// We have _PyXIData_SET_NEW_OBJECT() here for that,
|
||||
// but might be better to have a function like
|
||||
// _PyXIData_AdaptToWrapper() instead.
|
||||
#define _PyXIData_SET_NEW_OBJECT(DATA, FUNC) \
|
||||
do { \
|
||||
(DATA)->new_object = (FUNC); \
|
||||
} while (0)
|
||||
#define _PyXIData_CHECK_NEW_OBJECT(DATA, FUNC) \
|
||||
((DATA)->new_object == (FUNC))
|
||||
|
||||
|
||||
/* getting cross-interpreter data */
|
||||
|
||||
typedef int xidata_fallback_t;
|
||||
#define _PyXIDATA_XIDATA_ONLY (0)
|
||||
#define _PyXIDATA_FULL_FALLBACK (1)
|
||||
|
||||
// Technically, we don't need two different function types;
|
||||
// we could go with just the fallback one. However, only container
|
||||
// types like tuple need it, so always having the extra arg would be
|
||||
// a bit unfortunate. It's also nice to be able to clearly distinguish
|
||||
// between types that might call _PyObject_GetXIData() and those that won't.
|
||||
//
|
||||
typedef int (*xidatafunc)(PyThreadState *, PyObject *, _PyXIData_t *);
|
||||
typedef int (*xidatafbfunc)(
|
||||
PyThreadState *, PyObject *, xidata_fallback_t, _PyXIData_t *);
|
||||
typedef struct {
|
||||
xidatafunc basic;
|
||||
xidatafbfunc fallback;
|
||||
} _PyXIData_getdata_t;
|
||||
|
||||
PyAPI_FUNC(PyObject *) _PyXIData_GetNotShareableErrorType(PyThreadState *);
|
||||
PyAPI_FUNC(void) _PyXIData_SetNotShareableError(PyThreadState *, const char *);
|
||||
PyAPI_FUNC(void) _PyXIData_FormatNotShareableError(
|
||||
PyThreadState *,
|
||||
const char *,
|
||||
...);
|
||||
|
||||
PyAPI_FUNC(_PyXIData_getdata_t) _PyXIData_Lookup(
|
||||
PyThreadState *,
|
||||
PyObject *);
|
||||
PyAPI_FUNC(int) _PyObject_CheckXIData(
|
||||
PyThreadState *,
|
||||
PyObject *);
|
||||
|
||||
PyAPI_FUNC(int) _PyObject_GetXIDataNoFallback(
|
||||
PyThreadState *,
|
||||
PyObject *,
|
||||
_PyXIData_t *);
|
||||
PyAPI_FUNC(int) _PyObject_GetXIData(
|
||||
PyThreadState *,
|
||||
PyObject *,
|
||||
xidata_fallback_t,
|
||||
_PyXIData_t *);
|
||||
|
||||
// _PyObject_GetXIData() for bytes
|
||||
typedef struct {
|
||||
const char *bytes;
|
||||
Py_ssize_t len;
|
||||
} _PyBytes_data_t;
|
||||
PyAPI_FUNC(int) _PyBytes_GetData(PyObject *, _PyBytes_data_t *);
|
||||
PyAPI_FUNC(PyObject *) _PyBytes_FromData(_PyBytes_data_t *);
|
||||
PyAPI_FUNC(PyObject *) _PyBytes_FromXIData(_PyXIData_t *);
|
||||
PyAPI_FUNC(int) _PyBytes_GetXIData(
|
||||
PyThreadState *,
|
||||
PyObject *,
|
||||
_PyXIData_t *);
|
||||
PyAPI_FUNC(_PyBytes_data_t *) _PyBytes_GetXIDataWrapped(
|
||||
PyThreadState *,
|
||||
PyObject *,
|
||||
size_t,
|
||||
xid_newobjfunc,
|
||||
_PyXIData_t *);
|
||||
|
||||
// _PyObject_GetXIData() for pickle
|
||||
PyAPI_DATA(PyObject *) _PyPickle_LoadFromXIData(_PyXIData_t *);
|
||||
PyAPI_FUNC(int) _PyPickle_GetXIData(
|
||||
PyThreadState *,
|
||||
PyObject *,
|
||||
_PyXIData_t *);
|
||||
|
||||
// _PyObject_GetXIData() for marshal
|
||||
PyAPI_FUNC(PyObject *) _PyMarshal_ReadObjectFromXIData(_PyXIData_t *);
|
||||
PyAPI_FUNC(int) _PyMarshal_GetXIData(
|
||||
PyThreadState *,
|
||||
PyObject *,
|
||||
_PyXIData_t *);
|
||||
|
||||
// _PyObject_GetXIData() for code objects
|
||||
PyAPI_FUNC(PyObject *) _PyCode_FromXIData(_PyXIData_t *);
|
||||
PyAPI_FUNC(int) _PyCode_GetXIData(
|
||||
PyThreadState *,
|
||||
PyObject *,
|
||||
_PyXIData_t *);
|
||||
PyAPI_FUNC(int) _PyCode_GetScriptXIData(
|
||||
PyThreadState *,
|
||||
PyObject *,
|
||||
_PyXIData_t *);
|
||||
PyAPI_FUNC(int) _PyCode_GetPureScriptXIData(
|
||||
PyThreadState *,
|
||||
PyObject *,
|
||||
_PyXIData_t *);
|
||||
|
||||
// _PyObject_GetXIData() for functions
|
||||
PyAPI_FUNC(PyObject *) _PyFunction_FromXIData(_PyXIData_t *);
|
||||
PyAPI_FUNC(int) _PyFunction_GetXIData(
|
||||
PyThreadState *,
|
||||
PyObject *,
|
||||
_PyXIData_t *);
|
||||
|
||||
|
||||
/* using cross-interpreter data */
|
||||
|
||||
PyAPI_FUNC(PyObject *) _PyXIData_NewObject(_PyXIData_t *);
|
||||
PyAPI_FUNC(int) _PyXIData_Release(_PyXIData_t *);
|
||||
PyAPI_FUNC(int) _PyXIData_ReleaseAndRawFree(_PyXIData_t *);
|
||||
|
||||
|
||||
/* cross-interpreter data registry */
|
||||
|
||||
#define Py_CORE_CROSSINTERP_DATA_REGISTRY_H
|
||||
#include "pycore_crossinterp_data_registry.h"
|
||||
#undef Py_CORE_CROSSINTERP_DATA_REGISTRY_H
|
||||
|
||||
|
||||
/*****************************/
|
||||
/* runtime state & lifecycle */
|
||||
/*****************************/
|
||||
|
||||
typedef struct _xid_lookup_state _PyXIData_lookup_t;
|
||||
|
||||
typedef struct {
|
||||
// builtin types
|
||||
_PyXIData_lookup_t data_lookup;
|
||||
} _PyXI_global_state_t;
|
||||
|
||||
typedef struct {
|
||||
// heap types
|
||||
_PyXIData_lookup_t data_lookup;
|
||||
|
||||
struct xi_exceptions {
|
||||
// static types
|
||||
PyObject *PyExc_InterpreterError;
|
||||
PyObject *PyExc_InterpreterNotFoundError;
|
||||
// heap types
|
||||
PyObject *PyExc_NotShareableError;
|
||||
} exceptions;
|
||||
} _PyXI_state_t;
|
||||
|
||||
#define _PyXI_GET_GLOBAL_STATE(interp) (&(interp)->runtime->xi)
|
||||
#define _PyXI_GET_STATE(interp) (&(interp)->xi)
|
||||
|
||||
#ifndef Py_BUILD_CORE_MODULE
|
||||
extern PyStatus _PyXI_Init(PyInterpreterState *interp);
|
||||
extern void _PyXI_Fini(PyInterpreterState *interp);
|
||||
extern PyStatus _PyXI_InitTypes(PyInterpreterState *interp);
|
||||
extern void _PyXI_FiniTypes(PyInterpreterState *interp);
|
||||
#endif // Py_BUILD_CORE_MODULE
|
||||
|
||||
int _Py_xi_global_state_init(_PyXI_global_state_t *);
|
||||
void _Py_xi_global_state_fini(_PyXI_global_state_t *);
|
||||
int _Py_xi_state_init(_PyXI_state_t *, PyInterpreterState *);
|
||||
void _Py_xi_state_fini(_PyXI_state_t *, PyInterpreterState *);
|
||||
|
||||
|
||||
/***************************/
|
||||
/* short-term data sharing */
|
||||
/***************************/
|
||||
|
||||
// Ultimately we'd like to preserve enough information about the
|
||||
// exception and traceback that we could re-constitute (or at least
|
||||
// simulate, a la traceback.TracebackException), and even chain, a copy
|
||||
// of the exception in the calling interpreter.
|
||||
|
||||
typedef struct _excinfo {
|
||||
struct _excinfo_type {
|
||||
PyTypeObject *builtin;
|
||||
const char *name;
|
||||
const char *qualname;
|
||||
const char *module;
|
||||
} type;
|
||||
const char *msg;
|
||||
const char *errdisplay;
|
||||
} _PyXI_excinfo;
|
||||
|
||||
PyAPI_FUNC(_PyXI_excinfo *) _PyXI_NewExcInfo(PyObject *exc);
|
||||
PyAPI_FUNC(void) _PyXI_FreeExcInfo(_PyXI_excinfo *info);
|
||||
PyAPI_FUNC(PyObject *) _PyXI_FormatExcInfo(_PyXI_excinfo *info);
|
||||
PyAPI_FUNC(PyObject *) _PyXI_ExcInfoAsObject(_PyXI_excinfo *info);
|
||||
|
||||
|
||||
typedef enum error_code {
|
||||
_PyXI_ERR_NO_ERROR = 0,
|
||||
_PyXI_ERR_UNCAUGHT_EXCEPTION = -1,
|
||||
_PyXI_ERR_OTHER = -2,
|
||||
_PyXI_ERR_NO_MEMORY = -3,
|
||||
_PyXI_ERR_ALREADY_RUNNING = -4,
|
||||
_PyXI_ERR_MAIN_NS_FAILURE = -5,
|
||||
_PyXI_ERR_APPLY_NS_FAILURE = -6,
|
||||
_PyXI_ERR_PRESERVE_FAILURE = -7,
|
||||
_PyXI_ERR_EXC_PROPAGATION_FAILURE = -8,
|
||||
_PyXI_ERR_NOT_SHAREABLE = -9,
|
||||
} _PyXI_errcode;
|
||||
|
||||
typedef struct xi_failure _PyXI_failure;
|
||||
|
||||
PyAPI_FUNC(_PyXI_failure *) _PyXI_NewFailure(void);
|
||||
PyAPI_FUNC(void) _PyXI_FreeFailure(_PyXI_failure *);
|
||||
PyAPI_FUNC(_PyXI_errcode) _PyXI_GetFailureCode(_PyXI_failure *);
|
||||
PyAPI_FUNC(int) _PyXI_InitFailure(_PyXI_failure *, _PyXI_errcode, PyObject *);
|
||||
PyAPI_FUNC(void) _PyXI_InitFailureUTF8(
|
||||
_PyXI_failure *,
|
||||
_PyXI_errcode,
|
||||
const char *);
|
||||
|
||||
PyAPI_FUNC(int) _PyXI_UnwrapNotShareableError(
|
||||
PyThreadState *,
|
||||
_PyXI_failure *);
|
||||
|
||||
|
||||
// A cross-interpreter session involves entering an interpreter
|
||||
// with _PyXI_Enter(), doing some work with it, and finally exiting
|
||||
// that interpreter with _PyXI_Exit().
|
||||
//
|
||||
// At the boundaries of the session, both entering and exiting,
|
||||
// data may be exchanged between the previous interpreter and the
|
||||
// target one in a thread-safe way that does not violate the
|
||||
// isolation between interpreters. This includes setting objects
|
||||
// in the target's __main__ module on the way in, and capturing
|
||||
// uncaught exceptions on the way out.
|
||||
typedef struct xi_session _PyXI_session;
|
||||
|
||||
PyAPI_FUNC(_PyXI_session *) _PyXI_NewSession(void);
|
||||
PyAPI_FUNC(void) _PyXI_FreeSession(_PyXI_session *);
|
||||
|
||||
typedef struct {
|
||||
PyObject *preserved;
|
||||
PyObject *excinfo;
|
||||
_PyXI_errcode errcode;
|
||||
} _PyXI_session_result;
|
||||
PyAPI_FUNC(void) _PyXI_ClearResult(_PyXI_session_result *);
|
||||
|
||||
PyAPI_FUNC(int) _PyXI_Enter(
|
||||
_PyXI_session *session,
|
||||
PyInterpreterState *interp,
|
||||
PyObject *nsupdates,
|
||||
_PyXI_session_result *);
|
||||
PyAPI_FUNC(int) _PyXI_Exit(
|
||||
_PyXI_session *,
|
||||
_PyXI_failure *,
|
||||
_PyXI_session_result *);
|
||||
|
||||
PyAPI_FUNC(PyObject *) _PyXI_GetMainNamespace(
|
||||
_PyXI_session *,
|
||||
_PyXI_failure *);
|
||||
|
||||
PyAPI_FUNC(int) _PyXI_Preserve(
|
||||
_PyXI_session *,
|
||||
const char *,
|
||||
PyObject *,
|
||||
_PyXI_failure *);
|
||||
PyAPI_FUNC(PyObject *) _PyXI_GetPreserved(
|
||||
_PyXI_session_result *,
|
||||
const char *);
|
||||
|
||||
|
||||
/*************/
|
||||
/* other API */
|
||||
/*************/
|
||||
|
||||
// Export for _testinternalcapi shared extension
|
||||
PyAPI_FUNC(PyInterpreterState *) _PyXI_NewInterpreter(
|
||||
PyInterpreterConfig *config,
|
||||
long *maybe_whence,
|
||||
PyThreadState **p_tstate,
|
||||
PyThreadState **p_save_tstate);
|
||||
PyAPI_FUNC(void) _PyXI_EndInterpreter(
|
||||
PyInterpreterState *interp,
|
||||
PyThreadState *tstate,
|
||||
PyThreadState **p_save_tstate);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CROSSINTERP_H */
|
||||
41
extern/include/python/internal/pycore_crossinterp_data_registry.h
vendored
Normal file
41
extern/include/python/internal/pycore_crossinterp_data_registry.h
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
#ifndef Py_CORE_CROSSINTERP_DATA_REGISTRY_H
|
||||
# error "this header must not be included directly"
|
||||
#endif
|
||||
|
||||
|
||||
// For now we use a global registry of shareable classes. An
|
||||
// alternative would be to add a tp_* slot for a class's
|
||||
// xidatafunc. It would be simpler and more efficient.
|
||||
|
||||
struct _xid_regitem;
|
||||
|
||||
typedef struct _xid_regitem {
|
||||
struct _xid_regitem *prev;
|
||||
struct _xid_regitem *next;
|
||||
/* This can be a dangling pointer, but only if weakref is set. */
|
||||
PyTypeObject *cls;
|
||||
/* This is NULL for builtin types. */
|
||||
PyObject *weakref;
|
||||
size_t refcount;
|
||||
_PyXIData_getdata_t getdata;
|
||||
} _PyXIData_regitem_t;
|
||||
|
||||
typedef struct {
|
||||
int global; /* builtin types or heap types */
|
||||
int initialized;
|
||||
PyMutex mutex;
|
||||
_PyXIData_regitem_t *head;
|
||||
} _PyXIData_registry_t;
|
||||
|
||||
PyAPI_FUNC(int) _PyXIData_RegisterClass(
|
||||
PyThreadState *,
|
||||
PyTypeObject *,
|
||||
_PyXIData_getdata_t);
|
||||
PyAPI_FUNC(int) _PyXIData_UnregisterClass(
|
||||
PyThreadState *,
|
||||
PyTypeObject *);
|
||||
|
||||
struct _xid_lookup_state {
|
||||
// XXX Remove this field once we have a tp_* slot.
|
||||
_PyXIData_registry_t registry;
|
||||
};
|
||||
379
extern/include/python/internal/pycore_debug_offsets.h
vendored
Normal file
379
extern/include/python/internal/pycore_debug_offsets.h
vendored
Normal file
@@ -0,0 +1,379 @@
|
||||
#ifndef Py_INTERNAL_DEBUG_OFFSETS_H
|
||||
#define Py_INTERNAL_DEBUG_OFFSETS_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
#define _Py_Debug_Cookie "xdebugpy"
|
||||
|
||||
#if defined(__APPLE__)
|
||||
# include <mach-o/loader.h>
|
||||
#endif
|
||||
|
||||
// Macros to burn global values in custom sections so out-of-process
|
||||
// profilers can locate them easily.
|
||||
#define GENERATE_DEBUG_SECTION(name, declaration) \
|
||||
_GENERATE_DEBUG_SECTION_WINDOWS(name) \
|
||||
_GENERATE_DEBUG_SECTION_APPLE(name) \
|
||||
declaration \
|
||||
_GENERATE_DEBUG_SECTION_LINUX(name)
|
||||
|
||||
// Please note that section names are truncated to eight bytes
|
||||
// on Windows!
|
||||
#if defined(MS_WINDOWS)
|
||||
#define _GENERATE_DEBUG_SECTION_WINDOWS(name) \
|
||||
_Pragma(Py_STRINGIFY(section(Py_STRINGIFY(name), read, write))) \
|
||||
__declspec(allocate(Py_STRINGIFY(name)))
|
||||
#else
|
||||
#define _GENERATE_DEBUG_SECTION_WINDOWS(name)
|
||||
#endif
|
||||
|
||||
#if defined(__APPLE__)
|
||||
#define _GENERATE_DEBUG_SECTION_APPLE(name) \
|
||||
__attribute__((section(SEG_DATA "," Py_STRINGIFY(name)))) \
|
||||
__attribute__((used))
|
||||
#else
|
||||
#define _GENERATE_DEBUG_SECTION_APPLE(name)
|
||||
#endif
|
||||
|
||||
#if defined(__linux__) && (defined(__GNUC__) || defined(__clang__))
|
||||
#define _GENERATE_DEBUG_SECTION_LINUX(name) \
|
||||
__attribute__((section("." Py_STRINGIFY(name)))) \
|
||||
__attribute__((used))
|
||||
#else
|
||||
#define _GENERATE_DEBUG_SECTION_LINUX(name)
|
||||
#endif
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
# define _Py_Debug_gilruntimestate_enabled offsetof(struct _gil_runtime_state, enabled)
|
||||
# define _Py_Debug_Free_Threaded 1
|
||||
# define _Py_Debug_code_object_co_tlbc offsetof(PyCodeObject, co_tlbc)
|
||||
# define _Py_Debug_interpreter_frame_tlbc_index offsetof(_PyInterpreterFrame, tlbc_index)
|
||||
# define _Py_Debug_interpreter_state_tlbc_generation offsetof(PyInterpreterState, tlbc_indices.tlbc_generation)
|
||||
#else
|
||||
# define _Py_Debug_gilruntimestate_enabled 0
|
||||
# define _Py_Debug_Free_Threaded 0
|
||||
# define _Py_Debug_code_object_co_tlbc 0
|
||||
# define _Py_Debug_interpreter_frame_tlbc_index 0
|
||||
# define _Py_Debug_interpreter_state_tlbc_generation 0
|
||||
#endif
|
||||
|
||||
|
||||
typedef struct _Py_DebugOffsets {
|
||||
char cookie[8] _Py_NONSTRING;
|
||||
uint64_t version;
|
||||
uint64_t free_threaded;
|
||||
// Runtime state offset;
|
||||
struct _runtime_state {
|
||||
uint64_t size;
|
||||
uint64_t finalizing;
|
||||
uint64_t interpreters_head;
|
||||
} runtime_state;
|
||||
|
||||
// Interpreter state offset;
|
||||
struct _interpreter_state {
|
||||
uint64_t size;
|
||||
uint64_t id;
|
||||
uint64_t next;
|
||||
uint64_t threads_head;
|
||||
uint64_t threads_main;
|
||||
uint64_t gc;
|
||||
uint64_t imports_modules;
|
||||
uint64_t sysdict;
|
||||
uint64_t builtins;
|
||||
uint64_t ceval_gil;
|
||||
uint64_t gil_runtime_state;
|
||||
uint64_t gil_runtime_state_enabled;
|
||||
uint64_t gil_runtime_state_locked;
|
||||
uint64_t gil_runtime_state_holder;
|
||||
uint64_t code_object_generation;
|
||||
uint64_t tlbc_generation;
|
||||
} interpreter_state;
|
||||
|
||||
// Thread state offset;
|
||||
struct _thread_state{
|
||||
uint64_t size;
|
||||
uint64_t prev;
|
||||
uint64_t next;
|
||||
uint64_t interp;
|
||||
uint64_t current_frame;
|
||||
uint64_t thread_id;
|
||||
uint64_t native_thread_id;
|
||||
uint64_t datastack_chunk;
|
||||
uint64_t status;
|
||||
} thread_state;
|
||||
|
||||
// InterpreterFrame offset;
|
||||
struct _interpreter_frame {
|
||||
uint64_t size;
|
||||
uint64_t previous;
|
||||
uint64_t executable;
|
||||
uint64_t instr_ptr;
|
||||
uint64_t localsplus;
|
||||
uint64_t owner;
|
||||
uint64_t stackpointer;
|
||||
uint64_t tlbc_index;
|
||||
} interpreter_frame;
|
||||
|
||||
// Code object offset;
|
||||
struct _code_object {
|
||||
uint64_t size;
|
||||
uint64_t filename;
|
||||
uint64_t name;
|
||||
uint64_t qualname;
|
||||
uint64_t linetable;
|
||||
uint64_t firstlineno;
|
||||
uint64_t argcount;
|
||||
uint64_t localsplusnames;
|
||||
uint64_t localspluskinds;
|
||||
uint64_t co_code_adaptive;
|
||||
uint64_t co_tlbc;
|
||||
} code_object;
|
||||
|
||||
// PyObject offset;
|
||||
struct _pyobject {
|
||||
uint64_t size;
|
||||
uint64_t ob_type;
|
||||
} pyobject;
|
||||
|
||||
// PyTypeObject object offset;
|
||||
struct _type_object {
|
||||
uint64_t size;
|
||||
uint64_t tp_name;
|
||||
uint64_t tp_repr;
|
||||
uint64_t tp_flags;
|
||||
} type_object;
|
||||
|
||||
// PyTuple object offset;
|
||||
struct _tuple_object {
|
||||
uint64_t size;
|
||||
uint64_t ob_item;
|
||||
uint64_t ob_size;
|
||||
} tuple_object;
|
||||
|
||||
// PyList object offset;
|
||||
struct _list_object {
|
||||
uint64_t size;
|
||||
uint64_t ob_item;
|
||||
uint64_t ob_size;
|
||||
} list_object;
|
||||
|
||||
// PySet object offset;
|
||||
struct _set_object {
|
||||
uint64_t size;
|
||||
uint64_t used;
|
||||
uint64_t table;
|
||||
uint64_t mask;
|
||||
} set_object;
|
||||
|
||||
// PyDict object offset;
|
||||
struct _dict_object {
|
||||
uint64_t size;
|
||||
uint64_t ma_keys;
|
||||
uint64_t ma_values;
|
||||
} dict_object;
|
||||
|
||||
// PyFloat object offset;
|
||||
struct _float_object {
|
||||
uint64_t size;
|
||||
uint64_t ob_fval;
|
||||
} float_object;
|
||||
|
||||
// PyLong object offset;
|
||||
struct _long_object {
|
||||
uint64_t size;
|
||||
uint64_t lv_tag;
|
||||
uint64_t ob_digit;
|
||||
} long_object;
|
||||
|
||||
// PyBytes object offset;
|
||||
struct _bytes_object {
|
||||
uint64_t size;
|
||||
uint64_t ob_size;
|
||||
uint64_t ob_sval;
|
||||
} bytes_object;
|
||||
|
||||
// Unicode object offset;
|
||||
struct _unicode_object {
|
||||
uint64_t size;
|
||||
uint64_t state;
|
||||
uint64_t length;
|
||||
uint64_t asciiobject_size;
|
||||
} unicode_object;
|
||||
|
||||
// GC runtime state offset;
|
||||
struct _gc {
|
||||
uint64_t size;
|
||||
uint64_t collecting;
|
||||
} gc;
|
||||
|
||||
// Generator object offset;
|
||||
struct _gen_object {
|
||||
uint64_t size;
|
||||
uint64_t gi_name;
|
||||
uint64_t gi_iframe;
|
||||
uint64_t gi_frame_state;
|
||||
} gen_object;
|
||||
|
||||
struct _llist_node {
|
||||
uint64_t next;
|
||||
uint64_t prev;
|
||||
} llist_node;
|
||||
|
||||
struct _debugger_support {
|
||||
uint64_t eval_breaker;
|
||||
uint64_t remote_debugger_support;
|
||||
uint64_t remote_debugging_enabled;
|
||||
uint64_t debugger_pending_call;
|
||||
uint64_t debugger_script_path;
|
||||
uint64_t debugger_script_path_size;
|
||||
} debugger_support;
|
||||
} _Py_DebugOffsets;
|
||||
|
||||
|
||||
#define _Py_DebugOffsets_INIT(debug_cookie) { \
|
||||
.cookie = debug_cookie, \
|
||||
.version = PY_VERSION_HEX, \
|
||||
.free_threaded = _Py_Debug_Free_Threaded, \
|
||||
.runtime_state = { \
|
||||
.size = sizeof(_PyRuntimeState), \
|
||||
.finalizing = offsetof(_PyRuntimeState, _finalizing), \
|
||||
.interpreters_head = offsetof(_PyRuntimeState, interpreters.head), \
|
||||
}, \
|
||||
.interpreter_state = { \
|
||||
.size = sizeof(PyInterpreterState), \
|
||||
.id = offsetof(PyInterpreterState, id), \
|
||||
.next = offsetof(PyInterpreterState, next), \
|
||||
.threads_head = offsetof(PyInterpreterState, threads.head), \
|
||||
.threads_main = offsetof(PyInterpreterState, threads.main), \
|
||||
.gc = offsetof(PyInterpreterState, gc), \
|
||||
.imports_modules = offsetof(PyInterpreterState, imports.modules), \
|
||||
.sysdict = offsetof(PyInterpreterState, sysdict), \
|
||||
.builtins = offsetof(PyInterpreterState, builtins), \
|
||||
.ceval_gil = offsetof(PyInterpreterState, ceval.gil), \
|
||||
.gil_runtime_state = offsetof(PyInterpreterState, _gil), \
|
||||
.gil_runtime_state_enabled = _Py_Debug_gilruntimestate_enabled, \
|
||||
.gil_runtime_state_locked = offsetof(PyInterpreterState, _gil.locked), \
|
||||
.gil_runtime_state_holder = offsetof(PyInterpreterState, _gil.last_holder), \
|
||||
.code_object_generation = offsetof(PyInterpreterState, _code_object_generation), \
|
||||
.tlbc_generation = _Py_Debug_interpreter_state_tlbc_generation, \
|
||||
}, \
|
||||
.thread_state = { \
|
||||
.size = sizeof(PyThreadState), \
|
||||
.prev = offsetof(PyThreadState, prev), \
|
||||
.next = offsetof(PyThreadState, next), \
|
||||
.interp = offsetof(PyThreadState, interp), \
|
||||
.current_frame = offsetof(PyThreadState, current_frame), \
|
||||
.thread_id = offsetof(PyThreadState, thread_id), \
|
||||
.native_thread_id = offsetof(PyThreadState, native_thread_id), \
|
||||
.datastack_chunk = offsetof(PyThreadState, datastack_chunk), \
|
||||
.status = offsetof(PyThreadState, _status), \
|
||||
}, \
|
||||
.interpreter_frame = { \
|
||||
.size = sizeof(_PyInterpreterFrame), \
|
||||
.previous = offsetof(_PyInterpreterFrame, previous), \
|
||||
.executable = offsetof(_PyInterpreterFrame, f_executable), \
|
||||
.instr_ptr = offsetof(_PyInterpreterFrame, instr_ptr), \
|
||||
.localsplus = offsetof(_PyInterpreterFrame, localsplus), \
|
||||
.owner = offsetof(_PyInterpreterFrame, owner), \
|
||||
.stackpointer = offsetof(_PyInterpreterFrame, stackpointer), \
|
||||
.tlbc_index = _Py_Debug_interpreter_frame_tlbc_index, \
|
||||
}, \
|
||||
.code_object = { \
|
||||
.size = sizeof(PyCodeObject), \
|
||||
.filename = offsetof(PyCodeObject, co_filename), \
|
||||
.name = offsetof(PyCodeObject, co_name), \
|
||||
.qualname = offsetof(PyCodeObject, co_qualname), \
|
||||
.linetable = offsetof(PyCodeObject, co_linetable), \
|
||||
.firstlineno = offsetof(PyCodeObject, co_firstlineno), \
|
||||
.argcount = offsetof(PyCodeObject, co_argcount), \
|
||||
.localsplusnames = offsetof(PyCodeObject, co_localsplusnames), \
|
||||
.localspluskinds = offsetof(PyCodeObject, co_localspluskinds), \
|
||||
.co_code_adaptive = offsetof(PyCodeObject, co_code_adaptive), \
|
||||
.co_tlbc = _Py_Debug_code_object_co_tlbc, \
|
||||
}, \
|
||||
.pyobject = { \
|
||||
.size = sizeof(PyObject), \
|
||||
.ob_type = offsetof(PyObject, ob_type), \
|
||||
}, \
|
||||
.type_object = { \
|
||||
.size = sizeof(PyTypeObject), \
|
||||
.tp_name = offsetof(PyTypeObject, tp_name), \
|
||||
.tp_repr = offsetof(PyTypeObject, tp_repr), \
|
||||
.tp_flags = offsetof(PyTypeObject, tp_flags), \
|
||||
}, \
|
||||
.tuple_object = { \
|
||||
.size = sizeof(PyTupleObject), \
|
||||
.ob_item = offsetof(PyTupleObject, ob_item), \
|
||||
.ob_size = offsetof(PyTupleObject, ob_base.ob_size), \
|
||||
}, \
|
||||
.list_object = { \
|
||||
.size = sizeof(PyListObject), \
|
||||
.ob_item = offsetof(PyListObject, ob_item), \
|
||||
.ob_size = offsetof(PyListObject, ob_base.ob_size), \
|
||||
}, \
|
||||
.set_object = { \
|
||||
.size = sizeof(PySetObject), \
|
||||
.used = offsetof(PySetObject, used), \
|
||||
.table = offsetof(PySetObject, table), \
|
||||
.mask = offsetof(PySetObject, mask), \
|
||||
}, \
|
||||
.dict_object = { \
|
||||
.size = sizeof(PyDictObject), \
|
||||
.ma_keys = offsetof(PyDictObject, ma_keys), \
|
||||
.ma_values = offsetof(PyDictObject, ma_values), \
|
||||
}, \
|
||||
.float_object = { \
|
||||
.size = sizeof(PyFloatObject), \
|
||||
.ob_fval = offsetof(PyFloatObject, ob_fval), \
|
||||
}, \
|
||||
.long_object = { \
|
||||
.size = sizeof(PyLongObject), \
|
||||
.lv_tag = offsetof(PyLongObject, long_value.lv_tag), \
|
||||
.ob_digit = offsetof(PyLongObject, long_value.ob_digit), \
|
||||
}, \
|
||||
.bytes_object = { \
|
||||
.size = sizeof(PyBytesObject), \
|
||||
.ob_size = offsetof(PyBytesObject, ob_base.ob_size), \
|
||||
.ob_sval = offsetof(PyBytesObject, ob_sval), \
|
||||
}, \
|
||||
.unicode_object = { \
|
||||
.size = sizeof(PyUnicodeObject), \
|
||||
.state = offsetof(PyUnicodeObject, _base._base.state), \
|
||||
.length = offsetof(PyUnicodeObject, _base._base.length), \
|
||||
.asciiobject_size = sizeof(PyASCIIObject), \
|
||||
}, \
|
||||
.gc = { \
|
||||
.size = sizeof(struct _gc_runtime_state), \
|
||||
.collecting = offsetof(struct _gc_runtime_state, collecting), \
|
||||
}, \
|
||||
.gen_object = { \
|
||||
.size = sizeof(PyGenObject), \
|
||||
.gi_name = offsetof(PyGenObject, gi_name), \
|
||||
.gi_iframe = offsetof(PyGenObject, gi_iframe), \
|
||||
.gi_frame_state = offsetof(PyGenObject, gi_frame_state), \
|
||||
}, \
|
||||
.llist_node = { \
|
||||
.next = offsetof(struct llist_node, next), \
|
||||
.prev = offsetof(struct llist_node, prev), \
|
||||
}, \
|
||||
.debugger_support = { \
|
||||
.eval_breaker = offsetof(PyThreadState, eval_breaker), \
|
||||
.remote_debugger_support = offsetof(PyThreadState, remote_debugger_support), \
|
||||
.remote_debugging_enabled = offsetof(PyInterpreterState, config.remote_debug), \
|
||||
.debugger_pending_call = offsetof(_PyRemoteDebuggerSupport, debugger_pending_call), \
|
||||
.debugger_script_path = offsetof(_PyRemoteDebuggerSupport, debugger_script_path), \
|
||||
.debugger_script_path_size = _Py_MAX_SCRIPT_PATH_SIZE, \
|
||||
}, \
|
||||
}
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_DEBUG_OFFSETS_H */
|
||||
28
extern/include/python/internal/pycore_descrobject.h
vendored
Normal file
28
extern/include/python/internal/pycore_descrobject.h
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
#ifndef Py_INTERNAL_DESCROBJECT_H
|
||||
#define Py_INTERNAL_DESCROBJECT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
PyObject_HEAD
|
||||
PyObject *prop_get;
|
||||
PyObject *prop_set;
|
||||
PyObject *prop_del;
|
||||
PyObject *prop_doc;
|
||||
PyObject *prop_name;
|
||||
int getter_doc;
|
||||
} propertyobject;
|
||||
|
||||
typedef propertyobject _PyPropertyObject;
|
||||
|
||||
extern PyTypeObject _PyMethodWrapper_Type;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_DESCROBJECT_H */
|
||||
410
extern/include/python/internal/pycore_dict.h
vendored
Normal file
410
extern/include/python/internal/pycore_dict.h
vendored
Normal file
@@ -0,0 +1,410 @@
|
||||
#ifndef Py_INTERNAL_DICT_H
|
||||
#define Py_INTERNAL_DICT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_object.h" // PyManagedDictPointer
|
||||
#include "pycore_pyatomic_ft_wrappers.h" // FT_ATOMIC_LOAD_SSIZE_ACQUIRE
|
||||
#include "pycore_stackref.h" // _PyStackRef
|
||||
#include "pycore_stats.h"
|
||||
|
||||
// Unsafe flavor of PyDict_GetItemWithError(): no error checking
|
||||
extern PyObject* _PyDict_GetItemWithError(PyObject *dp, PyObject *key);
|
||||
|
||||
// Delete an item from a dict if a predicate is true
|
||||
// Returns -1 on error, 1 if the item was deleted, 0 otherwise
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyDict_DelItemIf(PyObject *mp, PyObject *key,
|
||||
int (*predicate)(PyObject *value, void *arg),
|
||||
void *arg);
|
||||
|
||||
// "KnownHash" variants
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyDict_SetItem_KnownHash(PyObject *mp, PyObject *key,
|
||||
PyObject *item, Py_hash_t hash);
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyDict_DelItem_KnownHash(PyObject *mp, PyObject *key,
|
||||
Py_hash_t hash);
|
||||
|
||||
extern int _PyDict_DelItem_KnownHash_LockHeld(PyObject *mp, PyObject *key,
|
||||
Py_hash_t hash);
|
||||
|
||||
extern int _PyDict_Contains_KnownHash(PyObject *, PyObject *, Py_hash_t);
|
||||
|
||||
// "Id" variants
|
||||
extern PyObject* _PyDict_GetItemIdWithError(PyObject *dp,
|
||||
_Py_Identifier *key);
|
||||
extern int _PyDict_ContainsId(PyObject *, _Py_Identifier *);
|
||||
extern int _PyDict_SetItemId(PyObject *dp, _Py_Identifier *key, PyObject *item);
|
||||
extern int _PyDict_DelItemId(PyObject *mp, _Py_Identifier *key);
|
||||
|
||||
extern int _PyDict_Next(
|
||||
PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value, Py_hash_t *hash);
|
||||
|
||||
extern int _PyDict_HasOnlyStringKeys(PyObject *mp);
|
||||
|
||||
// Export for '_ctypes' shared extension
|
||||
PyAPI_FUNC(Py_ssize_t) _PyDict_SizeOf(PyDictObject *);
|
||||
|
||||
extern Py_ssize_t _PyDict_SizeOf_LockHeld(PyDictObject *);
|
||||
|
||||
#define _PyDict_HasSplitTable(d) ((d)->ma_values != NULL)
|
||||
|
||||
/* Like PyDict_Merge, but override can be 0, 1 or 2. If override is 0,
|
||||
the first occurrence of a key wins, if override is 1, the last occurrence
|
||||
of a key wins, if override is 2, a KeyError with conflicting key as
|
||||
argument is raised.
|
||||
*/
|
||||
PyAPI_FUNC(int) _PyDict_MergeEx(PyObject *mp, PyObject *other, int override);
|
||||
|
||||
extern void _PyDict_DebugMallocStats(FILE *out);
|
||||
|
||||
|
||||
/* _PyDictView */
|
||||
|
||||
typedef struct {
|
||||
PyObject_HEAD
|
||||
PyDictObject *dv_dict;
|
||||
} _PyDictViewObject;
|
||||
|
||||
extern PyObject* _PyDictView_New(PyObject *, PyTypeObject *);
|
||||
extern PyObject* _PyDictView_Intersect(PyObject* self, PyObject *other);
|
||||
|
||||
/* other API */
|
||||
|
||||
typedef struct {
|
||||
/* Cached hash code of me_key. */
|
||||
Py_hash_t me_hash;
|
||||
PyObject *me_key;
|
||||
PyObject *me_value; /* This field is only meaningful for combined tables */
|
||||
} PyDictKeyEntry;
|
||||
|
||||
typedef struct {
|
||||
PyObject *me_key; /* The key must be Unicode and have hash. */
|
||||
PyObject *me_value; /* This field is only meaningful for combined tables */
|
||||
} PyDictUnicodeEntry;
|
||||
|
||||
extern PyDictKeysObject *_PyDict_NewKeysForClass(PyHeapTypeObject *);
|
||||
extern PyObject *_PyDict_FromKeys(PyObject *, PyObject *, PyObject *);
|
||||
|
||||
/* Gets a version number unique to the current state of the keys of dict, if possible.
|
||||
* Returns the version number, or zero if it was not possible to get a version number. */
|
||||
extern uint32_t _PyDictKeys_GetVersionForCurrentState(
|
||||
PyInterpreterState *interp, PyDictKeysObject *dictkeys);
|
||||
|
||||
/* Gets a version number unique to the current state of the keys of dict, if possible.
|
||||
*
|
||||
* In free-threaded builds ensures that the dict can be used for lock-free
|
||||
* reads if a version was assigned.
|
||||
*
|
||||
* The caller must hold the per-object lock on dict.
|
||||
*
|
||||
* Returns the version number, or zero if it was not possible to get a version number. */
|
||||
extern uint32_t _PyDict_GetKeysVersionForCurrentState(
|
||||
PyInterpreterState *interp, PyDictObject *dict);
|
||||
|
||||
extern size_t _PyDict_KeysSize(PyDictKeysObject *keys);
|
||||
|
||||
extern void _PyDictKeys_DecRef(PyDictKeysObject *keys);
|
||||
|
||||
/* _Py_dict_lookup() returns index of entry which can be used like DK_ENTRIES(dk)[index].
|
||||
* -1 when no entry found, -3 when compare raises error.
|
||||
*/
|
||||
extern Py_ssize_t _Py_dict_lookup(PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject **value_addr);
|
||||
extern Py_ssize_t _Py_dict_lookup_threadsafe(PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject **value_addr);
|
||||
extern Py_ssize_t _Py_dict_lookup_threadsafe_stackref(PyDictObject *mp, PyObject *key, Py_hash_t hash, _PyStackRef *value_addr);
|
||||
|
||||
extern Py_ssize_t _PyDict_LookupIndex(PyDictObject *, PyObject *);
|
||||
extern Py_ssize_t _PyDictKeys_StringLookup(PyDictKeysObject* dictkeys, PyObject *key);
|
||||
|
||||
/* Look up a string key in an all unicode dict keys, assign the keys object a version, and
|
||||
* store it in version.
|
||||
*
|
||||
* Returns DKIX_ERROR if key is not a string or if the keys object is not all
|
||||
* strings.
|
||||
*
|
||||
* Returns DKIX_EMPTY if the key is not present.
|
||||
*/
|
||||
extern Py_ssize_t _PyDictKeys_StringLookupAndVersion(PyDictKeysObject* dictkeys, PyObject *key, uint32_t *version);
|
||||
extern Py_ssize_t _PyDictKeys_StringLookupSplit(PyDictKeysObject* dictkeys, PyObject *key);
|
||||
PyAPI_FUNC(PyObject *)_PyDict_LoadGlobal(PyDictObject *, PyDictObject *, PyObject *);
|
||||
PyAPI_FUNC(void) _PyDict_LoadGlobalStackRef(PyDictObject *, PyDictObject *, PyObject *, _PyStackRef *);
|
||||
|
||||
// Loads the __builtins__ object from the globals dict. Returns a new reference.
|
||||
extern PyObject *_PyDict_LoadBuiltinsFromGlobals(PyObject *globals);
|
||||
|
||||
/* Consumes references to key and value */
|
||||
PyAPI_FUNC(int) _PyDict_SetItem_Take2(PyDictObject *op, PyObject *key, PyObject *value);
|
||||
extern int _PyDict_SetItem_LockHeld(PyDictObject *dict, PyObject *name, PyObject *value);
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyDict_SetItem_KnownHash_LockHeld(PyDictObject *mp, PyObject *key,
|
||||
PyObject *value, Py_hash_t hash);
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyDict_GetItemRef_KnownHash_LockHeld(PyDictObject *op, PyObject *key, Py_hash_t hash, PyObject **result);
|
||||
extern int _PyDict_GetItemRef_KnownHash(PyDictObject *op, PyObject *key, Py_hash_t hash, PyObject **result);
|
||||
extern int _PyDict_GetItemRef_Unicode_LockHeld(PyDictObject *op, PyObject *key, PyObject **result);
|
||||
extern int _PyObjectDict_SetItem(PyTypeObject *tp, PyObject *obj, PyObject **dictptr, PyObject *name, PyObject *value);
|
||||
|
||||
extern int _PyDict_Pop_KnownHash(
|
||||
PyDictObject *dict,
|
||||
PyObject *key,
|
||||
Py_hash_t hash,
|
||||
PyObject **result);
|
||||
|
||||
extern void _PyDict_Clear_LockHeld(PyObject *op);
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
PyAPI_FUNC(void) _PyDict_EnsureSharedOnRead(PyDictObject *mp);
|
||||
#endif
|
||||
|
||||
#define DKIX_EMPTY (-1)
|
||||
#define DKIX_DUMMY (-2) /* Used internally */
|
||||
#define DKIX_ERROR (-3)
|
||||
#define DKIX_KEY_CHANGED (-4) /* Used internally */
|
||||
|
||||
typedef enum {
|
||||
DICT_KEYS_GENERAL = 0,
|
||||
DICT_KEYS_UNICODE = 1,
|
||||
DICT_KEYS_SPLIT = 2
|
||||
} DictKeysKind;
|
||||
|
||||
/* See dictobject.c for actual layout of DictKeysObject */
|
||||
struct _dictkeysobject {
|
||||
Py_ssize_t dk_refcnt;
|
||||
|
||||
/* Size of the hash table (dk_indices). It must be a power of 2. */
|
||||
uint8_t dk_log2_size;
|
||||
|
||||
/* Size of the hash table (dk_indices) by bytes. */
|
||||
uint8_t dk_log2_index_bytes;
|
||||
|
||||
/* Kind of keys */
|
||||
uint8_t dk_kind;
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
/* Lock used to protect shared keys */
|
||||
PyMutex dk_mutex;
|
||||
#endif
|
||||
|
||||
/* Version number -- Reset to 0 by any modification to keys */
|
||||
uint32_t dk_version;
|
||||
|
||||
/* Number of usable entries in dk_entries. */
|
||||
Py_ssize_t dk_usable;
|
||||
|
||||
/* Number of used entries in dk_entries. */
|
||||
Py_ssize_t dk_nentries;
|
||||
|
||||
|
||||
/* Actual hash table of dk_size entries. It holds indices in dk_entries,
|
||||
or DKIX_EMPTY(-1) or DKIX_DUMMY(-2).
|
||||
|
||||
Indices must be: 0 <= indice < USABLE_FRACTION(dk_size).
|
||||
|
||||
The size in bytes of an indice depends on dk_size:
|
||||
|
||||
- 1 byte if dk_size <= 0xff (char*)
|
||||
- 2 bytes if dk_size <= 0xffff (int16_t*)
|
||||
- 4 bytes if dk_size <= 0xffffffff (int32_t*)
|
||||
- 8 bytes otherwise (int64_t*)
|
||||
|
||||
Dynamically sized, SIZEOF_VOID_P is minimum. */
|
||||
char dk_indices[]; /* char is required to avoid strict aliasing. */
|
||||
|
||||
/* "PyDictKeyEntry or PyDictUnicodeEntry dk_entries[USABLE_FRACTION(DK_SIZE(dk))];" array follows:
|
||||
see the DK_ENTRIES() / DK_UNICODE_ENTRIES() functions below */
|
||||
};
|
||||
|
||||
/* This must be no more than 250, for the prefix size to fit in one byte. */
|
||||
#define SHARED_KEYS_MAX_SIZE 30
|
||||
#define NEXT_LOG2_SHARED_KEYS_MAX_SIZE 6
|
||||
|
||||
/* Layout of dict values:
|
||||
*
|
||||
* The PyObject *values are preceded by an array of bytes holding
|
||||
* the insertion order and size.
|
||||
* [-1] = prefix size. [-2] = used size. size[-2-n...] = insertion order.
|
||||
*/
|
||||
struct _dictvalues {
|
||||
uint8_t capacity;
|
||||
uint8_t size;
|
||||
uint8_t embedded;
|
||||
uint8_t valid;
|
||||
PyObject *values[1];
|
||||
};
|
||||
|
||||
#define DK_LOG_SIZE(dk) _Py_RVALUE((dk)->dk_log2_size)
|
||||
#if SIZEOF_VOID_P > 4
|
||||
#define DK_SIZE(dk) (((int64_t)1)<<DK_LOG_SIZE(dk))
|
||||
#else
|
||||
#define DK_SIZE(dk) (1<<DK_LOG_SIZE(dk))
|
||||
#endif
|
||||
|
||||
static inline void* _DK_ENTRIES(PyDictKeysObject *dk) {
|
||||
int8_t *indices = (int8_t*)(dk->dk_indices);
|
||||
size_t index = (size_t)1 << dk->dk_log2_index_bytes;
|
||||
return (&indices[index]);
|
||||
}
|
||||
|
||||
static inline PyDictKeyEntry* DK_ENTRIES(PyDictKeysObject *dk) {
|
||||
assert(dk->dk_kind == DICT_KEYS_GENERAL);
|
||||
return (PyDictKeyEntry*)_DK_ENTRIES(dk);
|
||||
}
|
||||
static inline PyDictUnicodeEntry* DK_UNICODE_ENTRIES(PyDictKeysObject *dk) {
|
||||
assert(dk->dk_kind != DICT_KEYS_GENERAL);
|
||||
return (PyDictUnicodeEntry*)_DK_ENTRIES(dk);
|
||||
}
|
||||
|
||||
#define DK_IS_UNICODE(dk) ((dk)->dk_kind != DICT_KEYS_GENERAL)
|
||||
|
||||
#define DICT_VERSION_INCREMENT (1 << (DICT_MAX_WATCHERS + DICT_WATCHED_MUTATION_BITS))
|
||||
#define DICT_WATCHER_MASK ((1 << DICT_MAX_WATCHERS) - 1)
|
||||
#define DICT_WATCHER_AND_MODIFICATION_MASK ((1 << (DICT_MAX_WATCHERS + DICT_WATCHED_MUTATION_BITS)) - 1)
|
||||
#define DICT_UNIQUE_ID_SHIFT (32)
|
||||
#define DICT_UNIQUE_ID_MAX ((UINT64_C(1) << (64 - DICT_UNIQUE_ID_SHIFT)) - 1)
|
||||
|
||||
|
||||
PyAPI_FUNC(void)
|
||||
_PyDict_SendEvent(int watcher_bits,
|
||||
PyDict_WatchEvent event,
|
||||
PyDictObject *mp,
|
||||
PyObject *key,
|
||||
PyObject *value);
|
||||
|
||||
static inline void
|
||||
_PyDict_NotifyEvent(PyInterpreterState *interp,
|
||||
PyDict_WatchEvent event,
|
||||
PyDictObject *mp,
|
||||
PyObject *key,
|
||||
PyObject *value)
|
||||
{
|
||||
assert(Py_REFCNT((PyObject*)mp) > 0);
|
||||
int watcher_bits = mp->_ma_watcher_tag & DICT_WATCHER_MASK;
|
||||
if (watcher_bits) {
|
||||
RARE_EVENT_STAT_INC(watched_dict_modification);
|
||||
_PyDict_SendEvent(watcher_bits, event, mp, key, value);
|
||||
}
|
||||
}
|
||||
|
||||
extern PyDictObject *_PyObject_MaterializeManagedDict(PyObject *obj);
|
||||
|
||||
PyAPI_FUNC(PyObject *)_PyDict_FromItems(
|
||||
PyObject *const *keys, Py_ssize_t keys_offset,
|
||||
PyObject *const *values, Py_ssize_t values_offset,
|
||||
Py_ssize_t length);
|
||||
|
||||
static inline uint8_t *
|
||||
get_insertion_order_array(PyDictValues *values)
|
||||
{
|
||||
return (uint8_t *)&values->values[values->capacity];
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyDictValues_AddToInsertionOrder(PyDictValues *values, Py_ssize_t ix)
|
||||
{
|
||||
assert(ix < SHARED_KEYS_MAX_SIZE);
|
||||
int size = values->size;
|
||||
uint8_t *array = get_insertion_order_array(values);
|
||||
assert(size < values->capacity);
|
||||
assert(((uint8_t)ix) == ix);
|
||||
array[size] = (uint8_t)ix;
|
||||
values->size = size+1;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
shared_keys_usable_size(PyDictKeysObject *keys)
|
||||
{
|
||||
// dk_usable will decrease for each instance that is created and each
|
||||
// value that is added. dk_nentries will increase for each value that
|
||||
// is added. We want to always return the right value or larger.
|
||||
// We therefore increase dk_nentries first and we decrease dk_usable
|
||||
// second, and conversely here we read dk_usable first and dk_entries
|
||||
// second (to avoid the case where we read entries before the increment
|
||||
// and read usable after the decrement)
|
||||
Py_ssize_t dk_usable = FT_ATOMIC_LOAD_SSIZE_ACQUIRE(keys->dk_usable);
|
||||
Py_ssize_t dk_nentries = FT_ATOMIC_LOAD_SSIZE_ACQUIRE(keys->dk_nentries);
|
||||
return dk_nentries + dk_usable;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
_PyInlineValuesSize(PyTypeObject *tp)
|
||||
{
|
||||
PyDictKeysObject *keys = ((PyHeapTypeObject*)tp)->ht_cached_keys;
|
||||
assert(keys != NULL);
|
||||
size_t size = shared_keys_usable_size(keys);
|
||||
size_t prefix_size = _Py_SIZE_ROUND_UP(size, sizeof(PyObject *));
|
||||
assert(prefix_size < 256);
|
||||
return prefix_size + (size + 1) * sizeof(PyObject *);
|
||||
}
|
||||
|
||||
int
|
||||
_PyDict_DetachFromObject(PyDictObject *dict, PyObject *obj);
|
||||
|
||||
// Enables per-thread ref counting on this dict in the free threading build
|
||||
extern void _PyDict_EnablePerThreadRefcounting(PyObject *op);
|
||||
|
||||
PyDictObject *_PyObject_MaterializeManagedDict_LockHeld(PyObject *);
|
||||
|
||||
// See `_Py_INCREF_TYPE()` in pycore_object.h
|
||||
#ifndef Py_GIL_DISABLED
|
||||
# define _Py_INCREF_DICT Py_INCREF
|
||||
# define _Py_DECREF_DICT Py_DECREF
|
||||
# define _Py_INCREF_BUILTINS Py_INCREF
|
||||
# define _Py_DECREF_BUILTINS Py_DECREF
|
||||
#else
|
||||
static inline Py_ssize_t
|
||||
_PyDict_UniqueId(PyDictObject *mp)
|
||||
{
|
||||
return (Py_ssize_t)(mp->_ma_watcher_tag >> DICT_UNIQUE_ID_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
_Py_INCREF_DICT(PyObject *op)
|
||||
{
|
||||
assert(PyDict_Check(op));
|
||||
Py_ssize_t id = _PyDict_UniqueId((PyDictObject *)op);
|
||||
_Py_THREAD_INCREF_OBJECT(op, id);
|
||||
}
|
||||
|
||||
static inline void
|
||||
_Py_DECREF_DICT(PyObject *op)
|
||||
{
|
||||
assert(PyDict_Check(op));
|
||||
Py_ssize_t id = _PyDict_UniqueId((PyDictObject *)op);
|
||||
_Py_THREAD_DECREF_OBJECT(op, id);
|
||||
}
|
||||
|
||||
// Like `_Py_INCREF_DICT`, but also handles non-dict objects because builtins
|
||||
// may not be a dict.
|
||||
static inline void
|
||||
_Py_INCREF_BUILTINS(PyObject *op)
|
||||
{
|
||||
if (PyDict_CheckExact(op)) {
|
||||
_Py_INCREF_DICT(op);
|
||||
}
|
||||
else {
|
||||
Py_INCREF(op);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
_Py_DECREF_BUILTINS(PyObject *op)
|
||||
{
|
||||
if (PyDict_CheckExact(op)) {
|
||||
_Py_DECREF_DICT(op);
|
||||
}
|
||||
else {
|
||||
Py_DECREF(op);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_DICT_H */
|
||||
28
extern/include/python/internal/pycore_dict_state.h
vendored
Normal file
28
extern/include/python/internal/pycore_dict_state.h
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
#ifndef Py_INTERNAL_DICT_STATE_H
|
||||
#define Py_INTERNAL_DICT_STATE_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#define DICT_MAX_WATCHERS 8
|
||||
#define DICT_WATCHED_MUTATION_BITS 4
|
||||
|
||||
struct _Py_dict_state {
|
||||
uint32_t next_keys_version;
|
||||
PyDict_WatchCallback watchers[DICT_MAX_WATCHERS];
|
||||
};
|
||||
|
||||
#define _dict_state_INIT \
|
||||
{ \
|
||||
.next_keys_version = 2, \
|
||||
}
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_DICT_STATE_H */
|
||||
40
extern/include/python/internal/pycore_dtoa.h
vendored
Normal file
40
extern/include/python/internal/pycore_dtoa.h
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
#ifndef Py_INTERNAL_DTOA_H
|
||||
#define Py_INTERNAL_DTOA_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_pymath.h" // _PY_SHORT_FLOAT_REPR
|
||||
|
||||
|
||||
#if defined(Py_USING_MEMORY_DEBUGGER) || _PY_SHORT_FLOAT_REPR == 0
|
||||
|
||||
#define _dtoa_state_INIT(INTERP) \
|
||||
{0}
|
||||
|
||||
#else
|
||||
|
||||
#define _dtoa_state_INIT(INTERP) \
|
||||
{ \
|
||||
.preallocated_next = (INTERP)->dtoa.preallocated, \
|
||||
}
|
||||
#endif
|
||||
|
||||
extern double _Py_dg_strtod(const char *str, char **ptr);
|
||||
extern char* _Py_dg_dtoa(double d, int mode, int ndigits,
|
||||
int *decpt, int *sign, char **rve);
|
||||
extern void _Py_dg_freedtoa(char *s);
|
||||
|
||||
|
||||
extern PyStatus _PyDtoa_Init(PyInterpreterState *interp);
|
||||
extern void _PyDtoa_Fini(PyInterpreterState *interp);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_DTOA_H */
|
||||
30
extern/include/python/internal/pycore_emscripten_signal.h
vendored
Normal file
30
extern/include/python/internal/pycore_emscripten_signal.h
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
#ifndef Py_EMSCRIPTEN_SIGNAL_H
|
||||
#define Py_EMSCRIPTEN_SIGNAL_H
|
||||
|
||||
#if defined(__EMSCRIPTEN__)
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
void
|
||||
_Py_CheckEmscriptenSignals(void);
|
||||
|
||||
void
|
||||
_Py_CheckEmscriptenSignalsPeriodically(void);
|
||||
|
||||
#define _Py_CHECK_EMSCRIPTEN_SIGNALS() _Py_CheckEmscriptenSignals()
|
||||
|
||||
#define _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY() _Py_CheckEmscriptenSignalsPeriodically()
|
||||
|
||||
extern int Py_EMSCRIPTEN_SIGNAL_HANDLING;
|
||||
extern int _Py_emscripten_signal_clock;
|
||||
|
||||
#else
|
||||
|
||||
#define _Py_CHECK_EMSCRIPTEN_SIGNALS()
|
||||
#define _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY()
|
||||
|
||||
#endif // defined(__EMSCRIPTEN__)
|
||||
|
||||
#endif // ndef Py_EMSCRIPTEN_SIGNAL_H
|
||||
70
extern/include/python/internal/pycore_emscripten_trampoline.h
vendored
Normal file
70
extern/include/python/internal/pycore_emscripten_trampoline.h
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
#ifndef Py_EMSCRIPTEN_TRAMPOLINE_H
|
||||
#define Py_EMSCRIPTEN_TRAMPOLINE_H
|
||||
|
||||
#include "pycore_typedefs.h" // _PyRuntimeState
|
||||
|
||||
/**
|
||||
* C function call trampolines to mitigate bad function pointer casts.
|
||||
*
|
||||
* Section 6.3.2.3, paragraph 8 reads:
|
||||
*
|
||||
* A pointer to a function of one type may be converted to a pointer to a
|
||||
* function of another type and back again; the result shall compare equal to
|
||||
* the original pointer. If a converted pointer is used to call a function
|
||||
* whose type is not compatible with the pointed-to type, the behavior is
|
||||
* undefined.
|
||||
*
|
||||
* Typical native ABIs ignore additional arguments or fill in missing values
|
||||
* with 0/NULL in function pointer cast. Compilers do not show warnings when a
|
||||
* function pointer is explicitly casted to an incompatible type.
|
||||
*
|
||||
* Bad fpcasts are an issue in WebAssembly. WASM's indirect_call has strict
|
||||
* function signature checks. Argument count, types, and return type must match.
|
||||
*
|
||||
* Third party code unintentionally rely on problematic fpcasts. The call
|
||||
* trampoline mitigates common occurrences of bad fpcasts on Emscripten.
|
||||
*/
|
||||
|
||||
#if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
|
||||
|
||||
void
|
||||
_Py_EmscriptenTrampoline_Init(_PyRuntimeState *runtime);
|
||||
|
||||
PyObject*
|
||||
_PyEM_TrampolineCall(PyCFunctionWithKeywords func,
|
||||
PyObject* self,
|
||||
PyObject* args,
|
||||
PyObject* kw);
|
||||
|
||||
#define _PyCFunction_TrampolineCall(meth, self, args) \
|
||||
_PyEM_TrampolineCall(*_PyCFunctionWithKeywords_CAST(meth), (self), (args), NULL)
|
||||
|
||||
#define _PyCFunctionWithKeywords_TrampolineCall(meth, self, args, kw) \
|
||||
_PyEM_TrampolineCall((meth), (self), (args), (kw))
|
||||
|
||||
#define descr_set_trampoline_call(set, obj, value, closure) \
|
||||
((int)_PyEM_TrampolineCall(_PyCFunctionWithKeywords_CAST(set), (obj), \
|
||||
(value), (PyObject*)(closure)))
|
||||
|
||||
#define descr_get_trampoline_call(get, obj, closure) \
|
||||
_PyEM_TrampolineCall(_PyCFunctionWithKeywords_CAST(get), (obj), \
|
||||
(PyObject*)(closure), NULL)
|
||||
|
||||
|
||||
#else // defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
|
||||
|
||||
#define _PyCFunction_TrampolineCall(meth, self, args) \
|
||||
(meth)((self), (args))
|
||||
|
||||
#define _PyCFunctionWithKeywords_TrampolineCall(meth, self, args, kw) \
|
||||
(meth)((self), (args), (kw))
|
||||
|
||||
#define descr_set_trampoline_call(set, obj, value, closure) \
|
||||
(set)((obj), (value), (closure))
|
||||
|
||||
#define descr_get_trampoline_call(get, obj, closure) \
|
||||
(get)((obj), (closure))
|
||||
|
||||
#endif // defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
|
||||
|
||||
#endif // ndef Py_EMSCRIPTEN_SIGNAL_H
|
||||
40
extern/include/python/internal/pycore_exceptions.h
vendored
Normal file
40
extern/include/python/internal/pycore_exceptions.h
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
#ifndef Py_INTERNAL_EXCEPTIONS_H
|
||||
#define Py_INTERNAL_EXCEPTIONS_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
/* runtime lifecycle */
|
||||
|
||||
extern PyStatus _PyExc_InitState(PyInterpreterState *);
|
||||
extern PyStatus _PyExc_InitGlobalObjects(PyInterpreterState *);
|
||||
extern int _PyExc_InitTypes(PyInterpreterState *);
|
||||
extern void _PyExc_Fini(PyInterpreterState *);
|
||||
|
||||
|
||||
/* other API */
|
||||
|
||||
struct _Py_exc_state {
|
||||
// The dict mapping from errno codes to OSError subclasses
|
||||
PyObject *errnomap;
|
||||
PyBaseExceptionObject *memerrors_freelist;
|
||||
int memerrors_numfree;
|
||||
#ifdef Py_GIL_DISABLED
|
||||
PyMutex memerrors_lock;
|
||||
#endif
|
||||
// The ExceptionGroup type
|
||||
PyObject *PyExc_ExceptionGroup;
|
||||
};
|
||||
|
||||
extern void _PyExc_ClearExceptionGroupType(PyInterpreterState *);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_EXCEPTIONS_H */
|
||||
100
extern/include/python/internal/pycore_faulthandler.h
vendored
Normal file
100
extern/include/python/internal/pycore_faulthandler.h
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
#ifndef Py_INTERNAL_FAULTHANDLER_H
|
||||
#define Py_INTERNAL_FAULTHANDLER_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_SIGACTION
|
||||
# include <signal.h> // sigaction
|
||||
#endif
|
||||
|
||||
|
||||
#ifndef MS_WINDOWS
|
||||
/* register() is useless on Windows, because only SIGSEGV, SIGABRT and
|
||||
SIGILL can be handled by the process, and these signals can only be used
|
||||
with enable(), not using register() */
|
||||
# define FAULTHANDLER_USER
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef HAVE_SIGACTION
|
||||
/* Using an alternative stack requires sigaltstack()
|
||||
and sigaction() SA_ONSTACK */
|
||||
# ifdef HAVE_SIGALTSTACK
|
||||
# define FAULTHANDLER_USE_ALT_STACK
|
||||
# endif
|
||||
typedef struct sigaction _Py_sighandler_t;
|
||||
#else
|
||||
typedef PyOS_sighandler_t _Py_sighandler_t;
|
||||
#endif // HAVE_SIGACTION
|
||||
|
||||
|
||||
#ifdef FAULTHANDLER_USER
|
||||
struct faulthandler_user_signal {
|
||||
int enabled;
|
||||
PyObject *file;
|
||||
int fd;
|
||||
int all_threads;
|
||||
int chain;
|
||||
_Py_sighandler_t previous;
|
||||
PyInterpreterState *interp;
|
||||
};
|
||||
#endif /* FAULTHANDLER_USER */
|
||||
|
||||
|
||||
struct _faulthandler_runtime_state {
|
||||
struct {
|
||||
int enabled;
|
||||
PyObject *file;
|
||||
int fd;
|
||||
int all_threads;
|
||||
PyInterpreterState *interp;
|
||||
#ifdef MS_WINDOWS
|
||||
void *exc_handler;
|
||||
#endif
|
||||
int c_stack;
|
||||
} fatal_error;
|
||||
|
||||
struct {
|
||||
PyObject *file;
|
||||
int fd;
|
||||
PY_TIMEOUT_T timeout_us; /* timeout in microseconds */
|
||||
int repeat;
|
||||
PyInterpreterState *interp;
|
||||
int exit;
|
||||
char *header;
|
||||
size_t header_len;
|
||||
/* The main thread always holds this lock. It is only released when
|
||||
faulthandler_thread() is interrupted before this thread exits, or at
|
||||
Python exit. */
|
||||
PyThread_type_lock cancel_event;
|
||||
/* released by child thread when joined */
|
||||
PyThread_type_lock running;
|
||||
} thread;
|
||||
|
||||
#ifdef FAULTHANDLER_USER
|
||||
struct faulthandler_user_signal *user_signals;
|
||||
#endif
|
||||
|
||||
#ifdef FAULTHANDLER_USE_ALT_STACK
|
||||
stack_t stack;
|
||||
stack_t old_stack;
|
||||
#endif
|
||||
};
|
||||
|
||||
#define _faulthandler_runtime_state_INIT \
|
||||
{ \
|
||||
.fatal_error = { \
|
||||
.fd = -1, \
|
||||
}, \
|
||||
}
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_FAULTHANDLER_H */
|
||||
320
extern/include/python/internal/pycore_fileutils.h
vendored
Normal file
320
extern/include/python/internal/pycore_fileutils.h
vendored
Normal file
@@ -0,0 +1,320 @@
|
||||
#ifndef Py_INTERNAL_FILEUTILS_H
|
||||
#define Py_INTERNAL_FILEUTILS_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include <locale.h> // struct lconv
|
||||
#include "pycore_interp_structs.h" // _Py_error_handler
|
||||
|
||||
|
||||
/* A routine to check if a file descriptor can be select()-ed. */
|
||||
#ifdef _MSC_VER
|
||||
/* On Windows, any socket fd can be select()-ed, no matter how high */
|
||||
#define _PyIsSelectable_fd(FD) (1)
|
||||
#else
|
||||
#define _PyIsSelectable_fd(FD) ((unsigned int)(FD) < (unsigned int)FD_SETSIZE)
|
||||
#endif
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(_Py_error_handler) _Py_GetErrorHandler(const char *errors);
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(int) _Py_DecodeLocaleEx(
|
||||
const char *arg,
|
||||
wchar_t **wstr,
|
||||
size_t *wlen,
|
||||
const char **reason,
|
||||
int current_locale,
|
||||
_Py_error_handler errors);
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(int) _Py_EncodeLocaleEx(
|
||||
const wchar_t *text,
|
||||
char **str,
|
||||
size_t *error_pos,
|
||||
const char **reason,
|
||||
int current_locale,
|
||||
_Py_error_handler errors);
|
||||
|
||||
extern char* _Py_EncodeLocaleRaw(
|
||||
const wchar_t *text,
|
||||
size_t *error_pos);
|
||||
|
||||
extern PyObject* _Py_device_encoding(int);
|
||||
|
||||
#if defined(MS_WINDOWS) || defined(__APPLE__)
|
||||
/* On Windows, the count parameter of read() is an int (bpo-9015, bpo-9611).
|
||||
On macOS 10.13, read() and write() with more than INT_MAX bytes
|
||||
fail with EINVAL (bpo-24658). */
|
||||
# define _PY_READ_MAX INT_MAX
|
||||
# define _PY_WRITE_MAX INT_MAX
|
||||
#else
|
||||
/* write() should truncate the input to PY_SSIZE_T_MAX bytes,
|
||||
but it's safer to do it ourself to have a portable behaviour */
|
||||
# define _PY_READ_MAX PY_SSIZE_T_MAX
|
||||
# define _PY_WRITE_MAX PY_SSIZE_T_MAX
|
||||
#endif
|
||||
|
||||
#ifdef MS_WINDOWS
|
||||
struct _Py_stat_struct {
|
||||
uint64_t st_dev;
|
||||
uint64_t st_ino;
|
||||
unsigned short st_mode;
|
||||
int st_nlink;
|
||||
int st_uid;
|
||||
int st_gid;
|
||||
unsigned long st_rdev;
|
||||
__int64 st_size;
|
||||
time_t st_atime;
|
||||
int st_atime_nsec;
|
||||
time_t st_mtime;
|
||||
int st_mtime_nsec;
|
||||
time_t st_ctime;
|
||||
int st_ctime_nsec;
|
||||
time_t st_birthtime;
|
||||
int st_birthtime_nsec;
|
||||
unsigned long st_file_attributes;
|
||||
unsigned long st_reparse_tag;
|
||||
uint64_t st_ino_high;
|
||||
};
|
||||
#else
|
||||
# define _Py_stat_struct stat
|
||||
#endif
|
||||
|
||||
// Export for 'mmap' shared extension
|
||||
PyAPI_FUNC(int) _Py_fstat(
|
||||
int fd,
|
||||
struct _Py_stat_struct *status);
|
||||
|
||||
// Export for 'mmap' shared extension
|
||||
PyAPI_FUNC(int) _Py_fstat_noraise(
|
||||
int fd,
|
||||
struct _Py_stat_struct *status);
|
||||
|
||||
// Export for '_tkinter' shared extension
|
||||
PyAPI_FUNC(int) _Py_stat(
|
||||
PyObject *path,
|
||||
struct stat *status);
|
||||
|
||||
// Export for 'select' shared extension (Solaris newDevPollObject())
|
||||
PyAPI_FUNC(int) _Py_open(
|
||||
const char *pathname,
|
||||
int flags);
|
||||
|
||||
// Export for '_posixsubprocess' shared extension
|
||||
PyAPI_FUNC(int) _Py_open_noraise(
|
||||
const char *pathname,
|
||||
int flags);
|
||||
|
||||
extern FILE* _Py_wfopen(
|
||||
const wchar_t *path,
|
||||
const wchar_t *mode);
|
||||
|
||||
extern Py_ssize_t _Py_read(
|
||||
int fd,
|
||||
void *buf,
|
||||
size_t count);
|
||||
|
||||
// Export for 'select' shared extension (Solaris devpoll_flush())
|
||||
PyAPI_FUNC(Py_ssize_t) _Py_write(
|
||||
int fd,
|
||||
const void *buf,
|
||||
size_t count);
|
||||
|
||||
// Export for '_posixsubprocess' shared extension
|
||||
PyAPI_FUNC(Py_ssize_t) _Py_write_noraise(
|
||||
int fd,
|
||||
const void *buf,
|
||||
size_t count);
|
||||
|
||||
#ifdef HAVE_READLINK
|
||||
extern int _Py_wreadlink(
|
||||
const wchar_t *path,
|
||||
wchar_t *buf,
|
||||
/* Number of characters of 'buf' buffer
|
||||
including the trailing NUL character */
|
||||
size_t buflen);
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_REALPATH
|
||||
extern wchar_t* _Py_wrealpath(
|
||||
const wchar_t *path,
|
||||
wchar_t *resolved_path,
|
||||
/* Number of characters of 'resolved_path' buffer
|
||||
including the trailing NUL character */
|
||||
size_t resolved_path_len);
|
||||
#endif
|
||||
|
||||
extern wchar_t* _Py_wgetcwd(
|
||||
wchar_t *buf,
|
||||
/* Number of characters of 'buf' buffer
|
||||
including the trailing NUL character */
|
||||
size_t buflen);
|
||||
|
||||
extern int _Py_get_inheritable(int fd);
|
||||
|
||||
// Export for '_socket' shared extension
|
||||
PyAPI_FUNC(int) _Py_set_inheritable(int fd, int inheritable,
|
||||
int *atomic_flag_works);
|
||||
|
||||
// Export for '_posixsubprocess' shared extension
|
||||
PyAPI_FUNC(int) _Py_set_inheritable_async_safe(int fd, int inheritable,
|
||||
int *atomic_flag_works);
|
||||
|
||||
// Export for '_socket' shared extension
|
||||
PyAPI_FUNC(int) _Py_dup(int fd);
|
||||
|
||||
extern int _Py_get_blocking(int fd);
|
||||
|
||||
extern int _Py_set_blocking(int fd, int blocking);
|
||||
|
||||
#ifdef MS_WINDOWS
|
||||
extern void* _Py_get_osfhandle_noraise(int fd);
|
||||
|
||||
// Export for '_testconsole' shared extension
|
||||
PyAPI_FUNC(void*) _Py_get_osfhandle(int fd);
|
||||
|
||||
extern int _Py_open_osfhandle_noraise(void *handle, int flags);
|
||||
|
||||
extern int _Py_open_osfhandle(void *handle, int flags);
|
||||
#endif /* MS_WINDOWS */
|
||||
|
||||
// This is used after getting NULL back from Py_DecodeLocale().
|
||||
#define DECODE_LOCALE_ERR(NAME, LEN) \
|
||||
((LEN) == (size_t)-2) \
|
||||
? _PyStatus_ERR("cannot decode " NAME) \
|
||||
: _PyStatus_NO_MEMORY()
|
||||
|
||||
extern int _Py_HasFileSystemDefaultEncodeErrors;
|
||||
|
||||
extern int _Py_DecodeUTF8Ex(
|
||||
const char *arg,
|
||||
Py_ssize_t arglen,
|
||||
wchar_t **wstr,
|
||||
size_t *wlen,
|
||||
const char **reason,
|
||||
_Py_error_handler errors);
|
||||
|
||||
extern int _Py_EncodeUTF8Ex(
|
||||
const wchar_t *text,
|
||||
char **str,
|
||||
size_t *error_pos,
|
||||
const char **reason,
|
||||
int raw_malloc,
|
||||
_Py_error_handler errors);
|
||||
|
||||
extern wchar_t* _Py_DecodeUTF8_surrogateescape(
|
||||
const char *arg,
|
||||
Py_ssize_t arglen,
|
||||
size_t *wlen);
|
||||
|
||||
extern int
|
||||
_Py_wstat(const wchar_t *, struct stat *);
|
||||
|
||||
extern int _Py_GetForceASCII(void);
|
||||
|
||||
/* Reset "force ASCII" mode (if it was initialized).
|
||||
|
||||
This function should be called when Python changes the LC_CTYPE locale,
|
||||
so the "force ASCII" mode can be detected again on the new locale
|
||||
encoding. */
|
||||
extern void _Py_ResetForceASCII(void);
|
||||
|
||||
|
||||
extern int _Py_GetLocaleconvNumeric(
|
||||
struct lconv *lc,
|
||||
PyObject **decimal_point,
|
||||
PyObject **thousands_sep);
|
||||
|
||||
// Export for '_posixsubprocess' (on macOS)
|
||||
PyAPI_FUNC(void) _Py_closerange(int first, int last);
|
||||
|
||||
extern wchar_t* _Py_GetLocaleEncoding(void);
|
||||
extern PyObject* _Py_GetLocaleEncodingObject(void);
|
||||
|
||||
#ifdef HAVE_NON_UNICODE_WCHAR_T_REPRESENTATION
|
||||
extern int _Py_LocaleUsesNonUnicodeWchar(void);
|
||||
|
||||
extern wchar_t* _Py_DecodeNonUnicodeWchar(
|
||||
const wchar_t* native,
|
||||
Py_ssize_t size);
|
||||
|
||||
extern int _Py_EncodeNonUnicodeWchar_InPlace(
|
||||
wchar_t* unicode,
|
||||
Py_ssize_t size);
|
||||
#endif
|
||||
|
||||
extern int _Py_isabs(const wchar_t *path);
|
||||
extern int _Py_abspath(const wchar_t *path, wchar_t **abspath_p);
|
||||
#ifdef MS_WINDOWS
|
||||
extern int _PyOS_getfullpathname(const wchar_t *path, wchar_t **abspath_p);
|
||||
#endif
|
||||
extern wchar_t* _Py_join_relfile(const wchar_t *dirname,
|
||||
const wchar_t *relfile);
|
||||
extern int _Py_add_relfile(wchar_t *dirname,
|
||||
const wchar_t *relfile,
|
||||
size_t bufsize);
|
||||
extern size_t _Py_find_basename(const wchar_t *filename);
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(wchar_t*) _Py_normpath(wchar_t *path, Py_ssize_t size);
|
||||
|
||||
extern wchar_t *_Py_normpath_and_size(wchar_t *path, Py_ssize_t size, Py_ssize_t *length);
|
||||
|
||||
// The Windows Games API family does not provide these functions
|
||||
// so provide our own implementations. Remove them in case they get added
|
||||
// to the Games API family
|
||||
#if defined(MS_WINDOWS_GAMES) && !defined(MS_WINDOWS_DESKTOP)
|
||||
#include <winerror.h> // HRESULT
|
||||
|
||||
extern HRESULT PathCchSkipRoot(const wchar_t *pszPath, const wchar_t **ppszRootEnd);
|
||||
#endif /* defined(MS_WINDOWS_GAMES) && !defined(MS_WINDOWS_DESKTOP) */
|
||||
|
||||
extern void _Py_skiproot(const wchar_t *path, Py_ssize_t size, Py_ssize_t *drvsize, Py_ssize_t *rootsize);
|
||||
|
||||
// Macros to protect CRT calls against instant termination when passed an
|
||||
// invalid parameter (bpo-23524). IPH stands for Invalid Parameter Handler.
|
||||
// Usage:
|
||||
//
|
||||
// _Py_BEGIN_SUPPRESS_IPH
|
||||
// ...
|
||||
// _Py_END_SUPPRESS_IPH
|
||||
#if defined _MSC_VER && _MSC_VER >= 1900
|
||||
|
||||
# include <stdlib.h> // _set_thread_local_invalid_parameter_handler()
|
||||
|
||||
extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler;
|
||||
# define _Py_BEGIN_SUPPRESS_IPH \
|
||||
{ _invalid_parameter_handler _Py_old_handler = \
|
||||
_set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler);
|
||||
# define _Py_END_SUPPRESS_IPH \
|
||||
_set_thread_local_invalid_parameter_handler(_Py_old_handler); }
|
||||
#else
|
||||
# define _Py_BEGIN_SUPPRESS_IPH
|
||||
# define _Py_END_SUPPRESS_IPH
|
||||
#endif /* _MSC_VER >= 1900 */
|
||||
|
||||
// Export for 'select' shared extension (Argument Clinic code)
|
||||
PyAPI_FUNC(int) _PyLong_FileDescriptor_Converter(PyObject *, void *);
|
||||
|
||||
// Export for test_peg_generator
|
||||
PyAPI_FUNC(char*) _Py_UniversalNewlineFgetsWithSize(char *, int, FILE*, PyObject *, size_t*);
|
||||
|
||||
extern int _PyFile_Flush(PyObject *);
|
||||
|
||||
#ifndef MS_WINDOWS
|
||||
extern int _Py_GetTicksPerSecond(long *ticks_per_second);
|
||||
#endif
|
||||
|
||||
// Export for '_testcapi' shared extension
|
||||
PyAPI_FUNC(int) _Py_IsValidFD(int fd);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_FILEUTILS_H */
|
||||
98
extern/include/python/internal/pycore_fileutils_windows.h
vendored
Normal file
98
extern/include/python/internal/pycore_fileutils_windows.h
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
#ifndef Py_INTERNAL_FILEUTILS_WINDOWS_H
|
||||
#define Py_INTERNAL_FILEUTILS_WINDOWS_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#ifdef MS_WINDOWS
|
||||
|
||||
#if !defined(NTDDI_WIN10_NI) || !(NTDDI_VERSION >= NTDDI_WIN10_NI)
|
||||
typedef struct _FILE_STAT_BASIC_INFORMATION {
|
||||
LARGE_INTEGER FileId;
|
||||
LARGE_INTEGER CreationTime;
|
||||
LARGE_INTEGER LastAccessTime;
|
||||
LARGE_INTEGER LastWriteTime;
|
||||
LARGE_INTEGER ChangeTime;
|
||||
LARGE_INTEGER AllocationSize;
|
||||
LARGE_INTEGER EndOfFile;
|
||||
ULONG FileAttributes;
|
||||
ULONG ReparseTag;
|
||||
ULONG NumberOfLinks;
|
||||
ULONG DeviceType;
|
||||
ULONG DeviceCharacteristics;
|
||||
ULONG Reserved;
|
||||
LARGE_INTEGER VolumeSerialNumber;
|
||||
FILE_ID_128 FileId128;
|
||||
} FILE_STAT_BASIC_INFORMATION;
|
||||
|
||||
typedef enum _FILE_INFO_BY_NAME_CLASS {
|
||||
FileStatByNameInfo,
|
||||
FileStatLxByNameInfo,
|
||||
FileCaseSensitiveByNameInfo,
|
||||
FileStatBasicByNameInfo,
|
||||
MaximumFileInfoByNameClass
|
||||
} FILE_INFO_BY_NAME_CLASS;
|
||||
#endif
|
||||
|
||||
typedef BOOL (WINAPI *PGetFileInformationByName)(
|
||||
PCWSTR FileName,
|
||||
FILE_INFO_BY_NAME_CLASS FileInformationClass,
|
||||
PVOID FileInfoBuffer,
|
||||
ULONG FileInfoBufferSize
|
||||
);
|
||||
|
||||
static inline BOOL _Py_GetFileInformationByName(
|
||||
PCWSTR FileName,
|
||||
FILE_INFO_BY_NAME_CLASS FileInformationClass,
|
||||
PVOID FileInfoBuffer,
|
||||
ULONG FileInfoBufferSize
|
||||
) {
|
||||
static PGetFileInformationByName GetFileInformationByName = NULL;
|
||||
static int GetFileInformationByName_init = -1;
|
||||
|
||||
if (GetFileInformationByName_init < 0) {
|
||||
HMODULE hMod = LoadLibraryW(L"api-ms-win-core-file-l2-1-4");
|
||||
GetFileInformationByName_init = 0;
|
||||
if (hMod) {
|
||||
GetFileInformationByName = (PGetFileInformationByName)GetProcAddress(
|
||||
hMod, "GetFileInformationByName");
|
||||
if (GetFileInformationByName) {
|
||||
GetFileInformationByName_init = 1;
|
||||
} else {
|
||||
FreeLibrary(hMod);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (GetFileInformationByName_init <= 0) {
|
||||
SetLastError(ERROR_NOT_SUPPORTED);
|
||||
return FALSE;
|
||||
}
|
||||
return GetFileInformationByName(FileName, FileInformationClass, FileInfoBuffer, FileInfoBufferSize);
|
||||
}
|
||||
|
||||
static inline BOOL _Py_GetFileInformationByName_ErrorIsTrustworthy(int error)
|
||||
{
|
||||
switch(error) {
|
||||
case ERROR_FILE_NOT_FOUND:
|
||||
case ERROR_PATH_NOT_FOUND:
|
||||
case ERROR_NOT_READY:
|
||||
case ERROR_BAD_NET_NAME:
|
||||
case ERROR_BAD_NETPATH:
|
||||
case ERROR_BAD_PATHNAME:
|
||||
case ERROR_INVALID_NAME:
|
||||
case ERROR_FILENAME_EXCED_RANGE:
|
||||
return TRUE;
|
||||
case ERROR_NOT_SUPPORTED:
|
||||
return FALSE;
|
||||
}
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
49
extern/include/python/internal/pycore_floatobject.h
vendored
Normal file
49
extern/include/python/internal/pycore_floatobject.h
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
#ifndef Py_INTERNAL_FLOATOBJECT_H
|
||||
#define Py_INTERNAL_FLOATOBJECT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_unicodeobject.h" // _PyUnicodeWriter
|
||||
|
||||
/* runtime lifecycle */
|
||||
|
||||
extern void _PyFloat_InitState(PyInterpreterState *);
|
||||
extern PyStatus _PyFloat_InitTypes(PyInterpreterState *);
|
||||
extern void _PyFloat_FiniType(PyInterpreterState *);
|
||||
|
||||
|
||||
|
||||
|
||||
PyAPI_FUNC(void) _PyFloat_ExactDealloc(PyObject *op);
|
||||
|
||||
|
||||
extern void _PyFloat_DebugMallocStats(FILE* out);
|
||||
|
||||
|
||||
/* Format the object based on the format_spec, as defined in PEP 3101
|
||||
(Advanced String Formatting). */
|
||||
extern int _PyFloat_FormatAdvancedWriter(
|
||||
_PyUnicodeWriter *writer,
|
||||
PyObject *obj,
|
||||
PyObject *format_spec,
|
||||
Py_ssize_t start,
|
||||
Py_ssize_t end);
|
||||
|
||||
extern PyObject* _Py_string_to_number_with_underscores(
|
||||
const char *str, Py_ssize_t len, const char *what, PyObject *obj, void *arg,
|
||||
PyObject *(*innerfunc)(const char *, Py_ssize_t, void *));
|
||||
|
||||
extern double _Py_parse_inf_or_nan(const char *p, char **endptr);
|
||||
|
||||
extern int _Py_convert_int_to_double(PyObject **v, double *dbl);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_FLOATOBJECT_H */
|
||||
47
extern/include/python/internal/pycore_flowgraph.h
vendored
Normal file
47
extern/include/python/internal/pycore_flowgraph.h
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
#ifndef Py_INTERNAL_CFG_H
|
||||
#define Py_INTERNAL_CFG_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_compile.h"
|
||||
#include "pycore_instruction_sequence.h"
|
||||
#include "pycore_opcode_utils.h"
|
||||
|
||||
struct _PyCfgBuilder;
|
||||
|
||||
int _PyCfgBuilder_UseLabel(struct _PyCfgBuilder *g, _PyJumpTargetLabel lbl);
|
||||
int _PyCfgBuilder_Addop(struct _PyCfgBuilder *g, int opcode, int oparg, _Py_SourceLocation loc);
|
||||
|
||||
struct _PyCfgBuilder* _PyCfgBuilder_New(void);
|
||||
void _PyCfgBuilder_Free(struct _PyCfgBuilder *g);
|
||||
int _PyCfgBuilder_CheckSize(struct _PyCfgBuilder* g);
|
||||
|
||||
int _PyCfg_OptimizeCodeUnit(struct _PyCfgBuilder *g, PyObject *consts, PyObject *const_cache,
|
||||
int nlocals, int nparams, int firstlineno);
|
||||
|
||||
struct _PyCfgBuilder* _PyCfg_FromInstructionSequence(_PyInstructionSequence *seq);
|
||||
int _PyCfg_ToInstructionSequence(struct _PyCfgBuilder *g, _PyInstructionSequence *seq);
|
||||
int _PyCfg_OptimizedCfgToInstructionSequence(struct _PyCfgBuilder *g, _PyCompile_CodeUnitMetadata *umd,
|
||||
int code_flags, int *stackdepth, int *nlocalsplus,
|
||||
_PyInstructionSequence *seq);
|
||||
|
||||
PyCodeObject *
|
||||
_PyAssemble_MakeCodeObject(_PyCompile_CodeUnitMetadata *u, PyObject *const_cache,
|
||||
PyObject *consts, int maxdepth, _PyInstructionSequence *instrs,
|
||||
int nlocalsplus, int code_flags, PyObject *filename);
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(PyObject*) _PyCompile_OptimizeCfg(
|
||||
PyObject *instructions,
|
||||
PyObject *consts,
|
||||
int nlocals);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CFG_H */
|
||||
27
extern/include/python/internal/pycore_format.h
vendored
Normal file
27
extern/include/python/internal/pycore_format.h
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
#ifndef Py_INTERNAL_FORMAT_H
|
||||
#define Py_INTERNAL_FORMAT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
/* Format codes
|
||||
* F_LJUST '-'
|
||||
* F_SIGN '+'
|
||||
* F_BLANK ' '
|
||||
* F_ALT '#'
|
||||
* F_ZERO '0'
|
||||
*/
|
||||
#define F_LJUST (1<<0)
|
||||
#define F_SIGN (1<<1)
|
||||
#define F_BLANK (1<<2)
|
||||
#define F_ALT (1<<3)
|
||||
#define F_ZERO (1<<4)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_FORMAT_H */
|
||||
61
extern/include/python/internal/pycore_frame.h
vendored
Normal file
61
extern/include/python/internal/pycore_frame.h
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
/* See InternalDocs/frames.md for an explanation of the frame stack
|
||||
* including explanation of the PyFrameObject and _PyInterpreterFrame
|
||||
* structs. */
|
||||
|
||||
#ifndef Py_INTERNAL_FRAME_H
|
||||
#define Py_INTERNAL_FRAME_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_typedefs.h" // _PyInterpreterFrame
|
||||
|
||||
|
||||
struct _frame {
|
||||
PyObject_HEAD
|
||||
PyFrameObject *f_back; /* previous frame, or NULL */
|
||||
_PyInterpreterFrame *f_frame; /* points to the frame data */
|
||||
PyObject *f_trace; /* Trace function */
|
||||
int f_lineno; /* Current line number. Only valid if non-zero */
|
||||
char f_trace_lines; /* Emit per-line trace events? */
|
||||
char f_trace_opcodes; /* Emit per-opcode trace events? */
|
||||
PyObject *f_extra_locals; /* Dict for locals set by users using f_locals, could be NULL */
|
||||
/* This is purely for backwards compatibility for PyEval_GetLocals.
|
||||
PyEval_GetLocals requires a borrowed reference so the actual reference
|
||||
is stored here */
|
||||
PyObject *f_locals_cache;
|
||||
/* A tuple containing strong references to fast locals that were overwritten
|
||||
* via f_locals. Borrowed references to these locals may exist in frames
|
||||
* closer to the top of the stack. The references in this tuple act as
|
||||
* "support" for the borrowed references, ensuring that they remain valid.
|
||||
*/
|
||||
PyObject *f_overwritten_fast_locals;
|
||||
/* The frame data, if this frame object owns the frame */
|
||||
PyObject *_f_frame_data[1];
|
||||
};
|
||||
|
||||
extern PyFrameObject* _PyFrame_New_NoTrack(PyCodeObject *code);
|
||||
|
||||
|
||||
/* other API */
|
||||
|
||||
typedef enum _framestate {
|
||||
FRAME_CREATED = -3,
|
||||
FRAME_SUSPENDED = -2,
|
||||
FRAME_SUSPENDED_YIELD_FROM = -1,
|
||||
FRAME_EXECUTING = 0,
|
||||
FRAME_COMPLETED = 1,
|
||||
FRAME_CLEARED = 4
|
||||
} PyFrameState;
|
||||
|
||||
#define FRAME_STATE_SUSPENDED(S) ((S) == FRAME_SUSPENDED || (S) == FRAME_SUSPENDED_YIELD_FROM)
|
||||
#define FRAME_STATE_FINISHED(S) ((S) >= FRAME_COMPLETED)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_FRAME_H */
|
||||
111
extern/include/python/internal/pycore_freelist.h
vendored
Normal file
111
extern/include/python/internal/pycore_freelist.h
vendored
Normal file
@@ -0,0 +1,111 @@
|
||||
#ifndef Py_INTERNAL_FREELIST_H
|
||||
#define Py_INTERNAL_FREELIST_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_freelist_state.h" // struct _Py_freelists
|
||||
#include "pycore_interp_structs.h" // PyInterpreterState
|
||||
#include "pycore_pyatomic_ft_wrappers.h" // FT_ATOMIC_STORE_PTR_RELAXED()
|
||||
#include "pycore_pystate.h" // _PyThreadState_GET
|
||||
#include "pycore_stats.h" // OBJECT_STAT_INC
|
||||
|
||||
static inline struct _Py_freelists *
|
||||
_Py_freelists_GET(void)
|
||||
{
|
||||
PyThreadState *tstate = _PyThreadState_GET();
|
||||
#ifdef Py_DEBUG
|
||||
_Py_EnsureTstateNotNULL(tstate);
|
||||
#endif
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
return &((_PyThreadStateImpl*)tstate)->freelists;
|
||||
#else
|
||||
return &tstate->interp->object_state.freelists;
|
||||
#endif
|
||||
}
|
||||
|
||||
// Pushes `op` to the freelist, calls `freefunc` if the freelist is full
|
||||
#define _Py_FREELIST_FREE(NAME, op, freefunc) \
|
||||
_PyFreeList_Free(&_Py_freelists_GET()->NAME, _PyObject_CAST(op), \
|
||||
Py_ ## NAME ## _MAXFREELIST, freefunc)
|
||||
// Pushes `op` to the freelist, returns 1 if successful, 0 if the freelist is full
|
||||
#define _Py_FREELIST_PUSH(NAME, op, limit) \
|
||||
_PyFreeList_Push(&_Py_freelists_GET()->NAME, _PyObject_CAST(op), limit)
|
||||
|
||||
// Pops a PyObject from the freelist, returns NULL if the freelist is empty.
|
||||
#define _Py_FREELIST_POP(TYPE, NAME) \
|
||||
_Py_CAST(TYPE*, _PyFreeList_Pop(&_Py_freelists_GET()->NAME))
|
||||
|
||||
// Pops a non-PyObject data structure from the freelist, returns NULL if the
|
||||
// freelist is empty.
|
||||
#define _Py_FREELIST_POP_MEM(NAME) \
|
||||
_PyFreeList_PopMem(&_Py_freelists_GET()->NAME)
|
||||
|
||||
#define _Py_FREELIST_SIZE(NAME) (int)((_Py_freelists_GET()->NAME).size)
|
||||
|
||||
static inline int
|
||||
_PyFreeList_Push(struct _Py_freelist *fl, void *obj, Py_ssize_t maxsize)
|
||||
{
|
||||
if (fl->size < maxsize && fl->size >= 0) {
|
||||
FT_ATOMIC_STORE_PTR_RELAXED(*(void **)obj, fl->freelist);
|
||||
fl->freelist = obj;
|
||||
fl->size++;
|
||||
OBJECT_STAT_INC(to_freelist);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyFreeList_Free(struct _Py_freelist *fl, void *obj, Py_ssize_t maxsize,
|
||||
freefunc dofree)
|
||||
{
|
||||
if (!_PyFreeList_Push(fl, obj, maxsize)) {
|
||||
dofree(obj);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void *
|
||||
_PyFreeList_PopNoStats(struct _Py_freelist *fl)
|
||||
{
|
||||
void *obj = fl->freelist;
|
||||
if (obj != NULL) {
|
||||
assert(fl->size > 0);
|
||||
fl->freelist = *(void **)obj;
|
||||
fl->size--;
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
static inline PyObject *
|
||||
_PyFreeList_Pop(struct _Py_freelist *fl)
|
||||
{
|
||||
PyObject *op = _PyFreeList_PopNoStats(fl);
|
||||
if (op != NULL) {
|
||||
OBJECT_STAT_INC(from_freelist);
|
||||
_Py_NewReference(op);
|
||||
}
|
||||
return op;
|
||||
}
|
||||
|
||||
static inline void *
|
||||
_PyFreeList_PopMem(struct _Py_freelist *fl)
|
||||
{
|
||||
void *op = _PyFreeList_PopNoStats(fl);
|
||||
if (op != NULL) {
|
||||
OBJECT_STAT_INC(from_freelist);
|
||||
}
|
||||
return op;
|
||||
}
|
||||
|
||||
extern void _PyObject_ClearFreeLists(struct _Py_freelists *freelists, int is_finalization);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_FREELIST_H */
|
||||
70
extern/include/python/internal/pycore_freelist_state.h
vendored
Normal file
70
extern/include/python/internal/pycore_freelist_state.h
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
#ifndef Py_INTERNAL_FREELIST_STATE_H
|
||||
#define Py_INTERNAL_FREELIST_STATE_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
# define PyTuple_MAXSAVESIZE 20 // Largest tuple to save on freelist
|
||||
# define Py_tuple_MAXFREELIST 2000 // Maximum number of tuples of each size to save
|
||||
# define Py_lists_MAXFREELIST 80
|
||||
# define Py_list_iters_MAXFREELIST 10
|
||||
# define Py_tuple_iters_MAXFREELIST 10
|
||||
# define Py_dicts_MAXFREELIST 80
|
||||
# define Py_dictkeys_MAXFREELIST 80
|
||||
# define Py_floats_MAXFREELIST 100
|
||||
# define Py_ints_MAXFREELIST 100
|
||||
# define Py_slices_MAXFREELIST 1
|
||||
# define Py_ranges_MAXFREELIST 6
|
||||
# define Py_range_iters_MAXFREELIST 6
|
||||
# define Py_contexts_MAXFREELIST 255
|
||||
# define Py_async_gens_MAXFREELIST 80
|
||||
# define Py_async_gen_asends_MAXFREELIST 80
|
||||
# define Py_futureiters_MAXFREELIST 255
|
||||
# define Py_object_stack_chunks_MAXFREELIST 4
|
||||
# define Py_unicode_writers_MAXFREELIST 1
|
||||
# define Py_pycfunctionobject_MAXFREELIST 16
|
||||
# define Py_pycmethodobject_MAXFREELIST 16
|
||||
# define Py_pymethodobjects_MAXFREELIST 20
|
||||
|
||||
// A generic freelist of either PyObjects or other data structures.
|
||||
struct _Py_freelist {
|
||||
// Entries are linked together using the first word of the object.
|
||||
// For PyObjects, this overlaps with the `ob_refcnt` field or the `ob_tid`
|
||||
// field.
|
||||
void *freelist;
|
||||
|
||||
// The number of items in the free list or -1 if the free list is disabled
|
||||
Py_ssize_t size;
|
||||
};
|
||||
|
||||
struct _Py_freelists {
|
||||
struct _Py_freelist floats;
|
||||
struct _Py_freelist ints;
|
||||
struct _Py_freelist tuples[PyTuple_MAXSAVESIZE];
|
||||
struct _Py_freelist lists;
|
||||
struct _Py_freelist list_iters;
|
||||
struct _Py_freelist tuple_iters;
|
||||
struct _Py_freelist dicts;
|
||||
struct _Py_freelist dictkeys;
|
||||
struct _Py_freelist slices;
|
||||
struct _Py_freelist ranges;
|
||||
struct _Py_freelist range_iters;
|
||||
struct _Py_freelist contexts;
|
||||
struct _Py_freelist async_gens;
|
||||
struct _Py_freelist async_gen_asends;
|
||||
struct _Py_freelist futureiters;
|
||||
struct _Py_freelist object_stack_chunks;
|
||||
struct _Py_freelist unicode_writers;
|
||||
struct _Py_freelist pycfunctionobject;
|
||||
struct _Py_freelist pycmethodobject;
|
||||
struct _Py_freelist pymethodobjects;
|
||||
};
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_FREELIST_STATE_H */
|
||||
53
extern/include/python/internal/pycore_function.h
vendored
Normal file
53
extern/include/python/internal/pycore_function.h
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
#ifndef Py_INTERNAL_FUNCTION_H
|
||||
#define Py_INTERNAL_FUNCTION_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
extern PyObject* _PyFunction_Vectorcall(
|
||||
PyObject *func,
|
||||
PyObject *const *stack,
|
||||
size_t nargsf,
|
||||
PyObject *kwnames);
|
||||
|
||||
|
||||
#define FUNC_VERSION_UNSET 0
|
||||
#define FUNC_VERSION_CLEARED 1
|
||||
#define FUNC_VERSION_FIRST_VALID 2
|
||||
|
||||
extern PyFunctionObject* _PyFunction_FromConstructor(PyFrameConstructor *constr);
|
||||
|
||||
static inline int
|
||||
_PyFunction_IsVersionValid(uint32_t version)
|
||||
{
|
||||
return version >= FUNC_VERSION_FIRST_VALID;
|
||||
}
|
||||
|
||||
extern uint32_t _PyFunction_GetVersionForCurrentState(PyFunctionObject *func);
|
||||
PyAPI_FUNC(void) _PyFunction_SetVersion(PyFunctionObject *func, uint32_t version);
|
||||
void _PyFunction_ClearCodeByVersion(uint32_t version);
|
||||
PyFunctionObject *_PyFunction_LookupByVersion(uint32_t version, PyObject **p_code);
|
||||
|
||||
extern PyObject *_Py_set_function_type_params(
|
||||
PyThreadState* unused, PyObject *func, PyObject *type_params);
|
||||
|
||||
|
||||
/* See pycore_code.h for explanation about what "stateless" means. */
|
||||
|
||||
PyAPI_FUNC(int)
|
||||
_PyFunction_VerifyStateless(PyThreadState *, PyObject *);
|
||||
|
||||
static inline PyObject* _PyFunction_GET_BUILTINS(PyObject *func) {
|
||||
return _PyFunction_CAST(func)->func_builtins;
|
||||
}
|
||||
#define _PyFunction_GET_BUILTINS(func) _PyFunction_GET_BUILTINS(_PyObject_CAST(func))
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_FUNCTION_H */
|
||||
378
extern/include/python/internal/pycore_gc.h
vendored
Normal file
378
extern/include/python/internal/pycore_gc.h
vendored
Normal file
@@ -0,0 +1,378 @@
|
||||
#ifndef Py_INTERNAL_GC_H
|
||||
#define Py_INTERNAL_GC_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_interp_structs.h" // PyGC_Head
|
||||
#include "pycore_pystate.h" // _PyInterpreterState_GET()
|
||||
#include "pycore_typedefs.h" // _PyInterpreterFrame
|
||||
|
||||
|
||||
/* Get an object's GC head */
|
||||
static inline PyGC_Head* _Py_AS_GC(PyObject *op) {
|
||||
char *gc = ((char*)op) - sizeof(PyGC_Head);
|
||||
return (PyGC_Head*)gc;
|
||||
}
|
||||
|
||||
/* Get the object given the GC head */
|
||||
static inline PyObject* _Py_FROM_GC(PyGC_Head *gc) {
|
||||
char *op = ((char *)gc) + sizeof(PyGC_Head);
|
||||
return (PyObject *)op;
|
||||
}
|
||||
|
||||
|
||||
/* Bit flags for ob_gc_bits (in Py_GIL_DISABLED builds)
|
||||
*
|
||||
* Setting the bits requires a relaxed store. The per-object lock must also be
|
||||
* held, except when the object is only visible to a single thread (e.g. during
|
||||
* object initialization or destruction).
|
||||
*
|
||||
* Reading the bits requires using a relaxed load, but does not require holding
|
||||
* the per-object lock.
|
||||
*/
|
||||
#ifdef Py_GIL_DISABLED
|
||||
# define _PyGC_BITS_TRACKED (1<<0) // Tracked by the GC
|
||||
# define _PyGC_BITS_FINALIZED (1<<1) // tp_finalize was called
|
||||
# define _PyGC_BITS_UNREACHABLE (1<<2)
|
||||
# define _PyGC_BITS_FROZEN (1<<3)
|
||||
# define _PyGC_BITS_SHARED (1<<4)
|
||||
# define _PyGC_BITS_ALIVE (1<<5) // Reachable from a known root.
|
||||
# define _PyGC_BITS_DEFERRED (1<<6) // Use deferred reference counting
|
||||
#endif
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
|
||||
static inline void
|
||||
_PyObject_SET_GC_BITS(PyObject *op, uint8_t new_bits)
|
||||
{
|
||||
uint8_t bits = _Py_atomic_load_uint8_relaxed(&op->ob_gc_bits);
|
||||
_Py_atomic_store_uint8_relaxed(&op->ob_gc_bits, bits | new_bits);
|
||||
}
|
||||
|
||||
static inline int
|
||||
_PyObject_HAS_GC_BITS(PyObject *op, uint8_t bits)
|
||||
{
|
||||
return (_Py_atomic_load_uint8_relaxed(&op->ob_gc_bits) & bits) != 0;
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyObject_CLEAR_GC_BITS(PyObject *op, uint8_t bits_to_clear)
|
||||
{
|
||||
uint8_t bits = _Py_atomic_load_uint8_relaxed(&op->ob_gc_bits);
|
||||
_Py_atomic_store_uint8_relaxed(&op->ob_gc_bits, bits & ~bits_to_clear);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* True if the object is currently tracked by the GC. */
|
||||
static inline int _PyObject_GC_IS_TRACKED(PyObject *op) {
|
||||
#ifdef Py_GIL_DISABLED
|
||||
return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_TRACKED);
|
||||
#else
|
||||
PyGC_Head *gc = _Py_AS_GC(op);
|
||||
return (gc->_gc_next != 0);
|
||||
#endif
|
||||
}
|
||||
#define _PyObject_GC_IS_TRACKED(op) _PyObject_GC_IS_TRACKED(_Py_CAST(PyObject*, op))
|
||||
|
||||
/* True if the object may be tracked by the GC in the future, or already is.
|
||||
This can be useful to implement some optimizations. */
|
||||
static inline int _PyObject_GC_MAY_BE_TRACKED(PyObject *obj) {
|
||||
if (!PyObject_IS_GC(obj)) {
|
||||
return 0;
|
||||
}
|
||||
if (PyTuple_CheckExact(obj)) {
|
||||
return _PyObject_GC_IS_TRACKED(obj);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
|
||||
/* True if memory the object references is shared between
|
||||
* multiple threads and needs special purpose when freeing
|
||||
* those references due to the possibility of in-flight
|
||||
* lock-free reads occurring. The object is responsible
|
||||
* for calling _PyMem_FreeDelayed on the referenced
|
||||
* memory. */
|
||||
static inline int _PyObject_GC_IS_SHARED(PyObject *op) {
|
||||
return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_SHARED);
|
||||
}
|
||||
#define _PyObject_GC_IS_SHARED(op) _PyObject_GC_IS_SHARED(_Py_CAST(PyObject*, op))
|
||||
|
||||
static inline void _PyObject_GC_SET_SHARED(PyObject *op) {
|
||||
_PyObject_SET_GC_BITS(op, _PyGC_BITS_SHARED);
|
||||
}
|
||||
#define _PyObject_GC_SET_SHARED(op) _PyObject_GC_SET_SHARED(_Py_CAST(PyObject*, op))
|
||||
|
||||
#endif
|
||||
|
||||
/* Bit flags for _gc_prev */
|
||||
/* Bit 0 is set when tp_finalize is called */
|
||||
#define _PyGC_PREV_MASK_FINALIZED ((uintptr_t)1)
|
||||
/* Bit 1 is set when the object is in generation which is GCed currently. */
|
||||
#define _PyGC_PREV_MASK_COLLECTING ((uintptr_t)2)
|
||||
|
||||
/* Bit 0 in _gc_next is the old space bit.
|
||||
* It is set as follows:
|
||||
* Young: gcstate->visited_space
|
||||
* old[0]: 0
|
||||
* old[1]: 1
|
||||
* permanent: 0
|
||||
*
|
||||
* During a collection all objects handled should have the bit set to
|
||||
* gcstate->visited_space, as objects are moved from the young gen
|
||||
* and the increment into old[gcstate->visited_space].
|
||||
* When object are moved from the pending space, old[gcstate->visited_space^1]
|
||||
* into the increment, the old space bit is flipped.
|
||||
*/
|
||||
#define _PyGC_NEXT_MASK_OLD_SPACE_1 1
|
||||
|
||||
#define _PyGC_PREV_SHIFT 2
|
||||
#define _PyGC_PREV_MASK (((uintptr_t) -1) << _PyGC_PREV_SHIFT)
|
||||
|
||||
/* set for debugging information */
|
||||
#define _PyGC_DEBUG_STATS (1<<0) /* print collection statistics */
|
||||
#define _PyGC_DEBUG_COLLECTABLE (1<<1) /* print collectable objects */
|
||||
#define _PyGC_DEBUG_UNCOLLECTABLE (1<<2) /* print uncollectable objects */
|
||||
#define _PyGC_DEBUG_SAVEALL (1<<5) /* save all garbage in gc.garbage */
|
||||
#define _PyGC_DEBUG_LEAK _PyGC_DEBUG_COLLECTABLE | \
|
||||
_PyGC_DEBUG_UNCOLLECTABLE | \
|
||||
_PyGC_DEBUG_SAVEALL
|
||||
|
||||
typedef enum {
|
||||
// GC was triggered by heap allocation
|
||||
_Py_GC_REASON_HEAP,
|
||||
|
||||
// GC was called during shutdown
|
||||
_Py_GC_REASON_SHUTDOWN,
|
||||
|
||||
// GC was called by gc.collect() or PyGC_Collect()
|
||||
_Py_GC_REASON_MANUAL
|
||||
} _PyGC_Reason;
|
||||
|
||||
// Lowest bit of _gc_next is used for flags only in GC.
|
||||
// But it is always 0 for normal code.
|
||||
static inline PyGC_Head* _PyGCHead_NEXT(PyGC_Head *gc) {
|
||||
uintptr_t next = gc->_gc_next & _PyGC_PREV_MASK;
|
||||
return (PyGC_Head*)next;
|
||||
}
|
||||
static inline void _PyGCHead_SET_NEXT(PyGC_Head *gc, PyGC_Head *next) {
|
||||
uintptr_t unext = (uintptr_t)next;
|
||||
assert((unext & ~_PyGC_PREV_MASK) == 0);
|
||||
gc->_gc_next = (gc->_gc_next & ~_PyGC_PREV_MASK) | unext;
|
||||
}
|
||||
|
||||
// Lowest two bits of _gc_prev is used for _PyGC_PREV_MASK_* flags.
|
||||
static inline PyGC_Head* _PyGCHead_PREV(PyGC_Head *gc) {
|
||||
uintptr_t prev = (gc->_gc_prev & _PyGC_PREV_MASK);
|
||||
return (PyGC_Head*)prev;
|
||||
}
|
||||
|
||||
static inline void _PyGCHead_SET_PREV(PyGC_Head *gc, PyGC_Head *prev) {
|
||||
uintptr_t uprev = (uintptr_t)prev;
|
||||
assert((uprev & ~_PyGC_PREV_MASK) == 0);
|
||||
gc->_gc_prev = ((gc->_gc_prev & ~_PyGC_PREV_MASK) | uprev);
|
||||
}
|
||||
|
||||
static inline int _PyGC_FINALIZED(PyObject *op) {
|
||||
#ifdef Py_GIL_DISABLED
|
||||
return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_FINALIZED);
|
||||
#else
|
||||
PyGC_Head *gc = _Py_AS_GC(op);
|
||||
return ((gc->_gc_prev & _PyGC_PREV_MASK_FINALIZED) != 0);
|
||||
#endif
|
||||
}
|
||||
static inline void _PyGC_SET_FINALIZED(PyObject *op) {
|
||||
#ifdef Py_GIL_DISABLED
|
||||
_PyObject_SET_GC_BITS(op, _PyGC_BITS_FINALIZED);
|
||||
#else
|
||||
PyGC_Head *gc = _Py_AS_GC(op);
|
||||
gc->_gc_prev |= _PyGC_PREV_MASK_FINALIZED;
|
||||
#endif
|
||||
}
|
||||
static inline void _PyGC_CLEAR_FINALIZED(PyObject *op) {
|
||||
#ifdef Py_GIL_DISABLED
|
||||
_PyObject_CLEAR_GC_BITS(op, _PyGC_BITS_FINALIZED);
|
||||
#else
|
||||
PyGC_Head *gc = _Py_AS_GC(op);
|
||||
gc->_gc_prev &= ~_PyGC_PREV_MASK_FINALIZED;
|
||||
#endif
|
||||
}
|
||||
|
||||
extern void _Py_ScheduleGC(PyThreadState *tstate);
|
||||
|
||||
#ifndef Py_GIL_DISABLED
|
||||
extern void _Py_TriggerGC(struct _gc_runtime_state *gcstate);
|
||||
#endif
|
||||
|
||||
|
||||
/* Tell the GC to track this object.
 *
 * The object must not be tracked by the GC.
 *
 * NB: While the object is tracked by the collector, it must be safe to call the
 * ob_traverse method.
 *
 * Internal note: interp->gc.generation0->_gc_prev doesn't have any bit flags
 * because it's not object header. So we don't use _PyGCHead_PREV() and
 * _PyGCHead_SET_PREV() for it to avoid unnecessary bitwise operations.
 *
 * See also the public PyObject_GC_Track() function.
 */
static inline void _PyObject_GC_TRACK(
// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
#ifndef NDEBUG
    const char *filename, int lineno,
#endif
    PyObject *op)
{
    _PyObject_ASSERT_FROM(op, !_PyObject_GC_IS_TRACKED(op),
                          "object already tracked by the garbage collector",
                          filename, lineno, __func__);
#ifdef Py_GIL_DISABLED
    /* Free-threaded build: tracking is a per-object bit, no linked list. */
    _PyObject_SET_GC_BITS(op, _PyGC_BITS_TRACKED);
#else
    PyGC_Head *gc = _Py_AS_GC(op);
    _PyObject_ASSERT_FROM(op,
                          (gc->_gc_prev & _PyGC_PREV_MASK_COLLECTING) == 0,
                          "object is in generation which is garbage collected",
                          filename, lineno, __func__);

    /* Append the object at the tail of the young (generation 0) doubly
       linked list.  generation0->_gc_prev is a raw pointer to the current
       tail (no flag bits -- see the internal note above). */
    struct _gc_runtime_state *gcstate = &_PyInterpreterState_GET()->gc;
    PyGC_Head *generation0 = &gcstate->young.head;
    PyGC_Head *last = (PyGC_Head*)(generation0->_gc_prev);
    _PyGCHead_SET_NEXT(last, gc);
    _PyGCHead_SET_PREV(gc, last);
    /* Tag the next pointer with the "not visited" bit for the current
       incremental GC pass (the complement of gcstate->visited_space). */
    uintptr_t not_visited = 1 ^ gcstate->visited_space;
    gc->_gc_next = ((uintptr_t)generation0) | not_visited;
    generation0->_gc_prev = (uintptr_t)gc;
    gcstate->young.count++; /* number of tracked GC objects */
    gcstate->heap_size++;
    /* Request a collection once the young generation exceeds its threshold. */
    if (gcstate->young.count > gcstate->young.threshold) {
        _Py_TriggerGC(gcstate);
    }
#endif
}
|
||||
|
||||
/* Tell the GC to stop tracking this object.
 *
 * Internal note: This may be called while GC. So _PyGC_PREV_MASK_COLLECTING
 * must be cleared. But _PyGC_PREV_MASK_FINALIZED bit is kept.
 *
 * The object must be tracked by the GC.
 *
 * See also the public PyObject_GC_UnTrack() which accept an object which is
 * not tracked.
 */
static inline void _PyObject_GC_UNTRACK(
// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
#ifndef NDEBUG
    const char *filename, int lineno,
#endif
    PyObject *op)
{
    _PyObject_ASSERT_FROM(op, _PyObject_GC_IS_TRACKED(op),
                          "object not tracked by the garbage collector",
                          filename, lineno, __func__);

#ifdef Py_GIL_DISABLED
    /* Free-threaded build: tracking is a per-object bit, no linked list. */
    _PyObject_CLEAR_GC_BITS(op, _PyGC_BITS_TRACKED);
#else
    /* Unlink the object from its generation's doubly linked list. */
    PyGC_Head *gc = _Py_AS_GC(op);
    PyGC_Head *prev = _PyGCHead_PREV(gc);
    PyGC_Head *next = _PyGCHead_NEXT(gc);
    _PyGCHead_SET_NEXT(prev, next);
    _PyGCHead_SET_PREV(next, prev);
    gc->_gc_next = 0;
    /* Drop all prev-word bits (including COLLECTING) except FINALIZED,
       per the internal note above. */
    gc->_gc_prev &= _PyGC_PREV_MASK_FINALIZED;
    struct _gc_runtime_state *gcstate = &_PyInterpreterState_GET()->gc;
    /* The object may already have aged out of the young generation, so
       only decrement the young count if it is non-zero. */
    if (gcstate->young.count > 0) {
        gcstate->young.count--;
    }
    gcstate->heap_size--;
#endif
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
NOTE: about untracking of mutable objects.
|
||||
|
||||
Certain types of container cannot participate in a reference cycle, and
|
||||
so do not need to be tracked by the garbage collector. Untracking these
|
||||
objects reduces the cost of garbage collections. However, determining
|
||||
which objects may be untracked is not free, and the costs must be
|
||||
weighed against the benefits for garbage collection.
|
||||
|
||||
There are two possible strategies for when to untrack a container:
|
||||
|
||||
i) When the container is created.
|
||||
ii) When the container is examined by the garbage collector.
|
||||
|
||||
Tuples containing only immutable objects (integers, strings etc, and
|
||||
recursively, tuples of immutable objects) do not need to be tracked.
|
||||
The interpreter creates a large number of tuples, many of which will
|
||||
not survive until garbage collection. It is therefore not worthwhile
|
||||
to untrack eligible tuples at creation time.
|
||||
|
||||
Instead, all tuples except the empty tuple are tracked when created.
|
||||
During garbage collection it is determined whether any surviving tuples
|
||||
can be untracked. A tuple can be untracked if all of its contents are
|
||||
already not tracked. Tuples are examined for untracking in all garbage
|
||||
collection cycles. It may take more than one cycle to untrack a tuple.
|
||||
|
||||
Dictionaries containing only immutable objects also do not need to be
|
||||
tracked. Dictionaries are untracked when created. If a tracked item is
|
||||
inserted into a dictionary (either as a key or value), the dictionary
|
||||
becomes tracked. During a full garbage collection (all generations),
|
||||
the collector will untrack any dictionaries whose contents are not
|
||||
tracked.
|
||||
|
||||
The module provides the python function is_tracked(obj), which returns
|
||||
the CURRENT tracking status of the object. Subsequent garbage
|
||||
collections may change the tracking status of the object.
|
||||
|
||||
Untracking of certain containers was introduced in issue #4688, and
|
||||
the algorithm was refined in response to issue #14775.
|
||||
*/
|
||||
|
||||
extern void _PyGC_InitState(struct _gc_runtime_state *);
|
||||
|
||||
extern Py_ssize_t _PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason);
|
||||
extern void _PyGC_CollectNoFail(PyThreadState *tstate);
|
||||
|
||||
/* Freeze objects tracked by the GC and ignore them in future collections. */
|
||||
extern void _PyGC_Freeze(PyInterpreterState *interp);
|
||||
/* Unfreezes objects placing them in the oldest generation */
|
||||
extern void _PyGC_Unfreeze(PyInterpreterState *interp);
|
||||
/* Number of frozen objects */
|
||||
extern Py_ssize_t _PyGC_GetFreezeCount(PyInterpreterState *interp);
|
||||
|
||||
extern PyObject *_PyGC_GetObjects(PyInterpreterState *interp, int generation);
|
||||
extern PyObject *_PyGC_GetReferrers(PyInterpreterState *interp, PyObject *objs);
|
||||
|
||||
// Functions to clear types free lists
|
||||
extern void _PyGC_ClearAllFreeLists(PyInterpreterState *interp);
|
||||
extern void _Py_RunGC(PyThreadState *tstate);
|
||||
|
||||
union _PyStackRef;
|
||||
|
||||
// GC visit callback for tracked interpreter frames
|
||||
extern int _PyGC_VisitFrameStack(_PyInterpreterFrame *frame, visitproc visit, void *arg);
|
||||
extern int _PyGC_VisitStackRef(union _PyStackRef *ref, visitproc visit, void *arg);
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
extern void _PyGC_VisitObjectsWorldStopped(PyInterpreterState *interp,
|
||||
gcvisitobjects_t callback, void *arg);
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_GC_H */
|
||||
43
extern/include/python/internal/pycore_genobject.h
vendored
Normal file
43
extern/include/python/internal/pycore_genobject.h
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
#ifndef Py_INTERNAL_GENOBJECT_H
|
||||
#define Py_INTERNAL_GENOBJECT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_interpframe_structs.h" // _PyGenObject
|
||||
|
||||
#include <stddef.h> // offsetof()
|
||||
|
||||
|
||||
static inline
|
||||
PyGenObject *_PyGen_GetGeneratorFromFrame(_PyInterpreterFrame *frame)
|
||||
{
|
||||
assert(frame->owner == FRAME_OWNED_BY_GENERATOR);
|
||||
size_t offset_in_gen = offsetof(PyGenObject, gi_iframe);
|
||||
return (PyGenObject *)(((char *)frame) - offset_in_gen);
|
||||
}
|
||||
|
||||
PyAPI_FUNC(PyObject *)_PyGen_yf(PyGenObject *);
|
||||
extern void _PyGen_Finalize(PyObject *self);
|
||||
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyGen_SetStopIterationValue(PyObject *);
|
||||
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyGen_FetchStopIterationValue(PyObject **);
|
||||
|
||||
PyAPI_FUNC(PyObject *)_PyCoro_GetAwaitableIter(PyObject *o);
|
||||
extern PyObject *_PyAsyncGenValueWrapperNew(PyThreadState *state, PyObject *);
|
||||
|
||||
extern PyTypeObject _PyCoroWrapper_Type;
|
||||
extern PyTypeObject _PyAsyncGenWrappedValue_Type;
|
||||
extern PyTypeObject _PyAsyncGenAThrow_Type;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_GENOBJECT_H */
|
||||
22
extern/include/python/internal/pycore_getopt.h
vendored
Normal file
22
extern/include/python/internal/pycore_getopt.h
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
#ifndef Py_INTERNAL_PYGETOPT_H
|
||||
#define Py_INTERNAL_PYGETOPT_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
extern int _PyOS_opterr;
|
||||
extern Py_ssize_t _PyOS_optind;
|
||||
extern const wchar_t *_PyOS_optarg;
|
||||
|
||||
extern void _PyOS_ResetGetOpt(void);
|
||||
|
||||
typedef struct {
|
||||
const wchar_t *name;
|
||||
int has_arg;
|
||||
int val;
|
||||
} _PyOS_LongOption;
|
||||
|
||||
extern int _PyOS_GetOpt(Py_ssize_t argc, wchar_t * const *argv, int *longindex);
|
||||
|
||||
#endif /* !Py_INTERNAL_PYGETOPT_H */
|
||||
66
extern/include/python/internal/pycore_gil.h
vendored
Normal file
66
extern/include/python/internal/pycore_gil.h
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
#ifndef Py_INTERNAL_GIL_H
|
||||
#define Py_INTERNAL_GIL_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_condvar.h" // PyCOND_T
|
||||
|
||||
#ifndef Py_HAVE_CONDVAR
|
||||
# error You need either a POSIX-compatible or a Windows system!
|
||||
#endif
|
||||
|
||||
/* Enable if you want to force the switching of threads at least
|
||||
every `interval`. */
|
||||
#undef FORCE_SWITCHING
|
||||
#define FORCE_SWITCHING
|
||||
|
||||
/* Per-interpreter state of the Global Interpreter Lock. */
struct _gil_runtime_state {
#ifdef Py_GIL_DISABLED
    /* If this GIL is disabled, enabled == 0.

       If this GIL is enabled transiently (most likely to initialize a module
       of unknown safety), enabled indicates the number of active transient
       requests.

       If this GIL is enabled permanently, enabled == INT_MAX.

       It must not be modified directly; use _PyEval_EnableGILTransiently(),
       _PyEval_EnableGILPermanently(), and _PyEval_DisableGIL()

       It is always read and written atomically, but a thread can assume its
       value will be stable as long as that thread is attached or knows that no
       other threads are attached (e.g., during a stop-the-world.). */
    int enabled;
#endif
    /* microseconds (the Python API uses seconds, though) */
    unsigned long interval;
    /* Last PyThreadState holding / having held the GIL. This helps us
       know whether anyone else was scheduled after we dropped the GIL. */
    PyThreadState* last_holder;
    /* Whether the GIL is already taken (-1 if uninitialized). This is
       atomic because it can be read without any lock taken in ceval.c. */
    int locked;
    /* Number of GIL switches since the beginning. */
    unsigned long switch_number;
    /* This condition variable allows one or several threads to wait
       until the GIL is released. In addition, the mutex also protects
       the above variables. */
    PyCOND_T cond;
    PyMUTEX_T mutex;
#ifdef FORCE_SWITCHING
    /* This condition variable helps the GIL-releasing thread wait for
       a GIL-awaiting thread to be scheduled and take the GIL. */
    PyCOND_T switch_cond;
    PyMUTEX_T switch_mutex;
#endif
};
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_GIL_H */
|
||||
34
extern/include/python/internal/pycore_global_objects.h
vendored
Normal file
34
extern/include/python/internal/pycore_global_objects.h
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
#ifndef Py_INTERNAL_GLOBAL_OBJECTS_H
|
||||
#define Py_INTERNAL_GLOBAL_OBJECTS_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
// Only immutable objects should be considered runtime-global.
|
||||
// All others must be per-interpreter.
|
||||
|
||||
#define _Py_GLOBAL_OBJECT(NAME) \
|
||||
_PyRuntime.static_objects.NAME
|
||||
#define _Py_SINGLETON(NAME) \
|
||||
_Py_GLOBAL_OBJECT(singletons.NAME)
|
||||
|
||||
|
||||
#define _Py_INTERP_CACHED_OBJECT(interp, NAME) \
|
||||
(interp)->cached_objects.NAME
|
||||
|
||||
|
||||
#define _Py_INTERP_STATIC_OBJECT(interp, NAME) \
|
||||
(interp)->static_objects.NAME
|
||||
#define _Py_INTERP_SINGLETON(interp, NAME) \
|
||||
_Py_INTERP_STATIC_OBJECT(interp, singletons.NAME)
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_GLOBAL_OBJECTS_H */
|
||||
1592
extern/include/python/internal/pycore_global_objects_fini_generated.h
vendored
Normal file
1592
extern/include/python/internal/pycore_global_objects_fini_generated.h
vendored
Normal file
File diff suppressed because it is too large
Load Diff
854
extern/include/python/internal/pycore_global_strings.h
vendored
Normal file
854
extern/include/python/internal/pycore_global_strings.h
vendored
Normal file
@@ -0,0 +1,854 @@
|
||||
#ifndef Py_INTERNAL_GLOBAL_STRINGS_H
|
||||
#define Py_INTERNAL_GLOBAL_STRINGS_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_global_objects.h" // struct _Py_SINGLETON
|
||||
|
||||
// The data structure & init here are inspired by Tools/build/deepfreeze.py.
|
||||
|
||||
// All field names generated by ASCII_STR() have a common prefix,
|
||||
// to help avoid collisions with keywords, macros, etc.
|
||||
|
||||
#define STRUCT_FOR_ASCII_STR(LITERAL) \
|
||||
struct { \
|
||||
PyASCIIObject _ascii; \
|
||||
uint8_t _data[sizeof(LITERAL)]; \
|
||||
}
|
||||
#define STRUCT_FOR_STR(NAME, LITERAL) \
|
||||
STRUCT_FOR_ASCII_STR(LITERAL) _py_ ## NAME;
|
||||
#define STRUCT_FOR_ID(NAME) \
|
||||
STRUCT_FOR_ASCII_STR(#NAME) _py_ ## NAME;
|
||||
|
||||
// XXX Order by frequency of use?
|
||||
|
||||
/* The following is auto-generated by Tools/build/generate_global_objects.py. */
|
||||
struct _Py_global_strings {
|
||||
struct {
|
||||
STRUCT_FOR_STR(anon_dictcomp, "<dictcomp>")
|
||||
STRUCT_FOR_STR(anon_genexpr, "<genexpr>")
|
||||
STRUCT_FOR_STR(anon_lambda, "<lambda>")
|
||||
STRUCT_FOR_STR(anon_listcomp, "<listcomp>")
|
||||
STRUCT_FOR_STR(anon_module, "<module>")
|
||||
STRUCT_FOR_STR(anon_null, "<NULL>")
|
||||
STRUCT_FOR_STR(anon_setcomp, "<setcomp>")
|
||||
STRUCT_FOR_STR(anon_string, "<string>")
|
||||
STRUCT_FOR_STR(anon_unknown, "<unknown>")
|
||||
STRUCT_FOR_STR(dbl_close_br, "}}")
|
||||
STRUCT_FOR_STR(dbl_open_br, "{{")
|
||||
STRUCT_FOR_STR(dbl_percent, "%%")
|
||||
STRUCT_FOR_STR(defaults, ".defaults")
|
||||
STRUCT_FOR_STR(dot_locals, ".<locals>")
|
||||
STRUCT_FOR_STR(empty, "")
|
||||
STRUCT_FOR_STR(format, ".format")
|
||||
STRUCT_FOR_STR(generic_base, ".generic_base")
|
||||
STRUCT_FOR_STR(json_decoder, "json.decoder")
|
||||
STRUCT_FOR_STR(kwdefaults, ".kwdefaults")
|
||||
STRUCT_FOR_STR(list_err, "list index out of range")
|
||||
STRUCT_FOR_STR(str_replace_inf, "1e309")
|
||||
STRUCT_FOR_STR(type_params, ".type_params")
|
||||
STRUCT_FOR_STR(utf_8, "utf-8")
|
||||
} literals;
|
||||
|
||||
struct {
|
||||
STRUCT_FOR_ID(CANCELLED)
|
||||
STRUCT_FOR_ID(FINISHED)
|
||||
STRUCT_FOR_ID(False)
|
||||
STRUCT_FOR_ID(JSONDecodeError)
|
||||
STRUCT_FOR_ID(PENDING)
|
||||
STRUCT_FOR_ID(Py_Repr)
|
||||
STRUCT_FOR_ID(TextIOWrapper)
|
||||
STRUCT_FOR_ID(True)
|
||||
STRUCT_FOR_ID(WarningMessage)
|
||||
STRUCT_FOR_ID(_WindowsConsoleIO)
|
||||
STRUCT_FOR_ID(__IOBase_closed)
|
||||
STRUCT_FOR_ID(__abc_tpflags__)
|
||||
STRUCT_FOR_ID(__abs__)
|
||||
STRUCT_FOR_ID(__abstractmethods__)
|
||||
STRUCT_FOR_ID(__add__)
|
||||
STRUCT_FOR_ID(__aenter__)
|
||||
STRUCT_FOR_ID(__aexit__)
|
||||
STRUCT_FOR_ID(__aiter__)
|
||||
STRUCT_FOR_ID(__all__)
|
||||
STRUCT_FOR_ID(__and__)
|
||||
STRUCT_FOR_ID(__anext__)
|
||||
STRUCT_FOR_ID(__annotate__)
|
||||
STRUCT_FOR_ID(__annotate_func__)
|
||||
STRUCT_FOR_ID(__annotations__)
|
||||
STRUCT_FOR_ID(__annotations_cache__)
|
||||
STRUCT_FOR_ID(__args__)
|
||||
STRUCT_FOR_ID(__await__)
|
||||
STRUCT_FOR_ID(__bases__)
|
||||
STRUCT_FOR_ID(__bool__)
|
||||
STRUCT_FOR_ID(__buffer__)
|
||||
STRUCT_FOR_ID(__build_class__)
|
||||
STRUCT_FOR_ID(__builtins__)
|
||||
STRUCT_FOR_ID(__bytes__)
|
||||
STRUCT_FOR_ID(__call__)
|
||||
STRUCT_FOR_ID(__cantrace__)
|
||||
STRUCT_FOR_ID(__ceil__)
|
||||
STRUCT_FOR_ID(__class__)
|
||||
STRUCT_FOR_ID(__class_getitem__)
|
||||
STRUCT_FOR_ID(__classcell__)
|
||||
STRUCT_FOR_ID(__classdict__)
|
||||
STRUCT_FOR_ID(__classdictcell__)
|
||||
STRUCT_FOR_ID(__complex__)
|
||||
STRUCT_FOR_ID(__conditional_annotations__)
|
||||
STRUCT_FOR_ID(__contains__)
|
||||
STRUCT_FOR_ID(__ctypes_from_outparam__)
|
||||
STRUCT_FOR_ID(__del__)
|
||||
STRUCT_FOR_ID(__delattr__)
|
||||
STRUCT_FOR_ID(__delete__)
|
||||
STRUCT_FOR_ID(__delitem__)
|
||||
STRUCT_FOR_ID(__dict__)
|
||||
STRUCT_FOR_ID(__dictoffset__)
|
||||
STRUCT_FOR_ID(__dir__)
|
||||
STRUCT_FOR_ID(__divmod__)
|
||||
STRUCT_FOR_ID(__doc__)
|
||||
STRUCT_FOR_ID(__enter__)
|
||||
STRUCT_FOR_ID(__eq__)
|
||||
STRUCT_FOR_ID(__exit__)
|
||||
STRUCT_FOR_ID(__file__)
|
||||
STRUCT_FOR_ID(__firstlineno__)
|
||||
STRUCT_FOR_ID(__float__)
|
||||
STRUCT_FOR_ID(__floor__)
|
||||
STRUCT_FOR_ID(__floordiv__)
|
||||
STRUCT_FOR_ID(__format__)
|
||||
STRUCT_FOR_ID(__fspath__)
|
||||
STRUCT_FOR_ID(__ge__)
|
||||
STRUCT_FOR_ID(__get__)
|
||||
STRUCT_FOR_ID(__getattr__)
|
||||
STRUCT_FOR_ID(__getattribute__)
|
||||
STRUCT_FOR_ID(__getinitargs__)
|
||||
STRUCT_FOR_ID(__getitem__)
|
||||
STRUCT_FOR_ID(__getnewargs__)
|
||||
STRUCT_FOR_ID(__getnewargs_ex__)
|
||||
STRUCT_FOR_ID(__getstate__)
|
||||
STRUCT_FOR_ID(__gt__)
|
||||
STRUCT_FOR_ID(__hash__)
|
||||
STRUCT_FOR_ID(__iadd__)
|
||||
STRUCT_FOR_ID(__iand__)
|
||||
STRUCT_FOR_ID(__ifloordiv__)
|
||||
STRUCT_FOR_ID(__ilshift__)
|
||||
STRUCT_FOR_ID(__imatmul__)
|
||||
STRUCT_FOR_ID(__imod__)
|
||||
STRUCT_FOR_ID(__import__)
|
||||
STRUCT_FOR_ID(__imul__)
|
||||
STRUCT_FOR_ID(__index__)
|
||||
STRUCT_FOR_ID(__init__)
|
||||
STRUCT_FOR_ID(__init_subclass__)
|
||||
STRUCT_FOR_ID(__instancecheck__)
|
||||
STRUCT_FOR_ID(__int__)
|
||||
STRUCT_FOR_ID(__invert__)
|
||||
STRUCT_FOR_ID(__ior__)
|
||||
STRUCT_FOR_ID(__ipow__)
|
||||
STRUCT_FOR_ID(__irshift__)
|
||||
STRUCT_FOR_ID(__isabstractmethod__)
|
||||
STRUCT_FOR_ID(__isub__)
|
||||
STRUCT_FOR_ID(__iter__)
|
||||
STRUCT_FOR_ID(__itruediv__)
|
||||
STRUCT_FOR_ID(__ixor__)
|
||||
STRUCT_FOR_ID(__le__)
|
||||
STRUCT_FOR_ID(__len__)
|
||||
STRUCT_FOR_ID(__length_hint__)
|
||||
STRUCT_FOR_ID(__lltrace__)
|
||||
STRUCT_FOR_ID(__loader__)
|
||||
STRUCT_FOR_ID(__lshift__)
|
||||
STRUCT_FOR_ID(__lt__)
|
||||
STRUCT_FOR_ID(__main__)
|
||||
STRUCT_FOR_ID(__match_args__)
|
||||
STRUCT_FOR_ID(__matmul__)
|
||||
STRUCT_FOR_ID(__missing__)
|
||||
STRUCT_FOR_ID(__mod__)
|
||||
STRUCT_FOR_ID(__module__)
|
||||
STRUCT_FOR_ID(__mro_entries__)
|
||||
STRUCT_FOR_ID(__mul__)
|
||||
STRUCT_FOR_ID(__name__)
|
||||
STRUCT_FOR_ID(__ne__)
|
||||
STRUCT_FOR_ID(__neg__)
|
||||
STRUCT_FOR_ID(__new__)
|
||||
STRUCT_FOR_ID(__newobj__)
|
||||
STRUCT_FOR_ID(__newobj_ex__)
|
||||
STRUCT_FOR_ID(__next__)
|
||||
STRUCT_FOR_ID(__notes__)
|
||||
STRUCT_FOR_ID(__or__)
|
||||
STRUCT_FOR_ID(__orig_class__)
|
||||
STRUCT_FOR_ID(__origin__)
|
||||
STRUCT_FOR_ID(__package__)
|
||||
STRUCT_FOR_ID(__parameters__)
|
||||
STRUCT_FOR_ID(__path__)
|
||||
STRUCT_FOR_ID(__pos__)
|
||||
STRUCT_FOR_ID(__pow__)
|
||||
STRUCT_FOR_ID(__prepare__)
|
||||
STRUCT_FOR_ID(__qualname__)
|
||||
STRUCT_FOR_ID(__radd__)
|
||||
STRUCT_FOR_ID(__rand__)
|
||||
STRUCT_FOR_ID(__rdivmod__)
|
||||
STRUCT_FOR_ID(__reduce__)
|
||||
STRUCT_FOR_ID(__reduce_ex__)
|
||||
STRUCT_FOR_ID(__release_buffer__)
|
||||
STRUCT_FOR_ID(__repr__)
|
||||
STRUCT_FOR_ID(__reversed__)
|
||||
STRUCT_FOR_ID(__rfloordiv__)
|
||||
STRUCT_FOR_ID(__rlshift__)
|
||||
STRUCT_FOR_ID(__rmatmul__)
|
||||
STRUCT_FOR_ID(__rmod__)
|
||||
STRUCT_FOR_ID(__rmul__)
|
||||
STRUCT_FOR_ID(__ror__)
|
||||
STRUCT_FOR_ID(__round__)
|
||||
STRUCT_FOR_ID(__rpow__)
|
||||
STRUCT_FOR_ID(__rrshift__)
|
||||
STRUCT_FOR_ID(__rshift__)
|
||||
STRUCT_FOR_ID(__rsub__)
|
||||
STRUCT_FOR_ID(__rtruediv__)
|
||||
STRUCT_FOR_ID(__rxor__)
|
||||
STRUCT_FOR_ID(__set__)
|
||||
STRUCT_FOR_ID(__set_name__)
|
||||
STRUCT_FOR_ID(__setattr__)
|
||||
STRUCT_FOR_ID(__setitem__)
|
||||
STRUCT_FOR_ID(__setstate__)
|
||||
STRUCT_FOR_ID(__sizeof__)
|
||||
STRUCT_FOR_ID(__slotnames__)
|
||||
STRUCT_FOR_ID(__slots__)
|
||||
STRUCT_FOR_ID(__spec__)
|
||||
STRUCT_FOR_ID(__static_attributes__)
|
||||
STRUCT_FOR_ID(__str__)
|
||||
STRUCT_FOR_ID(__sub__)
|
||||
STRUCT_FOR_ID(__subclasscheck__)
|
||||
STRUCT_FOR_ID(__subclasshook__)
|
||||
STRUCT_FOR_ID(__truediv__)
|
||||
STRUCT_FOR_ID(__trunc__)
|
||||
STRUCT_FOR_ID(__type_params__)
|
||||
STRUCT_FOR_ID(__typing_is_unpacked_typevartuple__)
|
||||
STRUCT_FOR_ID(__typing_prepare_subst__)
|
||||
STRUCT_FOR_ID(__typing_subst__)
|
||||
STRUCT_FOR_ID(__typing_unpacked_tuple_args__)
|
||||
STRUCT_FOR_ID(__warningregistry__)
|
||||
STRUCT_FOR_ID(__weaklistoffset__)
|
||||
STRUCT_FOR_ID(__weakref__)
|
||||
STRUCT_FOR_ID(__xor__)
|
||||
STRUCT_FOR_ID(_abc_impl)
|
||||
STRUCT_FOR_ID(_abstract_)
|
||||
STRUCT_FOR_ID(_active)
|
||||
STRUCT_FOR_ID(_anonymous_)
|
||||
STRUCT_FOR_ID(_argtypes_)
|
||||
STRUCT_FOR_ID(_as_parameter_)
|
||||
STRUCT_FOR_ID(_asyncio_future_blocking)
|
||||
STRUCT_FOR_ID(_blksize)
|
||||
STRUCT_FOR_ID(_bootstrap)
|
||||
STRUCT_FOR_ID(_check_retval_)
|
||||
STRUCT_FOR_ID(_dealloc_warn)
|
||||
STRUCT_FOR_ID(_feature_version)
|
||||
STRUCT_FOR_ID(_field_types)
|
||||
STRUCT_FOR_ID(_fields_)
|
||||
STRUCT_FOR_ID(_filters)
|
||||
STRUCT_FOR_ID(_finalizing)
|
||||
STRUCT_FOR_ID(_find_and_load)
|
||||
STRUCT_FOR_ID(_fix_up_module)
|
||||
STRUCT_FOR_ID(_flags_)
|
||||
STRUCT_FOR_ID(_get_sourcefile)
|
||||
STRUCT_FOR_ID(_handle_fromlist)
|
||||
STRUCT_FOR_ID(_initializing)
|
||||
STRUCT_FOR_ID(_internal_use)
|
||||
STRUCT_FOR_ID(_io)
|
||||
STRUCT_FOR_ID(_is_text_encoding)
|
||||
STRUCT_FOR_ID(_isatty_open_only)
|
||||
STRUCT_FOR_ID(_length_)
|
||||
STRUCT_FOR_ID(_limbo)
|
||||
STRUCT_FOR_ID(_lock_unlock_module)
|
||||
STRUCT_FOR_ID(_loop)
|
||||
STRUCT_FOR_ID(_needs_com_addref_)
|
||||
STRUCT_FOR_ID(_only_immortal)
|
||||
STRUCT_FOR_ID(_restype_)
|
||||
STRUCT_FOR_ID(_showwarnmsg)
|
||||
STRUCT_FOR_ID(_shutdown)
|
||||
STRUCT_FOR_ID(_slotnames)
|
||||
STRUCT_FOR_ID(_strptime)
|
||||
STRUCT_FOR_ID(_strptime_datetime_date)
|
||||
STRUCT_FOR_ID(_strptime_datetime_datetime)
|
||||
STRUCT_FOR_ID(_strptime_datetime_time)
|
||||
STRUCT_FOR_ID(_type_)
|
||||
STRUCT_FOR_ID(_uninitialized_submodules)
|
||||
STRUCT_FOR_ID(_warn_unawaited_coroutine)
|
||||
STRUCT_FOR_ID(_xoptions)
|
||||
STRUCT_FOR_ID(abs_tol)
|
||||
STRUCT_FOR_ID(access)
|
||||
STRUCT_FOR_ID(aclose)
|
||||
STRUCT_FOR_ID(add)
|
||||
STRUCT_FOR_ID(add_done_callback)
|
||||
STRUCT_FOR_ID(after_in_child)
|
||||
STRUCT_FOR_ID(after_in_parent)
|
||||
STRUCT_FOR_ID(aggregate_class)
|
||||
STRUCT_FOR_ID(alias)
|
||||
STRUCT_FOR_ID(align)
|
||||
STRUCT_FOR_ID(all)
|
||||
STRUCT_FOR_ID(all_threads)
|
||||
STRUCT_FOR_ID(allow_code)
|
||||
STRUCT_FOR_ID(any)
|
||||
STRUCT_FOR_ID(append)
|
||||
STRUCT_FOR_ID(arg)
|
||||
STRUCT_FOR_ID(argdefs)
|
||||
STRUCT_FOR_ID(args)
|
||||
STRUCT_FOR_ID(arguments)
|
||||
STRUCT_FOR_ID(argv)
|
||||
STRUCT_FOR_ID(as_integer_ratio)
|
||||
STRUCT_FOR_ID(asend)
|
||||
STRUCT_FOR_ID(ast)
|
||||
STRUCT_FOR_ID(athrow)
|
||||
STRUCT_FOR_ID(attribute)
|
||||
STRUCT_FOR_ID(authorizer_callback)
|
||||
STRUCT_FOR_ID(autocommit)
|
||||
STRUCT_FOR_ID(backtick)
|
||||
STRUCT_FOR_ID(base)
|
||||
STRUCT_FOR_ID(before)
|
||||
STRUCT_FOR_ID(big)
|
||||
STRUCT_FOR_ID(binary_form)
|
||||
STRUCT_FOR_ID(bit_offset)
|
||||
STRUCT_FOR_ID(bit_size)
|
||||
STRUCT_FOR_ID(block)
|
||||
STRUCT_FOR_ID(bound)
|
||||
STRUCT_FOR_ID(buffer)
|
||||
STRUCT_FOR_ID(buffer_callback)
|
||||
STRUCT_FOR_ID(buffer_size)
|
||||
STRUCT_FOR_ID(buffering)
|
||||
STRUCT_FOR_ID(buffers)
|
||||
STRUCT_FOR_ID(bufsize)
|
||||
STRUCT_FOR_ID(builtins)
|
||||
STRUCT_FOR_ID(byte_offset)
|
||||
STRUCT_FOR_ID(byte_size)
|
||||
STRUCT_FOR_ID(byteorder)
|
||||
STRUCT_FOR_ID(bytes)
|
||||
STRUCT_FOR_ID(bytes_per_sep)
|
||||
STRUCT_FOR_ID(c_call)
|
||||
STRUCT_FOR_ID(c_exception)
|
||||
STRUCT_FOR_ID(c_parameter_type)
|
||||
STRUCT_FOR_ID(c_return)
|
||||
STRUCT_FOR_ID(cached_datetime_module)
|
||||
STRUCT_FOR_ID(cached_statements)
|
||||
STRUCT_FOR_ID(cadata)
|
||||
STRUCT_FOR_ID(cafile)
|
||||
STRUCT_FOR_ID(call)
|
||||
STRUCT_FOR_ID(call_exception_handler)
|
||||
STRUCT_FOR_ID(call_soon)
|
||||
STRUCT_FOR_ID(callback)
|
||||
STRUCT_FOR_ID(cancel)
|
||||
STRUCT_FOR_ID(capath)
|
||||
STRUCT_FOR_ID(category)
|
||||
STRUCT_FOR_ID(cb_type)
|
||||
STRUCT_FOR_ID(certfile)
|
||||
STRUCT_FOR_ID(check_same_thread)
|
||||
STRUCT_FOR_ID(clear)
|
||||
STRUCT_FOR_ID(close)
|
||||
STRUCT_FOR_ID(closed)
|
||||
STRUCT_FOR_ID(closefd)
|
||||
STRUCT_FOR_ID(closure)
|
||||
STRUCT_FOR_ID(co_argcount)
|
||||
STRUCT_FOR_ID(co_cellvars)
|
||||
STRUCT_FOR_ID(co_code)
|
||||
STRUCT_FOR_ID(co_consts)
|
||||
STRUCT_FOR_ID(co_exceptiontable)
|
||||
STRUCT_FOR_ID(co_filename)
|
||||
STRUCT_FOR_ID(co_firstlineno)
|
||||
STRUCT_FOR_ID(co_flags)
|
||||
STRUCT_FOR_ID(co_freevars)
|
||||
STRUCT_FOR_ID(co_kwonlyargcount)
|
||||
STRUCT_FOR_ID(co_linetable)
|
||||
STRUCT_FOR_ID(co_name)
|
||||
STRUCT_FOR_ID(co_names)
|
||||
STRUCT_FOR_ID(co_nlocals)
|
||||
STRUCT_FOR_ID(co_posonlyargcount)
|
||||
STRUCT_FOR_ID(co_qualname)
|
||||
STRUCT_FOR_ID(co_stacksize)
|
||||
STRUCT_FOR_ID(co_varnames)
|
||||
STRUCT_FOR_ID(code)
|
||||
STRUCT_FOR_ID(col_offset)
|
||||
STRUCT_FOR_ID(command)
|
||||
STRUCT_FOR_ID(comment_factory)
|
||||
STRUCT_FOR_ID(compile_mode)
|
||||
STRUCT_FOR_ID(consts)
|
||||
STRUCT_FOR_ID(context)
|
||||
STRUCT_FOR_ID(contravariant)
|
||||
STRUCT_FOR_ID(conversion)
|
||||
STRUCT_FOR_ID(cookie)
|
||||
STRUCT_FOR_ID(copy)
|
||||
STRUCT_FOR_ID(copyreg)
|
||||
STRUCT_FOR_ID(coro)
|
||||
STRUCT_FOR_ID(count)
|
||||
STRUCT_FOR_ID(covariant)
|
||||
STRUCT_FOR_ID(cwd)
|
||||
STRUCT_FOR_ID(d_parameter_type)
|
||||
STRUCT_FOR_ID(data)
|
||||
STRUCT_FOR_ID(database)
|
||||
STRUCT_FOR_ID(day)
|
||||
STRUCT_FOR_ID(debug)
|
||||
STRUCT_FOR_ID(decode)
|
||||
STRUCT_FOR_ID(decoder)
|
||||
STRUCT_FOR_ID(default)
|
||||
STRUCT_FOR_ID(defaultaction)
|
||||
STRUCT_FOR_ID(delete)
|
||||
STRUCT_FOR_ID(depth)
|
||||
STRUCT_FOR_ID(desired_access)
|
||||
STRUCT_FOR_ID(detect_types)
|
||||
STRUCT_FOR_ID(deterministic)
|
||||
STRUCT_FOR_ID(device)
|
||||
STRUCT_FOR_ID(dict)
|
||||
STRUCT_FOR_ID(dictcomp)
|
||||
STRUCT_FOR_ID(difference_update)
|
||||
STRUCT_FOR_ID(digest)
|
||||
STRUCT_FOR_ID(digest_size)
|
||||
STRUCT_FOR_ID(digestmod)
|
||||
STRUCT_FOR_ID(dir_fd)
|
||||
STRUCT_FOR_ID(discard)
|
||||
STRUCT_FOR_ID(dispatch_table)
|
||||
STRUCT_FOR_ID(displayhook)
|
||||
STRUCT_FOR_ID(dklen)
|
||||
STRUCT_FOR_ID(doc)
|
||||
STRUCT_FOR_ID(done)
|
||||
STRUCT_FOR_ID(dont_inherit)
|
||||
STRUCT_FOR_ID(dst)
|
||||
STRUCT_FOR_ID(dst_dir_fd)
|
||||
STRUCT_FOR_ID(eager_start)
|
||||
STRUCT_FOR_ID(effective_ids)
|
||||
STRUCT_FOR_ID(element_factory)
|
||||
STRUCT_FOR_ID(encode)
|
||||
STRUCT_FOR_ID(encoding)
|
||||
STRUCT_FOR_ID(end)
|
||||
STRUCT_FOR_ID(end_col_offset)
|
||||
STRUCT_FOR_ID(end_lineno)
|
||||
STRUCT_FOR_ID(end_offset)
|
||||
STRUCT_FOR_ID(endpos)
|
||||
STRUCT_FOR_ID(entrypoint)
|
||||
STRUCT_FOR_ID(env)
|
||||
STRUCT_FOR_ID(errors)
|
||||
STRUCT_FOR_ID(event)
|
||||
STRUCT_FOR_ID(eventmask)
|
||||
STRUCT_FOR_ID(exc_type)
|
||||
STRUCT_FOR_ID(exc_value)
|
||||
STRUCT_FOR_ID(excepthook)
|
||||
STRUCT_FOR_ID(exception)
|
||||
STRUCT_FOR_ID(existing_file_name)
|
||||
STRUCT_FOR_ID(exp)
|
||||
STRUCT_FOR_ID(expression)
|
||||
STRUCT_FOR_ID(extend)
|
||||
STRUCT_FOR_ID(extra_tokens)
|
||||
STRUCT_FOR_ID(facility)
|
||||
STRUCT_FOR_ID(factory)
|
||||
STRUCT_FOR_ID(false)
|
||||
STRUCT_FOR_ID(family)
|
||||
STRUCT_FOR_ID(fanout)
|
||||
STRUCT_FOR_ID(fd)
|
||||
STRUCT_FOR_ID(fd2)
|
||||
STRUCT_FOR_ID(fdel)
|
||||
STRUCT_FOR_ID(fget)
|
||||
STRUCT_FOR_ID(fields)
|
||||
STRUCT_FOR_ID(file)
|
||||
STRUCT_FOR_ID(file_actions)
|
||||
STRUCT_FOR_ID(filename)
|
||||
STRUCT_FOR_ID(fileno)
|
||||
STRUCT_FOR_ID(filepath)
|
||||
STRUCT_FOR_ID(fillvalue)
|
||||
STRUCT_FOR_ID(filter)
|
||||
STRUCT_FOR_ID(filters)
|
||||
STRUCT_FOR_ID(final)
|
||||
STRUCT_FOR_ID(find_class)
|
||||
STRUCT_FOR_ID(fix_imports)
|
||||
STRUCT_FOR_ID(flags)
|
||||
STRUCT_FOR_ID(flush)
|
||||
STRUCT_FOR_ID(fold)
|
||||
STRUCT_FOR_ID(follow_symlinks)
|
||||
STRUCT_FOR_ID(format)
|
||||
STRUCT_FOR_ID(format_spec)
|
||||
STRUCT_FOR_ID(frame_buffer)
|
||||
STRUCT_FOR_ID(from_param)
|
||||
STRUCT_FOR_ID(fromlist)
|
||||
STRUCT_FOR_ID(fromtimestamp)
|
||||
STRUCT_FOR_ID(fromutc)
|
||||
STRUCT_FOR_ID(fset)
|
||||
STRUCT_FOR_ID(func)
|
||||
STRUCT_FOR_ID(future)
|
||||
STRUCT_FOR_ID(generation)
|
||||
STRUCT_FOR_ID(genexpr)
|
||||
STRUCT_FOR_ID(get)
|
||||
STRUCT_FOR_ID(get_debug)
|
||||
STRUCT_FOR_ID(get_event_loop)
|
||||
STRUCT_FOR_ID(get_loop)
|
||||
STRUCT_FOR_ID(get_source)
|
||||
STRUCT_FOR_ID(getattr)
|
||||
STRUCT_FOR_ID(getstate)
|
||||
STRUCT_FOR_ID(gid)
|
||||
STRUCT_FOR_ID(globals)
|
||||
STRUCT_FOR_ID(groupindex)
|
||||
STRUCT_FOR_ID(groups)
|
||||
STRUCT_FOR_ID(handle)
|
||||
STRUCT_FOR_ID(handle_seq)
|
||||
STRUCT_FOR_ID(has_location)
|
||||
STRUCT_FOR_ID(hash_name)
|
||||
STRUCT_FOR_ID(header)
|
||||
STRUCT_FOR_ID(headers)
|
||||
STRUCT_FOR_ID(hi)
|
||||
STRUCT_FOR_ID(hook)
|
||||
STRUCT_FOR_ID(hour)
|
||||
STRUCT_FOR_ID(id)
|
||||
STRUCT_FOR_ID(ident)
|
||||
STRUCT_FOR_ID(identity_hint)
|
||||
STRUCT_FOR_ID(ignore)
|
||||
STRUCT_FOR_ID(imag)
|
||||
STRUCT_FOR_ID(importlib)
|
||||
STRUCT_FOR_ID(in_fd)
|
||||
STRUCT_FOR_ID(incoming)
|
||||
STRUCT_FOR_ID(index)
|
||||
STRUCT_FOR_ID(indexgroup)
|
||||
STRUCT_FOR_ID(inf)
|
||||
STRUCT_FOR_ID(infer_variance)
|
||||
STRUCT_FOR_ID(inherit_handle)
|
||||
STRUCT_FOR_ID(inheritable)
|
||||
STRUCT_FOR_ID(initial)
|
||||
STRUCT_FOR_ID(initial_bytes)
|
||||
STRUCT_FOR_ID(initial_owner)
|
||||
STRUCT_FOR_ID(initial_state)
|
||||
STRUCT_FOR_ID(initial_value)
|
||||
STRUCT_FOR_ID(initval)
|
||||
STRUCT_FOR_ID(inner_size)
|
||||
STRUCT_FOR_ID(input)
|
||||
STRUCT_FOR_ID(insert_comments)
|
||||
STRUCT_FOR_ID(insert_pis)
|
||||
STRUCT_FOR_ID(instructions)
|
||||
STRUCT_FOR_ID(intern)
|
||||
STRUCT_FOR_ID(intersection)
|
||||
STRUCT_FOR_ID(interval)
|
||||
STRUCT_FOR_ID(io)
|
||||
STRUCT_FOR_ID(is_compress)
|
||||
STRUCT_FOR_ID(is_raw)
|
||||
STRUCT_FOR_ID(is_running)
|
||||
STRUCT_FOR_ID(is_struct)
|
||||
STRUCT_FOR_ID(isatty)
|
||||
STRUCT_FOR_ID(isinstance)
|
||||
STRUCT_FOR_ID(isoformat)
|
||||
STRUCT_FOR_ID(isolation_level)
|
||||
STRUCT_FOR_ID(istext)
|
||||
STRUCT_FOR_ID(item)
|
||||
STRUCT_FOR_ID(items)
|
||||
STRUCT_FOR_ID(iter)
|
||||
STRUCT_FOR_ID(iterable)
|
||||
STRUCT_FOR_ID(iterations)
|
||||
STRUCT_FOR_ID(join)
|
||||
STRUCT_FOR_ID(jump)
|
||||
STRUCT_FOR_ID(keepends)
|
||||
STRUCT_FOR_ID(key)
|
||||
STRUCT_FOR_ID(keyfile)
|
||||
STRUCT_FOR_ID(keys)
|
||||
STRUCT_FOR_ID(kind)
|
||||
STRUCT_FOR_ID(kw)
|
||||
STRUCT_FOR_ID(kw1)
|
||||
STRUCT_FOR_ID(kw2)
|
||||
STRUCT_FOR_ID(kwdefaults)
|
||||
STRUCT_FOR_ID(label)
|
||||
STRUCT_FOR_ID(lambda)
|
||||
STRUCT_FOR_ID(last)
|
||||
STRUCT_FOR_ID(last_exc)
|
||||
STRUCT_FOR_ID(last_node)
|
||||
STRUCT_FOR_ID(last_traceback)
|
||||
STRUCT_FOR_ID(last_type)
|
||||
STRUCT_FOR_ID(last_value)
|
||||
STRUCT_FOR_ID(latin1)
|
||||
STRUCT_FOR_ID(leaf_size)
|
||||
STRUCT_FOR_ID(len)
|
||||
STRUCT_FOR_ID(length)
|
||||
STRUCT_FOR_ID(level)
|
||||
STRUCT_FOR_ID(limit)
|
||||
STRUCT_FOR_ID(line)
|
||||
STRUCT_FOR_ID(line_buffering)
|
||||
STRUCT_FOR_ID(lineno)
|
||||
STRUCT_FOR_ID(listcomp)
|
||||
STRUCT_FOR_ID(little)
|
||||
STRUCT_FOR_ID(lo)
|
||||
STRUCT_FOR_ID(locale)
|
||||
STRUCT_FOR_ID(locals)
|
||||
STRUCT_FOR_ID(logoption)
|
||||
STRUCT_FOR_ID(loop)
|
||||
STRUCT_FOR_ID(manual_reset)
|
||||
STRUCT_FOR_ID(mapping)
|
||||
STRUCT_FOR_ID(match)
|
||||
STRUCT_FOR_ID(max_length)
|
||||
STRUCT_FOR_ID(maxdigits)
|
||||
STRUCT_FOR_ID(maxevents)
|
||||
STRUCT_FOR_ID(maxlen)
|
||||
STRUCT_FOR_ID(maxmem)
|
||||
STRUCT_FOR_ID(maxsplit)
|
||||
STRUCT_FOR_ID(maxvalue)
|
||||
STRUCT_FOR_ID(memLevel)
|
||||
STRUCT_FOR_ID(memlimit)
|
||||
STRUCT_FOR_ID(message)
|
||||
STRUCT_FOR_ID(metaclass)
|
||||
STRUCT_FOR_ID(metadata)
|
||||
STRUCT_FOR_ID(method)
|
||||
STRUCT_FOR_ID(microsecond)
|
||||
STRUCT_FOR_ID(milliseconds)
|
||||
STRUCT_FOR_ID(minute)
|
||||
STRUCT_FOR_ID(mod)
|
||||
STRUCT_FOR_ID(mode)
|
||||
STRUCT_FOR_ID(module)
|
||||
STRUCT_FOR_ID(module_globals)
|
||||
STRUCT_FOR_ID(modules)
|
||||
STRUCT_FOR_ID(month)
|
||||
STRUCT_FOR_ID(mro)
|
||||
STRUCT_FOR_ID(msg)
|
||||
STRUCT_FOR_ID(mutex)
|
||||
STRUCT_FOR_ID(mycmp)
|
||||
STRUCT_FOR_ID(n_arg)
|
||||
STRUCT_FOR_ID(n_fields)
|
||||
STRUCT_FOR_ID(n_sequence_fields)
|
||||
STRUCT_FOR_ID(n_unnamed_fields)
|
||||
STRUCT_FOR_ID(name)
|
||||
STRUCT_FOR_ID(name_from)
|
||||
STRUCT_FOR_ID(namespace_separator)
|
||||
STRUCT_FOR_ID(namespaces)
|
||||
STRUCT_FOR_ID(narg)
|
||||
STRUCT_FOR_ID(ndigits)
|
||||
STRUCT_FOR_ID(nested)
|
||||
STRUCT_FOR_ID(new_file_name)
|
||||
STRUCT_FOR_ID(new_limit)
|
||||
STRUCT_FOR_ID(newline)
|
||||
STRUCT_FOR_ID(newlines)
|
||||
STRUCT_FOR_ID(next)
|
||||
STRUCT_FOR_ID(nlocals)
|
||||
STRUCT_FOR_ID(node_depth)
|
||||
STRUCT_FOR_ID(node_offset)
|
||||
STRUCT_FOR_ID(ns)
|
||||
STRUCT_FOR_ID(nstype)
|
||||
STRUCT_FOR_ID(nt)
|
||||
STRUCT_FOR_ID(null)
|
||||
STRUCT_FOR_ID(number)
|
||||
STRUCT_FOR_ID(obj)
|
||||
STRUCT_FOR_ID(object)
|
||||
STRUCT_FOR_ID(offset)
|
||||
STRUCT_FOR_ID(offset_dst)
|
||||
STRUCT_FOR_ID(offset_src)
|
||||
STRUCT_FOR_ID(on_type_read)
|
||||
STRUCT_FOR_ID(onceregistry)
|
||||
STRUCT_FOR_ID(only_active_thread)
|
||||
STRUCT_FOR_ID(only_keys)
|
||||
STRUCT_FOR_ID(oparg)
|
||||
STRUCT_FOR_ID(opcode)
|
||||
STRUCT_FOR_ID(open)
|
||||
STRUCT_FOR_ID(opener)
|
||||
STRUCT_FOR_ID(operation)
|
||||
STRUCT_FOR_ID(optimize)
|
||||
STRUCT_FOR_ID(options)
|
||||
STRUCT_FOR_ID(order)
|
||||
STRUCT_FOR_ID(origin)
|
||||
STRUCT_FOR_ID(out_fd)
|
||||
STRUCT_FOR_ID(outgoing)
|
||||
STRUCT_FOR_ID(outpath)
|
||||
STRUCT_FOR_ID(overlapped)
|
||||
STRUCT_FOR_ID(owner)
|
||||
STRUCT_FOR_ID(pages)
|
||||
STRUCT_FOR_ID(parameter)
|
||||
STRUCT_FOR_ID(parent)
|
||||
STRUCT_FOR_ID(password)
|
||||
STRUCT_FOR_ID(path)
|
||||
STRUCT_FOR_ID(pattern)
|
||||
STRUCT_FOR_ID(peek)
|
||||
STRUCT_FOR_ID(persistent_id)
|
||||
STRUCT_FOR_ID(persistent_load)
|
||||
STRUCT_FOR_ID(person)
|
||||
STRUCT_FOR_ID(pi_factory)
|
||||
STRUCT_FOR_ID(pid)
|
||||
STRUCT_FOR_ID(policy)
|
||||
STRUCT_FOR_ID(pos)
|
||||
STRUCT_FOR_ID(pos1)
|
||||
STRUCT_FOR_ID(pos2)
|
||||
STRUCT_FOR_ID(posix)
|
||||
STRUCT_FOR_ID(print_file_and_line)
|
||||
STRUCT_FOR_ID(priority)
|
||||
STRUCT_FOR_ID(progress)
|
||||
STRUCT_FOR_ID(progress_handler)
|
||||
STRUCT_FOR_ID(progress_routine)
|
||||
STRUCT_FOR_ID(proto)
|
||||
STRUCT_FOR_ID(protocol)
|
||||
STRUCT_FOR_ID(ps1)
|
||||
STRUCT_FOR_ID(ps2)
|
||||
STRUCT_FOR_ID(query)
|
||||
STRUCT_FOR_ID(quotetabs)
|
||||
STRUCT_FOR_ID(raw)
|
||||
STRUCT_FOR_ID(read)
|
||||
STRUCT_FOR_ID(read1)
|
||||
STRUCT_FOR_ID(readable)
|
||||
STRUCT_FOR_ID(readall)
|
||||
STRUCT_FOR_ID(readinto)
|
||||
STRUCT_FOR_ID(readinto1)
|
||||
STRUCT_FOR_ID(readline)
|
||||
STRUCT_FOR_ID(readonly)
|
||||
STRUCT_FOR_ID(real)
|
||||
STRUCT_FOR_ID(reducer_override)
|
||||
STRUCT_FOR_ID(registry)
|
||||
STRUCT_FOR_ID(rel_tol)
|
||||
STRUCT_FOR_ID(release)
|
||||
STRUCT_FOR_ID(reload)
|
||||
STRUCT_FOR_ID(repl)
|
||||
STRUCT_FOR_ID(replace)
|
||||
STRUCT_FOR_ID(reserved)
|
||||
STRUCT_FOR_ID(reset)
|
||||
STRUCT_FOR_ID(resetids)
|
||||
STRUCT_FOR_ID(return)
|
||||
STRUCT_FOR_ID(reverse)
|
||||
STRUCT_FOR_ID(reversed)
|
||||
STRUCT_FOR_ID(salt)
|
||||
STRUCT_FOR_ID(sched_priority)
|
||||
STRUCT_FOR_ID(scheduler)
|
||||
STRUCT_FOR_ID(script)
|
||||
STRUCT_FOR_ID(second)
|
||||
STRUCT_FOR_ID(security_attributes)
|
||||
STRUCT_FOR_ID(seek)
|
||||
STRUCT_FOR_ID(seekable)
|
||||
STRUCT_FOR_ID(selectors)
|
||||
STRUCT_FOR_ID(self)
|
||||
STRUCT_FOR_ID(send)
|
||||
STRUCT_FOR_ID(sep)
|
||||
STRUCT_FOR_ID(sequence)
|
||||
STRUCT_FOR_ID(server_hostname)
|
||||
STRUCT_FOR_ID(server_side)
|
||||
STRUCT_FOR_ID(session)
|
||||
STRUCT_FOR_ID(setcomp)
|
||||
STRUCT_FOR_ID(setpgroup)
|
||||
STRUCT_FOR_ID(setsid)
|
||||
STRUCT_FOR_ID(setsigdef)
|
||||
STRUCT_FOR_ID(setsigmask)
|
||||
STRUCT_FOR_ID(setstate)
|
||||
STRUCT_FOR_ID(shape)
|
||||
STRUCT_FOR_ID(show_cmd)
|
||||
STRUCT_FOR_ID(signed)
|
||||
STRUCT_FOR_ID(size)
|
||||
STRUCT_FOR_ID(sizehint)
|
||||
STRUCT_FOR_ID(skip_file_prefixes)
|
||||
STRUCT_FOR_ID(sleep)
|
||||
STRUCT_FOR_ID(sock)
|
||||
STRUCT_FOR_ID(sort)
|
||||
STRUCT_FOR_ID(source)
|
||||
STRUCT_FOR_ID(source_traceback)
|
||||
STRUCT_FOR_ID(spam)
|
||||
STRUCT_FOR_ID(src)
|
||||
STRUCT_FOR_ID(src_dir_fd)
|
||||
STRUCT_FOR_ID(stacklevel)
|
||||
STRUCT_FOR_ID(start)
|
||||
STRUCT_FOR_ID(statement)
|
||||
STRUCT_FOR_ID(status)
|
||||
STRUCT_FOR_ID(stderr)
|
||||
STRUCT_FOR_ID(stdin)
|
||||
STRUCT_FOR_ID(stdout)
|
||||
STRUCT_FOR_ID(step)
|
||||
STRUCT_FOR_ID(steps)
|
||||
STRUCT_FOR_ID(store_name)
|
||||
STRUCT_FOR_ID(strategy)
|
||||
STRUCT_FOR_ID(strftime)
|
||||
STRUCT_FOR_ID(strict)
|
||||
STRUCT_FOR_ID(strict_mode)
|
||||
STRUCT_FOR_ID(string)
|
||||
STRUCT_FOR_ID(sub_key)
|
||||
STRUCT_FOR_ID(subcalls)
|
||||
STRUCT_FOR_ID(symmetric_difference_update)
|
||||
STRUCT_FOR_ID(tabsize)
|
||||
STRUCT_FOR_ID(tag)
|
||||
STRUCT_FOR_ID(target)
|
||||
STRUCT_FOR_ID(target_is_directory)
|
||||
STRUCT_FOR_ID(task)
|
||||
STRUCT_FOR_ID(tb_frame)
|
||||
STRUCT_FOR_ID(tb_lasti)
|
||||
STRUCT_FOR_ID(tb_lineno)
|
||||
STRUCT_FOR_ID(tb_next)
|
||||
STRUCT_FOR_ID(tell)
|
||||
STRUCT_FOR_ID(template)
|
||||
STRUCT_FOR_ID(term)
|
||||
STRUCT_FOR_ID(text)
|
||||
STRUCT_FOR_ID(threading)
|
||||
STRUCT_FOR_ID(throw)
|
||||
STRUCT_FOR_ID(timeout)
|
||||
STRUCT_FOR_ID(timer)
|
||||
STRUCT_FOR_ID(times)
|
||||
STRUCT_FOR_ID(timetuple)
|
||||
STRUCT_FOR_ID(timeunit)
|
||||
STRUCT_FOR_ID(top)
|
||||
STRUCT_FOR_ID(trace_callback)
|
||||
STRUCT_FOR_ID(traceback)
|
||||
STRUCT_FOR_ID(trailers)
|
||||
STRUCT_FOR_ID(translate)
|
||||
STRUCT_FOR_ID(true)
|
||||
STRUCT_FOR_ID(truncate)
|
||||
STRUCT_FOR_ID(twice)
|
||||
STRUCT_FOR_ID(txt)
|
||||
STRUCT_FOR_ID(type)
|
||||
STRUCT_FOR_ID(type_params)
|
||||
STRUCT_FOR_ID(tz)
|
||||
STRUCT_FOR_ID(tzinfo)
|
||||
STRUCT_FOR_ID(tzname)
|
||||
STRUCT_FOR_ID(uid)
|
||||
STRUCT_FOR_ID(unlink)
|
||||
STRUCT_FOR_ID(unraisablehook)
|
||||
STRUCT_FOR_ID(uri)
|
||||
STRUCT_FOR_ID(usedforsecurity)
|
||||
STRUCT_FOR_ID(value)
|
||||
STRUCT_FOR_ID(values)
|
||||
STRUCT_FOR_ID(version)
|
||||
STRUCT_FOR_ID(volume)
|
||||
STRUCT_FOR_ID(wait_all)
|
||||
STRUCT_FOR_ID(warn_on_full_buffer)
|
||||
STRUCT_FOR_ID(warnings)
|
||||
STRUCT_FOR_ID(warnoptions)
|
||||
STRUCT_FOR_ID(wbits)
|
||||
STRUCT_FOR_ID(week)
|
||||
STRUCT_FOR_ID(weekday)
|
||||
STRUCT_FOR_ID(which)
|
||||
STRUCT_FOR_ID(who)
|
||||
STRUCT_FOR_ID(withdata)
|
||||
STRUCT_FOR_ID(writable)
|
||||
STRUCT_FOR_ID(write)
|
||||
STRUCT_FOR_ID(write_through)
|
||||
STRUCT_FOR_ID(year)
|
||||
STRUCT_FOR_ID(zdict)
|
||||
STRUCT_FOR_ID(zstd_dict)
|
||||
} identifiers;
|
||||
struct {
|
||||
PyASCIIObject _ascii;
|
||||
uint8_t _data[2];
|
||||
} ascii[128];
|
||||
struct {
|
||||
PyCompactUnicodeObject _latin1;
|
||||
uint8_t _data[2];
|
||||
} latin1[128];
|
||||
};
|
||||
/* End auto-generated code */
|
||||
|
||||
#undef ID
|
||||
#undef STR
|
||||
|
||||
|
||||
#define _Py_ID(NAME) \
|
||||
(_Py_SINGLETON(strings.identifiers._py_ ## NAME._ascii.ob_base))
|
||||
#define _Py_STR(NAME) \
|
||||
(_Py_SINGLETON(strings.literals._py_ ## NAME._ascii.ob_base))
|
||||
#define _Py_LATIN1_CHR(CH) \
|
||||
((CH) < 128 \
|
||||
? (PyObject*)&_Py_SINGLETON(strings).ascii[(CH)] \
|
||||
: (PyObject*)&_Py_SINGLETON(strings).latin1[(CH) - 128])
|
||||
|
||||
/* _Py_DECLARE_STR() should precede all uses of _Py_STR() in a function.
|
||||
|
||||
This is true even if the same string has already been declared
|
||||
elsewhere, even in the same file. Mismatched duplicates are detected
|
||||
by Tools/scripts/generate-global-objects.py.
|
||||
|
||||
Pairing _Py_DECLARE_STR() with every use of _Py_STR() makes sure the
|
||||
string keeps working even if the declaration is removed somewhere
|
||||
else. It also makes it clear what the actual string is at every
|
||||
place it is being used. */
|
||||
#define _Py_DECLARE_STR(name, str)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_GLOBAL_STRINGS_H */
|
||||
113
extern/include/python/internal/pycore_hamt.h
vendored
Normal file
113
extern/include/python/internal/pycore_hamt.h
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
#ifndef Py_INTERNAL_HAMT_H
|
||||
#define Py_INTERNAL_HAMT_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_structs.h" // PyHamtNode
|
||||
|
||||
/*
|
||||
HAMT tree is shaped by hashes of keys. Every group of 5 bits of a hash denotes
|
||||
the exact position of the key in one level of the tree. Since we're using
|
||||
32 bit hashes, we can have at most 7 such levels. Although if there are
|
||||
two distinct keys with equal hashes, they will have to occupy the same
|
||||
cell in the 7th level of the tree -- so we'd put them in a "collision" node.
|
||||
Which brings the total possible tree depth to 8. Read more about the actual
|
||||
layout of the HAMT tree in `hamt.c`.
|
||||
|
||||
This constant is used to define a datastucture for storing iteration state.
|
||||
*/
|
||||
#define _Py_HAMT_MAX_TREE_DEPTH 8
|
||||
|
||||
|
||||
extern PyTypeObject _PyHamt_Type;
|
||||
extern PyTypeObject _PyHamt_ArrayNode_Type;
|
||||
extern PyTypeObject _PyHamt_BitmapNode_Type;
|
||||
extern PyTypeObject _PyHamt_CollisionNode_Type;
|
||||
extern PyTypeObject _PyHamtKeys_Type;
|
||||
extern PyTypeObject _PyHamtValues_Type;
|
||||
extern PyTypeObject _PyHamtItems_Type;
|
||||
|
||||
|
||||
/* other API */
|
||||
|
||||
#define PyHamt_Check(o) Py_IS_TYPE((o), &_PyHamt_Type)
|
||||
|
||||
|
||||
/* A struct to hold the state of depth-first traverse of the tree.
|
||||
|
||||
HAMT is an immutable collection. Iterators will hold a strong reference
|
||||
to it, and every node in the HAMT has strong references to its children.
|
||||
|
||||
So for iterators, we can implement zero allocations and zero reference
|
||||
inc/dec depth-first iteration.
|
||||
|
||||
- i_nodes: an array of seven pointers to tree nodes
|
||||
- i_level: the current node in i_nodes
|
||||
- i_pos: an array of positions within nodes in i_nodes.
|
||||
*/
|
||||
typedef struct {
|
||||
PyHamtNode *i_nodes[_Py_HAMT_MAX_TREE_DEPTH];
|
||||
Py_ssize_t i_pos[_Py_HAMT_MAX_TREE_DEPTH];
|
||||
int8_t i_level;
|
||||
} PyHamtIteratorState;
|
||||
|
||||
|
||||
/* Base iterator object.
|
||||
|
||||
Contains the iteration state, a pointer to the HAMT tree,
|
||||
and a pointer to the 'yield function'. The latter is a simple
|
||||
function that returns a key/value tuple for the 'Items' iterator,
|
||||
just a key for the 'Keys' iterator, and a value for the 'Values'
|
||||
iterator.
|
||||
*/
|
||||
typedef struct {
|
||||
PyObject_HEAD
|
||||
PyHamtObject *hi_obj;
|
||||
PyHamtIteratorState hi_iter;
|
||||
binaryfunc hi_yield;
|
||||
} PyHamtIterator;
|
||||
|
||||
|
||||
/* Create a new HAMT immutable mapping. */
|
||||
PyHamtObject * _PyHamt_New(void);
|
||||
|
||||
/* Return a new collection based on "o", but with an additional
|
||||
key/val pair. */
|
||||
PyHamtObject * _PyHamt_Assoc(PyHamtObject *o, PyObject *key, PyObject *val);
|
||||
|
||||
/* Return a new collection based on "o", but without "key". */
|
||||
PyHamtObject * _PyHamt_Without(PyHamtObject *o, PyObject *key);
|
||||
|
||||
/* Find "key" in the "o" collection.
|
||||
|
||||
Return:
|
||||
- -1: An error occurred.
|
||||
- 0: "key" wasn't found in "o".
|
||||
- 1: "key" is in "o"; "*val" is set to its value (a borrowed ref).
|
||||
*/
|
||||
int _PyHamt_Find(PyHamtObject *o, PyObject *key, PyObject **val);
|
||||
|
||||
/* Check if "v" is equal to "w".
|
||||
|
||||
Return:
|
||||
- 0: v != w
|
||||
- 1: v == w
|
||||
- -1: An error occurred.
|
||||
*/
|
||||
int _PyHamt_Eq(PyHamtObject *v, PyHamtObject *w);
|
||||
|
||||
/* Return the size of "o"; equivalent of "len(o)". */
|
||||
Py_ssize_t _PyHamt_Len(PyHamtObject *o);
|
||||
|
||||
/* Return a Keys iterator over "o". */
|
||||
PyObject * _PyHamt_NewIterKeys(PyHamtObject *o);
|
||||
|
||||
/* Return a Values iterator over "o". */
|
||||
PyObject * _PyHamt_NewIterValues(PyHamtObject *o);
|
||||
|
||||
/* Return a Items iterator over "o". */
|
||||
PyObject * _PyHamt_NewIterItems(PyHamtObject *o);
|
||||
|
||||
#endif /* !Py_INTERNAL_HAMT_H */
|
||||
150
extern/include/python/internal/pycore_hashtable.h
vendored
Normal file
150
extern/include/python/internal/pycore_hashtable.h
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
#ifndef Py_INTERNAL_HASHTABLE_H
|
||||
#define Py_INTERNAL_HASHTABLE_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
/* Single linked list */
|
||||
|
||||
typedef struct _Py_slist_item_s {
|
||||
struct _Py_slist_item_s *next;
|
||||
} _Py_slist_item_t;
|
||||
|
||||
typedef struct {
|
||||
_Py_slist_item_t *head;
|
||||
} _Py_slist_t;
|
||||
|
||||
#define _Py_SLIST_ITEM_NEXT(ITEM) _Py_RVALUE(((_Py_slist_item_t *)(ITEM))->next)
|
||||
|
||||
#define _Py_SLIST_HEAD(SLIST) _Py_RVALUE(((_Py_slist_t *)(SLIST))->head)
|
||||
|
||||
|
||||
/* _Py_hashtable: table entry */
|
||||
|
||||
typedef struct {
|
||||
/* used by _Py_hashtable_t.buckets to link entries */
|
||||
_Py_slist_item_t _Py_slist_item;
|
||||
|
||||
Py_uhash_t key_hash;
|
||||
void *key;
|
||||
void *value;
|
||||
} _Py_hashtable_entry_t;
|
||||
|
||||
|
||||
/* _Py_hashtable: prototypes */
|
||||
|
||||
/* Forward declaration */
|
||||
struct _Py_hashtable_t;
|
||||
typedef struct _Py_hashtable_t _Py_hashtable_t;
|
||||
|
||||
typedef Py_uhash_t (*_Py_hashtable_hash_func) (const void *key);
|
||||
typedef int (*_Py_hashtable_compare_func) (const void *key1, const void *key2);
|
||||
typedef void (*_Py_hashtable_destroy_func) (void *key);
|
||||
typedef _Py_hashtable_entry_t* (*_Py_hashtable_get_entry_func)(_Py_hashtable_t *ht,
|
||||
const void *key);
|
||||
|
||||
typedef struct {
|
||||
// Allocate a memory block
|
||||
void* (*malloc) (size_t size);
|
||||
|
||||
// Release a memory block
|
||||
void (*free) (void *ptr);
|
||||
} _Py_hashtable_allocator_t;
|
||||
|
||||
|
||||
/* _Py_hashtable: table */
|
||||
struct _Py_hashtable_t {
|
||||
size_t nentries; // Total number of entries in the table
|
||||
size_t nbuckets;
|
||||
_Py_slist_t *buckets;
|
||||
|
||||
_Py_hashtable_get_entry_func get_entry_func;
|
||||
_Py_hashtable_hash_func hash_func;
|
||||
_Py_hashtable_compare_func compare_func;
|
||||
_Py_hashtable_destroy_func key_destroy_func;
|
||||
_Py_hashtable_destroy_func value_destroy_func;
|
||||
_Py_hashtable_allocator_t alloc;
|
||||
};
|
||||
|
||||
// Export _Py_hashtable functions for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(_Py_hashtable_t *) _Py_hashtable_new(
|
||||
_Py_hashtable_hash_func hash_func,
|
||||
_Py_hashtable_compare_func compare_func);
|
||||
|
||||
/* Hash a pointer (void*) */
|
||||
PyAPI_FUNC(Py_uhash_t) _Py_hashtable_hash_ptr(const void *key);
|
||||
|
||||
/* Comparison using memcmp() */
|
||||
PyAPI_FUNC(int) _Py_hashtable_compare_direct(
|
||||
const void *key1,
|
||||
const void *key2);
|
||||
|
||||
PyAPI_FUNC(_Py_hashtable_t *) _Py_hashtable_new_full(
|
||||
_Py_hashtable_hash_func hash_func,
|
||||
_Py_hashtable_compare_func compare_func,
|
||||
_Py_hashtable_destroy_func key_destroy_func,
|
||||
_Py_hashtable_destroy_func value_destroy_func,
|
||||
_Py_hashtable_allocator_t *allocator);
|
||||
|
||||
PyAPI_FUNC(void) _Py_hashtable_destroy(_Py_hashtable_t *ht);
|
||||
|
||||
PyAPI_FUNC(void) _Py_hashtable_clear(_Py_hashtable_t *ht);
|
||||
|
||||
typedef int (*_Py_hashtable_foreach_func) (_Py_hashtable_t *ht,
|
||||
const void *key, const void *value,
|
||||
void *user_data);
|
||||
|
||||
/* Call func() on each entry of the hashtable.
|
||||
Iteration stops if func() result is non-zero, in this case it's the result
|
||||
of the call. Otherwise, the function returns 0. */
|
||||
PyAPI_FUNC(int) _Py_hashtable_foreach(
|
||||
_Py_hashtable_t *ht,
|
||||
_Py_hashtable_foreach_func func,
|
||||
void *user_data);
|
||||
|
||||
PyAPI_FUNC(size_t) _Py_hashtable_size(const _Py_hashtable_t *ht);
|
||||
PyAPI_FUNC(size_t) _Py_hashtable_len(const _Py_hashtable_t *ht);
|
||||
|
||||
/* Add a new entry to the hash. The key must not be present in the hash table.
|
||||
Return 0 on success, -1 on memory error. */
|
||||
PyAPI_FUNC(int) _Py_hashtable_set(
|
||||
_Py_hashtable_t *ht,
|
||||
const void *key,
|
||||
void *value);
|
||||
|
||||
|
||||
/* Get an entry.
|
||||
Return NULL if the key does not exist. */
|
||||
static inline _Py_hashtable_entry_t *
|
||||
_Py_hashtable_get_entry(_Py_hashtable_t *ht, const void *key)
|
||||
{
|
||||
return ht->get_entry_func(ht, key);
|
||||
}
|
||||
|
||||
|
||||
/* Get value from an entry.
|
||||
Return NULL if the entry is not found.
|
||||
|
||||
Use _Py_hashtable_get_entry() to distinguish entry value equal to NULL
|
||||
and entry not found. */
|
||||
PyAPI_FUNC(void*) _Py_hashtable_get(_Py_hashtable_t *ht, const void *key);
|
||||
|
||||
|
||||
/* Remove a key and its associated value without calling key and value destroy
|
||||
functions.
|
||||
|
||||
Return the removed value if the key was found.
|
||||
Return NULL if the key was not found. */
|
||||
PyAPI_FUNC(void*) _Py_hashtable_steal(
|
||||
_Py_hashtable_t *ht,
|
||||
const void *key);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_HASHTABLE_H */
|
||||
141
extern/include/python/internal/pycore_import.h
vendored
Normal file
141
extern/include/python/internal/pycore_import.h
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
#ifndef Py_LIMITED_API
|
||||
#ifndef Py_INTERNAL_IMPORT_H
|
||||
#define Py_INTERNAL_IMPORT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_hashtable.h" // _Py_hashtable_t
|
||||
#include "pycore_interp_structs.h" // _import_state
|
||||
|
||||
extern int _PyImport_IsInitialized(PyInterpreterState *);
|
||||
|
||||
// Export for 'pyexpat' shared extension
|
||||
PyAPI_FUNC(int) _PyImport_SetModule(PyObject *name, PyObject *module);
|
||||
|
||||
extern int _PyImport_SetModuleString(const char *name, PyObject* module);
|
||||
|
||||
extern void _PyImport_AcquireLock(PyInterpreterState *interp);
|
||||
extern void _PyImport_ReleaseLock(PyInterpreterState *interp);
|
||||
extern void _PyImport_ReInitLock(PyInterpreterState *interp);
|
||||
|
||||
// This is used exclusively for the sys and builtins modules:
|
||||
extern int _PyImport_FixupBuiltin(
|
||||
PyThreadState *tstate,
|
||||
PyObject *mod,
|
||||
const char *name, /* UTF-8 encoded string */
|
||||
PyObject *modules
|
||||
);
|
||||
|
||||
#ifdef HAVE_DLOPEN
|
||||
# include <dlfcn.h> // RTLD_NOW, RTLD_LAZY
|
||||
# if HAVE_DECL_RTLD_NOW
|
||||
# define _Py_DLOPEN_FLAGS RTLD_NOW
|
||||
# else
|
||||
# define _Py_DLOPEN_FLAGS RTLD_LAZY
|
||||
# endif
|
||||
# define DLOPENFLAGS_INIT .dlopenflags = _Py_DLOPEN_FLAGS,
|
||||
#else
|
||||
# define _Py_DLOPEN_FLAGS 0
|
||||
# define DLOPENFLAGS_INIT
|
||||
#endif
|
||||
|
||||
#define IMPORTS_INIT \
|
||||
{ \
|
||||
DLOPENFLAGS_INIT \
|
||||
.find_and_load = { \
|
||||
.header = 1, \
|
||||
}, \
|
||||
}
|
||||
|
||||
extern void _PyImport_ClearCore(PyInterpreterState *interp);
|
||||
|
||||
extern Py_ssize_t _PyImport_GetNextModuleIndex(void);
|
||||
extern const char * _PyImport_ResolveNameWithPackageContext(const char *name);
|
||||
extern const char * _PyImport_SwapPackageContext(const char *newcontext);
|
||||
|
||||
extern int _PyImport_GetDLOpenFlags(PyInterpreterState *interp);
|
||||
extern void _PyImport_SetDLOpenFlags(PyInterpreterState *interp, int new_val);
|
||||
|
||||
extern PyObject * _PyImport_InitModules(PyInterpreterState *interp);
|
||||
extern PyObject * _PyImport_GetModules(PyInterpreterState *interp);
|
||||
extern PyObject * _PyImport_GetModulesRef(PyInterpreterState *interp);
|
||||
extern void _PyImport_ClearModules(PyInterpreterState *interp);
|
||||
|
||||
extern void _PyImport_ClearModulesByIndex(PyInterpreterState *interp);
|
||||
|
||||
extern int _PyImport_InitDefaultImportFunc(PyInterpreterState *interp);
|
||||
extern int _PyImport_IsDefaultImportFunc(
|
||||
PyInterpreterState *interp,
|
||||
PyObject *func);
|
||||
|
||||
extern PyObject * _PyImport_GetImportlibLoader(
|
||||
PyInterpreterState *interp,
|
||||
const char *loader_name);
|
||||
extern PyObject * _PyImport_GetImportlibExternalLoader(
|
||||
PyInterpreterState *interp,
|
||||
const char *loader_name);
|
||||
extern PyObject * _PyImport_BlessMyLoader(
|
||||
PyInterpreterState *interp,
|
||||
PyObject *module_globals);
|
||||
extern PyObject * _PyImport_ImportlibModuleRepr(
|
||||
PyInterpreterState *interp,
|
||||
PyObject *module);
|
||||
|
||||
|
||||
extern PyStatus _PyImport_Init(void);
|
||||
extern void _PyImport_Fini(void);
|
||||
extern void _PyImport_Fini2(void);
|
||||
|
||||
extern PyStatus _PyImport_InitCore(
|
||||
PyThreadState *tstate,
|
||||
PyObject *sysmod,
|
||||
int importlib);
|
||||
extern PyStatus _PyImport_InitExternal(PyThreadState *tstate);
|
||||
extern void _PyImport_FiniCore(PyInterpreterState *interp);
|
||||
extern void _PyImport_FiniExternal(PyInterpreterState *interp);
|
||||
|
||||
|
||||
extern PyObject* _PyImport_GetBuiltinModuleNames(void);
|
||||
|
||||
struct _module_alias {
|
||||
const char *name; /* ASCII encoded string */
|
||||
const char *orig; /* ASCII encoded string */
|
||||
};
|
||||
|
||||
// Export these 3 symbols for test_ctypes
|
||||
PyAPI_DATA(const struct _frozen*) _PyImport_FrozenBootstrap;
|
||||
PyAPI_DATA(const struct _frozen*) _PyImport_FrozenStdlib;
|
||||
PyAPI_DATA(const struct _frozen*) _PyImport_FrozenTest;
|
||||
|
||||
extern const struct _module_alias * _PyImport_FrozenAliases;
|
||||
|
||||
extern int _PyImport_CheckSubinterpIncompatibleExtensionAllowed(
|
||||
const char *name);
|
||||
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(int) _PyImport_ClearExtension(PyObject *name, PyObject *filename);
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
// Assuming that the GIL is enabled from a call to
|
||||
// _PyEval_EnableGILTransient(), resolve the transient request depending on the
|
||||
// state of the module argument:
|
||||
// - If module is NULL or a PyModuleObject with md_gil == Py_MOD_GIL_NOT_USED,
|
||||
// call _PyEval_DisableGIL().
|
||||
// - Otherwise, call _PyEval_EnableGILPermanent(). If the GIL was not already
|
||||
// enabled permanently, issue a warning referencing the module's name.
|
||||
//
|
||||
// This function may raise an exception.
|
||||
extern int _PyImport_CheckGILForModule(PyObject *module, PyObject *module_name);
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_IMPORT_H */
|
||||
#endif /* !Py_LIMITED_API */
|
||||
139
extern/include/python/internal/pycore_importdl.h
vendored
Normal file
139
extern/include/python/internal/pycore_importdl.h
vendored
Normal file
@@ -0,0 +1,139 @@
|
||||
#ifndef Py_INTERNAL_IMPORTDL_H
|
||||
#define Py_INTERNAL_IMPORTDL_H
|
||||
|
||||
#include "patchlevel.h" // PY_MAJOR_VERSION
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
extern const char *_PyImport_DynLoadFiletab[];
|
||||
|
||||
|
||||
typedef enum ext_module_kind {
|
||||
_Py_ext_module_kind_UNKNOWN = 0,
|
||||
_Py_ext_module_kind_SINGLEPHASE = 1,
|
||||
_Py_ext_module_kind_MULTIPHASE = 2,
|
||||
_Py_ext_module_kind_INVALID = 3,
|
||||
} _Py_ext_module_kind;
|
||||
|
||||
typedef enum ext_module_origin {
|
||||
_Py_ext_module_origin_CORE = 1,
|
||||
_Py_ext_module_origin_BUILTIN = 2,
|
||||
_Py_ext_module_origin_DYNAMIC = 3,
|
||||
} _Py_ext_module_origin;
|
||||
|
||||
/* Input for loading an extension module. */
|
||||
struct _Py_ext_module_loader_info {
|
||||
PyObject *filename;
|
||||
#ifndef MS_WINDOWS
|
||||
PyObject *filename_encoded;
|
||||
#endif
|
||||
PyObject *name;
|
||||
PyObject *name_encoded;
|
||||
/* path is always a borrowed ref of name or filename,
|
||||
* depending on if it's builtin or not. */
|
||||
PyObject *path;
|
||||
_Py_ext_module_origin origin;
|
||||
const char *hook_prefix;
|
||||
const char *newcontext;
|
||||
};
|
||||
extern void _Py_ext_module_loader_info_clear(
|
||||
struct _Py_ext_module_loader_info *info);
|
||||
extern int _Py_ext_module_loader_info_init(
|
||||
struct _Py_ext_module_loader_info *info,
|
||||
PyObject *name,
|
||||
PyObject *filename,
|
||||
_Py_ext_module_origin origin);
|
||||
extern int _Py_ext_module_loader_info_init_for_core(
|
||||
struct _Py_ext_module_loader_info *p_info,
|
||||
PyObject *name);
|
||||
extern int _Py_ext_module_loader_info_init_for_builtin(
|
||||
struct _Py_ext_module_loader_info *p_info,
|
||||
PyObject *name);
|
||||
#ifdef HAVE_DYNAMIC_LOADING
|
||||
extern int _Py_ext_module_loader_info_init_from_spec(
|
||||
struct _Py_ext_module_loader_info *info,
|
||||
PyObject *spec);
|
||||
#endif
|
||||
|
||||
/* The result from running an extension module's init function. */
|
||||
struct _Py_ext_module_loader_result {
|
||||
PyModuleDef *def;
|
||||
PyObject *module;
|
||||
_Py_ext_module_kind kind;
|
||||
struct _Py_ext_module_loader_result_error *err;
|
||||
struct _Py_ext_module_loader_result_error {
|
||||
enum _Py_ext_module_loader_result_error_kind {
|
||||
_Py_ext_module_loader_result_EXCEPTION = 0,
|
||||
_Py_ext_module_loader_result_ERR_MISSING = 1,
|
||||
_Py_ext_module_loader_result_ERR_UNREPORTED_EXC = 2,
|
||||
_Py_ext_module_loader_result_ERR_UNINITIALIZED = 3,
|
||||
_Py_ext_module_loader_result_ERR_NONASCII_NOT_MULTIPHASE = 4,
|
||||
_Py_ext_module_loader_result_ERR_NOT_MODULE = 5,
|
||||
_Py_ext_module_loader_result_ERR_MISSING_DEF = 6,
|
||||
} kind;
|
||||
PyObject *exc;
|
||||
} _err;
|
||||
};
|
||||
extern void _Py_ext_module_loader_result_clear(
|
||||
struct _Py_ext_module_loader_result *res);
|
||||
extern void _Py_ext_module_loader_result_apply_error(
|
||||
struct _Py_ext_module_loader_result *res,
|
||||
const char *name);
|
||||
|
||||
/* The module init function. */
|
||||
typedef PyObject *(*PyModInitFunction)(void);
|
||||
#ifdef HAVE_DYNAMIC_LOADING
|
||||
extern PyModInitFunction _PyImport_GetModInitFunc(
|
||||
struct _Py_ext_module_loader_info *info,
|
||||
FILE *fp);
|
||||
#endif
|
||||
extern int _PyImport_RunModInitFunc(
|
||||
PyModInitFunction p0,
|
||||
struct _Py_ext_module_loader_info *info,
|
||||
struct _Py_ext_module_loader_result *p_res);
|
||||
|
||||
|
||||
/* Max length of module suffix searched for -- accommodates "module.slb" */
|
||||
#define MAXSUFFIXSIZE 12
|
||||
|
||||
#ifdef MS_WINDOWS
|
||||
#include <windows.h>
|
||||
typedef FARPROC dl_funcptr;
|
||||
|
||||
#ifdef _DEBUG
|
||||
# define PYD_DEBUG_SUFFIX "_d"
|
||||
#else
|
||||
# define PYD_DEBUG_SUFFIX ""
|
||||
#endif
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
# define PYD_THREADING_TAG "t"
|
||||
#else
|
||||
# define PYD_THREADING_TAG ""
|
||||
#endif
|
||||
|
||||
#ifdef PYD_PLATFORM_TAG
|
||||
# define PYD_SOABI "cp" Py_STRINGIFY(PY_MAJOR_VERSION) Py_STRINGIFY(PY_MINOR_VERSION) PYD_THREADING_TAG "-" PYD_PLATFORM_TAG
|
||||
#else
|
||||
# define PYD_SOABI "cp" Py_STRINGIFY(PY_MAJOR_VERSION) Py_STRINGIFY(PY_MINOR_VERSION) PYD_THREADING_TAG
|
||||
#endif
|
||||
|
||||
#define PYD_TAGGED_SUFFIX PYD_DEBUG_SUFFIX "." PYD_SOABI ".pyd"
|
||||
#define PYD_UNTAGGED_SUFFIX PYD_DEBUG_SUFFIX ".pyd"
|
||||
|
||||
#else
|
||||
typedef void (*dl_funcptr)(void);
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_IMPORTDL_H */
|
||||
36
extern/include/python/internal/pycore_index_pool.h
vendored
Normal file
36
extern/include/python/internal/pycore_index_pool.h
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
#ifndef Py_INTERNAL_INDEX_POOL_H
|
||||
#define Py_INTERNAL_INDEX_POOL_H
|
||||
|
||||
#include "Python.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
|
||||
#include "pycore_interp_structs.h"
|
||||
|
||||
// This contains code for allocating unique indices in an array. It is used by
|
||||
// the free-threaded build to assign each thread a globally unique index into
|
||||
// each code object's thread-local bytecode array.
|
||||
|
||||
|
||||
// Allocate the smallest available index. Returns -1 on error.
|
||||
extern int32_t _PyIndexPool_AllocIndex(_PyIndexPool *indices);
|
||||
|
||||
// Release `index` back to the pool
|
||||
extern void _PyIndexPool_FreeIndex(_PyIndexPool *indices, int32_t index);
|
||||
|
||||
extern void _PyIndexPool_Fini(_PyIndexPool *indices);
|
||||
|
||||
#endif // Py_GIL_DISABLED
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // !Py_INTERNAL_INDEX_POOL_H
|
||||
197
extern/include/python/internal/pycore_initconfig.h
vendored
Normal file
197
extern/include/python/internal/pycore_initconfig.h
vendored
Normal file
@@ -0,0 +1,197 @@
|
||||
#ifndef Py_INTERNAL_CORECONFIG_H
|
||||
#define Py_INTERNAL_CORECONFIG_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_typedefs.h" // _PyRuntimeState
|
||||
|
||||
/* --- PyStatus ----------------------------------------------- */
|
||||
|
||||
/* Almost all errors causing Python initialization to fail */
|
||||
#ifdef _MSC_VER
|
||||
/* Visual Studio 2015 doesn't implement C99 __func__ in C */
|
||||
# define _PyStatus_GET_FUNC() __FUNCTION__
|
||||
#else
|
||||
# define _PyStatus_GET_FUNC() __func__
|
||||
#endif
|
||||
|
||||
#define _PyStatus_OK() \
|
||||
(PyStatus){._type = _PyStatus_TYPE_OK}
|
||||
/* other fields are set to 0 */
|
||||
#define _PyStatus_ERR(ERR_MSG) \
|
||||
(PyStatus){ \
|
||||
._type = _PyStatus_TYPE_ERROR, \
|
||||
.func = _PyStatus_GET_FUNC(), \
|
||||
.err_msg = (ERR_MSG)}
|
||||
/* other fields are set to 0 */
|
||||
#define _PyStatus_NO_MEMORY_ERRMSG "memory allocation failed"
|
||||
#define _PyStatus_NO_MEMORY() _PyStatus_ERR(_PyStatus_NO_MEMORY_ERRMSG)
|
||||
#define _PyStatus_EXIT(EXITCODE) \
|
||||
(PyStatus){ \
|
||||
._type = _PyStatus_TYPE_EXIT, \
|
||||
.exitcode = (EXITCODE)}
|
||||
#define _PyStatus_IS_ERROR(err) \
|
||||
((err)._type == _PyStatus_TYPE_ERROR)
|
||||
#define _PyStatus_IS_EXIT(err) \
|
||||
((err)._type == _PyStatus_TYPE_EXIT)
|
||||
#define _PyStatus_EXCEPTION(err) \
|
||||
((err)._type != _PyStatus_TYPE_OK)
|
||||
#define _PyStatus_UPDATE_FUNC(err) \
|
||||
do { (err).func = _PyStatus_GET_FUNC(); } while (0)
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(void) _PyErr_SetFromPyStatus(PyStatus status);
|
||||
|
||||
|
||||
/* --- PyWideStringList ------------------------------------------------ */
|
||||
|
||||
#define _PyWideStringList_INIT (PyWideStringList){.length = 0, .items = NULL}
|
||||
|
||||
#ifndef NDEBUG
|
||||
extern int _PyWideStringList_CheckConsistency(const PyWideStringList *list);
|
||||
#endif
|
||||
extern void _PyWideStringList_Clear(PyWideStringList *list);
|
||||
extern int _PyWideStringList_Copy(PyWideStringList *list,
|
||||
const PyWideStringList *list2);
|
||||
extern PyStatus _PyWideStringList_Extend(PyWideStringList *list,
|
||||
const PyWideStringList *list2);
|
||||
extern PyObject* _PyWideStringList_AsList(const PyWideStringList *list);
|
||||
|
||||
|
||||
/* --- _PyArgv ---------------------------------------------------- */
|
||||
|
||||
typedef struct _PyArgv {
|
||||
Py_ssize_t argc;
|
||||
int use_bytes_argv;
|
||||
char * const *bytes_argv;
|
||||
wchar_t * const *wchar_argv;
|
||||
} _PyArgv;
|
||||
|
||||
extern PyStatus _PyArgv_AsWstrList(const _PyArgv *args,
|
||||
PyWideStringList *list);
|
||||
|
||||
|
||||
/* --- Helper functions ------------------------------------------- */
|
||||
|
||||
extern int _Py_str_to_int(
|
||||
const char *str,
|
||||
int *result);
|
||||
extern const wchar_t* _Py_get_xoption(
|
||||
const PyWideStringList *xoptions,
|
||||
const wchar_t *name);
|
||||
extern const char* _Py_GetEnv(
|
||||
int use_environment,
|
||||
const char *name);
|
||||
extern void _Py_get_env_flag(
|
||||
int use_environment,
|
||||
int *flag,
|
||||
const char *name);
|
||||
|
||||
/* Py_GetArgcArgv() helper */
|
||||
extern void _Py_ClearArgcArgv(void);
|
||||
|
||||
|
||||
/* --- _PyPreCmdline ------------------------------------------------- */
|
||||
|
||||
/* Command-line options parsed before the main configuration is read.
   Integer fields use -1 for "not set yet" (see _PyPreCmdline_INIT). */
typedef struct {
    PyWideStringList argv;
    PyWideStringList xoptions;     /* "-X value" option */
    int isolated;                  /* -I option */
    int use_environment;           /* -E option */
    int dev_mode;                  /* -X dev and PYTHONDEVMODE */
    int warn_default_encoding;     /* -X warn_default_encoding and PYTHONWARNDEFAULTENCODING */
} _PyPreCmdline;

/* Initializer leaving the tri-state flags "unset" (-1). */
#define _PyPreCmdline_INIT \
    (_PyPreCmdline){ \
        .use_environment = -1, \
        .isolated = -1, \
        .dev_mode = -1}
/* Note: _PyPreCmdline_INIT sets other fields to 0/NULL */
|
||||
|
||||
extern void _PyPreCmdline_Clear(_PyPreCmdline *cmdline);
|
||||
extern PyStatus _PyPreCmdline_SetArgv(_PyPreCmdline *cmdline,
|
||||
const _PyArgv *args);
|
||||
extern PyStatus _PyPreCmdline_SetConfig(
|
||||
const _PyPreCmdline *cmdline,
|
||||
PyConfig *config);
|
||||
extern PyStatus _PyPreCmdline_Read(_PyPreCmdline *cmdline,
|
||||
const PyPreConfig *preconfig);
|
||||
|
||||
|
||||
/* --- PyPreConfig ----------------------------------------------- */
|
||||
|
||||
// Export for '_testembed' program
|
||||
PyAPI_FUNC(void) _PyPreConfig_InitCompatConfig(PyPreConfig *preconfig);
|
||||
|
||||
extern void _PyPreConfig_InitFromConfig(
|
||||
PyPreConfig *preconfig,
|
||||
const PyConfig *config);
|
||||
extern PyStatus _PyPreConfig_InitFromPreConfig(
|
||||
PyPreConfig *preconfig,
|
||||
const PyPreConfig *config2);
|
||||
extern PyObject* _PyPreConfig_AsDict(const PyPreConfig *preconfig);
|
||||
extern void _PyPreConfig_GetConfig(PyPreConfig *preconfig,
|
||||
const PyConfig *config);
|
||||
extern PyStatus _PyPreConfig_Read(PyPreConfig *preconfig,
|
||||
const _PyArgv *args);
|
||||
extern PyStatus _PyPreConfig_Write(const PyPreConfig *preconfig);
|
||||
|
||||
|
||||
/* --- PyConfig ---------------------------------------------- */
|
||||
|
||||
/* How a PyConfig was initialized; selects which set of defaults applies. */
typedef enum {
    /* Py_Initialize() API: backward compatibility with Python 3.6 and 3.7 */
    _PyConfig_INIT_COMPAT = 1,
    _PyConfig_INIT_PYTHON = 2,    /* regular "python" executable defaults */
    _PyConfig_INIT_ISOLATED = 3   /* isolated-mode defaults (no env, no user site) */
} _PyConfigInitEnum;

/* Requested GIL mode for the interpreter. */
typedef enum {
    /* In free threaded builds, this means that the GIL is disabled at startup,
       but may be enabled by loading an incompatible extension module. */
    _PyConfig_GIL_DEFAULT = -1,

    /* The GIL has been forced off or on, and will not be affected by module loading. */
    _PyConfig_GIL_DISABLE = 0,
    _PyConfig_GIL_ENABLE = 1,
} _PyConfigGILEnum;
|
||||
|
||||
// Export for '_testembed' program
|
||||
PyAPI_FUNC(void) _PyConfig_InitCompatConfig(PyConfig *config);
|
||||
|
||||
extern PyStatus _PyConfig_Copy(
|
||||
PyConfig *config,
|
||||
const PyConfig *config2);
|
||||
extern PyStatus _PyConfig_InitPathConfig(
|
||||
PyConfig *config,
|
||||
int compute_path_config);
|
||||
extern PyStatus _PyConfig_InitImportConfig(PyConfig *config);
|
||||
extern PyStatus _PyConfig_Read(PyConfig *config, int compute_path_config);
|
||||
extern PyStatus _PyConfig_Write(const PyConfig *config,
|
||||
_PyRuntimeState *runtime);
|
||||
extern PyStatus _PyConfig_SetPyArgv(
|
||||
PyConfig *config,
|
||||
const _PyArgv *args);
|
||||
extern PyObject* _PyConfig_CreateXOptionsDict(const PyConfig *config);
|
||||
|
||||
extern void _Py_DumpPathConfig(PyThreadState *tstate);
|
||||
|
||||
|
||||
/* --- Function used for testing ---------------------------------- */
|
||||
|
||||
// Export these functions for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(PyObject*) _PyConfig_AsDict(const PyConfig *config);
|
||||
PyAPI_FUNC(int) _PyConfig_FromDict(PyConfig *config, PyObject *dict);
|
||||
PyAPI_FUNC(PyObject*) _Py_Get_Getpath_CodeObject(void);
|
||||
PyAPI_FUNC(PyObject*) _Py_GetConfigsAsDict(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CORECONFIG_H */
|
||||
83
extern/include/python/internal/pycore_instruction_sequence.h
vendored
Normal file
83
extern/include/python/internal/pycore_instruction_sequence.h
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
#ifndef Py_INTERNAL_INSTRUCTION_SEQUENCE_H
|
||||
#define Py_INTERNAL_INSTRUCTION_SEQUENCE_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_symtable.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
/* Exception-handler metadata attached to an instruction while the compiler's
   instruction sequence is being built. */
typedef struct {
    int h_label;           /* label id of the covering exception handler;
                              presumably a negative id means "none" -- TODO confirm */
    int h_startdepth;      /* value-stack depth expected on entry to the handler */
    int h_preserve_lasti;  /* non-zero if the handler needs lasti preserved */
} _PyExceptHandlerInfo;

/* One (pseudo-)instruction in a compiler instruction sequence. */
typedef struct {
    int i_opcode;
    int i_oparg;
    _Py_SourceLocation i_loc;                    /* source location, for tracebacks etc. */
    _PyExceptHandlerInfo i_except_handler_info;  /* covering handler info, see above */

    /* Temporary fields, used by the assembler and in instr_sequence_to_cfg */
    int i_target;
    int i_offset;
} _PyInstruction;

/* Growable array of instructions emitted by the compiler front end.
   This is a PyObject (see _PyInstructionSequence_Type below) so it can be
   exposed to Python-level tooling. */
typedef struct instruction_sequence {
    PyObject_HEAD
    _PyInstruction *s_instrs;  /* the instruction array */
    int s_allocated;           /* capacity of s_instrs */
    int s_used;                /* number of entries currently in use */

    int s_next_free_label; /* next free label id */

    /* Map of a label id to instruction offset (index into s_instrs).
     * If s_labelmap is NULL, then each label id is the offset itself.
     */
    int *s_labelmap;
    int s_labelmap_size;

    /* PyList of instruction sequences of nested functions */
    PyObject *s_nested;

    /* Code for creating annotations, spliced into the main sequence later */
    struct instruction_sequence *s_annotations_code;
} _PyInstructionSequence;

/* Jump-target label; only the integer id is meaningful (compare with
   SAME_JUMP_TARGET_LABEL, never with ==). */
typedef struct {
    int id;
} _PyJumpTargetLabel;

/* Sentinel "no label" value (id -1). */
#define NO_LABEL ((const _PyJumpTargetLabel){-1})

#define SAME_JUMP_TARGET_LABEL(L1, L2) ((L1).id == (L2).id)
#define IS_JUMP_TARGET_LABEL(L) (!SAME_JUMP_TARGET_LABEL((L), (NO_LABEL)))
|
||||
|
||||
PyAPI_FUNC(PyObject*)_PyInstructionSequence_New(void);
|
||||
|
||||
int _PyInstructionSequence_UseLabel(_PyInstructionSequence *seq, int lbl);
|
||||
int _PyInstructionSequence_Addop(_PyInstructionSequence *seq,
|
||||
int opcode, int oparg,
|
||||
_Py_SourceLocation loc);
|
||||
_PyJumpTargetLabel _PyInstructionSequence_NewLabel(_PyInstructionSequence *seq);
|
||||
int _PyInstructionSequence_ApplyLabelMap(_PyInstructionSequence *seq);
|
||||
int _PyInstructionSequence_InsertInstruction(_PyInstructionSequence *seq, int pos,
|
||||
int opcode, int oparg, _Py_SourceLocation loc);
|
||||
int _PyInstructionSequence_SetAnnotationsCode(_PyInstructionSequence *seq,
|
||||
_PyInstructionSequence *annotations);
|
||||
int _PyInstructionSequence_AddNested(_PyInstructionSequence *seq, _PyInstructionSequence *nested);
|
||||
void PyInstructionSequence_Fini(_PyInstructionSequence *seq);
|
||||
|
||||
extern PyTypeObject _PyInstructionSequence_Type;
|
||||
#define _PyInstructionSequence_Check(v) Py_IS_TYPE((v), &_PyInstructionSequence_Type)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_INSTRUCTION_SEQUENCE_H */
|
||||
127
extern/include/python/internal/pycore_instruments.h
vendored
Normal file
127
extern/include/python/internal/pycore_instruments.h
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
#ifndef Py_INTERNAL_INSTRUMENT_H
|
||||
#define Py_INTERNAL_INSTRUMENT_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_structs.h" // _Py_CODEUNIT
|
||||
#include "pycore_typedefs.h" // _PyInterpreterFrame
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef uint32_t _PyMonitoringEventSet;
|
||||
|
||||
/* Tool IDs */
|
||||
|
||||
/* These are defined in PEP 669 for convenience to avoid clashes */
|
||||
#define PY_MONITORING_DEBUGGER_ID 0
|
||||
#define PY_MONITORING_COVERAGE_ID 1
|
||||
#define PY_MONITORING_PROFILER_ID 2
|
||||
#define PY_MONITORING_OPTIMIZER_ID 5
|
||||
|
||||
/* Internal IDs used to support sys.setprofile() and sys.settrace() */
|
||||
#define PY_MONITORING_SYS_PROFILE_ID 6
|
||||
#define PY_MONITORING_SYS_TRACE_ID 7
|
||||
|
||||
|
||||
PyObject *_PyMonitoring_RegisterCallback(int tool_id, int event_id, PyObject *obj);
|
||||
|
||||
int _PyMonitoring_SetEvents(int tool_id, _PyMonitoringEventSet events);
|
||||
int _PyMonitoring_SetLocalEvents(PyCodeObject *code, int tool_id, _PyMonitoringEventSet events);
|
||||
int _PyMonitoring_GetLocalEvents(PyCodeObject *code, int tool_id, _PyMonitoringEventSet *events);
|
||||
|
||||
extern int
|
||||
_Py_call_instrumentation(PyThreadState *tstate, int event,
|
||||
_PyInterpreterFrame *frame, _Py_CODEUNIT *instr);
|
||||
|
||||
extern int
|
||||
_Py_call_instrumentation_line(PyThreadState *tstate, _PyInterpreterFrame* frame,
|
||||
_Py_CODEUNIT *instr, _Py_CODEUNIT *prev);
|
||||
|
||||
extern int
|
||||
_Py_call_instrumentation_instruction(
|
||||
PyThreadState *tstate, _PyInterpreterFrame* frame, _Py_CODEUNIT *instr);
|
||||
|
||||
_Py_CODEUNIT *
|
||||
_Py_call_instrumentation_jump(
|
||||
_Py_CODEUNIT *instr, PyThreadState *tstate, int event,
|
||||
_PyInterpreterFrame *frame, _Py_CODEUNIT *src, _Py_CODEUNIT *dest);
|
||||
|
||||
extern int
|
||||
_Py_call_instrumentation_arg(PyThreadState *tstate, int event,
|
||||
_PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg);
|
||||
|
||||
extern int
|
||||
_Py_call_instrumentation_2args(PyThreadState *tstate, int event,
|
||||
_PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg0, PyObject *arg1);
|
||||
|
||||
extern void
|
||||
_Py_call_instrumentation_exc2(PyThreadState *tstate, int event,
|
||||
_PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg0, PyObject *arg1);
|
||||
|
||||
extern int
|
||||
_Py_Instrumentation_GetLine(PyCodeObject *code, int index);
|
||||
|
||||
extern PyObject _PyInstrumentation_MISSING;
|
||||
extern PyObject _PyInstrumentation_DISABLE;
|
||||
|
||||
|
||||
/* Total tool ids available */
#define PY_MONITORING_TOOL_IDS 8
/* Count of all local monitoring events */
#define _PY_MONITORING_LOCAL_EVENTS 11
/* Count of all "real" monitoring events (not derived from other events) */
#define _PY_MONITORING_UNGROUPED_EVENTS 16
/* Count of all monitoring events */
#define _PY_MONITORING_EVENTS 19

/* Tables of which tools are active for each monitored event.
   NOTE(review): each uint8_t entry appears to be a bitmask with one bit per
   tool id (PY_MONITORING_TOOL_IDS == 8) -- confirm against instrumentation.c. */
typedef struct _Py_LocalMonitors {
    /* Covers only the per-code-object ("local") events. */
    uint8_t tools[_PY_MONITORING_LOCAL_EVENTS];
} _Py_LocalMonitors;

/* Same table for all ungrouped events; used for interpreter-wide monitoring. */
typedef struct _Py_GlobalMonitors {
    uint8_t tools[_PY_MONITORING_UNGROUPED_EVENTS];
} _Py_GlobalMonitors;

/* Ancillary data structure used for instrumentation.
   Line instrumentation creates this with sufficient
   space for one entry per code unit. The total size
   of the data will be `bytes_per_entry * Py_SIZE(code)` */
typedef struct {
    uint8_t bytes_per_entry;
    uint8_t data[1];   /* variable-length tail; real size set at allocation */
} _PyCoLineInstrumentationData;

/* Main data structure used for instrumentation.
 * This is allocated when needed for instrumentation
 */
typedef struct _PyCoMonitoringData {
    /* Monitoring specific to this code object */
    _Py_LocalMonitors local_monitors;
    /* Monitoring that is active on this code object */
    _Py_LocalMonitors active_monitors;
    /* The tools that are to be notified for events for the matching code unit */
    uint8_t *tools;
    /* The version of tools when they instrument the code */
    uintptr_t tool_versions[PY_MONITORING_TOOL_IDS];
    /* Information to support line events */
    _PyCoLineInstrumentationData *lines;
    /* The tools that are to be notified for line events for the matching code unit */
    uint8_t *line_tools;
    /* Information to support instruction events */
    /* The underlying instructions, which can themselves be instrumented */
    uint8_t *per_instruction_opcodes;
    /* The tools that are to be notified for instruction events for the matching code unit */
    uint8_t *per_instruction_tools;
} _PyCoMonitoringData;
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_INSTRUMENT_H */
|
||||
109
extern/include/python/internal/pycore_interp.h
vendored
Normal file
109
extern/include/python/internal/pycore_interp.h
vendored
Normal file
@@ -0,0 +1,109 @@
|
||||
#ifndef Py_INTERNAL_INTERP_H
|
||||
#define Py_INTERNAL_INTERP_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_interp_structs.h" // PyInterpreterState
|
||||
|
||||
|
||||
/* interpreter state */
|
||||
|
||||
#define _PyInterpreterState_WHENCE_NOTSET -1
|
||||
#define _PyInterpreterState_WHENCE_UNKNOWN 0
|
||||
#define _PyInterpreterState_WHENCE_RUNTIME 1
|
||||
#define _PyInterpreterState_WHENCE_LEGACY_CAPI 2
|
||||
#define _PyInterpreterState_WHENCE_CAPI 3
|
||||
#define _PyInterpreterState_WHENCE_XI 4
|
||||
#define _PyInterpreterState_WHENCE_STDLIB 5
|
||||
#define _PyInterpreterState_WHENCE_MAX 5
|
||||
|
||||
|
||||
/* other API */
|
||||
|
||||
extern void _PyInterpreterState_Clear(PyThreadState *tstate);
|
||||
|
||||
/* Return the thread state currently finalizing 'interp', or NULL if the
   interpreter is not being finalized.  Relaxed atomic load: provides no
   ordering guarantees beyond atomicity of the read itself. */
static inline PyThreadState*
_PyInterpreterState_GetFinalizing(PyInterpreterState *interp) {
    return (PyThreadState*)_Py_atomic_load_ptr_relaxed(&interp->_finalizing);
}

/* Return the OS thread id recorded for the finalizing thread, or 0 if the
   interpreter is not being finalized (see _PyInterpreterState_SetFinalizing). */
static inline unsigned long
_PyInterpreterState_GetFinalizingID(PyInterpreterState *interp) {
    return _Py_atomic_load_ulong_relaxed(&interp->_finalizing_id);
}
|
||||
|
||||
static inline void
|
||||
_PyInterpreterState_SetFinalizing(PyInterpreterState *interp, PyThreadState *tstate) {
|
||||
_Py_atomic_store_ptr_relaxed(&interp->_finalizing, tstate);
|
||||
if (tstate == NULL) {
|
||||
_Py_atomic_store_ulong_relaxed(&interp->_finalizing_id, 0);
|
||||
}
|
||||
else {
|
||||
// XXX Re-enable this assert once gh-109860 is fixed.
|
||||
//assert(tstate->thread_id == PyThread_get_thread_ident());
|
||||
_Py_atomic_store_ulong_relaxed(&interp->_finalizing_id,
|
||||
tstate->thread_id);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Exports for the _testinternalcapi module.
|
||||
PyAPI_FUNC(int64_t) _PyInterpreterState_ObjectToID(PyObject *);
|
||||
PyAPI_FUNC(PyInterpreterState *) _PyInterpreterState_LookUpID(int64_t);
|
||||
PyAPI_FUNC(PyInterpreterState *) _PyInterpreterState_LookUpIDObject(PyObject *);
|
||||
PyAPI_FUNC(void) _PyInterpreterState_IDIncref(PyInterpreterState *);
|
||||
PyAPI_FUNC(void) _PyInterpreterState_IDDecref(PyInterpreterState *);
|
||||
|
||||
PyAPI_FUNC(int) _PyInterpreterState_IsReady(PyInterpreterState *interp);
|
||||
|
||||
PyAPI_FUNC(long) _PyInterpreterState_GetWhence(PyInterpreterState *interp);
|
||||
extern void _PyInterpreterState_SetWhence(
|
||||
PyInterpreterState *interp,
|
||||
long whence);
|
||||
|
||||
/*
|
||||
Runtime Feature Flags
|
||||
|
||||
Each flag indicate whether or not a specific runtime feature
|
||||
is available in a given context. For example, forking the process
|
||||
might not be allowed in the current interpreter (i.e. os.fork() would fail).
|
||||
*/
|
||||
|
||||
/* Set if the interpreter share obmalloc runtime state
|
||||
with the main interpreter. */
|
||||
#define Py_RTFLAGS_USE_MAIN_OBMALLOC (1UL << 5)
|
||||
|
||||
/* Set if import should check a module for subinterpreter support. */
|
||||
#define Py_RTFLAGS_MULTI_INTERP_EXTENSIONS (1UL << 8)
|
||||
|
||||
/* Set if threads are allowed. */
|
||||
#define Py_RTFLAGS_THREADS (1UL << 10)
|
||||
|
||||
/* Set if daemon threads are allowed. */
|
||||
#define Py_RTFLAGS_DAEMON_THREADS (1UL << 11)
|
||||
|
||||
/* Set if os.fork() is allowed. */
|
||||
#define Py_RTFLAGS_FORK (1UL << 15)
|
||||
|
||||
/* Set if os.exec*() is allowed. */
|
||||
#define Py_RTFLAGS_EXEC (1UL << 16)
|
||||
|
||||
extern int _PyInterpreterState_HasFeature(PyInterpreterState *interp,
|
||||
unsigned long feature);
|
||||
|
||||
PyAPI_FUNC(PyStatus) _PyInterpreterState_New(
|
||||
PyThreadState *tstate,
|
||||
PyInterpreterState **pinterp);
|
||||
|
||||
extern const PyConfig* _PyInterpreterState_GetConfig(
|
||||
PyInterpreterState *interp);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_INTERP_H */
|
||||
980
extern/include/python/internal/pycore_interp_structs.h
vendored
Normal file
980
extern/include/python/internal/pycore_interp_structs.h
vendored
Normal file
@@ -0,0 +1,980 @@
|
||||
/* This file contains the struct definitions for interpreter state
|
||||
* and other necessary structs */
|
||||
|
||||
#ifndef Py_INTERNAL_INTERP_STRUCTS_H
|
||||
#define Py_INTERNAL_INTERP_STRUCTS_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "pycore_ast_state.h" // struct ast_state
|
||||
#include "pycore_llist.h" // struct llist_node
|
||||
#include "pycore_opcode_utils.h" // NUM_COMMON_CONSTANTS
|
||||
#include "pycore_pymath.h" // _PY_SHORT_FLOAT_REPR
|
||||
#include "pycore_structs.h" // PyHamtObject
|
||||
#include "pycore_tstate.h" // _PyThreadStateImpl
|
||||
#include "pycore_typedefs.h" // _PyRuntimeState
|
||||
|
||||
|
||||
#define CODE_MAX_WATCHERS 8
|
||||
#define CONTEXT_MAX_WATCHERS 8
|
||||
#define FUNC_MAX_WATCHERS 8
|
||||
#define TYPE_MAX_WATCHERS 8
|
||||
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
// This should be prime but otherwise the choice is arbitrary. A larger value
|
||||
// increases concurrency at the expense of memory.
|
||||
# define NUM_WEAKREF_LIST_LOCKS 127
|
||||
#endif
|
||||
|
||||
/* Signature of a pending call: invoked as func(arg); return value semantics
   are defined by the ceval machinery (0 presumably means success -- confirm). */
typedef int (*_Py_pending_call_func)(void *);

/* One queued pending call. */
struct _pending_call {
    _Py_pending_call_func func;
    void *arg;     /* opaque argument passed through to func */
    int flags;     /* _Py_PENDING_* flags -- see pycore_ceval.h */
};

/* Fixed capacity of the pending-call queue (preallocated; see below). */
#define PENDINGCALLSARRAYSIZE 300

/* Queue of calls to be run by the eval loop at a safe point. */
struct _pending_calls {
    PyThreadState *handling_thread;  /* thread currently draining the queue, if any */
    PyMutex mutex;                   /* guards the fields below */
    /* Request for running pending calls. */
    int32_t npending;
    /* The maximum allowed number of pending calls.
       If the queue fills up to this point then _PyEval_AddPendingCall()
       will return _Py_ADD_PENDING_FULL. */
    int32_t max;
    /* We don't want a flood of pending calls to interrupt any one thread
       for too long, so we keep a limit on the number handled per pass.
       A value of 0 means there is no limit (other than the maximum
       size of the list of pending calls). */
    int32_t maxloop;
    /* Preallocated ring buffer of queued calls; 'first' and 'next' are the
       consume/produce indices -- presumably; confirm against ceval_gil.c. */
    struct _pending_call calls[PENDINGCALLSARRAYSIZE];
    int first;
    int next;
};
|
||||
|
||||
/* Lifecycle state of the perf-trampoline support. */
typedef enum {
    PERF_STATUS_FAILED = -1,  // Perf trampoline is in an invalid state
    PERF_STATUS_NO_INIT = 0,  // Perf trampoline is not initialized
    PERF_STATUS_OK = 1,       // Perf trampoline is ready to be executed
} perf_status_t;

#ifdef PY_HAVE_PERF_TRAMPOLINE
struct code_arena_st;

/* Backend hooks for a perf-trampoline implementation.  The exact contract
   of each hook is defined by the trampoline backends in perf_trampoline.c. */
struct trampoline_api_st {
    void* (*init_state)(void);
    void (*write_state)(void* state, const void *code_addr,
                        unsigned int code_size, PyCodeObject* code);
    int (*free_state)(void* state);
    void *state;               /* opaque backend state, produced by init_state */
    Py_ssize_t code_padding;
};
#endif


/* Per-runtime (process-wide) eval-loop state. */
struct _ceval_runtime_state {
    struct {
#ifdef PY_HAVE_PERF_TRAMPOLINE
        perf_status_t status;
        int perf_trampoline_type;
        Py_ssize_t extra_code_index;
        struct code_arena_st *code_arena;
        struct trampoline_api_st trampoline_api;
        FILE *map_file;                      /* perf map file handle */
        Py_ssize_t persist_after_fork;
        _PyFrameEvalFunction prev_eval_frame;
        Py_ssize_t trampoline_refcount;
        int code_watcher_id;
#else
        int _not_used;                       /* keep the struct non-empty */
#endif
    } perf;
    /* Pending calls to be made only on the main thread. */
    // The signal machinery falls back on this
    // so it must be especially stable and efficient.
    // For example, we use a preallocated array
    // for the list of pending calls.
    struct _pending_calls pending_mainthread;
    PyMutex unused_sys_trace_profile_mutex;  // kept for ABI compatibility
};


/* Per-interpreter eval-loop state. */
struct _ceval_state {
    /* This variable holds the global instrumentation version. When a thread is
       running, this value is overlaid onto PyThreadState.eval_breaker so that
       changes in the instrumentation version will trigger the eval breaker. */
    uintptr_t instrumentation_version;
    int recursion_limit;                 /* sys.setrecursionlimit() value */
    struct _gil_runtime_state *gil;      /* this interpreter's GIL (possibly shared) */
    int own_gil;                         /* non-zero if this interpreter owns *gil */
    struct _pending_calls pending;       /* pending calls runnable on any thread */
};
|
||||
|
||||
|
||||
//###############
// runtime atexit
//
// Process-wide exit callbacks (plain C functions, no arguments).

typedef void (*atexit_callbackfunc)(void);

struct _atexit_runtime_state {
    PyMutex mutex;                  /* guards callbacks/ncallbacks */
#define NEXITFUNCS 32
    atexit_callbackfunc callbacks[NEXITFUNCS];
    int ncallbacks;                 /* number of registered callbacks */
};


//###################
// interpreter atexit
//
// Per-interpreter exit callbacks, each carrying an opaque data pointer.

typedef void (*atexit_datacallbackfunc)(void *);

/* Node in the singly linked list of low-level exit callbacks. */
typedef struct atexit_callback {
    atexit_datacallbackfunc func;
    void *data;                     /* opaque argument passed to func */
    struct atexit_callback *next;
} atexit_callback;

struct atexit_state {
#ifdef Py_GIL_DISABLED
    PyMutex ll_callbacks_lock;      /* guards ll_callbacks in free-threaded builds */
#endif
    atexit_callback *ll_callbacks;  /* head of the low-level callback list */

    // XXX The rest of the state could be moved to the atexit module state
    // and a low-level callback added for it during module exec.
    // For the moment we leave it here.

    // List containing tuples with callback information.
    // e.g. [(func, args, kwargs), ...]
    PyObject *callbacks;
};
|
||||
|
||||
|
||||
/****** Garbage collector **********/
|
||||
|
||||
/* GC information is stored BEFORE the object structure. */
typedef struct {
    // Tagged pointer to next object in the list.
    // 0 means the object is not tracked
    uintptr_t _gc_next;

    // Tagged pointer to previous object in the list.
    // Lowest two bits are used for flags documented later.
    uintptr_t _gc_prev;
} PyGC_Head;

#define _PyGC_Head_UNUSED PyGC_Head

/* One GC generation: the doubly linked list of tracked objects plus its
   collection bookkeeping. */
struct gc_generation {
    PyGC_Head head;
    int threshold;   /* collection threshold */
    int count;       /* count of allocations or collections of younger
                        generations */
};

/* Result of a single collection pass. */
struct gc_collection_stats {
    /* number of collected objects */
    Py_ssize_t collected;
    /* total number of uncollectable objects (put into gc.garbage) */
    Py_ssize_t uncollectable;
};

/* Running stats per generation */
struct gc_generation_stats {
    /* total number of collections */
    Py_ssize_t collections;
    /* total number of collected objects */
    Py_ssize_t collected;
    /* total number of uncollectable objects (put into gc.garbage) */
    Py_ssize_t uncollectable;
};

/* Phase of the incremental collector. */
enum _GCPhase {
    GC_PHASE_MARK = 0,
    GC_PHASE_COLLECT = 1
};

/* If we change this, we need to change the default value in the
   signature of gc.collect. */
#define NUM_GENERATIONS 3

/* Per-interpreter cyclic garbage collector state. */
struct _gc_runtime_state {
    /* List of objects that still need to be cleaned up, singly linked
     * via their gc headers' gc_prev pointers. */
    PyObject *trash_delete_later;
    /* Current call-stack depth of tp_dealloc calls. */
    int trash_delete_nesting;

    /* Is automatic collection enabled? */
    int enabled;
    int debug;   /* gc.set_debug() flags */
    /* linked lists of container objects */
    struct gc_generation young;
    struct gc_generation old[2];
    /* a permanent generation which won't be collected */
    struct gc_generation permanent_generation;
    struct gc_generation_stats generation_stats[NUM_GENERATIONS];
    /* true if we are currently running the collector */
    int collecting;
    /* list of uncollectable objects */
    PyObject *garbage;
    /* a list of callbacks to be invoked when collection is performed */
    PyObject *callbacks;

    Py_ssize_t heap_size;
    Py_ssize_t work_to_do;
    /* Which of the old spaces is the visited space */
    int visited_space;
    int phase;   /* enum _GCPhase */

#ifdef Py_GIL_DISABLED
    /* This is the number of objects that survived the last full
       collection. It approximates the number of long lived objects
       tracked by the GC.

       (by "full collection", we mean a collection of the oldest
       generation). */
    Py_ssize_t long_lived_total;
    /* This is the number of objects that survived all "non-full"
       collections, and are awaiting to undergo a full collection for
       the first time. */
    Py_ssize_t long_lived_pending;

    /* True if gc.freeze() has been used. */
    int freeze_active;

    /* Memory usage of the process (RSS + swap) after last GC. */
    Py_ssize_t last_mem;

    /* This accumulates the new object count whenever collection is deferred
       due to the RSS increase condition not being met. Reset on collection. */
    Py_ssize_t deferred_count;

    /* Mutex held for gc_should_collect_mem_usage(). */
    PyMutex mutex;
#endif
};
|
||||
|
||||
#include "pycore_gil.h" // struct _gil_runtime_state
|
||||
|
||||
/**** Import ********/

/* Process-wide (runtime) import state, shared by all interpreters. */
struct _import_runtime_state {
    /* The builtin modules (defined in config.c). */
    struct _inittab *inittab;
    /* The most recent value assigned to a PyModuleDef.m_base.m_index.
       This is incremented each time PyModuleDef_Init() is called,
       which is just about every time an extension module is imported.
       See PyInterpreterState.modules_by_index for more info. */
    Py_ssize_t last_module_index;
    struct {
        /* A lock to guard the cache. */
        PyMutex mutex;
        /* The actual cache of (filename, name, PyModuleDef) for modules.
           Only legacy (single-phase init) extension modules are added
           and only if they support multiple initialization (m_size >= 0)
           or are imported in the main interpreter.
           This is initialized lazily in fix_up_extension() in import.c.
           Modules are added there and looked up in _imp.find_extension(). */
        struct _Py_hashtable_t *hashtable;
    } extensions;
    /* Package context -- the full module name for package imports */
    const char * pkgcontext;
};

/* Per-interpreter import state. */
struct _import_state {
    /* cached sys.modules dictionary */
    PyObject *modules;
    /* This is the list of module objects for all legacy (single-phase init)
       extension modules ever loaded in this process (i.e. imported
       in this interpreter or in any other). Py_None stands in for
       modules that haven't actually been imported in this interpreter.

       A module's index (PyModuleDef.m_base.m_index) is used to look up
       the corresponding module object for this interpreter, if any.
       (See PyState_FindModule().) When any extension module
       is initialized during import, its moduledef gets initialized by
       PyModuleDef_Init(), and the first time that happens for each
       PyModuleDef, its index gets set to the current value of
       a global counter (see _PyRuntimeState.imports.last_module_index).
       The entry for that index in this interpreter remains unset until
       the module is actually imported here. (Py_None is used as
       a placeholder.) Note that multi-phase init modules always get
       an index for which there will never be a module set.

       This is initialized lazily in PyState_AddModule(), which is also
       where modules get added. */
    PyObject *modules_by_index;
    /* importlib module._bootstrap */
    PyObject *importlib;
    /* override for config->use_frozen_modules (for tests)
       (-1: "off", 1: "on", 0: no override) */
    int override_frozen_modules;
    int override_multi_interp_extensions_check;
#ifdef HAVE_DLOPEN
    int dlopenflags;   /* flags passed to dlopen(); see sys.setdlopenflags() */
#endif
    PyObject *import_func;   /* cached __import__ */
    /* The global import lock. */
    _PyRecursiveMutex lock;
    /* diagnostic info in PyImport_ImportModuleLevelObject() */
    struct {
        int import_level;       /* current import nesting depth (for -X importtime) */
        PyTime_t accumulated;   /* accumulated import time */
        int header;             /* non-zero once the importtime header was printed */
    } find_and_load;
};
|
||||
|
||||
|
||||
|
||||
/********** Interpreter state **************/
|
||||
|
||||
#include "pycore_object_state.h" // struct _py_object_state
|
||||
#include "pycore_crossinterp.h" // _PyXI_state_t
|
||||
|
||||
|
||||
/* Per-interpreter int (PyLong) configuration. */
struct _Py_long_state {
    int max_str_digits;   /* sys.set_int_max_str_digits() limit */
};

/* Per-interpreter codec registry state. */
struct codecs_state {
    // A list of callable objects used to search for codecs.
    PyObject *search_path;

    // A dict mapping codec names to codecs returned from a callable in
    // search_path.
    PyObject *search_cache;

    // A dict mapping error handling strategies to functions to implement them.
    PyObject *error_registry;

#ifdef Py_GIL_DISABLED
    // Used to safely delete a specific item from search_path.
    PyMutex search_path_mutex;
#endif

    // Whether or not the rest of the state is initialized.
    int initialized;
};

// Support for stop-the-world events. This exists in both the PyRuntime struct
// for global pauses and in each PyInterpreterState for per-interpreter pauses.
struct _stoptheworld_state {
    PyMutex mutex;   // Serializes stop-the-world attempts.

    // NOTE: The below fields are protected by HEAD_LOCK(runtime), not by the
    // above mutex.
    bool requested;      // Set when a pause is requested.
    bool world_stopped;  // Set when the world is stopped.
    bool is_global;      // Set when contained in PyRuntime struct.

    PyEvent stop_event;           // Set when thread_countdown reaches zero.
    Py_ssize_t thread_countdown;  // Number of threads that must pause.

    PyThreadState *requester; // Thread that requested the pause (may be NULL).
};

/* Tracks some rare events per-interpreter, used by the optimizer to turn on/off
   specific optimizations. */
typedef struct _rare_events {
    /* Setting an object's class, obj.__class__ = ... */
    uint8_t set_class;
    /* Setting the bases of a class, cls.__bases__ = ... */
    uint8_t set_bases;
    /* Setting the PEP 523 frame eval function, _PyInterpreterState_SetFrameEvalFunc() */
    uint8_t set_eval_frame_func;
    /* Modifying the builtins, __builtins__.__dict__[var] = ... */
    uint8_t builtin_dict;
    /* Modifying a function, e.g. func.__defaults__ = ..., etc. */
    uint8_t func_modification;
} _rare_events;
|
||||
|
||||
/* Arbitrary-precision integer used by the float repr/parse machinery
   (David Gay's dtoa).  NOTE(review): field semantics (k/maxwds/sign/wds,
   x[] as the digit array growing past the struct) follow dtoa.c -- confirm
   there before relying on them. */
struct
Bigint {
    struct Bigint *next;        /* freelist link */
    int k, maxwds, sign, wds;
    uint32_t x[1];              /* variable-length digit array */
};

#if defined(Py_USING_MEMORY_DEBUGGER) || _PY_SHORT_FLOAT_REPR == 0

/* No dtoa caching state is kept when short float repr is disabled or a
   memory debugger is in use; keep a placeholder so the struct is non-empty. */
struct _dtoa_state {
    int _not_used;
};

#else // !Py_USING_MEMORY_DEBUGGER && _PY_SHORT_FLOAT_REPR != 0

/* The size of the Bigint freelist */
#define Bigint_Kmax 7

/* The size of the cached powers of 5 array */
#define Bigint_Pow5size 8

#ifndef PRIVATE_MEM
#define PRIVATE_MEM 2304
#endif
#define Bigint_PREALLOC_SIZE \
    ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double))

/* Per-interpreter dtoa caches: powers of 5, a Bigint freelist, and a small
   preallocated arena used before falling back to the heap. */
struct _dtoa_state {
    // p5s is an array of powers of 5 of the form:
    // 5**(2**(i+2)) for 0 <= i < Bigint_Pow5size
    struct Bigint *p5s[Bigint_Pow5size];
    // XXX This should be freed during runtime fini.
    struct Bigint *freelist[Bigint_Kmax+1];
    double preallocated[Bigint_PREALLOC_SIZE];
    double *preallocated_next;   /* next free slot in 'preallocated' */
};

#endif // !Py_USING_MEMORY_DEBUGGER
|
||||
|
||||
// Per-interpreter state for code objects.
struct _py_code_state {
    PyMutex mutex;  // Protects 'constants' — NOTE(review): inferred; confirm.
    // Interned constants from code objects. Used by the free-threaded build.
    struct _Py_hashtable_t *constants;
};
|
||||
|
||||
#define FUNC_VERSION_CACHE_SIZE (1<<12)  /* Must be a power of 2 */

// One entry in the function-version cache (see _py_func_state below).
struct _func_version_cache_item {
    PyFunctionObject *func;
    PyObject *code;
};

// Per-interpreter state for function objects and their version tags.
struct _py_func_state {
#ifdef Py_GIL_DISABLED
    // Protects next_version
    PyMutex mutex;
#endif

    uint32_t next_version;
    // Borrowed references to function and code objects whose
    // func_version % FUNC_VERSION_CACHE_SIZE
    // once was equal to the index in the table.
    // They are cleared when the function or code object is deallocated.
    struct _func_version_cache_item func_version_cache[FUNC_VERSION_CACHE_SIZE];
};
|
||||
|
||||
#include "pycore_dict_state.h" // struct _Py_dict_state
|
||||
#include "pycore_exceptions.h" // struct _Py_exc_state
|
||||
|
||||
|
||||
/****** type state *********/

/* For now we hard-code this to a value for which we are confident
   all the static builtin types will fit (for all builds). */
#define _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES 200
#define _Py_MAX_MANAGED_STATIC_EXT_TYPES 10
#define _Py_MAX_MANAGED_STATIC_TYPES \
    (_Py_MAX_MANAGED_STATIC_BUILTIN_TYPES + _Py_MAX_MANAGED_STATIC_EXT_TYPES)

// Runtime-global (cross-interpreter) state for type objects.
struct _types_runtime_state {
    /* Used to set PyTypeObject.tp_version_tag for core static types. */
    // bpo-42745: next_version_tag remains shared by all interpreters
    // because of static types.
    unsigned int next_version_tag;

    struct {
        struct {
            PyTypeObject *type;
            // Number of interpreters in which this managed static type is
            // currently initialized.
            int64_t interp_count;
        } types[_Py_MAX_MANAGED_STATIC_TYPES];
    } managed_static;
};
|
||||
|
||||
|
||||
// Type attribute lookup cache: speed up attribute and method lookups,
// see _PyType_Lookup().
struct type_cache_entry {
    unsigned int version;  // initialized from type->tp_version_tag
#ifdef Py_GIL_DISABLED
    _PySeqLock sequence;   // Guards concurrent reads/writes of this entry.
#endif
    PyObject *name;        // reference to exactly a str or None
    PyObject *value;       // borrowed reference or NULL
};

#define MCACHE_SIZE_EXP 12

// Fixed-size hash table of attribute-lookup cache entries.
struct type_cache {
    struct type_cache_entry hashtable[1 << MCACHE_SIZE_EXP];
};
|
||||
|
||||
// Per-interpreter portion of a managed static type's state.  The three
// PyObject* fields live here instead of on the (shared) PyTypeObject.
typedef struct {
    PyTypeObject *type;
    int isbuiltin;   // Non-zero for static builtin types (vs. extension types).
    int readying;    // Non-zero while PyType_Ready() is in progress.
    int ready;       // Non-zero once the type is fully initialized.
    // XXX tp_dict can probably be statically allocated,
    // instead of dynamically and stored on the interpreter.
    PyObject *tp_dict;
    PyObject *tp_subclasses;
    /* We never clean up weakrefs for static builtin types since
       they will effectively never get triggered. However, there
       are also some diagnostic uses for the list of weakrefs,
       so we still keep it. */
    PyObject *tp_weaklist;
} managed_static_type_state;
|
||||
|
||||
#define TYPE_VERSION_CACHE_SIZE (1<<12)  /* Must be a power of 2 */

// Per-interpreter state for type objects.
struct types_state {
    /* Used to set PyTypeObject.tp_version_tag.
       It starts at _Py_MAX_GLOBAL_TYPE_VERSION_TAG + 1,
       where all those lower numbers are used for core static types. */
    unsigned int next_version_tag;

    struct type_cache type_cache;

    /* Every static builtin type is initialized for each interpreter
       during its own initialization, including for the main interpreter
       during global runtime initialization. This is done by calling
       _PyStaticType_InitBuiltin().

       The first time a static builtin type is initialized, all the
       normal PyType_Ready() stuff happens. The only difference from
       normal is that there are three PyTypeObject fields holding
       objects which are stored here (on PyInterpreterState) rather
       than in the corresponding PyTypeObject fields. Those are:
       tp_dict (cls.__dict__), tp_subclasses (cls.__subclasses__),
       and tp_weaklist.

       When a subinterpreter is initialized, each static builtin type
       is still initialized, but only the interpreter-specific portion,
       namely those three objects.

       Those objects are stored in the PyInterpreterState.types.builtins
       array, at the index corresponding to each specific static builtin
       type. That index (a size_t value) is stored in the tp_subclasses
       field. For static builtin types, we re-purposed the now-unused
       tp_subclasses to avoid adding another field to PyTypeObject.
       In all other cases tp_subclasses holds a dict like before.
       (The field was previously defined as PyObject*, but is now void*
       to reflect its dual use.)

       The index for each static builtin type isn't statically assigned.
       Instead it is calculated the first time a type is initialized
       (by the main interpreter). The index matches the order in which
       the type was initialized relative to the others. The actual
       value comes from the current value of num_builtins_initialized,
       as each type is initialized for the main interpreter.

       num_builtins_initialized is incremented once for each static
       builtin type. Once initialization is over for a subinterpreter,
       the value will be the same as for all other interpreters. */
    struct {
        size_t num_initialized;
        managed_static_type_state initialized[_Py_MAX_MANAGED_STATIC_BUILTIN_TYPES];
    } builtins;
    /* We apply a similar strategy for managed extension modules. */
    struct {
        size_t num_initialized;
        size_t next_index;
        managed_static_type_state initialized[_Py_MAX_MANAGED_STATIC_EXT_TYPES];
    } for_extensions;
    PyMutex mutex;

    // Borrowed references to type objects whose
    // tp_version_tag % TYPE_VERSION_CACHE_SIZE
    // once was equal to the index in the table.
    // They are cleared when the type object is deallocated.
    PyTypeObject *type_version_cache[TYPE_VERSION_CACHE_SIZE];
};
|
||||
|
||||
// Per-interpreter state backing the warnings module.
struct _warnings_runtime_state {
    /* Both 'filters' and 'onceregistry' can be set in warnings.py;
       get_warnings_attr() will reset these variables accordingly. */
    PyObject *filters;         /* List */
    PyObject *once_registry;   /* Dict */
    PyObject *default_action;  /* String */
    _PyRecursiveMutex lock;
    long filters_version;      // Bumped when filters change, to invalidate caches.
    PyObject *context;
};

// Queue of memory to be freed later, used by the per-interpreter allocator.
struct _Py_mem_interp_free_queue {
    int has_work;            // true if the queue is not empty
    PyMutex mutex;           // protects the queue
    struct llist_node head;  // queue of _mem_work_chunk items
};
|
||||
|
||||
|
||||
/****** Unicode state *********/

// Codec error-handler policies, parsed from the 'errors' argument of
// encode/decode operations.
typedef enum {
    _Py_ERROR_UNKNOWN=0,
    _Py_ERROR_STRICT,
    _Py_ERROR_SURROGATEESCAPE,
    _Py_ERROR_REPLACE,
    _Py_ERROR_IGNORE,
    _Py_ERROR_BACKSLASHREPLACE,
    _Py_ERROR_SURROGATEPASS,
    _Py_ERROR_XMLCHARREFREPLACE,
    _Py_ERROR_OTHER
} _Py_error_handler;

// Runtime-global allocator of _Py_Identifier indices.
struct _Py_unicode_runtime_ids {
    PyMutex mutex;
    // next_index value must be preserved when Py_Initialize()/Py_Finalize()
    // is called multiple times: see _PyUnicode_FromId() implementation.
    Py_ssize_t next_index;
};

// Runtime-global (cross-interpreter) Unicode state.
struct _Py_unicode_runtime_state {
    struct _Py_unicode_runtime_ids ids;
};

/* fs_codec.encoding is initialized to NULL.
   Later, it is set to a non-NULL string by _PyUnicode_InitEncodings(). */
struct _Py_unicode_fs_codec {
    char *encoding;   // Filesystem encoding (encoded to UTF-8)
    int utf8;         // encoding=="utf-8"?
    char *errors;     // Filesystem errors (encoded to UTF-8)
    _Py_error_handler error_handler;
};

// Per-interpreter table of interned identifier strings, indexed by the
// runtime-assigned _Py_Identifier index.
struct _Py_unicode_ids {
    Py_ssize_t size;
    PyObject **array;
};

#include "pycore_ucnhash.h"  // _PyUnicode_Name_CAPI

// Per-interpreter Unicode state.
struct _Py_unicode_state {
    struct _Py_unicode_fs_codec fs_codec;

    // Cached capsule from the unicodedata module — NOTE(review): inferred
    // from the _PyUnicode_Name_CAPI type; confirm.
    _PyUnicode_Name_CAPI *ucnhash_capi;

    // Unicode identifiers (_Py_Identifier): see _PyUnicode_FromId()
    struct _Py_unicode_ids ids;
};
|
||||
|
||||
// Borrowed references to common callables:
struct callable_cache {
    PyObject *isinstance;
    PyObject *len;
    PyObject *list_append;
    PyObject *object__getattribute__;
};

/* Length of array of slotdef pointers used to store slots with the
   same __name__. There should be at most MAX_EQUIV-1 slotdef entries with
   the same __name__, for any __name__. Since that's a static property, it is
   appropriate to declare fixed-size arrays for this. */
#define MAX_EQUIV 10

typedef struct wrapperbase pytype_slotdef;
|
||||
|
||||
|
||||
// Heap objects cached per-interpreter to avoid repeated lookups/creation.
struct _Py_interp_cached_objects {
#ifdef Py_GIL_DISABLED
    PyMutex interned_mutex;  // Protects interned_strings in the free-threaded build.
#endif
    PyObject *interned_strings;

    /* object.__reduce__ */
    PyObject *objreduce;
    PyObject *type_slots_pname;
    pytype_slotdef *type_slots_ptrs[MAX_EQUIV];

    /* TypeVar and related types */
    PyTypeObject *generic_type;
    PyTypeObject *typevar_type;
    PyTypeObject *typevartuple_type;
    PyTypeObject *paramspec_type;
    PyTypeObject *paramspecargs_type;
    PyTypeObject *paramspeckwargs_type;
    PyTypeObject *constevaluator_type;
};

// Statically allocated singleton objects owned by the interpreter.
struct _Py_interp_static_objects {
    struct {
        int _not_used;
        // hamt_empty is here instead of global because of its weakreflist.
        _PyGC_Head_UNUSED _hamt_empty_gc_not_used;
        PyHamtObject hamt_empty;
        PyBaseExceptionObject last_resort_memory_error;
    } singletons;
};
|
||||
|
||||
#include "pycore_instruments.h" // PY_MONITORING_TOOL_IDS
|
||||
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
|
||||
// A min-heap of indices
|
||||
typedef struct _PyIndexHeap {
|
||||
int32_t *values;
|
||||
|
||||
// Number of items stored in values
|
||||
Py_ssize_t size;
|
||||
|
||||
// Maximum number of items that can be stored in values
|
||||
Py_ssize_t capacity;
|
||||
} _PyIndexHeap;
|
||||
|
||||
// An unbounded pool of indices. Indices are allocated starting from 0. They
|
||||
// may be released back to the pool once they are no longer in use.
|
||||
typedef struct _PyIndexPool {
|
||||
PyMutex mutex;
|
||||
|
||||
// Min heap of indices available for allocation
|
||||
_PyIndexHeap free_indices;
|
||||
|
||||
// Next index to allocate if no free indices are available
|
||||
int32_t next_index;
|
||||
|
||||
// Generation counter incremented on thread creation/destruction
|
||||
// Used for TLBC cache invalidation in remote debugging
|
||||
uint32_t tlbc_generation;
|
||||
} _PyIndexPool;
|
||||
|
||||
typedef union _Py_unique_id_entry {
|
||||
// Points to the next free type id, when part of the freelist
|
||||
union _Py_unique_id_entry *next;
|
||||
|
||||
// Stores the object when the id is assigned
|
||||
PyObject *obj;
|
||||
} _Py_unique_id_entry;
|
||||
|
||||
struct _Py_unique_id_pool {
|
||||
PyMutex mutex;
|
||||
|
||||
// combined table of object with allocated unique ids and unallocated ids.
|
||||
_Py_unique_id_entry *table;
|
||||
|
||||
// Next entry to allocate inside 'table' or NULL
|
||||
_Py_unique_id_entry *freelist;
|
||||
|
||||
// size of 'table'
|
||||
Py_ssize_t size;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/* PyInterpreterState holds the global state for one of the runtime's
   interpreters.  Typically the initial (main) interpreter is the only one.

   The PyInterpreterState typedef is in Include/pytypedefs.h.
*/
struct _is {

    /* This struct contains the eval_breaker,
     * which is by far the hottest field in this struct
     * and should be placed at the beginning. */
    struct _ceval_state ceval;

    // unused, kept for ABI compatibility
    void *_malloced;

    // Interpreters are kept in a linked list; see _PyRuntimeState.
    PyInterpreterState *next;

    int64_t id;               // Unique interpreter id.
    Py_ssize_t id_refcount;
    int requires_idref;

    long _whence;             // How this interpreter was created.

    /* Has been initialized to a safe state.

       In order to be effective, this must be set to 0 during or right
       after allocation. */
    int _initialized;
    /* Has been fully initialized via pylifecycle.c. */
    int _ready;
    int finalizing;

    uintptr_t last_restart_version;
    struct pythreads {
        uint64_t next_unique_id;
        /* The linked list of threads, newest first. */
        PyThreadState *head;
        _PyThreadStateImpl *preallocated;
        /* The thread currently executing in the __main__ module, if any. */
        PyThreadState *main;
        /* Used in Modules/_threadmodule.c. */
        Py_ssize_t count;
        /* Support for runtime thread stack size tuning.
           A value of 0 means using the platform's default stack size
           or the size specified by the THREAD_STACK_SIZE macro. */
        /* Used in Python/thread.c. */
        size_t stacksize;
    } threads;

    /* Reference to the _PyRuntime global variable. This field exists
       to not have to pass runtime in addition to tstate to a function.
       Get runtime from tstate: tstate->interp->runtime. */
    _PyRuntimeState *runtime;

    /* Set by Py_EndInterpreter().

       Use _PyInterpreterState_GetFinalizing()
       and _PyInterpreterState_SetFinalizing()
       to access it, don't access it directly. */
    PyThreadState* _finalizing;
    /* The ID of the OS thread in which we are finalizing. */
    unsigned long _finalizing_id;

    struct _gc_runtime_state gc;

    /* The following fields are here to avoid allocation during init.
       The data is exposed through PyInterpreterState pointer fields.
       These fields should not be accessed directly outside of init.

       All other PyInterpreterState pointer fields are populated when
       needed and default to NULL.

       For now there are some exceptions to that rule, which require
       allocation during init. These will be addressed on a case-by-case
       basis. Also see _PyRuntimeState regarding the various mutex fields.
       */

    // Dictionary of the sys module
    PyObject *sysdict;

    // Dictionary of the builtins module
    PyObject *builtins;

    struct _import_state imports;

    /* The per-interpreter GIL, which might not be used. */
    struct _gil_runtime_state _gil;

    uint64_t _code_object_generation;

    /* ---------- IMPORTANT ---------------------------
       The fields above this line are declared as early as
       possible to facilitate out-of-process observability
       tools. */

    struct codecs_state codecs;

    PyConfig config;
    unsigned long feature_flags;

    PyObject *dict;  /* Stores per-interpreter state */

    PyObject *sysdict_copy;
    PyObject *builtins_copy;
    // Initialized to _PyEval_EvalFrameDefault().
    _PyFrameEvalFunction eval_frame;

    PyFunction_WatchCallback func_watchers[FUNC_MAX_WATCHERS];
    // One bit is set for each non-NULL entry in func_watchers
    uint8_t active_func_watchers;

    Py_ssize_t co_extra_user_count;
    freefunc co_extra_freefuncs[MAX_CO_EXTRA_USERS];

    /* cross-interpreter data and utils */
    _PyXI_state_t xi;

#ifdef HAVE_FORK
    PyObject *before_forkers;
    PyObject *after_forkers_parent;
    PyObject *after_forkers_child;
#endif

    struct _warnings_runtime_state warnings;
    struct atexit_state atexit;
    struct _stoptheworld_state stoptheworld;
    struct _qsbr_shared qsbr;

#if defined(Py_GIL_DISABLED)
    struct _mimalloc_interp_state mimalloc;
    struct _brc_state brc;  // biased reference counting state
    struct _Py_unique_id_pool unique_ids;  // object ids for per-thread refcounts
    PyMutex weakref_locks[NUM_WEAKREF_LIST_LOCKS];
    _PyIndexPool tlbc_indices;
#endif
    // Per-interpreter list of tasks, any lingering tasks from thread
    // states gets added here and removed from the corresponding
    // thread state's list.
    struct llist_node asyncio_tasks_head;
    // `asyncio_tasks_lock` is used when tasks are moved
    // from thread's list to interpreter's list.
    PyMutex asyncio_tasks_lock;

    // Per-interpreter state for the obmalloc allocator. For the main
    // interpreter and for all interpreters that don't have their
    // own obmalloc state, this points to the static structure in
    // obmalloc.c obmalloc_state_main. For other interpreters, it is
    // heap allocated by _PyMem_init_obmalloc() and freed when the
    // interpreter structure is freed. In the case of a heap allocated
    // obmalloc state, it is not safe to hold on to or use memory after
    // the interpreter is freed. The obmalloc state corresponding to
    // that allocated memory is gone. See free_obmalloc_arenas() for
    // more comments.
    struct _obmalloc_state *obmalloc;

    PyObject *audit_hooks;
    PyType_WatchCallback type_watchers[TYPE_MAX_WATCHERS];
    PyCode_WatchCallback code_watchers[CODE_MAX_WATCHERS];
    PyContext_WatchCallback context_watchers[CONTEXT_MAX_WATCHERS];
    // One bit is set for each non-NULL entry in code_watchers
    uint8_t active_code_watchers;
    uint8_t active_context_watchers;

    struct _py_object_state object_state;
    struct _Py_unicode_state unicode;
    struct _Py_long_state long_state;
    struct _dtoa_state dtoa;
    struct _py_func_state func_state;
    struct _py_code_state code_state;

    struct _Py_dict_state dict_state;
    struct _Py_exc_state exc_state;
    struct _Py_mem_interp_free_queue mem_free_queue;

    struct ast_state ast;
    struct types_state types;
    struct callable_cache callable_cache;
    PyObject *common_consts[NUM_COMMON_CONSTANTS];
    bool jit;
    struct _PyExecutorObject *executor_list_head;
    struct _PyExecutorObject *executor_deletion_list_head;
    int executor_deletion_list_remaining_capacity;
    size_t trace_run_counter;
    _rare_events rare_events;
    PyDict_WatchCallback builtins_dict_watcher;

    _Py_GlobalMonitors monitors;
    _PyOnceFlag sys_profile_once_flag;
    _PyOnceFlag sys_trace_once_flag;
    Py_ssize_t sys_profiling_threads;  /* Count of threads with c_profilefunc set */
    Py_ssize_t sys_tracing_threads;  /* Count of threads with c_tracefunc set */
    PyObject *monitoring_callables[PY_MONITORING_TOOL_IDS][_PY_MONITORING_EVENTS];
    PyObject *monitoring_tool_names[PY_MONITORING_TOOL_IDS];
    uintptr_t monitoring_tool_versions[PY_MONITORING_TOOL_IDS];

    struct _Py_interp_cached_objects cached_objects;
    struct _Py_interp_static_objects static_objects;

    Py_ssize_t _interactive_src_count;

#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
    uint64_t next_stackref;
    _Py_hashtable_t *open_stackrefs_table;
# ifdef Py_STACKREF_CLOSE_DEBUG
    _Py_hashtable_t *closed_stackrefs_table;
# endif
#endif

    /* the initial PyInterpreterState.threads.head */
    _PyThreadStateImpl _initial_thread;
    // _initial_thread should be the last field of PyInterpreterState.
    // See https://github.com/python/cpython/issues/127117.
};
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* Py_INTERNAL_INTERP_STRUCTS_H */
|
||||
401
extern/include/python/internal/pycore_interpframe.h
vendored
Normal file
401
extern/include/python/internal/pycore_interpframe.h
vendored
Normal file
@@ -0,0 +1,401 @@
|
||||
#ifndef Py_INTERNAL_INTERP_FRAME_H
|
||||
#define Py_INTERNAL_INTERP_FRAME_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_code.h" // _PyCode_CODE()
|
||||
#include "pycore_interpframe_structs.h" // _PyInterpreterFrame
|
||||
#include "pycore_stackref.h" // PyStackRef_AsPyObjectBorrow()
|
||||
#include "pycore_stats.h" // CALL_STAT_INC()
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Index of the last executed instruction in frame IF, as an offset (in code
// units) from the start of the bytecode.
#define _PyInterpreterFrame_LASTI(IF) \
    ((int)((IF)->instr_ptr - _PyFrame_GetBytecode((IF))))

// Return the code object of frame `f` (borrowed: the frame's f_executable
// reference keeps it alive).  The frame must hold a code object.
static inline PyCodeObject *_PyFrame_GetCode(_PyInterpreterFrame *f) {
    assert(!PyStackRef_IsNull(f->f_executable));
    PyObject *executable = PyStackRef_AsPyObjectBorrow(f->f_executable);
    assert(PyCode_Check(executable));
    return (PyCodeObject *)executable;
}
|
||||
|
||||
// Similar to _PyFrame_GetCode(), but return NULL if the frame is invalid or
// freed. Used by dump_frame() in Python/traceback.c. The function uses
// heuristics to detect freed memory, it's not 100% reliable.
static inline PyCodeObject*
_PyFrame_SafeGetCode(_PyInterpreterFrame *f)
{
    // globals and builtins may be NULL on a legit frame, but it's unlikely.
    // It's more likely that it's a sign of an invalid frame.
    if (f->f_globals == NULL || f->f_builtins == NULL) {
        return NULL;
    }

    if (PyStackRef_IsNull(f->f_executable)) {
        return NULL;
    }
    // Copy the raw bits of the stackref out via memcpy so the freed-pointer
    // heuristic can inspect them without interpreting the stackref.
    void *ptr;
    memcpy(&ptr, &f->f_executable, sizeof(f->f_executable));
    if (_PyMem_IsPtrFreed(ptr)) {
        return NULL;
    }
    PyObject *executable = PyStackRef_AsPyObjectBorrow(f->f_executable);
    if (_PyObject_IsFreed(executable)) {
        return NULL;
    }
    if (!PyCode_Check(executable)) {
        return NULL;
    }
    return (PyCodeObject *)executable;
}
|
||||
|
||||
// Return the bytecode that frame `f` is executing.  With the free-threaded
// build this is the thread-local bytecode copy selected by f->tlbc_index;
// otherwise it is the code object's own bytecode.
static inline _Py_CODEUNIT *
_PyFrame_GetBytecode(_PyInterpreterFrame *f)
{
#ifdef Py_GIL_DISABLED
    PyCodeObject *co = _PyFrame_GetCode(f);
    _PyCodeArray *tlbc = _PyCode_GetTLBCArray(co);
    assert(f->tlbc_index >= 0 && f->tlbc_index < tlbc->size);
    return (_Py_CODEUNIT *)tlbc->entries[f->tlbc_index];
#else
    return _PyCode_CODE(_PyFrame_GetCode(f));
#endif
}
|
||||
|
||||
// Similar to PyUnstable_InterpreterFrame_GetLasti(), but return -1 if the
// frame is invalid or freed. Used by dump_frame() in Python/traceback.c. The
// function uses heuristics to detect freed memory, it's not 100% reliable.
static inline int
_PyFrame_SafeGetLasti(struct _PyInterpreterFrame *f)
{
    // Code based on _PyFrame_GetBytecode() but replace _PyFrame_GetCode()
    // with _PyFrame_SafeGetCode().
    PyCodeObject *co = _PyFrame_SafeGetCode(f);
    if (co == NULL) {
        return -1;
    }

    _Py_CODEUNIT *bytecode;
#ifdef Py_GIL_DISABLED
    _PyCodeArray *tlbc = _PyCode_GetTLBCArray(co);
    assert(f->tlbc_index >= 0 && f->tlbc_index < tlbc->size);
    bytecode = (_Py_CODEUNIT *)tlbc->entries[f->tlbc_index];
#else
    bytecode = _PyCode_CODE(co);
#endif

    // Byte offset of the last instruction within the bytecode.
    return (int)(f->instr_ptr - bytecode) * sizeof(_Py_CODEUNIT);
}
|
||||
|
||||
static inline PyFunctionObject *_PyFrame_GetFunction(_PyInterpreterFrame *f) {
|
||||
PyObject *func = PyStackRef_AsPyObjectBorrow(f->f_funcobj);
|
||||
assert(PyFunction_Check(func));
|
||||
return (PyFunctionObject *)func;
|
||||
}
|
||||
|
||||
static inline _PyStackRef *_PyFrame_Stackbase(_PyInterpreterFrame *f) {
|
||||
return (f->localsplus + _PyFrame_GetCode(f)->co_nlocalsplus);
|
||||
}
|
||||
|
||||
static inline _PyStackRef _PyFrame_StackPeek(_PyInterpreterFrame *f) {
|
||||
assert(f->stackpointer > f->localsplus + _PyFrame_GetCode(f)->co_nlocalsplus);
|
||||
assert(!PyStackRef_IsNull(f->stackpointer[-1]));
|
||||
return f->stackpointer[-1];
|
||||
}
|
||||
|
||||
static inline _PyStackRef _PyFrame_StackPop(_PyInterpreterFrame *f) {
|
||||
assert(f->stackpointer > f->localsplus + _PyFrame_GetCode(f)->co_nlocalsplus);
|
||||
f->stackpointer--;
|
||||
return *f->stackpointer;
|
||||
}
|
||||
|
||||
static inline void _PyFrame_StackPush(_PyInterpreterFrame *f, _PyStackRef value) {
|
||||
*f->stackpointer = value;
|
||||
f->stackpointer++;
|
||||
}
|
||||
|
||||
// Number of PyObject*-sized slots occupied by the fixed _PyInterpreterFrame
// header (rounded up).
#define FRAME_SPECIALS_SIZE ((int)((sizeof(_PyInterpreterFrame)-1)/sizeof(PyObject *)))

// Number of locals+stack slots a frame for `code` needs, excluding the
// fixed frame header.
static inline int
_PyFrame_NumSlotsForCodeObject(PyCodeObject *code)
{
    /* This function needs to remain in sync with the calculation of
     * co_framesize in Tools/build/deepfreeze.py */
    assert(code->co_framesize >= FRAME_SPECIALS_SIZE);
    return code->co_framesize - FRAME_SPECIALS_SIZE;
}
|
||||
|
||||
// Copy frame `src` into `dest`, making the contained stackrefs heap-safe.
// Used when moving a frame off the data stack (e.g. into a generator).
static inline void _PyFrame_Copy(_PyInterpreterFrame *src, _PyInterpreterFrame *dest)
{
    dest->f_executable = PyStackRef_MakeHeapSafe(src->f_executable);
    // Don't leave a dangling pointer to the old frame when creating generators
    // and coroutines:
    dest->previous = NULL;
    dest->f_funcobj = PyStackRef_MakeHeapSafe(src->f_funcobj);
    dest->f_globals = src->f_globals;
    dest->f_builtins = src->f_builtins;
    dest->f_locals = src->f_locals;
    dest->frame_obj = src->frame_obj;
    dest->instr_ptr = src->instr_ptr;
#ifdef Py_GIL_DISABLED
    dest->tlbc_index = src->tlbc_index;
#endif
    assert(src->stackpointer != NULL);
    // Copy the locals and the evaluation stack, preserving the stack depth.
    int stacktop = (int)(src->stackpointer - src->localsplus);
    assert(stacktop >= 0);
    dest->stackpointer = dest->localsplus + stacktop;
    for (int i = 0; i < stacktop; i++) {
        dest->localsplus[i] = PyStackRef_MakeHeapSafe(src->localsplus[i]);
    }
}
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
static inline void
|
||||
_PyFrame_InitializeTLBC(PyThreadState *tstate, _PyInterpreterFrame *frame,
|
||||
PyCodeObject *code)
|
||||
{
|
||||
_Py_CODEUNIT *tlbc = _PyCode_GetTLBCFast(tstate, code);
|
||||
if (tlbc == NULL) {
|
||||
// No thread-local bytecode exists for this thread yet; use the main
|
||||
// thread's copy, deferring thread-local bytecode creation to the
|
||||
// execution of RESUME.
|
||||
frame->instr_ptr = _PyCode_CODE(code);
|
||||
frame->tlbc_index = 0;
|
||||
}
|
||||
else {
|
||||
frame->instr_ptr = tlbc;
|
||||
frame->tlbc_index = ((_PyThreadStateImpl *)tstate)->tlbc_index;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Consumes reference to func and locals.
   Does not initialize frame->previous, which happens
   when frame is linked into the frame stack.
*/
static inline void
_PyFrame_Initialize(
    PyThreadState *tstate, _PyInterpreterFrame *frame, _PyStackRef func,
    PyObject *locals, PyCodeObject *code, int null_locals_from, _PyInterpreterFrame *previous)
{
    frame->previous = previous;
    frame->f_funcobj = func;
    frame->f_executable = PyStackRef_FromPyObjectNew(code);
    PyFunctionObject *func_obj = (PyFunctionObject *)PyStackRef_AsPyObjectBorrow(func);
    frame->f_builtins = func_obj->func_builtins;
    frame->f_globals = func_obj->func_globals;
    frame->f_locals = locals;
    // Stack starts empty: stack pointer sits just past the locals.
    frame->stackpointer = frame->localsplus + code->co_nlocalsplus;
    frame->frame_obj = NULL;
#ifdef Py_GIL_DISABLED
    _PyFrame_InitializeTLBC(tstate, frame, code);
#else
    (void)tstate;
    frame->instr_ptr = _PyCode_CODE(code);
#endif
    frame->return_offset = 0;
    frame->owner = FRAME_OWNED_BY_THREAD;
    frame->visited = 0;
#ifdef Py_DEBUG
    frame->lltrace = 0;
#endif

    // Locals at indices [null_locals_from, co_nlocalsplus) start out unbound;
    // slots below null_locals_from are expected to be filled by the caller.
    for (int i = null_locals_from; i < code->co_nlocalsplus; i++) {
        frame->localsplus[i] = PyStackRef_NULL;
    }
}
|
||||
|
||||
/* Gets the pointer to the locals array
|
||||
* that precedes this frame.
|
||||
*/
|
||||
static inline _PyStackRef*
|
||||
_PyFrame_GetLocalsArray(_PyInterpreterFrame *frame)
|
||||
{
|
||||
return frame->localsplus;
|
||||
}
|
||||
|
||||
// Fetches the stack pointer, and (on debug builds) sets stackpointer to NULL.
|
||||
// Having stackpointer == NULL makes it easier to catch missing stack pointer
|
||||
// spills/restores (which could expose invalid values to the GC) using asserts.
|
||||
static inline _PyStackRef*
|
||||
_PyFrame_GetStackPointer(_PyInterpreterFrame *frame)
|
||||
{
|
||||
assert(frame->stackpointer != NULL);
|
||||
_PyStackRef *sp = frame->stackpointer;
|
||||
#ifndef NDEBUG
|
||||
frame->stackpointer = NULL;
|
||||
#endif
|
||||
return sp;
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyFrame_SetStackPointer(_PyInterpreterFrame *frame, _PyStackRef *stack_pointer)
|
||||
{
|
||||
assert(frame->stackpointer == NULL);
|
||||
frame->stackpointer = stack_pointer;
|
||||
}
|
||||
|
||||
/* Determine whether a frame is incomplete.
 * A frame is incomplete if it is part way through
 * creating cell objects or a generator or coroutine.
 *
 * Frames on the frame stack are incomplete until the
 * first RESUME instruction.
 * Frames owned by a generator are always complete.
 *
 * NOTE: We allow racy accesses to the instruction pointer
 * from other threads for sys._current_frames() and similar APIs.
 */
static inline bool _Py_NO_SANITIZE_THREAD
_PyFrame_IsIncomplete(_PyInterpreterFrame *frame)
{
    // Interpreter-owned (and higher) frames are never considered complete.
    if (frame->owner >= FRAME_OWNED_BY_INTERPRETER) {
        return true;
    }
    // Otherwise, incomplete iff not generator-owned and execution has not
    // yet reached the first traceable instruction (the first RESUME).
    return frame->owner != FRAME_OWNED_BY_GENERATOR &&
           frame->instr_ptr < _PyFrame_GetBytecode(frame) +
                                  _PyFrame_GetCode(frame)->_co_firsttraceable;
}
|
||||
|
||||
static inline _PyInterpreterFrame *
|
||||
_PyFrame_GetFirstComplete(_PyInterpreterFrame *frame)
|
||||
{
|
||||
while (frame && _PyFrame_IsIncomplete(frame)) {
|
||||
frame = frame->previous;
|
||||
}
|
||||
return frame;
|
||||
}
|
||||
|
||||
static inline _PyInterpreterFrame *
|
||||
_PyThreadState_GetFrame(PyThreadState *tstate)
|
||||
{
|
||||
return _PyFrame_GetFirstComplete(tstate->current_frame);
|
||||
}
|
||||
|
||||
/* For use by _PyFrame_GetFrameObject
   Do not call directly. */
PyFrameObject *
_PyFrame_MakeAndSetFrameObject(_PyInterpreterFrame *frame);

/* Gets the PyFrameObject for this frame, lazily
 * creating it if necessary.
 * Returns a borrowed reference */
static inline PyFrameObject *
_PyFrame_GetFrameObject(_PyInterpreterFrame *frame)
{

    assert(!_PyFrame_IsIncomplete(frame));
    PyFrameObject *res = frame->frame_obj;
    if (res != NULL) {
        return res;
    }
    return _PyFrame_MakeAndSetFrameObject(frame);
}

// Clear the frame's local variable slots.
void
_PyFrame_ClearLocals(_PyInterpreterFrame *frame);

/* Clears all references in the frame.
 * If take is non-zero, then the _PyInterpreterFrame frame
 * may be transferred to the frame object it references
 * instead of being cleared. Either way
 * the caller no longer owns the references
 * in the frame.
 * take should be set to 1 for heap allocated
 * frames like the ones in generators and coroutines.
 */
void
_PyFrame_ClearExceptCode(_PyInterpreterFrame * frame);

// GC traversal of the frame's references.
int
_PyFrame_Traverse(_PyInterpreterFrame *frame, visitproc visit, void *arg);

bool
_PyFrame_HasHiddenLocals(_PyInterpreterFrame *frame);

PyObject *
_PyFrame_GetLocals(_PyInterpreterFrame *frame);

// True if `tstate`'s data stack has room for `size` more slots.  Either both
// datastack_top and datastack_limit are set, or neither is.
static inline bool
_PyThreadState_HasStackSpace(PyThreadState *tstate, int size)
{
    assert(
        (tstate->datastack_top == NULL && tstate->datastack_limit == NULL)
        ||
        (tstate->datastack_top != NULL && tstate->datastack_limit != NULL)
    );
    return tstate->datastack_top != NULL &&
        size < tstate->datastack_limit - tstate->datastack_top;
}
|
||||
|
||||
extern _PyInterpreterFrame *
|
||||
_PyThreadState_PushFrame(PyThreadState *tstate, size_t size);
|
||||
|
||||
PyAPI_FUNC(void) _PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame *frame);
|
||||
|
||||
/* Pushes a frame without checking for space.
|
||||
* Must be guarded by _PyThreadState_HasStackSpace()
|
||||
* Consumes reference to func. */
|
||||
static inline _PyInterpreterFrame *
_PyFrame_PushUnchecked(PyThreadState *tstate, _PyStackRef func, int null_locals_from, _PyInterpreterFrame * previous)
{
    CALL_STAT_INC(frames_pushed);
    // Borrow the function object only to reach its code object; the
    // `func` stackref itself is consumed by _PyFrame_Initialize below.
    PyFunctionObject *func_obj = (PyFunctionObject *)PyStackRef_AsPyObjectBorrow(func);
    PyCodeObject *code = (PyCodeObject *)func_obj->func_code;
    // Carve the new frame out of the thread's data stack by bumping the
    // top pointer. No bounds check here: the caller must have verified
    // _PyThreadState_HasStackSpace() first (see comment above).
    _PyInterpreterFrame *new_frame = (_PyInterpreterFrame *)tstate->datastack_top;
    tstate->datastack_top += code->co_framesize;
    assert(tstate->datastack_top < tstate->datastack_limit);
    _PyFrame_Initialize(tstate, new_frame, func, NULL, code, null_locals_from,
                        previous);
    return new_frame;
}
|
||||
|
||||
/* Pushes a trampoline frame without checking for space.
|
||||
* Must be guarded by _PyThreadState_HasStackSpace() */
|
||||
static inline _PyInterpreterFrame *
_PyFrame_PushTrampolineUnchecked(PyThreadState *tstate, PyCodeObject *code, int stackdepth, _PyInterpreterFrame * previous)
{
    CALL_STAT_INC(frames_pushed);
    // Carve the frame out of the thread's data stack; the caller must
    // have checked _PyThreadState_HasStackSpace() first (see comment above).
    _PyInterpreterFrame *frame = (_PyInterpreterFrame *)tstate->datastack_top;
    tstate->datastack_top += code->co_framesize;
    assert(tstate->datastack_top < tstate->datastack_limit);
    frame->previous = previous;
    // Trampoline frames carry no function object, only a code object.
    frame->f_funcobj = PyStackRef_None;
    frame->f_executable = PyStackRef_FromPyObjectNew(code);
#ifdef Py_DEBUG
    // Debug-only: null these so any stale read is detectable.
    frame->f_builtins = NULL;
    frame->f_globals = NULL;
#endif
    frame->f_locals = NULL;
    assert(stackdepth <= code->co_stacksize);
    // Pre-position the value-stack pointer `stackdepth` slots above the
    // locals region.
    frame->stackpointer = frame->localsplus + code->co_nlocalsplus + stackdepth;
    frame->frame_obj = NULL;
#ifdef Py_GIL_DISABLED
    // Free-threaded build: set instr_ptr via the thread-local bytecode copy.
    _PyFrame_InitializeTLBC(tstate, frame, code);
#else
    frame->instr_ptr = _PyCode_CODE(code);
#endif
    frame->owner = FRAME_OWNED_BY_THREAD;
    frame->visited = 0;
#ifdef Py_DEBUG
    frame->lltrace = 0;
#endif
    frame->return_offset = 0;
    return frame;
}
|
||||
|
||||
PyAPI_FUNC(_PyInterpreterFrame *)
|
||||
_PyEvalFramePushAndInit(PyThreadState *tstate, _PyStackRef func,
|
||||
PyObject *locals, _PyStackRef const *args,
|
||||
size_t argcount, PyObject *kwnames,
|
||||
_PyInterpreterFrame *previous);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // !Py_INTERNAL_INTERP_FRAME_H
|
||||
95
extern/include/python/internal/pycore_interpframe_structs.h
vendored
Normal file
95
extern/include/python/internal/pycore_interpframe_structs.h
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
/* Structures used by pycore_debug_offsets.h.
|
||||
*
|
||||
* See InternalDocs/frames.md for an explanation of the frame stack
|
||||
* including explanation of the PyFrameObject and _PyInterpreterFrame
|
||||
* structs.
|
||||
*/
|
||||
|
||||
#ifndef Py_INTERNAL_INTERP_FRAME_STRUCTS_H
|
||||
#define Py_INTERNAL_INTERP_FRAME_STRUCTS_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_structs.h" // _PyStackRef
|
||||
#include "pycore_typedefs.h" // _PyInterpreterFrame
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Identifies what owns a _PyInterpreterFrame's storage (stored in the
// frame's `owner` field). FRAME_OWNED_BY_THREAD is used for frames
// allocated on the thread's data stack (see _PyFrame_PushTrampolineUnchecked
// in pycore_interpframe.h); the other values' exact lifecycles are
// managed elsewhere in the runtime.
enum _frameowner {
    FRAME_OWNED_BY_THREAD = 0,
    FRAME_OWNED_BY_GENERATOR = 1,
    FRAME_OWNED_BY_FRAME_OBJECT = 2,
    FRAME_OWNED_BY_INTERPRETER = 3,
    FRAME_OWNED_BY_CSTACK = 4,
};
|
||||
|
||||
// An interpreter (evaluation) frame. Instances live either on a thread's
// data stack, inside a generator/coroutine, or on the C stack; several
// fields are only meaningful in some of those states (see per-field notes).
struct _PyInterpreterFrame {
    _PyStackRef f_executable; /* Deferred or strong reference (code object or None) */
    struct _PyInterpreterFrame *previous;
    _PyStackRef f_funcobj; /* Deferred or strong reference. Only valid if not on C stack */
    PyObject *f_globals; /* Borrowed reference. Only valid if not on C stack */
    PyObject *f_builtins; /* Borrowed reference. Only valid if not on C stack */
    PyObject *f_locals; /* Strong reference, may be NULL. Only valid if not on C stack */
    PyFrameObject *frame_obj; /* Strong reference, may be NULL. Only valid if not on C stack */
    _Py_CODEUNIT *instr_ptr; /* Instruction currently executing (or about to begin) */
    _PyStackRef *stackpointer; /* Points just past the top of the value stack */
#ifdef Py_GIL_DISABLED
    /* Index of thread-local bytecode containing instr_ptr. */
    int32_t tlbc_index;
#endif
    uint16_t return_offset; /* Only relevant during a function call */
    char owner; /* One of the enum _frameowner values */
#ifdef Py_DEBUG
    uint8_t visited:1;
    uint8_t lltrace:7;
#else
    uint8_t visited;
#endif
    /* Locals and stack: variable-length trailing storage, sized by the
       code object's co_framesize when the frame is allocated. */
    _PyStackRef localsplus[1];
};
|
||||
|
||||
|
||||
/* _PyGenObject_HEAD defines the initial segment of generator
|
||||
and coroutine objects. */
|
||||
#define _PyGenObject_HEAD(prefix) \
    PyObject_HEAD \
    /* List of weak reference. */ \
    PyObject *prefix##_weakreflist; \
    /* Name of the generator. */ \
    PyObject *prefix##_name; \
    /* Qualified name of the generator. */ \
    PyObject *prefix##_qualname; \
    _PyErr_StackItem prefix##_exc_state; \
    PyObject *prefix##_origin_or_finalizer; \
    char prefix##_hooks_inited; \
    char prefix##_closed; \
    char prefix##_running_async; \
    /* The frame */ \
    int8_t prefix##_frame_state; \
    _PyInterpreterFrame prefix##_iframe; \

// Generator object layout.
struct _PyGenObject {
    /* The gi_ prefix is intended to remind of generator-iterator. */
    _PyGenObject_HEAD(gi)
};

// Coroutine object layout (cr_ field prefix).
struct _PyCoroObject {
    _PyGenObject_HEAD(cr)
};

// Async generator object layout (ag_ field prefix).
struct _PyAsyncGenObject {
    _PyGenObject_HEAD(ag)
};

// The macro exists only to define the three structs above.
#undef _PyGenObject_HEAD
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // !Py_INTERNAL_INTERP_FRAME_STRUCTS_H
|
||||
26
extern/include/python/internal/pycore_interpolation.h
vendored
Normal file
26
extern/include/python/internal/pycore_interpolation.h
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
#ifndef Py_INTERNAL_INTERPOLATION_H
|
||||
#define Py_INTERNAL_INTERPOLATION_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
extern PyTypeObject _PyInterpolation_Type;
|
||||
|
||||
#define _PyInterpolation_CheckExact(op) Py_IS_TYPE((op), &_PyInterpolation_Type)
|
||||
|
||||
PyAPI_FUNC(PyObject *) _PyInterpolation_Build(PyObject *value, PyObject *str,
|
||||
int conversion, PyObject *format_spec);
|
||||
|
||||
extern PyStatus _PyInterpolation_InitTypes(PyInterpreterState *interp);
|
||||
extern PyObject *_PyInterpolation_GetValueRef(PyObject *interpolation);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
51
extern/include/python/internal/pycore_intrinsics.h
vendored
Normal file
51
extern/include/python/internal/pycore_intrinsics.h
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
#ifndef Py_INTERNAL_INTRINSIC_H
|
||||
#define Py_INTERNAL_INTRINSIC_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
/* Unary Functions: */
|
||||
#define INTRINSIC_1_INVALID 0
|
||||
#define INTRINSIC_PRINT 1
|
||||
#define INTRINSIC_IMPORT_STAR 2
|
||||
#define INTRINSIC_STOPITERATION_ERROR 3
|
||||
#define INTRINSIC_ASYNC_GEN_WRAP 4
|
||||
#define INTRINSIC_UNARY_POSITIVE 5
|
||||
#define INTRINSIC_LIST_TO_TUPLE 6
|
||||
#define INTRINSIC_TYPEVAR 7
|
||||
#define INTRINSIC_PARAMSPEC 8
|
||||
#define INTRINSIC_TYPEVARTUPLE 9
|
||||
#define INTRINSIC_SUBSCRIPT_GENERIC 10
|
||||
#define INTRINSIC_TYPEALIAS 11
|
||||
|
||||
#define MAX_INTRINSIC_1 11
|
||||
|
||||
|
||||
/* Binary Functions: */
|
||||
#define INTRINSIC_2_INVALID 0
|
||||
#define INTRINSIC_PREP_RERAISE_STAR 1
|
||||
#define INTRINSIC_TYPEVAR_WITH_BOUND 2
|
||||
#define INTRINSIC_TYPEVAR_WITH_CONSTRAINTS 3
|
||||
#define INTRINSIC_SET_FUNCTION_TYPE_PARAMS 4
|
||||
#define INTRINSIC_SET_TYPEPARAM_DEFAULT 5
|
||||
|
||||
#define MAX_INTRINSIC_2 5
|
||||
|
||||
typedef PyObject *(*intrinsic_func1)(PyThreadState* tstate, PyObject *value);
|
||||
typedef PyObject *(*intrinsic_func2)(PyThreadState* tstate, PyObject *value1, PyObject *value2);
|
||||
|
||||
typedef struct {
|
||||
intrinsic_func1 func;
|
||||
const char *name;
|
||||
} intrinsic_func1_info;
|
||||
|
||||
typedef struct {
|
||||
intrinsic_func2 func;
|
||||
const char *name;
|
||||
} intrinsic_func2_info;
|
||||
|
||||
PyAPI_DATA(const intrinsic_func1_info) _PyIntrinsics_UnaryFunctions[];
|
||||
PyAPI_DATA(const intrinsic_func2_info) _PyIntrinsics_BinaryFunctions[];
|
||||
|
||||
#endif // !Py_INTERNAL_INTRINSIC_H
|
||||
29
extern/include/python/internal/pycore_jit.h
vendored
Normal file
29
extern/include/python/internal/pycore_jit.h
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
#ifndef Py_INTERNAL_JIT_H
|
||||
#define Py_INTERNAL_JIT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "pycore_interp.h"
|
||||
#include "pycore_optimizer.h"
|
||||
#include "pycore_stackref.h"
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#ifdef _Py_JIT
|
||||
|
||||
typedef _Py_CODEUNIT *(*jit_func)(_PyInterpreterFrame *frame, _PyStackRef *stack_pointer, PyThreadState *tstate);
|
||||
|
||||
int _PyJIT_Compile(_PyExecutorObject *executor, const _PyUOpInstruction *trace, size_t length);
|
||||
void _PyJIT_Free(_PyExecutorObject *executor);
|
||||
|
||||
#endif // _Py_JIT
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // !Py_INTERNAL_JIT_H
|
||||
81
extern/include/python/internal/pycore_list.h
vendored
Normal file
81
extern/include/python/internal/pycore_list.h
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
#ifndef Py_INTERNAL_LIST_H
|
||||
#define Py_INTERNAL_LIST_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
#include "pycore_stackref.h"
|
||||
#endif
|
||||
|
||||
PyAPI_FUNC(PyObject*) _PyList_Extend(PyListObject *, PyObject *);
|
||||
PyAPI_FUNC(PyObject) *_PyList_SliceSubscript(PyObject*, PyObject*);
|
||||
extern void _PyList_DebugMallocStats(FILE *out);
|
||||
// _PyList_GetItemRef should be used only when the object is known as a list
|
||||
// because it doesn't raise TypeError when the object is not a list, whereas PyList_GetItemRef does.
|
||||
extern PyObject* _PyList_GetItemRef(PyListObject *, Py_ssize_t i);
|
||||
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
// Returns -1 in case of races with other threads.
|
||||
extern int _PyList_GetItemRefNoLock(PyListObject *, Py_ssize_t, _PyStackRef *);
|
||||
#endif
|
||||
|
||||
#define _PyList_ITEMS(op) _Py_RVALUE(_PyList_CAST(op)->ob_item)
|
||||
|
||||
PyAPI_FUNC(int)
|
||||
_PyList_AppendTakeRefListResize(PyListObject *self, PyObject *newitem);
|
||||
|
||||
// In free-threaded build: self should be locked by the caller, if it should be thread-safe.
|
||||
static inline int
_PyList_AppendTakeRef(PyListObject *self, PyObject *newitem)
{
    // Append `newitem` to `self`, taking ownership of the reference to
    // `newitem` (per the function's TakeRef naming).
    // Returns 0 on success, or the result of the resize slow path.
    assert(self != NULL && newitem != NULL);
    assert(PyList_Check(self));
    Py_ssize_t len = Py_SIZE(self);
    Py_ssize_t allocated = self->allocated;
    assert((size_t)len + 1 < PY_SSIZE_T_MAX);
    if (allocated > len) {
        // Fast path: spare capacity exists, just store and bump the size.
#ifdef Py_GIL_DISABLED
        // Release store — presumably so lock-free readers see the item
        // before the size update makes it reachable; confirm against
        // listobject.c readers.
        _Py_atomic_store_ptr_release(&self->ob_item[len], newitem);
#else
        PyList_SET_ITEM(self, len, newitem);
#endif
        Py_SET_SIZE(self, len + 1);
        return 0;
    }
    // Slow path: grow the list and append.
    return _PyList_AppendTakeRefListResize(self, newitem);
}
|
||||
|
||||
// Repeat the bytes of a buffer in place
|
||||
static inline void
|
||||
_Py_memory_repeat(char* dest, Py_ssize_t len_dest, Py_ssize_t len_src)
|
||||
{
|
||||
assert(len_src > 0);
|
||||
Py_ssize_t copied = len_src;
|
||||
while (copied < len_dest) {
|
||||
Py_ssize_t bytes_to_copy = Py_MIN(copied, len_dest - copied);
|
||||
memcpy(dest + copied, dest, (size_t)bytes_to_copy);
|
||||
copied += bytes_to_copy;
|
||||
}
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
PyObject_HEAD
|
||||
Py_ssize_t it_index;
|
||||
PyListObject *it_seq; /* Set to NULL when iterator is exhausted */
|
||||
} _PyListIterObject;
|
||||
|
||||
union _PyStackRef;
|
||||
|
||||
PyAPI_FUNC(PyObject *)_PyList_FromStackRefStealOnSuccess(const union _PyStackRef *src, Py_ssize_t n);
|
||||
PyAPI_FUNC(PyObject *)_PyList_AsTupleAndClear(PyListObject *v);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_LIST_H */
|
||||
106
extern/include/python/internal/pycore_llist.h
vendored
Normal file
106
extern/include/python/internal/pycore_llist.h
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
// A doubly-linked list that can be embedded in a struct.
|
||||
//
|
||||
// Usage:
|
||||
// struct llist_node head = LLIST_INIT(head);
|
||||
// typedef struct {
|
||||
// ...
|
||||
// struct llist_node node;
|
||||
// ...
|
||||
// } MyObj;
|
||||
//
|
||||
// llist_insert_tail(&head, &obj->node);
|
||||
// llist_remove(&obj->node);
|
||||
//
|
||||
// struct llist_node *node;
|
||||
// llist_for_each(node, &head) {
|
||||
// MyObj *obj = llist_data(node, MyObj, node);
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
|
||||
#ifndef Py_INTERNAL_LLIST_H
|
||||
#define Py_INTERNAL_LLIST_H
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "Py_BUILD_CORE must be defined to include this header"
|
||||
#endif
|
||||
|
||||
// Intrusive doubly-linked list node: embed one inside the owning struct
// and recover the owner with llist_data(). A list head is a node whose
// next/prev point at itself when the list is empty.
struct llist_node {
    struct llist_node *next;
    struct llist_node *prev;
};
|
||||
|
||||
// Get the struct containing a node.
|
||||
#define llist_data(node, type, member) (_Py_CONTAINER_OF(node, type, member))
|
||||
|
||||
// Iterate over a list.
|
||||
#define llist_for_each(node, head) \
|
||||
for (node = (head)->next; node != (head); node = node->next)
|
||||
|
||||
// Iterate over a list, but allow removal of the current node.
|
||||
#define llist_for_each_safe(node, head) \
|
||||
for (struct llist_node *_next = (node = (head)->next, node->next); \
|
||||
node != (head); node = _next, _next = node->next)
|
||||
|
||||
#define LLIST_INIT(head) { &head, &head }
|
||||
|
||||
static inline void
|
||||
llist_init(struct llist_node *head)
|
||||
{
|
||||
head->next = head;
|
||||
head->prev = head;
|
||||
}
|
||||
|
||||
// Returns 1 if the list is empty, 0 otherwise.
|
||||
static inline int
|
||||
llist_empty(struct llist_node *head)
|
||||
{
|
||||
return head->next == head;
|
||||
}
|
||||
|
||||
// Appends to the tail of the list.
|
||||
static inline void
|
||||
llist_insert_tail(struct llist_node *head, struct llist_node *node)
|
||||
{
|
||||
node->prev = head->prev;
|
||||
node->next = head;
|
||||
head->prev->next = node;
|
||||
head->prev = node;
|
||||
}
|
||||
|
||||
// Remove a node from the list.
|
||||
static inline void
|
||||
llist_remove(struct llist_node *node)
|
||||
{
|
||||
struct llist_node *prev = node->prev;
|
||||
struct llist_node *next = node->next;
|
||||
prev->next = next;
|
||||
next->prev = prev;
|
||||
node->prev = NULL;
|
||||
node->next = NULL;
|
||||
}
|
||||
|
||||
// Append all nodes from head2 onto head1. head2 is left empty.
|
||||
static inline void
llist_concat(struct llist_node *head1, struct llist_node *head2)
{
    if (!llist_empty(head2)) {
        // Splice head2's chain in after head1's current tail.
        head1->prev->next = head2->next;
        head2->next->prev = head1->prev;

        // head2's old tail becomes head1's new tail.
        head1->prev = head2->prev;
        head2->prev->next = head1;
        // Leave head2 as a valid empty list.
        llist_init(head2);
    }
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_LLIST_H */
|
||||
236
extern/include/python/internal/pycore_lock.h
vendored
Normal file
236
extern/include/python/internal/pycore_lock.h
vendored
Normal file
@@ -0,0 +1,236 @@
|
||||
// Lightweight locks and other synchronization mechanisms.
|
||||
//
|
||||
// These implementations are based on WebKit's WTF::Lock. See
|
||||
// https://webkit.org/blog/6161/locking-in-webkit/ for a description of the
|
||||
// design.
|
||||
#ifndef Py_INTERNAL_LOCK_H
|
||||
#define Py_INTERNAL_LOCK_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
//_Py_UNLOCKED is defined as 0 and _Py_LOCKED as 1 in Include/cpython/lock.h
|
||||
#define _Py_HAS_PARKED 2
|
||||
#define _Py_ONCE_INITIALIZED 4
|
||||
|
||||
static inline int
PyMutex_LockFast(PyMutex *m)
{
    // Uncontended fast path: a single CAS from _Py_UNLOCKED to _Py_LOCKED.
    // Returns nonzero on success; zero means the lock is contended and the
    // caller must fall back to a slow path such as _PyMutex_LockTimed().
    uint8_t expected = _Py_UNLOCKED;
    uint8_t *lock_bits = &m->_bits;
    return _Py_atomic_compare_exchange_uint8(lock_bits, &expected, _Py_LOCKED);
}
|
||||
|
||||
// Re-initializes the mutex after a fork to the unlocked state.
|
||||
static inline void
_PyMutex_at_fork_reinit(PyMutex *m)
{
    // Zeroing restores _Py_UNLOCKED (0) and clears any parked-waiter bits;
    // any waiters belonged to threads that do not survive the fork.
    memset(m, 0, sizeof(*m));
}
|
||||
|
||||
typedef enum _PyLockFlags {
|
||||
// Do not detach/release the GIL when waiting on the lock.
|
||||
_Py_LOCK_DONT_DETACH = 0,
|
||||
|
||||
// Detach/release the GIL while waiting on the lock.
|
||||
_PY_LOCK_DETACH = 1,
|
||||
|
||||
// Handle signals if interrupted while waiting on the lock.
|
||||
_PY_LOCK_HANDLE_SIGNALS = 2,
|
||||
} _PyLockFlags;
|
||||
|
||||
// Lock a mutex with an optional timeout and additional options. See
|
||||
// _PyLockFlags for details.
|
||||
extern PyAPI_FUNC(PyLockStatus)
|
||||
_PyMutex_LockTimed(PyMutex *m, PyTime_t timeout_ns, _PyLockFlags flags);
|
||||
|
||||
// Lock a mutex with additional options. See _PyLockFlags for details.
|
||||
static inline void
|
||||
PyMutex_LockFlags(PyMutex *m, _PyLockFlags flags)
|
||||
{
|
||||
uint8_t expected = _Py_UNLOCKED;
|
||||
if (!_Py_atomic_compare_exchange_uint8(&m->_bits, &expected, _Py_LOCKED)) {
|
||||
_PyMutex_LockTimed(m, -1, flags);
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock a mutex, returns -1 if the mutex is not locked (used for improved
|
||||
// error messages) otherwise returns 0.
|
||||
extern int _PyMutex_TryUnlock(PyMutex *m);
|
||||
|
||||
|
||||
// PyEvent is a one-time event notification
|
||||
typedef struct {
|
||||
uint8_t v;
|
||||
} PyEvent;
|
||||
|
||||
// Check if the event is set without blocking. Returns 1 if the event is set or
|
||||
// 0 otherwise.
|
||||
PyAPI_FUNC(int) _PyEvent_IsSet(PyEvent *evt);
|
||||
|
||||
// Set the event and notify any waiting threads.
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(void) _PyEvent_Notify(PyEvent *evt);
|
||||
|
||||
// Wait for the event to be set. If the event is already set, then this returns
|
||||
// immediately.
|
||||
PyAPI_FUNC(void) PyEvent_Wait(PyEvent *evt);
|
||||
|
||||
// Wait for the event to be set, or until the timeout expires. If the event is
|
||||
// already set, then this returns immediately. Returns 1 if the event was set,
|
||||
// and 0 if the timeout expired or thread was interrupted. If `detach` is
|
||||
// true, then the thread will detach/release the GIL while waiting.
|
||||
PyAPI_FUNC(int)
|
||||
PyEvent_WaitTimed(PyEvent *evt, PyTime_t timeout_ns, int detach);
|
||||
|
||||
// _PyRawMutex implements a word-sized mutex that that does not depend on the
|
||||
// parking lot API, and therefore can be used in the parking lot
|
||||
// implementation.
|
||||
//
|
||||
// The mutex uses a packed representation: the least significant bit is used to
|
||||
// indicate whether the mutex is locked or not. The remaining bits are either
|
||||
// zero or a pointer to a `struct raw_mutex_entry` (see lock.c).
|
||||
typedef struct {
|
||||
uintptr_t v;
|
||||
} _PyRawMutex;
|
||||
|
||||
// Slow paths for lock/unlock
|
||||
extern void _PyRawMutex_LockSlow(_PyRawMutex *m);
|
||||
extern void _PyRawMutex_UnlockSlow(_PyRawMutex *m);
|
||||
|
||||
static inline void
|
||||
_PyRawMutex_Lock(_PyRawMutex *m)
|
||||
{
|
||||
uintptr_t unlocked = _Py_UNLOCKED;
|
||||
if (_Py_atomic_compare_exchange_uintptr(&m->v, &unlocked, _Py_LOCKED)) {
|
||||
return;
|
||||
}
|
||||
_PyRawMutex_LockSlow(m);
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyRawMutex_Unlock(_PyRawMutex *m)
|
||||
{
|
||||
uintptr_t locked = _Py_LOCKED;
|
||||
if (_Py_atomic_compare_exchange_uintptr(&m->v, &locked, _Py_UNLOCKED)) {
|
||||
return;
|
||||
}
|
||||
_PyRawMutex_UnlockSlow(m);
|
||||
}
|
||||
|
||||
// Type signature for one-time initialization functions. The function should
|
||||
// return 0 on success and -1 on failure.
|
||||
typedef int _Py_once_fn_t(void *arg);
|
||||
|
||||
// (private) slow path for one time initialization
|
||||
PyAPI_FUNC(int)
|
||||
_PyOnceFlag_CallOnceSlow(_PyOnceFlag *flag, _Py_once_fn_t *fn, void *arg);
|
||||
|
||||
// Calls `fn` once using `flag`. The `arg` is passed to the call to `fn`.
|
||||
//
|
||||
// Returns 0 on success and -1 on failure.
|
||||
//
|
||||
// If `fn` returns 0 (success), then subsequent calls immediately return 0.
|
||||
// If `fn` returns -1 (failure), then subsequent calls will retry the call.
|
||||
static inline int
|
||||
_PyOnceFlag_CallOnce(_PyOnceFlag *flag, _Py_once_fn_t *fn, void *arg)
|
||||
{
|
||||
if (_Py_atomic_load_uint8(&flag->v) == _Py_ONCE_INITIALIZED) {
|
||||
return 0;
|
||||
}
|
||||
return _PyOnceFlag_CallOnceSlow(flag, fn, arg);
|
||||
}
|
||||
|
||||
// A recursive mutex. The mutex should zero-initialized.
|
||||
typedef struct {
|
||||
PyMutex mutex;
|
||||
unsigned long long thread; // i.e., PyThread_get_thread_ident_ex()
|
||||
size_t level;
|
||||
} _PyRecursiveMutex;
|
||||
|
||||
PyAPI_FUNC(int) _PyRecursiveMutex_IsLockedByCurrentThread(_PyRecursiveMutex *m);
|
||||
PyAPI_FUNC(void) _PyRecursiveMutex_Lock(_PyRecursiveMutex *m);
|
||||
extern PyLockStatus _PyRecursiveMutex_LockTimed(_PyRecursiveMutex *m, PyTime_t timeout, _PyLockFlags flags);
|
||||
PyAPI_FUNC(void) _PyRecursiveMutex_Unlock(_PyRecursiveMutex *m);
|
||||
extern int _PyRecursiveMutex_TryUnlock(_PyRecursiveMutex *m);
|
||||
|
||||
// A readers-writer (RW) lock. The lock supports multiple concurrent readers or
|
||||
// a single writer. The lock is write-preferring: if a writer is waiting while
|
||||
// the lock is read-locked then, new readers will be blocked. This avoids
|
||||
// starvation of writers.
|
||||
//
|
||||
// In C++, the equivalent synchronization primitive is std::shared_mutex
|
||||
// with shared ("read") and exclusive ("write") locking.
|
||||
//
|
||||
// The two least significant bits are used to indicate if the lock is
|
||||
// write-locked and if there are parked threads (either readers or writers)
|
||||
// waiting to acquire the lock. The remaining bits are used to indicate the
|
||||
// number of readers holding the lock.
|
||||
//
|
||||
// 0b000..00000: unlocked
|
||||
// 0bnnn..nnn00: nnn..nnn readers holding the lock
|
||||
// 0bnnn..nnn10: nnn..nnn readers holding the lock and a writer is waiting
|
||||
// 0b00000..010: unlocked with awoken writer about to acquire lock
|
||||
// 0b00000..001: write-locked
|
||||
// 0b00000..011: write-locked and readers or other writers are waiting
|
||||
//
|
||||
// Note that reader_count must be zero if the lock is held by a writer, and
|
||||
// vice versa. The lock can only be held by readers or a writer, but not both.
|
||||
//
|
||||
// The design is optimized for simplicity of the implementation. The lock is
|
||||
// not fair: if fairness is desired, use an additional PyMutex to serialize
|
||||
// writers. The lock is also not reentrant.
|
||||
typedef struct {
|
||||
uintptr_t bits;
|
||||
} _PyRWMutex;
|
||||
|
||||
// Read lock (i.e., shared lock)
|
||||
PyAPI_FUNC(void) _PyRWMutex_RLock(_PyRWMutex *rwmutex);
|
||||
PyAPI_FUNC(void) _PyRWMutex_RUnlock(_PyRWMutex *rwmutex);
|
||||
|
||||
// Write lock (i.e., exclusive lock)
|
||||
PyAPI_FUNC(void) _PyRWMutex_Lock(_PyRWMutex *rwmutex);
|
||||
PyAPI_FUNC(void) _PyRWMutex_Unlock(_PyRWMutex *rwmutex);
|
||||
|
||||
// Similar to linux seqlock: https://en.wikipedia.org/wiki/Seqlock
|
||||
// We use a sequence number to lock the writer, an even sequence means we're unlocked, an odd
|
||||
// sequence means we're locked. Readers will read the sequence before attempting to read the
|
||||
// underlying data and then read the sequence number again after reading the data. If the
|
||||
// sequence has not changed the data is valid.
|
||||
//
|
||||
// Differs a little bit in that we use CAS on sequence as the lock, instead of a separate spin lock.
|
||||
// The writer can also detect that the undelering data has not changed and abandon the write
|
||||
// and restore the previous sequence.
|
||||
typedef struct {
|
||||
uint32_t sequence;
|
||||
} _PySeqLock;
|
||||
|
||||
// Lock the sequence lock for the writer
|
||||
PyAPI_FUNC(void) _PySeqLock_LockWrite(_PySeqLock *seqlock);
|
||||
|
||||
// Unlock the sequence lock and move to the next sequence number.
|
||||
PyAPI_FUNC(void) _PySeqLock_UnlockWrite(_PySeqLock *seqlock);
|
||||
|
||||
// Abandon the current update indicating that no mutations have occurred
|
||||
// and restore the previous sequence value.
|
||||
PyAPI_FUNC(void) _PySeqLock_AbandonWrite(_PySeqLock *seqlock);
|
||||
|
||||
// Begin a read operation and return the current sequence number.
|
||||
PyAPI_FUNC(uint32_t) _PySeqLock_BeginRead(_PySeqLock *seqlock);
|
||||
|
||||
// End the read operation and confirm that the sequence number has not changed.
|
||||
// Returns 1 if the read was successful or 0 if the read should be retried.
|
||||
PyAPI_FUNC(int) _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous);
|
||||
|
||||
// Check if the lock was held during a fork and clear the lock. Returns 1
|
||||
// if the lock was held and any associated data should be cleared.
|
||||
PyAPI_FUNC(int) _PySeqLock_AfterFork(_PySeqLock *seqlock);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_LOCK_H */
|
||||
319
extern/include/python/internal/pycore_long.h
vendored
Normal file
319
extern/include/python/internal/pycore_long.h
vendored
Normal file
@@ -0,0 +1,319 @@
|
||||
#ifndef Py_INTERNAL_LONG_H
|
||||
#define Py_INTERNAL_LONG_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_bytesobject.h" // _PyBytesWriter
|
||||
#include "pycore_runtime.h" // _Py_SINGLETON()
|
||||
|
||||
/*
|
||||
* Default int base conversion size limitation: Denial of Service prevention.
|
||||
*
|
||||
* Chosen such that this isn't wildly slow on modern hardware and so that
|
||||
* everyone's existing deployed numpy test suite passes before
|
||||
* https://github.com/numpy/numpy/issues/22098 is widely available.
|
||||
*
|
||||
* $ python -m timeit -s 's = "1"*4300' 'int(s)'
|
||||
* 2000 loops, best of 5: 125 usec per loop
|
||||
* $ python -m timeit -s 's = "1"*4300; v = int(s)' 'str(v)'
|
||||
* 1000 loops, best of 5: 311 usec per loop
|
||||
* (zen2 cloud VM)
|
||||
*
|
||||
* 4300 decimal digits fits a ~14284 bit number.
|
||||
*/
|
||||
#define _PY_LONG_DEFAULT_MAX_STR_DIGITS 4300
|
||||
/*
|
||||
* Threshold for max digits check. For performance reasons int() and
|
||||
* int.__str__() don't checks values that are smaller than this
|
||||
* threshold. Acts as a guaranteed minimum size limit for bignums that
|
||||
* applications can expect from CPython.
|
||||
*
|
||||
* % python -m timeit -s 's = "1"*640; v = int(s)' 'str(int(s))'
|
||||
* 20000 loops, best of 5: 12 usec per loop
|
||||
*
|
||||
* "640 digits should be enough for anyone." - gps
|
||||
* fits a ~2126 bit decimal number.
|
||||
*/
|
||||
#define _PY_LONG_MAX_STR_DIGITS_THRESHOLD 640
|
||||
|
||||
#if ((_PY_LONG_DEFAULT_MAX_STR_DIGITS != 0) && \
|
||||
(_PY_LONG_DEFAULT_MAX_STR_DIGITS < _PY_LONG_MAX_STR_DIGITS_THRESHOLD))
|
||||
# error "_PY_LONG_DEFAULT_MAX_STR_DIGITS smaller than threshold."
|
||||
#endif
|
||||
|
||||
/* runtime lifecycle */
|
||||
|
||||
extern PyStatus _PyLong_InitTypes(PyInterpreterState *);
|
||||
extern void _PyLong_FiniTypes(PyInterpreterState *interp);
|
||||
|
||||
|
||||
/* other API */
|
||||
|
||||
PyAPI_FUNC(void) _PyLong_ExactDealloc(PyObject *self);
|
||||
|
||||
#define _PyLong_SMALL_INTS _Py_SINGLETON(small_ints)
|
||||
|
||||
// _PyLong_GetZero() and _PyLong_GetOne() must always be available
|
||||
// _PyLong_FromUnsignedChar must always be available
|
||||
#if _PY_NSMALLPOSINTS < 257
|
||||
# error "_PY_NSMALLPOSINTS must be greater than or equal to 257"
|
||||
#endif
|
||||
|
||||
#define _PY_IS_SMALL_INT(val) ((val) >= 0 && (val) < 256 && (val) < _PY_NSMALLPOSINTS)
|
||||
|
||||
// Return a reference to the immortal zero singleton.
|
||||
// The function cannot return NULL.
|
||||
static inline PyObject* _PyLong_GetZero(void)
|
||||
{ return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS]; }
|
||||
|
||||
// Return a reference to the immortal one singleton.
|
||||
// The function cannot return NULL.
|
||||
static inline PyObject* _PyLong_GetOne(void)
|
||||
{ return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS+1]; }
|
||||
|
||||
static inline PyObject* _PyLong_FromUnsignedChar(unsigned char i)
{
    // Every unsigned char value (0..255) maps to an immortal small-int
    // singleton; always in range because _PY_NSMALLPOSINTS >= 257 is
    // enforced by the #error check above. Never returns NULL.
    return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS+i];
}
|
||||
|
||||
// _PyLong_Frexp returns a double x and an exponent e such that the
|
||||
// true value is approximately equal to x * 2**e. x is
|
||||
// 0.0 if and only if the input is 0 (in which case, e and x are both
|
||||
// zeroes); otherwise, 0.5 <= abs(x) < 1.0.
|
||||
// Always successful.
|
||||
//
|
||||
// Export for 'math' shared extension
|
||||
PyAPI_DATA(double) _PyLong_Frexp(PyLongObject *a, int64_t *e);
|
||||
|
||||
extern PyObject* _PyLong_FromBytes(const char *, Py_ssize_t, int);
|
||||
|
||||
// _PyLong_DivmodNear. Given integers a and b, compute the nearest
|
||||
// integer q to the exact quotient a / b, rounding to the nearest even integer
|
||||
// in the case of a tie. Return (q, r), where r = a - q*b. The remainder r
|
||||
// will satisfy abs(r) <= abs(b)/2, with equality possible only if q is
|
||||
// even.
|
||||
//
|
||||
// Export for '_datetime' shared extension.
|
||||
PyAPI_DATA(PyObject*) _PyLong_DivmodNear(PyObject *, PyObject *);
|
||||
|
||||
// _PyLong_Format: Convert the long to a string object with given base,
|
||||
// appending a base prefix of 0[box] if base is 2, 8 or 16.
|
||||
// Export for '_tkinter' shared extension.
|
||||
PyAPI_DATA(PyObject*) _PyLong_Format(PyObject *obj, int base);
|
||||
|
||||
// Export for 'math' shared extension
|
||||
PyAPI_DATA(PyObject*) _PyLong_Rshift(PyObject *, int64_t);
|
||||
|
||||
// Export for 'math' shared extension
|
||||
PyAPI_DATA(PyObject*) _PyLong_Lshift(PyObject *, int64_t);
|
||||
|
||||
PyAPI_FUNC(PyObject*) _PyLong_Add(PyLongObject *left, PyLongObject *right);
|
||||
PyAPI_FUNC(PyObject*) _PyLong_Multiply(PyLongObject *left, PyLongObject *right);
|
||||
PyAPI_FUNC(PyObject*) _PyLong_Subtract(PyLongObject *left, PyLongObject *right);
|
||||
|
||||
// Export for 'binascii' shared extension.
|
||||
PyAPI_DATA(unsigned char) _PyLong_DigitValue[256];
|
||||
|
||||
/* Format the object based on the format_spec, as defined in PEP 3101
|
||||
(Advanced String Formatting). */
|
||||
extern int _PyLong_FormatAdvancedWriter(
|
||||
_PyUnicodeWriter *writer,
|
||||
PyObject *obj,
|
||||
PyObject *format_spec,
|
||||
Py_ssize_t start,
|
||||
Py_ssize_t end);
|
||||
|
||||
extern int _PyLong_FormatWriter(
|
||||
_PyUnicodeWriter *writer,
|
||||
PyObject *obj,
|
||||
int base,
|
||||
int alternate);
|
||||
|
||||
extern char* _PyLong_FormatBytesWriter(
|
||||
_PyBytesWriter *writer,
|
||||
char *str,
|
||||
PyObject *obj,
|
||||
int base,
|
||||
int alternate);
|
||||
|
||||
// Argument converters used by Argument Clinic
|
||||
|
||||
// Export for 'select' shared extension (Argument Clinic code)
|
||||
PyAPI_FUNC(int) _PyLong_UnsignedShort_Converter(PyObject *, void *);
|
||||
|
||||
// Export for '_testclinic' shared extension (Argument Clinic code)
|
||||
PyAPI_FUNC(int) _PyLong_UnsignedInt_Converter(PyObject *, void *);
|
||||
|
||||
// Export for '_blake2' shared extension (Argument Clinic code)
|
||||
PyAPI_FUNC(int) _PyLong_UnsignedLong_Converter(PyObject *, void *);
|
||||
|
||||
// Export for '_blake2' shared extension (Argument Clinic code)
|
||||
PyAPI_FUNC(int) _PyLong_UnsignedLongLong_Converter(PyObject *, void *);
|
||||
|
||||
// Export for '_testclinic' shared extension (Argument Clinic code)
|
||||
PyAPI_FUNC(int) _PyLong_Size_t_Converter(PyObject *, void *);
|
||||
|
||||
PyAPI_FUNC(int) _PyLong_UInt8_Converter(PyObject *, void *);
|
||||
PyAPI_FUNC(int) _PyLong_UInt16_Converter(PyObject *, void *);
|
||||
PyAPI_FUNC(int) _PyLong_UInt32_Converter(PyObject *, void *);
|
||||
PyAPI_FUNC(int) _PyLong_UInt64_Converter(PyObject *, void *);
|
||||
|
||||
/* Long value tag bits:
 * 0-1: Sign bits value = (1-sign), ie. negative=2, positive=0, zero=1.
 * 2: Set to 1 for the small ints
 * 3+ Unsigned digit count
 */
#define SIGN_MASK 3
#define SIGN_ZERO 1
#define SIGN_NEGATIVE 2
#define NON_SIZE_BITS 3
#define IMMORTALITY_BIT_MASK (1 << 2)

/* The functions _PyLong_IsCompact and _PyLong_CompactValue are defined
 * in Include/cpython/longobject.h, since they need to be inline.
 *
 * "Compact" values have at least one bit to spare,
 * so that addition and subtraction can be performed on the values
 * without risk of overflow.
 *
 * The inline functions need tag bits.
 * For readability, rather than do `#define SIGN_MASK _PyLong_SIGN_MASK`
 * we define them to the numbers in both places and then assert that
 * they're the same.
 */
#if SIGN_MASK != _PyLong_SIGN_MASK
#  error "SIGN_MASK does not match _PyLong_SIGN_MASK"
#endif
#if NON_SIZE_BITS != _PyLong_NON_SIZE_BITS
#  error "NON_SIZE_BITS does not match _PyLong_NON_SIZE_BITS"
#endif

/* All "compact" values are guaranteed to fit into
 * a Py_ssize_t with at least one bit to spare.
 * In other words, for 64 bit machines, compact
 * will be signed 63 (or fewer) bit values
 */
/* Return 1 if the argument is compact int */
|
||||
static inline int
|
||||
_PyLong_IsNonNegativeCompact(const PyLongObject* op) {
|
||||
assert(PyLong_Check(op));
|
||||
return ((op->long_value.lv_tag & ~IMMORTALITY_BIT_MASK) <= (1 << NON_SIZE_BITS));
|
||||
}
|
||||
|
||||
|
||||
static inline int
|
||||
_PyLong_BothAreCompact(const PyLongObject* a, const PyLongObject* b) {
|
||||
assert(PyLong_Check(a));
|
||||
assert(PyLong_Check(b));
|
||||
return (a->long_value.lv_tag | b->long_value.lv_tag) < (2 << NON_SIZE_BITS);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
_PyLong_IsZero(const PyLongObject *op)
|
||||
{
|
||||
return (op->long_value.lv_tag & SIGN_MASK) == SIGN_ZERO;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
_PyLong_IsNegative(const PyLongObject *op)
|
||||
{
|
||||
return (op->long_value.lv_tag & SIGN_MASK) == SIGN_NEGATIVE;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
_PyLong_IsPositive(const PyLongObject *op)
|
||||
{
|
||||
return (op->long_value.lv_tag & SIGN_MASK) == 0;
|
||||
}
|
||||
|
||||
static inline Py_ssize_t
|
||||
_PyLong_DigitCount(const PyLongObject *op)
|
||||
{
|
||||
assert(PyLong_Check(op));
|
||||
return (Py_ssize_t)(op->long_value.lv_tag >> NON_SIZE_BITS);
|
||||
}
|
||||
|
||||
/* Equivalent to _PyLong_DigitCount(op) * _PyLong_NonCompactSign(op) */
|
||||
static inline Py_ssize_t
|
||||
_PyLong_SignedDigitCount(const PyLongObject *op)
|
||||
{
|
||||
assert(PyLong_Check(op));
|
||||
Py_ssize_t sign = 1 - (op->long_value.lv_tag & SIGN_MASK);
|
||||
return sign * (Py_ssize_t)(op->long_value.lv_tag >> NON_SIZE_BITS);
|
||||
}
|
||||
|
||||
static inline int
|
||||
_PyLong_CompactSign(const PyLongObject *op)
|
||||
{
|
||||
assert(PyLong_Check(op));
|
||||
assert(_PyLong_IsCompact((PyLongObject *)op));
|
||||
return 1 - (op->long_value.lv_tag & SIGN_MASK);
|
||||
}
|
||||
|
||||
static inline int
|
||||
_PyLong_NonCompactSign(const PyLongObject *op)
|
||||
{
|
||||
assert(PyLong_Check(op));
|
||||
assert(!_PyLong_IsCompact((PyLongObject *)op));
|
||||
return 1 - (op->long_value.lv_tag & SIGN_MASK);
|
||||
}
|
||||
|
||||
/* Do a and b have the same sign? */
|
||||
static inline int
|
||||
_PyLong_SameSign(const PyLongObject *a, const PyLongObject *b)
|
||||
{
|
||||
return (a->long_value.lv_tag & SIGN_MASK) == (b->long_value.lv_tag & SIGN_MASK);
|
||||
}
|
||||
|
||||
/* Build a tag from a sign (-1, 0, +1) and an unsigned digit count.
 * Note: does NOT set the immortality bit. */
#define TAG_FROM_SIGN_AND_SIZE(sign, size) \
    ((uintptr_t)(1 - (sign)) | ((uintptr_t)(size) << NON_SIZE_BITS))
static inline void
|
||||
_PyLong_SetSignAndDigitCount(PyLongObject *op, int sign, Py_ssize_t size)
|
||||
{
|
||||
assert(size >= 0);
|
||||
assert(-1 <= sign && sign <= 1);
|
||||
assert(sign != 0 || size == 0);
|
||||
op->long_value.lv_tag = TAG_FROM_SIGN_AND_SIZE(sign, size);
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyLong_SetDigitCount(PyLongObject *op, Py_ssize_t size)
|
||||
{
|
||||
assert(size >= 0);
|
||||
op->long_value.lv_tag = (((size_t)size) << NON_SIZE_BITS) | (op->long_value.lv_tag & SIGN_MASK);
|
||||
}
|
||||
|
||||
/* Mask selecting the digit-count bits (everything above the low
 * NON_SIZE_BITS bits of the tag). */
#define NON_SIZE_MASK ~(uintptr_t)((1 << NON_SIZE_BITS) - 1)
static inline void
|
||||
_PyLong_FlipSign(PyLongObject *op) {
|
||||
unsigned int flipped_sign = 2 - (op->long_value.lv_tag & SIGN_MASK);
|
||||
op->long_value.lv_tag &= NON_SIZE_MASK;
|
||||
op->long_value.lv_tag |= flipped_sign;
|
||||
}
|
||||
|
||||
/* Static initializer for a small-int PyLongObject holding `val`:
 * one digit storing abs(val) (or zero digits for 0), with the
 * immortality bit set in the tag. */
#define _PyLong_DIGIT_INIT(val) \
    { \
        .ob_base = _PyObject_HEAD_INIT(&PyLong_Type), \
        .long_value  = { \
            .lv_tag = TAG_FROM_SIGN_AND_SIZE( \
                (val) == 0 ? 0 : ((val) < 0 ? -1 : 1), \
                (val) == 0 ? 0 : 1) | IMMORTALITY_BIT_MASK, \
            { ((val) >= 0 ? (val) : -(val)) }, \
        } \
    }
/* Tags for the canonical False (0) and True (1) values. */
#define _PyLong_FALSE_TAG TAG_FROM_SIGN_AND_SIZE(0, 0)
#define _PyLong_TRUE_TAG TAG_FROM_SIGN_AND_SIZE(1, 1)

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LONG_H */
305
extern/include/python/internal/pycore_magic_number.h
vendored
Normal file
305
extern/include/python/internal/pycore_magic_number.h
vendored
Normal file
@@ -0,0 +1,305 @@
|
||||
#ifndef Py_INTERNAL_MAGIC_NUMBER_H
#define Py_INTERNAL_MAGIC_NUMBER_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif
/*
|
||||
|
||||
Magic number to reject .pyc files generated by other Python versions.
|
||||
It should change for each incompatible change to the bytecode.
|
||||
|
||||
PYC_MAGIC_NUMBER must change whenever the bytecode emitted by the compiler may
|
||||
no longer be understood by older implementations of the eval loop (usually due
|
||||
to the addition of new opcodes).
|
||||
|
||||
The value of CR and LF is incorporated so if you ever read or write
|
||||
a .pyc file in text mode the magic number will be wrong; also, the
|
||||
Apple MPW compiler swaps their values, botching string constants.
|
||||
|
||||
There were a variety of old schemes for setting the magic number. Starting with
|
||||
Python 3.11, Python 3.n starts with magic number 2900+50n. Within each minor
|
||||
version, the magic number is incremented by 1 each time the file format changes.
|
||||
|
||||
Known values:
|
||||
Python 1.5: 20121
|
||||
Python 1.5.1: 20121
|
||||
Python 1.5.2: 20121
|
||||
Python 1.6: 50428
|
||||
Python 2.0: 50823
|
||||
Python 2.0.1: 50823
|
||||
Python 2.1: 60202
|
||||
Python 2.1.1: 60202
|
||||
Python 2.1.2: 60202
|
||||
Python 2.2: 60717
|
||||
Python 2.3a0: 62011
|
||||
Python 2.3a0: 62021
|
||||
Python 2.3a0: 62011 (!)
|
||||
Python 2.4a0: 62041
|
||||
Python 2.4a3: 62051
|
||||
Python 2.4b1: 62061
|
||||
Python 2.5a0: 62071
|
||||
Python 2.5a0: 62081 (ast-branch)
|
||||
Python 2.5a0: 62091 (with)
|
||||
Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
|
||||
Python 2.5b3: 62101 (fix wrong code: for x, in ...)
|
||||
Python 2.5b3: 62111 (fix wrong code: x += yield)
|
||||
Python 2.5c1: 62121 (fix wrong lnotab with for loops and
|
||||
storing constants that should have been removed)
|
||||
Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
|
||||
Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
|
||||
Python 2.6a1: 62161 (WITH_CLEANUP optimization)
|
||||
Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND)
|
||||
Python 2.7a0: 62181 (optimize conditional branches:
|
||||
introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
|
||||
Python 2.7a0 62191 (introduce SETUP_WITH)
|
||||
Python 2.7a0 62201 (introduce BUILD_SET)
|
||||
Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD)
|
||||
Python 3000: 3000
|
||||
3010 (removed UNARY_CONVERT)
|
||||
3020 (added BUILD_SET)
|
||||
3030 (added keyword-only parameters)
|
||||
3040 (added signature annotations)
|
||||
3050 (print becomes a function)
|
||||
3060 (PEP 3115 metaclass syntax)
|
||||
3061 (string literals become unicode)
|
||||
3071 (PEP 3109 raise changes)
|
||||
3081 (PEP 3137 make __file__ and __name__ unicode)
|
||||
3091 (kill str8 interning)
|
||||
3101 (merge from 2.6a0, see 62151)
|
||||
3103 (__file__ points to source file)
|
||||
Python 3.0a4: 3111 (WITH_CLEANUP optimization).
|
||||
Python 3.0b1: 3131 (lexical exception stacking, including POP_EXCEPT
|
||||
#3021)
|
||||
Python 3.1a1: 3141 (optimize list, set and dict comprehensions:
|
||||
change LIST_APPEND and SET_ADD, add MAP_ADD #2183)
|
||||
Python 3.1a1: 3151 (optimize conditional branches:
|
||||
introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE
|
||||
#4715)
|
||||
Python 3.2a1: 3160 (add SETUP_WITH #6101)
|
||||
Python 3.2a2: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR #9225)
|
||||
Python 3.2a3 3180 (add DELETE_DEREF #4617)
|
||||
Python 3.3a1 3190 (__class__ super closure changed)
|
||||
Python 3.3a1 3200 (PEP 3155 __qualname__ added #13448)
|
||||
Python 3.3a1 3210 (added size modulo 2**32 to the pyc header #13645)
|
||||
Python 3.3a2 3220 (changed PEP 380 implementation #14230)
|
||||
Python 3.3a4 3230 (revert changes to implicit __class__ closure #14857)
|
||||
Python 3.4a1 3250 (evaluate positional default arguments before
|
||||
keyword-only defaults #16967)
|
||||
Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override
|
||||
free vars #17853)
|
||||
Python 3.4a1 3270 (various tweaks to the __class__ closure #12370)
|
||||
Python 3.4a1 3280 (remove implicit class argument)
|
||||
Python 3.4a4 3290 (changes to __qualname__ computation #19301)
|
||||
Python 3.4a4 3300 (more changes to __qualname__ computation #19301)
|
||||
Python 3.4rc2 3310 (alter __qualname__ computation #20625)
|
||||
Python 3.5a1 3320 (PEP 465: Matrix multiplication operator #21176)
|
||||
Python 3.5b1 3330 (PEP 448: Additional Unpacking Generalizations #2292)
|
||||
Python 3.5b2 3340 (fix dictionary display evaluation order #11205)
|
||||
Python 3.5b3 3350 (add GET_YIELD_FROM_ITER opcode #24400)
|
||||
Python 3.5.2 3351 (fix BUILD_MAP_UNPACK_WITH_CALL opcode #27286)
|
||||
Python 3.6a0 3360 (add FORMAT_VALUE opcode #25483)
|
||||
Python 3.6a1 3361 (lineno delta of code.co_lnotab becomes signed #26107)
|
||||
Python 3.6a2 3370 (16 bit wordcode #26647)
|
||||
Python 3.6a2 3371 (add BUILD_CONST_KEY_MAP opcode #27140)
|
||||
Python 3.6a2 3372 (MAKE_FUNCTION simplification, remove MAKE_CLOSURE
|
||||
#27095)
|
||||
Python 3.6b1 3373 (add BUILD_STRING opcode #27078)
|
||||
Python 3.6b1 3375 (add SETUP_ANNOTATIONS and STORE_ANNOTATION opcodes
|
||||
#27985)
|
||||
Python 3.6b1 3376 (simplify CALL_FUNCTIONs & BUILD_MAP_UNPACK_WITH_CALL
|
||||
#27213)
|
||||
Python 3.6b1 3377 (set __class__ cell from type.__new__ #23722)
|
||||
Python 3.6b2 3378 (add BUILD_TUPLE_UNPACK_WITH_CALL #28257)
|
||||
Python 3.6rc1 3379 (more thorough __class__ validation #23722)
|
||||
Python 3.7a1 3390 (add LOAD_METHOD and CALL_METHOD opcodes #26110)
|
||||
Python 3.7a2 3391 (update GET_AITER #31709)
|
||||
Python 3.7a4 3392 (PEP 552: Deterministic pycs #31650)
|
||||
Python 3.7b1 3393 (remove STORE_ANNOTATION opcode #32550)
|
||||
       Python 3.7b5 3394 (restored docstring as the first stmt in the body;
                          this might have affected the first line number #32911)
|
||||
Python 3.8a1 3400 (move frame block handling to compiler #17611)
|
||||
Python 3.8a1 3401 (add END_ASYNC_FOR #33041)
|
||||
Python 3.8a1 3410 (PEP570 Python Positional-Only Parameters #36540)
|
||||
Python 3.8b2 3411 (Reverse evaluation order of key: value in dict
|
||||
comprehensions #35224)
|
||||
Python 3.8b2 3412 (Swap the position of positional args and positional
|
||||
only args in ast.arguments #37593)
|
||||
Python 3.8b4 3413 (Fix "break" and "continue" in "finally" #37830)
|
||||
Python 3.9a0 3420 (add LOAD_ASSERTION_ERROR #34880)
|
||||
Python 3.9a0 3421 (simplified bytecode for with blocks #32949)
|
||||
Python 3.9a0 3422 (remove BEGIN_FINALLY, END_FINALLY, CALL_FINALLY, POP_FINALLY bytecodes #33387)
|
||||
Python 3.9a2 3423 (add IS_OP, CONTAINS_OP and JUMP_IF_NOT_EXC_MATCH bytecodes #39156)
|
||||
Python 3.9a2 3424 (simplify bytecodes for *value unpacking)
|
||||
Python 3.9a2 3425 (simplify bytecodes for **value unpacking)
|
||||
Python 3.10a1 3430 (Make 'annotations' future by default)
|
||||
Python 3.10a1 3431 (New line number table format -- PEP 626)
|
||||
Python 3.10a2 3432 (Function annotation for MAKE_FUNCTION is changed from dict to tuple bpo-42202)
|
||||
Python 3.10a2 3433 (RERAISE restores f_lasti if oparg != 0)
|
||||
Python 3.10a6 3434 (PEP 634: Structural Pattern Matching)
|
||||
Python 3.10a7 3435 Use instruction offsets (as opposed to byte offsets).
|
||||
Python 3.10b1 3436 (Add GEN_START bytecode #43683)
|
||||
Python 3.10b1 3437 (Undo making 'annotations' future by default - We like to dance among core devs!)
|
||||
Python 3.10b1 3438 Safer line number table handling.
|
||||
Python 3.10b1 3439 (Add ROT_N)
|
||||
Python 3.11a1 3450 Use exception table for unwinding ("zero cost" exception handling)
|
||||
Python 3.11a1 3451 (Add CALL_METHOD_KW)
|
||||
Python 3.11a1 3452 (drop nlocals from marshaled code objects)
|
||||
Python 3.11a1 3453 (add co_fastlocalnames and co_fastlocalkinds)
|
||||
Python 3.11a1 3454 (compute cell offsets relative to locals bpo-43693)
|
||||
Python 3.11a1 3455 (add MAKE_CELL bpo-43693)
|
||||
Python 3.11a1 3456 (interleave cell args bpo-43693)
|
||||
Python 3.11a1 3457 (Change localsplus to a bytes object bpo-43693)
|
||||
Python 3.11a1 3458 (imported objects now don't use LOAD_METHOD/CALL_METHOD)
|
||||
Python 3.11a1 3459 (PEP 657: add end line numbers and column offsets for instructions)
|
||||
Python 3.11a1 3460 (Add co_qualname field to PyCodeObject bpo-44530)
|
||||
Python 3.11a1 3461 (JUMP_ABSOLUTE must jump backwards)
|
||||
Python 3.11a2 3462 (bpo-44511: remove COPY_DICT_WITHOUT_KEYS, change
|
||||
MATCH_CLASS and MATCH_KEYS, and add COPY)
|
||||
Python 3.11a3 3463 (bpo-45711: JUMP_IF_NOT_EXC_MATCH no longer pops the
|
||||
active exception)
|
||||
Python 3.11a3 3464 (bpo-45636: Merge numeric BINARY_*INPLACE_* into
|
||||
BINARY_OP)
|
||||
Python 3.11a3 3465 (Add COPY_FREE_VARS opcode)
|
||||
Python 3.11a4 3466 (bpo-45292: PEP-654 except*)
|
||||
Python 3.11a4 3467 (Change CALL_xxx opcodes)
|
||||
Python 3.11a4 3468 (Add SEND opcode)
|
||||
Python 3.11a4 3469 (bpo-45711: remove type, traceback from exc_info)
|
||||
Python 3.11a4 3470 (bpo-46221: PREP_RERAISE_STAR no longer pushes lasti)
|
||||
Python 3.11a4 3471 (bpo-46202: remove pop POP_EXCEPT_AND_RERAISE)
|
||||
Python 3.11a4 3472 (bpo-46009: replace GEN_START with POP_TOP)
|
||||
Python 3.11a4 3473 (Add POP_JUMP_IF_NOT_NONE/POP_JUMP_IF_NONE opcodes)
|
||||
Python 3.11a4 3474 (Add RESUME opcode)
|
||||
Python 3.11a5 3475 (Add RETURN_GENERATOR opcode)
|
||||
Python 3.11a5 3476 (Add ASYNC_GEN_WRAP opcode)
|
||||
Python 3.11a5 3477 (Replace DUP_TOP/DUP_TOP_TWO with COPY and
|
||||
ROT_TWO/ROT_THREE/ROT_FOUR/ROT_N with SWAP)
|
||||
Python 3.11a5 3478 (New CALL opcodes)
|
||||
Python 3.11a5 3479 (Add PUSH_NULL opcode)
|
||||
Python 3.11a5 3480 (New CALL opcodes, second iteration)
|
||||
Python 3.11a5 3481 (Use inline cache for BINARY_OP)
|
||||
Python 3.11a5 3482 (Use inline caching for UNPACK_SEQUENCE and LOAD_GLOBAL)
|
||||
Python 3.11a5 3483 (Use inline caching for COMPARE_OP and BINARY_SUBSCR)
|
||||
Python 3.11a5 3484 (Use inline caching for LOAD_ATTR, LOAD_METHOD, and
|
||||
STORE_ATTR)
|
||||
Python 3.11a5 3485 (Add an oparg to GET_AWAITABLE)
|
||||
Python 3.11a6 3486 (Use inline caching for PRECALL and CALL)
|
||||
Python 3.11a6 3487 (Remove the adaptive "oparg counter" mechanism)
|
||||
Python 3.11a6 3488 (LOAD_GLOBAL can push additional NULL)
|
||||
Python 3.11a6 3489 (Add JUMP_BACKWARD, remove JUMP_ABSOLUTE)
|
||||
Python 3.11a6 3490 (remove JUMP_IF_NOT_EXC_MATCH, add CHECK_EXC_MATCH)
|
||||
Python 3.11a6 3491 (remove JUMP_IF_NOT_EG_MATCH, add CHECK_EG_MATCH,
|
||||
add JUMP_BACKWARD_NO_INTERRUPT, make JUMP_NO_INTERRUPT virtual)
|
||||
Python 3.11a7 3492 (make POP_JUMP_IF_NONE/NOT_NONE/TRUE/FALSE relative)
|
||||
Python 3.11a7 3493 (Make JUMP_IF_TRUE_OR_POP/JUMP_IF_FALSE_OR_POP relative)
|
||||
Python 3.11a7 3494 (New location info table)
|
||||
Python 3.11b4 3495 (Set line number of module's RESUME instr to 0 per PEP 626)
|
||||
Python 3.12a1 3500 (Remove PRECALL opcode)
|
||||
Python 3.12a1 3501 (YIELD_VALUE oparg == stack_depth)
|
||||
Python 3.12a1 3502 (LOAD_FAST_CHECK, no NULL-check in LOAD_FAST)
|
||||
Python 3.12a1 3503 (Shrink LOAD_METHOD cache)
|
||||
Python 3.12a1 3504 (Merge LOAD_METHOD back into LOAD_ATTR)
|
||||
Python 3.12a1 3505 (Specialization/Cache for FOR_ITER)
|
||||
Python 3.12a1 3506 (Add BINARY_SLICE and STORE_SLICE instructions)
|
||||
Python 3.12a1 3507 (Set lineno of module's RESUME to 0)
|
||||
Python 3.12a1 3508 (Add CLEANUP_THROW)
|
||||
Python 3.12a1 3509 (Conditional jumps only jump forward)
|
||||
Python 3.12a2 3510 (FOR_ITER leaves iterator on the stack)
|
||||
Python 3.12a2 3511 (Add STOPITERATION_ERROR instruction)
|
||||
Python 3.12a2 3512 (Remove all unused consts from code objects)
|
||||
Python 3.12a4 3513 (Add CALL_INTRINSIC_1 instruction, removed STOPITERATION_ERROR, PRINT_EXPR, IMPORT_STAR)
|
||||
Python 3.12a4 3514 (Remove ASYNC_GEN_WRAP, LIST_TO_TUPLE, and UNARY_POSITIVE)
|
||||
Python 3.12a5 3515 (Embed jump mask in COMPARE_OP oparg)
|
||||
Python 3.12a5 3516 (Add COMPARE_AND_BRANCH instruction)
|
||||
Python 3.12a5 3517 (Change YIELD_VALUE oparg to exception block depth)
|
||||
Python 3.12a6 3518 (Add RETURN_CONST instruction)
|
||||
Python 3.12a6 3519 (Modify SEND instruction)
|
||||
Python 3.12a6 3520 (Remove PREP_RERAISE_STAR, add CALL_INTRINSIC_2)
|
||||
Python 3.12a7 3521 (Shrink the LOAD_GLOBAL caches)
|
||||
Python 3.12a7 3522 (Removed JUMP_IF_FALSE_OR_POP/JUMP_IF_TRUE_OR_POP)
|
||||
Python 3.12a7 3523 (Convert COMPARE_AND_BRANCH back to COMPARE_OP)
|
||||
Python 3.12a7 3524 (Shrink the BINARY_SUBSCR caches)
|
||||
Python 3.12b1 3525 (Shrink the CALL caches)
|
||||
Python 3.12b1 3526 (Add instrumentation support)
|
||||
Python 3.12b1 3527 (Add LOAD_SUPER_ATTR)
|
||||
Python 3.12b1 3528 (Add LOAD_SUPER_ATTR_METHOD specialization)
|
||||
Python 3.12b1 3529 (Inline list/dict/set comprehensions)
|
||||
Python 3.12b1 3530 (Shrink the LOAD_SUPER_ATTR caches)
|
||||
Python 3.12b1 3531 (Add PEP 695 changes)
|
||||
Python 3.13a1 3550 (Plugin optimizer support)
|
||||
Python 3.13a1 3551 (Compact superinstructions)
|
||||
Python 3.13a1 3552 (Remove LOAD_FAST__LOAD_CONST and LOAD_CONST__LOAD_FAST)
|
||||
Python 3.13a1 3553 (Add SET_FUNCTION_ATTRIBUTE)
|
||||
Python 3.13a1 3554 (more efficient bytecodes for f-strings)
|
||||
Python 3.13a1 3555 (generate specialized opcodes metadata from bytecodes.c)
|
||||
Python 3.13a1 3556 (Convert LOAD_CLOSURE to a pseudo-op)
|
||||
Python 3.13a1 3557 (Make the conversion to boolean in jumps explicit)
|
||||
Python 3.13a1 3558 (Reorder the stack items for CALL)
|
||||
Python 3.13a1 3559 (Generate opcode IDs from bytecodes.c)
|
||||
Python 3.13a1 3560 (Add RESUME_CHECK instruction)
|
||||
Python 3.13a1 3561 (Add cache entry to branch instructions)
|
||||
Python 3.13a1 3562 (Assign opcode IDs for internal ops in separate range)
|
||||
Python 3.13a1 3563 (Add CALL_KW and remove KW_NAMES)
|
||||
Python 3.13a1 3564 (Removed oparg from YIELD_VALUE, changed oparg values of RESUME)
|
||||
Python 3.13a1 3565 (Oparg of YIELD_VALUE indicates whether it is in a yield-from)
|
||||
Python 3.13a1 3566 (Emit JUMP_NO_INTERRUPT instead of JUMP for non-loop no-lineno cases)
|
||||
Python 3.13a1 3567 (Reimplement line number propagation by the compiler)
|
||||
Python 3.13a1 3568 (Change semantics of END_FOR)
|
||||
Python 3.13a5 3569 (Specialize CONTAINS_OP)
|
||||
Python 3.13a6 3570 (Add __firstlineno__ class attribute)
|
||||
Python 3.13b1 3571 (Fix miscompilation of private names in generic classes)
|
||||
Python 3.14a1 3600 (Add LOAD_COMMON_CONSTANT)
|
||||
Python 3.14a1 3601 (Fix miscompilation of private names in generic classes)
|
||||
Python 3.14a1 3602 (Add LOAD_SPECIAL. Remove BEFORE_WITH and BEFORE_ASYNC_WITH)
|
||||
Python 3.14a1 3603 (Remove BUILD_CONST_KEY_MAP)
|
||||
Python 3.14a1 3604 (Do not duplicate test at end of while statements)
|
||||
Python 3.14a1 3605 (Move ENTER_EXECUTOR to opcode 255)
|
||||
Python 3.14a1 3606 (Specialize CALL_KW)
|
||||
Python 3.14a1 3607 (Add pseudo instructions JUMP_IF_TRUE/FALSE)
|
||||
Python 3.14a1 3608 (Add support for slices)
|
||||
Python 3.14a2 3609 (Add LOAD_SMALL_INT and LOAD_CONST_IMMORTAL instructions, remove RETURN_CONST)
|
||||
Python 3.14a4 3610 (Add VALUE_WITH_FAKE_GLOBALS format to annotationlib)
|
||||
Python 3.14a4 3611 (Add NOT_TAKEN instruction)
|
||||
Python 3.14a4 3612 (Add POP_ITER and INSTRUMENTED_POP_ITER)
|
||||
Python 3.14a4 3613 (Add LOAD_CONST_MORTAL instruction)
|
||||
Python 3.14a5 3614 (Add BINARY_OP_EXTEND)
|
||||
Python 3.14a5 3615 (CALL_FUNCTION_EX always take a kwargs argument)
|
||||
Python 3.14a5 3616 (Remove BINARY_SUBSCR and family. Make them BINARY_OPs)
|
||||
Python 3.14a6 3617 (Branch monitoring for async for loops)
|
||||
Python 3.14a6 3618 (Add oparg to END_ASYNC_FOR)
|
||||
Python 3.14a6 3619 (Renumber RESUME opcode from 149 to 128)
|
||||
Python 3.14a6 3620 (Optimize bytecode for all/any/tuple called on a genexp)
|
||||
Python 3.14a7 3621 (Optimize LOAD_FAST opcodes into LOAD_FAST_BORROW)
|
||||
Python 3.14a7 3622 (Store annotations in different class dict keys)
|
||||
Python 3.14a7 3623 (Add BUILD_INTERPOLATION & BUILD_TEMPLATE opcodes)
|
||||
Python 3.14b1 3624 (Don't optimize LOAD_FAST when local is killed by DELETE_FAST)
|
||||
Python 3.14b3 3625 (Fix handling of opcodes that may leave operands on the stack when optimizing LOAD_FAST)
|
||||
Python 3.14rc2 3626 (Fix missing exception handlers in logical expression)
|
||||
Python 3.14rc3 3627 (Fix miscompilation of some module-level annotations)
|
||||
|
||||
Python 3.15 will start with 3650
|
||||
|
||||
Please don't copy-paste the same pre-release tag for new entries above!!!
|
||||
You should always use the *upcoming* tag. For example, if 3.12a6 came out
|
||||
a week ago, I should put "Python 3.12a7" next to my new magic number.
|
||||
|
||||
Whenever PYC_MAGIC_NUMBER is changed, the ranges in the magic_values array in
|
||||
PC/launcher.c must also be updated.
|
||||
|
||||
*/
|
||||
|
||||
#define PYC_MAGIC_NUMBER 3627
/* This is equivalent to converting PYC_MAGIC_NUMBER to 2 bytes
   (little-endian) and then appending b'\r\n'. */
#define PYC_MAGIC_NUMBER_TOKEN \
    ((uint32_t)PYC_MAGIC_NUMBER | ((uint32_t)'\r' << 16) | ((uint32_t)'\n' << 24))
#ifdef __cplusplus
}
#endif
#endif  // !Py_INTERNAL_MAGIC_NUMBER_H
|
||||
20
extern/include/python/internal/pycore_memoryobject.h
vendored
Normal file
20
extern/include/python/internal/pycore_memoryobject.h
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
#ifndef Py_INTERNAL_MEMORYOBJECT_H
|
||||
#define Py_INTERNAL_MEMORYOBJECT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
extern PyTypeObject _PyManagedBuffer_Type;
|
||||
|
||||
PyObject *
|
||||
_PyMemoryView_FromBufferProc(PyObject *v, int flags,
|
||||
getbufferproc bufferproc);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_MEMORYOBJECT_H */
|
||||
69
extern/include/python/internal/pycore_mimalloc.h
vendored
Normal file
69
extern/include/python/internal/pycore_mimalloc.h
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
#ifndef Py_INTERNAL_MIMALLOC_H
#define Py_INTERNAL_MIMALLOC_H

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

// Configuration macros below must be seen before mimalloc's own headers.
#if defined(MIMALLOC_H) || defined(MIMALLOC_TYPES_H)
#  error "pycore_mimalloc.h must be included before mimalloc.h"
#endif

// Per-thread mimalloc heap identifiers.
typedef enum {
  _Py_MIMALLOC_HEAP_MEM = 0,       // PyMem_Malloc() and friends
  _Py_MIMALLOC_HEAP_OBJECT = 1,    // non-GC objects
  _Py_MIMALLOC_HEAP_GC = 2,        // GC objects without pre-header
  _Py_MIMALLOC_HEAP_GC_PRE = 3,    // GC objects with pre-header
  _Py_MIMALLOC_HEAP_COUNT
} _Py_mimalloc_heap_id;

#include "pycore_pymem.h"

#ifdef WITH_MIMALLOC
#  ifdef Py_GIL_DISABLED
#    define MI_PRIM_THREAD_ID _Py_ThreadId
#  endif
// Map mimalloc's debug fill bytes onto CPython's pymem byte patterns.
#  define MI_DEBUG_UNINIT PYMEM_CLEANBYTE
#  define MI_DEBUG_FREED PYMEM_DEADBYTE
#  define MI_DEBUG_PADDING PYMEM_FORBIDDENBYTE
#ifdef Py_DEBUG
#  define MI_DEBUG 2
#else
#  define MI_DEBUG 0
#endif

#ifdef _Py_THREAD_SANITIZER
#  define MI_TSAN 1
#endif

#ifdef __cplusplus
extern "C++" {
#endif

#include "mimalloc/mimalloc.h"
#include "mimalloc/mimalloc/types.h"
#include "mimalloc/mimalloc/internal.h"

#ifdef __cplusplus
}
#endif

#endif  // WITH_MIMALLOC

#ifdef Py_GIL_DISABLED
struct _mimalloc_interp_state {
    // When exiting, threads place any segments with live blocks in this
    // shared pool for other threads to claim and reuse.
    mi_abandoned_pool_t abandoned_pool;
};

struct _mimalloc_thread_state {
    mi_heap_t *current_object_heap;
    mi_heap_t heaps[_Py_MIMALLOC_HEAP_COUNT];
    mi_tld_t tld;
    int initialized;
    struct llist_node page_list;
};
#endif

#endif // Py_INTERNAL_MIMALLOC_H
|
||||
99
extern/include/python/internal/pycore_modsupport.h
vendored
Normal file
99
extern/include/python/internal/pycore_modsupport.h
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
#ifndef Py_INTERNAL_MODSUPPORT_H
|
||||
#define Py_INTERNAL_MODSUPPORT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
extern int _PyArg_NoKwnames(const char *funcname, PyObject *kwnames);
|
||||
#define _PyArg_NoKwnames(funcname, kwnames) \
|
||||
((kwnames) == NULL || _PyArg_NoKwnames((funcname), (kwnames)))
|
||||
|
||||
// Export for '_bz2' shared extension
|
||||
PyAPI_FUNC(int) _PyArg_NoPositional(const char *funcname, PyObject *args);
|
||||
#define _PyArg_NoPositional(funcname, args) \
|
||||
((args) == NULL || _PyArg_NoPositional((funcname), (args)))
|
||||
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyArg_NoKeywords(const char *funcname, PyObject *kwargs);
|
||||
#define _PyArg_NoKeywords(funcname, kwargs) \
|
||||
((kwargs) == NULL || _PyArg_NoKeywords((funcname), (kwargs)))
|
||||
|
||||
// Export for 'zlib' shared extension
|
||||
PyAPI_FUNC(int) _PyArg_CheckPositional(const char *, Py_ssize_t,
|
||||
Py_ssize_t, Py_ssize_t);
|
||||
#define _Py_ANY_VARARGS(n) ((n) == PY_SSIZE_T_MAX)
|
||||
#define _PyArg_CheckPositional(funcname, nargs, min, max) \
|
||||
((!_Py_ANY_VARARGS(max) && (min) <= (nargs) && (nargs) <= (max)) \
|
||||
|| _PyArg_CheckPositional((funcname), (nargs), (min), (max)))
|
||||
|
||||
extern PyObject ** _Py_VaBuildStack(
|
||||
PyObject **small_stack,
|
||||
Py_ssize_t small_stack_len,
|
||||
const char *format,
|
||||
va_list va,
|
||||
Py_ssize_t *p_nargs);
|
||||
|
||||
extern PyObject* _PyModule_CreateInitialized(PyModuleDef*, int apiver);
|
||||
|
||||
// Export for '_curses' shared extension
|
||||
PyAPI_FUNC(int) _PyArg_ParseStack(
|
||||
PyObject *const *args,
|
||||
Py_ssize_t nargs,
|
||||
const char *format,
|
||||
...);
|
||||
|
||||
extern int _PyArg_UnpackStack(
|
||||
PyObject *const *args,
|
||||
Py_ssize_t nargs,
|
||||
const char *name,
|
||||
Py_ssize_t min,
|
||||
Py_ssize_t max,
|
||||
...);
|
||||
|
||||
// Export for '_heapq' shared extension
|
||||
PyAPI_FUNC(void) _PyArg_BadArgument(
|
||||
const char *fname,
|
||||
const char *displayname,
|
||||
const char *expected,
|
||||
PyObject *arg);
|
||||
|
||||
// --- _PyArg_Parser API ---------------------------------------------------
|
||||
|
||||
// Export for '_dbm' shared extension
|
||||
PyAPI_FUNC(int) _PyArg_ParseStackAndKeywords(
|
||||
PyObject *const *args,
|
||||
Py_ssize_t nargs,
|
||||
PyObject *kwnames,
|
||||
struct _PyArg_Parser *,
|
||||
...);
|
||||
|
||||
// Export for 'math' shared extension
|
||||
PyAPI_FUNC(PyObject * const *) _PyArg_UnpackKeywords(
|
||||
PyObject *const *args,
|
||||
Py_ssize_t nargs,
|
||||
PyObject *kwargs,
|
||||
PyObject *kwnames,
|
||||
struct _PyArg_Parser *parser,
|
||||
int minpos,
|
||||
int maxpos,
|
||||
int minkw,
|
||||
int varpos,
|
||||
PyObject **buf);
|
||||
#define _PyArg_UnpackKeywords(args, nargs, kwargs, kwnames, parser, minpos, maxpos, minkw, varpos, buf) \
|
||||
(((minkw) == 0 && (kwargs) == NULL && (kwnames) == NULL && \
|
||||
(minpos) <= (nargs) && ((varpos) || (nargs) <= (maxpos)) && (args) != NULL) ? \
|
||||
(args) : \
|
||||
_PyArg_UnpackKeywords((args), (nargs), (kwargs), (kwnames), (parser), \
|
||||
(minpos), (maxpos), (minkw), (varpos), (buf)))
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // !Py_INTERNAL_MODSUPPORT_H
|
||||
|
||||
62
extern/include/python/internal/pycore_moduleobject.h
vendored
Normal file
62
extern/include/python/internal/pycore_moduleobject.h
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
#ifndef Py_INTERNAL_MODULEOBJECT_H
|
||||
#define Py_INTERNAL_MODULEOBJECT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
extern void _PyModule_Clear(PyObject *);
|
||||
extern void _PyModule_ClearDict(PyObject *);
|
||||
extern int _PyModuleSpec_IsInitializing(PyObject *);
|
||||
extern int _PyModuleSpec_GetFileOrigin(PyObject *, PyObject **);
|
||||
extern int _PyModule_IsPossiblyShadowing(PyObject *);
|
||||
|
||||
extern int _PyModule_IsExtension(PyObject *obj);
|
||||
|
||||
// Concrete layout of a module object (instances of PyModule_Type).
// Do not reorder fields: this is the internal ABI shared with the
// accessors below.
typedef struct {
    PyObject_HEAD
    PyObject *md_dict;       // the module namespace (__dict__)
    PyModuleDef *md_def;     // module definition; see _PyModule_GetDef()
    void *md_state;          // per-module state; see _PyModule_GetState()
    PyObject *md_weaklist;   // weak-reference support slot
    // for logging purposes after md_dict is cleared
    PyObject *md_name;
#ifdef Py_GIL_DISABLED
    void *md_gil;            // free-threaded build only — presumably the
                             // per-module interpreter lock state; confirm
                             // against moduleobject.c
#endif
} PyModuleObject;
|
||||
|
||||
// Return the PyModuleDef stored on a module object.
// `mod` must be a module (asserted in debug builds).
static inline PyModuleDef* _PyModule_GetDef(PyObject *mod) {
    assert(PyModule_Check(mod));
    PyModuleObject *m = (PyModuleObject *)mod;
    return m->md_def;
}
|
||||
|
||||
// Return the module's per-module state pointer (md_state).
// `mod` must be a module (asserted in debug builds).
static inline void* _PyModule_GetState(PyObject* mod) {
    assert(PyModule_Check(mod));
    PyModuleObject *m = (PyModuleObject *)mod;
    return m->md_state;
}
|
||||
|
||||
// Return the module's __dict__ as a borrowed reference.
// Must not be used after module_clear(mod) has run: md_dict is NULL then,
// which the assert below catches in debug builds.
static inline PyObject* _PyModule_GetDict(PyObject *mod) {
    assert(PyModule_Check(mod));
    PyModuleObject *m = (PyModuleObject *)mod;
    assert(m->md_dict != NULL);
    return m->md_dict;  // borrowed reference
}
|
||||
|
||||
extern PyObject * _PyModule_GetFilenameObject(PyObject *);
|
||||
extern Py_ssize_t _PyModule_GetFilenameUTF8(
|
||||
PyObject *module,
|
||||
char *buffer,
|
||||
Py_ssize_t maxlen);
|
||||
|
||||
PyObject* _Py_module_getattro_impl(PyModuleObject *m, PyObject *name, int suppress);
|
||||
PyObject* _Py_module_getattro(PyObject *m, PyObject *name);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_MODULEOBJECT_H */
|
||||
21
extern/include/python/internal/pycore_namespace.h
vendored
Normal file
21
extern/include/python/internal/pycore_namespace.h
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
// Simple namespace object interface
|
||||
|
||||
#ifndef Py_INTERNAL_NAMESPACE_H
|
||||
#define Py_INTERNAL_NAMESPACE_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
extern PyTypeObject _PyNamespace_Type;
|
||||
|
||||
// Export for '_testmultiphase' shared extension
|
||||
PyAPI_FUNC(PyObject*) _PyNamespace_New(PyObject *kwds);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // !Py_INTERNAL_NAMESPACE_H
|
||||
1029
extern/include/python/internal/pycore_object.h
vendored
Normal file
1029
extern/include/python/internal/pycore_object.h
vendored
Normal file
File diff suppressed because it is too large
Load Diff
71
extern/include/python/internal/pycore_object_alloc.h
vendored
Normal file
71
extern/include/python/internal/pycore_object_alloc.h
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
#ifndef Py_INTERNAL_OBJECT_ALLOC_H
|
||||
#define Py_INTERNAL_OBJECT_ALLOC_H
|
||||
|
||||
#include "pycore_object.h" // _PyType_HasFeature()
|
||||
#include "pycore_pystate.h" // _PyThreadState_GET()
|
||||
#include "pycore_tstate.h" // _PyThreadStateImpl
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
static inline mi_heap_t *
|
||||
_PyObject_GetAllocationHeap(_PyThreadStateImpl *tstate, PyTypeObject *tp)
|
||||
{
|
||||
struct _mimalloc_thread_state *m = &tstate->mimalloc;
|
||||
if (_PyType_HasFeature(tp, Py_TPFLAGS_PREHEADER)) {
|
||||
return &m->heaps[_Py_MIMALLOC_HEAP_GC_PRE];
|
||||
}
|
||||
else if (_PyType_IS_GC(tp)) {
|
||||
return &m->heaps[_Py_MIMALLOC_HEAP_GC];
|
||||
}
|
||||
else {
|
||||
return &m->heaps[_Py_MIMALLOC_HEAP_OBJECT];
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
// Sets the heap used for PyObject_Malloc(), PyObject_Realloc(), etc. calls in
|
||||
// Py_GIL_DISABLED builds. We use different heaps depending on if the object
|
||||
// supports GC and if it has a pre-header. We smuggle the choice of heap
|
||||
// through the _mimalloc_thread_state. In the default build, this simply
|
||||
// calls PyObject_Malloc().
|
||||
static inline void *
|
||||
_PyObject_MallocWithType(PyTypeObject *tp, size_t size)
|
||||
{
|
||||
#ifdef Py_GIL_DISABLED
|
||||
_PyThreadStateImpl *tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
|
||||
struct _mimalloc_thread_state *m = &tstate->mimalloc;
|
||||
m->current_object_heap = _PyObject_GetAllocationHeap(tstate, tp);
|
||||
#endif
|
||||
void *mem = PyObject_Malloc(size);
|
||||
#ifdef Py_GIL_DISABLED
|
||||
m->current_object_heap = &m->heaps[_Py_MIMALLOC_HEAP_OBJECT];
|
||||
#endif
|
||||
return mem;
|
||||
}
|
||||
|
||||
static inline void *
|
||||
_PyObject_ReallocWithType(PyTypeObject *tp, void *ptr, size_t size)
|
||||
{
|
||||
#ifdef Py_GIL_DISABLED
|
||||
_PyThreadStateImpl *tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
|
||||
struct _mimalloc_thread_state *m = &tstate->mimalloc;
|
||||
m->current_object_heap = _PyObject_GetAllocationHeap(tstate, tp);
|
||||
#endif
|
||||
void *mem = PyObject_Realloc(ptr, size);
|
||||
#ifdef Py_GIL_DISABLED
|
||||
m->current_object_heap = &m->heaps[_Py_MIMALLOC_HEAP_OBJECT];
|
||||
#endif
|
||||
return mem;
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // !Py_INTERNAL_OBJECT_ALLOC_H
|
||||
32
extern/include/python/internal/pycore_object_deferred.h
vendored
Normal file
32
extern/include/python/internal/pycore_object_deferred.h
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
#ifndef Py_INTERNAL_OBJECT_DEFERRED_H
|
||||
#define Py_INTERNAL_OBJECT_DEFERRED_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "pycore_gc.h"
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
// Mark an object as supporting deferred reference counting. This is a no-op
|
||||
// in the default (with GIL) build. Objects that use deferred reference
|
||||
// counting should be tracked by the GC so that they are eventually collected.
|
||||
extern void _PyObject_SetDeferredRefcount(PyObject *op);
|
||||
|
||||
// Report whether `op` has the deferred-reference-counting GC bit set.
// Always returns 0 in the default (with-GIL) build, where deferred
// reference counting does not exist.
static inline int
_PyObject_HasDeferredRefcount(PyObject *op)
{
#ifdef Py_GIL_DISABLED
    return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_DEFERRED);
#else
    return 0;
#endif
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // !Py_INTERNAL_OBJECT_DEFERRED_H
|
||||
95
extern/include/python/internal/pycore_object_stack.h
vendored
Normal file
95
extern/include/python/internal/pycore_object_stack.h
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
#ifndef Py_INTERNAL_OBJECT_STACK_H
|
||||
#define Py_INTERNAL_OBJECT_STACK_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
// _PyObjectStack is a stack of Python objects implemented as a linked list of
|
||||
// fixed size buffers.
|
||||
|
||||
// Chosen so that _PyObjectStackChunk is a power-of-two size.
#define _Py_OBJECT_STACK_CHUNK_SIZE 254

// One fixed-size buffer in the stack's singly-linked list of chunks.
typedef struct _PyObjectStackChunk {
    struct _PyObjectStackChunk *prev;   // next-older chunk, or NULL
    Py_ssize_t n;                       // number of valid entries in objs
    PyObject *objs[_Py_OBJECT_STACK_CHUNK_SIZE];
} _PyObjectStackChunk;

// Stack of PyObject* implemented as a linked list of fixed-size chunks;
// `head` is the most recently allocated chunk (NULL when empty).
typedef struct _PyObjectStack {
    _PyObjectStackChunk *head;
} _PyObjectStack;
|
||||
|
||||
|
||||
extern _PyObjectStackChunk *
|
||||
_PyObjectStackChunk_New(void);
|
||||
|
||||
extern void
|
||||
_PyObjectStackChunk_Free(_PyObjectStackChunk *);
|
||||
|
||||
// Push an item onto the stack. Return -1 on allocation failure, 0 on success.
|
||||
static inline int
|
||||
_PyObjectStack_Push(_PyObjectStack *stack, PyObject *obj)
|
||||
{
|
||||
_PyObjectStackChunk *buf = stack->head;
|
||||
if (buf == NULL || buf->n == _Py_OBJECT_STACK_CHUNK_SIZE) {
|
||||
buf = _PyObjectStackChunk_New();
|
||||
if (buf == NULL) {
|
||||
return -1;
|
||||
}
|
||||
buf->prev = stack->head;
|
||||
buf->n = 0;
|
||||
stack->head = buf;
|
||||
}
|
||||
|
||||
assert(buf->n >= 0 && buf->n < _Py_OBJECT_STACK_CHUNK_SIZE);
|
||||
buf->objs[buf->n] = obj;
|
||||
buf->n++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Pop the top item from the stack. Return NULL if the stack is empty.
|
||||
static inline PyObject *
|
||||
_PyObjectStack_Pop(_PyObjectStack *stack)
|
||||
{
|
||||
_PyObjectStackChunk *buf = stack->head;
|
||||
if (buf == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
assert(buf->n > 0 && buf->n <= _Py_OBJECT_STACK_CHUNK_SIZE);
|
||||
buf->n--;
|
||||
PyObject *obj = buf->objs[buf->n];
|
||||
if (buf->n == 0) {
|
||||
stack->head = buf->prev;
|
||||
_PyObjectStackChunk_Free(buf);
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
static inline Py_ssize_t
|
||||
_PyObjectStack_Size(_PyObjectStack *stack)
|
||||
{
|
||||
Py_ssize_t size = 0;
|
||||
for (_PyObjectStackChunk *buf = stack->head; buf != NULL; buf = buf->prev) {
|
||||
size += buf->n;
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
// Merge src into dst, leaving src empty
|
||||
extern void
|
||||
_PyObjectStack_Merge(_PyObjectStack *dst, _PyObjectStack *src);
|
||||
|
||||
// Remove all items from the stack
|
||||
extern void
|
||||
_PyObjectStack_Clear(_PyObjectStack *stack);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // !Py_INTERNAL_OBJECT_STACK_H
|
||||
49
extern/include/python/internal/pycore_object_state.h
vendored
Normal file
49
extern/include/python/internal/pycore_object_state.h
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
#ifndef Py_INTERNAL_OBJECT_STATE_H
|
||||
#define Py_INTERNAL_OBJECT_STATE_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_freelist_state.h" // _Py_freelists
|
||||
#include "pycore_hashtable.h" // _Py_hashtable_t
|
||||
|
||||
|
||||
/* Reference tracer state */
struct _reftracer_runtime_state {
    PyRefTracer tracer_func;   // registered tracer callback — presumably
                               // invoked on object lifecycle events; see
                               // PyRefTracer_SetTracer for the contract
    void* tracer_data;         // opaque pointer handed back to tracer_func
};
|
||||
|
||||
|
||||
// Runtime-wide (cross-interpreter) object bookkeeping.
struct _py_object_runtime_state {
#ifdef Py_REF_DEBUG
    Py_ssize_t interpreter_leaks;   // refcount-debug builds only
#endif
    int _not_used;   // placeholder so the struct is never empty
};
|
||||
|
||||
// Per-interpreter object state (freelists, refcount debugging, tracing).
struct _py_object_state {
#if !defined(Py_GIL_DISABLED)
    // Object freelists; in the free-threaded build these live per-thread
    // instead — TODO confirm against pycore_freelist_state.h
    struct _Py_freelists freelists;
#endif
#ifdef Py_REF_DEBUG
    Py_ssize_t reftotal;   // running total of refcount operations
#endif
#ifdef Py_TRACE_REFS
    // Hash table storing all objects. The key is the object pointer
    // (PyObject*) and the value is always the number 1 (as uintptr_t).
    // See _PyRefchain_IsTraced() and _PyRefchain_Trace() functions.
    _Py_hashtable_t *refchain;
#endif
    int _not_used;   // placeholder so the struct is never empty
};
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_OBJECT_STATE_H */
|
||||
702
extern/include/python/internal/pycore_obmalloc.h
vendored
Normal file
702
extern/include/python/internal/pycore_obmalloc.h
vendored
Normal file
@@ -0,0 +1,702 @@
|
||||
#ifndef Py_INTERNAL_OBMALLOC_H
|
||||
#define Py_INTERNAL_OBMALLOC_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
typedef unsigned int pymem_uint; /* assuming >= 16 bits */
|
||||
|
||||
#undef uint
|
||||
#define uint pymem_uint
|
||||
|
||||
|
||||
/* An object allocator for Python.
|
||||
|
||||
Here is an introduction to the layers of the Python memory architecture,
|
||||
showing where the object allocator is actually used (layer +2), It is
|
||||
called for every object allocation and deallocation (PyObject_New/Del),
|
||||
unless the object-specific allocators implement a proprietary allocation
|
||||
scheme (ex.: ints use a simple free list). This is also the place where
|
||||
the cyclic garbage collector operates selectively on container objects.
|
||||
|
||||
|
||||
Object-specific allocators
|
||||
_____ ______ ______ ________
|
||||
[ int ] [ dict ] [ list ] ... [ string ] Python core |
|
||||
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
|
||||
_______________________________ | |
|
||||
[ Python's object allocator ] | |
|
||||
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
|
||||
______________________________________________________________ |
|
||||
[ Python's raw memory allocator (PyMem_ API) ] |
|
||||
+1 | <----- Python memory (under PyMem manager's control) ------> | |
|
||||
__________________________________________________________________
|
||||
[ Underlying general-purpose allocator (ex: C library malloc) ]
|
||||
0 | <------ Virtual memory allocated for the python process -------> |
|
||||
|
||||
=========================================================================
|
||||
_______________________________________________________________________
|
||||
[ OS-specific Virtual Memory Manager (VMM) ]
|
||||
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
|
||||
__________________________________ __________________________________
|
||||
[ ] [ ]
|
||||
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |
|
||||
|
||||
*/
|
||||
/*==========================================================================*/
|
||||
|
||||
/* A fast, special-purpose memory allocator for small blocks, to be used
|
||||
on top of a general-purpose malloc -- heavily based on previous art. */
|
||||
|
||||
/* Vladimir Marangozov -- August 2000 */
|
||||
|
||||
/*
|
||||
* "Memory management is where the rubber meets the road -- if we do the wrong
|
||||
* thing at any level, the results will not be good. And if we don't make the
|
||||
* levels work well together, we are in serious trouble." (1)
|
||||
*
|
||||
* (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
|
||||
* "Dynamic Storage Allocation: A Survey and Critical Review",
|
||||
* in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
|
||||
*/
|
||||
|
||||
/* #undef WITH_MEMORY_LIMITS */ /* disable mem limit checks */
|
||||
|
||||
/*==========================================================================*/
|
||||
|
||||
/*
|
||||
* Allocation strategy abstract:
|
||||
*
|
||||
* For small requests, the allocator sub-allocates <Big> blocks of memory.
|
||||
* Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
|
||||
* system's allocator.
|
||||
*
|
||||
* Small requests are grouped in size classes spaced 8 bytes apart, due
|
||||
* to the required valid alignment of the returned address. Requests of
|
||||
* a particular size are serviced from memory pools of 4K (one VMM page).
|
||||
* Pools are fragmented on demand and contain free lists of blocks of one
|
||||
* particular size class. In other words, there is a fixed-size allocator
|
||||
* for each size class. Free pools are shared by the different allocators
|
||||
* thus minimizing the space reserved for a particular size class.
|
||||
*
|
||||
* This allocation strategy is a variant of what is known as "simple
|
||||
* segregated storage based on array of free lists". The main drawback of
|
||||
* simple segregated storage is that we might end up with lot of reserved
|
||||
* memory for the different free lists, which degenerate in time. To avoid
|
||||
* this, we partition each free list in pools and we share dynamically the
|
||||
* reserved space between all free lists. This technique is quite efficient
|
||||
* for memory intensive programs which allocate mainly small-sized blocks.
|
||||
*
|
||||
* For small requests we have the following table:
|
||||
*
|
||||
* Request in bytes Size of allocated block Size class idx
|
||||
* ----------------------------------------------------------------
|
||||
* 1-8 8 0
|
||||
* 9-16 16 1
|
||||
* 17-24 24 2
|
||||
* 25-32 32 3
|
||||
* 33-40 40 4
|
||||
* 41-48 48 5
|
||||
* 49-56 56 6
|
||||
* 57-64 64 7
|
||||
* 65-72 72 8
|
||||
* ... ... ...
|
||||
* 497-504 504 62
|
||||
* 505-512 512 63
|
||||
*
|
||||
* 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
|
||||
* allocator.
|
||||
*/
|
||||
|
||||
/*==========================================================================*/
|
||||
|
||||
/*
|
||||
* -- Main tunable settings section --
|
||||
*/
|
||||
|
||||
/*
|
||||
* Alignment of addresses returned to the user. 8-bytes alignment works
|
||||
* on most current architectures (with 32-bit or 64-bit address buses).
|
||||
* The alignment value is also used for grouping small requests in size
|
||||
* classes spaced ALIGNMENT bytes apart.
|
||||
*
|
||||
* You shouldn't change this unless you know what you are doing.
|
||||
*/
|
||||
|
||||
#if SIZEOF_VOID_P > 4
|
||||
#define ALIGNMENT 16 /* must be 2^N */
|
||||
#define ALIGNMENT_SHIFT 4
|
||||
#else
|
||||
#define ALIGNMENT 8 /* must be 2^N */
|
||||
#define ALIGNMENT_SHIFT 3
|
||||
#endif
|
||||
|
||||
/* Return the number of bytes in size class I, as a uint. */
|
||||
#define INDEX2SIZE(I) (((pymem_uint)(I) + 1) << ALIGNMENT_SHIFT)
|
||||
|
||||
/*
|
||||
* Max size threshold below which malloc requests are considered to be
|
||||
* small enough in order to use preallocated memory pools. You can tune
|
||||
* this value according to your application behaviour and memory needs.
|
||||
*
|
||||
* Note: a size threshold of 512 guarantees that newly created dictionaries
|
||||
* will be allocated from preallocated memory pools on 64-bit.
|
||||
*
|
||||
* The following invariants must hold:
|
||||
* 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
|
||||
* 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
|
||||
*
|
||||
* Although not required, for better performance and space efficiency,
|
||||
* it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
|
||||
*/
|
||||
#define SMALL_REQUEST_THRESHOLD 512
|
||||
#define NB_SMALL_SIZE_CLASSES (SMALL_REQUEST_THRESHOLD / ALIGNMENT)
|
||||
|
||||
/*
|
||||
* The system's VMM page size can be obtained on most unices with a
|
||||
* getpagesize() call or deduced from various header files. To make
|
||||
* things simpler, we assume that it is 4K, which is OK for most systems.
|
||||
* It is probably better if this is the native page size, but it doesn't
|
||||
* have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
|
||||
* size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
|
||||
* violation fault. 4K is apparently OK for all the platforms that python
|
||||
* currently targets.
|
||||
*/
|
||||
#define SYSTEM_PAGE_SIZE (4 * 1024)
|
||||
|
||||
/*
|
||||
* Maximum amount of memory managed by the allocator for small requests.
|
||||
*/
|
||||
#ifdef WITH_MEMORY_LIMITS
|
||||
#ifndef SMALL_MEMORY_LIMIT
|
||||
#define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if !defined(WITH_PYMALLOC_RADIX_TREE)
|
||||
/* Use radix-tree to track arena memory regions, for address_in_range().
|
||||
* Enable by default since it allows larger pool sizes. Can be disabled
|
||||
* using -DWITH_PYMALLOC_RADIX_TREE=0 */
|
||||
#define WITH_PYMALLOC_RADIX_TREE 1
|
||||
#endif
|
||||
|
||||
#if SIZEOF_VOID_P > 4
|
||||
/* on 64-bit platforms use larger pools and arenas if we can */
|
||||
#define USE_LARGE_ARENAS
|
||||
#if WITH_PYMALLOC_RADIX_TREE
|
||||
/* large pools only supported if radix-tree is enabled */
|
||||
#define USE_LARGE_POOLS
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
|
||||
* on a page boundary. This is a reserved virtual address space for the
|
||||
* current process (obtained through a malloc()/mmap() call). In no way this
|
||||
* means that the memory arenas will be used entirely. A malloc(<Big>) is
|
||||
* usually an address range reservation for <Big> bytes, unless all pages within
|
||||
* this space are referenced subsequently. So malloc'ing big blocks and not
|
||||
* using them does not mean "wasting memory". It's an addressable range
|
||||
* wastage...
|
||||
*
|
||||
* Arenas are allocated with mmap() on systems supporting anonymous memory
|
||||
* mappings to reduce heap fragmentation.
|
||||
*/
|
||||
#ifdef USE_LARGE_ARENAS
|
||||
#define ARENA_BITS 20 /* 1 MiB */
|
||||
#else
|
||||
#define ARENA_BITS 18 /* 256 KiB */
|
||||
#endif
|
||||
#define ARENA_SIZE (1 << ARENA_BITS)
|
||||
#define ARENA_SIZE_MASK (ARENA_SIZE - 1)
|
||||
|
||||
#ifdef WITH_MEMORY_LIMITS
|
||||
#define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Size of the pools used for small blocks. Must be a power of 2.
|
||||
*/
|
||||
#ifdef USE_LARGE_POOLS
|
||||
#define POOL_BITS 14 /* 16 KiB */
|
||||
#else
|
||||
#define POOL_BITS 12 /* 4 KiB */
|
||||
#endif
|
||||
#define POOL_SIZE (1 << POOL_BITS)
|
||||
#define POOL_SIZE_MASK (POOL_SIZE - 1)
|
||||
|
||||
#if !WITH_PYMALLOC_RADIX_TREE
|
||||
#if POOL_SIZE != SYSTEM_PAGE_SIZE
|
||||
# error "pool size must be equal to system page size"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define MAX_POOLS_IN_ARENA (ARENA_SIZE / POOL_SIZE)
|
||||
#if MAX_POOLS_IN_ARENA * POOL_SIZE != ARENA_SIZE
|
||||
# error "arena size not an exact multiple of pool size"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* -- End of tunable settings section --
|
||||
*/
|
||||
|
||||
/*==========================================================================*/
|
||||
|
||||
/* When you say memory, my mind reasons in terms of (pointers to) blocks */
|
||||
typedef uint8_t pymem_block;
|
||||
|
||||
/* Pool for small blocks.  Lives at the start of each pool; nextpool and
 * prevpool must immediately follow the first two pointer-sized members —
 * the usedpools table trick below depends on that exact layout. */
struct pool_header {
    union { pymem_block *_padding;      /* forces pointer alignment/size  */
            uint count; } ref;          /* number of allocated blocks     */
    pymem_block *freeblock;             /* pool's free list head          */
    struct pool_header *nextpool;       /* see "Pool table" for meaning   */
    struct pool_header *prevpool;       /* "                              */
    uint arenaindex;                    /* index into arenas of base adr  */
    uint szidx;                         /* block size class index         */
    uint nextoffset;                    /* bytes to virgin block          */
    uint maxnextoffset;                 /* largest valid nextoffset       */
};

typedef struct pool_header *poolp;
|
||||
|
||||
/* Record keeping for arenas.  One arena_object per slot in the `arenas`
 * vector; the arena memory itself is allocated separately. */
struct arena_object {
    /* The address of the arena, as returned by malloc.  Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uintptr_t address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    pymem_block* pool_address;

    /* The number of available pools in the arena: free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use.  `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};
|
||||
|
||||
#define POOL_OVERHEAD _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)
|
||||
|
||||
#define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */
|
||||
|
||||
/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
|
||||
#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))
|
||||
|
||||
/* Return total number of blocks in pool of size index I, as a uint. */
|
||||
#define NUMBLOCKS(I) ((pymem_uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
|
||||
|
||||
/*==========================================================================*/
|
||||
|
||||
/*
|
||||
* Pool table -- headed, circular, doubly-linked lists of partially used pools.
|
||||
|
||||
This is involved. For an index i, usedpools[i+i] is the header for a list of
|
||||
all partially used pools holding small blocks with "size class idx" i. So
|
||||
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
|
||||
16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.
|
||||
|
||||
Pools are carved off an arena's highwater mark (an arena_object's pool_address
|
||||
member) as needed. Once carved off, a pool is in one of three states forever
|
||||
after:
|
||||
|
||||
used == partially used, neither empty nor full
|
||||
At least one block in the pool is currently allocated, and at least one
|
||||
block in the pool is not currently allocated (note this implies a pool
|
||||
has room for at least two blocks).
|
||||
This is a pool's initial state, as a pool is created only when malloc
|
||||
needs space.
|
||||
The pool holds blocks of a fixed size, and is in the circular list headed
|
||||
at usedpools[i] (see above). It's linked to the other used pools of the
|
||||
same size class via the pool_header's nextpool and prevpool members.
|
||||
If all but one block is currently allocated, a malloc can cause a
|
||||
transition to the full state. If all but one block is not currently
|
||||
allocated, a free can cause a transition to the empty state.
|
||||
|
||||
full == all the pool's blocks are currently allocated
|
||||
On transition to full, a pool is unlinked from its usedpools[] list.
|
||||
It's not linked to from anything then anymore, and its nextpool and
|
||||
prevpool members are meaningless until it transitions back to used.
|
||||
A free of a block in a full pool puts the pool back in the used state.
|
||||
Then it's linked in at the front of the appropriate usedpools[] list, so
|
||||
that the next allocation for its size class will reuse the freed block.
|
||||
|
||||
empty == all the pool's blocks are currently available for allocation
|
||||
On transition to empty, a pool is unlinked from its usedpools[] list,
|
||||
and linked to the front of its arena_object's singly-linked freepools list,
|
||||
via its nextpool member. The prevpool member has no meaning in this case.
|
||||
Empty pools have no inherent size class: the next time a malloc finds
|
||||
an empty list in usedpools[], it takes the first pool off of freepools.
|
||||
If the size class needed happens to be the same as the size class the pool
|
||||
last had, some pool initialization can be skipped.
|
||||
|
||||
|
||||
Block Management
|
||||
|
||||
Blocks within pools are again carved out as needed. pool->freeblock points to
|
||||
the start of a singly-linked list of free blocks within the pool. When a
|
||||
block is freed, it's inserted at the front of its pool's freeblock list. Note
|
||||
that the available blocks in a pool are *not* linked all together when a pool
|
||||
is initialized. Instead only "the first two" (lowest addresses) blocks are
|
||||
set up, returning the first such block, and setting pool->freeblock to a
|
||||
one-block list holding the second such block. This is consistent with that
|
||||
pymalloc strives at all levels (arena, pool, and block) never to touch a piece
|
||||
of memory until it's actually needed.
|
||||
|
||||
So long as a pool is in the used state, we're certain there *is* a block
|
||||
available for allocating, and pool->freeblock is not NULL. If pool->freeblock
|
||||
points to the end of the free list before we've carved the entire pool into
|
||||
blocks, that means we simply haven't yet gotten to one of the higher-address
|
||||
blocks. The offset from the pool_header to the start of "the next" virgin
|
||||
block is stored in the pool_header nextoffset member, and the largest value
|
||||
of nextoffset that makes sense is stored in the maxnextoffset member when a
|
||||
pool is initialized. All the blocks in a pool have been passed out at least
|
||||
once when and only when nextoffset > maxnextoffset.
|
||||
|
||||
|
||||
Major obscurity: While the usedpools vector is declared to have poolp
|
||||
entries, it doesn't really. It really contains two pointers per (conceptual)
|
||||
poolp entry, the nextpool and prevpool members of a pool_header. The
|
||||
excruciating initialization code below fools C so that
|
||||
|
||||
usedpool[i+i]
|
||||
|
||||
"acts like" a genuine poolp, but only so long as you only reference its
|
||||
nextpool and prevpool members. The "- 2*sizeof(pymem_block *)" gibberish is
|
||||
compensating for that a pool_header's nextpool and prevpool members
|
||||
immediately follow a pool_header's first two members:
|
||||
|
||||
union { pymem_block *_padding;
|
||||
uint count; } ref;
|
||||
pymem_block *freeblock;
|
||||
|
||||
each of which consume sizeof(pymem_block *) bytes. So what usedpools[i+i] really
|
||||
contains is a fudged-up pointer p such that *if* C believes it's a poolp
|
||||
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
|
||||
circular list is empty).
|
||||
|
||||
It's unclear why the usedpools setup is so convoluted. It could be to
|
||||
minimize the amount of cache required to hold this heavily-referenced table
|
||||
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
|
||||
referencing code has to remember to "double the index" and doing so isn't
|
||||
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
|
||||
on that C doesn't insert any padding anywhere in a pool_header at or before
|
||||
the prevpool member.
|
||||
**************************************************************************** */
|
||||
|
||||
// Two table slots per size class (nextpool/prevpool pair), rounded up to
// a multiple of 16 slots.
#define OBMALLOC_USED_POOLS_SIZE (2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8)

// The usedpools table: headers for the circular doubly-linked lists of
// partially used pools, one (conceptual) entry per size class — see the
// "Major obscurity" comment above for why entries are doubled.
struct _obmalloc_pools {
    poolp used[OBMALLOC_USED_POOLS_SIZE];
};
|
||||
|
||||
|
||||
/*==========================================================================
|
||||
Arena management.
|
||||
|
||||
`arenas` is a vector of arena_objects. It contains maxarenas entries, some of
|
||||
which may not be currently used (== they're arena_objects that aren't
|
||||
currently associated with an allocated arena). Note that arenas proper are
|
||||
separately malloc'ed.
|
||||
|
||||
Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
|
||||
we do try to free() arenas, and use some mild heuristic strategies to increase
|
||||
the likelihood that arenas eventually can be freed.
|
||||
|
||||
unused_arena_objects
|
||||
|
||||
This is a singly-linked list of the arena_objects that are currently not
|
||||
being used (no arena is associated with them). Objects are taken off the
|
||||
head of the list in new_arena(), and are pushed on the head of the list in
|
||||
PyObject_Free() when the arena is empty. Key invariant: an arena_object
|
||||
is on this list if and only if its .address member is 0.
|
||||
|
||||
usable_arenas
|
||||
|
||||
This is a doubly-linked list of the arena_objects associated with arenas
|
||||
that have pools available. These pools are either waiting to be reused,
|
||||
or have not been used before. The list is sorted to have the most-
|
||||
allocated arenas first (ascending order based on the nfreepools member).
|
||||
This means that the next allocation will come from a heavily used arena,
|
||||
which gives the nearly empty arenas a chance to be returned to the system.
|
||||
In my unscientific tests this dramatically improved the number of arenas
|
||||
that could be freed.
|
||||
|
||||
Note that an arena_object associated with an arena all of whose pools are
|
||||
currently in use isn't on either list.
|
||||
|
||||
Changed in Python 3.8: keeping usable_arenas sorted by number of free pools
|
||||
used to be done by one-at-a-time linear search when an arena's number of
|
||||
free pools changed. That could, overall, consume time quadratic in the
|
||||
number of arenas. That didn't really matter when there were only a few
|
||||
hundred arenas (typical!), but could be a timing disaster when there were
|
||||
hundreds of thousands. See bpo-37029.
|
||||
|
||||
Now we have a vector of "search fingers" to eliminate the need to search:
|
||||
nfp2lasta[nfp] returns the last ("rightmost") arena in usable_arenas
|
||||
with nfp free pools. This is NULL if and only if there is no arena with
|
||||
nfp free pools in usable_arenas.
|
||||
*/
|
||||
|
||||
/* How many arena_objects do we initially allocate?
|
||||
* 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
|
||||
* `arenas` vector.
|
||||
*/
|
||||
#define INITIAL_ARENA_OBJECTS 16
|
||||
|
||||
struct _obmalloc_mgmt {
|
||||
/* Array of objects used to track chunks of memory (arenas). */
|
||||
struct arena_object* arenas;
|
||||
/* Number of slots currently allocated in the `arenas` vector. */
|
||||
uint maxarenas;
|
||||
|
||||
/* The head of the singly-linked, NULL-terminated list of available
|
||||
* arena_objects.
|
||||
*/
|
||||
struct arena_object* unused_arena_objects;
|
||||
|
||||
/* The head of the doubly-linked, NULL-terminated at each end, list of
|
||||
* arena_objects associated with arenas that have pools available.
|
||||
*/
|
||||
struct arena_object* usable_arenas;
|
||||
|
||||
/* nfp2lasta[nfp] is the last arena in usable_arenas with nfp free pools */
|
||||
struct arena_object* nfp2lasta[MAX_POOLS_IN_ARENA + 1];
|
||||
|
||||
/* Number of arenas allocated that haven't been free()'d. */
|
||||
size_t narenas_currently_allocated;
|
||||
|
||||
/* Total number of times malloc() called to allocate an arena. */
|
||||
size_t ntimes_arena_allocated;
|
||||
/* High water mark (max value ever seen) for narenas_currently_allocated. */
|
||||
size_t narenas_highwater;
|
||||
|
||||
Py_ssize_t raw_allocated_blocks;
|
||||
};
|
||||
|
||||
|
||||
#if WITH_PYMALLOC_RADIX_TREE
|
||||
/*==========================================================================*/
|
||||
/* radix tree for tracking arena usage. If enabled, used to implement
|
||||
address_in_range().
|
||||
|
||||
memory address bit allocation for keys
|
||||
|
||||
64-bit pointers, IGNORE_BITS=0 and 2^20 arena size:
|
||||
15 -> MAP_TOP_BITS
|
||||
15 -> MAP_MID_BITS
|
||||
14 -> MAP_BOT_BITS
|
||||
20 -> ideal aligned arena
|
||||
----
|
||||
64
|
||||
|
||||
64-bit pointers, IGNORE_BITS=16, and 2^20 arena size:
|
||||
16 -> IGNORE_BITS
|
||||
10 -> MAP_TOP_BITS
|
||||
10 -> MAP_MID_BITS
|
||||
8 -> MAP_BOT_BITS
|
||||
20 -> ideal aligned arena
|
||||
----
|
||||
64
|
||||
|
||||
32-bit pointers and 2^18 arena size:
|
||||
14 -> MAP_BOT_BITS
|
||||
18 -> ideal aligned arena
|
||||
----
|
||||
32
|
||||
|
||||
*/
|
||||
|
||||
#if SIZEOF_VOID_P == 8
|
||||
|
||||
/* number of bits in a pointer */
|
||||
#define POINTER_BITS 64
|
||||
|
||||
/* High bits of memory addresses that will be ignored when indexing into the
|
||||
* radix tree. Setting this to zero is the safe default. For most 64-bit
|
||||
* machines, setting this to 16 would be safe. The kernel would not give
|
||||
* user-space virtual memory addresses that have significant information in
|
||||
* those high bits. The main advantage to setting IGNORE_BITS > 0 is that less
|
||||
* virtual memory will be used for the top and middle radix tree arrays. Those
|
||||
* arrays are allocated in the BSS segment and so will typically consume real
|
||||
* memory only if actually accessed.
|
||||
*/
|
||||
#define IGNORE_BITS 0
|
||||
|
||||
/* use the top and mid layers of the radix tree */
|
||||
#define USE_INTERIOR_NODES
|
||||
|
||||
#elif SIZEOF_VOID_P == 4
|
||||
|
||||
#define POINTER_BITS 32
|
||||
#define IGNORE_BITS 0
|
||||
|
||||
#else
|
||||
|
||||
/* Currently this code works for 64-bit or 32-bit pointers only. */
|
||||
#error "obmalloc radix tree requires 64-bit or 32-bit pointers."
|
||||
|
||||
#endif /* SIZEOF_VOID_P */
|
||||
|
||||
/* arena_coverage_t members require this to be true */
|
||||
#if ARENA_BITS >= 32
|
||||
# error "arena size must be < 2^32"
|
||||
#endif
|
||||
|
||||
/* the lower bits of the address that are not ignored */
|
||||
#define ADDRESS_BITS (POINTER_BITS - IGNORE_BITS)
|
||||
|
||||
#ifdef USE_INTERIOR_NODES
|
||||
/* number of bits used for MAP_TOP and MAP_MID nodes */
|
||||
#define INTERIOR_BITS ((ADDRESS_BITS - ARENA_BITS + 2) / 3)
|
||||
#else
|
||||
#define INTERIOR_BITS 0
|
||||
#endif
|
||||
|
||||
#define MAP_TOP_BITS INTERIOR_BITS
|
||||
#define MAP_TOP_LENGTH (1 << MAP_TOP_BITS)
|
||||
#define MAP_TOP_MASK (MAP_TOP_LENGTH - 1)
|
||||
|
||||
#define MAP_MID_BITS INTERIOR_BITS
|
||||
#define MAP_MID_LENGTH (1 << MAP_MID_BITS)
|
||||
#define MAP_MID_MASK (MAP_MID_LENGTH - 1)
|
||||
|
||||
#define MAP_BOT_BITS (ADDRESS_BITS - ARENA_BITS - 2*INTERIOR_BITS)
|
||||
#define MAP_BOT_LENGTH (1 << MAP_BOT_BITS)
|
||||
#define MAP_BOT_MASK (MAP_BOT_LENGTH - 1)
|
||||
|
||||
#define MAP_BOT_SHIFT ARENA_BITS
|
||||
#define MAP_MID_SHIFT (MAP_BOT_BITS + MAP_BOT_SHIFT)
|
||||
#define MAP_TOP_SHIFT (MAP_MID_BITS + MAP_MID_SHIFT)
|
||||
|
||||
#define AS_UINT(p) ((uintptr_t)(p))
|
||||
#define MAP_BOT_INDEX(p) ((AS_UINT(p) >> MAP_BOT_SHIFT) & MAP_BOT_MASK)
|
||||
#define MAP_MID_INDEX(p) ((AS_UINT(p) >> MAP_MID_SHIFT) & MAP_MID_MASK)
|
||||
#define MAP_TOP_INDEX(p) ((AS_UINT(p) >> MAP_TOP_SHIFT) & MAP_TOP_MASK)
|
||||
|
||||
#if IGNORE_BITS > 0
|
||||
/* Return the ignored part of the pointer address. Those bits should be same
|
||||
* for all valid pointers if IGNORE_BITS is set correctly.
|
||||
*/
|
||||
#define HIGH_BITS(p) (AS_UINT(p) >> ADDRESS_BITS)
|
||||
#else
|
||||
#define HIGH_BITS(p) 0
|
||||
#endif
|
||||
|
||||
|
||||
/* This is the leaf of the radix tree. See arena_map_mark_used() for the
|
||||
* meaning of these members. */
|
||||
typedef struct {
|
||||
int32_t tail_hi;
|
||||
int32_t tail_lo;
|
||||
} arena_coverage_t;
|
||||
|
||||
typedef struct arena_map_bot {
|
||||
/* The members tail_hi and tail_lo are accessed together. So, it
|
||||
* better to have them as an array of structs, rather than two
|
||||
* arrays.
|
||||
*/
|
||||
arena_coverage_t arenas[MAP_BOT_LENGTH];
|
||||
} arena_map_bot_t;
|
||||
|
||||
#ifdef USE_INTERIOR_NODES
|
||||
typedef struct arena_map_mid {
|
||||
struct arena_map_bot *ptrs[MAP_MID_LENGTH];
|
||||
} arena_map_mid_t;
|
||||
|
||||
typedef struct arena_map_top {
|
||||
struct arena_map_mid *ptrs[MAP_TOP_LENGTH];
|
||||
} arena_map_top_t;
|
||||
#endif
|
||||
|
||||
struct _obmalloc_usage {
|
||||
/* The root of radix tree. Note that by initializing like this, the memory
|
||||
* should be in the BSS. The OS will only memory map pages as the MAP_MID
|
||||
* nodes get used (OS pages are demand loaded as needed).
|
||||
*/
|
||||
#ifdef USE_INTERIOR_NODES
|
||||
arena_map_top_t arena_map_root;
|
||||
/* accounting for number of used interior nodes */
|
||||
int arena_map_mid_count;
|
||||
int arena_map_bot_count;
|
||||
#else
|
||||
arena_map_bot_t arena_map_root;
|
||||
#endif
|
||||
};
|
||||
|
||||
#endif /* WITH_PYMALLOC_RADIX_TREE */
|
||||
|
||||
|
||||
struct _obmalloc_global_state {
|
||||
int dump_debug_stats;
|
||||
Py_ssize_t interpreter_leaks;
|
||||
};
|
||||
|
||||
struct _obmalloc_state {
|
||||
struct _obmalloc_pools pools;
|
||||
struct _obmalloc_mgmt mgmt;
|
||||
#if WITH_PYMALLOC_RADIX_TREE
|
||||
struct _obmalloc_usage usage;
|
||||
#endif
|
||||
};
|
||||
|
||||
|
||||
#undef uint
|
||||
|
||||
|
||||
/* Allocate memory directly from the O/S virtual memory system,
|
||||
* where supported. Otherwise fallback on malloc */
|
||||
void *_PyObject_VirtualAlloc(size_t size);
|
||||
void _PyObject_VirtualFree(void *, size_t size);
|
||||
|
||||
|
||||
/* This function returns the number of allocated memory blocks, regardless of size */
|
||||
extern Py_ssize_t _Py_GetGlobalAllocatedBlocks(void);
|
||||
#define _Py_GetAllocatedBlocks() \
|
||||
_Py_GetGlobalAllocatedBlocks()
|
||||
extern Py_ssize_t _PyInterpreterState_GetAllocatedBlocks(PyInterpreterState *);
|
||||
extern void _PyInterpreterState_FinalizeAllocatedBlocks(PyInterpreterState *);
|
||||
extern int _PyMem_init_obmalloc(PyInterpreterState *interp);
|
||||
extern bool _PyMem_obmalloc_state_on_heap(PyInterpreterState *interp);
|
||||
|
||||
|
||||
#ifdef WITH_PYMALLOC
|
||||
// Export the symbol for the 3rd party 'guppy3' project
|
||||
PyAPI_FUNC(int) _PyObject_DebugMallocStats(FILE *out);
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // !Py_INTERNAL_OBMALLOC_H
|
||||
66
extern/include/python/internal/pycore_obmalloc_init.h
vendored
Normal file
66
extern/include/python/internal/pycore_obmalloc_init.h
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
#ifndef Py_INTERNAL_OBMALLOC_INIT_H
|
||||
#define Py_INTERNAL_OBMALLOC_INIT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
/****************************************************/
|
||||
/* the default object allocator's state initializer */
|
||||
|
||||
#define PTA(pools, x) \
|
||||
((poolp )((uint8_t *)&(pools.used[2*(x)]) - 2*sizeof(pymem_block *)))
|
||||
#define PT(p, x) PTA(p, x), PTA(p, x)
|
||||
|
||||
#define PT_8(p, start) \
|
||||
PT(p, start), \
|
||||
PT(p, start+1), \
|
||||
PT(p, start+2), \
|
||||
PT(p, start+3), \
|
||||
PT(p, start+4), \
|
||||
PT(p, start+5), \
|
||||
PT(p, start+6), \
|
||||
PT(p, start+7)
|
||||
|
||||
#if NB_SMALL_SIZE_CLASSES <= 8
|
||||
# define _obmalloc_pools_INIT(p) \
|
||||
{ PT_8(p, 0) }
|
||||
#elif NB_SMALL_SIZE_CLASSES <= 16
|
||||
# define _obmalloc_pools_INIT(p) \
|
||||
{ PT_8(p, 0), PT_8(p, 8) }
|
||||
#elif NB_SMALL_SIZE_CLASSES <= 24
|
||||
# define _obmalloc_pools_INIT(p) \
|
||||
{ PT_8(p, 0), PT_8(p, 8), PT_8(p, 16) }
|
||||
#elif NB_SMALL_SIZE_CLASSES <= 32
|
||||
# define _obmalloc_pools_INIT(p) \
|
||||
{ PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24) }
|
||||
#elif NB_SMALL_SIZE_CLASSES <= 40
|
||||
# define _obmalloc_pools_INIT(p) \
|
||||
{ PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32) }
|
||||
#elif NB_SMALL_SIZE_CLASSES <= 48
|
||||
# define _obmalloc_pools_INIT(p) \
|
||||
{ PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40) }
|
||||
#elif NB_SMALL_SIZE_CLASSES <= 56
|
||||
# define _obmalloc_pools_INIT(p) \
|
||||
{ PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40), PT_8(p, 48) }
|
||||
#elif NB_SMALL_SIZE_CLASSES <= 64
|
||||
# define _obmalloc_pools_INIT(p) \
|
||||
{ PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40), PT_8(p, 48), PT_8(p, 56) }
|
||||
#else
|
||||
# error "NB_SMALL_SIZE_CLASSES should be less than 64"
|
||||
#endif
|
||||
|
||||
#define _obmalloc_global_state_INIT \
|
||||
{ \
|
||||
.dump_debug_stats = -1, \
|
||||
}
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // !Py_INTERNAL_OBMALLOC_INIT_H
|
||||
2117
extern/include/python/internal/pycore_opcode_metadata.h
vendored
Normal file
2117
extern/include/python/internal/pycore_opcode_metadata.h
vendored
Normal file
File diff suppressed because it is too large
Load Diff
90
extern/include/python/internal/pycore_opcode_utils.h
vendored
Normal file
90
extern/include/python/internal/pycore_opcode_utils.h
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
#ifndef Py_INTERNAL_OPCODE_UTILS_H
|
||||
#define Py_INTERNAL_OPCODE_UTILS_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#define MAX_REAL_OPCODE 254
|
||||
|
||||
#define IS_WITHIN_OPCODE_RANGE(opcode) \
|
||||
(((opcode) >= 0 && (opcode) <= MAX_REAL_OPCODE) || \
|
||||
IS_PSEUDO_INSTR(opcode))
|
||||
|
||||
#define IS_BLOCK_PUSH_OPCODE(opcode) \
|
||||
((opcode) == SETUP_FINALLY || \
|
||||
(opcode) == SETUP_WITH || \
|
||||
(opcode) == SETUP_CLEANUP)
|
||||
|
||||
#define HAS_TARGET(opcode) \
|
||||
(OPCODE_HAS_JUMP(opcode) || IS_BLOCK_PUSH_OPCODE(opcode))
|
||||
|
||||
/* opcodes that must be last in the basicblock */
|
||||
#define IS_TERMINATOR_OPCODE(opcode) \
|
||||
(OPCODE_HAS_JUMP(opcode) || IS_SCOPE_EXIT_OPCODE(opcode))
|
||||
|
||||
/* opcodes which are not emitted in codegen stage, only by the assembler */
|
||||
#define IS_ASSEMBLER_OPCODE(opcode) \
|
||||
((opcode) == JUMP_FORWARD || \
|
||||
(opcode) == JUMP_BACKWARD || \
|
||||
(opcode) == JUMP_BACKWARD_NO_INTERRUPT)
|
||||
|
||||
#define IS_BACKWARDS_JUMP_OPCODE(opcode) \
|
||||
((opcode) == JUMP_BACKWARD || \
|
||||
(opcode) == JUMP_BACKWARD_NO_INTERRUPT)
|
||||
|
||||
#define IS_UNCONDITIONAL_JUMP_OPCODE(opcode) \
|
||||
((opcode) == JUMP || \
|
||||
(opcode) == JUMP_NO_INTERRUPT || \
|
||||
(opcode) == JUMP_FORWARD || \
|
||||
(opcode) == JUMP_BACKWARD || \
|
||||
(opcode) == JUMP_BACKWARD_NO_INTERRUPT)
|
||||
|
||||
#define IS_CONDITIONAL_JUMP_OPCODE(opcode) \
|
||||
((opcode) == POP_JUMP_IF_FALSE || \
|
||||
(opcode) == POP_JUMP_IF_TRUE || \
|
||||
(opcode) == POP_JUMP_IF_NONE || \
|
||||
(opcode) == POP_JUMP_IF_NOT_NONE)
|
||||
|
||||
#define IS_SCOPE_EXIT_OPCODE(opcode) \
|
||||
((opcode) == RETURN_VALUE || \
|
||||
(opcode) == RAISE_VARARGS || \
|
||||
(opcode) == RERAISE)
|
||||
|
||||
#define IS_RETURN_OPCODE(opcode) \
|
||||
(opcode == RETURN_VALUE)
|
||||
#define IS_RAISE_OPCODE(opcode) \
|
||||
(opcode == RAISE_VARARGS || opcode == RERAISE)
|
||||
|
||||
|
||||
/* Flags used in the oparg for MAKE_FUNCTION */
|
||||
#define MAKE_FUNCTION_DEFAULTS 0x01
|
||||
#define MAKE_FUNCTION_KWDEFAULTS 0x02
|
||||
#define MAKE_FUNCTION_ANNOTATIONS 0x04
|
||||
#define MAKE_FUNCTION_CLOSURE 0x08
|
||||
#define MAKE_FUNCTION_ANNOTATE 0x10
|
||||
|
||||
/* Values used as the oparg for LOAD_COMMON_CONSTANT */
|
||||
#define CONSTANT_ASSERTIONERROR 0
|
||||
#define CONSTANT_NOTIMPLEMENTEDERROR 1
|
||||
#define CONSTANT_BUILTIN_TUPLE 2
|
||||
#define CONSTANT_BUILTIN_ALL 3
|
||||
#define CONSTANT_BUILTIN_ANY 4
|
||||
#define NUM_COMMON_CONSTANTS 5
|
||||
|
||||
/* Values used in the oparg for RESUME */
|
||||
#define RESUME_AT_FUNC_START 0
|
||||
#define RESUME_AFTER_YIELD 1
|
||||
#define RESUME_AFTER_YIELD_FROM 2
|
||||
#define RESUME_AFTER_AWAIT 3
|
||||
|
||||
#define RESUME_OPARG_LOCATION_MASK 0x3
|
||||
#define RESUME_OPARG_DEPTH1_MASK 0x4
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_OPCODE_UTILS_H */
|
||||
318
extern/include/python/internal/pycore_optimizer.h
vendored
Normal file
318
extern/include/python/internal/pycore_optimizer.h
vendored
Normal file
@@ -0,0 +1,318 @@
|
||||
#ifndef Py_INTERNAL_OPTIMIZER_H
|
||||
#define Py_INTERNAL_OPTIMIZER_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_typedefs.h" // _PyInterpreterFrame
|
||||
#include "pycore_uop_ids.h"
|
||||
#include <stdbool.h>
|
||||
|
||||
|
||||
typedef struct _PyExecutorLinkListNode {
|
||||
struct _PyExecutorObject *next;
|
||||
struct _PyExecutorObject *previous;
|
||||
} _PyExecutorLinkListNode;
|
||||
|
||||
|
||||
/* Bloom filter with m = 256
|
||||
* https://en.wikipedia.org/wiki/Bloom_filter */
|
||||
#define _Py_BLOOM_FILTER_WORDS 8
|
||||
|
||||
typedef struct {
|
||||
uint32_t bits[_Py_BLOOM_FILTER_WORDS];
|
||||
} _PyBloomFilter;
|
||||
|
||||
typedef struct {
|
||||
uint8_t opcode;
|
||||
uint8_t oparg;
|
||||
uint8_t valid:1;
|
||||
uint8_t linked:1;
|
||||
uint8_t chain_depth:6; // Must be big enough for MAX_CHAIN_DEPTH - 1.
|
||||
bool warm;
|
||||
int index; // Index of ENTER_EXECUTOR (if code isn't NULL, below).
|
||||
_PyBloomFilter bloom;
|
||||
_PyExecutorLinkListNode links;
|
||||
PyCodeObject *code; // Weak (NULL if no corresponding ENTER_EXECUTOR).
|
||||
} _PyVMData;
|
||||
|
||||
/* Depending on the format,
|
||||
* the 32 bits between the oparg and operand are:
|
||||
* UOP_FORMAT_TARGET:
|
||||
* uint32_t target;
|
||||
* UOP_FORMAT_JUMP
|
||||
* uint16_t jump_target;
|
||||
* uint16_t error_target;
|
||||
*/
|
||||
typedef struct {
|
||||
uint16_t opcode:15;
|
||||
uint16_t format:1;
|
||||
uint16_t oparg;
|
||||
union {
|
||||
uint32_t target;
|
||||
struct {
|
||||
uint16_t jump_target;
|
||||
uint16_t error_target;
|
||||
};
|
||||
};
|
||||
uint64_t operand0; // A cache entry
|
||||
uint64_t operand1;
|
||||
#ifdef Py_STATS
|
||||
uint64_t execution_count;
|
||||
#endif
|
||||
} _PyUOpInstruction;
|
||||
|
||||
typedef struct {
|
||||
uint32_t target;
|
||||
_Py_BackoffCounter temperature;
|
||||
struct _PyExecutorObject *executor;
|
||||
} _PyExitData;
|
||||
|
||||
typedef struct _PyExecutorObject {
|
||||
PyObject_VAR_HEAD
|
||||
const _PyUOpInstruction *trace;
|
||||
_PyVMData vm_data; /* Used by the VM, but opaque to the optimizer */
|
||||
uint32_t exit_count;
|
||||
uint32_t code_size;
|
||||
size_t jit_size;
|
||||
void *jit_code;
|
||||
void *jit_side_entry;
|
||||
_PyExitData exits[1];
|
||||
} _PyExecutorObject;
|
||||
|
||||
/* If pending deletion list gets large enough, then scan,
|
||||
* and free any executors that aren't executing
|
||||
* i.e. any that aren't a thread's current_executor. */
|
||||
#define EXECUTOR_DELETE_LIST_MAX 100
|
||||
|
||||
// Export for '_opcode' shared extension (JIT compiler).
|
||||
PyAPI_FUNC(_PyExecutorObject*) _Py_GetExecutor(PyCodeObject *code, int offset);
|
||||
|
||||
void _Py_ExecutorInit(_PyExecutorObject *, const _PyBloomFilter *);
|
||||
void _Py_ExecutorDetach(_PyExecutorObject *);
|
||||
void _Py_BloomFilter_Init(_PyBloomFilter *);
|
||||
void _Py_BloomFilter_Add(_PyBloomFilter *bloom, void *obj);
|
||||
PyAPI_FUNC(void) _Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj);
|
||||
|
||||
#define _Py_MAX_ALLOWED_BUILTINS_MODIFICATIONS 3
|
||||
#define _Py_MAX_ALLOWED_GLOBALS_MODIFICATIONS 6
|
||||
|
||||
#ifdef _Py_TIER2
|
||||
PyAPI_FUNC(void) _Py_Executors_InvalidateDependency(PyInterpreterState *interp, void *obj, int is_invalidation);
|
||||
PyAPI_FUNC(void) _Py_Executors_InvalidateAll(PyInterpreterState *interp, int is_invalidation);
|
||||
PyAPI_FUNC(void) _Py_Executors_InvalidateCold(PyInterpreterState *interp);
|
||||
|
||||
#else
|
||||
# define _Py_Executors_InvalidateDependency(A, B, C) ((void)0)
|
||||
# define _Py_Executors_InvalidateAll(A, B) ((void)0)
|
||||
# define _Py_Executors_InvalidateCold(A) ((void)0)
|
||||
|
||||
#endif
|
||||
|
||||
// Used as the threshold to trigger executor invalidation when
|
||||
// trace_run_counter is greater than this value.
|
||||
#define JIT_CLEANUP_THRESHOLD 100000
|
||||
|
||||
// This is the length of the trace we project initially.
|
||||
#define UOP_MAX_TRACE_LENGTH 800
|
||||
|
||||
#define TRACE_STACK_SIZE 5
|
||||
|
||||
int _Py_uop_analyze_and_optimize(_PyInterpreterFrame *frame,
|
||||
_PyUOpInstruction *trace, int trace_len, int curr_stackentries,
|
||||
_PyBloomFilter *dependencies);
|
||||
|
||||
extern PyTypeObject _PyUOpExecutor_Type;
|
||||
|
||||
|
||||
#define UOP_FORMAT_TARGET 0
|
||||
#define UOP_FORMAT_JUMP 1
|
||||
|
||||
static inline uint32_t uop_get_target(const _PyUOpInstruction *inst)
|
||||
{
|
||||
assert(inst->format == UOP_FORMAT_TARGET);
|
||||
return inst->target;
|
||||
}
|
||||
|
||||
static inline uint16_t uop_get_jump_target(const _PyUOpInstruction *inst)
|
||||
{
|
||||
assert(inst->format == UOP_FORMAT_JUMP);
|
||||
return inst->jump_target;
|
||||
}
|
||||
|
||||
static inline uint16_t uop_get_error_target(const _PyUOpInstruction *inst)
|
||||
{
|
||||
assert(inst->format != UOP_FORMAT_TARGET);
|
||||
return inst->error_target;
|
||||
}
|
||||
|
||||
// Holds locals, stack, locals, stack ... co_consts (in that order)
|
||||
#define MAX_ABSTRACT_INTERP_SIZE 4096
|
||||
|
||||
#define TY_ARENA_SIZE (UOP_MAX_TRACE_LENGTH * 5)
|
||||
|
||||
// Need extras for root frame and for overflow frame (see TRACE_STACK_PUSH())
|
||||
#define MAX_ABSTRACT_FRAME_DEPTH (TRACE_STACK_SIZE + 2)
|
||||
|
||||
// The maximum number of side exits that we can take before requiring forward
|
||||
// progress (and inserting a new ENTER_EXECUTOR instruction). In practice, this
|
||||
// is the "maximum amount of polymorphism" that an isolated trace tree can
|
||||
// handle before rejoining the rest of the program.
|
||||
#define MAX_CHAIN_DEPTH 4
|
||||
|
||||
/* Symbols */
|
||||
/* See explanation in optimizer_symbols.c */
|
||||
|
||||
|
||||
typedef enum _JitSymType {
|
||||
JIT_SYM_UNKNOWN_TAG = 1,
|
||||
JIT_SYM_NULL_TAG = 2,
|
||||
JIT_SYM_NON_NULL_TAG = 3,
|
||||
JIT_SYM_BOTTOM_TAG = 4,
|
||||
JIT_SYM_TYPE_VERSION_TAG = 5,
|
||||
JIT_SYM_KNOWN_CLASS_TAG = 6,
|
||||
JIT_SYM_KNOWN_VALUE_TAG = 7,
|
||||
JIT_SYM_TUPLE_TAG = 8,
|
||||
JIT_SYM_TRUTHINESS_TAG = 9,
|
||||
} JitSymType;
|
||||
|
||||
typedef struct _jit_opt_known_class {
|
||||
uint8_t tag;
|
||||
uint32_t version;
|
||||
PyTypeObject *type;
|
||||
} JitOptKnownClass;
|
||||
|
||||
typedef struct _jit_opt_known_version {
|
||||
uint8_t tag;
|
||||
uint32_t version;
|
||||
} JitOptKnownVersion;
|
||||
|
||||
typedef struct _jit_opt_known_value {
|
||||
uint8_t tag;
|
||||
PyObject *value;
|
||||
} JitOptKnownValue;
|
||||
|
||||
#define MAX_SYMBOLIC_TUPLE_SIZE 7
|
||||
|
||||
typedef struct _jit_opt_tuple {
|
||||
uint8_t tag;
|
||||
uint8_t length;
|
||||
uint16_t items[MAX_SYMBOLIC_TUPLE_SIZE];
|
||||
} JitOptTuple;
|
||||
|
||||
typedef struct {
|
||||
uint8_t tag;
|
||||
bool invert;
|
||||
uint16_t value;
|
||||
} JitOptTruthiness;
|
||||
|
||||
typedef union _jit_opt_symbol {
|
||||
uint8_t tag;
|
||||
JitOptKnownClass cls;
|
||||
JitOptKnownValue value;
|
||||
JitOptKnownVersion version;
|
||||
JitOptTuple tuple;
|
||||
JitOptTruthiness truthiness;
|
||||
} JitOptSymbol;
|
||||
|
||||
|
||||
|
||||
struct _Py_UOpsAbstractFrame {
|
||||
// Max stacklen
|
||||
int stack_len;
|
||||
int locals_len;
|
||||
|
||||
JitOptSymbol **stack_pointer;
|
||||
JitOptSymbol **stack;
|
||||
JitOptSymbol **locals;
|
||||
};
|
||||
|
||||
typedef struct _Py_UOpsAbstractFrame _Py_UOpsAbstractFrame;
|
||||
|
||||
typedef struct ty_arena {
|
||||
int ty_curr_number;
|
||||
int ty_max_number;
|
||||
JitOptSymbol arena[TY_ARENA_SIZE];
|
||||
} ty_arena;
|
||||
|
||||
typedef struct _JitOptContext {
|
||||
char done;
|
||||
char out_of_space;
|
||||
bool contradiction;
|
||||
// The current "executing" frame.
|
||||
_Py_UOpsAbstractFrame *frame;
|
||||
_Py_UOpsAbstractFrame frames[MAX_ABSTRACT_FRAME_DEPTH];
|
||||
int curr_frame_depth;
|
||||
|
||||
// Arena for the symbolic types.
|
||||
ty_arena t_arena;
|
||||
|
||||
JitOptSymbol **n_consumed;
|
||||
JitOptSymbol **limit;
|
||||
JitOptSymbol *locals_and_stack[MAX_ABSTRACT_INTERP_SIZE];
|
||||
} JitOptContext;
|
||||
|
||||
extern bool _Py_uop_sym_is_null(JitOptSymbol *sym);
|
||||
extern bool _Py_uop_sym_is_not_null(JitOptSymbol *sym);
|
||||
extern bool _Py_uop_sym_is_const(JitOptContext *ctx, JitOptSymbol *sym);
|
||||
extern PyObject *_Py_uop_sym_get_const(JitOptContext *ctx, JitOptSymbol *sym);
|
||||
extern JitOptSymbol *_Py_uop_sym_new_unknown(JitOptContext *ctx);
|
||||
extern JitOptSymbol *_Py_uop_sym_new_not_null(JitOptContext *ctx);
|
||||
extern JitOptSymbol *_Py_uop_sym_new_type(
|
||||
JitOptContext *ctx, PyTypeObject *typ);
|
||||
extern JitOptSymbol *_Py_uop_sym_new_const(JitOptContext *ctx, PyObject *const_val);
|
||||
extern JitOptSymbol *_Py_uop_sym_new_null(JitOptContext *ctx);
|
||||
extern bool _Py_uop_sym_has_type(JitOptSymbol *sym);
|
||||
extern bool _Py_uop_sym_matches_type(JitOptSymbol *sym, PyTypeObject *typ);
|
||||
extern bool _Py_uop_sym_matches_type_version(JitOptSymbol *sym, unsigned int version);
|
||||
extern void _Py_uop_sym_set_null(JitOptContext *ctx, JitOptSymbol *sym);
|
||||
extern void _Py_uop_sym_set_non_null(JitOptContext *ctx, JitOptSymbol *sym);
|
||||
extern void _Py_uop_sym_set_type(JitOptContext *ctx, JitOptSymbol *sym, PyTypeObject *typ);
|
||||
extern bool _Py_uop_sym_set_type_version(JitOptContext *ctx, JitOptSymbol *sym, unsigned int version);
|
||||
extern void _Py_uop_sym_set_const(JitOptContext *ctx, JitOptSymbol *sym, PyObject *const_val);
|
||||
extern bool _Py_uop_sym_is_bottom(JitOptSymbol *sym);
|
||||
extern int _Py_uop_sym_truthiness(JitOptContext *ctx, JitOptSymbol *sym);
|
||||
extern PyTypeObject *_Py_uop_sym_get_type(JitOptSymbol *sym);
|
||||
extern bool _Py_uop_sym_is_immortal(JitOptSymbol *sym);
|
||||
extern JitOptSymbol *_Py_uop_sym_new_tuple(JitOptContext *ctx, int size, JitOptSymbol **args);
|
||||
extern JitOptSymbol *_Py_uop_sym_tuple_getitem(JitOptContext *ctx, JitOptSymbol *sym, int item);
|
||||
extern int _Py_uop_sym_tuple_length(JitOptSymbol *sym);
|
||||
extern JitOptSymbol *_Py_uop_sym_new_truthiness(JitOptContext *ctx, JitOptSymbol *value, bool truthy);
|
||||
|
||||
extern void _Py_uop_abstractcontext_init(JitOptContext *ctx);
|
||||
extern void _Py_uop_abstractcontext_fini(JitOptContext *ctx);
|
||||
|
||||
extern _Py_UOpsAbstractFrame *_Py_uop_frame_new(
|
||||
JitOptContext *ctx,
|
||||
PyCodeObject *co,
|
||||
int curr_stackentries,
|
||||
JitOptSymbol **args,
|
||||
int arg_len);
|
||||
extern int _Py_uop_frame_pop(JitOptContext *ctx);
|
||||
|
||||
PyAPI_FUNC(PyObject *) _Py_uop_symbols_test(PyObject *self, PyObject *ignored);
|
||||
|
||||
PyAPI_FUNC(int) _PyOptimizer_Optimize(_PyInterpreterFrame *frame, _Py_CODEUNIT *start, _PyExecutorObject **exec_ptr, int chain_depth);
|
||||
|
||||
static inline int is_terminator(const _PyUOpInstruction *uop)
|
||||
{
|
||||
int opcode = uop->opcode;
|
||||
return (
|
||||
opcode == _EXIT_TRACE ||
|
||||
opcode == _JUMP_TO_TOP
|
||||
);
|
||||
}
|
||||
|
||||
PyAPI_FUNC(int) _PyDumpExecutors(FILE *out);
|
||||
#ifdef _Py_TIER2
|
||||
extern void _Py_ClearExecutorDeletionList(PyInterpreterState *interp);
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_OPTIMIZER_H */
|
||||
97
extern/include/python/internal/pycore_parking_lot.h
vendored
Normal file
97
extern/include/python/internal/pycore_parking_lot.h
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
// ParkingLot is an internal API for building efficient synchronization
|
||||
// primitives like mutexes and events.
|
||||
//
|
||||
// The API and name is inspired by WebKit's WTF::ParkingLot, which in turn
|
||||
// is inspired Linux's futex API.
|
||||
// See https://webkit.org/blog/6161/locking-in-webkit/.
|
||||
//
|
||||
// The core functionality is an atomic "compare-and-sleep" operation along with
|
||||
// an atomic "wake-up" operation.
|
||||
|
||||
#ifndef Py_INTERNAL_PARKING_LOT_H
|
||||
#define Py_INTERNAL_PARKING_LOT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
enum {
|
||||
// The thread was unparked by another thread.
|
||||
Py_PARK_OK = 0,
|
||||
|
||||
// The value of `address` did not match `expected`.
|
||||
Py_PARK_AGAIN = -1,
|
||||
|
||||
// The thread was unparked due to a timeout.
|
||||
Py_PARK_TIMEOUT = -2,
|
||||
|
||||
// The thread was interrupted by a signal.
|
||||
Py_PARK_INTR = -3,
|
||||
};
|
||||
|
||||
// Checks that `*address == *expected` and puts the thread to sleep until an
|
||||
// unpark operation is called on the same `address`. Otherwise, the function
|
||||
// returns `Py_PARK_AGAIN`. The comparison behaves like memcmp, but is
|
||||
// performed atomically with respect to unpark operations.
|
||||
//
|
||||
// The `address_size` argument is the size of the data pointed to by the
|
||||
// `address` and `expected` pointers (i.e., sizeof(*address)). It must be
|
||||
// 1, 2, 4, or 8.
|
||||
//
|
||||
// The `timeout_ns` argument specifies the maximum amount of time to wait, with
|
||||
// -1 indicating an infinite wait.
|
||||
//
|
||||
// `park_arg`, which can be NULL, is passed to the unpark operation.
|
||||
//
|
||||
// If `detach` is true, then the thread will detach/release the GIL while
|
||||
// waiting.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// if (_Py_atomic_compare_exchange_uint8(address, &expected, new_value)) {
|
||||
// int res = _PyParkingLot_Park(address, &new_value, sizeof(*address),
|
||||
// timeout_ns, NULL, 1);
|
||||
// ...
|
||||
// }
|
||||
PyAPI_FUNC(int)
|
||||
_PyParkingLot_Park(const void *address, const void *expected,
|
||||
size_t address_size, PyTime_t timeout_ns,
|
||||
void *park_arg, int detach);
|
||||
|
||||
// Callback for _PyParkingLot_Unpark:
|
||||
//
|
||||
// `arg` is the data of the same name provided to the _PyParkingLot_Unpark()
|
||||
// call.
|
||||
// `park_arg` is the data provided to _PyParkingLot_Park() call or NULL if
|
||||
// no waiting thread was found.
|
||||
// `has_more_waiters` is true if there are more threads waiting on the same
|
||||
// address. May be true in cases where threads are waiting on a different
|
||||
// address that map to the same internal bucket.
|
||||
typedef void _Py_unpark_fn_t(void *arg, void *park_arg, int has_more_waiters);
|
||||
|
||||
// Unparks a single thread waiting on `address`.
|
||||
//
|
||||
// Note that fn() is called regardless of whether a thread was unparked. If
|
||||
// no threads are waiting on `address` then the `park_arg` argument to fn()
|
||||
// will be NULL.
|
||||
//
|
||||
// Example usage:
|
||||
// void callback(void *arg, void *park_arg, int has_more_waiters);
|
||||
// _PyParkingLot_Unpark(address, &callback, arg);
|
||||
PyAPI_FUNC(void)
|
||||
_PyParkingLot_Unpark(const void *address, _Py_unpark_fn_t *fn, void *arg);
|
||||
|
||||
// Unparks all threads waiting on `address`.
|
||||
PyAPI_FUNC(void) _PyParkingLot_UnparkAll(const void *address);
|
||||
|
||||
// Resets the parking lot state after a fork. Forgets all parked threads.
|
||||
PyAPI_FUNC(void) _PyParkingLot_AfterFork(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_PARKING_LOT_H */
|
||||
78
extern/include/python/internal/pycore_parser.h
vendored
Normal file
78
extern/include/python/internal/pycore_parser.h
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
#ifndef Py_INTERNAL_PARSER_H
|
||||
#define Py_INTERNAL_PARSER_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
#include "pycore_ast.h" // struct _expr
|
||||
#include "pycore_global_strings.h" // _Py_DECLARE_STR()
|
||||
#include "pycore_pyarena.h" // PyArena
|
||||
|
||||
_Py_DECLARE_STR(empty, "")
|
||||
#if defined(Py_DEBUG) && defined(Py_GIL_DISABLED)
|
||||
#define _parser_runtime_state_INIT \
|
||||
{ \
|
||||
.mutex = {0}, \
|
||||
.dummy_name = { \
|
||||
.kind = Name_kind, \
|
||||
.v.Name.id = &_Py_STR(empty), \
|
||||
.v.Name.ctx = Load, \
|
||||
.lineno = 1, \
|
||||
.col_offset = 0, \
|
||||
.end_lineno = 1, \
|
||||
.end_col_offset = 0, \
|
||||
}, \
|
||||
}
|
||||
#else
|
||||
#define _parser_runtime_state_INIT \
|
||||
{ \
|
||||
.dummy_name = { \
|
||||
.kind = Name_kind, \
|
||||
.v.Name.id = &_Py_STR(empty), \
|
||||
.v.Name.ctx = Load, \
|
||||
.lineno = 1, \
|
||||
.col_offset = 0, \
|
||||
.end_lineno = 1, \
|
||||
.end_col_offset = 0, \
|
||||
}, \
|
||||
}
|
||||
#endif
|
||||
|
||||
extern struct _mod* _PyParser_ASTFromString(
|
||||
const char *str,
|
||||
PyObject* filename,
|
||||
int mode,
|
||||
PyCompilerFlags *flags,
|
||||
PyArena *arena);
|
||||
|
||||
extern struct _mod* _PyParser_ASTFromFile(
|
||||
FILE *fp,
|
||||
PyObject *filename_ob,
|
||||
const char *enc,
|
||||
int mode,
|
||||
const char *ps1,
|
||||
const char *ps2,
|
||||
PyCompilerFlags *flags,
|
||||
int *errcode,
|
||||
PyArena *arena);
|
||||
extern struct _mod* _PyParser_InteractiveASTFromFile(
|
||||
FILE *fp,
|
||||
PyObject *filename_ob,
|
||||
const char *enc,
|
||||
int mode,
|
||||
const char *ps1,
|
||||
const char *ps2,
|
||||
PyCompilerFlags *flags,
|
||||
int *errcode,
|
||||
PyObject **interactive_src,
|
||||
PyArena *arena);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_PARSER_H */
|
||||
26
extern/include/python/internal/pycore_pathconfig.h
vendored
Normal file
26
extern/include/python/internal/pycore_pathconfig.h
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
#ifndef Py_INTERNAL_PATHCONFIG_H
|
||||
#define Py_INTERNAL_PATHCONFIG_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(void) _PyPathConfig_ClearGlobal(void);
|
||||
|
||||
extern PyStatus _PyPathConfig_ReadGlobal(PyConfig *config);
|
||||
extern PyStatus _PyPathConfig_UpdateGlobal(const PyConfig *config);
|
||||
extern const wchar_t * _PyPathConfig_GetGlobalModuleSearchPath(void);
|
||||
|
||||
extern int _PyPathConfig_ComputeSysPath0(
|
||||
const PyWideStringList *argv,
|
||||
PyObject **path0);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_PATHCONFIG_H */
|
||||
/* ======================================================================
 * File: extern/include/python/internal/pycore_pyarena.h
 * (vendored, new file, 68 lines)
 * ====================================================================== */
// An arena-like memory interface for the compiler.
|
||||
|
||||
#ifndef Py_INTERNAL_PYARENA_H
|
||||
#define Py_INTERNAL_PYARENA_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
typedef struct _arena PyArena;
|
||||
|
||||
// _PyArena_New() and _PyArena_Free() create a new arena and free it,
|
||||
// respectively. Once an arena has been created, it can be used
|
||||
// to allocate memory via _PyArena_Malloc(). Pointers to PyObject can
|
||||
// also be registered with the arena via _PyArena_AddPyObject(), and the
|
||||
// arena will ensure that the PyObjects stay alive at least until
|
||||
// _PyArena_Free() is called. When an arena is freed, all the memory it
|
||||
// allocated is freed, the arena releases internal references to registered
|
||||
// PyObject*, and none of its pointers are valid.
|
||||
// XXX (tim) What does "none of its pointers are valid" mean? Does it
|
||||
// XXX mean that pointers previously obtained via _PyArena_Malloc() are
|
||||
// XXX no longer valid? (That's clearly true, but not sure that's what
|
||||
// XXX the text is trying to say.)
|
||||
//
|
||||
// _PyArena_New() returns an arena pointer. On error, it
|
||||
// returns a negative number and sets an exception.
|
||||
// XXX (tim): Not true. On error, _PyArena_New() actually returns NULL,
|
||||
// XXX and looks like it may or may not set an exception (e.g., if the
|
||||
// XXX internal PyList_New(0) returns NULL, _PyArena_New() passes that on
|
||||
// XXX and an exception is set; OTOH, if the internal
|
||||
// XXX block_new(DEFAULT_BLOCK_SIZE) returns NULL, that's passed on but
|
||||
// XXX an exception is not set in that case).
|
||||
//
|
||||
// Export for test_peg_generator
|
||||
PyAPI_FUNC(PyArena*) _PyArena_New(void);
|
||||
|
||||
// Export for test_peg_generator
|
||||
PyAPI_FUNC(void) _PyArena_Free(PyArena *);
|
||||
|
||||
// Mostly like malloc(), return the address of a block of memory spanning
|
||||
// `size` bytes, or return NULL (without setting an exception) if enough
|
||||
// new memory can't be obtained. Unlike malloc(0), _PyArena_Malloc() with
|
||||
// size=0 does not guarantee to return a unique pointer (the pointer
|
||||
// returned may equal one or more other pointers obtained from
|
||||
// _PyArena_Malloc()).
|
||||
// Note that pointers obtained via _PyArena_Malloc() must never be passed to
|
||||
// the system free() or realloc(), or to any of Python's similar memory-
|
||||
// management functions. _PyArena_Malloc()-obtained pointers remain valid
|
||||
// until _PyArena_Free(ar) is called, at which point all pointers obtained
|
||||
// from the arena `ar` become invalid simultaneously.
|
||||
//
|
||||
// Export for test_peg_generator
|
||||
PyAPI_FUNC(void*) _PyArena_Malloc(PyArena *, size_t size);
|
||||
|
||||
// This routine isn't a proper arena allocation routine. It takes
|
||||
// a PyObject* and records it so that it can be DECREFed when the
|
||||
// arena is freed.
|
||||
//
|
||||
// Export for test_peg_generator
|
||||
PyAPI_FUNC(int) _PyArena_AddPyObject(PyArena *, PyObject *);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_PYARENA_H */
|
||||
/* ======================================================================
 * File: extern/include/python/internal/pycore_pyatomic_ft_wrappers.h
 * (vendored, new file, 177 lines)
 * ====================================================================== */
// This header file provides wrappers around the atomic operations found in
// `pyatomic.h` that are only atomic in free-threaded builds.
//
// These are intended to be used in places where atomics are required in
// free-threaded builds, but not in the default build, and we don't want to
// introduce the potential performance overhead of an atomic operation in the
// default build.
//
// All usages of these macros should be replaced with unconditionally atomic or
// non-atomic versions, and this file should be removed, once the dust settles
// on free threading.
#ifndef Py_ATOMIC_FT_WRAPPERS_H
#define Py_ATOMIC_FT_WRAPPERS_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#error "this header requires Py_BUILD_CORE define"
#endif

#ifdef Py_GIL_DISABLED
// Free-threaded build: each wrapper forwards to the real atomic operation.
// (Duplicate definitions of FT_ATOMIC_STORE_PTR and
// FT_ATOMIC_STORE_SSIZE_RELAXED were removed; each macro is now defined
// exactly once per branch.)
#define FT_ATOMIC_LOAD_PTR(value) _Py_atomic_load_ptr(&value)
#define FT_ATOMIC_STORE_PTR(value, new_value) _Py_atomic_store_ptr(&value, new_value)
#define FT_ATOMIC_LOAD_SSIZE(value) _Py_atomic_load_ssize(&value)
#define FT_ATOMIC_LOAD_SSIZE_ACQUIRE(value) \
    _Py_atomic_load_ssize_acquire(&value)
#define FT_ATOMIC_LOAD_SSIZE_RELAXED(value) \
    _Py_atomic_load_ssize_relaxed(&value)
#define FT_ATOMIC_LOAD_PTR_ACQUIRE(value) \
    _Py_atomic_load_ptr_acquire(&value)
#define FT_ATOMIC_LOAD_PTR_CONSUME(value) \
    _Py_atomic_load_ptr_consume(&value)
#define FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(value) \
    _Py_atomic_load_uintptr_acquire(&value)
#define FT_ATOMIC_LOAD_PTR_RELAXED(value) \
    _Py_atomic_load_ptr_relaxed(&value)
#define FT_ATOMIC_LOAD_UINT8(value) \
    _Py_atomic_load_uint8(&value)
#define FT_ATOMIC_STORE_UINT8(value, new_value) \
    _Py_atomic_store_uint8(&value, new_value)
#define FT_ATOMIC_LOAD_UINT8_RELAXED(value) \
    _Py_atomic_load_uint8_relaxed(&value)
#define FT_ATOMIC_LOAD_UINT16_RELAXED(value) \
    _Py_atomic_load_uint16_relaxed(&value)
#define FT_ATOMIC_LOAD_UINT32_RELAXED(value) \
    _Py_atomic_load_uint32_relaxed(&value)
#define FT_ATOMIC_LOAD_ULONG_RELAXED(value) \
    _Py_atomic_load_ulong_relaxed(&value)
#define FT_ATOMIC_STORE_PTR_RELAXED(value, new_value) \
    _Py_atomic_store_ptr_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_PTR_RELEASE(value, new_value) \
    _Py_atomic_store_ptr_release(&value, new_value)
#define FT_ATOMIC_STORE_UINTPTR_RELEASE(value, new_value) \
    _Py_atomic_store_uintptr_release(&value, new_value)
#define FT_ATOMIC_STORE_SSIZE_RELAXED(value, new_value) \
    _Py_atomic_store_ssize_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_UINT8_RELAXED(value, new_value) \
    _Py_atomic_store_uint8_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_UINT16_RELAXED(value, new_value) \
    _Py_atomic_store_uint16_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_UINT32_RELAXED(value, new_value) \
    _Py_atomic_store_uint32_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_CHAR_RELAXED(value, new_value) \
    _Py_atomic_store_char_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_CHAR_RELAXED(value) \
    _Py_atomic_load_char_relaxed(&value)
#define FT_ATOMIC_STORE_UCHAR_RELAXED(value, new_value) \
    _Py_atomic_store_uchar_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_UCHAR_RELAXED(value) \
    _Py_atomic_load_uchar_relaxed(&value)
#define FT_ATOMIC_STORE_SHORT_RELAXED(value, new_value) \
    _Py_atomic_store_short_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_SHORT_RELAXED(value) \
    _Py_atomic_load_short_relaxed(&value)
#define FT_ATOMIC_STORE_USHORT_RELAXED(value, new_value) \
    _Py_atomic_store_ushort_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_USHORT_RELAXED(value) \
    _Py_atomic_load_ushort_relaxed(&value)
#define FT_ATOMIC_LOAD_INT(value) \
    _Py_atomic_load_int(&value)
#define FT_ATOMIC_STORE_INT(value, new_value) \
    _Py_atomic_store_int(&value, new_value)
#define FT_ATOMIC_STORE_INT_RELAXED(value, new_value) \
    _Py_atomic_store_int_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_INT_RELAXED(value) \
    _Py_atomic_load_int_relaxed(&value)
#define FT_ATOMIC_STORE_UINT_RELAXED(value, new_value) \
    _Py_atomic_store_uint_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_UINT_RELAXED(value) \
    _Py_atomic_load_uint_relaxed(&value)
#define FT_ATOMIC_STORE_LONG_RELAXED(value, new_value) \
    _Py_atomic_store_long_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_LONG_RELAXED(value) \
    _Py_atomic_load_long_relaxed(&value)
#define FT_ATOMIC_STORE_ULONG_RELAXED(value, new_value) \
    _Py_atomic_store_ulong_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_FLOAT_RELAXED(value, new_value) \
    _Py_atomic_store_float_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_FLOAT_RELAXED(value) \
    _Py_atomic_load_float_relaxed(&value)
#define FT_ATOMIC_STORE_DOUBLE_RELAXED(value, new_value) \
    _Py_atomic_store_double_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_DOUBLE_RELAXED(value) \
    _Py_atomic_load_double_relaxed(&value)
#define FT_ATOMIC_STORE_LLONG_RELAXED(value, new_value) \
    _Py_atomic_store_llong_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_LLONG_RELAXED(value) \
    _Py_atomic_load_llong_relaxed(&value)
#define FT_ATOMIC_STORE_ULLONG_RELAXED(value, new_value) \
    _Py_atomic_store_ullong_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_ULLONG_RELAXED(value) \
    _Py_atomic_load_ullong_relaxed(&value)
#define FT_ATOMIC_ADD_SSIZE(value, new_value) \
    (void)_Py_atomic_add_ssize(&value, new_value)

#else
// Default (GIL) build: the wrappers are plain, non-atomic loads/stores.
#define FT_ATOMIC_LOAD_PTR(value) value
#define FT_ATOMIC_STORE_PTR(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_SSIZE(value) value
#define FT_ATOMIC_LOAD_SSIZE_ACQUIRE(value) value
#define FT_ATOMIC_LOAD_SSIZE_RELAXED(value) value
#define FT_ATOMIC_LOAD_PTR_ACQUIRE(value) value
#define FT_ATOMIC_LOAD_PTR_CONSUME(value) value
#define FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(value) value
#define FT_ATOMIC_LOAD_PTR_RELAXED(value) value
#define FT_ATOMIC_LOAD_UINT8(value) value
#define FT_ATOMIC_STORE_UINT8(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_UINT8_RELAXED(value) value
#define FT_ATOMIC_LOAD_UINT16_RELAXED(value) value
#define FT_ATOMIC_LOAD_UINT32_RELAXED(value) value
#define FT_ATOMIC_LOAD_ULONG_RELAXED(value) value
#define FT_ATOMIC_STORE_PTR_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_PTR_RELEASE(value, new_value) value = new_value
#define FT_ATOMIC_STORE_UINTPTR_RELEASE(value, new_value) value = new_value
#define FT_ATOMIC_STORE_SSIZE_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_UINT8_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_UINT16_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_UINT32_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_CHAR_RELAXED(value) value
#define FT_ATOMIC_STORE_CHAR_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_UCHAR_RELAXED(value) value
#define FT_ATOMIC_STORE_UCHAR_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_SHORT_RELAXED(value) value
#define FT_ATOMIC_STORE_SHORT_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_USHORT_RELAXED(value) value
#define FT_ATOMIC_STORE_USHORT_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_INT(value) value
#define FT_ATOMIC_STORE_INT(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_INT_RELAXED(value) value
#define FT_ATOMIC_STORE_INT_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_UINT_RELAXED(value) value
#define FT_ATOMIC_STORE_UINT_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_LONG_RELAXED(value) value
#define FT_ATOMIC_STORE_LONG_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_ULONG_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_FLOAT_RELAXED(value) value
#define FT_ATOMIC_STORE_FLOAT_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_DOUBLE_RELAXED(value) value
#define FT_ATOMIC_STORE_DOUBLE_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_LLONG_RELAXED(value) value
#define FT_ATOMIC_STORE_LLONG_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_ULLONG_RELAXED(value) value
#define FT_ATOMIC_STORE_ULLONG_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_ADD_SSIZE(value, new_value) (void)(value += new_value)

#endif

#ifdef __cplusplus
}
#endif
#endif /* !Py_ATOMIC_FT_WRAPPERS_H */
/* ======================================================================
 * File: extern/include/python/internal/pycore_pybuffer.h
 * (vendored, new file, 21 lines)
 * ====================================================================== */
#ifndef Py_INTERNAL_PYBUFFER_H
|
||||
#define Py_INTERNAL_PYBUFFER_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
// Exported for the _interpchannels module.
|
||||
PyAPI_FUNC(int) _PyBuffer_ReleaseInInterpreter(
|
||||
PyInterpreterState *interp, Py_buffer *view);
|
||||
PyAPI_FUNC(int) _PyBuffer_ReleaseInInterpreterAndRawFree(
|
||||
PyInterpreterState *interp, Py_buffer *view);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_PYBUFFER_H */
|
||||
/* ======================================================================
 * File: extern/include/python/internal/pycore_pyerrors.h
 * (vendored, new file, 213 lines)
 * ====================================================================== */
#ifndef Py_INTERNAL_PYERRORS_H
|
||||
#define Py_INTERNAL_PYERRORS_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
|
||||
/* Error handling definitions */
|
||||
|
||||
extern _PyErr_StackItem* _PyErr_GetTopmostException(PyThreadState *tstate);
|
||||
extern PyObject* _PyErr_GetHandledException(PyThreadState *);
|
||||
extern void _PyErr_SetHandledException(PyThreadState *, PyObject *);
|
||||
extern void _PyErr_GetExcInfo(PyThreadState *, PyObject **, PyObject **, PyObject **);
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(void) _PyErr_SetKeyError(PyObject *);
|
||||
|
||||
|
||||
// Like PyErr_Format(), but saves current exception as __context__ and
|
||||
// __cause__.
|
||||
// Export for '_sqlite3' shared extension.
|
||||
PyAPI_FUNC(PyObject*) _PyErr_FormatFromCause(
|
||||
PyObject *exception,
|
||||
const char *format, /* ASCII-encoded string */
|
||||
...
|
||||
);
|
||||
|
||||
extern int _PyException_AddNote(
|
||||
PyObject *exc,
|
||||
PyObject *note);
|
||||
|
||||
extern int _PyErr_CheckSignals(void);
|
||||
|
||||
/* Support for adding program text to SyntaxErrors */
|
||||
|
||||
// Export for test_peg_generator
|
||||
PyAPI_FUNC(PyObject*) _PyErr_ProgramDecodedTextObject(
|
||||
PyObject *filename,
|
||||
int lineno,
|
||||
const char* encoding);
|
||||
|
||||
extern PyObject* _PyUnicodeTranslateError_Create(
|
||||
PyObject *object,
|
||||
Py_ssize_t start,
|
||||
Py_ssize_t end,
|
||||
const char *reason /* UTF-8 encoded string */
|
||||
);
|
||||
|
||||
extern void _Py_NO_RETURN _Py_FatalErrorFormat(
|
||||
const char *func,
|
||||
const char *format,
|
||||
...);
|
||||
|
||||
extern PyObject* _PyErr_SetImportErrorWithNameFrom(
|
||||
PyObject *,
|
||||
PyObject *,
|
||||
PyObject *,
|
||||
PyObject *);
|
||||
extern int _PyErr_SetModuleNotFoundError(PyObject *name);
|
||||
|
||||
|
||||
/* runtime lifecycle */
|
||||
|
||||
extern PyStatus _PyErr_InitTypes(PyInterpreterState *);
|
||||
extern void _PyErr_FiniTypes(PyInterpreterState *);
|
||||
|
||||
|
||||
/* other API */
|
||||
|
||||
static inline PyObject* _PyErr_Occurred(PyThreadState *tstate)
|
||||
{
|
||||
assert(tstate != NULL);
|
||||
if (tstate->current_exception == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
return (PyObject *)Py_TYPE(tstate->current_exception);
|
||||
}
|
||||
|
||||
static inline void _PyErr_ClearExcState(_PyErr_StackItem *exc_state)
|
||||
{
|
||||
Py_CLEAR(exc_state->exc_value);
|
||||
}
|
||||
|
||||
extern PyObject* _PyErr_StackItemToExcInfoTuple(
|
||||
_PyErr_StackItem *err_info);
|
||||
|
||||
extern void _PyErr_Fetch(
|
||||
PyThreadState *tstate,
|
||||
PyObject **type,
|
||||
PyObject **value,
|
||||
PyObject **traceback);
|
||||
|
||||
PyAPI_FUNC(PyObject*) _PyErr_GetRaisedException(PyThreadState *tstate);
|
||||
|
||||
PyAPI_FUNC(int) _PyErr_ExceptionMatches(
|
||||
PyThreadState *tstate,
|
||||
PyObject *exc);
|
||||
|
||||
PyAPI_FUNC(void) _PyErr_SetRaisedException(PyThreadState *tstate, PyObject *exc);
|
||||
|
||||
extern void _PyErr_Restore(
|
||||
PyThreadState *tstate,
|
||||
PyObject *type,
|
||||
PyObject *value,
|
||||
PyObject *traceback);
|
||||
|
||||
extern void _PyErr_SetObject(
|
||||
PyThreadState *tstate,
|
||||
PyObject *type,
|
||||
PyObject *value);
|
||||
|
||||
extern void _PyErr_ChainStackItem(void);
|
||||
extern void _PyErr_ChainExceptions1Tstate(PyThreadState *, PyObject *);
|
||||
|
||||
PyAPI_FUNC(void) _PyErr_Clear(PyThreadState *tstate);
|
||||
|
||||
extern void _PyErr_SetNone(PyThreadState *tstate, PyObject *exception);
|
||||
|
||||
extern PyObject* _PyErr_NoMemory(PyThreadState *tstate);
|
||||
|
||||
extern int _PyErr_EmitSyntaxWarning(PyObject *msg, PyObject *filename, int lineno, int col_offset,
|
||||
int end_lineno, int end_col_offset);
|
||||
extern void _PyErr_RaiseSyntaxError(PyObject *msg, PyObject *filename, int lineno, int col_offset,
|
||||
int end_lineno, int end_col_offset);
|
||||
|
||||
PyAPI_FUNC(void) _PyErr_SetString(
|
||||
PyThreadState *tstate,
|
||||
PyObject *exception,
|
||||
const char *string);
|
||||
|
||||
/*
|
||||
* Set an exception with the error message decoded from the current locale
|
||||
* encoding (LC_CTYPE).
|
||||
*
|
||||
* Exceptions occurring in decoding take priority over the desired exception.
|
||||
*
|
||||
* Exported for '_ctypes' shared extensions.
|
||||
*/
|
||||
PyAPI_FUNC(void) _PyErr_SetLocaleString(
|
||||
PyObject *exception,
|
||||
const char *string);
|
||||
|
||||
PyAPI_FUNC(PyObject*) _PyErr_Format(
|
||||
PyThreadState *tstate,
|
||||
PyObject *exception,
|
||||
const char *format,
|
||||
...);
|
||||
|
||||
PyAPI_FUNC(PyObject*) _PyErr_FormatV(
|
||||
PyThreadState *tstate,
|
||||
PyObject *exception,
|
||||
const char *format,
|
||||
va_list vargs);
|
||||
|
||||
extern void _PyErr_NormalizeException(
|
||||
PyThreadState *tstate,
|
||||
PyObject **exc,
|
||||
PyObject **val,
|
||||
PyObject **tb);
|
||||
|
||||
extern PyObject* _PyErr_FormatFromCauseTstate(
|
||||
PyThreadState *tstate,
|
||||
PyObject *exception,
|
||||
const char *format,
|
||||
...);
|
||||
|
||||
extern PyObject* _PyExc_CreateExceptionGroup(
|
||||
const char *msg,
|
||||
PyObject *excs);
|
||||
|
||||
extern PyObject* _PyExc_PrepReraiseStar(
|
||||
PyObject *orig,
|
||||
PyObject *excs);
|
||||
|
||||
extern int _PyErr_CheckSignalsTstate(PyThreadState *tstate);
|
||||
|
||||
extern void _Py_DumpExtensionModules(int fd, PyInterpreterState *interp);
|
||||
extern PyObject* _Py_CalculateSuggestions(PyObject *dir, PyObject *name);
|
||||
extern PyObject* _Py_Offer_Suggestions(PyObject* exception);
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(Py_ssize_t) _Py_UTF8_Edit_Cost(PyObject *str_a, PyObject *str_b,
|
||||
Py_ssize_t max_cost);
|
||||
|
||||
// Export for '_json' shared extension
|
||||
PyAPI_FUNC(void) _PyErr_FormatNote(const char *format, ...);
|
||||
|
||||
/* Context manipulation (PEP 3134) */
|
||||
|
||||
Py_DEPRECATED(3.12) extern void _PyErr_ChainExceptions(PyObject *, PyObject *, PyObject *);
|
||||
|
||||
// implementation detail for the codeop module.
|
||||
// Exported for test.test_peg_generator.test_c_parser
|
||||
PyAPI_DATA(PyTypeObject) _PyExc_IncompleteInputError;
|
||||
#define PyExc_IncompleteInputError ((PyObject *)(&_PyExc_IncompleteInputError))
|
||||
|
||||
extern int _PyUnicodeError_GetParams(
|
||||
PyObject *self,
|
||||
PyObject **obj,
|
||||
Py_ssize_t *objlen,
|
||||
Py_ssize_t *start,
|
||||
Py_ssize_t *end,
|
||||
Py_ssize_t *slen,
|
||||
int as_bytes);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_PYERRORS_H */
|
||||
/* (diff-viewer residue removed: "Some files were not shown because too many
   files have changed in this diff".  This vendored chunk is truncated.) */