Coverage for cuda / core / _dlpack.pyx: 92.68%
123 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-29 01:27 +0000
1# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2#
3# SPDX-License-Identifier: Apache-2.0
5from enum import IntEnum
cdef void pycapsule_deleter(object capsule) noexcept:
    """Destructor for capsules produced by ``make_py_capsule``.

    Frees the wrapped managed tensor only while the capsule still carries
    one of the "unused" names; per DLPack convention a consumer renames a
    consumed capsule, so a renamed capsule must not be freed here again.
    """
    cdef DLManagedTensor* dlm_tensor
    cdef DLManagedTensorVersioned* dlm_tensor_ver
    # Do not invoke the deleter on a used capsule.
    if cpython.PyCapsule_IsValid(
            capsule, DLPACK_TENSOR_UNUSED_NAME):
        # Unversioned tensor that was never consumed: run its own deleter.
        dlm_tensor = <DLManagedTensor*>(
            cpython.PyCapsule_GetPointer(
                capsule, DLPACK_TENSOR_UNUSED_NAME))
        if dlm_tensor.deleter:
            dlm_tensor.deleter(dlm_tensor)
    elif cpython.PyCapsule_IsValid(
            capsule, DLPACK_VERSIONED_TENSOR_UNUSED_NAME):
        # Versioned tensor that was never consumed: same treatment.
        dlm_tensor_ver = <DLManagedTensorVersioned*>(
            cpython.PyCapsule_GetPointer(
                capsule, DLPACK_VERSIONED_TENSOR_UNUSED_NAME))
        if dlm_tensor_ver.deleter:
            dlm_tensor_ver.deleter(dlm_tensor_ver)
cdef void deleter(DLManagedTensor* tensor) noexcept with gil:
    """``DLManagedTensor.deleter`` implementation.

    Frees the shape/strides storage, drops the Python reference held on
    the exporting buffer (``manager_ctx``), then frees the struct itself.
    Requires the GIL for ``Py_DECREF``; NULL-safe on every pointer so it
    may be invoked from error paths before allocation completed.
    """
    if tensor:
        if tensor.dl_tensor.shape:
            # One allocation holds both shape and strides
            # (see setup_dl_tensor_layout), freed via .shape.
            stdlib.free(tensor.dl_tensor.shape)
        if tensor.manager_ctx:
            # Release the reference taken in make_py_capsule.
            cpython.Py_DECREF(<object>tensor.manager_ctx)
            tensor.manager_ctx = NULL
        stdlib.free(tensor)
cdef void versioned_deleter(DLManagedTensorVersioned* tensor) noexcept with gil:
    """``DLManagedTensorVersioned.deleter`` implementation.

    Mirrors ``deleter``: frees the shape/strides storage, drops the
    Python reference on ``manager_ctx``, then frees the struct. Requires
    the GIL for ``Py_DECREF``; NULL-safe for use on error paths.
    """
    if tensor:
        if tensor.dl_tensor.shape:
            # One allocation holds both shape and strides
            # (see setup_dl_tensor_layout), freed via .shape.
            stdlib.free(tensor.dl_tensor.shape)
        if tensor.manager_ctx:
            # Release the reference taken in make_py_capsule.
            cpython.Py_DECREF(<object>tensor.manager_ctx)
            tensor.manager_ctx = NULL
        stdlib.free(tensor)
cdef inline DLManagedTensorVersioned* allocate_dlm_tensor_versioned() except? NULL:
    """Allocate a ``DLManagedTensorVersioned`` with NULLed owned pointers.

    Returns the struct with ``dl_tensor.shape`` and ``manager_ctx`` set
    to NULL so the deleters are safe to call on a partially built tensor.

    Raises
    ------
    MemoryError
        If the C allocation fails.
    """
    cdef DLManagedTensorVersioned* dlm_tensor_ver = NULL
    try:
        dlm_tensor_ver = <DLManagedTensorVersioned*>(
            stdlib.malloc(sizeof(DLManagedTensorVersioned)))
        # malloc returns NULL on failure; without this check the writes
        # below would dereference NULL and crash instead of raising
        # (matches the check in setup_dl_tensor_layout).
        if dlm_tensor_ver == NULL:
            raise MemoryError()
        dlm_tensor_ver.dl_tensor.shape = NULL
        dlm_tensor_ver.manager_ctx = NULL
        return dlm_tensor_ver
    except:
        if dlm_tensor_ver:
            stdlib.free(dlm_tensor_ver)
        raise
cdef inline DLManagedTensor* allocate_dlm_tensor() except? NULL:
    """Allocate a (legacy) ``DLManagedTensor`` with NULLed owned pointers.

    Returns the struct with ``dl_tensor.shape`` and ``manager_ctx`` set
    to NULL so ``deleter`` is safe to call on a partially built tensor.

    Raises
    ------
    MemoryError
        If the C allocation fails.
    """
    cdef DLManagedTensor* dlm_tensor = NULL
    try:
        dlm_tensor = <DLManagedTensor*>(
            stdlib.malloc(sizeof(DLManagedTensor)))
        # malloc returns NULL on failure; without this check the writes
        # below would dereference NULL and crash instead of raising
        # (matches the check in setup_dl_tensor_layout).
        if dlm_tensor == NULL:
            raise MemoryError()
        dlm_tensor.dl_tensor.shape = NULL
        dlm_tensor.manager_ctx = NULL
        return dlm_tensor
    except:
        if dlm_tensor:
            stdlib.free(dlm_tensor)
        raise
cdef inline int setup_dl_tensor_layout(DLTensor* dl_tensor, object buf) except -1:
    """Describe ``buf`` as a contiguous 1-D tensor of ``buf.size`` elements.

    A single two-slot allocation backs both shape (slot 0) and strides
    (slot 1); the matching free in the deleters releases ``shape`` only,
    which therefore frees both.
    """
    dl_tensor.ndim = 1
    cdef int64_t* shape_strides = \
        <int64_t*>stdlib.malloc(sizeof(int64_t) * 2)
    if shape_strides == NULL:
        raise MemoryError()
    # DLPack v1.2+ requires non-NULL strides for ndim != 0.
    shape_strides[0] = <int64_t>buf.size
    shape_strides[1] = 1  # unit stride: contiguous elements
    dl_tensor.shape = shape_strides
    dl_tensor.strides = shape_strides + 1
    dl_tensor.byte_offset = 0
    return 0
def classify_dl_device(buf) -> tuple[int, int]:
    """Classify a buffer into a DLPack (device_type, device_id) pair.

    ``buf`` must expose ``is_device_accessible``, ``is_host_accessible``,
    ``is_managed``, and ``device_id`` attributes.
    """
    cdef bint dev = buf.is_device_accessible
    cdef bint host = buf.is_host_accessible
    if dev:
        if host:
            # Accessible from both sides: managed vs. pinned host memory.
            return (_kDLCUDAManaged if buf.is_managed else _kDLCUDAHost, 0)
        # Device-only memory carries a concrete device ordinal.
        return (_kDLCUDA, buf.device_id)
    if host:
        return (_kDLCPU, 0)
    raise BufferError("buffer is neither device-accessible nor host-accessible")
cdef inline int setup_dl_tensor_device(DLTensor* dl_tensor, object buf) except -1:
    """Fill ``dl_tensor.device`` from the buffer's accessibility flags
    via ``classify_dl_device``."""
    cdef DLDevice* device = &dl_tensor.device
    dev_type, dev_id = classify_dl_device(buf)
    device.device_type = <_DLDeviceType>dev_type
    device.device_id = <int32_t>dev_id
    return 0
cdef inline int setup_dl_tensor_dtype(DLTensor* dl_tensor) except -1 nogil:
    """Fill ``dl_tensor.dtype`` as signed 8-bit integer, single lane —
    i.e. the buffer is exported as one int8 element per byte."""
    cdef DLDataType* dtype = &dl_tensor.dtype
    dtype.code = <uint8_t>kDLInt
    dtype.lanes = <uint16_t>1
    dtype.bits = <uint8_t>8
    return 0
cpdef object make_py_capsule(object buf, bint versioned):
    """Export ``buf`` as a DLPack PyCapsule.

    Parameters
    ----------
    buf : object
        Exporting buffer; must provide ``handle`` (convertible to an
        integer pointer), ``size``, and the accessibility attributes
        read by ``classify_dl_device``.
    versioned : bint
        If true, build a ``DLManagedTensorVersioned`` capsule; otherwise
        a legacy ``DLManagedTensor`` capsule.

    The capsule keeps ``buf`` alive through a reference stored in
    ``manager_ctx``; the tensor's deleter releases it.
    """
    cdef DLManagedTensor* dlm_tensor = NULL
    cdef DLManagedTensorVersioned* dlm_tensor_ver = NULL
    cdef DLTensor* dl_tensor
    cdef void* tensor_ptr
    cdef const char* capsule_name
    cdef object ret = None

    try:
        if versioned:
            dlm_tensor_ver = allocate_dlm_tensor_versioned()
            # Transfer the reference to manager_ctx
            cpython.Py_INCREF(buf)
            dlm_tensor_ver.manager_ctx = <void*>buf
            dlm_tensor_ver.deleter = versioned_deleter
            dlm_tensor_ver.version.major = DLPACK_MAJOR_VERSION
            dlm_tensor_ver.version.minor = DLPACK_MINOR_VERSION
            dlm_tensor_ver.flags = 0
            dl_tensor = &dlm_tensor_ver.dl_tensor
            tensor_ptr = dlm_tensor_ver
            capsule_name = DLPACK_VERSIONED_TENSOR_UNUSED_NAME
        else:
            dlm_tensor = allocate_dlm_tensor()
            # Transfer the reference to manager_ctx
            cpython.Py_INCREF(buf)
            dlm_tensor.manager_ctx = <void*>buf
            dlm_tensor.deleter = deleter
            dl_tensor = &dlm_tensor.dl_tensor
            tensor_ptr = dlm_tensor
            capsule_name = DLPACK_TENSOR_UNUSED_NAME

        dl_tensor.data = <void*><intptr_t>(int(buf.handle))
        setup_dl_tensor_layout(dl_tensor, buf)
        setup_dl_tensor_device(dl_tensor, buf)
        setup_dl_tensor_dtype(dl_tensor)
        ret = cpython.PyCapsule_New(tensor_ptr, capsule_name, pycapsule_deleter)
    except:
        # Only clean up if the capsule never came into existence; once
        # created, pycapsule_deleter owns the tensor. Both deleters are
        # NULL-safe, so at most one of these does real work.
        if ret is None:
            deleter(dlm_tensor)
            versioned_deleter(dlm_tensor_ver)
        raise
    return ret
class DLDeviceType(IntEnum):
    """Python-visible DLPack device type codes (subset used here),
    mirroring the underlying C enum values."""
    kDLCPU = _kDLCPU
    kDLCUDA = _kDLCUDA
    kDLCUDAHost = _kDLCUDAHost
    kDLCUDAManaged = _kDLCUDAManaged