Coverage for cuda / core / _dlpack.pyx: 90.83%

120 statements  

« prev     ^ index     » next       coverage.py v7.13.4, created at 2026-03-08 01:07 +0000

1# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 

2# 

3# SPDX-License-Identifier: Apache-2.0 

4  

5from enum import IntEnum 

6  

7  

cdef void pycapsule_deleter(object capsule) noexcept:
    """Capsule destructor: free the managed tensor iff it was never consumed.

    A consumer that takes ownership renames the capsule, so only capsules
    still carrying one of the two "unused" names are cleaned up here.
    """
    cdef DLManagedTensor* unversioned
    cdef DLManagedTensorVersioned* versioned
    # Do not invoke the deleter on a used capsule.
    if cpython.PyCapsule_IsValid(capsule, DLPACK_TENSOR_UNUSED_NAME):
        unversioned = <DLManagedTensor*>(
            cpython.PyCapsule_GetPointer(capsule, DLPACK_TENSOR_UNUSED_NAME))
        # deleter may be NULL per the DLPack contract; only call it if set.
        if unversioned.deleter:
            unversioned.deleter(unversioned)
    elif cpython.PyCapsule_IsValid(capsule, DLPACK_VERSIONED_TENSOR_UNUSED_NAME):
        versioned = <DLManagedTensorVersioned*>(
            cpython.PyCapsule_GetPointer(capsule, DLPACK_VERSIONED_TENSOR_UNUSED_NAME))
        if versioned.deleter:
            versioned.deleter(versioned)

26  

27  

cdef void deleter(DLManagedTensor* tensor) noexcept with gil:
    """Release an unversioned DLManagedTensor produced by make_py_capsule.

    Frees the shape/strides allocation, drops the Python reference held in
    manager_ctx, then frees the struct itself. NULL-safe at every level so
    it can run on a partially constructed tensor. Needs the GIL for the
    Py_DECREF.
    """
    if tensor == NULL:
        return
    if tensor.dl_tensor.shape != NULL:
        stdlib.free(tensor.dl_tensor.shape)
    if tensor.manager_ctx != NULL:
        cpython.Py_DECREF(<object>tensor.manager_ctx)
        tensor.manager_ctx = NULL
    stdlib.free(tensor)

36  

37  

cdef void versioned_deleter(DLManagedTensorVersioned* tensor) noexcept with gil:
    """Release a DLManagedTensorVersioned produced by make_py_capsule.

    Mirror of ``deleter`` for the versioned struct: free shape/strides,
    Py_DECREF the owning Buffer stored in manager_ctx, free the struct.
    NULL-safe so it may be called on a partially constructed tensor.
    """
    if tensor == NULL:
        return
    if tensor.dl_tensor.shape != NULL:
        stdlib.free(tensor.dl_tensor.shape)
    if tensor.manager_ctx != NULL:
        cpython.Py_DECREF(<object>tensor.manager_ctx)
        tensor.manager_ctx = NULL
    stdlib.free(tensor)

46  

47  

cdef inline DLManagedTensorVersioned* allocate_dlm_tensor_versioned() except? NULL:
    """Heap-allocate a DLManagedTensorVersioned with its deleter-inspected
    pointer fields pre-cleared.

    Raises MemoryError on allocation failure. Clearing dl_tensor.shape and
    manager_ctx up front lets versioned_deleter safely reclaim a tensor
    whose construction is later abandoned.
    """
    cdef DLManagedTensorVersioned* dlm_tensor_ver = <DLManagedTensorVersioned*>(
        stdlib.malloc(sizeof(DLManagedTensorVersioned)))
    # Fix: the original dereferenced the malloc result unchecked, so an
    # allocation failure would crash instead of raising (the surrounding
    # try/except cannot catch a NULL dereference). Check explicitly, as
    # setup_dl_tensor_layout already does.
    if dlm_tensor_ver == NULL:
        raise MemoryError()
    dlm_tensor_ver.dl_tensor.shape = NULL
    dlm_tensor_ver.manager_ctx = NULL
    return dlm_tensor_ver

60  

61  

cdef inline DLManagedTensor* allocate_dlm_tensor() except? NULL:
    """Heap-allocate an unversioned DLManagedTensor with its
    deleter-inspected pointer fields pre-cleared.

    Raises MemoryError on allocation failure. Clearing dl_tensor.shape and
    manager_ctx up front lets ``deleter`` safely reclaim a tensor whose
    construction is later abandoned.
    """
    cdef DLManagedTensor* dlm_tensor = <DLManagedTensor*>(
        stdlib.malloc(sizeof(DLManagedTensor)))
    # Fix: the original dereferenced the malloc result unchecked, so an
    # allocation failure would crash instead of raising (the surrounding
    # try/except cannot catch a NULL dereference). Check explicitly, as
    # setup_dl_tensor_layout already does.
    if dlm_tensor == NULL:
        raise MemoryError()
    dlm_tensor.dl_tensor.shape = NULL
    dlm_tensor.manager_ctx = NULL
    return dlm_tensor

74  

75  

cdef inline int setup_dl_tensor_layout(DLTensor* dl_tensor, object buf) except -1:
    """Describe *buf* as a 1-D contiguous byte tensor.

    Sets shape=(buf.size,), strides=(1,), byte_offset=0. Raises MemoryError
    if the shape/strides storage cannot be allocated. Returns 0 on success.
    """
    # One allocation holds both entries: [0] is the shape, [1] the stride.
    cdef int64_t* meta = <int64_t*>stdlib.malloc(sizeof(int64_t) * 2)
    if meta == NULL:
        raise MemoryError()
    dl_tensor.ndim = 1
    meta[0] = <int64_t>buf.size
    # DLPack v1.2+ requires non-NULL strides for ndim != 0.
    meta[1] = 1
    dl_tensor.shape = meta
    dl_tensor.strides = meta + 1
    dl_tensor.byte_offset = 0
    return 0

89  

90  

cdef inline int setup_dl_tensor_device(DLTensor* dl_tensor, object buf) except -1:
    """Fill dl_tensor.device from the accessibility flags of *buf*.

    buf is expected to be a Buffer instance. device-only -> kDLCUDA (with
    the buffer's device id), device+host -> kDLCUDAHost, host-only ->
    kDLCPU. A buffer accessible from neither side is invalid and raises
    BufferError. Returns 0 on success.
    """
    cdef DLDevice* device = &dl_tensor.device
    # Read each flag once; they are assumed side-effect-free properties.
    cdef bint dev_ok = buf.is_device_accessible
    cdef bint host_ok = buf.is_host_accessible
    if dev_ok and not host_ok:
        device.device_type = _kDLCUDA
        device.device_id = buf.device_id
    elif dev_ok and host_ok:
        device.device_type = _kDLCUDAHost
        device.device_id = 0
    elif host_ok:  # not dev_ok
        device.device_type = _kDLCPU
        device.device_id = 0
    else:  # accessible from neither device nor host
        raise BufferError("invalid buffer")
    return 0

106  

107  

cdef inline int setup_dl_tensor_dtype(DLTensor* dl_tensor) except -1 nogil:
    """Tag the tensor's dtype as signed 8-bit integer, 1 lane (raw bytes).

    Always returns 0; declared ``except -1`` only to match the other
    setup_* helpers' signatures.
    """
    cdef DLDataType* dtype = &dl_tensor.dtype
    dtype.code = <uint8_t>kDLInt
    dtype.bits = <uint8_t>8
    dtype.lanes = <uint16_t>1
    return 0

114  

115  

cpdef object make_py_capsule(object buf, bint versioned):
    """Export *buf* (a Buffer) as a DLPack PyCapsule.

    When ``versioned`` is true the capsule wraps a DLManagedTensorVersioned
    (named DLPACK_VERSIONED_TENSOR_UNUSED_NAME); otherwise a plain
    DLManagedTensor (named DLPACK_TENSOR_UNUSED_NAME). The capsule holds a
    strong reference to *buf* via manager_ctx, released by the tensor's
    deleter. On any failure before the capsule exists, the partially built
    tensor is reclaimed (both cleanup calls are NULL-safe) and the
    exception propagates.
    """
    cdef DLManagedTensor* plain = NULL
    cdef DLManagedTensorVersioned* ver = NULL
    cdef DLTensor* dl_tensor
    cdef void* payload
    cdef const char* capsule_name
    cdef object capsule = None

    try:
        if versioned:
            ver = allocate_dlm_tensor_versioned()
            # The INCREF'd reference is owned by manager_ctx from here on;
            # versioned_deleter is responsible for the matching DECREF.
            cpython.Py_INCREF(buf)
            ver.manager_ctx = <void*>buf
            ver.deleter = versioned_deleter
            ver.version.major = DLPACK_MAJOR_VERSION
            ver.version.minor = DLPACK_MINOR_VERSION
            ver.flags = 0
            dl_tensor = &ver.dl_tensor
            payload = ver
            capsule_name = DLPACK_VERSIONED_TENSOR_UNUSED_NAME
        else:
            plain = allocate_dlm_tensor()
            # Reference transferred to manager_ctx; ``deleter`` DECREFs it.
            cpython.Py_INCREF(buf)
            plain.manager_ctx = <void*>buf
            plain.deleter = deleter
            dl_tensor = &plain.dl_tensor
            payload = plain
            capsule_name = DLPACK_TENSOR_UNUSED_NAME

        dl_tensor.data = <void*><intptr_t>(int(buf.handle))
        setup_dl_tensor_layout(dl_tensor, buf)
        setup_dl_tensor_device(dl_tensor, buf)
        setup_dl_tensor_dtype(dl_tensor)
        capsule = cpython.PyCapsule_New(payload, capsule_name, pycapsule_deleter)
    except:
        # Once the capsule exists, its destructor owns cleanup; otherwise
        # tear down whichever tensor was under construction (at most one
        # is non-NULL, and both cleanup functions ignore NULL).
        if capsule is None:
            deleter(plain)
            versioned_deleter(ver)
        raise
    return capsule

158  

159  

class DLDeviceType(IntEnum):
    """DLPack device-type codes re-exported as a Python IntEnum.

    Values mirror the C-level DLDeviceType constants cimported by this
    module.
    """
    kDLCPU = _kDLCPU
    kDLCUDA = _kDLCUDA
    kDLCUDAHost = _kDLCUDAHost
    kDLCUDAManaged = _kDLCUDAManaged