Coverage for cuda / core / _memory / _device_memory_resource.pyx: 42.62%
122 statements
« prev ^ index » next — coverage.py v7.13.5, created at 2026-03-25 01:07 +0000
1# SPDX-FileCopyrightText: Copyright (c) 2024-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2#
3# SPDX-License-Identifier: Apache-2.0
5from __future__ import annotations
7from cuda.bindings cimport cydriver
8from cuda.core._memory._memory_pool cimport (
9 _MemPool, MP_init_create_pool, MP_raise_release_threshold,
10)
11from cuda.core._memory cimport _ipc
12from cuda.core._memory._ipc cimport IPCAllocationHandle
13from cuda.core._resource_handles cimport (
14 as_cu,
15 get_device_mempool,
16)
17from cuda.core._utils.cuda_utils cimport (
18 check_or_create_options,
19 HANDLE_RETURN,
20)
21from cpython.mem cimport PyMem_Malloc, PyMem_Free
23from dataclasses import dataclass
24import multiprocessing
25import platform # no-cython-lint
26import uuid
28from ._peer_access_utils import plan_peer_access_update
29from cuda.core._utils.cuda_utils import check_multiprocessing_start_method
# Public API of this module.
__all__ = ['DeviceMemoryResource', 'DeviceMemoryResourceOptions']
@dataclass
cdef class DeviceMemoryResourceOptions:
    """Customizable :obj:`~_memory.DeviceMemoryResource` options.

    Attributes
    ----------
    ipc_enabled : bool, optional
        Specifies whether to create an IPC-enabled memory pool. When set to
        True, the memory pool and its allocations can be shared with other
        processes. (Default to False)

    max_size : int, optional
        Maximum pool size. When set to 0, defaults to a system-dependent value.
        (Default to 0)
    """
    # NOTE: defaults here must stay in sync with the fallbacks in _DMR_init
    # (ipc_enabled=False, max_size=0) used when options is None.
    ipc_enabled : bool = False
    max_size : int = 0
cdef class DeviceMemoryResource(_MemPool):
    """
    A device memory resource managing a stream-ordered memory pool.

    Parameters
    ----------
    device_id : Device | int
        Device or Device ordinal for which a memory resource is constructed.

    options : DeviceMemoryResourceOptions
        Memory resource creation options.

        If set to `None`, the memory resource uses the driver's current
        stream-ordered memory pool for the specified `device_id`. If no memory
        pool is set as current, the driver's default memory pool for the device
        is used.

        If not set to `None`, a new memory pool is created, which is owned by
        the memory resource.

        When using an existing (current or default) memory pool, the returned
        device memory resource does not own the pool (`is_handle_owned` is
        `False`), and closing the resource has no effect.

    Notes
    -----
    To create an IPC-Enabled memory resource (MR) that is capable of sharing
    allocations between processes, specify ``ipc_enabled=True`` in the initializer
    option. Sharing an allocation is a two-step procedure that involves
    mapping a memory resource and then mapping buffers owned by that resource.
    These steps can be accomplished in several ways.

    An IPC-enabled memory resource can allocate memory buffers but cannot
    receive shared buffers. Mapping an MR to another process creates a "mapped
    memory resource" (MMR). An MMR cannot allocate memory buffers and can only
    receive shared buffers. MRs and MMRs are both of type
    :class:`DeviceMemoryResource` and can be distinguished via
    :attr:`DeviceMemoryResource.is_mapped`.

    An MR is shared via an allocation handle obtained by calling
    :meth:`DeviceMemoryResource.get_allocation_handle`. The allocation handle
    has a platform-specific interpretation; however, memory IPC is currently
    only supported for Linux, and in that case allocation handles are file
    descriptors. After sending an allocation handle to another process, it can
    be used to create an MMR by invoking
    :meth:`DeviceMemoryResource.from_allocation_handle`.

    Buffers can be shared as serializable descriptors obtained by calling
    :meth:`Buffer.get_ipc_descriptor`. In a receiving process, a shared buffer is
    created by invoking :meth:`Buffer.from_ipc_descriptor` with an MMR and
    buffer descriptor, where the MMR corresponds to the MR that created the
    described buffer.

    To help manage the association between memory resources and buffers, a
    registry is provided. Every MR has a unique identifier (UUID). MMRs can be
    registered by calling :meth:`DeviceMemoryResource.register` with the UUID
    of the corresponding MR. Registered MMRs can be looked up via
    :meth:`DeviceMemoryResource.from_registry`. When registering MMRs in this
    way, the use of buffer descriptors can be avoided. Instead, buffer objects
    can themselves be serialized and transferred directly. Serialization embeds
    the UUID, which is used to locate the correct MMR during reconstruction.

    IPC-enabled memory resources interoperate with the :mod:`multiprocessing`
    module to provide a simplified interface. This approach can avoid direct
    use of allocation handles, buffer descriptors, MMRs, and the registry. When
    using :mod:`multiprocessing` to spawn processes or send objects through
    communication channels such as :class:`multiprocessing.Queue`,
    :class:`multiprocessing.Pipe`, or :class:`multiprocessing.Connection`,
    :class:`Buffer` objects may be sent directly, and in such cases the process
    for creating MMRs and mapping buffers will be handled automatically.

    For greater efficiency when transferring many buffers, one may also send
    MRs and buffers separately. When an MR is sent via :mod:`multiprocessing`,
    an MMR is created and registered in the receiving process. Subsequently,
    buffers may be serialized and transferred using ordinary :mod:`pickle`
    methods. The reconstruction procedure uses the registry to find the
    associated MMR.
    """

    def __cinit__(self, *args, **kwargs):
        # Sentinel defaults so partially-constructed instances are safe to
        # inspect/destroy even if __init__ raises before assigning them.
        self._dev_id = cydriver.CU_DEVICE_INVALID
        self._peer_accessible_by = None

    def __init__(self, device_id: Device | int, options=None):
        # All real initialization lives in the module-level cdef helper.
        _DMR_init(self, device_id, options)

    def __reduce__(self):
        # Pickle support: reconstruct in the receiving process by looking up
        # this resource's UUID in the process-local registry.
        return DeviceMemoryResource.from_registry, (self.uuid,)

    @staticmethod
    def from_registry(uuid: uuid.UUID) -> DeviceMemoryResource:  # no-cython-lint
        """
        Obtain a registered mapped memory resource.

        Raises
        ------
        RuntimeError
            If no mapped memory resource is found in the registry.
        """
        return <DeviceMemoryResource>(_ipc.MP_from_registry(uuid))

    def register(self, uuid: uuid.UUID) -> DeviceMemoryResource:  # no-cython-lint
        """
        Register a mapped memory resource.

        Returns
        -------
        The registered mapped memory resource. If one was previously registered
        with the given key, it is returned.
        """
        return <DeviceMemoryResource>(_ipc.MP_register(self, uuid))

    @classmethod
    def from_allocation_handle(
        cls, device_id: Device | int, alloc_handle: int | IPCAllocationHandle
    ) -> DeviceMemoryResource:
        """Create a device memory resource from an allocation handle.

        Construct a new `DeviceMemoryResource` instance that imports a memory
        pool from a shareable handle. The memory pool is marked as owned, and
        the resource is associated with the specified `device_id`.

        Parameters
        ----------
        device_id : int | Device
            The ID of the device or a Device object for which the memory
            resource is created.

        alloc_handle : int | IPCAllocationHandle
            The shareable handle of the device memory resource to import. If an
            integer is supplied, it must represent a valid platform-specific
            handle. It is the caller's responsibility to close that handle.

        Returns
        -------
        A new device memory resource instance with the imported handle.
        """
        cdef DeviceMemoryResource mr = <DeviceMemoryResource>(
            _ipc.MP_from_allocation_handle(cls, alloc_handle))
        # Local import avoids a circular dependency with .._device.
        from .._device import Device
        mr._dev_id = Device(device_id).device_id
        # A mapped resource never grants peer access itself.
        mr._peer_accessible_by = ()
        return mr

    def get_allocation_handle(self) -> IPCAllocationHandle:
        """Export the memory pool handle to be shared (requires IPC).

        The handle can be used to share the memory pool with other processes.
        The handle is cached in this `MemoryResource` and owned by it.

        Returns
        -------
        The shareable handle for the memory pool.
        """
        if not self.is_ipc_enabled:
            raise RuntimeError("Memory resource is not IPC-enabled")
        return self._ipc_data._alloc_handle

    @property
    def device_id(self) -> int:
        """The associated device ordinal."""
        return self._dev_id

    @property
    def peer_accessible_by(self):
        """
        Get or set the devices that can access allocations from this memory
        pool. Access can be modified at any time and affects all allocations
        from this memory pool.

        Returns a tuple of sorted device IDs that currently have peer access to
        allocations from this memory pool.

        When setting, accepts a sequence of Device objects or device IDs.
        Setting to an empty sequence revokes all peer access.

        For non-owned pools (the default or current device pool), the state
        is always queried from the driver to reflect changes made by other
        wrappers or direct driver calls.

        Examples
        --------
        >>> dmr = DeviceMemoryResource(0)
        >>> dmr.peer_accessible_by = [1]  # Grant access to device 1
        >>> assert dmr.peer_accessible_by == (1,)
        >>> dmr.peer_accessible_by = []  # Revoke access
        """
        if not self._mempool_owned:
            # Non-owned pools may be modified externally; re-query the driver
            # instead of trusting the cached tuple.
            _DMR_query_peer_access(self)
        return self._peer_accessible_by

    @peer_accessible_by.setter
    def peer_accessible_by(self, devices):
        _DMR_set_peer_accessible_by(self, devices)

    @property
    def is_device_accessible(self) -> bool:
        """Return True. This memory resource provides device-accessible buffers."""
        return True

    @property
    def is_host_accessible(self) -> bool:
        """Return False. This memory resource does not provide host-accessible buffers."""
        return False
cdef inline _DMR_query_peer_access(DeviceMemoryResource self):
    """Refresh ``_peer_accessible_by`` with the pool's actual access state.

    Asks the driver, for every device other than the owner, whether that
    device currently has read-write access to this pool, and caches the
    sorted tuple of accessible device IDs.
    """
    cdef int num_devices
    cdef cydriver.CUmemAccess_flags access_flags
    cdef cydriver.CUmemLocation loc
    cdef list accessible = []

    with nogil:
        HANDLE_RETURN(cydriver.cuDeviceGetCount(&num_devices))

    loc.type = cydriver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    for peer_id in range(num_devices):
        # The owning device is not a "peer"; skip it.
        if peer_id == self._dev_id:
            continue
        loc.id = peer_id
        with nogil:
            HANDLE_RETURN(cydriver.cuMemPoolGetAccess(&access_flags, as_cu(self._h_pool), &loc))
        if access_flags == cydriver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE:
            accessible.append(peer_id)

    self._peer_accessible_by = tuple(sorted(accessible))
cdef inline _DMR_set_peer_accessible_by(DeviceMemoryResource self, devices):
    """Grant/revoke peer access so exactly ``devices`` can access this pool.

    Computes the delta between the current peer set and the requested one,
    then applies grants and revocations in a single driver call.
    """
    from .._device import Device

    this_dev = Device(self._dev_id)
    cdef object resolve_device_id = lambda dev: Device(dev).device_id
    cdef object plan
    cdef tuple target_ids
    cdef tuple to_add
    cdef tuple to_rm
    if not self._mempool_owned:
        # Non-owned pools can change behind our back; refresh the cached state
        # so the computed delta is based on the driver's actual state.
        _DMR_query_peer_access(self)
    plan = plan_peer_access_update(
        owner_device_id=self._dev_id,
        current_peer_ids=self._peer_accessible_by,
        requested_devices=devices,
        resolve_device_id=resolve_device_id,
        can_access_peer=this_dev.can_access_peer,
    )
    target_ids = plan.target_ids
    to_add = plan.to_add
    to_rm = plan.to_remove
    cdef size_t count = len(to_add) + len(to_rm)
    cdef cydriver.CUmemAccessDesc* access_desc = NULL
    cdef size_t i = 0

    if count > 0:
        # One C array holds both the grant (READWRITE) and revoke (NONE)
        # descriptors; a single cuMemPoolSetAccess applies the whole update.
        access_desc = <cydriver.CUmemAccessDesc*>PyMem_Malloc(count * sizeof(cydriver.CUmemAccessDesc))
        if access_desc == NULL:
            raise MemoryError("Failed to allocate memory for access descriptors")

        try:
            for dev_id in to_add:
                access_desc[i].flags = cydriver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
                access_desc[i].location.type = cydriver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
                access_desc[i].location.id = dev_id
                i += 1

            for dev_id in to_rm:
                access_desc[i].flags = cydriver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_NONE
                access_desc[i].location.type = cydriver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
                access_desc[i].location.id = dev_id
                i += 1

            with nogil:
                HANDLE_RETURN(cydriver.cuMemPoolSetAccess(as_cu(self._h_pool), access_desc, count))
        finally:
            # Free the descriptor array even when the driver call raises.
            if access_desc != NULL:
                PyMem_Free(access_desc)

    self._peer_accessible_by = tuple(target_ids)
cdef inline _DMR_init(DeviceMemoryResource self, device_id, options):
    """Initialize ``self`` for ``device_id``.

    With ``options`` None, the driver's current/default pool for the device is
    adopted (not owned). Otherwise a new, owned pool is created according to
    the supplied :class:`DeviceMemoryResourceOptions`.
    """
    # Local import avoids a circular dependency with .._device.
    from .._device import Device
    cdef int dev_id = Device(device_id).device_id
    cdef DeviceMemoryResourceOptions opts = check_or_create_options(
        DeviceMemoryResourceOptions, options, "DeviceMemoryResource options",
        keep_none=True
    )
    cdef bint ipc_enabled = False
    cdef size_t max_size = 0

    self._dev_id = dev_id

    if opts is not None:
        ipc_enabled = opts.ipc_enabled
        if ipc_enabled and not _ipc.is_supported():
            # Memory IPC is currently Linux-only; fail fast with the platform name.
            raise RuntimeError(f"IPC is not available on {platform.system()}")
        max_size = opts.max_size

    if opts is None:
        # Adopt the driver's current (or default) pool. Not owned, so closing
        # this resource leaves the pool untouched.
        self._h_pool = get_device_mempool(dev_id)
        self._mempool_owned = False
        MP_raise_release_threshold(self)
    else:
        # Freshly created pools start with no peer access granted.
        self._peer_accessible_by = ()
        MP_init_create_pool(
            self,
            cydriver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE,
            dev_id,
            cydriver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED,
            ipc_enabled,
            max_size,
        )
# Note: this is referenced in instructions to debug nvbug 5698116.
cpdef DMR_mempool_get_access(DeviceMemoryResource dmr, int device_id):
    """
    Probes peer access from the given device using cuMemPoolGetAccess.

    Parameters
    ----------
    device_id : int or Device
        The device to query access for.

    Returns
    -------
    str
        Access permissions: "rw" for read-write, "r" for read-only, "" for no access.
    """
    from .._device import Device

    cdef int dev_id = Device(device_id).device_id
    cdef cydriver.CUmemAccess_flags access_flags
    cdef cydriver.CUmemLocation loc

    loc.type = cydriver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    loc.id = dev_id

    with nogil:
        HANDLE_RETURN(cydriver.cuMemPoolGetAccess(&access_flags, as_cu(dmr._h_pool), &loc))

    # Map the driver flag onto the short permission string.
    if access_flags == cydriver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE:
        return "rw"
    if access_flags == cydriver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READ:
        return "r"
    return ""
def _deep_reduce_device_memory_resource(mr):
    """multiprocessing reducer: rebuild the MR in the child from its handle."""
    # Only fork-safe start methods are allowed for sending IPC handles.
    check_multiprocessing_start_method()
    from .._device import Device

    owner = Device(mr.device_id)
    handle = mr.get_allocation_handle()
    return mr.from_allocation_handle, (owner, handle)
# Teach multiprocessing how to pickle DeviceMemoryResource objects across
# process boundaries (exports/imports the pool's allocation handle).
multiprocessing.reduction.register(DeviceMemoryResource, _deep_reduce_device_memory_resource)