Coverage for cuda / core / _memory / _pinned_memory_resource.pyx: 85.71%
63 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-03-25 01:07 +0000
1# SPDX-FileCopyrightText: Copyright (c) 2024-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2#
3# SPDX-License-Identifier: Apache-2.0
5from __future__ import annotations
7from cuda.bindings cimport cydriver
8from cuda.core._memory._memory_pool cimport _MemPool, MP_init_create_pool, MP_init_current_pool
9from cuda.core._memory cimport _ipc
10from cuda.core._memory._ipc cimport IPCAllocationHandle
11from cuda.core._utils.cuda_utils cimport (
12 check_or_create_options,
13 HANDLE_RETURN,
14)
16from dataclasses import dataclass
17import multiprocessing
18import platform # no-cython-lint
19import uuid
21from cuda.core._utils.cuda_utils import check_multiprocessing_start_method
23__all__ = ['PinnedMemoryResource', 'PinnedMemoryResourceOptions']
@dataclass
cdef class PinnedMemoryResourceOptions:
    """Customizable :obj:`~_memory.PinnedMemoryResource` options.

    Attributes
    ----------
    ipc_enabled : bool, optional
        Whether the memory pool is created IPC-enabled. When True, the pool
        and its allocations can be shared with other processes.
        (Default to False)

    max_size : int, optional
        Maximum pool size. When set to 0, defaults to a system-dependent value.
        (Default to 0)

    numa_id : int or None, optional
        Host NUMA node ID for pool placement. When set to None (the default),
        the behavior depends on ``ipc_enabled``:

        - ``ipc_enabled=False``: OS-managed placement (location type HOST).
        - ``ipc_enabled=True``: automatically derived from the current CUDA
          device's ``host_numa_id`` attribute, requiring an active CUDA
          context.

        When set to a non-negative integer, that NUMA node is used explicitly
        regardless of ``ipc_enabled`` (location type HOST_NUMA).
    """
    ipc_enabled: bool = False
    max_size: int = 0
    numa_id: int | None = None
cdef class PinnedMemoryResource(_MemPool):
    """
    A host-pinned memory resource managing a stream-ordered memory pool.

    Parameters
    ----------
    options : PinnedMemoryResourceOptions
        Memory resource creation options.

        If set to `None`, the memory resource uses the driver's current
        stream-ordered memory pool. If no memory pool is set as current,
        the driver's default memory pool is used.

        If not set to `None`, a new memory pool is created, which is owned
        by the memory resource.

        When using an existing (current or default) memory pool, the returned
        host-pinned memory resource does not own the pool (`is_handle_owned`
        is `False`), and closing the resource has no effect.

    Notes
    -----
    To create an IPC-enabled memory resource (MR) that is capable of sharing
    allocations between processes, specify ``ipc_enabled=True`` in the
    initializer options. When IPC is enabled and ``numa_id`` is not specified,
    the NUMA node is automatically derived from the current CUDA device's
    ``host_numa_id`` attribute, which requires an active CUDA context. If
    ``numa_id`` is explicitly set, that value is used regardless of
    ``ipc_enabled``.

    See :class:`DeviceMemoryResource` for more details on IPC usage patterns.
    """

    def __init__(self, options=None):
        _PMR_init(self, options)

    def __reduce__(self):
        # Shallow pickling: reconstruct via the process-local registry, keyed
        # by this resource's UUID.
        return PinnedMemoryResource.from_registry, (self.uuid,)

    @staticmethod
    def from_registry(uuid: uuid.UUID) -> PinnedMemoryResource:  # no-cython-lint
        """
        Obtain a registered host-pinned memory resource.

        Raises
        ------
        RuntimeError
            If no host-pinned memory resource is found in the registry.
        """
        return <PinnedMemoryResource>(_ipc.MP_from_registry(uuid))

    def register(self, uuid: uuid.UUID) -> PinnedMemoryResource:  # no-cython-lint
        """
        Register a host-pinned memory resource.

        Returns
        -------
        The registered host-pinned memory resource. If one was previously
        registered with the given key, it is returned.
        """
        return <PinnedMemoryResource>(_ipc.MP_register(self, uuid))

    @classmethod
    def from_allocation_handle(
        cls, alloc_handle: int | IPCAllocationHandle
    ) -> PinnedMemoryResource:
        """Create a host-pinned memory resource from an allocation handle.

        Construct a new `PinnedMemoryResource` instance that imports a memory
        pool from a shareable handle. The memory pool is marked as owned.

        Parameters
        ----------
        alloc_handle : int | IPCAllocationHandle
            The shareable handle of the host-pinned memory resource to import.
            If an integer is supplied, it must represent a valid
            platform-specific handle. It is the caller's responsibility to
            close that handle.

        Returns
        -------
        A new host-pinned memory resource instance with the imported handle.
        """
        # cuMemPoolImportFromShareableHandle requires CUDA to be initialized,
        # but in a child process CUDA may not be initialized yet. For
        # DeviceMemoryResource, this is not a concern because most likely when
        # retrieving the device_id the user would have already initialized
        # CUDA. But since PinnedMemoryResource is not device-specific, that is
        # unlikely to be the case.
        HANDLE_RETURN(cydriver.cuInit(0))

        cdef PinnedMemoryResource mr = <PinnedMemoryResource>(
            _ipc.MP_from_allocation_handle(cls, alloc_handle))
        return mr

    def get_allocation_handle(self) -> IPCAllocationHandle:
        """Export the memory pool handle to be shared (requires IPC).

        The handle can be used to share the memory pool with other processes.
        The handle is cached in this `MemoryResource` and owned by it.

        Returns
        -------
        The shareable handle for the memory pool.
        """
        if not self.is_ipc_enabled:
            raise RuntimeError("Memory resource is not IPC-enabled")
        return self._ipc_data._alloc_handle

    @property
    def device_id(self) -> int:
        """Return -1. Pinned memory is host memory and is not associated with a specific device."""
        return -1

    @property
    def numa_id(self) -> int:
        """The host NUMA node ID used for pool placement, or -1 for OS-managed placement."""
        return self._numa_id

    @property
    def is_device_accessible(self) -> bool:
        """Return True. This memory resource provides device-accessible buffers."""
        return True

    @property
    def is_host_accessible(self) -> bool:
        """Return True. This memory resource provides host-accessible buffers."""
        return True
cdef inline _PMR_init(PinnedMemoryResource self, options):
    """Initialize a PinnedMemoryResource from optional creation options."""
    # Imported lazily to avoid a circular import at module load time.
    from .._device import Device

    cdef PinnedMemoryResourceOptions opts = check_or_create_options(
        PinnedMemoryResourceOptions, options, "PinnedMemoryResource options",
        keep_none=True
    )
    # Defaults used when no options are supplied.
    cdef bint want_ipc = False
    cdef size_t pool_max_size = 0
    cdef cydriver.CUmemLocationType location_type
    cdef int node_id = -1

    if opts is not None:
        want_ipc = opts.ipc_enabled
        if want_ipc and not _ipc.is_supported():
            raise RuntimeError(f"IPC is not available on {platform.system()}")
        pool_max_size = opts.max_size

        if opts.numa_id is not None:
            # Explicit NUMA placement requested by the caller.
            node_id = opts.numa_id
            if node_id < 0:
                raise ValueError(f"numa_id must be >= 0, got {node_id}")
        elif want_ipc:
            # IPC pools need a concrete NUMA node; derive it from the current
            # device (requires an active CUDA context).
            current_device = Device()
            node_id = current_device.properties.host_numa_id
            if node_id < 0:
                raise RuntimeError(
                    "Cannot determine host NUMA ID for IPC-enabled pinned "
                    "memory pool. The system may not support NUMA, or no "
                    "CUDA context is active. Set numa_id explicitly or "
                    "call Device.set_current() first.")

    # A non-negative node means explicit HOST_NUMA placement; otherwise the
    # OS manages placement (plain HOST).
    location_type = (
        cydriver.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST_NUMA
        if node_id >= 0
        else cydriver.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST
    )

    self._numa_id = node_id

    if opts is None:
        # No options: adopt the driver's current (or default) pool; not owned.
        MP_init_current_pool(
            self,
            location_type,
            node_id,
            cydriver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED,
        )
    else:
        # Options given: create a new pool owned by this resource.
        MP_init_create_pool(
            self,
            location_type,
            node_id,
            cydriver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED,
            want_ipc,
            pool_max_size,
        )
def _deep_reduce_pinned_memory_resource(mr):
    """Deep-pickle reducer: export *mr*'s pool as a shareable handle.

    Used by ``multiprocessing.reduction`` so an IPC-enabled resource can be
    reconstructed in a child process via ``from_allocation_handle``.
    """
    check_multiprocessing_start_method()
    handle = mr.get_allocation_handle()
    return mr.from_allocation_handle, (handle,)
# Register the deep-pickle reducer so PinnedMemoryResource instances sent to
# child processes via multiprocessing are serialized as allocation handles.
multiprocessing.reduction.register(PinnedMemoryResource, _deep_reduce_pinned_memory_resource)