cuda.core.experimental.VirtualMemoryResourceOptions#

class cuda.core.experimental.VirtualMemoryResourceOptions(
allocation_type: ~typing.Literal['pinned',
'managed'] = 'pinned',
location_type: ~typing.Literal['device',
'host',
'host_numa',
'host_numa_current'] = 'device',
handle_type: ~typing.Literal['posix_fd',
'generic',
'win32',
'win32_kmt',
'fabric'] | None = 'posix_fd',
granularity: ~typing.Literal['minimum',
'recommended'] = 'recommended',
gpu_direct_rdma: bool = False,
addr_hint: int | None = 0,
addr_align: int | None = None,
peers: ~typing.Iterable[int] = <factory>,
self_access: ~typing.Literal['rw',
'r'] | None = 'rw',
peer_access: ~typing.Literal['rw',
'r'] | None = 'rw',
)#
A configuration object for the VirtualMemoryResource.

Stores configuration information that tells the resource how to use the CUDA VMM APIs.

allocation_type#

Controls the type of allocation.

Type:

VirtualMemoryAllocationTypeT

location_type#

Controls the location of the allocation.

Type:

VirtualMemoryLocationTypeT

handle_type#

Export handle type for the physical allocation. Use "posix_fd" on Linux if you plan to import/export the allocation (required for cuMemRetainAllocationHandle). Use None if you don’t need an exportable handle.

Type:

VirtualMemoryHandleTypeT

gpu_direct_rdma#

Hint that the allocation should be GDR-capable (if supported).

Type:

bool

granularity#

Controls granularity query and size rounding.

Type:

VirtualMemoryGranularityT

addr_hint#

An optional virtual address hint at which to try to reserve the mapping. Setting it to 0 lets the CUDA driver decide.

Type:

int

addr_align#

Alignment for the VA reservation. If None, use the queried granularity.

Type:

int

peers#

Extra device IDs that should be granted access in addition to the owning device.

Type:

Iterable[int]

self_access#

Access flags for the owning device.

Type:

VirtualMemoryAccessTypeT

peer_access#

Access flags for peers.

Type:

VirtualMemoryAccessTypeT