Coverage for cuda / core / _memory / _legacy.py: 65.00%

60 statements  

« prev     ^ index     » next       coverage.py v7.13.5, created at 2026-03-25 01:07 +0000

1# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 

2# 

3# SPDX-License-Identifier: Apache-2.0 

4 

5from __future__ import annotations 

6 

7from typing import TYPE_CHECKING 

8 

9if TYPE_CHECKING: 

10 from cuda.core._memory._buffer import DevicePointerT 

11 

12from cuda.core._memory._buffer import Buffer, MemoryResource 

13from cuda.core._utils.cuda_utils import ( 

14 _check_driver_error as raise_if_driver_error, 

15) 

16from cuda.core._utils.cuda_utils import ( 

17 driver, 

18) 

19 

20__all__ = ["LegacyPinnedMemoryResource", "_SynchronousMemoryResource"] 

21 

22 

class LegacyPinnedMemoryResource(MemoryResource):
    """Pinned host memory resource built on the legacy
    cuMemAllocHost/cuMemFreeHost driver APIs.
    """

    # TODO: support creating this MR with flags that are later passed to cuMemHostAlloc?

    def allocate(self, size, stream=None) -> Buffer:
        """Allocate a pinned host buffer of the requested size.

        Parameters
        ----------
        size : int
            Number of bytes to allocate.
        stream : Stream, optional
            Currently ignored.

        Returns
        -------
        Buffer
            The allocated buffer object, which is accessible on both host and device.
        """
        if stream is None:
            from cuda.core._stream import default_stream

            stream = default_stream()
        # Zero-byte requests never reach the driver; hand back a null pointer.
        ptr = 0
        if size:
            err, ptr = driver.cuMemAllocHost(size)
            raise_if_driver_error(err)
        return Buffer._init(ptr, size, self)

    def deallocate(self, ptr: DevicePointerT, size, stream):
        """Free a buffer previously allocated by this resource.

        Parameters
        ----------
        ptr : :obj:`~_memory.DevicePointerT`
            The pointer or handle to the buffer to deallocate.
        size : int
            The size of the buffer to deallocate, in bytes.
        stream : Stream
            Stream that is synchronized before the host memory is released.
        """
        if stream is not None:
            stream.sync()
        # Zero-byte buffers were never backed by a driver allocation.
        if not size:
            return
        (err,) = driver.cuMemFreeHost(ptr)
        raise_if_driver_error(err)

    @property
    def is_device_accessible(self) -> bool:
        """bool: this memory resource provides device-accessible buffers."""
        return True

    @property
    def is_host_accessible(self) -> bool:
        """bool: this memory resource provides host-accessible buffers."""
        return True

    @property
    def device_id(self) -> int:
        """This memory resource is not bound to any GPU."""
        raise RuntimeError("a pinned memory resource is not bound to any GPU")

89 

90 

class _SynchronousMemoryResource(MemoryResource):
    """Device memory resource that frees allocations only after a
    blocking stream synchronization (cuMemAlloc/cuMemFree).
    """

    __slots__ = ("_device_id",)

    def __init__(self, device_id):
        # Normalize through Device so callers may pass an ordinal or a Device.
        from .._device import Device

        self._device_id = Device(device_id).device_id

    def allocate(self, size, stream=None) -> Buffer:
        """Allocate ``size`` bytes of device memory and wrap it in a Buffer."""
        if stream is None:
            from cuda.core._stream import default_stream

            stream = default_stream()
        # Zero-byte requests never reach the driver; hand back a null pointer.
        ptr = 0
        if size:
            err, ptr = driver.cuMemAlloc(size)
            raise_if_driver_error(err)
        return Buffer._init(ptr, size, self)

    def deallocate(self, ptr, size, stream):
        """Synchronize ``stream`` (if given), then free the device allocation."""
        if stream is not None:
            stream.sync()
        # Zero-byte buffers were never backed by a driver allocation.
        if not size:
            return
        (err,) = driver.cuMemFree(ptr)
        raise_if_driver_error(err)

    @property
    def is_device_accessible(self) -> bool:
        """bool: buffers from this resource are accessible on the device."""
        return True

    @property
    def is_host_accessible(self) -> bool:
        """bool: buffers from this resource are not accessible on the host."""
        return False

    @property
    def device_id(self) -> int:
        """int: ordinal of the device this resource allocates on."""
        return self._device_id