Coverage for cuda / core / system / _device.pyx: 82.84%
134 statements
« prev ^ index » next coverage.py v7.13.4, created at 2026-03-08 01:07 +0000
1# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2#
3# SPDX-License-Identifier: Apache-2.0
5from libc.stdint cimport intptr_t, uint64_t
6from libc.math cimport ceil
8from multiprocessing import cpu_count
9from typing import Iterable
11from cuda.bindings import nvml
13from ._nvml_context cimport initialize
# Public, shorter re-exports of the NVML enum types used throughout this module.
AddressingMode = nvml.DeviceAddressingModeType
AffinityScope = nvml.AffinityScope
BrandType = nvml.BrandType
DeviceArch = nvml.DeviceArch
GpuP2PCapsIndex = nvml.GpuP2PCapsIndex
GpuP2PStatus = nvml.GpuP2PStatus
GpuTopologyLevel = nvml.GpuTopologyLevel
Pstates = nvml.Pstates

# Textual Cython includes providing the helper classes and utilities referenced
# below (e.g. ClockInfo, CoolerInfo, DeviceAttributes, FanInfo, FieldValues,
# InforomInfo, MemoryInfo, PciInfo, RepairStatus, Temperature, EventType,
# and the `_unpack_bitmask` helper from _device_utils.pxi).
include "_clock.pxi"
include "_cooler.pxi"
include "_device_attributes.pxi"
include "_device_utils.pxi"
include "_event.pxi"
include "_fan.pxi"
include "_field_values.pxi"
include "_inforom.pxi"
include "_memory.pxi"
include "_pci_info.pxi"
include "_performance.pxi"
include "_repair_status.pxi"
include "_temperature.pxi"
cdef class Device:
    """
    Representation of a device.

    :class:`cuda.core.system.Device` provides access to various pieces of metadata
    about devices and their topology, as provided by the NVIDIA Management
    Library (NVML). To use CUDA with a device, use :class:`cuda.core.Device`.

    Creating a device instance causes NVML to initialize the target GPU.
    NVML may initialize additional GPUs if the target GPU is an SLI slave.

    Parameters
    ----------
    index: int, optional
        Integer representing the CUDA device index to get a handle to. Valid
        values are between ``0`` and ``cuda.core.system.get_num_devices() - 1``.

        The order in which devices are enumerated has no guarantees of
        consistency between reboots. For that reason, it is recommended that
        devices are looked up by their PCI ids or UUID.

    uuid: bytes or str, optional
        UUID of a CUDA device to get a handle to.

    pci_bus_id: bytes or str, optional
        PCI bus ID of a CUDA device to get a handle to.

    Raises
    ------
    ValueError
        If anything other than a single `index`, `uuid` or `pci_bus_id` are specified.
    """

    # This is made public for testing purposes only
    cdef public intptr_t _handle

    def __init__(
        self,
        *,
        index: int | None = None,
        uuid: bytes | str | None = None,
        pci_bus_id: bytes | str | None = None,
    ):
        args = [index, uuid, pci_bus_id]
        cdef int arg_count = sum(arg is not None for arg in args)

        # Exactly one identifying argument must be provided.
        if arg_count > 1:
            raise ValueError("Handle requires only one of `index`, `uuid`, or `pci_bus_id`.")
        if arg_count == 0:
            raise ValueError("Handle requires either a device `index`, `uuid`, or `pci_bus_id`.")

        # Ensure NVML is initialized before any device lookup.
        initialize()

        if index is not None:
            self._handle = nvml.device_get_handle_by_index_v2(index)
        elif uuid is not None:
            # NVML expects a text UUID; accept bytes for convenience.
            if isinstance(uuid, bytes):
                uuid = uuid.decode("ascii")
            self._handle = nvml.device_get_handle_by_uuid(uuid)
        elif pci_bus_id is not None:
            if isinstance(pci_bus_id, bytes):
                pci_bus_id = pci_bus_id.decode("ascii")
            self._handle = nvml.device_get_handle_by_pci_bus_id_v2(pci_bus_id)

    #########################################################################
    # BASIC PROPERTIES

    @property
    def index(self) -> int:
        """
        The NVML index of this device.

        Valid indices are derived from the count returned by
        :meth:`Device.get_device_count`. For example, if ``get_device_count()``
        returns 2, the valid indices are 0 and 1, corresponding to GPU 0 and GPU
        1.

        The order in which NVML enumerates devices has no guarantees of
        consistency between reboots. For that reason, it is recommended that
        devices be looked up by their PCI ids or GPU UUID.

        Note: The NVML index may not correlate with other APIs, such as the CUDA
        device index.
        """
        return nvml.device_get_index(self._handle)

    @property
    def uuid(self) -> str:
        """
        Retrieves the globally unique immutable UUID associated with this
        device, as a 5 part hexadecimal string, that augments the immutable,
        board serial identifier.

        In the upstream NVML C++ API, the UUID includes a ``gpu-`` or ``mig-``
        prefix. That is not included in ``cuda.core.system``.
        """
        # NVML UUIDs have a `GPU-` or `MIG-` prefix. We remove that here.
        # TODO: If the user cares about the prefix, we will expose that in the
        # future using the MIG-related APIs in NVML.
        return nvml.device_get_uuid(self._handle)[4:]

    @property
    def pci_bus_id(self) -> str:
        """
        Retrieves the PCI bus ID of this device.
        """
        return self.pci_info.bus_id

    @property
    def numa_node_id(self) -> int:
        """
        The NUMA node of the given GPU device.

        This only applies to platforms where the GPUs are NUMA nodes.
        """
        return nvml.device_get_numa_node_id(self._handle)

    @property
    def arch(self) -> DeviceArch:
        """
        Device architecture.

        For example, a Tesla V100 will report ``DeviceArchitecture.name ==
        "VOLTA"``, and RTX A6000 will report ``DeviceArchitecture.name ==
        "AMPERE"``.
        """
        return DeviceArch(nvml.device_get_architecture(self._handle))

    @property
    def name(self) -> str:
        """
        Name of the device, e.g.: `"Tesla V100-SXM2-32GB"`
        """
        return nvml.device_get_name(self._handle)

    @property
    def brand(self) -> BrandType:
        """
        Brand of the device
        """
        return BrandType(nvml.device_get_brand(self._handle))

    @property
    def serial(self) -> str:
        """
        Retrieves the globally unique board serial number associated with this
        device's board.

        For all products with an InfoROM.
        """
        return nvml.device_get_serial(self._handle)

    @property
    def module_id(self) -> int:
        """
        Get a unique identifier for the device module on the baseboard.

        This API retrieves a unique identifier for each GPU module that exists
        on a given baseboard. For non-baseboard products, this ID would always
        be 0.
        """
        return nvml.device_get_module_id(self._handle)

    @property
    def minor_number(self) -> int:
        """
        The minor number of this device.

        For Linux only.

        The minor number is used by the Linux device driver to identify the
        device node in ``/dev/nvidiaX``.
        """
        return nvml.device_get_minor_number(self._handle)

    @property
    def is_c2c_mode_enabled(self) -> bool:
        """
        Whether the C2C (Chip-to-Chip) mode is enabled for this device.
        """
        return bool(nvml.device_get_c2c_mode_info_v(self._handle).is_c2c_enabled)

    @property
    def persistence_mode_enabled(self) -> bool:
        """
        Whether persistence mode is enabled for this device.

        For Linux only.
        """
        return nvml.device_get_persistence_mode(self._handle) == nvml.EnableState.FEATURE_ENABLED

    @persistence_mode_enabled.setter
    def persistence_mode_enabled(self, enabled: bool) -> None:
        nvml.device_set_persistence_mode(
            self._handle,
            nvml.EnableState.FEATURE_ENABLED if enabled else nvml.EnableState.FEATURE_DISABLED
        )

    @property
    def cuda_compute_capability(self) -> tuple[int, int]:
        """
        CUDA compute capability of the device, e.g.: `(7, 0)` for a Tesla V100.

        Returns a tuple `(major, minor)`.
        """
        return nvml.device_get_cuda_compute_capability(self._handle)

    def to_cuda_device(self) -> "cuda.core.Device":
        """
        Get the corresponding :class:`cuda.core.Device` (which is used for CUDA
        access) for this :class:`cuda.core.system.Device` (which is used for
        NVIDIA machine library (NVML) access).

        The devices are mapped to one another by their UUID.

        Returns
        -------
        cuda.core.Device
            The corresponding CUDA device.

        Raises
        ------
        RuntimeError
            If no CUDA device with a matching UUID is found.
        """
        from cuda.core import Device as CudaDevice

        # CUDA does not have an API to get a device by its UUID, so we just
        # search all the devices for one with a matching UUID.
        for cuda_device in CudaDevice.get_all_devices():
            if cuda_device.uuid == self.uuid:
                return cuda_device

        raise RuntimeError("No corresponding CUDA device found for this NVML device.")

    @classmethod
    def get_device_count(cls) -> int:
        """
        Get the number of available devices.

        Returns
        -------
        int
            The number of available devices.
        """
        return nvml.device_get_count_v2()

    @classmethod
    def get_all_devices(cls) -> Iterable[Device]:
        """
        Query the available device instances.

        Returns
        -------
        Iterator of Device
            An iterator over available devices.
        """
        for device_id in range(nvml.device_get_count_v2()):
            yield cls(index=device_id)

    #########################################################################
    # ADDRESSING MODE

    @property
    def addressing_mode(self) -> AddressingMode:
        """
        Get the addressing mode of the device.

        Addressing modes can be one of:

        - :attr:`AddressingMode.DEVICE_ADDRESSING_MODE_HMM`: System allocated
          memory (``malloc``, ``mmap``) is addressable from the device (GPU), via
          software-based mirroring of the CPU's page tables, on the GPU.
        - :attr:`AddressingMode.DEVICE_ADDRESSING_MODE_ATS`: System allocated
          memory (``malloc``, ``mmap``) is addressable from the device (GPU), via
          Address Translation Services. This means that there is (effectively) a
          single set of page tables, and the CPU and GPU both use them.
        - :attr:`AddressingMode.DEVICE_ADDRESSING_MODE_NONE`: Neither HMM nor ATS
          is active.
        """
        return AddressingMode(nvml.device_get_addressing_mode(self._handle).value)

    #########################################################################
    # AFFINITY

    @classmethod
    def get_all_devices_with_cpu_affinity(cls, cpu_index: int) -> Iterable[Device]:
        """
        Retrieve the set of GPUs that have a CPU affinity with the given CPU number.

        Supported on Linux only.

        Parameters
        ----------
        cpu_index: int
            The CPU index.

        Returns
        -------
        Iterator of Device
            An iterator over available devices.
        """
        cdef Device device
        # Wrap raw NVML handles without re-running __init__ / NVML lookups.
        for handle in nvml.system_get_topology_gpu_set(cpu_index):
            device = Device.__new__(Device)
            device._handle = handle
            yield device

    def get_memory_affinity(self, scope: AffinityScope=AffinityScope.NODE) -> list[int]:
        """
        Retrieves a list of indices of NUMA nodes or CPU sockets with the ideal
        memory affinity for the device.

        For Kepler™ or newer fully supported devices.

        Supported on Linux only.

        If requested scope is not applicable to the target topology, the API
        will fall back to reporting the memory affinity for the immediate non-I/O
        ancestor of the device.
        """
        # NVML reports affinity as a bitmask of 64-bit words; one word covers
        # 64 CPUs, so request ceil(cpu_count / 64) words.
        return _unpack_bitmask(
            nvml.device_get_memory_affinity(
                self._handle,
                <unsigned int>ceil(cpu_count() / 64),
                scope
            )
        )

    def get_cpu_affinity(self, scope: AffinityScope=AffinityScope.NODE) -> list[int]:
        """
        Retrieves a list of indices of NUMA nodes or CPU sockets with the ideal
        CPU affinity for the device.

        For Kepler™ or newer fully supported devices.

        Supported on Linux only.

        If requested scope is not applicable to the target topology, the API
        will fall back to reporting the memory affinity for the immediate non-I/O
        ancestor of the device.
        """
        return _unpack_bitmask(
            nvml.device_get_cpu_affinity_within_scope(
                self._handle,
                <unsigned int>ceil(cpu_count() / 64),
                scope,
            )
        )

    def set_cpu_affinity(self):
        """
        Sets the ideal affinity for the calling thread and device.

        For Kepler™ or newer fully supported devices.

        Supported on Linux only.
        """
        nvml.device_set_cpu_affinity(self._handle)

    def clear_cpu_affinity(self):
        """
        Clear all affinity bindings for the calling thread.

        For Kepler™ or newer fully supported devices.

        Supported on Linux only.
        """
        nvml.device_clear_cpu_affinity(self._handle)

    #########################################################################
    # CLOCK
    # See external class definitions in _clock.pxi

    def clock(self, clock_type: ClockType) -> ClockInfo:
        """
        Get information about and manage a specific clock on a device.
        """
        return ClockInfo(self._handle, clock_type)

    def get_auto_boosted_clocks_enabled(self) -> tuple[bool, bool]:
        """
        Retrieve the current state of auto boosted clocks on a device.

        For Kepler™ or newer fully supported devices.

        Auto Boosted clocks are enabled by default on some hardware, allowing
        the GPU to run at higher clock rates to maximize performance as thermal
        limits allow.

        On Pascal™ and newer hardware, Auto Boosted clocks are controlled
        through application clocks. Use :meth:`set_application_clocks` and
        :meth:`reset_application_clocks` to control Auto Boost behavior.

        Returns
        -------
        bool
            The current state of Auto Boosted clocks
        bool
            The default Auto Boosted clocks behavior
        """
        current, default = nvml.device_get_auto_boosted_clocks_enabled(self._handle)
        return current == nvml.EnableState.FEATURE_ENABLED, default == nvml.EnableState.FEATURE_ENABLED

    def get_current_clock_event_reasons(self) -> list[ClocksEventReasons]:
        """
        Retrieves the current clocks event reasons.

        For all fully supported products.
        """
        # The NVML call returns a 64-bit flag word; unpack the set bit
        # positions and map each back to its flag value.
        cdef uint64_t[1] reasons
        reasons[0] = nvml.device_get_current_clocks_event_reasons(self._handle)
        return [ClocksEventReasons(1 << reason) for reason in _unpack_bitmask(reasons)]

    def get_supported_clock_event_reasons(self) -> list[ClocksEventReasons]:
        """
        Retrieves supported clocks event reasons that can be returned by
        :meth:`get_current_clock_event_reasons`.

        For all fully supported products.

        This method is not supported in virtual machines running virtual GPU (vGPU).
        """
        cdef uint64_t[1] reasons
        reasons[0] = nvml.device_get_supported_clocks_event_reasons(self._handle)
        return [ClocksEventReasons(1 << reason) for reason in _unpack_bitmask(reasons)]

    ##########################################################################
    # COOLER
    # See external class definitions in _cooler.pxi

    @property
    def cooler(self) -> CoolerInfo:
        """
        Get information about cooler on a device.
        """
        return CoolerInfo(nvml.device_get_cooler_info(self._handle))

    ##########################################################################
    # DEVICE ATTRIBUTES
    # See external class definitions in _device_attributes.pxi

    @property
    def attributes(self) -> DeviceAttributes:
        """
        Get various device attributes.

        For Ampere™ or newer fully supported devices. Only available on Linux
        systems.
        """
        return DeviceAttributes(nvml.device_get_attributes_v2(self._handle))

    #########################################################################
    # DISPLAY

    @property
    def display_mode(self) -> bool:
        """
        The display mode for this device.

        Indicates whether a physical display (e.g. monitor) is currently connected to
        any of the device's connectors.
        """
        # Direct comparison yields the bool; matches the idiom used by
        # `persistence_mode_enabled`.
        return nvml.device_get_display_mode(self._handle) == nvml.EnableState.FEATURE_ENABLED

    @property
    def display_active(self) -> bool:
        """
        The display active status for this device.

        Indicates whether a display is initialized on the device. For example,
        whether X Server is attached to this device and has allocated memory for
        the screen.

        Display can be active even when no monitor is physically attached.
        """
        return nvml.device_get_display_active(self._handle) == nvml.EnableState.FEATURE_ENABLED

    ##########################################################################
    # EVENTS
    # See external class definitions in _event.pxi

    def register_events(self, events: EventType | int | list[EventType | int]) -> DeviceEvents:
        """
        Starts recording events on this device.

        For Fermi™ or newer fully supported devices. For Linux only.

        ECC events are available only on ECC-enabled devices (see
        :meth:`Device.get_total_ecc_errors`). Power capping events are
        available only on Power Management enabled devices (see
        :meth:`Device.get_power_management_mode`).

        This call starts recording of events on specific device. All events
        that occurred before this call are not recorded. Wait for events using
        the :meth:`DeviceEvents.wait` method on the result.

        Examples
        --------
        >>> device = Device(index=0)
        >>> events = device.register_events([
        ...     EventType.EVENT_TYPE_XID_CRITICAL_ERROR,
        ... ])
        >>> while event := events.wait(timeout_ms=10000):
        ...     print(f"Event {event.event_type} occurred on device {event.device.uuid}")

        Parameters
        ----------
        events: EventType, int, or list of EventType or int
            The event type or list of event types to register for this device.

        Returns
        -------
        :class:`DeviceEvents`
            An object representing the registered events. Call
            :meth:`DeviceEvents.wait` on this object to wait for events.

        Raises
        ------
        :class:`cuda.core.system.NotSupportedError`
            None of the requested event types are registered.
        """
        return DeviceEvents(self._handle, events)

    def get_supported_event_types(self) -> list[EventType]:
        """
        Get the list of event types supported by this device.

        For Fermi™ or newer fully supported devices. For Linux only (returns an
        empty list on Windows).

        Returns
        -------
        list[EventType]
            The list of supported event types.
        """
        cdef uint64_t[1] bitmask
        bitmask[0] = nvml.device_get_supported_event_types(self._handle)
        return [EventType(1 << ev) for ev in _unpack_bitmask(bitmask)]

    ##########################################################################
    # FAN
    # See external class definitions in _fan.pxi

    def fan(self, fan: int = 0) -> FanInfo:
        """
        Get information and manage a specific fan on a device.

        Parameters
        ----------
        fan: int
            Zero-based fan index; must be in ``[0, num_fans)``.

        Raises
        ------
        ValueError
            If ``fan`` is out of range.
        """
        # Read the property once: it issues an NVML query each access.
        cdef int count = self.num_fans
        if fan < 0 or fan >= count:
            raise ValueError(f"Fan index {fan} is out of range [0, {count})")
        return FanInfo(self._handle, fan)

    @property
    def num_fans(self) -> int:
        """
        The number of fans on the device.
        """
        return nvml.device_get_num_fans(self._handle)

    ##########################################################################
    # FIELD VALUES
    # See external class definitions in _field_values.pxi

    def get_field_values(self, field_ids: list[int | tuple[int, int]]) -> FieldValues:
        """
        Get multiple field values from the device.

        Each value specified can raise its own exception. That exception will
        be raised when attempting to access the corresponding ``value`` from the
        returned :class:`FieldValues` container.

        To confirm that there are no exceptions in the entire container, call
        :meth:`FieldValues.validate`.

        Parameters
        ----------
        field_ids: list of int or tuple of (int, int)
            List of field IDs to query.

            Each item may be either a single value from the :class:`FieldId`
            enum, or a pair of (:class:`FieldId`, scope ID).

        Returns
        -------
        :class:`FieldValues`
            Container of field values corresponding to the requested field IDs.
        """
        return FieldValues(nvml.device_get_field_values(self._handle, field_ids))

    def clear_field_values(self, field_ids: list[int | tuple[int, int]]) -> None:
        """
        Clear multiple field values from the device.

        Parameters
        ----------
        field_ids: list of int or tuple of (int, int)
            List of field IDs to clear.

            Each item may be either a single value from the :class:`FieldId`
            enum, or a pair of (:class:`FieldId`, scope ID).
        """
        nvml.device_clear_field_values(self._handle, field_ids)

    ##########################################################################
    # INFOROM
    # See external class definitions in _inforom.pxi

    @property
    def inforom(self) -> InforomInfo:
        """
        Accessor for InfoROM information.

        For all products with an InfoROM.
        """
        return InforomInfo(self)

    ##########################################################################
    # MEMORY
    # See external class definitions in _memory.pxi

    @property
    def bar1_memory_info(self) -> BAR1MemoryInfo:
        """
        Get information about BAR1 memory.

        BAR1 is used to map the FB (device memory) so that it can be directly
        accessed by the CPU or by 3rd party devices (peer-to-peer on the PCIE
        bus).
        """
        return BAR1MemoryInfo(nvml.device_get_bar1_memory_info(self._handle))

    @property
    def memory_info(self) -> MemoryInfo:
        """
        Object with memory information.
        """
        return MemoryInfo(nvml.device_get_memory_info_v2(self._handle))

    ##########################################################################
    # PCI INFO
    # See external class definitions in _pci_info.pxi

    @property
    def pci_info(self) -> PciInfo:
        """
        The PCI attributes of this device.
        """
        return PciInfo(nvml.device_get_pci_info_ext(self._handle), self._handle)

    ##########################################################################
    # PERFORMANCE
    # See external class definitions in _performance.pxi

    @property
    def performance_state(self) -> Pstates:
        """
        The current performance state of the device.

        For Fermi™ or newer fully supported devices.

        See :class:`Pstates` for possible performance states.
        """
        return Pstates(nvml.device_get_performance_state(self._handle))

    @property
    def dynamic_pstates_info(self) -> GpuDynamicPstatesInfo:
        """
        Retrieve performance monitor samples from the associated subdevice.
        """
        return GpuDynamicPstatesInfo(nvml.device_get_dynamic_pstates_info(self._handle))

    def get_supported_pstates(self) -> list[Pstates]:
        """
        Get all supported Performance States (P-States) for the device.

        The returned list contains a contiguous list of valid P-States supported by
        the device.
        """
        return [Pstates(x) for x in nvml.device_get_supported_performance_states(self._handle)]

    ##########################################################################
    # REPAIR STATUS
    # See external class definitions in _repair_status.pxi

    @property
    def repair_status(self) -> RepairStatus:
        """
        Get the repair status for TPC/Channel repair.

        For Ampere™ or newer fully supported devices.
        """
        return RepairStatus(self._handle)

    ##########################################################################
    # TEMPERATURE
    # See external class definitions in _temperature.pxi

    @property
    def temperature(self) -> Temperature:
        """
        Get information about temperatures on a device.
        """
        return Temperature(self._handle)

    #######################################################################
    # TOPOLOGY

    def get_topology_nearest_gpus(self, level: GpuTopologyLevel) -> Iterable[Device]:
        """
        Retrieve the GPUs that are nearest to this device at a specific interconnectivity level.

        Supported on Linux only.

        Parameters
        ----------
        level: :class:`GpuTopologyLevel`
            The topology level.

        Returns
        -------
        Iterable of :class:`Device`
            The nearest devices at the given topology level.
        """
        cdef Device device
        # Wrap raw NVML handles without re-running __init__ / NVML lookups.
        for handle in nvml.device_get_topology_nearest_gpus(self._handle, level):
            device = Device.__new__(Device)
            device._handle = handle
            yield device
def get_topology_common_ancestor(device1: Device, device2: Device) -> GpuTopologyLevel:
    """
    Retrieve the common ancestor for two devices.

    For Linux only.

    Parameters
    ----------
    device1: :class:`Device`
        The first device.
    device2: :class:`Device`
        The second device.

    Returns
    -------
    :class:`GpuTopologyLevel`
        The common ancestor level of the two devices.
    """
    # Query the raw level from NVML, then wrap it in the public enum.
    level = nvml.device_get_topology_common_ancestor(device1._handle, device2._handle)
    return GpuTopologyLevel(level)
def get_p2p_status(device1: Device, device2: Device, index: GpuP2PCapsIndex) -> GpuP2PStatus:
    """
    Retrieve the P2P status between two devices.

    Parameters
    ----------
    device1: :class:`Device`
        The first device.
    device2: :class:`Device`
        The second device.
    index: :class:`GpuP2PCapsIndex`
        The P2P capability index being looked for between ``device1`` and ``device2``.

    Returns
    -------
    :class:`GpuP2PStatus`
        The P2P status between the two devices.
    """
    # Query the raw status from NVML, then wrap it in the public enum.
    status = nvml.device_get_p2p_status(device1._handle, device2._handle, index)
    return GpuP2PStatus(status)
# Public API of this module, sorted case-insensitively. Many names here are
# defined in the included .pxi files above.
__all__ = [
    "AddressingMode",
    "AffinityScope",
    "BAR1MemoryInfo",
    "BrandType",
    "ClockId",
    "ClockInfo",
    "ClockOffsets",
    "ClocksEventReasons",
    "ClockType",
    "CoolerControl",
    "CoolerInfo",
    "CoolerTarget",
    "Device",
    "DeviceArch",
    "DeviceAttributes",
    "DeviceEvents",
    "EventData",
    "EventType",
    "FanControlPolicy",
    "FanInfo",
    "FieldId",
    "FieldValue",
    "FieldValues",
    "get_p2p_status",
    "get_topology_common_ancestor",
    "GpuDynamicPstatesInfo",
    "GpuDynamicPstatesUtilization",
    "GpuP2PCapsIndex",
    "GpuP2PStatus",
    "GpuTopologyLevel",
    "InforomInfo",
    "InforomObject",
    "MemoryInfo",
    "PcieUtilCounter",
    "PciInfo",
    "Pstates",
    "RepairStatus",
    "Temperature",
    "TemperatureSensors",
    "TemperatureThresholds",
    "ThermalController",
    "ThermalSensor",
    "ThermalSettings",
    "ThermalTarget",
]