Minimal cross-platform PyTorch device info.
import torch
print("PyTorch version:", torch.__version__)
# CUDA / ROCm (both use torch.cuda API in PyTorch)
# ------------------------------------------------
has_cuda_api = torch.cuda.is_available()
cuda_version = torch.version.cuda
rocm_version = torch.version.hip  # None if not a ROCm build
print("\n[GPU Backend]")
print("torch.cuda.is_available():", has_cuda_api)
print("CUDA version (torch.version.cuda):", cuda_version)
print("ROCm version (torch.version.hip):", rocm_version)
if cuda_version is not None:
    gpu_backend = "cuda"  # NVIDIA
elif rocm_version is not None:
    gpu_backend = "rocm"  # AMD
else:
    gpu_backend = None
print("Detected GPU backend:", gpu_backend or "None")
device_count = torch.cuda.device_count() if has_cuda_api else 0
print("GPU device count:", device_count)
for i in range(device_count):
    name = torch.cuda.get_device_name(i)
    # mem_get_info may fail on some ROCm / older drivers
    try:
        free, total = torch.cuda.mem_get_info(i)
        mem_info = f"{free / 1e9:.1f} / {total / 1e9:.1f} GB"
    except Exception:
        mem_info = "N/A"
    print(f"GPU {i}: {name} | Memory: {mem_info}")
# Apple MPS
# ---------
# torch.backends.mps.is_available() is the stable check (PyTorch >= 1.12);
# torch.mps.is_available() only exists in newer releases.
has_mps = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
print("\n[MPS]")
print("MPS available:", has_mps)
# Intel XPU
# ---------
has_xpu = hasattr(torch, "xpu") and torch.xpu.is_available()
print("\n[XPU]")
print("XPU available:", has_xpu)
# Optional Backends (require extra packages)
# ------------------------------------------
print("\n[Optional Backends]")
# TPU (XLA)
try:
    import torch_xla.core.xla_model as xm  # noqa: F401
    has_xla = True
except Exception:
    has_xla = False
print("TPU / XLA available:", has_xla)
# Habana Gaudi (HPU)
try:
    has_hpu = hasattr(torch, "hpu") and torch.hpu.is_available()
except Exception:
    has_hpu = False
print("Habana HPU available:", has_hpu)
# Huawei Ascend (NPU)
try:
    import torch_npu  # noqa: F401
    has_npu = torch.npu.is_available()
except Exception:
    has_npu = False
print("Ascend NPU available:", has_npu)
# Suggested default device
# ------------------------
if has_cuda_api:
    device = "cuda"  # works for both CUDA + ROCm
elif has_hpu:
    device = "hpu"
elif has_npu:
    device = "npu"
elif has_xla:
    device = "xla"
elif has_xpu:
    device = "xpu"
elif has_mps:
    device = "mps"
else:
    device = "cpu"
print("\n[Suggested device]")
print("torch.device =", device)
# Extra clarity for logs / bug reports
if gpu_backend == "cuda":
    print("Backend detail: NVIDIA CUDA")
elif gpu_backend == "rocm":
    print("Backend detail: AMD ROCm (HIP)")
elif device == "mps":
    print("Backend detail: Apple Metal (MPS)")
elif device == "xpu":
    print("Backend detail: Intel XPU")
elif device == "hpu":
    print("Backend detail: Habana Gaudi (HPU)")
elif device == "npu":
    print("Backend detail: Huawei Ascend (NPU)")
elif device == "xla":
    print("Backend detail: Google TPU (XLA)")
else:
    print("Backend detail: CPU only")