# auto-diffuser-config / hardware_detector.py
# Uploaded to the Hugging Face Hub via huggingface_hub (revision 80a1334, 4.82 kB).
import os
import platform
import subprocess
from typing import Dict, List, Optional
class HardwareDetector:
    """Detect local hardware (CPU, GPU, PyTorch devices) and suggest an optimization profile."""

    def __init__(self):
        # Detection runs once at construction; results are cached on the instance.
        self.specs = self._detect_system_specs()

    def _detect_system_specs(self) -> Dict:
        """Detect system hardware specifications automatically.

        Returns:
            Dict with platform/architecture/CPU/Python info, 'gpu_info'
            (see _detect_gpu), and — when PyTorch is importable —
            'torch_version', CUDA and MPS availability, plus CUDA device
            details ('cuda_device_count', 'cuda_device_name', 'cuda_memory'
            in whole GiB) when CUDA is usable.
        """
        specs = {
            'platform': platform.system(),
            'architecture': platform.machine(),
            'cpu_count': os.cpu_count(),
            'python_version': platform.python_version(),
            'gpu_info': self._detect_gpu(),
            'cuda_available': False,
            'mps_available': False
        }
        # PyTorch is optional: probe device availability only if it imports.
        try:
            import torch
            specs['torch_version'] = torch.__version__
            specs['cuda_available'] = torch.cuda.is_available()
            # Older torch builds have no torch.backends.mps attribute at all,
            # which would raise AttributeError — guard with getattr.
            mps_backend = getattr(torch.backends, 'mps', None)
            specs['mps_available'] = bool(mps_backend is not None and mps_backend.is_available())
            if specs['cuda_available']:
                specs['cuda_device_count'] = torch.cuda.device_count()
                specs['cuda_device_name'] = torch.cuda.get_device_name(0)
                # Total VRAM of device 0, floored to whole GiB.
                specs['cuda_memory'] = torch.cuda.get_device_properties(0).total_memory // (1024 ** 3)
        except ImportError:
            specs['torch_version'] = 'Not installed'
        return specs

    def _detect_gpu(self) -> Optional[List[Dict]]:
        """Attempt to detect GPU information using nvidia-smi.

        Returns:
            List of {'name': str, 'memory_mb': int} dicts (one per GPU),
            or None when nvidia-smi is missing or exits non-zero.

        Fixes vs. original: the return annotation was Optional[Dict] but a
        list is returned, and `line.split(', ')` raised ValueError for GPU
        names containing commas or for unexpected spacing.
        """
        try:
            result = subprocess.run([
                'nvidia-smi',
                '--query-gpu=name,memory.total',
                '--format=csv,noheader,nounits'
            ], capture_output=True, text=True, check=True)
        except (subprocess.CalledProcessError, FileNotFoundError):
            return None
        gpus = []
        for line in result.stdout.strip().split('\n'):
            if not line.strip():
                continue
            # Split on the LAST comma so GPU names containing commas survive;
            # the memory column is always the final CSV field.
            name, _, memory = line.rpartition(',')
            try:
                gpus.append({'name': name.strip(), 'memory_mb': int(memory.strip())})
            except ValueError:
                # Skip malformed rows rather than aborting detection.
                continue
        return gpus

    def get_manual_input(self) -> Dict:
        """Get hardware specifications via manual user input.

        Prompts on stdin for GPU name, VRAM and system RAM; returns a copy
        of the auto-detected specs with 'gpu_info', 'ram_gb' and
        'manual_input' overridden. Invalid numeric input falls back to
        no-GPU / 16 GB RAM rather than raising.
        """
        print("Enter your hardware specifications manually:")
        gpu_name = input("GPU Name (e.g., RTX 4090, A100, leave empty if none): ").strip()
        if gpu_name:
            try:
                vram_gb = int(input("VRAM in GB (e.g., 24): "))
                gpu_info = [{'name': gpu_name, 'memory_mb': vram_gb * 1024}]
            except ValueError:
                gpu_info = None
        else:
            gpu_info = None
        try:
            ram_gb = int(input("System RAM in GB (e.g., 32): "))
        except ValueError:
            ram_gb = 16  # Default
        specs = self.specs.copy()
        specs['gpu_info'] = gpu_info
        specs['ram_gb'] = ram_gb
        specs['manual_input'] = True
        return specs

    def get_optimization_profile(self) -> str:
        """Determine the best optimization profile based on hardware.

        Returns one of: 'high_end_gpu' (>= 20 GiB VRAM), 'mid_range_gpu'
        (>= 8 GiB), 'low_vram_gpu' (CUDA with less), 'apple_silicon' (MPS),
        or 'cpu_only'.
        """
        if self.specs['cuda_available']:
            # Single lookup instead of repeating the .get() per branch.
            vram = self.specs.get('cuda_memory', 0)
            if vram >= 20:
                return 'high_end_gpu'
            elif vram >= 8:
                return 'mid_range_gpu'
            else:
                return 'low_vram_gpu'
        elif self.specs['mps_available']:
            return 'apple_silicon'
        else:
            return 'cpu_only'

    def print_specs(self):
        """Print detected hardware specifications to stdout."""
        print(f"Platform: {self.specs['platform']} ({self.specs['architecture']})")
        print(f"CPU Cores: {self.specs['cpu_count']}")
        print(f"Python: {self.specs['python_version']}")
        print(f"PyTorch: {self.specs.get('torch_version', 'Not detected')}")
        print(f"CUDA Available: {self.specs['cuda_available']}")
        print(f"MPS Available: {self.specs['mps_available']}")
        if self.specs['gpu_info']:
            print("GPU Information:")
            for i, gpu in enumerate(self.specs['gpu_info']):
                vram_gb = gpu['memory_mb'] / 1024
                print(f"  GPU {i}: {gpu['name']} ({vram_gb:.1f} GB VRAM)")
        else:
            print("No GPU detected")
def _main() -> None:
    """Interactive entry point: show auto-detected specs, let the user override them, then report the recommended profile."""
    detector = HardwareDetector()
    print("=== Auto-detected Hardware ===")
    detector.print_specs()
    choice = input("\nUse auto-detected specs? (y/n): ").lower()
    if choice != 'y':
        # Replace the auto-detected specs wholesale with the manual ones.
        detector.specs = detector.get_manual_input()
    print("\n=== Final Hardware Specs ===")
    detector.print_specs()
    print(f"\nRecommended optimization profile: {detector.get_optimization_profile()}")


if __name__ == "__main__":
    _main()