John6666 committed on
Commit
e6c1c3c
·
verified ·
1 Parent(s): 5509121

Upload mod.py

Browse files
Files changed (1) hide show
  1. mod.py +3 -7
mod.py CHANGED
@@ -14,13 +14,9 @@ from modutils import download_things
14
 
15
 
16
  IS_ZERO = True if os.getenv("SPACES_ZERO_GPU", None) else False
17
- if torch.cuda.is_available():
18
- torch.backends.cudnn.deterministic = True
19
- torch.backends.cudnn.benchmark = False
20
- torch.backends.cuda.matmul.allow_tf32 = True
21
- if IS_ZERO:
22
- subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
23
- torch.set_float32_matmul_precision("high") # https://pytorch.org/blog/accelerating-generative-ai-3/
24
 
25
 
26
  subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
14
 
15
 
16
  IS_ZERO = True if os.getenv("SPACES_ZERO_GPU", None) else False
17
+ if IS_ZERO:
18
+ subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
19
+ torch.set_float32_matmul_precision("high") # https://pytorch.org/blog/accelerating-generative-ai-3/
 
 
 
 
20
 
21
 
22
  subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)