fxmarty committed
Commit 90c1496 · Parent: 5727bea

Adding regression benchmark for the transformers SHA 4692d2619433f1eb064f3da4f3573f060a115eac

Files changed (30)
  1. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/0/hydra_config.yaml +66 -0
  2. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/0/inference_results.csv +2 -0
  3. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/0/main.log +23 -0
  4. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/1/hydra_config.yaml +66 -0
  5. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/1/inference_results.csv +2 -0
  6. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/1/main.log +23 -0
  7. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/2/hydra_config.yaml +66 -0
  8. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/2/inference_results.csv +2 -0
  9. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/2/main.log +23 -0
  10. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/3/hydra_config.yaml +66 -0
  11. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/3/inference_results.csv +2 -0
  12. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/3/main.log +23 -0
  13. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/4/hydra_config.yaml +66 -0
  14. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/4/inference_results.csv +2 -0
  15. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/4/main.log +23 -0
  16. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/5/hydra_config.yaml +66 -0
  17. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/5/inference_results.csv +2 -0
  18. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/5/main.log +23 -0
  19. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/6/hydra_config.yaml +66 -0
  20. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/6/inference_results.csv +2 -0
  21. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/6/main.log +23 -0
  22. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/7/hydra_config.yaml +66 -0
  23. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/7/inference_results.csv +2 -0
  24. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/7/main.log +23 -0
  25. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/pytorch_bert_inference/0/hydra_config.yaml +66 -0
  26. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/pytorch_bert_inference/0/inference_results.csv +2 -0
  27. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/pytorch_bert_inference/0/main.log +20 -0
  28. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/pytorch_gpt2_inference/0/hydra_config.yaml +66 -0
  29. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/pytorch_gpt2_inference/0/inference_results.csv +2 -0
  30. raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/pytorch_gpt2_inference/0/main.log +22 -0
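Each run directory holds three files: hydra_config.yaml (the fully resolved Hydra configuration of the run), inference_results.csv (the measured metrics), and main.log (the benchmark log). A minimal sketch for aggregating all the CSVs in this commit into a single table, assuming pandas is available (it is not part of this commit):

```python
from pathlib import Path

import pandas as pd  # assumption: pandas is installed in the environment

root = Path("raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac")

frames = []
for csv_path in sorted(root.glob("*/*/inference_results.csv")):
    df = pd.read_csv(csv_path, index_col=0)
    df["experiment"] = csv_path.parts[-3]  # e.g. llama_1gpu_inference
    df["run"] = csv_path.parts[-2]  # sweep index within the experiment
    frames.append(df)

# Columns absent from an experiment (e.g. the generate.* metrics for
# text-classification) simply come out as NaN after the concat.
results = pd.concat(frames, ignore_index=True)
print(results.to_string())
```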
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
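Across the eight llama_1gpu_inference runs, only two of these parameters are swept: backend.torch_dtype (float16 vs. float32) and benchmark.input_shapes.batch_size (1, 2, 4, 16); everything else is held fixed. A minimal sketch for reading those two values back out of a run, assuming PyYAML is installed and the nesting shown above:

```python
import yaml  # assumption: PyYAML is installed

run_dir = (
    "raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac"
    "/llama_1gpu_inference/0"
)
with open(f"{run_dir}/hydra_config.yaml") as f:
    cfg = yaml.safe_load(f)

# The two dimensions that vary across the eight llama runs:
print(cfg["backend"]["torch_dtype"])  # float16
print(cfg["benchmark"]["input_shapes"]["batch_size"])  # 1
```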
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,16195.125247999998,0.0306,32.7,5.81,34.4
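The throughput columns appear to be derived from the latencies and the shape parameters in the config: forward throughput ≈ batch_size / forward latency, and generate throughput ≈ batch_size × new_tokens / generate latency. A quick check against this run (batch_size 1, new_tokens 200):

```python
# Sanity check of the derived columns (an inference, not documented in this commit).
batch_size, new_tokens = 1, 200
forward_latency, generate_latency = 0.0306, 5.81

print(round(batch_size / forward_latency, 1))  # 32.7 samples/s
print(round(batch_size * new_tokens / generate_latency, 1))  # 34.4 tokens/s
```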
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/0/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 12:56:58,867][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 12:56:58,868][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 12:56:59,157][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 12:56:59,158][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 12:56:59,158][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 12:56:59,299][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 12:56:59,314][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 12:56:59,316][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 12:58:05,921][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 12:58:05,922][inference][INFO] - Running inference benchmark
+ [2023-08-11 12:58:13,730][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 12:58:15,019][memory_tracker][INFO] - Peak memory usage: 16195.125247999998 MB
+ [2023-08-11 12:58:15,019][inference][INFO] - + Forward pass peak memory: 16195.125247999998 (MB)
+ [2023-08-11 12:58:15,019][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 12:58:15,327][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 12:58:35,694][inference][INFO] - + Forward pass latency: 3.06e-02 (s)
+ [2023-08-11 12:58:35,695][inference][INFO] - + Forward pass throughput: 32.70 (samples/s)
+ [2023-08-11 12:58:35,696][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 12:58:42,206][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 12:59:05,460][inference][INFO] - + Generation pass latency: 5.81e+00 (s)
+ [2023-08-11 12:59:05,462][inference][INFO] - + Generation pass throughput: 34.40 (tokens/s)
+ [2023-08-11 12:59:05,462][inference][INFO] - Saving inference results
+ [2023-08-11 12:59:05,472][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/1/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/1/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30317.346815999997,0.0642,15.6,5.57,35.9
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/1/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 12:59:05,937][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 12:59:05,939][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 12:59:06,123][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 12:59:06,123][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 12:59:06,123][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 12:59:06,224][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 12:59:06,251][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 12:59:06,252][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 12:59:22,883][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 12:59:22,885][inference][INFO] - Running inference benchmark
+ [2023-08-11 12:59:30,668][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 12:59:30,744][memory_tracker][INFO] - Peak memory usage: 30317.346815999997 MB
+ [2023-08-11 12:59:30,744][inference][INFO] - + Forward pass peak memory: 30317.346815999997 (MB)
+ [2023-08-11 12:59:30,745][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 12:59:32,868][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:00:39,173][inference][INFO] - + Forward pass latency: 6.42e-02 (s)
+ [2023-08-11 13:00:39,174][inference][INFO] - + Forward pass throughput: 15.60 (samples/s)
+ [2023-08-11 13:00:39,175][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:00:44,757][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:01:07,025][inference][INFO] - + Generation pass latency: 5.57e+00 (s)
+ [2023-08-11 13:01:07,027][inference][INFO] - + Generation pass throughput: 35.90 (tokens/s)
+ [2023-08-11 13:01:07,027][inference][INFO] - Saving inference results
+ [2023-08-11 13:01:07,034][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/2/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/2/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,16381.771776,0.0307,65.1,6.02,66.4
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/2/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:01:07,529][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:01:07,530][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:01:07,725][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:01:07,726][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:01:07,726][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:01:07,830][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:01:07,856][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:01:07,857][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 13:01:18,347][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:01:18,349][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:01:26,097][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:01:26,136][memory_tracker][INFO] - Peak memory usage: 16381.771776 MB
+ [2023-08-11 13:01:26,136][inference][INFO] - + Forward pass peak memory: 16381.771776 (MB)
+ [2023-08-11 13:01:26,137][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:01:26,627][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:01:57,904][inference][INFO] - + Forward pass latency: 3.07e-02 (s)
+ [2023-08-11 13:01:57,905][inference][INFO] - + Forward pass throughput: 65.10 (samples/s)
+ [2023-08-11 13:01:57,905][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:02:04,703][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:02:28,772][inference][INFO] - + Generation pass latency: 6.02e+00 (s)
+ [2023-08-11 13:02:28,774][inference][INFO] - + Generation pass throughput: 66.40 (tokens/s)
+ [2023-08-11 13:02:28,774][inference][INFO] - Saving inference results
+ [2023-08-11 13:02:28,780][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/3/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/3/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30778.720255999997,0.109,18.3,7.04,56.8
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/3/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:02:29,260][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:02:29,260][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:02:29,450][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:02:29,451][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:02:29,451][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:02:29,554][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:02:29,580][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:02:29,581][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 13:02:46,435][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:02:46,437][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:02:54,279][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:02:54,402][memory_tracker][INFO] - Peak memory usage: 30778.720255999997 MB
+ [2023-08-11 13:02:54,403][inference][INFO] - + Forward pass peak memory: 30778.720255999997 (MB)
+ [2023-08-11 13:02:54,403][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:02:58,235][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:04:08,880][inference][INFO] - + Forward pass latency: 1.09e-01 (s)
+ [2023-08-11 13:04:08,881][inference][INFO] - + Forward pass throughput: 18.30 (samples/s)
+ [2023-08-11 13:04:08,882][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:04:15,933][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:04:37,065][inference][INFO] - + Generation pass latency: 7.04e+00 (s)
+ [2023-08-11 13:04:37,068][inference][INFO] - + Generation pass throughput: 56.80 (tokens/s)
+ [2023-08-11 13:04:37,068][inference][INFO] - Saving inference results
+ [2023-08-11 13:04:37,076][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/4/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/4/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,17000.431615999998,0.031,129.0,6.04,132.0
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/4/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:04:37,574][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:04:37,575][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:04:37,776][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:04:37,776][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:04:37,776][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:04:37,879][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:04:37,905][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:04:37,906][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 13:04:48,445][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:04:48,447][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:04:56,177][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:04:56,222][memory_tracker][INFO] - Peak memory usage: 17000.431615999998 MB
+ [2023-08-11 13:04:56,222][inference][INFO] - + Forward pass peak memory: 17000.431615999998 (MB)
+ [2023-08-11 13:04:56,222][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:04:56,965][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:05:45,589][inference][INFO] - + Forward pass latency: 3.10e-02 (s)
+ [2023-08-11 13:05:45,590][inference][INFO] - + Forward pass throughput: 129.00 (samples/s)
+ [2023-08-11 13:05:45,590][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:05:52,632][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:06:16,811][inference][INFO] - + Generation pass latency: 6.04e+00 (s)
+ [2023-08-11 13:06:16,813][inference][INFO] - + Generation pass throughput: 132.00 (tokens/s)
+ [2023-08-11 13:06:16,813][inference][INFO] - Saving inference results
+ [2023-08-11 13:06:16,823][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/5/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/5/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,31481.266175999997,0.187,21.4,7.67,104.0
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/5/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:06:17,289][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:06:17,291][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:06:17,653][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:06:17,654][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:06:17,654][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:06:17,757][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:06:17,782][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:06:17,783][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 13:06:34,730][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:06:34,731][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:06:42,565][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:06:42,779][memory_tracker][INFO] - Peak memory usage: 31481.266175999997 MB
+ [2023-08-11 13:06:42,779][inference][INFO] - + Forward pass peak memory: 31481.266175999997 (MB)
+ [2023-08-11 13:06:42,784][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:06:49,719][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:08:04,382][inference][INFO] - + Forward pass latency: 1.87e-01 (s)
+ [2023-08-11 13:08:04,384][inference][INFO] - + Forward pass throughput: 21.40 (samples/s)
+ [2023-08-11 13:08:04,385][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:08:12,128][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:08:35,152][inference][INFO] - + Generation pass latency: 7.67e+00 (s)
+ [2023-08-11 13:08:35,154][inference][INFO] - + Generation pass throughput: 104.00 (tokens/s)
+ [2023-08-11 13:08:35,155][inference][INFO] - Saving inference results
+ [2023-08-11 13:08:35,162][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/6/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/6/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,19498.139648,0.0975,164.0,6.26,511.0
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/6/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:08:35,716][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:08:35,717][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:08:35,904][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:08:35,904][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:08:35,904][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:08:36,006][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:08:36,032][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:08:36,033][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 13:08:46,671][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:08:46,672][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:08:54,366][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:08:54,478][memory_tracker][INFO] - Peak memory usage: 19498.139648 MB
+ [2023-08-11 13:08:54,478][inference][INFO] - + Forward pass peak memory: 19498.139648 (MB)
+ [2023-08-11 13:08:54,479][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:08:57,089][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:09:51,276][inference][INFO] - + Forward pass latency: 9.75e-02 (s)
+ [2023-08-11 13:09:51,277][inference][INFO] - + Forward pass throughput: 164.00 (samples/s)
+ [2023-08-11 13:09:51,277][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:09:57,749][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:10:22,785][inference][INFO] - + Generation pass latency: 6.26e+00 (s)
+ [2023-08-11 13:10:22,786][inference][INFO] - + Generation pass throughput: 511.00 (tokens/s)
+ [2023-08-11 13:10:22,786][inference][INFO] - Saving inference results
+ [2023-08-11 13:10:22,793][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/7/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/7/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,35824.467968,0.684,23.4,13.0,246.0
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/llama_1gpu_inference/7/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:10:23,373][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:10:23,375][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:10:23,572][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:10:23,572][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:10:23,573][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:10:23,671][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:10:23,697][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:10:23,698][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 13:10:40,806][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:10:40,808][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:10:48,665][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:10:49,370][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB
+ [2023-08-11 13:10:49,370][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB)
+ [2023-08-11 13:10:49,386][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:11:14,764][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:12:32,757][inference][INFO] - + Forward pass latency: 6.84e-01 (s)
+ [2023-08-11 13:12:32,758][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
+ [2023-08-11 13:12:32,758][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:12:46,389][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:13:12,294][inference][INFO] - + Generation pass latency: 1.30e+01 (s)
+ [2023-08-11 13:13:12,297][inference][INFO] - + Generation pass throughput: 246.00 (tokens/s)
+ [2023-08-11 13:13:12,297][inference][INFO] - Saving inference results
+ [2023-08-11 13:13:12,304][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/pytorch_bert_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_bert_inference
+ model: hf-internal-testing/tiny-random-bert
+ device: cpu
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/pytorch_bert_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
+ 0,460.087296,0.00317,315.0
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/pytorch_bert_inference/0/main.log ADDED
@@ -0,0 +1,20 @@
+ [2023-08-11 13:13:17,158][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:13:17,159][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:13:17,341][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-08-11 13:13:17,341][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:13:17,341][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:13:17,341][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:13:17,343][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:13:17,343][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-11 13:13:17,936][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:13:17,937][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:13:18,056][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-11 13:13:18,058][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:13:18,106][inference][INFO] - + Forward pass peak memory: 460.087296 (MB)
+ [2023-08-11 13:13:18,107][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-11 13:13:18,108][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:13:18,140][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:13:28,252][inference][INFO] - + Forward pass latency: 3.17e-03 (s)
+ [2023-08-11 13:13:28,254][inference][INFO] - + Forward pass throughput: 315.00 (samples/s)
+ [2023-08-11 13:13:28,254][inference][INFO] - Saving inference results
+ [2023-08-11 13:13:28,267][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/pytorch_gpt2_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_gpt2_inference
+ model: hf-internal-testing/tiny-random-gpt2
+ device: cpu
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/pytorch_gpt2_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,463.88428799999997,0.00311,322.0,0.515,194.0
raw_results/2023-08-11_12:16:01_4692d2619433f1eb064f3da4f3573f060a115eac/pytorch_gpt2_inference/0/main.log ADDED
@@ -0,0 +1,22 @@
+ [2023-08-11 13:13:32,687][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:13:32,688][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:13:32,876][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+ [2023-08-11 13:13:32,877][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:13:32,877][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:13:32,877][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:13:32,879][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:13:32,879][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-11 13:13:33,528][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:13:33,529][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:13:33,740][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:13:33,790][inference][INFO] - + Forward pass peak memory: 463.88428799999997 (MB)
+ [2023-08-11 13:13:33,791][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:13:33,824][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:13:43,935][inference][INFO] - + Forward pass latency: 3.11e-03 (s)
+ [2023-08-11 13:13:43,937][inference][INFO] - + Forward pass throughput: 322.00 (samples/s)
+ [2023-08-11 13:13:43,938][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:13:44,435][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:13:54,746][inference][INFO] - + Generation pass latency: 5.15e-01 (s)
+ [2023-08-11 13:13:54,746][inference][INFO] - + Generation pass throughput: 194.00 (tokens/s)
+ [2023-08-11 13:13:54,747][inference][INFO] - Saving inference results
+ [2023-08-11 13:13:54,760][backend][INFO] - Cleaning backend