fxmarty committed
Commit 711d0ae · 1 Parent(s): e81a807

Adding regression benchmark for the transformers SHA a7da2996a00c0ea083012ac86ab70f0bc4799f33

Files changed (20)
  1. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/0/inference_results.csv +1 -1
  2. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/0/main.log +23 -23
  3. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/1/inference_results.csv +1 -1
  4. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/1/main.log +23 -23
  5. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/2/inference_results.csv +1 -1
  6. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/2/main.log +23 -23
  7. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/3/inference_results.csv +1 -1
  8. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/3/main.log +23 -23
  9. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/4/inference_results.csv +1 -1
  10. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/4/main.log +23 -23
  11. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/5/inference_results.csv +1 -1
  12. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/5/main.log +23 -23
  13. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/6/inference_results.csv +1 -1
  14. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/6/main.log +23 -23
  15. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/7/inference_results.csv +1 -1
  16. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/7/main.log +23 -23
  17. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_bert_inference/0/inference_results.csv +1 -1
  18. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_bert_inference/0/main.log +20 -40
  19. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_gpt2_inference/0/inference_results.csv +1 -1
  20. raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_gpt2_inference/0/main.log +22 -44
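
For quick analysis, the updated per-experiment metrics in the diffs below can be collected into a single table. A minimal sketch, assuming a local checkout of this repository with pandas installed; the glob pattern simply mirrors the paths listed above, and nothing in this snippet is part of the commit itself:

    from pathlib import Path

    import pandas as pd

    # Run directory committed here, named after the benchmark date and transformers SHA.
    RUN_DIR = Path("raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33")

    frames = []
    for csv_path in sorted(RUN_DIR.glob("*/*/inference_results.csv")):
        df = pd.read_csv(csv_path, index_col=0)
        df["benchmark"] = csv_path.parent.parent.name  # e.g. llama_1gpu_inference
        df["experiment"] = csv_path.parent.name        # e.g. "0" .. "7"
        frames.append(df)

    # pytorch_bert_inference has no generate.* columns; concat fills those with NaN.
    results = pd.concat(frames, ignore_index=True)
    print(results[["benchmark", "experiment", "forward.peak_memory(MB)", "forward.latency(s)"]])
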
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/0/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,26952.466431999997,0.0307,32.6,6.71,29.8
+ 0,16195.125247999998,0.031,32.3,5.9,33.9
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/0/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 16:49:25,131][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 16:49:25,131][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 16:49:25,422][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 16:49:25,422][backend][INFO] - Configuring pytorch backend
- [2023-08-10 16:49:25,422][backend][INFO] - + Checking initial device isolation
- [2023-08-10 16:49:25,624][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 16:49:25,637][pytorch][INFO] - + Disabling gradients
- [2023-08-10 16:49:25,638][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
- [2023-08-10 16:50:32,978][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 16:50:32,980][inference][INFO] - Running inference benchmark
- [2023-08-10 16:50:41,073][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 16:50:42,784][memory_tracker][INFO] - Peak memory usage: 26952.466431999997 MB
- [2023-08-10 16:50:42,784][inference][INFO] - + Forward pass peak memory: 26952.466431999997 (MB)
- [2023-08-10 16:50:42,785][inference][INFO] - + Warming up the forward pass
- [2023-08-10 16:50:43,275][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 16:51:16,016][inference][INFO] - + Forward pass latency: 3.07e-02 (s)
- [2023-08-10 16:51:16,017][inference][INFO] - + Forward pass throughput: 32.60 (samples/s)
- [2023-08-10 16:51:16,018][inference][INFO] - + Warming up the generation pass
- [2023-08-10 16:51:23,392][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 16:51:43,515][inference][INFO] - + Generation pass latency: 6.71e+00 (s)
- [2023-08-10 16:51:43,518][inference][INFO] - + Generation pass throughput: 29.80 (tokens/s)
- [2023-08-10 16:51:43,519][inference][INFO] - Saving inference results
- [2023-08-10 16:51:43,529][backend][INFO] - Cleaning backend
+ [2023-08-10 20:44:42,173][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 20:44:42,174][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 20:44:42,471][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 20:44:42,471][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 20:44:42,472][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 20:44:42,614][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 20:44:42,629][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 20:44:42,630][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-10 20:45:49,226][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 20:45:49,228][inference][INFO] - Running inference benchmark
+ [2023-08-10 20:45:57,832][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 20:45:59,095][memory_tracker][INFO] - Peak memory usage: 16195.125247999998 MB
+ [2023-08-10 20:45:59,096][inference][INFO] - + Forward pass peak memory: 16195.125247999998 (MB)
+ [2023-08-10 20:45:59,096][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 20:45:59,407][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 20:46:19,759][inference][INFO] - + Forward pass latency: 3.10e-02 (s)
+ [2023-08-10 20:46:19,760][inference][INFO] - + Forward pass throughput: 32.30 (samples/s)
+ [2023-08-10 20:46:19,760][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 20:46:26,363][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 20:46:49,975][inference][INFO] - + Generation pass latency: 5.90e+00 (s)
+ [2023-08-10 20:46:49,978][inference][INFO] - + Generation pass throughput: 33.90 (tokens/s)
+ [2023-08-10 20:46:49,978][inference][INFO] - Saving inference results
+ [2023-08-10 20:46:49,988][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/1/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,51642.236928,0.0779,12.8,8.32,24.0
+ 0,30317.346815999997,0.0642,15.6,5.64,35.5
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/1/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 16:51:44,013][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 16:51:44,014][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 16:51:44,214][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 16:51:44,215][backend][INFO] - Configuring pytorch backend
- [2023-08-10 16:51:44,215][backend][INFO] - + Checking initial device isolation
- [2023-08-10 16:51:44,354][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 16:51:44,378][pytorch][INFO] - + Disabling gradients
- [2023-08-10 16:51:44,378][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
- [2023-08-10 16:52:01,188][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 16:52:01,189][inference][INFO] - Running inference benchmark
- [2023-08-10 16:52:08,963][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 16:52:09,073][memory_tracker][INFO] - Peak memory usage: 51642.236928 MB
- [2023-08-10 16:52:09,073][inference][INFO] - + Forward pass peak memory: 51642.236928 (MB)
- [2023-08-10 16:52:09,073][inference][INFO] - + Warming up the forward pass
- [2023-08-10 16:52:12,013][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 16:53:18,379][inference][INFO] - + Forward pass latency: 7.79e-02 (s)
- [2023-08-10 16:53:18,380][inference][INFO] - + Forward pass throughput: 12.80 (samples/s)
- [2023-08-10 16:53:18,381][inference][INFO] - + Warming up the generation pass
- [2023-08-10 16:53:26,674][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 16:53:51,642][inference][INFO] - + Generation pass latency: 8.32e+00 (s)
- [2023-08-10 16:53:51,644][inference][INFO] - + Generation pass throughput: 24.00 (tokens/s)
- [2023-08-10 16:53:51,645][inference][INFO] - Saving inference results
- [2023-08-10 16:53:51,651][backend][INFO] - Cleaning backend
+ [2023-08-10 20:46:50,459][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 20:46:50,460][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 20:46:50,651][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 20:46:50,652][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 20:46:50,652][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 20:46:50,755][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 20:46:50,795][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 20:46:50,796][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-10 20:47:08,129][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 20:47:08,131][inference][INFO] - Running inference benchmark
+ [2023-08-10 20:47:16,623][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 20:47:16,699][memory_tracker][INFO] - Peak memory usage: 30317.346815999997 MB
+ [2023-08-10 20:47:16,699][inference][INFO] - + Forward pass peak memory: 30317.346815999997 (MB)
+ [2023-08-10 20:47:16,700][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 20:47:18,829][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 20:48:25,144][inference][INFO] - + Forward pass latency: 6.42e-02 (s)
+ [2023-08-10 20:48:25,144][inference][INFO] - + Forward pass throughput: 15.60 (samples/s)
+ [2023-08-10 20:48:25,145][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 20:48:30,804][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 20:48:53,378][inference][INFO] - + Generation pass latency: 5.64e+00 (s)
+ [2023-08-10 20:48:53,380][inference][INFO] - + Generation pass throughput: 35.50 (tokens/s)
+ [2023-08-10 20:48:53,380][inference][INFO] - Saving inference results
+ [2023-08-10 20:48:53,387][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/2/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,35592.732672,0.0369,54.2,7.07,56.6
+ 0,16381.771776,0.0313,63.9,6.15,65.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/2/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 16:53:52,136][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 16:53:52,137][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 16:53:52,358][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 16:53:52,358][backend][INFO] - Configuring pytorch backend
- [2023-08-10 16:53:52,358][backend][INFO] - + Checking initial device isolation
- [2023-08-10 16:53:52,505][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 16:53:52,527][pytorch][INFO] - + Disabling gradients
- [2023-08-10 16:53:52,528][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
- [2023-08-10 16:54:03,235][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 16:54:03,236][inference][INFO] - Running inference benchmark
- [2023-08-10 16:54:10,992][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 16:54:11,033][memory_tracker][INFO] - Peak memory usage: 35592.732672 MB
- [2023-08-10 16:54:11,033][inference][INFO] - + Forward pass peak memory: 35592.732672 (MB)
- [2023-08-10 16:54:11,034][inference][INFO] - + Warming up the forward pass
- [2023-08-10 16:54:11,492][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 16:54:53,362][inference][INFO] - + Forward pass latency: 3.69e-02 (s)
- [2023-08-10 16:54:53,363][inference][INFO] - + Forward pass throughput: 54.20 (samples/s)
- [2023-08-10 16:54:53,363][inference][INFO] - + Warming up the generation pass
- [2023-08-10 16:55:01,099][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 16:55:22,320][inference][INFO] - + Generation pass latency: 7.07e+00 (s)
- [2023-08-10 16:55:22,323][inference][INFO] - + Generation pass throughput: 56.60 (tokens/s)
- [2023-08-10 16:55:22,323][inference][INFO] - Saving inference results
- [2023-08-10 16:55:22,329][backend][INFO] - Cleaning backend
+ [2023-08-10 20:48:53,873][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 20:48:53,875][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 20:48:54,098][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 20:48:54,098][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 20:48:54,099][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 20:48:54,201][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 20:48:54,237][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 20:48:54,238][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-10 20:49:05,505][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 20:49:05,507][inference][INFO] - Running inference benchmark
+ [2023-08-10 20:49:13,939][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 20:49:13,979][memory_tracker][INFO] - Peak memory usage: 16381.771776 MB
+ [2023-08-10 20:49:13,979][inference][INFO] - + Forward pass peak memory: 16381.771776 (MB)
+ [2023-08-10 20:49:13,979][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 20:49:14,457][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 20:49:45,157][inference][INFO] - + Forward pass latency: 3.13e-02 (s)
+ [2023-08-10 20:49:45,158][inference][INFO] - + Forward pass throughput: 63.90 (samples/s)
+ [2023-08-10 20:49:45,158][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 20:49:52,099][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 20:50:16,689][inference][INFO] - + Generation pass latency: 6.15e+00 (s)
+ [2023-08-10 20:50:16,690][inference][INFO] - + Generation pass throughput: 65.00 (tokens/s)
+ [2023-08-10 20:50:16,691][inference][INFO] - Saving inference results
+ [2023-08-10 20:50:16,698][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/3/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,49989.681152,0.176,11.4,11.7,34.2
+ 0,30778.720255999997,0.109,18.3,7.04,56.8
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/3/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 16:55:22,829][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 16:55:22,830][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 16:55:23,088][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 16:55:23,088][backend][INFO] - Configuring pytorch backend
- [2023-08-10 16:55:23,088][backend][INFO] - + Checking initial device isolation
- [2023-08-10 16:55:23,234][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 16:55:23,255][pytorch][INFO] - + Disabling gradients
- [2023-08-10 16:55:23,256][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
- [2023-08-10 16:55:39,923][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 16:55:39,924][inference][INFO] - Running inference benchmark
- [2023-08-10 16:55:47,796][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 16:55:48,047][memory_tracker][INFO] - Peak memory usage: 49989.681152 MB
- [2023-08-10 16:55:48,047][inference][INFO] - + Forward pass peak memory: 49989.681152 (MB)
- [2023-08-10 16:55:48,047][inference][INFO] - + Warming up the forward pass
- [2023-08-10 16:55:53,759][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 16:57:05,288][inference][INFO] - + Forward pass latency: 1.76e-01 (s)
- [2023-08-10 16:57:05,288][inference][INFO] - + Forward pass throughput: 11.40 (samples/s)
- [2023-08-10 16:57:05,289][inference][INFO] - + Warming up the generation pass
- [2023-08-10 16:57:16,516][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 16:57:39,881][inference][INFO] - + Generation pass latency: 1.17e+01 (s)
- [2023-08-10 16:57:39,885][inference][INFO] - + Generation pass throughput: 34.20 (tokens/s)
- [2023-08-10 16:57:39,885][inference][INFO] - Saving inference results
- [2023-08-10 16:57:39,891][backend][INFO] - Cleaning backend
+ [2023-08-10 20:50:17,187][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 20:50:17,187][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 20:50:17,378][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 20:50:17,378][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 20:50:17,378][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 20:50:17,481][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 20:50:17,519][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 20:50:17,520][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-10 20:50:35,128][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 20:50:35,130][inference][INFO] - Running inference benchmark
+ [2023-08-10 20:50:43,722][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 20:50:43,848][memory_tracker][INFO] - Peak memory usage: 30778.720255999997 MB
+ [2023-08-10 20:50:43,848][inference][INFO] - + Forward pass peak memory: 30778.720255999997 (MB)
+ [2023-08-10 20:50:43,849][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 20:50:47,669][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 20:51:58,213][inference][INFO] - + Forward pass latency: 1.09e-01 (s)
+ [2023-08-10 20:51:58,214][inference][INFO] - + Forward pass throughput: 18.30 (samples/s)
+ [2023-08-10 20:51:58,214][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 20:52:05,263][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 20:52:26,390][inference][INFO] - + Generation pass latency: 7.04e+00 (s)
+ [2023-08-10 20:52:26,392][inference][INFO] - + Generation pass throughput: 56.80 (tokens/s)
+ [2023-08-10 20:52:26,392][inference][INFO] - Saving inference results
+ [2023-08-10 20:52:26,400][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/4/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,36211.392512,0.0452,88.5,7.45,107.0
+ 0,17000.431615999998,0.0315,127.0,6.18,129.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/4/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 16:57:40,415][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 16:57:40,416][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 16:57:40,606][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 16:57:40,607][backend][INFO] - Configuring pytorch backend
- [2023-08-10 16:57:40,607][backend][INFO] - + Checking initial device isolation
- [2023-08-10 16:57:40,755][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 16:57:40,777][pytorch][INFO] - + Disabling gradients
- [2023-08-10 16:57:40,778][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
- [2023-08-10 16:57:51,271][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 16:57:51,273][inference][INFO] - Running inference benchmark
- [2023-08-10 16:57:58,998][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 16:57:59,048][memory_tracker][INFO] - Peak memory usage: 36211.392512 MB
- [2023-08-10 16:57:59,049][inference][INFO] - + Forward pass peak memory: 36211.392512 (MB)
- [2023-08-10 16:57:59,049][inference][INFO] - + Warming up the forward pass
- [2023-08-10 16:58:00,714][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 16:58:54,604][inference][INFO] - + Forward pass latency: 4.52e-02 (s)
- [2023-08-10 16:58:54,605][inference][INFO] - + Forward pass throughput: 88.50 (samples/s)
- [2023-08-10 16:58:54,606][inference][INFO] - + Warming up the generation pass
- [2023-08-10 16:59:03,215][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 16:59:25,569][inference][INFO] - + Generation pass latency: 7.45e+00 (s)
- [2023-08-10 16:59:25,573][inference][INFO] - + Generation pass throughput: 107.00 (tokens/s)
- [2023-08-10 16:59:25,573][inference][INFO] - Saving inference results
- [2023-08-10 16:59:25,579][backend][INFO] - Cleaning backend
+ [2023-08-10 20:52:26,910][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 20:52:26,911][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 20:52:27,096][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 20:52:27,097][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 20:52:27,097][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 20:52:27,201][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 20:52:27,240][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 20:52:27,241][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-10 20:52:40,576][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 20:52:40,577][inference][INFO] - Running inference benchmark
+ [2023-08-10 20:52:49,273][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 20:52:49,319][memory_tracker][INFO] - Peak memory usage: 17000.431615999998 MB
+ [2023-08-10 20:52:49,320][inference][INFO] - + Forward pass peak memory: 17000.431615999998 (MB)
+ [2023-08-10 20:52:49,320][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 20:52:50,073][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 20:53:37,857][inference][INFO] - + Forward pass latency: 3.15e-02 (s)
+ [2023-08-10 20:53:37,858][inference][INFO] - + Forward pass throughput: 127.00 (samples/s)
+ [2023-08-10 20:53:37,859][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 20:53:45,026][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 20:54:09,759][inference][INFO] - + Generation pass latency: 6.18e+00 (s)
+ [2023-08-10 20:54:09,761][inference][INFO] - + Generation pass throughput: 129.00 (tokens/s)
+ [2023-08-10 20:54:09,761][inference][INFO] - Saving inference results
+ [2023-08-10 20:54:09,773][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/5/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,50692.227071999994,0.302,13.2,12.3,65.0
+ 0,31481.266175999997,0.187,21.4,7.93,101.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/5/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 16:59:26,090][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 16:59:26,091][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 16:59:26,370][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 16:59:26,371][backend][INFO] - Configuring pytorch backend
- [2023-08-10 16:59:26,371][backend][INFO] - + Checking initial device isolation
- [2023-08-10 16:59:26,516][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 16:59:26,538][pytorch][INFO] - + Disabling gradients
- [2023-08-10 16:59:26,539][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
- [2023-08-10 16:59:43,777][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 16:59:43,778][inference][INFO] - Running inference benchmark
- [2023-08-10 16:59:51,553][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 16:59:51,979][memory_tracker][INFO] - Peak memory usage: 50692.227071999994 MB
- [2023-08-10 16:59:51,979][inference][INFO] - + Forward pass peak memory: 50692.227071999994 (MB)
- [2023-08-10 16:59:51,988][inference][INFO] - + Warming up the forward pass
- [2023-08-10 17:00:02,693][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 17:01:19,015][inference][INFO] - + Forward pass latency: 3.02e-01 (s)
- [2023-08-10 17:01:19,016][inference][INFO] - + Forward pass throughput: 13.20 (samples/s)
- [2023-08-10 17:01:19,017][inference][INFO] - + Warming up the generation pass
- [2023-08-10 17:01:31,424][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 17:01:56,033][inference][INFO] - + Generation pass latency: 1.23e+01 (s)
- [2023-08-10 17:01:56,037][inference][INFO] - + Generation pass throughput: 65.00 (tokens/s)
- [2023-08-10 17:01:56,037][inference][INFO] - Saving inference results
- [2023-08-10 17:01:56,043][backend][INFO] - Cleaning backend
+ [2023-08-10 20:54:10,280][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 20:54:10,281][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 20:54:10,477][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 20:54:10,478][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 20:54:10,478][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 20:54:10,581][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 20:54:10,621][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 20:54:10,622][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-10 20:54:28,092][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 20:54:28,094][inference][INFO] - Running inference benchmark
+ [2023-08-10 20:54:36,607][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 20:54:36,811][memory_tracker][INFO] - Peak memory usage: 31481.266175999997 MB
+ [2023-08-10 20:54:36,812][inference][INFO] - + Forward pass peak memory: 31481.266175999997 (MB)
+ [2023-08-10 20:54:36,816][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 20:54:43,735][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 20:55:58,389][inference][INFO] - + Forward pass latency: 1.87e-01 (s)
+ [2023-08-10 20:55:58,391][inference][INFO] - + Forward pass throughput: 21.40 (samples/s)
+ [2023-08-10 20:55:58,392][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 20:56:06,495][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 20:56:30,297][inference][INFO] - + Generation pass latency: 7.93e+00 (s)
+ [2023-08-10 20:56:30,298][inference][INFO] - + Generation pass throughput: 101.00 (tokens/s)
+ [2023-08-10 20:56:30,298][inference][INFO] - Saving inference results
+ [2023-08-10 20:56:30,305][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/6/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,38709.100544,0.158,101.0,9.57,334.0
+ 0,19498.139648,0.0995,161.0,8.2,390.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/6/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 17:01:56,601][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 17:01:56,602][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 17:01:56,842][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 17:01:56,842][backend][INFO] - Configuring pytorch backend
- [2023-08-10 17:01:56,842][backend][INFO] - + Checking initial device isolation
- [2023-08-10 17:01:56,986][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 17:01:57,008][pytorch][INFO] - + Disabling gradients
- [2023-08-10 17:01:57,009][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
- [2023-08-10 17:02:07,690][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 17:02:07,692][inference][INFO] - Running inference benchmark
- [2023-08-10 17:02:15,394][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 17:02:15,620][memory_tracker][INFO] - Peak memory usage: 38709.100544 MB
- [2023-08-10 17:02:15,621][inference][INFO] - + Forward pass peak memory: 38709.100544 (MB)
- [2023-08-10 17:02:15,621][inference][INFO] - + Warming up the forward pass
- [2023-08-10 17:02:19,897][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 17:03:14,397][inference][INFO] - + Forward pass latency: 1.58e-01 (s)
- [2023-08-10 17:03:14,397][inference][INFO] - + Forward pass throughput: 101.00 (samples/s)
- [2023-08-10 17:03:14,398][inference][INFO] - + Warming up the generation pass
- [2023-08-10 17:03:24,177][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 17:03:52,888][inference][INFO] - + Generation pass latency: 9.57e+00 (s)
- [2023-08-10 17:03:52,891][inference][INFO] - + Generation pass throughput: 334.00 (tokens/s)
- [2023-08-10 17:03:52,892][inference][INFO] - Saving inference results
- [2023-08-10 17:03:52,897][backend][INFO] - Cleaning backend
+ [2023-08-10 20:56:30,905][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 20:56:30,907][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 20:56:31,112][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 20:56:31,112][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 20:56:31,113][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 20:56:31,215][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 20:56:31,254][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 20:56:31,255][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-10 20:56:42,744][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 20:56:42,745][inference][INFO] - Running inference benchmark
+ [2023-08-10 20:56:51,331][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 20:56:51,442][memory_tracker][INFO] - Peak memory usage: 19498.139648 MB
+ [2023-08-10 20:56:51,442][inference][INFO] - + Forward pass peak memory: 19498.139648 (MB)
+ [2023-08-10 20:56:51,442][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 20:56:54,107][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 20:57:48,317][inference][INFO] - + Forward pass latency: 9.95e-02 (s)
+ [2023-08-10 20:57:48,318][inference][INFO] - + Forward pass throughput: 161.00 (samples/s)
+ [2023-08-10 20:57:48,318][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 20:57:56,767][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 20:58:21,355][inference][INFO] - + Generation pass latency: 8.20e+00 (s)
+ [2023-08-10 20:58:21,357][inference][INFO] - + Generation pass throughput: 390.00 (tokens/s)
+ [2023-08-10 20:58:21,357][inference][INFO] - Saving inference results
+ [2023-08-10 20:58:21,365][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/7/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,55035.428863999994,1.26,12.7,21.0,152.0
+ 0,35824.467968,0.683,23.4,13.0,246.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/7/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 17:03:53,627][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 17:03:53,628][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 17:03:53,851][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 17:03:53,851][backend][INFO] - Configuring pytorch backend
- [2023-08-10 17:03:53,851][backend][INFO] - + Checking initial device isolation
- [2023-08-10 17:03:53,999][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 17:03:54,021][pytorch][INFO] - + Disabling gradients
- [2023-08-10 17:03:54,022][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
- [2023-08-10 17:04:11,433][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 17:04:11,435][inference][INFO] - Running inference benchmark
- [2023-08-10 17:04:19,217][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 17:04:20,767][memory_tracker][INFO] - Peak memory usage: 55035.428863999994 MB
- [2023-08-10 17:04:20,767][inference][INFO] - + Forward pass peak memory: 55035.428863999994 (MB)
- [2023-08-10 17:04:20,804][inference][INFO] - + Warming up the forward pass
- [2023-08-10 17:05:01,550][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 17:06:09,960][inference][INFO] - + Forward pass latency: 1.26e+00 (s)
- [2023-08-10 17:06:09,961][inference][INFO] - + Forward pass throughput: 12.70 (samples/s)
- [2023-08-10 17:06:09,962][inference][INFO] - + Warming up the generation pass
- [2023-08-10 17:06:31,841][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 17:06:52,805][inference][INFO] - + Generation pass latency: 2.10e+01 (s)
- [2023-08-10 17:06:52,809][inference][INFO] - + Generation pass throughput: 152.00 (tokens/s)
- [2023-08-10 17:06:52,809][inference][INFO] - Saving inference results
- [2023-08-10 17:06:52,816][backend][INFO] - Cleaning backend
+ [2023-08-10 20:58:21,965][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 20:58:21,967][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 20:58:22,166][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 20:58:22,166][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 20:58:22,166][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 20:58:22,269][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 20:58:22,307][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 20:58:22,308][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-10 20:58:40,054][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 20:58:40,056][inference][INFO] - Running inference benchmark
+ [2023-08-10 20:58:48,591][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 20:58:49,301][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB
+ [2023-08-10 20:58:49,301][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB)
+ [2023-08-10 20:58:49,318][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 20:59:14,651][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:00:32,551][inference][INFO] - + Forward pass latency: 6.83e-01 (s)
+ [2023-08-10 21:00:32,552][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
+ [2023-08-10 21:00:32,552][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 21:00:46,360][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 21:01:12,308][inference][INFO] - + Generation pass latency: 1.30e+01 (s)
+ [2023-08-10 21:01:12,312][inference][INFO] - + Generation pass throughput: 246.00 (tokens/s)
+ [2023-08-10 21:01:12,312][inference][INFO] - Saving inference results
+ [2023-08-10 21:01:12,319][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_bert_inference/0/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
- 0,459.93984,0.00319,313.0
+ 0,460.140544,0.004,250.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_bert_inference/0/main.log CHANGED
@@ -1,40 +1,20 @@
- [2023-08-10 18:21:50,370][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 18:21:50,371][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 18:21:50,557][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
- [2023-08-10 18:21:50,557][backend][INFO] - Configuring pytorch backend
- [2023-08-10 18:21:50,557][backend][INFO] - + Checking initial device isolation
- [2023-08-10 18:21:50,558][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 18:21:50,559][pytorch][INFO] - + Disabling gradients
- [2023-08-10 18:21:50,559][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
- [2023-08-10 18:21:51,180][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 18:21:51,181][inference][INFO] - Running inference benchmark
- [2023-08-10 18:21:51,303][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
- [2023-08-10 18:21:51,305][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 18:21:51,357][inference][INFO] - + Forward pass peak memory: 459.689984 (MB)
- [2023-08-10 18:21:51,358][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
- [2023-08-10 18:21:51,360][inference][INFO] - + Warming up the forward pass
- [2023-08-10 18:21:51,392][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 18:22:01,505][inference][INFO] - + Forward pass latency: 3.15e-03 (s)
- [2023-08-10 18:22:01,508][inference][INFO] - + Forward pass throughput: 317.00 (samples/s)
- [2023-08-10 18:22:01,508][inference][INFO] - Saving inference results
- [2023-08-10 18:22:01,521][backend][INFO] - Cleaning backend
- [2023-08-10 18:52:05,787][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 18:52:05,789][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 18:52:05,978][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
- [2023-08-10 18:52:05,978][backend][INFO] - Configuring pytorch backend
- [2023-08-10 18:52:05,978][backend][INFO] - + Checking initial device isolation
- [2023-08-10 18:52:05,978][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 18:52:05,981][pytorch][INFO] - + Disabling gradients
- [2023-08-10 18:52:05,982][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
- [2023-08-10 18:52:06,629][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 18:52:06,630][inference][INFO] - Running inference benchmark
- [2023-08-10 18:52:06,752][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
- [2023-08-10 18:52:06,753][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 18:52:06,807][inference][INFO] - + Forward pass peak memory: 459.93984 (MB)
- [2023-08-10 18:52:06,809][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
- [2023-08-10 18:52:06,810][inference][INFO] - + Warming up the forward pass
- [2023-08-10 18:52:06,847][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 18:52:16,958][inference][INFO] - + Forward pass latency: 3.19e-03 (s)
- [2023-08-10 18:52:16,961][inference][INFO] - + Forward pass throughput: 313.00 (samples/s)
- [2023-08-10 18:52:16,961][inference][INFO] - Saving inference results
- [2023-08-10 18:52:16,975][backend][INFO] - Cleaning backend
+ [2023-08-10 21:01:17,268][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 21:01:17,269][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 21:01:17,458][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-08-10 21:01:17,458][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 21:01:17,458][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 21:01:17,458][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 21:01:17,460][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 21:01:17,460][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-10 21:01:18,085][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 21:01:18,086][inference][INFO] - Running inference benchmark
+ [2023-08-10 21:01:18,223][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-10 21:01:18,225][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 21:01:18,278][inference][INFO] - + Forward pass peak memory: 460.140544 (MB)
+ [2023-08-10 21:01:18,279][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-10 21:01:18,281][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 21:01:18,323][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:01:28,421][inference][INFO] - + Forward pass latency: 4.00e-03 (s)
+ [2023-08-10 21:01:28,423][inference][INFO] - + Forward pass throughput: 250.00 (samples/s)
+ [2023-08-10 21:01:28,423][inference][INFO] - Saving inference results
+ [2023-08-10 21:01:28,437][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_gpt2_inference/0/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,463.38048,0.0039,256.0,0.509,196.0
+ 0,463.736832,0.00399,251.0,0.534,187.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_gpt2_inference/0/main.log CHANGED
@@ -1,44 +1,22 @@
- [2023-08-10 18:22:05,279][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 18:22:05,280][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 18:22:05,460][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
- [2023-08-10 18:22:05,460][backend][INFO] - Configuring pytorch backend
- [2023-08-10 18:22:05,460][backend][INFO] - + Checking initial device isolation
- [2023-08-10 18:22:05,460][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 18:22:05,462][pytorch][INFO] - + Disabling gradients
- [2023-08-10 18:22:05,462][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
- [2023-08-10 18:22:06,129][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 18:22:06,129][inference][INFO] - Running inference benchmark
- [2023-08-10 18:22:06,334][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 18:22:06,380][inference][INFO] - + Forward pass peak memory: 464.019456 (MB)
- [2023-08-10 18:22:06,382][inference][INFO] - + Warming up the forward pass
- [2023-08-10 18:22:06,418][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 18:22:16,527][inference][INFO] - + Forward pass latency: 3.23e-03 (s)
- [2023-08-10 18:22:16,529][inference][INFO] - + Forward pass throughput: 310.00 (samples/s)
- [2023-08-10 18:22:16,530][inference][INFO] - + Warming up the generation pass
- [2023-08-10 18:22:17,025][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 18:22:27,218][inference][INFO] - + Generation pass latency: 4.85e-01 (s)
- [2023-08-10 18:22:27,219][inference][INFO] - + Generation pass throughput: 206.00 (tokens/s)
- [2023-08-10 18:22:27,219][inference][INFO] - Saving inference results
- [2023-08-10 18:22:27,232][backend][INFO] - Cleaning backend
- [2023-08-10 18:52:21,674][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 18:52:21,677][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 18:52:21,861][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
- [2023-08-10 18:52:21,861][backend][INFO] - Configuring pytorch backend
- [2023-08-10 18:52:21,861][backend][INFO] - + Checking initial device isolation
- [2023-08-10 18:52:21,862][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 18:52:21,863][pytorch][INFO] - + Disabling gradients
- [2023-08-10 18:52:21,864][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
- [2023-08-10 18:52:22,565][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 18:52:22,565][inference][INFO] - Running inference benchmark
- [2023-08-10 18:52:22,881][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 18:52:22,934][inference][INFO] - + Forward pass peak memory: 463.38048 (MB)
- [2023-08-10 18:52:22,935][inference][INFO] - + Warming up the forward pass
- [2023-08-10 18:52:22,969][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 18:52:33,064][inference][INFO] - + Forward pass latency: 3.90e-03 (s)
- [2023-08-10 18:52:33,067][inference][INFO] - + Forward pass throughput: 256.00 (samples/s)
- [2023-08-10 18:52:33,067][inference][INFO] - + Warming up the generation pass
- [2023-08-10 18:52:33,612][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 18:52:43,797][inference][INFO] - + Generation pass latency: 5.09e-01 (s)
- [2023-08-10 18:52:43,797][inference][INFO] - + Generation pass throughput: 196.00 (tokens/s)
- [2023-08-10 18:52:43,797][inference][INFO] - Saving inference results
- [2023-08-10 18:52:43,811][backend][INFO] - Cleaning backend
+ [2023-08-10 21:01:32,949][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 21:01:32,950][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 21:01:33,134][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+ [2023-08-10 21:01:33,135][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 21:01:33,135][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 21:01:33,135][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 21:01:33,136][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 21:01:33,137][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-10 21:01:33,826][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 21:01:33,826][inference][INFO] - Running inference benchmark
+ [2023-08-10 21:01:34,042][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 21:01:34,092][inference][INFO] - + Forward pass peak memory: 463.736832 (MB)
+ [2023-08-10 21:01:34,093][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 21:01:34,127][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:01:44,220][inference][INFO] - + Forward pass latency: 3.99e-03 (s)
+ [2023-08-10 21:01:44,223][inference][INFO] - + Forward pass throughput: 251.00 (samples/s)
+ [2023-08-10 21:01:44,223][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 21:01:44,818][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 21:01:54,969][inference][INFO] - + Generation pass latency: 5.34e-01 (s)
+ [2023-08-10 21:01:54,970][inference][INFO] - + Generation pass throughput: 187.00 (tokens/s)
+ [2023-08-10 21:01:54,970][inference][INFO] - Saving inference results
+ [2023-08-10 21:01:54,987][backend][INFO] - Cleaning backend
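
Since every main.log above follows the same fixed line format, the headline numbers can also be recovered from the logs directly. A minimal sketch, assuming the same local checkout; the regex targets the exact "+ <metric>: <value> (<unit>)" layout shown in the diffs and is not part of the commit:

    import re
    from pathlib import Path

    # Matches e.g. "+ Forward pass latency: 3.10e-02 (s)" anywhere in a log line.
    METRIC_RE = re.compile(
        r"\+ (?P<name>Forward pass latency|Forward pass throughput|"
        r"Generation pass latency|Generation pass throughput): "
        r"(?P<value>[0-9.eE+-]+) \((?P<unit>[^)]+)\)"
    )

    def parse_main_log(path: Path) -> dict:
        """Collect the metric lines of one benchmark log into a dict."""
        metrics = {}
        for line in path.read_text().splitlines():
            match = METRIC_RE.search(line)
            if match:
                metrics[f"{match['name']} ({match['unit']})"] = float(match["value"])
        return metrics

    print(parse_main_log(Path(
        "raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33"
        "/llama_1gpu_inference/0/main.log"
    )))
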