fxmarty committed
Commit d12bb3b · 1 Parent(s): 711d0ae

Adding regression benchmark for the transformers SHA 347001237a8ff845fc23f678107fc505361f9f13

Files changed (20)
  1. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/0/inference_results.csv +1 -1
  2. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/0/main.log +23 -23
  3. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/1/inference_results.csv +1 -1
  4. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/1/main.log +23 -23
  5. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/2/inference_results.csv +1 -1
  6. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/2/main.log +23 -23
  7. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/3/inference_results.csv +1 -1
  8. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/3/main.log +23 -23
  9. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/4/inference_results.csv +1 -1
  10. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/4/main.log +23 -23
  11. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/5/inference_results.csv +1 -1
  12. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/5/main.log +23 -23
  13. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/6/inference_results.csv +1 -1
  14. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/6/main.log +23 -23
  15. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/7/inference_results.csv +1 -1
  16. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/7/main.log +23 -23
  17. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_bert_inference/0/inference_results.csv +1 -1
  18. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_bert_inference/0/main.log +20 -20
  19. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_gpt2_inference/0/inference_results.csv +1 -1
  20. raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_gpt2_inference/0/main.log +22 -22
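To work with these results programmatically, here is a minimal sketch (assuming a local checkout of this dataset repository and that pandas is installed; the column selection is illustrative) that loads one of the inference_results.csv files using the raw_results/<timestamp>_<transformers-sha>/<benchmark>/<run-id>/ layout visible in the file list above:

import pandas as pd

# Path follows the raw_results/<timestamp>_<transformers-sha>/<benchmark>/<run-id>/
# layout used by the files in this commit.
path = (
    "raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13"
    "/llama_1gpu_inference/0/inference_results.csv"
)

# The first (unnamed) CSV column is the row index.
df = pd.read_csv(path, index_col=0)
print(df[["forward.latency(s)", "generate.throughput(tokens/s)"]])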
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/0/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
  ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,16195.125247999998,0.0399,25.1,7.68,26.0
+ 0,16195.125247999998,0.0315,31.7,5.99,33.4
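As a quick sanity check, the change can be quantified directly from the two rows above; a minimal sketch in plain Python (values copied verbatim from this hunk) computing the per-metric percentage change:

# Old and new rows from the CSV diff above, keyed by the header columns.
cols = [
    "forward.peak_memory(MB)",
    "forward.latency(s)",
    "forward.throughput(samples/s)",
    "generate.latency(s)",
    "generate.throughput(tokens/s)",
]
old = [16195.125247999998, 0.0399, 25.1, 7.68, 26.0]
new = [16195.125247999998, 0.0315, 31.7, 5.99, 33.4]

for name, o, n in zip(cols, old, new):
    # Positive % means the metric increased between the two runs.
    print(f"{name}: {o} -> {n} ({(n - o) / o * 100:+.1f}%)")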
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/0/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 18:53:41,035][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 18:53:41,036][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 18:53:41,414][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 18:53:41,414][backend][INFO] - Configuring pytorch backend
- [2023-08-10 18:53:41,415][backend][INFO] - + Checking initial device isolation
- [2023-08-10 18:53:41,559][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 18:53:41,572][pytorch][INFO] - + Disabling gradients
- [2023-08-10 18:53:41,572][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
- [2023-08-10 18:54:49,945][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 18:54:49,947][inference][INFO] - Running inference benchmark
- [2023-08-10 18:54:58,590][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 18:54:59,875][memory_tracker][INFO] - Peak memory usage: 16195.125247999998 MB
- [2023-08-10 18:54:59,875][inference][INFO] - + Forward pass peak memory: 16195.125247999998 (MB)
- [2023-08-10 18:54:59,876][inference][INFO] - + Warming up the forward pass
- [2023-08-10 18:55:00,275][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 18:55:20,466][inference][INFO] - + Forward pass latency: 3.99e-02 (s)
- [2023-08-10 18:55:20,467][inference][INFO] - + Forward pass throughput: 25.10 (samples/s)
- [2023-08-10 18:55:20,467][inference][INFO] - + Warming up the generation pass
- [2023-08-10 18:55:28,859][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 18:55:51,898][inference][INFO] - + Generation pass latency: 7.68e+00 (s)
- [2023-08-10 18:55:51,901][inference][INFO] - + Generation pass throughput: 26.00 (tokens/s)
- [2023-08-10 18:55:51,901][inference][INFO] - Saving inference results
- [2023-08-10 18:55:51,911][backend][INFO] - Cleaning backend
+ [2023-08-10 21:02:41,867][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 21:02:41,868][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 21:02:42,157][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 21:02:42,157][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 21:02:42,158][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 21:02:42,300][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 21:02:42,317][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 21:02:42,318][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-10 21:03:51,341][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 21:03:51,343][inference][INFO] - Running inference benchmark
+ [2023-08-10 21:03:59,523][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 21:04:00,786][memory_tracker][INFO] - Peak memory usage: 16195.125247999998 MB
+ [2023-08-10 21:04:00,786][inference][INFO] - + Forward pass peak memory: 16195.125247999998 (MB)
+ [2023-08-10 21:04:00,786][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 21:04:01,102][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:04:21,440][inference][INFO] - + Forward pass latency: 3.15e-02 (s)
+ [2023-08-10 21:04:21,441][inference][INFO] - + Forward pass throughput: 31.70 (samples/s)
+ [2023-08-10 21:04:21,442][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 21:04:28,129][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 21:04:52,084][inference][INFO] - + Generation pass latency: 5.99e+00 (s)
+ [2023-08-10 21:04:52,086][inference][INFO] - + Generation pass throughput: 33.40 (tokens/s)
+ [2023-08-10 21:04:52,086][inference][INFO] - Saving inference results
+ [2023-08-10 21:04:52,096][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/1/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
  ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,30317.346815999997,0.0643,15.6,5.55,36.0
+ 0,30317.346815999997,0.0642,15.6,5.68,35.2
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/1/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 18:55:52,413][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 18:55:52,415][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 18:55:52,610][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 18:55:52,610][backend][INFO] - Configuring pytorch backend
- [2023-08-10 18:55:52,611][backend][INFO] - + Checking initial device isolation
- [2023-08-10 18:55:52,713][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 18:55:52,748][pytorch][INFO] - + Disabling gradients
- [2023-08-10 18:55:52,749][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
- [2023-08-10 18:56:10,041][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 18:56:10,043][inference][INFO] - Running inference benchmark
- [2023-08-10 18:56:18,675][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 18:56:18,752][memory_tracker][INFO] - Peak memory usage: 30317.346815999997 MB
- [2023-08-10 18:56:18,752][inference][INFO] - + Forward pass peak memory: 30317.346815999997 (MB)
- [2023-08-10 18:56:18,752][inference][INFO] - + Warming up the forward pass
- [2023-08-10 18:56:20,879][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 18:57:27,273][inference][INFO] - + Forward pass latency: 6.43e-02 (s)
- [2023-08-10 18:57:27,274][inference][INFO] - + Forward pass throughput: 15.60 (samples/s)
- [2023-08-10 18:57:27,275][inference][INFO] - + Warming up the generation pass
- [2023-08-10 18:57:32,839][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 18:57:55,027][inference][INFO] - + Generation pass latency: 5.55e+00 (s)
- [2023-08-10 18:57:55,029][inference][INFO] - + Generation pass throughput: 36.00 (tokens/s)
- [2023-08-10 18:57:55,029][inference][INFO] - Saving inference results
- [2023-08-10 18:57:55,036][backend][INFO] - Cleaning backend
+ [2023-08-10 21:04:52,568][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 21:04:52,569][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 21:04:52,764][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 21:04:52,764][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 21:04:52,764][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 21:04:52,864][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 21:04:52,891][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 21:04:52,892][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-10 21:05:09,629][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 21:05:09,630][inference][INFO] - Running inference benchmark
+ [2023-08-10 21:05:17,422][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 21:05:17,497][memory_tracker][INFO] - Peak memory usage: 30317.346815999997 MB
+ [2023-08-10 21:05:17,497][inference][INFO] - + Forward pass peak memory: 30317.346815999997 (MB)
+ [2023-08-10 21:05:17,498][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 21:05:19,637][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:06:25,937][inference][INFO] - + Forward pass latency: 6.42e-02 (s)
+ [2023-08-10 21:06:25,938][inference][INFO] - + Forward pass throughput: 15.60 (samples/s)
+ [2023-08-10 21:06:25,938][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 21:06:31,637][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 21:06:54,360][inference][INFO] - + Generation pass latency: 5.68e+00 (s)
+ [2023-08-10 21:06:54,363][inference][INFO] - + Generation pass throughput: 35.20 (tokens/s)
+ [2023-08-10 21:06:54,363][inference][INFO] - Saving inference results
+ [2023-08-10 21:06:54,370][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/2/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
  ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,16381.771776,0.0312,64.1,6.09,65.7
+ 0,16381.771776,0.0315,63.5,6.17,64.8
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/2/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 18:57:55,528][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 18:57:55,529][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 18:57:55,734][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 18:57:55,734][backend][INFO] - Configuring pytorch backend
- [2023-08-10 18:57:55,734][backend][INFO] - + Checking initial device isolation
- [2023-08-10 18:57:55,837][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 18:57:55,871][pytorch][INFO] - + Disabling gradients
- [2023-08-10 18:57:55,871][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
- [2023-08-10 18:58:07,179][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 18:58:07,181][inference][INFO] - Running inference benchmark
- [2023-08-10 18:58:15,666][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 18:58:15,707][memory_tracker][INFO] - Peak memory usage: 16381.771776 MB
- [2023-08-10 18:58:15,707][inference][INFO] - + Forward pass peak memory: 16381.771776 (MB)
- [2023-08-10 18:58:15,707][inference][INFO] - + Warming up the forward pass
- [2023-08-10 18:58:16,196][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 18:58:47,047][inference][INFO] - + Forward pass latency: 3.12e-02 (s)
- [2023-08-10 18:58:47,048][inference][INFO] - + Forward pass throughput: 64.10 (samples/s)
- [2023-08-10 18:58:47,049][inference][INFO] - + Warming up the generation pass
- [2023-08-10 18:58:53,942][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 18:59:18,317][inference][INFO] - + Generation pass latency: 6.09e+00 (s)
- [2023-08-10 18:59:18,319][inference][INFO] - + Generation pass throughput: 65.70 (tokens/s)
- [2023-08-10 18:59:18,320][inference][INFO] - Saving inference results
- [2023-08-10 18:59:18,327][backend][INFO] - Cleaning backend
+ [2023-08-10 21:06:54,938][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 21:06:54,940][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 21:06:55,132][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 21:06:55,132][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 21:06:55,133][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 21:06:55,236][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 21:06:55,260][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 21:06:55,261][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-10 21:07:05,882][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 21:07:05,884][inference][INFO] - Running inference benchmark
+ [2023-08-10 21:07:13,653][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 21:07:13,692][memory_tracker][INFO] - Peak memory usage: 16381.771776 MB
+ [2023-08-10 21:07:13,692][inference][INFO] - + Forward pass peak memory: 16381.771776 (MB)
+ [2023-08-10 21:07:13,692][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 21:07:14,169][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:07:44,628][inference][INFO] - + Forward pass latency: 3.15e-02 (s)
+ [2023-08-10 21:07:44,629][inference][INFO] - + Forward pass throughput: 63.50 (samples/s)
+ [2023-08-10 21:07:44,630][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 21:07:51,600][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 21:08:16,272][inference][INFO] - + Generation pass latency: 6.17e+00 (s)
+ [2023-08-10 21:08:16,274][inference][INFO] - + Generation pass throughput: 64.80 (tokens/s)
+ [2023-08-10 21:08:16,274][inference][INFO] - Saving inference results
+ [2023-08-10 21:08:16,281][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/3/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
  ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,30778.720255999997,0.109,18.3,7.53,53.1
+ 0,30778.720255999997,0.109,18.3,7.03,56.9
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/3/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 18:59:18,828][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 18:59:18,830][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 18:59:19,038][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 18:59:19,038][backend][INFO] - Configuring pytorch backend
- [2023-08-10 18:59:19,038][backend][INFO] - + Checking initial device isolation
- [2023-08-10 18:59:19,147][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 18:59:19,182][pytorch][INFO] - + Disabling gradients
- [2023-08-10 18:59:19,183][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
- [2023-08-10 18:59:36,592][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 18:59:36,594][inference][INFO] - Running inference benchmark
- [2023-08-10 18:59:45,211][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 18:59:45,335][memory_tracker][INFO] - Peak memory usage: 30778.720255999997 MB
- [2023-08-10 18:59:45,335][inference][INFO] - + Forward pass peak memory: 30778.720255999997 (MB)
- [2023-08-10 18:59:45,335][inference][INFO] - + Warming up the forward pass
- [2023-08-10 18:59:49,164][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 19:00:59,802][inference][INFO] - + Forward pass latency: 1.09e-01 (s)
- [2023-08-10 19:00:59,803][inference][INFO] - + Forward pass throughput: 18.30 (samples/s)
- [2023-08-10 19:00:59,804][inference][INFO] - + Warming up the generation pass
- [2023-08-10 19:01:07,380][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 19:01:29,977][inference][INFO] - + Generation pass latency: 7.53e+00 (s)
- [2023-08-10 19:01:29,979][inference][INFO] - + Generation pass throughput: 53.10 (tokens/s)
- [2023-08-10 19:01:29,979][inference][INFO] - Saving inference results
- [2023-08-10 19:01:29,986][backend][INFO] - Cleaning backend
+ [2023-08-10 21:08:16,753][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 21:08:16,754][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 21:08:16,943][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 21:08:16,943][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 21:08:16,944][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 21:08:17,047][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 21:08:17,072][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 21:08:17,073][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-10 21:08:34,019][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 21:08:34,021][inference][INFO] - Running inference benchmark
+ [2023-08-10 21:08:41,849][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 21:08:41,973][memory_tracker][INFO] - Peak memory usage: 30778.720255999997 MB
+ [2023-08-10 21:08:41,973][inference][INFO] - + Forward pass peak memory: 30778.720255999997 (MB)
+ [2023-08-10 21:08:41,974][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 21:08:45,801][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:09:56,335][inference][INFO] - + Forward pass latency: 1.09e-01 (s)
+ [2023-08-10 21:09:56,336][inference][INFO] - + Forward pass throughput: 18.30 (samples/s)
+ [2023-08-10 21:09:56,336][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 21:10:03,374][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 21:10:24,471][inference][INFO] - + Generation pass latency: 7.03e+00 (s)
+ [2023-08-10 21:10:24,473][inference][INFO] - + Generation pass throughput: 56.90 (tokens/s)
+ [2023-08-10 21:10:24,473][inference][INFO] - Saving inference results
+ [2023-08-10 21:10:24,480][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/4/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
  ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,17000.431615999998,0.0312,128.0,6.11,131.0
+ 0,17000.431615999998,0.0317,126.0,6.19,129.0
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/4/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 19:01:30,504][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 19:01:30,505][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 19:01:30,768][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 19:01:30,769][backend][INFO] - Configuring pytorch backend
- [2023-08-10 19:01:30,769][backend][INFO] - + Checking initial device isolation
- [2023-08-10 19:01:30,872][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 19:01:30,907][pytorch][INFO] - + Disabling gradients
- [2023-08-10 19:01:30,908][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
- [2023-08-10 19:01:42,129][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 19:01:42,131][inference][INFO] - Running inference benchmark
- [2023-08-10 19:01:50,553][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 19:01:50,599][memory_tracker][INFO] - Peak memory usage: 17000.431615999998 MB
- [2023-08-10 19:01:50,599][inference][INFO] - + Forward pass peak memory: 17000.431615999998 (MB)
- [2023-08-10 19:01:50,599][inference][INFO] - + Warming up the forward pass
- [2023-08-10 19:01:51,363][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 19:02:39,819][inference][INFO] - + Forward pass latency: 3.12e-02 (s)
- [2023-08-10 19:02:39,820][inference][INFO] - + Forward pass throughput: 128.00 (samples/s)
- [2023-08-10 19:02:39,820][inference][INFO] - + Warming up the generation pass
- [2023-08-10 19:02:46,913][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 19:03:11,365][inference][INFO] - + Generation pass latency: 6.11e+00 (s)
- [2023-08-10 19:03:11,367][inference][INFO] - + Generation pass throughput: 131.00 (tokens/s)
- [2023-08-10 19:03:11,367][inference][INFO] - Saving inference results
- [2023-08-10 19:03:11,374][backend][INFO] - Cleaning backend
+ [2023-08-10 21:10:24,974][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 21:10:24,975][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 21:10:25,167][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 21:10:25,168][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 21:10:25,168][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 21:10:25,271][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 21:10:25,296][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 21:10:25,297][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-10 21:10:36,031][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 21:10:36,033][inference][INFO] - Running inference benchmark
+ [2023-08-10 21:10:43,849][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 21:10:43,901][memory_tracker][INFO] - Peak memory usage: 17000.431615999998 MB
+ [2023-08-10 21:10:43,901][inference][INFO] - + Forward pass peak memory: 17000.431615999998 (MB)
+ [2023-08-10 21:10:43,901][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 21:10:44,653][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:11:32,162][inference][INFO] - + Forward pass latency: 3.17e-02 (s)
+ [2023-08-10 21:11:32,163][inference][INFO] - + Forward pass throughput: 126.00 (samples/s)
+ [2023-08-10 21:11:32,163][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 21:11:39,345][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 21:12:04,095][inference][INFO] - + Generation pass latency: 6.19e+00 (s)
+ [2023-08-10 21:12:04,097][inference][INFO] - + Generation pass throughput: 129.00 (tokens/s)
+ [2023-08-10 21:12:04,098][inference][INFO] - Saving inference results
+ [2023-08-10 21:12:04,105][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/5/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
  ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,31481.266175999997,0.187,21.4,7.7,104.0
+ 0,31481.266175999997,0.187,21.4,7.67,104.0
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/5/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 19:03:11,889][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 19:03:11,890][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 19:03:12,145][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 19:03:12,146][backend][INFO] - Configuring pytorch backend
- [2023-08-10 19:03:12,146][backend][INFO] - + Checking initial device isolation
- [2023-08-10 19:03:12,248][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 19:03:12,284][pytorch][INFO] - + Disabling gradients
- [2023-08-10 19:03:12,284][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
- [2023-08-10 19:03:29,601][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 19:03:29,602][inference][INFO] - Running inference benchmark
- [2023-08-10 19:03:38,102][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 19:03:38,312][memory_tracker][INFO] - Peak memory usage: 31481.266175999997 MB
- [2023-08-10 19:03:38,312][inference][INFO] - + Forward pass peak memory: 31481.266175999997 (MB)
- [2023-08-10 19:03:38,316][inference][INFO] - + Warming up the forward pass
- [2023-08-10 19:03:45,239][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 19:04:59,912][inference][INFO] - + Forward pass latency: 1.87e-01 (s)
- [2023-08-10 19:04:59,913][inference][INFO] - + Forward pass throughput: 21.40 (samples/s)
- [2023-08-10 19:04:59,914][inference][INFO] - + Warming up the generation pass
- [2023-08-10 19:05:07,690][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 19:05:30,805][inference][INFO] - + Generation pass latency: 7.70e+00 (s)
- [2023-08-10 19:05:30,806][inference][INFO] - + Generation pass throughput: 104.00 (tokens/s)
- [2023-08-10 19:05:30,806][inference][INFO] - Saving inference results
- [2023-08-10 19:05:30,814][backend][INFO] - Cleaning backend
+ [2023-08-10 21:12:04,597][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 21:12:04,598][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 21:12:04,792][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 21:12:04,792][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 21:12:04,793][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 21:12:04,894][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 21:12:04,919][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 21:12:04,920][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-10 21:12:22,009][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 21:12:22,010][inference][INFO] - Running inference benchmark
+ [2023-08-10 21:12:29,869][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 21:12:30,082][memory_tracker][INFO] - Peak memory usage: 31481.266175999997 MB
+ [2023-08-10 21:12:30,082][inference][INFO] - + Forward pass peak memory: 31481.266175999997 (MB)
+ [2023-08-10 21:12:30,087][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 21:12:37,015][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:13:51,689][inference][INFO] - + Forward pass latency: 1.87e-01 (s)
+ [2023-08-10 21:13:51,691][inference][INFO] - + Forward pass throughput: 21.40 (samples/s)
+ [2023-08-10 21:13:51,692][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 21:13:59,433][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 21:14:22,438][inference][INFO] - + Generation pass latency: 7.67e+00 (s)
+ [2023-08-10 21:14:22,440][inference][INFO] - + Generation pass throughput: 104.00 (tokens/s)
+ [2023-08-10 21:14:22,440][inference][INFO] - Saving inference results
+ [2023-08-10 21:14:22,447][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/6/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
  ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,19498.139648,0.0982,163.0,6.36,503.0
+ 0,19498.139648,0.0981,163.0,6.44,497.0
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/6/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 19:05:31,399][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 19:05:31,401][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 19:05:31,613][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 19:05:31,614][backend][INFO] - Configuring pytorch backend
- [2023-08-10 19:05:31,614][backend][INFO] - + Checking initial device isolation
- [2023-08-10 19:05:31,718][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 19:05:31,752][pytorch][INFO] - + Disabling gradients
- [2023-08-10 19:05:31,753][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
- [2023-08-10 19:05:43,102][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 19:05:43,104][inference][INFO] - Running inference benchmark
- [2023-08-10 19:05:51,637][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 19:05:51,748][memory_tracker][INFO] - Peak memory usage: 19498.139648 MB
- [2023-08-10 19:05:51,748][inference][INFO] - + Forward pass peak memory: 19498.139648 (MB)
- [2023-08-10 19:05:51,748][inference][INFO] - + Warming up the forward pass
- [2023-08-10 19:05:54,367][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 19:06:48,398][inference][INFO] - + Forward pass latency: 9.82e-02 (s)
- [2023-08-10 19:06:48,398][inference][INFO] - + Forward pass throughput: 163.00 (samples/s)
- [2023-08-10 19:06:48,399][inference][INFO] - + Warming up the generation pass
- [2023-08-10 19:06:54,966][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 19:07:20,390][inference][INFO] - + Generation pass latency: 6.36e+00 (s)
- [2023-08-10 19:07:20,391][inference][INFO] - + Generation pass throughput: 503.00 (tokens/s)
- [2023-08-10 19:07:20,391][inference][INFO] - Saving inference results
- [2023-08-10 19:07:20,397][backend][INFO] - Cleaning backend
+ [2023-08-10 21:14:23,006][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 21:14:23,007][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 21:14:23,208][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 21:14:23,208][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 21:14:23,208][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 21:14:23,308][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 21:14:23,332][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 21:14:23,333][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-10 21:14:34,365][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 21:14:34,366][inference][INFO] - Running inference benchmark
+ [2023-08-10 21:14:42,294][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 21:14:42,405][memory_tracker][INFO] - Peak memory usage: 19498.139648 MB
+ [2023-08-10 21:14:42,405][inference][INFO] - + Forward pass peak memory: 19498.139648 (MB)
+ [2023-08-10 21:14:42,405][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 21:14:45,031][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:15:39,054][inference][INFO] - + Forward pass latency: 9.81e-02 (s)
+ [2023-08-10 21:15:39,055][inference][INFO] - + Forward pass throughput: 163.00 (samples/s)
+ [2023-08-10 21:15:39,055][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 21:15:45,748][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 21:16:11,510][inference][INFO] - + Generation pass latency: 6.44e+00 (s)
+ [2023-08-10 21:16:11,512][inference][INFO] - + Generation pass throughput: 497.00 (tokens/s)
+ [2023-08-10 21:16:11,512][inference][INFO] - Saving inference results
+ [2023-08-10 21:16:11,518][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/7/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
  ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,35824.467968,0.684,23.4,13.0,246.0
+ 0,35824.467968,0.683,23.4,12.9,248.0
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/7/main.log CHANGED
@@ -1,23 +1,23 @@
- [2023-08-10 19:07:20,984][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 19:07:20,985][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 19:07:21,189][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
- [2023-08-10 19:07:21,190][backend][INFO] - Configuring pytorch backend
- [2023-08-10 19:07:21,190][backend][INFO] - + Checking initial device isolation
- [2023-08-10 19:07:21,292][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 19:07:21,327][pytorch][INFO] - + Disabling gradients
- [2023-08-10 19:07:21,328][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
- [2023-08-10 19:07:38,627][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 19:07:38,628][inference][INFO] - Running inference benchmark
- [2023-08-10 19:07:47,362][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 19:07:48,070][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB
- [2023-08-10 19:07:48,070][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB)
- [2023-08-10 19:07:48,086][inference][INFO] - + Warming up the forward pass
- [2023-08-10 19:08:13,451][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 19:09:31,430][inference][INFO] - + Forward pass latency: 6.84e-01 (s)
- [2023-08-10 19:09:31,431][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
- [2023-08-10 19:09:31,432][inference][INFO] - + Warming up the generation pass
- [2023-08-10 19:09:45,269][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 19:10:11,324][inference][INFO] - + Generation pass latency: 1.30e+01 (s)
- [2023-08-10 19:10:11,327][inference][INFO] - + Generation pass throughput: 246.00 (tokens/s)
- [2023-08-10 19:10:11,327][inference][INFO] - Saving inference results
- [2023-08-10 19:10:11,333][backend][INFO] - Cleaning backend
+ [2023-08-10 21:16:12,105][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 21:16:12,106][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 21:16:12,300][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-10 21:16:12,301][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 21:16:12,301][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 21:16:12,403][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 21:16:12,429][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 21:16:12,430][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-10 21:16:29,791][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 21:16:29,793][inference][INFO] - Running inference benchmark
+ [2023-08-10 21:16:37,679][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 21:16:38,403][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB
+ [2023-08-10 21:16:38,403][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB)
+ [2023-08-10 21:16:38,420][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 21:17:03,773][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:18:21,714][inference][INFO] - + Forward pass latency: 6.83e-01 (s)
+ [2023-08-10 21:18:21,715][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
+ [2023-08-10 21:18:21,715][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 21:18:35,343][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 21:19:01,225][inference][INFO] - + Generation pass latency: 1.29e+01 (s)
+ [2023-08-10 21:19:01,227][inference][INFO] - + Generation pass throughput: 248.00 (tokens/s)
+ [2023-08-10 21:19:01,228][inference][INFO] - Saving inference results
+ [2023-08-10 21:19:01,234][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_bert_inference/0/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
  ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
- 0,460.41497599999997,0.00331,302.0
+ 0,459.997184,0.00302,331.0
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_bert_inference/0/main.log CHANGED
@@ -1,20 +1,20 @@
- [2023-08-10 20:42:47,142][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 20:42:47,143][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 20:42:47,336][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
- [2023-08-10 20:42:47,336][backend][INFO] - Configuring pytorch backend
- [2023-08-10 20:42:47,336][backend][INFO] - + Checking initial device isolation
- [2023-08-10 20:42:47,337][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 20:42:47,339][pytorch][INFO] - + Disabling gradients
- [2023-08-10 20:42:47,339][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
- [2023-08-10 20:42:47,917][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 20:42:47,918][inference][INFO] - Running inference benchmark
- [2023-08-10 20:42:48,051][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
- [2023-08-10 20:42:48,053][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 20:42:48,106][inference][INFO] - + Forward pass peak memory: 460.41497599999997 (MB)
- [2023-08-10 20:42:48,107][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
- [2023-08-10 20:42:48,109][inference][INFO] - + Warming up the forward pass
- [2023-08-10 20:42:48,142][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 20:42:58,250][inference][INFO] - + Forward pass latency: 3.31e-03 (s)
- [2023-08-10 20:42:58,252][inference][INFO] - + Forward pass throughput: 302.00 (samples/s)
- [2023-08-10 20:42:58,253][inference][INFO] - Saving inference results
- [2023-08-10 20:42:58,266][backend][INFO] - Cleaning backend
+ [2023-08-10 21:19:06,081][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 21:19:06,082][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 21:19:06,267][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-08-10 21:19:06,267][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 21:19:06,267][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 21:19:06,268][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 21:19:06,269][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 21:19:06,269][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-10 21:19:06,948][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 21:19:06,949][inference][INFO] - Running inference benchmark
+ [2023-08-10 21:19:07,074][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-10 21:19:07,075][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 21:19:07,126][inference][INFO] - + Forward pass peak memory: 459.997184 (MB)
+ [2023-08-10 21:19:07,127][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-10 21:19:07,129][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 21:19:07,160][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:19:17,276][inference][INFO] - + Forward pass latency: 3.02e-03 (s)
+ [2023-08-10 21:19:17,278][inference][INFO] - + Forward pass throughput: 331.00 (samples/s)
+ [2023-08-10 21:19:17,278][inference][INFO] - Saving inference results
+ [2023-08-10 21:19:17,291][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_gpt2_inference/0/inference_results.csv CHANGED
@@ -1,2 +1,2 @@
  ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
- 0,463.88428799999997,0.00393,254.0,0.483,207.0
+ 0,463.54022399999997,0.00387,258.0,0.53,189.0
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_gpt2_inference/0/main.log CHANGED
@@ -1,22 +1,22 @@
- [2023-08-10 20:43:02,728][benchmark][INFO] - Configuring inference benchmark
- [2023-08-10 20:43:02,729][benchmark][INFO] - + Setting seed(42)
- [2023-08-10 20:43:02,918][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
- [2023-08-10 20:43:02,918][backend][INFO] - Configuring pytorch backend
- [2023-08-10 20:43:02,918][backend][INFO] - + Checking initial device isolation
- [2023-08-10 20:43:02,918][backend][INFO] - + Checking contineous device isolation
- [2023-08-10 20:43:02,920][pytorch][INFO] - + Disabling gradients
- [2023-08-10 20:43:02,920][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
- [2023-08-10 20:43:03,908][pytorch][INFO] - + Turning on eval mode
- [2023-08-10 20:43:03,909][inference][INFO] - Running inference benchmark
- [2023-08-10 20:43:04,111][inference][INFO] - + Tracking forward pass peak memory
- [2023-08-10 20:43:04,161][inference][INFO] - + Forward pass peak memory: 463.88428799999997 (MB)
- [2023-08-10 20:43:04,163][inference][INFO] - + Warming up the forward pass
- [2023-08-10 20:43:04,196][inference][INFO] - + Tracking forward pass latency and throughput
- [2023-08-10 20:43:14,287][inference][INFO] - + Forward pass latency: 3.93e-03 (s)
- [2023-08-10 20:43:14,289][inference][INFO] - + Forward pass throughput: 254.00 (samples/s)
- [2023-08-10 20:43:14,290][inference][INFO] - + Warming up the generation pass
- [2023-08-10 20:43:14,856][inference][INFO] - + Tracking generation latency and throughput
- [2023-08-10 20:43:25,006][inference][INFO] - + Generation pass latency: 4.83e-01 (s)
- [2023-08-10 20:43:25,007][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s)
- [2023-08-10 20:43:25,007][inference][INFO] - Saving inference results
- [2023-08-10 20:43:25,020][backend][INFO] - Cleaning backend
+ [2023-08-10 21:19:21,855][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-10 21:19:21,856][benchmark][INFO] - + Setting seed(42)
+ [2023-08-10 21:19:22,037][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+ [2023-08-10 21:19:22,037][backend][INFO] - Configuring pytorch backend
+ [2023-08-10 21:19:22,037][backend][INFO] - + Checking initial device isolation
+ [2023-08-10 21:19:22,037][backend][INFO] - + Checking contineous device isolation
+ [2023-08-10 21:19:22,039][pytorch][INFO] - + Disabling gradients
+ [2023-08-10 21:19:22,039][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-10 21:19:22,713][pytorch][INFO] - + Turning on eval mode
+ [2023-08-10 21:19:22,714][inference][INFO] - Running inference benchmark
+ [2023-08-10 21:19:22,920][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-10 21:19:22,972][inference][INFO] - + Forward pass peak memory: 463.54022399999997 (MB)
+ [2023-08-10 21:19:22,974][inference][INFO] - + Warming up the forward pass
+ [2023-08-10 21:19:23,006][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-10 21:19:33,099][inference][INFO] - + Forward pass latency: 3.87e-03 (s)
+ [2023-08-10 21:19:33,102][inference][INFO] - + Forward pass throughput: 258.00 (samples/s)
+ [2023-08-10 21:19:33,103][inference][INFO] - + Warming up the generation pass
+ [2023-08-10 21:19:33,692][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-10 21:19:43,761][inference][INFO] - + Generation pass latency: 5.30e-01 (s)
+ [2023-08-10 21:19:43,762][inference][INFO] - + Generation pass throughput: 189.00 (tokens/s)
+ [2023-08-10 21:19:43,762][inference][INFO] - Saving inference results
+ [2023-08-10 21:19:43,776][backend][INFO] - Cleaning backend