Adding regression benchmark for the transformers SHA 347001237a8ff845fc23f678107fc505361f9f13
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/0/inference_results.csv +1 -1
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/0/main.log +23 -23
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/1/inference_results.csv +1 -1
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/1/main.log +23 -23
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/2/inference_results.csv +1 -1
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/2/main.log +23 -23
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/3/inference_results.csv +1 -1
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/3/main.log +23 -23
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/4/inference_results.csv +1 -1
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/4/main.log +23 -23
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/5/inference_results.csv +1 -1
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/5/main.log +23 -23
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/6/inference_results.csv +1 -1
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/6/main.log +23 -23
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/7/inference_results.csv +1 -1
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/7/main.log +23 -23
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_bert_inference/0/inference_results.csv +1 -1
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_bert_inference/0/main.log +20 -20
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_gpt2_inference/0/inference_results.csv +1 -1
- raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_gpt2_inference/0/main.log +22 -22
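The file list above follows a fixed layout: one timestamped directory per benchmarked commit, one subdirectory per sweep entry, each holding an `inference_results.csv` and its `main.log`. Below is a minimal sketch (assuming pandas is installed; nothing in it is part of this dataset itself) for collecting every CSV in this commit into a single table:

```python
from pathlib import Path

import pandas as pd

# Timestamped results directory for this transformers commit, as listed above.
root = Path("raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13")

frames = []
for csv_path in sorted(root.glob("*/*/inference_results.csv")):
    frame = pd.read_csv(csv_path, index_col=0)
    frame["benchmark"] = csv_path.parts[-3]   # e.g. "llama_1gpu_inference"
    frame["experiment"] = csv_path.parts[-2]  # sweep index, e.g. "0"
    frames.append(frame)

results = pd.concat(frames, ignore_index=True)
print(results[["benchmark", "experiment", "forward.latency(s)"]])
```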
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/0/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,16195.125247999998,0.
+0,16195.125247999998,0.0315,31.7,5.99,33.4
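Each results CSV is a single row keyed by the experiment index, with forward-pass peak memory, latency, and throughput, plus generation latency and throughput for generative models. A hedged sketch of how such a row could be screened against a previous commit's numbers; the baseline path and the 10% threshold are illustrative assumptions, not something this dataset prescribes:

```python
import pandas as pd

# Hypothetical baseline taken from an earlier commit's raw_results directory.
baseline = pd.read_csv("baseline/inference_results.csv", index_col=0)
current = pd.read_csv(
    "raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13"
    "/llama_1gpu_inference/0/inference_results.csv",
    index_col=0,
)

for metric in ["forward.latency(s)", "generate.latency(s)"]:
    change = (current[metric] - baseline[metric]) / baseline[metric]
    if change.iloc[0] > 0.10:  # rising latency is the regression direction
        print(f"possible regression in {metric}: +{change.iloc[0]:.1%}")
```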
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/0/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
+[2023-08-10 21:02:41,867][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 21:02:41,868][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 21:02:42,157][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 21:02:42,157][backend][INFO] - Configuring pytorch backend
+[2023-08-10 21:02:42,158][backend][INFO] - + Checking initial device isolation
+[2023-08-10 21:02:42,300][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 21:02:42,317][pytorch][INFO] - + Disabling gradients
+[2023-08-10 21:02:42,318][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+[2023-08-10 21:03:51,341][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 21:03:51,343][inference][INFO] - Running inference benchmark
+[2023-08-10 21:03:59,523][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 21:04:00,786][memory_tracker][INFO] - Peak memory usage: 16195.125247999998 MB
+[2023-08-10 21:04:00,786][inference][INFO] - + Forward pass peak memory: 16195.125247999998 (MB)
+[2023-08-10 21:04:00,786][inference][INFO] - + Warming up the forward pass
+[2023-08-10 21:04:01,102][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:04:21,440][inference][INFO] - + Forward pass latency: 3.15e-02 (s)
+[2023-08-10 21:04:21,441][inference][INFO] - + Forward pass throughput: 31.70 (samples/s)
+[2023-08-10 21:04:21,442][inference][INFO] - + Warming up the generation pass
+[2023-08-10 21:04:28,129][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 21:04:52,084][inference][INFO] - + Generation pass latency: 5.99e+00 (s)
+[2023-08-10 21:04:52,086][inference][INFO] - + Generation pass throughput: 33.40 (tokens/s)
+[2023-08-10 21:04:52,086][inference][INFO] - Saving inference results
+[2023-08-10 21:04:52,096][backend][INFO] - Cleaning backend
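The derived numbers in each log are internally consistent: forward throughput looks like batch size divided by forward latency, and generation throughput like batch size times generated tokens divided by generation latency. Batch size and generation length are not recorded in these logs, so the values below are back-solved assumptions (batch 1, ~200 new tokens) that happen to reproduce this run:

```python
# Sanity-check the derived metrics from the log above.
forward_latency = 3.15e-02  # s, "Forward pass latency"
batch_size = 1              # assumption: not recorded in the log
print(batch_size / forward_latency)  # ~31.7 samples/s, matches the log

generate_latency = 5.99     # s, "Generation pass latency"
new_tokens = 200            # assumption: back-solved from 33.4 tokens/s
print(batch_size * new_tokens / generate_latency)  # ~33.4 tokens/s
```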
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/1/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,30317.346815999997,0.
+0,30317.346815999997,0.0642,15.6,5.68,35.2
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/1/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
+[2023-08-10 21:04:52,568][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 21:04:52,569][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 21:04:52,764][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 21:04:52,764][backend][INFO] - Configuring pytorch backend
+[2023-08-10 21:04:52,764][backend][INFO] - + Checking initial device isolation
+[2023-08-10 21:04:52,864][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 21:04:52,891][pytorch][INFO] - + Disabling gradients
+[2023-08-10 21:04:52,892][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+[2023-08-10 21:05:09,629][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 21:05:09,630][inference][INFO] - Running inference benchmark
+[2023-08-10 21:05:17,422][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 21:05:17,497][memory_tracker][INFO] - Peak memory usage: 30317.346815999997 MB
+[2023-08-10 21:05:17,497][inference][INFO] - + Forward pass peak memory: 30317.346815999997 (MB)
+[2023-08-10 21:05:17,498][inference][INFO] - + Warming up the forward pass
+[2023-08-10 21:05:19,637][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:06:25,937][inference][INFO] - + Forward pass latency: 6.42e-02 (s)
+[2023-08-10 21:06:25,938][inference][INFO] - + Forward pass throughput: 15.60 (samples/s)
+[2023-08-10 21:06:25,938][inference][INFO] - + Warming up the generation pass
+[2023-08-10 21:06:31,637][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 21:06:54,360][inference][INFO] - + Generation pass latency: 5.68e+00 (s)
+[2023-08-10 21:06:54,363][inference][INFO] - + Generation pass throughput: 35.20 (tokens/s)
+[2023-08-10 21:06:54,363][inference][INFO] - Saving inference results
+[2023-08-10 21:06:54,370][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/2/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,16381.771776,0.
+0,16381.771776,0.0315,63.5,6.17,64.8
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/2/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
+[2023-08-10 21:06:54,938][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 21:06:54,940][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 21:06:55,132][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 21:06:55,132][backend][INFO] - Configuring pytorch backend
+[2023-08-10 21:06:55,133][backend][INFO] - + Checking initial device isolation
+[2023-08-10 21:06:55,236][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 21:06:55,260][pytorch][INFO] - + Disabling gradients
+[2023-08-10 21:06:55,261][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+[2023-08-10 21:07:05,882][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 21:07:05,884][inference][INFO] - Running inference benchmark
+[2023-08-10 21:07:13,653][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 21:07:13,692][memory_tracker][INFO] - Peak memory usage: 16381.771776 MB
+[2023-08-10 21:07:13,692][inference][INFO] - + Forward pass peak memory: 16381.771776 (MB)
+[2023-08-10 21:07:13,692][inference][INFO] - + Warming up the forward pass
+[2023-08-10 21:07:14,169][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:07:44,628][inference][INFO] - + Forward pass latency: 3.15e-02 (s)
+[2023-08-10 21:07:44,629][inference][INFO] - + Forward pass throughput: 63.50 (samples/s)
+[2023-08-10 21:07:44,630][inference][INFO] - + Warming up the generation pass
+[2023-08-10 21:07:51,600][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 21:08:16,272][inference][INFO] - + Generation pass latency: 6.17e+00 (s)
+[2023-08-10 21:08:16,274][inference][INFO] - + Generation pass throughput: 64.80 (tokens/s)
+[2023-08-10 21:08:16,274][inference][INFO] - Saving inference results
+[2023-08-10 21:08:16,281][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/3/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,30778.720255999997,0.109,18.3,7.
+0,30778.720255999997,0.109,18.3,7.03,56.9
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/3/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
+[2023-08-10 21:08:16,753][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 21:08:16,754][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 21:08:16,943][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 21:08:16,943][backend][INFO] - Configuring pytorch backend
+[2023-08-10 21:08:16,944][backend][INFO] - + Checking initial device isolation
+[2023-08-10 21:08:17,047][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 21:08:17,072][pytorch][INFO] - + Disabling gradients
+[2023-08-10 21:08:17,073][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+[2023-08-10 21:08:34,019][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 21:08:34,021][inference][INFO] - Running inference benchmark
+[2023-08-10 21:08:41,849][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 21:08:41,973][memory_tracker][INFO] - Peak memory usage: 30778.720255999997 MB
+[2023-08-10 21:08:41,973][inference][INFO] - + Forward pass peak memory: 30778.720255999997 (MB)
+[2023-08-10 21:08:41,974][inference][INFO] - + Warming up the forward pass
+[2023-08-10 21:08:45,801][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:09:56,335][inference][INFO] - + Forward pass latency: 1.09e-01 (s)
+[2023-08-10 21:09:56,336][inference][INFO] - + Forward pass throughput: 18.30 (samples/s)
+[2023-08-10 21:09:56,336][inference][INFO] - + Warming up the generation pass
+[2023-08-10 21:10:03,374][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 21:10:24,471][inference][INFO] - + Generation pass latency: 7.03e+00 (s)
+[2023-08-10 21:10:24,473][inference][INFO] - + Generation pass throughput: 56.90 (tokens/s)
+[2023-08-10 21:10:24,473][inference][INFO] - Saving inference results
+[2023-08-10 21:10:24,480][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/4/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,17000.431615999998,0.
+0,17000.431615999998,0.0317,126.0,6.19,129.0
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/4/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
+[2023-08-10 21:10:24,974][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 21:10:24,975][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 21:10:25,167][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 21:10:25,168][backend][INFO] - Configuring pytorch backend
+[2023-08-10 21:10:25,168][backend][INFO] - + Checking initial device isolation
+[2023-08-10 21:10:25,271][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 21:10:25,296][pytorch][INFO] - + Disabling gradients
+[2023-08-10 21:10:25,297][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+[2023-08-10 21:10:36,031][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 21:10:36,033][inference][INFO] - Running inference benchmark
+[2023-08-10 21:10:43,849][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 21:10:43,901][memory_tracker][INFO] - Peak memory usage: 17000.431615999998 MB
+[2023-08-10 21:10:43,901][inference][INFO] - + Forward pass peak memory: 17000.431615999998 (MB)
+[2023-08-10 21:10:43,901][inference][INFO] - + Warming up the forward pass
+[2023-08-10 21:10:44,653][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:11:32,162][inference][INFO] - + Forward pass latency: 3.17e-02 (s)
+[2023-08-10 21:11:32,163][inference][INFO] - + Forward pass throughput: 126.00 (samples/s)
+[2023-08-10 21:11:32,163][inference][INFO] - + Warming up the generation pass
+[2023-08-10 21:11:39,345][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 21:12:04,095][inference][INFO] - + Generation pass latency: 6.19e+00 (s)
+[2023-08-10 21:12:04,097][inference][INFO] - + Generation pass throughput: 129.00 (tokens/s)
+[2023-08-10 21:12:04,098][inference][INFO] - Saving inference results
+[2023-08-10 21:12:04,105][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/5/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,31481.266175999997,0.187,21.4,7.
+0,31481.266175999997,0.187,21.4,7.67,104.0
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/5/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
+[2023-08-10 21:12:04,597][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 21:12:04,598][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 21:12:04,792][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 21:12:04,792][backend][INFO] - Configuring pytorch backend
+[2023-08-10 21:12:04,793][backend][INFO] - + Checking initial device isolation
+[2023-08-10 21:12:04,894][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 21:12:04,919][pytorch][INFO] - + Disabling gradients
+[2023-08-10 21:12:04,920][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+[2023-08-10 21:12:22,009][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 21:12:22,010][inference][INFO] - Running inference benchmark
+[2023-08-10 21:12:29,869][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 21:12:30,082][memory_tracker][INFO] - Peak memory usage: 31481.266175999997 MB
+[2023-08-10 21:12:30,082][inference][INFO] - + Forward pass peak memory: 31481.266175999997 (MB)
+[2023-08-10 21:12:30,087][inference][INFO] - + Warming up the forward pass
+[2023-08-10 21:12:37,015][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:13:51,689][inference][INFO] - + Forward pass latency: 1.87e-01 (s)
+[2023-08-10 21:13:51,691][inference][INFO] - + Forward pass throughput: 21.40 (samples/s)
+[2023-08-10 21:13:51,692][inference][INFO] - + Warming up the generation pass
+[2023-08-10 21:13:59,433][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 21:14:22,438][inference][INFO] - + Generation pass latency: 7.67e+00 (s)
+[2023-08-10 21:14:22,440][inference][INFO] - + Generation pass throughput: 104.00 (tokens/s)
+[2023-08-10 21:14:22,440][inference][INFO] - Saving inference results
+[2023-08-10 21:14:22,447][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/6/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,19498.139648,0.
+0,19498.139648,0.0981,163.0,6.44,497.0
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/6/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
+[2023-08-10 21:14:23,006][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 21:14:23,007][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 21:14:23,208][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 21:14:23,208][backend][INFO] - Configuring pytorch backend
+[2023-08-10 21:14:23,208][backend][INFO] - + Checking initial device isolation
+[2023-08-10 21:14:23,308][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 21:14:23,332][pytorch][INFO] - + Disabling gradients
+[2023-08-10 21:14:23,333][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+[2023-08-10 21:14:34,365][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 21:14:34,366][inference][INFO] - Running inference benchmark
+[2023-08-10 21:14:42,294][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 21:14:42,405][memory_tracker][INFO] - Peak memory usage: 19498.139648 MB
+[2023-08-10 21:14:42,405][inference][INFO] - + Forward pass peak memory: 19498.139648 (MB)
+[2023-08-10 21:14:42,405][inference][INFO] - + Warming up the forward pass
+[2023-08-10 21:14:45,031][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:15:39,054][inference][INFO] - + Forward pass latency: 9.81e-02 (s)
+[2023-08-10 21:15:39,055][inference][INFO] - + Forward pass throughput: 163.00 (samples/s)
+[2023-08-10 21:15:39,055][inference][INFO] - + Warming up the generation pass
+[2023-08-10 21:15:45,748][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 21:16:11,510][inference][INFO] - + Generation pass latency: 6.44e+00 (s)
+[2023-08-10 21:16:11,512][inference][INFO] - + Generation pass throughput: 497.00 (tokens/s)
+[2023-08-10 21:16:11,512][inference][INFO] - Saving inference results
+[2023-08-10 21:16:11,518][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/7/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,35824.467968,0.
+0,35824.467968,0.683,23.4,12.9,248.0
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/llama_1gpu_inference/7/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10 19:
-[2023-08-10 19:
-[2023-08-10 19:
-[2023-08-10 19:
+[2023-08-10 21:16:12,105][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 21:16:12,106][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 21:16:12,300][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 21:16:12,301][backend][INFO] - Configuring pytorch backend
+[2023-08-10 21:16:12,301][backend][INFO] - + Checking initial device isolation
+[2023-08-10 21:16:12,403][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 21:16:12,429][pytorch][INFO] - + Disabling gradients
+[2023-08-10 21:16:12,430][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+[2023-08-10 21:16:29,791][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 21:16:29,793][inference][INFO] - Running inference benchmark
+[2023-08-10 21:16:37,679][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 21:16:38,403][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB
+[2023-08-10 21:16:38,403][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB)
+[2023-08-10 21:16:38,420][inference][INFO] - + Warming up the forward pass
+[2023-08-10 21:17:03,773][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:18:21,714][inference][INFO] - + Forward pass latency: 6.83e-01 (s)
+[2023-08-10 21:18:21,715][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
+[2023-08-10 21:18:21,715][inference][INFO] - + Warming up the generation pass
+[2023-08-10 21:18:35,343][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 21:19:01,225][inference][INFO] - + Generation pass latency: 1.29e+01 (s)
+[2023-08-10 21:19:01,227][inference][INFO] - + Generation pass throughput: 248.00 (tokens/s)
+[2023-08-10 21:19:01,228][inference][INFO] - Saving inference results
+[2023-08-10 21:19:01,234][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_bert_inference/0/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
-0,
+0,459.997184,0.00302,331.0
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_bert_inference/0/main.log
CHANGED
@@ -1,20 +1,20 @@
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
+[2023-08-10 21:19:06,081][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 21:19:06,082][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 21:19:06,267][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+[2023-08-10 21:19:06,267][backend][INFO] - Configuring pytorch backend
+[2023-08-10 21:19:06,267][backend][INFO] - + Checking initial device isolation
+[2023-08-10 21:19:06,268][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 21:19:06,269][pytorch][INFO] - + Disabling gradients
+[2023-08-10 21:19:06,269][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+[2023-08-10 21:19:06,948][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 21:19:06,949][inference][INFO] - Running inference benchmark
+[2023-08-10 21:19:07,074][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+[2023-08-10 21:19:07,075][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 21:19:07,126][inference][INFO] - + Forward pass peak memory: 459.997184 (MB)
+[2023-08-10 21:19:07,127][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+[2023-08-10 21:19:07,129][inference][INFO] - + Warming up the forward pass
+[2023-08-10 21:19:07,160][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:19:17,276][inference][INFO] - + Forward pass latency: 3.02e-03 (s)
+[2023-08-10 21:19:17,278][inference][INFO] - + Forward pass throughput: 331.00 (samples/s)
+[2023-08-10 21:19:17,278][inference][INFO] - Saving inference results
+[2023-08-10 21:19:17,291][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_gpt2_inference/0/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,463.
+0,463.54022399999997,0.00387,258.0,0.53,189.0
raw_results/2023-08-10_15:13:39_347001237a8ff845fc23f678107fc505361f9f13/pytorch_gpt2_inference/0/main.log
CHANGED
@@ -1,22 +1,22 @@
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
-[2023-08-10
+[2023-08-10 21:19:21,855][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 21:19:21,856][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 21:19:22,037][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+[2023-08-10 21:19:22,037][backend][INFO] - Configuring pytorch backend
+[2023-08-10 21:19:22,037][backend][INFO] - + Checking initial device isolation
+[2023-08-10 21:19:22,037][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 21:19:22,039][pytorch][INFO] - + Disabling gradients
+[2023-08-10 21:19:22,039][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+[2023-08-10 21:19:22,713][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 21:19:22,714][inference][INFO] - Running inference benchmark
+[2023-08-10 21:19:22,920][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 21:19:22,972][inference][INFO] - + Forward pass peak memory: 463.54022399999997 (MB)
+[2023-08-10 21:19:22,974][inference][INFO] - + Warming up the forward pass
+[2023-08-10 21:19:23,006][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:19:33,099][inference][INFO] - + Forward pass latency: 3.87e-03 (s)
+[2023-08-10 21:19:33,102][inference][INFO] - + Forward pass throughput: 258.00 (samples/s)
+[2023-08-10 21:19:33,103][inference][INFO] - + Warming up the generation pass
+[2023-08-10 21:19:33,692][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 21:19:43,761][inference][INFO] - + Generation pass latency: 5.30e-01 (s)
+[2023-08-10 21:19:43,762][inference][INFO] - + Generation pass throughput: 189.00 (tokens/s)
+[2023-08-10 21:19:43,762][inference][INFO] - Saving inference results
+[2023-08-10 21:19:43,776][backend][INFO] - Cleaning backend