Adding regression benchmark for the transformers SHA a7da2996a00c0ea083012ac86ab70f0bc4799f33
Files changed:

- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/0/inference_results.csv +1 -1
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/0/main.log +23 -23
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/1/inference_results.csv +1 -1
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/1/main.log +23 -23
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/2/inference_results.csv +1 -1
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/2/main.log +23 -23
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/3/inference_results.csv +1 -1
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/3/main.log +23 -23
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/4/inference_results.csv +1 -1
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/4/main.log +23 -23
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/5/inference_results.csv +1 -1
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/5/main.log +23 -23
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/6/inference_results.csv +1 -1
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/6/main.log +23 -23
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/7/inference_results.csv +1 -1
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/7/main.log +23 -23
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_bert_inference/0/inference_results.csv +1 -1
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_bert_inference/0/main.log +20 -40
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_gpt2_inference/0/inference_results.csv +1 -1
- raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_gpt2_inference/0/main.log +22 -44
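
For regression tracking across commits, the per-run CSVs listed above can be collected into a single table. The sketch below is illustrative, not part of the benchmark tooling: it assumes the `raw_results/<run>/<benchmark>/<experiment>/inference_results.csv` layout shown in this commit, that pandas is installed, and `collect_results` is a hypothetical helper.

```python
# Hypothetical helper: gather every inference_results.csv under raw_results/
# into one DataFrame keyed by commit SHA, benchmark name, and experiment id.
from pathlib import Path

import pandas as pd

def collect_results(root: str = "raw_results") -> pd.DataFrame:
    frames = []
    for csv_path in Path(root).glob("*/*/*/inference_results.csv"):
        run_dir, benchmark, experiment = csv_path.parts[-4:-1]
        df = pd.read_csv(csv_path, index_col=0)
        df["commit"] = run_dir.split("_")[-1]   # trailing SHA in the dir name
        df["benchmark"] = benchmark             # e.g. llama_1gpu_inference
        df["experiment"] = experiment           # e.g. "0" .. "7"
        frames.append(df)
    return pd.concat(frames, ignore_index=True)

# Example: compare forward latency per benchmark/experiment across commits.
# print(collect_results().pivot_table(index=["benchmark", "experiment"],
#                                     columns="commit",
#                                     values="forward.latency(s)"))
```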
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/0/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,
+0,16195.125247999998,0.031,32.3,5.9,33.9
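
The batch size is not a column in `inference_results.csv`, but the row above is internally consistent, since throughput is samples (or tokens) per second. A quick consistency check (the batch and token counts are inferred, not recorded in the file):

```python
# forward: 0.031 s per step at 32.3 samples/s -> ~1 sample per step
print(0.031 * 32.3)   # ~1.0, consistent with an inferred batch size of 1
# generate: 5.9 s per pass at 33.9 tokens/s -> ~200 new tokens per pass
print(5.9 * 33.9)     # ~200
```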
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/0/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10 …] ×23 (old log lines truncated in this view)
+[2023-08-10 20:44:42,173][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 20:44:42,174][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 20:44:42,471][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 20:44:42,471][backend][INFO] - Configuring pytorch backend
+[2023-08-10 20:44:42,472][backend][INFO] - + Checking initial device isolation
+[2023-08-10 20:44:42,614][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 20:44:42,629][pytorch][INFO] - + Disabling gradients
+[2023-08-10 20:44:42,630][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+[2023-08-10 20:45:49,226][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 20:45:49,228][inference][INFO] - Running inference benchmark
+[2023-08-10 20:45:57,832][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 20:45:59,095][memory_tracker][INFO] - Peak memory usage: 16195.125247999998 MB
+[2023-08-10 20:45:59,096][inference][INFO] - + Forward pass peak memory: 16195.125247999998 (MB)
+[2023-08-10 20:45:59,096][inference][INFO] - + Warming up the forward pass
+[2023-08-10 20:45:59,407][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 20:46:19,759][inference][INFO] - + Forward pass latency: 3.10e-02 (s)
+[2023-08-10 20:46:19,760][inference][INFO] - + Forward pass throughput: 32.30 (samples/s)
+[2023-08-10 20:46:19,760][inference][INFO] - + Warming up the generation pass
+[2023-08-10 20:46:26,363][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 20:46:49,975][inference][INFO] - + Generation pass latency: 5.90e+00 (s)
+[2023-08-10 20:46:49,978][inference][INFO] - + Generation pass throughput: 33.90 (tokens/s)
+[2023-08-10 20:46:49,978][inference][INFO] - Saving inference results
+[2023-08-10 20:46:49,988][backend][INFO] - Cleaning backend
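
When only `main.log` is available, the same metrics can be scraped from the log lines. A minimal sketch, assuming the `[timestamp][logger][LEVEL] - message` format shown above; the regex is illustrative and may need adjusting for other benchmark variants:

```python
import re
from pathlib import Path

# Matches lines like:
# [2023-08-10 20:46:19,759][inference][INFO] - + Forward pass latency: 3.10e-02 (s)
METRIC = re.compile(
    r"\[[\d\- :,]+\]\[\w+\]\[\w+\] - \+? ?"
    r"(?P<name>[\w ]+?(?:latency|throughput|peak memory)): (?P<value>[-+\d.e]+)"
)

def parse_metrics(log_text: str) -> dict:
    """Extract 'metric name -> value' pairs from a main.log dump."""
    return {m["name"]: float(m["value"]) for m in METRIC.finditer(log_text)}

# Usage: parse_metrics(Path("main.log").read_text())
# -> {'Forward pass peak memory': 16195.125..., 'Forward pass latency': 0.031, ...}
```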
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/1/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,
+0,30317.346815999997,0.0642,15.6,5.64,35.5
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/1/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10 …] ×23 (old log lines truncated in this view)
+[2023-08-10 20:46:50,459][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 20:46:50,460][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 20:46:50,651][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 20:46:50,652][backend][INFO] - Configuring pytorch backend
+[2023-08-10 20:46:50,652][backend][INFO] - + Checking initial device isolation
+[2023-08-10 20:46:50,755][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 20:46:50,795][pytorch][INFO] - + Disabling gradients
+[2023-08-10 20:46:50,796][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+[2023-08-10 20:47:08,129][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 20:47:08,131][inference][INFO] - Running inference benchmark
+[2023-08-10 20:47:16,623][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 20:47:16,699][memory_tracker][INFO] - Peak memory usage: 30317.346815999997 MB
+[2023-08-10 20:47:16,699][inference][INFO] - + Forward pass peak memory: 30317.346815999997 (MB)
+[2023-08-10 20:47:16,700][inference][INFO] - + Warming up the forward pass
+[2023-08-10 20:47:18,829][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 20:48:25,144][inference][INFO] - + Forward pass latency: 6.42e-02 (s)
+[2023-08-10 20:48:25,144][inference][INFO] - + Forward pass throughput: 15.60 (samples/s)
+[2023-08-10 20:48:25,145][inference][INFO] - + Warming up the generation pass
+[2023-08-10 20:48:30,804][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 20:48:53,378][inference][INFO] - + Generation pass latency: 5.64e+00 (s)
+[2023-08-10 20:48:53,380][inference][INFO] - + Generation pass throughput: 35.50 (tokens/s)
+[2023-08-10 20:48:53,380][inference][INFO] - Saving inference results
+[2023-08-10 20:48:53,387][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/2/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,
+0,16381.771776,0.0313,63.9,6.15,65.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/2/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10 …] ×23 (old log lines truncated in this view)
+[2023-08-10 20:48:53,873][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 20:48:53,875][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 20:48:54,098][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 20:48:54,098][backend][INFO] - Configuring pytorch backend
+[2023-08-10 20:48:54,099][backend][INFO] - + Checking initial device isolation
+[2023-08-10 20:48:54,201][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 20:48:54,237][pytorch][INFO] - + Disabling gradients
+[2023-08-10 20:48:54,238][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+[2023-08-10 20:49:05,505][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 20:49:05,507][inference][INFO] - Running inference benchmark
+[2023-08-10 20:49:13,939][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 20:49:13,979][memory_tracker][INFO] - Peak memory usage: 16381.771776 MB
+[2023-08-10 20:49:13,979][inference][INFO] - + Forward pass peak memory: 16381.771776 (MB)
+[2023-08-10 20:49:13,979][inference][INFO] - + Warming up the forward pass
+[2023-08-10 20:49:14,457][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 20:49:45,157][inference][INFO] - + Forward pass latency: 3.13e-02 (s)
+[2023-08-10 20:49:45,158][inference][INFO] - + Forward pass throughput: 63.90 (samples/s)
+[2023-08-10 20:49:45,158][inference][INFO] - + Warming up the generation pass
+[2023-08-10 20:49:52,099][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 20:50:16,689][inference][INFO] - + Generation pass latency: 6.15e+00 (s)
+[2023-08-10 20:50:16,690][inference][INFO] - + Generation pass throughput: 65.00 (tokens/s)
+[2023-08-10 20:50:16,691][inference][INFO] - Saving inference results
+[2023-08-10 20:50:16,698][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/3/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,
+0,30778.720255999997,0.109,18.3,7.04,56.8
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/3/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10 …] ×23 (old log lines truncated in this view)
+[2023-08-10 20:50:17,187][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 20:50:17,187][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 20:50:17,378][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 20:50:17,378][backend][INFO] - Configuring pytorch backend
+[2023-08-10 20:50:17,378][backend][INFO] - + Checking initial device isolation
+[2023-08-10 20:50:17,481][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 20:50:17,519][pytorch][INFO] - + Disabling gradients
+[2023-08-10 20:50:17,520][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+[2023-08-10 20:50:35,128][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 20:50:35,130][inference][INFO] - Running inference benchmark
+[2023-08-10 20:50:43,722][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 20:50:43,848][memory_tracker][INFO] - Peak memory usage: 30778.720255999997 MB
+[2023-08-10 20:50:43,848][inference][INFO] - + Forward pass peak memory: 30778.720255999997 (MB)
+[2023-08-10 20:50:43,849][inference][INFO] - + Warming up the forward pass
+[2023-08-10 20:50:47,669][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 20:51:58,213][inference][INFO] - + Forward pass latency: 1.09e-01 (s)
+[2023-08-10 20:51:58,214][inference][INFO] - + Forward pass throughput: 18.30 (samples/s)
+[2023-08-10 20:51:58,214][inference][INFO] - + Warming up the generation pass
+[2023-08-10 20:52:05,263][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 20:52:26,390][inference][INFO] - + Generation pass latency: 7.04e+00 (s)
+[2023-08-10 20:52:26,392][inference][INFO] - + Generation pass throughput: 56.80 (tokens/s)
+[2023-08-10 20:52:26,392][inference][INFO] - Saving inference results
+[2023-08-10 20:52:26,400][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/4/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,
+0,17000.431615999998,0.0315,127.0,6.18,129.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/4/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10 …] ×23 (old log lines truncated in this view)
+[2023-08-10 20:52:26,910][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 20:52:26,911][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 20:52:27,096][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 20:52:27,097][backend][INFO] - Configuring pytorch backend
+[2023-08-10 20:52:27,097][backend][INFO] - + Checking initial device isolation
+[2023-08-10 20:52:27,201][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 20:52:27,240][pytorch][INFO] - + Disabling gradients
+[2023-08-10 20:52:27,241][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+[2023-08-10 20:52:40,576][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 20:52:40,577][inference][INFO] - Running inference benchmark
+[2023-08-10 20:52:49,273][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 20:52:49,319][memory_tracker][INFO] - Peak memory usage: 17000.431615999998 MB
+[2023-08-10 20:52:49,320][inference][INFO] - + Forward pass peak memory: 17000.431615999998 (MB)
+[2023-08-10 20:52:49,320][inference][INFO] - + Warming up the forward pass
+[2023-08-10 20:52:50,073][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 20:53:37,857][inference][INFO] - + Forward pass latency: 3.15e-02 (s)
+[2023-08-10 20:53:37,858][inference][INFO] - + Forward pass throughput: 127.00 (samples/s)
+[2023-08-10 20:53:37,859][inference][INFO] - + Warming up the generation pass
+[2023-08-10 20:53:45,026][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 20:54:09,759][inference][INFO] - + Generation pass latency: 6.18e+00 (s)
+[2023-08-10 20:54:09,761][inference][INFO] - + Generation pass throughput: 129.00 (tokens/s)
+[2023-08-10 20:54:09,761][inference][INFO] - Saving inference results
+[2023-08-10 20:54:09,773][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/5/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,
+0,31481.266175999997,0.187,21.4,7.93,101.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/5/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10 …] ×23 (old log lines truncated in this view)
+[2023-08-10 20:54:10,280][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 20:54:10,281][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 20:54:10,477][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 20:54:10,478][backend][INFO] - Configuring pytorch backend
+[2023-08-10 20:54:10,478][backend][INFO] - + Checking initial device isolation
+[2023-08-10 20:54:10,581][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 20:54:10,621][pytorch][INFO] - + Disabling gradients
+[2023-08-10 20:54:10,622][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+[2023-08-10 20:54:28,092][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 20:54:28,094][inference][INFO] - Running inference benchmark
+[2023-08-10 20:54:36,607][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 20:54:36,811][memory_tracker][INFO] - Peak memory usage: 31481.266175999997 MB
+[2023-08-10 20:54:36,812][inference][INFO] - + Forward pass peak memory: 31481.266175999997 (MB)
+[2023-08-10 20:54:36,816][inference][INFO] - + Warming up the forward pass
+[2023-08-10 20:54:43,735][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 20:55:58,389][inference][INFO] - + Forward pass latency: 1.87e-01 (s)
+[2023-08-10 20:55:58,391][inference][INFO] - + Forward pass throughput: 21.40 (samples/s)
+[2023-08-10 20:55:58,392][inference][INFO] - + Warming up the generation pass
+[2023-08-10 20:56:06,495][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 20:56:30,297][inference][INFO] - + Generation pass latency: 7.93e+00 (s)
+[2023-08-10 20:56:30,298][inference][INFO] - + Generation pass throughput: 101.00 (tokens/s)
+[2023-08-10 20:56:30,298][inference][INFO] - Saving inference results
+[2023-08-10 20:56:30,305][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/6/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,
+0,19498.139648,0.0995,161.0,8.2,390.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/6/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10 …] ×23 (old log lines truncated in this view)
+[2023-08-10 20:56:30,905][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 20:56:30,907][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 20:56:31,112][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 20:56:31,112][backend][INFO] - Configuring pytorch backend
+[2023-08-10 20:56:31,113][backend][INFO] - + Checking initial device isolation
+[2023-08-10 20:56:31,215][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 20:56:31,254][pytorch][INFO] - + Disabling gradients
+[2023-08-10 20:56:31,255][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+[2023-08-10 20:56:42,744][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 20:56:42,745][inference][INFO] - Running inference benchmark
+[2023-08-10 20:56:51,331][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 20:56:51,442][memory_tracker][INFO] - Peak memory usage: 19498.139648 MB
+[2023-08-10 20:56:51,442][inference][INFO] - + Forward pass peak memory: 19498.139648 (MB)
+[2023-08-10 20:56:51,442][inference][INFO] - + Warming up the forward pass
+[2023-08-10 20:56:54,107][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 20:57:48,317][inference][INFO] - + Forward pass latency: 9.95e-02 (s)
+[2023-08-10 20:57:48,318][inference][INFO] - + Forward pass throughput: 161.00 (samples/s)
+[2023-08-10 20:57:48,318][inference][INFO] - + Warming up the generation pass
+[2023-08-10 20:57:56,767][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 20:58:21,355][inference][INFO] - + Generation pass latency: 8.20e+00 (s)
+[2023-08-10 20:58:21,357][inference][INFO] - + Generation pass throughput: 390.00 (tokens/s)
+[2023-08-10 20:58:21,357][inference][INFO] - Saving inference results
+[2023-08-10 20:58:21,365][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/7/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,
+0,35824.467968,0.683,23.4,13.0,246.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/llama_1gpu_inference/7/main.log
CHANGED
@@ -1,23 +1,23 @@
-[2023-08-10 …] ×23 (old log lines truncated in this view)
+[2023-08-10 20:58:21,965][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 20:58:21,967][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 20:58:22,166][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-10 20:58:22,166][backend][INFO] - Configuring pytorch backend
+[2023-08-10 20:58:22,166][backend][INFO] - + Checking initial device isolation
+[2023-08-10 20:58:22,269][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 20:58:22,307][pytorch][INFO] - + Disabling gradients
+[2023-08-10 20:58:22,308][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+[2023-08-10 20:58:40,054][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 20:58:40,056][inference][INFO] - Running inference benchmark
+[2023-08-10 20:58:48,591][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 20:58:49,301][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB
+[2023-08-10 20:58:49,301][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB)
+[2023-08-10 20:58:49,318][inference][INFO] - + Warming up the forward pass
+[2023-08-10 20:59:14,651][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:00:32,551][inference][INFO] - + Forward pass latency: 6.83e-01 (s)
+[2023-08-10 21:00:32,552][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
+[2023-08-10 21:00:32,552][inference][INFO] - + Warming up the generation pass
+[2023-08-10 21:00:46,360][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 21:01:12,308][inference][INFO] - + Generation pass latency: 1.30e+01 (s)
+[2023-08-10 21:01:12,312][inference][INFO] - + Generation pass throughput: 246.00 (tokens/s)
+[2023-08-10 21:01:12,312][inference][INFO] - Saving inference results
+[2023-08-10 21:01:12,319][backend][INFO] - Cleaning backend
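
Judging by the dtype lines in each main.log, the eight llama experiments alternate float16 and float32 loads at increasing batch sizes. A hedged summary of the values above (batch sizes are inferred as latency × throughput, not recorded anywhere in the results):

```python
# Values copied from the eight llama_1gpu_inference CSVs above; dtype from
# each run's main.log. Batch sizes are inferred, not recorded.
runs = [
    # (experiment, dtype,  peak_MB,  fwd_lat_s, fwd_thr)
    (0, "float16", 16195.1, 0.0310,  32.3),
    (1, "float32", 30317.3, 0.0642,  15.6),
    (2, "float16", 16381.8, 0.0313,  63.9),
    (3, "float32", 30778.7, 0.1090,  18.3),
    (4, "float16", 17000.4, 0.0315, 127.0),
    (5, "float32", 31481.3, 0.1870,  21.4),
    (6, "float16", 19498.1, 0.0995, 161.0),
    (7, "float32", 35824.5, 0.6830,  23.4),
]
for exp, dtype, peak, lat, thr in runs:
    print(f"exp {exp}: {dtype:8s} peak={peak:8.1f} MB "
          f"batch~{round(lat * thr)} latency={lat:.4f} s")
# -> batch ~1, 1, 2, 2, 4, 4, 16, 16
```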
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_bert_inference/0/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
-0,
+0,460.140544,0.004,250.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_bert_inference/0/main.log
CHANGED
@@ -1,40 +1,20 @@
-[2023-08-10 …] ×20 (old log lines truncated in this view)
-[2023-08-10 18:52:05,787][benchmark][INFO] - Configuring inference benchmark
-[2023-08-10 18:52:05,789][benchmark][INFO] - + Setting seed(42)
-[2023-08-10 18:52:05,978][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
-[2023-08-10 18:52:05,978][backend][INFO] - Configuring pytorch backend
-[2023-08-10 18:52:05,978][backend][INFO] - + Checking initial device isolation
-[2023-08-10 18:52:05,978][backend][INFO] - + Checking contineous device isolation
-[2023-08-10 18:52:05,981][pytorch][INFO] - + Disabling gradients
-[2023-08-10 18:52:05,982][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
-[2023-08-10 18:52:06,629][pytorch][INFO] - + Turning on eval mode
-[2023-08-10 18:52:06,630][inference][INFO] - Running inference benchmark
-[2023-08-10 18:52:06,752][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
-[2023-08-10 18:52:06,753][inference][INFO] - + Tracking forward pass peak memory
-[2023-08-10 18:52:06,807][inference][INFO] - + Forward pass peak memory: 459.93984 (MB)
-[2023-08-10 18:52:06,809][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
-[2023-08-10 18:52:06,810][inference][INFO] - + Warming up the forward pass
-[2023-08-10 18:52:06,847][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-08-10 18:52:16,958][inference][INFO] - + Forward pass latency: 3.19e-03 (s)
-[2023-08-10 18:52:16,961][inference][INFO] - + Forward pass throughput: 313.00 (samples/s)
-[2023-08-10 18:52:16,961][inference][INFO] - Saving inference results
-[2023-08-10 18:52:16,975][backend][INFO] - Cleaning backend
+[2023-08-10 21:01:17,268][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 21:01:17,269][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 21:01:17,458][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+[2023-08-10 21:01:17,458][backend][INFO] - Configuring pytorch backend
+[2023-08-10 21:01:17,458][backend][INFO] - + Checking initial device isolation
+[2023-08-10 21:01:17,458][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 21:01:17,460][pytorch][INFO] - + Disabling gradients
+[2023-08-10 21:01:17,460][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+[2023-08-10 21:01:18,085][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 21:01:18,086][inference][INFO] - Running inference benchmark
+[2023-08-10 21:01:18,223][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+[2023-08-10 21:01:18,225][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 21:01:18,278][inference][INFO] - + Forward pass peak memory: 460.140544 (MB)
+[2023-08-10 21:01:18,279][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+[2023-08-10 21:01:18,281][inference][INFO] - + Warming up the forward pass
+[2023-08-10 21:01:18,323][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:01:28,421][inference][INFO] - + Forward pass latency: 4.00e-03 (s)
+[2023-08-10 21:01:28,423][inference][INFO] - + Forward pass throughput: 250.00 (samples/s)
+[2023-08-10 21:01:28,423][inference][INFO] - Saving inference results
+[2023-08-10 21:01:28,437][backend][INFO] - Cleaning backend
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_gpt2_inference/0/inference_results.csv
CHANGED
@@ -1,2 +1,2 @@
 ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,463.
+0,463.736832,0.00399,251.0,0.534,187.0
raw_results/2023-08-10_15:07:32_a7da2996a00c0ea083012ac86ab70f0bc4799f33/pytorch_gpt2_inference/0/main.log
CHANGED
@@ -1,44 +1,22 @@
-[2023-08-10 …] ×22 (old log lines truncated in this view)
-[2023-08-10 18:52:21,674][benchmark][INFO] - Configuring inference benchmark
-[2023-08-10 18:52:21,677][benchmark][INFO] - + Setting seed(42)
-[2023-08-10 18:52:21,861][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
-[2023-08-10 18:52:21,861][backend][INFO] - Configuring pytorch backend
-[2023-08-10 18:52:21,861][backend][INFO] - + Checking initial device isolation
-[2023-08-10 18:52:21,862][backend][INFO] - + Checking contineous device isolation
-[2023-08-10 18:52:21,863][pytorch][INFO] - + Disabling gradients
-[2023-08-10 18:52:21,864][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
-[2023-08-10 18:52:22,565][pytorch][INFO] - + Turning on eval mode
-[2023-08-10 18:52:22,565][inference][INFO] - Running inference benchmark
-[2023-08-10 18:52:22,881][inference][INFO] - + Tracking forward pass peak memory
-[2023-08-10 18:52:22,934][inference][INFO] - + Forward pass peak memory: 463.38048 (MB)
-[2023-08-10 18:52:22,935][inference][INFO] - + Warming up the forward pass
-[2023-08-10 18:52:22,969][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-08-10 18:52:33,064][inference][INFO] - + Forward pass latency: 3.90e-03 (s)
-[2023-08-10 18:52:33,067][inference][INFO] - + Forward pass throughput: 256.00 (samples/s)
-[2023-08-10 18:52:33,067][inference][INFO] - + Warming up the generation pass
-[2023-08-10 18:52:33,612][inference][INFO] - + Tracking generation latency and throughput
-[2023-08-10 18:52:43,797][inference][INFO] - + Generation pass latency: 5.09e-01 (s)
-[2023-08-10 18:52:43,797][inference][INFO] - + Generation pass throughput: 196.00 (tokens/s)
-[2023-08-10 18:52:43,797][inference][INFO] - Saving inference results
-[2023-08-10 18:52:43,811][backend][INFO] - Cleaning backend
+[2023-08-10 21:01:32,949][benchmark][INFO] - Configuring inference benchmark
+[2023-08-10 21:01:32,950][benchmark][INFO] - + Setting seed(42)
+[2023-08-10 21:01:33,134][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+[2023-08-10 21:01:33,135][backend][INFO] - Configuring pytorch backend
+[2023-08-10 21:01:33,135][backend][INFO] - + Checking initial device isolation
+[2023-08-10 21:01:33,135][backend][INFO] - + Checking contineous device isolation
+[2023-08-10 21:01:33,136][pytorch][INFO] - + Disabling gradients
+[2023-08-10 21:01:33,137][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+[2023-08-10 21:01:33,826][pytorch][INFO] - + Turning on eval mode
+[2023-08-10 21:01:33,826][inference][INFO] - Running inference benchmark
+[2023-08-10 21:01:34,042][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-10 21:01:34,092][inference][INFO] - + Forward pass peak memory: 463.736832 (MB)
+[2023-08-10 21:01:34,093][inference][INFO] - + Warming up the forward pass
+[2023-08-10 21:01:34,127][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-10 21:01:44,220][inference][INFO] - + Forward pass latency: 3.99e-03 (s)
+[2023-08-10 21:01:44,223][inference][INFO] - + Forward pass throughput: 251.00 (samples/s)
+[2023-08-10 21:01:44,223][inference][INFO] - + Warming up the generation pass
+[2023-08-10 21:01:44,818][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-10 21:01:54,969][inference][INFO] - + Generation pass latency: 5.34e-01 (s)
+[2023-08-10 21:01:54,970][inference][INFO] - + Generation pass throughput: 187.00 (tokens/s)
+[2023-08-10 21:01:54,970][inference][INFO] - Saving inference results
+[2023-08-10 21:01:54,987][backend][INFO] - Cleaning backend
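
The two CPU diffs above show both the previous (18:52) and the new (21:01) values, so a regression check reduces to a relative delta. A sketch with the forward-pass latencies copied from those diffs:

```python
# Relative change between the previous run (18:52) and this run (21:01),
# values copied from the two CPU main.log diffs above.
runs = {
    "pytorch_bert_inference": {"old": 3.19e-03, "new": 4.00e-03},  # forward latency (s)
    "pytorch_gpt2_inference": {"old": 3.90e-03, "new": 3.99e-03},
}
for name, r in runs.items():
    delta = (r["new"] - r["old"]) / r["old"] * 100
    print(f"{name}: forward latency {r['old']:.2e} -> {r['new']:.2e} s ({delta:+.1f}%)")
# bert: +25.4%, gpt2: +2.3%; whether that is a real regression or CPU noise
# takes more than one data point per commit to decide.
```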