Upload 6 files
Browse files
- pile_mul_fractal_2_8/config.json +51 -0
- pile_mul_fractal_2_8/sae.pt +3 -0
- pile_mul_fractal_4_4/config.json +51 -0
- pile_mul_fractal_4_4/sae.pt +3 -0
- pile_topk_l0_50_65k/config.json +51 -0
- pile_topk_l0_50_65k/sae.pt +3 -0
pile_mul_fractal_2_8/config.json
ADDED
@@ -0,0 +1,51 @@
+{
+    "model_name": "google/gemma-2-2b",
+    "layer": 12,
+    "hook_point": "resid_post",
+    "act_size": 2304,
+    "sae_type": "mul_fractal_topk",
+    "dict_size": 65536,
+    "aux_penalty": 0.03125,
+    "input_unit_norm": true,
+    "batch_norm_on_queries": false,
+    "affine_batch_norm": false,
+    "linear_heads": 0,
+    "topk2": 50,
+    "topk1": 50,
+    "topk2_warmup_steps_fraction": 0.0,
+    "start_topk2": 50,
+    "topk1_warmup_steps_fraction": 0.0,
+    "start_topk1": 50,
+    "topk2_aux": 512,
+    "cartesian_op": "mul",
+    "router_depth": 2,
+    "router_tree_width": null,
+    "num_mkeys": 2,
+    "num_nkeys": 8,
+    "num_heads": 4096,
+    "n_batches_to_dead": 10,
+    "lr": 0.0008,
+    "bandwidth": 0.001,
+    "l1_coeff": 0.0018,
+    "num_tokens": 799634235,
+    "seq_len": 1024,
+    "model_batch_size": 64,
+    "num_batches_in_buffer": 5,
+    "max_grad_norm": 1.0,
+    "batch_size": 8192,
+    "weight_decay": 0.0,
+    "warmup_fraction": 0.1,
+    "scheduler_type": "cosine_with_min_lr",
+    "device": "cuda",
+    "dtype": "torch.float32",
+    "sae_dtype": "torch.float32",
+    "dataset_path": "cerebras/SlimPajama-627B",
+    "wandb_project": "turbo-llama-lens",
+    "enable_wandb": true,
+    "sae_name": "sae",
+    "seed": 42,
+    "performance_log_steps": 100,
+    "save_checkpoint_steps": 15000000,
+    "wandb_run_suffix": "exp80_bench",
+    "sweep_pair": "{'dict_size': 65536, 'num_heads': 4096, 'num_mkeys': 2, 'num_nkeys': 8, 'num_tokens': 799634235, 'sae_type': 'mul_fractal_topk', 'start_topk1': 50, 'start_topk2': 50, 'topk1': 50}"
+}
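
The values above imply a factored dictionary: num_heads × num_mkeys × num_nkeys = 4096 × 2 × 8 = 65536 = dict_size. A minimal sketch (not part of this upload) that loads the config and checks that factorization, assuming the file sits at the path shown:

import json

with open("pile_mul_fractal_2_8/config.json") as f:
    cfg = json.load(f)

# For the mul_fractal_topk runs, dict_size appears to factor as
# num_heads * num_mkeys * num_nkeys (4096 * 2 * 8 = 65536).
assert cfg["num_heads"] * cfg["num_mkeys"] * cfg["num_nkeys"] == cfg["dict_size"]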
pile_mul_fractal_2_8/sae.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5c54175b1ab9e3a9e4c40601edd77d228932af7933c15d6dd5d421fdd1ad426
+size 981904728
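
sae.pt here is a Git LFS pointer (version, oid, size), not the checkpoint itself; the real ~982 MB file is fetched when the repo is cloned with LFS or downloaded through the Hub. A hedged sketch using huggingface_hub — the repo id below is a placeholder, since this page does not show it:

from huggingface_hub import hf_hub_download
import torch

path = hf_hub_download(
    repo_id="<user>/<repo>",  # placeholder: whichever repo received this upload
    filename="pile_mul_fractal_2_8/sae.pt",
)
state = torch.load(path, map_location="cpu")  # the Hub resolves the LFS pointer to real weights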
pile_mul_fractal_4_4/config.json
ADDED
@@ -0,0 +1,51 @@
+{
+    "model_name": "google/gemma-2-2b",
+    "layer": 12,
+    "hook_point": "resid_post",
+    "act_size": 2304,
+    "sae_type": "mul_fractal_topk",
+    "dict_size": 65536,
+    "aux_penalty": 0.03125,
+    "input_unit_norm": true,
+    "batch_norm_on_queries": false,
+    "affine_batch_norm": false,
+    "linear_heads": 0,
+    "topk2": 50,
+    "topk1": 50,
+    "topk2_warmup_steps_fraction": 0.0,
+    "start_topk2": 50,
+    "topk1_warmup_steps_fraction": 0.0,
+    "start_topk1": 50,
+    "topk2_aux": 512,
+    "cartesian_op": "mul",
+    "router_depth": 2,
+    "router_tree_width": null,
+    "num_mkeys": 4,
+    "num_nkeys": 4,
+    "num_heads": 4096,
+    "n_batches_to_dead": 10,
+    "lr": 0.0008,
+    "bandwidth": 0.001,
+    "l1_coeff": 0.0018,
+    "num_tokens": 999238222,
+    "seq_len": 1024,
+    "model_batch_size": 64,
+    "num_batches_in_buffer": 5,
+    "max_grad_norm": 1.0,
+    "batch_size": 8192,
+    "weight_decay": 0.0,
+    "warmup_fraction": 0.1,
+    "scheduler_type": "cosine_with_min_lr",
+    "device": "cuda",
+    "dtype": "torch.float32",
+    "sae_dtype": "torch.float32",
+    "dataset_path": "cerebras/SlimPajama-627B",
+    "wandb_project": "turbo-llama-lens",
+    "enable_wandb": true,
+    "sae_name": "sae",
+    "seed": 42,
+    "performance_log_steps": 100,
+    "save_checkpoint_steps": 15000000,
+    "wandb_run_suffix": "exp80_bench",
+    "sweep_pair": "{'dict_size': 65536, 'num_heads': 4096, 'num_mkeys': 4, 'num_nkeys': 4, 'num_tokens': 999238222, 'sae_type': 'mul_fractal_topk', 'start_topk1': 50, 'start_topk2': 50, 'topk1': 50}"
+}
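
This config matches pile_mul_fractal_2_8 except for the factorization (num_mkeys 2→4, num_nkeys 8→4, so 4096 × 4 × 4 still equals 65536) and the token budget. A quick sketch to confirm which keys actually differ, assuming both files are local:

import json

a = json.load(open("pile_mul_fractal_2_8/config.json"))
b = json.load(open("pile_mul_fractal_4_4/config.json"))
for k in a:
    if a[k] != b[k]:
        print(k, a[k], "->", b[k])
# Expected: num_mkeys, num_nkeys, num_tokens, and the derived sweep_pair string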
pile_mul_fractal_4_4/sae.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1e3d3e6b52962698843f4d715c6d74de506b0fca85b1162650e0029f81c37e7
+size 906374488
pile_topk_l0_50_65k/config.json
ADDED
@@ -0,0 +1,51 @@
+{
+    "model_name": "google/gemma-2-2b",
+    "layer": 12,
+    "hook_point": "resid_post",
+    "act_size": 2304,
+    "sae_type": "topk",
+    "dict_size": 65536,
+    "aux_penalty": 0.03125,
+    "input_unit_norm": true,
+    "batch_norm_on_queries": false,
+    "affine_batch_norm": false,
+    "linear_heads": 0,
+    "topk2": 50,
+    "topk1": 50,
+    "topk2_warmup_steps_fraction": 0.0,
+    "start_topk2": 50,
+    "topk1_warmup_steps_fraction": 0.0,
+    "start_topk1": 50,
+    "topk2_aux": 512,
+    "cartesian_op": "mul",
+    "router_depth": 2,
+    "router_tree_width": null,
+    "num_mkeys": null,
+    "num_nkeys": null,
+    "num_heads": -1,
+    "n_batches_to_dead": 10,
+    "lr": 0.0008,
+    "bandwidth": 0.001,
+    "l1_coeff": 0.0018,
+    "num_tokens": 500000000,
+    "seq_len": 1024,
+    "model_batch_size": 64,
+    "num_batches_in_buffer": 5,
+    "max_grad_norm": 1.0,
+    "batch_size": 8192,
+    "weight_decay": 0.0,
+    "warmup_fraction": 0.1,
+    "scheduler_type": "cosine_with_min_lr",
+    "device": "cuda",
+    "dtype": "torch.float32",
+    "sae_dtype": "torch.float32",
+    "dataset_path": "cerebras/SlimPajama-627B",
+    "wandb_project": "turbo-llama-lens",
+    "enable_wandb": true,
+    "sae_name": "sae",
+    "seed": 42,
+    "performance_log_steps": 100,
+    "save_checkpoint_steps": 15000000,
+    "wandb_run_suffix": "exp80_bench",
+    "sweep_pair": "{'dict_size': 65536, 'num_tokens': 500000000, 'sae_type': 'topk', 'start_topk1': 50, 'start_topk2': 50, 'topk1': 50}"
+}
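
Here num_mkeys/num_nkeys are null and num_heads is -1, marking this run as the flat (non-factored) TopK baseline. Back-of-envelope arithmetic on its training budget, under the assumption that each optimizer step consumes batch_size activations (one per token):

num_tokens, batch_size = 500_000_000, 8192
print(num_tokens // batch_size)  # ~61,035 optimizer steps for the baseline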
pile_topk_l0_50_65k/sae.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a23fac935d5096f0b297a19fc1f885596589d33f7e50c651138aa54360f7464
+size 1208495512
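
The size field of each pointer is the byte count of the underlying checkpoint, which can be sanity-checked against the config. A rough estimate for this baseline, assuming a flat float32 TopK SAE with separate encoder and decoder matrices plus biases (the actual state-dict layout may differ):

act_size, dict_size = 2304, 65536
params = 2 * act_size * dict_size + dict_size + act_size  # W_enc, W_dec, b_enc, b_dec
print(params * 4)  # 1_208_230_912 bytes, close to the 1_208_495_512 in this pointer
# The remainder would be serialization overhead and any extra tensors in the checkpoint.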