Upload 59 files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full list of changes.
- mul_fractal_2_16_l0_128/config.json +51 -0
- mul_fractal_2_16_l0_128/sae.pt +3 -0
- mul_fractal_2_16_l0_16/config.json +51 -0
- mul_fractal_2_16_l0_16/sae.pt +3 -0
- mul_fractal_2_16_l0_256/config.json +51 -0
- mul_fractal_2_16_l0_256/sae.pt +3 -0
- mul_fractal_2_16_l0_32/config.json +51 -0
- mul_fractal_2_16_l0_32/sae.pt +3 -0
- mul_fractal_2_16_l0_64/config.json +51 -0
- mul_fractal_2_16_l0_64/sae.pt +3 -0
- mul_fractal_2_4_l0_128/config.json +51 -0
- mul_fractal_2_4_l0_128/sae.pt +3 -0
- mul_fractal_2_4_l0_16/config.json +51 -0
- mul_fractal_2_4_l0_16/sae.pt +3 -0
- mul_fractal_2_4_l0_256/config.json +51 -0
- mul_fractal_2_4_l0_256/sae.pt +3 -0
- mul_fractal_2_4_l0_32/config.json +51 -0
- mul_fractal_2_4_l0_32/sae.pt +3 -0
- mul_fractal_2_4_l0_64/config.json +51 -0
- mul_fractal_2_4_l0_64/sae.pt +3 -0
- mul_fractal_2_8_l0_128/config.json +51 -0
- mul_fractal_2_8_l0_128/sae.pt +3 -0
- mul_fractal_2_8_l0_16/config.json +51 -0
- mul_fractal_2_8_l0_16/sae.pt +3 -0
- mul_fractal_2_8_l0_256/config.json +51 -0
- mul_fractal_2_8_l0_256/sae.pt +3 -0
- mul_fractal_2_8_l0_32/config.json +51 -0
- mul_fractal_2_8_l0_32/sae.pt +3 -0
- mul_fractal_2_8_l0_64/config.json +51 -0
- mul_fractal_4_4_l0_128/config.json +51 -0
- mul_fractal_4_4_l0_128/sae.pt +3 -0
- mul_fractal_4_4_l0_16/config.json +51 -0
- mul_fractal_4_4_l0_16/sae.pt +3 -0
- mul_fractal_4_4_l0_256/config.json +51 -0
- mul_fractal_4_4_l0_256/sae.pt +3 -0
- mul_fractal_4_4_l0_32/config.json +51 -0
- mul_fractal_4_4_l0_32/sae.pt +3 -0
- mul_fractal_4_4_l0_64/config.json +51 -0
- mul_fractal_4_4_l0_64/sae.pt +3 -0
- mul_fractal_4_8_l0_128/config.json +51 -0
- mul_fractal_4_8_l0_128/sae.pt +3 -0
- mul_fractal_4_8_l0_16/config.json +51 -0
- mul_fractal_4_8_l0_16/sae.pt +3 -0
- mul_fractal_4_8_l0_256/config.json +51 -0
- mul_fractal_4_8_l0_256/sae.pt +3 -0
- mul_fractal_4_8_l0_32/config.json +51 -0
- mul_fractal_4_8_l0_32/sae.pt +3 -0
- mul_fractal_4_8_l0_64/config.json +51 -0
- mul_fractal_4_8_l0_64/sae.pt +3 -0
- topk_l0_128/config.json +51 -0
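Each directory pairs a training config.json with a Git-LFS-tracked sae.pt checkpoint. The snippet below is a minimal sketch (not part of this upload) of how one such pair could be loaded; the repo id is a placeholder, and the assumption that sae.pt is a torch.save() state dict is unverified.

```python
import json
import torch
from huggingface_hub import hf_hub_download

REPO_ID = "your-org/your-sae-repo"   # placeholder: the repo id is not stated in this diff
FOLDER = "mul_fractal_2_16_l0_128"   # any of the uploaded directories

# Fetch the small JSON config and the LFS-backed checkpoint from the Hub.
cfg_path = hf_hub_download(REPO_ID, f"{FOLDER}/config.json")
ckpt_path = hf_hub_download(REPO_ID, f"{FOLDER}/sae.pt")

with open(cfg_path) as f:
    cfg = json.load(f)

# Assumption: sae.pt holds a torch-serialized checkpoint for the SAE.
state = torch.load(ckpt_path, map_location="cpu")
print(cfg["sae_type"], cfg["dict_size"], type(state))
```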
mul_fractal_2_16_l0_128/config.json
ADDED
@@ -0,0 +1,51 @@
{
  "model_name": "google/gemma-2-2b",
  "layer": 12,
  "hook_point": "resid_post",
  "act_size": 2304,
  "sae_type": "mul_fractal_topk",
  "dict_size": 65536,
  "aux_penalty": 0.03125,
  "input_unit_norm": true,
  "batch_norm_on_queries": false,
  "affine_batch_norm": false,
  "linear_heads": 0,
  "topk2": 128,
  "topk1": 50,
  "topk2_warmup_steps_fraction": 0.0,
  "start_topk2": 50,
  "topk1_warmup_steps_fraction": 0.0,
  "start_topk1": 50,
  "topk2_aux": 512,
  "cartesian_op": "mul",
  "router_depth": 2,
  "router_tree_width": null,
  "num_mkeys": 2,
  "num_nkeys": 16,
  "num_heads": 2048,
  "n_batches_to_dead": 10,
  "lr": 0.0008,
  "bandwidth": 0.001,
  "l1_coeff": 0.0018,
  "num_tokens": 888362139,
  "seq_len": 1024,
  "model_batch_size": 64,
  "num_batches_in_buffer": 5,
  "max_grad_norm": 1.0,
  "batch_size": 8192,
  "weight_decay": 0.0,
  "warmup_fraction": 0.1,
  "scheduler_type": "cosine_with_min_lr",
  "device": "cuda",
  "dtype": "torch.float32",
  "sae_dtype": "torch.float32",
  "dataset_path": "HuggingFaceFW/fineweb-edu",
  "wandb_project": "turbo-llama-lens",
  "enable_wandb": true,
  "sae_name": "sae",
  "seed": 42,
  "performance_log_steps": 100,
  "save_checkpoint_steps": 15000000,
  "wandb_run_suffix": "ex72_for_sae_bench_gemma",
  "sweep_pair": "{'dict_size': 65536, 'num_heads': 2048, 'num_mkeys': 2, 'num_nkeys': 16, 'num_tokens': 888362139, 'sae_type': 'mul_fractal_topk', 'start_topk1': 50, 'start_topk2': 50, 'topk1': 50}"
}
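Across the mul_fractal_* configs in this upload, the dictionary size appears to factor as num_heads × num_mkeys × num_nkeys (2048 × 2 × 16 = 65536 in the config above). A quick sanity check over the (num_heads, num_mkeys, num_nkeys) tuples transcribed from these configs:

```python
# (num_heads, num_mkeys, num_nkeys) as listed in the config.json files of this upload.
sweeps = {
    "mul_fractal_2_16": (2048, 2, 16),
    "mul_fractal_2_8":  (4096, 2, 8),
    "mul_fractal_2_4":  (8192, 2, 4),
    "mul_fractal_4_4":  (4096, 4, 4),
    "mul_fractal_4_8":  (2048, 4, 8),
}
for name, (heads, m, n) in sweeps.items():
    assert heads * m * n == 65536, name  # every variant factors the 65536-entry dictionary
print("dict_size == num_heads * num_mkeys * num_nkeys for all variants")
```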
mul_fractal_2_16_l0_128/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f59b70805f5927fa1b119dee146475f1a78d7afcc9f55f06b98137d935a7aa2a
size 944139608
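The sae.pt entries in this diff are Git LFS pointer files (version, oid, size) rather than the weights themselves; the pointer above refers to a payload of 944,139,608 bytes (roughly 944 MB). The helper below is an illustrative sketch, not part of the upload, for inspecting such a pointer in a clone that has not yet run `git lfs pull`:

```python
# Parse a Git LFS pointer file like the sae.pt shown above.
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = read_lfs_pointer("mul_fractal_2_16_l0_128/sae.pt")
print(ptr["oid"], int(ptr["size"]))
# To materialise the actual checkpoint, run `git lfs pull` in the clone,
# or download the file via huggingface_hub instead of raw git.
```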
mul_fractal_2_16_l0_16/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_2_16_l0_128/config.json above, except "topk2": 16.
mul_fractal_2_16_l0_16/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0de8b3f8ccd77047cf6126c254aec2227f9243a40d52e5d1a2078e9b6fbf2bee
size 944139608
mul_fractal_2_16_l0_256/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_2_16_l0_128/config.json above, except "topk2": 256.
mul_fractal_2_16_l0_256/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c8590bfe9f0626751106a6f5a9d84c0066671b5f01c5d7ddfda3079101d792b6
size 944139608
mul_fractal_2_16_l0_32/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_2_16_l0_128/config.json above, except "topk2": 32.
mul_fractal_2_16_l0_32/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:68691d277ebc3c3b219f7ca214b44612dbf45b954a0cb61a109abbdd9976541f
size 944139608
mul_fractal_2_16_l0_64/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_2_16_l0_128/config.json above, except "topk2": 64.
mul_fractal_2_16_l0_64/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a3d5cf28d6bd75dffa482733b1a05c88b04ccc004ecee22a98f1aa6b329e025
size 944139608
mul_fractal_2_4_l0_128/config.json
ADDED
@@ -0,0 +1,51 @@
{
  "model_name": "google/gemma-2-2b",
  "layer": 12,
  "hook_point": "resid_post",
  "act_size": 2304,
  "sae_type": "mul_fractal_topk",
  "dict_size": 65536,
  "aux_penalty": 0.03125,
  "input_unit_norm": true,
  "batch_norm_on_queries": false,
  "affine_batch_norm": false,
  "linear_heads": 0,
  "topk2": 128,
  "topk1": 50,
  "topk2_warmup_steps_fraction": 0.0,
  "start_topk2": 50,
  "topk1_warmup_steps_fraction": 0.0,
  "start_topk1": 50,
  "topk2_aux": 512,
  "cartesian_op": "mul",
  "router_depth": 2,
  "router_tree_width": null,
  "num_mkeys": 2,
  "num_nkeys": 4,
  "num_heads": 8192,
  "n_batches_to_dead": 10,
  "lr": 0.0008,
  "bandwidth": 0.001,
  "l1_coeff": 0.0018,
  "num_tokens": 666497296,
  "seq_len": 1024,
  "model_batch_size": 64,
  "num_batches_in_buffer": 5,
  "max_grad_norm": 1.0,
  "batch_size": 8192,
  "weight_decay": 0.0,
  "warmup_fraction": 0.1,
  "scheduler_type": "cosine_with_min_lr",
  "device": "cuda",
  "dtype": "torch.float32",
  "sae_dtype": "torch.float32",
  "dataset_path": "HuggingFaceFW/fineweb-edu",
  "wandb_project": "turbo-llama-lens",
  "enable_wandb": true,
  "sae_name": "sae",
  "seed": 42,
  "performance_log_steps": 100,
  "save_checkpoint_steps": 15000000,
  "wandb_run_suffix": "ex72_for_sae_bench_gemma",
  "sweep_pair": "{'dict_size': 65536, 'num_heads': 8192, 'num_mkeys': 2, 'num_nkeys': 4, 'num_tokens': 666497296, 'sae_type': 'mul_fractal_topk', 'start_topk1': 50, 'start_topk2': 50, 'topk1': 50}"
}
mul_fractal_2_4_l0_128/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e28af90a4206ab9011daa741e24aab8afee51668422492ee88653e895e47424
size 1057434968
mul_fractal_2_4_l0_16/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_2_4_l0_128/config.json above, except "topk2": 16.
mul_fractal_2_4_l0_16/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0240a85d49b3d247917cc3584b82b4aab87de36405a0c518b48c5a79a00c5bd8
size 1057434968
mul_fractal_2_4_l0_256/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_2_4_l0_128/config.json above, except "topk2": 256.
mul_fractal_2_4_l0_256/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f60ce40dec6ad3dee6dc50c9c007d0aaf96fad98ccb7c98411fb68f1b93ccd57
size 1057434968
mul_fractal_2_4_l0_32/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_2_4_l0_128/config.json above, except "topk2": 32.
mul_fractal_2_4_l0_32/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7a54317922936455fcec74693bc1ced860ae23610cc9560096377ae548d07a42
size 1057434968
mul_fractal_2_4_l0_64/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_2_4_l0_128/config.json above, except "topk2": 64.
mul_fractal_2_4_l0_64/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f3d7542536f062e7ce966d7e9dd50af164bdc0729ff2093a0f2e0f0039c0c404
size 1057434968
mul_fractal_2_8_l0_128/config.json
ADDED
@@ -0,0 +1,51 @@
{
  "model_name": "google/gemma-2-2b",
  "layer": 12,
  "hook_point": "resid_post",
  "act_size": 2304,
  "sae_type": "mul_fractal_topk",
  "dict_size": 65536,
  "aux_penalty": 0.03125,
  "input_unit_norm": true,
  "batch_norm_on_queries": false,
  "affine_batch_norm": false,
  "linear_heads": 0,
  "topk2": 128,
  "topk1": 50,
  "topk2_warmup_steps_fraction": 0.0,
  "start_topk2": 50,
  "topk1_warmup_steps_fraction": 0.0,
  "start_topk1": 50,
  "topk2_aux": 512,
  "cartesian_op": "mul",
  "router_depth": 2,
  "router_tree_width": null,
  "num_mkeys": 2,
  "num_nkeys": 8,
  "num_heads": 4096,
  "n_batches_to_dead": 10,
  "lr": 0.0008,
  "bandwidth": 0.001,
  "l1_coeff": 0.0018,
  "num_tokens": 799634235,
  "seq_len": 1024,
  "model_batch_size": 64,
  "num_batches_in_buffer": 5,
  "max_grad_norm": 1.0,
  "batch_size": 8192,
  "weight_decay": 0.0,
  "warmup_fraction": 0.1,
  "scheduler_type": "cosine_with_min_lr",
  "device": "cuda",
  "dtype": "torch.float32",
  "sae_dtype": "torch.float32",
  "dataset_path": "HuggingFaceFW/fineweb-edu",
  "wandb_project": "turbo-llama-lens",
  "enable_wandb": true,
  "sae_name": "sae",
  "seed": 42,
  "performance_log_steps": 100,
  "save_checkpoint_steps": 15000000,
  "wandb_run_suffix": "ex72_for_sae_bench_gemma",
  "sweep_pair": "{'dict_size': 65536, 'num_heads': 4096, 'num_mkeys': 2, 'num_nkeys': 8, 'num_tokens': 799634235, 'sae_type': 'mul_fractal_topk', 'start_topk1': 50, 'start_topk2': 50, 'topk1': 50}"
}
mul_fractal_2_8_l0_128/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:814fe46c16ad86196b9056ddb4f186567ffd0ccae687b1cc14cc87380c3b27ba
size 981904728
mul_fractal_2_8_l0_16/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_2_8_l0_128/config.json above, except "topk2": 16.
mul_fractal_2_8_l0_16/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eebf8b2e065e22f886d315f4dcec1671add85daac3160697a99e1196bb55a3f5
size 981904728
mul_fractal_2_8_l0_256/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_2_8_l0_128/config.json above, except "topk2": 256.
mul_fractal_2_8_l0_256/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4747708976c209bd290a5a71e66e4877c3aed6417e988d832bdf7d7c1f7e0030
size 981904728
mul_fractal_2_8_l0_32/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_2_8_l0_128/config.json above, except "topk2": 32.
mul_fractal_2_8_l0_32/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:79abec4705739699f9e0f9ceeecc0b4bb681ba85043a452a24c664ed23aa152c
size 981904728
mul_fractal_2_8_l0_64/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_2_8_l0_128/config.json above, except "topk2": 64.
mul_fractal_4_4_l0_128/config.json
ADDED
@@ -0,0 +1,51 @@
{
  "model_name": "google/gemma-2-2b",
  "layer": 12,
  "hook_point": "resid_post",
  "act_size": 2304,
  "sae_type": "mul_fractal_topk",
  "dict_size": 65536,
  "aux_penalty": 0.03125,
  "input_unit_norm": true,
  "batch_norm_on_queries": false,
  "affine_batch_norm": false,
  "linear_heads": 0,
  "topk2": 128,
  "topk1": 50,
  "topk2_warmup_steps_fraction": 0.0,
  "start_topk2": 50,
  "topk1_warmup_steps_fraction": 0.0,
  "start_topk1": 50,
  "topk2_aux": 512,
  "cartesian_op": "mul",
  "router_depth": 2,
  "router_tree_width": null,
  "num_mkeys": 4,
  "num_nkeys": 4,
  "num_heads": 4096,
  "n_batches_to_dead": 10,
  "lr": 0.0008,
  "bandwidth": 0.001,
  "l1_coeff": 0.0018,
  "num_tokens": 999238222,
  "seq_len": 1024,
  "model_batch_size": 64,
  "num_batches_in_buffer": 5,
  "max_grad_norm": 1.0,
  "batch_size": 8192,
  "weight_decay": 0.0,
  "warmup_fraction": 0.1,
  "scheduler_type": "cosine_with_min_lr",
  "device": "cuda",
  "dtype": "torch.float32",
  "sae_dtype": "torch.float32",
  "dataset_path": "HuggingFaceFW/fineweb-edu",
  "wandb_project": "turbo-llama-lens",
  "enable_wandb": true,
  "sae_name": "sae",
  "seed": 42,
  "performance_log_steps": 100,
  "save_checkpoint_steps": 15000000,
  "wandb_run_suffix": "ex72_for_sae_bench_gemma",
  "sweep_pair": "{'dict_size': 65536, 'num_heads': 4096, 'num_mkeys': 4, 'num_nkeys': 4, 'num_tokens': 999238222, 'sae_type': 'mul_fractal_topk', 'start_topk1': 50, 'start_topk2': 50, 'topk1': 50}"
}
mul_fractal_4_4_l0_128/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9534a6632f9e2ea8e58aa4c782aaefe613bc1d626d4a9d75296c092e0a5c5955
size 906374488
mul_fractal_4_4_l0_16/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_4_4_l0_128/config.json above, except "topk2": 16.
mul_fractal_4_4_l0_16/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ff5f22a8bd962abb1727b834c129831e6c2c694dbb72e299ec256fc87f856e39
size 906374488
mul_fractal_4_4_l0_256/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_4_4_l0_128/config.json above, except "topk2": 256.
mul_fractal_4_4_l0_256/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:91f12b476bb1285b81f4e6d1e6dbc8d77b49ab316c089fa1b0c372851e5e7554
size 906374488
mul_fractal_4_4_l0_32/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_4_4_l0_128/config.json above, except "topk2": 32.
mul_fractal_4_4_l0_32/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:41b9b59f97cadd527ae34183ddd6b5550d407932a2c0a7a11a3f525cf1f08138
size 906374488
mul_fractal_4_4_l0_64/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_4_4_l0_128/config.json above, except "topk2": 64.
mul_fractal_4_4_l0_64/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ffb3ae395d3f8cb51a945a470dd79b81a629a07aa2f2327f273f837ce41803fe
size 906374488
mul_fractal_4_8_l0_128/config.json
ADDED
@@ -0,0 +1,51 @@
{
  "model_name": "google/gemma-2-2b",
  "layer": 12,
  "hook_point": "resid_post",
  "act_size": 2304,
  "sae_type": "mul_fractal_topk",
  "dict_size": 65536,
  "aux_penalty": 0.03125,
  "input_unit_norm": true,
  "batch_norm_on_queries": false,
  "affine_batch_norm": false,
  "linear_heads": 0,
  "topk2": 128,
  "topk1": 50,
  "topk2_warmup_steps_fraction": 0.0,
  "start_topk2": 50,
  "topk1_warmup_steps_fraction": 0.0,
  "start_topk1": 50,
  "topk2_aux": 512,
  "cartesian_op": "mul",
  "router_depth": 2,
  "router_tree_width": null,
  "num_mkeys": 4,
  "num_nkeys": 8,
  "num_heads": 2048,
  "n_batches_to_dead": 10,
  "lr": 0.0008,
  "bandwidth": 0.001,
  "l1_coeff": 0.0018,
  "num_tokens": 1331641354,
  "seq_len": 1024,
  "model_batch_size": 64,
  "num_batches_in_buffer": 5,
  "max_grad_norm": 1.0,
  "batch_size": 8192,
  "weight_decay": 0.0,
  "warmup_fraction": 0.1,
  "scheduler_type": "cosine_with_min_lr",
  "device": "cuda",
  "dtype": "torch.float32",
  "sae_dtype": "torch.float32",
  "dataset_path": "HuggingFaceFW/fineweb-edu",
  "wandb_project": "turbo-llama-lens",
  "enable_wandb": true,
  "sae_name": "sae",
  "seed": 42,
  "performance_log_steps": 100,
  "save_checkpoint_steps": 15000000,
  "wandb_run_suffix": "ex72_for_sae_bench_gemma",
  "sweep_pair": "{'dict_size': 65536, 'num_heads': 2048, 'num_mkeys': 4, 'num_nkeys': 8, 'num_tokens': 1331641354, 'sae_type': 'mul_fractal_topk', 'start_topk1': 50, 'start_topk2': 50, 'topk1': 50}"
}
mul_fractal_4_8_l0_128/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1b01988518bb330d6ad5e68964a644411fee10ac1c3eab15a4bbbea2f8b14e0f
size 830844248
mul_fractal_4_8_l0_16/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_4_8_l0_128/config.json above, except "topk2": 16.
mul_fractal_4_8_l0_16/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:68adc681c80db186acfb3ff52c1a915ae8dc06d2a1b74f4f3ae8a22a89e0693f
size 830844248
mul_fractal_4_8_l0_256/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_4_8_l0_128/config.json above, except "topk2": 256.
mul_fractal_4_8_l0_256/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:54785291ec38e9d7bc69a3a57999281a1305ece8ff1ba5421c3866d7bce04787
size 830844248
mul_fractal_4_8_l0_32/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_4_8_l0_128/config.json above, except "topk2": 32.
mul_fractal_4_8_l0_32/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:15e835f4080dea081fc92c9d8c30e0fa69ea8491f98620da5f957b926ddf1100
size 830844248
mul_fractal_4_8_l0_64/config.json
ADDED
@@ -0,0 +1,51 @@
Identical to mul_fractal_4_8_l0_128/config.json above, except "topk2": 64.
mul_fractal_4_8_l0_64/sae.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ea4015a06e64e303b5d95af31bac64cce4ff66e5d1706ea703b086981282b921
size 830844248
topk_l0_128/config.json
ADDED
@@ -0,0 +1,51 @@
{
  "model_name": "google/gemma-2-2b",
  "layer": 12,
  "hook_point": "resid_post",
  "act_size": 2304,
  "sae_type": "topk",
  "dict_size": 65536,
  "aux_penalty": 0.03125,
  "input_unit_norm": true,
  "batch_norm_on_queries": false,
  "affine_batch_norm": false,
  "linear_heads": 0,
  "topk2": 128,
  "topk1": 50,
  "topk2_warmup_steps_fraction": 0.0,
  "start_topk2": 50,
  "topk1_warmup_steps_fraction": 0.0,
  "start_topk1": 50,
  "topk2_aux": 512,
  "cartesian_op": "mul",
  "router_depth": 2,
  "router_tree_width": null,
  "num_mkeys": null,
  "num_nkeys": null,
  "num_heads": -1,
  "n_batches_to_dead": 10,
  "lr": 0.0008,
  "bandwidth": 0.001,
  "l1_coeff": 0.0018,
  "num_tokens": 500000000,
  "seq_len": 1024,
  "model_batch_size": 64,
  "num_batches_in_buffer": 5,
  "max_grad_norm": 1.0,
  "batch_size": 8192,
  "weight_decay": 0.0,
  "warmup_fraction": 0.1,
  "scheduler_type": "cosine_with_min_lr",
  "device": "cuda",
  "dtype": "torch.float32",
  "sae_dtype": "torch.float32",
  "dataset_path": "HuggingFaceFW/fineweb-edu",
  "wandb_project": "turbo-llama-lens",
  "enable_wandb": true,
  "sae_name": "sae",
  "seed": 42,
  "performance_log_steps": 100,
  "save_checkpoint_steps": 15000000,
  "wandb_run_suffix": "ex72_for_sae_bench_gemma",
  "sweep_pair": "{'dict_size': 65536, 'num_tokens': 500000000, 'sae_type': 'topk', 'start_topk1': 50, 'start_topk2': 50, 'topk1': 50}"
}
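The directory names encode the sweep settings (num_mkeys, num_nkeys, and the target L0 used as topk2), so they can be decoded without opening each config.json. The parser below is a hypothetical convenience, not part of the upload; it assumes the naming scheme seen in this file list:

```python
import re

def parse_sae_dir(name: str) -> dict:
    """Decode '<mul_fractal_M_N|topk>_l0_K' directory names used in this upload."""
    m = re.fullmatch(r"(?:mul_fractal_(\d+)_(\d+)|topk)_l0_(\d+)", name)
    if m is None:
        raise ValueError(f"unrecognised directory name: {name}")
    mkeys, nkeys, k = m.groups()
    return {
        "sae_type": "mul_fractal_topk" if mkeys else "topk",
        "num_mkeys": int(mkeys) if mkeys else None,
        "num_nkeys": int(nkeys) if nkeys else None,
        "topk2": int(k),
    }

print(parse_sae_dir("mul_fractal_4_8_l0_64"))  # {'sae_type': 'mul_fractal_topk', 'num_mkeys': 4, 'num_nkeys': 8, 'topk2': 64}
print(parse_sae_dir("topk_l0_128"))            # {'sae_type': 'topk', 'num_mkeys': None, 'num_nkeys': None, 'topk2': 128}
```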