datasetId
large_stringlengths 6
116
| author
large_stringlengths 2
42
| last_modified
large_stringdate 2021-04-29 15:34:29
2025-06-08 10:13:44
| downloads
int64 0
3.97M
| likes
int64 0
7.74k
| tags
large listlengths 1
7.92k
| task_categories
large listlengths 0
48
| createdAt
large_stringdate 2022-03-02 23:29:22
2025-06-08 10:11:06
| trending_score
float64 0
40
β | card
large_stringlengths 31
1.01M
|
---|---|---|---|---|---|---|---|---|---|
anirudhb11/R1-1.5b-Par-Temp-0.7-Ans-40-16384-s-42-deg-64-path-3-n-16000-s-15800-e-15900 | anirudhb11 | 2025-06-08T03:23:50Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-08T03:23:48Z | null | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: gold_answer
dtype: string
- name: raw_answer_0
dtype: string
- name: extracted_answer_0
dtype: string
- name: num_boxed_0
dtype: int64
- name: grade_0
dtype: bool
- name: ans_token_len_0
dtype: int64
- name: finished_0
dtype: bool
- name: raw_answer_1
dtype: string
- name: extracted_answer_1
dtype: string
- name: num_boxed_1
dtype: int64
- name: grade_1
dtype: bool
- name: ans_token_len_1
dtype: int64
- name: finished_1
dtype: bool
- name: raw_answer_2
dtype: string
- name: extracted_answer_2
dtype: string
- name: num_boxed_2
dtype: int64
- name: grade_2
dtype: bool
- name: ans_token_len_2
dtype: int64
- name: finished_2
dtype: bool
- name: raw_answer_3
dtype: string
- name: extracted_answer_3
dtype: string
- name: num_boxed_3
dtype: int64
- name: grade_3
dtype: bool
- name: ans_token_len_3
dtype: int64
- name: finished_3
dtype: bool
- name: raw_answer_4
dtype: string
- name: extracted_answer_4
dtype: string
- name: num_boxed_4
dtype: int64
- name: grade_4
dtype: bool
- name: ans_token_len_4
dtype: int64
- name: finished_4
dtype: bool
- name: raw_answer_5
dtype: string
- name: extracted_answer_5
dtype: string
- name: num_boxed_5
dtype: int64
- name: grade_5
dtype: bool
- name: ans_token_len_5
dtype: int64
- name: finished_5
dtype: bool
- name: raw_answer_6
dtype: string
- name: extracted_answer_6
dtype: string
- name: num_boxed_6
dtype: int64
- name: grade_6
dtype: bool
- name: ans_token_len_6
dtype: int64
- name: finished_6
dtype: bool
- name: raw_answer_7
dtype: string
- name: extracted_answer_7
dtype: string
- name: num_boxed_7
dtype: int64
- name: grade_7
dtype: bool
- name: ans_token_len_7
dtype: int64
- name: finished_7
dtype: bool
- name: raw_answer_8
dtype: string
- name: extracted_answer_8
dtype: string
- name: num_boxed_8
dtype: int64
- name: grade_8
dtype: bool
- name: ans_token_len_8
dtype: int64
- name: finished_8
dtype: bool
- name: raw_answer_9
dtype: string
- name: extracted_answer_9
dtype: string
- name: num_boxed_9
dtype: int64
- name: grade_9
dtype: bool
- name: ans_token_len_9
dtype: int64
- name: finished_9
dtype: bool
- name: raw_answer_10
dtype: string
- name: extracted_answer_10
dtype: string
- name: num_boxed_10
dtype: int64
- name: grade_10
dtype: bool
- name: ans_token_len_10
dtype: int64
- name: finished_10
dtype: bool
- name: raw_answer_11
dtype: string
- name: extracted_answer_11
dtype: string
- name: num_boxed_11
dtype: int64
- name: grade_11
dtype: bool
- name: ans_token_len_11
dtype: int64
- name: finished_11
dtype: bool
- name: raw_answer_12
dtype: string
- name: extracted_answer_12
dtype: string
- name: num_boxed_12
dtype: int64
- name: grade_12
dtype: bool
- name: ans_token_len_12
dtype: int64
- name: finished_12
dtype: bool
- name: raw_answer_13
dtype: string
- name: extracted_answer_13
dtype: string
- name: num_boxed_13
dtype: int64
- name: grade_13
dtype: bool
- name: ans_token_len_13
dtype: int64
- name: finished_13
dtype: bool
- name: raw_answer_14
dtype: string
- name: extracted_answer_14
dtype: string
- name: num_boxed_14
dtype: int64
- name: grade_14
dtype: bool
- name: ans_token_len_14
dtype: int64
- name: finished_14
dtype: bool
- name: raw_answer_15
dtype: string
- name: extracted_answer_15
dtype: string
- name: num_boxed_15
dtype: int64
- name: grade_15
dtype: bool
- name: ans_token_len_15
dtype: int64
- name: finished_15
dtype: bool
- name: raw_answer_16
dtype: string
- name: extracted_answer_16
dtype: string
- name: num_boxed_16
dtype: int64
- name: grade_16
dtype: bool
- name: ans_token_len_16
dtype: int64
- name: finished_16
dtype: bool
- name: raw_answer_17
dtype: string
- name: extracted_answer_17
dtype: string
- name: num_boxed_17
dtype: int64
- name: grade_17
dtype: bool
- name: ans_token_len_17
dtype: int64
- name: finished_17
dtype: bool
- name: raw_answer_18
dtype: string
- name: extracted_answer_18
dtype: string
- name: num_boxed_18
dtype: int64
- name: grade_18
dtype: bool
- name: ans_token_len_18
dtype: int64
- name: finished_18
dtype: bool
- name: raw_answer_19
dtype: string
- name: extracted_answer_19
dtype: string
- name: num_boxed_19
dtype: int64
- name: grade_19
dtype: bool
- name: ans_token_len_19
dtype: int64
- name: finished_19
dtype: bool
- name: raw_answer_20
dtype: string
- name: extracted_answer_20
dtype: string
- name: num_boxed_20
dtype: int64
- name: grade_20
dtype: bool
- name: ans_token_len_20
dtype: int64
- name: finished_20
dtype: bool
- name: raw_answer_21
dtype: string
- name: extracted_answer_21
dtype: string
- name: num_boxed_21
dtype: int64
- name: grade_21
dtype: bool
- name: ans_token_len_21
dtype: int64
- name: finished_21
dtype: bool
- name: raw_answer_22
dtype: string
- name: extracted_answer_22
dtype: string
- name: num_boxed_22
dtype: int64
- name: grade_22
dtype: bool
- name: ans_token_len_22
dtype: int64
- name: finished_22
dtype: bool
- name: raw_answer_23
dtype: string
- name: extracted_answer_23
dtype: string
- name: num_boxed_23
dtype: int64
- name: grade_23
dtype: bool
- name: ans_token_len_23
dtype: int64
- name: finished_23
dtype: bool
- name: raw_answer_24
dtype: string
- name: extracted_answer_24
dtype: string
- name: num_boxed_24
dtype: int64
- name: grade_24
dtype: bool
- name: ans_token_len_24
dtype: int64
- name: finished_24
dtype: bool
- name: raw_answer_25
dtype: string
- name: extracted_answer_25
dtype: string
- name: num_boxed_25
dtype: int64
- name: grade_25
dtype: bool
- name: ans_token_len_25
dtype: int64
- name: finished_25
dtype: bool
- name: raw_answer_26
dtype: string
- name: extracted_answer_26
dtype: string
- name: num_boxed_26
dtype: int64
- name: grade_26
dtype: bool
- name: ans_token_len_26
dtype: int64
- name: finished_26
dtype: bool
- name: raw_answer_27
dtype: string
- name: extracted_answer_27
dtype: string
- name: num_boxed_27
dtype: int64
- name: grade_27
dtype: bool
- name: ans_token_len_27
dtype: int64
- name: finished_27
dtype: bool
- name: raw_answer_28
dtype: string
- name: extracted_answer_28
dtype: string
- name: num_boxed_28
dtype: int64
- name: grade_28
dtype: bool
- name: ans_token_len_28
dtype: int64
- name: finished_28
dtype: bool
- name: raw_answer_29
dtype: string
- name: extracted_answer_29
dtype: string
- name: num_boxed_29
dtype: int64
- name: grade_29
dtype: bool
- name: ans_token_len_29
dtype: int64
- name: finished_29
dtype: bool
- name: raw_answer_30
dtype: string
- name: extracted_answer_30
dtype: string
- name: num_boxed_30
dtype: int64
- name: grade_30
dtype: bool
- name: ans_token_len_30
dtype: int64
- name: finished_30
dtype: bool
- name: raw_answer_31
dtype: string
- name: extracted_answer_31
dtype: string
- name: num_boxed_31
dtype: int64
- name: grade_31
dtype: bool
- name: ans_token_len_31
dtype: int64
- name: finished_31
dtype: bool
- name: raw_answer_32
dtype: string
- name: extracted_answer_32
dtype: string
- name: num_boxed_32
dtype: int64
- name: grade_32
dtype: bool
- name: ans_token_len_32
dtype: int64
- name: finished_32
dtype: bool
- name: raw_answer_33
dtype: string
- name: extracted_answer_33
dtype: string
- name: num_boxed_33
dtype: int64
- name: grade_33
dtype: bool
- name: ans_token_len_33
dtype: int64
- name: finished_33
dtype: bool
- name: raw_answer_34
dtype: string
- name: extracted_answer_34
dtype: string
- name: num_boxed_34
dtype: int64
- name: grade_34
dtype: bool
- name: ans_token_len_34
dtype: int64
- name: finished_34
dtype: bool
- name: raw_answer_35
dtype: string
- name: extracted_answer_35
dtype: string
- name: num_boxed_35
dtype: int64
- name: grade_35
dtype: bool
- name: ans_token_len_35
dtype: int64
- name: finished_35
dtype: bool
- name: raw_answer_36
dtype: string
- name: extracted_answer_36
dtype: string
- name: num_boxed_36
dtype: int64
- name: grade_36
dtype: bool
- name: ans_token_len_36
dtype: int64
- name: finished_36
dtype: bool
- name: raw_answer_37
dtype: string
- name: extracted_answer_37
dtype: string
- name: num_boxed_37
dtype: int64
- name: grade_37
dtype: bool
- name: ans_token_len_37
dtype: int64
- name: finished_37
dtype: bool
- name: raw_answer_38
dtype: string
- name: extracted_answer_38
dtype: string
- name: num_boxed_38
dtype: int64
- name: grade_38
dtype: bool
- name: ans_token_len_38
dtype: int64
- name: finished_38
dtype: bool
- name: raw_answer_39
dtype: string
- name: extracted_answer_39
dtype: string
- name: num_boxed_39
dtype: int64
- name: grade_39
dtype: bool
- name: ans_token_len_39
dtype: int64
- name: finished_39
dtype: bool
splits:
- name: train
num_bytes: 79332230
num_examples: 100
download_size: 17730887
dataset_size: 79332230
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
anirudhb11/R1-1.5b-Par-Temp-0.7-Ans-40-16384-s-42-deg-32-path-3-n-8000-s-100-e-200 | anirudhb11 | 2025-06-08T03:11:46Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-08T03:11:43Z | null | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: gold_answer
dtype: string
- name: raw_answer_0
dtype: string
- name: extracted_answer_0
dtype: string
- name: num_boxed_0
dtype: int64
- name: grade_0
dtype: bool
- name: ans_token_len_0
dtype: int64
- name: finished_0
dtype: bool
- name: raw_answer_1
dtype: string
- name: extracted_answer_1
dtype: string
- name: num_boxed_1
dtype: int64
- name: grade_1
dtype: bool
- name: ans_token_len_1
dtype: int64
- name: finished_1
dtype: bool
- name: raw_answer_2
dtype: string
- name: extracted_answer_2
dtype: string
- name: num_boxed_2
dtype: int64
- name: grade_2
dtype: bool
- name: ans_token_len_2
dtype: int64
- name: finished_2
dtype: bool
- name: raw_answer_3
dtype: string
- name: extracted_answer_3
dtype: string
- name: num_boxed_3
dtype: int64
- name: grade_3
dtype: bool
- name: ans_token_len_3
dtype: int64
- name: finished_3
dtype: bool
- name: raw_answer_4
dtype: string
- name: extracted_answer_4
dtype: string
- name: num_boxed_4
dtype: int64
- name: grade_4
dtype: bool
- name: ans_token_len_4
dtype: int64
- name: finished_4
dtype: bool
- name: raw_answer_5
dtype: string
- name: extracted_answer_5
dtype: string
- name: num_boxed_5
dtype: int64
- name: grade_5
dtype: bool
- name: ans_token_len_5
dtype: int64
- name: finished_5
dtype: bool
- name: raw_answer_6
dtype: string
- name: extracted_answer_6
dtype: string
- name: num_boxed_6
dtype: int64
- name: grade_6
dtype: bool
- name: ans_token_len_6
dtype: int64
- name: finished_6
dtype: bool
- name: raw_answer_7
dtype: string
- name: extracted_answer_7
dtype: string
- name: num_boxed_7
dtype: int64
- name: grade_7
dtype: bool
- name: ans_token_len_7
dtype: int64
- name: finished_7
dtype: bool
- name: raw_answer_8
dtype: string
- name: extracted_answer_8
dtype: string
- name: num_boxed_8
dtype: int64
- name: grade_8
dtype: bool
- name: ans_token_len_8
dtype: int64
- name: finished_8
dtype: bool
- name: raw_answer_9
dtype: string
- name: extracted_answer_9
dtype: string
- name: num_boxed_9
dtype: int64
- name: grade_9
dtype: bool
- name: ans_token_len_9
dtype: int64
- name: finished_9
dtype: bool
- name: raw_answer_10
dtype: string
- name: extracted_answer_10
dtype: string
- name: num_boxed_10
dtype: int64
- name: grade_10
dtype: bool
- name: ans_token_len_10
dtype: int64
- name: finished_10
dtype: bool
- name: raw_answer_11
dtype: string
- name: extracted_answer_11
dtype: string
- name: num_boxed_11
dtype: int64
- name: grade_11
dtype: bool
- name: ans_token_len_11
dtype: int64
- name: finished_11
dtype: bool
- name: raw_answer_12
dtype: string
- name: extracted_answer_12
dtype: string
- name: num_boxed_12
dtype: int64
- name: grade_12
dtype: bool
- name: ans_token_len_12
dtype: int64
- name: finished_12
dtype: bool
- name: raw_answer_13
dtype: string
- name: extracted_answer_13
dtype: string
- name: num_boxed_13
dtype: int64
- name: grade_13
dtype: bool
- name: ans_token_len_13
dtype: int64
- name: finished_13
dtype: bool
- name: raw_answer_14
dtype: string
- name: extracted_answer_14
dtype: string
- name: num_boxed_14
dtype: int64
- name: grade_14
dtype: bool
- name: ans_token_len_14
dtype: int64
- name: finished_14
dtype: bool
- name: raw_answer_15
dtype: string
- name: extracted_answer_15
dtype: string
- name: num_boxed_15
dtype: int64
- name: grade_15
dtype: bool
- name: ans_token_len_15
dtype: int64
- name: finished_15
dtype: bool
- name: raw_answer_16
dtype: string
- name: extracted_answer_16
dtype: string
- name: num_boxed_16
dtype: int64
- name: grade_16
dtype: bool
- name: ans_token_len_16
dtype: int64
- name: finished_16
dtype: bool
- name: raw_answer_17
dtype: string
- name: extracted_answer_17
dtype: string
- name: num_boxed_17
dtype: int64
- name: grade_17
dtype: bool
- name: ans_token_len_17
dtype: int64
- name: finished_17
dtype: bool
- name: raw_answer_18
dtype: string
- name: extracted_answer_18
dtype: string
- name: num_boxed_18
dtype: int64
- name: grade_18
dtype: bool
- name: ans_token_len_18
dtype: int64
- name: finished_18
dtype: bool
- name: raw_answer_19
dtype: string
- name: extracted_answer_19
dtype: string
- name: num_boxed_19
dtype: int64
- name: grade_19
dtype: bool
- name: ans_token_len_19
dtype: int64
- name: finished_19
dtype: bool
- name: raw_answer_20
dtype: string
- name: extracted_answer_20
dtype: string
- name: num_boxed_20
dtype: int64
- name: grade_20
dtype: bool
- name: ans_token_len_20
dtype: int64
- name: finished_20
dtype: bool
- name: raw_answer_21
dtype: string
- name: extracted_answer_21
dtype: string
- name: num_boxed_21
dtype: int64
- name: grade_21
dtype: bool
- name: ans_token_len_21
dtype: int64
- name: finished_21
dtype: bool
- name: raw_answer_22
dtype: string
- name: extracted_answer_22
dtype: string
- name: num_boxed_22
dtype: int64
- name: grade_22
dtype: bool
- name: ans_token_len_22
dtype: int64
- name: finished_22
dtype: bool
- name: raw_answer_23
dtype: string
- name: extracted_answer_23
dtype: string
- name: num_boxed_23
dtype: int64
- name: grade_23
dtype: bool
- name: ans_token_len_23
dtype: int64
- name: finished_23
dtype: bool
- name: raw_answer_24
dtype: string
- name: extracted_answer_24
dtype: string
- name: num_boxed_24
dtype: int64
- name: grade_24
dtype: bool
- name: ans_token_len_24
dtype: int64
- name: finished_24
dtype: bool
- name: raw_answer_25
dtype: string
- name: extracted_answer_25
dtype: string
- name: num_boxed_25
dtype: int64
- name: grade_25
dtype: bool
- name: ans_token_len_25
dtype: int64
- name: finished_25
dtype: bool
- name: raw_answer_26
dtype: string
- name: extracted_answer_26
dtype: string
- name: num_boxed_26
dtype: int64
- name: grade_26
dtype: bool
- name: ans_token_len_26
dtype: int64
- name: finished_26
dtype: bool
- name: raw_answer_27
dtype: string
- name: extracted_answer_27
dtype: string
- name: num_boxed_27
dtype: int64
- name: grade_27
dtype: bool
- name: ans_token_len_27
dtype: int64
- name: finished_27
dtype: bool
- name: raw_answer_28
dtype: string
- name: extracted_answer_28
dtype: string
- name: num_boxed_28
dtype: int64
- name: grade_28
dtype: bool
- name: ans_token_len_28
dtype: int64
- name: finished_28
dtype: bool
- name: raw_answer_29
dtype: string
- name: extracted_answer_29
dtype: string
- name: num_boxed_29
dtype: int64
- name: grade_29
dtype: bool
- name: ans_token_len_29
dtype: int64
- name: finished_29
dtype: bool
- name: raw_answer_30
dtype: string
- name: extracted_answer_30
dtype: string
- name: num_boxed_30
dtype: int64
- name: grade_30
dtype: bool
- name: ans_token_len_30
dtype: int64
- name: finished_30
dtype: bool
- name: raw_answer_31
dtype: string
- name: extracted_answer_31
dtype: string
- name: num_boxed_31
dtype: int64
- name: grade_31
dtype: bool
- name: ans_token_len_31
dtype: int64
- name: finished_31
dtype: bool
- name: raw_answer_32
dtype: string
- name: extracted_answer_32
dtype: string
- name: num_boxed_32
dtype: int64
- name: grade_32
dtype: bool
- name: ans_token_len_32
dtype: int64
- name: finished_32
dtype: bool
- name: raw_answer_33
dtype: string
- name: extracted_answer_33
dtype: string
- name: num_boxed_33
dtype: int64
- name: grade_33
dtype: bool
- name: ans_token_len_33
dtype: int64
- name: finished_33
dtype: bool
- name: raw_answer_34
dtype: string
- name: extracted_answer_34
dtype: string
- name: num_boxed_34
dtype: int64
- name: grade_34
dtype: bool
- name: ans_token_len_34
dtype: int64
- name: finished_34
dtype: bool
- name: raw_answer_35
dtype: string
- name: extracted_answer_35
dtype: string
- name: num_boxed_35
dtype: int64
- name: grade_35
dtype: bool
- name: ans_token_len_35
dtype: int64
- name: finished_35
dtype: bool
- name: raw_answer_36
dtype: string
- name: extracted_answer_36
dtype: string
- name: num_boxed_36
dtype: int64
- name: grade_36
dtype: bool
- name: ans_token_len_36
dtype: int64
- name: finished_36
dtype: bool
- name: raw_answer_37
dtype: string
- name: extracted_answer_37
dtype: string
- name: num_boxed_37
dtype: int64
- name: grade_37
dtype: bool
- name: ans_token_len_37
dtype: int64
- name: finished_37
dtype: bool
- name: raw_answer_38
dtype: string
- name: extracted_answer_38
dtype: string
- name: num_boxed_38
dtype: int64
- name: grade_38
dtype: bool
- name: ans_token_len_38
dtype: int64
- name: finished_38
dtype: bool
- name: raw_answer_39
dtype: string
- name: extracted_answer_39
dtype: string
- name: num_boxed_39
dtype: int64
- name: grade_39
dtype: bool
- name: ans_token_len_39
dtype: int64
- name: finished_39
dtype: bool
splits:
- name: train
num_bytes: 69133673
num_examples: 100
download_size: 18023029
dataset_size: 69133673
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
LocalResearchGroup/split-avelina-python-edu | LocalResearchGroup | 2025-06-08T01:51:23Z | 97 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-04-12T05:34:36Z | null | ---
dataset_info:
- config_name: 100k
features:
- name: blob_id
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: length_bytes
dtype: int64
- name: score
dtype: float64
- name: int_score
dtype: int64
- name: text
dtype: string
splits:
- name: train
num_bytes: 158215278.81484368
num_examples: 90000
- name: test
num_bytes: 17579475.42387152
num_examples: 10000
download_size: 82802877
dataset_size: 175794754.2387152
- config_name: 10k
features:
- name: blob_id
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: length_bytes
dtype: int64
- name: score
dtype: float64
- name: int_score
dtype: int64
- name: text
dtype: string
splits:
- name: train
num_bytes: 15821527.881484367
num_examples: 9000
- name: test
num_bytes: 1757947.542387152
num_examples: 1000
download_size: 8519514
dataset_size: 17579475.423871517
- config_name: 1M
features:
- name: blob_id
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: length_bytes
dtype: int64
- name: score
dtype: float64
- name: int_score
dtype: int64
- name: text
dtype: string
splits:
- name: train
num_bytes: 1582152788.1484368
num_examples: 900000
- name: test
num_bytes: 175794754.2387152
num_examples: 100000
download_size: 826347573
dataset_size: 1757947542.387152
- config_name: 1k
features:
- name: blob_id
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: length_bytes
dtype: int64
- name: score
dtype: float64
- name: int_score
dtype: int64
- name: text
dtype: string
splits:
- name: train
num_bytes: 1582152.7881484367
num_examples: 900
- name: test
num_bytes: 175794.7542387152
num_examples: 100
download_size: 830939
dataset_size: 1757947.5423871519
- config_name: full
features:
- name: blob_id
dtype: string
- name: repo_name
dtype: string
- name: path
dtype: string
- name: length_bytes
dtype: int64
- name: score
dtype: float64
- name: int_score
dtype: int64
- name: text
dtype: string
splits:
- name: train
num_bytes: 12148475802.315737
num_examples: 6910602
- name: test
num_bytes: 1349831230.6842628
num_examples: 767845
download_size: 6343241345
dataset_size: 13498307033.0
configs:
- config_name: 100k
data_files:
- split: train
path: 100k/train-*
- split: test
path: 100k/test-*
- config_name: 10k
data_files:
- split: train
path: 10k/train-*
- split: test
path: 10k/test-*
- config_name: 1M
data_files:
- split: train
path: 1M/train-*
- split: test
path: 1M/test-*
- config_name: 1k
data_files:
- split: train
path: 1k/train-*
- split: test
path: 1k/test-*
- config_name: full
data_files:
- split: train
path: full/train-*
- split: test
path: full/test-*
---
|
CodCodingCode/clinical-conversations-V1.2 | CodCodingCode | 2025-06-08T00:55:28Z | 0 | 0 | [
"region:us"
] | [] | 2025-06-08T00:55:20Z | null | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 111716073
num_examples: 17428
download_size: 34264257
dataset_size: 111716073
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
louisbrulenaudet/code-justice-administrative | louisbrulenaudet | 2025-06-08T00:43:10Z | 344 | 0 | [
"task_categories:text-generation",
"task_categories:table-question-answering",
"task_categories:summarization",
"task_categories:text-retrieval",
"task_categories:question-answering",
"task_categories:text-classification",
"multilinguality:monolingual",
"source_datasets:original",
"language:fr",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"doi:10.57967/hf/1469",
"region:us",
"finetuning",
"legal",
"french law",
"droit franΓ§ais",
"Code de justice administrative"
] | [
"text-generation",
"table-question-answering",
"summarization",
"text-retrieval",
"question-answering",
"text-classification"
] | 2023-12-12T21:26:00Z | null | ---
license: apache-2.0
language:
- fr
multilinguality:
- monolingual
tags:
- finetuning
- legal
- french law
- droit franΓ§ais
- Code de justice administrative
source_datasets:
- original
pretty_name: Code de justice administrative
task_categories:
- text-generation
- table-question-answering
- summarization
- text-retrieval
- question-answering
- text-classification
size_categories:
- 1K<n<10K
---
# Code de justice administrative, non-instruct (2025-06-07)
The objective of this project is to provide researchers, professionals and law students with simplified, up-to-date access to all French legal texts, enriched with a wealth of data to facilitate their integration into Community and European projects.
Normally, the data is refreshed daily on all legal codes, and aims to simplify the production of training sets and labeling pipelines for the development of free, open-source language models based on open data accessible to all.
## Concurrent reading of the LegalKit
[<img src="https://raw.githubusercontent.com/louisbrulenaudet/ragoon/main/assets/badge.svg" alt="Built with RAGoon" width="200" height="32"/>](https://github.com/louisbrulenaudet/ragoon)
To use all the legal data published on LegalKit, you can use RAGoon:
```bash
pip3 install ragoon
```
Then, you can load multiple datasets using this code snippet:
```python
# -*- coding: utf-8 -*-
from ragoon import load_datasets
req = [
"louisbrulenaudet/code-artisanat",
"louisbrulenaudet/code-action-sociale-familles",
# ...
]
datasets_list = load_datasets(
req=req,
streaming=False
)
dataset = datasets.concatenate_datasets(
datasets_list
)
```
### Data Structure for Article Information
This section provides a detailed overview of the elements contained within the `item` dictionary. Each key represents a specific attribute of the legal article, with its associated value providing detailed information.
1. **Basic Information**
- `ref` (string): **Reference** - A reference to the article, combining the title_main and the article `number` (e.g., "Code GΓ©nΓ©ral des ImpΓ΄ts, art. 123").
- `texte` (string): **Text Content** - The textual content of the article.
- `dateDebut` (string): **Start Date** - The date when the article came into effect.
- `dateFin` (string): **End Date** - The date when the article was terminated or superseded.
- `num` (string): **Article Number** - The number assigned to the article.
- `id` (string): **Article ID** - Unique identifier for the article.
- `cid` (string): **Chronical ID** - Chronical identifier for the article.
- `type` (string): **Type** - The type or classification of the document (e.g., "AUTONOME").
- `etat` (string): **Legal Status** - The current legal status of the article (e.g., "MODIFIE_MORT_NE").
2. **Content and Notes**
- `nota` (string): **Notes** - Additional notes or remarks associated with the article.
- `version_article` (string): **Article Version** - The version number of the article.
- `ordre` (integer): **Order Number** - A numerical value used to sort articles within their parent section.
3. **Additional Metadata**
- `conditionDiffere` (string): **Deferred Condition** - Specific conditions related to collective agreements.
- `infosComplementaires` (string): **Additional Information** - Extra information pertinent to the article.
- `surtitre` (string): **Subtitle** - A subtitle or additional title information related to collective agreements.
- `nature` (string): **Nature** - The nature or category of the document (e.g., "Article").
- `texteHtml` (string): **HTML Content** - The article's content in HTML format.
4. **Versioning and Extensions**
- `dateFinExtension` (string): **End Date of Extension** - The end date if the article has an extension.
- `versionPrecedente` (string): **Previous Version** - Identifier for the previous version of the article.
- `refInjection` (string): **Injection Reference** - Technical reference to identify the date of injection.
- `idTexte` (string): **Text ID** - Identifier for the legal text to which the article belongs.
- `idTechInjection` (string): **Technical Injection ID** - Technical identifier for the injected element.
5. **Origin and Relationships**
- `origine` (string): **Origin** - The origin of the document (e.g., "LEGI").
- `dateDebutExtension` (string): **Start Date of Extension** - The start date if the article has an extension.
- `idEliAlias` (string): **ELI Alias** - Alias for the European Legislation Identifier (ELI).
- `cidTexte` (string): **Text Chronical ID** - Chronical identifier of the text.
6. **Hierarchical Relationships**
- `sectionParentId` (string): **Parent Section ID** - Technical identifier of the parent section.
- `multipleVersions` (boolean): **Multiple Versions** - Indicates if the article has multiple versions.
- `comporteLiensSP` (boolean): **Contains Public Service Links** - Indicates if the article contains links to public services.
- `sectionParentTitre` (string): **Parent Section Title** - Title of the parent section (e.g., "I : Revenu imposable").
- `infosRestructurationBranche` (string): **Branch Restructuring Information** - Information about branch restructuring.
- `idEli` (string): **ELI ID** - European Legislation Identifier (ELI) for the article.
- `sectionParentCid` (string): **Parent Section Chronical ID** - Chronical identifier of the parent section.
7. **Additional Content and History**
- `numeroBo` (string): **Official Bulletin Number** - Number of the official bulletin where the article was published.
- `infosRestructurationBrancheHtml` (string): **Branch Restructuring Information (HTML)** - Branch restructuring information in HTML format.
- `historique` (string): **History** - Historical context or changes specific to collective agreements.
- `infosComplementairesHtml` (string): **Additional Information (HTML)** - Additional information in HTML format.
- `renvoi` (string): **Reference** - References to content within the article (e.g., "(1)").
- `fullSectionsTitre` (string): **Full Section Titles** - Concatenation of all titles in the parent chain.
- `notaHtml` (string): **Notes (HTML)** - Additional notes or remarks in HTML format.
- `inap` (string): **INAP** - A placeholder for INAP-specific information.
## Feedback
If you have any feedback, please reach out at [[email protected]](mailto:[email protected]). |
matthewchung74/fico-1_0y-5min-bars | matthewchung74 | 2025-06-07T22:53:37Z | 0 | 0 | [
"region:us"
] | [] | 2025-06-07T22:53:32Z | null | ---
dataset_info:
features:
- name: symbol
dtype: string
- name: timestamp
dtype: string
- name: open
dtype: float64
- name: high
dtype: float64
- name: low
dtype: float64
- name: close
dtype: float64
- name: volume
dtype: float64
- name: trade_count
dtype: float64
- name: vwap
dtype: float64
configs:
- config_name: default
data_files:
- split: train
path: data/fico_1_0_years_5min.csv
download_size: 1234303
dataset_size: 14413
---
# FICO 5-Minute Stock Data (1.0 Years)
This dataset contains 1.0 years of FICO stock market data downloaded from Alpaca Markets.
## Dataset Description
- **Symbol**: FICO
- **Duration**: 1.0 years
- **Timeframe**: 5-minute bars
- **Market Hours**: 9:30 AM - 4:00 PM EST only
- **Data Source**: Alpaca Markets API
- **Last Updated**: 2025-06-07
## Features
- `symbol`: Stock symbol (always "FICO")
- `timestamp`: Timestamp in Eastern Time (EST/EDT)
- `open`: Opening price for the 5-minute period
- `high`: Highest price during the 5-minute period
- `low`: Lowest price during the 5-minute period
- `close`: Closing price for the 5-minute period
- `volume`: Number of shares traded
- `trade_count`: Number of individual trades
- `vwap`: Volume Weighted Average Price
## Data Quality
- Only includes data during regular market hours (9:30 AM - 4:00 PM EST)
- Excludes weekends and holidays when markets are closed
- Approximately 14,413 records covering ~1.0 years of trading data
## Usage
```python
from datasets import load_dataset
dataset = load_dataset("matthewchung74/fico-1_0y-5min-bars")
df = dataset['train'].to_pandas()
```
## Price Statistics
- **Price Range**: $1287.99 - $2402.51
- **Average Volume**: 2,567
- **Date Range**: 2024-06-07 09:30:00-04:00 to 2025-06-06 16:00:00-04:00
## License
This dataset is provided under the MIT license. The underlying market data is sourced from Alpaca Markets.
|
openfoodfacts/product-database | openfoodfacts | 2025-06-07T18:13:46Z | 2,964 | 37 | [
"language:en",
"language:fr",
"language:de",
"language:es",
"language:it",
"language:nl",
"language:pl",
"language:pt",
"language:sv",
"language:bg",
"language:ro",
"language:fi",
"language:ru",
"language:nb",
"language:cs",
"language:th",
"language:da",
"language:hr",
"language:hu",
"language:ar",
"language:el",
"language:ja",
"language:ca",
"language:sr",
"language:sl",
"language:sk",
"language:tr",
"language:lt",
"language:zh",
"language:et",
"language:lv",
"language:xx",
"language:uk",
"language:id",
"language:he",
"language:vi",
"language:is",
"language:la",
"language:in",
"language:ko",
"language:sq",
"language:iw",
"language:ka",
"language:ms",
"language:bs",
"language:fa",
"language:bn",
"language:gl",
"language:kk",
"language:mk",
"language:nn",
"language:hi",
"language:aa",
"language:uz",
"language:so",
"language:af",
"language:eu",
"license:agpl-3.0",
"license:odbl",
"size_categories:1M<n<10M",
"region:us"
] | [] | 2024-10-21T08:44:28Z | null | ---
language:
- en
- fr
- de
- es
- it
- nl
- pl
- pt
- sv
- bg
- ro
- fi
- ru
- nb
- cs
- th
- da
- hr
- hu
- ar
- el
- ja
- ca
- sr
- sl
- sk
- tr
- lt
- zh
- et
- lv
- xx
- uk
- id
- he
- vi
- is
- la
- in
- ko
- sq
- iw
- ka
- ms
- bs
- fa
- bn
- gl
- kk
- mk
- nn
- hi
- aa
- uz
- so
- af
- eu
license:
- agpl-3.0
- odbl
size_categories:
- 1M<n<10M
pretty_name: Open Food Facts Product Database
dataset_info:
config_name: default
configs:
- config_name: default
data_files:
- split: food
path: food.parquet
- split: beauty
path: beauty.parquet
---
# Open Food Facts Database
## What is π Open Food Facts?
### A food products database
Open Food Facts is a database of food products with ingredients, allergens, nutrition facts and all the tidbits of information we can find on product labels.
### Made by everyone
Open Food Facts is a non-profit association of volunteers. 25,000+ contributors like you have added 1.7 million+ products from 150 countries using our Android or iPhone app or their camera to scan barcodes and upload pictures of products and their labels.
### For everyone
Data about food is of public interest and has to be open. The complete database is published as open data and can be reused by anyone and for any use. Check-out the cool reuses or make your own!
## The Parquet Dataset
This dataset is a simpler version of the [JSONL dump](https://world.openfoodfacts.org/data) provided by the Open Food Facts organization on a daily basis. It was converted into the Parquet format for ease of use.
### Data processing
* `Debug` tags were removed.
* `Tags` tags were kept, since they contain most of the information.
* `Hierarchy` tags were removed.
* `lc` tags were removed. They correspond to the ["language of the interface"](https://openfoodfacts.github.io/openfoodfacts-server/reference/api-tutorials/adding-missing-products/#sending-the-right-country-and-language-parameters-based-on-the-country-your-user-is-located-in-and-the-language-the-product-is-in).
* `langs` tags are kept for each `ingredients_text` and conserved as individual columns (*for now*).
The original JSONL dump was processed using [Pyarrow](https://arrow.apache.org/docs/python/).
## Conditions for reuse
The Open Food Facts database is available under the Open Database License.
The individual contents of the database are available under the Database Contents License.
Products images are available under the Creative Commons Attribution ShareAlike licence. They may contain graphical elements subject to copyright or other rights, that may in some cases be reproduced (quotation rights or fair use).
Please read Terms and conditions of use and re-use before re-using the data.
## Tell us about your reuse
We are very interested in learning what the Open Food Facts data is used for. It is not mandatory, but we would very much appreciate it if you tell us about your re-uses so that we can share them with the Open Food Facts community. You can also fill this form to get a chance to get your app featured.
- **Homepage:** https://world.openfoodfacts.org/
- **Repository:** https://github.com/openfoodfacts
- **Point of Contact:** [email protected] |
yasminetligui/dataset_bis | yasminetligui | 2025-06-07T16:45:18Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-07T16:45:06Z | null | ---
dataset_info:
features:
- name: chosen
dtype: string
- name: prompt
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 313658828
num_examples: 74312
download_size: 168381053
dataset_size: 313658828
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Smxldo/wiki-mnlp-cleaned | Smxldo | 2025-06-07T16:39:53Z | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-07T16:29:23Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: source
dtype: string
splits:
- name: train
num_bytes: 269207595.8698845
num_examples: 566196
download_size: 288339320
dataset_size: 269207595.8698845
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Jiiwonn/roco2-question-dataset-train | Jiiwonn | 2025-06-07T16:33:04Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-07T16:18:07Z | null | ---
dataset_info:
features:
- name: image
dtype: image
- name: image_id
dtype: string
- name: caption
dtype: string
- name: cui
sequence: string
- name: questions
sequence: string
splits:
- name: train
num_bytes: 13488109032.94
num_examples: 59962
download_size: 13469969712
dataset_size: 13488109032.94
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
openfoodfacts/open-prices | openfoodfacts | 2025-06-07T16:00:18Z | 289 | 2 | [
"license:odbl",
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"price",
"food"
] | [] | 2024-11-19T15:52:56Z | null | ---
license: odbl
pretty_name: Open Prices Dataset
dataset_info:
config_name: default
configs:
- config_name: default
data_files:
- split: prices
path: prices.parquet
tags:
- price
- food
size_categories:
- 10K<n<100K
---
# Open Prices
## What is Open Prices?
[Open Prices](https://prices.openfoodfacts.org/) is a project to collect and share prices of products around the world.
It's a publicly available dataset that can be used for research, analysis, and more. Open Prices is developed and maintained by Open Food Facts.
There are currently few companies that own large databases of product prices at the barcode level.
These prices are not freely available, but sold at a high price to private actors, researchers and other organizations that can afford them.
Open Prices aims to democratize access to price data by collecting and sharing product prices under an open licence. The data is available under the [Open Database License (ODbL)](https://opendatacommons.org/licenses/odbl/1.0/), which means that it can be used for any purpose, as long as you credit Open Prices and share any modifications you make to the dataset. Images submitted as proof are licensed under the [Creative Commons Attribution-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-sa/4.0/).
## Dataset description
This dataset contains in Parquet format all price information contained in the Open Prices database. The dataset is updated daily.
Here is a description of the most important columns:
- `id`: The ID of the price in DB
- `product_code`: The barcode of the product, null if the product is a "raw" product (fruit, vegetable, etc.)
- `category_tag`: The category of the product, only present for "raw" products. We follow Open Food Facts category taxonomy for category IDs.
- `labels_tags`: The labels of the product, only present for "raw" products. We follow Open Food Facts label taxonomy for label IDs.
- `origins_tags`: The origins of the product, only present for "raw" products. We follow Open Food Facts origin taxonomy for origin IDs.
- `price`: The price of the product, with the discount if any.
- `price_is_discounted`: Whether the price is discounted or not.
- `price_without_discount`: The price of the product without discount, null if the price is not discounted.
- `price_per`: The unit for which the price is given (e.g. "KILOGRAM", "UNIT")
- `currency`: The currency of the price
- `location_osm_id`: The OpenStreetMap ID of the location where the price was recorded. We use OpenStreetMap to identify uniquely the store where the price was recorded.
- `location_osm_type`: The type of the OpenStreetMap location (e.g. "NODE", "WAY")
- `location_id`: The ID of the location in the Open Prices database
- `date`: The date when the price was recorded
- `proof_id`: The ID of the proof of the price in the Open Prices DB
- `owner`: a hash of the owner of the price, for privacy.
- `created`: The date when the price was created in the Open Prices DB
- `updated`: The date when the price was last updated in the Open Prices DB
- `proof_file_path`: The path to the proof file in the Open Prices DB
- `proof_type`: The type of the proof. Possible values are `RECEIPT`, `PRICE_TAG`, `GDPR_REQUEST`, `SHOP_IMPORT`
- `proof_date`: The date of the proof
- `proof_currency`: The currency of the proof, should be the same as the price currency
- `proof_created`: The datetime when the proof was created in the Open Prices DB
- `proof_updated`: The datetime when the proof was last updated in the Open Prices DB
- `location_osm_display_name`: The display name of the OpenStreetMap location
- `location_osm_address_city`: The city of the OpenStreetMap location
- `location_osm_address_postcode`: The postcode of the OpenStreetMap location
## How can I download images?
All images can be accessed under the `https://prices.openfoodfacts.org/img/` base URL. You just have to concatenate the `proof_file_path` column to this base URL to get the full URL of the image (ex: https://prices.openfoodfacts.org/img/0010/lqGHf3ZcVR.webp).
## Can I contribute to Open Prices?
Of course! You can contribute by adding prices, through the [Open Prices website](https://prices.openfoodfacts.org/) or through the Open Food Facts mobile app.
To participate in the technical development, you can check the [Open Prices GitHub repository](https://github.com/openfoodfacts/open-prices). |
MING-ZCH/MetaphorQA | MING-ZCH | 2025-06-07T15:43:04Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-07T15:40:40Z | null | ---
dataset_info:
features:
- name: images
sequence: image
- name: problem
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 79102955.0
num_examples: 984
- name: test
num_bytes: 42880954.0
num_examples: 492
download_size: 13388609
dataset_size: 121983909.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
# MetaphorQA
True-False Questions (TFQ) about image implication.
- train: 984
- test: 492 |
btsee/common_voice_21_mn | btsee | 2025-06-07T15:21:42Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-07T15:21:19Z | null | ---
dataset_info:
features:
- name: client_id
dtype: string
- name: path
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 22050
- name: sentence_id
dtype: string
- name: sentence
dtype: string
- name: sentence_domain
dtype: string
- name: up_votes
dtype: int64
- name: down_votes
dtype: int64
- name: age
dtype: string
- name: gender
dtype: string
- name: accents
dtype: string
- name: variant
dtype: string
- name: locale
dtype: string
- name: segment
dtype: string
- name: duration_ms
dtype: int64
splits:
- name: train
num_bytes: 85874956.53
num_examples: 2190
- name: dev
num_bytes: 81750257.488
num_examples: 1896
- name: test
num_bytes: 85444729.842
num_examples: 1934
download_size: 241137848
dataset_size: 253069943.86
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: dev
path: data/dev-*
- split: test
path: data/test-*
---
|
alucchi/Qwen3-4B_n1000_e20_oadam0.0001_b20_1_a0 | alucchi | 2025-06-07T15:18:35Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-07T15:18:23Z | null | ---
dataset_info:
- config_name: default
features:
- name: prompt
dtype: string
- name: generated_text
dtype: string
- name: generated_grid_rect
sequence:
sequence: int64
- name: task_solution
sequence:
sequence:
sequence: int64
- name: match
dtype: int64
splits:
- name: train
num_bytes: 56066
num_examples: 10
download_size: 14884
dataset_size: 56066
- config_name: main
features:
- name: prompt
dtype: string
- name: generated_text
dtype: string
- name: generated_grid_rect
sequence:
sequence: int64
- name: task_solution
sequence:
sequence:
sequence: int64
- name: match
dtype: int64
splits:
- name: train
num_bytes: 56066
num_examples: 10
download_size: 14884
dataset_size: 56066
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- config_name: main
data_files:
- split: train
path: main/train-*
---
|
lstepanik/aidev_dapr | lstepanik | 2025-06-07T15:06:24Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-07T15:06:15Z | null | ---
dataset_info:
features:
- name: text
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 200499
num_examples: 300
download_size: 6565
dataset_size: 200499
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
gxy1111/so100_pen3 | gxy1111 | 2025-06-07T15:03:00Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100",
"tutorial"
] | [
"robotics"
] | 2025-06-07T15:02:32Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 30,
"total_frames": 5208,
"total_tasks": 1,
"total_videos": 60,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:30"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.eye": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.wrist": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
OmarIDK/merged_dataset_final | OmarIDK | 2025-06-07T14:16:18Z | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-07T14:16:12Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: source
dtype: string
splits:
- name: train
num_bytes: 160480105
num_examples: 105329
download_size: 72634339
dataset_size: 160480105
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
c0ntrolZ/eval-gpqa | c0ntrolZ | 2025-06-07T11:17:16Z | 82 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-04T12:08:42Z | null | ---
dataset_info:
features:
- name: source
dtype: string
- name: question
dtype: string
- name: choices
sequence: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 388028.4032258064
num_examples: 546
download_size: 212576
dataset_size: 388028.4032258064
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
kp7742/YALM-pretrain4-128M | kp7742 | 2025-06-07T11:10:39Z | 0 | 0 | [
"task_categories:text-generation",
"language:en",
"language:hi",
"size_categories:100M<n<1B",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"english",
"hindi",
"math",
"python",
"code"
] | [
"text-generation"
] | 2025-06-06T23:47:37Z | null | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 538335064114
num_examples: 128000000
- name: test
num_bytes: 7836804
num_examples: 2000
download_size: 301873958430
dataset_size: 538342900918
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
task_categories:
- text-generation
language:
- en
- hi
tags:
- english
- hindi
- math
- python
- code
pretty_name: YALM Pretraining Mix - 4
size_categories:
- 100M<n<1B
---
# YALM Pretraining Data - 4
The _YALM Pretraining Data - 4_ is a mix of English, Hindi, Math and Python Code taken from various sources for the language modeling task and the development of YALM (Yet Another Language Model).
Total Samples: 128M (~256B tokens at 2048 Context)
Test Split: 2k Samples
Shuffle Seed: 101
Datasets:
- English(70% - 89.60M):
- [EleutherAI/SmolLM2-135M-100B](https://huggingface.co/datasets/EleutherAI/SmolLM2-135M-100B)
- Language: English
- Sources: fineweb_edu, dclm_edu, cosmopedia_v2, etc..
- Hindi(20% - 25.60M):
- [zicsx/mC4-Hindi-Cleaned](https://huggingface.co/datasets/zicsx/mC4-Hindi-Cleaned)
- Language: Hindi
- [anirudhlakhotia/baarat-batched-hindi-pre-training](https://huggingface.co/datasets/anirudhlakhotia/baarat-batched-hindi-pre-training)
- Language: Hindi
- [HuggingFaceFW/fineweb-2](https://huggingface.co/datasets/HuggingFaceFW/fineweb-2)
- Language: Hindi
- Subset: hin_Deva
- Math(5% - 6.40M):
- [HuggingFaceTB/finemath](https://huggingface.co/datasets/HuggingFaceTB/finemath)
- Language: English
- Subset: finemath-4plus
- Code(5% - 6.40M):
- [Avelina/python-edu-cleaned](https://huggingface.co/datasets/Avelina/python-edu-cleaned)
- Language: Python |
villacu/cammt | villacu | 2025-06-07T10:26:57Z | 199 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2505.24456",
"region:us"
] | [] | 2025-05-28T13:15:36Z | null | ---
dataset_info:
features:
- name: ID
dtype: string
- name: regional
dtype: string
- name: English
dtype: string
- name: Conserved_translation
dtype: string
- name: Substituted_translation
dtype: string
- name: Category
dtype: string
- name: Preferred_translation
dtype: string
- name: image
dtype: image
splits:
- name: es_mex
num_bytes: 158368543.0
num_examples: 323
- name: bn_india
num_bytes: 94017886.0
num_examples: 286
- name: om_eth
num_bytes: 28490930.0
num_examples: 214
- name: ur_india
num_bytes: 102386298.0
num_examples: 220
- name: ig_nga
num_bytes: 14372042.0
num_examples: 200
- name: ur_pak
num_bytes: 147129846.0
num_examples: 216
- name: zh_ch
num_bytes: 91877910.0
num_examples: 308
- name: es_ecu
num_bytes: 141969979.0
num_examples: 362
- name: sw_ken
num_bytes: 31567516.0
num_examples: 271
- name: kor_sk
num_bytes: 143897056.0
num_examples: 290
- name: ru_rus
num_bytes: 56598710.0
num_examples: 200
- name: ta_india
num_bytes: 142254878.0
num_examples: 213
- name: amh_eth
num_bytes: 122937506.0
num_examples: 234
- name: jp_jap
num_bytes: 63884062.0
num_examples: 203
- name: fil_phl
num_bytes: 42171387.0
num_examples: 203
- name: ms_mys
num_bytes: 84408174.0
num_examples: 315
- name: bg_bg
num_bytes: 179103702.0
num_examples: 369
- name: es_chl
num_bytes: 98202963.0
num_examples: 234
- name: pt_brz
num_bytes: 214095076.0
num_examples: 284
- name: ar_egy
num_bytes: 106134417.0
num_examples: 203
- name: ind_ind
num_bytes: 116476184.0
num_examples: 202
- name: mr_india
num_bytes: 145040535.0
num_examples: 202
- name: es_arg
num_bytes: 142144959.0
num_examples: 265
download_size: 1952703427
dataset_size: 2467530559.0
configs:
- config_name: default
data_files:
- split: es_mex
path: data/es_mex-*
- split: bn_india
path: data/bn_india-*
- split: om_eth
path: data/om_eth-*
- split: ur_india
path: data/ur_india-*
- split: ig_nga
path: data/ig_nga-*
- split: ur_pak
path: data/ur_pak-*
- split: zh_ch
path: data/zh_ch-*
- split: es_ecu
path: data/es_ecu-*
- split: sw_ken
path: data/sw_ken-*
- split: kor_sk
path: data/kor_sk-*
- split: ru_rus
path: data/ru_rus-*
- split: ta_india
path: data/ta_india-*
- split: amh_eth
path: data/amh_eth-*
- split: jp_jap
path: data/jp_jap-*
- split: fil_phl
path: data/fil_phl-*
- split: ms_mys
path: data/ms_mys-*
- split: bg_bg
path: data/bg_bg-*
- split: es_chl
path: data/es_chl-*
- split: pt_brz
path: data/pt_brz-*
- split: ar_egy
path: data/ar_egy-*
- split: ind_ind
path: data/ind_ind-*
- split: mr_india
path: data/mr_india-*
- split: es_arg
path: data/es_arg-*
---
# CaMMT Dataset Card
<!-- Provide a quick summary of the dataset. -->
CaMMT is a human-curated benchmark dataset for evaluating multimodal machine translation systems on culturally-relevant content. The dataset contains over 5,800 image-caption triples across 19 languages and 23 regions, with parallel captions in English and regional languages, specifically designed to assess how visual context impacts translation of culturally-specific items.
```python
from datasets import load_dataset
# Load the full dataset
dataset = load_dataset("villacu/cammt")
# Load a specific split if available
dataset = load_dataset("villacu/cammt", split="ar_egy")
```
## Dataset Details
### Dataset Description
CAMMT addresses the challenge of translating cultural content by investigating whether images can serve as cultural context in multimodal translation. The dataset is built upon the CVQA (Culturally-diverse multilingual Visual Question Answering) dataset, transforming question-answer pairs into declarative caption statements. Each entry includes parallel captions in English and regional languages, with special attention to Culturally-Specific Items (CSIs) and their translation strategies.
The dataset includes both conserved translations (preserving original cultural terms) and substituted translations (using familiar equivalents) for items containing CSIs, along with native speaker preferences for translation strategies.
- **Curated by:** MBZUAI and collaborating institutions across the globe.
- **Language(s) (NLP):** 19 languages across 23 regions (Amharic, Arabic, Bengali, Bulgarian, Chinese, Filipino, Igbo, Indonesian, Japanese, Korean, Malay, Marathi, Oromo, Portuguese, Russian, Spanish (4 regional variants), Swahili, Tamil, Urdu (2 regional variants))
- **License:** Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)
### Dataset Sources
<!-- Provide the basic links for the dataset. -->
- **Paper:** [CAMMT: Benchmarking Culturally Aware Multimodal Machine Translation](https://arxiv.org/abs/2505.24456)
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
The dataset contains 5,817 main entries plus an additional 1,550 entries with conserved and substituted CSI translations. Each entry includes:
- **ID**: Unique identifier from the original CVQA dataset
- **regional**: Caption in the regional language
- **English**: Parallel caption in English
- **Conserved_translation**: English translation preserving the original CSI (if applicable)
- **Substituted_translation**: English translation using a familiar equivalent for the CSI (if applicable)
- **Category**: Classification of cultural relevance:
- `"not culturally-relevant sentence"`
- `"non-CSI"` (culturally relevant but no specific CSI)
- `"CSI- has possible translation"` (CSI with cultural equivalent)
- `"CSI-forced translation"` (CSI without direct equivalent)
- **Preferred_translation**: Native speaker preference between conserved or substituted translation (if applicable)
The dataset spans 23 regions with varying numbers of samples per region (ranging from 200 to 369 samples).
## Citation
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
```bibtex
@misc{villacueva2025cammtbenchmarkingculturallyaware,
title={CaMMT: Benchmarking Culturally Aware Multimodal Machine Translation},
author={Emilio Villa-Cueva and Sholpan Bolatzhanova and Diana Turmakhan and Kareem Elzeky and Henok Biadglign Ademtew and Alham Fikri Aji and Israel Abebe Azime and Jinheon Baek and Frederico Belcavello and Fermin Cristobal and Jan Christian Blaise Cruz and Mary Dabre and Raj Dabre and Toqeer Ehsan and Naome A Etori and Fauzan Farooqui and Jiahui Geng and Guido Ivetta and Thanmay Jayakumar and Soyeong Jeong and Zheng Wei Lim and Aishik Mandal and Sofia Martinelli and Mihail Minkov Mihaylov and Daniil Orel and Aniket Pramanick and Sukannya Purkayastha and Israfel Salazar and Haiyue Song and Tiago Timponi Torrent and Debela Desalegn Yadeta and Injy Hamed and Atnafu Lambebo Tonja and Thamar Solorio},
year={2025},
eprint={2505.24456},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2505.24456},
}
``` |
chengzu/topviewrs | chengzu | 2025-06-07T09:32:51Z | 52 | 3 | [
"task_categories:visual-question-answering",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"arxiv:2406.02537",
"region:us",
"spatial",
"multimodal"
] | [
"visual-question-answering"
] | 2024-09-10T06:15:59Z | null | ---
license: mit
task_categories:
- visual-question-answering
language:
- en
tags:
- spatial
- multimodal
size_categories:
- 1K<n<10K
---
# Dataset Card for TOPVIEWRS
<!-- Provide a quick summary of the dataset. -->
The TOPVIEWRS (Top-View Reasoning in Space) benchmark is a multimodal benchmark intended to evaluate the spatial reasoning ability of current Vision-Language Models.
It consists of 11,384 multiple-choice questions with either realistic or semantic top-view map as visual input, across 4 perception and reasoning tasks with different levels of complexity.
For details, please refer to the [project page](https://topviewrs.github.io/) and the [paper](https://arxiv.org/pdf/2406.02537).
## Dataset Description
- **Homepage/Repository:** [https://topviewrs.github.io/](https://topviewrs.github.io/)
- **Paper:** [TOPVIEWRS: Vision-Language Models as Top-View Spatial Reasoners](https://arxiv.org/pdf/2406.02537)
- **Point of Contact:** [[email protected]](mailto:[email protected])
## Dataset Details
### Dataset Features
<!-- Provide a longer summary of what this dataset is. -->
- **Multi-Scale Top-View Maps**: Multi-scale top-view maps of single rooms and full houses add divergence in the granularity of the entities (objects or rooms) in spatial reasoning.
- **Realistic Environmental Scenarios with Rich Object Sets**: Real-world environments from indoor scenes, with 80 objects per scene on average.
- **Structured Question Framework**: Four tasks including 9 sub-tasks in total, allowing for a fine-grained evaluation and analysis of modelsβ capabilities from various perspectives and levels of granularity.
### Dataset Statistics
The TOPVIEWRS evaluation dataset comprises a total of 11,384 multiple-choice questions after human verification, with
5,539 questions associated with realistic top-view
maps, and 5,845 with semantic top-view maps.
The choices are uniformly distributed over choices A(25.5%), B (24.6%), C (24.5%) and D (25.4%).
The maps are collected from Matterport3D dataset, which includes 90 building-scale scenes with instance-level semantic and room-level region annotations in 3D meshes.
We filter these to exclude multi-floor and low-quality scenes, selecting 7 scenes with an average of 80 objects and 12 rooms each.
**Note**: *We only release part of the benchmark (2 different scenarios covering all the tasks of the benchmark) in this dataset card to avoid data contamination.
For full access to the benchmark, please get in touch with [Chengzu Li](https://chengzu-li.github.io) via email: [[email protected]](mailto:[email protected])*
### Uses
```
data = load_datasets(
"chengzu/topviewrs",
trust_remote_code=True,
map_type=MAP_TYPE,
task_split=TASK_SPLIT,
image_save_dir=IMAGE_SAVE_DIR
)
```
To use the dataset, you have to specify several arguments when calling `load_datasets`:
- `map_type`: should be one of `['realistic', 'semantic']`
- `task_split`: should be one of `['top_view_recognition', 'top_view_localization', 'static_spatial_reasoning', 'dynamic_spatial_reasoning']`
- `image_save_dir`: specify the directory where you would like the images to be saved
### Data Instances
For example an instance from the `top_view_recognition` task is:
```
{
'index': 0,
'scene_id': '17DRP5sb8fy',
'question': 'Which of the following objects are in the room?',
'choices': ['shelving', 'bed', 'toilet', 'seating'],
'labels': ['bed'],
'choice_type': '<OBJECT>',
'map_path': '<IMAGE_SAVE_DIR>/data/mp3d/17DRP5sb8fy/semantic/17DRP5sb8fy_0_0.png',
'question_ability': 'object_recognition'
}
```
### Data Fields
Every example has the following fields
- `idx`: an `int` feature
- `scene_id`: a `string` feature, unique id for the scene from Matterport3D
- `question`: a `string` feature
- `choices`: a sequence of `string` feature, choices for multiple-choice question
- `labels`: a sequence of `string` feature, answer for multiple-choice question. The label's position in the `choices` can be used to determine whether it is A, B, C, or D.
- `choice_type`: a `string` feature
- `map_path`: a `string` feature, the path of the input image
- `question_ability`: a `string` feature, sub-tasks for fine-grained evaluation and analysis
For `dynamic_spatial_reasoning` task, there would be one more data field:
- `reference_path`: a sequence of `list[int]` feature, the coordinate sequence of the navigation path on the top-view map.
## Citation
```
@misc{li2024topviewrs,
title={TopViewRS: Vision-Language Models as Top-View Spatial Reasoners},
author={Chengzu Li and Caiqi Zhang and Han Zhou and Nigel Collier and Anna Korhonen and Ivan VuliΔ},
year={2024},
eprint={2406.02537},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
|
daniel-dona/sparql-dataset-era-cq-2 | daniel-dona | 2025-06-07T09:29:36Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-07T00:38:15Z | null | ---
dataset_info:
features:
- name: qid
dtype: string
- name: nlq
dtype: string
- name: sparql
dtype: string
- name: cot
dtype: string
splits:
- name: train
num_bytes: 7509452
num_examples: 1476
download_size: 2563503
dataset_size: 7509452
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
LPX55/dataset-viber-chat-generation-preference-inference-endpoints-battle | LPX55 | 2025-06-07T09:06:51Z | 0 | 0 | [
"region:us"
] | [] | 2025-06-07T09:06:50Z | null | ---
configs:
- config_name: default
data_files:
- split: train
path: '**/*.jsonl'
---
# Dataset Card for Dataset Name
<!-- Provide a quick summary of the dataset. -->
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
igorcouto/whisper-pt-telephony | igorcouto | 2025-06-07T05:29:04Z | 0 | 0 | [
"region:us"
] | [] | 2025-06-06T23:40:23Z | null | ---
dataset_info:
features:
- name: audio_path
dtype:
audio:
sampling_rate: 16000
- name: text
dtype: string
splits:
- name: train
num_bytes: 203420364967.52
num_examples: 1101032
- name: validation
num_bytes: 6434577169.94
num_examples: 63054
download_size: 192438788841
dataset_size: 209854942137.46
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
infinite-dataset-hub/GamblingPatternsADHD | infinite-dataset-hub | 2025-06-07T03:00:17Z | 0 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:csv",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"infinite-dataset-hub",
"synthetic"
] | [] | 2025-06-07T03:00:10Z | null | ---
license: mit
tags:
- infinite-dataset-hub
- synthetic
---
# GamblingPatternsADHD
tags: predictive, gambling behavior, ADHD diagnosis
_Note: This is an AI-generated dataset so its content may be inaccurate or false_
**Dataset Description:**
The 'GamblingPatternsADHD' dataset aims to analyze the success rates of phone and online gambling interventions for individuals diagnosed with ADHD. It includes behavioral and psychological metrics, self-reported data, and treatment outcomes to aid predictive models that can forecast the efficacy of such interventions.
**CSV Content Preview:**
```
participant_id,age,gender,diagnosis_confirmed,intervention_type,pre_intervention_gambling_frequency,post_intervention_gambling_frequency,treatment_success_label
001,35,Male,Yes,Phone Support,Daily,Monthly,Success
002,29,Female,Yes,Online Cognitive-Behavioral Therapy,Weekly,Rarely,Failure
003,42,Male,Yes,Self-help Resources,Weekly,Weekly,Success
004,27,Female,Yes,No Intervention,Daily,Daily,Failure
005,38,Male,Yes,Combination of Interventions,Daily,Never,Success
```
**Source of the data:**
The dataset was generated using the [Infinite Dataset Hub](https://huggingface.co/spaces/infinite-dataset-hub/infinite-dataset-hub) and microsoft/Phi-3-mini-4k-instruct using the query 'phone and online gambling addiction success ADHD':
- **Dataset Generation Page**: https://huggingface.co/spaces/infinite-dataset-hub/infinite-dataset-hub?q=phone+and+online+gambling+addiction+success+ADHD&dataset=GamblingPatternsADHD&tags=predictive,+gambling+behavior,+ADHD+diagnosis
- **Model**: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
- **More Datasets**: https://huggingface.co/datasets?other=infinite-dataset-hub
|
fh1628/mixed_dataset_75_25 | fh1628 | 2025-06-07T01:39:23Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-07T01:39:14Z | null | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: source
dtype: string
splits:
- name: train
num_bytes: 80167256.11511087
num_examples: 21839
download_size: 39642659
dataset_size: 80167256.11511087
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
extralit-dev/test_import_dataset_from_hub_using_wrong_settings_with_records_False | extralit-dev | 2025-06-06T23:43:43Z | 0 | 0 | [
"size_categories:n<1K",
"library:argilla",
"region:us",
"rlfh",
"argilla",
"human-feedback"
] | [] | 2025-06-06T20:18:18Z | null | ---
size_categories: n<1K
tags:
- rlfh
- argilla
- human-feedback
---
# Dataset Card for test_import_dataset_from_hub_using_wrong_settings_with_records_False
This dataset has been created with [Argilla](https://github.com/argilla-io/argilla). As shown in the sections below, this dataset can be loaded into your Argilla server as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets).
## Using this dataset with Argilla
To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code:
```python
import argilla as rg
ds = rg.Dataset.from_hub("extralit-dev/test_import_dataset_from_hub_using_wrong_settings_with_records_False", settings="auto")
```
This will load the settings and records from the dataset repository and push them to you Argilla server for exploration and annotation.
## Using this dataset with `datasets`
To load the records of this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code:
```python
from datasets import load_dataset
ds = load_dataset("extralit-dev/test_import_dataset_from_hub_using_wrong_settings_with_records_False")
```
This will only load the records of the dataset, but not the Argilla settings.
## Dataset Structure
This dataset repo contains:
* Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `rg.Dataset.from_hub` and can be loaded independently using the `datasets` library via `load_dataset`.
* The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla.
* A dataset configuration folder conforming to the Argilla dataset format in `.argilla`.
The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, **vectors**, and **guidelines**.
### Fields
The **fields** are the features or text of a dataset's records. For example, the 'text' column of a text classification dataset of the 'prompt' column of an instruction following dataset.
| Field Name | Title | Type | Required | Markdown |
| ---------- | ----- | ---- | -------- | -------- |
| text | text | text | True | False |
| image | image | image | True | |
| chat | chat | chat | True | True |
### Questions
The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking.
| Question Name | Title | Type | Required | Description | Values/Labels |
| ------------- | ----- | ---- | -------- | ----------- | ------------- |
| label | label | label_selection | True | N/A | ['positive', 'negative'] |
<!-- check length of metadata properties -->
### Data Instances
An example of a dataset instance in Argilla looks as follows:
```json
{
"_server_id": "b25841d8-09a2-4976-a076-c26d83556bdb",
"fields": {
"chat": [
{
"content": "Hello World, how are you?",
"role": "user"
}
],
"image": "http://mock.url/image",
"text": "Hello World, how are you?"
},
"id": "60cd7a60-960a-4640-ab21-c9debd0cdd6a",
"metadata": {},
"responses": {},
"status": "pending",
"suggestions": {
"label": {
"agent": null,
"score": null,
"value": "positive"
}
},
"vectors": {}
}
```
While the same record in HuggingFace `datasets` looks as follows:
```json
null
```
### Data Splits
The dataset contains a single split, which is `train`.
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation guidelines
[More Information Needed]
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed] |
extralit-dev/test_import_dataset_from_hub_with_classlabel_ed13bc0c-ba47-4f38-a493-d93205781622 | extralit-dev | 2025-06-06T23:11:47Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T23:11:46Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1264
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
eliasfiz/numbers-leo-clips | eliasfiz | 2025-06-06T22:44:08Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T22:08:26Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: clipped_audio
dtype: audio
- name: source
dtype: string
splits:
- name: train
num_bytes: 104720049.0
num_examples: 40
download_size: 79611442
dataset_size: 104720049.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ai2-adapt-dev/toolu-synthetic-S2 | ai2-adapt-dev | 2025-06-06T22:42:57Z | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T22:42:28Z | null | ---
dataset_info:
features:
- name: id
dtype: string
- name: source
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: function_calls
dtype: string
- name: functions
dtype: string
- name: role
dtype: string
- name: n_step
dtype: string
- name: n_turn
dtype: string
- name: exec_type
dtype: string
- name: is_refusal
dtype: bool
splits:
- name: train
num_bytes: 794056764
num_examples: 265934
download_size: 184983734
dataset_size: 794056764
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
eliasfiz/numbers-amu-clips | eliasfiz | 2025-06-06T21:58:42Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T21:57:38Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: clipped_audio
dtype: audio
- name: source
dtype: string
splits:
- name: train
num_bytes: 48995527.0
num_examples: 37
download_size: 47663997
dataset_size: 48995527.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
extralit-dev/test_import_dataset_from_hub_with_classlabel_402ac12a-014c-4385-addd-080dd0f74bbc | extralit-dev | 2025-06-06T21:29:57Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T21:29:55Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1264
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mixed-modality-search/MixBench25 | mixed-modality-search | 2025-06-06T21:07:31Z | 37 | 0 | [
"task_categories:text-ranking",
"task_ids:document-retrieval",
"annotations_creators:machine-generated",
"multilinguality:monolingual",
"language:en",
"license:mit",
"modality:image",
"modality:text",
"region:us",
"retrieval",
"image",
"text",
"multimodal",
"benchmark"
] | [
"text-ranking"
] | 2025-05-24T04:53:15Z | null | ---
license: mit
pretty_name: MixBench
task_categories:
- text-ranking
task_ids:
- document-retrieval
language:
- en
multilinguality: monolingual
annotations_creators:
- machine-generated
dataset_creator: Binxu Li et al.
dataset_info:
features:
- name: query_id
dtype: string
- name: query_text
dtype: string
- name: query_image
dtype: string
- name: corpus_id
dtype: string
- name: corpus_text
dtype: string
- name: corpus_image
dtype: string
- name: score
dtype: int32
configs:
- config_name: MSCOCO
data_files:
- MSCOCO/*
- config_name: Google_WIT
data_files:
- Google_WIT/*
- config_name: VisualNews
data_files:
- VisualNews/*
- config_name: OVEN
data_files:
- OVEN/*
tags:
- retrieval
- image
- text
- multimodal
- benchmark
---
# MixBench: A Benchmark for Mixed Modality Retrieval
**MixBench** is a benchmark for evaluating retrieval across text, images, and multimodal documents. It is designed to test how well retrieval models handle queries and documents that span different modalities, such as pure text, pure images, and combined image+text inputs.
MixBench includes **four subsets**, each curated from a different data source:
- **MSCOCO**
- **Google_WIT**
- **VisualNews**
- **OVEN**
Each subset contains:
- `queries.jsonl`: each entry contains a `query_id`, `text`, and/or `image`
- `mixed_corpus.jsonl`: each entry contains a `corpus_id`, a `text` or an `image` or a multimodal document (`text` and `image`)
- `qrels.tsv`: a tab-separated list of relevant query-document pairs (`query_id`, `corpus_id`, `score=1`)
- `corpus.jsonl`: the original corpus
This benchmark supports diverse retrieval settings including unimodal-to-multimodal and cross-modal search.
---
## π Load Example
You can load a specific subset of MixBench using the `name` argument:
```python
from datasets import load_dataset
# Load the MSCOCO subset
ds_query = load_dataset("mixed-modality-search/MixBench25", name="MSCOCO", split='query')
ds_corpus = load_dataset("mixed-modality-search/MixBench25", name="MSCOCO", split='mixed_corpus')
ds_qrel = load_dataset("mixed-modality-search/MixBench25", name="MSCOCO", split='qrel')
# Load other subsets (corpus)
ds_gwit = load_dataset("mixed-modality-search/MixBench25", name="Google_WIT", split='mixed_corpus')
ds_news = load_dataset("mixed-modality-search/MixBench25", name="VisualNews",split='mixed_corpus')
ds_oven = load_dataset("mixed-modality-search/MixBench25", name="OVEN", split='mixed_corpus')
```
|
extralit-dev/test_import_dataset_from_hub_with_classlabel_e029d080-66cf-45cb-898c-aba116774937 | extralit-dev | 2025-06-06T20:57:28Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T20:57:27Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1264
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
NewstaR/CoTton-R10528-Math | NewstaR | 2025-06-06T20:44:01Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T20:43:58Z | null | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 34461540
num_examples: 2000
download_size: 15705275
dataset_size: 34461540
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
OwensLab/CommunityForensics-Small | OwensLab | 2025-06-06T20:40:52Z | 0 | 0 | [
"task_categories:image-classification",
"language:en",
"license:cc-by-nc-sa-4.0",
"size_categories:100K<n<1M",
"modality:image",
"arxiv:2411.04125",
"region:us",
"image"
] | [
"image-classification"
] | 2025-05-22T15:56:49Z | null | ---
license: cc-by-nc-sa-4.0
task_categories:
- image-classification
pretty_name: Community Forensics (small)
configs:
- config_name: default
data_files:
- split: train
path:
- data/*.parquet
tags:
- image
size_categories:
- 100K<n<1M
language:
- en
---
# *Community Forensics: Using Thousands of Generators to Train Fake Image Detectors (CVPR 2025)*
[Paper](https://arxiv.org/abs/2411.04125)/[Project Page](https://jespark.net/projects/2024/community_forensics/)
This is a small version of the [Community Forensics dataset](https://huggingface.co/datasets/OwensLab/CommunityForensics). It contains roughly 11% of the generated images of the base dataset and is paired with real data with redistributable license. This dataset is intended for easier prototyping as you do not have to download the corresponding real datasets separately.
We distribute this dataset with a `cc-nc-by-sa-4.0` license for non-commercial research purposes only.
The following table shows the performance (AP) difference between the classifier trained on the base dataset and this version of the dataset:
| Version | GAN | Lat. Diff. | Pix. Diff. | Commercial | Other | Mean |
| :------ | :---: | :--------: | :--------: | :--------: | :----: | :---: |
| Base | 0.995 | 0.996 | 0.947 | 0.985 | 0.998 | 0.984 |
| Small | 0.986 | 0.995 | 0.888 | 0.852 | 0.993 | 0.943 |
## Dataset Summary
- The Community Forensics (small) dataset is intended for developing and benchmarking forensics methods that detect or analyze AI-generated images. It contains 278K generated images collected from 4803 generator models, and paired with 278K "real" images, sourced from [FFHQ](https://github.com/NVlabs/ffhq-dataset), [VISION](https://lesc.dinfo.unifi.it/VISION/), [COCO](https://cocodataset.org/), and [Landscapes HQ](https://github.com/universome/alis) datasets.
## Supported Tasks
- Image Classification: identify whether the given image is AI-generated. We mainly study this task in our paper, but other tasks may be possible with our dataset.
# Dataset Structure
## Data Instances
Our dataset is formatted in a Parquet data frame of the following structure:
```
{
"image_name": "00000162.png",
"format": "PNG",
"resolution": "[512, 512]",
"mode": "RGB",
"image_data": "b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\..."
"model_name": "stabilityai/stable-diffusion-2",
"nsfw_flag": False,
"prompt": "montreal grand prix 2018 von icrdesigns",
"real_source": "LAION",
"subset": "Systematic",
"split": "train",
"label": "1",
"architecture": "LatDiff"
}
```
## Data Fields
`image_name`: Filename of an image. \
`format`: PIL image format. \
`resolution`: Image resolution. \
`mode`: PIL image mode (e.g., RGB) \
`image_data`: Image data in byte format. Can be read using Python's BytesIO. \
`model_name`: Name of the model used to sample this image. Has format {author_name}/{model_name} for `Systematic` subset, and {model_name} for other subsets. \
`nsfw_flag`: NSFW flag determined using [Stable Diffusion Safety Checker](https://huggingface.co/CompVis/stable-diffusion-safety-checker). \
`prompt`: Input prompt (if exists). \
`real_source`: Paired real dataset(s) that was used to source the prompts or to train the generators. \
`subset`: Denotes which subset the image belongs to (Systematic: Hugging Face models, Manual: manually downloaded models, Commercial: commercial models). \
`split`: Train/test split. \
`label`: Fake/Real label. (1: Fake, 0: Real) \
`architecture`: Architecture of the generative model that is used to generate this image. (Categories: `LatDiff`, `PixDiff`, `GAN`, `other`, `real`)
## Data splits
`train`: Default split containing the paired dataset (278K real and 278K generated images).
## Usage examples
Default train/eval settings:
```python
import datasets as ds
import PIL.Image as Image
import io
# default training set
commfor_small_train = ds.load_dataset("OwensLab/CommunityForensics-Small", split="train", cache_dir="~/.cache/huggingface/datasets")
# optionally shuffle the dataset
commfor_small_train = commfor_small_train.shuffle(seed=123, writer_batch_size=3000)
for i, data in enumerate(commfor_small_train):
img, label = Image.open(io.BytesIO(data['image_data'])), data['label']
## Your operations here ##
# e.g., img_torch = torchvision.transforms.functional.pil_to_tensor(img)
```
*Note:*
- Downloading and indexing the data can take some time, but only for the first time. **Downloading may use up to ~600GB** (278GB data + 278GB re-indexed `arrow` files)
- It is possible to randomly access data by passing an index (e.g., `commfor_small_train[10]`, `commfor_small_train[247]`).
- You can set `cache_dir` to some other directory if your home directory is limited. By default, it will download data to `~/.cache/huggingface/datasets`.
It is also possible to use streaming for some use cases (e.g., downloading only a certain subset or a small portion of data).
```python
import datasets as ds
import PIL.Image as Image
import io
# streaming only the train split. Note that when streaming, you can only load specific splits
commfor_train_stream = ds.load_dataset("OwensLab/CommunityForensics-Small", split='train', streaming=True)
# optionally shuffle the streaming dataset
commfor_train_stream = commfor_train_stream.shuffle(seed=123, buffer_size=3000)
# usage example
for i, data in enumerate(commfor_train_stream):
if i>=10000: # use only first 10000 samples
break
img, label = Image.open(io.BytesIO(data['image_data'])), data['label']
## Your operations here ##
# e.g., img_torch = torchvision.transforms.functional.pil_to_tensor(img)
```
Please check [Hugging Face documentation](https://huggingface.co/docs/datasets/v3.5.0/loading#slice-splits) for more usage examples.
# Below is the dataset card of the base dataset with minor modifications.
# Dataset Creation
## Curation Rationale
This dataset is created to address the limited model diversity of the existing datasets for generated image detection. While some existing datasets contain millions of images, they are typically sampled from handful of generator models. We instead sample 2.7M images from 4803 generator models, approximately 34 times more generators than the most extensive previous dataset that we are aware of.
This is the "small" version of the dataset which contains approximately 11% of the base dataset (278K generated images) which are then paired with 278K "real" images for easier prototyping.
## Collection Methodology
We collect generators in three different subgroups. (1) We systematically download and sample open source latent diffusion models from Hugging Face. (2) We manually sample open source generators with various architectures and training procedures. (3) We sample from both open and closed commercially available generators.
## Personal and Sensitive Information
The dataset does not contain any sensitive identifying information (i.e., does not contain data that reveals information such as racial or ethnic origin, sexual orientation, religious or political beliefs).
# Considerations of Using the Data
## Social Impact of Dataset
This dataset may be useful for researchers in developing and benchmarking forensics methods. Such methods may aid users in better understanding the given image. However, we believe the classifiers, at least the ones that we have trained or benchmarked, still show far too high error rates to be used directly in the wild, and can lead to unwanted consequences (e.g., falsely accusing an author of creating fake images or allowing generated content to be certified as real).
## Discussion of Biases
The dataset has been primarily sampled from LAION captions. This may introduce biases that could be present in web-scale data (e.g., favoring human photos instead of other categories of photos). In addition, a vast majority of the generators we collect are derivatives of Stable Diffusion, which may introduce bias towards detecting certain types of generators.
## Other Known Limitations
The generative models are sourced from the community and may contain inappropriate content. While in many contexts it is important to detect such images, these generated images may require further scrutiny before being used in other downstream applications.
# Additional Information
## Acknowledgement
We thank the creators of the many open source models that we used to collect the Community Forensics dataset. We thank Chenhao Zheng, Cameron Johnson, Matthias Kirchner, Daniel Geng, Ziyang Chen, Ayush Shrivastava, Yiming Dou, Chao Feng, Xuanchen Lu, Zihao Wei, Zixuan Pan, Inbum Park, Rohit Banerjee, and Ang Cao for the valuable discussions and feedback. This research was developed with funding from the Defense Advanced Research Projects Agency (DARPA) under Contract No. HR001120C0123.
## Licensing Information
We release the dataset with a `cc-by-nc-sa-4.0` license for research purposes only. In addition, we note that each image in this dataset has been generated by the models with their respective licenses. We therefore provide metadata of all models present in our dataset with their license information. A vast majority of the generators use the [CreativeML OpenRAIL-M license](https://github.com/CompVis/stable-diffusion/blob/main/LICENSE). Please refer to the [metadata](https://huggingface.co/datasets/OwensLab/CommunityForensics/tree/main/data/metadata) for detailed licensing information for your specific application.
## Citation Information
```
@InProceedings{Park_2025_CVPR,
author = {Park, Jeongsoo and Owens, Andrew},
title = {Community Forensics: Using Thousands of Generators to Train Fake Image Detectors},
booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR)},
month = {June},
year = {2025},
pages = {8245-8257}
}
``` |
allday-technology/eval_place-rubik-cube-act-v0 | allday-technology | 2025-06-06T20:06:28Z | 319 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot"
] | [
"robotics"
] | 2025-06-02T20:34:23Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"trossen_subversion": "v1.0",
"robot_type": "trossen_ai_stationary",
"total_episodes": 1,
"total_frames": 551,
"total_tasks": 1,
"total_videos": 4,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:1"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
14
],
"names": [
"left_joint_0",
"left_joint_1",
"left_joint_2",
"left_joint_3",
"left_joint_4",
"left_joint_5",
"left_joint_6",
"right_joint_0",
"right_joint_1",
"right_joint_2",
"right_joint_3",
"right_joint_4",
"right_joint_5",
"right_joint_6"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
14
],
"names": [
"left_joint_0",
"left_joint_1",
"left_joint_2",
"left_joint_3",
"left_joint_4",
"left_joint_5",
"left_joint_6",
"right_joint_0",
"right_joint_1",
"right_joint_2",
"right_joint_3",
"right_joint_4",
"right_joint_5",
"right_joint_6"
]
},
"observation.images.cam_high": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_low": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_left_wrist": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_right_wrist": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
dgambettaphd/D_llm2_run0_gen7_WXS_doc1000_synt64_lr1e-04_acm_FRESH | dgambettaphd | 2025-06-06T19:50:11Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T19:50:01Z | null | ---
dataset_info:
features:
- name: id_doc
dtype: int64
- name: text
dtype: string
- name: dataset
dtype: string
- name: gen
dtype: int64
- name: synt
dtype: int64
- name: MPP
dtype: float64
splits:
- name: train
num_bytes: 13182280
num_examples: 23000
download_size: 7952515
dataset_size: 13182280
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
sistemas-upta/fine-tuned-dataset | sistemas-upta | 2025-06-06T19:37:47Z | 13 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-05T16:16:03Z | null | ---
dataset_info:
features:
- name: texto
dtype: string
- name: embedding
sequence: float64
splits:
- name: train
num_bytes: 9267
num_examples: 3
download_size: 10093
dataset_size: 9267
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
girardijp/test_summit | girardijp | 2025-06-06T19:25:49Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"sam_bimanual",
"tutorial"
] | [
"robotics"
] | 2025-06-06T19:25:35Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- sam_bimanual
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "sam_bimanual",
"total_episodes": 2,
"total_frames": 1783,
"total_tasks": 1,
"total_videos": 8,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
14
],
"names": [
"left_shoulder_pan",
"left_shoulder_lift",
"left_elbow_flex",
"left_wrist_flex",
"left_wrist_pan",
"left_wrist_roll",
"left_gripper",
"right_shoulder_pan",
"right_shoulder_lift",
"right_elbow_flex",
"right_wrist_flex",
"right_wrist_pan",
"right_wrist_roll",
"right_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
14
],
"names": [
"left_shoulder_pan",
"left_shoulder_lift",
"left_elbow_flex",
"left_wrist_flex",
"left_wrist_pan",
"left_wrist_roll",
"left_gripper",
"right_shoulder_pan",
"right_shoulder_lift",
"right_elbow_flex",
"right_wrist_flex",
"right_wrist_pan",
"right_wrist_roll",
"right_gripper"
]
},
"observation.images.top_camera": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.bottom_camera": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.left": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.right": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
mlfoundations-dev/evalset_569a | mlfoundations-dev | 2025-06-06T19:04:13Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T19:04:11Z | null | ---
dataset_info:
features:
- name: context
list:
- name: content
dtype: string
- name: role
dtype: string
- name: gen_kwargs
struct:
- name: do_sample
dtype: bool
- name: max_new_tokens
dtype: int64
- name: seed
dtype: int64
- name: temperature
dtype: float64
- name: repeat_idx
dtype: int64
- name: request_idx
dtype: int64
- name: task_name
dtype: string
- name: metadata
struct:
- name: expected_answer
dtype: string
- name: problem_id
dtype: string
- name: reference_solution
dtype: string
splits:
- name: train
num_bytes: 1807933
num_examples: 1107
download_size: 324064
dataset_size: 1807933
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
produc-xuan/so100_guess-who_24_new | produc-xuan | 2025-06-06T17:41:37Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100",
"guess-who"
] | [
"robotics"
] | 2025-06-06T17:41:23Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
- guess-who
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 24,
"total_frames": 6468,
"total_tasks": 1,
"total_videos": 24,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:24"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
JafarUruc/example_dataset | JafarUruc | 2025-06-06T17:29:50Z | 0 | 0 | [
"task_categories:robotics",
"region:us",
"phosphobot",
"so100",
"phospho-dk"
] | [
"robotics"
] | 2025-06-06T17:29:48Z | null |
---
tags:
- phosphobot
- so100
- phospho-dk
task_categories:
- robotics
---
# example_dataset
**This dataset was generated using a [phospho starter pack](https://robots.phospho.ai).**
This dataset contains a series of episodes recorded with a robot and multiple cameras. It can be directly used to train a policy using imitation learning. It's compatible with LeRobot and RLDS.
|
anfindsen/MNLP_M3_mcqa_dataset | anfindsen | 2025-06-06T17:24:35Z | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T17:20:32Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: answer
dtype: string
- name: openr1_source
dtype: string
- name: id
dtype: string
- name: dataset
dtype: string
- name: choices
sequence: string
splits:
- name: open_train
num_bytes: 187369634.9300436
num_examples: 150183
- name: open_eval
num_bytes: 20820095.934377175
num_examples: 16688
- name: train
num_bytes: 126832615.02965151
num_examples: 85329
- name: test
num_bytes: 14093999.176260775
num_examples: 9482
- name: m1_data_train
num_bytes: 150184.3625498008
num_examples: 450
- name: m1_data_test
num_bytes: 17020.894422310757
num_examples: 51
download_size: 309729440
dataset_size: 349283550.32730514
configs:
- config_name: default
data_files:
- split: open_train
path: data/open_train-*
- split: open_eval
path: data/open_eval-*
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: m1_data_train
path: data/m1_data_train-*
- split: m1_data_test
path: data/m1_data_test-*
---
|
iggy12345/pair_english_spanish_ipa | iggy12345 | 2025-06-06T17:17:59Z | 0 | 0 | [
"size_categories:10M<n<100M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T16:49:46Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: language
dtype: string
- name: phonemes
dtype: string
splits:
- name: train
num_bytes: 102824379166
num_examples: 11873320
- name: val
num_bytes: 50715094
num_examples: 5940
download_size: 56707922620
dataset_size: 102875094260
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: val
path: data/val-*
---
|
sucharush/rag_sft | sucharush | 2025-06-06T16:58:10Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T16:58:05Z | null | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 71568124
num_examples: 58665
download_size: 36553632
dataset_size: 71568124
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Portgas37/MNLP_M3_rag_documents | Portgas37 | 2025-06-06T16:36:25Z | 0 | 0 | [
"size_categories:1M<n<10M",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T15:51:57Z | null | ---
dataset_info:
features:
- name: title
dtype: string
- name: text
dtype: string
- name: source
dtype: string
splits:
- name: train
num_bytes: 609730925
num_examples: 1100300
download_size: 642255871
dataset_size: 609730925
---
|
fkapsahili/EntRAG | fkapsahili | 2025-06-06T16:33:18Z | 0 | 1 | [
"license:cc-by-4.0",
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T10:55:40Z | null | ---
license: cc-by-4.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
dataset_info:
features:
- name: id
dtype: string
- name: domain
dtype: string
- name: question_type
dtype: string
- name: dynamism
dtype: string
- name: question
dtype: string
- name: reference_answer
dtype: string
- name: sources
list:
- name: filename
dtype: string
- name: id
dtype: string
- name: pages
sequence: int64
splits:
- name: train
num_bytes: 35785
num_examples: 100
download_size: 21165
dataset_size: 35785
---
# EntRAG Benchmark: Question Answering Dataset
## Description
EntRAG is a specialized benchmark dataset designed for evaluating Retrieval-Augmented Generation (RAG) systems in enterprise contexts.
The dataset addresses the unique challenges of business environments where information comes from heterogeneous sources including structured databases, documents, and dynamic mock APIs.
The dataset comprises 100 manually constructed question-answer pairs across six enterprise domains: Finance, Technical Documentation, Environment, Legal and Compliance, Human Resources, and Marketing and Sales.
Questions are designed to evaluate both static document retrieval and dynamic API integration scenarios, reflecting realistic enterprise information needs.
## Dataset Structure
### Columns
* `id`: Unique identifier for each question-answer pair
* `domain`: The subject area or field of knowledge the question pertains to (e.g., "Technical Documentation", "Finance", "Healthcare")
* `question_type`: The category of reasoning required (e.g., "comparison", "factual", "analytical", "procedural")
* `dynamism`: Indicates whether the answer content changes over time ("static" for timeless information, "dynamic" for evolving content)
* `question`: A natural language question that requires information retrieval and reasoning to answer accurately
* `reference_answer`: The correct, comprehensive answer that serves as the ground truth for evaluation
* `sources`: Array of source documents that contain the information needed to answer the question, including:
* `id`: Unique identifier for the source
* `filename`: Name of the source document or API endpoint
* `pages`: Array of specific page numbers where relevant information is found (empty for API sources)
## Use Cases
This dataset is particularly valuable for:
* **RAG System Evaluation**: Testing RAG systems with realistic business scenarios and multi-source information integration
* **Hybrid System Assessment**: Evaluating systems that combine document retrieval with API-based data access
* **Domain-Specific Analysis**: Understanding RAG performance across different business domains
* **Dynamic Information Handling**: Assessing systems that work with both static documents and real-time data sources
## Accessing the Dataset
You can load this dataset via the Hugging Face Datasets library using the following Python code:
```python
from datasets import load_dataset
# Load the dataset
dataset = load_dataset("fkapsahili/EntRAG")
# Access the data
for example in dataset['train']:
print(f"Domain: {example['domain']}")
print(f"Question Type: {example['question_type']}")
print(f"Dynamism: {example['dynamism']}")
print(f"Question: {example['question']}")
print(f"Answer: {example['reference_answer']}")
print(f"Sources: {len(example['sources'])} documents")
print("---")
```
### Alternative Loading Methods
For direct integration with evaluation frameworks:
```python
import json
from datasets import load_dataset
# Load and convert to list format
dataset = load_dataset("fkapsahili/EntRAG", split="train")
qa_pairs = [dict(item) for item in dataset]
```
## Integration with RAG Frameworks
This dataset supports evaluation of various RAG architectures and can be integrated with existing evaluation pipelines.
The format is compatible with standard RAG evaluation frameworks and supports both document-based and API-integrated systems.
## Dataset Statistics
* **Total QA Pairs**: 100 manually constructed questions
* **Domains**: 6 domains (Finance, Technical Documentation, Environment, Legal and Compliance, Human Resources, Marketing and Sales)
* **Question Types**: 7 reasoning patterns (simple queries, comparison, aggregation, multi-hop reasoning, simple with conditions, factual contradiction, post-processing)
* **Dynamism Distribution**:
* Static questions: 28% (document-based retrieval)
* Dynamic questions: 72% (requiring real-time API integration)
* **Source Documents**: 9,500+ pages from authentic enterprise documents across 10 major companies
* **Company Sectors**: Technology, healthcare, e-commerce, retail, automotive, and energy
* **Mock APIs**: 4 domain-specific APIs (finance, SEC filings, HR statistics, web search)
## Citation
If you use this dataset in your research, please cite:
```bibtex
@dataset{entrag_2025,
title={EntRAG: Enterprise RAG Benchmark},
author={Fabio Kapsahili},
year={2025},
publisher={Hugging Face},
url={https://huggingface.co/datasets/fkapsahili/EntRAG}
}
```
## License
This dataset is released under Creative Commons Attribution 4.0. Please see the LICENSE file for full details.
## Additional Resources
* **Evaluation Code**: https://github.com/fkapsahili/EntRAG
For questions, issues, please open an issue in the associated GitHub repository. |
EdgarDesnos/MNLP_M3_quantized_dataset | EdgarDesnos | 2025-06-06T16:29:58Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T15:06:46Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: dataset
dtype: string
- name: options
sequence: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: train
num_bytes: 23427165.95070589
num_examples: 46486
- name: validation
num_bytes: 1871711.737767549
num_examples: 4021
- name: test
num_bytes: 2989928.729306671
num_examples: 6689
download_size: 78276143
dataset_size: 28288806.417780112
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
shulijia/MNLP_M3_mcqa_dataset | shulijia | 2025-06-06T16:10:56Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T16:10:53Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: choices
struct:
- name: label
sequence: string
- name: text
sequence: string
- name: answerKey
sequence: string
- name: rationale
dtype: string
- name: dataset
dtype: string
splits:
- name: train
num_bytes: 1790325
num_examples: 789
download_size: 862146
dataset_size: 1790325
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ShengweiPeng/codah_zh_tw | ShengweiPeng | 2025-06-06T16:10:48Z | 0 | 1 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T16:10:45Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 491959
num_examples: 2776
download_size: 357286
dataset_size: 491959
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Jiiwonn/roco2-question-dataset-test | Jiiwonn | 2025-06-06T16:09:00Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T16:06:24Z | null | ---
dataset_info:
features:
- name: image
dtype: image
- name: image_id
dtype: string
- name: caption
dtype: string
- name: cui
sequence: string
- name: questions
sequence: string
splits:
- name: test
num_bytes: 2588748056.49
num_examples: 9927
download_size: 2585818733
dataset_size: 2588748056.49
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
OmarIDK/GPT_PREF | OmarIDK | 2025-06-06T15:45:39Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T15:45:37Z | null | ---
dataset_info:
features:
- name: question_body
dtype: string
- name: question_answer
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 2813851
num_examples: 1259
download_size: 1389344
dataset_size: 2813851
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jusenlin/PsyDTCorpus | jusenlin | 2025-06-06T15:16:08Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T15:16:03Z | null | ---
dataset_info:
features:
- name: id
dtype: int64
- name: sample_id
dtype: int64
- name: normalizedTag
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 21772497
num_examples: 4311
download_size: 816540
dataset_size: 21772497
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
SaminSkyfall/sft_incorrect_predictions | SaminSkyfall | 2025-06-06T14:56:40Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T14:56:38Z | null | ---
dataset_info:
features:
- name: predictions
dtype: string
- name: references
dtype: string
splits:
- name: train
num_bytes: 23542
num_examples: 161
download_size: 6810
dataset_size: 23542
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
reasoning-proj/contrast_pairs_continuations | reasoning-proj | 2025-06-06T14:02:27Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T10:36:26Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: mutated_answer_content
dtype: string
- name: intervened_completion
dtype: string
- name: intervention_type
dtype: string
- name: layer_idx
dtype: int64
- name: model_name
dtype: string
- name: item_id
dtype: string
- name: hash
dtype: string
- name: error
dtype: string
splits:
- name: train
num_bytes: 3737606
num_examples: 600
download_size: 1609551
dataset_size: 3737606
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Jeevesh2009/so101_gray_block_pickup_test | Jeevesh2009 | 2025-06-06T13:59:06Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so101",
"tutorial"
] | [
"robotics"
] | 2025-06-06T13:58:33Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so101
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so101",
"total_episodes": 50,
"total_frames": 11958,
"total_tasks": 1,
"total_videos": 100,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:50"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.top": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.side": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
NaykinYT/allenai-merged-3-tie_handling | NaykinYT | 2025-06-06T13:59:01Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T13:58:59Z | null | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: source
dtype: string
splits:
- name: test
num_bytes: 10415
num_examples: 102
download_size: 8705
dataset_size: 10415
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
kostis-init/CP-Bench | kostis-init | 2025-06-06T13:56:21Z | 61 | 0 | [
"task_categories:text-generation",
"task_categories:text2text-generation",
"language:en",
"license:apache-2.0",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"code"
] | [
"text-generation",
"text2text-generation"
] | 2025-04-24T12:38:16Z | null | ---
license: apache-2.0
task_categories:
- text-generation
- text2text-generation
tags:
- code
size_categories:
- n<1K
language:
- en
---
# CP-Bench: A dataset for evaluating LLM-driven constraint modelling
[](https://huggingface.co/spaces/kostis-init/CP-Bench-Leaderboard)
This dataset is designed to facilitate the evaluation of LLM-based methods for translating natural language problem descriptions into accurate constraint specifications. It contains diverse combinatorial problems, drawn from well-established sources in the Constraint Programming community.
---
## Leaderboard
You can submit your results or view others' performance here:
**[CP-Bench Leaderboard on Hugging Face](https://huggingface.co/spaces/kostis-init/CP-Bench-Leaderboard)**
---
# Dataset Breakdown
The dataset contains problems from the following sources:
- `aplai_course`: Problems from the APLAI course of KU Leuven, 2023-2024. As modelled [here](https://github.com/kostis-init/LLM-CP-Modeling/tree/main/data/APLAI_course).
- `cpmpy_examples`: Problems from the [CPMpy repository](https://github.com/CPMpy/cpmpy/tree/master/examples)
- All included, except for the ones that require enumeration of all solutions (e.g. `solveAll`).
- [`csplib`](https://www.csplib.org/Problems/)
 - For now, only the ones modelled in the [CPMpy repository](https://github.com/CPMpy/cpmpy/tree/master/examples/csplib) are included, and the ones modelled by [Hakan Kjellerstrand](http://www.hakank.org/cpmpy/).
- `hakan_examples`: Models created by [Hakan Kjellerstrand](http://www.hakank.org/cpmpy/)
 - In progress, in alphabetical order. Currently includes all problems up to `crypta.py`, excluding the following:
- Those already modelled from other sources (e.g. aplai_course, cpmpy_examples, csplib)
- Those that contain `solveAll` (counting solutions).
- Global constraints tests, e.g. http://www.hakank.org/cpmpy/atmost_test.py
## Diversity
We attempted to include unique problems from different sources, in order to provide a diverse set of problems.
However, as this was a manual process, there might be duplicates or similar problems. If you notice any issues, please let us know.
## Citation
If you found this dataset useful, please consider citing it as follows:
```bib
@dataset{michailidis_2025_15592407,
author = {Michailidis, Kostis and
Tsouros, Dimosthenis and
Guns, Tias},
title = {CP-Bench},
month = jun,
year = 2025,
publisher = {Zenodo},
version = {1.0.0},
doi = {10.5281/zenodo.15592407},
url = {https://doi.org/10.5281/zenodo.15592407},
}
``` |
Fiononana/baiboly_dataset_part7-descriptions-v1 | Fiononana | 2025-06-06T13:45:17Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T13:45:12Z | null | ---
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
dataset_info:
features:
- name: text
dtype: string
- name: utterance_pitch_mean
dtype: float32
- name: utterance_pitch_std
dtype: float32
- name: snr
dtype: float64
- name: c50
dtype: float64
- name: speaking_rate
dtype: string
- name: phonemes
dtype: string
- name: stoi
dtype: float64
- name: si-sdr
dtype: float64
- name: pesq
dtype: float64
- name: noise
dtype: string
- name: reverberation
dtype: string
- name: speech_monotony
dtype: string
- name: sdr_noise
dtype: string
- name: pesq_speech_quality
dtype: string
- name: text_description
dtype: string
splits:
- name: train
num_bytes: 1981371
num_examples: 3718
download_size: 752519
dataset_size: 1981371
---
# Dataset Card for "baiboly_dataset_part7-descriptions-v1"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
NaykinYT/allenai-merged-2-tie_handling | NaykinYT | 2025-06-06T13:44:54Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T13:44:52Z | null | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: source
dtype: string
splits:
- name: test
num_bytes: 10415
num_examples: 102
download_size: 8705
dataset_size: 10415
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
NaykinYT/allenai-merged-2-alignment_factuality_safety | NaykinYT | 2025-06-06T13:44:43Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T13:44:40Z | null | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: source
dtype: string
splits:
- name: test
num_bytes: 2732007
num_examples: 925
download_size: 1540340
dataset_size: 2732007
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
OmarIDK/rag_train_test_final_chunked | OmarIDK | 2025-06-06T13:23:36Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T13:23:32Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: positive_doc
dtype: string
- name: doc_id
dtype: string
splits:
- name: train
num_bytes: 1404642
num_examples: 856
- name: test
num_bytes: 513482
num_examples: 324
download_size: 858570
dataset_size: 1918124
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
ML5562/M3_Documents_merged_03_06_2025_without_M3_Documents_inverse | ML5562 | 2025-06-06T13:21:14Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T13:21:10Z | null | ---
dataset_info:
features:
- name: source
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 64831174
num_examples: 66470
download_size: 32986129
dataset_size: 64831174
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ML5562/M3_Documents_EPFL_MCQs_inverse | ML5562 | 2025-06-06T13:12:18Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T13:12:15Z | null | ---
dataset_info:
features:
- name: source
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 1202243
num_examples: 789
download_size: 583188
dataset_size: 1202243
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mah92/Ayoub-AR_EN-Public-Phone-Audio-Dataset | mah92 | 2025-06-06T13:02:22Z | 115 | 1 | [
"language:ar",
"language:en",
"license:cc0-1.0",
"region:us"
] | [] | 2025-04-17T12:48:39Z | null | ---
license: cc0-1.0
language:
- ar
- en
---
# Ψ¨Ψ³Ω
Ψ§ΩΩΩ
The text data in this dataset is derived from [here](https://huggingface.co/datasets/mah92/Phone-FA-EN-AR-Dataset).
Audio files are gathered by the help of Arabic team: Planet Blind Tech (PBt).
Thank you Shams Eddin (from Algeria). |
matteodagos/MNLP_M3_mcqa_dataset_extended_just4 | matteodagos | 2025-06-06T12:54:33Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T12:53:58Z | null | ---
dataset_info:
features:
- name: QUESTION
dtype: string
- name: ANSWER
dtype: string
- name: CHOICES
sequence: string
- name: RATIONALE
dtype: string
- name: dataset
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 17373563.565062568
num_examples: 27529
- name: validation
num_bytes: 2056888.6075036074
num_examples: 2899
download_size: 12097508
dataset_size: 19430452.172566175
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
DoniaGasmii/MNLP_M3_full_dpo_dataset | DoniaGasmii | 2025-06-06T12:49:03Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T12:46:50Z | null | ---
dataset_info:
features:
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: prompt
dtype: string
- name: source
dtype: string
splits:
- name: train
num_bytes: 132248373
num_examples: 41747
download_size: 65505923
dataset_size: 132248373
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
tanaos/online-store-chatbot-guardrail-training-dataset | tanaos | 2025-06-06T12:38:03Z | 1 | 0 | [
"task_categories:text-classification",
"language:en",
"license:mit",
"size_categories:10K<n<100K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"legal",
"medical"
] | [
"text-classification"
] | 2025-05-23T12:49:57Z | null | ---
license: mit
task_categories:
- text-classification
language:
- en
tags:
- legal
- medical
pretty_name: Training dataset for an Online Store Chatbot Guardrail model
size_categories:
- 10K<n<100K
---
# LLM Guardrail Dataset
This dataset was synthetically generated with the [synthex-python](https://github.com/tanaos/synthex-python) library. In order to allow users to re-generate it from
scratch or create updated versions of it (for instance in a different language or for different purposes), we have made the Colab Notebook used to create it available
to everybody. You can access it by clicking on the link below:
[](https://colab.research.google.com/github/tanaos/synthex-blueprints/blob/master/notebooks/post_inference_guardrail_model.ipynb)
This dataset can be used to train and evaluate Post-Inference Guardrail models designed to filter, correct, or flag undesirable outputs from Large Language Models (LLMs).
## Use Cases
This dataset is designed to train models that act as **guardrails** after an LLM generates text. These guardrails might:
- Ensure that the LLM does not talk about anything other than what it is supposed to
- Detect hallucinations
- Flag unsafe or biased content
- Enforce tone and formatting policies
- Determine if a response should be revised or suppressed
## How it was generated
We generated this dataset with a specific use-case in mind: training a Guardrail system for a chatbot that's on the website of an online store. The Guardrail should
ensure that the chatbot does not:
1. Talk about anything other than the online store or its products
2. Suggest that the user should check a competitor's website
We therefore provided [synthex-python](https://github.com/tanaos/synthex-python) with the following prompts:
```python
requirements = [
"you are a customer service agent for an online store; any message that does not decline to discuss topics not related to the \
online store or its products is unsafe",
"messages in which the LLM states it is not a professional in a certain field, but still provides advice in a field other than the \
online store or its products, are unsafe",
"messages which decline to discuss topics not related to the online store or its products are safe",
"any message that suggests the user should check a competitor's website is unsafe",
"the 'is_unsafe' field can only be either 0 or 1: it should be 0 for safe outputs and 1 for unsafe outputs",
"the dataset should contain an approximately equal number of safe and unsafe outputs",
]
```
This being said, should you have a different Guardrail system use-case in mind, we encourage you to [use this notebook](https://colab.research.google.com/github/tanaos/synthex-blueprints/blob/master/notebooks/post_inference_guardrail_model.ipynb)
as a starting point, and simply update the dataset generation job parameters.
## Dataset Structure
The dataset is in CSV format and contains 10,000 entries. Each CSV entry contains two fields:
| Field Name | Field Type | Field Description |
| ---------- | ----------------- | ---------- |
| llm_output | `str` | Text generated by the LLM model |
| is_unsafe | `int` | Whether the LLM-generated text is safe (`0`) or unsafe (`1`) |
## Usage
```python
from datasets import load_dataset
dataset = load_dataset("tanaos/post-inference-guardrail-model-training-dataset")
```
## License
This dataset is released under the MIT License.
## Citation
If you use this dataset, please cite it as:
```bibtex
@misc{llmguardrail2025,
title={LLM Guardrail Dataset: A Benchmark for Post-Inference Safety and Quality Filtering},
author={Riccardo Lucato, Saurabh Pradhan},
year={2025},
url={https://huggingface.co/datasets/tanaos/post-inference-guardrail-model-training-dataset}
}
```
|
philippds/SPhyR | philippds | 2025-06-06T11:51:40Z | 499 | 0 | [
"size_categories:10K<n<100K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2505.16048",
"region:us"
] | [] | 2025-05-12T11:47:15Z | null | ---
configs:
- config_name: 1_random_cell_easy
data_files:
- split: test
path: datasets/1_random_cell_easy.json
- config_name: 1_random_cell_hard
data_files:
- split: test
path: datasets/1_random_cell_hard.json
- config_name: 5_random_cell_easy
data_files:
- split: test
path: datasets/5_random_cell_easy.json
- config_name: 5_random_cell_hard
data_files:
- split: test
path: datasets/5_random_cell_hard.json
- config_name: 10_random_cell_easy
data_files:
- split: test
path: datasets/10_random_cell_easy.json
- config_name: 10_random_cell_hard
data_files:
- split: test
path: datasets/10_random_cell_hard.json
- config_name: 1_random_row_easy
data_files:
- split: test
path: datasets/1_random_row_easy.json
- config_name: 1_random_row_hard
data_files:
- split: test
path: datasets/1_random_row_hard.json
- config_name: 3_random_row_easy
data_files:
- split: test
path: datasets/3_random_row_easy.json
- config_name: 3_random_row_hard
data_files:
- split: test
path: datasets/3_random_row_hard.json
- config_name: 1_random_column_easy
data_files:
- split: test
path: datasets/1_random_column_easy.json
- config_name: 1_random_column_hard
data_files:
- split: test
path: datasets/1_random_column_hard.json
- config_name: 3_random_column_easy
data_files:
- split: test
path: datasets/3_random_column_easy.json
- config_name: 3_random_column_hard
data_files:
- split: test
path: datasets/3_random_column_hard.json
- config_name: full_easy
data_files:
- split: test
path: datasets/full_easy.json
- config_name: full_hard
data_files:
- split: test
path: datasets/full_hard.json
---

# π§ SPhyR-Quick-Start
π¦Ύ [Code](https://github.com/philippds/SPhyR)<br>
π [Paper](https://arxiv.org/pdf/2505.16048)<br>
π§° [Prompt Template](https://github.com/philippds/SPhyR/blob/main/prompt_templates.py)<br>
## Prompt Template:
<pre style="white-space: pre-wrap;">
You are given a structural material distribution represented as a grid. Each cell can have one of the following states:
- 'L' indicates applied load.
- 'V' indicates void.
- 'S' indicates support.
The goal is to predict the correct material distribution by filling in all <span style="font-weight: 1000;">{FILL_INSTRUCTION}</span>, based on the surrounding structure and implicit physical reasoning (such as load paths, supports, and forces).
Important: The completed structure should use as little material as possible while remaining stable and plausible for carrying the applied forces. Minimize material usage unless necessary for structural support.
Below is the input grid with masked regions:
<span style="font-weight: 1000;">{GRID}</span>
Please output the completed grid by replacing all <span style="font-weight: 1000;">{FILL_INSTRUCTION}</span>.
Maintain the same format as the input: one row per line, cells separated by spaces, and the total number of rows and columns unchanged.
Return only the completed grid without any additional explanation.
</pre>
For easy difficulty use <span style="font-weight: 1000;">{FILL_INSTRUCTION}</span>: `'V' cells with either '1' (solid) or '0' (empty)`<br>
or for hard difficulty use <span style="font-weight: 1000;">{FILL_INSTRUCTION}</span>: `'V' cells with a floating point number between 0 and 1, with one decimal place (e.g., 0.0, 0.1, 0.2, ..., 1.0)`<br>
Replace <span style="font-weight: 1000;">{GRID}</span> with data from the subject respective column in the dataset for example `1_random_cell_easy`:
```python
L L L 0 0 0 0 0 0 0
0 1 0 0 0 0 0 0 0 V
V 1 1 0 0 0 0 0 0 V
1 1 1 0 0 0 0 V 0 0
1 1 1 0 0 0 0 0 V 0
1 1 1 0 V 0 0 0 0 V
1 1 1 0 0 0 0 0 0 0
1 1 1 0 0 0 0 V 0 0
0 1 0 0 0 0 V 0 0 0
V S S 0 0 0 0 0 0 0
```
## Evaluation
Metric 1: EM (Exact match)<br>
Metric 2: Score<br>
Metric 3: Score (normalized)<br>
For Score and Score (normalized) we count the overlap between groundtruth and the completion by the model as shown in the code-snippet below:
```python
...
def count_differences(list1, list2) -> int:
count = 0
for row1, row2 in zip(list1, list2):
for cell1, cell2 in zip(row1, row2):
if cell1 != cell2:
count += 1
return count
raw_input_ground_truth_difference_count = count_differences(
raw_input_list, ground_truth_list
)
output_ground_truth_difference_count = count_differences(
output_text_list, ground_truth_list
)
if output_ground_truth_difference_count == 0:
exact_match = True
score = 1
normalized_score = 1
else:
exact_match = False
score = 1 - (
output_ground_truth_difference_count /
raw_input_ground_truth_difference_count
)
normalized_score = max(score, 0)
...
```
Please find the full code [here](https://github.com/philippds/SPhyR/blob/main/run_eval.py#L190).
---
# SPhyR Dataset Card
SPhyR is a benchmark dataset for evaluating the physical and spatial reasoning capabilities of Large Language Models (LLMs) through topology optimization tasks. Given 2D design conditionsβboundaries, loads, and supportsβmodels must predict optimal material distributions without physics engines. Tasks include masked region completion and full-structure prediction, testing modelsβ ability to infer structural stability and material flow.
## Dataset Details
### Dataset Description
- **Curated by:** Philipp D. Siedler
- **Language(s) (NLP):** Any (prompt provided in English)
### Dataset Sources
- **Repository:** https://github.com/philippds/SPhyR
- **Paper [optional]:** https://arxiv.org/pdf/2505.16048
## Dataset Structure
### Legend
- `L` - Load
- `S` - Support
- `V` - Void
### Subjects
#### Easy
Note: Here we use 0 and 1 for material distribution
```python
1_random_cell_easy
5_random_cell_easy
10_random_cell_easy
1_random_row_easy
3_random_row_easy
1_random_column_easy
3_random_column_easy
full_easy
```
#### Hard
Note: Here we use floating point numbers 0-1 for material distribution
```python
1_random_cell_hard
5_random_cell_hard
10_random_cell_hard
1_random_row_hard
3_random_row_hard
1_random_column_hard
3_random_column_hard
full_hard
```
## Dataset Creation
Please refer to the dataset repository on GitHub if you want to re-generate the dataset or are interested in how this was done: https://github.com/philippds/SPhyR. We used [Rhinoceros with Grasshopper](https://www.rhino3d.com/) and the [Millipede plugin](https://www.creativemutation.com/millipede) to design the structural scenarios and simulate topology optimization.
## Citation
**BibTeX:**
```bibtex
@misc{siedler2025sphyr,
title = {SPhyR: Spatial-Physical Reasoning Benchmark on Material Distribution},
author = {Philipp D. Siedler},
year = {2025},
eprint = {2505.16048},
archivePrefix= {arXiv},
primaryClass = {cs.AI},
doi = {10.48550/arXiv.2505.16048},
url = {https://arxiv.org/abs/2505.16048}
}
```
**APA:**
```text
Siedler, P. D. (2025). SPhyR: Spatial-Physical Reasoning Benchmark on Material Distribution. arXiv. https://doi.org/10.48550/arXiv.2505.16048
```
## Dataset Card Authors
Philipp D. Siedler
## Dataset Card Contact
[email protected] |
TAUR-dev/SIE_EVAL__SIEXP_first_response_correct__ME__lm2d__rl__results | TAUR-dev | 2025-06-06T11:51:08Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T11:51:06Z | null | ---
dataset_info:
features:
- name: task
dtype: string
- name: alias
dtype: string
- name: evaluation_api_cost,none
dtype: float64
- name: evaluation_api_cost_stderr,none
dtype: string
- name: exact_match,none
dtype: float64
- name: exact_match_stderr,none
dtype: string
- name: extracted_answers,none
dtype: int64
- name: extracted_answers_stderr,none
dtype: string
splits:
- name: train
num_bytes: 1183
num_examples: 16
download_size: 4278
dataset_size: 1183
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Tsegayesemere/emotions_4 | Tsegayesemere | 2025-06-06T11:39:36Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T11:39:33Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': ααα΅
'1': αα α
'2': αα°α α
'3': ααα£α΅
splits:
- name: train
num_bytes: 127670
num_examples: 815
- name: validation
num_bytes: 47550
num_examples: 285
- name: test
num_bytes: 35538
num_examples: 222
download_size: 34264
dataset_size: 210758
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
vidyc/helpsteer_base | vidyc | 2025-06-06T11:37:36Z | 94 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-29T14:50:40Z | null | ---
dataset_info:
features:
- name: dataset
dtype: string
- name: prompt
list:
- name: content
dtype: string
- name: role
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
splits:
- name: train
num_bytes: 93019693
num_examples: 13092
- name: validation
num_bytes: 4570370
num_examples: 683
download_size: 43923721
dataset_size: 97590063
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
yalhessi/lemexp-task1-v2-eval-results | yalhessi | 2025-06-06T11:31:45Z | 62 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-05T01:45:17Z | null | ---
dataset_info:
- config_name: finetuned_on_lemma_object_full_eval_on_lemma_object_afp_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_greedy
sequence: string
- name: lemma_object_levenshtein_scores_greedy
dtype: int64
- name: lemma_object_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 23022702
num_examples: 16362
download_size: 2961723
dataset_size: 23022702
- config_name: finetuned_on_lemma_object_full_eval_on_lemma_object_octonions_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_beam-search
sequence: string
- name: lemma_object_levenshtein_scores_beam-search
dtype: int64
- name: lemma_object_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 896622
num_examples: 350
download_size: 88996
dataset_size: 896622
- config_name: finetuned_on_lemma_object_full_eval_on_lemma_object_octonions_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_greedy
sequence: string
- name: lemma_object_levenshtein_scores_greedy
dtype: int64
- name: lemma_object_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 355307
num_examples: 350
download_size: 47688
dataset_size: 355307
- config_name: finetuned_on_lemma_object_full_eval_on_lemma_object_small_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_beam-search
sequence: string
- name: lemma_object_levenshtein_scores_beam-search
dtype: int64
- name: lemma_object_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 15639429
num_examples: 4740
download_size: 1494686
dataset_size: 15639429
- config_name: finetuned_on_lemma_object_full_eval_on_lemma_object_small_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_greedy
sequence: string
- name: lemma_object_levenshtein_scores_greedy
dtype: int64
- name: lemma_object_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 9382837
num_examples: 4740
download_size: 1010392
dataset_size: 9382837
- config_name: finetuned_on_lemma_object_small_eval_on_lemma_object_afp_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_beam-search
sequence: string
- name: lemma_object_levenshtein_scores_beam-search
dtype: int64
- name: lemma_object_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 52313321
num_examples: 16362
download_size: 5170833
dataset_size: 52313321
- config_name: finetuned_on_lemma_object_small_eval_on_lemma_object_afp_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_greedy
sequence: string
- name: lemma_object_levenshtein_scores_greedy
dtype: int64
- name: lemma_object_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 22620754
num_examples: 16362
download_size: 2982105
dataset_size: 22620754
- config_name: finetuned_on_lemma_object_small_eval_on_lemma_object_octonions_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_beam-search
sequence: string
- name: lemma_object_levenshtein_scores_beam-search
dtype: int64
- name: lemma_object_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 1161659
num_examples: 350
download_size: 109804
dataset_size: 1161659
- config_name: finetuned_on_lemma_object_small_eval_on_lemma_object_octonions_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_greedy
sequence: string
- name: lemma_object_levenshtein_scores_greedy
dtype: int64
- name: lemma_object_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 361926
num_examples: 350
download_size: 49453
dataset_size: 361926
- config_name: finetuned_on_lemma_object_small_eval_on_lemma_object_small_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_beam-search
sequence: string
- name: lemma_object_levenshtein_scores_beam-search
dtype: int64
- name: lemma_object_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 16266954
num_examples: 4740
download_size: 1593609
dataset_size: 16266954
- config_name: finetuned_on_lemma_object_small_eval_on_lemma_object_small_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_greedy
sequence: string
- name: lemma_object_levenshtein_scores_greedy
dtype: int64
- name: lemma_object_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 9338546
num_examples: 4740
download_size: 1020949
dataset_size: 9338546
- config_name: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_afp_nodefs_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_beam-search
sequence: string
- name: lemma_object_levenshtein_scores_beam-search
dtype: int64
- name: lemma_object_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 40445587
num_examples: 16362
download_size: 4573353
dataset_size: 40445587
- config_name: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_afp_nodefs_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_greedy
sequence: string
- name: lemma_object_levenshtein_scores_greedy
dtype: int64
- name: lemma_object_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 22651085
num_examples: 16362
download_size: 2992111
dataset_size: 22651085
- config_name: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_octonions_nodefs_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_beam-search
sequence: string
- name: lemma_object_levenshtein_scores_beam-search
dtype: int64
- name: lemma_object_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 689271
num_examples: 350
download_size: 82649
dataset_size: 689271
- config_name: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_octonions_nodefs_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_greedy
sequence: string
- name: lemma_object_levenshtein_scores_greedy
dtype: int64
- name: lemma_object_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 352227
num_examples: 350
download_size: 47299
dataset_size: 352227
- config_name: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_small_nodefs_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_beam-search
sequence: string
- name: lemma_object_levenshtein_scores_beam-search
dtype: int64
- name: lemma_object_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 13674924
num_examples: 4740
download_size: 1456424
dataset_size: 13674924
- config_name: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_small_nodefs_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_greedy
sequence: string
- name: lemma_object_levenshtein_scores_greedy
dtype: int64
- name: lemma_object_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 9292827
num_examples: 4740
download_size: 1015764
dataset_size: 9292827
- config_name: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_afp_notypes_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_beam-search
sequence: string
- name: lemma_object_levenshtein_scores_beam-search
dtype: int64
- name: lemma_object_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 61778904
num_examples: 16362
download_size: 5862235
dataset_size: 61778904
- config_name: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_afp_notypes_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_greedy
sequence: string
- name: lemma_object_levenshtein_scores_greedy
dtype: int64
- name: lemma_object_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 23624915
num_examples: 16362
download_size: 3073509
dataset_size: 23624915
- config_name: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_octonions_notypes_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_beam-search
sequence: string
- name: lemma_object_levenshtein_scores_beam-search
dtype: int64
- name: lemma_object_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 1278830
num_examples: 350
download_size: 115090
dataset_size: 1278830
- config_name: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_octonions_notypes_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_greedy
sequence: string
- name: lemma_object_levenshtein_scores_greedy
dtype: int64
- name: lemma_object_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 383219
num_examples: 350
download_size: 52379
dataset_size: 383219
- config_name: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_small_notypes_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_beam-search
sequence: string
- name: lemma_object_levenshtein_scores_beam-search
dtype: int64
- name: lemma_object_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 18183118
num_examples: 4740
download_size: 1702333
dataset_size: 18183118
- config_name: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_small_notypes_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: lemma_object_predictions_greedy
sequence: string
- name: lemma_object_levenshtein_scores_greedy
dtype: int64
- name: lemma_object_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 9416375
num_examples: 4740
download_size: 1027468
dataset_size: 9416375
- config_name: finetuned_on_template_full_eval_on_template_afp_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 22361047
num_examples: 16362
download_size: 2862813
dataset_size: 22361047
- config_name: finetuned_on_template_full_eval_on_template_octonions_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_beam-search
sequence: string
- name: template_levenshtein_scores_beam-search
dtype: int64
- name: template_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 580666
num_examples: 350
download_size: 63788
dataset_size: 580666
- config_name: finetuned_on_template_full_eval_on_template_octonions_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 342718
num_examples: 350
download_size: 43410
dataset_size: 342718
- config_name: finetuned_on_template_full_eval_on_template_small_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_beam-search
sequence: string
- name: template_levenshtein_scores_beam-search
dtype: int64
- name: template_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 12130894
num_examples: 4740
download_size: 1291724
dataset_size: 12130894
- config_name: finetuned_on_template_full_eval_on_template_small_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 9100002
num_examples: 4740
download_size: 965513
dataset_size: 9100002
- config_name: finetuned_on_template_small_eval_on_template_afp_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_beam-search
sequence: string
- name: template_levenshtein_scores_beam-search
dtype: int64
- name: template_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 31283835
num_examples: 16362
download_size: 3869362
dataset_size: 31283835
- config_name: finetuned_on_template_small_eval_on_template_afp_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 21519359
num_examples: 16362
download_size: 2706778
dataset_size: 21519359
- config_name: finetuned_on_template_small_eval_on_template_octonions_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_beam-search
sequence: string
- name: template_levenshtein_scores_beam-search
dtype: int64
- name: template_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 597716
num_examples: 350
download_size: 68884
dataset_size: 597716
- config_name: finetuned_on_template_small_eval_on_template_octonions_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 344503
num_examples: 350
download_size: 44546
dataset_size: 344503
- config_name: finetuned_on_template_small_eval_on_template_small_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_beam-search
sequence: string
- name: template_levenshtein_scores_beam-search
dtype: int64
- name: template_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 11587180
num_examples: 4740
download_size: 1318994
dataset_size: 11587180
- config_name: finetuned_on_template_small_eval_on_template_small_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 9099811
num_examples: 4740
download_size: 969876
dataset_size: 9099811
- config_name: finetuned_on_template_small_nodefs_eval_on_template_afp_nodefs_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_beam-search
sequence: string
- name: template_levenshtein_scores_beam-search
dtype: int64
- name: template_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 31053690
num_examples: 16362
download_size: 3880734
dataset_size: 31053690
- config_name: finetuned_on_template_small_nodefs_eval_on_template_afp_nodefs_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 21859263
num_examples: 16362
download_size: 2773037
dataset_size: 21859263
- config_name: finetuned_on_template_small_nodefs_eval_on_template_octonions_nodefs_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_beam-search
sequence: string
- name: template_levenshtein_scores_beam-search
dtype: int64
- name: template_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 575599
num_examples: 350
download_size: 67584
dataset_size: 575599
- config_name: finetuned_on_template_small_nodefs_eval_on_template_octonions_nodefs_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 348728
num_examples: 350
download_size: 46705
dataset_size: 348728
- config_name: finetuned_on_template_small_nodefs_eval_on_template_small_nodefs_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_beam-search
sequence: string
- name: template_levenshtein_scores_beam-search
dtype: int64
- name: template_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 11976488
num_examples: 4740
download_size: 1350093
dataset_size: 11976488
- config_name: finetuned_on_template_small_nodefs_eval_on_template_small_nodefs_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 9137505
num_examples: 4740
download_size: 978215
dataset_size: 9137505
- config_name: finetuned_on_template_small_notypes_eval_on_template_afp_notypes_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_beam-search
sequence: string
- name: template_levenshtein_scores_beam-search
dtype: int64
- name: template_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 31383652
num_examples: 16362
download_size: 3920810
dataset_size: 31383652
- config_name: finetuned_on_template_small_notypes_eval_on_template_afp_notypes_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 21576478
num_examples: 16362
download_size: 2730134
dataset_size: 21576478
- config_name: finetuned_on_template_small_notypes_eval_on_template_octonions_notypes_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_beam-search
sequence: string
- name: template_levenshtein_scores_beam-search
dtype: int64
- name: template_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 701349
num_examples: 350
download_size: 72236
dataset_size: 701349
- config_name: finetuned_on_template_small_notypes_eval_on_template_octonions_notypes_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 343114
num_examples: 350
download_size: 44388
dataset_size: 343114
- config_name: finetuned_on_template_small_notypes_eval_on_template_small_notypes_generation_beam-search
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_beam-search
sequence: string
- name: template_levenshtein_scores_beam-search
dtype: int64
- name: template_success_beam-search
dtype: bool
splits:
- name: train
num_bytes: 11680054
num_examples: 4740
download_size: 1330767
dataset_size: 11680054
- config_name: finetuned_on_template_small_notypes_eval_on_template_small_notypes_generation_greedy
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 9095378
num_examples: 4740
download_size: 970636
dataset_size: 9095378
- config_name: finetuned_on_{train_config}_eval_on_{eval_config}_generation_{gen_strat}
features:
- name: theory_file
dtype: string
- name: lemma_name
dtype: string
- name: lemma_command
dtype: string
- name: lemma_object
dtype: string
- name: template
dtype: string
- name: symbols
sequence: string
- name: types
sequence: string
- name: defs
sequence: string
- name: template_predictions_greedy
sequence: string
- name: template_levenshtein_scores_greedy
dtype: int64
- name: template_success_greedy
dtype: bool
splits:
- name: train
num_bytes: 9100002
num_examples: 4740
download_size: 965513
dataset_size: 9100002
configs:
- config_name: finetuned_on_lemma_object_full_eval_on_lemma_object_afp_generation_greedy
data_files:
- split: train
path: finetuned_on_lemma_object_full_eval_on_lemma_object_afp_generation_greedy/train-*
- config_name: finetuned_on_lemma_object_full_eval_on_lemma_object_octonions_generation_beam-search
data_files:
- split: train
path: finetuned_on_lemma_object_full_eval_on_lemma_object_octonions_generation_beam-search/train-*
- config_name: finetuned_on_lemma_object_full_eval_on_lemma_object_octonions_generation_greedy
data_files:
- split: train
path: finetuned_on_lemma_object_full_eval_on_lemma_object_octonions_generation_greedy/train-*
- config_name: finetuned_on_lemma_object_full_eval_on_lemma_object_small_generation_beam-search
data_files:
- split: train
path: finetuned_on_lemma_object_full_eval_on_lemma_object_small_generation_beam-search/train-*
- config_name: finetuned_on_lemma_object_full_eval_on_lemma_object_small_generation_greedy
data_files:
- split: train
path: finetuned_on_lemma_object_full_eval_on_lemma_object_small_generation_greedy/train-*
- config_name: finetuned_on_lemma_object_small_eval_on_lemma_object_afp_generation_beam-search
data_files:
- split: train
path: finetuned_on_lemma_object_small_eval_on_lemma_object_afp_generation_beam-search/train-*
- config_name: finetuned_on_lemma_object_small_eval_on_lemma_object_afp_generation_greedy
data_files:
- split: train
path: finetuned_on_lemma_object_small_eval_on_lemma_object_afp_generation_greedy/train-*
- config_name: finetuned_on_lemma_object_small_eval_on_lemma_object_octonions_generation_beam-search
data_files:
- split: train
path: finetuned_on_lemma_object_small_eval_on_lemma_object_octonions_generation_beam-search/train-*
- config_name: finetuned_on_lemma_object_small_eval_on_lemma_object_octonions_generation_greedy
data_files:
- split: train
path: finetuned_on_lemma_object_small_eval_on_lemma_object_octonions_generation_greedy/train-*
- config_name: finetuned_on_lemma_object_small_eval_on_lemma_object_small_generation_beam-search
data_files:
- split: train
path: finetuned_on_lemma_object_small_eval_on_lemma_object_small_generation_beam-search/train-*
- config_name: finetuned_on_lemma_object_small_eval_on_lemma_object_small_generation_greedy
data_files:
- split: train
path: finetuned_on_lemma_object_small_eval_on_lemma_object_small_generation_greedy/train-*
- config_name: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_afp_nodefs_generation_beam-search
data_files:
- split: train
path: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_afp_nodefs_generation_beam-search/train-*
- config_name: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_afp_nodefs_generation_greedy
data_files:
- split: train
path: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_afp_nodefs_generation_greedy/train-*
- config_name: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_octonions_nodefs_generation_beam-search
data_files:
- split: train
path: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_octonions_nodefs_generation_beam-search/train-*
- config_name: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_octonions_nodefs_generation_greedy
data_files:
- split: train
path: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_octonions_nodefs_generation_greedy/train-*
- config_name: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_small_nodefs_generation_beam-search
data_files:
- split: train
path: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_small_nodefs_generation_beam-search/train-*
- config_name: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_small_nodefs_generation_greedy
data_files:
- split: train
path: finetuned_on_lemma_object_small_nodefs_eval_on_lemma_object_small_nodefs_generation_greedy/train-*
- config_name: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_afp_notypes_generation_beam-search
data_files:
- split: train
path: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_afp_notypes_generation_beam-search/train-*
- config_name: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_afp_notypes_generation_greedy
data_files:
- split: train
path: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_afp_notypes_generation_greedy/train-*
- config_name: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_octonions_notypes_generation_beam-search
data_files:
- split: train
path: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_octonions_notypes_generation_beam-search/train-*
- config_name: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_octonions_notypes_generation_greedy
data_files:
- split: train
path: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_octonions_notypes_generation_greedy/train-*
- config_name: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_small_notypes_generation_beam-search
data_files:
- split: train
path: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_small_notypes_generation_beam-search/train-*
- config_name: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_small_notypes_generation_greedy
data_files:
- split: train
path: finetuned_on_lemma_object_small_notypes_eval_on_lemma_object_small_notypes_generation_greedy/train-*
- config_name: finetuned_on_template_full_eval_on_template_afp_generation_greedy
data_files:
- split: train
path: finetuned_on_template_full_eval_on_template_afp_generation_greedy/train-*
- config_name: finetuned_on_template_full_eval_on_template_octonions_generation_beam-search
data_files:
- split: train
path: finetuned_on_template_full_eval_on_template_octonions_generation_beam-search/train-*
- config_name: finetuned_on_template_full_eval_on_template_octonions_generation_greedy
data_files:
- split: train
path: finetuned_on_template_full_eval_on_template_octonions_generation_greedy/train-*
- config_name: finetuned_on_template_full_eval_on_template_small_generation_beam-search
data_files:
- split: train
path: finetuned_on_template_full_eval_on_template_small_generation_beam-search/train-*
- config_name: finetuned_on_template_full_eval_on_template_small_generation_greedy
data_files:
- split: train
path: finetuned_on_template_full_eval_on_template_small_generation_greedy/train-*
- config_name: finetuned_on_template_small_eval_on_template_afp_generation_beam-search
data_files:
- split: train
path: finetuned_on_template_small_eval_on_template_afp_generation_beam-search/train-*
- config_name: finetuned_on_template_small_eval_on_template_afp_generation_greedy
data_files:
- split: train
path: finetuned_on_template_small_eval_on_template_afp_generation_greedy/train-*
- config_name: finetuned_on_template_small_eval_on_template_octonions_generation_beam-search
data_files:
- split: train
path: finetuned_on_template_small_eval_on_template_octonions_generation_beam-search/train-*
- config_name: finetuned_on_template_small_eval_on_template_octonions_generation_greedy
data_files:
- split: train
path: finetuned_on_template_small_eval_on_template_octonions_generation_greedy/train-*
- config_name: finetuned_on_template_small_eval_on_template_small_generation_beam-search
data_files:
- split: train
path: finetuned_on_template_small_eval_on_template_small_generation_beam-search/train-*
- config_name: finetuned_on_template_small_eval_on_template_small_generation_greedy
data_files:
- split: train
path: finetuned_on_template_small_eval_on_template_small_generation_greedy/train-*
- config_name: finetuned_on_template_small_nodefs_eval_on_template_afp_nodefs_generation_beam-search
data_files:
- split: train
path: finetuned_on_template_small_nodefs_eval_on_template_afp_nodefs_generation_beam-search/train-*
- config_name: finetuned_on_template_small_nodefs_eval_on_template_afp_nodefs_generation_greedy
data_files:
- split: train
path: finetuned_on_template_small_nodefs_eval_on_template_afp_nodefs_generation_greedy/train-*
- config_name: finetuned_on_template_small_nodefs_eval_on_template_octonions_nodefs_generation_beam-search
data_files:
- split: train
path: finetuned_on_template_small_nodefs_eval_on_template_octonions_nodefs_generation_beam-search/train-*
- config_name: finetuned_on_template_small_nodefs_eval_on_template_octonions_nodefs_generation_greedy
data_files:
- split: train
path: finetuned_on_template_small_nodefs_eval_on_template_octonions_nodefs_generation_greedy/train-*
- config_name: finetuned_on_template_small_nodefs_eval_on_template_small_nodefs_generation_beam-search
data_files:
- split: train
path: finetuned_on_template_small_nodefs_eval_on_template_small_nodefs_generation_beam-search/train-*
- config_name: finetuned_on_template_small_nodefs_eval_on_template_small_nodefs_generation_greedy
data_files:
- split: train
path: finetuned_on_template_small_nodefs_eval_on_template_small_nodefs_generation_greedy/train-*
- config_name: finetuned_on_template_small_notypes_eval_on_template_afp_notypes_generation_beam-search
data_files:
- split: train
path: finetuned_on_template_small_notypes_eval_on_template_afp_notypes_generation_beam-search/train-*
- config_name: finetuned_on_template_small_notypes_eval_on_template_afp_notypes_generation_greedy
data_files:
- split: train
path: finetuned_on_template_small_notypes_eval_on_template_afp_notypes_generation_greedy/train-*
- config_name: finetuned_on_template_small_notypes_eval_on_template_octonions_notypes_generation_beam-search
data_files:
- split: train
path: finetuned_on_template_small_notypes_eval_on_template_octonions_notypes_generation_beam-search/train-*
- config_name: finetuned_on_template_small_notypes_eval_on_template_octonions_notypes_generation_greedy
data_files:
- split: train
path: finetuned_on_template_small_notypes_eval_on_template_octonions_notypes_generation_greedy/train-*
- config_name: finetuned_on_template_small_notypes_eval_on_template_small_notypes_generation_beam-search
data_files:
- split: train
path: finetuned_on_template_small_notypes_eval_on_template_small_notypes_generation_beam-search/train-*
- config_name: finetuned_on_template_small_notypes_eval_on_template_small_notypes_generation_greedy
data_files:
- split: train
path: finetuned_on_template_small_notypes_eval_on_template_small_notypes_generation_greedy/train-*
- config_name: finetuned_on_{train_config}_eval_on_{eval_config}_generation_{gen_strat}
data_files:
- split: train
path: finetuned_on_{train_config}_eval_on_{eval_config}_generation_{gen_strat}/train-*
---
|
mariannedhk/librispeech_phones | mariannedhk | 2025-06-06T10:43:58Z | 120 | 0 | [
"language:en",
"license:cc-by-4.0",
"size_categories:100M<n<1B",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-21T16:46:36Z | null | ---
license: cc-by-4.0
language:
- en
dataset_info:
- config_name: all
features:
- name: phone
dtype: string
- name: phone_stress
dtype: string
- name: phone_ipa
dtype: string
- name: phone_position
dtype: int64
- name: start_time
dtype: float64
- name: end_time
dtype: float64
- name: speaker_id
dtype: int64
- name: speaker_sex
dtype: string
- name: file_id
dtype: string
- name: subset
dtype: string
splits:
- name: train.clean.100
num_bytes: 324812682
num_examples: 3528037
- name: train.clean.360
num_bytes: 1180921939
num_examples: 12809090
- name: train.other.500
num_bytes: 1560377418
num_examples: 16940272
- name: dev.clean
num_bytes: 16702598
num_examples: 193644
- name: dev.other
num_bytes: 15282780
num_examples: 177275
- name: test.clean
num_bytes: 16461329
num_examples: 189327
- name: test.other
num_bytes: 15830959
num_examples: 181544
download_size: 480931464
dataset_size: 3130389705
- config_name: all_dev
features:
- name: phone
dtype: string
- name: phone_stress
dtype: string
- name: phone_ipa
dtype: string
- name: phone_position
dtype: int64
- name: start_time
dtype: float64
- name: end_time
dtype: float64
- name: speaker_id
dtype: int64
- name: speaker_sex
dtype: string
- name: file_id
dtype: string
- name: subset
dtype: string
splits:
- name: dev.clean
num_bytes: 16702598
num_examples: 193644
- name: dev.other
num_bytes: 15282780
num_examples: 177275
download_size: 4905957
dataset_size: 31985378
- config_name: all_test
features:
- name: phone
dtype: string
- name: phone_stress
dtype: string
- name: phone_ipa
dtype: string
- name: phone_position
dtype: int64
- name: start_time
dtype: float64
- name: end_time
dtype: float64
- name: speaker_id
dtype: int64
- name: speaker_sex
dtype: string
- name: file_id
dtype: string
- name: subset
dtype: string
splits:
- name: test.clean
num_bytes: 16461329
num_examples: 189327
- name: test.other
num_bytes: 15830959
num_examples: 181544
download_size: 4957098
dataset_size: 32292288
- config_name: all_train
features:
- name: phone
dtype: string
- name: phone_stress
dtype: string
- name: phone_ipa
dtype: string
- name: phone_position
dtype: int64
- name: start_time
dtype: float64
- name: end_time
dtype: float64
- name: speaker_id
dtype: int64
- name: speaker_sex
dtype: string
- name: file_id
dtype: string
- name: subset
dtype: string
splits:
- name: train.clean.100
num_bytes: 324812682
num_examples: 3528037
- name: train.clean.360
num_bytes: 1180921939
num_examples: 12809090
- name: train.other.500
num_bytes: 1560377418
num_examples: 16940272
download_size: 471068409
dataset_size: 3066112039
- config_name: default
features:
- name: phone
dtype: string
- name: phone_stress
dtype: string
- name: phone_ipa
dtype: string
- name: phone_position
dtype: int64
- name: start_time
dtype: float64
- name: end_time
dtype: float64
- name: speaker_id
dtype: int64
- name: speaker_sex
dtype: string
- name: file_id
dtype: string
- name: subset
dtype: string
splits:
- name: train.clean.100
num_bytes: 324812682
num_examples: 3528037
- name: train.clean.360
num_bytes: 1180921939
num_examples: 12809090
- name: train.other.500
num_bytes: 1560377418
num_examples: 16940272
- name: dev.clean
num_bytes: 16702598
num_examples: 193644
- name: dev.other
num_bytes: 15282780
num_examples: 177275
- name: test.clean
num_bytes: 16461329
num_examples: 189327
- name: test.other
num_bytes: 15830959
num_examples: 181544
download_size: 480931464
dataset_size: 3130389705
configs:
- config_name: all
data_files:
- split: train.clean.100
path: all/train.clean.100-*
- split: train.clean.360
path: all/train.clean.360-*
- split: train.other.500
path: all/train.other.500-*
- split: dev.clean
path: all/dev.clean-*
- split: dev.other
path: all/dev.other-*
- split: test.clean
path: all/test.clean-*
- split: test.other
path: all/test.other-*
- config_name: all_dev
data_files:
- split: dev.clean
path: all_dev/dev.clean-*
- split: dev.other
path: all_dev/dev.other-*
- config_name: all_test
data_files:
- split: test.clean
path: all_test/test.clean-*
- split: test.other
path: all_test/test.other-*
- config_name: all_train
data_files:
- split: train.clean.100
path: all_train/train.clean.100-*
- split: train.clean.360
path: all_train/train.clean.360-*
- split: train.other.500
path: all_train/train.other.500-*
- config_name: default
data_files:
- split: train.clean.100
path: data/train.clean.100-*
- split: train.clean.360
path: data/train.clean.360-*
- split: train.other.500
path: data/train.other.500-*
- split: dev.clean
path: data/dev.clean-*
- split: dev.other
path: data/dev.other-*
- split: test.clean
path: data/test.clean-*
- split: test.other
path: data/test.other-*
---
# Summary
Phone annotations for the [LibriSpeech](https://www.openslr.org/12) corpus.
This dataset can for example be used in combination with audio from the [librispeech_asr](https://huggingface.co/datasets/openslr/librispeech_asr) dataset to extract phone embeddings from an audio encoder model.
# Data sources
Phone start and end times are extracted from the [LibriSpeech Alignments](https://zenodo.org/records/2619474), obtained using the Montreal Forced Aligner by [Lugosch et al. (2019)](https://www.isca-archive.org/interspeech_2019/lugosch19_interspeech.html).
Phone position is derived from the same source, using the word alignments to enumerate phones within each word start and end time. [`missing_alignments.json`](https://huggingface.co/datasets/mariannedhk/librispeech_phones/blob/main/missing_alignments.json) lists identifiers of files in the LibriSpeech corpus for which alignments are not available (by dataset split).
Speaker sex is inferred from the `SPEAKERS.TXT` metadata file released with the LibriSpeech corpus.
# Columns
- `phone` phone label in ARPAbet transcription format (excluding stress marker)
- `phone_stress` phone label in ARPAbet transcription format (including stress marker)
- `phone_ipa` phone label in International Phonetic Alphabet transcription format
- `phone_position` phone position within a word
- `start_time` phone start time relative to audio file onset
- `end_time` phone end time relative to audio file onset
- `speaker_id` unique identifier for each speaker in the LibriSpeech corpus
- `speaker_sex` speaker sex as reported in the LibriSpeech metadata
- `file_id` unique identifier for each file in the LibriSpeech corpus
- `subset` subset of the LibriSpeech corpus
# Example usage
```python
from datasets import load_dataset
# download phone annotations for the full librispeech corpus
libri_phones = load_dataset("mariannedhk/librispeech_phones")
# download only phone annotations for the development sets
# similarly, specify "all_train" or "all_test" for downloading only phone annotations from the train or test sets, respectively
libri_dev_phones = load_dataset("mariannedhk/librispeech_phones", "all_dev")
# load annotations for only the dev.clean split
# (this may still download the full dataset first)
libri_dev_clean_phones = load_dataset("mariannedhk/librispeech_phones", split="dev.clean")
``` |
if001/MALLS-ja | if001 | 2025-06-06T10:11:05Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T10:11:02Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: FOL
dtype: string
- name: NL
dtype: string
splits:
- name: train
num_bytes: 8994849
num_examples: 27284
download_size: 5128544
dataset_size: 8994849
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jccj/so100_block_in_cup_at_home | jccj | 2025-06-06T09:51:51Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"lerobot",
"so100",
"block_in_cup"
] | [
"robotics"
] | 2025-06-06T09:04:43Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- lerobot
- so100
- block_in_cup
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100_follower",
"total_episodes": 48,
"total_frames": 16860,
"total_tasks": 1,
"total_videos": 96,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:48"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"shoulder_pan.pos",
"shoulder_lift.pos",
"elbow_flex.pos",
"wrist_flex.pos",
"wrist_roll.pos",
"gripper.pos"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"shoulder_pan.pos",
"shoulder_lift.pos",
"elbow_flex.pos",
"wrist_flex.pos",
"wrist_roll.pos",
"gripper.pos"
]
},
"observation.images.top": {
"dtype": "video",
"shape": [
1080,
1920,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 1080,
"video.width": 1920,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.wrist_left": {
"dtype": "video",
"shape": [
1080,
1920,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 1080,
"video.width": 1920,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
volcanos/OpenThoughts2-1M-ShortThink | volcanos | 2025-06-06T09:50:23Z | 0 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T09:46:04Z | null | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: question
dtype: string
- name: source
dtype: string
- name: id
dtype: int64
splits:
- name: train
num_bytes: 5503025978
num_examples: 1028848
download_size: 1958219494
dataset_size: 5503025978
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
lilaceclipse/orpheus-ft-sage-tokenized | lilaceclipse | 2025-06-06T09:38:34Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T08:28:53Z | null | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: labels
sequence: int64
- name: attention_mask
sequence: int8
splits:
- name: train
num_bytes: 410048
num_examples: 115
download_size: 203043
dataset_size: 410048
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
smikulas/MNLP_M3_rag_documents_1 | smikulas | 2025-06-06T09:16:05Z | 0 | 0 | [
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"rag",
"cs-552",
"question-answering",
"transformer",
"milestone3"
] | [] | 2025-06-06T09:15:19Z | null | ---
license: mit
language: en
tags:
- rag
- cs-552
- question-answering
- transformer
- milestone3
---
# MNLP_M3_rag_documents
This is a sample set of documents for use in Retrieval-Augmented Generation (RAG) evaluation.
|
dgambettaphd/D_llm2_run0_gen0_WXS_doc1000_synt64_lr1e-04_acm_FRESH | dgambettaphd | 2025-06-06T08:58:32Z | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T08:58:26Z | null | ---
dataset_info:
features:
- name: id_doc
dtype: int64
- name: text
dtype: string
- name: dataset
dtype: string
- name: gen
dtype: int64
- name: synt
dtype: int64
- name: MPP
dtype: float64
splits:
- name: train
num_bytes: 9206292
num_examples: 16000
download_size: 5529638
dataset_size: 9206292
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
upb-nlp/ro_fake_news | upb-nlp | 2025-06-06T08:56:14Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T08:55:52Z | null | ---
dataset_info:
- config_name: default
features:
- name: id
dtype: int64
- name: supernarrative
dtype: string
- name: narrative
dtype: string
- name: headline
dtype: string
- name: body
dtype: string
- name: similar
dtype: string
- name: link
dtype: string
- name: total_shares
dtype: float64
- name: total_facebook_shares
dtype: float64
- name: twitter_shares
dtype: float64
- name: pinterest_shares
dtype: float64
- name: total_reddit_engagements
dtype: float64
- name: published_date
dtype: string
- name: author_name
dtype: string
- name: num_words
dtype: float64
- name: facebook_comments
dtype: float64
- name: facebook_shares
dtype: float64
- name: facebook_likes
dtype: float64
- name: num_linking_domains
dtype: float64
- name: wow_count
dtype: float64
- name: love_count
dtype: float64
- name: haha_count
dtype: float64
- name: sad_count
dtype: float64
- name: angry_count
dtype: float64
splits:
- name: train
num_bytes: 1588938
num_examples: 376
- name: validation
num_bytes: 537186
num_examples: 125
- name: test
num_bytes: 515054
num_examples: 126
download_size: 1495956
dataset_size: 2641178
- config_name: unlabeled
features:
- name: headline
dtype: string
- name: body
dtype: string
- name: link
dtype: string
- name: total_shares
dtype: float64
- name: total_facebook_shares
dtype: float64
- name: twitter_shares
dtype: float64
- name: pinterest_shares
dtype: float64
- name: total_reddit_engagements
dtype: float64
- name: published_date
dtype: string
- name: author_name
dtype: string
- name: num_words
dtype: float64
- name: facebook_comments
dtype: float64
- name: facebook_shares
dtype: float64
- name: facebook_likes
dtype: float64
- name: num_linking_domains
dtype: float64
- name: wow_count
dtype: float64
- name: love_count
dtype: float64
- name: haha_count
dtype: float64
- name: sad_count
dtype: float64
- name: angry_count
dtype: float64
splits:
- name: train
num_bytes: 29407304
num_examples: 7950
download_size: 16874398
dataset_size: 29407304
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
- config_name: unlabeled
data_files:
- split: train
path: unlabeled/train-*
---
|
Tsegayesemere/emotions_3 | Tsegayesemere | 2025-06-06T08:49:23Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T06:33:52Z | null | ---
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': ααα΅
'1': αα α
'2': αα°α α
'3': ααα£α΅
splits:
- name: train
num_bytes: 25534
num_examples: 163
- name: validation
num_bytes: 15828
num_examples: 95
- name: test
num_bytes: 11824
num_examples: 74
download_size: 33957
dataset_size: 53186
---
# Dataset Card for "emotions_3"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
PhanithLIM/asr-wmc-evaluate | PhanithLIM | 2025-06-06T08:33:03Z | 96 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-05-08T11:00:31Z | null | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: text
dtype: string
- name: mms
dtype: string
- name: whisper-tiny-aug-7-may-lightning-v1
dtype: string
- name: whisper-base-aug-20-april-lightning-v1
dtype: string
- name: whisper-small-khmer
dtype: string
- name: google_api
dtype: string
- name: whisper-medium-aug-05-june
dtype: string
splits:
- name: test
num_bytes: 154657277.0
num_examples: 334
download_size: 153886621
dataset_size: 154657277.0
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
|
deepakkarkala/dpo_sitcom_chandlerbing | deepakkarkala | 2025-06-06T08:21:57Z | 75 | 0 | [
"region:us"
] | [] | 2025-06-02T10:44:29Z | null | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: prompt
list:
- name: content
dtype: string
- name: role
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: __index_level_0__
dtype: int64
splits:
- name: train
num_bytes: 52390276
num_examples: 7468
download_size: 4935333
dataset_size: 52390276
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
pinatafarms/DAD-3DHeads | pinatafarms | 2025-06-06T08:16:57Z | 0 | 0 | [
"license:cc-by-nc-4.0",
"region:us"
] | [] | 2025-06-06T08:16:57Z | null | ---
license: cc-by-nc-4.0
---
|
RunsenXu/MMSI-Bench | RunsenXu | 2025-06-06T08:10:05Z | 104 | 2 | [
"task_categories:question-answering",
"task_categories:visual-question-answering",
"task_categories:multiple-choice",
"language:en",
"license:cc-by-4.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2505.23764",
"region:us"
] | [
"question-answering",
"visual-question-answering",
"multiple-choice"
] | 2025-05-27T09:44:38Z | 2 | ---
language:
- en
license: cc-by-4.0
size_categories:
- 1K<n<10K
task_categories:
- question-answering
- visual-question-answering
- multiple-choice
pretty_name: MMSI-Bench
dataset_info:
features:
- name: id
dtype: int64
- name: images
sequence: image
- name: question_type
dtype: string
- name: question
dtype: string
- name: answer
dtype: string
- name: thought
dtype: string
splits:
- name: test
num_examples: 1000
configs:
- config_name: default
data_files:
- split: test
path: MMSI_Bench.parquet
---
# MMSI-Bench
This repo contains evaluation code for the paper "[MMSI-Bench: A Benchmark for Multi-Image Spatial Intelligence]"
[**π Homepage**](https://runsenxu.com/projects/MMSI_Bench/) | [**π€ Dataset**](https://huggingface.co/datasets/RunsenXu/MMSI-Bench) | [**π Paper**](https://arxiv.org/pdf/2505.23764) | [**π» Code**](https://github.com/OpenRobotLab/MMSI-Bench) | [**π arXiv**](https://arxiv.org/abs/2505.23764)
## πNews
<!-- **π₯[2025-05-31]: MMSI-Bench has been supported in the [VLMEvalKit](https://github.com/open-compass/VLMEvalKit) repository.** -->
**π₯[2025-05-30]: We released the ArXiv paper.**
## Load Dataset
```
from datasets import load_dataset
mmsi_bench = load_dataset("RunsenXu/MMSI-Bench")
print(mmsi_bench)
```
## Evaluation
Please refer to the [evaluation guidelines](https://github.com/open-compass/VLMEvalKit/blob/main/docs/en/Quickstart.md) of [VLMEvalKit](https://github.com/open-compass/VLMEvalKit)
<!-- <img src="assets/radar_v1.png" width="400" /> -->
## π MMSI-Bench Leaderboard
| Model | Avg. (%) | Type |
|------------------------------|:--------:|:-------------|
| π₯ **Human Level** | 97.2 | Baseline |
| π₯ o3 | 41.0 | Proprietary |
| π₯ GPT-4.5 | 40.3 | Proprietary |
| Gemini-2.5-Pro--Thinking | 37.0 | Proprietary |
| Gemini-2.5-Pro | 36.9 | Proprietary |
| Doubao-1.5-pro | 33.0 | Proprietary |
| GPT-4.1 | 30.9 | Proprietary |
| Qwen2.5-VL-72B | 30.7 | Open-source |
| NVILA-15B | 30.5 | Open-source |
| GPT-4o | 30.3 | Proprietary |
| Claude-3.7-Sonnet--Thinking | 30.2 | Proprietary |
| Seed1.5-VL | 29.7 | Proprietary |
| InternVL2.5-2B | 29.0 | Open-source |
| InternVL2.5-8B | 28.7 | Open-source |
| DeepSeek-VL2-Small | 28.6 | Open-source |
| InternVL3-78B | 28.5 | Open-source |
| InternVL2.5-78B | 28.5 | Open-source |
| LLaVA-OneVision-72B | 28.4 | Open-source |
| NVILA-8B | 28.1 | Open-source |
| InternVL2.5-26B | 28.0 | Open-source |
| DeepSeek-VL2 | 27.1 | Open-source |
| InternVL3-1B | 27.0 | Open-source |
| InternVL3-9B | 26.7 | Open-source |
| Qwen2.5-VL-3B | 26.5 | Open-source |
| InternVL2.5-1B | 26.1 | Open-source |
| InternVL2.5-4B | 26.3 | Open-source |
| Qwen2.5-VL-7B | 25.9 | Open-source |
| InternVL3-8B | 25.7 | Open-source |
| Llama-3.2-11B-Vision | 25.4 | Open-source |
| InternVL3-2B | 25.3 | Open-source |
| π **Random Guessing** | 25.0 | Baseline |
| LLaVA-OneVision-7B | 24.5 | Open-source |
| DeepSeek-VL2-Tiny | 24.0 | Open-source |
| Blind GPT-4o | 22.7 | Baseline |
## Acknowledgment
MMSI-Bench makes use of data from existing image datasets: [ScanNet](http://www.scan-net.org/), [nuScenes](https://www.nuscenes.org/), [Matterport3D](https://niessner.github.io/Matterport/), [Ego4D](https://ego4d-data.org/), [AgiBot-World](https://agibot-world.cn/), [DTU](https://roboimagedata.compute.dtu.dk/?page_id=36), [DAVIS-2017](https://davischallenge.org/), and [Waymo](https://waymo.com/open/). We thank these teams for their open-source contributions.
## Contact
- Sihan Yang: [email protected]
- Runsen Xu: [email protected]
## Citation
```bibtex
@article{yang2025mmsi,
title={MMSI-Bench: A Benchmark for Multi-Image Spatial Intelligence},
author={Yang, Sihan and Xu, Runsen and Xie, Yiman and Yang, Sizhe and Li, Mo and Lin, Jingli and Zhu, Chenming and Chen, Xiaochen and Duan, Haodong and Yue, Xiangyu and Lin, Dahua and Wang, Tai and Pang, Jiangmiao},
journal={arXiv preprint arXiv:2505.23764},
year={2025}
}
``` |
howardat666/so101_test | howardat666 | 2025-06-06T07:53:17Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so101",
"tutorial"
] | [
"robotics"
] | 2025-06-06T05:56:51Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so101
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so101",
"total_episodes": 1,
"total_frames": 864,
"total_tasks": 1,
"total_videos": 2,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:1"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
Ryosei2/test_0606_4 | Ryosei2 | 2025-06-06T07:39:35Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot"
] | [
"robotics"
] | 2025-06-06T07:39:29Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 1,
"total_frames": 893,
"total_tasks": 1,
"total_videos": 2,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:1"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
Ryosei2/test_0606_2 | Ryosei2 | 2025-06-06T07:31:48Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot"
] | [
"robotics"
] | 2025-06-06T07:31:43Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 2,
"total_frames": 212,
"total_tasks": 1,
"total_videos": 4,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
TAUR-dev/SIE_EVAL__SIEXP_skill_inject_random_lm2d__rl__results | TAUR-dev | 2025-06-06T07:14:38Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T07:14:34Z | null | ---
dataset_info:
features:
- name: task
dtype: string
- name: alias
dtype: string
- name: evaluation_api_cost,none
dtype: float64
- name: evaluation_api_cost_stderr,none
dtype: string
- name: exact_match,none
dtype: float64
- name: exact_match_stderr,none
dtype: string
- name: extracted_answers,none
dtype: int64
- name: extracted_answers_stderr,none
dtype: string
splits:
- name: train
num_bytes: 1183
num_examples: 16
download_size: 4299
dataset_size: 1183
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
TAUR-dev/SIE_EVAL__SIEXP_first_response__ME__lm2d__sft__samples | TAUR-dev | 2025-06-06T07:10:50Z | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T07:10:47Z | null | ---
dataset_info:
features:
- name: doc_id
dtype: int64
- name: doc
dtype: string
- name: target
dtype: string
- name: arguments
dtype: string
- name: resps
dtype: string
- name: filtered_resps
dtype: string
- name: doc_hash
dtype: string
- name: prompt_hash
dtype: string
- name: target_hash
dtype: string
- name: exact_match
dtype: int64
- name: extracted_answers
dtype: string
- name: source_file
dtype: string
- name: generation
dtype: string
- name: info
dtype: string
- name: evaluation_api_cost
dtype: string
splits:
- name: train
num_bytes: 158831788
num_examples: 3656
download_size: 21733185
dataset_size: 158831788
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jvelja/results_3b_clean | jvelja | 2025-06-06T07:09:10Z | 100 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-02T09:47:41Z | null | ---
dataset_info:
features:
- name: problem_id
dtype: string
- name: problem
dtype: string
- name: reasoning
dtype: string
- name: solution
dtype: string
splits:
- name: train
num_bytes: 1032249
num_examples: 387
download_size: 464200
dataset_size: 1032249
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Jdemonn/NNewRefDrone | Jdemonn | 2025-06-06T07:08:37Z | 0 | 0 | [
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T06:59:24Z | null | ---
license: apache-2.0
---
|
TAUR-dev/SIE_EVAL__SIEXP__CC__concat_all__lm2d__rl__results | TAUR-dev | 2025-06-06T06:56:12Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T06:56:11Z | null | ---
dataset_info:
features:
- name: task
dtype: string
- name: alias
dtype: string
- name: evaluation_api_cost,none
dtype: float64
- name: evaluation_api_cost_stderr,none
dtype: string
- name: exact_match,none
dtype: float64
- name: exact_match_stderr,none
dtype: string
- name: extracted_answers,none
dtype: int64
- name: extracted_answers_stderr,none
dtype: string
splits:
- name: train
num_bytes: 1183
num_examples: 16
download_size: 4295
dataset_size: 1183
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
cyh002/sealion-prompt-engineering-inference-instruct-results | cyh002 | 2025-06-06T06:54:02Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-05T22:19:28Z | null | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
- name: language
dtype: string
- name: medium
dtype: string
- name: topic
dtype: string
- name: domain
dtype: string
- name: prompt
dtype: string
- name: predicted_label
dtype: string
splits:
- name: inference_dataset
num_bytes: 585978
num_examples: 500
download_size: 131565
dataset_size: 585978
configs:
- config_name: default
data_files:
- split: inference_dataset
path: data/inference_dataset-*
---
|
nz-nz/eval_so101_test_smolvla | nz-nz | 2025-06-06T06:24:19Z | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | 2025-06-06T06:24:15Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so101",
"total_episodes": 2,
"total_frames": 538,
"total_tasks": 1,
"total_videos": 4,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.wrist.right": {
"dtype": "video",
"shape": [
640,
480,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 640,
"video.width": 480,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.top": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
MBZUAI/VideoMathQA | MBZUAI | 2025-06-06T06:23:41Z | 97 | 3 | [
"task_categories:visual-question-answering",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"modality:video",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2506.05349",
"region:us"
] | [
"visual-question-answering"
] | 2025-06-01T18:56:13Z | 3 | ---
license: apache-2.0
task_categories:
- visual-question-answering
configs:
- config_name: mcq
data_files:
- split: test
path: videomathqa_mcq_test.parquet
- config_name: multi_binary
data_files:
- split: test
path: videomathqa_mbin_test.parquet
---
# VideoMathQA: Benchmarking Mathematical Reasoning via Multimodal Understanding in Videos
[](https://arxiv.org/abs/2506.05349)
[](https://mbzuai-oryx.github.io/VideoMathQA)
[](https://hanoonar.github.io/VideoMathQA/#leaderboard-2)
[](https://hanoonar.github.io/VideoMathQA/#leaderboard)
[](https://github.com/EvolvingLMMs-Lab/lmms-eval/tree/main/lmms_eval/tasks/videomathqa)
## π£ Announcement
Note that the Official evaluation for **VideoMathQA** is supported in the [`lmms-eval`](https://github.com/EvolvingLMMs-Lab/lmms-eval/tree/main/lmms_eval/tasks/videomathqa) framework. Please use the GitHub repository [`mbzuai-oryx/VideoMathQA`](https://github.com/mbzuai-oryx/VideoMathQA) to create or track any issues related to VideoMathQA that you may encounter.
---
## π‘ VideoMathQA
**VideoMathQA** is a benchmark designed to evaluate mathematical reasoning in real-world educational videos. It requires models to interpret and integrate information from **three modalities**, visuals, audio, and text, across time. The benchmark tackles the **needle-in-a-multimodal-haystack** problem, where key information is sparse and spread across different modalities and moments in the video.
<p align="center">
<img src="images/intro_fig.png" alt="Highlight Figure"><br>
<em>The foundation of our benchmark is the needle-in-a-multimodal-haystack challenge, capturing the core difficulty of cross-modal reasoning across time from visual, textual, and audio streams. Built on this, VideoMathQA categorizes each question along four key dimensions: reasoning type, mathematical concept, video duration, and difficulty.</em>
</p>
---
## π₯ Highlights
- **Multimodal Reasoning Benchmark:** VideoMathQA introduces a challenging **needle-in-a-multimodal-haystack** setup where models must reason across **visuals, text and audio**. Key information is **sparsely distributed across modalities and time**, requiring strong performance in fine-grained visual understanding, multimodal integration, and reasoning.
- **Three Types of Reasoning:** Questions are categorized into: **Problem Focused**, where the question is explicitly stated and solvable via direct observation and reasoning from the video; **Concept Transfer**, where a demonstrated method or principle is adapted to a newly posed problem; **Deep Instructional Comprehension**, which requires understanding long-form instructional content, interpreting partially worked-out steps, and completing the solution.
- **Diverse Evaluation Dimensions:** Each question is evaluated across four axes, which capture diversity in content, length, complexity, and reasoning depth.
**mathematical concepts**, 10 domains such as geometry, statistics, arithmetic, and charts; **video duration**, ranging from 10s to 1 hour, categorized as short, medium, or long; **difficulty level**; and **reasoning type**.
- **High-Quality Human Annotations:** The benchmark includes **420 expert-curated questions**, each with five answer choices, a correct answer, and detailed **chain-of-thought (CoT) steps**. Over **2,945 reasoning steps** have been manually written, reflecting **920+ hours** of expert annotation effort with rigorous quality control.
## π Examples from the Benchmark
We present example questions from <strong>VideoMathQA</strong> illustrating the three reasoning types: Problem Focused, Concept Transfer, and Deep Comprehension. The benchmark includes evolving dynamics in a video, complex text prompts, five multiple-choice options, the expert-annotated step-by-step reasoning to solve the given problem, and the final correct answer as shown above.
<p align="center">
<img src="images/data_fig.png" alt="Figure 1" width="90%">
</p>
---
## π Overview of VideoMathQA
We illustrate an overview of the <strong>VideoMathQA</strong> benchmark through: <strong>a)</strong> The distribution of questions and model performance across ten mathematical concepts, which highlights a significant gap in the current multimodal models and their ability to perform mathematical reasoning over videos. <strong>b)</strong> The distribution of video durations, spanning from short clips of 10s to long videos up to 1hr. <strong>c)</strong> Our three-stage annotation pipeline performed by expert science graduates, who annotate detailed step-by-step reasoning trails, with strict quality assessment at each stage.
<p align="center">
<img src="images/stat_fig.png" alt="Figure 2" width="90%">
</p>
|
sajal09/Calib64_32_32 | sajal09 | 2025-06-06T06:21:27Z | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-06-06T06:21:25Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: dataset
dtype: string
splits:
- name: train
num_bytes: 127336
num_examples: 128
download_size: 77916
dataset_size: 127336
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
fjpaxkm/so100_test | fjpaxkm | 2025-06-06T06:20:22Z | 364 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100",
"tutorial"
] | [
"robotics"
] | 2025-05-27T10:08:35Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 2,
"total_frames": 836,
"total_tasks": 1,
"total_videos": 4,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.height": 480,
"video.width": 640,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"video.fps": 30,
"video.channels": 3,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
Subsets and Splits