The dataset viewer is not available for this split.
Cannot extract the features (columns) for the split 'train' of the config 'default' of the dataset.
Error code: FeaturesError Exception: ArrowInvalid Message: Schema at index 1 was different: config: struct<name: string, backend: struct<name: string, version: string, _target_: string, task: string, library: string, model_type: string, model: string, processor: string, device: string, device_ids: string, seed: int64, inter_op_num_threads: null, intra_op_num_threads: null, model_kwargs: struct<>, processor_kwargs: struct<>, no_weights: bool, device_map: null, torch_dtype: null, eval_mode: bool, to_bettertransformer: bool, low_cpu_mem_usage: null, attn_implementation: null, cache_implementation: null, autocast_enabled: bool, autocast_dtype: null, torch_compile: bool, torch_compile_target: string, torch_compile_config: struct<>, quantization_scheme: null, quantization_config: struct<>, deepspeed_inference: bool, deepspeed_inference_config: struct<>, peft_type: null, peft_config: struct<>>, scenario: struct<name: string, _target_: string, max_steps: int64, warmup_steps: int64, dataset_shapes: struct<dataset_size: int64, sequence_length: int64, num_choices: int64>, training_arguments: struct<per_device_train_batch_size: int64, gradient_accumulation_steps: int64, output_dir: string, evaluation_strategy: string, eval_strategy: string, save_strategy: string, do_train: bool, use_cpu: bool, max_steps: int64, do_eval: bool, do_predict: bool, report_to: string, skip_memory_metrics: bool, ddp_find_unused_parameters: bool>, latency: bool, memory: bool, energy: bool>, launcher: struct<name: string, _target_: string, device_isolation: bool, device_isolation_action: string, numactl: bool, numactl_kwargs: struct<>, start_method: string>, environment: struct<cpu: string, cpu_count: int64, cpu_ram_mb: double, system: string, machine: string, platform: string, processor: string, python_version: string, gpu: list<item: string>, gpu_count: int64, gpu_vram_mb: int64, optimum_benchmark_version: string, optimum_benchmark_commit: null, transformers_version: string, transformers_commit: 
null, accelerate_version: string, accelerate_commit: null, diffusers_version: string, diffusers_commit: null, optimum_version: null, optimum_commit: null, timm_version: string, timm_commit: null, peft_version: string, peft_commit: null>, print_report: bool, log_report: bool> report: struct<overall: struct<memory: struct<unit: string, max_ram: double, max_global_vram: double, max_process_vram: double, max_reserved: double, max_allocated: double>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: double, stdev_: double>, throughput: struct<unit: string, value: double>, energy: struct<unit: string, cpu: double, ram: double, gpu: double, total: double>, efficiency: struct<unit: string, value: double>>, warmup: struct<memory: struct<unit: string, max_ram: double, max_global_vram: double, max_process_vram: double, max_reserved: double, max_allocated: double>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: double, stdev_: double>, throughput: struct<unit: string, value: double>, energy: null, efficiency: null>, train: struct<memory: struct<unit: string, max_ram: double, max_global_vram: double, max_process_vram: double, max_reserved: double, max_allocated: double>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: double, stdev_: double>, throughput: struct<unit: string, value: double>, energy: null, efficiency: null>> vs name: string backend: struct<name: string, version: string, _target_: string, task: string, library: string, model_type: string, model: string, processor: string, device: string, device_ids: string, seed: int64, inter_op_num_threads: null, intra_op_num_threads: null, model_kwargs: struct<>, processor_kwargs: struct<>, 
no_weights: bool, device_map: null, torch_dtype: null, eval_mode: bool, to_bettertransformer: bool, low_cpu_mem_usage: null, attn_implementation: null, cache_implementation: null, autocast_enabled: bool, autocast_dtype: null, torch_compile: bool, torch_compile_target: string, torch_compile_config: struct<>, quantization_scheme: null, quantization_config: struct<>, deepspeed_inference: bool, deepspeed_inference_config: struct<>, peft_type: null, peft_config: struct<>> scenario: struct<name: string, _target_: string, max_steps: int64, warmup_steps: int64, dataset_shapes: struct<dataset_size: int64, sequence_length: int64, num_choices: int64>, training_arguments: struct<per_device_train_batch_size: int64, gradient_accumulation_steps: int64, output_dir: string, evaluation_strategy: string, eval_strategy: string, save_strategy: string, do_train: bool, use_cpu: bool, max_steps: int64, do_eval: bool, do_predict: bool, report_to: string, skip_memory_metrics: bool, ddp_find_unused_parameters: bool>, latency: bool, memory: bool, energy: bool> launcher: struct<name: string, _target_: string, device_isolation: bool, device_isolation_action: string, numactl: bool, numactl_kwargs: struct<>, start_method: string> environment: struct<cpu: string, cpu_count: int64, cpu_ram_mb: double, system: string, machine: string, platform: string, processor: string, python_version: string, gpu: list<item: string>, gpu_count: int64, gpu_vram_mb: int64, optimum_benchmark_version: string, optimum_benchmark_commit: null, transformers_version: string, transformers_commit: null, accelerate_version: string, accelerate_commit: null, diffusers_version: string, diffusers_commit: null, optimum_version: null, optimum_commit: null, timm_version: string, timm_commit: null, peft_version: string, peft_commit: null> print_report: bool log_report: bool Traceback: Traceback (most recent call last): File "/src/services/worker/src/worker/job_runners/split/first_rows.py", line 231, in 
compute_first_rows_from_streaming_response iterable_dataset = iterable_dataset._resolve_features() File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 3335, in _resolve_features features = _infer_features_from_batch(self.with_format(None)._head()) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 2096, in _head return next(iter(self.iter(batch_size=n))) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 2296, in iter for key, example in iterator: File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 1856, in __iter__ for key, pa_table in self._iter_arrow(): File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 1878, in _iter_arrow yield from self.ex_iterable._iter_arrow() File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 504, in _iter_arrow yield new_key, pa.Table.from_batches(chunks_buffer) File "pyarrow/table.pxi", line 4116, in pyarrow.lib.Table.from_batches File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status pyarrow.lib.ArrowInvalid: Schema at index 1 was different: config: struct<name: string, backend: struct<name: string, version: string, _target_: string, task: string, library: string, model_type: string, model: string, processor: string, device: string, device_ids: string, seed: int64, inter_op_num_threads: null, intra_op_num_threads: null, model_kwargs: struct<>, processor_kwargs: struct<>, no_weights: bool, device_map: null, torch_dtype: null, eval_mode: bool, to_bettertransformer: bool, low_cpu_mem_usage: null, attn_implementation: null, cache_implementation: null, autocast_enabled: bool, autocast_dtype: null, torch_compile: bool, torch_compile_target: string, torch_compile_config: struct<>, 
quantization_scheme: null, quantization_config: struct<>, deepspeed_inference: bool, deepspeed_inference_config: struct<>, peft_type: null, peft_config: struct<>>, scenario: struct<name: string, _target_: string, max_steps: int64, warmup_steps: int64, dataset_shapes: struct<dataset_size: int64, sequence_length: int64, num_choices: int64>, training_arguments: struct<per_device_train_batch_size: int64, gradient_accumulation_steps: int64, output_dir: string, evaluation_strategy: string, eval_strategy: string, save_strategy: string, do_train: bool, use_cpu: bool, max_steps: int64, do_eval: bool, do_predict: bool, report_to: string, skip_memory_metrics: bool, ddp_find_unused_parameters: bool>, latency: bool, memory: bool, energy: bool>, launcher: struct<name: string, _target_: string, device_isolation: bool, device_isolation_action: string, numactl: bool, numactl_kwargs: struct<>, start_method: string>, environment: struct<cpu: string, cpu_count: int64, cpu_ram_mb: double, system: string, machine: string, platform: string, processor: string, python_version: string, gpu: list<item: string>, gpu_count: int64, gpu_vram_mb: int64, optimum_benchmark_version: string, optimum_benchmark_commit: null, transformers_version: string, transformers_commit: null, accelerate_version: string, accelerate_commit: null, diffusers_version: string, diffusers_commit: null, optimum_version: null, optimum_commit: null, timm_version: string, timm_commit: null, peft_version: string, peft_commit: null>, print_report: bool, log_report: bool> report: struct<overall: struct<memory: struct<unit: string, max_ram: double, max_global_vram: double, max_process_vram: double, max_reserved: double, max_allocated: double>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: double, stdev_: double>, throughput: struct<unit: string, value: double>, energy: struct<unit: string, cpu: double, ram: double, 
gpu: double, total: double>, efficiency: struct<unit: string, value: double>>, warmup: struct<memory: struct<unit: string, max_ram: double, max_global_vram: double, max_process_vram: double, max_reserved: double, max_allocated: double>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: double, stdev_: double>, throughput: struct<unit: string, value: double>, energy: null, efficiency: null>, train: struct<memory: struct<unit: string, max_ram: double, max_global_vram: double, max_process_vram: double, max_reserved: double, max_allocated: double>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: double, stdev_: double>, throughput: struct<unit: string, value: double>, energy: null, efficiency: null>> vs name: string backend: struct<name: string, version: string, _target_: string, task: string, library: string, model_type: string, model: string, processor: string, device: string, device_ids: string, seed: int64, inter_op_num_threads: null, intra_op_num_threads: null, model_kwargs: struct<>, processor_kwargs: struct<>, no_weights: bool, device_map: null, torch_dtype: null, eval_mode: bool, to_bettertransformer: bool, low_cpu_mem_usage: null, attn_implementation: null, cache_implementation: null, autocast_enabled: bool, autocast_dtype: null, torch_compile: bool, torch_compile_target: string, torch_compile_config: struct<>, quantization_scheme: null, quantization_config: struct<>, deepspeed_inference: bool, deepspeed_inference_config: struct<>, peft_type: null, peft_config: struct<>> scenario: struct<name: string, _target_: string, max_steps: int64, warmup_steps: int64, dataset_shapes: struct<dataset_size: int64, sequence_length: int64, num_choices: int64>, training_arguments: struct<per_device_train_batch_size: int64, gradient_accumulation_steps: int64, 
output_dir: string, evaluation_strategy: string, eval_strategy: string, save_strategy: string, do_train: bool, use_cpu: bool, max_steps: int64, do_eval: bool, do_predict: bool, report_to: string, skip_memory_metrics: bool, ddp_find_unused_parameters: bool>, latency: bool, memory: bool, energy: bool> launcher: struct<name: string, _target_: string, device_isolation: bool, device_isolation_action: string, numactl: bool, numactl_kwargs: struct<>, start_method: string> environment: struct<cpu: string, cpu_count: int64, cpu_ram_mb: double, system: string, machine: string, platform: string, processor: string, python_version: string, gpu: list<item: string>, gpu_count: int64, gpu_vram_mb: int64, optimum_benchmark_version: string, optimum_benchmark_commit: null, transformers_version: string, transformers_commit: null, accelerate_version: string, accelerate_commit: null, diffusers_version: string, diffusers_commit: null, optimum_version: null, optimum_commit: null, timm_version: string, timm_commit: null, peft_version: string, peft_commit: null> print_report: bool log_report: bool
Need help making the dataset viewer work? Review the documentation on how to configure the dataset viewer, and open a discussion for direct support.
Subsets and Splits