Dataset Viewer
The dataset viewer is not available for this split.
Cannot extract the features (columns) for the split 'test' of the config 'default' of the dataset.
Error code:   FeaturesError
Exception:    ArrowInvalid
Message:      Schema at index 1 was different: 
config: struct<name: string, backend: struct<name: string, version: string, _target_: string, model: string, processor: string, task: string, library: string, model_type: string, device: string, device_ids: null, seed: int64, inter_op_num_threads: null, intra_op_num_threads: null, model_kwargs: struct<>, processor_kwargs: struct<>, no_weights: bool, device_map: null, torch_dtype: null, eval_mode: bool, to_bettertransformer: bool, low_cpu_mem_usage: null, attn_implementation: null, cache_implementation: null, allow_tf32: bool, autocast_enabled: bool, autocast_dtype: null, torch_compile: bool, torch_compile_target: string, torch_compile_config: struct<>, quantization_scheme: null, quantization_config: struct<>, deepspeed_inference: bool, deepspeed_inference_config: struct<>, peft_type: null, peft_config: struct<>>, scenario: struct<name: string, _target_: string, iterations: int64, duration: int64, warmup_runs: int64, input_shapes: struct<batch_size: int64, sequence_length: int64, num_choices: int64>, new_tokens: null, memory: bool, latency: bool, energy: bool, forward_kwargs: struct<>, generate_kwargs: struct<>, call_kwargs: struct<>>, launcher: struct<name: string, _target_: string, device_isolation: bool, device_isolation_action: null, numactl: bool, numactl_kwargs: struct<>, start_method: string>, environment: struct<cpu: string, cpu_count: int64, cpu_ram_mb: double, system: string, machine: string, platform: string, processor: string, python_version: string, optimum_benchmark_version: string, optimum_benchmark_commit: string, transformers_version: string, transformers_commit: null, accelerate_version: string, accelerate_commit: null, diffusers_version: string, diffusers_commit: null, optimum_version: null, optimum_commit: null, timm_version: string, timm_commit: null, peft_version: null, peft_commit: null>, print_report: bool, log_report: bool>
report: struct<load_model: struct<memory: struct<unit: string, max_ram: double, max_global_vram: null, max_process_vram: null, max_reserved: null, max_allocated: null>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: int64, stdev_: int64>, throughput: null, energy: null, efficiency: null>, first_forward: struct<memory: struct<unit: string, max_ram: double, max_global_vram: null, max_process_vram: null, max_reserved: null, max_allocated: null>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: int64, stdev_: int64>, throughput: null, energy: null, efficiency: null>, forward: struct<memory: struct<unit: string, max_ram: double, max_global_vram: null, max_process_vram: null, max_reserved: null, max_allocated: null>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: double, stdev_: double>, throughput: struct<unit: string, value: double>, energy: null, efficiency: null>>
vs
name: string
backend: struct<name: string, version: string, _target_: string, model: string, processor: string, task: string, library: string, model_type: string, device: string, device_ids: null, seed: int64, inter_op_num_threads: null, intra_op_num_threads: null, model_kwargs: struct<>, processor_kwargs: struct<>, no_weights: bool, device_map: null, torch_dtype: null, eval_mode: bool, to_bettertransformer: bool, low_cpu_mem_usage: null, attn_implementation: null, cache_implementation: null, allow_tf32: bool, autocast_enabled: bool, autocast_dtype: null, torch_compile: bool, torch_compile_target: string, torch_compile_config: struct<>, quantization_scheme: null, quantization_config: struct<>, deepspeed_inference: bool, deepspeed_inference_config: struct<>, peft_type: null, peft_config: struct<>>
scenario: struct<name: string, _target_: string, iterations: int64, duration: int64, warmup_runs: int64, input_shapes: struct<batch_size: int64, sequence_length: int64, num_choices: int64>, new_tokens: null, memory: bool, latency: bool, energy: bool, forward_kwargs: struct<>, generate_kwargs: struct<>, call_kwargs: struct<>>
launcher: struct<name: string, _target_: string, device_isolation: bool, device_isolation_action: null, numactl: bool, numactl_kwargs: struct<>, start_method: string>
environment: struct<cpu: string, cpu_count: int64, cpu_ram_mb: double, system: string, machine: string, platform: string, processor: string, python_version: string, optimum_benchmark_version: string, optimum_benchmark_commit: string, transformers_version: string, transformers_commit: null, accelerate_version: string, accelerate_commit: null, diffusers_version: string, diffusers_commit: null, optimum_version: null, optimum_commit: null, timm_version: string, timm_commit: null, peft_version: null, peft_commit: null>
print_report: bool
log_report: bool
Traceback:    Traceback (most recent call last):
                File "/src/services/worker/src/worker/job_runners/split/first_rows.py", line 231, in compute_first_rows_from_streaming_response
                  iterable_dataset = iterable_dataset._resolve_features()
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 3335, in _resolve_features
                  features = _infer_features_from_batch(self.with_format(None)._head())
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 2096, in _head
                  return next(iter(self.iter(batch_size=n)))
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 2296, in iter
                  for key, example in iterator:
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 1856, in __iter__
                  for key, pa_table in self._iter_arrow():
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 1878, in _iter_arrow
                  yield from self.ex_iterable._iter_arrow()
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 536, in _iter_arrow
                  yield new_key, pa.Table.from_batches(chunks_buffer)
                File "pyarrow/table.pxi", line 4116, in pyarrow.lib.Table.from_batches
                File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
                File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
              pyarrow.lib.ArrowInvalid: Schema at index 1 was different: 
              config: struct<name: string, backend: struct<name: string, version: string, _target_: string, model: string, processor: string, task: string, library: string, model_type: string, device: string, device_ids: null, seed: int64, inter_op_num_threads: null, intra_op_num_threads: null, model_kwargs: struct<>, processor_kwargs: struct<>, no_weights: bool, device_map: null, torch_dtype: null, eval_mode: bool, to_bettertransformer: bool, low_cpu_mem_usage: null, attn_implementation: null, cache_implementation: null, allow_tf32: bool, autocast_enabled: bool, autocast_dtype: null, torch_compile: bool, torch_compile_target: string, torch_compile_config: struct<>, quantization_scheme: null, quantization_config: struct<>, deepspeed_inference: bool, deepspeed_inference_config: struct<>, peft_type: null, peft_config: struct<>>, scenario: struct<name: string, _target_: string, iterations: int64, duration: int64, warmup_runs: int64, input_shapes: struct<batch_size: int64, sequence_length: int64, num_choices: int64>, new_tokens: null, memory: bool, latency: bool, energy: bool, forward_kwargs: struct<>, generate_kwargs: struct<>, call_kwargs: struct<>>, launcher: struct<name: string, _target_: string, device_isolation: bool, device_isolation_action: null, numactl: bool, numactl_kwargs: struct<>, start_method: string>, environment: struct<cpu: string, cpu_count: int64, cpu_ram_mb: double, system: string, machine: string, platform: string, processor: string, python_version: string, optimum_benchmark_version: string, optimum_benchmark_commit: string, transformers_version: string, transformers_commit: null, accelerate_version: string, accelerate_commit: null, diffusers_version: string, diffusers_commit: null, optimum_version: null, optimum_commit: null, timm_version: string, timm_commit: null, peft_version: null, peft_commit: null>, print_report: bool, log_report: bool>
              report: struct<load_model: struct<memory: struct<unit: string, max_ram: double, max_global_vram: null, max_process_vram: null, max_reserved: null, max_allocated: null>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: int64, stdev_: int64>, throughput: null, energy: null, efficiency: null>, first_forward: struct<memory: struct<unit: string, max_ram: double, max_global_vram: null, max_process_vram: null, max_reserved: null, max_allocated: null>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: int64, stdev_: int64>, throughput: null, energy: null, efficiency: null>, forward: struct<memory: struct<unit: string, max_ram: double, max_global_vram: null, max_process_vram: null, max_reserved: null, max_allocated: null>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: double, stdev_: double>, throughput: struct<unit: string, value: double>, energy: null, efficiency: null>>
              vs
              name: string
              backend: struct<name: string, version: string, _target_: string, model: string, processor: string, task: string, library: string, model_type: string, device: string, device_ids: null, seed: int64, inter_op_num_threads: null, intra_op_num_threads: null, model_kwargs: struct<>, processor_kwargs: struct<>, no_weights: bool, device_map: null, torch_dtype: null, eval_mode: bool, to_bettertransformer: bool, low_cpu_mem_usage: null, attn_implementation: null, cache_implementation: null, allow_tf32: bool, autocast_enabled: bool, autocast_dtype: null, torch_compile: bool, torch_compile_target: string, torch_compile_config: struct<>, quantization_scheme: null, quantization_config: struct<>, deepspeed_inference: bool, deepspeed_inference_config: struct<>, peft_type: null, peft_config: struct<>>
              scenario: struct<name: string, _target_: string, iterations: int64, duration: int64, warmup_runs: int64, input_shapes: struct<batch_size: int64, sequence_length: int64, num_choices: int64>, new_tokens: null, memory: bool, latency: bool, energy: bool, forward_kwargs: struct<>, generate_kwargs: struct<>, call_kwargs: struct<>>
              launcher: struct<name: string, _target_: string, device_isolation: bool, device_isolation_action: null, numactl: bool, numactl_kwargs: struct<>, start_method: string>
              environment: struct<cpu: string, cpu_count: int64, cpu_ram_mb: double, system: string, machine: string, platform: string, processor: string, python_version: string, optimum_benchmark_version: string, optimum_benchmark_commit: string, transformers_version: string, transformers_commit: null, accelerate_version: string, accelerate_commit: null, diffusers_version: string, diffusers_commit: null, optimum_version: null, optimum_commit: null, timm_version: string, timm_commit: null, peft_version: null, peft_commit: null>
              print_report: bool
              log_report: bool

Need help making the dataset viewer work? Review the documentation on configuring the dataset viewer, then open a discussion for direct support.

No dataset card yet

Downloads last month
29