---
dataset_info:
  features:
    - name: tokenizer_id
      dtype: int64
    - name: hash
      dtype: string
    - name: vocab_hash
      dtype: string
    - name: vocab_size
      dtype: int64
    - name: model_type
      dtype: string
    - name: num_merges
      dtype: int64
    - name: has_normalizer
      dtype: bool
    - name: has_pre_tokenizer
      dtype: bool
    - name: has_post_processor
      dtype: bool
    - name: has_decoder
      dtype: bool
    - name: num_added_tokens
      dtype: int64
    - name: normalizer_type
      dtype: string
    - name: pre_tokenizer_type
      dtype: string
    - name: decoder_type
      dtype: string
    - name: normalizer_types
      list: string
    - name: pre_tokenizer_types
      list: string
    - name: decoder_types
      list: string
    - name: version
      dtype: string
    - name: truncation
      dtype: string
    - name: padding
      dtype: string
    - name: added_tokens
      dtype: string
    - name: normalizer
      dtype: string
    - name: pre_tokenizer
      dtype: string
    - name: post_processor
      dtype: string
    - name: decoder
      dtype: string
    - name: model
      dtype: string
  splits:
    - name: train
      num_bytes: 75252466551
      num_examples: 24798
  download_size: 40124756481
  dataset_size: 75252466551
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
---
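Each row describes one tokenizer: hashes and size of its vocabulary, the model type and merge count, flags for the presence of a normalizer, pre-tokenizer, post-processor, and decoder, plus the serialized JSON of those components. A minimal sketch of reading a record with the `datasets` library is shown below; the repository id `christopher/tokenizer-data` is an assumption inferred from the page header rather than something stated in this card, and streaming is used because the train split is roughly 75 GB.

```python
# Minimal sketch, assuming the dataset is hosted as "christopher/tokenizer-data".
from datasets import load_dataset

# streaming=True avoids downloading the full ~75 GB split up front.
ds = load_dataset("christopher/tokenizer-data", split="train", streaming=True)

# Inspect the first record's scalar fields; the component columns
# (normalizer, pre_tokenizer, post_processor, decoder, model) hold JSON strings.
first = next(iter(ds))
print(first["tokenizer_id"], first["model_type"], first["vocab_size"])
```

For the full 24,798-example split on disk, drop `streaming=True` and let `load_dataset` cache the parquet shards under `data/train-*`.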