#!/bin/bash
#SBATCH --job-name=catalogue-jsonl-to-meg-ds # job name
#SBATCH --ntasks=1                   # number of MP tasks
#SBATCH --nodes=1
#SBATCH --cpus-per-task=40           # number of cores per task
#SBATCH --hint=nomultithread         # we get physical cores not logical
#SBATCH --time=20:00:00             # maximum execution time (HH:MM:SS)
#SBATCH --output=logs/catalogue-jsonl-to-meg-ds/%x-%j.out          # output file name
#SBATCH --account=six@cpu
#SBATCH --array=0-11
#SBATCH --partition=cpu_p1

set -x -e

source $six_ALL_CCFRWORK/start-prod
# We need a specific installation of tokenizers so that it works with bytefallback
conda activate thomas_data_tooling

# ======= Generate meg-ds file ======

DATASET_PATHS=($(ls -d /gpfsscratch/rech/six/commun/bigscience-datasets/pii_no_id_no_num/post/*.jsonl))
DATASET_PATH=${DATASET_PATHS[$SLURM_ARRAY_TASK_ID]}
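# One shard per array task: SLURM_ARRAY_TASK_ID ranges over 0..11 (from --array=0-11 above)
# and indexes into DATASET_PATHS, so the array range must match the number of matched .jsonl files.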

TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles

DATASET_NAME_WITH_JSONL=$(basename "$DATASET_PATH")
DATASET_NAME=${DATASET_NAME_WITH_JSONL:0:-6}    # strip the trailing ".jsonl" (6 characters)
LANG=$(basename "$(dirname "$DATASET_PATH")")   # language taken from the shard's parent directory (note: shadows the LANG locale variable)
# ${TOKENIZER_NAME_OR_PATH//\//_} replaces every "/" with "_" so the tokenizer name is filesystem-safe
SAVE_MEG_DS_DATASET=$six_ALL_CCFRSCRATCH/bigscience-datasets/oscar_pii_v2/$LANG/"$DATASET_NAME"/meg_ds_"${TOKENIZER_NAME_OR_PATH//\//_}"
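# This prefix is passed to --output-prefix below; the converted shard is written as
# "${SAVE_MEG_DS_DATASET}_text_document.bin" plus the matching ".idx" index file.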

mkdir -p $(dirname $SAVE_MEG_DS_DATASET)

# Skip shards that a previous run already converted, so the array job can be resubmitted safely.
if [[ -f "${SAVE_MEG_DS_DATASET}_text_document.bin" ]]; then
    echo "${SAVE_MEG_DS_DATASET}_text_document.bin already exists, skipping."
    exit 0
fi

# Compute nodes have no internet access, so force datasets/transformers to use only local caches.
export HF_DATASETS_OFFLINE=1
export TRANSFORMERS_OFFLINE=1

cd $six_ALL_CCFRWORK/code/Megatron-DeepSpeed
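# /usr/bin/time -v reports wall-clock time and peak memory usage for the conversion.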
/usr/bin/time -v python tools/preprocess_data_many_cores.py \
       --input $DATASET_PATH \
       --output-prefix $SAVE_MEG_DS_DATASET \
       --dataset-impl mmap \
       --tokenizer-type PretrainedFromHF \
       --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
       --append-eod \
       --workers 40
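
# Usage sketch (the script filename below is hypothetical; adjust to your setup):
#   mkdir -p logs/catalogue-jsonl-to-meg-ds    # SLURM will not create the --output directory itself
#   sbatch catalogue_jsonl_to_meg_ds.slurm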