{ "cells": [ { "cell_type": "code", "execution_count": 2, "id": "bed45d12-7681-4ba4-9c89-48a3515704e2", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "File ‘hi_rom.txt.xz’ already there; not retrieving.\n", "\n", "hi_rom.txt.xz (1/1)\n", "xz: hi_rom.txt: File exists\n" ] } ], "source": [ "!wget -nc http://data.statmt.org/cc-100/hi_rom.txt.xz\n", "!xz -d -v hi_rom.txt.xz\n", "!rm hi_rom.txt.xz" ] }, { "cell_type": "markdown", "id": "b5c7c7c7-b9a6-4ea2-a5ef-edaf982ae0ad", "metadata": { "tags": [] }, "source": [ "### Required columns\n", "- target_hinglish\n", "- source_hindi\n", "- parallel_english\n", "- annotations\n", "- raw_input\n", "- alternates\n", "\n", "> For **hi_rom.txt**, only `target_hinglish` is valid" ] }, { "cell_type": "code", "execution_count": 7, "id": "965589a9-c62e-4659-a6bc-6f0a2bad5d19", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: clean-text[gpl] in /opt/conda/lib/python3.7/site-packages (0.6.0)\n", "Requirement already satisfied: tqdm in /opt/conda/lib/python3.7/site-packages (4.62.3)\n", "Requirement already satisfied: emoji<2.0.0,>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from clean-text[gpl]) (1.6.3)\n", "Requirement already satisfied: ftfy<7.0,>=6.0 in /opt/conda/lib/python3.7/site-packages (from clean-text[gpl]) (6.1.1)\n", "Requirement already satisfied: unidecode<2.0.0,>=1.1.1 in /opt/conda/lib/python3.7/site-packages (from clean-text[gpl]) (1.3.2)\n", "Requirement already satisfied: wcwidth>=0.2.5 in /opt/conda/lib/python3.7/site-packages (from ftfy<7.0,>=6.0->clean-text[gpl]) (0.2.5)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "10251114it [20:30, 8333.31it/s] \n" ] } ], "source": [ "!pip install clean-text[gpl] tqdm\n", "import pandas as pd\n", "from tqdm import tqdm\n", "from cleantext import clean as clean_\n", "\n", "clean = lambda x: clean_(\n", " x, \n", " no_line_breaks=True, no_punct=True,\n", 
" no_urls=True, replace_with_url=\"<|url|>\",\n", " no_emoji=True, no_phone_numbers=True, replace_with_phone_number=\"<|phonenumber|>\",\n", " no_emails=True, replace_with_email=\"<|email|>\",\n", " no_currency_symbols=True, replace_with_currency_symbol=\"\", )\n", "\n", "with open(\"./hi_rom.txt\", 'r') as file:\n", " df = pd.DataFrame(\n", " [(clean(line), None, None, None, None, None) for line in tqdm(file)],\n", " columns=[\"target_hinglish\", \"source_hindi\", \"parallel_english\", \"annotations\", \"raw_input\", \"alternates\"] )" ] }, { "cell_type": "code", "execution_count": 8, "id": "b54fdd52-1ab0-4c84-89e5-0bcb8fcbfbeb", "metadata": {}, "outputs": [], "source": [ "# Split dataset\n", "from sklearn.model_selection import train_test_split\n", "_train_eval_df, test_df = train_test_split(df, test_size=0.1)\n", "train_df, eval_df = train_test_split(_train_eval_df, test_size=0.1)" ] }, { "cell_type": "code", "execution_count": 9, "id": "6e804366-34cd-45c7-b3c6-46b7b8c1b420", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Collecting tables\n", " Using cached tables-3.7.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.9 MB)\n", "Requirement already satisfied: numpy>=1.19.0 in /opt/conda/lib/python3.7/site-packages (from tables) (1.19.5)\n", "Collecting numexpr>=2.6.2\n", " Using cached numexpr-2.8.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (379 kB)\n", "Requirement already satisfied: packaging in /opt/conda/lib/python3.7/site-packages (from tables) (21.3)\n", "Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging->tables) (3.0.6)\n", "Installing collected packages: numexpr, tables\n", "Successfully installed numexpr-2.8.1 tables-3.7.0\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/opt/conda/lib/python3.7/site-packages/pandas/core/generic.py:2718: PerformanceWarning: \n", "your performance may suffer as PyTables will 
pickle object types that it cannot\n", "map directly to c-types [inferred_type->mixed,key->block0_values] [items->Index(['target_hinglish', 'source_hindi', 'parallel_english', 'annotations',\n", " 'raw_input', 'alternates'],\n", " dtype='object')]\n", "\n", " encoding=encoding,\n" ] } ], "source": [ "# %pip (not !pip) installs into the running kernel's environment\n", "%pip install tables\n", "\n", "# Save each split under its own key in a single HDF5 file (HDF5, not Hadoop HDFS)\n", "train_df.to_hdf(\"./data.h5\", \"train\", complevel=9)\n", "test_df.to_hdf(\"./data.h5\", \"test\", complevel=9)\n", "eval_df.to_hdf(\"./data.h5\", \"eval\", complevel=9)" ] }, { "cell_type": "code", "execution_count": 10, "id": "3298f2f3-3e21-478e-b027-947c992f880d", "metadata": {}, "outputs": [], "source": [ "# Confirm that everything worked as expected\n", "\n", "# Load back from the HDF5 file\n", "_train_df = pd.read_hdf(\"./data.h5\", \"train\")\n", "_test_df = pd.read_hdf(\"./data.h5\", \"test\")\n", "_eval_df = pd.read_hdf(\"./data.h5\", \"eval\")\n", "\n", "# Use 'and', not a chained '==' over booleans: the old chained form also passed\n", "# when all three length comparisons were simultaneously False.\n", "assert (len(_train_df) == len(train_df)) and \\\n", " (len(_eval_df) == len(eval_df)) and \\\n", " (len(_test_df) == len(test_df))" ] }, { "cell_type": "code", "execution_count": 11, "id": "60461121-bed5-4ba0-ba7d-dd46256c62e3", "metadata": {}, "outputs": [], "source": [ "!rm hi_rom.txt" ] } ], "metadata": { "environment": { "kernel": "python3", "name": "managed-notebooks.m87", "type": "gcloud", "uri": "gcr.io/deeplearning-platform-release/base-cu110:latest" }, "kernelspec": { "display_name": "Python (Local)", "language": "python", "name": "local-base" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.12" } }, "nbformat": 4, "nbformat_minor": 5 }