{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "46322fb5-5918-4b70-9689-9e0781439ac4", "metadata": {}, "outputs": [], "source": [ "# !pip3 install wordcloud" ] }, { "cell_type": "code", "execution_count": 2, "id": "daf1e3d1-75ac-4299-8bed-2f413a49f9a6", "metadata": { "tags": [] }, "outputs": [], "source": [ "import nltk\n", "from nltk.tokenize import sent_tokenize\n", "from nltk.tokenize import word_tokenize\n", "\n", "import gensim\n", "from gensim import corpora\n", "from gensim import similarities\n", "from gensim import models\n", "from gensim.models import CoherenceModel\n", "\n", "# from wordcloud import WordCloud, ImageColorGenerator\n", "import matplotlib.pyplot as plt\n", "import seaborn as sns\n", "import pandas as pd\n", "import re\n", "import os\n", "import datetime\n", "\n", "import warnings\n", "\n", "warnings.filterwarnings(\"ignore\")\n", "\n", "from pprint import pprint\n", "import pyLDAvis\n", "import pyLDAvis.gensim_models as gensimvis" ] }, { "cell_type": "code", "execution_count": 3, "id": "c673c907-e1d8-4d64-9a73-c15c15b78e7f", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2024-04-14 13:28:04.105795\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] } ], "source": [ "print(datetime.datetime.now())" ] }, { "cell_type": "markdown", "id": "49e6de6b-71bd-4948-8827-52601406058f", "metadata": {}, "source": [ "# Import Data" ] }, { "cell_type": "code", "execution_count": 4, "id": "49222182-7811-4fa6-8c0a-21d3a546863e", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] } ], "source": [ "df = pd.read_parquet(\"processed_data1.parquet\")" ] }, { "cell_type": "code", "execution_count": 5, "id": "3fb59a30", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
idHeadlineDetailsSeverityCategoryRegionDatetimeYearlatlon...if_labeledMonthWeekHeadline_Detailsurltitlecontentcleaned_contentbinary_contentword_count
01.0Grasberg Mine- Grasberg mine workers extend st...Media sources indicate that workers at the Gra...ModerateMine Workers StrikeIndonesia28/5/17 17:082017.0-4.05608137.11302...False5.021.0Grasberg Mine- Grasberg mine workers extend st...https://news.google.com/rss/articles/CBMiZ2h0d...Freeport Indonesia mine workers extend strike ...Trucks are seen on a road in the Grasberg copp...[truck, be, see, on, road, in, grasberg, coppe...[adkerson_jakarta_try, agreement_freeport_indo...53
13.0Shanghai port congestion impacts terminals in ...The persisting port congestion at Shanghai’s Y...MinorPort CongestionChina27/4/17 9:162017.029.52000121.33190...False4.017.0Shanghai port congestion impacts terminals in ...https://news.google.com/rss/articles/CBMiVWh0d...Typhoon Muifa to shut China ports for second t...By Sam Whelan 13/09/2022\\n\\nAnother typhoon ha...[by, sam, whelan, typhoon, have, prompt, port,...[additional_ripple_effect, avoid_path_typhoon,...44
25.0UPDATE - Indonesia: Police confirm two explosi...According to local police in Jakarta, two expl...ExtremeBombing, Police OperationsIndonesia24/5/17 16:202017.0NaNNaN...True5.021.0UPDATE - Indonesia: Police confirm two explosi...https://news.google.com/rss/articles/CBMiZWh0d...Jakarta Police Receive 2 More Reports on Coldp...TEMPO.CO, Jakarta - South Jakarta Metro Police...[jakarta, south, jakarta, metro, police, recei...[actress_accord, available_day_concert, click_...24
36.0UPDATE - Indonesia: Severe winds damage infras...Severe winds have downed billboards and trees ...ModerateRoadway Closure / Disruption, Flooding, Severe...Indonesia19/4/17 9:102017.0-6.91264107.65700...True4.016.0UPDATE - Indonesia: Severe winds damage infras...https://news.google.com/rss/articles/CBMiSWh0d...Indonesia hit by some of strongest winds recordedA man stands near damaged houses following a t...[man, stand, near, damage, house, follow, torn...[bbc_indonesia, climatologist_government_resea...28
414.02 miles E of Chesterfield - A tornado has touc...Government sources are reporting a tornado has...MinorTornadoUnited States17/9/18 19:552018.037.51000-77.61000...True9.038.02 miles E of Chesterfield - A tornado has touc...https://news.google.com/rss/articles/CBMigAFod...UPDATE: Number of homes without power down to ...More than 90,000 homes and businesses across t...[more, than, home, business, across, richmond,...[advise_seek_alternate, affect_richmond, alter...134
\n", "

5 rows × 23 columns

\n", "
" ], "text/plain": [ " id Headline \\\n", "0 1.0 Grasberg Mine- Grasberg mine workers extend st... \n", "1 3.0 Shanghai port congestion impacts terminals in ... \n", "2 5.0 UPDATE - Indonesia: Police confirm two explosi... \n", "3 6.0 UPDATE - Indonesia: Severe winds damage infras... \n", "4 14.0 2 miles E of Chesterfield - A tornado has touc... \n", "\n", " Details Severity \\\n", "0 Media sources indicate that workers at the Gra... Moderate \n", "1 The persisting port congestion at Shanghai’s Y... Minor \n", "2 According to local police in Jakarta, two expl... Extreme \n", "3 Severe winds have downed billboards and trees ... Moderate \n", "4 Government sources are reporting a tornado has... Minor \n", "\n", " Category Region \\\n", "0 Mine Workers Strike Indonesia \n", "1 Port Congestion China \n", "2 Bombing, Police Operations Indonesia \n", "3 Roadway Closure / Disruption, Flooding, Severe... Indonesia \n", "4 Tornado United States \n", "\n", " Datetime Year lat lon ... if_labeled Month Week \\\n", "0 28/5/17 17:08 2017.0 -4.05608 137.11302 ... False 5.0 21.0 \n", "1 27/4/17 9:16 2017.0 29.52000 121.33190 ... False 4.0 17.0 \n", "2 24/5/17 16:20 2017.0 NaN NaN ... True 5.0 21.0 \n", "3 19/4/17 9:10 2017.0 -6.91264 107.65700 ... True 4.0 16.0 \n", "4 17/9/18 19:55 2018.0 37.51000 -77.61000 ... True 9.0 38.0 \n", "\n", " Headline_Details \\\n", "0 Grasberg Mine- Grasberg mine workers extend st... \n", "1 Shanghai port congestion impacts terminals in ... \n", "2 UPDATE - Indonesia: Police confirm two explosi... \n", "3 UPDATE - Indonesia: Severe winds damage infras... \n", "4 2 miles E of Chesterfield - A tornado has touc... \n", "\n", " url \\\n", "0 https://news.google.com/rss/articles/CBMiZ2h0d... \n", "1 https://news.google.com/rss/articles/CBMiVWh0d... \n", "2 https://news.google.com/rss/articles/CBMiZWh0d... \n", "3 https://news.google.com/rss/articles/CBMiSWh0d... \n", "4 https://news.google.com/rss/articles/CBMigAFod... 
\n", "\n", " title \\\n", "0 Freeport Indonesia mine workers extend strike ... \n", "1 Typhoon Muifa to shut China ports for second t... \n", "2 Jakarta Police Receive 2 More Reports on Coldp... \n", "3 Indonesia hit by some of strongest winds recorded \n", "4 UPDATE: Number of homes without power down to ... \n", "\n", " content \\\n", "0 Trucks are seen on a road in the Grasberg copp... \n", "1 By Sam Whelan 13/09/2022\\n\\nAnother typhoon ha... \n", "2 TEMPO.CO, Jakarta - South Jakarta Metro Police... \n", "3 A man stands near damaged houses following a t... \n", "4 More than 90,000 homes and businesses across t... \n", "\n", " cleaned_content \\\n", "0 [truck, be, see, on, road, in, grasberg, coppe... \n", "1 [by, sam, whelan, typhoon, have, prompt, port,... \n", "2 [jakarta, south, jakarta, metro, police, recei... \n", "3 [man, stand, near, damage, house, follow, torn... \n", "4 [more, than, home, business, across, richmond,... \n", "\n", " binary_content word_count \n", "0 [adkerson_jakarta_try, agreement_freeport_indo... 53 \n", "1 [additional_ripple_effect, avoid_path_typhoon,... 44 \n", "2 [actress_accord, available_day_concert, click_... 24 \n", "3 [bbc_indonesia, climatologist_government_resea... 28 \n", "4 [advise_seek_alternate, affect_richmond, alter... 134 \n", "\n", "[5 rows x 23 columns]" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df.head()" ] }, { "cell_type": "code", "execution_count": 6, "id": "09113e88-66cc-414c-a953-da04db83c4ae", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] }, { "data": { "text/plain": [ "(3555, 23)" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df.shape" ] }, { "cell_type": "markdown", "id": "037e74fc-bbcd-43e3-8346-799920cca8d8", "metadata": {}, "source": [ "# Vectorisation" ] }, { "cell_type": "markdown", "id": "d67cef3a-59fb-4dd8-adc8-2cf288b90728", "metadata": {}, "source": [ "NLP vectorization refers to the process of converting text data into numerical vectors that machine learning algorithms can understand and process. \n", "\n", "Bag-of-Words (BoW) is used here that represents text as a collection of unique words along with their frequencies. Each word is assigned an index, and the vector contains the count of each word present in the document." ] }, { "cell_type": "code", "execution_count": 7, "id": "c95b7b8a-9767-469d-812d-c9a9d9fee0e9", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] } ], "source": [ "df_copy = df.copy()" ] }, { "cell_type": "code", "execution_count": 8, "id": "dfb2001e-04c1-49dc-b423-a64ea47af5a9", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] } ], "source": [ "# choose only the minor severity cases for modelling\n", "cleaned = df_copy[df_copy[\"Severity\"].isin([\"Minor\"])]\n", "cleaned.reset_index(drop=True, inplace=True)" ] }, { "cell_type": "code", "execution_count": 9, "id": "3da09b6a-65c6-4f40-9a21-e0b798318ca5", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] }, { "data": { "text/plain": [ "(1565, 23)" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "cleaned.shape" ] }, { "cell_type": "code", "execution_count": 10, "id": "de71c523-a59e-44b2-aa96-5f17d872c9c6", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] } ], "source": [ "headline = cleaned.cleaned_content" ] }, { "cell_type": "code", "execution_count": 11, "id": "5b1e34e1", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] }, { "data": { "text/plain": [ "array(['man', 'be', 'seriously', 'injure', 'after', 'boat', 'catch',\n", " 'fire', 'in', 'sydney', 'marina', 'today', 'emergency', 'service',\n", " 'be', 'call', 'birkenhead', 'point', 'marina', 'in', 'drummoyne',\n", " 'shortly', 'after', 'follow', 'report', 'of', 'explosion', 'on',\n", " 'boat', 'careflight', 's', 'rapid', 'response', 'helicopter', 'be',\n", " 'task', 'at', 'crew', 'include', 'doctor', 'intensive', 'care',\n", " 'paramedic', 'fly', 'drummoyne', 'land', 'in', 'nearby', 'brett',\n", " 'park', 'just', 'minute', 'late', 'careflight', 'boat', 'catch',\n", " 'fire', 'near', 'birkenhead', 'point', 'shopping', 'outlet',\n", " 'simon', 'r', 'supply', 'reader', 'image', 'of', 'boat', 'on',\n", " 'fire', 'at', 'marina', 'in', 'drummoyne', 'simon', 'r', 'supply',\n", " 'three', 'nsw', 'ambulance', 'crew', 'careflight', 'chopper',\n", " 'attend', 'scene', 'find', 'man', 'suffer', 'serious', 'burn',\n", " 'legs', 'arm', 'shoulder', 'man', 'suffer', 'burn', 'percent',\n", " 'of', 'body', 'boat', 'reportedly', 'explode', 'into', 'flame',\n", " 'shortly', 'before', 'morning', 'paramedic', 'treat', 'man', 'at',\n", " 'scene', 'with', 'burn', 'percent', 'of', 'body', 
'firefighting',\n", " 'boat', 'be', 'deploy', 'help', 'extinguish', 'blaze', 'man', 'be',\n", " 'treat', 'on', 'scene', 'be', 'surround', 'by', 'family', 'be',\n", " 'not', 'onboard', 'boat', 'at', 'time'], dtype=object)" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "headline[5]" ] }, { "cell_type": "code", "execution_count": 12, "id": "677055b4-978e-4253-90f4-3f903662e225", "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] } ], "source": [ "# vectorise the words\n", "doc_dict = gensim.corpora.Dictionary(headline)\n", "docs_vecs = [doc_dict.doc2bow(doc) for doc in headline]" ] }, { "cell_type": "code", "execution_count": 13, "id": "a54d1768-b069-4936-a156-deaf0b506d93", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Number of unique tokens: 33794\n", "Number of articles: 1565\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] } ], "source": [ "print(\"Number of unique tokens: %d\" % len(doc_dict))\n", "print(\"Number of articles: %d\" % len(docs_vecs))" ] }, { "cell_type": "code", "execution_count": 14, "id": "9147fa86-1503-4252-bd9b-92fea1e6a926", "metadata": { "scrolled": true, "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[('be', 53994),\n", " ('of', 49297),\n", " ('in', 39323),\n", " ('for', 18431),\n", " ('have', 17670),\n", " ('on', 17585),\n", " ('with', 12117),\n", " ('as', 11218),\n", " ('port', 10791),\n", " ('from', 10043),\n", " ('at', 9988),\n", " ('by', 9804),\n", " ('will', 7881),\n", " ('s', 7723),\n", " ('say', 7402),\n", " ('that', 7105),\n", " ('not', 5352),\n", " ('day', 5043),\n", " ('more', 4979),\n", " ('new', 4965),\n", " ('service', 4828),\n", " ('time', 4388),\n", " ('china', 4374),\n", " ('also', 4324),\n", " ('ship', 4006),\n", " ('than', 3534),\n", " ('year', 3495),\n", " ('can', 3294),\n", " ('trade', 3288),\n", " ('include', 3029),\n", " ('between', 3016),\n", " ('over', 3013),\n", " ('state', 3006),\n", " ('supply', 2992),\n", " ('do', 2900),\n", " ('report', 2836),\n", " ('one', 2752),\n", " ('after', 2752),\n", " ('area', 2749),\n", " ('vessel', 2722),\n", " ('cargo', 2712),\n", " ('people', 2692),\n", " ('increase', 2690),\n", " ('continue', 2652),\n", " ('chain', 2622),\n", " ('market', 2608),\n", " ('country', 2603),\n", " ('other', 2596),\n", " ('container', 2590),\n", " ('strike', 2576),\n", " ('expect', 2568),\n", " ('work', 2518),\n", " ('http', 2470),\n", " ('while', 2451),\n", " ('high', 2445),\n", " ('pm', 2436),\n", " ('about', 2364),\n", " ('remain', 2327),\n", " ('into', 2303),\n", " ('government', 2297),\n", " ('take', 2258),\n", " ('update', 2257),\n", " ('info', 2250),\n", " ('train', 2249),\n", " ('would', 
2213),\n", " ('terminal', 2206),\n", " ('city', 2205),\n", " ('due', 2191),\n", " ('asia', 2169),\n", " ('demand', 2164),\n", " ('two', 2120),\n", " ('world', 2109),\n", " ('may', 2104),\n", " ('make', 2099),\n", " ('use', 2057),\n", " ('customer', 2005),\n", " ('large', 2005),\n", " ('south', 2004),\n", " ('see', 1996),\n", " ('plan', 1983),\n", " ('march', 1969),\n", " ('company', 1967),\n", " ('through', 1963),\n", " ('most', 1932),\n", " ('north', 1924),\n", " ('global', 1899),\n", " ('first', 1897),\n", " ('line', 1884),\n", " ('across', 1855),\n", " ('coast', 1810),\n", " ('operation', 1787),\n", " ('good', 1787),\n", " ('could', 1786),\n", " ('if', 1760),\n", " ('week', 1752),\n", " ('business', 1733),\n", " ('million', 1683),\n", " ('when', 1682),\n", " ('last', 1674),\n", " ('month', 1662)]\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] } ], "source": [ "# Calculate word frequencies\n", "word_frequencies = {doc_dict[word_id]: freq for word_id, freq in doc_dict.cfs.items()}\n", "sorted_words = sorted(word_frequencies.items(), key=lambda x: x[1], reverse=True)\n", "\n", "pprint(sorted_words[:100])" ] }, { "cell_type": "markdown", "id": "5ed78239-2ce1-4784-a8f4-4c7438c8627b", "metadata": {}, "source": [ "# LDA Modelling" ] }, { "cell_type": "markdown", "id": "9db83273-461d-4f70-b23f-ec967579d94f", "metadata": {}, "source": [ "## Benchmark Model" ] }, { "cell_type": "code", "execution_count": null, "id": "e6d577bd-9936-4d45-be90-345af2eb4827", "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/wyf/miniconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", " and should_run_async(code)\n" ] } ], "source": [ "# Build LDA benchmark model\n", "lda_model = gensim.models.LdaMulticore(\n", " corpus=docs_vecs,\n", " id2word=doc_dict,\n", " num_topics=4,\n", " random_state=42,\n", " chunksize=100,\n", " passes=10,\n", " per_word_topics=True,\n", ")" ] }, { "cell_type": "code", "execution_count": null, "id": "c4f1521f-5f43-40d2-a3a3-a8ac2ca6fec2", "metadata": { "tags": [] }, "outputs": [], "source": [ "from pprint import pprint\n", "\n", "# Print the Keyword in the 10 topics\n", "pprint(lda_model.print_topics())\n", "doc_lda = lda_model[docs_vecs]" ] }, { "cell_type": "code", "execution_count": null, "id": "fd57b1f4-a6cd-41e8-964f-d8a1d30aa3c9", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Compute Benchmark Coherence Score\n", "coherence_model_lda = CoherenceModel(\n", " model=lda_model, texts=headline, dictionary=doc_dict, coherence=\"c_v\"\n", ")\n", "coherence_lda = coherence_model_lda.get_coherence()\n", "print(\"\\nCoherence Score LDAModel: \", coherence_lda)" ] }, { "cell_type": "code", "execution_count": null, "id": "152e5a3a-7afe-4fb8-a02f-d7492ad80936", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Compute Benchmark Perplexity\n", "perplex = lda_model.log_perplexity(docs_vecs, total_docs=None) # For LDAModel\n", "# a measure of how good the model is. 
lower the better.\n", "\n", "print(\"\\nPerplexity for LDAModel: \", perplex)" ] }, { "cell_type": "code", "execution_count": null, "id": "7dd3a60a-5c6f-4249-9868-30528a5b0ac8", "metadata": {}, "outputs": [], "source": [ "from pprint import pprint\n", "import pyLDAvis\n", "import pyLDAvis.gensim_models as gensimvis\n", "\n", "# feed the LDA model into the pyLDAvis instance\n", "pyLDAvis.enable_notebook()\n", "visual = gensimvis.prepare(lda_model, docs_vecs, doc_dict)\n", "\n", "# Save the output to the html file\n", "pyLDAvis.save_html(visual, \"topic_viz_benchmark_minor.html\")" ] }, { "cell_type": "code", "execution_count": null, "id": "26da4eea-06a0-4ff7-ae14-2f40fa0db04b", "metadata": {}, "outputs": [], "source": [ "# break" ] }, { "cell_type": "markdown", "id": "1895598f-3e5f-4acd-83a6-4491cc90f695", "metadata": {}, "source": [ "# Hyper-Parameter Tuning and Evaluation" ] }, { "cell_type": "markdown", "id": "47136c89-ff7b-4ac9-840f-04122fe62160", "metadata": {}, "source": [ "Run the cells below only for re-modelling with new datasets, the whole tuning and evaluation process may take hours to run." 
] }, { "cell_type": "code", "execution_count": null, "id": "c79ca5c4-e078-43ce-a430-8c1ed93dcd64", "metadata": {}, "outputs": [], "source": [ "# hyper-parameter tuning (alpha and beta)\n", "def compute_coherence_values(corpus, dictionary, k, a, b):\n", "\n", " lda_model = gensim.models.LdaMulticore(\n", " corpus=corpus,\n", " id2word=dictionary,\n", " num_topics=k,\n", " random_state=42,\n", " chunksize=100,\n", " passes=10,\n", " alpha=a,\n", " eta=b,\n", " )\n", "\n", " coherence_model_lda = CoherenceModel(\n", " model=lda_model, texts=headline, dictionary=doc_dict, coherence=\"c_v\"\n", " )\n", " coherence = coherence_model_lda.get_coherence()\n", " perplex = lda_model.log_perplexity(docs_vecs, total_docs=None)\n", "\n", " return coherence, perplex" ] }, { "cell_type": "code", "execution_count": null, "id": "1c3c8478-9336-40f2-bb30-a37db4243b67", "metadata": {}, "outputs": [], "source": [ "# setup\n", "import numpy as np\n", "\n", "from gensim.models import CoherenceModel\n", "\n", "model_list = []\n", "coherence_values = []\n", "perplexity_values = []\n", "model_topics = []\n", "alpha_result = []\n", "beta_result = []\n", "\n", "# topic ranges\n", "num_topics = range(4, 13)\n", "\n", "# Alpha parameter\n", "alpha = list(np.arange(0.31, 1, 0.3))\n", "alpha.append(\"symmetric\")\n", "alpha.append(\"asymmetric\")\n", "\n", "# Beta parameter\n", "beta = list(np.arange(0.31, 1, 0.3))\n", "beta.append(\"symmetric\")" ] }, { "cell_type": "markdown", "id": "c7e6bc53-0b57-4858-879a-644eca54ddbc", "metadata": {}, "source": [ "Rationale behind the alpha and eta: https://stats.stackexchange.com/questions/37405/natural-interpretation-for-lda-hyperparameters" ] }, { "cell_type": "code", "execution_count": null, "id": "02877b81-32df-4168-8e62-4cbca2be100b", "metadata": { "tags": [] }, "outputs": [], "source": [ "print(\"Topic range: \", num_topics)\n", "print(\"Alpha: \", alpha)\n", "print(\"Beta: \", beta)" ] }, { "cell_type": "code", "execution_count": null, "id": 
"3c1f703c-4778-467f-a12e-0c18eeb274c5", "metadata": {}, "outputs": [], "source": [ "import datetime\n", "import numpy as np\n", "from gensim.models import CoherenceModel\n", "\n", "print(datetime.datetime.now())\n", "\n", "for a in alpha:\n", " for b in beta:\n", " for num in num_topics:\n", " cv, pv = compute_coherence_values(\n", " corpus=docs_vecs, dictionary=doc_dict, k=num, a=a, b=b\n", " )\n", "\n", " model_topics.append(num)\n", " coherence_values.append(cv)\n", " perplexity_values.append(pv)\n", " alpha_result.append(a)\n", " beta_result.append(b)\n", " print(\n", " \"#Topics: \"\n", " + str(num)\n", " + \", CV Score: \"\n", " + str(coherence_values[-1])\n", " + \", PV Score: \"\n", " + str(perplexity_values[-1])\n", " + \", Alpha: \"\n", " + str(alpha_result[-1])\n", " + \", Beta: \"\n", " + str(beta_result[-1])\n", " )\n", "\n", "print(datetime.datetime.now())" ] }, { "cell_type": "markdown", "id": "364ff6d5-e3da-4dde-a2c8-5375fc5d711f", "metadata": {}, "source": [ "The table below reveals the top 20 fine tuned models with best combinations of coherence score and perplexity score. It was sorted by the coherence score in descending order as a higher coherence score indicates a better model, and sorted the perplexity score in ascending order as a lower perplexity score indicates a better model. While coherence score evaluates the quality of the topics, the perplexity score evaluates the overall performance of the model in predicting new documents. Usually, the coherence score is a better metric to use if the goal is to obtain topics that are semantically coherent and interpretable. Perplexity score, on the other hand, is a better metric to use if the goal is to build a model that generalises well to new data, in other words, how confident the model is in predicting the new data (Sánchez-Aguayo, et al., 2022). Ultimately, we aim to get a balance between the perplexity value and coherence score when determining our final model." 
] }, { "cell_type": "code", "execution_count": null, "id": "78a60032-a4d7-44d4-841c-a1bd3740d5dd", "metadata": {}, "outputs": [], "source": [ "# Find the top 20 combinations based on Coherence Score and Perplexity Score\n", "result = pd.DataFrame(\n", " {\n", " \"Topics\": model_topics,\n", " \"Coherence Score\": coherence_values,\n", " \"Perplexity Score\": perplexity_values,\n", " \"Alpha\": alpha_result,\n", " \"Beta\": beta_result,\n", " }\n", ")\n", "result.sort_values(\n", " by=[\"Coherence Score\", \"Perplexity Score\"], ascending=[False, True]\n", ").head(20)" ] }, { "cell_type": "code", "execution_count": null, "id": "3461df57-c069-4ad2-80d7-8890dec9438e", "metadata": {}, "outputs": [], "source": [ "result.to_csv(\"lda_fine_tuning_result.csv\")" ] }, { "cell_type": "code", "execution_count": null, "id": "800e5a4b-7302-42e8-97b0-5b598c1c80ae", "metadata": { "scrolled": true }, "outputs": [], "source": [ "# Show graph Topics vs Coherence Score\n", "result.groupby(\"Alpha\").plot(x=\"Topics\", y=\"Coherence Score\", legend=True)" ] }, { "cell_type": "code", "execution_count": null, "id": "26996b89-0e7a-4f2d-8cf7-c4a716569bc2", "metadata": {}, "outputs": [], "source": [ "# Show graph Topics vs Perplexity Score\n", "\n", "plt.plot(model_topics, coherence_values)\n", "plt.xlabel(\"Num Topics\")\n", "plt.ylabel(\"Coherence Score\")\n", "plt.legend((\"Coherence Score\"), loc=\"best\")\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "id": "91d2f4c1-de77-44b6-b41b-fcc9a07233e8", "metadata": {}, "outputs": [], "source": [ "# Show graph Topics vs Perplexity Score\n", "\n", "plt.plot(model_topics, perplexity_values)\n", "plt.xlabel(\"Num Topics\")\n", "plt.ylabel(\"Perplexity score\")\n", "plt.legend((\"perplexity_values\"), loc=\"best\")\n", "plt.show()" ] }, { "cell_type": "markdown", "id": "cdc3ddd2-f743-4e5b-b6c6-2656e0b77aec", "metadata": {}, "source": [ "## Final Model" ] }, { "cell_type": "markdown", "id": 
"e86c2bfe-264b-4530-9d81-10b1cdc5071c", "metadata": {}, "source": [ "refer to the script topic_modelling_severe for detailed explanation" ] }, { "cell_type": "code", "execution_count": null, "id": "490734ed-077c-4fb0-930c-0b42f4f63c94", "metadata": {}, "outputs": [], "source": [ "# realised that there may be some overlaps for 8 topics, thus 4-6 topics are optimal\n", "k = 2\n", "# a = 'asymmetric'\n", "a = 0.31\n", "# b = 0.31\n", "b = \"symmetric\"\n", "\n", "\n", "final_model = gensim.models.LdaMulticore(\n", " corpus=docs_vecs,\n", " id2word=doc_dict,\n", " num_topics=k,\n", " random_state=42,\n", " chunksize=100,\n", " passes=10,\n", " alpha=a,\n", " eta=b,\n", ")" ] }, { "cell_type": "code", "execution_count": null, "id": "afe8abf0-2d12-414e-92be-a655865addb1", "metadata": { "tags": [] }, "outputs": [], "source": [ "compute_coherence_values(corpus=docs_vecs, dictionary=doc_dict, k=k, a=a, b=b)" ] }, { "cell_type": "code", "execution_count": null, "id": "8430a827-6dbb-4737-8ccc-78ed17a01234", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Set up the environment to display the graphical outputs\n", "# feed the LDA model into the pyLDAvis instance\n", "pyLDAvis.enable_notebook()\n", "visual = gensimvis.prepare(final_model, docs_vecs, doc_dict)\n", "\n", "# Save the output to the html file\n", "pyLDAvis.save_html(visual, \"topic_viz2_minor_training.html\")" ] }, { "cell_type": "code", "execution_count": null, "id": "5e30d71a-a3c7-40c7-94c0-7eea1bedc887", "metadata": { "tags": [] }, "outputs": [], "source": [ "final_model.print_topics(num_words=30)" ] }, { "cell_type": "code", "execution_count": null, "id": "b958c148-5dbe-4896-bcba-85e0f78b2941", "metadata": {}, "outputs": [], "source": [ "break" ] }, { "cell_type": "code", "execution_count": null, "id": "1939c00d-12e4-4670-9f18-f287f8b86bef", "metadata": {}, "outputs": [], "source": [ "# Map the topic ID with appropriate topic names, this part should be updated accordinly whenever the model is 
updated\n", "topic_mapping = {0: \"finance\", 1: \"tech\", 2: \"education\", 3: \"sports\", 4: \"leisure\"}" ] }, { "cell_type": "code", "execution_count": null, "id": "1a9a921e-3cdd-4fe5-a58f-7b50e5feeecd", "metadata": {}, "outputs": [], "source": [ "# Get the topics and their top keywords into a dataframe\n", "topics = final_model.show_topics(num_words=30)\n", "\n", "topic_keywords = pd.DataFrame()\n", "for topic_id, topic in topics:\n", " topic_keywords.at[topic_id, \"Topic Keywords\"] = topic\n", "\n", "topic_keywords[\"Topic ID\"] = topic_keywords.index\n", "topic_keywords[\"Topic Name\"] = topic_mapping\n", "topic_keywords" ] }, { "cell_type": "markdown", "id": "607d2cfd-b3ca-4f99-9e01-d320ca98a2a0", "metadata": {}, "source": [ "# Save the final model " ] }, { "cell_type": "code", "execution_count": null, "id": "84eb2746-173a-4283-bca5-681f77548698", "metadata": {}, "outputs": [], "source": [ "# Save a model to disk, or reload a pre-trained model\n", "# naming convention: final_model_topic_alpha_eta\n", "final_model.save(\"final_model_5_asym_91\")" ] }, { "cell_type": "markdown", "id": "a7b6e4d9-a577-4dfb-ba6e-fc74365880f4", "metadata": {}, "source": [ "# Find dominant topic(s) for each news article" ] }, { "cell_type": "markdown", "id": "0eeecbcb-358c-44f9-8463-75cdfac0ba90", "metadata": {}, "source": [ "Attach the dominant topics back to the news dataset for classifying purpose." 
] }, { "cell_type": "markdown", "id": "8bebb269-dbb0-4c46-925c-38de0f2bcfd7", "metadata": {}, "source": [ "Made use of gensim lda's own function: https://radimrehurek.com/gensim/models/ldamodel.html" ] }, { "cell_type": "code", "execution_count": null, "id": "f585ff52-b60d-4d70-ae64-a7c23d2cc6c1", "metadata": {}, "outputs": [], "source": [ "import warnings\n", "\n", "warnings.filterwarnings(\"ignore\")\n", "\n", "\n", "def format_topics_sentences(ldamodel, corpus, data):\n", " # Preallocate memory for the DataFrame\n", " num_docs = len(corpus)\n", " sent_topics = {\n", " \"Dominant_Topic\": [0] * num_docs,\n", " \"Perc_Contribution\": [0.0] * num_docs,\n", " \"Topic_Distribution\": [()] * num_docs,\n", " }\n", "\n", " # Get main topic in each document\n", " for i, row in enumerate(ldamodel[corpus]):\n", " row = sorted(row, key=lambda x: (x[1]), reverse=True)\n", " if row:\n", " # Get the Dominant topic, Perc Contribution and Keywords for each document\n", " dominant_topic, perc_contribution = row[0]\n", " topic_distribution = row\n", " sent_topics[\"Dominant_Topic\"][i] = int(dominant_topic)\n", " sent_topics[\"Perc_Contribution\"][i] = round(perc_contribution, 4)\n", " sent_topics[\"Topic_Distribution\"][i] = topic_distribution\n", "\n", " # Create the DataFrame\n", " sent_topics_df = pd.DataFrame(sent_topics)\n", " sent_topics_df[\"Text\"] = data\n", "\n", " return sent_topics_df" ] }, { "cell_type": "code", "execution_count": null, "id": "24d3ff60-035e-4133-9ffd-88cce5cdccb1", "metadata": {}, "outputs": [], "source": [ "df_topic_sents_keywords = format_topics_sentences(\n", " ldamodel=final_model, corpus=docs_vecs, data=cleaned.Headline_Details\n", ")" ] }, { "cell_type": "code", "execution_count": null, "id": "c88b088b", "metadata": {}, "outputs": [], "source": [ "# Format\n", "df_dominant_topic = df_topic_sents_keywords.reset_index()\n", "df_dominant_topic.columns = [\n", " \"Document_No\",\n", " \"Dominant_Topic\",\n", " \"Topic_Perc_Contrib\",\n", " 
\"Topic_Distribution\",\n", " \"Text\",\n", "]\n", "\n", "# Show\n", "df_dominant_topic.head(10)" ] }, { "cell_type": "markdown", "id": "560da382-aa86-4df1-8b85-56b057a27cd4", "metadata": {}, "source": [ "# Result Analysis" ] }, { "cell_type": "code", "execution_count": null, "id": "4fe6b40b-6922-4de3-8d9e-dac7474b6303", "metadata": {}, "outputs": [], "source": [ "df_dominant_topic[\"Dominant_Topic\"].value_counts()" ] }, { "cell_type": "code", "execution_count": null, "id": "b9917340-31cf-48af-871f-b481128fdf22", "metadata": {}, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "\n", "# Get value counts of each topic\n", "topic_counts = df_dominant_topic[\"Dominant_Topic\"].value_counts()\n", "\n", "# Create a bar plot\n", "plt.figure(figsize=(8, 6))\n", "topic_counts.plot(kind=\"bar\", color=\"skyblue\")\n", "\n", "# Add labels to the bars\n", "for i, count in enumerate(topic_counts):\n", " plt.text(i, count, str(count), ha=\"center\", va=\"bottom\")\n", "\n", "# Add labels and title\n", "plt.xlabel(\"Topics\")\n", "plt.ylabel(\"Number of News\")\n", "plt.title(\"Topic Distribution\")\n", "\n", "# Show the plot\n", "plt.xticks(rotation=45) # Rotate x-axis labels for better readability\n", "plt.tight_layout()\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "id": "fffa1e57-f975-4469-a42b-19d76c60fb66", "metadata": {}, "outputs": [], "source": [ "df_dominant_topic.sort_values(by=\"Topic_Perc_Contrib\", ascending=True).head(20)" ] }, { "cell_type": "code", "execution_count": null, "id": "8510f506-141f-4382-b668-251df1afc95f", "metadata": {}, "outputs": [], "source": [ "# Sample 100 rows, can change the random_state for different samples\n", "sampled_data = df_dominant_topic.sample(n=100, random_state=42)\n", "sampled_df = pd.DataFrame(sampled_data).reset_index()\n", "sampled_df.to_csv(\"sample_minor.csv\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { 
"codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.16" } }, "nbformat": 4, "nbformat_minor": 5 }