{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "4ba6aba8"
},
"source": [
"# 🤖 **Data Collection, Creation, Storage, and Processing**\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "jpASMyIQMaAq"
},
"source": [
"## **1.** 📦 Install required packages"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "f48c8f8c",
"outputId": "4f196026-072b-44eb-a94e-cc21132bfa7e"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (4.13.5)\n",
"Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (2.2.2)\n",
"Requirement already satisfied: matplotlib in /usr/local/lib/python3.12/dist-packages (3.10.0)\n",
"Requirement already satisfied: seaborn in /usr/local/lib/python3.12/dist-packages (0.13.2)\n",
"Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (2.0.2)\n",
"Requirement already satisfied: textblob in /usr/local/lib/python3.12/dist-packages (0.19.0)\n",
"Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4) (2.8.3)\n",
"Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4) (4.15.0)\n",
"Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas) (2.9.0.post0)\n",
"Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas) (2025.2)\n",
"Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas) (2025.3)\n",
"Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (1.3.3)\n",
"Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (0.12.1)\n",
"Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (4.62.1)\n",
"Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (1.5.0)\n",
"Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (26.0)\n",
"Requirement already satisfied: pillow>=8 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (11.3.0)\n",
"Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (3.3.2)\n",
"Requirement already satisfied: nltk>=3.9 in /usr/local/lib/python3.12/dist-packages (from textblob) (3.9.1)\n",
"Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (8.3.1)\n",
"Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (1.5.3)\n",
"Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (2025.11.3)\n",
"Requirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (4.67.3)\n",
"Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas) (1.17.0)\n"
]
}
],
"source": [
"%pip install beautifulsoup4 pandas matplotlib seaborn numpy textblob"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "lquNYCbfL9IM"
},
"source": [
"## **2.** ⛏ Web-scrape all book titles, prices, and ratings from books.toscrape.com"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0IWuNpxxYDJF"
},
"source": [
"### *a. Initial setup*\n",
"Define the base url of the website you will scrape as well as how and what you will scrape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "91d52125"
},
"outputs": [],
"source": [
"import requests\n",
"from bs4 import BeautifulSoup\n",
"import pandas as pd\n",
"import time\n",
"\n",
"# Catalogue pages are numbered 1..50: page-1.html ... page-50.html\n",
"base_url = \"https://books.toscrape.com/catalogue/page-{}.html\"\n",
"# Browser-like User-Agent so the request is not rejected as an obvious bot\n",
"headers = {\"User-Agent\": \"Mozilla/5.0\"}\n",
"\n",
"# Accumulators filled by the scraping loop below.\n",
"# NOTE: re-run this cell before re-scraping, otherwise the lists keep growing.\n",
"titles, prices, ratings = [], [], []"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "oCdTsin2Yfp3"
},
"source": [
"### *b. Fill titles, prices, and ratings from the web pages*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "xqO5Y3dnYhxt"
},
"outputs": [],
"source": [
"# Loop through all 50 catalogue pages\n",
"for page in range(1, 51):\n",
"    url = base_url.format(page)\n",
"    response = requests.get(url, headers=headers, timeout=30)\n",
"    response.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page\n",
"    soup = BeautifulSoup(response.content, \"html.parser\")\n",
"    books = soup.find_all(\"article\", class_=\"product_pod\")\n",
"\n",
"    for book in books:\n",
"        titles.append(book.h3.a[\"title\"])\n",
"        # Strip the leading currency marker (\"£\", or mojibake \"Â£\" on mis-decoded pages)\n",
"        price_text = book.find(\"p\", class_=\"price_color\").text.strip()\n",
"        prices.append(float(price_text.lstrip(\"Â£\")))\n",
"        # Rating word is encoded as a CSS class, e.g. <p class=\"star-rating Three\">\n",
"        ratings.append(book.find(\"p\", class_=\"star-rating\")[\"class\"][1])\n",
"\n",
"    time.sleep(0.5)  # polite scraping delay"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "T0TOeRC4Yrnn"
},
"source": [
"### *c. ✋🏻🛑⛔️ Create a dataframe df_books that contains the now complete \"title\", \"price\", and \"rating\" objects*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "l5FkkNhUYTHh"
},
"outputs": [],
"source": [
"# pandas was already imported in step 2a; no need to re-import it here\n",
"df_books = pd.DataFrame({\n",
"    \"title\": titles,\n",
"    \"price\": prices,\n",
"    \"rating\": ratings\n",
"})"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "duI5dv3CZYvF"
},
"source": [
"### *d. Save web-scraped dataframe either as a CSV or Excel file*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "lC1U_YHtZifh"
},
"outputs": [],
"source": [
"# 💾 Save to CSV\n",
"df_books.to_csv(\"books_data.csv\", index=False)\n",
"\n",
"# 💾 Or save to Excel\n",
"# df_books.to_excel(\"books_data.xlsx\", index=False)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "qMjRKMBQZlJi"
},
"source": [
"### *e. ✋🏻🛑⛔️ View first few lines*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 204
},
"id": "O_wIvTxYZqCK",
"outputId": "a0a39578-2938-4332-cc23-c868d845b4e3"
},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" title price rating\n",
"0 A Light in the Attic 51.77 Three\n",
"1 Tipping the Velvet 53.74 One\n",
"2 Soumission 50.10 One\n",
"3 Sharp Objects 47.82 Four\n",
"4 Sapiens: A Brief History of Humankind 54.23 Five"
],
"text/html": [
"\n",
"
\n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" title | \n",
" price | \n",
" rating | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" A Light in the Attic | \n",
" 51.77 | \n",
" Three | \n",
"
\n",
" \n",
" | 1 | \n",
" Tipping the Velvet | \n",
" 53.74 | \n",
" One | \n",
"
\n",
" \n",
" | 2 | \n",
" Soumission | \n",
" 50.10 | \n",
" One | \n",
"
\n",
" \n",
" | 3 | \n",
" Sharp Objects | \n",
" 47.82 | \n",
" Four | \n",
"
\n",
" \n",
" | 4 | \n",
" Sapiens: A Brief History of Humankind | \n",
" 54.23 | \n",
" Five | \n",
"
\n",
" \n",
"
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "df_books",
"summary": "{\n \"name\": \"df_books\",\n \"rows\": 1000,\n \"fields\": [\n {\n \"column\": \"title\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 999,\n \"samples\": [\n \"The Grownup\",\n \"Persepolis: The Story of a Childhood (Persepolis #1-2)\",\n \"Ayumi's Violin\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"price\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 14.446689669952772,\n \"min\": 10.0,\n \"max\": 59.99,\n \"num_unique_values\": 903,\n \"samples\": [\n 19.73,\n 55.65,\n 46.31\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"rating\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"One\",\n \"Two\",\n \"Four\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}"
}
},
"metadata": {},
"execution_count": 40
}
],
"source": [
"df_books.head()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "p-1Pr2szaqLk"
},
"source": [
"## **3.** 🧩 Create a meaningful connection between real & synthetic datasets"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "SIaJUGIpaH4V"
},
"source": [
"### *a. Initial setup*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "-gPXGcRPuV_9"
},
"outputs": [],
"source": [
"import numpy as np\n",
"import random\n",
"from datetime import datetime\n",
"import warnings\n",
"\n",
"warnings.filterwarnings(\"ignore\")\n",
"# Seed both the stdlib and numpy RNGs so the synthetic data is reproducible\n",
"random.seed(2025)\n",
"np.random.seed(2025)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "pY4yCoIuaQqp"
},
"source": [
"### *b. Generate popularity scores based on rating (with some randomness) with a generate_popularity_score function*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "mnd5hdAbaNjz"
},
"outputs": [],
"source": [
"def generate_popularity_score(rating):\n",
"    \"\"\"Map a word rating (\"One\"..\"Five\") to a 1-5 popularity score with noise.\n",
"\n",
"    Unknown ratings fall back to a base of 3. A random trend factor of -1/0/+1\n",
"    (weighted 1:3:2, i.e. biased toward 0 and +1) is added, and the result is\n",
"    clipped to the range [1, 5].\n",
"    \"\"\"\n",
"    base = {\"One\": 2, \"Two\": 3, \"Three\": 3, \"Four\": 4, \"Five\": 4}.get(rating, 3)\n",
"    trend_factor = random.choices([-1, 0, 1], weights=[1, 3, 2])[0]\n",
"    return int(np.clip(base + trend_factor, 1, 5))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "n4-TaNTFgPak"
},
"source": [
"### *c. ✋🏻🛑⛔️ Run the function to create a \"popularity_score\" column from \"rating\"*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "V-G3OCUCgR07"
},
"outputs": [],
"source": [
"df_books[\"popularity_score\"] = df_books[\"rating\"].apply(generate_popularity_score)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "HnngRNTgacYt"
},
"source": [
"### *d. Decide on the sentiment_label based on the popularity score with a get_sentiment function*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "kUtWmr8maZLZ"
},
"outputs": [],
"source": [
"def get_sentiment(popularity_score):\n",
"    \"\"\"Bucket a 1-5 popularity score: <=2 negative, ==3 neutral, >=4 positive.\"\"\"\n",
"    if popularity_score <= 2:\n",
"        return \"negative\"\n",
"    elif popularity_score == 3:\n",
"        return \"neutral\"\n",
"    else:\n",
"        return \"positive\""
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "HF9F9HIzgT7Z"
},
"source": [
"### *e. ✋🏻🛑⛔️ Run the function to create a \"sentiment_label\" column from \"popularity_score\"*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "tafQj8_7gYCG"
},
"outputs": [],
"source": [
"df_books[\"sentiment_label\"] = df_books[\"popularity_score\"].apply(get_sentiment)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "T8AdKkmASq9a"
},
"source": [
"## **4.** 📈 Generate synthetic book sales data of 18 months"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "OhXbdGD5fH0c"
},
"source": [
"### *a. Create a generate_sales_profile function that would generate sales patterns based on sentiment_label (with some randomness)*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "qkVhYPXGbgEn"
},
"outputs": [],
"source": [
"def generate_sales_profile(sentiment):\n",
"    \"\"\"Generate 18 months of synthetic unit sales shaped by sentiment.\n",
"\n",
"    Positive books trend upward, negative books trend downward, neutral books\n",
"    stay roughly flat; sinusoidal seasonality and Gaussian noise are added and\n",
"    sales are clipped at zero. Returns a list of (\"YYYY-MM\", units) tuples.\n",
"    \"\"\"\n",
"    # NOTE(review): freq=\"M\" (month-end) is deprecated in pandas >= 2.2 in\n",
"    # favour of \"ME\"; kept as-is here for behavioural compatibility.\n",
"    months = pd.date_range(end=datetime.today(), periods=18, freq=\"M\")\n",
"\n",
"    if sentiment == \"positive\":\n",
"        base = random.randint(200, 300)\n",
"        trend = np.linspace(base, base + random.randint(20, 60), len(months))\n",
"    elif sentiment == \"negative\":\n",
"        base = random.randint(20, 80)\n",
"        trend = np.linspace(base, base - random.randint(10, 30), len(months))\n",
"    else: # neutral\n",
"        base = random.randint(80, 160)\n",
"        trend = np.full(len(months), base + random.randint(-10, 10))\n",
"\n",
"    # Mild seasonal swing (+/-10 units over ~1.5 cycles) plus per-month noise\n",
"    seasonality = 10 * np.sin(np.linspace(0, 3 * np.pi, len(months)))\n",
"    noise = np.random.normal(0, 5, len(months))\n",
"    monthly_sales = np.clip(trend + seasonality + noise, a_min=0, a_max=None).astype(int)\n",
"\n",
"    return list(zip(months.strftime(\"%Y-%m\"), monthly_sales))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "L2ak1HlcgoTe"
},
"source": [
"### *b. Run the function as part of building sales_data*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "SlJ24AUafoDB"
},
"outputs": [],
"source": [
"# Build one record per (book, month) so the result is a tidy long-format table\n",
"sales_data = []\n",
"for _, row in df_books.iterrows():\n",
"    records = generate_sales_profile(row[\"sentiment_label\"])\n",
"    for month, units in records:\n",
"        sales_data.append({\n",
"            \"title\": row[\"title\"],\n",
"            \"month\": month,\n",
"            \"units_sold\": units,\n",
"            \"sentiment_label\": row[\"sentiment_label\"]\n",
"        })"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "4IXZKcCSgxnq"
},
"source": [
"### *c. ✋🏻🛑⛔️ Create a df_sales DataFrame from sales_data*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "wcN6gtiZg-ws"
},
"outputs": [],
"source": [
"df_sales = pd.DataFrame(sales_data)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "EhIjz9WohAmZ"
},
"source": [
"### *d. Save df_sales as synthetic_sales_data.csv & view first few lines*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "MzbZvLcAhGaH",
"outputId": "cbd2b207-876f-42ce-8818-1ee4457efa97"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
" title month units_sold sentiment_label\n",
"0 A Light in the Attic 2024-09 100 neutral\n",
"1 A Light in the Attic 2024-10 109 neutral\n",
"2 A Light in the Attic 2024-11 102 neutral\n",
"3 A Light in the Attic 2024-12 107 neutral\n",
"4 A Light in the Attic 2025-01 108 neutral\n"
]
}
],
"source": [
"df_sales.to_csv(\"synthetic_sales_data.csv\", index=False)\n",
"\n",
"print(df_sales.head())"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "7g9gqBgQMtJn"
},
"source": [
"## **5.** 🎯 Generate synthetic customer reviews"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Gi4y9M9KuDWx"
},
"source": [
"### *a. ✋🏻🛑⛔️ Ask ChatGPT to create a list of 50 distinct generic book review texts for the sentiment labels \"positive\", \"neutral\", and \"negative\" called synthetic_reviews_by_sentiment*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "b3cd2a50"
},
"outputs": [],
"source": [
"synthetic_reviews_by_sentiment = {\n",
" \"positive\": [\n",
" \"I absolutely loved this book, it was amazing.\",\n",
" \"A fantastic read with great characters.\",\n",
" \"Really enjoyable and well written.\",\n",
" \"An excellent story from start to finish.\",\n",
" \"I couldn't put it down, highly recommend.\",\n",
" \"Brilliant and engaging narrative.\",\n",
" \"A masterpiece, truly inspiring.\",\n",
" \"Very entertaining and satisfying.\",\n",
" \"Loved every chapter of this book.\",\n",
" \"A great experience overall.\",\n",
" \"Superb writing and compelling plot.\",\n",
" \"One of the best books I've read.\",\n",
" \"Amazing storytelling and depth.\",\n",
" \"Highly enjoyable and emotional.\",\n",
" \"A very positive reading experience.\",\n",
" \"Incredible and memorable story.\",\n",
" \"Absolutely worth reading.\"\n",
" ],\n",
" \"neutral\": [\n",
" \"The book was okay, nothing special.\",\n",
" \"An average read with some good moments.\",\n",
" \"It was fine, but not very memorable.\",\n",
" \"Decent but not outstanding.\",\n",
" \"A fairly typical story.\",\n",
" \"Some parts were interesting, others not.\",\n",
" \"It was neither good nor bad.\",\n",
" \"A standard book with no surprises.\",\n",
" \"Moderately engaging but slow at times.\",\n",
" \"An acceptable read overall.\",\n",
" \"Nothing particularly stood out.\",\n",
" \"It had its moments but was inconsistent.\",\n",
" \"Just an average experience.\",\n",
" \"Okay for passing time.\",\n",
" \"Not bad, not great either.\",\n",
" \"Fairly predictable storyline.\"\n",
" ],\n",
" \"negative\": [\n",
" \"I didn't enjoy this book at all.\",\n",
" \"The story was boring and slow.\",\n",
" \"Very disappointing read.\",\n",
" \"Poorly written and hard to follow.\",\n",
" \"I struggled to finish it.\",\n",
" \"Not engaging and quite dull.\",\n",
" \"The plot made little sense.\",\n",
" \"A waste of time in my opinion.\",\n",
" \"Characters were flat and uninteresting.\",\n",
" \"I would not recommend this book.\",\n",
" \"Very underwhelming and forgettable.\",\n",
" \"Bad pacing and weak story.\",\n",
" \"It failed to keep my interest.\",\n",
" \"Quite frustrating to read.\",\n",
" \"Not worth reading.\",\n",
" \"I expected much better.\"\n",
" ]\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "fQhfVaDmuULT"
},
"source": [
"### *b. Generate 10 reviews per book using random sampling from the corresponding 50*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "l2SRc3PjuTGM"
},
"outputs": [],
"source": [
"review_rows = []\n",
"for _, row in df_books.iterrows():\n",
"    title = row['title']\n",
"    sentiment_label = row['sentiment_label']\n",
"    # Pool of canned review texts matching this book's sentiment label\n",
"    review_pool = synthetic_reviews_by_sentiment[sentiment_label]\n",
"    # Sample 10 distinct reviews per book (pools hold 16-17 texts, so this is valid)\n",
"    sampled_reviews = random.sample(review_pool, 10)\n",
"    for review_text in sampled_reviews:\n",
"        review_rows.append({\n",
"            \"title\": title,\n",
"            \"sentiment_label\": sentiment_label,\n",
"            \"review_text\": review_text,\n",
"            \"rating\": row['rating'],\n",
"            \"popularity_score\": row['popularity_score']\n",
"        })"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "bmJMXF-Bukdm"
},
"source": [
"### *c. Create the final dataframe df_reviews & save it as synthetic_book_reviews.csv*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ZUKUqZsuumsp"
},
"outputs": [],
"source": [
"df_reviews = pd.DataFrame(review_rows)\n",
"df_reviews.to_csv(\"synthetic_book_reviews.csv\", index=False)"
]
},
{
"cell_type": "markdown",
"source": [
"### *d. Prepare inputs for R*"
],
"metadata": {
"id": "_602pYUS3gY5"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "3946e521",
"outputId": "6a29f9d9-0dfc-4db1-896a-17e29119424a"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"✅ Wrote synthetic_title_level_features.csv\n",
"✅ Wrote synthetic_monthly_revenue_series.csv\n"
]
}
],
"source": [
"import numpy as np\n",
"import pandas as pd # Ensure pandas is imported locally\n",
"\n",
"def _safe_num(s):\n",
" # Ensure s is a Series before applying string operations\n",
" if not isinstance(s, pd.Series):\n",
" s = pd.Series(s)\n",
" return pd.to_numeric(\n",
" s.astype(str).str.replace(r\"[^0-9.]\", \"\", regex=True),\n",
" errors=\"coerce\"\n",
" )\n",
"\n",
"# --- Clean book metadata (price/rating) ---\n",
"df_books_r = df_books.copy()\n",
"if not df_books_r.empty:\n",
" if \"price\" in df_books_r.columns:\n",
" df_books_r[\"price\"] = _safe_num(df_books_r[\"price\"])\n",
"    if \"rating\" in df_books_r.columns:\n",
"        # Ratings are words (\"One\"..\"Five\"); _safe_num would strip every\n",
"        # character and coerce the whole column to NaN. Map words to numbers\n",
"        # first, falling back to numeric coercion for any other format.\n",
"        _rating_words = {\"One\": 1, \"Two\": 2, \"Three\": 3, \"Four\": 4, \"Five\": 5}\n",
"        df_books_r[\"rating\"] = (\n",
"            df_books_r[\"rating\"].map(_rating_words)\n",
"            .fillna(_safe_num(df_books_r[\"rating\"]))\n",
"        )\n",
" if \"title\" in df_books_r.columns:\n",
" df_books_r[\"title\"] = df_books_r[\"title\"].astype(str).str.strip()\n",
"\n",
"\n",
"# --- Clean sales ---\n",
"df_sales_r = df_sales.copy()\n",
"if not df_sales_r.empty:\n",
" if \"title\" in df_sales_r.columns:\n",
" df_sales_r[\"title\"] = df_sales_r[\"title\"].astype(str).str.strip()\n",
" if \"month\" in df_sales_r.columns:\n",
" df_sales_r[\"month\"] = pd.to_datetime(df_sales_r[\"month\"], errors=\"coerce\")\n",
" if \"units_sold\" in df_sales_r.columns:\n",
" df_sales_r[\"units_sold\"] = _safe_num(df_sales_r[\"units_sold\"])\n",
"else:\n",
" # If df_sales_r is empty (no columns), create an empty one with expected columns\n",
" df_sales_r = pd.DataFrame(columns=['title', 'month', 'units_sold', 'sentiment_label'])\n",
"\n",
"\n",
"# --- Clean reviews ---\n",
"df_reviews_r = df_reviews.copy()\n",
"if not df_reviews_r.empty:\n",
" if \"title\" in df_reviews_r.columns:\n",
" df_reviews_r[\"title\"] = df_reviews_r[\"title\"].astype(str).str.strip()\n",
" if \"sentiment_label\" in df_reviews_r.columns:\n",
" df_reviews_r[\"sentiment_label\"] = df_reviews_r[\"sentiment_label\"].astype(str).str.lower().str.strip()\n",
"    if \"rating\" in df_reviews_r.columns:\n",
"        # Ratings are words (\"One\"..\"Five\"); map to numbers so they are not\n",
"        # coerced to NaN, falling back to numeric coercion for other formats.\n",
"        _rating_words = {\"One\": 1, \"Two\": 2, \"Three\": 3, \"Four\": 4, \"Five\": 5}\n",
"        df_reviews_r[\"rating\"] = (\n",
"            df_reviews_r[\"rating\"].map(_rating_words)\n",
"            .fillna(_safe_num(df_reviews_r[\"rating\"]))\n",
"        )\n",
" if \"popularity_score\" in df_reviews_r.columns:\n",
" df_reviews_r[\"popularity_score\"] = _safe_num(df_reviews_r[\"popularity_score\"])\n",
"else:\n",
" # If df_reviews_r is empty (no columns), create an empty one with expected columns\n",
" df_reviews_r = pd.DataFrame(columns=['title', 'sentiment_label', 'review_text', 'rating', 'popularity_score'])\n",
"\n",
"\n",
"# --- Sentiment shares per title (from reviews) ---\n",
"if not df_reviews_r.empty and 'title' in df_reviews_r.columns and 'sentiment_label' in df_reviews_r.columns:\n",
" sent_counts = (\n",
" df_reviews_r.groupby([\"title\", \"sentiment_label\"])\n",
" .size()\n",
" .unstack(fill_value=0)\n",
" )\n",
"else:\n",
" sent_counts = pd.DataFrame(columns=['title', 'positive', 'neutral', 'negative'])\n",
"\n",
"for lab in [\"positive\", \"neutral\", \"negative\"]:\n",
" if lab not in sent_counts.columns:\n",
" sent_counts[lab] = 0\n",
"\n",
"if not sent_counts.empty and 'total_reviews' not in sent_counts.columns:\n",
" sent_counts[\"total_reviews\"] = sent_counts[[\"positive\", \"neutral\", \"negative\"]].sum(axis=1)\n",
" den = sent_counts[\"total_reviews\"].replace(0, np.nan)\n",
" sent_counts[\"share_positive\"] = sent_counts[\"positive\"] / den\n",
" sent_counts[\"share_neutral\"] = sent_counts[\"neutral\"] / den\n",
" sent_counts[\"share_negative\"] = sent_counts[\"negative\"] / den\n",
" sent_counts = sent_counts.reset_index()\n",
"elif sent_counts.empty:\n",
" sent_counts = pd.DataFrame(columns=['title', 'share_positive', 'share_neutral', 'share_negative', 'total_reviews'])\n",
"\n",
"\n",
"# --- Sales aggregation per title ---\n",
"if not df_sales_r.empty and 'title' in df_sales_r.columns:\n",
" sales_by_title = (\n",
" df_sales_r.dropna(subset=[\"title\"])\n",
" .groupby(\"title\", as_index=False)\n",
" .agg(\n",
" months_observed=(\"month\", \"nunique\"),\n",
" avg_units_sold=(\"units_sold\", \"mean\"),\n",
" total_units_sold=(\"units_sold\", \"sum\"),\n",
" )\n",
" )\n",
"else:\n",
" sales_by_title = pd.DataFrame(columns=['title', 'months_observed', 'avg_units_sold', 'total_units_sold'])\n",
"\n",
"\n",
"# --- Title-level features (join sales + books + sentiment) ---\n",
"# Initialize df_title as an empty DataFrame with all expected columns\n",
"df_title = pd.DataFrame(columns=[\n",
" 'title', 'months_observed', 'avg_units_sold', 'total_units_sold',\n",
" 'price', 'rating', 'share_positive', 'share_neutral', 'share_negative',\n",
" 'total_reviews', 'avg_revenue', 'total_revenue'\n",
"])\n",
"\n",
"if not sales_by_title.empty and 'title' in sales_by_title.columns and \\\n",
" not df_books_r.empty and 'title' in df_books_r.columns and \\\n",
" not sent_counts.empty and 'title' in sent_counts.columns:\n",
"\n",
" df_title = (\n",
" sales_by_title\n",
" .merge(df_books_r[[\"title\", \"price\", \"rating\"]], on=\"title\", how=\"left\")\n",
" .merge(sent_counts[[\"title\", \"share_positive\", \"share_neutral\", \"share_negative\", \"total_reviews\"]],\n",
" on=\"title\", how=\"left\")\n",
" )\n",
"\n",
" if not df_title.empty:\n",
" if 'avg_units_sold' in df_title.columns and 'price' in df_title.columns:\n",
" df_title[\"avg_revenue\"] = df_title[\"avg_units_sold\"] * df_title[\"price\"]\n",
" if 'total_units_sold' in df_title.columns and 'price' in df_title.columns:\n",
" df_title[\"total_revenue\"] = df_title[\"total_units_sold\"] * df_title[\"price\"]\n",
"\n",
"df_title.to_csv(\"synthetic_title_level_features.csv\", index=False)\n",
"print(\"✅ Wrote synthetic_title_level_features.csv\")\n",
"\n",
"\n",
"# --- Monthly revenue series (proxy: units_sold * price) ---\n",
"monthly_rev = pd.DataFrame() # Initialize as empty\n",
"if not df_sales_r.empty and 'title' in df_sales_r.columns and \\\n",
" not df_books_r.empty and 'title' in df_books_r.columns and 'price' in df_books_r.columns:\n",
"\n",
" monthly_rev = (\n",
" df_sales_r.merge(df_books_r[[\"title\", \"price\"]], on=\"title\", how=\"left\")\n",
" )\n",
" if not monthly_rev.empty and 'units_sold' in monthly_rev.columns and 'price' in monthly_rev.columns:\n",
" monthly_rev[\"revenue\"] = monthly_rev[\"units_sold\"] * monthly_rev[\"price\"]\n",
"\n",
"df_monthly = pd.DataFrame(columns=['month', 'total_revenue']) # Initialize df_monthly\n",
"\n",
"if not monthly_rev.empty and 'month' in monthly_rev.columns and 'revenue' in monthly_rev.columns:\n",
" df_monthly = (\n",
" monthly_rev.dropna(subset=[\"month\"])\n",
" .groupby(\"month\", as_index=False)[\"revenue\"]\n",
" .sum()\n",
" .rename(columns={\"revenue\": \"total_revenue\"})\n",
" .sort_values(\"month\")\n",
" )\n",
" # if revenue is all NA (e.g., missing price), fallback to units_sold as a teaching proxy\n",
" if 'total_revenue' in df_monthly.columns and df_monthly[\"total_revenue\"].notna().sum() == 0 and \\\n",
" not df_sales_r.empty and 'month' in df_sales_r.columns and 'units_sold' in df_sales_r.columns:\n",
" df_monthly = (\n",
" df_sales_r.dropna(subset=[\"month\"])\n",
" .groupby(\"month\", as_index=False)[\"units_sold\"]\n",
" .sum()\n",
" .rename(columns={\"units_sold\": \"total_revenue\"})\n",
" .sort_values(\"month\")\n",
" )\n",
"\n",
"if not df_monthly.empty and 'month' in df_monthly.columns:\n",
" df_monthly[\"month\"] = pd.to_datetime(df_monthly[\"month\"], errors=\"coerce\").dt.strftime(\"%Y-%m-%d\")\n",
"\n",
"df_monthly.to_csv(\"synthetic_monthly_revenue_series.csv\", index=False)\n",
"print(\"✅ Wrote synthetic_monthly_revenue_series.csv\")\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "RYvGyVfXuo54"
},
"source": [
"### *e. ✋🏻🛑⛔️ View the first few lines*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 142
},
"id": "xfE8NMqOurKo",
"outputId": "c21a60f1-b8f4-4586-fabc-19c1f81a14d4"
},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" title month units_sold sentiment_label\n",
"0 A Light in the Attic 2024-09 100 neutral\n",
"1 A Light in the Attic 2024-10 109 neutral\n",
"2 A Light in the Attic 2024-11 102 neutral"
],
"text/html": [
"\n",
" \n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" title | \n",
" month | \n",
" units_sold | \n",
" sentiment_label | \n",
"
\n",
" \n",
" \n",
" \n",
" | 0 | \n",
" A Light in the Attic | \n",
" 2024-09 | \n",
" 100 | \n",
" neutral | \n",
"
\n",
" \n",
" | 1 | \n",
" A Light in the Attic | \n",
" 2024-10 | \n",
" 109 | \n",
" neutral | \n",
"
\n",
" \n",
" | 2 | \n",
" A Light in the Attic | \n",
" 2024-11 | \n",
" 102 | \n",
" neutral | \n",
"
\n",
" \n",
"
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "df_sales",
"summary": "{\n \"name\": \"df_sales\",\n \"rows\": 18000,\n \"fields\": [\n {\n \"column\": \"title\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 999,\n \"samples\": [\n \"The Grownup\",\n \"Persepolis: The Story of a Childhood (Persepolis #1-2)\",\n \"Ayumi's Violin\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"month\",\n \"properties\": {\n \"dtype\": \"object\",\n \"num_unique_values\": 18,\n \"samples\": [\n \"2024-09\",\n \"2024-10\",\n \"2025-05\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"units_sold\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 98,\n \"min\": 0,\n \"max\": 362,\n \"num_unique_values\": 354,\n \"samples\": [\n 214,\n 289,\n 205\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"sentiment_label\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 3,\n \"samples\": [\n \"neutral\",\n \"negative\",\n \"positive\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}"
}
},
"metadata": {},
"execution_count": 59
}
],
"source": [
"df_sales.head(3)"
]
}
],
"metadata": {
"colab": {
"collapsed_sections": [
"jpASMyIQMaAq",
"lquNYCbfL9IM",
"0IWuNpxxYDJF",
"oCdTsin2Yfp3",
"T0TOeRC4Yrnn",
"duI5dv3CZYvF",
"qMjRKMBQZlJi",
"p-1Pr2szaqLk",
"SIaJUGIpaH4V",
"pY4yCoIuaQqp",
"n4-TaNTFgPak",
"HnngRNTgacYt",
"HF9F9HIzgT7Z",
"T8AdKkmASq9a",
"OhXbdGD5fH0c",
"L2ak1HlcgoTe",
"4IXZKcCSgxnq",
"EhIjz9WohAmZ",
"Gi4y9M9KuDWx",
"fQhfVaDmuULT",
"bmJMXF-Bukdm",
"RYvGyVfXuo54"
],
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}