first draft

polars/11_missing_data.py (ADDED, +523 -0)
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "plotly[express]==6.3.0",
#     "polars==1.33.1",
# ]
# ///

import marimo

__generated_with = "0.15.3"
app = marimo.App(width="medium")


@app.cell(hide_code=True)
def _(mo):
    mo.md(
        r"""
    # Dealing with Missing Data

    _by [etrotta](https://github.com/etrotta)_

    This notebook covers some common problems you may face when working with real datasets and techniques used to deal with them, providing an overview of the Polars functionality for handling missing data.
    """
    )
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md(
        r"""
    We will be using a dataset about the weather in Rio de Janeiro, originally available in Google BigQuery under `datario.clima_pluviometro`. What you need to know about it:

    - It contains multiple stations covering the Municipality of Rio de Janeiro
    - It measures precipitation in millimeters, with a granularity of 15 minutes
    - We filtered it to only include data from 2020, 2021 and 2022
    """
    )
    return


@app.cell
def _(px, stations):
    px.scatter_map(stations, lat="lat", lon="lon", text="name")
    return


@app.cell(disabled=True, hide_code=True)
def _(pl, px, stations):
    # In case `scatter_map` does not work for you:
    _fig = px.scatter_geo(stations, lat="lat", lon="lon", hover_name="name")

    _min_lat = stations.select(pl.col("lat").min()).item()
    _max_lat = stations.select(pl.col("lat").max()).item()
    _min_lon = stations.select(pl.col("lon").min()).item()
    _max_lon = stations.select(pl.col("lon").max()).item()

    _fig.update_geos(
        lataxis_range=[_min_lat - 0.2, _max_lat + 0.2],
        lonaxis_range=[_min_lon - 0.2, _max_lon + 0.2],
        resolution=50,
        showocean=True,
        oceancolor="Lightblue",
    )
    _fig
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md(
        r"""
    # Stations

    First, let's take a look at some of the stations. Notice how:

    - Some stations have been deactivated, so there won't be any data about them (in fact, we don't even know their coordinates)
    - Some columns do not contain any data at all!

    We will remove the empty columns and drop the rows without coordinates.
    """
    )
    return


@app.cell(hide_code=True)
def _(dirty_stations, mo, pl):
    # If you were working on this yourself, you may want to briefly look at *all* of them, but for practical purposes I am taking a slice for the displayed output, as otherwise it would take too much screen space.
    # mo.ui.table(dirty_stations, pagination=False)

    mo.vstack(
        [
            mo.md("Before (head and tail sample):"),
            pl.concat([dirty_stations.head(3), dirty_stations.tail(3)], how="vertical"),
        ]
    )
    return


@app.cell
def _(dirty_stations, mo, pl):
    stations = dirty_stations.drop_nulls(subset=("lat", "lon")).drop(pl.col(r"^operation_(start|end)_date$"))
    mo.vstack([mo.md("After (full dataframe):"), stations])
    return (stations,)


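@app.cell(disabled=True, hide_code=True)
def _(dirty_stations):
    # Hedged sketch (not used below): instead of hard-coding which columns to drop,
    # you can detect every all-null column programmatically and drop those.
    _null_counts = dirty_stations.null_count().row(0, named=True)
    _empty_columns = [name for name, count in _null_counts.items() if count == len(dirty_stations)]
    dirty_stations.drop(_empty_columns).drop_nulls(subset=("lat", "lon"))
    return

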
@app.cell(hide_code=True)
def _(mo):
    mo.md(
        r"""
    # Precipitation

    Now, let's move on to the precipitation data.

    ## Part 1 - Null Values

    First of all, let's check for null values:
    """
    )
    return


@app.cell
def _(dirty_weather, pl):
    rain = pl.col("accumulated_rain_15_minutes")  # Create an alias since we'll use that column a lot

    dirty_weather.filter(rain.is_null())
    return (rain,)


@app.cell(hide_code=True)
def _(dirty_weather, mo, rain):
    _missing_count = dirty_weather.select(rain.is_null().sum()).item()

    mo.md(
        f"As you can see, there are {_missing_count:,} rows missing the accumulated rain for a period.\n\nThat could be caused by sensor malfunctions, maintenance, Bobby Tables or a myriad of other reasons. While it may be a small percentage of the data ({_missing_count / len(dirty_weather):.3%}), it is still important to take it into consideration, one way or another."
    )
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md(
        r"""
    ### First option to fix it: Dropping data

    We could just remove those rows like we did for the stations, which may be a passable solution for some problems, but it is not always the best idea.
    ```py
    dirty_weather.drop_nulls()
    ```

    ### Second option to fix it: Interpolation

    Instead of removing these rows, we can use some heuristics to guess values that make sense for them. Remember that this adds a degree of uncertainty to the final results, so you should disclose how you are treating missing values if you draw any conclusions based on such guesses.
    ```py
    dirty_weather.with_columns(rain.fill_null(strategy="forward"))
    ```

    Which strategy makes sense for your data varies greatly. In some cases you'll want to fill with the mean to keep the column centered around the same distribution, while in other cases you'll want to fill with zero to avoid changing the total, or fill forward/backward to keep it mostly continuous. The disabled cell below sketches a few of these strategies.

    ### Last option to fix it: Acquire the correct values from elsewhere

    We will not explore this option in this notebook, but you could try finding approximate values in another dataset or, in some cases, manually input the correct values.

    ### However

    Let's investigate a bit more before committing to either approach.
    For example, is our current data even complete, or are we already missing some rows beyond those with null values?
    """
    )
    return


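@app.cell(disabled=True, hide_code=True)
def _(dirty_weather, rain):
    # Hedged sketch of the fill strategies mentioned above (not used below).
    # Note: the forward fill and interpolation here ignore station boundaries;
    # in practice you would fill per station, e.g. with `.over("station")`.
    dirty_weather.sort("station", "datetime").with_columns(
        rain.fill_null(strategy="mean").alias("filled_with_mean"),  # keeps the distribution centered
        rain.fill_null(0).alias("filled_with_zero"),  # keeps the total unchanged
        rain.fill_null(strategy="forward").alias("filled_forward"),  # keeps the series mostly continuous
        rain.interpolate().alias("interpolated"),  # linear interpolation between neighbouring values
    )
    return

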
@app.cell
def _(dirty_weather, pl):
    seen_counts = dirty_weather.group_by(pl.col("datetime").dt.time(), "station").len()

    # Fun fact: a single row has its time set to `23:55`.
    # It should not be present in this dataset, but it found its way into the official Google BigQuery table somehow.
    seen_counts = seen_counts.filter(pl.col("len") > 1)
    # You may want to treat it as a bug or outlier and remove it from dirty_weather, but we won't dive into that kind of cleaning in this notebook

    # seen_counts.sort("station", "datetime").select("station", "datetime", "len")
    seen_counts.sort("len").select("station", "datetime", "len")
    return


@app.cell
def _(pl):
    expected_range = pl.datetime_range(
        pl.lit("2020-01-01T00:00:00").str.to_datetime(time_zone="America/Sao_Paulo"),
        pl.lit("2022-12-31T23:45:00").str.to_datetime(time_zone="America/Sao_Paulo"),
        "15m",
    )

    pl.select(expected_range).group_by(pl.col.literal.dt.time()).len().sort("literal")
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md(
        r"""
    ## Part 2 - Missing Rows

    We expected 1096 rows for each time of day for each station (366 + 365 + 365 days from the start of 2020 to the end of 2022), but in reality we see between 1077 and 1096 rows.

    That difference could be caused by the same factors as the null values, or even by someone dropping null values along the way, but for the purposes of this notebook let's say that we want to have values for every combination with no exceptions, so we'll have to make reasonable assumptions to interpolate and extrapolate them.

    Given that we are working with time series data, we will [upsample](https://docs.pola.rs/api/python/stable/reference/dataframe/api/polars.DataFrame.upsample.html) the data, but you could also create a DataFrame containing all expected rows and then use `join(how="...")`, as sketched in the disabled cell below.

    However, that will give us _even more_ null values, so we will want to fill them in afterwards. For this case, we will just use a forward fill followed by a backward fill.
    """
    )
    return


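@app.cell(disabled=True, hide_code=True)
def _(dirty_weather, pl, stations):
    # Hedged sketch of the join-based alternative mentioned above (the next cell uses `upsample` instead):
    # build the full grid of (station, datetime) combinations we expect to exist,
    # then left-join the observations onto it. Missing rows show up as nulls,
    # which can then be filled the same way as after upsampling.
    _expected_times = pl.select(
        pl.datetime_range(
            pl.lit("2020-01-01T00:00:00").str.to_datetime(time_zone="America/Sao_Paulo"),
            pl.lit("2022-12-31T23:45:00").str.to_datetime(time_zone="America/Sao_Paulo"),
            "15m",
        ).alias("datetime")
    )
    _full_grid = stations.select("station").join(_expected_times, how="cross")
    _full_grid.join(dirty_weather, on=["station", "datetime"], how="left")
    return

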
@app.cell
def _(dirty_weather, mo, pl, rain):
    _hollow_weather = dirty_weather.sort("station", "datetime").upsample("datetime", every="15m", group_by="station")
    weather = _hollow_weather.fill_null(strategy="forward").fill_null(strategy="backward")

    mo.vstack(
        [
            mo.ui.table(
                label="Null counts at each step",
                data=pl.concat(
                    [
                        dirty_weather.null_count().select(
                            pl.lit("Before upsampling").alias("label"), rain, "station", "datetime"
                        ),
                        _hollow_weather.null_count().select(
                            pl.lit("After upsampling").alias("label"), rain, "station", "datetime"
                        ),
                        weather.null_count().select(pl.lit("After filling").alias("label"), rain, "station", "datetime"),
                    ]
                ),
            ),
            mo.md("Data after upsampling and filling in nulls:"),
            weather,
        ]
    )
    return (weather,)


@app.cell(hide_code=True)
def _(mo):
    mo.md(r"""Now that we finally have a clean dataset, let's play around with it a little""")
    return


@app.cell(hide_code=True)
def _(mo):
    year_picker = mo.ui.dropdown(options=[2020, 2021, 2022], value=2022, label="Year")
    day_slider = mo.ui.range_slider(1, 365, show_value=True, label="Day of the year", full_width=True, value=[87, 94])
    hour_slider = mo.ui.range_slider(0, 24, 0.25, show_value=True, label="Hour of the day", full_width=True)
    interval = mo.ui.dropdown(
        options=["15m", "30m", "1h", "2h", "4h", "6h", "1d"], value="4h", label="Aggregation Granularity"
    )

    mo.vstack(
        [
            year_picker,
            day_slider,
            hour_slider,
            interval,
        ]
    )
    return day_slider, hour_slider, interval, year_picker


@app.cell
def _(
    day_slider,
    hour_slider,
    interval,
    pl,
    rain,
    stations,
    weather,
    year_picker,
):
    _range_seconds = map(lambda hour: hour * 3600, hour_slider.value)
    # Seconds since midnight, so it can be compared against the slider range above
    _df_seconds = pl.col("datetime").dt.hour().mul(3600) + pl.col("datetime").dt.minute().mul(60)

    animation_data = (
        weather.lazy()
        .filter(
            pl.col("datetime").dt.year() == year_picker.value,
            pl.col("datetime").dt.ordinal_day().is_between(*day_slider.value),
            _df_seconds.is_between(*_range_seconds),
        )
        .group_by_dynamic("datetime", group_by="station", every=interval.value)
        .agg(rain.sum().alias("precipitation"))
        .remove(pl.col("precipitation").eq(0).all().over("station"))
        .join(stations.lazy(), on="station")
        .select("name", "lat", "lon", "precipitation", "datetime")
        .collect()
    )
    return (animation_data,)


@app.cell
def _(animation_data, pl, px):
    _fig = px.scatter_geo(
        animation_data.with_columns(avg_precipitation=pl.col("precipitation").mean()),
        lat="lat",
        lon="lon",
        hover_name="name",
        animation_group="name",
        animation_frame="datetime",
        size="avg_precipitation",
        color="precipitation",
        color_continuous_scale="PuBu",
        range_color=[0, animation_data.select(pl.col("precipitation").max()).item()],
    )

    _min_lat = animation_data.select(pl.col("lat").min()).item()
    _max_lat = animation_data.select(pl.col("lat").max()).item()
    _min_lon = animation_data.select(pl.col("lon").min()).item()
    _max_lon = animation_data.select(pl.col("lon").max()).item()

    _fig.update_geos(
        lataxis_range=[_min_lat - 0.2, _max_lat + 0.2],
        lonaxis_range=[_min_lon - 0.2, _max_lon + 0.2],
        resolution=50,
        showocean=True,
        oceancolor="Lightblue",
    )
    _fig
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md(
        r"""
    If we were missing some rows, we would have circles popping in and out of existence instead of a smooth animation!

    In many scenarios, missing data can also lead to wrong results overall, for example if we were to estimate the total amount of rainfall during the observed period:
    """
    )
    return


@app.cell
def _(dirty_weather, mo, rain, weather):
    old_estimate = dirty_weather.select(rain.sum()).item()
    new_estimate = weather.select(rain.sum()).item()
    # Note: The aggregation used to calculate these variables (taking a sum across all stations) is not very meaningful, but the relative difference between them carries over to many potentially useful aggregations

    mo.md(f"Our estimates may change by roughly {(new_estimate - old_estimate) / old_estimate:.2%}")
    return


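@app.cell(disabled=True, hide_code=True)
def _(dirty_weather, pl, rain, weather):
    # Hedged sketch (not part of the original analysis): the same before/after comparison
    # broken down per station, to see which stations are most affected by the filled-in values.
    _before = dirty_weather.group_by("station").agg(rain.sum().alias("before"))
    _after = weather.group_by("station").agg(rain.sum().alias("after"))
    (
        _before.join(_after, on="station")
        .with_columns(((pl.col("after") - pl.col("before")) / pl.col("before")).alias("relative_change"))
        .sort("relative_change", descending=True)
    )
    return

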
@app.cell(hide_code=True)
def _(mo):
    mo.md(
        r"""
    Which is still a relatively small difference, but every drop counts when you are dealing with the weather.

    For datasets with a higher share of missing values, that difference can get much larger.
    """
    )
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md(
        r"""
    ## Bonus Content

    ### Appendix A: Missing Time Zones

    The original dataset contained naive datetimes instead of timezone-aware ones, but we can infer whether it refers to UTC or to local time (in this case, UTC-03:00) based on the measurements.

    For example, we can select one specific interval during which we know it rained a lot, or graph the average amount of precipitation for each hour of the day, then compare the data timestamps with a ground truth.
    """
    )
    return


@app.cell(hide_code=True)
def _(dirty_weather_naive, mo):
    mo.vstack(
        [
            mo.md("Original data example:"),
            dirty_weather_naive.head(3),
        ]
    )
    return


@app.cell
def _(dirty_weather_naive, pl, px, rain):
    naive_downfall_per_hour = (
        dirty_weather_naive.group_by(pl.col("datetime").dt.hour().alias("hour"))
        .agg(rain.sum().alias("accumulated_rain"))
        .with_columns(pl.col("accumulated_rain").truediv(pl.col("accumulated_rain").sum()).mul(100))
    )
    px.bar(
        naive_downfall_per_hour.sort("hour"),
        x="hour",
        y="accumulated_rain",
        title="Distribution of precipitation per hour (%), using the naive datetime",
    )
    return


@app.cell
def _(dirty_weather_naive, pl, rain, stations):
    naive_top_rain_events = (
        dirty_weather_naive.lazy()
        # If you wanted to filter the dates and locate a specific event:
        # .filter(pl.col("datetime").is_between(pl.lit("2022-03-01").str.to_datetime(), pl.lit("2022-05-01").str.to_datetime()))
        .sort("station", "datetime")
        .group_by_dynamic("datetime", every="1h", offset="30m", group_by="station")
        .agg(rain.sum())
        .join(stations.lazy(), on="station")
        .sort(rain, descending=True)
        .select(
            "name",
            pl.col("datetime").alias("window_start"),
            (pl.col("datetime") + pl.duration(hours=1)).alias("window_end"),
            rain.alias("accumulated rain"),
        )
        .head(50)
        .collect()
    )
    naive_top_rain_events
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md(
        r"""
    By externally researching the expected distribution and looking up some of the extreme weather events, we can come to a conclusion about whether it is aligned with the local time or with UTC.

    In this case, the distribution matches the normal weather for this region and the hours with the most precipitation match those of historical events, so it is safe to say the data is using the America/Sao_Paulo time zone.
    """
    )
    return


@app.cell
def _(dirty_weather_naive, pl):
    dirty_weather = dirty_weather_naive.with_columns(pl.col("datetime").dt.replace_time_zone("America/Sao_Paulo"))

    # Also get rid of some of the other variables to save memory
    # del raw_weather
    # del dirty_weather_naive

    dirty_weather.head(3)
    return (dirty_weather,)


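@app.cell(disabled=True, hide_code=True)
def _(dirty_weather_naive, pl):
    # Hedged aside: `dt.replace_time_zone` keeps the wall-clock readings and only attaches
    # the zone (what we want here, since the sensors report local time), whereas
    # `dt.convert_time_zone` shifts the underlying instant. Comparing both on a small
    # sample makes the difference visible.
    dirty_weather_naive.head(3).with_columns(
        replaced=pl.col("datetime").dt.replace_time_zone("America/Sao_Paulo"),
        converted=pl.col("datetime").dt.replace_time_zone("UTC").dt.convert_time_zone("America/Sao_Paulo"),
    )
    return

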
@app.cell(hide_code=True)
def _(mo):
    mo.md(
        r"""
    ### Utilities

    Loading data and imports
    """
    )
    return


@app.cell
def _(pl):
    raw_stations = pl.read_csv("/mnt/c/Users/Etrot/Downloads/datario_alertario_stations.csv")
    raw_weather = pl.read_csv("/mnt/c/Users/Etrot/Downloads/datario_alertario_weather_2020_to_2022.csv")
    return raw_stations, raw_weather


@app.cell
def _(pl, raw_stations):
    dirty_stations = raw_stations.select(
        pl.col("id_estacao").alias("station"),
        pl.col("estacao").alias("name"),
        pl.col("latitude").alias("lat"),
        pl.col("longitude").alias("lon"),
        pl.col("cota").alias("altitude"),
        pl.col("situacao").alias("situation"),
        pl.col("endereco").alias("address"),
        pl.col("data_inicio_operacao").alias("operation_start_date"),
        pl.col("data_fim_operacao").alias("operation_end_date"),
    )
    return (dirty_stations,)


@app.cell
def _(pl, raw_weather):
    dirty_weather_naive = raw_weather.select(
        pl.col("id_estacao").alias("station"),
        pl.col("acumulado_chuva_15_min").alias("accumulated_rain_15_minutes"),
        pl.concat_str("data_particao", pl.lit("T"), "horario").str.to_datetime(time_zone=None).alias("datetime"),
    )
    return (dirty_weather_naive,)


@app.cell
def _():
    import marimo as mo
    return (mo,)


@app.cell
def _():
    import polars as pl
    return (pl,)


@app.cell
def _():
    import plotly.express as px
    return (px,)


if __name__ == "__main__":
    app.run()