Annual unutilized management residuals

- [x] Public lands non-commercial management residuals
- [ ] Private land non-commercial management residuals
- [x] Public lands logging residuals
- [x] Private lands logging residuals
cat_codes = {
    'nf_ncmr': 'Unburned, non-commercial management residuals from National Forest lands',
    'nf_lr': 'Logging residuals generated from timber sales on National Forest lands',
    'opriv_lr': 'Logging residuals generated from timber sales on non-industrial private forest lands',
    'fi_lr': 'Logging residuals generated from timber sales on industrial private lands',
    'opub_lr': 'Logging residuals generated from timber sales on other public lands'}

usfs_an_bdt['cuft'] = usfs_an_bdt.bdt * wavg_dens

resid_stats = pd.DataFrame((usfs_an_bdt.iloc[6:, 2] / 1000000).describe())
resid_stats.columns = ['nf_ncmr']
resid_stats['nf_lr'] = tpoData[tpoData.ownership.str.contains('National Forest')]['loggingresidues'].describe()
resid_stats['opriv_lr'] = tpoData[tpoData.ownership.str.contains('Other Private')]['loggingresidues'].describe()
resid_stats['fi_lr'] = tpoData[tpoData.ownership.str.contains('Forest Industry')]['loggingresidues'].describe()
resid_stats['opub_lr'] = tpoData[tpoData.ownership.str.contains('Other Public')]['loggingresidues'].describe()
resid_stats

print(tabulate(resid_stats, headers=resid_stats.columns.tolist(), tablefmt='pipe'))
wood_fates.ipynb
peteWT/fcat_biomass
mit
Estimating combined GHG and SLCP emissions from unutilized residues

Only a fraction of the
ureg = UnitRegistry()
ureg.define('cubic foot = cubic_centimeter / 3.53147e-5 = cubic_foot')
ureg.define('million cubic foot = cubic_foot * 1000000 = MMCF')
ureg.define('board foot sawlog = cubic_foot / 5.44 = BF_saw')
ureg.define('board foot veneer = cubic_foot / 5.0 = BF_vo')
ureg.define('board foot bioenergy = cubic_foot / 1.0 = BF_bio')
ureg.define('bone-dry unit = cubic_foot * 96 = BDU')
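A minimal sketch of how the registry above might be used, assuming the definitions registered without error (pint unit names conventionally use underscores, so the single-token aliases such as cubic_foot and MMCF are the safer handles):

# Hypothetical conversion using the aliases defined above
volume = 2.5e6 * ureg('cubic_foot')
print(volume.to('MMCF'))   # expect 2.5 million cubic foot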
wood_fates.ipynb
peteWT/fcat_biomass
mit
Some global data
#symbol = '^GSPC'
symbol = 'SPY'
capital = 10000
start = datetime.datetime(2015, 10, 30)
#start = datetime.datetime(*pf.SP500_BEGIN)
end = datetime.datetime.now()
examples/120.sell-short/strategy.ipynb
fja05680/pinkfish
mit
Define Strategy Class
class Strategy:

    def __init__(self, symbol, capital, start, end):
        self.symbol = symbol
        self.capital = capital
        self.start = start
        self.end = end
        self.ts = None
        self.tlog = None
        self.dbal = None
        self.stats = None

    def _algo(self):
        pf.TradeLog.cash = self.capital

        for i, row in enumerate(self.ts.itertuples()):
            date = row.Index.to_pydatetime()
            close = row.close
            end_flag = pf.is_last_row(self.ts, i)
            shares = 0

            # Buy to cover (at the open on first trading day in Nov)
            if self.tlog.shares > 0:
                if (row.month == 11 and row.first_dotm) or end_flag:
                    shares = self.tlog.buy2cover(date, row.open)
            # Sell short (at the open on first trading day in May)
            else:
                if row.month == 5 and row.first_dotm:
                    shares = self.tlog.sell_short(date, row.open)

            if shares > 0:
                pf.DBG("{0} SELL SHORT {1} {2} @ {3:.2f}".format(
                    date, shares, self.symbol, row.open))
            elif shares < 0:
                pf.DBG("{0} BUY TO COVER {1} {2} @ {3:.2f}".format(
                    date, -shares, self.symbol, row.open))

            # Record daily balance
            self.dbal.append(date, close)

    def _algo2(self):
        pf.TradeLog.cash = self.capital

        for i, row in enumerate(self.ts.itertuples()):
            date = row.Index.to_pydatetime()
            close = row.close
            end_flag = pf.is_last_row(self.ts, i)
            shares = 0

            # On the first day of the month, adjust short position to 50%
            if row.first_dotm or end_flag:
                weight = 0 if end_flag else 0.5
                self.tlog.adjust_percent(date, close, weight, pf.Direction.SHORT)

            # Record daily balance
            self.dbal.append(date, close)

    def run(self):
        self.ts = pf.fetch_timeseries(self.symbol)
        self.ts = pf.select_tradeperiod(self.ts, self.start, self.end, use_adj=True)

        # add calendar columns
        self.ts = pf.calendar(self.ts)

        self.tlog = pf.TradeLog(self.symbol)
        self.dbal = pf.DailyBal()

        self.ts, self.start = pf.finalize_timeseries(self.ts, self.start)

        # Pick either algo or algo2
        self._algo()
        #self._algo2()

        self._get_logs()
        self._get_stats()

    def _get_logs(self):
        self.rlog = self.tlog.get_log_raw()
        self.tlog = self.tlog.get_log()
        self.dbal = self.dbal.get_log(self.tlog)

    def _get_stats(self):
        self.stats = pf.stats(self.ts, self.tlog, self.dbal, self.capital)
examples/120.sell-short/strategy.ipynb
fja05680/pinkfish
mit
Run Strategy
s = Strategy(symbol, capital, start, end)
s.run()

s.rlog.head()
s.tlog.head()
s.dbal.tail()
examples/120.sell-short/strategy.ipynb
fja05680/pinkfish
mit
Run Benchmark, Retrieve benchmark logs, and Generate benchmark stats
benchmark = pf.Benchmark(symbol, s.capital, s.start, s.end)
benchmark.run()
examples/120.sell-short/strategy.ipynb
fja05680/pinkfish
mit
Plot Equity Curves: Strategy vs Benchmark
pf.plot_equity_curve(s.dbal, benchmark=benchmark.dbal)
examples/120.sell-short/strategy.ipynb
fja05680/pinkfish
mit
Plot Trades
pf.plot_trades(s.dbal, benchmark=benchmark.dbal)
examples/120.sell-short/strategy.ipynb
fja05680/pinkfish
mit
Bar Graph: Strategy vs Benchmark
df = pf.plot_bar_graph(s.stats, benchmark.stats)
df
examples/120.sell-short/strategy.ipynb
fja05680/pinkfish
mit
IRL on a random MDP

Testing both linear reward models and MLP reward models.
mdp = menv.RandomMDP(
    n_states=16,
    n_actions=3,
    branch_factor=2,
    horizon=10,
    random_obs=True,
    obs_dim=5,
    generator_seed=42,
)
V, Q, pi = tirl.mce_partition_fh(mdp)
Dt, D = tirl.mce_occupancy_measures(mdp, pi=pi)
demo_counts = D @ mdp.observation_matrix
(obs_dim,) = demo_counts.shape

rmodel = tirl.LinearRewardModel(obs_dim)
opt = th.optim.Adam(rmodel.parameters(), lr=0.1)
D_fake = tirl.mce_irl(mdp, opt, rmodel, D, linf_eps=1e-1)

rmodel = tirl.MLPRewardModel(obs_dim, [32, 32])
opt = th.optim.Adam(rmodel.parameters(), lr=0.1)
D_fake = tirl.mce_irl(mdp, opt, rmodel, D, linf_eps=1e-2)
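A convergence check one might run afterwards (a sketch; it assumes D and D_fake are array-like occupancy measures as produced above, and the threshold matches the linf_eps passed to the last mce_irl call):

import numpy as np

# The IRL occupancy measure should match the demonstrations to within linf_eps
gap = np.max(np.abs(np.asarray(D_fake) - np.asarray(D)))
print(gap)  # expect <= 1e-2 for the MLP run above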
experiments/mce_irl.ipynb
HumanCompatibleAI/imitation
mit
Same thing, but on grid world

The true reward here is not linear in the reduced feature space (i.e. $(x,y)$ coordinates). Finding an appropriate linear reward is impossible (as I will demonstrate), but an MLP should Just Work(tm).
# Same experiments, but on grid world
mdp = menv.CliffWorld(width=7, height=4, horizon=8, use_xy_obs=True)
V, Q, pi = tirl.mce_partition_fh(mdp)
Dt, D = tirl.mce_occupancy_measures(mdp, pi=pi)
demo_counts = D @ mdp.observation_matrix
(obs_dim,) = demo_counts.shape

rmodel = tirl.LinearRewardModel(obs_dim)
opt = th.optim.Adam(rmodel.parameters(), lr=1.0)
D_fake = tirl.mce_irl(mdp, opt, rmodel, D, linf_eps=0.1)

mdp.draw_value_vec(D)
plt.title("Cliff World $p(s)$")
plt.xlabel("x-coord")
plt.ylabel("y-coord")
plt.show()

mdp.draw_value_vec(D_fake)
plt.title("Occupancy for linear reward function")
plt.show()

plt.subplot(1, 2, 1)
mdp.draw_value_vec(rmodel(th.as_tensor(mdp.observation_matrix)).detach().numpy())
plt.title("Inferred reward")
plt.subplot(1, 2, 2)
mdp.draw_value_vec(mdp.reward_matrix)
plt.title("True reward")
plt.show()

rmodel = tirl.MLPRewardModel(obs_dim, [1024], activation=th.nn.ReLU)
opt = th.optim.Adam(rmodel.parameters(), lr=1e-3)
D_fake_mlp = tirl.mce_irl(mdp, opt, rmodel, D, linf_eps=3e-2, print_interval=250)

mdp.draw_value_vec(D_fake_mlp)
plt.title("Occupancy for MLP reward function")
plt.show()

plt.subplot(1, 2, 1)
mdp.draw_value_vec(rmodel(th.as_tensor(mdp.observation_matrix)).detach().numpy())
plt.title("Inferred reward")
plt.subplot(1, 2, 2)
mdp.draw_value_vec(mdp.reward_matrix)
plt.title("True reward")
plt.show()
experiments/mce_irl.ipynb
HumanCompatibleAI/imitation
mit
Create a model

As a first step, let us have a look at how we can create one of the models implemented in rrmpg.models. Basically, for all models we have two different options:

1. Initialize a model without specific model parameters.
2. Initialize a model with specific model parameters.

The documentation provides a list of all model parameters. Alternatively, we can look at help() for the model (e.g. help(CemaneigeGR4J)). If no specific model parameters are provided upon initialization, random parameters are generated that lie between the default parameter bounds. We can look at these bounds by calling the .get_param_bounds() method on the model object, and check the current parameter values by calling the .get_params() method. For now we don't know any specific parameter values, so we'll create a model with random parameters.
model = CemaneigeGR4J()
model.get_params()
examples/model_api_example.ipynb
kratzert/RRMPG
mit
Here we can see the six model parameters of the CemaneigeGR4J model and their current values.

Using the CAMELSLoader

To have data to start with, we can use the CAMELSLoader class to load data for the provided basins from the CAMELS dataset. To get a list of all available basins provided within this library, we can use the .get_basin_numbers() method. For now we will use the provided basin number 01031500.
df = CAMELSLoader().load_basin('01031500')
df.head()
examples/model_api_example.ipynb
kratzert/RRMPG
mit
Next we will split the data into a calibration period, which we will use to find a set of good model parameters, and a validation period, which we will use to see how well our model performs on unseen data. As in the CAMELS dataset publication, we will use the first 15 hydrological years for calibration. The rest of the data will be used for validation. Because the index of the dataframe is in pandas Datetime format, we can easily split the dataframe into two parts.
# calculate the end date of the calibration period
end_cal = pd.to_datetime(f"{df.index[0].year + 15}/09/30", yearfirst=True)

# validation period starts one day later
start_val = end_cal + pd.DateOffset(days=1)

# split the data into two parts
cal = df[:end_cal].copy()
val = df[start_val:].copy()
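A quick sanity check on the split, using only the frames created above (the coverage assertion assumes daily data with no gaps):

# The two periods should not overlap and should cover the full record
print(cal.index.min(), '->', cal.index.max())
print(val.index.min(), '->', val.index.max())
assert cal.index.max() < val.index.min()
assert len(cal) + len(val) == len(df)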
examples/model_api_example.ipynb
kratzert/RRMPG
mit
Fit the model to observed discharge

As already said above, we'll look at two different methods implemented in this library:

1. Using one of SciPy's global optimizers
2. Monte-Carlo-Simulation

Using one of SciPy's global optimizers

Each model has a .fit() method. This function uses the global optimizer differential evolution from the scipy package to find the set of model parameters that produces the best simulation with respect to the provided observed discharge array. The inputs for this function can be found in the documentation or via help().
help(model.fit)
examples/model_api_example.ipynb
kratzert/RRMPG
mit
We don't know any values for the initial states of the storages, so we will ignore them for now. For the missing mean temperature, we calculate a proxy from the minimum and maximum daily temperature. The station height can be retrieved from the CAMELSLoader class via the .get_station_height() method.
# calculate mean temp for calibration and validation period
cal['tmean'] = (cal['tmin(C)'] + cal['tmax(C)']) / 2
val['tmean'] = (val['tmin(C)'] + val['tmax(C)']) / 2

# load the gauge station height
height = CAMELSLoader().get_station_height('01031500')
examples/model_api_example.ipynb
kratzert/RRMPG
mit
Now we are ready to fit the model and retrieve a good set of model parameters from the optimizer. Again, this will be done with the calibration data. Because the model methods also accept pandas Series, we can call the function as follows.
# We don't have an initial value for the snow storage, so we omit this input
result = model.fit(cal['QObs(mm/d)'], cal['prcp(mm/day)'], cal['tmean'],
                   cal['tmin(C)'], cal['tmax(C)'], cal['PET'], height)
examples/model_api_example.ipynb
kratzert/RRMPG
mit
result is an object defined by the scipy library and contains the optimized model parameters, as well as some more information on the optimization process. Let us have a look at this object:
result
examples/model_api_example.ipynb
kratzert/RRMPG
mit
The relevant information here is:

- fun is the final value of our optimization criterion (the mean squared error in this case)
- message describes the cause of the optimization termination
- nfev is the number of model simulations
- success is a flag indicating whether or not the optimization was successful
- x contains the optimized model parameters

Next, let us set the model parameters to the optimized ones found by the search. For this we need to create a dictionary containing one key for each model parameter, with the optimized parameter as the corresponding value. As mentioned before, the list of model parameter names can be retrieved with the model.get_parameter_names() function. We can then create the needed dictionary with the following lines of code:
params = {}
param_names = model.get_parameter_names()

for i, param in enumerate(param_names):
    params[param] = result.x[i]

# This line sets the model parameters to the ones specified in the dict
model.set_params(params)

# To be sure, let's look at the current model parameters
model.get_params()
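The same dictionary can be built in one line with zip(); this is just a compactness note, reusing only objects defined above:

# Equivalent construction of the parameter dictionary
params = dict(zip(model.get_parameter_names(), result.x))
model.set_params(params)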
examples/model_api_example.ipynb
kratzert/RRMPG
mit
Although it might not be clear at first glance, these are the same parameters as the ones specified in result.x. In result.x they are ordered according to the _param_list specified in each model class, whereas the dictionary output here is sorted alphabetically.

Monte-Carlo-Simulation

Now let us have a look at how we can use the Monte-Carlo-Simulation implemented in rrmpg.tools.monte_carlo.
help(monte_carlo)
examples/model_api_example.ipynb
kratzert/RRMPG
mit
As specified in the help text, all model inputs needed for a simulation must be provided as keyword arguments. The keywords need to match the names specified in the model.simulate() function. Let us create a new model instance and see how this works for the CemaneigeGR4J model.
model2 = CemaneigeGR4J()

# Let us run MC for 10000 runs, which is in the same range as the optimizer above
result_mc = monte_carlo(model2, num=10000, qobs=cal['QObs(mm/d)'],
                        prec=cal['prcp(mm/day)'], mean_temp=cal['tmean'],
                        min_temp=cal['tmin(C)'], max_temp=cal['tmax(C)'],
                        etp=cal['PET'], met_station_height=height)

# Get the index of the best fit (smallest mean squared error), ignoring NaNs
idx = np.nanargmin(result_mc['mse'])

# Get the optimal parameters and set them as model parameters
optim_params = result_mc['params'][idx]

params = {}
for i, param in enumerate(param_names):
    params[param] = optim_params[i]

# This line sets the model parameters to the ones specified in the dict
model2.set_params(params)
examples/model_api_example.ipynb
kratzert/RRMPG
mit
Calculate simulated discharge

We now have two models, optimized by different methods. Let's calculate the simulated streamflow of each model and compare the results! Each model has a .simulate() method that returns the simulated discharge for the inputs we provide to this function.
# simulated discharge of the model optimized by the .fit() function
val['qsim_fit'] = model.simulate(val['prcp(mm/day)'], val['tmean'],
                                 val['tmin(C)'], val['tmax(C)'], val['PET'],
                                 height)

# simulated discharge of the model optimized by monte-carlo-sim
val['qsim_mc'] = model2.simulate(val['prcp(mm/day)'], val['tmean'],
                                 val['tmin(C)'], val['tmax(C)'], val['PET'],
                                 height)

# Calculate and print the Nash-Sutcliffe-Efficiency for both simulations
nse_fit = calc_nse(val['QObs(mm/d)'], val['qsim_fit'])
nse_mc = calc_nse(val['QObs(mm/d)'], val['qsim_mc'])

print("NSE of the .fit() optimization: {:.4f}".format(nse_fit))
print("NSE of the Monte-Carlo-Simulation: {:.4f}".format(nse_mc))
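For reference, the Nash-Sutcliffe efficiency that calc_nse reports corresponds to the following quantity (a minimal NumPy sketch, not the library's implementation):

import numpy as np

# NSE = 1 - sum((obs - sim)^2) / sum((obs - mean(obs))^2)
def nse(obs, sim):
    obs, sim = np.asarray(obs), np.asarray(sim)
    return 1.0 - np.sum((obs - sim) ** 2) / np.sum((obs - obs.mean()) ** 2)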
examples/model_api_example.ipynb
kratzert/RRMPG
mit
What do these numbers mean? Let us have a look at a window of the simulated timeseries and compare it to the observed discharge:
# Plot last full hydrological year of the simulation
%matplotlib notebook
start_date = pd.to_datetime("2013/10/01", yearfirst=True)
end_date = pd.to_datetime("2014/09/30", yearfirst=True)
plt.plot(val.loc[start_date:end_date, 'QObs(mm/d)'], label='Qobs')
plt.plot(val.loc[start_date:end_date, 'qsim_fit'], label='Qsim .fit()')
plt.plot(val.loc[start_date:end_date, 'qsim_mc'], label='Qsim mc')
plt.legend()
examples/model_api_example.ipynb
kratzert/RRMPG
mit
The result is not perfect, but it is not bad either! And since this package is also about speed, let us also check how long it takes to simulate the discharge for the entire validation period (19 years of data).
%%timeit
model.simulate(val['prcp(mm/day)'], val['tmean'], val['tmin(C)'],
               val['tmax(C)'], val['PET'], height)
examples/model_api_example.ipynb
kratzert/RRMPG
mit
Summary

In the cells to follow, the following material parameters were found:

$$\begin{align}
B_0 &= 14617807286.8\\
B_1 &= 40384983097.2\\
B_2 &= 385649437.858\\
P_0 &= -164761936.257\\
P_1 &= 3.20119273834 \times 10^{-10}\\
P_2 &= 7.39166987894 \times 10^{-18}\\
P_3 &= 0.0983914345654\\
G_1 &= 9647335534.93\\
G_2 &= 2.3838775292 \times 10^{-9}\\
G_3 &= -7.40942609805 \times 10^{-7}
\end{align}$$

Read in the Data

Read in the hydrostatic data and compute derived values.
df = read_excel('porodata.xlsx', sheetname='hydrostatic')
df['EV'] = trace(df, 'STRAIN')
df['I1'] = trace(df, 'SIG')
df['dEV'] = diff(df['EV'])
df['dI1'] = diff(df['I1'])
notebooks/PoroplasticFitting.ipynb
matmodlab/matmodlab2
bsd-3-clause
Hydrostatic Response

Elastic Unloading Curve

Plot the pressure vs. volume strain curve and determine the section in which elastic unloading occurs.
plot = figure(x_axis_label='Volume Strain', y_axis_label='Pressure')
plot.circle(-df['EV'], -df['I1']/3.)
plot.text(-df['EV'], -df['I1']/3., text=range(len(df)),
          text_color="#333333", text_align="left", text_font_size="5pt")
show(plot)
notebooks/PoroplasticFitting.ipynb
matmodlab/matmodlab2
bsd-3-clause
It appears that the unloading occurs at data point 101 and continues until the end of the data. This curve will be used to fit the bulk modulus parameters. Below, scipy is used to optimize the parameters to the curve.
kfun = lambda B0, B1, B2, I1: B0 + B1 * exp(-B2 / abs(I1))

def kmm_bulk(x, fac, I1, K):
    B0, B1, B2 = x * fac
    return K - kfun(B0, B1, B2, I1)

imax = 101
df1 = df.iloc[imax:].copy()
K = np.array(df1['dI1'] / 3. / df1['dEV'])
b0 = np.array((K[-1], K[0] - K[-1], 1e9))
fac = 1e9
B, icov = leastsq(kmm_bulk, b0/fac, args=(fac, df1['I1'], K))
B0, B1, B2 = B * fac
B0, B1, B2

# Note: x is pressure and y is bulk modulus, so label the axes accordingly
plot = figure(x_axis_label='Pressure', y_axis_label='Bulk Modulus')
plot.circle(-df1['I1']/3., K)
plot.line(-df['I1']/3., kfun(B0, B1, B2, df['I1']), color='red')
show(plot)
notebooks/PoroplasticFitting.ipynb
matmodlab/matmodlab2
bsd-3-clause
Poro response

With the bulk response determined, find the porosity parameters.
df['EP'] = df['I1'] / 3. / kfun(B0, B1, B2, df['I1']) - df['EV']
p3 = max(df['EP'])
df['PORO'] = p3 - df['EP']

plot = figure(x_axis_label='Plastic Strain', y_axis_label='Pressure')
plot.circle(df['EP'], -df['I1']/3.)
show(plot)

plot = figure(x_axis_label='Pressure', y_axis_label='PORO')
df2 = df.iloc[:imax].copy()
plot.circle(-df2['I1']/3., df2['PORO'])
show(plot)

def pfun(P0, P1, P2, P3, I1):
    xi = -I1 / 3. + P0
    return P3 * exp(-(P1 + P2 * xi) * xi)

def kmm_poro(x, fac, I1, P):
    p0, p1, p2, p3 = asarray(x) * fac
    return P - pfun(p0, p1, p2, p3, I1)

p0 = (1, 1, 1, p3)
fac = np.array([1e8, 1e-10, 1e-18, 1])
p, icov = leastsq(kmm_poro, p0, args=(fac, df2['I1'], df2['PORO']))
P0, P1, P2, P3 = p * fac
P0, P1, P2, P3

plot = figure(x_axis_label='Pressure', y_axis_label='PORO')
plot.circle(-df2['I1']/3., df2['PORO'], legend='Data')
plot.line(-df2['I1']/3., pfun(P0, P1, P2, P3, df2['I1']), color='red',
          legend='Fit')
show(plot)
notebooks/PoroplasticFitting.ipynb
matmodlab/matmodlab2
bsd-3-clause
Shear Response
keys = (2.5, 5.0, 7.5, 10.0, 12.5, 15.0, 22.5, 30.0)
colors = ('red', 'blue', 'orange', 'purple', 'green', 'black',
          'magenta', 'teal', 'cyan')
df2 = {}
p = figure(x_axis_label='I1', y_axis_label='Sqrt[J2]')
p1 = figure(x_axis_label='Axial Strain', y_axis_label='Axial Stress')
for (i, key) in enumerate(keys):
    key = 'txc p={0:.01f}MPa'.format(key)
    x = read_excel('porodata.xlsx', sheetname=key)
    x['I1'] = trace(x, 'SIG')
    x['RTJ2'] = RTJ2(x)
    df2[key] = x
    p.circle(-df2[key]['I1'], df2[key]['RTJ2'], legend=key[4:],
             color=colors[i])
    # determine where hydrostatic preload ends
    j = nonzero(x['SIG11'] - x['SIG22'])[0]
    E0, S0 = df2[key]['STRAIN11'][j[0]], df2[key]['SIG11'][j[0]]
    p1.circle(-df2[key]['STRAIN11'][j]+E0, -df2[key]['SIG11'][j]+S0,
              legend=key[4:], color=colors[i])
p.legend.orientation = 'horizontal'
show(p1)
show(p)
notebooks/PoroplasticFitting.ipynb
matmodlab/matmodlab2
bsd-3-clause
The axial stress versus axial strain plot shows that the response is linear, meaning that the elastic modulus is constant.
key = 'txc p=2.5MPa'
j = nonzero(df2[key]['SIG11'] - df2[key]['SIG22'])[0]
df3 = df2[key].iloc[j].copy()
E0, S0 = df3['STRAIN11'].iloc[0], df3['SIG11'].iloc[0]
EF, SF = df3['STRAIN11'].iloc[-1], df3['SIG11'].iloc[-1]
E = (SF - S0) / (EF - E0)
print('{0:E}'.format(E))
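An equivalent slope estimate that uses every point on the branch rather than only the endpoints (a sketch using NumPy's least-squares line fit; a useful cross-check given that the response is linear):

import numpy as np

# Fit a straight line through all (strain, stress) points
slope, intercept = np.polyfit(df3['STRAIN11'], df3['SIG11'], 1)
print('{0:E}'.format(slope))  # should be close to E computed above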
notebooks/PoroplasticFitting.ipynb
matmodlab/matmodlab2
bsd-3-clause
The shear modulus can now be determined
G = lambda I1: 3 * kfun(B0, B1, B2, I1) * E / (9 * kfun(B0, B1, B2, I1) - E)
gfun = lambda g0, g1, g2, rtj2: g0 * (1 - g1 * exp(-g2 * rtj2)) / (1 - g1)

def kmm_shear(x, fac, rtj2, G):
    g0, g1, g2 = asarray(x) * fac
    return G - gfun(g0, g1, g2, rtj2)

g = asarray(G(df3['I1']))
g0 = (g[0], .0001, 0)
fac = 1.
g, icov = leastsq(kmm_shear, g0, args=(fac, RTJ2(df3), g))
G0, G1, G2 = g * fac
G0, G1, G2

p2 = figure(x_axis_label='Sqrt[J2]', y_axis_label='Shear Modulus')
p2.circle(RTJ2(df3), G(df3['I1']))
p2.line(RTJ2(df3), gfun(G0, G1, G2, RTJ2(df3)), color='red')
show(p2)
notebooks/PoroplasticFitting.ipynb
matmodlab/matmodlab2
bsd-3-clause
Data import and cleanup Next we import the radon data. For cleanup, we strip whitespace from column headers, restrict data to Minnesota (MN) and add a unique numerical identifier for each county.
# Import radon data
srrs2 = pd.read_csv('data/srrs2.dat')
srrs2.columns = srrs2.columns.map(str.strip)

# Make a combined state and county ID, by household
srrs_mn = srrs2.assign(fips=srrs2.stfips * 1000 + srrs2.cntyfips)[srrs2.state == 'MN']

# Check data
srrs_mn.head()
stan_model_radon.ipynb
widdowquinn/notebooks
mit
We import uranium data for each county, creating a unique identifier for each county to match that in srrs.
# Obtain the uranium level as a county-level predictor
cty = pd.read_csv('data/cty.dat')
cty_mn = cty[cty.st == 'MN'].copy()  # MN only data

# Make a combined state and county id, by county
cty_mn['fips'] = 1000 * cty_mn.stfips + cty_mn.ctfips

# Check data
cty_mn.head()
stan_model_radon.ipynb
widdowquinn/notebooks
mit
It is convenient to bring all the data into a single dataframe with radon and uranium data by household, so we merge on the basis of the unique county identifier to assign uranium data across all households in a county.
# Combine data into a single dataframe
srrs_mn = srrs_mn.merge(cty_mn[['fips', 'Uppm']], on='fips')

# Get uranium level by household (on county basis)
srrs_mn = srrs_mn.drop_duplicates(subset='idnum')  # Lose duplicate houses
u = np.log(srrs_mn.Uppm)  # log-transform uranium level
n = len(srrs_mn)  # number of households

# Check data
srrs_mn.head()
srrs_mn.columns
stan_model_radon.ipynb
widdowquinn/notebooks
mit
We create a dictionary associating each county with a unique index code, for use in Stan.
# Index counties with a lookup dictionary
srrs_mn.county = srrs_mn.county.str.strip()
mn_counties = srrs_mn.county.unique()
counties = len(mn_counties)
county_lookup = dict(zip(mn_counties, range(len(mn_counties))))
stan_model_radon.ipynb
widdowquinn/notebooks
mit
For construction of the Stan model, it is convenient to have the relevant variables as local copies - this aids readability:

- index code for each county
- radon activity
- log radon activity
- which floor the measurement was taken on
# Make local copies of variables
county = srrs_mn['county_code'] = srrs_mn.county.replace(county_lookup).values
radon = srrs_mn.activity
srrs_mn['log_radon'] = log_radon = np.log(radon + 0.1).values
floor_measure = srrs_mn.floor.values
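A quick consistency check on the index mapping created above (uses only objects already defined):

# Each county name should round-trip through the lookup dictionary
assert all(county_lookup[name] == code
           for name, code in zip(srrs_mn.county, srrs_mn['county_code']))
print(len(mn_counties), 'counties indexed 0 ..', counties - 1)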
stan_model_radon.ipynb
widdowquinn/notebooks
mit
Modelling distribution of radon in MN Visual inspection of the variation in (log) observed radon levels shows a broad range of values. We aim to determine the contributions of the prevailing radon level and the floor at which radon level is measured, to produce this distribution of observed values.
srrs_mn.activity.apply(lambda x: np.log(x + 0.1)).hist(bins=25);
stan_model_radon.ipynb
widdowquinn/notebooks
mit
Conventional approaches

Two conventional alternatives to modelling, pooling and not pooling, represent two extremes of a tradeoff between variance and bias.

The bias-variance tradeoff

Where the variable we are trying to predict is $Y$, as a function of covariates $X$, we assume a relationship $Y = f(X) + \epsilon$ where the error term $\epsilon$ is distributed normally with mean zero: $\epsilon \sim N(0, \sigma_{\epsilon})$.

We estimate a model $\hat{f}(X)$ of $f(X)$ using some technique. This gives us squared prediction error: $\textrm{Err}(x) = E[(Y - \hat{f}(x))^2]$. That squared error can be decomposed into:

$$\textrm{Err}(x) = (E[\hat{f}(x)] - f(x))^2 + E[(\hat{f}(x) - E[\hat{f}(x)])^2] + \sigma^2_e$$

where

- $(E[\hat{f}(x)] - f(x))^2$ is the square of the difference between the model $\hat{f}(x)$ and the 'true' relationship $f(x)$, i.e. the square of the bias
- $E[(\hat{f}(x) - E[\hat{f}(x)])^2]$ is the square of the difference between the mean behaviour of the model and the observed behaviour of this model, i.e. the square of the variance
- $\sigma^2_e$ is the noise of the 'true' relationship that cannot be captured in any model, i.e. the irreducible error

With a known true model and an infinite amount of data, it is in principle possible to reduce both bias and variance to zero. In reality, both sources of error exist, and we choose to minimise bias and/or variance.

The trade-off in the radon model

Taking $y = \log(\textrm{radon})$, floor measurements (basement or ground) as $x$, where $i$ indicates the house, and $j[i]$ is the county to which a house 'belongs'. Then $\alpha$ is the radon level across all counties, and $\alpha_{j[i]}$ is the radon level in a single county; $\beta$ is the influence of the choice of floor at which measurement is made; and $\epsilon$ is some other error (measurement error, temporal variation in a house, or variation among houses).

We take two approaches:

- Complete pooling - treat all counties the same, and estimate a single radon level: $y_i = \alpha + \beta x_i + \epsilon_i$
- No pooling - treat each county independently: $y_i = \alpha_{j[i]} + \beta x_i + \epsilon_i$

When we do not pool, we will likely obtain quite different parameter estimates $\alpha_{j[i]}$ for each county - especially when there are few observations in a county. As new data are gathered, these estimates are likely to change radically. This is therefore a model with high variance. Alternatively, by pooling all counties, we will obtain a single estimate for $\alpha$, but this value may deviate quite far from the true situation in some or all counties. This is therefore a model with high bias.

So, if we treat all counties as the same, we have a biased estimate, but if we treat them as individuals, we have high variance - the bias-variance tradeoff. It may be the case that neither extreme produces a good model for the real behaviour: models that minimise bias to produce a high variance error are overfit; those that minimise variance to produce a strong bias error are underfit.

Specifying the pooled model in Stan

To build a model in Stan, we need to define data, parameters, and the model itself. This is done by creating strings in the Stan language, rather than having an API that provides a constructor for the model.

We construct the data block to comprise the number of samples (N, int), with vectors of log-radon measurements (y, a vector of length N) and the floor measurement covariates (x, vector, length N).
# Construct the data block.
pooled_data = """
data {
  int<lower=0> N;
  vector[N] x;
  vector[N] y;
}
"""
stan_model_radon.ipynb
widdowquinn/notebooks
mit
Next we initialise parameters. Here these are the linear model coefficients (beta, a vector of length 2), which represent both $\alpha$ and $\beta$ in the pooled model definition as beta[1] and beta[2] and are assumed to lie on a Normal distribution, and the Normal distribution scale parameter sigma, which defines the errors in the model's prediction of the output (y, defined later) and is constrained to be positive.
# Initialise parameters
pooled_parameters = """
parameters {
  vector[2] beta;
  real<lower=0> sigma;
}
"""
stan_model_radon.ipynb
widdowquinn/notebooks
mit
Finally we specify the model, with log(radon) measurements as a normal sample, having a mean that is a function of the choice of floor at which the measurement was made, $y \sim N(\beta[1] + \beta[2]x, \sigma_e)$
pooled_model = """
model {
  y ~ normal(beta[1] + beta[2] * x, sigma);
}
"""
stan_model_radon.ipynb
widdowquinn/notebooks
mit
Running the pooled model in Stan We need to map Python variables to those in the stan model, and pass the data, parameters and model strings above to stan. We also need to specify how many iterations of sampling we want, and how many parallel chains to sample (here, 1000 iterations of 2 chains). This is where explicitly-named local variables are convenient for definition of Stan models. Calling pystan.stan doesn't just define the model, ready to fit - it runs the fitting immediately.
pooled_data_dict = {'N': len(log_radon),
                    'x': floor_measure,
                    'y': log_radon}

pooled_fit = pystan.stan(model_code=pooled_data + pooled_parameters + pooled_model,
                         data=pooled_data_dict,
                         iter=1000,
                         chains=2)
stan_model_radon.ipynb
widdowquinn/notebooks
mit
Once the fit has been run, the sample can be extracted for visualisation and summarisation. Specifying permuted=True means that all fitting chains are merged, warmup samples are discarded, and a dictionary is returned with samples for each parameter:
# Collect the sample
pooled_sample = pooled_fit.extract(permuted=True)
stan_model_radon.ipynb
widdowquinn/notebooks
mit
The output is an OrderedDict with two keys of interest to us: beta and sigma. sigma describes the estimated error term, and beta describes the estimated values of $\alpha$ and $\beta$ for each iteration:
# Inspect the sample
pooled_sample['beta']
stan_model_radon.ipynb
widdowquinn/notebooks
mit
While it can be very interesting to see the results for individual iterations (and how they vary), for now we are interested in the mean values of these estimates:
# Get mean values for parameters, from the sample
# b0 = common radon value across counties (alpha)
# m0 = variation in radon level with change in floor (beta)
b0, m0 = pooled_sample['beta'].T.mean(1)

# What are the fitted parameters?
print("alpha: {0}, beta: {1}".format(b0, m0))
stan_model_radon.ipynb
widdowquinn/notebooks
mit
We can visualise how well this pooled model fits the observed data:
# Plot the fitted model (red line) against observed values (blue points)
plt.scatter(srrs_mn.floor, np.log(srrs_mn.activity + 0.1))
xvals = np.linspace(-0.1, 1.2)
plt.plot(xvals, m0 * xvals + b0, 'r--')
plt.title("Fitted model")
plt.xlabel("Floor")
plt.ylabel("log(radon)");
stan_model_radon.ipynb
widdowquinn/notebooks
mit
The answer is: not terribly badly (the fitted line runs convincingly through the centre of the data, and plausibly describes the trend), but not terribly well, either. The observed points vary widely about the fitted model, implying that the prevailing radon level varies quite widely, and we might expect different gradients if we chose different subsets of the data. The main error in this model fit is due to bias, because the pooling approach is an inaccurate representation of the underlying radon level, taken across all measurements.

Specifying the unpooled model in Stan

For the unpooled model, we have the parameter $\alpha_{j[i]}$, representing a list of (independent) mean values, one for each county. Otherwise the model is the same as for the pooled example, with shared parameters for the effect of which floor is being measured, and the standard deviation of the error.

We construct the data, parameters and model blocks in a similar way to before. We define the number of samples (N, int), and two vectors of log-radon measurements (y, length N) and floor measurement covariates (x, length N). The main difference to before is that we define a list of counties (these are the indices 1..85 defined above, rather than county names), one for each sample:
unpooled_data = """
data {
  int<lower=0> N;
  int<lower=1, upper=85> county[N];
  vector[N] x;
  vector[N] y;
}
"""
stan_model_radon.ipynb
widdowquinn/notebooks
mit
We define three parameters: $\alpha_{j[i]}$ - one radon level per county (a - as a vector of length 85, one value per county); change in radon level by floor, $\beta$ (beta, a real value), and the Normal distribution scale parameter sigma, as before:
unpooled_parameters = """
parameters {
  vector[85] a;
  real beta;
  real<lower=0, upper=100> sigma;
}
"""
stan_model_radon.ipynb
widdowquinn/notebooks
mit
We also define transformed parameters, for convenience. This defines a new variable $\hat{y}$ (y_hat, a vector with one value per sample) which is our estimate/prediction of log(radon) value per household. This could equally well be done in the model block - we don't need to generate a transformed parameter, but for more complex models this is a useful technique to improve readability and maintainability.
unpooled_transformed_parameters = """
transformed parameters {
  vector[N] y_hat;
  for (i in 1:N)
    y_hat[i] <- beta * x[i] + a[county[i]];
}
"""
stan_model_radon.ipynb
widdowquinn/notebooks
mit
Using this transformed parameter, the model form is now $y \sim N(\hat{y}, \sigma_e)$, making explicit that we are fitting parameters that result in the model predicting a household radon measurement, and we are estimating the error of this prediction against the observed values:
unpooled_model = """
model {
  y ~ normal(y_hat, sigma);
}
"""
stan_model_radon.ipynb
widdowquinn/notebooks
mit
Running the unpooled model in Stan We again map Python variables to those used in the stan model, then pass the data, parameters (transformed and untransformed) and the model to stan. We again specify 1000 iterations of 2 chains. Note that we have to offset our Python indices for counties by 1, as Python counts from zero, but Stan counts from 1.
# Map data
unpooled_data_dict = {'N': len(log_radon),
                      'county': county + 1,  # Stan counts start from 1
                      'x': floor_measure,
                      'y': log_radon}

# Fit model
unpooled_fit = pystan.stan(model_code=unpooled_data + unpooled_parameters +
                           unpooled_transformed_parameters + unpooled_model,
                           data=unpooled_data_dict,
                           iter=1000,
                           chains=2)
stan_model_radon.ipynb
widdowquinn/notebooks
mit
We can extract the sample from the fit for visualisation and summarisation. This time we do not use the permuted=True option. This returns a StanFit4Model object, from which we can extract the fitted estimates for a parameter using indexing, like a dictionary, e.g. unpooled_fit['beta'], and this will return a numpy ndarray of values. For $\alpha$ (a) we get a 1000x85 array, for $\beta$ (beta) we get a 1000x1 array. Mean and standard deviation (and other summary statistics) can be calculated from these. When extracting vectors of $\alpha_{j[i]}$ (radon levels per county) and the associated standard errors, we use a pd.Series object, for compatibility with pandas. This allows us to specify an index, which is the list of county names in mn_counties.
# Extract fit of radon by county
unpooled_estimates = pd.Series(unpooled_fit['a'].mean(0), index=mn_counties)
unpooled_se = pd.Series(unpooled_fit['a'].std(0), index=mn_counties)

# Inspect estimates
unpooled_estimates.head()
stan_model_radon.ipynb
widdowquinn/notebooks
mit
To inspect the variation in predicted radon levels at county resolution, we can plot the mean of each estimate with its associated standard error. To structure this visually, we'll reorder the counties such that we plot counties from lowest to highest.
# Get row order of estimates as an index: low to high radon
order = unpooled_estimates.sort_values().index

# Plot mean radon estimates with stderr, following low to high radon order
plt.scatter(range(len(unpooled_estimates)), unpooled_estimates[order])
for i, m, se in zip(range(len(unpooled_estimates)),
                    unpooled_estimates[order], unpooled_se[order]):
    plt.plot([i, i], [m - se, m + se], 'b-')
plt.xlim(-1, 86)
plt.ylim(-1, 4)
plt.xlabel('Ordered county')
plt.ylabel('Radon estimate');
stan_model_radon.ipynb
widdowquinn/notebooks
mit
From this visual inspection, we can see that there is one county with a relatively low predicted radon level, and about five with relatively high levels. This reinforces our suggestion that a pooled estimate is likely to exhibit significant bias. Plot comparison of pooled and unpooled estimates We can make direct visual comparisons between pooled and unpooled estimates for all counties, but here we do so for a specific subset:
# Define subset of counties
sample_counties = ('LAC QUI PARLE', 'AITKIN', 'KOOCHICHING', 'DOUGLAS',
                   'CLAY', 'STEARNS', 'RAMSEY', 'ST LOUIS')

# Make plot
fig, axes = plt.subplots(2, 4, figsize=(12, 6), sharex=True, sharey=True)
axes = axes.ravel()  # turn axes into a flattened array
m = unpooled_fit['beta'].mean(0)
for i, c in enumerate(sample_counties):
    # Get unpooled estimates and set common x values
    b = unpooled_estimates[c]
    xvals = np.linspace(-0.2, 1.2)

    # Plot household data
    x = srrs_mn.floor[srrs_mn.county == c]
    y = srrs_mn.log_radon[srrs_mn.county == c]
    axes[i].scatter(x + np.random.randn(len(x)) * 0.01, y, alpha=0.4)

    # Plot models
    axes[i].plot(xvals, m * xvals + b)           # unpooled
    axes[i].plot(xvals, m0 * xvals + b0, 'r--')  # pooled

    # Add labels and ticks
    axes[i].set_xticks([0, 1])
    axes[i].set_xticklabels(['basement', 'floor'])
    axes[i].set_ylim(-1, 3)
    axes[i].set_title(c)
    if not i % 2:
        axes[i].set_ylabel('log radon level')
stan_model_radon.ipynb
widdowquinn/notebooks
mit
By visual inspection, we can see that using unpooled county estimates for prevailing radon level has resulted in models that deviate from the pooled estimates, correcting for its bias. However, we can also see that for counties with few observations, the fitted estimates track the observations very closely, suggesting that there has been overfitting. The attempt to minimise error due to bias has resulted in the introduction of greater error due to variance in the dataset.

Conclusion

Neither model does perfectly:

- For identification of counties with a predicted prevailing high radon level, pooling is useless (because all counties are modelled with the same level)
- However, we ought not to trust any unpooled estimates that were produced using few observations on a county

Ideally, we would have an intermediate form of model that optimally minimises the errors due to both bias and variance.

Pooling and Multilevel/Hierarchical Models

pooled model

When we pool data, we imply that they are sampled from the same model. This ignores all variation (other than sampling variation) among the units being sampled. That is to say, observations $y_1, y_2, \ldots, y_k$ share common parameter(s) $\theta$.

unpooled model

If we analyse our data with an unpooled model, we separate our data out into groups (which may be as extreme as one group per sample), which implies that the groups are sampled independently from separate models, because the differences between sampling units are too great for them to be reasonably combined. That is to say, observations (or grouped observations) $y_1, y_2, \ldots, y_k$ have independent parameters $\theta_1, \theta_2, \ldots, \theta_k$.

partial pooling/hierarchical modelling

In a hierarchical, or partial pooling, model, model parameters are instead viewed as a sample from a population distribution of parameters, so the unpooled model parameters $\theta_1, \theta_2, \ldots, \theta_k$ can be sampled from a single distribution $N(\mu, \sigma^2)$. One of the great advantages of Bayesian modelling (as opposed to linear regression modelling) is the relative ease with which one can specify multilevel models and fit them using Hamiltonian Monte Carlo.

Partial Pooling

A simple model

The simplest possible partial pooling model for the radon dataset is one that estimates radon levels, with no other predictors (i.e. ignoring the effect of floor). This is a compromise between pooled (mean of all counties) and unpooled (county-level means), and approximates a weighted average (by sample size) of unpooled county means and the pooled mean:

$$\hat{\alpha} \approx \frac{(n_j/\sigma_y^2)\bar{y}_j + (1/\sigma_{\alpha}^2)\bar{y}}{(n_j/\sigma_y^2) + (1/\sigma_{\alpha}^2)}$$

- $\hat{\alpha}$ - partially-pooled estimate of radon level
- $n_j$ - number of samples in county $j$
- $\bar{y}_j$ - estimated mean for county $j$
- $\sigma_y^2$ - s.e. of $\bar{y}_j$, variability of the county mean
- $\bar{y}$ - pooled mean estimate for $\alpha$
- $\sigma_{\alpha}^2$ - s.e. of $\bar{y}$

Specifying the model

We can define this in stan, specifying data, parameters, transformed parameters and model blocks. The model is built up as follows. Our observed log(radon) measurements ($y$) approximate an intermediate transformed parameter $\hat{y}$, which is normally distributed with variance $\sigma_y^2$:

$$y \sim N(\hat{y}, \sigma_y^2)$$

The transformed variable $\hat{y}$ is the value of $\alpha$ associated with the county $i$ ($i=1,\ldots,N$) in which each household is found.
$$\hat{y} = \{\alpha_1, \ldots, \alpha_N\}$$

The value of $\alpha$ for each county $i$ is Normally distributed with mean $10\mu_{\alpha}$ and variance $\sigma_{\alpha}^2$. That is, there is a common mean and variance underlying each of the prevailing radon levels in each county.

$$\alpha_i \sim N(10\mu_{\alpha}, \sigma_{\alpha}^2), i = 1,\ldots,N$$

The value $\mu_{\alpha}$ is Normally distributed around 0, with unit variance:

$$\mu_{\alpha} \sim N(0, 1)$$

In data:

* N will be the number of samples (int)
* county will be a list of N values from 1-85, specifying the county index for each measurement
* y will be a vector of log(radon) measurements, one per household/sample.

We define parameters:

- a (vector, one value per county), representing $\alpha$, the vector of prevailing radon levels for each county.
- mu_a, a real corresponding to $\mu_{\alpha}$, the mean radon level underlying the distribution from which the county levels are drawn.
- sigma_a is $\sigma_{\alpha}$, the standard deviation of the radon level distribution underlying the county levels: variability of county means about the average.
- sigma_y is $\sigma_y$, the standard deviation of the measurement/sampling error: residual error of the observations.
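Before turning to the Stan code, a quick numeric illustration of the weighted-average approximation above, with made-up numbers (hypothetical values, for intuition only):

import numpy as np

n_j = 4          # households observed in county j
ybar_j = 1.8     # unpooled mean for county j (log radon)
ybar = 1.3       # pooled mean across all counties
sigma_y2 = 0.6   # variability of the county mean
sigma_a2 = 0.1   # variability of the pooled mean

alpha_hat = ((n_j / sigma_y2) * ybar_j + (1 / sigma_a2) * ybar) / \
            ((n_j / sigma_y2) + (1 / sigma_a2))
print(alpha_hat)  # 1.5: between ybar_j and ybar, pulled toward the pooled mean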
partial_pooling = """
data {
  int<lower=0> N;
  int<lower=1,upper=85> county[N];
  vector[N] y;
}
parameters {
  vector[85] a;
  real mu_a;
  real<lower=0,upper=100> sigma_a;
  real<lower=0,upper=100> sigma_y;
}
transformed parameters {
  vector[N] y_hat;
  for (i in 1:N)
    y_hat[i] <- a[county[i]];
}
model {
  mu_a ~ normal(0, 1);
  a ~ normal(10 * mu_a, sigma_a);
  y ~ normal(y_hat, sigma_y);
}
"""
stan_model_radon.ipynb
widdowquinn/notebooks
mit
We map Python variables onto the model data (remembering to offset counts/indices by 1, as Stan counts from 1, not from 0):
partial_pool_data = {'N': len(log_radon), 'county': county + 1, 'y': log_radon}
stan_model_radon.ipynb
widdowquinn/notebooks
mit
Finally, we fit the model, to estimate $\mu_{\alpha}$, and $\alpha_i, i=1,\ldots,N$:
partial_pool_fit = pystan.stan(model_code=partial_pooling, data=partial_pool_data, iter=1000, chains=2)
stan_model_radon.ipynb
widdowquinn/notebooks
mit
We're interested primarily in the county-level estimates of prevailing radon levels, so we obtain the sample estimates for a:
sample_trace = partial_pool_fit['a']

means = sample_trace.mean(axis=0)  # county-level estimates
sd = sample_trace.std(axis=0)
samples, counties = sample_trace.shape
n_county = srrs_mn.groupby('county')['idnum'].count()  # number of samples from each county
stan_model_radon.ipynb
widdowquinn/notebooks
mit
We're going to compare the results from our partially-pooled model to the unpooled model above.
# Obtain unpooled estimates
unpooled = pd.DataFrame({'n': n_county,
                         'm': unpooled_estimates,
                         'sd': unpooled_se})
unpooled['se'] = unpooled.sd / np.sqrt(unpooled.n)

# Construct axes for results
fig, axes = plt.subplots(1, 2, figsize=(14, 6), sharex=True, sharey=True)
jitter = np.random.normal(scale=0.1, size=counties)  # avoid overplotting counties

# Plot unpooled estimates
axes[0].plot(unpooled.n + jitter, unpooled.m, 'b.')  # means
for j, row in zip(jitter, unpooled.iterrows()):
    name, dat = row
    axes[0].plot([dat.n + j, dat.n + j],
                 [dat.m - dat.se, dat.m + dat.se], 'b-')

# Plot partially-pooled estimates
axes[1].scatter(n_county.values + jitter, means)
for j, n, m, s in zip(jitter, n_county.values, means, sd):
    axes[1].plot([n + j, n + j], [m - s, m + s], 'b-')

# Add line for underlying mean
for ax in axes:
    ax.hlines(sample_trace.mean(), 0.9, 100, linestyles='--')  # underlying mean from partial model

# Set axis limits/scale (shared x/y - need only to set one axis)
axes[0].set_xscale('log')
axes[0].set_xlim(1, 100)
axes[0].set_ylim(-0.5, 3.5)

# Set axis titles
axes[0].set_title("Unpooled model estimates")
axes[1].set_title("Partially pooled model estimates");
stan_model_radon.ipynb
widdowquinn/notebooks
mit
By inspection, there is quite a difference between unpooled and partially-pooled estimates of prevailing county-level radon level, especially at smaller sample sizes. The unpooled estimates at smaller sample sizes are both more extreme and more imprecise.

Partial pooling: varying intercept

We can extend this partial pooling to a linear model of the relationship between measured log(radon), the prevailing county radon level, and the floor at which the measurement was made. In the linear model, the measured radon level in a household $y_i$ is a function of the floor at which measurement took place, $x_i$, with parameters $\alpha_{j[i]}$ (the prevailing radon level in the county) and $\beta$ (the influence of the floor), and residual error $\epsilon_i$.

$$y_i = \alpha_{j[i]} + \beta x_i + \epsilon_i$$

In this linear model, the prevailing radon level $\alpha_{j[i]}$ is the intercept, with random Normal effect:

$$\alpha_{j[i]} \sim N(\mu_{\alpha}, \sigma_{\alpha}^2)$$

The residual error is also sampled from a Normal distribution:

$$\epsilon_i \sim N(0, \sigma_y^2)$$

This approach is similar to a least squares regression, but the multilevel modelling approach allows information about parameter distributions to be shared across groups, which can lead to more reasonable estimates of parameters with relatively little data. In this example, using a common distribution for prevailing county-level radon spreads the information about likely radon levels such that our estimates for counties with few observations should be less extreme.

Specifying the model

We define the model in stan, as usual specifying data, parameters, transformed parameters and model blocks. The model is built up as follows. Our observed log(radon) measurements ($y$) approximate an intermediate transformed parameter $\hat{y}$, which is normally distributed with variance $\sigma_y^2$. $\sigma_y$ is sampled from a Uniform distribution.

$$y \sim N(\hat{y}, \sigma_y^2)$$

$$\sigma_{y} \sim U(0, 100)$$

The transformed variable $\hat{y}$ is a linear function of $x_i$, the floor at which radon is measured. The parameters are the value of $\alpha$ associated with the county $i$ ($i=1,\ldots,N$) in which each household is found, and the effect due to which floor is used for measurement.

$$\hat{y_i} = \alpha_{j[i]} + \beta x_i$$

The value of $\alpha$ for each county $i$ is Normally distributed with mean $\mu_{\alpha}$ and variance $\sigma_{\alpha}^2$. $\sigma_{\alpha}$ is sampled from a Uniform distribution, between 0 and 100. $\mu_{\alpha}$ is an unconstrained real value. There is a common mean and variance underlying each of the prevailing radon levels in each county.

$$\alpha_i \sim N(\mu_{\alpha}, \sigma_{\alpha}^2)$$

$$\sigma_{\alpha} \sim U(0, 100)$$

The value of $\beta$ is assumed to be Normally distributed about zero, with unit variance:

$$\beta \sim N(0, 1)$$

In data:

* J is the number of counties (int)
* N is the number of samples (int)
* county is a list of N values from 1-85, specifying the county index for each measurement
* x is a vector of indices for which floor the radon measurements were taken at each household
* y is a vector of log(radon) measurements, one per household/sample.

We define parameters:

- a (vector, one value per county), representing $\alpha$, the vector of prevailing radon levels for each county.
- b (real), representing $\beta$, the effect of floor choice
- mu_a, a real corresponding to $\mu_{\alpha}$, the mean radon level underlying the distribution from which the county levels are drawn.
- sigma_a is $\sigma_{\alpha}$, the standard deviation of the radon level distribution underlying the county levels: variability of county means about the average.
- sigma_y is $\sigma_y$, the standard deviation of the measurement/sampling error: residual error of the observations.
varying_intercept = """
data {
  int<lower=0> J;
  int<lower=0> N;
  int<lower=1,upper=J> county[N];
  vector[N] x;
  vector[N] y;
}
parameters {
  vector[J] a;
  real b;
  real mu_a;
  real<lower=0,upper=100> sigma_a;
  real<lower=0,upper=100> sigma_y;
}
transformed parameters {
  vector[N] y_hat;
  for (i in 1:N)
    y_hat[i] <- a[county[i]] + x[i] * b;
}
model {
  sigma_a ~ uniform(0, 100);
  a ~ normal(mu_a, sigma_a);
  b ~ normal(0, 1);
  sigma_y ~ uniform(0, 100);
  y ~ normal(y_hat, sigma_y);
}
"""
stan_model_radon.ipynb
widdowquinn/notebooks
mit
As usual, we map Python variables to those in the model, and run the fit:
varying_intercept_data = {'N': len(log_radon),
                          'J': len(n_county),
                          'county': county + 1,
                          'x': floor_measure,
                          'y': log_radon}

varying_intercept_fit = pystan.stan(model_code=varying_intercept,
                                    data=varying_intercept_data,
                                    iter=1000,
                                    chains=2)
stan_model_radon.ipynb
widdowquinn/notebooks
mit
We can then collect the county-level estimates of prevailing radon, the intercept of the model, $\alpha_{j[i]}$, from a (1000 iterations x 85 counties):
a_sample = pd.DataFrame(varying_intercept_fit['a'])
stan_model_radon.ipynb
widdowquinn/notebooks
mit
We can visualise the distribution of these estimates, by county, with a boxplot:
plt.figure(figsize=(16, 6))
g = sns.boxplot(data=a_sample, whis=np.inf, color="c")
g.set_xticklabels(mn_counties, rotation=90)  # label counties
g;

# 2x2 plot of parameter estimate data
fig, axes = plt.subplots(2, 2, figsize=(10, 6))

# density plot of sigma_a estimate
sns.kdeplot(varying_intercept_fit['sigma_a'], ax=axes[0][0])
axes[0][0].set_xlim(varying_intercept_fit['sigma_a'].min(),
                    varying_intercept_fit['sigma_a'].max())

# scatterplot of sigma_a estimate
axes[0][1].plot(varying_intercept_fit['sigma_a'], 'o', alpha=0.3)

# density plot of beta estimate
sns.kdeplot(varying_intercept_fit['b'], ax=axes[1][0])
axes[1][0].set_xlim(varying_intercept_fit['b'].min(),
                    varying_intercept_fit['b'].max())

# scatterplot of beta estimate
axes[1][1].plot(varying_intercept_fit['b'], 'o', alpha=0.3)

# titles/labels
axes[0][0].set_title("sigma_a")
axes[1][0].set_title("b")
axes[0][0].set_ylabel("frequency")
axes[1][0].set_ylabel("frequency")
axes[0][0].set_xlabel("value")
axes[1][0].set_xlabel("value")
axes[0][1].set_ylabel("sigma_a")
axes[1][1].set_ylabel("b")
axes[0][1].set_xlabel("iteration")
axes[1][1].set_xlabel("iteration");

varying_intercept_fit['sigma_a'].min(), varying_intercept_fit['sigma_a'].max()

pystan.__version__
stan_model_radon.ipynb
widdowquinn/notebooks
mit
For purposes of this example, we will load some data into the server to work with. You may already have tables in your server that you can use.
tbl = conn.read_csv('https://raw.githubusercontent.com/sassoftware/sas-viya-programming/master/data/cars.csv')
tbl
tbl.head()
communities/Exporting Data from CAS using Python.ipynb
sassoftware/sas-viya-programming
apache-2.0
Now that we have a CASTable object to work with, we can export the data from the CAS table that it references to a local file. We'll start with CSV. The to_csv method will return a string of CSV data if you don't specify a filename. We'll do it that way in the following code.
print(tbl.to_csv())
print(tbl.to_html())
print(tbl.to_latex())
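Since these methods mirror their pandas DataFrame counterparts, writing to a local file works the same way (a sketch; the filename is hypothetical):

# Write the CAS table contents directly to a local CSV file
tbl.to_csv('cars_export.csv', index=False)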
communities/Exporting Data from CAS using Python.ipynb
sassoftware/sas-viya-programming
apache-2.0
There are many other to_XXX methods on the CASTable object, each of which corresponds to the same to_XXX method on Pandas DataFrames. The CASTable methods take the same arguments as the DataFrame counterparts, so you can read the Pandas documentation for more information.
conn.close()
communities/Exporting Data from CAS using Python.ipynb
sassoftware/sas-viya-programming
apache-2.0
Methods
# Creating a list
lst = [100, -2, 12, 65, 0]

# Using a method of the list object
lst.append(10)

# Printing the list
lst

# Using a method of the list object
lst.count(10)

# The help() function explains how to use each method of an object
help(lst.count)

# The dir() function shows all methods and attributes of an object
dir(lst)

a = 'This is a string'

# An object's method can be called inside a function, such as print()
print(a.split())
Cap03/Notebooks/DSA-Python-Cap03-05-Metodos.ipynb
dsacademybr/PythonFundamentos
gpl-3.0
Solution

There are two ways to solve this problem. One is to relate the desired distribution to the binomial distribution. If the probability of success on every trial is p, the probability of getting the kth success on the nth trial is

PMF(n; k, p) = BinomialPMF(k-1; n-1, p) p

That is, the probability of getting k-1 successes in n-1 trials, times the probability of getting the kth success on the nth trial. Here's a function that computes it:
def MakePmfUsingBinom(k, p, high=100):
    pmf = Pmf()
    for n in range(1, high):
        pmf[n] = stats.binom.pmf(k-1, n-1, p) * p
    return pmf
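A sanity check on this construction: with a large enough upper bound, the PMF should be close to normalized (this reuses only the function defined above and the indexing pattern used later in the notebook):

# Total probability should approach 1 as `high` grows
pmf_check = MakePmfUsingBinom(5, 0.1, 500)
print(sum(pmf_check[n] for n in pmf_check))  # expect a value close to 1.0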
negative_binomial.ipynb
AllenDowney/ProbablyOverthinkingIt
mit
And here's an example using the parameters in the question.
pmf = MakePmfUsingBinom(5, 0.1, 200)
thinkplot.Pdf(pmf)
negative_binomial.ipynb
AllenDowney/ProbablyOverthinkingIt
mit
We can solve the same problem using the negative binomial distribution, but it requires some translation from the parameters of the problem to the conventional parameters of the binomial distribution. The negative binomial PMF is the probability of getting r non-terminal events before the kth terminal event. (I am using "terminal event" instead of "success" and "non-terminal event" instead of "failure" because in the context of the negative binomial distribution, the use of "success" and "failure" is often reversed.) If n is the total number of events, n = k + r, so r = n - k. If the probability of a terminal event on every trial is p, the probability of getting the kth terminal event on the nth trial is

PMF(n; k, p) = NegativeBinomialPMF(n-k; k, p)

That is, the probability of n-k non-terminal events on the way to getting the kth terminal event (the negative binomial PMF already includes the factor of p for the final terminal event, which is why the code below does not multiply by p). Here's a function that computes it:
def MakePmfUsingNbinom(k, p, high=100):
    pmf = Pmf()
    for n in range(1, high):
        r = n - k
        pmf[n] = stats.nbinom.pmf(r, k, p)
    return pmf
negative_binomial.ipynb
AllenDowney/ProbablyOverthinkingIt
mit
Here's the same example:
pmf2 = MakePmfUsingNbinom(5, 0.1, 200)
thinkplot.Pdf(pmf2)
negative_binomial.ipynb
AllenDowney/ProbablyOverthinkingIt
mit
And confirmation that the results are the same within floating point error.
diffs = [abs(pmf[n] - pmf2[n]) for n in pmf]
max(diffs)
negative_binomial.ipynb
AllenDowney/ProbablyOverthinkingIt
mit
Using the PMF, we can compute the mean and standard deviation:
pmf.Mean(), pmf.Std()
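As a cross-check, the closed-form moments of this distribution (number of trials up to the kth success) are known, so the values above can be verified directly for k=5, p=0.1:

import numpy as np

k, p = 5, 0.1
mean = k / p                    # 50.0
std = np.sqrt(k * (1 - p)) / p  # ~21.2
print(mean, std)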
negative_binomial.ipynb
AllenDowney/ProbablyOverthinkingIt
mit
To compute percentiles, we can convert to a CDF (which computes the cumulative sum of the PMF)
cdf = Cdf(pmf)
scale = thinkplot.Cdf(cdf)
negative_binomial.ipynb
AllenDowney/ProbablyOverthinkingIt
mit
And here are the 10th and 90th percentiles.
cdf.Percentile(10), cdf.Percentile(90)
negative_binomial.ipynb
AllenDowney/ProbablyOverthinkingIt
mit
Constructing plugins from TimeSeries

Many times we encounter event lists or sets of spectral histograms from which we would like to derive a single plugin or a set of plugins. For this purpose, we provide the TimeSeriesBuilder, which offers a unified interface to time series data. Here we will demonstrate how to construct plugins from different data types.

Constructing time series objects from different data types

The TimeSeriesBuilder currently supports reading of the following data types:
* A generic PHAII data file
* GBM TTE/CSPEC/CTIME files
* LAT LLE files

If you would like to build a time series from your own custom data, consider creating a TimeSeriesBuilder.from_your_data() class method.

GBM Data

Building plugins from GBM data is achieved in the following fashion:
cspec_file = get_path_of_data_file('datasets/glg_cspec_n3_bn080916009_v01.pha')
tte_file = get_path_of_data_file('datasets/glg_tte_n3_bn080916009_v01.fit.gz')
gbm_rsp = get_path_of_data_file('datasets/glg_cspec_n3_bn080916009_v00.rsp2')

gbm_cspec = TimeSeriesBuilder.from_gbm_cspec_or_ctime('nai3_cspec',
                                                      cspec_or_ctime_file=cspec_file,
                                                      rsp_file=gbm_rsp)

gbm_tte = TimeSeriesBuilder.from_gbm_tte('nai3_tte',
                                         tte_file=tte_file,
                                         rsp_file=gbm_rsp)
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
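Note that from_your_data() is only a suggested name, not an existing constructor. As a purely hypothetical sketch of the pattern (the class name, event-list container, and parent-constructor arguments below are illustrative, not actual 3ML API), a custom class-method constructor could look like this:

class MyTimeSeriesBuilder(TimeSeriesBuilder):

    @classmethod
    def from_your_data(cls, name, arrival_times, energies, rsp_file):
        # wrap the raw events in whatever event-list container you use,
        # then delegate to the parent constructor (signature assumed here)
        event_list = MyEventList(arrival_times, energies)
        return cls(name, event_list, response=rsp_file)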
LAT LLE data

LAT LLE data is constructed in a similar fashion:
lle_file = get_path_of_data_file('datasets/gll_lle_bn080916009_v10.fit')
ft2_file = get_path_of_data_file('datasets/gll_pt_bn080916009_v10.fit')
lle_rsp = get_path_of_data_file('datasets/gll_cspec_bn080916009_v10.rsp')

lat_lle = TimeSeriesBuilder.from_lat_lle('lat_lle',
                                         lle_file=lle_file,
                                         ft2_file=ft2_file,
                                         rsp_file=lle_rsp)
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
Viewing Lightcurves and selecting source intervals

All time series objects share the same commands to get you to a plugin. Let's have a look at the GBM TTE lightcurve.
threeML_config['lightcurve']['lightcurve color'] = '#07AE44'

fig = gbm_tte.view_lightcurve(start=-20, stop=200)
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
Perhaps we want to fit the time interval from 0-10 seconds. We make a selection like this:
threeML_config['lightcurve']['selection color'] = '#4C3CB7'

gbm_tte.set_active_time_interval('0-10')

fig = gbm_tte.view_lightcurve(start=-20, stop=200);
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
For event list style data like time tagged events, the selection is exact. However, pre-binned data in the form of e.g. PHAII files will have the selection automatically adjusted to the underlying temporal bins. Several discontinuous time selections can be made.

Fitting a polynomial background

In order to get to a plugin, we need to model and create an estimated background in each channel ($B_i$) for our interval of interest. The process that we have implemented is to fit temporal off-source regions to polynomials ($P(t;\vec{\theta})$) in time. First, a polynomial is fit to the total count rate. From this fit we determine the best polynomial order via a likelihood ratio test, unless the user supplies a polynomial order in the constructor or directly via the polynomial_order attribute. Then, a polynomial of this order is fit to every channel in the data.

From the polynomial fit, the polynomial is integrated in time over the active source interval to estimate the count rate in each channel. The estimated background and background errors are then stored for each channel.

$$ B_i = \int_{T_1}^{T_2} P(t;\vec{\theta}) \, {\rm d}t $$
threeML_config['lightcurve']['background color'] = '#FC2530'

gbm_tte.set_background_interval('-24--5', '100-200')

gbm_tte.view_lightcurve(start=-20, stop=200);
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
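To make the idea behind the formula concrete, here is a minimal single-channel numpy sketch. This is not 3ML's implementation (which selects the polynomial order via a likelihood ratio test and fits every channel); the off-source times and rates below are synthetic:

import numpy as np

# synthetic off-source light curve (pre- and post-burst intervals)
t_off = np.concatenate([np.linspace(-24, -5, 50), np.linspace(100, 200, 100)])
rate_off = np.random.poisson(10, size=t_off.size)

# fit P(t; theta) to the off-source rates, then integrate it over
# the active source interval [T1, T2] to estimate background counts
coeffs = np.polyfit(t_off, rate_off, deg=2)
T1, T2 = 0., 10.
antideriv = np.polyint(coeffs)
B = np.polyval(antideriv, T2) - np.polyval(antideriv, T1)
print(B)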
For event list data, binned or unbinned background fits are possible. For pre-binned data, only a binned fit is possible.
gbm_tte.set_background_interval('-24--5', '100-200', unbinned=False)
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
Saving the background fit

The background polynomial coefficients can be saved to disk for faster manipulation of time series data.
gbm_tte.save_background('background_store', overwrite=True)

gbm_tte_reloaded = TimeSeriesBuilder.from_gbm_tte('nai3_tte',
                                                  tte_file=tte_file,
                                                  rsp_file=gbm_rsp,
                                                  restore_background='background_store.h5')

fig = gbm_tte_reloaded.view_lightcurve(-10, 200)
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
Creating a plugin

With our background selections made, we can now create a plugin instance. In the case of GBM data, this results in a DispersionSpectrumLike plugin. Please refer to the Plugins documentation for more details.
gbm_plugin = gbm_tte.to_spectrumlike()

gbm_plugin.display()
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
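Like any other SpectrumLike-style plugin, the result can be configured before fitting. For example, the energy selection can typically be restricted with set_active_measurements (the 8.1-900 keV range here is an arbitrary example, not a recommendation for this dataset):

gbm_plugin.set_active_measurements('8.1-900')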
Time-resolved binning and plugin creation

It is possible to temporally bin time series. There are up to four methods provided, depending on the type of time series being used:

* Constant cadence (all time series)
* Custom (all time series)
* Significance (all time series)
* Bayesian Blocks (event lists)

Constant Cadence

Constant cadence bins are defined by a start and a stop time along with a time delta.
gbm_tte.create_time_bins(start=0, stop=10, method='constant', dt=2.)

gbm_tte.bins.display()
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
Custom

Custom time bins can be created by providing a contiguous list of start and stop times.
time_edges = np.array([.5, .63, 20., 21.])

starts = time_edges[:-1]
stops = time_edges[1:]

gbm_tte.create_time_bins(start=starts, stop=stops, method='custom')

gbm_tte.bins.display()
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
Significance

Time bins can be created by specifying a significance of signal to background, if a background fit has been performed.
gbm_tte.create_time_bins(start=0., stop=50., method='significance', sigma=25)

gbm_tte.bins.display()
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
Bayesian Blocks

The Bayesian Blocks algorithm (Scargle et al. 2013) can be used to bin an event list by looking for significant changes in the rate.
gbm_tte.create_time_bins(start=0., stop=50., method='bayesblocks', p0=.01, use_background=True)

gbm_tte.bins.display()
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
Working with bins

The light curve can be displayed with the time binning overlaid by supplying the use_binner option:
fig = gbm_tte.view_lightcurve(use_binner=True)
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
The bins can all be written to a PHAII file for analysis via OGIPLike.
gbm_tte.write_pha_from_binner(file_name='out',
                              overwrite=True,
                              force_rsp_write=False)  # set to True if you need the RSP written to a file;
                                                      # by default we try to choose the best option for you
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
Similarly, we can create a list of plugins directly from the time series.
my_plugins = gbm_tte.to_spectrumlike(from_bins=True)
docs/notebooks/Building_Plugins_from_TimeSeries.ipynb
giacomov/3ML
bsd-3-clause
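Each element of the returned list behaves like the single plugin created above; for example, a quick inspection loop reusing the display() method shown earlier:

for p in my_plugins:
    p.display()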
The function time_ch calls generate_cahn_hilliard_data to generate the data. generate_cahn_hilliard_data returns the microstructure and response as a tuple, and compute is called on the response field with a given number of workers and a given scheduler.
def time_ch(num_workers, get, shape=(48, 200, 200), chunks=(1, 200, 200), n_steps=100):
    generate_cahn_hilliard_data(shape, chunks=chunks, n_steps=n_steps)[1].compute(num_workers=num_workers,
                                                                                  get=get)
sandbox/ch-benchmark.ipynb
wd15/fmks
mit
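As an aside: the get= keyword used above belongs to older dask releases; in current dask it has been replaced by scheduler=. A sketch of the equivalent helper under that newer API (assuming the same generate_cahn_hilliard_data helper):

def time_ch_new(num_workers, scheduler, shape=(48, 200, 200), chunks=(1, 200, 200), n_steps=100):
    data = generate_cahn_hilliard_data(shape, chunks=chunks, n_steps=n_steps)
    # e.g. scheduler='threads' or scheduler='processes'
    data[1].compute(num_workers=num_workers, scheduler=scheduler)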
Threaded Timings
for n_proc in (8, 4, 2, 1):
    print(n_proc, "thread(s)")
    %timeit time_ch(n_proc, dask.threaded.get)
sandbox/ch-benchmark.ipynb
wd15/fmks
mit
Multiprocessing Timings
for n_proc in (8, 4, 2, 1):
    print(n_proc, "process(es)")
    %timeit time_ch(n_proc, dask.multiprocessing.get)
sandbox/ch-benchmark.ipynb
wd15/fmks
mit
Init
import os
import glob
import copy
import cPickle as pickle

from IPython.display import Image

%load_ext rpy2.ipython

%%R
library(ggplot2)
library(dplyr)
library(tidyr)
library(gridExtra)

if not os.path.isdir(workDir):
    os.makedirs(workDir)
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Creating a community file from the fraction relative abundances
%%R -i abundFile
# reading priming experiment OTU table
tbl.abund = read.delim(abundFile, sep='\t')
tbl.abund %>% head

%%R
tbl.comm = tbl.abund %>%
    rename('taxon_name' = OTUId, 'rel_abund_perc' = mean_perc_abund) %>%
    select(taxon_name, rel_abund_perc) %>%
    mutate(library = '1',
           rank = row_number(-rel_abund_perc)) %>%
    arrange(rank)
tbl.comm %>% head

%%R
# rescaling rel_abund_perc so sum(rel_abund_perc) = 100
tbl.comm = tbl.comm %>%
    group_by(library) %>%
    mutate(total = sum(rel_abund_perc)) %>%
    ungroup() %>%
    mutate(rel_abund_perc = rel_abund_perc * 100 / total) %>%
    select(library, taxon_name, rel_abund_perc, rank)
tbl.comm %>% head

%%R -i comm_richness
# number of OTUs
n.OTUs = tbl.comm$taxon_name %>% unique %>% length
cat('Number of OTUs:', n.OTUs, '\n')
# assertion
cat('Community richness = number of OTUs? ', comm_richness == n.OTUs, '\n')

%%R -i workDir
commFile = paste(c(workDir, 'comm.txt'), collapse='/')
write.table(tbl.comm, commFile, sep='\t', quote=F, row.names=F)
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Plotting community distribution
%%R -i workDir
commFile = paste(c(workDir, 'comm.txt'), collapse='/')
comm = read.delim(commFile, sep='\t')
comm %>% head

%%R -w 900 -h 350
ggplot(comm, aes(rank, rel_abund_perc)) +
    geom_point() +
    labs(x='Rank', y='% relative abundance',
         title='Priming experiment community abundance distribution') +
    theme_bw() +
    theme(
        text = element_text(size=16)
    )
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Simulating fragments

Making a genome index file to map genome fasta files to OTUs

* Will be used for community simulation
* Just OTUs with an association to genomes
%%R -i taxonMapFile -i genomeFilterFile
taxonMap = read.delim(taxonMapFile, sep='\t') %>%
    select(target_genome, OTU) %>%
    distinct()
taxonMap %>% nrow %>% print
taxonMap %>% head(n=3) %>% print

breaker = '----------------\n'
cat(breaker)

genomeFilter = read.delim(genomeFilterFile, sep='\t', header=F)
genomeFilter %>% nrow %>% print
genomeFilter %>% head(n=3) %>% print
cat(breaker)

comm = read.delim(commFile, sep='\t')
comm %>% nrow %>% print
comm %>% head(n=3) %>% print

%%R
taxonMap$OTU %>% table %>% sort(decreasing=T) %>% head

%%R
tbl.j = inner_join(taxonMap, genomeFilter, c('target_genome' = 'V1')) %>%
    rename('fasta_file' = V2) %>%
    select(OTU, fasta_file, target_genome)
tbl.j %>% head(n=3)

%%R
tbl.j$OTU %>% table %>% sort(decreasing=T) %>% head

%%R
tbl.j2 = inner_join(tbl.j, comm, c('OTU' = 'taxon_name'))
n.target.genomes = tbl.j2$OTU %>% unique %>% length
cat('Number of target OTUs: ', n.target.genomes, '\n')
cat('--------', '\n')
tbl.j2 %>% head(n=3)

%%R -i workDir
outFile = paste(c(workDir, 'target_genome_index.txt'), collapse='/')
write.table(tbl.j2, outFile, sep='\t', quote=F, row.names=F, col.names=F)
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Plotting community abundance distribution of target genomes
%%R -w 900 -h 350
ggplot(tbl.j2, aes(rank, rel_abund_perc)) +
    geom_point(size=3, shape='O', color='red') +
    labs(x='Rank', y='% relative abundance',
         title='Priming experiment community abundance distribution') +
    theme_bw() +
    theme(
        text = element_text(size=16)
    )
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Simulating fragments of genomes that match priming_exp bulk OTUs
!cd $workDir; \
    SIPSim fragments \
    target_genome_index.txt \
    --fp $genomeDir \
    --fr $primerFile \
    --fld skewed-normal,9000,2500,-5 \
    --flr None,None \
    --nf 10000 \
    --np $nprocs \
    2> ampFrags.log \
    > ampFrags.pkl
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Appending fragments from randomly selected genomes of the total dataset (n=1210)

This is to obtain the richness of the bulk soil community.

* Random OTUs will be named after non-target OTUs in the comm file.

Making a list of non-target OTUs
%%R -i workDir
# loading files
## target genome index (just OTUs with associated genome)
inFile = paste(c(workDir, 'target_genome_index.txt'), collapse='/')
tbl.target = read.delim(inFile, sep='\t', header=F)
colnames(tbl.target) = c('OTUId', 'fasta_file', 'genome_name')

## comm file of total community OTUs
commFile = paste(c(workDir, 'comm.txt'), collapse='/')
tbl.comm = read.delim(commFile, sep='\t')

%%R
# just OTUs w/out an associated genome
tbl.j = anti_join(tbl.comm, tbl.target, c('taxon_name' = 'OTUId'))
n.nontarget.genomes = tbl.j$taxon_name %>% length
cat('Number of non-target genomes: ', n.nontarget.genomes, '\n')
cat('---------\n')
tbl.j %>% head(n=5)

%%R -i comm_richness
# checking assumptions
cat('Target + nonTarget richness = total community richness?: ',
    n.target.genomes + n.nontarget.genomes == comm_richness, '\n')

%%R -i workDir
# writing out non-target OTU file
outFile = paste(c(workDir, 'comm_nonTarget.txt'), collapse='/')
write.table(tbl.j, outFile, sep='\t', quote=F, row.names=F)
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Randomly selecting amplicon fragment length-GC KDEs from total genome pool
# List of non-target OTUs
inFile = os.path.join(workDir, 'comm_nonTarget.txt')
nonTarget = pd.read_csv(inFile, sep='\t')['taxon_name'].tolist()
print 'Number of non-target OTUs: {}'.format(len(nonTarget))
nonTarget[:4]

# loading amplicon fragment KDEs simulated for the target OTUs
inFile = os.path.join(workDir, 'ampFrags.pkl')
ampFrag_target = []
with open(inFile, 'rb') as iFH:
    ampFrag_target = pickle.load(iFH)
print 'Target OTU richness: {}'.format(len(ampFrag_target))

# loading amplicon fragment KDEs for all genomes in the full dataset
ampFrag_all = []
with open(allAmpFrags, 'rb') as iFH:
    ampFrag_all = pickle.load(iFH)
print 'Count of frag-GC KDEs for all genomes: {}'.format(len(ampFrag_all))

# random selection from the full genome pool
target_richness = len(ampFrag_target)
richness_needed = comm_richness - target_richness
print 'Number of random taxa needed to reach richness: {}'.format(richness_needed)

if richness_needed > 0:
    # draw indices across the whole KDE dataset, not just the target genomes
    index = np.random.choice(range(len(ampFrag_all)), richness_needed)
    ampFrag_rand = []
    for i in index:
        sys.stderr.write('{},'.format(i))
        ampFrag_rand.append(copy.deepcopy(ampFrag_all[i]))
else:
    ampFrag_rand = []

# renaming randomly selected KDEs by non-target OTU-ID
for i in range(len(ampFrag_rand)):
    ampFrag_rand[i][0] = nonTarget[i]

# appending random taxa to target taxa and writing
outFile = os.path.join(workDir, 'ampFrags_wRand.pkl')
with open(outFile, 'wb') as oFH:
    x = ampFrag_target + ampFrag_rand
    print 'Number of taxa in output: {}'.format(len(x))
    pickle.dump(x, oFH)
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
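As a final sanity check, the written KDE list can be reloaded to confirm that the combined richness matches the intended community richness (reusing comm_richness and workDir from above):

# reload the combined target + random KDE list and verify its length
inFile = os.path.join(workDir, 'ampFrags_wRand.pkl')
with open(inFile, 'rb') as iFH:
    ampFrag_final = pickle.load(iFH)
print 'Richness matches community richness: {}'.format(len(ampFrag_final) == comm_richness)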