Finally, putting everything together, we have heap_pop:
def heap_pop(heap):
    # swap the root with the last value
    heap[0], heap[-1] = heap[-1], heap[0]
    # remove the old root (now at the end)
    result = heap.pop()
    # restore the heap property by sifting the new root down
    percolate_down(heap, 0, len(heap))
    return result
Heaps.ipynb
abeschneider/algorithm_notes
mit
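heap_pop relies on the percolate_down helper defined earlier in the notebook. For reference, here is a minimal sketch of what such a helper typically looks like for a min-heap (an illustration, not necessarily the notebook's exact version):

def percolate_down(heap, index, size):
    # sift the value at `index` down, swapping with its smaller child,
    # until the min-heap property is restored
    while True:
        smallest = index
        left, right = 2*index + 1, 2*index + 2
        if left < size and heap[left] < heap[smallest]:
            smallest = left
        if right < size and heap[right] < heap[smallest]:
            smallest = right
        if smallest == index:
            return
        heap[index], heap[smallest] = heap[smallest], heap[index]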
To see heap_pop in action:
heap = []
heap_insert(heap, 1)
heap_insert(heap, 100)
heap_insert(heap, 20)
heap_insert(heap, 5)
heap_insert(heap, 3)
print(heap)
print(heap_pop(heap))
print(heap_pop(heap))
print(heap_pop(heap))
print(heap_pop(heap))
print(heap_pop(heap))
Heaps.ipynb
abeschneider/algorithm_notes
mit
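Assuming heap_insert maintains a standard min-heap (sifting each new value up), the five pops above come back in ascending order: 1, 3, 5, 20, 100.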
Examples

Example 1
testing = (__name__ == "__main__")
if testing:
    ! jupyter nbconvert --to python hadamard.ipynb
    import numpy as np
    import sys, os
    import matplotlib.image as mpimg
    ia898path = os.path.abspath('../../')
    if ia898path not in sys.path:
        sys.path.append(ia898path)
    import ia898.src as ia

if testing:
    f = mpimg.imread('../data/cameraman.tif')
    F = ia.hadamard(f)
    nb = ia.nbshow(2)
    nb.nbshow(f)
    nb.nbshow(ia.normalize(np.log(abs(F)+1)))
    nb.nbshow()
src/hadamard.ipynb
robertoalotufo/ia898
mit
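Conceptually, the 2-D Hadamard transform applied by ia.hadamard amounts to multiplying the image by normalized Hadamard matrices on both sides. A minimal sketch of that idea using scipy, assuming the image dimensions are powers of two (an illustration of the math, not the library's actual implementation):

import numpy as np
from scipy.linalg import hadamard

def hadamard2d(f):
    # separable 2-D transform: one normalized Hadamard matrix per dimension
    rows, cols = f.shape
    Hr = hadamard(rows) / np.sqrt(rows)
    Hc = hadamard(cols) / np.sqrt(cols)
    return Hr @ f @ Hc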
Measuring time:
if testing:
    f = mpimg.imread('../data/cameraman.tif')
    print('Computational time is:')
    %timeit ia.hadamard(f)
src/hadamard.ipynb
robertoalotufo/ia898
mit
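The next three cells depend on setup from earlier in the notebook. Roughly, they assume imports like the following; the exact import paths of egdm and the testing helper within the oedes package are assumptions here, so check the package documentation:

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import brentq  # root finding, used below to solve g(x) = 2
from oedes.models import egdm      # assumed location of the EGDM enhancement factors
from oedes import testing          # assumed location of the regression-value helper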
Enhancement factor of mobility depending on charge carrier concentration $g_1$
c = 10**np.linspace(-10, 0., 101)
for nsigma in [3, 4, 5, 6][::-1]:
    g1 = egdm.g1(nsigma, np.where(c < egdm.g1_max_c, c, egdm.g1_max_c))
    plt.plot(c, g1, label='$\hat{\sigma}$ = %s' % nsigma)
    testing.store(g1, rtol=1e-7)
    # solve for g1(x) = 2
    c2 = brentq(lambda x: egdm.g1(nsigma, x) - 2., 1e-10, 1e1)
    plt.plot(c2, 2, 'o', color='black')
plt.yscale('log')
plt.xscale('log')
plt.ylim([1., 1e6])
plt.xlabel('carrier concentration')
plt.ylabel('mobility enhancement $g_1$')
plt.legend(loc=0, frameon=False);
examples/egdm/egdm-g1-g2-g3.ipynb
mzszym/oedes
agpl-3.0
Enhancement factor of mobility depending on electric field $g_2$
En = np.linspace(0., 2.5, 101)
for nsigma in [3, 4, 5, 6][::-1]:
    g2 = egdm.g2(nsigma, np.where(En < egdm.g2_max_En, En, egdm.g2_max_En))
    testing.store(g2, rtol=1e-7)
    plt.plot(En, g2, label='$\hat{\sigma}$ = %s' % nsigma)
plt.yscale('log')
plt.ylim([1., 1e3])
plt.xlabel('normalized electric field, $E_n=eaF/\sigma$')
plt.ylabel('mobility enhancement $g_2$')
plt.legend(loc=0, frameon=False);
examples/egdm/egdm-g1-g2-g3.ipynb
mzszym/oedes
agpl-3.0
Enhancement factor of diffusion $g_3$
c = 10**np.linspace(-4, 0., 1001)
for nsigma in [3, 4, 5, 6][::-1]:
    g3 = egdm.g3(nsigma, np.where(c < egdm.g3_max_c, c, egdm.g3_max_c))
    plt.plot(c, g3, label='$\hat{\sigma}$ = %s' % nsigma)
    testing.store(g3, rtol=1e-7)
    # solve for g3(x) = 2
    c2 = brentq(lambda x: egdm.g3(nsigma, x) - 2., 1e-4, 0.5)
    plt.plot(c2, 2, 'o', color='black')
plt.xscale('log')
plt.ylim([1., 8.])
plt.xlabel('carrier concentration')
plt.ylabel('diffusion enhancement $g_3$')
plt.legend(loc=0, frameon=False);
examples/egdm/egdm-g1-g2-g3.ipynb
mzszym/oedes
agpl-3.0
The object returned by load_breast_cancer() is a scikit-learn Bunch object, which is similar to a dictionary.
cancer.keys()
python-machine-learning/Assignment 1.ipynb
atulsingh0/MachineLearning
gpl-3.0
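Since a Bunch behaves like a dictionary that also supports attribute access, the two styles below are interchangeable (a quick illustration using the cancer object already loaded in this notebook):

# dictionary-style and attribute-style access return the same arrays
assert (cancer['data'] == cancer.data).all()
print(cancer['feature_names'][:3])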
Question 0 (Example)

How many features does the breast cancer dataset have? This function should return an integer.
# You should write your whole answer within the function provided. The autograder will call
# this function and compare the return value against the correct solution value
def answer_zero():
    # This function returns the number of features of the breast cancer dataset, which is an integer.
    # The assignment question description will tell you the general format the autograder is expecting
    return len(cancer['feature_names'])

# You can examine what your function returns by calling it in the cell. If you have questions
# about the assignment formats, check out the discussion forums for any FAQs
answer_zero()
python-machine-learning/Assignment 1.ipynb
atulsingh0/MachineLearning
gpl-3.0
Question 1

Scikit-learn works with lists, numpy arrays, scipy-sparse matrices, and pandas DataFrames, so converting the dataset to a DataFrame is not necessary for training this model. Using a DataFrame does, however, make many tasks such as data munging easier, so let's practice creating a classifier with a pandas DataFrame.

Convert the sklearn.dataset cancer to a DataFrame.

This function should return a (569, 31) DataFrame with

columns = ['mean radius', 'mean texture', 'mean perimeter', 'mean area', 'mean smoothness', 'mean compactness', 'mean concavity', 'mean concave points', 'mean symmetry', 'mean fractal dimension', 'radius error', 'texture error', 'perimeter error', 'area error', 'smoothness error', 'compactness error', 'concavity error', 'concave points error', 'symmetry error', 'fractal dimension error', 'worst radius', 'worst texture', 'worst perimeter', 'worst area', 'worst smoothness', 'worst compactness', 'worst concavity', 'worst concave points', 'worst symmetry', 'worst fractal dimension', 'target']

and index = RangeIndex(start=0, stop=569, step=1)
def answer_one():
    df = pd.DataFrame(data=cancer['data'], columns=cancer['feature_names'])
    df['target'] = cancer['target']
    return df

answer_one()
python-machine-learning/Assignment 1.ipynb
atulsingh0/MachineLearning
gpl-3.0
Question 2

What is the class distribution? (i.e. how many instances of malignant (encoded 0) and how many benign (encoded 1)?)

This function should return a Series named target of length 2 with integer values and index = ['malignant', 'benign']
def answer_two():
    cancerdf = answer_one()
    malignant = (cancerdf['target'] == 0).sum()
    benign = (cancerdf['target'] == 1).sum()
    # the question asks for a Series named 'target' indexed by class name
    return pd.Series([malignant, benign], index=['malignant', 'benign'], name='target')

answer_two()
python-machine-learning/Assignment 1.ipynb
atulsingh0/MachineLearning
gpl-3.0
Question 3

Split the DataFrame into X (the data) and y (the labels).

This function should return a tuple of length 2: (X, y), where X has shape (569, 30) and y has shape (569,).
def answer_three():
    cancerdf = answer_one()
    X = cancerdf.iloc[:, :-1]
    y = cancerdf['target']
    return X, y
python-machine-learning/Assignment 1.ipynb
atulsingh0/MachineLearning
gpl-3.0
Question 4

Using train_test_split, split X and y into training and test sets (X_train, X_test, y_train, and y_test). Set the random number generator state to 0 using random_state=0 to make sure your results match the autograder!

This function should return a tuple of length 4: (X_train, X_test, y_train, y_test), where X_train has shape (426, 30), X_test has shape (143, 30), y_train has shape (426,), and y_test has shape (143,).
from sklearn.model_selection import train_test_split

def answer_four():
    X, y = answer_three()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
    return X_train, X_test, y_train, y_test

answer_four()
python-machine-learning/Assignment 1.ipynb
atulsingh0/MachineLearning
gpl-3.0
Question 5

Using KNeighborsClassifier, fit a k-nearest neighbors (knn) classifier with X_train and y_train, using one nearest neighbor (n_neighbors = 1).

This function should return a sklearn.neighbors.classification.KNeighborsClassifier.
from sklearn.neighbors import KNeighborsClassifier

def answer_five():
    X_train, X_test, y_train, y_test = answer_four()
    # fit a 1-nearest-neighbor classifier on the training data
    knn = KNeighborsClassifier(n_neighbors=1)
    knn.fit(X_train, y_train)
    return knn
python-machine-learning/Assignment 1.ipynb
atulsingh0/MachineLearning
gpl-3.0
Question 6

Using your knn classifier, predict the class label using the mean value for each feature.

Hint: You can use cancerdf.mean()[:-1].values.reshape(1, -1), which gets the mean value for each feature, ignores the target column, and reshapes the data from 1 dimension to 2 (necessary for the predict method of KNeighborsClassifier).

This function should return a numpy array, either array([ 0.]) or array([ 1.])
def answer_six():
    cancerdf = answer_one()
    means = cancerdf.mean()[:-1].values.reshape(1, -1)
    knn = answer_five()
    # predict the class label for the mean feature vector
    return knn.predict(means)
python-machine-learning/Assignment 1.ipynb
atulsingh0/MachineLearning
gpl-3.0
Question 7

Using your knn classifier, predict the class labels for the test set X_test.

This function should return a numpy array with shape (143,) and values either 0.0 or 1.0.
def answer_seven():
    X_train, X_test, y_train, y_test = answer_four()
    knn = answer_five()
    return knn.predict(X_test)
python-machine-learning/Assignment 1.ipynb
atulsingh0/MachineLearning
gpl-3.0
Question 8

Find the score (mean accuracy) of your knn classifier using X_test and y_test.

This function should return a float between 0 and 1
def answer_eight():
    X_train, X_test, y_train, y_test = answer_four()
    knn = answer_five()
    return knn.score(X_test, y_test)
python-machine-learning/Assignment 1.ipynb
atulsingh0/MachineLearning
gpl-3.0
Optional plot

Try using the plotting function below to visualize the different prediction scores between training and test sets, as well as malignant and benign cells.
def accuracy_plot():
    import matplotlib.pyplot as plt
    %matplotlib notebook

    X_train, X_test, y_train, y_test = answer_four()

    # Find the training and testing accuracies by target value (i.e. malignant, benign)
    mal_train_X = X_train[y_train == 0]
    mal_train_y = y_train[y_train == 0]
    ben_train_X = X_train[y_train == 1]
    ben_train_y = y_train[y_train == 1]

    mal_test_X = X_test[y_test == 0]
    mal_test_y = y_test[y_test == 0]
    ben_test_X = X_test[y_test == 1]
    ben_test_y = y_test[y_test == 1]

    knn = answer_five()

    scores = [knn.score(mal_train_X, mal_train_y), knn.score(ben_train_X, ben_train_y),
              knn.score(mal_test_X, mal_test_y), knn.score(ben_test_X, ben_test_y)]

    plt.figure()

    # Plot the scores as a bar chart
    bars = plt.bar(np.arange(4), scores, color=['#4c72b0', '#4c72b0', '#55a868', '#55a868'])

    # directly label the score onto the bars
    for bar in bars:
        height = bar.get_height()
        plt.gca().text(bar.get_x() + bar.get_width()/2, height*.90,
                       '{0:.{1}f}'.format(height, 2),
                       ha='center', color='w', fontsize=11)

    # remove all the ticks (both axes), and tick labels on the Y axis
    plt.tick_params(top='off', bottom='off', left='off', right='off',
                    labelleft='off', labelbottom='on')

    # remove the frame of the chart
    for spine in plt.gca().spines.values():
        spine.set_visible(False)

    plt.xticks([0, 1, 2, 3], ['Malignant\nTraining', 'Benign\nTraining',
                              'Malignant\nTest', 'Benign\nTest'], alpha=0.8);
    plt.title('Training and Test Accuracies for Malignant and Benign Cells', alpha=0.8)

# Uncomment the plotting function to see the visualization,
# Comment out the plotting function when submitting your notebook for grading
#accuracy_plot()
python-machine-learning/Assignment 1.ipynb
atulsingh0/MachineLearning
gpl-3.0
Github

https://github.com/jbwhit/OSCON-2015/commit/6750b962606db27f69162b802b5de4f84ac916d5

A few Python Basics
# Create a [list]
days = ['Monday',  # multiple lines
        'Tuesday',  # acceptable
        'Wednesday',
        'Thursday',
        'Friday',
        'Saturday',
        'Sunday',  # trailing comma is fine!
        ]
days

# Simple for-loop
for day in days:
    print(day)

# Double for-loop
for day in days:
    for letter in day:
        print(letter)

print(days)
print(*days)

# Double for-loop
for day in days:
    for letter in day:
        print(letter)
    print()

for day in days:
    for letter in day:
        print(letter.lower())
notebooks/07-Some_basics.ipynb
jbwhit/jupyter-best-practices
mit
Specify the model: The model is specified in the same way as any Pyro model, except that a keyword argument, observations, must be used to input a dictionary with each observation as a key. Since inference compilation involves learning to perform inference for any observed values, it is not important what the values in the dictionary are. 0 is used here.
def model(prior_mean, observations={"x1": 0, "x2": 0}):
    x = pyro.sample("z", dist.Normal(prior_mean, torch.tensor(5**0.5)))
    y1 = pyro.sample("x1", dist.Normal(x, torch.tensor(2**0.5)), obs=observations["x1"])
    y2 = pyro.sample("x2", dist.Normal(x, torch.tensor(2**0.5)), obs=observations["x2"])
    return x
tutorial/source/csis.ipynb
uber/pyro
apache-2.0
And the guide: The guide will be trained (a.k.a. compiled) to use the observed values to make proposal distributions for each unconditioned sample statement. In the paper [1], a neural network architecture is automatically generated for any model. However, for the implementation in Pyro the user must specify a task-specific guide program structure. As with any Pyro guide function, this should have the same call signature as the model. It must also encounter the same unobserved sample statements as the model. So that the guide program can be trained to make good proposal distributions, the distributions at sample statements should depend on the values in observations. In this example, a feed-forward neural network is used to map the observations to a proposal distribution for the latent variable. pyro.module is called when the guide function is run so that the guide parameters can be found by the optimiser during training.
class Guide(nn.Module):
    def __init__(self):
        super().__init__()
        self.neural_net = nn.Sequential(
            nn.Linear(2, 10),
            nn.ReLU(),
            nn.Linear(10, 20),
            nn.ReLU(),
            nn.Linear(20, 10),
            nn.ReLU(),
            nn.Linear(10, 5),
            nn.ReLU(),
            nn.Linear(5, 2))

    def forward(self, prior_mean, observations={"x1": 0, "x2": 0}):
        pyro.module("guide", self)
        x1 = observations["x1"]
        x2 = observations["x2"]
        v = torch.cat((x1.view(1, 1), x2.view(1, 1)), 1)
        v = self.neural_net(v)
        mean = v[0, 0]
        std = v[0, 1].exp()
        pyro.sample("z", dist.Normal(mean, std))

guide = Guide()
tutorial/source/csis.ipynb
uber/pyro
apache-2.0
Now create a CSIS instance: The object is initialised with the model; the guide; a PyTorch optimiser for training the guide; and the number of importance-weighted samples to draw when performing inference. The guide will be optimised for a particular value of the model/guide argument, prior_mean, so we use the value set here throughout training and inference.
optimiser = pyro.optim.Adam({'lr': 1e-3})
csis = pyro.infer.CSIS(model, guide, optimiser, num_inference_samples=50)
prior_mean = torch.tensor(1.)
tutorial/source/csis.ipynb
uber/pyro
apache-2.0
Now we 'compile' the instance to perform inference on this model: The arguments given to csis.step are passed to the model and guide when they are run to evaluate the loss.
# n_steps is set in the notebook's setup (not shown here)
for step in range(n_steps):
    csis.step(prior_mean)
tutorial/source/csis.ipynb
uber/pyro
apache-2.0
And now perform inference by importance sampling: The compiled guide program should now be able to propose a distribution for z that approximates the posterior, $p(z | x_1, x_2)$, for any $x_1, x_2$. The same prior_mean is entered again, as well as the observed values inside observations.
posterior = csis.run(prior_mean,
                     observations={"x1": torch.tensor(8.),
                                   "x2": torch.tensor(9.)})
marginal = pyro.infer.EmpiricalMarginal(posterior, "z")
tutorial/source/csis.ipynb
uber/pyro
apache-2.0
We now plot the results and compare with importance sampling: We observe $x_1 = 8$ and $x_2 = 9$. Inference is performed by taking 50 samples using CSIS, and 50 using importance sampling from the prior. We then plot the resulting approximations to the posterior distributions, along with the analytic posterior.
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt

with torch.no_grad():
    # Draw samples from empirical marginal for plotting
    csis_samples = torch.stack([marginal() for _ in range(1000)])

    # Calculate empirical marginal with importance sampling
    is_posterior = pyro.infer.Importance(model, num_samples=50).run(
        prior_mean, observations={"x1": torch.tensor(8.),
                                  "x2": torch.tensor(9.)})
    is_marginal = pyro.infer.EmpiricalMarginal(is_posterior, "z")
    is_samples = torch.stack([is_marginal() for _ in range(1000)])

    # Calculate true prior and posterior over z
    true_posterior_z = torch.arange(-10, 10, 0.05)
    true_posterior_p = dist.Normal(7.25, (5/6)**0.5).log_prob(true_posterior_z).exp()
    prior_z = true_posterior_z
    prior_p = dist.Normal(1., 5**0.5).log_prob(true_posterior_z).exp()

plt.rcParams['figure.figsize'] = [30, 15]
plt.rcParams.update({'font.size': 30})
fig, ax = plt.subplots()
plt.plot(prior_z, prior_p, 'k--', label='Prior')
plt.plot(true_posterior_z, true_posterior_p, color='k', label='Analytic Posterior')
plt.hist(csis_samples.numpy(), range=(-10, 10), bins=100, color='r', density=1,
         label="Inference Compilation")
plt.hist(is_samples.numpy(), range=(-10, 10), bins=100, color='b', density=1,
         label="Importance Sampling")
plt.xlim(-8, 10)
plt.ylim(0, 5)
plt.xlabel("z")
plt.ylabel("Estimated Posterior Probability Density")
plt.legend()
plt.show()
tutorial/source/csis.ipynb
uber/pyro
apache-2.0
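The analytic posterior used above (mean 7.25, standard deviation $\sqrt{5/6}$) follows from standard Gaussian conjugacy. With prior $z \sim \mathcal{N}(1,\,5)$ and two observations $x_i \sim \mathcal{N}(z,\,2)$ (variances, matching the model above):

$$\sigma_{\text{post}}^{2} = \left(\frac{1}{5} + \frac{2}{2}\right)^{-1} = \frac{5}{6}, \qquad \mu_{\text{post}} = \sigma_{\text{post}}^{2}\left(\frac{1}{5} + \frac{8+9}{2}\right) = \frac{5}{6}\cdot 8.7 = 7.25$$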
We'll use the Delaney dataset from the MoleculeNet suite to run our experiments in this tutorial. Let's load up our dataset for our experiments, and then make some uncertainty predictions.
import deepchem as dc
import numpy as np
import matplotlib.pyplot as plot

tasks, datasets, transformers = dc.molnet.load_delaney()
train_dataset, valid_dataset, test_dataset = datasets

model = dc.models.MultitaskRegressor(len(tasks), 1024, uncertainty=True)
model.fit(train_dataset, nb_epoch=20)
y_pred, y_std = model.predict_uncertainty(test_dataset)
examples/tutorials/25_Uncertainty_In_Deep_Learning.ipynb
lilleswing/deepchem
mit
All of this looks exactly like any other example, with just two differences. First, we add the option uncertainty=True when creating the model. This instructs it to add features to the model that are needed for estimating uncertainty. Second, we call predict_uncertainty() instead of predict() to produce the output. y_pred is the predicted outputs. y_std is another array of the same shape, where each element is an estimate of the uncertainty (standard deviation) of the corresponding element in y_pred. And that's all there is to it! Simple, right?

Of course, it isn't really that simple at all. DeepChem is doing a lot of work to come up with those uncertainties. So now let's pull back the curtain and see what is really happening. (For the full mathematical details of calculating uncertainty, see https://arxiv.org/abs/1703.04977)

To begin with, what does "uncertainty" mean? Intuitively, it is a measure of how much we can trust the predictions. More formally, we expect that the true value of whatever we are trying to predict should usually be within a few standard deviations of the predicted value. But uncertainty comes from many sources, ranging from noisy training data to bad modelling choices, and different sources behave in different ways. It turns out there are two fundamental types of uncertainty we need to take into account.

Aleatoric Uncertainty

Consider the following graph. It shows the best fit linear regression to a set of ten data points.
# Generate some fake data and plot a regression line.
x = np.linspace(0, 5, 10)
y = 0.15*x + np.random.random(10)
plot.scatter(x, y)
fit = np.polyfit(x, y, 1)
line_x = np.linspace(-1, 6, 2)
plot.plot(line_x, np.poly1d(fit)(line_x))
plot.show()
examples/tutorials/25_Uncertainty_In_Deep_Learning.ipynb
lilleswing/deepchem
mit
The line clearly does not do a great job of fitting the data. There are many possible reasons for this. Perhaps the measuring device used to capture the data was not very accurate. Perhaps y depends on some other factor in addition to x, and if we knew the value of that factor for each data point we could predict y more accurately. Maybe the relationship between x and y simply isn't linear, and we need a more complicated model to capture it. Regardless of the cause, the model clearly does a poor job of predicting the training data, and we need to keep that in mind. We cannot expect it to be any more accurate on test data than on training data. This is known as aleatoric uncertainty.

How can we estimate the size of this uncertainty? By training a model to do it, of course! At the same time it is learning to predict the outputs, it is also learning to predict how accurately each output matches the training data. For every output of the model, we add a second output that produces the corresponding uncertainty. Then we modify the loss function to make it learn both outputs at the same time: a common choice is a Gaussian negative log-likelihood of the form $\frac{(y-\hat{y})^2}{2\sigma^2}+\frac{1}{2}\log\sigma^2$, which penalizes the model both for being wrong and for claiming unwarranted confidence.

Epistemic Uncertainty

Now consider these three curves. They are fit to the same data points as before, but this time we are using 10th degree polynomials.
plot.figure(figsize=(12, 3))
line_x = np.linspace(0, 5, 50)
for i in range(3):
    plot.subplot(1, 3, i+1)
    plot.scatter(x, y)
    fit = np.polyfit(np.concatenate([x, [3]]), np.concatenate([y, [i]]), 10)
    plot.plot(line_x, np.poly1d(fit)(line_x))
plot.show()
examples/tutorials/25_Uncertainty_In_Deep_Learning.ipynb
lilleswing/deepchem
mit
Each of them perfectly interpolates the data points, yet they clearly are different models. (In fact, there are infinitely many 10th degree polynomials that exactly interpolate any ten data points.) They make identical predictions for the data we fit them to, but for any other value of x they produce different predictions. This is called epistemic uncertainty. It means the data does not fully constrain the model. Given the training data, there are many different models we could have found, and those models make different predictions.

The ideal way to measure epistemic uncertainty is to train many different models, each time using a different random seed and possibly varying hyperparameters. Then use all of them for each input and see how much the predictions vary. This is very expensive to do, since it involves repeating the whole training process many times. Fortunately, we can approximate the same effect in a less expensive way: by using dropout.

Recall that when you train a model with dropout, you are effectively training a huge ensemble of different models all at once. Each training sample is evaluated with a different dropout mask, corresponding to a different random subset of the connections in the full model. Usually we only perform dropout during training and use a single averaged mask for prediction. But instead, let's use dropout for prediction too. We can compute the output for lots of different dropout masks, then see how much the predictions vary. This turns out to give a reasonable estimate of the epistemic uncertainty in the outputs.

Uncertain Uncertainty?

Now we can combine the two types of uncertainty to compute an overall estimate of the error in each output: $$\sigma_\text{total} = \sqrt{\sigma_\text{aleatoric}^2 + \sigma_\text{epistemic}^2}$$ This is the value DeepChem reports. But how much can you trust it? Remember how I started this tutorial: deep learning models should not be used as black boxes. We want to know how reliable the outputs are. Adding uncertainty estimates does not completely eliminate the problem; it just adds a layer of indirection. Now we have estimates of how reliable the outputs are, but no guarantees that those estimates are themselves reliable.

Let's go back to the example we started with. We trained a model on the Delaney training set, then generated predictions and uncertainties for the test set. Since we know the correct outputs for all the test samples, we can evaluate how well we did. Here is a plot of the absolute error in the predicted output versus the predicted uncertainty.
abs_error = np.abs(y_pred.flatten() - test_dataset.y.flatten())
plot.scatter(y_std.flatten(), abs_error)
plot.xlabel('Standard Deviation')
plot.ylabel('Absolute Error')
plot.show()
examples/tutorials/25_Uncertainty_In_Deep_Learning.ipynb
lilleswing/deepchem
mit
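Before reading the plot, here is what the dropout-at-prediction recipe described above looks like in code. This is a generic sketch (predict_with_dropout and n_passes are illustrative names), not DeepChem's internal implementation:

import numpy as np

def mc_dropout_uncertainty(predict_with_dropout, x, n_passes=50):
    # run the network n_passes times with dropout left ON at inference;
    # the spread of the predictions estimates the epistemic uncertainty
    preds = np.stack([predict_with_dropout(x) for _ in range(n_passes)])
    return preds.mean(axis=0), preds.std(axis=0)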
The first thing we notice in the scatter plot above is that the axes have similar ranges. The model clearly has learned the overall magnitude of errors in the predictions. There also is clearly a correlation between the axes: values with larger uncertainties tend on average to have larger errors. (Strictly speaking, we expect the absolute error to be less than the predicted uncertainty. Even a very uncertain number could still happen to be close to the correct value by chance. If the model is working well, there should be more points below the diagonal than above it.)

Now let's see how well the values satisfy the expected distribution. If the standard deviations are correct, and if the errors are normally distributed (which is certainly not guaranteed to be true!), we expect 95% of the values to be within two standard deviations, and 99% to be within three standard deviations. Here is a histogram of errors as measured in standard deviations.
plot.hist(abs_error/y_std.flatten(), 20)
plot.show()
examples/tutorials/25_Uncertainty_In_Deep_Learning.ipynb
lilleswing/deepchem
mit
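As a numeric complement to the histogram, the empirical coverage can be checked directly from the arrays computed above:

# fraction of test-set errors within 2 and 3 predicted standard deviations
ratio = abs_error / y_std.flatten()
print('within 2 std:', np.mean(ratio < 2))
print('within 3 std:', np.mean(ratio < 3))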
Dataset Parameters Let's create the ParameterSet which would be added to the Bundle when calling add_dataset. Later we'll call add_dataset, which will create and attach this ParameterSet for us.
ps, constraints = phoebe.dataset.orb()
print ps
2.0/tutorials/ORB.ipynb
phoebe-project/phoebe2-docs
gpl-3.0
times
print ps['times']
2.0/tutorials/ORB.ipynb
phoebe-project/phoebe2-docs
gpl-3.0
Compute Options Let's look at the compute options (for the default PHOEBE 2 backend) that relate to dynamics and the ORB dataset
ps_compute = phoebe.compute.phoebe()
print ps_compute
2.0/tutorials/ORB.ipynb
phoebe-project/phoebe2-docs
gpl-3.0
dynamics_method
print ps_compute['dynamics_method']
2.0/tutorials/ORB.ipynb
phoebe-project/phoebe2-docs
gpl-3.0
The 'dynamics_method' parameter controls how stars and components are placed in the coordinate system as a function of time and has several choices:

* keplerian (default): Use Kepler's laws to determine positions. If the system has more than two components, then each orbit is treated independently and nested (i.e. there are no dynamical/tidal effects; the inner orbit is treated as a single point mass in the outer orbit).
* more coming soon

ltte
print ps_compute['ltte']
2.0/tutorials/ORB.ipynb
phoebe-project/phoebe2-docs
gpl-3.0
The 'ltte' parameter sets whether light travel time effects (Roemer delay) are included. If set to False, the positions and velocities are returned as they actually are for that given object at that given time. If set to True, they are instead returned as they were or will be when their light reaches the origin of the coordinate system. See the Systemic Velocity Example Script for an example of how 'ltte' and 'vgamma' (systemic velocity) interplay. Synthetics
b.add_dataset('orb', times=np.linspace(0,3,201))
b.run_compute()
b['orb@model'].twigs
print b['times@primary@orb01@orb@model']
print b['xs@primary@orb01@orb@model']
print b['vxs@primary@orb01@orb@model']
2.0/tutorials/ORB.ipynb
phoebe-project/phoebe2-docs
gpl-3.0
Plotting

By default, orb datasets plot as 'ys' vs 'xs' (plane of sky). Notice the y-scale here with inclination set to 90.
axs, artists = b['orb@model'].plot()
2.0/tutorials/ORB.ipynb
phoebe-project/phoebe2-docs
gpl-3.0
As always, you have access to any of the arrays for either axis, so if you want to plot 'vxs' vs 'times':
axs, artists = b['orb@model'].plot(x='times', y='vxs')
2.0/tutorials/ORB.ipynb
phoebe-project/phoebe2-docs
gpl-3.0
3d axes are not yet supported for orbits, but hopefully will be soon. Once they are supported, they will default to x, y, and z positions plotted on their respective axes.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
axs, artists = b['orb@model'].plot(xlim=(-4,4), ylim=(-4,4), zlim=(-4,4))
2.0/tutorials/ORB.ipynb
phoebe-project/phoebe2-docs
gpl-3.0
Data

https://www.drivendata.org/competitions/54/machine-learning-with-a-heart/page/109/

- Numeric
  - slope_of_peak_exercise_st_segment (int, semi-categorical, 1-3)
  - resting_blood_pressure (int)
  - chest_pain_type (int, semi-categorical, 1-4)
  - num_major_vessels (int, semi-categorical, 0-3)
  - resting_ekg_results (int, semi-categorical, 0-2)
  - serum_cholesterol_mg_per_dl (int)
  - oldpeak_eq_st_depression (float)
  - age (int)
  - max_heart_rate_achieved (int)
- Categorical
  - thal: normal, fixed_defect, reversible_defect
  - fasting_blood_sugar_gt_120_mg_per_dl (blood sugar > 120): 0, 1
  - sex: 0 (f), 1 (m)
  - exercise_induced_angina: 0, 1
features = pd.read_csv('train_values.csv')
labels = pd.read_csv('train_labels.csv')

features.head()
labels.head()

FEATURES = ['slope_of_peak_exercise_st_segment', 'thal', 'resting_blood_pressure',
            'chest_pain_type', 'num_major_vessels', 'fasting_blood_sugar_gt_120_mg_per_dl',
            'resting_ekg_results', 'serum_cholesterol_mg_per_dl', 'oldpeak_eq_st_depression',
            'sex', 'age', 'max_heart_rate_achieved', 'exercise_induced_angina']
LABEL = 'heart_disease_present'

EXPLANATIONS = {'slope_of_peak_exercise_st_segment': 'Quality of Blood Flow to the Heart',
                'thal': 'Thallium Stress Test Measuring Blood Flow to the Heart',
                'resting_blood_pressure': 'Resting Blood Pressure',
                'chest_pain_type': 'Chest Pain Type (1-4)',
                'num_major_vessels': 'Major Vessels (0-3) Colored by Flourosopy',
                'fasting_blood_sugar_gt_120_mg_per_dl': 'Fasting Blood Sugar > 120 mg/dl',
                'resting_ekg_results': 'Resting Electrocardiographic Results (0-2)',
                'serum_cholesterol_mg_per_dl': 'Serum Cholesterol in mg/dl',
                'oldpeak_eq_st_depression': 'Exercise vs. Rest\nA Measure of Abnormality in Electrocardiograms',
                'age': 'Age (years)',
                'sex': 'Sex (m/f)',
                'max_heart_rate_achieved': 'Maximum Heart Rate Achieved (bpm)',
                'exercise_induced_angina': 'Exercise-Induced Chest Pain (yes/no)'}

NUMERICAL_FEATURES = ['slope_of_peak_exercise_st_segment', 'resting_blood_pressure',
                      'chest_pain_type', 'num_major_vessels', 'resting_ekg_results',
                      'serum_cholesterol_mg_per_dl', 'oldpeak_eq_st_depression',
                      'age', 'max_heart_rate_achieved']
CATEGORICAL_FEATURES = ['thal', 'fasting_blood_sugar_gt_120_mg_per_dl', 'sex',
                        'exercise_induced_angina']
CATEGORICAL_FEATURE_VALUES = {'thal': [[0, 1, 2], ['Normal', 'Fixed Defect', 'Reversible Defect']],
                              'fasting_blood_sugar_gt_120_mg_per_dl': [[0, 1], ['No', 'Yes']],
                              'sex': [[0, 1], ['F', 'M']],
                              'exercise_induced_angina': [[0, 1], ['No', 'Yes']]}
SEMI_CATEGORICAL_FEATURES = ['slope_of_peak_exercise_st_segment', 'chest_pain_type',
                             'num_major_vessels', 'resting_ekg_results']
SEMI_CATEGORICAL_FEATURE_LIMITS = {'slope_of_peak_exercise_st_segment': [1, 3],
                                   'chest_pain_type': [1, 4],
                                   'num_major_vessels': [0, 3],
                                   'resting_ekg_results': [0, 2]}
LABEL_VALUES = [[0, 1], ['No', 'Yes']]

# one-hot encode categorical features with more than two values
for feature in CATEGORICAL_FEATURES:
    if len(CATEGORICAL_FEATURE_VALUES[feature][0]) > 2:
        onehot_feature = pd.get_dummies(features[feature])
        feature_index = features.columns.get_loc(feature)
        features.drop(feature, axis=1, inplace=True)
        onehot_feature.columns = [f'{feature}={feature_value}'
                                  for feature_value in onehot_feature.columns]
        for colname in onehot_feature.columns[::-1]:
            features.insert(feature_index, colname, onehot_feature[colname])

features.head()

x = features.values[:, 1:].astype(int)
y = labels.values[:, -1].astype(int)
print('x =\n', x)
print('y =\n', y)

stratified_kfold_validator = sklearn.model_selection.StratifiedKFold(n_splits=5, shuffle=True)
stratified_kfold_validator
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Decision Trees
tree_score_df = pd.DataFrame(columns=['Fold', 'Accuracy', 'Precision', 'Recall'])

for fold_ind, (train_indices, test_indices) in enumerate(stratified_kfold_validator.split(x, y), 1):
    x_train, x_test = x[train_indices], x[test_indices]
    y_train, y_test = y[train_indices], y[test_indices]

    dec_tree = sklearn.tree.DecisionTreeClassifier(min_samples_split=5)
    dec_tree.fit(x_train, y_train)

    acc = dec_tree.score(x_test, y_test)
    y_pred = dec_tree.predict(x_test)
    precision = sklearn.metrics.precision_score(y_test, y_pred)
    recall = sklearn.metrics.recall_score(y_test, y_pred)
    tree_score_df.loc[fold_ind] = [f'{fold_ind}', f'{acc*100:.2f} %',
                                   f'{precision*100:.2f} %', f'{recall*100:.2f} %']

    tree_plot_data = sklearn.tree.export_graphviz(dec_tree, out_file=None,
                                                  feature_names=features.columns[1:],
                                                  class_names=[f'{labels.columns[1]}={label_value}'
                                                               for label_value in LABEL_VALUES[1]],
                                                  filled=True, rounded=True,
                                                  special_characters=True)
    graph = graphviz.Source(tree_plot_data)
    graph.render(f'Fold {fold_ind}')

next_ind = len(tree_score_df) + 1
mean_acc = tree_score_df['Accuracy'].apply(lambda n: float(n[:-2])).mean()
mean_prec = tree_score_df['Precision'].apply(lambda n: float(n[:-2])).mean()
mean_rec = tree_score_df['Recall'].apply(lambda n: float(n[:-2])).mean()
tree_score_df.loc[next_ind] = ['Avg', f'{mean_acc:.2f} %', f'{mean_prec:.2f} %', f'{mean_rec:.2f} %']
tree_score_df
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
KNN
knn_mean_score_df = pd.DataFrame(columns=['k', 'Avg. Accuracy', 'Avg. Precision', 'Avg. Recall'])

# Scale samples to unit norm; no improvement over un-normalized data was observed.
normalized_x = sklearn.preprocessing.normalize(x)

for k in list(range(1, 10)) + [math.ceil(len(features) * step) for step in [0.1, 0.2, 0.3, 0.4, 0.5]]:
    knn_score_df = pd.DataFrame(columns=['Fold', 'Accuracy', 'Precision', 'Recall'])

    for fold_ind, (train_indices, test_indices) in enumerate(stratified_kfold_validator.split(x, y), 1):
        x_train, x_test = normalized_x[train_indices], normalized_x[test_indices]
        y_train, y_test = y[train_indices], y[test_indices]

        knn = sklearn.neighbors.KNeighborsClassifier(n_neighbors=k)
        knn.fit(x_train, y_train)

        acc = knn.score(x_test, y_test)
        y_pred = knn.predict(x_test)
        precision = sklearn.metrics.precision_score(y_test, y_pred)
        recall = sklearn.metrics.recall_score(y_test, y_pred)
        knn_score_df.loc[fold_ind] = [f'{fold_ind}', f'{acc*100:.2f} %',
                                      f'{precision*100:.2f} %', f'{recall*100:.2f} %']

    next_ind = len(knn_score_df) + 1
    mean_acc = knn_score_df['Accuracy'].apply(lambda n: float(n[:-2])).mean()
    mean_prec = knn_score_df['Precision'].apply(lambda n: float(n[:-2])).mean()
    mean_rec = knn_score_df['Recall'].apply(lambda n: float(n[:-2])).mean()
    # use the fold means here, not the values from the last fold
    knn_score_df.loc[next_ind] = ['Avg', f'{mean_acc:.2f} %', f'{mean_prec:.2f} %', f'{mean_rec:.2f} %']
    knn_mean_score_df.loc[k] = [k, f'{mean_acc:.2f} %', f'{mean_prec:.2f} %', f'{mean_rec:.2f} %']

    # print(f'k = {k}')
    # print(knn_score_df)
    # print()

best_accuracy = knn_mean_score_df.sort_values(by=['Avg. Accuracy']).iloc[-1]
print('Best avg. accuracy is', best_accuracy['Avg. Accuracy'], 'for k =', best_accuracy['k'], '.')
knn_mean_score_df.sort_values(by=['Avg. Accuracy'])
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
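A side note on the normalization above: sklearn.preprocessing.normalize rescales each sample (row) to unit norm, which is rarely what is wanted for tabular features like these; per-feature standardization is the more usual choice. A small sketch of that alternative, fit on the training fold only so that test statistics do not leak into training (not run here):

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
# learn per-feature mean/std on the training fold, apply to both folds
x_train_std = scaler.fit_transform(x_train)
x_test_std = scaler.transform(x_test)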
Naive Bayes
nb_classifier_types = [sklearn.naive_bayes.GaussianNB, sklearn.naive_bayes.MultinomialNB,
                       sklearn.naive_bayes.ComplementNB, sklearn.naive_bayes.BernoulliNB]

nb_mean_score_df = pd.DataFrame(columns=['Type', 'Avg. Accuracy', 'Avg. Precision', 'Avg. Recall'])

for nb_classifier_type in nb_classifier_types:
    nb_score_df = pd.DataFrame(columns=['Fold', 'Accuracy', 'Precision', 'Recall'])

    for fold_ind, (train_indices, test_indices) in enumerate(stratified_kfold_validator.split(x, y), 1):
        x_train, x_test = x[train_indices], x[test_indices]
        y_train, y_test = y[train_indices], y[test_indices]

        nb = nb_classifier_type()
        nb.fit(x_train, y_train)

        acc = nb.score(x_test, y_test)
        y_pred = nb.predict(x_test)
        precision = sklearn.metrics.precision_score(y_test, y_pred)
        recall = sklearn.metrics.recall_score(y_test, y_pred)
        nb_score_df.loc[fold_ind] = [f'{fold_ind}', f'{acc*100:.2f} %',
                                     f'{precision*100:.2f} %', f'{recall*100:.2f} %']

    next_ind = len(nb_score_df) + 1
    mean_acc = nb_score_df['Accuracy'].apply(lambda n: float(n[:-2])).mean()
    mean_prec = nb_score_df['Precision'].apply(lambda n: float(n[:-2])).mean()
    mean_rec = nb_score_df['Recall'].apply(lambda n: float(n[:-2])).mean()
    nb_score_df.loc[next_ind] = ['Avg', f'{mean_acc:.2f} %', f'{mean_prec:.2f} %', f'{mean_rec:.2f} %']
    nb_mean_score_df.loc[len(nb_mean_score_df) + 1] = [nb_classifier_type.__name__, f'{mean_acc:.2f} %',
                                                       f'{mean_prec:.2f} %', f'{mean_rec:.2f} %']

    print(nb_classifier_type.__name__)
    print()
    print(nb_score_df)
    print()

nb_mean_score_df.sort_values(by=['Avg. Accuracy'])
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
SVM
svm_classifier_type = sklearn.svm.SVC

# Results so far (Avg. acc / prec / rec):
#
# kernel: linear                        -> 78.89 %  78.31 %  73.75 %
# kernel: linear, C: 0.1                -> 84.44 %  88.54 %  75.00 %
#   * No improvement for larger C.
#
# kernel: poly, max_iter: 1             -> 46.67 %  34.67 %  21.25 %
# kernel: poly, max_iter: 10            -> 57.22 %  51.27 %  66.25 %
# kernel: poly, max_iter: 100           -> 61.67 %  60.18 %  40.00 %
# kernel: poly, max_iter: 100, coef0: 1 -> 62.22 %  62.19 %  41.25 %
#   * No improvement for more iters, larger C, higher degree, or different coef0.
#
# kernel: rbf, max_iter: 10             -> 48.89 %  46.07 %  72.50 %
# kernel: rbf, max_iter: 100            -> 60.00 %  74.00 %  17.50 %
# kernel: rbf, max_iter: 1000           -> 60.56 %  78.33 %  15.00 %
args = {'kernel': 'linear', 'C': 0.1}

svm_score_df = pd.DataFrame(columns=['Fold', 'Accuracy', 'Precision', 'Recall'])
# normalized_x = sklearn.preprocessing.normalize(x)

for fold_ind, (train_indices, test_indices) in enumerate(stratified_kfold_validator.split(x, y), 1):
    x_train, x_test = x[train_indices], x[test_indices]
    y_train, y_test = y[train_indices], y[test_indices]

    svm = svm_classifier_type(**args, gamma='scale', cache_size=256)
    svm.fit(x_train, y_train)

    acc = svm.score(x_test, y_test)
    y_pred = svm.predict(x_test)
    precision = sklearn.metrics.precision_score(y_test, y_pred)
    recall = sklearn.metrics.recall_score(y_test, y_pred)
    svm_score_df.loc[fold_ind] = [f'{fold_ind}', f'{acc*100:.2f} %',
                                  f'{precision*100:.2f} %', f'{recall*100:.2f} %']

next_ind = len(svm_score_df) + 1
mean_acc = svm_score_df['Accuracy'].apply(lambda n: float(n[:-2])).mean()
mean_prec = svm_score_df['Precision'].apply(lambda n: float(n[:-2])).mean()
mean_rec = svm_score_df['Recall'].apply(lambda n: float(n[:-2])).mean()
svm_score_df.loc[next_ind] = ['Avg', f'{mean_acc:.2f} %', f'{mean_prec:.2f} %', f'{mean_rec:.2f} %']
print(svm_score_df)
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Shallow Neural Nets

Import deps
import pandas as pd
from sklearn.model_selection import train_test_split

import keras
from keras.models import Sequential
from keras.layers import (Input, Dense, Conv2D, MaxPooling2D, Dropout, Flatten,
                          Activation, BatchNormalization, LeakyReLU)
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Import data
features = pd.read_csv('train_values.csv')
labels = pd.read_csv('train_labels.csv')

print(labels.head())
features.head()

FEATURES = ['slope_of_peak_exercise_st_segment', 'thal', 'resting_blood_pressure',
            'chest_pain_type', 'num_major_vessels', 'fasting_blood_sugar_gt_120_mg_per_dl',
            'resting_ekg_results', 'serum_cholesterol_mg_per_dl', 'oldpeak_eq_st_depression',
            'sex', 'age', 'max_heart_rate_achieved', 'exercise_induced_angina']
LABEL = 'heart_disease_present'

EXPLANATIONS = {'slope_of_peak_exercise_st_segment': 'Quality of Blood Flow to the Heart',
                'thal': 'Thallium Stress Test Measuring Blood Flow to the Heart',
                'resting_blood_pressure': 'Resting Blood Pressure',
                'chest_pain_type': 'Chest Pain Type (1-4)',
                'num_major_vessels': 'Major Vessels (0-3) Colored by Flourosopy',
                'fasting_blood_sugar_gt_120_mg_per_dl': 'Fasting Blood Sugar > 120 mg/dl',
                'resting_ekg_results': 'Resting Electrocardiographic Results (0-2)',
                'serum_cholesterol_mg_per_dl': 'Serum Cholesterol in mg/dl',
                'oldpeak_eq_st_depression': 'Exercise vs. Rest\nA Measure of Abnormality in Electrocardiograms',
                'age': 'Age (years)',
                'sex': 'Sex (m/f)',
                'max_heart_rate_achieved': 'Maximum Heart Rate Achieved (bpm)',
                'exercise_induced_angina': 'Exercise-Induced Chest Pain (yes/no)'}

NUMERICAL_FEATURES = ['slope_of_peak_exercise_st_segment', 'resting_blood_pressure',
                      'chest_pain_type', 'num_major_vessels', 'resting_ekg_results',
                      'serum_cholesterol_mg_per_dl', 'oldpeak_eq_st_depression',
                      'age', 'max_heart_rate_achieved']
CATEGORICAL_FEATURES = ['thal', 'fasting_blood_sugar_gt_120_mg_per_dl', 'sex',
                        'exercise_induced_angina']
CATEGORICAL_FEATURE_VALUES = {'thal': [[0, 1, 2], ['Normal', 'Fixed Defect', 'Reversible Defect']],
                              'fasting_blood_sugar_gt_120_mg_per_dl': [[0, 1], ['No', 'Yes']],
                              'sex': [[0, 1], ['F', 'M']],
                              'exercise_induced_angina': [[0, 1], ['No', 'Yes']]}
SEMI_CATEGORICAL_FEATURES = ['slope_of_peak_exercise_st_segment', 'chest_pain_type',
                             'num_major_vessels', 'resting_ekg_results']
SEMI_CATEGORICAL_FEATURE_LIMITS = {'slope_of_peak_exercise_st_segment': [1, 3],
                                   'chest_pain_type': [1, 4],
                                   'num_major_vessels': [0, 3],
                                   'resting_ekg_results': [0, 2]}
LABEL_VALUES = [[0, 1], ['No', 'Yes']]

for feature in CATEGORICAL_FEATURES:
    if len(CATEGORICAL_FEATURE_VALUES[feature][0]) > 2:
        onehot_feature = pd.get_dummies(features[feature])
        feature_index = features.columns.get_loc(feature)
        features.drop(feature, axis=1, inplace=True)
        onehot_feature.columns = ['%s=%s' % (feature, feature_value)
                                  for feature_value in onehot_feature.columns]
        for colname in onehot_feature.columns[::-1]:
            features.insert(feature_index, colname, onehot_feature[colname])

x = features.values[:, 1:].astype(int)
y = labels.values[:, -1].astype(int)
print('x =\n', x)
print('y =\n', y)

# for fold_ind, (train_indices, test_indices) in enumerate(stratified_kfold_validator.split(x, y), 1):
#     x_train, x_test = x[train_indices], x[test_indices]
#     y_train, y_test = y[train_indices], y[test_indices]

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

print(x_train.shape, x_test.shape)
print(y_train.shape, y_test.shape)
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Define model
input_shape = (1, 15)
num_classes = 2

print(x.shape)
print(y.shape)
print(x[:1])
print(y[:1])
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Architecture 0 - Inflating Dense 120-225, 0.5 Dropout, Batch Norm, Sigmoid Classification
arch_cnt = 'arch-0-3'

model = Sequential()
model.add(
    Dense(120, input_dim=15,
          kernel_initializer='normal',
          # kernel_regularizer=keras.regularizers.l2(0.001),  # loses 0.2 acc
          activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(225, kernel_initializer='normal', activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis=1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()

%%time

# earlystop_cb = keras.callbacks.EarlyStopping(
#     monitor='val_loss',
#     patience=5, restore_best_weights=True,
#     verbose=1)

reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=0.05,
    patience=5, min_lr=0.001,
    verbose=1)

# es_cb = keras.callbacks.EarlyStopping(
#     monitor='val_loss',
#     min_delta=0.1,
#     patience=7,
#     verbose=1,
#     mode='auto')
# 'restore_best_weights' in dir(keras.callbacks.EarlyStopping())  # FALSE = library is not up-to-date

tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt,
                                    histogram_freq=0,
                                    write_graph=True,
                                    write_images=True)

epochs = 50
batch_size = 32

model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    shuffle=False,
    validation_data=(x_test, y_test),
    # es_cb is only defined in the commented block above, so it is not passed here
    callbacks=[reduce_lr_cb, tb_cb]
)

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Architecture 1 - Deflating Dense 225-112, 0.5 Dropout, Batch Norm, Sigmoid Classification
arch_cnt = 'arch-1'

model = Sequential()
model.add(
    Dense(225, input_dim=15,
          kernel_initializer='normal',
          # kernel_regularizer=keras.regularizers.l2(0.001),  # loses 0.2 acc
          activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(112, kernel_initializer='normal', activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis=1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()

%%time

# earlystop_cb = keras.callbacks.EarlyStopping(
#     monitor='val_loss',
#     patience=5, restore_best_weights=True,
#     verbose=1)

reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=0.05,
    patience=7, min_lr=0.001,
    verbose=1)

tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt,
                                    histogram_freq=0,
                                    write_graph=True,
                                    write_images=True)

epochs = 50
batch_size = 32

model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    shuffle=False,
    validation_data=(x_test, y_test),
    callbacks=[reduce_lr_cb, tb_cb]
    # callbacks=[earlystop_cb, reduce_lr_cb]
)

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Architecture 2 - Deflating Dense 225-112, 0.5 Dropout, Batch Norm, Sigmoid Classification, HE Initialization
arch_cnt = 'arch-2'

model = Sequential()
model.add(
    Dense(225, input_dim=15,
          kernel_initializer='he_uniform',
          kernel_regularizer=keras.regularizers.l2(0.001),  # loses 0.2 acc
          activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(112, kernel_initializer='he_uniform', activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis=1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()

%%time

# earlystop_cb = keras.callbacks.EarlyStopping(
#     monitor='val_loss',
#     patience=5, restore_best_weights=True,
#     verbose=1)

reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=0.05,
    patience=7, min_lr=0.001,
    verbose=1)

tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt,
                                    histogram_freq=0,
                                    write_graph=True,
                                    write_images=True)

epochs = 50
batch_size = 32

model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    shuffle=False,
    validation_data=(x_test, y_test),
    callbacks=[reduce_lr_cb, tb_cb]
    # callbacks=[earlystop_cb, reduce_lr_cb]
)

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Architecture 3 - Deflating Dense 225-112, 0.5 Dropout, Batch Norm, Sigmoid Classification, L2 = 1e-4
arch_cnt = 'arch-3-4'

model = Sequential()
model.add(
    Dense(225, input_dim=15,
          kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.0001),  # loses 0.2 acc
          activation='relu'))
model.add(Dropout(0.5))
model.add(
    Dense(112,
          kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.0001),  # loses 0.2 acc
          activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis=1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()

%%time

# earlystop_cb = keras.callbacks.EarlyStopping(
#     monitor='val_loss',
#     patience=5, restore_best_weights=True,
#     verbose=1)

reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=0.05,
    patience=7, min_lr=0.001,
    verbose=1)

tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt,
                                    histogram_freq=0,
                                    write_graph=True,
                                    write_images=True)

epochs = 50
batch_size = 32

model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    shuffle=False,
    validation_data=(x_test, y_test),
    callbacks=[reduce_lr_cb, tb_cb]
    # callbacks=[earlystop_cb, reduce_lr_cb]
)

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Architecture 3 - Deflating Dense 225-112, 0.5 Dropout, Batch Norm, Sigmoid Classification, L2 = 1e-3
arch_cnt = 'arch-3-3'

model = Sequential()
model.add(
    Dense(225, input_dim=15,
          kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.001),  # loses 0.2 acc
          activation='relu'))
model.add(Dropout(0.5))
model.add(
    Dense(112,
          kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.001),  # loses 0.2 acc
          activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis=1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()

%%time

# earlystop_cb = keras.callbacks.EarlyStopping(
#     monitor='val_loss',
#     patience=5, restore_best_weights=True,
#     verbose=1)

reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=0.05,
    patience=7, min_lr=0.001,
    verbose=1)

tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt,
                                    histogram_freq=0,
                                    write_graph=True,
                                    write_images=True)

epochs = 50
batch_size = 32

model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    shuffle=False,
    validation_data=(x_test, y_test),
    callbacks=[reduce_lr_cb, tb_cb]
    # callbacks=[earlystop_cb, reduce_lr_cb]
)

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Architecture 3 - Deflating Dense 225-112, 0.5 Dropout, Batch Norm, Sigmoid Classification, L2 = 1e-2
arch_cnt = 'arch-3-2'

model = Sequential()
model.add(
    Dense(225, input_dim=15,
          kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.01),  # loses 0.2 acc
          activation='relu'))
model.add(Dropout(0.5))
model.add(
    Dense(112,
          kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.01),  # loses 0.2 acc
          activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis=1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()

%%time

# earlystop_cb = keras.callbacks.EarlyStopping(
#     monitor='val_loss',
#     patience=5, restore_best_weights=True,
#     verbose=1)

reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=0.05,
    patience=7, min_lr=0.001,
    verbose=1)

tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt,
                                    histogram_freq=0,
                                    write_graph=True,
                                    write_images=True)

epochs = 50
batch_size = 32

model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    shuffle=False,
    validation_data=(x_test, y_test),
    callbacks=[reduce_lr_cb, tb_cb]
    # callbacks=[earlystop_cb, reduce_lr_cb]
)

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Architecture 3 - Deflating Dense 225-112, 0.5 Dropout, Batch Norm, Sigmoid Classification, L2 = 1e-1
arch_cnt = 'arch-3-1'

model = Sequential()
model.add(
    Dense(225, input_dim=15,
          kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.1),  # loses 0.2 acc
          activation='relu'))
model.add(Dropout(0.5))
model.add(
    Dense(112,
          kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.1),  # loses 0.2 acc
          activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis=1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()

%%time

# earlystop_cb = keras.callbacks.EarlyStopping(
#     monitor='val_loss',
#     patience=5, restore_best_weights=True,
#     verbose=1)

reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=0.05,
    patience=7, min_lr=0.001,
    verbose=1)

tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt,
                                    histogram_freq=0,
                                    write_graph=True,
                                    write_images=True)

epochs = 50
batch_size = 32

model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    shuffle=False,
    validation_data=(x_test, y_test),
    callbacks=[reduce_lr_cb, tb_cb]
    # callbacks=[earlystop_cb, reduce_lr_cb]
)

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Ensemble Methods
import matplotlib.pyplot as plt
%matplotlib inline
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Bagging Strategies

Random Forests
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score  # needed for the accuracy check below

# x_train, x_test, y_train, y_test
clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)
clf.fit(x_train, y_train)
print(clf.feature_importances_)
print(clf.predict(x_test))

# make predictions for test data
y_pred = clf.predict(x_test)
predictions = [round(value) for value in y_pred]

# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
ExtraTrees
from sklearn.ensemble import ExtraTreesClassifier
from mlxtend.plotting import plot_learning_curves  # assumed source of plot_learning_curves

# x_train, x_test, y_train, y_test
clf = ExtraTreesClassifier(n_estimators=100, max_depth=2, random_state=0)
clf.fit(x_train, y_train)
print(clf.feature_importances_)
print(clf.predict(x_test))

# make predictions for test data
y_pred = clf.predict(x_test)
predictions = [round(value) for value in y_pred]

# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))

fig = plt.figure(figsize=(10, 5))
plot_learning_curves(x_train, y_train, x_test, y_test, clf)
plt.show()
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
Stacking Strategies

SuperLearner

Boosting Strategies

xgboost
# import xgboost as xgb
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score

# x_train, x_test, y_train, y_test
model = XGBClassifier()
model.fit(x_train, y_train)
print(model)

# make predictions for test data
y_pred = model.predict(x_test)
predictions = [round(value) for value in y_pred]

# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
kaggle/machine-learning-with-a-heart/Lab4.ipynb
xR86/ml-stuff
mit
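As a quick follow-up, XGBoost can also report which features drive its splits via its built-in plot_importance helper, applied to the model fitted above:

from xgboost import plot_importance
import matplotlib.pyplot as plt

# bar chart of per-feature importance from the trained booster
plot_importance(model)
plt.show()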
When to use python? -- 50xp, Status : Earned

Python is a pretty versatile language. For what applications can you use Python?

Ans: All of the above

Any comments? -- 100xp, Status : Earned

We can add comments to Python scripts. Comments are short snippets of plain English that help you and others understand what the code is about. To add a comment, use the '#' tag and insert it at the front of the text. Comments don't affect the code's results: they are ignored by the Python interpreter.
# Just testing division
print(5 / 8)

# Addition works too ( added comment here )
print(7 + 10)
.ipynb_checkpoints/DAT208x - Week 1 - Python Basics-checkpoint.ipynb
dataDogma/Computer-Science
gpl-3.0
Python as a calculator -- 100xp, Status : Earned

Python is perfectly suited to do basic calculations. Apart from addition, subtraction, multiplication and division, there is also support for more advanced operations such as:

Exponentiation: **. This operator raises the number to its left to the power of the number to its right: for example 4**2 will give 16.

Modulo: %. It returns the remainder of the division of the number to the left by the number on its right: for example 18 % 7 equals 4.
"""Suppose you have $100, which you can invest with a 10% return each year. After one year, it's 100 x 1.1 = 110 dollars, and after two years it's 100 x 1.1 x 1.1 = 121. Add code to calculate how much money you end up with after 7 years""" print(5 + 5) print(5 - 5) # Multiplication and division print(3 * 5) print(10 / 2) # Exponentiation print(4 ** 2) # Modulo print(18 % 7) # How much is your $100 worth after 7 years? # first try was unsuccesful, so used the only two things * and ** operators. print ( 100 * ( 1.1 ** 7 ) )
.ipynb_checkpoints/DAT208x - Week 1 - Python Basics-checkpoint.ipynb
dataDogma/Computer-Science
gpl-3.0
Parseval's theorem, when applied to the discrete Fourier transform, looks like this:

$\sum_{n=0}^{N-1}|x[n]|^{2}=\frac{1}{N}\sum_{k=0}^{N-1}|X[k]|^{2}$

Source: https://en.wikipedia.org/wiki/Parseval%27s_theorem
# check Parseval's theorem holds numerically
nsamps = 1000

# window
w = signal.tukey(nsamps, 0.1)

a = np.random.normal(0, 1, nsamps) * w
A = np.fft.fft(a)
b = (1/np.sqrt(2*np.pi)) * (signal.gaussian(nsamps, 10))
B = np.fft.fft(b)
c = np.convolve(a, b, 'same')
C = np.fft.fft(c)

# signal c is convolution of Gaussian noise (a) with a Gaussian wavelet (b)
# C is the fourier transform of c.
sumt = np.sum(c**2)
sumf = np.sum(np.abs(C)**2) / nsamps
print('time domain', sumt)
print('fourier domain', sumf)
print('difference', np.abs(sumt - sumf))
print('percent', (np.abs(sumt - sumf)/sumt)*100)
devel/Parseval.ipynb
JackWalpole/splitwavepy
mit
Furthermore, by the convolution theorem, C = A * B (elementwise in the frequency domain), and therefore sum(|C|^2) = sum(|A|^2 * |B|^2).
AB = A * B ab = np.fft.ifft(AB) plt.plot(np.roll(ab,500)) plt.plot(c) sumAB = np.sum(np.abs(A**2*B**2))/nsamps print('sum A*B',sumAB) print('difference',np.abs(sumt-sumAB)) print('percent',(np.abs(sumt-sumAB)/sumt)*100)
devel/Parseval.ipynb
JackWalpole/splitwavepy
mit
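Spelled out, combining the two results (a consistency note added here; the identity is exact for circular convolution, which is why the 'same'-mode linear convolution above matches only to within a small percentage): $\sum_{n}|c[n]|^{2} = \frac{1}{N}\sum_{k}|C[k]|^{2} = \frac{1}{N}\sum_{k}|A[k]|^{2}\,|B[k]|^{2}$ This is exactly the quantity computed as sumAB above.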
Parseval's theorem as applied in Silver and Chan (and Walsh), with the first and last spectral samples down-weighted by one half: $\sum_{n=0}^{N-1}|x[n]|^{2}=\frac{1}{N}\sum_{k=1}^{N-2}|X[k]|^{2}+\frac{1}{2N}\left(|X[0]|^{2}+|X[N-1]|^{2}\right)$ Source: https://en.wikipedia.org/wiki/Parseval%27s_theorem
from scipy import stats

def ndf(y, taper=True, detrend=True):
    """
    Uses the improvement found by Walsh et al (2013).
    By default will detrend data to ensure zero mean
    and will taper edges using a Tukey filter affecting amplitudes of 5% of data at edges
    """

    if taper is True:
        y = y * signal.tukey(y.size, 0.05)

    if detrend is True:
        # ensure no trend on the noise trace
        y = signal.detrend(y)

    Y = np.fft.fft(y)
    amp = np.absolute(Y)

    # estimate E2 and E4 following Walsh et al (2013)
    a = np.ones(Y.size)
    a[0] = a[-1] = 0.5
    E2 = np.sum(a * amp**2)
    E4 = np.sum((4 * a**2 / 3) * amp**4)

    ndf = 2 * (2 * E2**2 / E4 - 1)

    return ndf

def ndf2(y, taper=True, detrend=True):
    """
    Variant computed from the squared amplitude spectrum, without endpoint weighting.
    """

    if taper is True:
        y = y * signal.tukey(y.size, 0.05)

    if detrend is True:
        # ensure no trend on the noise trace
        y = signal.detrend(y)

    Y = np.fft.fft(y)
    amp = np.absolute(Y)**2

    E2 = np.sum(amp**2)
    E4 = np.sum((4 / 3) * amp**4)

    ndf = 2 * (2 * E2**2 / E4 - 1)

    return ndf

print(ndf(c))
print(ndf2(c))

stats.moment(c, moment=4)
devel/Parseval.ipynb
JackWalpole/splitwavepy
mit
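A rough plausibility check, added here and not part of the original notebook: since degrees of freedom scale with bandwidth, broadband noise should give a much larger ndf than the band-limited convolved signal c.

# broadband Gaussian noise vs the band-limited signal c from the earlier cells
white = np.random.normal(0, 1, nsamps)
print('ndf, white noise:', ndf(white))
print('ndf, band-limited signal c:', ndf(c))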
NumPy tested with version 1.9 (1.13.1)
import numpy as np np.__version__
Jupyter/LoadDataMimic-II.ipynb
davidgutierrez/HeartRatePatterns
gpl-3.0
Requests tested with version 2.7 (2.18.1) Required for using the Shim interface to SciDB.
import requests requests.__version__
Jupyter/LoadDataMimic-II.ipynb
davidgutierrez/HeartRatePatterns
gpl-3.0
Pandas (optional) tested with version 0.15 (0.20.3) Required only for importing/exporting SciDB arrays as Pandas Dataframe objects.
import pandas as pd pd.__version__
Jupyter/LoadDataMimic-II.ipynb
davidgutierrez/HeartRatePatterns
gpl-3.0
SciPy (optional) tested with versions 0.10-0.12 (0.19.0) Required only for importing/exporting SciDB arrays as SciPy sparse matrices.
import scipy scipy.__version__
Jupyter/LoadDataMimic-II.ipynb
davidgutierrez/HeartRatePatterns
gpl-3.0
2) Import scidbpy. Install with: pip install git+http://github.com/paradigm4/scidb-py.git@devel
import scidbpy scidbpy.__version__ from scidbpy import connect
Jupyter/LoadDataMimic-II.ipynb
davidgutierrez/HeartRatePatterns
gpl-3.0
Connect to the database server
sdb = connect('http://localhost:8080')
Jupyter/LoadDataMimic-II.ipynb
davidgutierrez/HeartRatePatterns
gpl-3.0
3) Read the file listing each of the waveforms
import urllib.request  # urllib2 in Python 2; the library that handles URL requests

target_url = "https://www.physionet.org/physiobank/database/mimic2wdb/matched/RECORDS-waveforms"
data = urllib.request.urlopen(target_url)  # a file-like object that works just like a file
lines = data.readlines()
line = str(lines[100])
Jupyter/LoadDataMimic-II.ipynb
davidgutierrez/HeartRatePatterns
gpl-3.0
Strip the special characters
carpeta,onda = line.replace('b\'','').replace('\'','').replace('\\n','').split("/") onda
Jupyter/LoadDataMimic-II.ipynb
davidgutierrez/HeartRatePatterns
gpl-3.0
4) Import WFDB to connect to PhysioNet
import wfdb sig, fields = wfdb.srdsamp(onda,pbdir='mimic2wdb/matched/'+carpeta) #, sampfrom=11000 print(sig) print("signame: " + str(fields['signame'])) print("units: " + str(fields['units'])) print("fs: " + str(fields['fs'])) print("comments: " + str(fields['comments'])) print("fields: " + str(fields))
Jupyter/LoadDataMimic-II.ipynb
davidgutierrez/HeartRatePatterns
gpl-3.0
Find the position of the lead II signal
signalII = None
try:
    signalII = fields['signame'].index("II")
except ValueError:
    print("List does not contain the value")
if signalII is not None:
    print("List contains the value")
Jupyter/LoadDataMimic-II.ipynb
davidgutierrez/HeartRatePatterns
gpl-3.0
Normalize the signal and remove the null values
array = wfdb.processing.normalize(x=sig[:, signalII], lb=-2, ub=2) arrayNun = array[~np.isnan(array)] arrayNun = np.trim_zeros(arrayNun) arrayNun
Jupyter/LoadDataMimic-II.ipynb
davidgutierrez/HeartRatePatterns
gpl-3.0
Replace hyphens "-" with underscores "_" because, for some reason, SciDB has trouble with these characters. If the array is not empty after removing the null values, upload it to SciDB.
ondaName = onda.replace("-", "_") if arrayNun.size>0 : sdb.input(upload_data=array).store(ondaName,gc=False) # sdb.iquery("store(input(<x:int64>[i], '{fn}', 0, '{fmt}'), "+ondaName+")", upload_data=array)
Jupyter/LoadDataMimic-II.ipynb
davidgutierrez/HeartRatePatterns
gpl-3.0
First reload the data we generated in notmnist.ipynb.
pickle_file = 'notMNIST.pickle' with open(pickle_file, 'rb') as f: save = pickle.load(f) train_dataset = save['train_dataset'] train_labels = save['train_labels'] valid_dataset = save['valid_dataset'] valid_labels = save['valid_labels'] test_dataset = save['test_dataset'] test_labels = save['test_labels'] del save # hint to help gc free up memory print('Training set', train_dataset.shape, train_labels.shape) print('Validation set', valid_dataset.shape, valid_labels.shape) print('Test set', test_dataset.shape, test_labels.shape)
udacity_notebook/3_regularization.ipynb
ds-hwang/deeplearning_udacity
mit
Reformat into a shape that's more adapted to the models we're going to train:

- data as a flat matrix,
- labels as float 1-hot encodings.
image_size = 28 num_labels = 10 def reformat(dataset, labels): dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32) # Map 2 to [0.0, 1.0, 0.0 ...], 3 to [0.0, 0.0, 1.0 ...] labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32) return dataset, labels train_dataset, train_labels = reformat(train_dataset, train_labels) valid_dataset, valid_labels = reformat(valid_dataset, valid_labels) test_dataset, test_labels = reformat(test_dataset, test_labels) print('Training set', train_dataset.shape, train_labels.shape) print('Validation set', valid_dataset.shape, valid_labels.shape) print('Test set', test_dataset.shape, test_labels.shape) def accuracy(predictions, labels): return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])
udacity_notebook/3_regularization.ipynb
ds-hwang/deeplearning_udacity
mit
1.1 Loading HDF5 files HDF5 files can be read using a few different classes operating at different levels. The hierarchy meaningful to the end user is the following (from low to high):

* mpes.fprocessing.File() -- local import of h5py.File(), a low-level Python HDF5 parser (wrapping even lower-level C code).
* mpes.fprocessing.hdf5Reader() -- built on the File() class, adding several file structure parsing, file component readout and format conversion functions.
* mpes.fprocessing.hdf5Splitter() -- built on the hdf5Reader() class, used for splitting large hdf5 files.
* mpes.fprocessing.hdf5Processor() -- built on the hdf5Reader() class, adding binning operations and io.

The hierarchy goes File $\in$ hdf5Reader $\in$ (hdf5Splitter, hdf5Processor)
hdff = fp.File(fpath) hdff hdfr = fp.hdf5Reader(fpath) hdfr
examples/Tutorial_01_HDF5 File Management.ipynb
RealPolitiX/mpes
mit
New attributes and methods in the hdf5Reader() class
print( list(set(dir(hdfr)) - set(dir(hdff))) ) hdfp = fp.hdf5Processor(fpath) hdfp
examples/Tutorial_01_HDF5 File Management.ipynb
RealPolitiX/mpes
mit
New attributes and methods in the hdf5Processor() class
print( list(set(dir(hdfp)) - set(dir(hdfr))) )
examples/Tutorial_01_HDF5 File Management.ipynb
RealPolitiX/mpes
mit
1.2 Retrieving components from HDF5 files Reading components can also be done at different levels; the level of hdf5Reader() or above is recommended.
hdfp.summarize() print(list(hdfr.readGroup(hdfr, 'EventFormat')))
examples/Tutorial_01_HDF5 File Management.ipynb
RealPolitiX/mpes
mit
1.3 Converting HDF5 files Conversion of hdf5 to Matlab (mat) format (no data processing).
hdfr.convert('mat', save_addr='../data/data_131')
examples/Tutorial_01_HDF5 File Management.ipynb
RealPolitiX/mpes
mit
Conversion to parquet format
hdfr.convert('parquet', save_addr='../data/data_131_parquet', pq_append=False, chunksz=1e7, \ compression='gzip')
examples/Tutorial_01_HDF5 File Management.ipynb
RealPolitiX/mpes
mit
1.4 Splitting HDF5 files
hdfs = fp.hdf5Splitter(fpath) hdfs.split(nsplit=50, save_addr=r'../data/data_114_parts/data_114_', pbar=True)
examples/Tutorial_01_HDF5 File Management.ipynb
RealPolitiX/mpes
mit
1.5 Retrieve binned data from stored HDF5 file Read binned data over 3 axes
fpath_binned = r'../data/binres_114.h5' bindict = fp.readBinnedhdf5(fpath_binned, combined=True) bindict.keys()
examples/Tutorial_01_HDF5 File Management.ipynb
RealPolitiX/mpes
mit
Read binned data over 4 axes
fpath_binned = r'../data/data_114_4axis_binned.h5' bindict = fp.readBinnedhdf5(fpath_binned, combined=True) bindict.keys() bindict = fp.readBinnedhdf5(fpath_binned, combined=False) bindict.keys()
examples/Tutorial_01_HDF5 File Management.ipynb
RealPolitiX/mpes
mit
Time series analysis NOTE: Some of the examples in this chapter have been updated to work with more recent versions of the libraries. Load the data from "Price of Weed".
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/mj-clean.csv") transactions = pd.read_csv("mj-clean.csv", parse_dates=[5]) transactions.head()
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
The following function takes a DataFrame of transactions and compute daily averages.
def GroupByDay(transactions, func=np.mean):
    """Groups transactions by day and computes the daily mean ppg.

    transactions: DataFrame of transactions

    returns: DataFrame of daily prices
    """
    grouped = transactions[["date", "ppg"]].groupby("date")
    daily = grouped.aggregate(func)

    daily["date"] = daily.index
    start = daily.date[0]
    one_year = np.timedelta64(1, "Y")
    daily["years"] = (daily.date - start) / one_year

    return daily
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
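As a quick usage check (assuming transactions has been loaded as above), grouping the whole dataset gives one row per calendar day:

# daily mean price across all qualities combined
daily_all = GroupByDay(transactions)
daily_all.head()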
The following function returns a map from quality name to a DataFrame of daily averages.
def GroupByQualityAndDay(transactions):
    """Divides transactions by quality and computes mean daily price.

    transactions: DataFrame of transactions

    returns: map from quality to time series of ppg
    """
    groups = transactions.groupby("quality")
    dailies = {}
    for name, group in groups:
        dailies[name] = GroupByDay(group)

    return dailies
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
dailies is the map from quality name to DataFrame.
dailies = GroupByQualityAndDay(transactions)
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
The following plots the daily average price for each quality.
import matplotlib.pyplot as plt thinkplot.PrePlot(rows=3) for i, (name, daily) in enumerate(dailies.items()): thinkplot.SubPlot(i + 1) title = "Price per gram ($)" if i == 0 else "" thinkplot.Config(ylim=[0, 20], title=title) thinkplot.Scatter(daily.ppg, s=10, label=name) if i == 2: plt.xticks(rotation=30) thinkplot.Config() else: thinkplot.Config(xticks=[])
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
We can use statsmodels to run a linear model of price as a function of time.
import statsmodels.formula.api as smf def RunLinearModel(daily): model = smf.ols("ppg ~ years", data=daily) results = model.fit() return model, results
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
Here's what the results look like.
from IPython.display import display for name, daily in dailies.items(): model, results = RunLinearModel(daily) print(name) display(results.summary())
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
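The fitted slope and intercept can also be read off directly: for the formula "ppg ~ years", results.params is a Series indexed by "Intercept" and "years". A small sketch reusing the models fitted above:

# extract the intercept and the annual slope for each quality
for name, daily in dailies.items():
    _, results = RunLinearModel(daily)
    inter = results.params["Intercept"]
    slope = results.params["years"]
    print(name, ": intercept %.2f, slope %.2f $/year" % (inter, slope))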
Now let's plot the fitted model with the data.
def PlotFittedValues(model, results, label=""): """Plots original data and fitted values. model: StatsModel model object results: StatsModel results object """ years = model.exog[:, 1] values = model.endog thinkplot.Scatter(years, values, s=15, label=label) thinkplot.Plot(years, results.fittedvalues, label="model", color="#ff7f00")
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
The following function plots the original data and the fitted curve.
def PlotLinearModel(daily, name): """Plots a linear fit to a sequence of prices, and the residuals. daily: DataFrame of daily prices name: string """ model, results = RunLinearModel(daily) PlotFittedValues(model, results, label=name) thinkplot.Config( title="Fitted values", xlabel="Years", xlim=[-0.1, 3.8], ylabel="Price per gram ($)", )
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
Here are results for the high quality category:
name = "high" daily = dailies[name] PlotLinearModel(daily, name)
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
Moving averages As a simple example, I'll show the rolling average of the numbers from 0 to 9.
array = np.arange(10)
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
With a "window" of size 3, we get the average of the previous 3 elements, or nan when there are fewer than 3.
series = pd.Series(array) series.rolling(3).mean()
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
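The same windowed average can be computed by hand with np.convolve, which makes the edge behavior explicit; 'valid' mode simply drops the incomplete windows that pandas reports as NaN (a sketch added here for comparison):

# manual 3-point moving average; matches series.rolling(3).mean() from index 2 on
window = np.ones(3) / 3
manual = np.convolve(array, window, mode="valid")
print(manual)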
The following function plots the rolling mean.
def PlotRollingMean(daily, name): """Plots rolling mean. daily: DataFrame of daily prices """ dates = pd.date_range(daily.index.min(), daily.index.max()) reindexed = daily.reindex(dates) thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.2, label=name) roll_mean = pd.Series(reindexed.ppg).rolling(30).mean() thinkplot.Plot(roll_mean, label="rolling mean", color="#ff7f00") plt.xticks(rotation=30) thinkplot.Config(ylabel="price per gram ($)")
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0
Here's what it looks like for the high quality category.
PlotRollingMean(daily, name)
code/chap12ex.ipynb
AllenDowney/ThinkStats2
gpl-3.0