markdown | code | path | repo_name | license
---|---|---|---|---|
Initial model
|
# Proposed Initial Model
xgb1 = xgb.XGBClassifier( learning_rate =0.1, n_estimators=200, max_depth=5,
min_child_weight=1, gamma=0, subsample=0.6,
colsample_bytree=0.6, reg_alpha=0, reg_lambda=1, objective='multi:softmax',
nthread=4, scale_pos_weight=1, seed=100)
#Fit the algorithm on the data
xgb1.fit(X_train, Y_train,eval_metric='merror')
#Predict training set:
predictions = xgb1.predict(X_train)
#Print model report
# Confusion Matrix
conf = confusion_matrix(Y_train, predictions)
# Print Results
print ("\nModel Report")
print ("-Accuracy: %.6f" % ( accuracy(conf) ))
print ("-Adjacent Accuracy: %.6f" % ( accuracy_adjacent(conf, adjacent_facies) ))
print ("\nConfusion Matrix")
display_cm(conf, facies_labels, display_metrics=True, hide_zeros=True)
# Print Feature Importance
feat_imp = pd.Series(xgb1.booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
# Cross Validation parameters
cv_folds = 10
rounds = 100
xgb_param_1 = xgb1.get_xgb_params()
xgb_param_1['num_class'] = 9
# Perform cross-validation
cvresult1 = xgb.cv(xgb_param_1, dtrain, num_boost_round=xgb_param_1['n_estimators'],
stratified = True, nfold=cv_folds, metrics='merror', early_stopping_rounds=rounds)
print ("\nCross Validation Training Report Summary")
print (cvresult1.head())
print (cvresult1.tail())
|
HouMath/Face_classification_HouMath_XGB_01.ipynb
|
esa-as/2016-ml-contest
|
apache-2.0
|
The typical range for the learning rate is around 0.01–0.2, so we vary the learning rate a bit and, at the same time, scan over the number of boosted trees to fit. This will take a little while to finish.
|
print("Parameter optimization")
grid_search1 = GridSearchCV(xgb1,{'learning_rate':[0.05,0.01,0.1,0.2] , 'n_estimators':[200,400,600,800]},
scoring='accuracy' , n_jobs = 4)
grid_search1.fit(X_train,Y_train)
print("Best Set of Parameters")
grid_search1.grid_scores_, grid_search1.best_params_, grid_search1.best_score_
|
HouMath/Face_classification_HouMath_XGB_01.ipynb
|
esa-as/2016-ml-contest
|
apache-2.0
|
It seems that we need to make the learning rate smaller, which, in my opinion, should help reduce overfitting. The number of boosted trees to fit also needs to be updated accordingly.
|
# Proposed Model with optimized learning rate and number of boosted trees to fit
xgb2 = xgb.XGBClassifier( learning_rate =0.01, n_estimators=400, max_depth=5,
min_child_weight=1, gamma=0, subsample=0.6,
colsample_bytree=0.6, reg_alpha=0, reg_lambda=1, objective='multi:softmax',
nthread=4, scale_pos_weight=1, seed=100)
#Fit the algorithm on the data
xgb2.fit(X_train, Y_train,eval_metric='merror')
#Predict training set:
predictions = xgb2.predict(X_train)
#Print model report
# Confusion Matrix
conf = confusion_matrix(Y_train, predictions )
# Print Results
print ("\nModel Report")
print ("-Accuracy: %.6f" % ( accuracy(conf) ))
print ("-Adjacent Accuracy: %.6f" % ( accuracy_adjacent(conf, adjacent_facies) ))
# Confusion Matrix
print ("\nConfusion Matrix")
display_cm(conf, facies_labels, display_metrics=True, hide_zeros=True)
# Print Feature Importance
feat_imp = pd.Series(xgb2.booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
# Cross Validation parameters
cv_folds = 10
rounds = 100
xgb_param_2 = xgb2.get_xgb_params()
xgb_param_2['num_class'] = 9
# Perform cross-validation
cvresult2 = xgb.cv(xgb_param_2, dtrain, num_boost_round=xgb_param_2['n_estimators'],
stratified = True, nfold=cv_folds, metrics='merror', early_stopping_rounds=rounds)
print ("\nCross Validation Training Report Summary")
print (cvresult2.head())
print (cvresult2.tail())
print("Parameter optimization")
grid_search2 = GridSearchCV(xgb2,{'reg_alpha':[0, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10], 'reg_lambda':[0, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10] },
scoring='accuracy' , n_jobs = 4)
grid_search2.fit(X_train,Y_train)
print("Best Set of Parameters")
grid_search2.grid_scores_, grid_search2.best_params_, grid_search2.best_score_
# Proposed Model with optimized regularization
xgb3 = xgb.XGBClassifier( learning_rate =0.01, n_estimators=400, max_depth=5,
min_child_weight=1, gamma=0, subsample=0.6,
colsample_bytree=0.6, reg_alpha=0.1, reg_lambda=0.5, objective='multi:softmax',
nthread=4, scale_pos_weight=1, seed=100)
#Fit the algorithm on the data
xgb3.fit(X_train, Y_train,eval_metric='merror')
#Predict training set:
predictions = xgb3.predict(X_train)
#Print model report
# Confusion Matrix
conf = confusion_matrix(Y_train, predictions )
# Print Results
print ("\nModel Report")
print ("-Accuracy: %.6f" % ( accuracy(conf) ))
print ("-Adjacent Accuracy: %.6f" % ( accuracy_adjacent(conf, adjacent_facies) ))
# Confusion Matrix
print ("\nConfusion Matrix")
display_cm(conf, facies_labels, display_metrics=True, hide_zeros=True)
# Print Feature Importance
feat_imp = pd.Series(xgb3.booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
print("Parameter optimization")
grid_search3 = GridSearchCV(xgb3,{'max_depth':[2, 5, 8], 'gamma':[0, 1], 'subsample':[0.4, 0.6, 0.8],'colsample_bytree':[0.4, 0.6, 0.8] },
scoring='accuracy' , n_jobs = 4)
grid_search3.fit(X_train,Y_train)
print("Best Set of Parameters")
grid_search3.grid_scores_, grid_search3.best_params_, grid_search3.best_score_
# Load data
filename = '../facies_vectors.csv'
data = pd.read_csv(filename)
# Change to category data type
data['Well Name'] = data['Well Name'].astype('category')
data['Formation'] = data['Formation'].astype('category')
# Leave one well out for cross validation
well_names = data['Well Name'].unique()
f1 = []
for i in range(len(well_names)):
    # Split data for training and testing
    X_train = data.drop(['Facies', 'Formation', 'Depth'], axis=1)
    Y_train = data['Facies'] - 1
    train_X = X_train[X_train['Well Name'] != well_names[i]]
    train_Y = Y_train[X_train['Well Name'] != well_names[i]]
    test_X = X_train[X_train['Well Name'] == well_names[i]]
    test_Y = Y_train[X_train['Well Name'] == well_names[i]]
    train_X = train_X.drop(['Well Name'], axis=1)
    test_X = test_X.drop(['Well Name'], axis=1)
    # Final recommended model based on the extensive parameter search
    model_final = xgb.XGBClassifier(learning_rate=0.01, n_estimators=400, max_depth=5,
                                    min_child_weight=1, gamma=0, subsample=0.6, reg_alpha=0.1, reg_lambda=0.5,
                                    colsample_bytree=0.6, objective='multi:softmax',
                                    nthread=4, scale_pos_weight=1, seed=100)
    # Train the model on the training data
    model_final.fit(train_X, train_Y, eval_metric='merror')
    # Predict on the test set
    predictions = model_final.predict(test_X)
    # Print report
    print("\n------------------------------------------------------")
    print("Validation on the left-out well " + well_names[i])
    conf = confusion_matrix(test_Y, predictions, labels=np.arange(9))
    print("\nModel Report")
    print("-Accuracy: %.6f" % (accuracy(conf)))
    print("-Adjacent Accuracy: %.6f" % (accuracy_adjacent(conf, adjacent_facies)))
    print("-F1 Score: %.6f" % (f1_score(test_Y, predictions, labels=np.arange(9), average='weighted')))
    f1.append(f1_score(test_Y, predictions, labels=np.arange(9), average='weighted'))
    facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',
                     'WS', 'D', 'PS', 'BS']
    print("\nConfusion Matrix Results")
    from classification_utilities import display_cm, display_adj_cm
    display_cm(conf, facies_labels, display_metrics=True, hide_zeros=True)
print("\n------------------------------------------------------")
print("Final Results")
print("-Average F1 Score: %6f" % (sum(f1) / (1.0 * len(f1))))
# Load test data
test_data = pd.read_csv('../validation_data_nofacies.csv')
test_data['Well Name'] = test_data['Well Name'].astype('category')
X_test = test_data.drop(['Formation', 'Well Name', 'Depth'], axis=1)
# Predict facies of unclassified data
Y_predicted = model_final.predict(X_test)
test_data['Facies'] = Y_predicted + 1
# Store the prediction
test_data.to_csv('Prediction1.csv')
test_data
|
HouMath/Face_classification_HouMath_XGB_01.ipynb
|
esa-as/2016-ml-contest
|
apache-2.0
|
Try it out
|
U = 4 * 10** -np.arange(11.) # generates values 4, 4e-1, 4e-2 .. 4e-10
print("{:>10s} {:>10s} {:>10s}".format('u ', 'W(u)','W1(u) '))
for u in U:
    print("{0:10.1e} {1:10.4e} {2:10.4e}".format(u, W(u), W1(u)))
|
exercises_notebooks/TheisWellFunction.ipynb
|
Olsthoorn/TransientGroundwaterFlow
|
gpl-3.0
|
It seems that our numerical integration is a fair approximation to four significant digits, but not better, even when computed with 1000 steps as we did. So it is relatively easy to create one's own numerically computed value of an analytical expression like the exponential integral.
Theis well function as a power series
The Theis well function can also be expressed as a power series. This expression has certain advantages, as it gives insight into the behavior of the function and allows important simplifications and deductions.
$$ W(u) = -0.5772 - \ln(u) + u - \frac {u^2} {2 \cdot 2!} + \frac {u^3} {3 \cdot 3!} - \frac {u^4} {4 \cdot 4!} + \dots $$
This series, too, can readily be computed numerically by first defining a function for it. The sum is computed in a loop. To avoid having to compute factorials, it is easiest to compute each successive term from the previous one.
So to get from term $n$ to term $n+1$:
$$ \frac {u^{n+1}} {(n+1) \cdot (n+1)!} = \frac {u^n} { n \cdot n!} \times \frac {u \, n} {(n+1)^2} $$
This series is implemented below.
|
def W2(u):
    """Return the Theis well function computed as a power series."""
    tol = 1e-5
    w = -0.5772 - np.log(u) + u
    a = u
    for n in range(1, 100):
        a = -a * u * n / (n + 1)**2   # next term of the series, computed from the previous one
        w += a
        if np.all(np.abs(a) < tol):   # stop when all terms have become negligible
            return w
    return w
|
exercises_notebooks/TheisWellFunction.ipynb
|
Olsthoorn/TransientGroundwaterFlow
|
gpl-3.0
|
Compare the three methods of computing the well function.
|
U = 4.0 * 10** -np.arange(11.) # generates values 4, 4e-1, 4e-2 .. 4e-10
print("{:>10s} {:>10s} {:>10s} {:>10s}".format('u ', 'W(u) ', 'W1(u) ', 'W2(u) '))
for u in U:
    print("{0:10.1e} {1:10.4e} {2:10.4e} {3:10.4e}".format(u, W(u), W1(u), W2(u)))
|
exercises_notebooks/TheisWellFunction.ipynb
|
Olsthoorn/TransientGroundwaterFlow
|
gpl-3.0
|
We see that all three methods yield the same results.
Next we show the well function as it is shown in groundwater hydrology books.
|
u = np.logspace(-7, 1, 71)
import matplotlib.pylab as plt
fig1= plt.figure()
ax1 = fig1.add_subplot(111)
ax1.set(xlabel='u', ylabel='W(u)', title='Theis type curve versus u', yscale='log', xscale='log')
ax1.grid(True)
ax1.plot(u, W(u), 'b', label='-expi(-u)')
#ax1.plot(u, W1(u), 'rx', label='integal') # works only for scalars
ax1.plot(u, W2(u), 'g+', label='power series')
ax1.legend(loc='best')
plt.show()
|
exercises_notebooks/TheisWellFunction.ipynb
|
Olsthoorn/TransientGroundwaterFlow
|
gpl-3.0
|
The curve of W(u) versus u runs counter-intuitively and is therefore confusing. For that reason it is generally presented as W(u) versus 1/u instead, as shown below.
|
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.set(xlabel='1/u', ylabel='W(u)', title='Theis type curve versus 1/u', yscale='log', xscale='log')
ax2.grid(True)
ax2.plot(1/u, W(u))
plt.show()
|
exercises_notebooks/TheisWellFunction.ipynb
|
Olsthoorn/TransientGroundwaterFlow
|
gpl-3.0
|
Now W(u) resembles the actual drawdown, which increases with time.
The reason that this is so becomes clear from the fact that
$$ u = \frac {r^2 S} {4 kD t} $$
and that
$$ \frac 1 u = \frac {4 kDt} {r^2 S} = \frac {4 kD} S \frac t {r^2} $$
which shows that $\frac 1 u$ increases with time, so that the values on the $\frac 1 u$ axis are proportional to time; the drawdown, i.e. the well function $W(u)$, thus increases with $\frac 1 u$, which is less confusing.
The graph of $W(u)$ versus $\frac 1 u$ is called the Theis type curve. Its vertical axis is proportional to the drawdown and its horizontal axis to time.
The same curve is shown below, but now with a linear vertical scale and a logarithmic horizontal scale. The vertical scale has been reversed (see the values on the y-axis) to obtain a curve that illustrates the decline of the groundwater head with time caused by the extraction. This way of presenting it is probably the least confusing when reading the curve.
|
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.set(xlabel='1/u', ylabel='W(u)', title='Theis type curve versus 1/u', yscale='linear', xscale='log')
ax2.grid(True)
ax2.plot(1/u, W(u))
ax2.invert_yaxis()
plt.show()
|
exercises_notebooks/TheisWellFunction.ipynb
|
Olsthoorn/TransientGroundwaterFlow
|
gpl-3.0
|
Logarithmic approximation of the Theis type curve
We see that after some time the drawdown becomes a straight line when only the time axis is logarithmic. This suggests that a logarithmic approximation of the time-drawdown curve is accurate after some time.
That this is indeed the case can be deduced from the power series description of the type curve:
$$ W(u) = -0.5772 - \ln(u) + u - \frac {u^2} {2 \cdot 2!} + \frac {u^3} {3 \cdot 3!} - \frac {u^4} {4 \cdot 4!} + \dots $$
It is clear that all terms to the right of $u$ will be smaller than $u$ when $u<1$. Hence, when $u$ is so small that it can be neglected relative to $\ln(u)$, all the terms to the right of $\ln(u)$ can be neglected. Therefore we have the following approximation
$$ W(u) \approx -0.5772 -\ln(u) + O(u) $$
for
$$ -\ln(u) \gg u \quad\rightarrow\quad \ln(u) \ll -u \quad\rightarrow\quad u \ll e^{-u} \approx 1 $$
which is practically the case for $u<0.01$, as can also be seen in the graph at $1/u = 10^2$. From the graph one may conclude that even for $1/u>10$, i.e. $u<0.1$, the type curve is straight on the logarithmic time axis and can therefore be accurately computed using a logarithmic approximation of the type curve.
Below, the error between the full Theis curve $W(u)$ and the approximation $Wa(u) = -0.5772 - \ln(u)$ is computed and shown. This reveals that at $u=0.1$ the error is 5.4% and at $u=0.01$ it has come down to only about 0.2%.
|
U = np.logspace(-2, 0, 21)
Wa = lambda u : -0.5772 - np.log(u)
print("{:>12s} {:>12s} {:>12s} {:>12s}".format('u','W(u)','Wa(u)','1-Wa(u)/W(u)'))
print("{:>12s} {:>12s} {:>12s} {:>12s}".format(' ',' ',' ','the error'))
for u in U:
    print("{:12.3g} {:12.3g} {:12.3g} {:12.1%}".format(u, W(u), Wa(u), 1-Wa(u)/W(u)))
U = np.logspace(-7, 1, 81)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set(xlabel='1/u', ylabel='W(u)', title='Theis type curve and its logarithmic approximation', yscale='linear', xscale='log')
ax.grid(True)
ax.plot(1/U, W(U), 'b', linewidth = 2., label='Theis type curve')
ax.plot(1/U, Wa(U), 'r', linewidth = 0.25, label='log approximation')
ax.invert_yaxis()
plt.legend(loc='best')
plt.show()
|
exercises_notebooks/TheisWellFunction.ipynb
|
Olsthoorn/TransientGroundwaterFlow
|
gpl-3.0
|
Hence, in any practical situation, the logarithmic approximation is accurate enough when $u<0.01$.
The approximation of the Theis type curve can now be elaborated:
$$ Wa (u) \approx -0.5772 - \ln(u) = \ln(e^{-0.5772}) - \ln(u) = \ln(0.5615) - \ln(u) = \ln \frac {0.5615} {u} $$
Because $u = \frac {r^2 S} {4 kD t}$ we have, with $4 \times 0.5615 \approx 2.25$,
$$ W(u) \approx \ln \frac {2.25 kD t} {r^2 S} $$
and so the drawdown approximation becomes
$$ s \approx \frac Q {4 \pi kD} \ln \frac {2.25 kD t} {r^2 S} $$
The condition $u<0.1$ can be translated to $\frac {r^2 S} {4 kD t} < 0.1$ or
$$\frac t {r^2} > 2.5 \frac {S} {kD}$$
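As a small numerical illustration of this approximation (the values of $Q$, $kD$, $S$, $r$ and the times below are assumptions chosen only for demonstration, not data from this notebook):
```
import numpy as np

# Illustrative evaluation of the logarithmic (Jacob) drawdown approximation; all values assumed.
Q = 1200.0    # extraction rate [m3/d]
kD = 600.0    # transmissivity [m2/d]
S = 1e-3      # storativity [-]
r = 50.0      # distance from the well [m]
t = np.array([0.01, 0.1, 1.0, 10.0])   # time [d]

u = r**2 * S / (4 * kD * t)
s = Q / (4 * np.pi * kD) * np.log(2.25 * kD * t / (r**2 * S))
for ti, ui, si in zip(t, u, s):
    print('t = {:6.2f} d, u = {:8.2e}, s ~ {:6.3f} m'.format(ti, ui, si))
```
Printing $u$ alongside the drawdown makes it easy to check whether the condition $u<0.1$ for the approximation is met.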
Radius of influence
The previous logarithmic drawdown type curve versus $1/u$ can be seen as an image of the drawdown at a fixed distance and varying time. This is because $1/u$ is proportional to the real time. On the other hand, the drawdown type curve versus $u$ may be regarded as the drawdown at a fixed time for varying distance. This follows from
$$ W(u) \approx \ln \frac {2.25\, kD\, t} {r^2 S} \quad \mathrm{versus} \quad \ln(u) = \ln \frac {r^2 S} {4 kD t} = 2 \ln \left( r \sqrt{ \frac {S} {4 kD t} } \right) $$
That is, the drawdown is plotted against $r$ on a logarithmic scale. The plot below reveals this:
|
U = np.logspace(-7, 1, 81)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set(xlabel='u', ylabel='W(u)', title='Theis type curve and its logarithmic approximation', yscale='linear', xscale='log')
ax.grid(True)
ax.plot(U, W(U), 'b', linewidth = 2., label='Theis type curve')
ax.plot(U, Wa(U), 'r', linewidth = 0.25, label='log approximation')
ax.invert_yaxis()
plt.legend(loc='best')
plt.show()
|
exercises_notebooks/TheisWellFunction.ipynb
|
Olsthoorn/TransientGroundwaterFlow
|
gpl-3.0
|
This shows that the radius of influence is limited. We can approximate this radius of influence by saying that it is the distance at which the approximated Theis curve, that is, the straight red line in the graph, intersects zero drawdown, i.e. $W(u) = 0$.
Hence, for the radius of influence, R, we have
$$ \ln \frac {2.25 kD t} {R^2 S} = 0 $$
implying that
$$ \frac {2.25 kD t } { R^2 S } = 1 $$
$$ R = \sqrt { \frac {2.25 kD t} {S} } $$
with $R$ the radius of influence. Computing the radius of influence is an easy way to determine how far out the drawdown affects the groundwater heads.
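A short sketch of how this formula can be evaluated is given below; the values of $kD$, $S$ and the times are assumptions chosen purely for illustration.
```
import numpy as np

# Illustration of R = sqrt(2.25 kD t / S); the aquifer properties below are assumed values.
kD = 600.0   # transmissivity [m2/d]
S = 1e-3     # storativity [-]
for t in [0.1, 1.0, 10.0, 100.0]:         # time [d]
    R = np.sqrt(2.25 * kD * t / S)        # radius of influence [m]
    print('t = {:6.1f} d  ->  R = {:8.0f} m'.format(t, R))
```
The radius grows with the square root of time, so it expands quickly at first and ever more slowly afterwards.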
Pumping test
Introduction
Below are the data obtained from a pumping test carried out at the site "Oude Korendijk", south of Rotterdam in the Netherlands (see Kruseman and De Ridder, p. 56, 59). The piezometers are all open at 20 m below ground surface. The groundwater head is shallow, within a metre of ground surface. The first 18 m below ground surface consist of clay, peat and clayey fine sand. These layers form a practically impermeable confining unit. Below this, between 18 and 25 m below ground surface, are 7 m of sand and some gravel that form the aquifer. Fine sandy and clayey sediments underneath form the base of the aquifer, which is considered impermeable.
Piezometers were installed at 30, 90 and 215 m from the well, open at 20 m below ground surface. The well has its screen installed over the whole thickness of the aquifer. We consider the aquifer to be confined, with no leakage, but we should look at the drawdown curves with a critical eye to verify to what extent this assumption holds true.
The drawdown data for the three piezometers are given below. The first column is the time after the start of the pump in minutes; the second column is the drawdown in m.
The well extracts 788 m³/d.
The objective of the pumping test is to determine the properties kD and S of the aquifer.
The data:
|
# t[min], s[m]
H30 = [ [0.0, 0.0],
[0.1, 0.04],
[0.25, 0.08],
[0.50, 0.13],
[0.70, 0.18],
[1.00, 0.23],
[1.40, 0.28],
[1.90, 0.33],
[2.33, 0.36],
[2.80, 0.39],
[3.36, 0.42],
[4.00, 0.45],
[5.35, 0.50],
[6.80, 0.54],
[8.30, 0.57],
[8.70, 0.58],
[10.0, 0.60],
[13.1, 0.64]]
# t[min], s[m]
H90= [[0.0, 0.0],
[1.5, 0.015],
[2.0, 0.021],
[2.16, 0.023],
[2.66, 0.044],
[3.00, 0.054],
[3.50, 0.075],
[4.00, 0.090],
[4.33, 0.104],
[5.50, 0.133],
[6.0, 0.154],
[7.5, 0.178],
[9.0, 0.206],
[13.0, 0.250],
[15.0, 0.275],
[18.0, 0.305],
[25.0, 0.348],
[30.0, 0.364]]
# t[min], s[m]
H215=[[0.0, 0.0],
[66.0, 0.089],
[127., 0.138],
[185., 0.165],
[251., 0.186]]
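# --- Illustrative sketch (not part of the original exercise) -----------------
# A quick estimate of kD and S from the H30 data, using the logarithmic (Jacob)
# approximation derived above, s = Q/(4 pi kD) * ln(2.25 kD t / (r^2 S)),
# obtained by fitting a straight line of s versus ln(t).
import numpy as np

Q, r = 788.0, 30.0                       # extraction [m3/d], distance of piezometer H30 [m]
data30 = np.array(H30[1:])               # skip the (t=0, s=0) point
t30 = data30[:, 0] / (24. * 60.)         # minutes -> days
s30 = data30[:, 1]                       # drawdown [m]
slope, intercept = np.polyfit(np.log(t30), s30, 1)   # s = slope * ln(t) + intercept
kD_est = Q / (4 * np.pi * slope)                      # from the coefficient of ln(t)
t0 = np.exp(-intercept / slope)                       # time at which the fitted line gives s = 0
S_est = 2.25 * kD_est * t0 / r**2                     # from ln(2.25 kD t0 / (r^2 S)) = 0
print('Rough estimates from H30 (only valid where u < 0.1): '
      'kD ~ %.0f m2/d, S ~ %.1e' % (kD_est, S_est))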
|
exercises_notebooks/TheisWellFunction.ipynb
|
Olsthoorn/TransientGroundwaterFlow
|
gpl-3.0
|
Configure GCP environment settings
Update the following variables to reflect the values for your GCP environment:
PROJECT_ID: The ID of the Google Cloud project you are using to implement this solution.
PROJECT_NUMBER: The number of the Google Cloud project you are using to implement this solution. You can find this in the Project info card on the project dashboard page.
BUCKET: The name of the Cloud Storage bucket you created to use with this solution. The BUCKET value should be just the bucket name, so myBucket rather than gs://myBucket.
REGION: The region to use for the AI Platform Prediction job.
|
PROJECT_ID = 'yourProject' # Change to your project.
PROJECT_NUMBER = 'yourProjectNumber' # Change to your project number
BUCKET = 'yourBucketName' # Change to the bucket you created.
REGION = 'yourPredictionRegion' # Change to your AI Platform Prediction region.
ARTIFACTS_REPOSITORY_NAME = 'ml-serving'
EMBEDDNIG_LOOKUP_MODEL_OUTPUT_DIR = f'gs://{BUCKET}/bqml/embedding_lookup_model'
EMBEDDNIG_LOOKUP_MODEL_NAME = 'item_embedding_lookup'
EMBEDDNIG_LOOKUP_MODEL_VERSION = 'v1'
INDEX_DIR = f'gs://{BUCKET}/bqml/scann_index'
SCANN_MODEL_NAME = 'index_server'
SCANN_MODEL_VERSION = 'v1'
KIND = 'song'
!gcloud config set project $PROJECT_ID
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Authenticate your GCP account
This is required if you run the notebook in Colab. If you use an AI Platform notebook, you should already be authenticated.
|
try:
    from google.colab import auth
    auth.authenticate_user()
    print("Colab user is authenticated.")
except:
    pass
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Deploy the embedding lookup model to AI Platform Prediction
Create the embedding lookup model resource in AI Platform:
|
!gcloud ai-platform models create {EMBEDDNIG_LOOKUP_MODEL_NAME} --region={REGION}
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Next, deploy the model:
|
!gcloud ai-platform versions create {EMBEDDNIG_LOOKUP_MODEL_VERSION} \
--region={REGION} \
--model={EMBEDDNIG_LOOKUP_MODEL_NAME} \
--origin={EMBEDDNIG_LOOKUP_MODEL_OUTPUT_DIR} \
--runtime-version=2.2 \
--framework=TensorFlow \
--python-version=3.7 \
--machine-type=n1-standard-2
print("The model version is deployed to AI Platform Prediction.")
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Once the model is deployed, you can verify it in the AI Platform console.
Test the deployed embedding lookup AI Platform Prediction model
Set the AI Platform Prediction API information:
|
import googleapiclient.discovery
from google.api_core.client_options import ClientOptions
api_endpoint = f'https://{REGION}-ml.googleapis.com'
client_options = ClientOptions(api_endpoint=api_endpoint)
service = googleapiclient.discovery.build(
serviceName='ml', version='v1', client_options=client_options)
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Run the caip_embedding_lookup method to retrieve item embeddings. This method accepts item IDs, calls the embedding lookup model in AI Platform Prediction, and returns the appropriate embedding vectors.
|
def caip_embedding_lookup(input_items):
    request_body = {'instances': input_items}
    service_name = f'projects/{PROJECT_ID}/models/{EMBEDDNIG_LOOKUP_MODEL_NAME}/versions/{EMBEDDNIG_LOOKUP_MODEL_VERSION}'
    print(f'Calling : {service_name}')
    response = service.projects().predict(
        name=service_name, body=request_body).execute()
    if 'error' in response:
        raise RuntimeError(response['error'])
    return response['predictions']
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Test the caip_embedding_lookup method with three item IDs:
|
input_items = ['2114406', '2114402 2120788', 'abc123']
embeddings = caip_embedding_lookup(input_items)
print(f'Embeddings retrieved: {len(embeddings)}')
for idx, embedding in enumerate(embeddings):
    print(f'{input_items[idx]}: {embedding[:5]}')
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
ScaNN matching service
The ScaNN matching service performs the following steps:
Receives one or more item IDs from the client.
Calls the embedding lookup model to fetch the embedding vectors of those item IDs.
Uses these embedding vectors to query the ANN index to find approximate nearest neighbor embedding vectors.
Maps the approximate nearest neighbors embedding vectors to their corresponding item IDs.
Sends the item IDs back to the client.
When the client receives the item IDs of the matches, the song title and artist information is fetched from Datastore in real-time to be displayed and served to the client application.
Note: In practice, recommendation systems combine matches (from one or more indices) with user-provided filtering clauses (like `where price <= value and colour = red`), as well as other item metadata (like item categories, popularity, and recency) to ensure recommendation freshness and diversity. In addition, ranking is commonly applied after generating the matches to decide the order in which they are served to the user.
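As a purely illustrative sketch of that note (the `filter_and_rank` helper, the metadata dictionary, and the `price`, `colour`, `popularity`, and `recency` fields are all hypothetical and not part of this solution), combining ANN matches with filters and a ranking step might look like this:
```
# Hypothetical post-processing of ANN matches; field names and weights are illustrative only.
def filter_and_rank(match_ids, metadata, max_price=None, colour=None, top_k=10):
    """Filter candidate matches by user-provided clauses, then re-rank by popularity/recency."""
    candidates = []
    for item_id in match_ids:
        item = metadata.get(item_id)
        if item is None:
            continue
        if max_price is not None and item['price'] > max_price:
            continue
        if colour is not None and item['colour'] != colour:
            continue
        candidates.append(item_id)
    # Simple ranking heuristic: weighted blend of popularity and recency scores.
    score = lambda i: 0.7 * metadata[i]['popularity'] + 0.3 * metadata[i]['recency']
    return sorted(candidates, key=score, reverse=True)[:top_k]
```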
ScaNN matching service implementation
The ScaNN matching service is implemented as a Flask application that runs on a gunicorn web server. This application is implemented in the main.py module.
The ScaNN matching service application works as follows:
Uses environmental variables to set configuration information, such as the Google Cloud location of the ScaNN index to load.
Loads the ScaNN index as the ScaNNMatcher object is initiated.
As required by AI Platform Prediction, exposes two HTTP endpoints:
health: a GET method to which AI Platform Prediction sends health checks.
predict: a POST method to which AI Platform Prediction forwards prediction requests.
The predict method expects JSON requests in the form {"instances":[{"query": "item123", "show": 10}]}, where query represents the item ID to retrieve matches for, and show represents the number of matches to retrieve.
The predict method works as follows:
1. Validates the received request object.
1. Extracts the `query` and `show` values from the request object.
1. Calls `embedding_lookup.lookup` with the given query item ID to get its embedding vector from the embedding lookup model.
1. Calls `scann_matcher.match` with the query item embedding vector to retrieve its approximate nearest neighbor item IDs from the ANN Index.
The list of matching item IDs is put into JSON format and returned as the response of the predict method.
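The snippet below is only a minimal sketch of the flow described above; the `EmbeddingLookupStub` and `ScaNNMatcherStub` classes and the simplified routes are placeholders standing in for the actual helpers and routes defined in the repository's main.py module.
```
# Minimal sketch of the described flow; the stubs below stand in for the real helpers in main.py.
import os
from flask import Flask, request, jsonify

class EmbeddingLookupStub:
    def lookup(self, item_ids):
        # The real helper calls the deployed embedding lookup model.
        return [[0.0] * 32 for _ in item_ids]

class ScaNNMatcherStub:
    def __init__(self, index_dir):
        self.index_dir = index_dir            # the real helper loads the ScaNN index from this location
    def match(self, vector, show):
        return ['item_1', 'item_2'][:show]    # the real helper queries the ANN index

app = Flask(__name__)
scann_matcher = ScaNNMatcherStub(os.environ.get('INDEX_DIR', ''))   # configuration via environment variables
embedding_lookup = EmbeddingLookupStub()

@app.route('/healthz', methods=['GET'])
def health():
    # AI Platform Prediction sends health checks to a GET endpoint.
    return 'ok', 200

@app.route('/predict', methods=['POST'])
def predict():
    # Expects {"instances": [{"query": "item123", "show": 10}]}
    instance = request.get_json()['instances'][0]
    query, show = instance['query'], instance.get('show', 10)
    vector = embedding_lookup.lookup([query])[0]     # embedding of the query item
    match_ids = scann_matcher.match(vector, show)    # approximate nearest neighbour item IDs
    return jsonify({'predictions': match_ids})
```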
Deploy the ScaNN matching service to AI Platform Prediction
Package the ScaNN matching service application in a custom container and deploy it to AI Platform Prediction.
Create an Artifact Registry for the Docker container image
|
!gcloud beta artifacts repositories create {ARTIFACTS_REPOSITORY_NAME} \
--location={REGION} \
--repository-format=docker
!gcloud beta auth configure-docker {REGION}-docker.pkg.dev --quiet
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Use Cloud Build to build the Docker container image
The container runs the gunicorn HTTP web server and executes the Flask app variable defined in the main.py module.
The container image to deploy to AI Platform Prediction is defined in a Dockerfile, as shown in the following code snippet:
```
FROM python:3.8-slim
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY . ./
ARG PORT
ENV PORT=$PORT
CMD exec gunicorn --bind :$PORT main:app --workers=1 --threads 8 --timeout 1800
```
Build the container image by using Cloud Build and specifying the cloudbuild.yaml file:
|
IMAGE_URL = f'{REGION}-docker.pkg.dev/{PROJECT_ID}/{ARTIFACTS_REPOSITORY_NAME}/{SCANN_MODEL_NAME}:{SCANN_MODEL_VERSION}'
PORT=5001
SUBSTITUTIONS = ''
SUBSTITUTIONS += f'_IMAGE_URL={IMAGE_URL},'
SUBSTITUTIONS += f'_PORT={PORT}'
!gcloud builds submit --config=index_server/cloudbuild.yaml \
--substitutions={SUBSTITUTIONS} \
--timeout=1h
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Run the following command to verify the container image has been built:
|
repository_id = f'{REGION}-docker.pkg.dev/{PROJECT_ID}/{ARTIFACTS_REPOSITORY_NAME}'
!gcloud beta artifacts docker images list {repository_id}
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Create a service account for AI Platform Prediction
Create a service account to run the custom container. This is required in cases where you want to grant specific permissions to the service account.
|
SERVICE_ACCOUNT_NAME = 'caip-serving'
SERVICE_ACCOUNT_EMAIL = f'{SERVICE_ACCOUNT_NAME}@{PROJECT_ID}.iam.gserviceaccount.com'
!gcloud iam service-accounts create {SERVICE_ACCOUNT_NAME} \
--description="Service account for AI Platform Prediction to access cloud resources."
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Grant the Cloud ML Engine (AI Platform) service account the iam.serviceAccountAdmin privilege, and grant the caip-serving service account the privileges required by the ScaNN matching service, which are storage.objectViewer and ml.developer.
|
!gcloud projects describe {PROJECT_ID} --format="value(projectNumber)"
!gcloud projects add-iam-policy-binding {PROJECT_ID} \
--role=roles/iam.serviceAccountAdmin \
--member=serviceAccount:service-{PROJECT_NUMBER}@cloud-ml.google.com.iam.gserviceaccount.com
!gcloud projects add-iam-policy-binding {PROJECT_ID} \
--role=roles/storage.objectViewer \
--member=serviceAccount:{SERVICE_ACCOUNT_EMAIL}
!gcloud projects add-iam-policy-binding {PROJECT_ID} \
--role=roles/ml.developer \
--member=serviceAccount:{SERVICE_ACCOUNT_EMAIL}
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Deploy the custom container to AI Platform Prediction
Create the ANN index model resource in AI Platform:
|
!gcloud ai-platform models create {SCANN_MODEL_NAME} --region={REGION}
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Deploy the custom container to AI Platform prediction. Note that you use the env-vars parameter to pass environmental variables to the Flask application in the container.
|
HEALTH_ROUTE=f'/v1/models/{SCANN_MODEL_NAME}/versions/{SCANN_MODEL_VERSION}'
PREDICT_ROUTE=f'/v1/models/{SCANN_MODEL_NAME}/versions/{SCANN_MODEL_VERSION}:predict'
ENV_VARIABLES = f'PROJECT_ID={PROJECT_ID},'
ENV_VARIABLES += f'REGION={REGION},'
ENV_VARIABLES += f'INDEX_DIR={INDEX_DIR},'
ENV_VARIABLES += f'EMBEDDNIG_LOOKUP_MODEL_NAME={EMBEDDNIG_LOOKUP_MODEL_NAME},'
ENV_VARIABLES += f'EMBEDDNIG_LOOKUP_MODEL_VERSION={EMBEDDNIG_LOOKUP_MODEL_VERSION}'
!gcloud beta ai-platform versions create {SCANN_MODEL_VERSION} \
--region={REGION} \
--model={SCANN_MODEL_NAME} \
--image={IMAGE_URL} \
--ports={PORT} \
--predict-route={PREDICT_ROUTE} \
--health-route={HEALTH_ROUTE} \
--machine-type=n1-standard-4 \
--env-vars={ENV_VARIABLES} \
--service-account={SERVICE_ACCOUNT_EMAIL}
print("The model version is deployed to AI Platform Prediction.")
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Test the Deployed ScaNN Index Service
After deploying the custom container, test it by running the caip_scann_match method. This method accepts the parameter query_items, whose value is converted into a space-separated string of item IDs and treated as a single query. That is, a single embedding vector is retrieved from the embedding lookup model, and similar item IDs are retrieved from the ScaNN index given this embedding vector.
|
from google.cloud import datastore
import requests
client = datastore.Client(PROJECT_ID)
def caip_scann_match(query_items, show=10):
    request_body = {
        'instances': [{
            'query': ' '.join(query_items),
            'show': show
        }]
    }
    service_name = f'projects/{PROJECT_ID}/models/{SCANN_MODEL_NAME}/versions/{SCANN_MODEL_VERSION}'
    print(f'Calling: {service_name}')
    response = service.projects().predict(
        name=service_name, body=request_body).execute()
    if 'error' in response:
        raise RuntimeError(response['error'])
    match_tokens = response['predictions']
    keys = [client.key(KIND, int(key)) for key in match_tokens]
    items = client.get_multi(keys)
    return items
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Call the caip_scann_match method with five item IDs and request five match items for each:
|
songs = {
'2120788': 'Limp Bizkit: My Way',
'1086322': 'Jacques Brel: Ne Me Quitte Pas',
'833391': 'Ricky Martin: Livin\' la Vida Loca',
'1579481': 'Dr. Dre: The Next Episode',
'2954929': 'Black Sabbath: Iron Man'
}
for item_Id, desc in songs.items():
    print(desc)
    print("==================")
    similar_items = caip_scann_match([item_Id], 5)
    for similar_item in similar_items:
        print(f'- {similar_item["artist"]}: {similar_item["track_title"]}')
    print()
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
(Optional) Deploy the matrix factorization model to AI Platform Prediction
Optionally, you can deploy the matrix factorization model in order to perform exact item matching. The model takes Item1_Id as an input and outputs the top 50 recommended item2_Ids.
Exact matching returns better results, but takes significantly longer than approximate nearest neighbor matching. You might want to use exact item matching in cases where you are working with a very small data set and where latency isn't a primary concern.
Export the model from BigQuery ML to Cloud Storage as a SavedModel
|
BQ_DATASET_NAME = 'recommendations'
BQML_MODEL_NAME = 'item_matching_model'
BQML_MODEL_VERSION = 'v1'
BQML_MODEL_OUTPUT_DIR = f'gs://{BUCKET}/bqml/item_matching_model'
!bq --quiet extract -m {BQ_DATASET_NAME}.{BQML_MODEL_NAME} {BQML_MODEL_OUTPUT_DIR}
!saved_model_cli show --dir {BQML_MODEL_OUTPUT_DIR} --tag_set serve --signature_def serving_default
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Deploy the exact matching model to AI Platform Prediction
|
!gcloud ai-platform models create {BQML_MODEL_NAME} --region={REGION}
!gcloud ai-platform versions create {BQML_MODEL_VERSION} \
--region={REGION} \
--model={BQML_MODEL_NAME} \
--origin={BQML_MODEL_OUTPUT_DIR} \
--runtime-version=2.2 \
--framework=TensorFlow \
--python-version=3.7 \
--machine-type=n1-standard-2
print("The model version is deployed to AI Platform Predicton.")
def caip_bqml_matching(input_items, show):
request_body = {'instances': input_items}
service_name = f'projects/{PROJECT_ID}/models/{BQML_MODEL_NAME}/versions/{BQML_MODEL_VERSION}'
print(f'Calling : {service_name}')
response = service.projects().predict(
name=service_name, body=request_body).execute()
if 'error' in response:
raise RuntimeError(response['error'])
match_tokens = response['predictions'][0]["predicted_item2_Id"][:show]
keys = [client.key(KIND, int(key)) for key in match_tokens]
items = client.get_multi(keys)
return items
return response['predictions']
for item_Id, desc in songs.items():
print(desc)
print("==================")
similar_items = caip_bqml_matching([int(item_Id)], 5)
for similar_item in similar_items:
print(f'- {similar_item["artist"]}: {similar_item["track_title"]}')
print()
|
retail/recommendation-system/bqml-scann/05_deploy_lookup_and_scann_caip.ipynb
|
GoogleCloudPlatform/analytics-componentized-patterns
|
apache-2.0
|
Creating Series
A Series can be created and initialised by passing either a scalar value, a NumPy ndarray, a Python list or a Python dict as the data parameter of the Series constructor.
|
# create one item series
s1 = pd.Series(1)
s1
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
'0' is the index and '1' is the value. The data type (dtype) is also shown. We can also retrieve the value using the associated index.
|
# get value with label 0
s1[0]
# create from list
s2 = pd.Series([1,2,3,4,5])
s2
# get the values in the series
s2.values
# get the index of the series
s2.index
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Creating Series with named index
Pandas will create different index types based on the type of data identified in the index parameter. These different index types are optimized to perform indexing operations for that specific data type. To specify the index at the time of creation of the Series, use the index parameter of the constructor.
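For example (an illustration added here, not from the original notebook), integer, string and datetime index data each produce a different index type:
```
import pandas as pd

# Different kinds of index data produce different, specialised index objects.
print(pd.Series([10, 20, 30]).index)                               # default RangeIndex
print(pd.Series([10, 20, 30], index=['a', 'b', 'c']).index)        # object (label) Index
print(pd.Series([10, 20, 30],
                index=pd.to_datetime(['2020-01-01',
                                      '2020-01-02',
                                      '2020-01-03'])).index)       # DatetimeIndex
```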
|
# explicitly create an index
# index is alpha, not an integer
s3 = pd.Series([1,2,3], index=['a','b','c'])
s3
s3.index
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Please note the type of the index items. It is not string but 'object'.
|
# look up by label value and not object position
s3['b']
# position also works
s3[2]
# create series from an existing index
# scalar value will be copied at each index label
s4 = pd.Series(2,index=s2.index)
s4
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
It is a common practice to initialize the Series objects using NumPy ndarrays, and with various NumPy functions that create arrays. The following code creates a Series from five normally distributed values:
|
np.random.seed(123456)
pd.Series(np.random.randn(5))
# 0 through 9
pd.Series(np.linspace(0,9,10))
# 0 through 8
pd.Series(np.arange(0,9))
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
A Series can also be created from a Python dictionary. The keys of the dictionary are used as the index labels for the Series:
|
s6 = pd.Series({'a':1,'b':2,'c':3,'d':4})
s6
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Size, Shape, Count and Uniqueness of Values
|
# example series which also contains a NaN
s = pd.Series([0,1,1,2,3,4,5,6,7,np.NaN])
s
# length of the Series
len(s)
s.size
# shape is a tuple with one value
s.shape
# number of values not part of NaN can be found using count() method
s.count()
# all unique values
s.unique()
# count of non-NaN values, returned max to min order
s.value_counts()
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Peeking at data with heads, tails and take
pandas provides the .head() and .tail() methods to examine just the first few or last few records in a Series. By default, these return the first five or last five rows respectively, but you can use the n parameter or just pass an integer to specify the number of rows:
|
# first five
s.head()
# first three
s.head(3)
# last five
s.tail()
# last 2
s.tail(n=2) # equivalent to s.tail(2)
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
The .take() method will return the rows in a series that correspond to the zero-based positions specified in a list:
|
# only take specific items
s.take([0,3,9])
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Looking up values in Series
Values in a Series object can be retrieved using the [] operator and passing either a single index label or a list of index labels.
|
# single item lookup
s3['a']
# lookup by position since index is not an integer
s3[2]
# multiple items
s3[['a','c']]
# series with an integer index but not starting with 0
s5 = pd.Series([1,2,3], index =[11,12,13])
s5[12] # looked up by label, since the value passed and the index are both integers
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
To alleviate the potential confusion between label-based and position-based lookups, label-based lookup can be enforced using the .loc[] accessor:
|
# force lookup by index label
s5.loc[12]
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Lookup by position can be enforced using the iloc[] accessor:
|
# force lookup by position or location
s5.iloc[1]
# multiple items by index label
s5.loc[[12,10]]
# multiple items by position or location
s5.iloc[[1,2]]
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
If a location / position passed to .iloc[] in a list is out of bounds, an exception will be thrown. This is different from .loc[], which, if passed a label that does not exist, will return NaN as the value for that label:
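For instance (a small added illustration, not from the original notebook), an out-of-bounds position passed to `.iloc[]` raises an `IndexError`:
```
import pandas as pd

s = pd.Series([1, 2, 3], index=[11, 12, 13])
# position 99 does not exist, so .iloc[] raises an IndexError
try:
    s.iloc[[1, 99]]
except IndexError as err:
    print('IndexError:', err)
```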
|
s5.loc[[12,-1,15]]
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
A Series also has a property .ix that can be used to look up items either by label or by zero-based array position.
|
s3
# label based lookup
s3.ix[['a','b']]
# position based lookup
s3.ix[[1,2]]
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
This can become complicated if the indexes are integers and you pass a list of integers to ix. Since they are of the same type, the lookup will be by index label instead of position:
|
# this looks by label and not position
# note that 1,2 have NaN as those labels do not exist in the index
s5.ix[[1,2,10,11]]
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Alignment via index labels
A fundamental difference between a NumPy ndarray and a pandas Series is the ability of a Series to automatically align data from another Series based on label values before performing an operation.
|
s6 = pd.Series([1,2,3,4], index=['a','b','c','d'])
s6
s7 = pd.Series([4,3,2,1], index=['d','c','b','a'])
s7
s6 + s7
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
This is a very different result from what it would have been if these were two pure NumPy arrays being added. A NumPy ndarray adds the items at identical positions of each array, resulting in different values.
|
a1 = np.array([1,2,3,4,5])
a2 = np.array([5,4,3,2,1])
a1 + a2
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
The process of adding two Series objects differs from the process of addition of arrays as it first aligns data based on index label values instead of simply applying the operation to elements in the same position. This becomes significantly powerful when using pandas Series to combine data based on labels instead of having to first order the data manually.
Arithmetic Operations
Arithmetic operations (+, -, *, /) can be applied either to a Series and a scalar or between two Series objects.
|
# multiply all values in s3 by 2
s3 * 2
# a scalar Series using s3's index
# less efficient, as it does not use scalar broadcasting
t = pd.Series(2,s3.index)
s3 * t
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
To reinforce the point that alignment is being performed when applying arithmetic operations across two Series objects, look at the following two Series as examples:
|
# we will add this to s9
s8 = pd.Series({'a':1,'b':2,'c':3,'d':5})
s8
s9 = pd.Series({'b':6,'c':7,'d':9,'e':10})
s9
# NaN's result for a and e demonstrates alignment
s8 + s9
s10 = pd.Series([1.0,2.0,3.0],index=['a','a','b'])
s10
s11 = pd.Series([4.0,5.0,6.0], index=['a','a','c'])
s11
# will result in four 'a' index labels
s10 + s11
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
The reason for the above result is that, during alignment, pandas actually performs a cartesian product of the sets of all the unique index labels in both Series objects, and then applies the specified operation to every resulting combination of matching items.
To explain why there are four 'a' index values: s10 contains two 'a' labels and s11 also contains two 'a' labels. Every combination of the 'a' labels in each will be calculated, resulting in four 'a' labels. There is one 'b' label from s10 and one 'c' label from s11. Since there is no matching label for either in the other Series object, each only results in a single row in the resulting Series object.
Each combination of values for 'a' in both Series are computed, resulting in the four values: 1+4,1+5,2+4 and 2+5.
So remember that an index can have duplicate labels, and during alignment this will result in a number of index labels equivalent to the products of the number of the labels in each Series.
The special case of Not-A-Number (NaN)
pandas mathematical operators and functions handle NaN in a special manner (compared to NumPy ndarray) that does not break the computations. pandas is lenient with missing data assuming that it is a common situation.
|
nda = np.array([1,2,3,4,5])
nda.mean()
# mean of numpy array values with a NaN
nda = np.array([1,2,3,4,np.NaN])
nda.mean()
# Series object ignores NaN values - does not get factored
s = pd.Series(nda)
s.mean()
# handle NaN values like Numpy
s.mean(skipna=False)
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Boolean selection
Items in a Series can be selected, based on the value instead of index labels, via the utilization of a Boolean selection.
|
# which rows have values that are > 5
s = pd.Series(np.arange(0,10))
s > 5
# select rows where values are > 5
# overloading the Series object [] operator
logicalResults = s > 5
s[logicalResults]
# a little shorter version
s[s > 5]
# using & operator
s[(s>5)&(s<9)]
# using | operator
s[(s > 3) | (s < 5)]
# are all items >= 0?
(s >=0).all()
# are any items < 2?
(s < 2).any()
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
The result of these logical expressions is a Boolean selection, a Series of True and False values. The .sum() method of a Series, when given a series of Boolean values, will treat True as 1 and False as 0. The following demonstrates using this to determine the number of items in a Series that satisfy a given expression:
|
(s < 2).sum()
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Reindexing a Series
Reindexing in pandas is a process that makes the data in a Series or DataFrame match a given set of labels.
This process of performing a reindex includes the following steps:
1. Reordering existing data to match a set of labels.
2. Inserting NaN markers where no data exists for a label.
3. Possibly, filling missing data for a label using some type of logic
|
# sample series of five items
s = pd.Series(np.random.randn(5))
s
# change the index
s.index = ['a','b','c','d','e']
s
# concat copies index values verbatim
# potentially making duplicates
np.random.seed(123456)
s1 = pd.Series(np.random.randn(3))
s2 = pd.Series(np.random.randn(3))
combined = pd.concat([s1,s2])
combined
# reset the index
combined.index = np.arange(0,len(combined))
combined
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Greater flexibility in creating a new index is provided using the .reindex() method. An example of the flexibility of .reindex() over assigning the .index property directly is that the list provided to .reindex() can be of a different length than the number of rows in the Series:
|
np.random.seed(123456)
s1 = pd.Series(np.random.randn(4),['a','b','c','d'])
# reindex with different number of labels
# results in dropped rows and/or NaN's
s2 = s1.reindex(['a','c','g'])
s2
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
There are several things here that are important to point out about .reindex() method.
First is that the result of .reindex() method is a new Series. This new Series has an index with labels that are provided as parameter to reindex().
For each item in the given parameter list, if the original Series contains that label, then the value is assigned to that label.
If that label does not exist in the original Series, pandas assigns a NaN value.
Rows in the original Series whose label is not specified in the parameter of .reindex() are not included in the result.
To demonstrate that the result of .reindex() is a new Series object, changing a value in s2 does not change the values in s1:
|
# s2 is a different series than s1
s2['a'] = 0
s2
# this did not modify s1
s1
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Reindex is also useful when you want to align two Series to perform an operation on matching elements from each Series; however, for some reason, the two Series have index labels that will not initially align.
|
# different types for the same values of labels causes big issue
s1 = pd.Series([0,1,2],index=[0,1,2])
s2 = pd.Series([3,4,5],index=['0','1','2'])
s1 + s2
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
The reason why this happens in pandas is as follows:
pandas first tries to align by the indexes and finds no matches, so it copies the index labels from the first Series and tries to append the indexes from the second Series.
However, since they are of different types, it defaults back to a zero-based integer sequence, which results in duplicate values.
Finally, all values are NaN because the operation tries to add, for example, the item in the first Series with integer label 0 to the item with the same label in the other Series, cannot find it, and therefore the result is NaN.
|
# reindex by casting the label types and we will get the desired result
s2.index = s2.index.values.astype(int)
s1 + s2
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
The default action of inserting NaN as a missing value during reindexing can be changed by using the fill_value parameter of the method.
|
# fill with 0 instead of NaN
s2 = s.copy()
s2.reindex(['a','f'],fill_value=0)
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
When performing a reindex on ordered data such as a time series, it is possible to perform interpolation or filling of values. The following example demonstrates forward filling, often referred to as "last known value".
|
# create example to demonstrate fills
s3 = pd.Series(['red','green','blue'],index=[0,3,5])
s3
# forward fill using ffill method
s3.reindex(np.arange(0,7), method='ffill')
# backward fill using bfill method
s3.reindex(np.arange(0,7),method='bfill')
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Modifying a Series in-place
There are several ways that an existing Series can be modified in-place having either its values changed or having rows added or deleted.
A new item can be added to a Series by assigning a value to an index label that does not already exist.
|
np.random.seed(123456)
s = pd.Series(np.random.randn(3),index=['a','b','c'])
s
# add a new item to the Series
# this is done in-place
# a new Series with the modified value is not returned
s['d'] = 100
s
# value at a specific index label can be changed by assignment:
s['d'] = -100
s
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Items can be removed from a Series using the del statement and passing the index label(s) to be removed.
|
del(s['a'])
s
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Slicing a Series
|
# a series to use for slicing
# using index labels not starting at 0 to demonstrate
# position based slicing
s = pd.Series(np.arange(100,110),index=np.arange(10,20))
s
# items at position 0,2,4
s[0:6:2]
# equivalent to
s.iloc[[0,2,4]]
# first five by slicing, same as .head(5)
s[:5]
# fourth position to the end
s[4:]
# every other item in the first five positions
s[:5:2]
# every other item starting at the fourth position
s[4::2]
# reverse the series
s[::-1]
# every other starting at position 4, in reverse
s[4::-2]
# :-2 means everything except the last two items (positions 0 through 7)
s[:-2]
# last 3 items
# equivalent to tail(3)
s[-3:]
# equivalent to s.tail(4).head(3)
s[-4:-1]
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
An important thing to keep in mind when using slicing is that the result of the slice is actually a view into the original Series. Modification of values through the result of the slice will modify the original Series.
|
# preserve s
# slice with first 2 rows
copy = s.copy()
slice = copy[:2]
slice
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Now the assignment of a value to an element of a slice will change the value in the original Series:
|
slice[11] = 1000
copy
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Slicing can be performed on Series objects with a non-integer index.
|
# used to demonstrate the next two slices
s = pd.Series(np.arange(0,5),index=['a','b','c','d','e'])
s
# slicing with integer values will extract items based on position:
s[1:3]
# with a non-integer index, it is also possible to slice with values of the same type as the index:
s['b':'d']
|
pandas/01.Pandas - Series Object.ipynb
|
vravishankar/Jupyter-Books
|
mit
|
Load in the SIF file for Pathway Commons, using pandas.read_csv and specifying the three column names species1, interaction_type, and species2:
|
sif_data = pandas.read_csv("shared/pathway_commons.sif",
sep="\t", names=["species1","interaction_type","species2"])
|
class10_closeness_python3_template.ipynb
|
ramseylab/networkscompbio
|
apache-2.0
|
Subset the data frame to include only rows for which the interaction_type column contains the string controls-expression-of; subset columns to include only columns species1 and species2 using the [ operator and the list ["species1","species2"]; and eliminate redundant edges in the edge-list using the drop_duplicates method.
|
interac_grn = sif_data[sif_data.interaction_type == "controls-expression-of"]
interac_grn_unique = interac_grn[["species1","species2"]].drop_duplicates()
|
class10_closeness_python3_template.ipynb
|
ramseylab/networkscompbio
|
apache-2.0
|
Create an undirected graph in igraph, from the dataframe edge-list, using Graph.TupleList and specifying directed=False. Print out the graph summary using the summary instance method.
|
grn_igraph = igraph.Graph.TupleList(interac_grn_unique.values.tolist(), directed=False)
grn_igraph.summary()
|
class10_closeness_python3_template.ipynb
|
ramseylab/networkscompbio
|
apache-2.0
|
For one vertex at a time (iterating over the vertex sequence grn_igraph.vs), compute that vertex's harmonic mean closeness centrality using Eq. 7.30 from Newman's book. Don't forget to eliminate the "0" distance between a vertex and itself, in the results you get back from calling the shortest_paths method on the Vertex object. Just for information purposes, measure how long the code takes to run, in seconds, using timeit.default_timer().
|
N = len(grn_igraph.vs)
# allocate a vector to contain the vertex closeness centralities; initialize to zeroes
# (so if a vertex is a singleton we don't have to update its closeness centrality)
closeness_centralities = numpy.zeros(N)
# initialize a counter
ctr = 0
# start the timer
start_time = timeit.default_timer()
# for each vertex in `grn_igraph.vs`
for my_vertex in grn_igraph.vs:
    # compute the geodesic distance to every other vertex, from my_vertex, using the `shortest_paths` instance method;
    # put it in a numpy.array
    distances = numpy.array(my_vertex.shortest_paths()[0])
    # filter the numpy array to include only entries that are nonzero and finite, using `> 0 & numpy.isfinite(...)`
    distances = distances[(distances > 0) & numpy.isfinite(distances)]
    # if there are any distance values that survived the filtering, take their element-wise reciprocals,
    # then compute the sum, then divide by N-1 (following Eq. 7.30 in Newman)
    if distances.size > 0:
        closeness_centralities[ctr] = numpy.sum(1.0 / distances) / (N - 1)
    # increment the counter
    ctr += 1
# compute the elapsed time
ci_elapsed = timeit.default_timer() - start_time
print(ci_elapsed)
|
class10_closeness_python3_template.ipynb
|
ramseylab/networkscompbio
|
apache-2.0
|
Histogram the harmonic-mean closeness centralities. Do they have a large dynamic range?
|
import matplotlib.pyplot
matplotlib.pyplot.hist(closeness_centralities)
matplotlib.pyplot.xlabel("Ci")
matplotlib.pyplot.ylabel("Frequency")
matplotlib.pyplot.show()
|
class10_closeness_python3_template.ipynb
|
ramseylab/networkscompbio
|
apache-2.0
|
Scatter plot the harmonic-mean closeness centralities vs. the log10 degree. Is there any kind of relationship?
|
ax = matplotlib.pyplot.gca()
ax.scatter(grn_igraph.degree(), closeness_centralities)
ax.set_xscale("log")
matplotlib.pyplot.xlabel("degree")
matplotlib.pyplot.ylabel("closeness")
matplotlib.pyplot.show()
|
class10_closeness_python3_template.ipynb
|
ramseylab/networkscompbio
|
apache-2.0
|
Which protein has the highest harmonic-mean closeness centrality in the network, and what is its centrality value? use numpy.argmax
|
print(numpy.max(closeness_centralities))
grn_igraph.vs[numpy.argmax(closeness_centralities)]["name"]
|
class10_closeness_python3_template.ipynb
|
ramseylab/networkscompbio
|
apache-2.0
|
Print the names of the top 10 proteins in the network by harmonic-mean closeness centrality, using numpy.argsort:
|
grn_igraph.vs[numpy.argsort(closeness_centralities)[::-1][0:10].tolist()]["name"]
|
class10_closeness_python3_template.ipynb
|
ramseylab/networkscompbio
|
apache-2.0
|
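To see the centrality values alongside the protein names, the same argsort result can be zipped with the centralities; a small sketch assuming closeness_centralities and grn_igraph from the cells above:
top10 = numpy.argsort(closeness_centralities)[::-1][0:10]
list(zip(grn_igraph.vs[top10.tolist()]["name"], closeness_centralities[top10]))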
It would be quite unwise to train on the entire train set, because then we would overfit:
our algorithm would fit itself to patterns that are present only in the train set
and would fail badly on real data. So we split the train set into two parts:
we train on 75% and use the remaining 25% to check that we are not failing too badly.
If we simply took the first 25% of the whole train set, the numbers of
outdoor and indoor images could be unbalanced. So for training we take the first 75%
of the outdoor images plus the first 75% of the indoor images. This preserves the
outdoor:indoor proportion of the full train set. It would be especially nice if the data
under study follows the same proportion.
|
# Separate the outdoor and indoor samples.
sample_out = sample[result[:, 0] == 1]
sample_in = sample[result[:, 1] == 1]
result_out = result[result[:, 0] == 1]
result_in = result[result[:, 1] == 1]
# Compute the sizes of the indoor and outdoor parts of the training set.
train_size_in = int(sample_in.shape[0] * 0.75)
train_size_out = int(sample_out.shape[0] * 0.75)
# Split the outdoor and indoor samples into a training and a test part.
x_train_out, x_test_out = np.split(sample_out, [train_size_out])
y_train_out, y_test_out = np.split(result_out, [train_size_out])
x_train_in, x_test_in = np.split(sample_in, [train_size_in])
y_train_in, y_test_in = np.split(result_in, [train_size_in])
# Build the combined train and test sets by mixing the indoor and outdoor samples.
x_train = np.vstack([x_train_in, x_train_out])
y_train = np.vstack([y_train_in, y_train_out])
x_test = np.vstack([x_test_in, x_test_out])
y_test = np.vstack([y_test_in, y_test_out])
|
optimizaion/kaggle/eshlykov-kaggle.ipynb
|
eshlykov/mipt-day-after-day
|
unlicense
|
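For reference, the same stratified 75/25 split can be produced with scikit-learn; a minimal sketch, assuming scikit-learn is installed and that sample and result are the arrays used above (note that train_test_split shuffles, whereas the code above keeps the original order):
from sklearn.model_selection import train_test_split
import numpy as np

labels = np.argmax(result, axis=1)  # 0 = outdoor, 1 = indoor
x_tr, x_te, y_tr, y_te = train_test_split(
    sample, result, test_size=0.25, stratify=labels, random_state=0)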
For each picture we want to find a vector $(p_0, p_1)$ of probabilities such that $p_i$ is the probability that the picture belongs to class $i$ ($0$ for outdoor, $1$ for indoor).
Implementing logistic regression, we want to push these probabilities towards their true distribution.
The model produces an output of the form $$ W x + b, $$
where $x$ is our image vector and the result is a numeric vector of dimension $2$ with arbitrary values. To turn these values into probabilities between $0$ and $1$, we implement the function
$$
\text{softmax}(W, b, x) = \frac{e^{Wx+b}}{\sum(e^{Wx+b})},
$$
whose outputs sum to exactly 1, and we use them to approximate the probabilities.
We will assess the quality of the model with cross-entropy, see https://en.wikipedia.org/wiki/Cross_entropy.
First note that $x$ is a vector of dimension 3072, $W$ is a $2 \times 3072$ matrix, and $b$ is a vector of dimension 2.
Let $x'_i = x_i$ for $i \leqslant 3072$ and $x'_{3073} = 1$. This gives a vector $x'$ of dimension 3073. Let $W'_{i,j} = W_{i,j}$ for $i \leqslant 2, j \leqslant 3072$ and $W'_{i,3073} = b_i$ for $i \leqslant 2$.
In other words, we simply appended a 1 to the vector $x$ and appended the vector $b$ to the matrix $W$ on the right.
Note now that the equality $Wx + b = W'x'$ holds exactly. From now on we forget about the vector $b$ and work with a single $2 \times 3073$ matrix whose entries have to be estimated. Everywhere below we write $W' = W$ and $x' = x$.
Gradient descent follows the rule $W_{k+1} = W_k - \eta_k \nabla L(W_k)$, where $\eta_k$ is the step size and $L$ is the loss function. So we need the gradient of $L$, that is, its partial derivatives with respect to all 6146 variables.
Recall how $L$ is defined. Let $y$ be a vector of the form $(1, 0)$ or $(0, 1)$, with a 1 in position $k$, where $k - 1$ is the class of the picture under consideration. The dimension of $y$ is 2; the vector $y$ encodes the answer for the given picture.
Then
$$ L(W) = -y_1 \ln \frac{e^{(Wx)_1}}{e^{(Wx)_1} + e^{(Wx)_2}} - y_2 \ln \frac{e^{(Wx)_2}}{e^{(Wx)_1} + e^{(Wx)_2}} + \frac{\lambda}{2} \sum_{i=1}^{2} \sum_{j=1}^{3073} W_{i,j}^2. $$
The last sum is the so-called regularizer. With many features (we have 6146 of them), logistic regression can overfit. By adding all the parameters to the loss we rule out the unnatural outcome where some parameters are tiny and others huge, because large parameters would strongly inflate the regularizer while the function is being minimized. A reasonable result therefore becomes more likely.
This is described in the Machine Learning course by Stanford University, week three, second lesson. Link: https://www.coursera.org/learn/machine-learning/lecture/4BHEy/regularized-logistic-regression.
Now let us find the derivative with respect to $W_{i,j}$: $$
\frac{dL(W)}{dW_{i,j}} =
-y_1 \frac{e^{(Wx)_1} + e^{(Wx)_2}}{e^{(Wx)_1}} \cdot
\frac{-e^{(Wx)_1} e^{(Wx)_i} x_j}
{(e^{(Wx)_1} + e^{(Wx)_2})^2}
-y_2 \frac{e^{(Wx)_1} + e^{(Wx)_2}}{e^{(Wx)_2}} \cdot
\frac{-e^{(Wx)_2} e^{(Wx)_i} x_j}
{(e^{(Wx)_1} + e^{(Wx)_2})^2}
- y_i \frac{e^{(Wx)_1} + e^{(Wx)_2}}{e^{(Wx)_i}} \cdot
\frac{e^{(Wx)_i} x_j (e^{(Wx)_1} + e^{(Wx)_2})}
{(e^{(Wx)_1} + e^{(Wx)_2})^2}
+ \lambda W_{i,j}. $$
Simplifying a little: $$
\frac{dL(W)}{dW_{i,j}} =
\frac{ x_j e^{(Wx)_i} (y_1 + y_2) }
{e^{(Wx)_1} + e^{(Wx)_2}}
- y_i x_j
+ \lambda W_{i,j}. $$
Simplifying even further (using $y_1 + y_2 = 1$), we arrive at the final answer: $$
\frac{dL(W)}{dW_{i,j}} = \left( \frac{e^{(Wx)_i}}{e^{(Wx)_1} + e^{(Wx)_2}} - y_i \right) x_j
+ \lambda W_{i,j}.
$$
Accordingly, if $j = 3073$, that is, when differentiating with respect to the variables $W_{1, 3073} = b_1, \ldots, W_{2, 3073} = b_2$, the factor in front of the bracket is simply 1.
Now let us move on to the implementation.
|
def softmax(W, x):
# The logsumexp function evaluates the sum of exponentials more stably,
# almost eliminating the overflow problem.
p = np.dot(x, W.T)
return np.exp(p - scm.logsumexp(p, axis=1).reshape(-1, 1))
def loss(y, softmax, W, l):
# Formula from the Wikipedia article linked above, with the regularizer added.
return np.mean(-np.sum(y * np.log(softmax), axis=1)) + l * np.trace(W @ W.T) / (2 * y.shape[0])
# Compute the gradient averaged over all images.
# The gradient here is not a vector, as usual, but a 2x3073 matrix.
def gradients(W, x, y, l):
p = softmax(W, x)
grads = (p - y).T @ x + l * W
return grads / x.shape[0] # As much matrix arithmetic as possible!
# Step-size selection by the Armijo rule from the seminar handout.
def armijo(W, x, y, l, alpha=0.5, beta=0.5):
s = 1
grad = gradients(W, x, y, l)
dW = -grad # Descent direction.
loss_1 = loss(y_train, softmax(W + s * dW, x), W, l)
loss_0 = loss(y_train, softmax(W, x), W, l)
while loss_1 > loss_0 + alpha * s * (grad * dW).sum():
s = beta * s
loss_1 = loss(y_train, softmax(W + s * dW, x), W, l)
loss_0 = loss(y_train, softmax(W, x), W, l)
return s
def classify(x_train, x_test, y_train, y_test, iters, l):
# As noted above, W is 2 by 3072 and b has size 2, but we append b to W.
W = np.zeros((2, 3072))
b = np.zeros(2)
# To append it, write b as a column vector and use hstack.
b = b.reshape(b.size, 1)
W = np.hstack([W, b])
# Accordingly, x_train and x_test must be changed by appending a 1 to each sample.
fictious = np.ones((x_train.shape[0], 1))
x_train = np.hstack([x_train, fictious])
fictious = np.ones((x_test.shape[0], 1))
x_test = np.hstack([x_test, fictious])
# Record the loss at every descent step.
losses_train = [loss(y_train, softmax(W, x_train), W, l)]
losses_test = [loss(y_test, softmax(W, x_test), W, l)]
# The descent itself.
for i in tqdm.tqdm(np.arange(iters)):
# Exactly so: Armijo is called with alpha = l and l = 0!
# Because I messed this up and did not notice! =)
eta = armijo(W, x_train, y_train, 0, l)
W = W - eta * gradients(W, x_train, y_train, l)
losses_train.append(loss(y_train, softmax(W, x_train), W, l))
losses_test.append(loss(y_test, softmax(W, x_test), W, l))
# The output is the optimal W and the loss arrays.
return W, losses_train, losses_test
l = 0.04 # Works better than having no regularizer at all (l = 0).
# 100 iterations will be enough for us; overfitting sets in quite quickly.
W, losses_train, losses_test = classify(x_train, x_test, y_train, y_test, 100, l)
plt.plot(losses_train, color='green', label='train')
plt.plot(losses_test, color='red', label='test')
plt.xlabel('Gradient descent iteration')
plt.ylabel('Loss')
plt.legend()
plt.show()
iters = np.argmin(losses_test) # At this iteration the test error is minimal.
# Run exactly that many iterations.
W, losses_train, losses_test = classify(x_train, x_test, y_train, y_test, iters, l)
|
optimizaion/kaggle/eshlykov-kaggle.ipynb
|
eshlykov/mipt-day-after-day
|
unlicense
|
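A simple way to validate the analytic gradient derived above is a central finite-difference check on individual entries of W; a hedged sketch, assuming the softmax, loss and gradients functions and the x_train, y_train arrays defined above (the checked index, eps and the helper name numeric_grad_entry are arbitrary):
import numpy as np

def numeric_grad_entry(W, x, y, l, i, j, eps=1e-5):
    # Central finite difference of the loss with respect to W[i, j].
    Wp, Wm = W.copy(), W.copy()
    Wp[i, j] += eps
    Wm[i, j] -= eps
    return (loss(y, softmax(Wp, x), Wp, l) - loss(y, softmax(Wm, x), Wm, l)) / (2 * eps)

W_check = np.random.randn(2, x_train.shape[1]) * 0.01
analytic = gradients(W_check, x_train, y_train, 0.04)
print(analytic[0, 0], numeric_grad_entry(W_check, x_train, y_train, 0.04, 0, 0))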
Let us compute the root-mean-square error on the test set to estimate what to expect on Kaggle.
|
# Append a 1 to each sample.
nx_test = np.hstack([x_test, np.ones(x_test.shape[0]).reshape(x_test.shape[0], 1)])
probabilities = softmax(W, nx_test) # Compute the probabilities.
recognized = np.argmax(probabilities, axis=1) # Predicted classes.
answers = np.argmax(y_test, axis=1) # True answers.
np.sqrt(np.mean((recognized - answers) ** 2)) # The error itself.
|
optimizaion/kaggle/eshlykov-kaggle.ipynb
|
eshlykov/mipt-day-after-day
|
unlicense
|
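For a two-class problem, plain classification accuracy is often easier to interpret than the RMSE above; a one-line sketch reusing the recognized and answers arrays from the previous cell:
print(np.mean(recognized == answers))  # fraction of correctly classified test images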
Now we apply the fitted matrix to the data under study.
|
# Append a 1 to each sample.
ntest = np.hstack([test, np.ones(test.shape[0]).reshape(test.shape[0], 1)])
probabilities = softmax(W, ntest) # Compute the probabilities.
ress = np.argmax(probabilities, axis=1).reshape(-1, 1) # Predicted classes.
# It remains to put everything into a table so it can be written to csv.
ids = np.arange(ress.size).reshape(-1, 1)
submit = np.hstack([ids, ress])
# Fill in the csv file.
import csv
with open('submission.csv', 'w', newline='') as csvfile:
submission = csv.writer(csvfile, delimiter=',')
submission.writerow(['id', 'res'])
submission.writerows(submit)
|
optimizaion/kaggle/eshlykov-kaggle.ipynb
|
eshlykov/mipt-day-after-day
|
unlicense
|
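An equivalent and slightly shorter way to write the submission file is through pandas, assuming it is available and that submit is the (id, res) array built above:
import pandas as pd

pd.DataFrame(submit, columns=['id', 'res']).to_csv('submission.csv', index=False)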
Write a function derivs for usage with scipy.integrate.odeint that computes the derivatives for the damped, driven harmonic oscillator. The solution vector at each time will be $\vec{y}(t) = (\theta(t),\omega(t))$.
|
def derivs(y, t, a, b, omega0):
"""Compute the derivatives of the damped, driven pendulum.
Parameters
----------
y : ndarray
The solution vector at the current time t[i]: [theta[i],omega[i]].
t : float
The current time t[i].
a, b, omega0: float
The parameters in the differential equation.
Returns
-------
dy : ndarray
The vector of derivatives at t[i]: [dtheta[i],domega[i]].
"""
theta = y[0]
omega = y[1]
dtheta = omega
domega = -g/l*np.sin(theta) - a*omega - b*np.sin(omega0*t)
return np.array([dtheta, domega])
derivs(np.array([np.pi,1.0]), 0, 1.0, 1.0, 1.0)
assert np.allclose(derivs(np.array([np.pi,1.0]), 0, 1.0, 1.0, 1.0), [1.,-1.])
def energy(y):
"""Compute the energy for the state array y.
The state array y can have two forms:
1. It could be an ndim=1 array of np.array([theta,omega]) at a single time.
2. It could be an ndim=2 array where each row is the [theta,omega] at single
time.
Parameters
----------
y : ndarray, list, tuple
A solution vector
Returns
-------
E/m : float (ndim=1) or ndarray (ndim=2)
The energy per mass.
"""
# YOUR CODE HERE
raise NotImplementedError()
assert np.allclose(energy(np.array([np.pi,0])),g)
assert np.allclose(energy(np.ones((10,2))), np.ones(10)*energy(np.array([1,1])))
|
assignments/assignment10/ODEsEx03.ipynb
|
Jackporter415/phys202-2015-work
|
mit
|
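One possible completion of the energy stub, given here only as a hedged sketch: with the usual pendulum energy per unit mass $E/m = \frac{1}{2} l^2 \omega^2 + g l (1 - \cos\theta)$, the first assert (energy([pi, 0]) close to g) holds only if l = 0.5, so this assumes g and l are defined earlier in the notebook with l = 0.5 (the name energy_sketch is hypothetical, to avoid clashing with the graded stub):
def energy_sketch(y):
    # E/m = (1/2) l^2 omega^2 + g l (1 - cos(theta)); assumes g and l defined above, with l = 0.5.
    y = np.atleast_2d(y)
    theta, omega = y[:, 0], y[:, 1]
    e = 0.5 * l**2 * omega**2 + g * l * (1 - np.cos(theta))
    return e[0] if e.size == 1 else e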
Simple pendulum
Use the above functions to integrate the simple pendulum for the case where it starts at rest pointing vertically upwards. In this case, it should remain at rest with constant energy.
Integrate the equations of motion.
Plot $E/m$ versus time.
Plot $\theta(t)$ and $\omega(t)$ versus time.
Tune the atol and rtol arguments of odeint until $E/m$, $\theta(t)$ and $\omega(t)$ are constant.
Anytime you have a differential equation with a conserved quantity, it is critical to make sure the numerical solutions conserve that quantity as well. This also gives you an opportunity to find other bugs in your code. The default error tolerances (atol and rtol) used by odeint are not sufficiently small for this problem. Start by trying atol=1e-3, rtol=1e-2 and then decrease each by an order of magnitude until your solutions are stable.
|
# YOUR CODE HERE
raise NotImplementedError()
# YOUR CODE HERE
raise NotImplementedError()
# YOUR CODE HERE
raise NotImplementedError()
assert True # leave this to grade the two plots and their tuning of atol, rtol.
|
assignments/assignment10/ODEsEx03.ipynb
|
Jackporter415/phys202-2015-work
|
mit
|
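A minimal sketch of the requested integration, assuming derivs, g and l are defined as above; it uses scipy.integrate.odeint with tightened tolerances and the initial condition theta = pi, omega = 0 (the pendulum at rest, pointing straight up):
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

t = np.linspace(0, 10.0, 1000)
y0 = [np.pi, 0.0]                                   # at rest, pointing vertically upwards
sol = odeint(derivs, y0, t, args=(0.0, 0.0, 0.0),   # undamped, undriven case
             atol=1e-9, rtol=1e-8)
plt.plot(t, sol[:, 0], label=r'$\theta(t)$')
plt.plot(t, sol[:, 1], label=r'$\omega(t)$')
plt.xlabel('t')
plt.legend()
plt.show()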
Damped pendulum
Write a plot_pendulum function that integrates the damped, driven pendulum differential equation for a particular set of parameters $[a,b,\omega_0]$.
Use the initial conditions $\theta(0)=-\pi + 0.1$ and $\omega=0$.
Decrease your atol and rtol even further and make sure your solutions have converged.
Make a parametric plot of $[\theta(t),\omega(t)]$ versus time.
Use the plot limits $\theta \in [-2 \pi,2 \pi]$ and $\omega \in [-10,10]$
Label your axes and customize your plot to make it beautiful and effective.
|
def plot_pendulum(a=0.0, b=0.0, omega0=0.0):
"""Integrate the damped, driven pendulum and make a phase plot of the solution."""
# YOUR CODE HERE
raise NotImplementedError()
|
assignments/assignment10/ODEsEx03.ipynb
|
Jackporter415/phys202-2015-work
|
mit
|
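A hedged sketch of what plot_pendulum might look like under the same assumptions about derivs (the helper name plot_pendulum_sketch is hypothetical, to avoid clashing with the graded stub):
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

def plot_pendulum_sketch(a=0.0, b=0.0, omega0=0.0):
    """Integrate the damped, driven pendulum and phase-plot the solution (sketch)."""
    t = np.linspace(0, 50.0, 5000)
    y0 = [-np.pi + 0.1, 0.0]
    sol = odeint(derivs, y0, t, args=(a, b, omega0), atol=1e-10, rtol=1e-9)
    plt.plot(sol[:, 0], sol[:, 1], lw=0.5)
    plt.xlim(-2 * np.pi, 2 * np.pi)
    plt.ylim(-10, 10)
    plt.xlabel(r'$\theta$')
    plt.ylabel(r'$\omega$')
    plt.show()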
Use interact to explore the plot_pendulum function with:
a: a float slider over the interval $[0.0,1.0]$ with steps of $0.1$.
b: a float slider over the interval $[0.0,10.0]$ with steps of $0.1$.
omega0: a float slider over the interval $[0.0,10.0]$ with steps of $0.1$.
|
# YOUR CODE HERE
raise NotImplementedError()
|
assignments/assignment10/ODEsEx03.ipynb
|
Jackporter415/phys202-2015-work
|
mit
|
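A one-line sketch of the requested interact call, assuming ipywidgets is available and a plot_pendulum-style function (such as the sketch above) is defined:
from ipywidgets import interact

interact(plot_pendulum_sketch, a=(0.0, 1.0, 0.1), b=(0.0, 10.0, 0.1), omega0=(0.0, 10.0, 0.1));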
BEM method
|
#Q = 2000/3 #strength of the source-sheet,stb/d
h=25.26 #thickness of local gridblock,ft
phi=0.2 #porosity
kx=200 #permeability in x direction,md
ky=200 #permeability in y direction,md
kr=kx/ky #permeability ratio
miu=1 #viscosity,cp
Nw=1 #Number of well
Qwell_1=2000 #Flow rate of well 1
Boundary_V=-400 #boundary velocity ft/day
|
BEM_problem.ipynb
|
BradHub/SL-SPH
|
mit
|
Boundary Discretization
we will create a discretization of the body geometry into panels (line segments in 2D). A panel's attributes are: its starting point, end point and mid-point, its length and its orientation. See the following figure for the nomenclature used in the code and equations below.
<img src="./resources/PanelLocal.png" width="300">
<center>Figure 1. Nomenclature of the boundary element in the local coordinates</center>
Create panel and well class
|
class Panel:
"""Contains information related to a panel."""
def __init__(self, xa, ya, xb, yb):
"""Creates a panel.
Arguments
---------
xa, ya -- Cartesian coordinates of the first end-point.
xb, yb -- Cartesian coordinates of the second end-point.
"""
self.xa, self.ya = xa, ya
self.xb, self.yb = xb, yb
self.xc, self.yc = (xa+xb)/2, (ya+yb)/2 # control-point (center-point)
self.length = math.sqrt((xb-xa)**2+(yb-ya)**2) # length of the panel
# orientation of the panel (angle between x-axis and panel)
self.sinalpha=(yb-ya)/self.length
self.cosalpha=(xb-xa)/self.length
self.Q = 0. # source strength
self.U = 0. # velocity component
self.V = 0. # velocity component
self.P = 0. # pressure coefficient
class Well:
"""Contains information related to a panel."""
def __init__(self, xw, yw,rw,Q):
"""Creates a panel.
Arguments
---------
xw, yw -- Cartesian coordinates of well source.
Q -- Flow rate of well source.
rw -- radius of well source.
"""
self.xw, self.yw = xw, yw
self.Q = Q # source strength
self.rw = rw # well radius
|
BEM_problem.ipynb
|
BradHub/SL-SPH
|
mit
|
We create a node distribution on the boundary that is refined near the corners with the cosspace function
|
def cosspace(st,ed,N):
N=N+1
AngleInc=numpy.pi/(N-1)
CurAngle = AngleInc
space=numpy.linspace(0,1,N)
space[0]=st
for i in range(N-1):
space[i+1] = 0.5*numpy.abs(ed-st)*(1 - math.cos(CurAngle));
CurAngle += AngleInc
if ed<st:
space[0]=ed
space=space[::-1]
return space
|
BEM_problem.ipynb
|
BradHub/SL-SPH
|
mit
|
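A quick check of the node distribution produced by cosspace, assuming the imports from the cells above; the nodes cluster towards both ends of the interval:
print(cosspace(0.0, 1.0, 5))   # e.g. roughly [0, 0.095, 0.345, 0.655, 0.905, 1.0]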
Discretize boundary elements along the boundary
Here we implement the BEM on a square grid
|
N=80 #Number of boundary elements
Nbd=20 #Number of boundary elements on each boundary
Dx=1. #Grid block length in X direction
Dy=1. #Grid block length in Y direction
#Create the array
x_ends = numpy.linspace(0, Dx, N) # computes a 1D-array for x
y_ends = numpy.linspace(0, Dy, N) # computes a 1D-array for y
interval=cosspace(0,Dx,Nbd)
rinterval=cosspace(Dx,0,Nbd)
#interval=numpy.linspace(0,1,Nbd+1)
#rinterval=numpy.linspace(1,0,Nbd+1)
#Define the rectangle boundary
for i in range(Nbd):
x_ends[i]=0
y_ends[i]=interval[i]
for i in range(Nbd):
x_ends[i+Nbd]=interval[i]
y_ends[i+Nbd]=Dy
for i in range(Nbd):
x_ends[i+Nbd*2]=Dx
y_ends[i+Nbd*2]=rinterval[i]
for i in range(Nbd):
x_ends[i+Nbd*3]=rinterval[i]
y_ends[i+Nbd*3]=0
x_ends,y_ends=numpy.append(x_ends, x_ends[0]), numpy.append(y_ends, y_ends[0])
#Define the panel
panels = numpy.empty(N, dtype=object)
for i in range(N):
panels[i] = Panel(x_ends[i], y_ends[i], x_ends[i+1], y_ends[i+1])
#Define the well
wells = numpy.empty(Nw, dtype=object)
wells[0]=Well(Dx/2,Dy/2,0.025,Qwell_1)
#for i in range(N):
# print("Panel Coordinate (%s,%s) sina,cosa (%s,%s) " % (panels[i].xc,panels[i].yc,panels[i].sinalpha,panels[i].cosalpha))
#print("Well Location (%s,%s) radius: %s Flow rate:%s " % (wells[0].xw,wells[0].yw,wells[0].rw,wells[0].Q))
|
BEM_problem.ipynb
|
BradHub/SL-SPH
|
mit
|
Plot boundary elements and wells
|
#Plot the panel
%matplotlib inline
val_x, val_y = 0.3, 0.3
x_min, x_max = min(panel.xa for panel in panels), max(panel.xa for panel in panels)
y_min, y_max = min(panel.ya for panel in panels), max(panel.ya for panel in panels)
x_start, x_end = x_min-val_x*(x_max-x_min), x_max+val_x*(x_max-x_min)
y_start, y_end = y_min-val_y*(y_max-y_min), y_max+val_y*(y_max-y_min)
size = 5
pyplot.figure(figsize=(size, (y_end-y_start)/(x_end-x_start)*size))
pyplot.grid(True)
pyplot.xlabel('x', fontsize=16)
pyplot.ylabel('y', fontsize=16)
pyplot.xlim(x_start, x_end)
pyplot.ylim(y_start, y_end)
pyplot.plot(numpy.append([panel.xa for panel in panels], panels[0].xa),
numpy.append([panel.ya for panel in panels], panels[0].ya),
linestyle='-', linewidth=1, marker='o', markersize=6, color='#CD2305');
pyplot.scatter(wells[0].xw,wells[0].yw,s=100,alpha=0.5)
pyplot.legend(['panels', 'Wells'],
loc=1, prop={'size':12})
|
BEM_problem.ipynb
|
BradHub/SL-SPH
|
mit
|
Boundary element implementation
<img src="./resources/BEMscheme2.png" width="400">
<center>Figure 2. Representation of a local gridblock with boundary elements</center>
Generally, the influence of all the j panels on the i BE node can be expressed as follows:
\begin{matrix}
{{c}_{ij}}{{p}_{i}}+{{p}_{i}}\int_{{{s}_{j}}}{{{H}_{ij}}d{{s}_{j}}}=({{v}_{i}}\cdot \mathbf{n})\int_{{{s}_{j}}}{{{G}_{ij}}}d{{s}_{j}}
\end{matrix}
Where,
${{c}_{ij}}$ is the free term, caused by the source position.
<center>${{c}_{ij}}=\left\{ \begin{matrix}
\begin{matrix}
1 & \text{source j on the internal domain} \\
\end{matrix} \\
\begin{matrix}
0.5 & \text{source j on the boundary} \\
\end{matrix} \\
\begin{matrix}
0 & \text{source j on the external domain} \\
\end{matrix} \\
\end{matrix} \right.$</center>
$\int_{{{s}_{j}}}{{{H}_{ij}}d{{s}_{j}}}$ is the integrated effect of the boundary element source j on the resulting normal flux at BE node i.
$\int_{{{s}_{j}}}{{{G}_{ij}}}d{{s}_{j}}$ is the integrated effect of the boundary element source j on the resulting pressure at BE node i.
Line segment source solution for pressure and velocity (Derived recently)
The integrated effect can be formulated using the line segment source solution, which gives:
\begin{equation}
\int_{{{s}_{j}}}{{{G}_{ij}}}d{{s}_{j}}=B{{Q}_{w}}=P({{{x}'}_{i}},{{{y}'}_{i}})=-\frac{70.60\mu }{h\sqrt{{{k}_{x}}{{k}_{y}}}}\int_{t=0}^{t={{l}_{j}}}{\ln \left\{ {{({x}'-t\cos {{\alpha }_{j}})}^{2}}+\frac{{{k}_{x}}}{{{k}_{y}}}{{({y}'-t\sin {{\alpha }_{j}})}^{2}} \right\}dt}\cdot {{Q}_{w}}
\end{equation}
\begin{equation}
\int_{{{s}_{j}}}{{{H}_{ij}}d{{s}_{j}}}={{v}_{i}}(s)\cdot {{\mathbf{n}}_{i}}=-{{u}_{i}}\sin {{\alpha }_{i}}+{{v}_{i}}\cos {{\alpha }_{i}}
\end{equation}
Where,
\begin{equation}
u\left( {{{{x}'}}_{i}},{{{{y}'}}_{i}} \right)={{A}_{u}}{{Q}_{j}}=\frac{0.8936}{h\phi }\sqrt{\frac{{{k}_{x}}}{{{k}_{y}}}}\int_{t=0}^{t={{l}_{j}}}{\frac{{{{{x}'}}_{i}}-t\cos {{\alpha }_{j}}}{{{\left( {{{{x}'}}_{i}}-t\cos {{\alpha }_{j}} \right)}^{2}}+\frac{{{k}_{x}}}{{{k}_{y}}}{{({{{{y}'}}_{i}}-t\sin {{\alpha }_{j}})}^{2}}}dt}\cdot {{Q}_{j}}
\end{equation}
\begin{equation}
v\left( {{{{x}'}}_{i}},{{{{y}'}}_{i}} \right)={{A}_{v}}{{Q}_{j}}=\frac{0.8936}{h\phi }\sqrt{\frac{{{k}_{x}}}{{{k}_{y}}}}\int_{t=0}^{t={{l}_{j}}}{\frac{{{{{y}'}}_{i}}-t\sin {{\alpha }_{j}}}{{{\left( {{{{x}'}}_{i}}-t\cos {{\alpha }_{j}} \right)}^{2}}+\frac{{{k}_{x}}}{{{k}_{y}}}{{({{{{y}'}}_{i}}-t\sin {{\alpha }_{j}})}^{2}}}dt}\cdot {{Q}_{j}}
\end{equation}
Line segment source Integration function (Bij and Aij)
|
#Panel influence factor Bij
def InflueceP(x, y, panel):
"""Evaluates the contribution of a panel at one point.
Arguments
---------
x, y -- Cartesian coordinates of the point.
panel -- panel which contribution is evaluated.
Returns
-------
Integral over the panel of the influence at one point.
"""
#Transfer global coordinate point(x,y) to local coordinate
x=x-panel.xa
y=y-panel.ya
L1=panel.length
#Calculate the pressure and velocity influence factor
a=panel.cosalpha**2+kr*panel.sinalpha**2
b=x*panel.cosalpha+kr*panel.sinalpha*y
c=y*panel.cosalpha-x*panel.sinalpha
dp=70.6*miu/h/math.sqrt(kx*ky)
Cp = dp/a*(
(
b*math.log(x**2-2*b*L1+a*L1**2+kr*y**2)
-L1*a*math.log((x-L1*panel.cosalpha)**2+kr*(y-L1*panel.sinalpha)**2)
+2*math.sqrt(kr)*c*math.atan((b-a*L1)/math.sqrt(kr)/c)
)
-
(
b*math.log(x**2+kr*y**2)
+2*math.sqrt(kr)*c*math.atan((b)/math.sqrt(kr)/c)
)
)
#debug
#print("a: %s b:%s c:%s " % (a,b,c))
#angle=math.atan((b-a*L1)/math.sqrt(kr)/c)*180/numpy.pi
#print("Magic angle:%s"% angle)
return Cp
def InflueceU(x, y, panel):
"""Evaluates the contribution of a panel at one point.
Arguments
---------
x, y -- Cartesian coordinates of the point.
panel -- panel which contribution is evaluated.
Returns
-------
Integral over the panel of the influence at one point.
"""
#Transfer global coordinate point(x,y) to local coordinate
x=x-panel.xa
y=y-panel.ya
L1=panel.length
#Calculate the pressure and velocity influence factor
a=panel.cosalpha**2+kr*panel.sinalpha**2
b=x*panel.cosalpha+kr*panel.sinalpha*y
c=y*panel.cosalpha-x*panel.sinalpha
dv=-0.4468/h/phi*math.sqrt(kx/ky)
Cu = dv/a*(
(
panel.cosalpha*math.log(x**2-2*b*L1+a*L1**2+kr*y**2)+ 2*math.sqrt(kr)*panel.sinalpha*math.atan((a*L1-b)/math.sqrt(kr)/c)
)
-
(
panel.cosalpha*math.log(x**2+kr*y**2)+2*math.sqrt(kr)*panel.sinalpha*math.atan((-b)/math.sqrt(kr)/c)
)
)
#print("a: %s b:%s c:%s " % (a,b,c))
#angle=math.atan((b-a*L1)/math.sqrt(kr)/c)*180/numpy.pi
#print("Magic angle:%s"% angle)
return Cu
def InflueceV(x, y, panel):
"""Evaluates the contribution of a panel at one point.
Arguments
---------
x, y -- Cartesian coordinates of the point.
panel -- panel which contribution is evaluated.
Returns
-------
Integral over the panel of the influence at one point.
"""
#Transfer global coordinate point(x,y) to local coordinate
x=x-panel.xa
y=y-panel.ya
L1=panel.length
#Calculate the pressure and velocity influence factor
a=panel.cosalpha**2+kr*panel.sinalpha**2
b=x*panel.cosalpha+kr*panel.sinalpha*y
c=y*panel.cosalpha-x*panel.sinalpha
dv=-0.4468/h/phi*math.sqrt(kx/ky)
Cv = dv/a*(
(
panel.sinalpha*math.log(x**2-2*b*L1+a*L1**2+kr*y**2)+ 2*math.sqrt(1/kr)*panel.cosalpha*math.atan((b-a*L1)/math.sqrt(kr)/c)
)
-
(
panel.sinalpha*math.log(x**2+kr*y**2)+2*math.sqrt(1/kr)*panel.cosalpha*math.atan((b)/math.sqrt(kr)/c)
)
)
#print("a: %s b:%s c:%s " % (a,b,c))
#angle=math.atan((b-a*L1)/math.sqrt(kr)/c)*180/numpy.pi
#print("Magic angle:%s"% angle)
return Cv
|
BEM_problem.ipynb
|
BradHub/SL-SPH
|
mit
|
Well source function
Line source solution for pressure and velocity (Datta-Gupta, 2007)
\begin{equation}
P(x,y)=B{{Q}_{w}}=-\frac{70.60\mu }{h\sqrt{{{k}_{x}}{{k}_{y}}}}\ln \left\{ {{(x-{{x}_{w}})}^{2}}+\frac{{{k}_{x}}}{{{k}_{y}}}{{(y-{{y}_{w}})}^{2}} \right\}{{Q}_{w}}+{{P}_{avg}}
\end{equation}
\begin{equation}
\frac{\partial P}{\partial x}=u=\frac{0.8936}{h\phi }\sqrt{\frac{{{k}_{x}}}{{{k}_{y}}}}\sum\limits_{k=1}^{{{N}_{w}}}{{{Q}_{k}}}\frac{x-{{x}_{k}}}{{{\left( x-{{x}_{k}} \right)}^{2}}+\frac{{{k}_{x}}}{{{k}_{y}}}{{(y-{{y}_{k}})}^{2}}}
\end{equation}
\begin{equation}
\frac{\partial P}{\partial y}=v=\frac{0.8936}{h\phi }\sqrt{\frac{{{k}_{x}}}{{{k}_{y}}}}\sum\limits_{k=1}^{{{N}_{w}}}{{{Q}_{k}}}\frac{y-{{y}_{k}}}{{{\left( x-{{x}_{k}} \right)}^{2}}+\frac{{{k}_{x}}}{{{k}_{y}}}{{(y-{{y}_{k}})}^{2}}}
\end{equation}
|
#Well influence factor
def InflueceP_W(x, y, well):
"""Evaluates the contribution of a panel at one point.
Arguments
---------
x, y -- Cartesian coordinates of the point.
panel -- panel which contribution is evaluated.
Returns
-------
Integral over the panel of the influence at one point.
"""
dp=-70.6*miu/h/math.sqrt(kx*ky)
Cp=dp*math.log((x-well.xw)**2+kr*(y-well.yw)**2)
return Cp
def InflueceU_W(x, y, well):
"""Evaluates the contribution of a panel at one point.
Arguments
---------
x, y -- Cartesian coordinates of the point.
panel -- panel which contribution is evaluated.
Returns
-------
Integral over the panel of the influence at one point.
"""
dv=0.8936/h/phi*math.sqrt(kx/ky)
Cu=dv*(x-well.xw)/((x-well.xw)**2+kr*(y-well.yw)**2)
return Cu
def InflueceV_W(x, y, well):
"""Evaluates the contribution of a panel at one point.
Arguments
---------
x, y -- Cartesian coordinates of the point.
panel -- panel which contribution is evaluated.
Returns
-------
Integral over the panel of the influence at one point.
"""
dv=0.8936/h/phi*math.sqrt(kx/ky)
Cv=dv*(y-well.yw)/((x-well.xw)**2+kr*(y-well.yw)**2)
return Cv
#InflueceV(0.5,1,panels[3])
#InflueceP(0,0.5,panels[0])
#InflueceU(0,0.5,panels[0])
|
BEM_problem.ipynb
|
BradHub/SL-SPH
|
mit
|
BEM function solution
Generally, the influence of all the j panels on the i BE node can be expressed as follows:
\begin{matrix}
{{c}_{ij}}{{p}_{i}}+{{p}_{i}}\int_{{{s}_{j}}}{{{H}_{ij}}d{{s}_{j}}}=({{v}_{i}}\cdot \mathbf{n})\int_{{{s}_{j}}}{{{G}_{ij}}}d{{s}_{j}}
\end{matrix}
Applying the boundary conditions along the boundary to the above equation, a linear system can be constructed as follows:
\begin{matrix}
\left[ {{{{H}'}}_{ij}} \right]\left[ {{P}_{i}} \right]=\left[ {{G}_{ij}} \right]\left[ {{v}_{i}}\cdot \mathbf{n} \right]
\end{matrix}
!!!!!MY IMPLEMENTATION MAY HAVE SOME PROBLEMS HERE!!!!!!
All the integrals can be evaluated, except for a panel acting on itself. Where,
<center>$
\left[ {{{{H}'}}_{ij}} \right]=\left\{ \begin{matrix}
\begin{matrix}
{{H}_{ij}} & i\ne j \\
\end{matrix} \\
\begin{matrix}
{{H}_{ij}}+\frac{1}{2} & i=j \\
\end{matrix} \\
\end{matrix} \right.
$</center>
<img src="./resources/BEMscheme.png" width="400">
<center>Figure 3. Representation of coordinate systems and the principle of superposition with a well source and boundary element sources </center>
As shown in Fig. 3, the pressure and velocity at any point i in the local gridblock can be determined using the equations below. Applying the principle of superposition for each BE node along the boundary (Fig. 3), the boundary conditions can be written as follows:
\begin{matrix}
{{P}_{i}}(s)=\sum\limits_{j=1}^{M}{{{B}_{ij}}{{Q}_{j}}} & \text{constant pressure boundary} \\
\end{matrix}
\begin{matrix}
{{v}_{i}}(s)\cdot {{\mathbf{n}}_{i}}=\sum\limits_{j=1}^{M}{{{A}_{ij}}{{Q}_{j}}} & \text{constant flux boundary} \\
\end{matrix}
The ${{P}_{i}}$ and ${{v}_{i}}\cdot {{\mathbf{n}}_{i}}$ are the known boundary conditions. The flow rates (strengths) of the boundary elements in ${{H}_{ij}}$ and ${{G}_{ij}}$ are the only unknown terms.
So we can rearrange the matrices above into a linear system:
<center>$
{{\left[ \begin{matrix}
{{A}_{ij}} \\
{{B}_{ij}} \\
\end{matrix} \right]}_{N\times N}}{{\left[ \begin{matrix}
{{Q}_{j}} \\
{{Q}_{j}} \\
\end{matrix} \right]}_{N\times 1}}={{\left[ \begin{matrix}
-{{u}_{i}}\sin {{\alpha }_{i}}+{{v}_{i}}\cos {{\alpha }_{i}} \\
{{P}_{i}} \\
\end{matrix} \right]}_{N\times 1}}
$</center>
|
def build_matrix(panels):
"""Builds the source matrix.
Arguments
---------
panels -- array of panels.
Returns
-------
A -- NxN matrix (N is the number of panels).
"""
N = len(panels)
A = numpy.empty((N, N), dtype=float)
#numpy.fill_diagonal(A, 0.5)
for i, p_i in enumerate(panels): #target nodes
for j, p_j in enumerate(panels): #BE source
#if i != j: ###Matrix construction
if i>=0 and i<Nbd or i>=3*Nbd and i<4*Nbd:
A[i,j] = -p_j.sinalpha*InflueceU(p_i.xc, p_i.yc, p_j)+p_j.cosalpha*InflueceV(p_i.xc, p_i.yc, p_j)
#A[i,j] = InflueceP(p_i.xc, p_i.yc, p_j)
if i>=Nbd and i<2*Nbd or i>=2*Nbd and i<3*Nbd:
A[i,j] = -p_j.sinalpha*InflueceU(p_i.xc, p_i.yc, p_j)+p_j.cosalpha*InflueceV(p_i.xc, p_i.yc, p_j)
#A[i,j] = InflueceP(p_i.xc, p_i.yc, p_j)
return A
def build_rhs(panels):
"""Builds the RHS of the linear system.
Arguments
---------
panels -- array of panels.
Returns
-------
b -- 1D array ((N+1)x1, N is the number of panels).
"""
b = numpy.empty(len(panels), dtype=float)
for i, panel in enumerate(panels):
V_well=( -panel.sinalpha*Qwell_1*InflueceU_W(panel.xc, panel.yc, wells[0])+panel.cosalpha*Qwell_1*InflueceV_W(panel.xc, panel.yc, wells[0]) )
if i>=0 and i<Nbd:
b[i]=0+V_well
#b[i]=4000
#b[i]=84
if i>=Nbd and i<2*Nbd:
b[i]=-V_well
#b[i]=-42
if i>=2*Nbd and i<3*Nbd:
b[i]=-V_well
#b[i]=-42
if i>=3*Nbd and i<4*Nbd:
b[i]=0+V_well
#b[i]=84
return b
#Qwell_1=300 #Flow rate of well 1
#Boundary_V=-227 #boundary velocity ft/day
A = build_matrix(panels) # computes the singularity matrix
b = build_rhs(panels) # computes the freestream RHS
# solves the linear system
Q = numpy.linalg.solve(A, b)
for i, panel in enumerate(panels):
panel.Q = Q[i]
|
BEM_problem.ipynb
|
BradHub/SL-SPH
|
mit
|
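As a quick sanity check on the assembled boundary-element system, the residual of the solved linear system can be inspected; a short sketch assuming A, b and Q from the cell above:
residual = numpy.linalg.norm(numpy.dot(A, Q) - b)
print("Linear system residual:", residual)   # should be close to machine precision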
Plot results
|
#Visualize the pressure and velocity field
#Define meshgrid
Nx, Ny = 50, 50 # number of points in the x and y directions
x_start, x_end = -0.01, 1.01 # x-direction boundaries
y_start, y_end = -0.01, 1.01 # y-direction boundaries
x = numpy.linspace(x_start, x_end, Nx) # computes a 1D-array for x
y = numpy.linspace(y_start, y_end, Ny) # computes a 1D-array for y
X, Y = numpy.meshgrid(x, y) # generates a mesh grid
#Calculate the velocity and pressure field
p = numpy.empty((Nx, Ny), dtype=float)
u = numpy.empty((Nx, Ny), dtype=float)
v = numpy.empty((Nx, Ny), dtype=float)
#for i, panel in enumerate(panels):
#panel.Q = 0.
#panels[0].Q=100
#panels[5].Q=100
#Qwell_1=400
for i in range(Nx):
for j in range(Ny):
p[i,j] =sum([p.Q*InflueceP(X[i,j], Y[i,j], p) for p in panels])+Qwell_1*InflueceP_W(X[i,j], Y[i,j], wells[0])
u[i,j] =sum([p.Q*InflueceU(X[i,j], Y[i,j], p) for p in panels])+Qwell_1*InflueceU_W(X[i,j], Y[i,j], wells[0])
v[i,j] =sum([p.Q*InflueceV(X[i,j], Y[i,j], p) for p in panels])+Qwell_1*InflueceV_W(X[i,j], Y[i,j], wells[0])
#p[i,j] =sum([p.Q*InflueceP(X[i,j], Y[i,j], p) for p in panels])
#u[i,j] =sum([p.Q*InflueceU(X[i,j], Y[i,j], p) for p in panels])
#v[i,j] =sum([p.Q*InflueceV(X[i,j], Y[i,j], p) for p in panels])
#p[i,j] =Qwell_1*InflueceP_W(X[i,j], Y[i,j], wells[0])
#u[i,j] =Qwell_1*InflueceU_W(X[i,j], Y[i,j], wells[0])
#v[i,j] =Qwell_1*InflueceV_W(X[i,j], Y[i,j], wells[0])
# plots the streamlines
%matplotlib inline
size = 6
pyplot.figure(figsize=(size, size))
pyplot.grid(True)
pyplot.title('Streamline field')
pyplot.xlabel('x', fontsize=16)
pyplot.ylabel('y', fontsize=16)
pyplot.xlim(-0.2, 1.2)
pyplot.ylim(-0.2, 1.2)
pyplot.plot(numpy.append([panel.xa for panel in panels], panels[0].xa),
numpy.append([panel.ya for panel in panels], panels[0].ya),
linestyle='-', linewidth=1, marker='o', markersize=6, color='#CD2305');
stream =pyplot.streamplot(X, Y, u, v,density=2, linewidth=1, arrowsize=1, arrowstyle='->') #streamline
#cbar=pyplot.colorbar(orientation='vertical')
#equipotential=pyplot.contourf(X, Y, p1, extend='both')
size = 7
pyplot.figure(figsize=(size, size-1))
pyplot.title('Pressure field')
pyplot.xlabel('x', fontsize=16)
pyplot.ylabel('y', fontsize=16)
pyplot.xlim(0, 1)
pyplot.ylim(0, 1)
pyplot.contour(X, Y, p, 15, linewidths=0.5, colors='k')
pyplot.contourf(X, Y, p, 15, cmap='rainbow',
vmax=abs(p).max(), vmin=-abs(p).max())
pyplot.colorbar() # draw colorbar
size = 7
pyplot.figure(figsize=(size, size-1))
pyplot.title('Total Velocity field')
pyplot.xlabel('x', fontsize=16)
pyplot.ylabel('y', fontsize=16)
pyplot.xlim(0, 1)
pyplot.ylim(0, 1)
Vtotal= numpy.sqrt(u**2+v**2)
#Vtotal= numpy.abs(v)
pyplot.contour(X, Y, Vtotal, 15, linewidths=0.5, colors='k')
pyplot.contourf(X, Y, Vtotal, 15, cmap='rainbow')
#vmax=50, vmin=0)
pyplot.colorbar() # draw colorbar
pyplot.title('Darcy velocity on the outflow boundary, x component (ft/day)')
pyplot.xlabel('x', fontsize=16)
pyplot.ylabel('y', fontsize=16)
pyplot.plot(y, u[49,:], '--', linewidth=2)
pyplot.plot(9.8425+y, u[:,49], '--', linewidth=2)
u[:,49]
pyplot.title('Darcy velocity on the outflow boundary, y component (ft/day)')
pyplot.plot(y, v[:,49], '--', linewidth=2)
pyplot.plot(9.8425+y, v[49,:], '--', linewidth=2)
v[49,:]
|
BEM_problem.ipynb
|
BradHub/SL-SPH
|
mit
|
Create a graph and pass it to the VersionedGraph wrapper that will take care of the version control.
|
graph_obj = NXGraph()
g = VersionedGraph(graph_obj)
|
examples/Tutorial_graph_audit.ipynb
|
Kappa-Dev/ReGraph
|
mit
|
Now let's create a rule that adds two nodes connected with an edge to the graph and apply it. If we want the changes to be committed to the version control, we rewrite the graph through the rewrite method of a VersionedGraph object.
|
rule = Rule.from_transform(NXGraph())
rule.inject_add_node("a")
rule.inject_add_node("b")
rule.inject_add_edge("a", "b")
rhs_instance, _ = g.rewrite(rule, {}, message="Add a -> b")
plot_graph(g.graph)
|
examples/Tutorial_graph_audit.ipynb
|
Kappa-Dev/ReGraph
|
mit
|
We create a new branch called "branch"
|
branch_commit = g.branch("branch")
print("Branches: ", g.branches())
print("Current branch '{}'".format(g.current_branch()))
|
examples/Tutorial_graph_audit.ipynb
|
Kappa-Dev/ReGraph
|
mit
|
Apply a rule that clones the node 'b' to the current version of the graph (branch 'branch')
|
pattern = NXGraph()
pattern.add_node("b")
rule = Rule.from_transform(pattern)
rule.inject_clone_node("b")
plot_rule(rule)
rhs_instance, commit_id = g.rewrite(rule, {"b": rhs_instance["b"]}, message="Clone b")
plot_graph(g.graph)
|
examples/Tutorial_graph_audit.ipynb
|
Kappa-Dev/ReGraph
|
mit
|
The rewrite method of VersionedGraph returns the RHS instance of the applied rule and the id of the newly created commit corresponding to this rewrite.
|
print("RHS instance", rhs_instance)
print("Commit ID: ", commit_id)
|
examples/Tutorial_graph_audit.ipynb
|
Kappa-Dev/ReGraph
|
mit
|
Switch back to the 'master' branch
|
g.switch_branch("master")
print(g.current_branch())
|
examples/Tutorial_graph_audit.ipynb
|
Kappa-Dev/ReGraph
|
mit
|
Apply a rule that adds a loop from 'a' to itself, adds a new node 'c' and connects it to 'a'
|
pattern = NXGraph()
pattern.add_node("a")
rule = Rule.from_transform(pattern)
rule.inject_add_node("c")
rule.inject_add_edge("c", "a")
rule.inject_add_edge("a", "a")
rhs_instance, _ = g.rewrite(rule, {"a": "a"}, message="Add c and c->a")
plot_graph(g.graph)
|
examples/Tutorial_graph_audit.ipynb
|
Kappa-Dev/ReGraph
|
mit
|
Create a new branch 'dev'
|
g.branch("dev")
|
examples/Tutorial_graph_audit.ipynb
|
Kappa-Dev/ReGraph
|
mit
|
In this branch, remove the edge from 'c' to 'a' and merge the two nodes together
|
pattern = NXGraph()
pattern.add_node("c")
pattern.add_node("a")
pattern.add_edge("c", "a")
rule = Rule.from_transform(pattern)
rule.inject_remove_edge("c", "a")
rule.inject_merge_nodes(["c", "a"])
plot_rule(rule)
g.rewrite(rule, {"a": rhs_instance["a"], "c": rhs_instance["c"]}, message="Merge c and a")
plot_graph(g.graph)
|
examples/Tutorial_graph_audit.ipynb
|
Kappa-Dev/ReGraph
|
mit
|
Switch back to the 'master' branch.
|
g.switch_branch("master")
|
examples/Tutorial_graph_audit.ipynb
|
Kappa-Dev/ReGraph
|
mit
|