markdown (string, 0-37k chars) | code (string, 1-33.3k chars) | path (string, 8-215 chars) | repo_name (string, 6-77 chars) | license (string, 15 classes)
---|---|---|---|---
Imports
|
from __future__ import print_function
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import random
import six
import numpy as np
import thinc.extra.datasets
import spacy
from spacy.util import minibatch, compounding
|
client/workflows/examples/text_classification_spacy.ipynb
|
mitdbg/modeldb
|
mit
|
Helper Functions
|
def load_data(limit=0, split=0.8):
"""Load data from the IMDB dataset."""
# Partition off part of the dataset to train and test
train_data, _ = thinc.extra.datasets.imdb()
random.shuffle(train_data)
train_data = train_data[-limit:]
texts, labels = zip(*train_data)
cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels]
split = int(len(train_data) * split)
return (texts[:split], cats[:split]), (texts[split:], cats[split:])
def evaluate(tokenizer, textcat, texts, cats):
"""Evaluate with text data, calculates precision, recall and f score"""
docs = (tokenizer(text) for text in texts)
tp = 0.0 # True positives
fp = 1e-8 # False positives
fn = 1e-8 # False negatives
tn = 0.0 # True negatives
for i, doc in enumerate(textcat.pipe(docs)):
gold = cats[i]
for label, score in doc.cats.items():
if label not in gold:
continue
if label == "NEGATIVE":
continue
if score >= 0.5 and gold[label] >= 0.5:
tp += 1.0
elif score >= 0.5 and gold[label] < 0.5:
fp += 1.0
elif score < 0.5 and gold[label] < 0.5:
tn += 1
elif score < 0.5 and gold[label] >= 0.5:
fn += 1
precision = tp / (tp + fp)
recall = tp / (tp + fn)
if (precision + recall) == 0:
f_score = 0.0
else:
f_score = 2 * (precision * recall) / (precision + recall)
return {"textcat_p": precision, "textcat_r": recall, "textcat_f": f_score}
|
client/workflows/examples/text_classification_spacy.ipynb
|
mitdbg/modeldb
|
mit
|
Train Model
|
hyperparams = {
'model':'en',
'n_iter': 2, # epochs
'n_texts': 500, # num of training samples
'architecture': 'simple_cnn',
'num_samples': 1000,
'train_test_split': 0.8,
'dropout': 0.2
}
run.log_hyperparameters(hyperparams)
# using the basic en model
try:
nlp = spacy.load(hyperparams['model']) # load existing spaCy model
except OSError:
nlp = spacy.blank(hyperparams['model']) # create blank Language class
print("Created blank '{}' model".format(hyperparams['model']))
else:
print("Loaded model '{}'".format(nlp))
# add the text classifier to the pipeline if it doesn't exist
if "textcat" not in nlp.pipe_names:
textcat = nlp.create_pipe(
"textcat",
config={
"exclusive_classes": True,
"architecture": hyperparams['architecture'],
}
)
nlp.add_pipe(textcat, last=True)
# otherwise, get it, so we can add labels to it
else:
textcat = nlp.get_pipe("textcat")
# add label to text classifier
_= textcat.add_label("POSITIVE")
_= textcat.add_label("NEGATIVE")
# load the IMDB dataset
print("Loading IMDB data...")
(train_texts, train_cats), (dev_texts, dev_cats) = load_data(limit=hyperparams['num_samples'],
split=hyperparams['train_test_split'])
print(
"Using {} examples ({} training, {} evaluation)".format(
hyperparams['num_samples'], len(train_texts), len(dev_texts)
)
)
train_data = list(zip(train_texts, [{"cats": cats} for cats in train_cats]))
# sample train data
train_data[:1]
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "textcat"]
print("other pipes:", other_pipes)
with nlp.disable_pipes(*other_pipes): # only train textcat
optimizer = nlp.begin_training()
print("Training the model...")
print("{:^5}\t{:^5}\t{:^5}\t{:^5}".format("LOSS", "P", "R", "F"))
batch_sizes = compounding(4.0, 32.0, 1.001)
for i in range(hyperparams['n_iter']):
losses = {}
# batch up the examples using spaCy's minibatch
random.shuffle(train_data)
batches = minibatch(train_data, size=batch_sizes)
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, drop=hyperparams['dropout'], losses=losses)
with textcat.model.use_params(optimizer.averages):
# evaluate on the dev data split off in load_data()
scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)
print(
"{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}".format( # print a simple table
losses["textcat"],
scores["textcat_p"],
scores["textcat_r"],
scores["textcat_f"],
)
)
run.log_observation('loss', losses['textcat'])
run.log_observation('precision', scores['textcat_p'])
run.log_observation('recall', scores['textcat_r'])
run.log_observation('f_score', scores['textcat_f'])
|
client/workflows/examples/text_classification_spacy.ipynb
|
mitdbg/modeldb
|
mit
|
Log for Deployment
Create Wrapper Class
Verta deployment expects a particular interface for its models.
They must expose a predict() function, so we'll create a thin wrapper class around our spaCy pipeline.
|
class TextClassifier:
def __init__(self, nlp):
self.nlp = nlp
def predict(self, input_list): # param must be a list/batch of inputs
predictions = []
for text in input_list:
scores = self.nlp(text).cats
if scores['POSITIVE'] > scores['NEGATIVE']:
predictions.append("POSITIVE")
else:
predictions.append("NEGATIVE")
return np.array(predictions) # response currently must be a NumPy array
input_list = [
"This movie was subpar at best.",
"Plot didn't make sense."
]
model = TextClassifier(nlp)
model.predict(input_list)
|
client/workflows/examples/text_classification_spacy.ipynb
|
mitdbg/modeldb
|
mit
|
Create Deployment Artifacts
Verta deployment also needs a couple more details about the model.
What do its inputs and outputs look like?
|
from verta.utils import ModelAPI # Verta-provided utility class
model_api = ModelAPI(
input_list, # example inputs
model.predict(input_list), # example outputs
)
|
client/workflows/examples/text_classification_spacy.ipynb
|
mitdbg/modeldb
|
mit
|
What PyPI-installable packages (with version numbers) are required to deserialize and run the model?
|
requirements = ["numpy", "spacy", "thinc"]
# this could also have been a path to a requirements.txt file on disk
run.log_requirements(requirements)
|
client/workflows/examples/text_classification_spacy.ipynb
|
mitdbg/modeldb
|
mit
|
Log Model
|
# test the trained model
test_text = 'The Lion King was very entertaining. The movie was visually spectacular.'
doc = nlp(test_text)
print(test_text)
print(doc.cats)
run.log_model(
model,
model_api=model_api,
)
|
client/workflows/examples/text_classification_spacy.ipynb
|
mitdbg/modeldb
|
mit
|
Deployment
|
run
|
client/workflows/examples/text_classification_spacy.ipynb
|
mitdbg/modeldb
|
mit
|
Click the link above to view your Experiment Run in the Verta Web App, and deploy it.
Once it's ready, you can make predictions against the deployed model.
|
from verta._demo_utils import DeployedModel
deployed_model = DeployedModel(HOST, run.id)
deployed_model.predict(["I would definitely watch this again!"])
|
client/workflows/examples/text_classification_spacy.ipynb
|
mitdbg/modeldb
|
mit
|
Loading the data
In this folder there are 83 subjects' datasets in NIfTI format (.nii).
The data for each subject is a volume of 70x256x256 voxels, composed of 3 eigenvalues, 3 eigenvectors, and an FA volume.
|
#subjects folder
BASE_PATH = 'G:/DTI_DS/original'
#subject number
# subject_number = 84 # very inclined subject
subject_number = 1
if(subject_number < 10):
subject_dir = str(BASE_PATH)+str('/subject00')+str(subject_number)
else:
subject_dir = str(BASE_PATH)+str('/subject0')+str(subject_number)
#load DTI
FA, evl, evt = DTI.load_fa_evl_evt(subject_dir)
MD = DTI.Mean_Difusivity(evl)
#print shapes
print('evt.shape =', evt.shape)
print('evl.shape =', evl.shape)
print('FA.shape =', FA.shape)
print('MD.shape =', MD.shape)
|
dev/.ipynb_checkpoints/DTI_open_01-05-17_GRP-checkpoint.ipynb
|
GustavoRP/IA369Z
|
gpl-3.0
|
Data visualization
FA
FA is a 3D scalar map that shows the diffusion anisotropy of each voxel, so each voxel is associated with an intensity value.
Inline image of FA in three different views (axial, coronal, and sagittal).
|
# Show FA
%matplotlib inline
# %matplotlib notebook
from matplotlib.widgets import Slider
sz, sy, sx = FA.shape
# set up figure
fig = plt.figure(figsize=(15,15))
xy = fig.add_subplot(1,3,1)
plt.title("Axial Slice")
xz = fig.add_subplot(1,3,2)
plt.title("Coronal Slice")
yz = fig.add_subplot(1,3,3)
plt.title("Sagittal Slice")
frame = 0.5
maximo = np.max(np.abs(FA)) # maximum FA value, used to scale the display range
minimo = np.min(np.abs(FA))
xy.imshow(FA[int(frame*sz),:,:], origin='lower', interpolation='nearest', cmap="gray", vmin=0, vmax=maximo)
xz.imshow(FA[:,int(frame*sy),:], origin='lower', interpolation='nearest', cmap="gray", vmin=0, vmax=maximo)
yz.imshow(FA[:,:,int(frame*sx)], origin='lower', interpolation='nearest', cmap="gray", vmin=0, vmax=maximo)
|
dev/.ipynb_checkpoints/DTI_open_01-05-17_GRP-checkpoint.ipynb
|
GustavoRP/IA369Z
|
gpl-3.0
|
MD
MD is a 3D scalar map that shows the mean diffusivity of each voxel, so each voxel is associated with an intensity value.
Inline image of slices of the MD in three different views (axial, coronal, and sagittal).
|
# Show MD
%matplotlib inline
# %matplotlib notebook
from matplotlib.widgets import Slider
sz, sy, sx = MD.shape
# set up figure
fig = plt.figure(figsize=(15,15))
xy = fig.add_subplot(1,3,1)
plt.title("Axial Slice")
xz = fig.add_subplot(1,3,2)
plt.title("Coronal Slice")
yz = fig.add_subplot(1,3,3)
plt.title("Sagittal Slice")
frame = 0.5
maximo = np.max(np.abs(MD)) # maximum MD value, used to scale the display range
minimo = np.min(np.abs(MD))
xy.imshow(MD[int(frame*sz),:,:], origin='lower', interpolation='nearest', cmap="gray", vmin=0, vmax=maximo)
xz.imshow(MD[:,int(frame*sy),:], origin='lower', interpolation='nearest', cmap="gray", vmin=0, vmax=maximo)
yz.imshow(MD[:,:,int(frame*sx)], origin='lower', interpolation='nearest', cmap="gray", vmin=0, vmax=maximo)
|
dev/.ipynb_checkpoints/DTI_open_01-05-17_GRP-checkpoint.ipynb
|
GustavoRP/IA369Z
|
gpl-3.0
|
First vector (main tensor direction)
This is a 3D vector field, so each voxel is associated with a vector.
Inline image of slices of the first-eigenvector field in three different views (axial, coronal, and sagittal).
|
# Show Vector Field
%matplotlib inline
# %matplotlib notebook
from matplotlib.widgets import Slider
evt_d = evt[0]*evt[0]
nv, sz, sy, sx = evl.shape
fig = plt.figure(figsize=(15,15))
xy = fig.add_subplot(1,3,1)
plt.title("Axial Slice")
plt.axis("off")
xz = fig.add_subplot(1,3,2)
plt.title("Coronal Slice")
plt.axis("off")
yz = fig.add_subplot(1,3,3)
plt.title("Sagittal Slice")
plt.axis("off")
step_ = 1 # vector subsampling step
maxlen_= 32 # length of the longest vector
rescale_ = 16 # image rescale factor
# crop = np.array([sz/3, sz*2/3, sy/3, sy*2/3, sx/3, sy*2/3]) # crop [z<, z>, y<, y>, x< x>]
crop = np.array([30, 40, 120, 136, 120, 136]) # crop [z<, z>, y<, y>, x< x>]
frame = 0.5
# seismic
V1 = DTI.show_vector_field(evt_d[1,int(frame*sz),:,:], evt_d[2,int(frame*sz),:,:], step=step_, maxlen=maxlen_, rescale=rescale_)
xy.imshow(V1[0,:,:], origin='lower',cmap="gray")
V2 = DTI.show_vector_field(evt_d[0,:,int(frame*sy),:], evt_d[2,:,int(frame*sy),:], step=step_, maxlen=maxlen_, rescale=rescale_)
xz.imshow(V2[0,:,:], origin='lower',cmap="gray")
V3 = DTI.show_vector_field(evt_d[0,:,:,int(frame*sx)], evt_d[1,:,:,int(frame*sx)], step=step_, maxlen=maxlen_, rescale=rescale_)
yz.imshow(V3[0,:,:], origin='lower',cmap="gray")
plt.xticks([])
plt.yticks([])
fig = plt.figure(figsize=(15,15))
xy = fig.add_subplot(1,3,1)
plt.title("Axial Slice (zoom)")
plt.axis("off")
xz = fig.add_subplot(1,3,2)
plt.title("Coronal Slice (zoom)")
plt.axis("off")
yz = fig.add_subplot(1,3,3)
plt.title("Sagittal Slice (zoom)")
plt.axis("off")
V1 = DTI.show_vector_field(evt_d[1,int(frame*sz),crop[2]:crop[3],crop[4]:crop[5]], evt_d[2,int(frame*sz),crop[2]:crop[3],crop[4]:crop[5]], step=step_, maxlen=maxlen_, rescale=rescale_)
xy.imshow(V1[0,:,:], origin='lower',cmap="gray")
V2 = DTI.show_vector_field(evt_d[0,crop[0]:crop[1],int(frame*sy),crop[4]:crop[5]], evt_d[2,crop[0]:crop[1],int(frame*sy),crop[4]:crop[5]], step=step_, maxlen=maxlen_, rescale=rescale_)
xz.imshow(V2[0,:,:], origin='lower',cmap="gray")
V3 = DTI.show_vector_field(evt_d[0,crop[0]:crop[1],crop[2]:crop[3],int(frame*sx)], evt_d[1,crop[0]:crop[1],crop[2]:crop[3],int(frame*sx)], step=step_, maxlen=maxlen_, rescale=rescale_)
yz.imshow(V3[0,:,:], origin='lower',cmap="gray")
plt.show()
|
dev/.ipynb_checkpoints/DTI_open_01-05-17_GRP-checkpoint.ipynb
|
GustavoRP/IA369Z
|
gpl-3.0
|
DataAccessLayer.getAvailableLocationNames()
Now create a new data request, set the datatype to "grid", and request all available grid names with getAvailableLocationNames()
|
request = DataAccessLayer.newDataRequest()
request.setDatatype("grid")
available_grids = DataAccessLayer.getAvailableLocationNames(request)
available_grids.sort()
list(available_grids)
|
notebooks/AWIPS/Grid_Levels_and_Parameters.ipynb
|
julienchastang/unidata-python-workshop
|
mit
|
DataAccessLayer.getAvailableParameters()
After datatype and model name (locationName) are set, you can query all available parameters with getAvailableParameters()
|
request.setLocationNames("RAP13")
availableParms = DataAccessLayer.getAvailableParameters(request)
availableParms.sort()
list(availableParms)
|
notebooks/AWIPS/Grid_Levels_and_Parameters.ipynb
|
julienchastang/unidata-python-workshop
|
mit
|
DataAccessLayer.getAvailableLevels()
Selecting "T" for temperature.
|
request.setParameters("T")
availableLevels = DataAccessLayer.getAvailableLevels(request)
for level in availableLevels:
print(level)
|
notebooks/AWIPS/Grid_Levels_and_Parameters.ipynb
|
julienchastang/unidata-python-workshop
|
mit
|
* 0.0SFC is the Surface level
* FHAG stands for Fixed Height Above Ground (in meters)
* NTAT stands for Nominal Top of the ATmosphere
* BL stands for Boundary Layer, where 0.0_30.0BL reads as 0-30 mb above ground level
* TROP is the Tropopause level
request.setLevels()
For this example we will use the 2-meter ("surface") air temperature, level "2.0FHAG"; a different level from the list above could be selected instead, as sketched below.
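For instance, any other level string from the list above could be passed to the same call (illustrative only; the lesson itself sets the 2-m level in the next cell):
# request.setLevels("0.0SFC")  # hypothetical alternative: the surface level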
|
request.setLevels("2.0FHAG")
|
notebooks/AWIPS/Grid_Levels_and_Parameters.ipynb
|
julienchastang/unidata-python-workshop
|
mit
|
DataAccessLayer.getAvailableTimes()
getAvailableTimes(request, True) will return an object of run times - formatted as YYYY-MM-DD HH:MM:SS
getAvailableTimes(request) will return an object of all times - formatted as YYYY-MM-DD HH:MM:SS (F:ff)
getForecastRun(cycle, times) will return a DataTime array for a single forecast cycle.
|
cycles = DataAccessLayer.getAvailableTimes(request, True)
times = DataAccessLayer.getAvailableTimes(request)
fcstRun = DataAccessLayer.getForecastRun(cycles[-1], times)
|
notebooks/AWIPS/Grid_Levels_and_Parameters.ipynb
|
julienchastang/unidata-python-workshop
|
mit
|
DataAccessLayer.getGridData()
Now that we have our request and DataTime fcstRun arrays ready, it's time to request the data array from EDEX.
|
response = DataAccessLayer.getGridData(request, [fcstRun[-1]])
for grid in response:
data = grid.getRawData()
lons, lats = grid.getLatLonCoords()
print('Time :', str(grid.getDataTime()))
print('Model:', str(grid.getLocationName()))
print('Parm :', str(grid.getParameter()))
print('Unit :', str(grid.getUnit()))
print(data.shape)
|
notebooks/AWIPS/Grid_Levels_and_Parameters.ipynb
|
julienchastang/unidata-python-workshop
|
mit
|
Plotting with Matplotlib and Cartopy
1. pcolormesh
|
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import numpy as np
import numpy.ma as ma
from scipy.io import loadmat
def make_map(bbox, projection=ccrs.PlateCarree()):
fig, ax = plt.subplots(figsize=(16, 9),
subplot_kw=dict(projection=projection))
ax.set_extent(bbox)
ax.coastlines(resolution='50m')
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
return fig, ax
cmap = plt.get_cmap('rainbow')
bbox = [lons.min(), lons.max(), lats.min(), lats.max()]
fig, ax = make_map(bbox=bbox)
cs = ax.pcolormesh(lons, lats, data, cmap=cmap)
cbar = fig.colorbar(cs, extend='both', shrink=0.5, orientation='horizontal')
cbar.set_label(grid.getLocationName().decode('UTF-8') +" " \
+ grid.getLevel().decode('UTF-8') + " " \
+ grid.getParameter().decode('UTF-8') \
+ " (" + grid.getUnit().decode('UTF-8') + ") " \
+ "valid " + str(grid.getDataTime().getRefTime()))
|
notebooks/AWIPS/Grid_Levels_and_Parameters.ipynb
|
julienchastang/unidata-python-workshop
|
mit
|
2. contourf
|
fig2, ax2 = make_map(bbox=bbox)
cs2 = ax2.contourf(lons, lats, data, 80, cmap=cmap,
vmin=data.min(), vmax=data.max())
cbar2 = fig2.colorbar(cs2, extend='both', shrink=0.5, orientation='horizontal')
cbar2.set_label(grid.getLocationName().decode('UTF-8') +" " \
+ grid.getLevel().decode('UTF-8') + " " \
+ grid.getParameter().decode('UTF-8') \
+ " (" + grid.getUnit().decode('UTF-8') + ") " \
+ "valid " + str(grid.getDataTime().getRefTime()))
|
notebooks/AWIPS/Grid_Levels_and_Parameters.ipynb
|
julienchastang/unidata-python-workshop
|
mit
|
Procedure with steps
Here we post to a procedure with multiple steps. The steps are displayed as soon as the procedure starts running and are updated accordingly.
|
print mldb.post_and_track('/v1/procedures', {
'type' : 'mock',
'params' : {'durationMs' : 8000, "refreshRateMs" : 500}
}, 0.5)
|
container_files/tutorials/Using pymldb Progress Bar and Cancel Button Tutorial.ipynb
|
mldbai/mldb
|
apache-2.0
|
Procedure with no steps
A procedure with no inner steps will simply display its progress.
This one is an example where the "initializing" phase sticks for some time, so at first only the "Cancel" button is shown; once the "executing" phase is reached, the progress bar is displayed.
|
print mldb.put_and_track('/v1/procedures/embedded_imagess', {
'type' : 'import.text',
'params' : {
'dataFileUrl' : 'https://s3.amazonaws.com/benchm-ml--main/train-1m.csv',
'outputDataset' : {
'id' : 'embedded_images_realestate',
'type' : 'sparse.mutable'
}
}
}, 0.1)
|
container_files/tutorials/Using pymldb Progress Bar and Cancel Button Tutorial.ipynb
|
mldbai/mldb
|
apache-2.0
|
Serial procedure
When using post_and_track along with a serial procedure, a progress bar is displayed for each step. Each one only takes the values 0/1 and 1/1.
|
prefix = 'file://mldb/mldb_test_data/dataset-builder'
print mldb.post_and_track('/v1/procedures', {
'type' : 'serial',
'params' : {
'steps' : [
{
'type' : 'mock',
'params' : {'durationMs' : 2000, "refreshRateMs" : 500}
}, {
'type' : 'import.text',
'params' : {
'dataFileUrl' : prefix + '/cache/dataset_creator_embedding_realestate.csv.gz',
'outputDataset' : {
'id' : 'embedded_images_realestate',
'type' : 'embedding'
},
'select' : '* EXCLUDING(rowName)',
'named' : 'rowName',
}
}, {
'type' : 'mock',
'params' : {'durationMs' : 2000, "refreshRateMs" : 500}
}
]
}
})
|
container_files/tutorials/Using pymldb Progress Bar and Cancel Button Tutorial.ipynb
|
mldbai/mldb
|
apache-2.0
|
Language Detection
|
from polyglot.text import Text  # import assumed; loaded in earlier cells of the notebook
text = Text("Bonjour, Mesdames.")
print("Language Detected: Code={}, Name={}\n".format(text.language.code, text.language.name))
|
notebooks/README.ipynb
|
iamtrask/polyglot
|
gpl-3.0
|
Tokenization
|
zen = Text("Beautiful is better than ugly. "
"Explicit is better than implicit. "
"Simple is better than complex.")
print(zen.words)
print(zen.sentences)
|
notebooks/README.ipynb
|
iamtrask/polyglot
|
gpl-3.0
|
Part of Speech Tagging
|
text = Text(u"O primeiro uso de desobediência civil em massa ocorreu em setembro de 1906.")
print("{:<16}{}".format("Word", "POS Tag")+"\n"+"-"*30)
for word, tag in text.pos_tags:
print(u"{:<16}{:>2}".format(word, tag))
|
notebooks/README.ipynb
|
iamtrask/polyglot
|
gpl-3.0
|
Named Entity Recognition
|
text = Text(u"In Großbritannien war Gandhi mit dem westlichen Lebensstil vertraut geworden")
print(text.entities)
|
notebooks/README.ipynb
|
iamtrask/polyglot
|
gpl-3.0
|
Polarity
|
print("{:<16}{}".format("Word", "Polarity")+"\n"+"-"*30)
for w in zen.words[:6]:
print("{:<16}{:>2}".format(w, w.polarity))
|
notebooks/README.ipynb
|
iamtrask/polyglot
|
gpl-3.0
|
Embeddings
|
from polyglot.text import Word  # import assumed; loaded in earlier cells of the notebook
word = Word("Obama", language="en")
print("Neighbors (Synonyms) of {}".format(word)+"\n"+"-"*30)
for w in word.neighbors:
print("{:<16}".format(w))
print("\n\nThe first 10 dimensions out of the {} dimensions\n".format(word.vector.shape[0]))
print(word.vector[:10])
|
notebooks/README.ipynb
|
iamtrask/polyglot
|
gpl-3.0
|
Morphology
|
word = Text("Preprocessing is an essential step.").words[0]
print(word.morphemes)
|
notebooks/README.ipynb
|
iamtrask/polyglot
|
gpl-3.0
|
Transliteration
|
from polyglot.transliteration import Transliterator
transliterator = Transliterator(source_lang="en", target_lang="ru")
print(transliterator.transliterate(u"preprocessing"))
|
notebooks/README.ipynb
|
iamtrask/polyglot
|
gpl-3.0
|
Problem 1: A Time Series Model
Consider the time series model
$$ x_{t+1} = \alpha x_t (1 - x_t) $$
Let's set $\alpha = 4$
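As a quick check of the recursion, starting from $x_0 = 0.2$ the first step gives $x_1 = 4 \times 0.2 \times (1 - 0.2) = 0.64$.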
|
α = 4
|
John/numba.ipynb
|
QuantEcon/phd_workshops
|
bsd-3-clause
|
Here's a typical time series:
|
n = 200
x = np.empty(n)
x[0] = 0.2
for t in range(n-1):
x[t+1] = α * x[t] * (1 - x[t])
plt.plot(x)
plt.show()
|
John/numba.ipynb
|
QuantEcon/phd_workshops
|
bsd-3-clause
|
Here's a function that simulates for n periods, starting from x0, and returns only the final value:
|
def quad(x0, n):
x = x0
for i in range(1, n):
x = α * x * (1 - x)
return x
|
John/numba.ipynb
|
QuantEcon/phd_workshops
|
bsd-3-clause
|
Let's see how fast this runs:
|
n = 10_000_000
tic()
x = quad(0.2, n)
toc()
|
John/numba.ipynb
|
QuantEcon/phd_workshops
|
bsd-3-clause
|
Now let's try this in FORTRAN.
Note --- this step is intended to be a demo and will only execute if
* you have the file fastquad.f90 in your pwd
* you have a FORTRAN compiler installed (and you modify the compilation code below appropriately)
|
!cat fastquad.f90
!gfortran -O3 fastquad.f90
!./a.out
|
John/numba.ipynb
|
QuantEcon/phd_workshops
|
bsd-3-clause
|
Now let's do the same thing in Python using Numba's JIT compilation:
|
from numba import jit  # import assumed; numba is loaded earlier in the notebook
quad_jitted = jit(quad)
tic()
x = quad_jitted(0.2, n)
toc()
tic()
x = quad_jitted(0.2, n)
toc()
|
John/numba.ipynb
|
QuantEcon/phd_workshops
|
bsd-3-clause
|
After JIT compilation, function execution speed is about the same as FORTRAN.
But remember, JIT compilation for Python is still limited --- see here
If these limitations frustrate you, then try Julia.
Problem 2: Brute Force Optimization
The problem is to maximize the function
$$ f(x, y) = \frac{\cos \left(x^2 + y^2 \right)}{1 + x^2 + y^2} + 1$$
using brute force --- searching over a grid of $(x, y)$ pairs.
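As a sanity check on the formula, note that the global maximum is attained at the origin, where $f(0, 0) = \frac{\cos(0)}{1 + 0} + 1 = 2$; the brute-force search should therefore return a value close to 2.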
|
def f(x, y):
return np.cos(x**2 + y**2) / (1 + x**2 + y**2) + 1
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
gridsize = 50
gmin, gmax = -3, 3
xgrid = np.linspace(gmin, gmax, gridsize)
ygrid = xgrid
x, y = np.meshgrid(xgrid, ygrid)
# === plot value function === #
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x,
y,
f(x, y),
rstride=2, cstride=2,
cmap=cm.jet,
alpha=0.4,
linewidth=0.05)
ax.scatter(x, y, c='k', s=0.6)
ax.scatter(x, y, f(x, y), c='k', s=0.6)
ax.view_init(25, -57)
ax.set_zlim(-0, 2.0)
ax.set_xlim(gmin, gmax)
ax.set_ylim(gmin, gmax)
plt.show()
|
John/numba.ipynb
|
QuantEcon/phd_workshops
|
bsd-3-clause
|
Vectorized code
|
grid = np.linspace(-3, 3, 10000)
x, y = np.meshgrid(grid, grid)
tic()
np.max(f(x, y))
toc()
|
John/numba.ipynb
|
QuantEcon/phd_workshops
|
bsd-3-clause
|
JITTed code
A jitted version
|
@jit
def compute_max():
m = -np.inf
for x in grid:
for y in grid:
z = np.cos(x**2 + y**2) / (1 + x**2 + y**2) + 1
if z > m:
m = z
return m
compute_max()
tic()
compute_max()
toc()
|
John/numba.ipynb
|
QuantEcon/phd_workshops
|
bsd-3-clause
|
Numba for vectorization with automatic parallelization - even faster:
|
from numba import vectorize  # import assumed; numba is loaded earlier in the notebook
@vectorize('float64(float64, float64)', target='parallel')
def f_par(x, y):
return np.cos(x**2 + y**2) / (1 + x**2 + y**2) + 1
x, y = np.meshgrid(grid, grid)
np.max(f_par(x, y))
tic()
np.max(f_par(x, y))
toc()
|
John/numba.ipynb
|
QuantEcon/phd_workshops
|
bsd-3-clause
|
Spatiotemporal permutation F-test on full sensor data
Tests for differential evoked responses in at least
one condition using a permutation clustering test.
The FieldTrip neighbor templates will be used to determine
the adjacency between sensors. This serves as a spatial prior
to the clustering. Spatiotemporal clusters will then
be visualized using custom matplotlib code.
Caveat for the interpretation of "significant" clusters: see the FieldTrip website.
|
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mne.viz import plot_topomap
import mne
from mne.stats import spatio_temporal_cluster_test
from mne.datasets import sample
from mne.channels import find_ch_connectivity
from mne.viz import plot_compare_evokeds
print(__doc__)
|
0.17/_downloads/1b26761ba88c6441bd13afd5730965a4/plot_stats_spatio_temporal_cluster_sensors.ipynb
|
mne-tools/mne-tools.github.io
|
bsd-3-clause
|
Set parameters
|
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = {'Aud/L': 1, 'Aud/R': 2, 'Vis/L': 3, 'Vis/R': 4}
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 30, fir_design='firwin')
events = mne.read_events(event_fname)
|
0.17/_downloads/1b26761ba88c6441bd13afd5730965a4/plot_stats_spatio_temporal_cluster_sensors.ipynb
|
mne-tools/mne-tools.github.io
|
bsd-3-clause
|
Read epochs for the channel of interest
|
picks = mne.pick_types(raw.info, meg='mag', eog=True)
reject = dict(mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, reject=reject, preload=True)
epochs.drop_channels(['EOG 061'])
epochs.equalize_event_counts(event_id)
X = [epochs[k].get_data() for k in event_id] # as 3D matrix
X = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering
|
0.17/_downloads/1b26761ba88c6441bd13afd5730965a4/plot_stats_spatio_temporal_cluster_sensors.ipynb
|
mne-tools/mne-tools.github.io
|
bsd-3-clause
|
Find the FieldTrip neighbor definition to set up sensor connectivity
|
connectivity, ch_names = find_ch_connectivity(epochs.info, ch_type='mag')
print(type(connectivity)) # it's a sparse matrix!
plt.imshow(connectivity.toarray(), cmap='gray', origin='lower',
interpolation='nearest')
plt.xlabel('{} Magnetometers'.format(len(ch_names)))
plt.ylabel('{} Magnetometers'.format(len(ch_names)))
plt.title('Between-sensor adjacency')
|
0.17/_downloads/1b26761ba88c6441bd13afd5730965a4/plot_stats_spatio_temporal_cluster_sensors.ipynb
|
mne-tools/mne-tools.github.io
|
bsd-3-clause
|
Compute permutation statistic
How does it work? We use clustering to bind together features which are
similar. Our features are the magnetic fields measured over our sensor
array at different times. This reduces the multiple comparison problem.
To compute the actual test-statistic, we first sum all F-values in all
clusters. We end up with one statistic for each cluster.
Then we generate a distribution from the data by shuffling our conditions
between our samples and recomputing our clusters and the test statistics.
We test for the significance of a given cluster by computing the probability
of observing a cluster of that size. For more background read:
Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
doi:10.1016/j.jneumeth.2007.03.024
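To make the shuffling idea concrete, here is a minimal, self-contained sketch in plain NumPy. It illustrates the permutation principle for a simple two-sample statistic only; it is not MNE's cluster-level implementation, and all names in it are illustrative:
import numpy as np
rng = np.random.RandomState(0)
cond_a = rng.normal(0.5, 1.0, size=50)   # toy measurements, condition A
cond_b = rng.normal(0.0, 1.0, size=50)   # toy measurements, condition B
def stat(a, b):
    return abs(a.mean() - b.mean())      # simple test statistic
observed = stat(cond_a, cond_b)
pooled = np.concatenate([cond_a, cond_b])
null = []
for _ in range(1000):                    # shuffle condition labels and recompute the statistic
    rng.shuffle(pooled)
    null.append(stat(pooled[:50], pooled[50:]))
p_value = (np.sum(np.array(null) >= observed) + 1.0) / (len(null) + 1.0)
print(p_value)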
|
# set cluster threshold
threshold = 50.0 # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.01
cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
threshold=threshold, tail=1,
n_jobs=1, buffer_size=None,
connectivity=connectivity)
T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]
|
0.17/_downloads/1b26761ba88c6441bd13afd5730965a4/plot_stats_spatio_temporal_cluster_sensors.ipynb
|
mne-tools/mne-tools.github.io
|
bsd-3-clause
|
Note. The same functions work with source estimates. The only differences
are the origin of the data, the size, and the connectivity definition.
It can be used for single trials or for groups of subjects.
Visualize clusters
|
# configure variables for visualization
colors = {"Aud": "crimson", "Vis": 'steelblue'}
linestyles = {"L": '-', "R": '--'}
# get sensor positions via layout
pos = mne.find_layout(epochs.info).pos
# organize data for plotting
evokeds = {cond: epochs[cond].average() for cond in event_id}
# loop over clusters
for i_clu, clu_idx in enumerate(good_cluster_inds):
# unpack cluster information, get unique indices
time_inds, space_inds = np.squeeze(clusters[clu_idx])
ch_inds = np.unique(space_inds)
time_inds = np.unique(time_inds)
# get topography for F stat
f_map = T_obs[time_inds, ...].mean(axis=0)
# get signals at the sensors contributing to the cluster
sig_times = epochs.times[time_inds]
# create spatial mask
mask = np.zeros((f_map.shape[0], 1), dtype=bool)
mask[ch_inds, :] = True
# initialize figure
fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
# plot average test statistic and mark significant sensors
image, _ = plot_topomap(f_map, pos, mask=mask, axes=ax_topo, cmap='Reds',
vmin=np.min, vmax=np.max, show=False)
# create additional axes (for ERF and colorbar)
divider = make_axes_locatable(ax_topo)
# add axes for colorbar
ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
plt.colorbar(image, cax=ax_colorbar)
ax_topo.set_xlabel(
'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]]))
# add new axis for time courses and plot time courses
ax_signals = divider.append_axes('right', size='300%', pad=1.2)
title = 'Cluster #{0}, {1} sensor'.format(i_clu + 1, len(ch_inds))
if len(ch_inds) > 1:
title += "s (mean)"
plot_compare_evokeds(evokeds, title=title, picks=ch_inds, axes=ax_signals,
colors=colors, linestyles=linestyles, show=False,
split_legend=True, truncate_yaxis='max_ticks')
# plot temporal cluster extent
ymin, ymax = ax_signals.get_ylim()
ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
color='orange', alpha=0.3)
# clean up viz
mne.viz.tight_layout(fig=fig)
fig.subplots_adjust(bottom=.05)
plt.show()
|
0.17/_downloads/1b26761ba88c6441bd13afd5730965a4/plot_stats_spatio_temporal_cluster_sensors.ipynb
|
mne-tools/mne-tools.github.io
|
bsd-3-clause
|
Airbnb Data
First we read in the data
|
url1 = "http://data.insideairbnb.com/united-states/"
url2 = "ny/new-york-city/2016-02-02/data/listings.csv.gz"
full_df = pd.read_csv(url1+url2, compression="gzip")
full_df.head()
|
Code/Lab/Airbnb.ipynb
|
DaveBackus/Data_Bootcamp
|
mit
|
We don't want all data, so let's focus on a few variables.
|
df = full_df[["id", "price", "number_of_reviews", "review_scores_rating"]]
df.head()
|
Code/Lab/Airbnb.ipynb
|
DaveBackus/Data_Bootcamp
|
mit
|
Need to convert prices to floats
|
df.replace({'price': {'\$': ''}}, regex=True, inplace=True)
df.replace({'price': {'\,': ''}}, regex=True, inplace=True)
df['price'] = df['price'].astype('float64', copy=False)
|
Code/Lab/Airbnb.ipynb
|
DaveBackus/Data_Bootcamp
|
mit
|
We might think that better apartments get rented more often. Let's plot a scatter plot (and a set of box plots) of the number of reviews vs. the review score.
|
df.plot.scatter(x="number_of_reviews", y="review_scores_rating", figsize=(10, 8), alpha=0.2)
bins = [0, 5, 10, 25, 50, 100, 350]
boxplot_vecs = []
fig, ax = plt.subplots(figsize=(10, 8))
for i in range(1, 7):
lb = bins[i-1]
ub = bins[i]
foo = df["review_scores_rating"][df["number_of_reviews"].apply(lambda x: lb <= x <= ub)].dropna()
boxplot_vecs.append(foo.values)
ax.boxplot(boxplot_vecs, labels=bins[:-1])
plt.show()
|
Code/Lab/Airbnb.ipynb
|
DaveBackus/Data_Bootcamp
|
mit
|
Better reviews are also correlated with higher prices.
|
df.plot.scatter(x="review_scores_rating", y="price", figsize=(10, 8), alpha=0.2)
|
Code/Lab/Airbnb.ipynb
|
DaveBackus/Data_Bootcamp
|
mit
|
1 Example
The following demonstrates how to instantiate a graph and a filter, the two main objects of the package.
|
G = graphs.Logo()
G.estimate_lmax()
g = filters.Heat(G, tau=100)
|
examples/playground.ipynb
|
epfl-lts2/pygsp
|
bsd-3-clause
|
Let's now create a graph signal: a set of three Kronecker deltas for that example. We can now look at one step of heat diffusion by filtering the deltas with the above defined filter. Note how the diffusion follows the local structure!
|
DELTAS = [20, 30, 1090]
s = np.zeros(G.N)
s[DELTAS] = 1
s = g.filter(s)
G.plot(s, highlight=DELTAS, backend='matplotlib')
|
examples/playground.ipynb
|
epfl-lts2/pygsp
|
bsd-3-clause
|
2 Tutorials and examples
Try our tutorials or examples.
|
# Your code here.
|
examples/playground.ipynb
|
epfl-lts2/pygsp
|
bsd-3-clause
|
3 Playground
Try something of your own!
The API reference is your friend.
|
# Your code here.
|
examples/playground.ipynb
|
epfl-lts2/pygsp
|
bsd-3-clause
|
If you're missing a package, you can install it with:
|
%pip install numpy
|
examples/playground.ipynb
|
epfl-lts2/pygsp
|
bsd-3-clause
|
Make a grid and set boundary conditions:
|
mg = RasterModelGrid(
(20, 20), xy_spacing=50.0
) # raster grid with 20 rows, 20 columns and dx=50m
z = np.random.rand(mg.size("node")) # random noise for initial topography
mg.add_field("topographic__elevation", z, at="node")
mg.set_closed_boundaries_at_grid_edges(
False, True, False, True
) # N and S boundaries are closed, E and W are open
|
notebooks/tutorials/hillslope_geomorphology/transport-length_hillslope_diffuser/TLHDiff_tutorial.ipynb
|
landlab/landlab
|
mit
|
Set the initial and run conditions:
|
total_t = 2000000.0 # total run time (yr)
dt = 1000.0 # time step (yr)
nt = int(total_t // dt) # number of time steps
uplift_rate = 0.0001 # uplift rate (m/yr)
kappa = 0.001 # erodibility (m/yr)
Sc = 0.6 # critical slope
|
notebooks/tutorials/hillslope_geomorphology/transport-length_hillslope_diffuser/TLHDiff_tutorial.ipynb
|
landlab/landlab
|
mit
|
Instantiate the components:
The hillslope diffusion component must be used together with a flow router/director that provides the steepest downstream slope for each node, with a D4 method (creates the field topographic__steepest_slope at nodes).
|
fdir = FlowDirectorSteepest(mg)
tl_diff = TransportLengthHillslopeDiffuser(mg, erodibility=kappa, slope_crit=Sc)
|
notebooks/tutorials/hillslope_geomorphology/transport-length_hillslope_diffuser/TLHDiff_tutorial.ipynb
|
landlab/landlab
|
mit
|
Run the components for 2 Myr and trace an East-West cross-section of the topography every 100 kyr:
|
for t in range(nt):
fdir.run_one_step()
tl_diff.run_one_step(dt)
z[mg.core_nodes] += uplift_rate * dt # add the uplift
# add some output to let us see we aren't hanging:
if t % 100 == 0:
print(t * dt)
# plot east-west cross-section of topography:
x_plot = range(0, 1000, 50)
z_plot = z[100:120]
figure("cross-section")
plot(x_plot, z_plot)
figure("cross-section")
title("East-West cross section")
xlabel("x (m)")
ylabel("z (m)")
|
notebooks/tutorials/hillslope_geomorphology/transport-length_hillslope_diffuser/TLHDiff_tutorial.ipynb
|
landlab/landlab
|
mit
|
And plot final topography:
|
figure("final topography")
im = imshow_grid(
mg, "topographic__elevation", grid_units=["m", "m"], var_name="Elevation (m)"
)
|
notebooks/tutorials/hillslope_geomorphology/transport-length_hillslope_diffuser/TLHDiff_tutorial.ipynb
|
landlab/landlab
|
mit
|
This behaviour corresponds to the evolution observed using a classical non-linear diffusion model.
Example 2:
In this example, we show that when the slope is steep ($S \ge S_c$), the transport-length hillsope diffusion simulates mass wasting, with long transport distances.
First, we create a grid: the western half of the grid is flat at 0 m of elevation, the eastern half is a 45-degree slope.
|
# Create grid and topographic elevation field:
mg2 = RasterModelGrid((20, 20), xy_spacing=50.0)
z = np.zeros(mg2.number_of_nodes)
z[mg2.node_x > 500] = mg2.node_x[mg2.node_x > 500] / 10
mg2.add_field("topographic__elevation", z, at="node")
# Set boundary conditions:
mg2.set_closed_boundaries_at_grid_edges(False, True, False, True)
# Show initial topography:
im = imshow_grid(
mg2, "topographic__elevation", grid_units=["m", "m"], var_name="Elevation (m)"
)
# Plot an east-west cross-section of the initial topography:
z_plot = z[100:120]
x_plot = range(0, 1000, 50)
figure(2)
plot(x_plot, z_plot)
title("East-West cross section")
xlabel("x (m)")
ylabel("z (m)")
|
notebooks/tutorials/hillslope_geomorphology/transport-length_hillslope_diffuser/TLHDiff_tutorial.ipynb
|
landlab/landlab
|
mit
|
Set the run conditions:
|
total_t = 1000000.0 # total run time (yr)
dt = 1000.0 # time step (yr)
nt = int(total_t // dt) # number of time steps
|
notebooks/tutorials/hillslope_geomorphology/transport-length_hillslope_diffuser/TLHDiff_tutorial.ipynb
|
landlab/landlab
|
mit
|
Instantiate the components:
|
fdir = FlowDirectorSteepest(mg2)
tl_diff = TransportLengthHillslopeDiffuser(mg2, erodibility=0.001, slope_crit=0.6)
|
notebooks/tutorials/hillslope_geomorphology/transport-length_hillslope_diffuser/TLHDiff_tutorial.ipynb
|
landlab/landlab
|
mit
|
Run for 1 Myr, plotting the cross-section regularly:
|
for t in range(nt):
fdir.run_one_step()
tl_diff.run_one_step(dt)
# add some output to let us see we aren't hanging:
if t % 100 == 0:
print(t * dt)
z_plot = z[100:120]
figure(2)
plot(x_plot, z_plot)
|
notebooks/tutorials/hillslope_geomorphology/transport-length_hillslope_diffuser/TLHDiff_tutorial.ipynb
|
landlab/landlab
|
mit
|
The material is diffused from the top and along the slope and it accumulates at the bottom, where the topography flattens.
As a comparison, the following code uses linear diffusion on the same slope:
|
# Import Linear diffuser:
from landlab.components import LinearDiffuser
# Create grid and topographic elevation field:
mg3 = RasterModelGrid((20, 20), xy_spacing=50.0)
z = np.ones(mg3.number_of_nodes)
z[mg3.node_x > 500] = mg3.node_x[mg3.node_x > 500] / 10
mg3.add_field("topographic__elevation", z, at="node")
# Set boundary conditions:
mg3.set_closed_boundaries_at_grid_edges(False, True, False, True)
# Instantiate components:
fdir = FlowDirectorSteepest(mg3)
diff = LinearDiffuser(mg3, linear_diffusivity=0.1)
# Set run conditions:
total_t = 1000000.0
dt = 1000.0
nt = int(total_t // dt)
# Run for 1 Myr, plotting east-west cross-section regularly:
for t in range(nt):
fdir.run_one_step()
diff.run_one_step(dt)
# add some output to let us see we aren't hanging:
if t % 100 == 0:
print(t * dt)
z_plot = z[100:120]
figure(2)
plot(x_plot, z_plot)
|
notebooks/tutorials/hillslope_geomorphology/transport-length_hillslope_diffuser/TLHDiff_tutorial.ipynb
|
landlab/landlab
|
mit
|
Multinomial distribution: bags of marbles
Written by: Deebul Nair (2016)
Edited by: Jaakko Luttinen (2016)
Inspired by https://probmods.org/hierarchical-models.html
Using multinomial distribution
There are several bags of coloured marbles, each bag containing different amounts of each color. Marbles are drawn at random with replacement from the bags. The goal is to predict the distribution of the marbles in each bag.
Data generation
Let us create a dataset. First, decide the number of bags, colors and trials (i.e., draws):
|
n_colors = 5 # number of possible colors
n_bags = 3 # number of bags
n_trials = 20 # number of draws from each bag
|
doc/source/examples/multinomial.ipynb
|
jluttine/bayespy
|
mit
|
Generate randomly a color distribution for each bag:
|
from bayespy import nodes
import numpy as np
p_colors = nodes.Dirichlet(n_colors * [0.5], plates=(n_bags,)).random()
|
doc/source/examples/multinomial.ipynb
|
jluttine/bayespy
|
mit
|
The concentration parameter $\begin{bmatrix}0.5 & \ldots & 0.5\end{bmatrix}$ makes the distributions very non-uniform within each bag, that is, the amount of each color can be very different. We can visualize the probability distribution of the colors in each bag:
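As a quick standalone illustration of this effect (plain NumPy, independent of BayesPy), compare a draw from a Dirichlet with a small concentration parameter to one with a large concentration parameter:
import numpy as np
print(np.random.dirichlet([0.5] * 5))   # small concentration: most of the mass on a few colors
print(np.random.dirichlet([50.0] * 5))  # large concentration: close to uniform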
|
import bayespy.plot as bpplt
bpplt.hinton(p_colors)
bpplt.pyplot.title("Original probability distributions of colors in the bags");
|
doc/source/examples/multinomial.ipynb
|
jluttine/bayespy
|
mit
|
As one can see, the color distributions aren't very uniform in any of the bags because of the small concentration parameter. Next, make the marble draws:
|
marbles = nodes.Multinomial(n_trials, p_colors).random()
print(marbles)
|
doc/source/examples/multinomial.ipynb
|
jluttine/bayespy
|
mit
|
Model
We will use the same generative model for estimating the color distributions in the bags as we did for generating the data:
$$
\theta_i \sim \mathrm{Dirichlet}\left(\begin{bmatrix} 0.5 & \ldots & 0.5 \end{bmatrix}\right)
$$
$$
y_i | \theta_i \sim \mathrm{Multinomial}(\theta_i)
$$
The simple graphical model can be drawn as below:
|
%%tikz -f svg
\usetikzlibrary{bayesnet}
\node [latent] (theta) {$\theta$};
\node [below=of theta, obs] (y) {$y$};
\edge {theta} {y};
\plate {trials} {(y)} {trials};
\plate {bags} {(theta)(y)(trials)} {bags};
|
doc/source/examples/multinomial.ipynb
|
jluttine/bayespy
|
mit
|
The model is constructed equivalently to the generative model (except we don't use the nodes to draw random samples):
|
theta = nodes.Dirichlet(n_colors * [0.5], plates=(n_bags,))
y = nodes.Multinomial(n_trials, theta)
|
doc/source/examples/multinomial.ipynb
|
jluttine/bayespy
|
mit
|
Data is provided by using the observe method:
|
y.observe(marbles)
|
doc/source/examples/multinomial.ipynb
|
jluttine/bayespy
|
mit
|
Performing Inference
|
from bayespy.inference import VB
Q = VB(y, theta)
Q.update(repeat=1000)
import bayespy.plot as bpplt
bpplt.hinton(theta)
bpplt.pyplot.title("Learned distribution of colors")
bpplt.pyplot.show()
|
doc/source/examples/multinomial.ipynb
|
jluttine/bayespy
|
mit
|
Using the Categorical Distribution
The same problem can be solved with the categorical distribution. The categorical distribution is similar to the multinomial distribution except for the form of its output.
Multinomial and Categorical both infer the number of colors from the size of the probability vector (p_theta).
Categorical data is in a form where each value is the index of the color picked in a trial; so if n_colors=5, categorical data could be [4, 4, 0, 1, 1, 2, 4] if the number of trials was 7.
Multinomial data is a vector where each element tells how many times that color was picked, for instance [3, 0, 4] if you have 7 trials.
So there is a significant difference between multinomial and categorical data, and the choice of distribution has to be made depending on the data you have.
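As a small standalone illustration of the relationship between the two encodings (plain NumPy, not part of the BayesPy example), categorical draws can be collapsed into multinomial counts with np.bincount:
import numpy as np
categorical_draws = np.array([4, 4, 0, 1, 1, 2, 4])               # 7 trials, n_colors = 5
multinomial_counts = np.bincount(categorical_draws, minlength=5)  # how many times each color was picked
print(multinomial_counts)                                         # -> [1 2 1 0 3]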
Now we can see an example of a hierarchical model using a categorical data generator and model.
|
from bayespy import nodes
import numpy as np
#The marbles drawn based on the distribution for 10 trials
# Using same p_color distribution as in the above example
draw_marbles = nodes.Categorical(p_colors,
plates=(n_trials, n_bags)).random()
|
doc/source/examples/multinomial.ipynb
|
jluttine/bayespy
|
mit
|
Model
|
from bayespy import nodes
import numpy as np
p_theta = nodes.Dirichlet(np.ones(n_colors),
plates=(n_bags,),
name='p_theta')
bag_model = nodes.Categorical(p_theta,
plates=(n_trials, n_bags),
name='bag_model')
|
doc/source/examples/multinomial.ipynb
|
jluttine/bayespy
|
mit
|
Inference
|
bag_model.observe(draw_marbles)
from bayespy.inference import VB
Q = VB(bag_model, p_theta)
Q.update(repeat=1000)
%matplotlib inline
import bayespy.plot as bpplt
bpplt.hinton(p_theta)
bpplt.pyplot.tight_layout()
bpplt.pyplot.title("Learned Distribution of colors using Categorical Distribution")
bpplt.pyplot.show()
|
doc/source/examples/multinomial.ipynb
|
jluttine/bayespy
|
mit
|
Word counting
Write a function tokenize that takes a string of English text and returns a list of words. It should also remove stop words, which are common short words that are often removed before natural language processing. Your function should have the following logic:
Split the string into lines using splitlines.
Split each line into a list of words and merge the lists for each line.
Use Python's builtin filter function to remove all punctuation.
If stop_words is a list, remove all occurrences of the words in the list.
If stop_words is a space-delimited string of words, split them and remove them.
Remove any remaining empty words.
Make all words lowercase.
|
import re  # needed for re.split below
def tokenize(s, stop_words=None, punctuation='`~!@#$%^&*()_-+={[}]|\:;"<,>.?/}\t'):
"""Split a string into a list of words, removing punctuation and stop words."""
if type(stop_words)==str:
stopwords=list(stop_words.split(" "))
else:
stopwords=stop_words
lines = s.splitlines()
words = [re.split(" |--|-", line) for line in lines]
filtwords = []
# stopfiltwords = []
for w in words:
for ch in w:
result = list(filter(lambda x:x not in punctuation, ch))
filtwords.append("".join(result))
if stopwords != None:
filtwords=list(filter(lambda x:x not in stopwords and x != '', filtwords))
filtwords=[f.lower() for f in filtwords]
return filtwords
assert tokenize("This, is the way; that things will end", stop_words=['the', 'is']) == \
['this', 'way', 'that', 'things', 'will', 'end']
wasteland = """APRIL is the cruellest month, breeding
Lilacs out of the dead land, mixing
Memory and desire, stirring
Dull roots with spring rain.
"""
assert tokenize(wasteland, stop_words='is the of and') == \
['april','cruellest','month','breeding','lilacs','out','dead','land',
'mixing','memory','desire','stirring','dull','roots','with','spring',
'rain']
assert tokenize("hello--world")==['hello', 'world']
|
assignments/assignment07/AlgorithmsEx01.ipynb
|
CalPolyPat/phys202-2015-work
|
mit
|
Write a function count_words that takes a list of words and returns a dictionary where the keys in the dictionary are the unique words in the list and the values are the word counts.
|
def count_words(data):
"""Return a word count dictionary from the list of words in data."""
wordcount={}
for d in data:
if d in wordcount:
wordcount[d] += 1
else:
wordcount[d] = 1
return wordcount
assert count_words(tokenize('this and the this from and a a a')) == \
{'a': 3, 'and': 2, 'from': 1, 'the': 1, 'this': 2}
|
assignments/assignment07/AlgorithmsEx01.ipynb
|
CalPolyPat/phys202-2015-work
|
mit
|
Write a function sort_word_counts that returns a list of sorted word counts:
Each element of the list should be a (word, count) tuple.
The list should be sorted by the word counts, with the highest counts coming first.
To perform this sort, look at using the sorted function with a custom key and reverse
argument.
|
def sort_word_counts(wc):
"""Return a list of 2-tuples of (word, count), sorted by count descending."""
def getkey(item):
return item[1]
sortedwords = [(i,wc[i]) for i in wc]
return sorted(sortedwords, key=getkey, reverse=True)
assert sort_word_counts(count_words(tokenize('this and a the this this and a a a'))) == \
[('a', 4), ('this', 3), ('and', 2), ('the', 1)]
|
assignments/assignment07/AlgorithmsEx01.ipynb
|
CalPolyPat/phys202-2015-work
|
mit
|
Perform a word count analysis on Chapter 1 of Moby Dick, whose text can be found in the file mobydick_chapter1.txt:
Read the file into a string.
Tokenize with stop words of 'the of and a to in is it that as'.
Perform a word count, then sort and save the result in a variable named swc.
|
f = open('mobydick_chapter1.txt', 'r')
swc = sort_word_counts(count_words(tokenize(f.read(), stop_words='the of and a to in is it that as')))
print(len(swc))
assert swc[0]==('i',43)
assert len(swc)==849
# I changed the assert to length 849 instead of 848. I wasn't about to search through the first chapter of Moby Dick to find the odd punctuation that caused one extra word to pop up.
|
assignments/assignment07/AlgorithmsEx01.ipynb
|
CalPolyPat/phys202-2015-work
|
mit
|
Create a "Cleveland Style" dotplot of the counts of the top 50 words using Matplotlib. If you don't know what a dotplot is, you will have to do some research...
|
words50 = np.array(swc)
f=plt.figure(figsize=(25,5))
plt.plot(np.linspace(0,50,50), words50[:50,1], 'ko')
plt.xlim(0,50)
plt.xticks(np.linspace(0,50,50),words50[:50,0]);
assert True # use this for grading the dotplot
|
assignments/assignment07/AlgorithmsEx01.ipynb
|
CalPolyPat/phys202-2015-work
|
mit
|
如何使用和开发微信聊天机器人的系列教程
A workshop to develop & use an intelligent and interactive chat-bot in WeChat
WeChat is a popular social media app, which has more than 800 million monthly active users.
<img src='https://www.iss.nus.edu.sg/images/default-source/About-Us/7.6.1-teaching-staff/sam-website.tmb-.png' width=8% style="float: right;">
<img src='../reference/WeChat_SamGu_QR.png' width=10% style="float: right;">
by: GU Zhan (Sam)
October 2018 : Update to support Python 3 in local machine, e.g. iss-vm.
April 2017 ======= Scan the QR code to become trainer's friend in WeChat =====>>
第六课:交互式虚拟助手的智能应用
Lesson 6: Interactive Conversational Virtual Assistant Applications / Intelligent Process Automations
虚拟员工: 贷款填表申请审批一条龙自动化流程 (Virtual Worker: When Chat-bot meets RPA-bot for mortgage loan application automation)
虚拟员工: 文字指令交互(Conversational automation using text/message command)
虚拟员工: 语音指令交互(Conversational automation using speech/voice command)
虚拟员工: 多种语言交互(Conversational automation with multiple languages)
Using Google Cloud Platform's Machine Learning APIs
From the same API console, choose "Dashboard" on the left-hand menu and "Enable API".
Enable the following APIs for your project (search for them) if they are not already enabled:
<ol>
**<li> Google Cloud Speech API </li>**
**<li> Google Cloud Text-to-Speech API </li>**
**<li> Google Cloud Translation API </li>**
</ol>
Finally, because we are calling the APIs from Python (clients in many other languages are available), let's install the Python package (it's not installed by default on Datalab)
|
# Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the "License");
# !pip install --upgrade google-api-python-client
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
<span style="color:blue">Virtual Worker: When Chat-bot meets RPA-bot</span>
虚拟员工: 贷款填表申请审批一条龙自动化流程 (Mortgage loan application automation)
Synchronous processing when triggering RPA-Bot
|
# Library/Function to use operating system's shell script command, e.g. bash, echo, cd, pwd, etc
import subprocess, time
# Function to trigger RPA-Bot (TagUI script: mortgage loan application automation) from VA-Bot (python script)
# Trigger RPA-Bot [ Synchronous ]
# def didi_invoke_rpa_bot(rpa_bot_file, rpa_bot = 'reference/S-IPA-Workshop/TagUI-S-IPA/src/tagui'):
def didi_invoke_rpa_bot(rpa_bot_file, rpa_bot = '../reference/S-IPA-Workshop/TagUI-S-IPA/src/tagui'):
# Invoke RPA-Bot script
print('[ W I P ] In progress to invoke RPA-Bot using command: \n{}'.format(
'bash' + ' ' + rpa_bot + ' ' + rpa_bot_file))
start = time.time()
return_code = subprocess.call(['bash', rpa_bot, rpa_bot_file])
end = time.time()
if return_code == 0:
print('[ Sync OK ] RPA-Bot succeeded! [ Return Code : {} ]'.format(return_code))
else:
print('[ ERROR ] RPA-Bot failed! [ Return Code : {} ]'.format(return_code))
return return_code, int(round(end - start, 0)) # return_code & time_spent in seconds
# Uncomment below lines for an agile demo outside Chat-bot:
# rpa_bot_file = '../reference/S-IPA-Workshop/workshop2/KIE-Loan-Application-WeChat/VA-KIE-Loan-Application.txt'
# return_code = didi_invoke_rpa_bot(rpa_bot_file)
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
Asynchronous processing when triggering RPA-Bot
|
# Trigger RPA-Bot [ Asynchronous ]
# http://docs.dask.org/en/latest/_downloads/daskcheatsheet.pdf
from dask.distributed import Client
def didi_invoke_rpa_bot_async(rpa_bot_file):
client = Client(processes=False)
ipa_task = client.submit(didi_invoke_rpa_bot, rpa_bot_file)
ipa_task.add_done_callback(didi_invoke_rpa_bot_async_upon_completion)
return 0, 0 # Dummy return. Actual result is returned by function didi_invoke_rpa_bot_async_upon_completion(ipa_task)
from tornado import gen
# https://stackoverflow.com/questions/40477518/how-to-get-the-result-of-a-future-in-a-callback
@gen.coroutine
def didi_invoke_rpa_bot_async_upon_completion(ipa_task):
print(u'[ Terminal Info ] didi_invoke_rpa_bot_async(rpa_bot_file) [ upon_completion ]')
return_code, time_spent = ipa_task.result()
print(return_code)
print(time_spent)
# Send confirmation message upon triggering RPA-Bot
# itchat.send(u'[ Async OK ] IPA Command completed !\n[ Time Spent : %s seconds ]\n %s' % (time_spent, parm_msg['Text']), parm_msg['FromUserName'])
itchat.send(u'[ Async OK ] IPA Command completed !\n[ Time Spent : %s seconds ]' % (time_spent), parm_msg['FromUserName']) # parm_msg['Text'] may be out of sync if a new message has arrived.
# return return_code, time_spent # No return needed. No place to hold the info.
# Uncomment below lines for an agile demo outside Chat-bot:
# rpa_bot_file = '../reference/S-IPA-Workshop/workshop2/KIE-Loan-Application-WeChat/VA-KIE-Loan-Application.txt'
# return_code = didi_invoke_rpa_bot_async(rpa_bot_file)
print('[ Start of IPA-Bot ] Continue other tasks in main program...\n...\n')
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
<span style="color:blue">Wrap RPA-Bot into Functions() for conversational virtual assistant (VA):</span>
Reuse above defined Functions().
虚拟员工: 文字指令交互(Conversational automation using text/message command)
|
parm_msg = {} # Define a global variable to hold current msg
# Define "keywords intention command -> automation action" lookup to invoke RPA-Bot process automation functions
parm_bot_intention_action = {
'#apply_loan': '../reference/S-IPA-Workshop/workshop2/KIE-Loan-Application-WeChat/VA-KIE-Loan-Application.txt'
, '#ocr_invoice': '../reference/S-IPA-Workshop/workshop2/KIE-Loan-Application-WeChat/VA-KIE-Loan-Application.txt'
, '#check_Application': '../reference/S-IPA-Workshop/workshop2/KIE-Loan-Application-WeChat/VA-KIE-Loan-Application.txt'
, '#hi_everyone_welcome_to_see_you_here_in_the_process_automation_course': '../reference/S-IPA-Workshop/workshop2/KIE-Loan-Application-WeChat/VA-KIE-Loan-Application.txt'
}
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
Retrieve rpa_bot_file based on received Chat-Bot command
|
# Retrieve rpa_bot_file based on received Chat-Bot command
def didi_retrieve_rpa_bot_file(chat_bot_command):
print('[ W I P ] Retrieve rpa_bot_file based on received Chat-Bot command : {} -> {}'.format(
chat_bot_command, chat_bot_command.lower()))
if chat_bot_command.lower() in parm_bot_intention_action.keys():
return parm_bot_intention_action[chat_bot_command.lower()]
else:
print('[ ERROR ] Command not found!')
return None
# Uncomment below lines for an agile demo outside Chat-bot:
# didi_retrieve_rpa_bot_file('#apply_loan')
# Uncomment below lines for an agile demo outside Chat-bot:
# didi_retrieve_rpa_bot_file('#Apply_Loan')
# Uncomment below lines for an agile demo outside Chat-bot:
# didi_retrieve_rpa_bot_file('#approve_loan')
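Putting the two helpers together, here is a minimal sketch of how an incoming text command could be dispatched. The handler name is illustrative and not part of the original notebook; it assumes an itchat-style message dict with a 'Text' key, as used elsewhere in this lesson:
def didi_handle_text_command(msg):
    # look up the RPA-Bot script for the received command and run it synchronously
    rpa_bot_file = didi_retrieve_rpa_bot_file(msg['Text'])
    if rpa_bot_file is None:
        return '[ Info ] Unknown command: {}'.format(msg['Text'])
    return_code, time_spent = didi_invoke_rpa_bot(rpa_bot_file)
    if return_code == 0:
        return '[ OK ] IPA command completed in {} seconds'.format(time_spent)
    return '[ ERROR ] IPA command failed (return code {})'.format(return_code)
# Uncomment below line for an agile demo outside Chat-bot:
# print(didi_handle_text_command({'Text': '#apply_loan'}))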
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
虚拟员工: 语音指令交互(Conversational automation using speech/voice command)
<span style="color:blue">Use local AI module in native forms</span> for Speech Recognition: Speech-to-Text
导入需要用到的一些功能程序库 (Import the required libraries): Local AI Module Speech-to-Text
|
# Local AI Module for Speech Synthesis: Speech-to-Text
# Install library into computer storage:
# !pip install SpeechRecognition
# !pip install pocketsphinx
# Load library into computer memory:
import speech_recognition as sr
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
IF !pip install pocketsphinx failed, THEN: sudo apt-get install python python-dev python-pip build-essential swig libpulse-dev
https://stackoverflow.com/questions/36523705/python-pocketsphinx-requesterror-missing-pocketsphinx-module-ensure-that-pocke
Supported Languages
https://github.com/Uberi/speech_recognition/blob/master/reference/pocketsphinx.rst#installing-other-languages.
By default, SpeechRecognition's Sphinx functionality supports only US English. Additional language packs are also available:
* English (Default support) : en-US
* International French : fr-FR
* Mandarin Chinese : zh-CN
* Italian : it-IT
Utility function to convert mp3 file to 'wav / flac' audio file type:
|
# Flag to indicate the environment to run this program:
# Uncomment to run the code on Google Cloud Platform
# parm_runtime_env_GCP = True
# Uncomment to run the code in local machine
parm_runtime_env_GCP = False
import subprocess
# Utility function to convert an mp3 file to the target audio file type for the speech API:
#   audio_type       : 'flac' (default) or 'wav'
#   audio_file_input : e.g. msg['FileName']
def didi_mp3_audio_conversion(audio_file_input, audio_type='flac'):
    audio_file_output = str(audio_file_input) + '.' + str(audio_type)
    # Remove audio_file_output first, if it already exists
    retcode = subprocess.call(['rm', audio_file_output])
    # Convert the mp3 file to the target audio file (single channel):
    if parm_runtime_env_GCP:  # using Datalab in Google Cloud Platform
        # GCP: use avconv to convert audio
        retcode = subprocess.call(['avconv', '-i', audio_file_input, '-ac', '1', audio_file_output])
    else:  # using an iss-vm Virtual Machine, or a local machine
        # VM : use ffmpeg to convert audio (assumes ffmpeg is installed and on PATH)
        retcode = subprocess.call(['ffmpeg', '-i', audio_file_input, '-ac', '1', audio_file_output])
    if retcode == 0:
        print('[ O K ] Converted audio file for API: %s' % audio_file_output)
    else:
        print('[ ERROR ] Function: didi_mp3_audio_conversion() Return Code is : {}'.format(retcode))
    return audio_file_output  # return the output file name (string) only
# Conversion for files not already in wav or flac format:
# AUDIO_FILE = didi_mp3_audio_conversion("reference/S-IPA-welcome.mp3")
# AUDIO_FILE = didi_mp3_audio_conversion("reference/S-IPA-welcome.mp3", 'wav')
# AUDIO_FILE = didi_mp3_audio_conversion("reference/text2speech.mp3")
# AUDIO_FILE = didi_mp3_audio_conversion("reference/text2speech.mp3", 'wav')
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
Calling Local AI Module: speech_recognition.Recognizer().recognize_sphinx()
|
# Running Local AI Module Speech-to-Text
def didi_speech2text_local(AUDIO_FILE, didi_language_code='en-US'):
    # Use the audio file as the audio source
    r = sr.Recognizer()
    with sr.AudioFile(AUDIO_FILE) as source:
        audio = r.record(source)  # read the entire audio file
    transcription = ''
    # Recognize speech using Sphinx
    try:
        transcription = r.recognize_sphinx(audio, language=didi_language_code)
        print("[ Terminal Info ] Sphinx thinks you said : \'{}\'.".format(transcription))
    except sr.UnknownValueError:
        print("[ Terminal Info ] Sphinx could not understand audio")
    except sr.RequestError as e:
        print("[ Terminal Info ] Sphinx error; {0}".format(e))
    return transcription
# Uncomment below lines for an agile demo outside Chat-bot:
# transcription = didi_speech2text_local(didi_mp3_audio_conversion("reference/S-IPA-welcome.mp3"))
# transcription = didi_speech2text_local("reference/S-IPA-welcome.mp3.flac")
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
Fuzzy match from 'transcribed audio command' to predefined 'chat_bot_command'
Automatically create a new lookup by converting each text-based intention command into a voice-based intention command.
Example: from '#apply_loan' to 'voice command apply loan'
|
# import json  # for printing the dictionaries nicely formatted
# print(json.dumps(parm_bot_intention_action, indent=4, sort_keys=True))
import re
parm_bot_intention_action_fuzzy_match = {}
for intention, action in parm_bot_intention_action.items():
    # e.g. '#apply_loan' -> 'voice command apply loan'
    intention_fuzzy_match = " ".join(re.split('#|_', intention.replace('#', 'voice_command_')))
    parm_bot_intention_action_fuzzy_match[intention_fuzzy_match] = action
# print(json.dumps(parm_bot_intention_action_fuzzy_match, indent=4, sort_keys=True))
# print(parm_bot_intention_action_fuzzy_match)
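# A hedged sanity check (assuming the '#apply_loan' key defined earlier):
# assert 'voice command apply loan' in parm_bot_intention_action_fuzzy_match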
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
Fuzzy match function: Compare similarity between two text strings
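The score below is the Jaccard similarity of the two word sets: the number of words the strings have in common divided by the number of unique words overall. For example, comparing 'run DIDI voice command apply loan' (6 unique words) with 'voice command apply loan' (4 unique words) gives 4 common words out of 6 unique words in total, so the score is 4/6 ≈ 0.667.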
|
# Compare similarity between two text strings
def did_fuzzy_match_score(string1, string2):
    print('\n[ Inside FUNCTION ] did_fuzzy_match_score')
    string1_list = string1.lower().split()  # split by space
    string2_list = string2.lower().split()  # split by space
    print('string1_list : ', string1_list)
    print('string2_list : ', string2_list)
    # words in common
    common_words = set(string1_list) & set(string2_list)
    # print('len(common_words) : ', len(common_words))
    # total unique words
    unique_words = set(string1_list + string2_list)
    # print('len(unique_words) : ', len(unique_words))
    jaccard_similarity = float(len(common_words) / len(unique_words))
    print('jaccard_similarity : {0:.3f}'.format(jaccard_similarity))
    return jaccard_similarity
# Uncomment below lines for an agile demo outside Chat-bot:
# did_fuzzy_match_score('run DIDI voice command apply loan', 'voice command apply loan')
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
Retrieve rpa_bot_file based on received Chat-Bot command ( fuzzy match for voice/speech2text )
|
# Retrieve rpa_bot_file based on received Chat-Bot command ( fuzzy match for voice/speech2text )
def didi_retrieve_rpa_bot_file_fuzzy_match(speech2text_chat_bot_command, didi_confidence_threshold=0.8):
    print('\n[ Inside FUNCTION ] didi_retrieve_rpa_bot_file_fuzzy_match')
    matched_intention = [0.0, {}]  # a list to store the intention_command with the highest jaccard_similarity
    for intention, action in parm_bot_intention_action_fuzzy_match.items():
        # print('\nintention : ', intention)
        # print('action : ', action)
        fuzzy_match_score_current = did_fuzzy_match_score(intention, speech2text_chat_bot_command)
        # print('fuzzy_match_score_current : ', fuzzy_match_score_current)
        if fuzzy_match_score_current > matched_intention[0]:
            matched_intention[0] = fuzzy_match_score_current
            matched_intention[1] = {intention : action}
        # print('matched_intention : ', matched_intention)
    print('\n[ Finale ] matched_intention : ', matched_intention)
    if matched_intention[0] < didi_confidence_threshold:  # not confident enough about the fuzzy-matched voice command
        return None
    else:  # confident enough, thus return the predefined rpa_bot_file
        return str(list(matched_intention[1].values())[0])
# Uncomment below lines for an agile demo outside Chat-bot:
# parm_voice_command_confidence_threshold = 0.6  # minimum fuzzy-match score required to accept a voice command
# action_rpa_bot_file = didi_retrieve_rpa_bot_file_fuzzy_match('run DIDI voice command apply loan', parm_voice_command_confidence_threshold)
# print('\n[ Process Automation ] rpa_bot_file : ', action_rpa_bot_file)
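# Expected behaviour of the demo call above (based on the lookup defined earlier): the best match is
# 'voice command apply loan' with a Jaccard score of 4/6 ≈ 0.667, which clears the 0.6 threshold,
# so the mapped VA-KIE-Loan-Application.txt path is returned.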
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
Control Parameters
|
# Control of asynchronous or synchronous processing when triggering RPA-Bot
parm_asynchronous_process = True
# Confidence threshold for fuzzy-matched voice commands (low value for demo only)
parm_voice_command_confidence_threshold = 0.05
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
<span style="color:blue">Start interactive conversational virtual assistant (VA):</span>
Import ItChat and the other required libraries:
|
import itchat
from itchat.content import *
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
Log in by scanning the QR code image with the WeChat app
|
# Running in Jupyter Notebook:
# itchat.auto_login(hotReload=True)   # hotReload=True: keep the login state after exit, so restarting within a short time does not require scanning the QR code again.
# or
# itchat.auto_login(enableCmdQR=-2)   # enableCmdQR=-2: render the QR code inline in a Jupyter Notebook
# Running in Terminal:
itchat.auto_login(enableCmdQR=2)      # enableCmdQR=2: render the QR code in the terminal / command line
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
Virtual employee: text-command interaction (conversational automation using text/message commands)
|
# Trigger RPA-Bot when a [TEXT] command message is received:
@itchat.msg_register([TEXT])  # text messages
def didi_ipa_text_command(msg):
    global parm_msg
    parm_msg = msg
    if msg['Text'][0] == '#':
        # Retrieve rpa_bot_file based on received Chat-Bot command
        rpa_bot_file = didi_retrieve_rpa_bot_file(msg['Text'])
        if rpa_bot_file == None:  # input command / rpa_bot_file NOT FOUND!
            print(u'[ Terminal Info ] RPA-Bot [ ERROR ] Command not found : [ %s ] %s From: %s'
                  % (msg['Type'], msg['Text'], msg['FromUserName']))
            itchat.send(u'RPA-Bot [ ERROR ] Command not found : \n[ %s ]\n%s' % (msg['Type'], msg['Text']), msg['FromUserName'])
        else:
            print(u'[ Terminal Info ] RPA-Bot [ W I P ] Command : [ %s ] %s From: %s'
                  % (msg['Type'], msg['Text'], msg['FromUserName']))
            print(u'[ Terminal Info ] RPA-Bot [ W I P ] File : %s' % (rpa_bot_file))
            if parm_asynchronous_process:  # Don't wait for RPA-Bot completion
                # Send 'work in progress' message, then trigger RPA-Bot
                itchat.send(u'[ Async WIP ] IPA Command triggered: \n[ %s ]\n%s' % (msg['Type'], msg['Text']), msg['FromUserName'])
                # Trigger RPA-Bot [ Asynchronous ]
                didi_invoke_rpa_bot_async(rpa_bot_file)  # No return of return_code, time_spent
            else:  # Wait for RPA-Bot completion
                # Send 'work in progress' message, then trigger RPA-Bot
                itchat.send(u'[ Sync WIP ] IPA Command triggered: \n[ %s ]\n%s' % (msg['Type'], msg['Text']), msg['FromUserName'])
                # Trigger RPA-Bot [ Synchronous ]
                return_code, time_spent = didi_invoke_rpa_bot(rpa_bot_file)
                print(u'[ Terminal Info ] didi_invoke_rpa_bot(rpa_bot_file) [ Return Code : %s ]' % (return_code))
                if return_code == 0:
                    # Send confirmation message upon RPA-Bot completion
                    itchat.send(u'[ Sync OK ] IPA Command completed : \n[ %s ]\n%s\n[ Time Spent : %s seconds ]' % (msg['Type'], msg['Text'], time_spent), msg['FromUserName'])
                else:
                    # Error when running RPA-Bot task
                    itchat.send(u'[ Sync ERROR ] [ Return Code : %s ] IPA Command failed : \n[ %s ]\n%s\n[ Time Spent : %s seconds ]' % (return_code, msg['Type'], msg['Text'], time_spent), msg['FromUserName'])
    else:
        print(u'[ Terminal Info ] Thank you! 谢谢亲[嘴唇]我已收到 I received: [ %s ] %s From: %s'
              % (msg['Type'], msg['Text'], msg['FromUserName']))
        itchat.send(u'Thank you! 谢谢亲[嘴唇]我已收到\nI received:\n[ %s ]\n%s' % (msg['Type'], msg['Text']), msg['FromUserName'])
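# Example interaction (based on the handler above and the lookup defined earlier):
# - Sending '#apply_loan' replies '[ Async WIP ] IPA Command triggered ...' and launches the mapped RPA-Bot.
# - Sending '#approve_loan' (not in the lookup) replies 'RPA-Bot [ ERROR ] Command not found ...'.
# - Sending any text without a leading '#' just receives the thank-you echo reply.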
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
Virtual employee: voice-command interaction (conversational automation using speech/voice commands)
|
# 1. Speech recognition: convert the voice recording into text
@itchat.msg_register([RECORDING], isGroupChat=True)
@itchat.msg_register([RECORDING])
def download_files(msg):
    msg.download(msg.fileName)
    print('\nDownloaded audio file name is: %s' % msg['FileName'])
    ###########################################################################################################
    #                              call audio analysis : Local AI Sphinx                                      #
    ###########################################################################################################
    audio_analysis_reply = u'[ Audio Analysis 音频处理结果 ]\n'
    # Voice to Text:
    audio_analysis_reply += u'\n[ Voice -> Text 语音识别 ]\n'
    response = didi_speech2text_local(didi_mp3_audio_conversion(msg['FileName']), 'en-US')
    # Fuzzy-match the transcription against the predefined voice commands
    rpa_bot_file = didi_retrieve_rpa_bot_file_fuzzy_match(response, parm_voice_command_confidence_threshold)
    if rpa_bot_file == None:  # input command / rpa_bot_file NOT FOUND!
        print(u'[ Terminal Info ] Not Confident IPA Command\n')
        audio_analysis_reply += str(response) + u'\n( Not Confident IPA Command )\n'
    else:
        print(u'[ Terminal Info ] RPA-Bot [ W I P ] Command : %s' % (response))
        print(u'[ Terminal Info ] RPA-Bot [ W I P ] File : %s' % (rpa_bot_file))
        if parm_asynchronous_process:  # Don't wait for RPA-Bot completion
            # Send 'work in progress' message, then trigger RPA-Bot
            audio_analysis_reply += (u'[ Async WIP ] IPA Command triggered\n')
            # Trigger RPA-Bot [ Asynchronous ]
            didi_invoke_rpa_bot_async(rpa_bot_file)  # No return of return_code, time_spent
        else:  # Wait for RPA-Bot completion
            # Send 'work in progress' message, then trigger RPA-Bot
            audio_analysis_reply += (u'[ Sync WIP ] IPA Command triggered\n')
            # Trigger RPA-Bot [ Synchronous ]
            return_code, time_spent = didi_invoke_rpa_bot(rpa_bot_file)
            print(u'[ Terminal Info ] didi_invoke_rpa_bot(rpa_bot_file) [ Return Code : %s ]' % (return_code))
            if return_code == 0:
                # Send confirmation message upon RPA-Bot completion
                audio_analysis_reply += (u'[ Sync OK ] [ Return Code : %s ] IPA Command completed !\n[ Time Spent : %s seconds ]' % (return_code, time_spent))
            else:
                # Error when running RPA-Bot task
                audio_analysis_reply += (u'[ Sync ERROR ] [ Return Code : %s ] IPA Command failed !\n[ Time Spent : %s seconds ]' % (return_code, time_spent))
    return audio_analysis_reply
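# Example interaction (based on the handler above and the fuzzy lookup defined earlier):
# - A voice note saying "voice command apply loan" is transcribed by Sphinx, fuzzy-matched above the
#   confidence threshold, and triggers the mapped RPA-Bot; the reply summarises the audio analysis.
# - A voice note that matches nothing well enough just gets its transcription back, flagged as
#   '( Not Confident IPA Command )'.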
|
wechat_tool_py3_local/terminal-script-py/lesson_6_terminal_py3.ipynb
|
telescopeuser/workshop_blog
|
mit
|
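The handlers above only register callbacks; to actually start listening for WeChat messages, the notebook would typically end with itchat's standard blocking message loop (a minimal sketch, assuming the auto_login cell above has already run):

itchat.run()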