Find points inside a solid
# selecting all inside the solid
# These two methods are equivalent, but test=4 also works with open surfaces
inside, p  = pygslib.vtktools.pointquering(mycube, azm=0, dip=0, x=x, y=y, z=z, test=1)
inside1, p = pygslib.vtktools.pointquering(mycube, azm=0, dip=0, x=x, y=y, z=z, test=4)
err = inside==inside1

#print inside, tuple(p)
print x[~err]
print y[~err]
print z[~err]

# here we prepare to plot the solid, the x,y,z indicator and we also
# plot the line (direction) used to ray trace

# convert the data in the STL file into a renderer and then we plot it
renderer = pygslib.vtktools.polydata2renderer(mycube, color=(1,0,0), opacity=0.70, background=(1,1,1))

# add indicator (r->x, g->y, b->z)
pygslib.vtktools.addLine(renderer, [-10,-10,-10], [-7,-10,-10],  color=(1, 0, 0))
pygslib.vtktools.addLine(renderer, [-10,-10,-10], [-10,-7,-10],  color=(0, 1, 0))
pygslib.vtktools.addLine(renderer, [-10,-10,-10], [-10,-10,-7],  color=(0, 0, 1))

# add ray to see where we are pointing
pygslib.vtktools.addLine(renderer, (0.,0.,0.), tuple(p), color=(0, 0, 0))

# here we plot the points selected and non-selected in different color and size
# add the points selected
for i in range(len(inside)):
    p = [x[i], y[i], z[i]]
    if inside[i] != 0:
        # inside
        pygslib.vtktools.addPoint(renderer, p, radius=0.5, color=(0.0, 0.0, 1.0))
    else:
        pygslib.vtktools.addPoint(renderer, p, radius=0.2, color=(0.0, 1.0, 0.0))

# let's rotate this a bit
pygslib.vtktools.vtk_show(renderer, camera_position=(0,0,50), camera_focalpoint=(0,0,0))
pygslib/Ipython_templates/broken/vtk_tools.ipynb
opengeostat/pygslib
mit
Find points over a surface
# selecting all points over the surface (test = 2)
inside, p = pygslib.vtktools.pointquering(mycube, azm=0, dip=0, x=x, y=y, z=z, test=2)

# here we prepare to plot the solid, the x,y,z indicator and we also
# plot the line (direction) used to ray trace

# convert the data in the STL file into a renderer and then we plot it
renderer = pygslib.vtktools.polydata2renderer(mycube, color=(1,0,0), opacity=0.70, background=(1,1,1))

# add indicator (r->x, g->y, b->z)
pygslib.vtktools.addLine(renderer, [-10,-10,-10], [-7,-10,-10],  color=(1, 0, 0))
pygslib.vtktools.addLine(renderer, [-10,-10,-10], [-10,-7,-10],  color=(0, 1, 0))
pygslib.vtktools.addLine(renderer, [-10,-10,-10], [-10,-10,-7],  color=(0, 0, 1))

# add ray to see where we are pointing
pygslib.vtktools.addLine(renderer, (0.,0.,0.), tuple(-p), color=(0, 0, 0))

# here we plot the points selected and non-selected in different color and size
# add the points selected
for i in range(len(inside)):
    p = [x[i], y[i], z[i]]
    if inside[i] != 0:
        # inside
        pygslib.vtktools.addPoint(renderer, p, radius=0.5, color=(0.0, 0.0, 1.0))
    else:
        pygslib.vtktools.addPoint(renderer, p, radius=0.2, color=(0.0, 1.0, 0.0))

# let's rotate this a bit
pygslib.vtktools.vtk_show(renderer, camera_position=(0,0,50), camera_focalpoint=(0,0,0))
pygslib/Ipython_templates/broken/vtk_tools.ipynb
opengeostat/pygslib
mit
Find points below a surface
# selecting all points below the surface (test = 3)
inside, p = pygslib.vtktools.pointquering(mycube, azm=0, dip=0, x=x, y=y, z=z, test=3)

# here we prepare to plot the solid, the x,y,z indicator and we also
# plot the line (direction) used to ray trace

# convert the data in the STL file into a renderer and then we plot it
renderer = pygslib.vtktools.polydata2renderer(mycube, color=(1,0,0), opacity=0.70, background=(1,1,1))

# add indicator (r->x, g->y, b->z)
pygslib.vtktools.addLine(renderer, [-10,-10,-10], [-7,-10,-10],  color=(1, 0, 0))
pygslib.vtktools.addLine(renderer, [-10,-10,-10], [-10,-7,-10],  color=(0, 1, 0))
pygslib.vtktools.addLine(renderer, [-10,-10,-10], [-10,-10,-7],  color=(0, 0, 1))

# add ray to see where we are pointing
pygslib.vtktools.addLine(renderer, (0.,0.,0.), tuple(p), color=(0, 0, 0))

# here we plot the points selected and non-selected in different color and size
# add the points selected
for i in range(len(inside)):
    p = [x[i], y[i], z[i]]
    if inside[i] != 0:
        # inside
        pygslib.vtktools.addPoint(renderer, p, radius=0.5, color=(0.0, 0.0, 1.0))
    else:
        pygslib.vtktools.addPoint(renderer, p, radius=0.2, color=(0.0, 1.0, 0.0))

# let's rotate this a bit
pygslib.vtktools.vtk_show(renderer, camera_position=(0,0,50), camera_focalpoint=(0,0,0))
pygslib/Ipython_templates/broken/vtk_tools.ipynb
opengeostat/pygslib
mit
Export points to a VTK file
data = {'inside': inside}
pygslib.vtktools.points2vtkfile('points', x, y, z, data)
pygslib/Ipython_templates/broken/vtk_tools.ipynb
opengeostat/pygslib
mit
Try out a simple reaction to link two SMILES strings
# Test by linking two molecules - anything with a C or O followed by a B can react with a C or O
# followed by an F to form a bond between the two C or O atoms, dropping the B or F.
libgen = oechem.OELibraryGen("[C,O:1][B:2].[C,O:3][F:4]>>[C,O:1][C,O:3]")

mol = oechem.OEGraphMol()
oechem.OESmilesToMol(mol, 'COCCB')
libgen.SetStartingMaterial(mol, 0)
mol.Clear()
oechem.OESmilesToMol(mol, 'COCOCOCF')
libgen.SetStartingMaterial(mol, 1)

mols = []
for product in libgen.GetProducts():
    print("product smiles= %s" % oechem.OEMolToSmiles(product))
    mols.append(oechem.OEMol(product))

# Depict result
depict(mols[0])
examples/substructure_linking.ipynb
bmanubay/open-forcefield-tools
mit
Proceed to library generation

First, build some sets of molecules to link, capped by our "reactant" groups.
# Build two small libraries of molecules for linking

# Build a first set of molecules
import itertools
smileslist1 = []
# Take all two-item combinations of entries in the list
for item in itertools.permutations(['C', 'O', 'c1ccccc1', 'CC', 'COC', 'CCOC', 'CCCOC',
                                    'C1CC1', 'C1CCC1', 'C1CCCC1', 'C1CCCCC1', 'C1OCOCC1'], 2):
    smileslist1.append(''.join(item))
# Now cap all of them terminally with a reaction site
smileslist1_rxn = [smi + 'B' for smi in smileslist1]

# Build a second set of molecules in the same manner
smileslist2 = []
for item in itertools.permutations(['c1ccccc1OC', 'c1ccccc1COC', 'c1ccccc1O(CO)C', 'C(O)C', 'C(OCO)',
                                    'C1OOC1', 'C1OCOC1', 'C1CCCCCCOC1', 'CO(COCO)C', 'COCO(O)OC'], 2):
    smileslist2.append(''.join(item))
# Cap all with reaction site
smileslist2_rxn = [smi + 'F' for smi in smileslist2]
examples/substructure_linking.ipynb
bmanubay/open-forcefield-tools
mit
Now, generate our library
# Build overall set of reactants
libgen = oechem.OELibraryGen("[C,O:1][B:2].[C,O:3][F:4]>>[C,O:1][C,O:3]")

mol = oechem.OEGraphMol()
for idx, smi in enumerate(smileslist1_rxn):
    oechem.OESmilesToMol(mol, smi)
    libgen.AddStartingMaterial(mol, 0)
    mol.Clear()

for idx, smi in enumerate(smileslist2_rxn):
    oechem.OESmilesToMol(mol, smi)
    libgen.AddStartingMaterial(mol, 1)
    mol.Clear()

products = [oechem.OEMol(product) for product in libgen.GetProducts()]
print(len(products))

depict(products[0])
depict(products[4])
examples/substructure_linking.ipynb
bmanubay/open-forcefield-tools
mit
Generate a conformer for each and write out
omega = oeomega.OEOmega()
omega.SetMaxConfs(1)
omega.SetStrictStereo(False)

# First do just the first 10 molecules
#products = products[0:10]

ofs = oechem.oemolostream('linked_substructures.oeb')
for oemol in products:
    omega(oemol)
    oechem.OETriposAtomNames(oemol)  # assign atom names to the molecule being written
    oechem.OEWriteMolecule(ofs, oemol)
ofs.close()

# Make sure I can read and write to mol2
ifs = oechem.oemolistream('linked_substructures.oeb')
ofs = oechem.oemolostream('linked_substructures_sample.mol2')
ct = 0
mol = oechem.OEMol()
while oechem.OEReadMolecule(ifs, mol):
    oechem.OEWriteMolecule(ofs, mol)
    ct += 1
    mol = oechem.OEMol()
    if ct > 10:
        break  # Don't eat up tons of space, just test
ifs.close()
ofs.close()
examples/substructure_linking.ipynb
bmanubay/open-forcefield-tools
mit
Restart the Notebook kernel to use the SDK packages
from IPython.display import display_html
display_html("<script>Jupyter.notebook.kernel.restart()</script>", raw=True)
samples/katib/early-stopping.ipynb
kubeflow/kfp-tekton
apache-2.0
Import required packages
import kfp
import kfp.dsl as dsl
from kfp import components

from kubeflow.katib import ApiClient
from kubeflow.katib import V1beta1ExperimentSpec
from kubeflow.katib import V1beta1AlgorithmSpec
from kubeflow.katib import V1beta1EarlyStoppingSpec
from kubeflow.katib import V1beta1EarlyStoppingSetting
from kubeflow.katib import V1beta1ObjectiveSpec
from kubeflow.katib import V1beta1ParameterSpec
from kubeflow.katib import V1beta1FeasibleSpace
from kubeflow.katib import V1beta1TrialTemplate
from kubeflow.katib import V1beta1TrialParameterSpec
samples/katib/early-stopping.ipynb
kubeflow/kfp-tekton
apache-2.0
Define an Experiment

You have to create an Experiment object before deploying it. This Experiment is similar to this YAML.
# Experiment name and namespace.
experiment_name = "median-stop"
# For a multi-user deployment, please specify your own namespace instead of "anonymous".
experiment_namespace = "anonymous"

# Trial count specification.
max_trial_count = 18
max_failed_trial_count = 3
parallel_trial_count = 2

# Objective specification.
objective = V1beta1ObjectiveSpec(
    type="maximize",
    goal=0.99,
    objective_metric_name="Validation-accuracy",
    additional_metric_names=["Train-accuracy"]
)

# Algorithm specification.
algorithm = V1beta1AlgorithmSpec(
    algorithm_name="random",
)

# Early Stopping specification.
early_stopping = V1beta1EarlyStoppingSpec(
    algorithm_name="medianstop",
    algorithm_settings=[
        V1beta1EarlyStoppingSetting(
            name="min_trials_required",
            value="2"
        )
    ]
)

# Experiment search space.
# In this example we tune the learning rate, the number of layers and the optimizer.
# The learning rate is given a deliberately poor feasible space to produce more early-stopped Trials.
parameters = [
    V1beta1ParameterSpec(
        name="lr",
        parameter_type="double",
        feasible_space=V1beta1FeasibleSpace(
            min="0.01",
            max="0.3"
        ),
    ),
    V1beta1ParameterSpec(
        name="num-layers",
        parameter_type="int",
        feasible_space=V1beta1FeasibleSpace(
            min="2",
            max="5"
        ),
    ),
    V1beta1ParameterSpec(
        name="optimizer",
        parameter_type="categorical",
        feasible_space=V1beta1FeasibleSpace(
            list=["sgd", "adam", "ftrl"]
        ),
    ),
]
samples/katib/early-stopping.ipynb
kubeflow/kfp-tekton
apache-2.0
Define a Trial template

In this example, the Trial's Worker is a Kubernetes Job.
# JSON template specification for the Trial's Worker Kubernetes Job.
trial_spec = {
    "apiVersion": "batch/v1",
    "kind": "Job",
    "spec": {
        "template": {
            "metadata": {
                "annotations": {
                    "sidecar.istio.io/inject": "false"
                }
            },
            "spec": {
                "containers": [
                    {
                        "name": "training-container",
                        "image": "docker.io/kubeflowkatib/mxnet-mnist:v1beta1-45c5727",
                        "command": [
                            "python3",
                            "/opt/mxnet-mnist/mnist.py",
                            "--batch-size=64",
                            "--lr=${trialParameters.learningRate}",
                            "--num-layers=${trialParameters.numberLayers}",
                            "--optimizer=${trialParameters.optimizer}"
                        ]
                    }
                ],
                "restartPolicy": "Never"
            }
        }
    }
}

# Configure parameters for the Trial template.
# We set retain to True so the Trial Job's Kubernetes Pods are not cleaned up.
trial_template = V1beta1TrialTemplate(
    retain=True,
    primary_container_name="training-container",
    trial_parameters=[
        V1beta1TrialParameterSpec(
            name="learningRate",
            description="Learning rate for the training model",
            reference="lr"
        ),
        V1beta1TrialParameterSpec(
            name="numberLayers",
            description="Number of training model layers",
            reference="num-layers"
        ),
        V1beta1TrialParameterSpec(
            name="optimizer",
            description="Training model optimizer (sgd, adam or ftrl)",
            reference="optimizer"
        ),
    ],
    trial_spec=trial_spec
)
samples/katib/early-stopping.ipynb
kubeflow/kfp-tekton
apache-2.0
Define an Experiment specification

Create an Experiment specification from the above parameters.
experiment_spec = V1beta1ExperimentSpec(
    max_trial_count=max_trial_count,
    max_failed_trial_count=max_failed_trial_count,
    parallel_trial_count=parallel_trial_count,
    objective=objective,
    algorithm=algorithm,
    early_stopping=early_stopping,
    parameters=parameters,
    trial_template=trial_template
)
samples/katib/early-stopping.ipynb
kubeflow/kfp-tekton
apache-2.0
Create a Pipeline using the Katib component

The best hyperparameters are printed after the Experiment is finished. The Experiment is not deleted after the Pipeline is finished.
# Get the Katib launcher.
katib_experiment_launcher_op = components.load_component_from_url(
    "https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/katib-launcher/component.yaml")

PRINT_STR = """
name: print
description: print msg
inputs:
- {name: message, type: JsonObject}
implementation:
  container:
    image: library/bash:4.4.23
    command:
    - sh
    - -c
    args:
    - |
      echo "Best HyperParameters: $0"
    - {inputValue: message}
"""

print_op = components.load_component_from_text(PRINT_STR)

@dsl.pipeline(
    name="launch-katib-early-stopping-experiment",
    description="An example to launch Katib Experiment with early stopping"
)
def median_stop():
    # Katib launcher component.
    # Experiment Spec should be serialized to a valid Kubernetes object.
    op = katib_experiment_launcher_op(
        experiment_name=experiment_name,
        experiment_namespace=experiment_namespace,
        experiment_spec=ApiClient().sanitize_for_serialization(experiment_spec),
        experiment_timeout_minutes=60,
        delete_finished_experiment=False)

    # Output container to print the results.
    print_op(op.output)
samples/katib/early-stopping.ipynb
kubeflow/kfp-tekton
apache-2.0
Run the Pipeline

You can check the Katib Experiment info in the Katib UI.

If you run this in a multi-user deployment, you need to follow the instructions here: https://github.com/kubeflow/kfp-tekton/tree/master/guides/kfp-user-guide#2-upload-pipelines-using-the-kfp_tektontektonclient-in-python

Check the multi-tenant section and create the TektonClient with the host and cookies arguments. For example:

TektonClient(host='http://<Kubeflow_public_endpoint_URL>/pipeline', cookies='authservice_session=xxxxxxx')

You also need to specify the namespace argument when calling the create_run_from_pipeline_func function.
from kfp_tekton._client import TektonClient

# Example code for a multi-user deployment:
# TektonClient(
#     host='http://<Kubeflow_public_endpoint_URL>/pipeline',
#     cookies='authservice_session=xxxxxxx'
# ).create_run_from_pipeline_func(median_stop, arguments={}, namespace='user namespace')

TektonClient().create_run_from_pipeline_func(median_stop, arguments={})
samples/katib/early-stopping.ipynb
kubeflow/kfp-tekton
apache-2.0
Remember that IPython and Jupyter will automatically (and conveniently!) call the __repr__ of an object if it is the last thing in the cell. But I'll use the print() function explicitly just to be clear. This displays the string representation of the object. It usually includes:
* an object type (class)
* an object name
* a memory location

Now, let's actually call the function (which we do with use of the parentheses), and assign the return value (a string) to a new variable.
# now *call* the function by using parens
output = duplicator('yo')

# verify the expected behavior
print(output)
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Because functions are objects, they have attributes just like any other Python object.
# the dir() built-in function displays the argument's attributes
dir(duplicator)
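Two of the attributes listed by dir() are worth calling out; a tiny sketch (my addition, assuming duplicator is the function defined earlier in the notebook):

# every function object carries metadata as plain attributes
print(duplicator.__name__)   # the name the function was defined with
print(duplicator.__doc__)    # its docstring, or None if it doesn't have one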
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Because functions are objects, we can pass them around like any other data type. For example, we can assign them to other variables! If you occasionally still have dreams about the Enumerator, this will look familiar.
# first, recall the normal behavior of duplicator()
duplicator('ring')

# now create a new variable and assign our function to it
another_duplicator = duplicator

# now, we can use the *call* notation because the new variable is
# assigned the original function
another_duplicator('giggity')

# and we can verify that this is actually a reference to the
# original function
print( "original function: %s" % duplicator )
print
print( "new function: %s" % another_duplicator )
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
By looking at the memory location, we can see that the second function is just a pointer to the first function! Cool!

Functions inside functions

With an understanding of what's inside a function and what we can do with it, consider the case where we define a new function within another function. This may seem overly complicated for a little while, but stick with me. In the example below, we'll define an outer function which includes a local variable, then a local function definition. The inner function returns a string. The outer function calls the inner function, and returns the resulting value (a string).
def speaker():
    """Simply return a word (a string).

    Other than possibly asking 'why are you writing this simple function
    in such a complicated fashion?' this should hopefully be pretty clear.
    """
    # define a local variable
    word = 'hello'

    def shout():
        """Return a capitalized version of word."""
        # though not in the innermost scope, this is in the namespace one
        # level out from here
        return word.upper()

    # call shout and then return the result of calling it (a string)
    return shout()

# remember that the result is a string, now print it. the sequence:
# - put word and shout in local namespace
# - define shout()
# - call shout()
# - look for 'word', return it
# - return the return value of shout()
print( speaker() )
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Now, this may be intuitive, but it's important to note that the inner function is not accessible outside of the outer function. The interpreter can always step out into larger (or "more outer") namespaces, but we can't dig deeper into smaller ones.
try:
    # this function only exists in the local scope of the outer function
    shout()
except NameError as e:
    print(e)
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Functions out of functions

What if we'd like our outer function to return a function? For example, return the inner function instead of the return value of the inner function.
def speaker_func():
    """Similar to speaker(), but this time return the actual inner function!"""
    word = 'hello'

    def shout():
        """Return an all-caps version of the passed word."""
        return word.upper()

    # don't *call* shout(), just return it
    return shout

# remember: our function returns another function
print( speaker_func() )
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Remember that the return value of the outer function is another function. And just like we saw earlier, we can print the function to see the name and memory location. Note that the name is that of the inner function. Makes sense, since that's what we returned. Like we said before, since this is an object, we can pass this function around and assign it to other variables.
# this will assign to the variable new_shout a value that is the shout function
new_shout = speaker_func()
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Which means we can also call it with parens, as usual.
# which means we can *call* it
new_shout()
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Functions into functions

If functions are objects, we can just as easily pass a function into another function. You've probably seen this before in the context of sorting, or maybe using map:
from operator import itemgetter

# we might want to sort this by the first or second item
tuple_list = [(1,5), (9,2), (5,4)]

# itemgetter is a callable (like a function) that we pass in as an argument to sorted()
sorted(tuple_list, key=itemgetter(1))

def tuple_add(tup):
    """Sum the items in a tuple."""
    return sum(tup)

# now we can map the tuple_add() function across the tuple_list iterable.
# note that we're passing a function as an argument!
map(tuple_add, tuple_list)
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
If we can pass functions into and out of other functions, then I propose that we can extend or modify the behavior of a function without actually editing the original function!

Decorators 🎉💥🎉💥🎉💥🎉💥🎉💥

For example, say there's some previously-defined function and you'd like it to be more verbose. For now, let's just assume that printing a bunch of information to stdout is our goal. Below, we define a function verbose() that takes another function as an argument. It does other tasks both before and after actually calling the passed-in function.
def verbose(func):
    """Add some marginally annoying verbosity to the passed func."""
    def inner():
        print("heeeeey everyone, I'm about to do a thing!")
        print("hey hey hey, I'm about to call a function called: {}".format(func.__name__))
        print
        # now call (and print) the passed function
        print func()
        print
        print("whoa, did everyone see that thing I just did?! SICK!!")
    return inner
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Now, imagine we have a function that we wish had more of this type of "logging." But, we don't want to jump in and add a bunch of code to the original function.
# here's our original function (that we don't want to modify)
def say_hi():
    """Return 'hi'."""
    return '--> hi. <--'

# understand the original behavior of the function
say_hi()
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Instead, we pass the original function as an arg to our verbose function. Remember that this returns the inner function, so we can assign it and then call it.
# this is now a function...
verbose_say_hi = verbose(say_hi)

# which we can call...
verbose_say_hi()
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Looking at the output, we can see that when we called verbose_say_hi(), all of the code in it ran:
* two print statements
* then the passed function say_hi() was called
* its return value was printed
* finally, there was some other printing defined in the inner function

We'd now say that verbose_say_hi() is a decorated version of say_hi(). And, correspondingly, that verbose() is our decorator. A decorator is a callable that takes a function as an argument and returns a function (probably a modified version of the original function).

Now, you may also decide that the modified version of the function is the only version you want around. And, further, you don't want to change any other code that may depend on this. In that case, you want to overwrite the namespace value for the original function!
# this will clobber the existing namespace value (the original function def).
# in its place we have the verbose version!
say_hi = verbose(say_hi)

say_hi()
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Uneditable source code

One use-case where this technique can be useful is when you need to use an existing base of code that you can't edit. There's an existing library that defines classes and methods that are aligned with your needs, but you need a slight variation on them.

Imagine there is a library called (creatively) uneditable_lib that implements a Coordinate class (a point in two-dimensional space) and an add() method. The add() method allows you to add the vectors of two Coordinates together and returns a new Coordinate object. It has great documentation, and you know the Python source code looks like this:
! cat _uneditable_lib.py
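The printed source is not captured in this export. Based on how Coordinate and add are used below, it plausibly looks something like the following sketch; the namedtuple-based implementation is an assumption of mine, not the actual library code:

# Hypothetical reconstruction of _uneditable_lib.py -- for orientation only.
from collections import namedtuple

# a point in two-dimensional space
Coordinate = namedtuple('Coordinate', ['x', 'y'])

def add(a, b):
    """Add the vectors of two Coordinates and return a new Coordinate."""
    return Coordinate(a.x + b.x, a.y + b.y)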
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
BUT imagine you don't actually have the Python source; you only have the compiled binary. Try opening that file in vi and see how it looks.
! ls | grep .pyc

# you can still *use* the compiled code
from uneditable_lib import Coordinate, add

# make a couple of coordinates using the existing library
coord_1 = Coordinate(x=100, y=200)
coord_2 = Coordinate(x=-500, y=400)

print( coord_1 )
print( add(coord_1, coord_2) )
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
But, imagine that for our particular use-case, we need to confine the resulting coordinates to the first quadrant (that is, x > 0 and y > 0). We want any negative component in the coordinates to just be truncated to zero. We can't edit the source code, but we can decorate (and modify) it!
def coordinate_decorator(func):
    """Decorates the pre-built source code for Coordinates.

    We need the resulting coordinates to only exist in the first quadrant,
    so we'll truncate negative values to zero.
    """
    def checker(a, b):
        """Enforces first-quadrant coordinates."""
        ret = func(a, b)
        # check the result and make sure we're still in the
        # first quadrant at the end [ that is, x and y > 0 ]
        if ret.x < 0 or ret.y < 0:
            ret = Coordinate(ret.x if ret.x > 0 else 0,
                             ret.y if ret.y > 0 else 0)
        return ret
    return checker
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
We can decorate the preexisting add() function with our new wrapper. And since we may be using other code from uneditable_lib with an API that expects the function to still be called add(), we can just overwrite that namespace variable.
# first we decorate the existing function
add = coordinate_decorator(add)

# then we can call it as before
print( add(coord_1, coord_2) )
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
And, we now have a truncated Coordinate that lives in the first quadrant.
from IPython.display import Image
Image(url='http://i.giphy.com/8VrtCswiLDNnO.gif')
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
If we are running out of time, this is an ok place to wrap up.

Examples

Here are some real examples you might run across in the wild:
* Flask (web framework) uses decorators really well: @app.route is a decorator that lets you decorate an arbitrary Python function and turn it into a URL path, and @login_required is a decorator that lets your function define the appropriate authentication.
* Fabric (ops / remote server tasks) includes a number of "helper decorators" for task and hostname management.

Here are some things we didn't cover

If you go home tonight and can't possibly wait to learn more about decorators, here are the next things to look up:
* passing arguments to a decorator
* @functools.wraps
* implementing a decorator as a class

If there is sufficient interest in a Decorators, Part Deux, those would be good starters.

THE END

Is there still time?! If so, here are a couple of other useful things worth saying, quickly...

Decorating a function at definition (with @)

You might still want to use a decorator to modify a function that you wrote in your own code. You might ask "But if we're already writing the code, why not just make the function do what we want in the first place?" Valid question. One place where this comes up is in practicing DRY (Don't Repeat Yourself) software engineering practices. If an identical block of logic is to be used in many places, that code should ideally be written in only one place. In our case, we could imagine making a bunch of different functions more verbose. Instead of adding the verbosity (print statements) to each of the functions, we should define that once and then decorate the other functions. Another nice example is making your code easier to understand by separating necessary operational logic from the business logic.

There's a nice shorthand - some syntactic sugar - for this kind of statement. To illustrate it, let's just use a variation on a method from earlier. First, see how the original function behaves:
def say_bye():
    """Return 'bye'."""
    return '--> bye. <--'

say_bye()
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Remember the verbose() decorator that we already created? If this function (and perhaps others) should be made verbose at the time they're defined, we can apply the decorator right then and there using the @ shorthand:
@verbose
def say_bye():
    """Return 'bye'."""
    return '--> bye. <--'

say_bye()

Image(url='http://i.giphy.com/piupi6AXoUgTe.gif')
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
But that shouldn't actually blow your mind. Based on our discussion before, you can probably guess that the decorator notation is just shorthand for:

```python
say_bye = verbose( say_bye )
```

One place where this shorthand can come in particularly handy is when you need to stack a bunch of decorators. In place of nested decorators like this:

```python
my_func = do_thing_a( add_numbers( subtract( verify( my_func ))))
```

We can write this as:

```python
@do_thing_a
@add_numbers
@subtract
@verify
def my_func():
    # something useful happens here
```

Note that the order matters!

Ok, thank you, please come again.

THE END AGAIN

Ok, final round, I promise.

Appendix

This is material that I originally intended to include in this RST (because it's relevant), but ultimately cut for clarity. You can come back and revisit it any time.

Scopes and namespaces

Roughly speaking, scope and namespace are the reason you can type some code (like variable_1 = 'dog') and then transparently use variable_1 later in your code. Sounds obvious, but easy to take for granted! The concepts of scope and namespace in Python are pretty interesting and complex. Some time when you're bored and want to learn some details, have a read of this nice explainer or the official docs on the Python Execution Model.

A short way to think about them is the following: A namespace is a mapping from (variable) names to values; think about it like a Python dictionary, where our code can look up the keys (the variable names) and then use the corresponding values. You will generally have many namespaces in your code, they are usually nested (sort of like an onion), and they can even have identical keys (variable names). The scope (at a particular location in the code) defines in which namespace we look for variables (dictionary keys) when our code is executing.

While this RST isn't explicitly about scope, understanding these concepts will make it easier to read the code later on. Let's look at some examples. There are two built-in functions that can aid in exploring the namespace at various points in your code: globals() and locals() return a dictionary of the names and values in their respective scope. Since the namespaces in IPython are often huge, let's use IPython's bash magic to call out to a normal Python session to test how globals() works:
# the -c option starts a new interpreter session in a subshell and evaluates the code in quotes.
# here, we just assign the value 3 to the variable x and print the global namespace
! python -c 'x=3; print( globals() )'
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Note that there are a bunch of other dunder names that are in the global namespace. In particular, note that '__name__' = '__main__' because we ran this code from the command line (a comparison that you've made many times in the past!). And you can see the variable x that we assigned the value of 3. We can also look at the namespace in a more local scope with the locals() function. Inside the body of a function, the local namespace is limited to those variables defined within the function.
# this var is defined at the "outermost" level of this code block
z = 10

def printer(x):
    """Print some things to stdout."""
    # create a new var within the scope of this function
    animal = 'baboon'

    # ask about the namespace of the inner-most ("local") scope
    print('local namespace: {}\n'.format(locals()))

    # now, what about this var, which is defined *outside* the function?
    print('variable defined *outside* the function: {}'.format(z))

printer(17)
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
First, you can see that when our scope is 'inside the function', the namespace is very small. It's the local variables defined within the function, including the arg we passed the function. But, you can also see that we can still "see" the variable z, which was defined outside the function. This is because even though z doesn't exist in the local namespace, that namespace is just the "innermost" of a series of nested namespaces. When we fail to find z in locals(), the interpreter steps "out" a layer and looks for a namespace key (variable name) that's defined outside of the function. If we look through this (and any larger) namespace and still fail to find a key (variable name) for z, the interpreter will raise a NameError. While the interpreter will always continue looking in larger or more outer scopes, it can't do the opposite. Since animal is created and assigned within the scope of our function, it goes "out of scope" as soon as the function returns. Local variables defined within the scope of a function are only accessible from that same scope - inside the function.
try:
    # remember that this var was created and assigned only within the function
    animal
except NameError as e:
    print(e)
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
Closures

This is all relevant, because part of the mechanism behind a decorator is the concept of a function closure. A function closure captures the enclosing state (namespace) at the time a non-global function is defined. To see an example, consider the following code:
def outer(x):
    def inner():
        print x
    return inner
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
We saw earlier that the variable x isn't directly accessible outside of the function outer() because it's created within the scope of that function. But, Python's function closures mean that because inner() is not defined in the global scope, it keeps track of the surrounding namespace wherein it was defined. We can verify this by inspecting an example object:
o = outer(7)
o()

try:
    x
except NameError as e:
    print(e)

print( dir(o) )
print( o.func_closure )
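As a small extension (my addition, not in the original notebook), each entry in func_closure is a cell object whose cell_contents attribute holds the captured value:

# inspect the value captured by the closure created by outer(7) above
for cell in o.func_closure:
    print(cell.cell_contents)   # 7, the argument we passed to outer()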
python-decorators-101/python-decorators-101.ipynb
fionapigott/Data-Science-45min-Intros
unlicense
1. Data input (source)
# do not forget to call "Update()" at the end of the reader
rectGridReader = vtk.vtkRectilinearGridReader()
rectGridReader.SetFileName("data/jet4_0.500.vtk")
rectGridReader.Update()
.ipynb_checkpoints/05_NB_VTKPython_Scalar-checkpoint.ipynb
dianafprieto/SS_2017
mit
2. Filters

Filter 1: vtkRectilinearGridOutlineFilter() creates a wireframe outline for a rectilinear grid.
rectGridOutline = vtk.vtkRectilinearGridOutlineFilter()
rectGridOutline.SetInputData(rectGridReader.GetOutput())
.ipynb_checkpoints/05_NB_VTKPython_Scalar-checkpoint.ipynb
dianafprieto/SS_2017
mit
3. Mappers

Mapper: vtkPolyDataMapper() maps vtkPolyData to graphics primitives.
rectGridOutlineMapper = vtk.vtkPolyDataMapper()
rectGridOutlineMapper.SetInputConnection(rectGridOutline.GetOutputPort())
.ipynb_checkpoints/05_NB_VTKPython_Scalar-checkpoint.ipynb
dianafprieto/SS_2017
mit
4. Actors
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(rectGridOutlineMapper)
outlineActor.GetProperty().SetColor(0, 0, 0)
.ipynb_checkpoints/05_NB_VTKPython_Scalar-checkpoint.ipynb
dianafprieto/SS_2017
mit
5. Renderers and Windows
# Option 1: Default vtk render window
renderer = vtk.vtkRenderer()
renderer.SetBackground(0.5, 0.5, 0.5)
renderer.AddActor(outlineActor)
renderer.ResetCamera()

renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetSize(500, 500)
renderWindow.Render()

iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renderWindow)
iren.Start()

# Option 2: Using the vtk-viewer for Jupyter to interactively modify the pipeline
vtkSimpleWin = SimpleVtkViewer()
vtkSimpleWin.resize(1000, 800)
vtkSimpleWin.hide_axes()
vtkSimpleWin.add_actor(outlineActor)
vtkSimpleWin.add_actor(gridGeomActor)
vtkSimpleWin.ren.SetBackground(0.5, 0.5, 0.5)
vtkSimpleWin.ren.ResetCamera()
.ipynb_checkpoints/05_NB_VTKPython_Scalar-checkpoint.ipynb
dianafprieto/SS_2017
mit
### More Information

The build_picklist_from_txtl_setup_csvs function is used for creating a picklist from a TX-TL setup spreadsheet (version 2.0 or later -- spreadsheets from before late 2016 will not work). You will need to feed it the names of two CSV files produced from the "recipe" sheet (the first sheet, containing explicit pipetting directions) and the "stocks" sheet (the second sheet, which details the concentrations of most of the materials used in the experiment). The standard workflow looks like:

1. Edit your TX-TL setup Excel document (modifying the "Stocks" sheet and the "Layout" sheet, and the few cells shaded purple in the "Recipe" sheet).
2. Save the "Recipe" and "Stocks" sheets from the xls/xlsx file as CSVs.
3. Run build_picklist_from_txtl_setup_csvs, passing it the names of the two CSVs you just saved (a hedged sketch of this call appears after the quick-start code below).

Reaction size and master mix excess ratio are read from the recipe spreadsheet; you probably should not mess with those settings.

Note that you must give each reaction a plate location, i.e. "D4" or "E07". In the Excel spreadsheet, plate locations can be added in the "Layout" tab, and will be automatically propagated to the "Recipe" tab.

build_picklist_from_txtl_setup_csvs requires that the EchoRun object have a source plate object associated with a source plate file. It will automatically assign wells to that source plate for all the materials required to run that reaction.

Programmatic construction of TX-TL Reactions

Quick Start Example

The following creates an Echo picklist and a simple experimental protocol for a two-way dilution series of a reporter plasmid and inducer.
import murraylab_tools.echo as mt_echo
import os.path

# Relevant input and output files. Check these out for examples of input file format.
dilution_inputs  = os.path.join("2D_dilution_series", "inputs")
dilution_outputs = os.path.join("2D_dilution_series", "outputs")
plate_file  = os.path.join(dilution_inputs, "dilution_setup_example_plate.dat") # Keeps track of wells used
output_name = os.path.join(dilution_outputs, "dilution_setup_example") # Output (both a picklist and a
                                                                       # small protocol for building the
                                                                       # source plate)

# Build an EchoRun object
dilution_plate = mt_echo.SourcePlate(filename = plate_file)
default_master_mix = mt_echo.MasterMix(plate = dilution_plate)
dilution_echo_calculator = mt_echo.EchoRun(plate = dilution_plate, master_mix = default_master_mix)

# Set final concentrations of two materials
gfp_final_concentrations = range(0,6,1)    # in nM
atc_final_concentrations = range(0,100,10) # in ng/uL

# Define reporter plasmid material
gfp_conc = 294  # Concentration in ng/uL
gfp_len  = 3202 # Size of DNA in bp
gfp = mt_echo.EchoSourceMaterial('GFP Plasmid', gfp_conc, gfp_len, dilution_plate)

# Define inducer material
atc_conc = 1000 # Concentration in ng/uL (important that this matches the units of the final concentrations)
atc_len  = 0    # This isn't dsDNA, so it has 0 length.
atc = mt_echo.EchoSourceMaterial("ATc", atc_conc, atc_len, dilution_plate)

# Plan out the experiment
starting_well = "D2"
dilution_echo_calculator.build_dilution_series(gfp, atc, gfp_final_concentrations,
                                               atc_final_concentrations, starting_well)

# Write results
dilution_echo_calculator.write_picklist(output_name)
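Separately from the dilution-series quick start above, here is the spreadsheet-based workflow referenced under "More Information" as a minimal, hedged sketch. The file names are placeholders, and the argument order of build_picklist_from_txtl_setup_csvs (recipe CSV, then stocks CSV) is an assumption based on the description above rather than a documented signature:

# Hedged sketch of the TX-TL setup-spreadsheet workflow; all file names are hypothetical.
txtl_plate_file = os.path.join("txtl_setup", "txtl_source_plate.dat")  # placeholder path
txtl_plate = mt_echo.SourcePlate(filename = txtl_plate_file)           # a source plate file is required
txtl_calculator = mt_echo.EchoRun(plate = txtl_plate)

# "Recipe" and "Stocks" sheets saved as CSVs from a TX-TL setup spreadsheet (v2.0 or later)
txtl_calculator.build_picklist_from_txtl_setup_csvs("txtl_setup_recipe.csv",
                                                    "txtl_setup_stocks.csv")

txtl_calculator.write_picklist(os.path.join("txtl_setup", "txtl_setup_output"))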
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
Note the warnings -- a lot of mistakes you might make will cause you to pipette 0 nL at a time, so the code will warn you if you do so. In this case, those 0 volume pipetting steps are normal -- you just added a material to 0 concentration. Similar warnings will appear if you under-fill a reaction. You can also manually add a single material to a well (add_material_to_well) or a rectangular block of wells (add_material_to_block) by specifying the material (an EchoSourceMaterial object), a final concentration, and a location. For example, if we set up a dilution series using the variables above...
# Build an EchoRun object
dilution_plate = mt_echo.SourcePlate(filename = plate_file)
default_master_mix = mt_echo.MasterMix(plate = dilution_plate)
dilution_echo_calculator = mt_echo.EchoRun(plate = dilution_plate, master_mix = default_master_mix)

# Plan out a dilution series
starting_well = "D2"
dilution_echo_calculator.build_dilution_series(gfp, atc, gfp_final_concentrations,
                                               atc_final_concentrations, starting_well)
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
...then we can add, say, bananas, to a 2x2 square in the top-left corner of the reaction.
# Bananas at 100 nM
bananas = mt_echo.EchoSourceMaterial('Cavendish', 100, 0, None)
dilution_echo_calculator.add_material_to_block(bananas, 3, 'D2', 'E3')
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
You can also add to a single well, if you really need to.
old_bananas = mt_echo.EchoSourceMaterial('Gros Michel', 100, 0, None)
dilution_echo_calculator.add_material_to_well(old_bananas, 3, 'F5')
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
More Information

The build_dilution_series function of EchoRun is useful for quickly building a grid of dilutions of two materials, one on each axis. This is useful for double titrations of, for example, plasmid vs. inducer, or titration of two inputs. If you want to do anything more complex, you'll probably need to move to a TX-TL setup spreadsheet.

This function will always output reactions in solid blocks, with the upper-left-most well specified by the starting well argument of build_dilution_series (last argument). The function will also add a negative control well one row below the last row of the dilution series block, aligned with its first column. You will not get a positive control reaction -- you'll have to add that yourself, if you want it.

Note that you must manually define source materials for 2D dilution setup. An EchoSourceMaterial object has four attributes -- a name, a concentration, a length, and a plate object.
* The name can be whatever you want, but note that the names "water" and "txtl_mm" are reserved for water and TX-TL master mix, respectively. If you make one of your materials "water" or "txtl_mm", be aware that they're going to be assumed to actually be water and TX-TL master mix.
* Concentration and length attributes of an EchoSourceMaterial follow specific unit conventions. In brief, if the material is dsDNA, length is the number of base pairs and concentration is in units of ng/µL; otherwise, length is 0 and concentration is in units of nM. See "How it all works" above for more details.
* The EchoSourcePlate object should be the same EchoSourcePlate associated with the EchoRun object you're going to use.

If you want to set up multiple dilution series, you can call build_dilution_series multiple times on the same EchoRun object. All dilution series will be put on the same plate, and source wells for materials with the same names (including the master mix and water) will be combined. Just make sure to start each dilution series in a different well!

build_dilution_series requires that the EchoRun object have a source plate object associated with a source plate file. It will automatically assign wells to that source plate for all the materials required to run that reaction.

Association Spreadsheet

Quick Start Example

The following creates an Echo picklist for an automated PCR setup with three sets of primers to be applied individually to three different plasmids, as defined in a pair of CSV files (one defining the source plate, one describing what should go in the destination plates).
import murraylab_tools.echo as mt_echo
import os.path

# Relevant input and output files. Check these out for examples of input file format.
assoc_inputs  = os.path.join("association_list", "inputs")
assoc_outputs = os.path.join("association_list", "outputs")
stock_file = os.path.join(assoc_inputs, 'association_source_sheet.csv')
assoc_file = os.path.join(assoc_inputs, 'association_final_sheet.csv')
assoc_name = os.path.join(assoc_outputs, 'association_example')

# Build an EchoRun object
assoc_echo_calculator = mt_echo.EchoRun()
assoc_echo_calculator.rxn_vol = 50000 # PCR is large-volume!

# Define which column of the source file is what
name_col  = 'B'
conc_col  = 'C'
len_col   = 'D'
well_col  = 'A'
plate_col = 'E'

# Define the source plate based on the stock file.
assoc_echo_calculator.load_source_plate(stock_file, name_col, conc_col, len_col,
                                        well_col, plate_col)

# Build a protocol, based on the association file.
assoc_echo_calculator.build_picklist_from_association_spreadsheet(assoc_file, well_col)

# Write the picklist
assoc_echo_calculator.write_picklist(assoc_name)
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
More Information

The build_picklist_from_association_spreadsheet function is used for arbitrary mappings of source plate wells to destination wells, using 1) a spreadsheet describing the contents of each source plate, and 2) a second spreadsheet describing what materials should be put in what wells, and at what concentration.

You must always set up a source plate first. This should be done by calling load_source_plate with the name of the source plate spreadsheet and information about its organization. Alternatively, you could build your own manually. That is not recommended.

The first 'source' spreadsheet (describing the source plate) is a CSV-format spreadsheet with, at minimum, the columns listed below. You can add any number of additional columns to the source plate spreadsheet, which will be ignored by this function. This is the spreadsheet read in with the load_source_plate command.
* Location: This is the well number of the material, i.e. "C4" or "E08". If the same material is found in multiple wells, it will need one row for each well.
* Name: A brief string describing the material. This name will be used in the recipe output of EchoRun. It can be any string, but be aware that the names "water" and "txtl_mm" are reserved for describing water and TX-TL master mix, respectively. For this function, that won't matter, but if you try to combine other setup commands with this one, you should avoid using "water" and "txtl_mm" as material names.
* Concentration: If the material is dsDNA, this should be the concentration of the DNA in ng/µL. Otherwise, it should be the concentration of the material in whatever units you want (nM recommended).
* Length: If the material is dsDNA, this should be the number of base pairs in the DNA. Otherwise, length should be 0. This is important for correct unit usage.
* Plate: A single source plate spreadsheet can contain materials from different source plates, so a column is required to determine which plate the material is coming from. This column holds the name of the source plate. Put a number N here, and the plate will be auto-named "Plate[N]" (recommended usage). Alternatively, you can give the plate a custom name.

The second 'association' spreadsheet (describing what materials go together) is also a CSV-format spreadsheet. This spreadsheet determines what goes into each destination well. One column of the association spreadsheet determines that row's well on the destination plate. The EchoRun object will scan through every column, ignoring the well column, taking pairs of columns from left to right. There can be any number of pairs of columns; each one will cause one material to be moved from the source plate to the destination plate. The first column in each pair holds the name of a material. This name must exactly match one of the material names listed in the source spreadsheet, and determines where material will be taken from. The second column in each pair describes the final concentration of that material. If the material is dsDNA (has non-zero length), the units of final concentration are assumed to be nM. Otherwise, the units of final concentration are the same as the units of concentration used for that material in the source plate.
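To make the two layouts concrete, here is a purely illustrative example (my addition; the material names, wells, concentrations and header labels are made up, not files shipped with the package):

# Source sheet: one row per source well, with location, name, concentration, length and plate.
example_source_csv = """Location,Name,Concentration,Length,Plate
A1,Primer_F1,10,0,1
A2,Plasmid_X,50,4000,1
"""

# Association sheet: a well column plus (material name, final concentration) column pairs,
# read in pairs from left to right.
example_association_csv = """Well,Material 1,Final conc 1,Material 2,Final conc 2
D2,Plasmid_X,1,Primer_F1,0.5
D3,Plasmid_X,1,Primer_F1,1.0
"""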
Note that unlike the other two experimental settings of EchoRun, build_picklist_from_association_spreadsheet does not require that its EchoRun object be associated with a source plate file or SourcePlate object prior to the function being called -- a new SourcePlate object will be manufactured from the input spreadsheet when you call load_source_plate.

Tweaking Settings
import murraylab_tools.echo as mt_echo
import os.path

plate_file = os.path.join("tweaking", "plate_file_example.dat")

# Build an EchoRun object
example_plate = mt_echo.SourcePlate(filename = plate_file)
example_master_mix = mt_echo.MasterMix(example_plate)
example_echo_calculator = mt_echo.EchoRun(plate = example_plate, master_mix = example_master_mix)
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
To change the reaction volume: Reaction volume is a property of an EchoRun object.
example_echo_calculator.rxn_vol = 10.5 * 1e3 # Volume IN nL!!!
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
Make sure to run this before running build_picklist_from_association_spreadsheet or build_dilution_series. You almost certainly shouldn't do this at all when using build_picklist_from_txtl_setup_csvs, because that function will automatically extract a reaction volume from the setup spreadsheet.

To change the master mix composition/extract fraction:

This is really only relevant for the 2D dilution series TX-TL setup (build_dilution_series) -- TX-TL setup from a spreadsheet pulls the extract fraction from the spreadsheet, and the association spreadsheet method has no knowledge of TX-TL. Accordingly, the extract fraction is an optional argument in build_dilution_series. To modify the master mix of a reaction, you'll have to set its MasterMix object, which is most safely done with the add_master_mix function. Changing the buffer/extract composition can be accomplished in the constructor of the new MasterMix object. Be sure to add the new MasterMix object before calling write_picklist! Also be sure that the new MasterMix object has the same reaction volume (rxn_vol) as the EchoRun object. Otherwise you'll get an error.
new_master_mix = mt_echo.MasterMix(example_plate, extract_fraction = 0.40,
                                   rxn_vol = example_echo_calculator.rxn_vol)
example_echo_calculator.add_master_mix(new_master_mix)

example_echo_calculator.build_dilution_series(gfp, atc, gfp_final_concentrations,
                                               atc_final_concentrations, starting_well)
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
You can also add arbitrary components to the master mix. For example, the following code adds the dye DFHBI-1T to every well at a final concentration of 10 µM, from a 2 mM stock:
new_master_mix = mt_echo.MasterMix(example_plate, rxn_vol = example_echo_calculator.rxn_vol)
dfhbi = mt_echo.EchoSourceMaterial("DFHBI-1T", 2000, 0, example_plate)
new_master_mix.add_material(dfhbi, 10)
example_echo_calculator.add_master_mix(new_master_mix)

example_echo_calculator.build_dilution_series(gfp, atc, gfp_final_concentrations,
                                               atc_final_concentrations, starting_well)
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
To change buffer/extract aliquot size: Buffer and extract aliquot size are controlled by the MasterMix object. Like extract percentage, aliquot sizes can be changed in the MasterMix's constructor. Note that both aliquot sizes are in units of nL, not uL.
new_master_mix = mt_echo.MasterMix(example_plate,
                                   extract_per_aliquot = 50000,
                                   buffer_per_aliquot = 70000,
                                   rxn_vol = example_echo_calculator.rxn_vol)
example_echo_calculator.add_master_mix(new_master_mix)

example_echo_calculator.build_dilution_series(gfp, atc, gfp_final_concentrations,
                                               atc_final_concentrations, starting_well)

# ...
# calculator_with_odd_destination.write_picklist(...)
# ...
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
To run a (dilution series) reaction without master mix: You can also make a dilution series without any master mix by either creating a new EchoRun object with None for its MasterMix, or by removing the MasterMix on an existing EchoRun object with a call to remove_master_mix.
dye1 = mt_echo.EchoSourceMaterial('A Dye', 100, 0, dilution_plate)
dye2 = mt_echo.EchoSourceMaterial('Another Dye', 122, 0, dilution_plate)
dye_concentrations = [x for x in range(10)]

example_echo_calculator.remove_master_mix()
example_echo_calculator.build_dilution_series(dye1, dye2, dye_concentrations,
                                              dye_concentrations, starting_well)
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
To change the source plate type/material type: Source plate types and material types are set as optional arguments in the constructor of a SourcePlate object. The type and material of a source plate are both set by a string, which can be any string that the Echo Plate Reformat software will recognize.
plate_type = "384PP_AQ_BP" # This is actually the default plate value
retyped_example_plate = mt_echo.SourcePlate(filename = plate_file, SPtype = plate_type)
another_example_echo_calculator = mt_echo.EchoRun(plate = retyped_example_plate)
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
To change the source plate name: Source plate names are set much like source plate types with the argument SPname. In addition, as a shorthand, you can set SPname to be a number N, in which case the plate will be named Source[N].
plate_name = "FirstPlate"
renamed_example_plate = mt_echo.SourcePlate(filename = plate_file, SPname = plate_name)
yet_another_example_echo_calculator = mt_echo.EchoRun(plate = renamed_example_plate)
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
To change the destination plate type: Destination plate types are determined and stored directly in the EchoRun object. The destination plate type can be set by the optional argument DPtype in the constructor of an EchoRun object, or set manually any time before calling write_picklist on that EchoRun.
calculator_with_odd_destination = mt_echo.EchoRun(plate = example_plate,
                                                  DPtype = "some_96_well_plate")
calculator_with_odd_destination.DPtype = "Nunc_384_black_glassbottom" # Just kidding!
# ...
# calculator_with_odd_destination.write_picklist(...)
# ...
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
To change dead volume and max volume: You probably shouldn't do this. If you absolutely must squeeze every last bit of efficiency out of your source wells, you can set the dead_volume and max_volume variables, which are static variables in the murraylab_tools.echo package. If you change them, also make sure to set the static variable usable_volume, which defines the volume of material in a well that can actually be used by the Echo (this is normally calculated from dead_volume and max_volume at package import). Also, you should do this before running any experimental protocol function.
from murraylab_tools.echo.echo_functions import dead_volume, max_volume, usable_volume

dead_volume = 10000 # Volume in nL!
max_volume  = 75000 # Volume in nL!
usable_volume = max_volume - dead_volume # Don't forget to re-calculate this!
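One caveat (my addition, based on general Python import semantics rather than this package's documentation): "from ... import name" only rebinds a local name, so the assignments above may not be seen by code inside the package that reads these values as module attributes. If the overrides do not appear to take effect, setting them on the module object is the more reliable pattern:

# Alternative: set the attributes on the module object itself. Whether the package's
# internals read these module attributes at call time is an assumption on my part.
import murraylab_tools.echo.echo_functions as echo_functions

echo_functions.dead_volume = 10000   # volume in nL
echo_functions.max_volume = 75000    # volume in nL
echo_functions.usable_volume = echo_functions.max_volume - echo_functions.dead_volume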
examples/Echo Setup Usage Examples.ipynb
smsaladi/murraylab_tools
mit
Extracting the samples we are interested in
# Let's extract ADHD and Bipolar patients (mutually exclusive)
ADHD_men = X.loc[X['ADHD'] == 1]
ADHD_men = ADHD_men.loc[ADHD_men['Bipolar'] == 0]

BP_men = X.loc[X['Bipolar'] == 1]
BP_men = BP_men.loc[BP_men['ADHD'] == 0]

ADHD_cauc = Y.loc[Y['ADHD'] == 1]
ADHD_cauc = ADHD_cauc.loc[ADHD_cauc['Bipolar'] == 0]

BP_cauc = Y.loc[Y['Bipolar'] == 1]
BP_cauc = BP_cauc.loc[BP_cauc['ADHD'] == 0]

print ADHD_men.shape
print BP_men.shape
print ADHD_cauc.shape
print BP_cauc.shape

# Keeping a backup of the data frame object because numpy arrays don't play well with certain scikit functions
ADHD_men  = pd.DataFrame(ADHD_men.drop(['Patient_ID', 'Gender_id', 'ADHD', 'Bipolar'], axis = 1, inplace = False))
BP_men    = pd.DataFrame(BP_men.drop(['Patient_ID', 'Gender_id', 'ADHD', 'Bipolar'], axis = 1, inplace = False))
ADHD_cauc = pd.DataFrame(ADHD_cauc.drop(['Patient_ID', 'race_id', 'ADHD', 'Bipolar'], axis = 1, inplace = False))
BP_cauc   = pd.DataFrame(BP_cauc.drop(['Patient_ID', 'race_id', 'ADHD', 'Bipolar'], axis = 1, inplace = False))
Code/Assignment-10/SubjectSelectionExperiments (rCBF data).ipynb
Upward-Spiral-Science/spect-team
apache-2.0
Dimensionality reduction

Manifold Techniques

ISOMAP
combined1 = pd.concat([ADHD_men, BP_men])
combined2 = pd.concat([ADHD_cauc, BP_cauc])

print combined1.shape
print combined2.shape

combined1 = preprocessing.scale(combined1)
combined2 = preprocessing.scale(combined2)

combined1 = manifold.Isomap(20, 20).fit_transform(combined1)
ADHD_men_iso = combined1[:1056]
BP_men_iso = combined1[1056:]

combined2 = manifold.Isomap(20, 20).fit_transform(combined2)
ADHD_cauc_iso = combined2[:1110]
BP_cauc_iso = combined2[1110:]
Code/Assignment-10/SubjectSelectionExperiments (rCBF data).ipynb
Upward-Spiral-Science/spect-team
apache-2.0
Clustering and other grouping experiments K-Means clustering - iso
data1 = pd.concat([pd.DataFrame(ADHD_men_iso), pd.DataFrame(BP_men_iso)]) data2 = pd.concat([pd.DataFrame(ADHD_cauc_iso), pd.DataFrame(BP_cauc_iso)]) print data1.shape print data2.shape kmeans = KMeans(n_clusters=2) kmeans.fit(data1.get_values()) labels1 = kmeans.labels_ centroids1 = kmeans.cluster_centers_ print('Estimated number of clusters: %d' % len(centroids1)) for label in [0, 1]: ds = data1.get_values()[np.where(labels1 == label)] plt.plot(ds[:,0], ds[:,1], '.') lines = plt.plot(centroids1[label,0], centroids1[label,1], 'o') kmeans = KMeans(n_clusters=2) kmeans.fit(data2.get_values()) labels2 = kmeans.labels_ centroids2 = kmeans.cluster_centers_ print('Estimated number of clusters: %d' % len(centroids2)) for label in [0, 1]: ds2 = data2.get_values()[np.where(labels2 == label)] plt.plot(ds2[:,0], ds2[:,1], '.') lines = plt.plot(centroids2[label,0], centroids2[label,1], 'o')
Code/Assignment-10/SubjectSelectionExperiments (rCBF data).ipynb
Upward-Spiral-Science/spect-team
apache-2.0
As is evident from the above 2 experiments, no clear clustering is apparent. But there is some significant overlap, and there are 2 clear groups. Classification Experiments Let's experiment with a bunch of classifiers
ADHD_men_iso = pd.DataFrame(ADHD_men_iso) BP_men_iso = pd.DataFrame(BP_men_iso) ADHD_cauc_iso = pd.DataFrame(ADHD_cauc_iso) BP_cauc_iso = pd.DataFrame(BP_cauc_iso) BP_men_iso['ADHD-Bipolar'] = 0 ADHD_men_iso['ADHD-Bipolar'] = 1 BP_cauc_iso['ADHD-Bipolar'] = 0 ADHD_cauc_iso['ADHD-Bipolar'] = 1 data1 = pd.concat([ADHD_men_iso, BP_men_iso]) data2 = pd.concat([ADHD_cauc_iso, BP_cauc_iso]) class_labels1 = data1['ADHD-Bipolar'] class_labels2 = data2['ADHD-Bipolar'] data1 = data1.drop(['ADHD-Bipolar'], axis = 1, inplace = False) data2 = data2.drop(['ADHD-Bipolar'], axis = 1, inplace = False) data1 = data1.get_values() data2 = data2.get_values() # Leave one Out cross validation def leave_one_out(classifier, values, labels): leave_one_out_validator = LeaveOneOut(len(values)) classifier_metrics = cross_validation.cross_val_score(classifier, values, labels, cv=leave_one_out_validator) accuracy = classifier_metrics.mean() deviation = classifier_metrics.std() return accuracy, deviation rf = RandomForestClassifier(n_estimators = 22) qda = QDA() lda = LDA() gnb = GaussianNB() classifier_accuracy_list = [] classifiers = [(rf, "Random Forest"), (lda, "LDA"), (qda, "QDA"), (gnb, "Gaussian NB")] for classifier, name in classifiers: accuracy, deviation = leave_one_out(classifier, data1, class_labels1) print '%s accuracy is %0.4f (+/- %0.3f)' % (name, accuracy, deviation) classifier_accuracy_list.append((name, accuracy)) for classifier, name in classifiers: accuracy, deviation = leave_one_out(classifier, data2, class_labels2) print '%s accuracy is %0.4f (+/- %0.3f)' % (name, accuracy, deviation) classifier_accuracy_list.append((name, accuracy))
Code/Assignment-10/SubjectSelectionExperiments (rCBF data).ipynb
Upward-Spiral-Science/spect-team
apache-2.0
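As a hedged sketch (not part of the original notebook), the silhouette score gives a rough quantitative check of the clustering claim above: values near 0 indicate heavily overlapping clusters. It assumes labels1/labels2 from the k-means cell and the data1/data2 arrays from the classification cell are still in scope.
# Quantify cluster overlap with the silhouette score (close to 0 = strong overlap)
from sklearn.metrics import silhouette_score
print 'Silhouette score (men, iso): %0.3f' % silhouette_score(data1, labels1)
print 'Silhouette score (cauc, iso): %0.3f' % silhouette_score(data2, labels2)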
Calling webbrowser's open function opens the browser, or a new tab if the browser is already open. Since we did not specify a browser, this uses the browser configured as the default on our system. The module can also be used to open new tabs and windows, close the browser, and target a specific browser.
import webbrowser
webbrowser.open('http://github.com/')
Presentaciones/Notas/09_Extraccion_web.ipynb
cosmoscalibur/herramientas_computacionales
mit
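A short sketch of the other uses mentioned above (new tab, new window, specific browser). The browser name 'firefox' is an assumption and must match a browser registered on your system.
import webbrowser
webbrowser.open_new_tab('http://github.com/')   # force a new tab
webbrowser.open_new('http://github.com/')       # force a new window, if the browser allows it
firefox = webbrowser.get('firefox')             # controller for a specific browser
firefox.open('http://github.com/')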
However, web scraping depends on obtaining the source code or the elements available in the pages, which is impossible by merely opening the browser. For this purpose we can use urllib or, as we will do in this session, requests.
import requests res = requests.get('http://www.gutenberg.org/files/18251/18251-0.txt') res.status_code == requests.codes.ok # Validate code 200 (ok) type(res) len(res.text) print(res.text[:250])
Presentaciones/Notas/09_Extraccion_web.ipynb
cosmoscalibur/herramientas_computacionales
mit
If fetching the page with the get function fails, we can raise a notification with the reason for the failure.
res = requests.get('http://github.com/yomeinventoesto') res.raise_for_status()
Presentaciones/Notas/09_Extraccion_web.ipynb
cosmoscalibur/herramientas_computacionales
mit
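As a sketch of how that notification can be handled instead of stopping the notebook: raise_for_status() throws a requests.exceptions.HTTPError on a 4xx/5xx response, which we can catch and report.
import requests
try:
    res = requests.get('http://github.com/yomeinventoesto')
    res.raise_for_status()
except requests.exceptions.HTTPError as exc:
    print('The request failed: %s' % exc)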
When we fetch a resource from a URL, its content is binary rather than plain text. This makes some things easier: it lets us download not only plain text (text files or source code) but also binary files such as images, executables, videos, Word documents and others. It is important to note that if we are going to save a plain-text file, we should still write it in binary mode so we do not lose the file's original encoding.
res = requests.get('http://www.programmableweb.com/sites/default/files/github-jupyter.jpg') archivo_imagen = open('github-jupyter.jpg', 'wb') for bloques in res.iter_content(100000): archivo_imagen.write(bloques) archivo_imagen.close()
Presentaciones/Notas/09_Extraccion_web.ipynb
cosmoscalibur/herramientas_computacionales
mit
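A minimal sketch of the last remark above: even for a plain-text resource we write the raw bytes (mode 'wb') so the original encoding is preserved. The local file name is only an example.
import requests
res = requests.get('http://www.gutenberg.org/files/18251/18251-0.txt')
archivo_texto = open('18251-0.txt', 'wb')   # binary mode keeps the original encoding
archivo_texto.write(res.content)            # res.content is the raw byte string
archivo_texto.close()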
In the previous block, the iter_content method yields chunks of the file with the size given by its argument. This is convenient when writing large files.
import bs4
Presentaciones/Notas/09_Extraccion_web.ipynb
cosmoscalibur/herramientas_computacionales
mit
We will now use bs4 (the name under which Beautiful Soup is imported), which lets us search for text and for specific HTML structures. This is more convenient than using regular expressions directly on the source code. When creating the object, we must indicate the text it will act on (it can also be obtained directly from an open file) and the type of parser, in this case lxml.
res = requests.get('https://github.com/cosmoscalibur/herramientas_computacionales') gh = bs4.BeautifulSoup(res.text, "lxml") type(gh)
Presentaciones/Notas/09_Extraccion_web.ipynb
cosmoscalibur/herramientas_computacionales
mit
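As noted above, the object can also be built directly from an open file. A minimal sketch, assuming a local HTML file named pagina.html exists (the file name is hypothetical):
archivo_html = open('pagina.html')
soup_local = bs4.BeautifulSoup(archivo_html, "lxml")
archivo_html.close()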
Now we will look for all td elements whose class attribute has the value content.
tabla_archivos = gh.find_all('td', {'class':'content'}) type(tabla_archivos)
Presentaciones/Notas/09_Extraccion_web.ipynb
cosmoscalibur/herramientas_computacionales
mit
The result is a list with all the matches. It is also possible to search one match at a time, using find instead of find_all.
len(tabla_archivos) print(tabla_archivos)
Presentaciones/Notas/09_Extraccion_web.ipynb
cosmoscalibur/herramientas_computacionales
mit
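A short sketch of the find variant mentioned above, which returns only the first match (or None) instead of a list:
primera_celda = gh.find('td', {'class': 'content'})  # first matching td, or None
print(type(primera_celda))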
Within the previous filtering, we now look for all a tags, which we associate with the presence of the href attribute. This way we locate the list of files. To get the text inside a tag we use the string property, and the value of an attribute with the get method.
for content in tabla_archivos: lineas_a = content('a') if lineas_a: texto = "Se encontro el archivo '{}'".format(lineas_a[0].string.encode("utf-8")) texto += " con enlace '{}'.".format(lineas_a[0].get("href")) print(texto)
Presentaciones/Notas/09_Extraccion_web.ipynb
cosmoscalibur/herramientas_computacionales
mit
We had to use encode("utf-8") because the page's encoding is utf-8 and not ascii (Python's default). We can also query a tag's attributes, or whether it has a specific attribute, not just get the attribute's value, as follows.
lineas_a[0].has_attr("href") # Check whether an attribute exists lineas_a[0].attrs # List the existing attributes from selenium import webdriver
Presentaciones/Notas/09_Extraccion_web.ipynb
cosmoscalibur/herramientas_computacionales
mit
Instantiating the browser driver depends on the browser of interest. Keep in mind that not all browsers are supported: there is support for Chrome, Firefox, Opera, IE and PhantomJS. The last one can do the job without opening a browser window (if needed, you can even take screenshots through the driver to validate the result). Each browser may have specific requirements: Firefox needs the user profile directory to be indicated, while Chrome requires the path to the driver executable (it has to be downloaded, since it is not bundled as it is with Firefox or PhantomJS). It might be possible (I have not verified it) to use other browsers that share the same rendering engine by explicitly indicating the path to the executable; for example, Vivaldi could be controlled by changing the Chrome path (they use the same rendering engine).
browser = webdriver.Chrome("/home/cosmoscalibur/Downloads/chromedriver") browser.get('http://github.com') username = browser.find_element_by_id("user[login]") username.send_keys("cosmoscalibur@gmail.com") dar_click = browser.find_element_by_link_text("privacy policy") dar_click.click()
Presentaciones/Notas/09_Extraccion_web.ipynb
cosmoscalibur/herramientas_computacionales
mit
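A sketch of the alternatives described above; whether each call works depends on the drivers installed on your machine, so treat this as an illustration rather than a recipe.
browser_ff = webdriver.Firefox()        # Firefox: a FirefoxProfile can be passed to use a specific profile
browser_ghost = webdriver.PhantomJS()   # PhantomJS: no visible window
browser_ghost.get('http://github.com')
browser_ghost.save_screenshot('captura.png')   # validate what the headless browser rendered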
Selenium turns out to be quite useful not so much in cases that require interaction, but in cases where the content (including interactive elements) is generated dynamically, or where, after an interaction, the new link or content loads with noticeable delays, which would keep Requests from obtaining the right source code. We can extract the source code of the page that currently has the browser's focus as follows.
codigo = browser.page_source print(codigo)
Presentaciones/Notas/09_Extraccion_web.ipynb
cosmoscalibur/herramientas_computacionales
mit
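When the content is generated dynamically, it usually pays to wait for a concrete element before reading page_source. A sketch using Selenium's explicit waits, reusing the user[login] field from the example above:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# wait up to 10 seconds for the element to appear before grabbing the source
WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.ID, "user[login]")))
codigo = browser.page_source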
Unlike in the previous case, where we had word files that we could export as plaintext, in this case Manuela has prepared a sample chapter with four editions transcribed in parallel in an office spreadsheet. So we first of all make sure that we have good UTF-8 comma-separated-value files, e.g. by uploading a csv export of our office program of choice to a CSV Linting service. (As a side remark, in my case, exporting with LibreOffice provided me with options to select UTF-8 encoding and choose the field delimiter and resulted in a valid csv file. MS Excel did neither of those.) Below, we expect the file at the following position:
sourcePath = 'DHd2019/cap6_align_-_2018-01.csv'
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
Then, we can go ahead and open the file in python's csv reader:
import csv sourceFile = open(sourcePath, newline='', encoding='utf-8') sourceTable = csv.reader(sourceFile)
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
And next, we read each line into new elements of four respective lists (since we're dealing with one sample chapter, we try to handle it all in memory first and see if we run into problems): (Note here and in the following that in most cases, when the program is counting, it does so beginning with zero. Which means that if we end up with 20 segments, they are going to be called segment 0, segment 1, ..., segment 19. There is not going to be a segment bearing the number twenty, although we do have twenty segments. The first one has the number zero and the twentieth one has the number nineteen. Even for more experienced coders, this sometimes leads to mistakes, called "off-by-one errors".)
import re # Initialize a list of lists, or two-dimensional list ... Editions = [[]] # ...with four sub-lists 0 to 3 for i in range(3): a = [] Editions.append(a) # Now populate it from our sourceTable sourceFile.seek(0) # in repeated runs, restart from the beginning of the file for row in sourceTable: for i, field in enumerate(row): # We normalize quite a bit here already: p = field.replace('¶', ' ¶ ') # spaces around ¶ p = re.sub("&([^c])"," & \\1", p) # always spaces around &, except for &c p = re.sub("([,.:?/])(\S)","\\1 \\2", p) # always a space after ',.:?/' p = re.sub("([0-9])([a-zA-Z])", "\\1 \\2", p) # always a space between numbers and word characters p = re.sub("([a-z]) ?\\(\\1\\b", " (\\1", p) # if a letter is repeated on its own in a bracketed # expression it's a note and we eliminate the character # from the preceding word p = " ".join(p.split()) # always only one space Editions[i].append(p) print(str(len(Editions[0])) + " rows read.\n") # As an example, see the first seven sections of the third edition (1556 SPA): for field in range(len(Editions[2])): print(Editions[2][field])
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
Actually, let's define two more list variables to hold information about the different editions - language and year of print:
numOfEds = 4 language = ["PT", "PT", "ES", "LA"] # I am using language codes that later on can be used in babelnet year = [1549, 1552, 1556, 1573]
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
TF/IDF <a name="tfidf"></a> In the previous (i.e. Solórzano) analyses, things like tokenization, lemmatization and stop-word lists filtering are explained step by step. Here, we rely on what we have found there and feed it all into functions that are ready-made and available in suitable libraries... First, we build our lemmatization resource and "function":
lemma = [{} for i in range(numOfEds)] # lemma = {} # we build a so-called dictionary for the lookups for i in range(numOfEds): wordfile_path = 'Azpilcueta/wordforms-' + language[i].lower() + '.txt' # open the wordfile (defined above) for reading wordfile = open(wordfile_path, encoding='utf-8') tempdict = [] for line in wordfile.readlines(): tempdict.append(tuple(line.split('>'))) # we split each line by ">" and append # a tuple to a temporary list. lemma[i] = {k.strip(): v.strip() for k, v in tempdict} # for every tuple in the temp. list, # we strip whitespace and make a key-value # pair, appending it to our "lemma" # dictionary wordfile.close() print(str(len(lemma[i])) + ' ' + language[i] + ' wordforms known to the system.')
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
Again, a quick test: Let's see with which "lemma"/basic word the particular wordform "diremos" is associated, or, in other words, what value our lemma variable returns when we query for the key "diremos":
lemma[language.index("PT")]['diremos']
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
And we are going to need the stopwords lists:
stopwords = [] for i in range(numOfEds): stopwords_path = 'DHd2019/stopwords-' + language[i].lower() + '.txt' stopwords.append(open(stopwords_path, encoding='utf-8').read().splitlines()) print(str(len(stopwords[i])) + ' ' + language[i] + ' stopwords known to the system, e.g.: ' + str(stopwords[i][100:119]) + '\n')
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
(In contrast to simpler numbers that have been filtered out by the stopwords filter, I have left numbers representing years like "1610" in place.) And, later on when we try sentence segmentation, we are going to need the list of abbreviations - words where a subsequent period does not necessarily mean a new sentence:
abbreviations = [] # As of now, this is one for all languages :-( abbrs_path = 'DHd2019/abbreviations.txt' abbreviations = open(abbrs_path, encoding='utf-8').read().splitlines() print(str(len(abbreviations)) + ' abbreviations known to the system, e.g.: ' + str(abbreviations[100:119]))
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
Next, we should find some very characteristic words for each segment for each edition. (Let's say we are looking for the "Top 20".) We should build a vocabulary for each edition individually and only afterwards work towards a common vocabulary of several "Top n" sets.
import re import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer numTopTerms = 20 # So first we build a tokenising and lemmatising function (per language) to work as # an input filter to the CountVectorizer function def ourLaLemmatiser(str_input): wordforms = re.split('\W+', str_input) return [lemma[language.index("LA")][wordform].lower().strip() if wordform in lemma[language.index("LA")] else wordform.lower().strip() for wordform in wordforms ] def ourEsLemmatiser(str_input): wordforms = re.split('\W+', str_input) return [lemma[language.index("ES")][wordform].lower().strip() if wordform in lemma[language.index("ES")] else wordform.lower().strip() for wordform in wordforms ] def ourPtLemmatiser(str_input): wordforms = re.split('\W+', str_input) return [lemma[language.index("PT")][wordform].lower().strip() if wordform in lemma[language.index("PT")] else wordform.lower().strip() for wordform in wordforms ] def ourLemmatiser(lang): if (lang == "LA"): return ourLaLemmatiser if (lang == "ES"): return ourEsLemmatiser if (lang == "PT"): return ourPtLemmatiser def ourStopwords(lang): if (lang == "LA"): return stopwords[language.index("LA")] if (lang == "ES"): return stopwords[language.index("ES")] if (lang == "PT"): return stopwords[language.index("PT")] topTerms = [] for i in range(numOfEds): topTermsEd = [] # Initialize the library's function, specifying our # tokenizing function from above and our stopwords list. tfidf_vectorizer = TfidfVectorizer(stop_words=ourStopwords(language[i]), use_idf=True, tokenizer=ourLemmatiser(language[i]), norm='l2') # Finally, we feed our corpus to the function to build a new "tfidf_matrix" object tfidf_matrix = tfidf_vectorizer.fit_transform(Editions[i]) # convert your matrix to an array to loop over it mx_array = tfidf_matrix.toarray() # get your feature names fn = tfidf_vectorizer.get_feature_names() # now loop through all segments and get the respective top n words. pos = 0 for j in mx_array: # We have empty segments, i.e. none of the words in our vocabulary has any tf/idf score > 0 if (j.max() == 0): topTermsEd.append([("", 0)]) # otherwise append (present) lemmatised words until numTopTerms or the number of words (-stopwords) is reached else: topTermsEd.append( [(fn[x], j[x]) for x in ((j*-1).argsort()) if j[x] > 0] \ [:min(numTopTerms, len( [word for word in re.split('\W+', Editions[i][pos]) if ourLemmatiser(language[i])(word) not in stopwords] ))]) pos += 1 topTerms.append(topTermsEd)
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
Translations? Maybe there is an approach to inter-lingual comparison after all. After a first unsuccessful try with conceptnet.io, I next want to try Babelnet in order to look up synonyms, related terms and translations. I still have to study the API... For example, let's take this single segment 19:
segment_no = 18
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
And then first let's see how this segment compares in the different editions:
print("Comparing words from segments " + str(segment_no) + " ...") print(" ") print("Here is the segment in the four editions:") print(" ") for i in range(numOfEds): print("Ed. " + str(i) + ":") print("------") print(Editions[i][segment_no]) print(" ") print(" ") print(" ") # Build List of most significant words for a segment print("Most significant words in the segment:") print(" ") for i in range(numOfEds): print("Ed. " + str(i) + ":") print("------") print(topTerms[i][segment_no]) print(" ")
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
Now we look up the "concepts" associated to those words in babelnet. Then we look up the concepts associated with the words of the present segment from another edition/language, and see if the concepts are the same. But we have to decide on some particular editions to get things started. Let's take the Spanish and Latin ones:
startEd = 1 secondEd = 2
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
And then we can continue...
import urllib import json from collections import defaultdict babelAPIKey = '18546fd3-8999-43db-ac31-dc113506f825' babelGetSynsetIdsURL = "https://babelnet.io/v5/getSynsetIds?" + \ "targetLang=LA&targetLang=ES&targetLang=PT" + \ "&searchLang=" + language[startEd] + \ "&key=" + babelAPIKey + \ "&lemma=" # Build lists of possible concepts top_possible_conceptIDs = defaultdict(list) for (word, val) in topTerms[startEd][segment_no]: concepts_uri = babelGetSynsetIdsURL + urllib.parse.quote(word) response = urllib.request.urlopen(concepts_uri) conceptIDs = json.loads(response.read().decode(response.info().get_param('charset') or 'utf-8')) for rel in conceptIDs: top_possible_conceptIDs[word].append(rel.get("id")) print(" ") print("For each of the '" + language[startEd] + "' words, here are possible synsets:") print(" ") for word in top_possible_conceptIDs: print(word + ":" + " " + ', '.join(c for c in top_possible_conceptIDs[word])) print(" ") print(" ") print(" ") print(" ") babelGetSynsetIdsURL2 = "https://babelnet.io/v5/getSynsetIds?" + \ "targetLang=LA&targetLang=ES&targetLang=PT" + \ "&searchLang=" + language[secondEd] + \ "&key=" + babelAPIKey + \ "&lemma=" # Build list of 10 most significant words in the second language top_possible_conceptIDs_2 = defaultdict(list) for (word, val) in topTerms[secondEd][segment_no]: concepts_uri = babelGetSynsetIdsURL2 + urllib.parse.quote(word) response = urllib.request.urlopen(concepts_uri) conceptIDs = json.loads(response.read().decode(response.info().get_param('charset') or 'utf-8')) for rel in conceptIDs: top_possible_conceptIDs_2[word].append(rel.get("id")) print(" ") print("For each of the '" + language[secondEd] + "' words, here are possible synsets:") print(" ") for word in top_possible_conceptIDs_2: print(word + ":" + " " + ', '.join(c for c in top_possible_conceptIDs_2[word])) print(" ") # calculate number of overlapping terms values_a = set([item for sublist in top_possible_conceptIDs.values() for item in sublist]) values_b = set([item for sublist in top_possible_conceptIDs_2.values() for item in sublist]) overlaps = values_a & values_b print("Overlaps: " + str(overlaps)) babelGetSynsetInfoURL = "https://babelnet.io/v5/getSynset?key=" + babelAPIKey + \ "&targetLang=LA&targetLang=ES&targetLang=PT" + \ "&id=" for c in overlaps: info_uri = babelGetSynsetInfoURL + c response = urllib.request.urlopen(info_uri) words = json.loads(response.read().decode(response.info().get_param('charset') or 'utf-8')) senses = words['senses'] for result in senses[:1]: lemma = result['properties'].get('fullLemma') resultlang = result['properties'].get('language') print(c + ": " + lemma + " (" + resultlang.lower() + ")") # what's left: do a nifty ranking
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
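The cell above ends with a note to "do a nifty ranking". Here is a minimal sketch (not part of the original notebook) that ranks the shared synsets by the summed tf/idf weights of the words that proposed them in both editions; this is only one plausible heuristic.
# Rank overlapping synsets by the tf/idf weight of the words that produced them
weights1 = dict(topTerms[startEd][segment_no])
weights2 = dict(topTerms[secondEd][segment_no])
ranking = []
for c in overlaps:
    score = sum(weights1[w] for w in top_possible_conceptIDs if c in top_possible_conceptIDs[w])
    score += sum(weights2[w] for w in top_possible_conceptIDs_2 if c in top_possible_conceptIDs_2[w])
    ranking.append((score, c))
for score, c in sorted(ranking, reverse=True):
    print(c + ": " + str(round(score, 3)))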
Actually I think this is somewhat promising - an overlap of four independent, highly meaning-bearing words, or of forty-something related concepts. At first glance, they should be capable of distinguishing this section from all the other ones. However, getting this result was made possible by quite a bit of manually tuning the stopwords and lemmatization dictionaries beforehand, so this work is important and cannot be eliminated. New Approach: Use Aligner from Machine Translation Studies <a name="newApproach"/> In contrast to what I thought previously, there are a couple of tools for automatically aligning parallel texts after all. After some investigation of the literature, the most promising candidate seems to be HunAlign. However, as this is a commandline tool written in C++ (there is LF Aligner, a GUI, available), it is not possible to run it from within this notebook. First results were problematic, due to the different literary conventions that our editions follow: punctuation was used inconsistently (but sentence length is one of the most relevant factors for aligning), as were abbreviations and notes. My current idea is to use this notebook to preprocess the texts and to feed a cleaned-up version of them to hunalign... Coming back to this after a first couple of rounds with Hunalign, I have the feeling that the fact that literary conventions are so divergent probably means that aligning via sentence lengths is a bad idea in our case from the outset. Probably better to approach this with GMA or similar methods. Anyway, here are the first attempts with Hunalign:
from nltk import sent_tokenize ## First, train the sentence tokenizer: from pprint import pprint from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktLanguageVars, PunktTrainer class BulletPointLangVars(PunktLanguageVars): sent_end_chars = ('.', '?', ':', '!', '¶') trainer = PunktTrainer() trainer.INCLUDE_ALL_COLLOCS = True tokenizer = PunktSentenceTokenizer(trainer.get_params(), lang_vars = BulletPointLangVars()) for tok in abbreviations : tokenizer._params.abbrev_types.add(tok) ## Now we sentence-segmentize all our editions, printing results and saving them to files: # folder for the several segment files: outputBase = 'Azpilcueta/sentences' dest = None # Then, sentence-tokenize our segments: for i in range(numOfEds): dest = open(outputBase + '_' + str(year[i]) + '.txt', encoding='utf-8', mode='w') print("Sentence-split of ed. " + str(i) + ":") print("------") for s in range(0, len(Editions[i])): for a in tokenizer.tokenize(Editions[i][s]): dest.write(a.strip() + '\n') print(a) dest.write('<p>\n') print('<p>') dest.close()
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
... lemmatize/stopwordize it---
# folder for the several segment files: outputBase = 'Azpilcueta/sentences-lemmatized' dest = None # Then, sentence-tokenize our segments: for i in range(numOfEds): dest = open(outputBase + '_' + str(year[i]) + '.txt', encoding='utf-8', mode='w') stp = set(stopwords[i]) print("Cleaned/lemmatized ed. " + str(i) + " [" + language[i] + "]:") print("------") for s in range(len(Editions[i])): for a in tokenizer.tokenize(Editions[i][s]): dest.write(" ".join([x for x in ourLemmatiser(language[i])(a) if x not in stp]) + '\n') print(" ".join([x for x in ourLemmatiser(language[i])(a) if x not in stp])) dest.write('<p>\n') print('<p>') dest.close()
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
With these preparations made, Hunaligning 1552 and 1556 reports "Quality 0.63417" for unlemmatized and "Quality 0.51392" for lemmatized versions of the texts, for alignments which still contain many errors. Removing ":" from the sentence end marks gives "Quality 0.517048/0.388377", but, at first impression, with fewer errors. Results can be output in different formats; xls files are here and here. Similarity <a name="DocumentSimilarity"/> It seems we could now create another matrix replacing lemmata with concepts and retaining the tf/idf values (so as to keep a weight coefficient for the concepts). Then we should be able to calculate similarity measures across the same concepts... The approach to choose would probably be the "cosine similarity" of concept vector spaces. Again, there is a library ready for us to use (but you can find some documentation here, here and here.) However, this is where I have to take a break now. I will return here soon...
from sklearn.metrics.pairwise import cosine_similarity similarities = pd.DataFrame(cosine_similarity(tfidf_matrix)) similarities[round(similarities, 0) == 1] = 0 # Suppress a document's similarity to itself print("Pairwise similarities:") print(similarities) print("The two most similar segments in the corpus are") print("segments", \ similarities[similarities == similarities.values.max()].idxmax(axis=0).idxmax(axis=1), \ "and", \ similarities[similarities == similarities.values.max()].idxmax(axis=0)[ similarities[similarities == similarities.values.max()].idxmax(axis=0).idxmax(axis=1) ].astype(int), \ ".") print("They have a similarity score of") print(similarities.values.max())
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
<div class="alert alertbox alert-success">Of course, in every set of documents, we will always find two that are similar in the sense that they are more similar to each other than to the other ones. Whether or not this actually *means* anything in terms of content is still up to scholarly interpretation. But at least it means that a scholar can look at the two documents, and when she determines that they are not so similar after all, then perhaps there is something interesting to say about similar vocabulary used for different purposes. Or the other way round: when the scholar knows that two passages are similar, but they have a low "similarity score", shouldn't that say something about the texts' rhetoric?</div> Word Clouds <a name="WordClouds"/> We can use a library that takes word frequencies like above, calculates corresponding relative sizes of words and creates nice wordcloud images for our sections (again, taking the fourth segment as an example) like this:
from wordcloud import WordCloud import matplotlib.pyplot as plt # We make tuples of (lemma, tf/idf score) for one of our segments # But we have to convert our tf/idf weights to pseudo-frequencies (i.e. integer numbers) frq = [ int(round(x * 100000, 0)) for x in Editions[1][3]] freq = dict(zip(fn, frq)) wc = WordCloud(background_color=None, mode="RGBA", max_font_size=40, relative_scaling=1).fit_words(freq) # Now show/plot the wordcloud plt.figure() plt.imshow(wc, interpolation="bilinear") plt.axis("off") plt.show()
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
In order to have a nicer overview over the many segments than is possible in this notebook, let's create a new html file listing some of the characteristics that we have found so far...
outputDir = "Azpilcueta" htmlfile = open(outputDir + '/Overview.html', encoding='utf-8', mode='w') # Write the html header and the opening of a layout table htmlfile.write("""<!DOCTYPE html> <html> <head> <title>Section Characteristics</title> <meta charset="utf-8"/> </head> <body> <table> """) a = [[]] a.clear() dicts = [] w = [] # For each segment, create a wordcloud and write it along with label and # other information into a new row of the html table for i in range(len(mx_array)): # this is like above in the single-segment example... a.append([ int(round(x * 100000, 0)) for x in mx_array[i]]) dicts.append(dict(zip(fn, a[i]))) w.append(WordCloud(background_color=None, mode="RGBA", \ max_font_size=40, min_font_size=10, \ max_words=60, relative_scaling=0.8).fit_words(dicts[i])) # We write the wordcloud image to a file w[i].to_file(outputDir + '/wc_' + str(i) + '.png') # Finally we write the column row htmlfile.write(""" <tr> <td> <head>Section {a}: <b>{b}</b></head><br/> <img src="./wc_{a}.png"/><br/> <small><i>length: {c} words</i></small> </td> </tr> <tr><td>&nbsp;</td></tr> """.format(a = str(i), b = label[i], c = len(tokenised[i]))) # And then we write the end of the html file. htmlfile.write(""" </table> </body> </html> """) htmlfile.close()
gallery/DHD2019_Azpilcueta.ipynb
awagner-mainz/notebooks
mit
Message Receive Time In JSBSim the IMU messages are requested to be sent at the real IMU rate of 819.2 Hz: &lt;output name="localhost" type="SOCKET" protocol="UDP" port="5123" rate="819.2"&gt; But they are then processed in Python to add noise and do the binary packing. Then they are sent as UDP packets, which may get lost. Let's see how they appear in the flight computer.
# Get the time difference between each ADIS message diff = [(rust_time[i+1] - t)*1000 for i, t in enumerate(rust_time[:-1])] fig, ax1 = plt.subplots(figsize=(18,7)) plt.title(r"rust-fc ADIS Message Interval") plt.ylabel(r"Time Since Last Sample [ms]") plt.xlabel(r"Sample Number [#]") plt.plot(range(len(diff)), diff, 'r.', alpha=1.0, ms=0.3, label="rust-fc Sample Interval") plt.plot((0, len(diff)), (1.2207, 1.2207), 'k-', lw=0.6, alpha=0.7, label="Expected Sample Interval") ax1.set_yscale("log", nonposy='clip') plt.ylim([0.1,100]) #plt.xlim() ax1.legend(loc=1) plt.show() fig, ax1 = plt.subplots(figsize=(18,7)) plt.title(r"rust-fc ADIS Message Interval") plt.ylabel(r"Number of Samples [#]") plt.xlabel(r"Time Since Last Sample [ms]") n, bins, patches = plt.hist(diff, 1000, histtype='step', normed=1, alpha=0.8, linewidth=1, fill=True) plt.plot((1.2207, 1.2207), (0, 1000), 'k-', lw=0.6, alpha=0.7, label="Expected Sample Interval") plt.ylim([0, 35]) #plt.xlim() ax1.legend(loc=1) plt.show()
analysis/results.ipynb
natronics/rust-fc
gpl-3.0
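The "Expected Sample Interval" reference line in the plots above is simply the inverse of the requested 819.2 Hz rate, converted to milliseconds:
# Expected interval between IMU messages at the requested rate
rate_hz = 819.2
expected_interval_ms = 1000.0 / rate_hz
print(expected_interval_ms)   # ~1.2207 ms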
IMU Noisy Acceleration Here we see the noise put into the IMU data and the true acceleration.
fig, ax1 = plt.subplots(figsize=(18,7)) plt.title(r"rust-fc Recorded IMU Acceleration") plt.ylabel(r"Acceleration [m/s${}^2$]") plt.xlabel(r"Run Time [s]") plt.plot(rust_time, rust_accel_x, alpha=0.8, lw=0.5, label="rust-fc IMU 'Up'") plt.plot(rust_time, rust_accel_y, alpha=0.8, lw=0.5, label="rust-fc IMU 'Y'") plt.plot(rust_time, rust_accel_z, alpha=0.6, lw=0.5, label="rust-fc IMU 'Z'") plt.plot(sim_time, measured_accel_x, 'k-', lw=1.3, alpha=0.6, label="JSBSim True Acceleration") #plt.ylim() #plt.xlim() ax1.legend(loc=1) plt.show()
analysis/results.ipynb
natronics/rust-fc
gpl-3.0
State Tracking The flight computer only knows the inertial state (acceleration). It keeps track of velocity and altitude by integrating this signal. Here we compare the rust-fc internal state to the exact numbers from the simulator.
# Compute the difference between the FC state and the simulation "real" numbers sim_idx = 0 vel = 0 alt = 0 i_count = 0 sim_matched_vel = [] vel_diff = [] alt_diff = [] for i, t in enumerate(rust_state_time): vel += rust_vel[i] alt += rust_alt[i] i_count += 1 if sim_time[sim_idx] < t: sim_matched_vel.append(vel/float(i_count)) vel_diff.append(sim_vel_up[sim_idx] - (vel/float(i_count))) alt_diff.append(sim_alt[sim_idx] - (alt/float(i_count))) vel = 0 alt = 0 i_count = 0 sim_idx += 1 if sim_idx > len(sim_time)-1: break fig = plt.figure(figsize=(18,9)) plt.subplots_adjust(hspace=0.001) # no space between vertical charts gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1]) # stretch main chart to be most of the width ax1 = plt.subplot(gs[0]) plt.title(r"rust-fc State Tracking: Velocity And Velocity Integration Error") plt.ylabel(r"Velocity [m/s]") plt.plot(rust_state_time, rust_vel, alpha=0.8, lw=1.5, label="rust-fc State Vector Velocity") plt.plot(sim_time, sim_vel_up, 'k-', lw=1.3, alpha=0.6, label="JSBSim True Velocity") plt.ylim([-60,400]) ticklabels = ax1.get_xticklabels() plt.setp(ticklabels, visible=False) ax2 = plt.subplot(gs[1]) plt.xlabel(r"Run Time [s]") plt.ylabel(r"Integration Drift Error [m/s]") plt.plot(sim_time, vel_diff) ax1.legend(loc=1) plt.show() fig = plt.figure(figsize=(18,9)) plt.subplots_adjust(hspace=0.001) # no space between vertical charts gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1]) # stretch main chart to be most of the width ax1 = plt.subplot(gs[0]) plt.title(r"rust-fc State Tracking: Altitude And Altitude Integration Error") plt.ylabel(r"Altitude MSL [m]") plt.plot(rust_state_time, rust_alt, alpha=0.8, lw=1.5, label="rust-fc State Vector Altitude") plt.plot(sim_time, sim_alt, 'k-', lw=1.3, alpha=0.6, label="JSBSim True Altitude") plt.ylim([1390, 7500]) ticklabels = ax1.get_xticklabels() plt.setp(ticklabels, visible=False) ax2 = plt.subplot(gs[1]) plt.xlabel(r"Run Time [s]") plt.ylabel(r"Integration Drift Error [m]") plt.plot(sim_time, alt_diff) #plt.xlim() ax1.legend(loc=1) plt.show()
analysis/results.ipynb
natronics/rust-fc
gpl-3.0
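The integration itself is simple dead reckoning. A minimal sketch of the idea follows; it is not the actual rust-fc implementation, and the launch-site altitude and gravity handling used here are assumptions for illustration only.
# Hedged sketch: accumulate velocity and altitude from the 'up' acceleration samples.
# Any sensor bias therefore drifts into the state over time.
dt = 1.0 / 819.2           # nominal IMU sample interval [s]
g = 9.80665                # remove gravity from the 'up' axis [m/s^2] (assumption)
vel_up, alt = 0.0, 1390.0  # assumed launch-site altitude [m MSL]
vel_track, alt_track = [], []
for accel_up in rust_accel_x:
    vel_up += (accel_up - g) * dt
    alt += vel_up * dt
    vel_track.append(vel_up)
    alt_track.append(alt)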
The Shape object and the Pen object are the most abstract forms. Next, we construct several concrete shapes, such as a rectangle and a circle:
class Rectangle(Shape): def __init__(self,long,width): self.name="Rectangle" self.param="Long:%s Width:%s"%(long,width) print ("Create a rectangle:%s"%self.param) class Circle(Shape): def __init__(self,radius): self.name="Circle" self.param="Radius:%s"%radius print ("Create a circle:%s"%self.param)
DesignPattern/BridgePattern.ipynb
gaufung/Data_Analytics_Learning_Note
mit
Right after that, we construct several kinds of pens, such as a normal pen and a brush:
class NormalPen(Pen): def __init__(self,shape): Pen.__init__(self,shape) self.type="Normal Line" def draw(self): print ("DRAWING %s:%s----PARAMS:%s"%(self.type,self.shape.getName(),self.shape.getParam())) class BrushPen(Pen): def __init__(self,shape): Pen.__init__(self,shape) self.type="Brush Line" def draw(self): print ("DRAWING %s:%s----PARAMS:%s" % (self.type,self.shape.getName(), self.shape.getParam())) normal_pen = NormalPen(Rectangle('20cm','10cm')) brush_pen = BrushPen(Circle('15cm')) normal_pen.draw() brush_pen.draw()
DesignPattern/BridgePattern.ipynb
gaufung/Data_Analytics_Learning_Note
mit
Implement Preprocessing Function Text to Word Ids As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the &lt;EOS&gt; word id at the end of each sentence from target_text. This will help the neural network predict when the sentence should end. You can get the &lt;EOS&gt; word id by doing: python target_vocab_to_int['&lt;EOS&gt;'] You can get other word ids using source_vocab_to_int and target_vocab_to_int.
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int): """ Convert source and target text to proper word ids :param source_text: String that contains all the source text. :param target_text: String that contains all the target text. :param source_vocab_to_int: Dictionary to go from the source words to an id :param target_vocab_to_int: Dictionary to go from the target words to an id :return: A tuple of lists (source_id_text, target_id_text) """ source_id_text = [[source_vocab_to_int.get(letter, source_vocab_to_int['<UNK>']) for letter in line.split(' ')] for line in source_text.split('\n')] target_id_text = [[target_vocab_to_int.get(letter, target_vocab_to_int['<UNK>']) for letter in line.split(' ')] + [target_vocab_to_int['<EOS>']] for line in target_text.split('\n')] return source_id_text, target_id_text """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_text_to_ids(text_to_ids)
Udacity-Deep-Learning-Foundation-Nanodegree/Project-4/dlnd_language_translation.ipynb
joelowj/Udacity-Projects
apache-2.0