Columns: repo_name (string, length 6–77), path (string, length 8–215), license (string, 15 classes), cells (list), types (list)
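Each record below pairs a notebook's cell sources (cells) with a parallel list of cell types (types), alongside repo_name, path, and license. As a minimal sketch of how such records could be consumed once loaded into memory, the snippet below zips the two lists to pick out the code cells. The rows variable, its abridged sample record (only two cells from the first record are shown), and the omitted loading step are illustrative assumptions, not part of the dataset itself.

```python
# Minimal sketch: iterate records with the schema above.
# 'rows' is a hypothetical in-memory list; loading it (e.g. from JSON or parquet) is not shown.
rows = [
    {
        "repo_name": "biocore/scikit-bio-presentations",
        "path": "scipy-2015/A Bioinformatics Library for Data Scientists, Students, and Developers.ipynb",
        "license": "bsd-3-clause",
        # Abridged to two cells for illustration; real records hold the full notebook.
        "cells": ["import skbio.io\n\nskbio.io.sniff('data/mystery_file.gz')", "Format ambiguity"],
        "types": ["code", "markdown"],
    },
]

for row in rows:
    # 'cells' and 'types' are parallel lists: types[i] labels cells[i] as "code" or "markdown".
    for source, cell_type in zip(row["cells"], row["types"]):
        if cell_type == "code":
            print(f"{row['repo_name']}/{row['path']}: {len(source)} characters of code")
```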
biocore/scikit-bio-presentations
scipy-2015/A Bioinformatics Library for Data Scientists, Students, and Developers.ipynb
bsd-3-clause
[ "%%html\n<link rel='stylesheet' type='text/css' href='custom.css'/>\n\n!rm data/converted-seqs.fasta data/converted-seqs.qual data/not-yasf.fna\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\ndef plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n plt.ylabel('Known taxonomy')\n plt.xlabel('Predicted taxonomy')\n plt.tight_layout()\n plt.show()", "A Bioinformatics Library for Data Scientists, Students, and Developers\nJai Rideout and Evan Bolyen\nCaporaso Lab, Northern Arizona University\nWhat is scikit-bio?\nA Python bioinformatics library for:\n\n\ndata scientists\n\n\nstudents\n\n\ndevelopers\n\n\n\n\"The first step in developing a new genetic analysis algorithm is to decide how to make the input data file format different from all pre-existing analysis data file formats.\" - Law's First Law\n\n<span style='line-height:2em; word-spacing:2em'>Axt BAM SAM BED bedGraph bigBed bigGenePred table bigWig Chain GenePred table GFF GTF HAL MAF Microarray Net Personal Genome SNP format PSL VCF WIG abi ace clustal embl fasta fastq genbank ig imgt nexus phred phylip pir seqxml sff stockholm swiss tab qual uniprot-xml emboss PhyolXML NexML newick CDAO MDL bcf caf gcproj scf SBML lsmat ordination qseq BIOM ASN.1 .2bit .nib ENCODE ... </span>\n<span style='line-height:2em; word-spacing:2em'>Axt BAM SAM BED bedGraph bigBed bigGenePred table bigWig Chain GenePred table GFF GTF HAL MAF Microarray Net Personal Genome SNP format PSL VCF WIG abi ace <span class='supio'>clustal</span> embl <span class='supio'>fasta</span> <span class='supio'>fastq</span> genbank ig imgt nexus phred <span class='supio'>phylip</span> pir seqxml sff stockholm swiss tab qual uniprot-xml emboss PhyolXML NexML <span class='supio'>newick</span> CDAO MDL bcf caf gcproj scf SBML <span class='supio'>lsmat</span> <span class='supio'>ordination</span> <span class='supio'>qseq</span> BIOM ASN.1 .2bit .nib ENCODE ... 
</span>\nI/O in bioinformatics is hard\n\n\nformat redundancy (many-to-many)\n\n\nformat ambiguity\n\n\nheterogeneous sources\n\n\nHow can we solve this?\nAn I/O Registry!\nFormat redundancy (many-to-many)", "from skbio import DNA\n\nseq1 = DNA.read('data/seqs.fasta', qual='data/seqs.qual')\nseq2 = DNA.read('data/seqs.fastq', variant='illumina1.8')\nseq1\n\nseq1 == seq2", "Format ambiguity", "import skbio.io\n\nskbio.io.sniff('data/mystery_file.gz')", "Heterogeneous sources\nRead a gzip file from a URL:", "from skbio import TreeNode\n\ntree1 = skbio.io.read('http://localhost:8888/files/data/newick.gz', \n into=TreeNode)\nprint(tree1.ascii_art())", "Read a bz2 file from a file path:", "import io \n\nwith io.open('data/newick.bz2', mode='rb') as open_filehandle:\n tree2 = skbio.io.read(open_filehandle, into=TreeNode)\n\nprint(tree2.ascii_art())", "Read a list of lines:", "tree3 = skbio.io.read(['((a, b, c), d:15):0;'], into=TreeNode)\nprint(tree3.ascii_art())", "Let's make a format!\nYASF (Yet Another Sequence Format)", "!cat data/yasf-seq.yml\n\nimport yaml\n\nyasf = skbio.io.create_format('yasf')\n\n@yasf.sniffer()\ndef yasf_sniffer(fh):\n return fh.readline().rstrip() == \"#YASF\", {}\n\n@yasf.reader(DNA)\ndef yasf_to_dna(fh):\n seq = yaml.load(fh.read())\n return DNA(seq['Sequence'], metadata={\n 'id': seq['ID'],\n 'location': seq['Location'],\n 'description': seq['Description']\n })\n\nseq = DNA.read(\"data/yasf-seq.yml\")\nseq", "Convert YASF to FASTA", "seq.write(\"data/not-yasf.fna\", format='fasta')\n!cat data/not-yasf.fna", "We are in beta - should you even use our software?\nYES!\nAPI Lifecycle", "from skbio.util._decorator import stable\n\n@stable(as_of='0.4.0')\ndef add(a, b):\n \"\"\"add two numbers.\n \n Parameters\n ----------\n a, b : int\n Numbers to add.\n \n Returns\n -------\n int\n Sum of `a` and `b`.\n \n \"\"\"\n return a + b\n\nhelp(add)", "What is stable:\n\nskbio.io \nskbio.sequence\n\n&nbsp;\n&nbsp;\nWhat is next:\n\nskbio.alignment\nskbio.tree\nskbio.diversity\nskbio.stats\n&lt;your awesome subpackage!&gt;\n\nSequence API: putting the scikit in scikit-bio", "seq = DNA(\"AacgtGTggA\", lowercase='exon')\nseq", "Made with numpy", "seq.values", "And a pinch of pandas", "seq.positional_metadata", "Slicing with positional metadata:", "seq[seq.positional_metadata['exon']]", "Application: building a taxonomy classifier", "aligned_seqs_fp = 'data/gg_13_8_otus/rep_set_aligned/82_otus.fasta'\ntaxonomy_fp = 'data/gg_13_8_otus/taxonomy/82_otu_taxonomy.txt'\n\nfrom skbio import DNA\n\nfwd_primer = DNA(\"GTGCCAGCMGCCGCGGTAA\",\n metadata={'label':'fwd-primer'})\nrev_primer = DNA(\"GGACTACHVGGGTWTCTAAT\",\n metadata={'label':'rev-primer'}).reverse_complement()\n\ndef seq_to_regex(seq):\n result = []\n for base in str(seq):\n if base in DNA.degenerate_chars:\n result.append('[{0}]'.format(\n ''.join(DNA.degenerate_map[base])))\n else:\n result.append(base)\n\n return ''.join(result)\n\nregex = '({0}.*{1})'.format(seq_to_regex(fwd_primer),\n seq_to_regex(rev_primer))\n\nimport numpy as np\nimport skbio\n\nstarts = []\nstops = []\nfor seq in skbio.io.read(aligned_seqs_fp, format='fasta', \n constructor=DNA):\n for match in seq.find_with_regex(regex, ignore=seq.gaps()):\n starts.append(match.start)\n stops.append(match.stop)\n \nlocus = slice(int(np.median(starts)), int(np.median(stops)))\nlocus\n\nkmer_counts = []\nseq_ids = []\nfor seq in skbio.io.read(aligned_seqs_fp, format='fasta',\n constructor=DNA):\n seq_ids.append(seq.metadata['id'])\n sliced_seq = seq[locus].degap()\n 
kmer_counts.append(sliced_seq.kmer_frequencies(8))\n\nfrom sklearn.feature_extraction import DictVectorizer\nX = DictVectorizer().fit_transform(kmer_counts)\n\ntaxonomy_level = 3 # class\nid_to_taxon = {}\nwith open(taxonomy_fp) as f:\n for line in f:\n id_, taxon = line.strip().split('\\t')\n id_to_taxon[id_] = '; '.join(taxon.split('; ')[:taxonomy_level])\n\ny = [id_to_taxon[seq_id] for seq_id in seq_ids]\n\nfrom sklearn.feature_selection import SelectPercentile\n\nX = SelectPercentile().fit_transform(X, y)\n\nfrom sklearn.cross_validation import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y,\n random_state=0)\n\nfrom sklearn.svm import SVC\n\ny_pred = SVC(C=10, kernel='linear', degree=3,\n gamma=0.001).fit(X_train, y_train).predict(X_test)\n\nfrom sklearn.metrics import confusion_matrix, f1_score\n\ncm = confusion_matrix(y_test, y_pred)\ncm_normalized = cm / cm.sum(axis=1)[:, np.newaxis]\nplot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')\n\nprint(\"F-score: %1.3f\" % f1_score(y_test, y_pred, average='micro'))", "Acknowledgements\nscikit-bio development team\nFunding\n\nAlfred P Sloan Foundation\nNational Science Foundation\nNational Institutes of Health\nArizona Board of Regents Technology and Research Investment Fund\n\nThe Caporaso Lab is hiring postdocs and developers, find us if you want to get paid to work on scikit-bio!\nWe're having a sprint on Saturday and Sunday!" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
open2c/bioframe
docs/guide-performance.ipynb
mit
[ "Performance\nThis notebook illustrates performance of typical use cases for bioframe on sets of randomly generated intervals.", "import platform\nimport psutil\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.facecolor']='white'\nplt.rcParams['font.size']=16\n\nimport bioframe\nimport pyranges\n\nprint(f\"Bioframe v.{bioframe.__version__}\")\nprint(f\"PyRanges v.{pyranges.__version__}\")\nprint(f\"System Platform: {platform.platform()}\")\nprint(f\"{psutil.cpu_count()} CPUs at {psutil.cpu_freq().current:.0f} GHz\") ", "Below we define a function to generate random intervals with various properties, returning a dataframe of intervals.", "def make_random_intervals(\n n=1e5, \n n_chroms=1, \n max_coord=None, \n max_length=10, \n sort=False,\n categorical_chroms=False,\n \n ):\n n = int(n)\n n_chroms = int(n_chroms)\n max_coord = (n // n_chroms) if max_coord is None else int(max_coord)\n max_length = int(max_length)\n \n chroms = np.array(['chr'+str(i+1) for i in range(n_chroms)])[\n np.random.randint(0, n_chroms, n)]\n starts = np.random.randint(0, max_coord, n)\n ends = starts + np.random.randint(0, max_length, n)\n\n df = pd.DataFrame({\n 'chrom':chroms,\n 'start':starts,\n 'end':ends\n })\n \n if categorical_chroms:\n df['chrom'] = df['chrom'].astype('category')\n\n if sort:\n df = df.sort_values(['chrom','start','end']).reset_index(drop=True)\n \n return df\n", "Overlap\nIn this chapter we characterize the performance of the key function, bioframe.overlap. We show that the speed depends on:\n- the number of intervals\n- number of intersections (or density of intervals)\n- type of overlap (inner, outer, left)\n- dtype of chromosomes\nvs number of intervals", "timings = {}\nfor n in [1e2, 1e3, 1e4, 1e5, 1e6]:\n df = make_random_intervals(n=n, n_chroms=1)\n df2 = make_random_intervals(n=n, n_chroms=1)\n timings[n] = %timeit -o -r 1 bioframe.overlap(df, df2)\n\nplt.loglog(\n list(timings.keys()),\n list([r.average for r in timings.values()]),\n 'o-',\n)\nplt.xlabel('N intervals')\nplt.ylabel('time, seconds')\nplt.gca().set_aspect(1.0)\nplt.grid()", "vs total number of intersections\nNote that not only the number of intervals, but also the density of intervals determines the performance of overlap.", "timings = {}\nn_intersections = {}\nn = 1e4\nfor avg_interval_len in [3, 1e1, 3e1, 1e2, 3e2]:\n df = make_random_intervals(n=n, n_chroms=1, max_length=avg_interval_len*2)\n df2 = make_random_intervals(n=n, n_chroms=1, max_length=avg_interval_len*2)\n timings[avg_interval_len] = %timeit -o -r 1 bioframe.overlap(df, df2)\n n_intersections[avg_interval_len] = bioframe.overlap(df, df2).shape[0]\n\nplt.loglog(\n list(n_intersections.values()),\n list([r.average for r in timings.values()]),\n 'o-',\n)\nplt.xlabel('N intersections')\nplt.ylabel('time, seconds')\nplt.gca().set_aspect(1.0)\nplt.grid()", "vs number of chromosomes\nIf we consider a genome of the same length, divided into more chromosomes, the timing is relatively unaffected.", "timings = {}\nn_intersections = {}\nn = 1e5\nfor n_chroms in [1, 3, 10, 30, 100, 300, 1000]:\n df = make_random_intervals(n, n_chroms)\n df2 = make_random_intervals(n, n_chroms)\n timings[n_chroms] = %timeit -o -r 1 bioframe.overlap(df, df2)\n n_intersections[n_chroms] = bioframe.overlap(df, df2).shape[0]\n", "Note this test preserves the number of intersections, which is likely why performance remains similar over the considered range.", "n_intersections\n\nplt.loglog(\n list(timings.keys()),\n list([r.average for 
r in timings.values()]),\n 'o-',\n)\nplt.ylim([1e-1, 10])\nplt.xlabel('# chromosomes')\nplt.ylabel('time, seconds')\n# plt.gca().set_aspect(1.0)\nplt.grid()", "vs other parameters: join type, sorted or categorical inputs\nNote that default for overlap: how='left', keep_order=True, and the returned dataframe is sorted after the overlaps have been ascertained. Also note that keep_order=True is only a valid argument for how='left' as the order is not well-defined for inner or outer overlaps.", "df = make_random_intervals()\ndf2 = make_random_intervals()\n%timeit -r 1 bioframe.overlap(df, df2)\n%timeit -r 1 bioframe.overlap(df, df2, how='left', keep_order=False)\n\ndf = make_random_intervals()\ndf2 = make_random_intervals()\n\n%timeit -r 1 bioframe.overlap(df, df2, how='outer')\n%timeit -r 1 bioframe.overlap(df, df2, how='inner')\n%timeit -r 1 bioframe.overlap(df, df2, how='left', keep_order=False)", "Note below that detection of overlaps takes a relatively small fraction of the execution time. The majority of the time the user-facing function spends on formatting the output table.", "df = make_random_intervals()\ndf2 = make_random_intervals()\n\n%timeit -r 1 bioframe.overlap(df, df2)\n%timeit -r 1 bioframe.overlap(df, df2, how='inner')\n%timeit -r 1 bioframe.ops._overlap_intidxs(df, df2)\n%timeit -r 1 bioframe.ops._overlap_intidxs(df, df2, how='inner')", "Note that sorting inputs provides a moderate speedup, as well as storing chromosomes as categoricals", "print('Default inputs (outer/inner joins):')\ndf = make_random_intervals()\ndf2 = make_random_intervals()\n\n%timeit -r 1 bioframe.overlap(df, df2)\n%timeit -r 1 bioframe.overlap(df, df2, how='inner')\n\nprint('Sorted inputs (outer/inner joins):')\ndf_sorted = make_random_intervals(sort=True)\ndf2_sorted = make_random_intervals(sort=True)\n\n%timeit -r 1 bioframe.overlap(df_sorted, df2_sorted)\n%timeit -r 1 bioframe.overlap(df_sorted, df2_sorted, how='inner')\n\nprint('Categorical chromosomes (outer/inner joins):')\ndf_cat = make_random_intervals(categorical_chroms=True)\ndf2_cat = make_random_intervals(categorical_chroms=True)\n\n%timeit -r 1 bioframe.overlap(df_cat, df2_cat)\n%timeit -r 1 bioframe.overlap(df_cat, df2_cat, how='inner')\n", "Vs Pyranges\nDefault arguments\nThe core intersection function of PyRanges is faster, since PyRanges object splits intervals by chromosomes at the object construction stage", "def df2pr(df):\n return pyranges.PyRanges(\n chromosomes=df.chrom,\n starts=df.start,\n ends=df.end,\n )\n\ntimings_bf = {}\ntimings_pr = {}\nfor n in [1e2, 1e3, 1e4, 1e5, 1e6, 3e6]:\n df = make_random_intervals(n=n, n_chroms=1)\n df2 = make_random_intervals(n=n, n_chroms=1)\n pr = df2pr(df)\n pr2 = df2pr(df2)\n timings_bf[n] = %timeit -o -r 1 bioframe.overlap(df, df2,how='inner')\n timings_pr[n] = %timeit -o -r 1 pr.join(pr2)\n \n\nplt.loglog(\n list(timings_bf.keys()),\n list([r.average for r in timings_bf.values()]),\n 'o-',\n label='bioframe'\n)\nplt.loglog(\n list(timings_pr.keys()),\n list([r.average for r in timings_pr.values()]),\n 'o-',\n label='pyranges'\n)\n\nplt.gca().set(\n xlabel='N intervals',\n ylabel='time, seconds',\n aspect=1.0,\n xticks=10**np.arange(2,6.1)\n)\nplt.grid()\nplt.legend()", "With roundtrips to dataframes\nNote that pyranges performs useful calculations at the stage of creating a PyRanges object. Thus a direct comparison for one-off operations on pandas DataFrames between bioframe and pyranges should take this step into account. 
This roundrip is handled by pyranges_intersect_dfs below.", "def pyranges_intersect_dfs(df, df2):\n return df2pr(df).intersect(df2pr(df2)).as_df()\n\ntimings_bf = {}\ntimings_pr = {}\nfor n in [1e2, 1e3, 1e4, 1e5, 1e6, 3e6]:\n df = make_random_intervals(n=n, n_chroms=1)\n df2 = make_random_intervals(n=n, n_chroms=1)\n timings_bf[n] = %timeit -o -r 1 bioframe.overlap(df, df2, how='inner')\n timings_pr[n] = %timeit -o -r 1 pyranges_intersect_dfs(df, df2)\n \n\nplt.loglog(\n list(timings_bf.keys()),\n list([r.average for r in timings_bf.values()]),\n 'o-',\n label='bioframe'\n)\nplt.loglog(\n list(timings_pr.keys()),\n list([r.average for r in timings_pr.values()]),\n 'o-',\n label='pyranges'\n)\nplt.gca().set(\n xlabel='N intervals',\n ylabel='time, seconds',\n aspect=1.0\n)\nplt.grid()\nplt.legend()", "Memory usage", "from memory_profiler import memory_usage\nimport time\n\ndef sleep_before_after(func, sleep_sec=0.5):\n def _f(*args, **kwargs):\n time.sleep(sleep_sec)\n func(*args, **kwargs)\n time.sleep(sleep_sec)\n return _f\n\nmem_usage_bf = {}\nmem_usage_pr = {}\n\nfor n in [1e2, 1e3, 1e4, 1e5, 1e6, 3e6]:\n df = make_random_intervals(n=n, n_chroms=1)\n df2 = make_random_intervals(n=n, n_chroms=1)\n mem_usage_bf[n] = memory_usage(\n (sleep_before_after(bioframe.overlap), (df, df2), dict( how='inner')), \n backend='psutil_pss', \n include_children=True,\n interval=0.1)\n mem_usage_pr[n] = memory_usage(\n (sleep_before_after(pyranges_intersect_dfs), (df, df2), dict()), \n backend='psutil_pss', \n include_children=True,\n interval=0.1)\n \n\nplt.figure(figsize=(8,6))\nplt.loglog(\n list(mem_usage_bf.keys()),\n list([max(r) - r[4] for r in mem_usage_bf.values()]),\n 'o-',\n label='bioframe'\n)\n\nplt.loglog(\n list(mem_usage_pr.keys()),\n list([max(r) - r[4] for r in mem_usage_pr.values()]),\n 'o-',\n label='pyranges'\n)\n\nplt.gca().set(\n xlabel='N intervals',\n ylabel='Memory usage, Mb',\n aspect=1.0\n)\nplt.grid()\nplt.legend()", "The 2x memory consumption of bioframe is due to the fact that bioframe store genomic coordinates as int64 by default, while pyranges uses int32:", "print('Bioframe dtypes:')\ndisplay(df.dtypes)\nprint()\nprint('Pyranges dtypes:')\ndisplay(df2pr(df).dtypes)\n\n\n### Combined performance figure.\n\nfig, axs = plt.subplot_mosaic(\n 'AAA.BBB',\n figsize=(9.0,4))\n\nplt.sca(axs['A']) \n\nplt.text(-0.25, 1.0, 'A', horizontalalignment='center',\n verticalalignment='center', transform=plt.gca().transAxes,\n fontsize=19)\n\nplt.loglog(\n list(timings_bf.keys()),\n list([r.average for r in timings_bf.values()]),\n 'o-',\n color='k',\n label='bioframe'\n)\nplt.loglog(\n list(timings_pr.keys()),\n list([r.average for r in timings_pr.values()]),\n 'o-',\n color='gray',\n label='pyranges'\n)\nplt.gca().set(\n xlabel='N intervals',\n ylabel='time, s',\n aspect=1.0,\n xticks=10**np.arange(2,6.1),\n yticks=10**np.arange(-3,0.1),\n\n)\n\nplt.grid()\nplt.legend()\n\nplt.sca(axs['B'])\nplt.text(-0.33, 1.0, 'B', horizontalalignment='center',\n verticalalignment='center', transform=plt.gca().transAxes,\n fontsize=19)\nplt.loglog(\n list(mem_usage_bf.keys()),\n list([max(r) - r[4] for r in mem_usage_bf.values()]),\n 'o-',\n color='k',\n label='bioframe'\n)\n\nplt.loglog(\n list(mem_usage_pr.keys()),\n list([max(r) - r[4] for r in mem_usage_pr.values()]),\n 'o-',\n color='gray',\n label='pyranges'\n)\nplt.gca().set(\n xlabel='N intervals',\n ylabel='Memory usage, Mb',\n aspect=1.0,\n xticks=10**np.arange(2,6.1),\n)\n\nplt.grid()\nplt.legend()", "Slicing", "timings_slicing_bf = 
{}\ntimings_slicing_pr = {}\n\n\nfor n in [1e2, 1e3, 1e4, 1e5, 1e6, 3e6]:\n df = make_random_intervals(n=n, n_chroms=1)\n timings_slicing_bf[n] = %timeit -o -r 1 bioframe.select(df, ('chr1', n//2, n//4*3))\n pr = df2pr(df)\n timings_slicing_pr[n] = %timeit -o -r 1 pr['chr1', n//2:n//4*3]\n \n\n\nplt.loglog(\n list(timings_slicing_bf.keys()),\n list([r.average for r in timings_bf.values()]),\n 'o-',\n label='bioframe'\n)\n\nplt.loglog(\n list(timings_slicing_pr.keys()),\n list([r.average for r in timings_pr.values()]),\n 'o-',\n label='pyranges'\n)\nplt.gca().set(\n xlabel='N intervals',\n ylabel='time, s',\n aspect=1.0\n)\nplt.grid()\nplt.legend()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
SATHVIKRAJU/Inferential_Statistics
Human_Temp.ipynb
mit
[ "What is the True Normal Human Body Temperature?\nBackground\nThe mean normal body temperature was held to be 37$^{\\circ}$C or 98.6$^{\\circ}$F for more than 120 years since it was first conceptualized and reported by Carl Wunderlich in a famous 1868 book. But, is this value statistically correct?\n<div class=\"span5 alert alert-info\">\n<h3>Exercises</h3>\n\n<p>In this exercise, you will analyze a dataset of human body temperatures and employ the concepts of hypothesis testing, confidence intervals, and statistical significance.</p>\n\n<p>Answer the following questions <b>in this notebook below and submit to your Github account</b>.</p> \n\n<ol>\n<li> Is the distribution of body temperatures normal? \n <ul>\n <li> Although this is not a requirement for CLT to hold (read CLT carefully), it gives us some peace of mind that the population may also be normally distributed if we assume that this sample is representative of the population.\n </ul>\n<li> Is the sample size large? Are the observations independent?\n <ul>\n <li> Remember that this is a condition for the CLT, and hence the statistical tests we are using, to apply.\n </ul>\n<li> Is the true population mean really 98.6 degrees F?\n <ul>\n <li> Would you use a one-sample or two-sample test? Why?\n <li> In this situation, is it appropriate to use the $t$ or $z$ statistic? \n <li> Now try using the other test. How is the result be different? Why?\n </ul>\n<li> At what temperature should we consider someone's temperature to be \"abnormal\"?\n <ul>\n <li> Start by computing the margin of error and confidence interval.\n </ul>\n<li> Is there a significant difference between males and females in normal temperature?\n <ul>\n <li> What test did you use and why?\n <li> Write a story with your conclusion in the context of the original problem.\n </ul>\n</ol>\n\nYou can include written notes in notebook cells using Markdown: \n - In the control panel at the top, choose Cell > Cell Type > Markdown\n - Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet\n\n#### Resources\n\n+ Information and data sources: http://www.amstat.org/publications/jse/datasets/normtemp.txt, http://www.amstat.org/publications/jse/jse_data_archive.htm\n+ Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet\n\n****\n</div>", "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import stats\ndf = pd.read_csv('data/human_body_temperature.csv')\ndf.head()", "The normal distribution test:", "x=df.sort_values(\"temperature\",axis=0)\nt=x[\"temperature\"]\n#print(np.mean(t))\n\nplot_fit = stats.norm.pdf(t, np.mean(t), np.std(t)) \nplt.plot(t,plot_fit,'-o')\nplt.hist(df.temperature, bins = 20 ,normed = True)\nplt.ylabel('Frequency')\nplt.xlabel('Temperature')\nplt.show()\nstats.normaltest(t)", "To check if the distribution of temperature is normal, it is always better to visualize it. We plot the histogram of the values and plot the fitted values to obtain a normal distribution. We see that there are a few outliers in the distribution on the right side but still it correlates as a normal distribution. \nPerforming the Normaltest using Scipy's normal function and we obtain the p value of 0.25. Assuming the statistical significance to be 0.05 and the Null hypothesis being the distribution is normal. 
We can accept the Null hypothesis as the obtained p-value is greater than 0.05 which can also confirm the normal distribution.", "#Question 2: \nno_of_samples=df[\"temperature\"].count()\nprint(no_of_samples)", "We see the sample size is n= 130 and as a general rule of thumb inorder for CLT to be validated \nit is necessary for n>30. Hence the sample size is compartively large.\nQuestion 3\nHO: The true population mean is 98.6 degrees F (Null hypothesis)\nH1: The true population mean is not 98.6 degrees F (Alternative hypothesis)\nAlternatively we can state that,\nHO: μ1 = μ2\nH1: μ1 ≠ μ2", "from statsmodels.stats.weightstats import ztest\nfrom scipy.stats import ttest_ind\nfrom scipy.stats import ttest_1samp\nt_score=ttest_1samp(t,98.6)\nt_score_abs=abs(t_score[0])\nt_score_p_abs=abs(t_score[1])\nz_score=ztest(t,value=98.6)\nz_score_abs=abs(z_score[0])\np_value_abs=abs(z_score[1])\nprint(\"The z score is given by: %F and the p-value is given by %6.9F\"%(z_score_abs,p_value_abs))\nprint(\"The t score is given by: %F and the p-value is given by %6.9F\"%(t_score_abs,t_score_p_abs))", "Choosing one sample test vs two sample test:\nThe problem defined has a single sample and we need to test against the population mean and hence we would use a one sample test as against the two sample test. \nT-test vs Z-test:\nT-test is chosen and best suited when n<30 and hence we can choose z-test for this particular distribution.Also here we are comparing the mean of the population against a predetermined value i.e. 98.6 and it is best to use z-test. T- test is more useful when we compare the means of two sample distributions and check to see if there is a difference between them. \nThe p value is 0.000000049 which is less than the usual significance level 0.05 and hence we can reject the Null hypothesis and say that the population mean is not 98.6 \nTrying the t-test: Since we are comparing the mean value to a reference number, the calculation of both z score and t score remains same and hence value remains same. However the p-value differs slighlty from the other.", "#Question 4:\n#For a 95% Confidence Interval the Confidence interval can be computed as:\nvariance_=np.std(t)/np.sqrt(no_of_samples)\nmean_=np.mean(t)\nconfidence_interval = stats.norm.interval(0.95, loc=mean_, scale=variance_)\nprint(\"The Confidence Interval Lies between %F and %F\"%(confidence_interval[0],confidence_interval[1]))", "Any temperatures out of this range should be considered abnormal.\nQuestion 5:\nHere we use t-test statistic because we want to compare the mean of two groups involved, the male and the female group and it is better to use a t-test.", "temp_male=df.temperature[df.gender=='M']\nfemale_temp=df.temperature[df.gender=='F']\nttest_ind(temp_male,female_temp)", "Considering the Null hypothesis that there is no difference between the two groups, the p-value observed is lesser than the significance level and hence we can reject the Null hypothesis saying that there is a difference in the body temperature amongst men and women." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
gaufung/Data_Analytics_Learning_Note
python-statatics-tutorial/advance-theme/Request.ipynb
mit
[ "Python Request 库入门\n1 urllib2 和 Request对比\nGet请求至https://api.github.com/", "import urllib2\nimport requests\nimport json\ngh_url = 'https://api.github.com'\ngh_user = 'gaufung'\ngh_pw = 'gaofenggit123'\nreq = urllib2.Request(gh_url)\n\npassword_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()\npassword_manager.add_password(None, gh_url, gh_user, gh_pw)\n\nauth_manager = urllib2.HTTPBasicAuthHandler(password_manager)\nopener = urllib2.build_opener(auth_manager)\n\nurllib2.install_opener(opener)\n\nhandler = urllib2.urlopen(req)\n\nif handler.getcode() == requests.codes.ok:\n text = handler.read()\n d_text = json.loads(text)\n for k, v in d_text.items():\n print k, v\n\nimport requests\nimport json\ngh_url = 'https://api.github.com'\ngh_user = 'gaufung'\ngh_pw = 'gaofenggit123'\nr = requests.get(gh_url,auth=(gh_user,gh_pw))\nif r.status_code == requests.codes.ok:\n for k, v in r.json().items():\n print k,v", "2 基本用法", "import requests\ncs_url = 'http://httpbin.org'\nr = requests.get(\"%s/%s\" % (cs_url, 'get'))\nr = requests.post(\"%s/%s\" % (cs_url, 'post'))\nr = requests.put(\"%s/%s\" % (cs_url, 'put'))\nr = requests.delete(\"%s/%s\" % (cs_url, 'delete'))\nr = requests.patch(\"%s/%s\" % (cs_url, 'patch'))\nr = requests.options(\"%s/%s\" % (cs_url, 'get'))", "3 URL 传参\n\nhttps://encrypted.google.com/search?q=hello \n<协议>://<域名>/<接口>?<键1>=<值1>&<键2>=<值2> \n\nrequests 库提供的 HTTP 方法,都提供了名为 params 的参数。这个参数可以接受一个 Python 字典,并自动格式化为上述格式。", "import requests\ncs_url = 'https://www.so.com/s'\nparam = {'ie':'utf-8','q':'query'}\nr = requests.get(cs_url,params = param)\nprint r.url", "4 设置超时\nrequests 的超时设置以秒为单位。例如,对请求加参数 timeout = 5 即可设置超时为 5 秒", "import requests\ncs_url = 'https://www.zhihu.com'\nr = requests.get(cs_url,timeout=100)", "5 请求头", "import requests\n\ncs_url = 'http://httpbin.org/get'\nr = requests.get (cs_url)\nprint r.content", "通常我们比较关注其中的 User-Agent 和 Accept-Encoding。如果我们要修改 HTTP 头中的这两项内容,只需要将一个合适的字典参数传给 headers 即可。", "import requests\n\nmy_headers = {'User-Agent' : 'From Liam Huang', 'Accept-Encoding' : 'gzip'}\ncs_url = 'http://httpbin.org/get'\nr = requests.get (cs_url, headers = my_headers)\nprint r.content", "6 响应头", "import requests\n\ncs_url = 'http://httpbin.org/get'\nr = requests.get (cs_url)\nprint r.headers", "7 响应内容\n长期以来,互联网都存在带宽有限的情况。因此,网络上传输的数据,很多情况下都是经过压缩的。经由 requests 发送的请求,当收到的响应内容经过 gzip 或 deflate 压缩时,requests 会自动为我们解包。我们可以用 Response.content 来获得以字节形式返回的相应内容。", "import requests\n\ncs_url = 'https://www.zhihu.com'\nr = requests.get (cs_url)\n\nif r.status_code == requests.codes.ok:\n print r.content", "如果相应内容不是文本,而是二进制数据(比如图片),则需要进行响应的解码", "import requests\nfrom PIL import Image\nfrom StringIO import StringIO\n\ncs_url = 'http://liam0205.me/uploads/avatar/avatar-2.jpg'\nr = requests.get (cs_url)\n\nif r.status_code == requests.codes.ok:\n Image.open(StringIO(r.content)).show()", "文本模式解码", "import requests\n\ncs_url = 'https://www.zhihu.com'\nr = requests.get (cs_url,auth=('gaofengcumt@126.com','gaofengcumt'))\n\nif r.status_code == requests.codes.ok:\n print r.text\nelse:\n print 'bad request'", "8 反序列化 JSON 数据", "import requests\n\ncs_url = 'http://ip.taobao.com/service/getIpInfo.php'\nmy_param = {'ip':'8.8.8.8'}\n\nr = requests.get(cs_url, params = my_param)\n\nprint r.json()['data']['country'].encode('utf-8')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
msanterre/deep_learning
sentiment-rnn/Sentiment_RNN.ipynb
mit
[ "Sentiment Analysis with an RNN\nIn this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedfoward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels.\nThe architecture for this network is shown below.\n<img src=\"assets/network_diagram.png\" width=400px>\nHere, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on it's own.\nFrom the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function.\nWe don't care about the sigmoid outputs except for the very last one, we can ignore the rest. We'll calculate the cost from the output of the last step and the training label.", "import numpy as np\nimport tensorflow as tf\n\nwith open('../sentiment-network/reviews.txt', 'r') as f:\n reviews = f.read()\nwith open('../sentiment-network/labels.txt', 'r') as f:\n labels_ = f.read()\n\nreviews[:2000]", "Data preprocessing\nThe first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.\nYou can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \\n. To deal with those, I'm going to split the text into each review using \\n as the delimiter. Then I can combined all the reviews back together into one big string.\nFirst, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.", "from string import punctuation\nall_text = ''.join([c for c in reviews if c not in punctuation])\nreviews = all_text.split('\\n')\n\nall_text = ' '.join(reviews)\nwords = all_text.split()\n\nall_text[:2000]\n\nwords[:100]", "Encoding the words\nThe embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.\n\nExercise: Now you're going to encode the words with integers. Build a dictionary that maps words to integers. 
Later we're going to pad our input vectors with zeros, so make sure the integers start at 1, not 0.\nAlso, convert the reviews to integers and store the reviews in a new list called reviews_ints.", "# Create your dictionary that maps vocab words to integers here\nvocab_to_int = {word: idx+1 for (idx, word) in enumerate(set(words))}\n\nprint(\"Vocab to int\")\nprint(\"len words: \", len(set(words)))\nprint(\"len vocab: \", len(vocab_to_int))\nprint(\"Sample: \", vocab_to_int['in'])\n \n# Convert the reviews to integers, same shape as reviews list, but with integers\nreviews_ints = []\nfor review in reviews:\n word_ints = [vocab_to_int[word] for word in review.split()]\n reviews_ints.append(word_ints)\n \nprint()\nprint(\"Reviews ints\")\nprint(\"Review length: \", len(reviews))\nprint(\"Length: \", len(reviews_ints))\nprint(\"Sample: \", reviews_ints[0])", "Encoding the labels\nOur labels are \"positive\" or \"negative\". To use these labels in our network, we need to convert them to 0 and 1.\n\nExercise: Convert labels from positive and negative to 1 and 0, respectively.", "# Convert labels to 1s and 0s for 'positive' and 'negative'\nlabels = np.array([0 if a == \"negative\" else 1 for a in labels_.split()])\nprint(len(labels))\n\nprint(labels[:100])\nprint(labels_[:100])", "If you built labels correctly, you should see the next output.", "from collections import Counter\nreview_lens = Counter([len(x) for x in reviews_ints])\nprint(\"Zero-length reviews: {}\".format(review_lens[0]))\nprint(\"Maximum review length: {}\".format(max(review_lens)))", "Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 characters.\n\nExercise: First, remove the review with zero length from the reviews_ints list.", "# Filter out that review with 0 length\n# for i, review in enumerate(reviews_ints):\n# if len(review) == 0:\n# np.delete(reviews_ints, i)\n# break\nreviews_ints = [r for r in reviews_ints if len(r) > 0]\nprint(\"Reviews ints len: \", len(reviews_ints))\nprint(\"Labels len: \", len(labels))", "Exercise: Now, create an array features that contains the data we'll pass to the network. The data should come from review_ints, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is ['best', 'movie', 'ever'], [117, 18, 128] as integers, the row will look like [0, 0, 0, ..., 0, 117, 18, 128]. For reviews longer than 200, use on the first 200 words as the feature vector.\n\nThis isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.", "seq_len = 200\nfeatures = []\nfor review in reviews_ints:\n cut = review[:seq_len]\n feature = ([0] * (seq_len - len(cut))) + cut\n features.append(feature)\nfeatures = np.array(features)", "If you build features correctly, it should look like that cell output below.", "features[:10,:100]", "Training, Validation, Test\nWith our data in nice shape, we'll split it into training, validation, and test sets.\n\nExercise: Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, train_x and train_y for example. 
Define a split fraction, split_frac as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. The rest of the data will be split in half to create the validation and testing data.", "from sklearn.model_selection import train_test_split\n\nx_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)\n\ntrain_x = x_train\ntrain_y = y_train\n\nval_x = x_test[:len(x_test)//2]\nval_y = y_test[:len(y_test)//2]\n\ntest_x = x_test[len(x_test)//2:]\ntest_y = y_test[len(y_test)//2:]\n\nprint(\"\\t\\t\\tFeature Shapes:\")\nprint(\"Train set: \\t\\t{}\".format(train_x.shape), \n \"\\nValidation set: \\t{}\".format(val_x.shape),\n \"\\nTest set: \\t\\t{}\".format(test_x.shape))", "With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like:\nFeature Shapes:\nTrain set: (20000, 200) \nValidation set: (2500, 200) \nTest set: (2500, 200)\nBuild the graph\nHere, we'll build the graph. First up, defining the hyperparameters.\n\nlstm_size: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.\nlstm_layers: Number of LSTM layers in the network. I'd start with 1, then add more if I'm underfitting.\nbatch_size: The number of reviews to feed the network in one training pass. Typically this should be set as high as you can go without running out of memory.\nlearning_rate: Learning rate", "lstm_size = 256\nlstm_layers = 1\nbatch_size = 500\nlearning_rate = 0.001", "For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.\n\nExercise: Create the inputs_, labels_, and drop out keep_prob placeholders using tf.placeholder. labels_ needs to be two-dimensional to work with some functions later. Since keep_prob is a scalar (a 0-dimensional tensor), you shouldn't provide a size to tf.placeholder.", "n_words = len(vocab_to_int) + 1 # Adding 1 because we use 0's for padding, dictionary started at 1\n\n# Create the graph object\ngraph = tf.Graph()\n# Add nodes to the graph\nwith graph.as_default():\n inputs_ = tf.placeholder(tf.int32, [None, None], name=\"inputs\")\n labels_ = tf.placeholder(tf.int32, [None, None], name=\"labels\")\n keep_prob = tf.placeholder(tf.float32, name=\"keep_prob\")", "Embedding\nNow we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights.\n\nExercise: Create the embedding lookup matrix as a tf.Variable. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with tf.nn.embedding_lookup. This function takes the embedding matrix and an input tensor, such as the review vectors. Then, it'll return another tensor with the embedded vectors. 
So, if the embedding layer has 200 units, the function will return a tensor with size [batch_size, 200].", "# Size of the embedding vectors (number of units in the embedding layer)\nembed_size = 300 \n\nwith graph.as_default():\n embedding = tf.Variable(tf.truncated_normal((n_words, embed_size), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs_)", "LSTM cell\n<img src=\"assets/network_diagram.png\" width=400px>\nNext, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph.\nTo create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation:\ntf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=&lt;function tanh at 0x109f1ef28&gt;)\nyou can see it takes a parameter called num_units, the number of units in the cell, called lstm_size in this code. So then, you can write something like \nlstm = tf.contrib.rnn.BasicLSTMCell(num_units)\nto create an LSTM cell with num_units. Next, you can add dropout to the cell with tf.contrib.rnn.DropoutWrapper. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like\ndrop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\nMost of the time, your network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. Again, there is a simple way to create multiple layers of LSTM cells with tf.contrib.rnn.MultiRNNCell:\ncell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)\nHere, [drop] * lstm_layers creates a list of cells (drop) that is lstm_layers long. The MultiRNNCell wrapper builds this into multiple layers of RNN cells, one for each cell in the list.\nSo the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an achitectural viewpoint, just a more complicated graph in the cell.\n\nExercise: Below, use tf.contrib.rnn.BasicLSTMCell to create an LSTM cell. Then, add drop out to it with tf.contrib.rnn.DropoutWrapper. Finally, create multiple LSTM layers with tf.contrib.rnn.MultiRNNCell.\n\nHere is a tutorial on building RNNs that will help you out.", "with graph.as_default():\n # Your basic LSTM cell\n lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)\n \n # Add dropout to the cell\n drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)\n \n # Stack up multiple LSTM layers, for deep learning\n cell = tf.contrib.rnn.MultiRNNCell([drop]*lstm_layers)\n \n # Getting an initial state of all zeros\n initial_state = cell.zero_state(batch_size, tf.float32)", "RNN forward pass\n<img src=\"assets/network_diagram.png\" width=400px>\nNow we need to actually run the data through the RNN nodes. You can use tf.nn.dynamic_rnn to do this. You'd pass in the RNN cell you created (our multiple layered LSTM cell for instance), and the inputs to the network.\noutputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)\nAbove I created an initial state, initial_state, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. 
tf.nn.dynamic_rnn takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer.\n\nExercise: Use tf.nn.dynamic_rnn to add the forward pass through the RNN. Remember that we're actually passing in vectors from the embedding layer, embed.", "with graph.as_default():\n outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)", "Output\nWe only care about the final output, we'll be using that as our sentiment prediction. So we need to grab the last output with outputs[:, -1], the calculate the cost from that and labels_.", "with graph.as_default():\n predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)\n cost = tf.losses.mean_squared_error(labels_, predictions)\n \n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)", "Validation accuracy\nHere we can add a few nodes to calculate the accuracy which we'll use in the validation pass.", "with graph.as_default():\n correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))", "Batching\nThis is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size].", "def get_batches(x, y, batch_size=100):\n \n n_batches = len(x)//batch_size\n x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]\n for ii in range(0, len(x), batch_size):\n yield x[ii:ii+batch_size], y[ii:ii+batch_size]", "Training\nBelow is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the checkpoints directory exists.", "epochs = 10\n\nwith graph.as_default():\n saver = tf.train.Saver()\n\nwith tf.Session(graph=graph) as sess:\n sess.run(tf.global_variables_initializer())\n iteration = 1\n for e in range(epochs):\n state = sess.run(initial_state)\n \n for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):\n feed = {inputs_: x,\n labels_: y[:, None],\n keep_prob: 0.5,\n initial_state: state}\n loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)\n \n if iteration%5==0:\n print(\"Epoch: {}/{}\".format(e, epochs),\n \"Iteration: {}\".format(iteration),\n \"Train loss: {:.3f}\".format(loss))\n\n if iteration%25==0:\n val_acc = []\n val_state = sess.run(cell.zero_state(batch_size, tf.float32))\n for x, y in get_batches(val_x, val_y, batch_size):\n feed = {inputs_: x,\n labels_: y[:, None],\n keep_prob: 1,\n initial_state: val_state}\n batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)\n val_acc.append(batch_acc)\n print(\"Val acc: {:.3f}\".format(np.mean(val_acc)))\n iteration +=1\n saver.save(sess, \"checkpoints/sentiment.ckpt\")", "Testing", "test_acc = []\nwith tf.Session(graph=graph) as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n test_state = sess.run(cell.zero_state(batch_size, tf.float32))\n for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):\n feed = {inputs_: x,\n labels_: y[:, None],\n keep_prob: 1,\n initial_state: test_state}\n batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)\n test_acc.append(batch_acc)\n print(\"Test accuracy: {:.3f}\".format(np.mean(test_acc)))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
liufuyang/deep_learning_tutorial
course-deeplearning.ai/course1-nn-and-deeplearning/Logistic+Regression+with+a+Neural+Network+mindset+v3.ipynb
mit
[ "Logistic Regression with a Neural Network mindset\nWelcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.\nInstructions:\n- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.\nYou will learn to:\n- Build the general architecture of a learning algorithm, including:\n - Initializing parameters\n - Calculating the cost function and its gradient\n - Using an optimization algorithm (gradient descent) \n- Gather all three functions above into a main model function, in the right order.\n1 - Packages\nFirst, let's run the cell below to import all the packages that you will need during this assignment. \n- numpy is the fundamental package for scientific computing with Python.\n- h5py is a common package to interact with a dataset that is stored on an H5 file.\n- matplotlib is a famous library to plot graphs in Python.\n- PIL and scipy are used here to test your model with your own picture at the end.", "import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nfrom lr_utils import load_dataset\n\n%matplotlib inline", "2 - Overview of the Problem set\nProblem Statement: You are given a dataset (\"data.h5\") containing:\n - a training set of m_train images labeled as cat (y=1) or non-cat (y=0)\n - a test set of m_test images labeled as cat or non-cat\n - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).\nYou will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.\nLet's get more familiar with the dataset. Load the data by running the following code.", "# Loading the data (cat/non-cat)\ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()", "We added \"_orig\" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).\nEach line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the index value and re-run to see other images.", "# Example of a picture\nindex = 25\nplt.imshow(train_set_x_orig[index])\nprint (\"y = \" + str(train_set_y[:, index]) + \", it's a '\" + classes[np.squeeze(train_set_y[:, index])].decode(\"utf-8\") + \"' picture.\")", "Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. \nExercise: Find the values for:\n - m_train (number of training examples)\n - m_test (number of test examples)\n - num_px (= height = width of a training image)\nRemember that train_set_x_orig is a numpy-array of shape (m_train, num_px, num_px, 3). 
For instance, you can access m_train by writing train_set_x_orig.shape[0].", "train_set_y.shape\n\n### START CODE HERE ### (≈ 3 lines of code)\nm_train = train_set_y.shape[1]\nm_test = test_set_y.shape[1]\nnum_px = train_set_x_orig.shape[1]\n### END CODE HERE ###\n\nprint (\"Number of training examples: m_train = \" + str(m_train))\nprint (\"Number of testing examples: m_test = \" + str(m_test))\nprint (\"Height/Width of each image: num_px = \" + str(num_px))\nprint (\"Each image is of size: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\nprint (\"train_set_x shape: \" + str(train_set_x_orig.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x shape: \" + str(test_set_x_orig.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))", "Expected Output for m_train, m_test and num_px: \n<table style=\"width:15%\">\n <tr>\n <td>**m_train**</td>\n <td> 209 </td> \n </tr>\n\n <tr>\n <td>**m_test**</td>\n <td> 50 </td> \n </tr>\n\n <tr>\n <td>**num_px**</td>\n <td> 64 </td> \n </tr>\n\n</table>\n\nFor convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $$ num_px $$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.\nExercise: Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num_px $$ num_px $$ 3, 1).\nA trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$$c$$d, a) is to use: \npython\nX_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X", "# Reshape the training and test examples\n\n### START CODE HERE ### (≈ 2 lines of code)\ntrain_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T\ntest_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n### END CODE HERE ###\n\nprint (\"train_set_x_flatten shape: \" + str(train_set_x_flatten.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x_flatten shape: \" + str(test_set_x_flatten.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\nprint (\"sanity check after reshaping: \" + str(train_set_x_flatten[0:5,0]))", "Expected Output: \n<table style=\"width:35%\">\n <tr>\n <td>**train_set_x_flatten shape**</td>\n <td> (12288, 209)</td> \n </tr>\n <tr>\n <td>**train_set_y shape**</td>\n <td>(1, 209)</td> \n </tr>\n <tr>\n <td>**test_set_x_flatten shape**</td>\n <td>(12288, 50)</td> \n </tr>\n <tr>\n <td>**test_set_y shape**</td>\n <td>(1, 50)</td> \n </tr>\n <tr>\n <td>**sanity check after reshaping**</td>\n <td>[17 31 56 22 33]</td> \n </tr>\n</table>\n\nTo represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.\nOne common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).\n<!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. 
Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !-->\n\nLet's standardize our dataset.", "train_set_x = train_set_x_flatten/255.\ntest_set_x = test_set_x_flatten/255.", "<font color='blue'>\nWhat you need to remember:\nCommon steps for pre-processing a new dataset are:\n- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)\n- Reshape the datasets such that each example is now a vector of size (num_px * num_px * 3, 1)\n- \"Standardize\" the data\n3 - General Architecture of the learning algorithm\nIt's time to design a simple algorithm to distinguish cat images from non-cat images.\nYou will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why Logistic Regression is actually a very simple Neural Network!\n<img src=\"images/LogReg_kiank.png\" style=\"width:650px;height:400px;\">\nMathematical expression of the algorithm:\nFor one example $x^{(i)}$:\n$$z^{(i)} = w^T x^{(i)} + b \\tag{1}$$\n$$\\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\\tag{2}$$ \n$$ \\mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \\log(a^{(i)}) - (1-y^{(i)} ) \\log(1-a^{(i)})\\tag{3}$$\nThe cost is then computed by summing over all training examples:\n$$ J = \\frac{1}{m} \\sum_{i=1}^m \\mathcal{L}(a^{(i)}, y^{(i)})\\tag{6}$$\nKey steps:\nIn this exercise, you will carry out the following steps: \n - Initialize the parameters of the model\n - Learn the parameters for the model by minimizing the cost\n - Use the learned parameters to make predictions (on the test set)\n - Analyse the results and conclude\n4 - Building the parts of our algorithm ##\nThe main steps for building a Neural Network are:\n1. Define the model structure (such as number of input features) \n2. Initialize the model's parameters\n3. Loop:\n - Calculate current loss (forward propagation)\n - Calculate current gradient (backward propagation)\n - Update parameters (gradient descent)\nYou often build 1-3 separately and integrate them into one function we call model().\n4.1 - Helper functions\nExercise: Using your code from \"Python Basics\", implement sigmoid(). As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \\frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().", "# GRADED FUNCTION: sigmoid\n\ndef sigmoid(z):\n \"\"\"\n Compute the sigmoid of z\n\n Arguments:\n z -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(z)\n \"\"\"\n\n ### START CODE HERE ### (≈ 1 line of code)\n s = 1.0 / (1.0 + np.exp(-z))\n ### END CODE HERE ###\n \n return s\n\nprint (\"sigmoid([0, 2]) = \" + str(sigmoid(np.array([0,2]))))", "Expected Output: \n<table>\n <tr>\n <td>**sigmoid([0, 2])**</td>\n <td> [ 0.5 0.88079708]</td> \n </tr>\n</table>\n\n4.2 - Initializing parameters\nExercise: Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. 
If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.", "# GRADED FUNCTION: initialize_with_zeros\n\ndef initialize_with_zeros(dim):\n \"\"\"\n This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.\n \n Argument:\n dim -- size of the w vector we want (or number of parameters in this case)\n \n Returns:\n w -- initialized vector of shape (dim, 1)\n b -- initialized scalar (corresponds to the bias)\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n w = np.zeros((dim, 1))\n b = 0\n ### END CODE HERE ###\n\n assert(w.shape == (dim, 1))\n assert(isinstance(b, float) or isinstance(b, int))\n \n return w, b\n\ndim = 2\nw, b = initialize_with_zeros(dim)\nprint (\"w = \" + str(w))\nprint (\"b = \" + str(b))", "Expected Output: \n<table style=\"width:15%\">\n <tr>\n <td> ** w ** </td>\n <td> [[ 0.]\n [ 0.]] </td>\n </tr>\n <tr>\n <td> ** b ** </td>\n <td> 0 </td>\n </tr>\n</table>\n\nFor image inputs, w will be of shape (num_px $\\times$ num_px $\\times$ 3, 1).\n4.3 - Forward and Backward propagation\nNow that your parameters are initialized, you can do the \"forward\" and \"backward\" propagation steps for learning the parameters.\nExercise: Implement a function propagate() that computes the cost function and its gradient.\nHints:\nForward Propagation:\n- You get X\n- You compute $A = \\sigma(w^T X + b) = (a^{(0)}, a^{(1)}, ..., a^{(m-1)}, a^{(m)})$\n- You calculate the cost function: $J = -\\frac{1}{m}\\sum_{i=1}^{m}y^{(i)}\\log(a^{(i)})+(1-y^{(i)})\\log(1-a^{(i)})$\nHere are the two formulas you will be using: \n$$ \\frac{\\partial J}{\\partial w} = \\frac{1}{m}X(A-Y)^T\\tag{7}$$\n$$ \\frac{\\partial J}{\\partial b} = \\frac{1}{m} \\sum_{i=1}^m (a^{(i)}-y^{(i)})\\tag{8}$$", "# GRADED FUNCTION: propagate\n\ndef propagate(w, b, X, Y):\n \"\"\"\n Implement the cost function and its gradient for the propagation explained above\n\n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)\n\n Return:\n cost -- negative log-likelihood cost for logistic regression\n dw -- gradient of the loss with respect to w, thus same shape as w\n db -- gradient of the loss with respect to b, thus same shape as b\n \n Tips:\n - Write your code step by step for the propagation. 
np.log(), np.dot()\n \"\"\"\n \n m = X.shape[1]\n \n # FORWARD PROPAGATION (FROM X TO COST)\n ### START CODE HERE ### (≈ 2 lines of code)\n A = sigmoid(np.dot(w.T, X) + b) # compute activation\n cost = - 1.0 / m * np.sum(Y * np.log(A) + (1.0 - Y) * np.log(1-A)) # compute cost\n ### END CODE HERE ###\n \n # BACKWARD PROPAGATION (TO FIND GRAD)\n ### START CODE HERE ### (≈ 2 lines of code)\n dw = 1.0 / m * np.dot(X, (A - Y).T)\n db = 1.0 / m * np.sum(A - Y)\n ### END CODE HERE ###\n\n assert(dw.shape == w.shape)\n assert(db.dtype == float)\n cost = np.squeeze(cost)\n assert(cost.shape == ())\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return grads, cost\n\nw, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])\ngrads, cost = propagate(w, b, X, Y)\nprint (\"dw = \" + str(grads[\"dw\"]))\nprint (\"db = \" + str(grads[\"db\"]))\nprint (\"cost = \" + str(cost))", "Expected Output:\n<table style=\"width:50%\">\n <tr>\n <td> ** dw ** </td>\n <td> [[ 0.99993216]\n [ 1.99980262]]</td>\n </tr>\n <tr>\n <td> ** db ** </td>\n <td> 0.499935230625 </td>\n </tr>\n <tr>\n <td> ** cost ** </td>\n <td> 6.000064773192205</td>\n </tr>\n\n</table>\n\nd) Optimization\n\nYou have initialized your parameters.\nYou are also able to compute a cost function and its gradient.\nNow, you want to update the parameters using gradient descent.\n\nExercise: Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\\theta$, the update rule is $ \\theta = \\theta - \\alpha \\text{ } d\\theta$, where $\\alpha$ is the learning rate.", "# GRADED FUNCTION: optimize\n\ndef optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n \"\"\"\n This function optimizes w and b by running a gradient descent algorithm\n \n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of shape (num_px * num_px * 3, number of examples)\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)\n num_iterations -- number of iterations of the optimization loop\n learning_rate -- learning rate of the gradient descent update rule\n print_cost -- True to print the loss every 100 steps\n \n Returns:\n params -- dictionary containing the weights w and bias b\n grads -- dictionary containing the gradients of the weights and bias with respect to the cost function\n costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.\n \n Tips:\n You basically need to write down two steps and iterate through them:\n 1) Calculate the cost and the gradient for the current parameters. 
Use propagate().\n 2) Update the parameters using gradient descent rule for w and b.\n \"\"\"\n \n costs = []\n \n for i in range(num_iterations):\n \n \n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ### \n grads, cost = propagate(w, b, X, Y)\n ### END CODE HERE ###\n \n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w - learning_rate * dw\n b = b - learning_rate * db\n ### END CODE HERE ###\n \n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n \n # Print the cost every 100 training examples\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n \n params = {\"w\": w,\n \"b\": b}\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return params, grads, costs\n\nparams, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)\n\nprint (\"w = \" + str(params[\"w\"]))\nprint (\"b = \" + str(params[\"b\"]))\nprint (\"dw = \" + str(grads[\"dw\"]))\nprint (\"db = \" + str(grads[\"db\"]))", "Expected Output: \n<table style=\"width:40%\">\n <tr>\n <td> **w** </td>\n <td>[[ 0.1124579 ]\n [ 0.23106775]] </td>\n </tr>\n\n <tr>\n <td> **b** </td>\n <td> 1.55930492484 </td>\n </tr>\n <tr>\n <td> **dw** </td>\n <td> [[ 0.90158428]\n [ 1.76250842]] </td>\n </tr>\n <tr>\n <td> **db** </td>\n <td> 0.430462071679 </td>\n </tr>\n\n</table>\n\nExercise: The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the predict() function. There is two steps to computing predictions:\n\n\nCalculate $\\hat{Y} = A = \\sigma(w^T X + b)$\n\n\nConvert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector Y_prediction. If you wish, you can use an if/else statement in a for loop (though there is also a way to vectorize this).", "# GRADED FUNCTION: predict\n\ndef predict(w, b, X):\n '''\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\n \n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n \n Returns:\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\n '''\n \n m = X.shape[1]\n Y_prediction = np.zeros((1,m))\n w = w.reshape(X.shape[0], 1)\n \n # Compute vector \"A\" predicting the probabilities of a cat being present in the picture\n ### START CODE HERE ### (≈ 1 line of code)\n A = sigmoid(np.dot(w.T, X) + b)\n ### END CODE HERE ###\n \n for i in range(A.shape[1]):\n \n # Convert probabilities A[0,i] to actual predictions p[0,i]\n ### START CODE HERE ### (≈ 4 lines of code)\n Y_prediction[0, i] = A[0,i] > 0.5\n ### END CODE HERE ###\n \n assert(Y_prediction.shape == (1, m))\n \n return Y_prediction\n\nprint (\"predictions = \" + str(predict(w, b, X)))", "Expected Output: \n<table style=\"width:30%\">\n <tr>\n <td>\n **predictions**\n </td>\n <td>\n [[ 1. 
1.]]\n </td> \n </tr>\n\n</table>\n\n<font color='blue'>\nWhat to remember:\nYou've implemented several functions that:\n- Initialize (w,b)\n- Optimize the loss iteratively to learn parameters (w,b):\n - computing the cost and its gradient \n - updating the parameters using gradient descent\n- Use the learned (w,b) to predict the labels for a given set of examples\n5 - Merge all functions into a model\nYou will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.\nExercise: Implement the model function. Use the following notation:\n - Y_prediction for your predictions on the test set\n - Y_prediction_train for your predictions on the train set\n - w, costs, grads for the outputs of optimize()", "# GRADED FUNCTION: model\n\ndef model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):\n \"\"\"\n Builds the logistic regression model by calling the function you've implemented previously\n \n Arguments:\n X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)\n Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)\n X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)\n Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)\n num_iterations -- hyperparameter representing the number of iterations to optimize the parameters\n learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()\n print_cost -- Set to true to print the cost every 100 iterations\n \n Returns:\n d -- dictionary containing information about the model.\n \"\"\"\n \n ### START CODE HERE ###\n \n # initialize parameters with zeros (≈ 1 line of code)\n w, b = initialize_with_zeros(X_train.shape[0])\n\n # Gradient descent (≈ 1 line of code)\n parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost = print_cost)\n \n # Retrieve parameters w and b from dictionary \"parameters\"\n w = parameters[\"w\"]\n b = parameters[\"b\"]\n \n # Predict test/train set examples (≈ 2 lines of code)\n Y_prediction_test = predict(w, b, X_test)\n Y_prediction_train = predict(w, b, X_train)\n\n ### END CODE HERE ###\n\n # Print train/test Errors\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n\n \n d = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test, \n \"Y_prediction_train\" : Y_prediction_train, \n \"w\" : w, \n \"b\" : b,\n \"learning_rate\" : learning_rate,\n \"num_iterations\": num_iterations}\n \n return d", "Run the following cell to train your model.", "d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)", "Expected Output: \n<table style=\"width:40%\"> \n\n <tr>\n <td> **Train Accuracy** </td> \n <td> 99.04306220095694 % </td>\n </tr>\n\n <tr>\n <td>**Test Accuracy** </td> \n <td> 70.0 % </td>\n </tr>\n</table>\n\nComment: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test error is 68%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. 
But no worries, you'll build an even better classifier next week!\nAlso, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the index variable) you can look at predictions on pictures of the test set.", "# Example of a picture that was wrongly classified.\nindex = 1\nplt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))\nprint (\"y = \" + str(test_set_y[0,index]) + \", you predicted that it is a \\\"\" + classes[d[\"Y_prediction_test\"][0,index]].decode(\"utf-8\") + \"\\\" picture.\")", "Let's also plot the cost function and the gradients.", "# Plot learning curve (with costs)\ncosts = np.squeeze(d['costs'])\nplt.plot(costs)\nplt.ylabel('cost')\nplt.xlabel('iterations (per hundreds)')\nplt.title(\"Learning rate =\" + str(d[\"learning_rate\"]))\nplt.show()", "Interpretation:\nYou can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. \n6 - Further analysis (optional/ungraded exercise)\nCongratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\\alpha$. \nChoice of learning rate\nReminder:\nIn order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may \"overshoot\" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.\nLet's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the learning_rates variable to contain, and see what happens.", "learning_rates = [0.01, 0.001, 0.0001]\nmodels = {}\nfor i in learning_rates:\n print (\"learning rate is: \" + str(i))\n models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)\n print ('\\n' + \"-------------------------------------------------------\" + '\\n')\n\nfor i in learning_rates:\n plt.plot(np.squeeze(models[str(i)][\"costs\"]), label= str(models[str(i)][\"learning_rate\"]))\n\nplt.ylabel('cost')\nplt.xlabel('iterations')\n\nlegend = plt.legend(loc='upper center', shadow=True)\nframe = legend.get_frame()\nframe.set_facecolor('0.90')\nplt.show()", "Interpretation: \n- Different learning rates give different costs and thus different predictions results.\n- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). \n- A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.\n- In deep learning, we usually recommend that you: \n - Choose the learning rate that better minimizes the cost function.\n - If your model overfits, use other techniques to reduce overfitting. 
(We'll talk about this in later videos.) \n7 - Test with your own image (optional/ungraded exercise)\nCongratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:\n 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n 3. Change your image's name in the following code\n 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!", "## START CODE HERE ## (PUT YOUR IMAGE NAME) \nmy_image = \"my_image.jpg\" # change this to the name of your image file \n## END CODE HERE ##\n\n# We preprocess the image to fit your algorithm.\nfname = \"images/\" + my_image\nimage = np.array(ndimage.imread(fname, flatten=False))\nmy_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T\nmy_predicted_image = predict(d[\"w\"], d[\"b\"], my_image)\n\nplt.imshow(image)\nprint(\"y = \" + str(np.squeeze(my_predicted_image)) + \", your algorithm predicts a \\\"\" + classes[int(np.squeeze(my_predicted_image)),].decode(\"utf-8\") + \"\\\" picture.\")", "<font color='blue'>\nWhat to remember from this assignment:\n1. Preprocessing the dataset is important.\n2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().\n3. Tuning the learning rate (which is an example of a \"hyperparameter\") can make a big difference to the algorithm. You will see more examples of this later in this course!\nFinally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. Once you submit, things you can play with include:\n - Play with the learning rate and the number of iterations\n - Try different initialization methods and compare the results\n - Test other preprocessings (center the data, or divide each row by its standard deviation)\nBibliography:\n- http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/\n- https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c" ]
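An aside on the "Test with your own image" cell in the logistic-regression notebook above: `scipy.ndimage.imread` and `scipy.misc.imresize` are deprecated and no longer shipped in newer SciPy releases, so that cell can fail on a recent environment. Below is a minimal, hedged Pillow-based alternative. It assumes Pillow is installed, that `num_px`, `d`, `predict`, and `classes` are already defined by the notebook, and that the image path is a placeholder to replace with your own file; the division by 255 simply mirrors the standardization applied to the train/test sets earlier.

```python
# Hedged alternative to the scipy.ndimage.imread / scipy.misc.imresize calls,
# using Pillow instead. num_px, d, predict and classes are assumed to come
# from the notebook above; "images/my_image.jpg" is a placeholder path.
import numpy as np
from PIL import Image

fname = "images/my_image.jpg"                  # placeholder file name
img = Image.open(fname).convert("RGB")         # load and force 3 channels
img = img.resize((num_px, num_px))             # same target size as the notebook

my_image = np.asarray(img).reshape((1, num_px * num_px * 3)).T / 255.
my_predicted_image = predict(d["w"], d["b"], my_image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", predicted class: "
      + classes[int(np.squeeze(my_predicted_image))].decode("utf-8"))
```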
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jjonte/udacity-deeplearning-nd
py3/project-4/dlnd_language_translation.ipynb
unlicense
[ "Language Translation\nIn this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.\nGet the Data\nSince translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\nimport problem_unittests as tests\n\nsource_path = 'data/small_vocab_en'\ntarget_path = 'data/small_vocab_fr'\nsource_text = helper.load_data(source_path)\ntarget_text = helper.load_data(target_path)", "Explore the Data\nPlay around with view_sentence_range to view different parts of the data.", "view_sentence_range = (0, 10)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport numpy as np\n\nprint('Dataset Stats')\nprint('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))\n\nsentences = source_text.split('\\n')\nword_counts = [len(sentence.split()) for sentence in sentences]\nprint('Number of sentences: {}'.format(len(sentences)))\nprint('Average number of words in a sentence: {}'.format(np.average(word_counts)))\n\nprint()\nprint('English sentences {} to {}:'.format(*view_sentence_range))\nprint('\\n'.join(source_text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))\nprint()\nprint('French sentences {} to {}:'.format(*view_sentence_range))\nprint('\\n'.join(target_text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))", "Implement Preprocessing Function\nText to Word Ids\nAs you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the &lt;EOS&gt; word id at the end of each sentence from target_text. This will help the neural network predict when the sentence should end.\nYou can get the &lt;EOS&gt; word id by doing:\npython\ntarget_vocab_to_int['&lt;EOS&gt;']\nYou can get other word ids using source_vocab_to_int and target_vocab_to_int.", "def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):\n \"\"\"\n Convert source and target text to proper word ids\n :param source_text: String that contains all the source text.\n :param target_text: String that contains all the target text.\n :param source_vocab_to_int: Dictionary to go from the source words to an id\n :param target_vocab_to_int: Dictionary to go from the target words to an id\n :return: A tuple of lists (source_id_text, target_id_text)\n \"\"\"\n \n source_id_text = [[source_vocab_to_int[y] for y in x] for x in \n [sentence.split() for sentence in source_text.split('\\n')]]\n \n target_id_text = [[target_vocab_to_int[y] for y in x] for x in \n [sentence.split() for sentence in target_text.split('\\n')]]\n \n for l in target_id_text:\n l.append(target_vocab_to_int['<EOS>'])\n \n return source_id_text, target_id_text\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_text_to_ids(text_to_ids)", "Preprocess all the data and save it\nRunning the code cell below will preprocess all the data and save it to file.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nhelper.preprocess_and_save_data(source_path, target_path, text_to_ids)", "Check Point\nThis is your first checkpoint. 
If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport numpy as np\nimport helper\n\n(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()", "Check the Version of TensorFlow and Access to GPU\nThis will check to make sure you have the correct version of TensorFlow and access to a GPU", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom distutils.version import LooseVersion\nimport warnings\nimport tensorflow as tf\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))", "Build the Neural Network\nYou'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:\n- model_inputs\n- process_decoding_input\n- encoding_layer\n- decoding_layer_train\n- decoding_layer_infer\n- decoding_layer\n- seq2seq_model\nInput\nImplement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:\n\nInput text placeholder named \"input\" using the TF Placeholder name parameter with rank 2.\nTargets placeholder with rank 2.\nLearning rate placeholder with rank 0.\nKeep probability placeholder named \"keep_prob\" using the TF Placeholder name parameter with rank 0.\n\nReturn the placeholders in the following the tuple (Input, Targets, Learing Rate, Keep Probability)", "def model_inputs():\n \"\"\"\n Create TF Placeholders for input, targets, and learning rate.\n :return: Tuple (input, targets, learning rate, keep probability)\n \"\"\"\n inputs = tf.placeholder(tf.int32, [None, None], name='input')\n targets = tf.placeholder(tf.int32, [None, None], name='targets')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n \n return inputs, targets, learning_rate, keep_prob\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_model_inputs(model_inputs)", "Process Decoding Input\nImplement process_decoding_input using TensorFlow to remove the last word id from each batch in target_data and concat the GO ID to the begining of each batch.", "def process_decoding_input(target_data, target_vocab_to_int, batch_size):\n \"\"\"\n Preprocess target data for dencoding\n :param target_data: Target Placehoder\n :param target_vocab_to_int: Dictionary to go from the target words to an id\n :param batch_size: Batch Size\n :return: Preprocessed target data\n \"\"\"\n ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])\n decoding_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)\n \n return decoding_input\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_process_decoding_input(process_decoding_input)", "Encoding\nImplement encoding_layer() to create a Encoder RNN layer using tf.nn.dynamic_rnn().", "def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):\n \"\"\"\n Create 
encoding layer\n :param rnn_inputs: Inputs for the RNN\n :param rnn_size: RNN Size\n :param num_layers: Number of layers\n :param keep_prob: Dropout keep probability\n :return: RNN state\n \"\"\"\n enc_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)\n enc_cell = tf.contrib.rnn.DropoutWrapper(enc_cell, output_keep_prob=keep_prob)\n _, enc_state = tf.nn.dynamic_rnn(enc_cell, rnn_inputs, dtype=tf.float32)\n \n return enc_state\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_encoding_layer(encoding_layer)", "Decoding - Training\nCreate training logits using tf.contrib.seq2seq.simple_decoder_fn_train() and tf.contrib.seq2seq.dynamic_rnn_decoder(). Apply the output_fn to the tf.contrib.seq2seq.dynamic_rnn_decoder() outputs.", "def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,\n output_fn, keep_prob):\n \"\"\"\n Create a decoding layer for training\n :param encoder_state: Encoder State\n :param dec_cell: Decoder RNN Cell\n :param dec_embed_input: Decoder embedded input\n :param sequence_length: Sequence Length\n :param decoding_scope: TenorFlow Variable Scope for decoding\n :param output_fn: Function to apply the output layer\n :param keep_prob: Dropout keep probability\n :return: Train Logits\n \"\"\"\n decoder = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)\n prediction, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, decoder, dec_embed_input, \n sequence_length, scope=decoding_scope)\n logits = output_fn(prediction)\n return logits\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_decoding_layer_train(decoding_layer_train)", "Decoding - Inference\nCreate inference logits using tf.contrib.seq2seq.simple_decoder_fn_inference() and tf.contrib.seq2seq.dynamic_rnn_decoder().", "def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,\n maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):\n \"\"\"\n Create a decoding layer for inference\n :param encoder_state: Encoder state\n :param dec_cell: Decoder RNN Cell\n :param dec_embeddings: Decoder embeddings\n :param start_of_sequence_id: GO ID\n :param end_of_sequence_id: EOS Id\n :param maximum_length: The maximum allowed time steps to decode\n :param vocab_size: Size of vocabulary\n :param decoding_scope: TensorFlow Variable Scope for decoding\n :param output_fn: Function to apply the output layer\n :param keep_prob: Dropout keep probability\n :return: Inference Logits\n \"\"\"\n decoder = tf.contrib.seq2seq.simple_decoder_fn_inference(output_fn, encoder_state, dec_embeddings, \n start_of_sequence_id, end_of_sequence_id, \n maximum_length, vocab_size)\n logits, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, decoder, scope=decoding_scope)\n \n return logits\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_decoding_layer_infer(decoding_layer_infer)", "Build the Decoding Layer\nImplement decoding_layer() to create a Decoder RNN layer.\n\nCreate RNN cell for decoding using rnn_size and num_layers.\nCreate the output fuction using lambda to transform it's input, logits, to class logits.\nUse the your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob) function to get the training logits.\nUse your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, 
end_of_sequence_id, maximum_length, vocab_size, decoding_scope, output_fn, keep_prob) function to get the inference logits.\n\nNote: You'll need to use tf.variable_scope to share variables between training and inference.", "def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,\n num_layers, target_vocab_to_int, keep_prob):\n \"\"\"\n Create decoding layer\n :param dec_embed_input: Decoder embedded input\n :param dec_embeddings: Decoder embeddings\n :param encoder_state: The encoded state\n :param vocab_size: Size of vocabulary\n :param sequence_length: Sequence Length\n :param rnn_size: RNN Size\n :param num_layers: Number of layers\n :param target_vocab_to_int: Dictionary to go from the target words to an id\n :param keep_prob: Dropout keep probability\n :return: Tuple of (Training Logits, Inference Logits)\n \"\"\"\n with tf.variable_scope(\"decoding\") as decoding_scope:\n dec_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)\n dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)\n dec_cell = tf.contrib.rnn.MultiRNNCell([dec_cell] * num_layers)\n \n _, dec_state = tf.nn.dynamic_rnn(dec_cell, dec_embed_input, dtype=tf.float32)\n \n output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)\n \n t_logits = decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,\n output_fn, keep_prob)\n \n with tf.variable_scope(\"decoding\", reuse=True) as decoding_scope:\n i_logits = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, target_vocab_to_int['<GO>'], \n target_vocab_to_int['<EOS>'], sequence_length, vocab_size, \n decoding_scope, output_fn, keep_prob) \n \n return t_logits, i_logits\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_decoding_layer(decoding_layer)", "Build the Neural Network\nApply the functions you implemented above to:\n\nApply embedding to the input data for the encoder.\nEncode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob).\nProcess target data using your process_decoding_input(target_data, target_vocab_to_int, batch_size) function.\nApply embedding to the target data for the decoder.\nDecode the encoded input using your decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob).", "def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,\n enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):\n \"\"\"\n Build the Sequence-to-Sequence part of the neural network\n :param input_data: Input placeholder\n :param target_data: Target placeholder\n :param keep_prob: Dropout keep probability placeholder\n :param batch_size: Batch Size\n :param sequence_length: Sequence Length\n :param source_vocab_size: Source vocabulary size\n :param target_vocab_size: Target vocabulary size\n :param enc_embedding_size: Decoder embedding size\n :param dec_embedding_size: Encoder embedding size\n :param rnn_size: RNN Size\n :param num_layers: Number of layers\n :param target_vocab_to_int: Dictionary to go from the target words to an id\n :return: Tuple of (Training Logits, Inference Logits)\n \"\"\"\n rnn_inputs = tf.contrib.layers.embed_sequence(input_data, vocab_size=source_vocab_size, \n embed_dim=enc_embedding_size)\n encoder_state = encoding_layer(rnn_inputs, rnn_size, 
num_layers, keep_prob)\n dec_input = process_decoding_input(target_data, target_vocab_to_int, batch_size)\n dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))\n dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)\n \n t_logits, i_logits = decoding_layer(dec_embed_input, dec_embeddings, encoder_state, target_vocab_size,\n sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)\n\n return t_logits, i_logits\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_seq2seq_model(seq2seq_model)", "Neural Network Training\nHyperparameters\nTune the following parameters:\n\nSet epochs to the number of epochs.\nSet batch_size to the batch size.\nSet rnn_size to the size of the RNNs.\nSet num_layers to the number of layers.\nSet encoding_embedding_size to the size of the embedding for the encoder.\nSet decoding_embedding_size to the size of the embedding for the decoder.\nSet learning_rate to the learning rate.\nSet keep_probability to the Dropout keep probability", "# Number of Epochs\nepochs = 4\n# Batch Size\nbatch_size = 128\n# RNN Size\nrnn_size = 384\n# Number of Layers\nnum_layers = 2\n# Embedding Size\nencoding_embedding_size = 128\ndecoding_embedding_size = 128\n# Learning Rate\nlearning_rate = 0.001\n# Dropout Keep Probability\nkeep_probability = 0.6", "Build the Graph\nBuild the graph using the neural network you implemented.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nsave_path = 'checkpoints/dev'\n(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()\nmax_source_sentence_length = max([len(sentence) for sentence in source_int_text])\n\ntrain_graph = tf.Graph()\nwith train_graph.as_default():\n input_data, targets, lr, keep_prob = model_inputs()\n sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')\n input_shape = tf.shape(input_data)\n \n train_logits, inference_logits = seq2seq_model(\n tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),\n encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)\n\n tf.identity(inference_logits, 'logits')\n with tf.name_scope(\"optimization\"):\n # Loss function\n cost = tf.contrib.seq2seq.sequence_loss(\n train_logits,\n targets,\n tf.ones([input_shape[0], sequence_length]))\n\n # Optimizer\n optimizer = tf.train.AdamOptimizer(lr)\n\n # Gradient Clipping\n gradients = optimizer.compute_gradients(cost)\n capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]\n train_op = optimizer.apply_gradients(capped_gradients)", "Train\nTrain the neural network on the preprocessed data. 
If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport time\n\ndef get_accuracy(target, logits):\n \"\"\"\n Calculate accuracy\n \"\"\"\n max_seq = max(target.shape[1], logits.shape[1])\n if max_seq - target.shape[1]:\n target = np.pad(\n target,\n [(0,0),(0,max_seq - target.shape[1])],\n 'constant')\n if max_seq - logits.shape[1]:\n logits = np.pad(\n logits,\n [(0,0),(0,max_seq - logits.shape[1]), (0,0)],\n 'constant')\n\n return np.mean(np.equal(target, np.argmax(logits, 2)))\n\ntrain_source = source_int_text[batch_size:]\ntrain_target = target_int_text[batch_size:]\n\nvalid_source = helper.pad_sentence_batch(source_int_text[:batch_size])\nvalid_target = helper.pad_sentence_batch(target_int_text[:batch_size])\n\nwith tf.Session(graph=train_graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch_i in range(epochs):\n for batch_i, (source_batch, target_batch) in enumerate(\n helper.batch_data(train_source, train_target, batch_size)):\n start_time = time.time()\n \n _, loss = sess.run(\n [train_op, cost],\n {input_data: source_batch,\n targets: target_batch,\n lr: learning_rate,\n sequence_length: target_batch.shape[1],\n keep_prob: keep_probability})\n \n batch_train_logits = sess.run(\n inference_logits,\n {input_data: source_batch, keep_prob: 1.0})\n batch_valid_logits = sess.run(\n inference_logits,\n {input_data: valid_source, keep_prob: 1.0})\n \n train_acc = get_accuracy(target_batch, batch_train_logits)\n valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)\n end_time = time.time()\n print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'\n .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))\n\n # Save Model\n saver = tf.train.Saver()\n saver.save(sess, save_path)\n print('Model Trained and Saved')", "Save Parameters\nSave the batch_size and save_path parameters for inference.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Save parameters for checkpoint\nhelper.save_params(save_path)", "Checkpoint", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport helper\nimport problem_unittests as tests\n\n_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()\nload_path = helper.load_params()", "Sentence to Sequence\nTo feed a sentence into the model for translation, you first need to preprocess it. 
Implement the function sentence_to_seq() to preprocess new sentences.\n\nConvert the sentence to lowercase\nConvert words into ids using vocab_to_int\nConvert words not in the vocabulary, to the &lt;UNK&gt; word id.", "def sentence_to_seq(sentence, vocab_to_int):\n \"\"\"\n Convert a sentence to a sequence of ids\n :param sentence: String\n :param vocab_to_int: Dictionary to go from the words to an id\n :return: List of word ids\n \"\"\" \n return [vocab_to_int[word] if word in vocab_to_int else vocab_to_int['<UNK>'] \n for word in sentence.lower().split()]\n \n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_sentence_to_seq(sentence_to_seq)", "Translate\nThis will translate translate_sentence from English to French.", "translate_sentence = 'he saw a old yellow truck .'\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\ntranslate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)\n\nloaded_graph = tf.Graph()\nwith tf.Session(graph=loaded_graph) as sess:\n # Load saved model\n loader = tf.train.import_meta_graph(load_path + '.meta')\n loader.restore(sess, load_path)\n\n input_data = loaded_graph.get_tensor_by_name('input:0')\n logits = loaded_graph.get_tensor_by_name('logits:0')\n keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')\n\n translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]\n\nprint('Input')\nprint(' Word Ids: {}'.format([i for i in translate_sentence]))\nprint(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))\n\nprint('\\nPrediction')\nprint(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))\nprint(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))", "Imperfect Translation\nYou might notice that some sentences translate better than others. Since the dataset you're using only has a vocabulary of 227 English words of the thousands that you use, you're only going to see good results using these words. For this project, you don't need a perfect translation. However, if you want to create a better translation model, you'll need better data.\nYou can train on the WMT10 French-English corpus. This dataset has more vocabulary and richer in topics discussed. However, this will take you days to train, so make sure you've a GPU and the neural network is performing well on dataset we provided. Just make sure you play with the WMT10 corpus after you've submitted this project.\nSubmitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_language_translation.ipynb\" and save it as a HTML file under \"File\" -> \"Download as\". Include the \"helper.py\" and \"problem_unittests.py\" files in your submission." ]
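A small, self-contained check of the `sentence_to_seq()` behaviour from the translation notebook above. The vocabulary and ids here are invented purely for illustration; the real mappings come from `helper.load_preprocess()`.

```python
# Toy run of the sentence_to_seq() logic; the vocabulary and ids below are
# invented for illustration and are not the notebook's real vocabularies.
def sentence_to_seq(sentence, vocab_to_int):
    return [vocab_to_int[word] if word in vocab_to_int else vocab_to_int['<UNK>']
            for word in sentence.lower().split()]

toy_vocab = {'<UNK>': 0, 'he': 1, 'saw': 2, 'a': 3, 'truck': 4, '.': 5}
print(sentence_to_seq('He saw a YELLOW truck .', toy_vocab))
# -> [1, 2, 3, 0, 4, 5]   ('yellow' is out of vocabulary, so it maps to <UNK>)
```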
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
kubeflow/kfserving-lts
docs/samples/pipelines/kfs-pipeline-v1alpha2.ipynb
apache-2.0
[ "KFServing Pipeline samples\nThis notebook uses an older version of the KFServing Pipelines component meant for clusters using a KFServing version less than v0.5.0 which only supports the v1alpha2 API.\nInstall the necessary kfp library", "!pip3 install kfp --upgrade\n\nimport kfp.dsl as dsl\nimport kfp\nfrom kfp import components\nimport json\n\n# Create kfp client\n# Note: Add the KubeFlow Pipeline endpoint below if the client is not running on the same cluster.\nclient = kfp.Client('kfserving_endpoint')\nEXPERIMENT_NAME = 'KFServing Experiments'\nexperiment = client.create_experiment(name=EXPERIMENT_NAME)", "TensorFlow example\nNote: Change the action from update to create if you are deploying the model for the first time.", "kfserving_op = components.load_component_from_url(\n 'https://raw.githubusercontent.com/kubeflow/pipelines/65bed9b6d1d676ef2d541a970d3edc0aee12400d/components/kubeflow/kfserving/component.yaml'\n)\n\n@dsl.pipeline(\n name='kfserving pipeline',\n description='A pipeline for kfserving.'\n)\ndef kfservingPipeline(\n action = 'update',\n model_name='tf-sample',\n default_model_uri='gs://kfserving-samples/models/tensorflow/flowers',\n canary_model_uri='gs://kfserving-samples/models/tensorflow/flowers-2',\n canary_model_traffic_percentage='10',\n namespace='your_namespace',\n framework='tensorflow',\n default_custom_model_spec='{}',\n canary_custom_model_spec='{}',\n autoscaling_target='0',\n kfserving_endpoint=''\n):\n\n # define workflow\n kfserving = kfserving_op(action = action,\n model_name=model_name,\n default_model_uri=default_model_uri,\n canary_model_uri=canary_model_uri,\n canary_model_traffic_percentage=canary_model_traffic_percentage,\n namespace=namespace,\n framework=framework,\n default_custom_model_spec=default_custom_model_spec,\n canary_custom_model_spec=canary_custom_model_spec,\n autoscaling_target=autoscaling_target,\n kfserving_endpoint=kfserving_endpoint).set_image_pull_policy('Always')\n\n# Compile pipeline\nimport kfp.compiler as compiler\ncompiler.Compiler().compile(kfservingPipeline, 'tf-flower.tar.gz')\n\n# Execute pipeline\nrun = client.run_pipeline(experiment.id, 'tf-flower', 'tf-flower.tar.gz')", "Custom model example", "kfserving_op = components.load_component_from_url(\n 'https://raw.githubusercontent.com/kubeflow/pipelines/65bed9b6d1d676ef2d541a970d3edc0aee12400d/components/kubeflow/kfserving/component.yaml'\n)\n\n@dsl.pipeline(\n name='kfserving pipeline',\n description='A pipeline for kfserving.'\n)\ndef kfservingPipeline(\n action = 'update',\n model_name='custom-sample',\n default_model_uri='',\n canary_model_uri='',\n canary_model_traffic_percentage='0',\n namespace='kubeflow',\n framework='custom',\n default_custom_model_spec='{\"name\": \"image-segmenter\", \"image\": \"codait/max-image-segmenter:latest\", \"port\": \"5000\"}',\n canary_custom_model_spec='{}',\n autoscaling_target='0',\n kfserving_endpoint=''\n):\n\n # define workflow\n kfserving = kfserving_op(action = action,\n model_name=model_name,\n default_model_uri=default_model_uri,\n canary_model_uri=canary_model_uri,\n canary_model_traffic_percentage=canary_model_traffic_percentage,\n namespace=namespace,\n framework=framework,\n default_custom_model_spec=default_custom_model_spec,\n canary_custom_model_spec=canary_custom_model_spec,\n autoscaling_target=autoscaling_target,\n kfserving_endpoint=kfserving_endpoint).set_image_pull_policy('Always')\n\n# Compile pipeline\nimport kfp.compiler as compiler\ncompiler.Compiler().compile(kfservingPipeline, 'custom.tar.gz')\n\n# 
Execute pipeline\nrun = client.run_pipeline(experiment.id, 'custom-model', 'custom.tar.gz')" ]
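One optional refinement to the custom-model pipeline above: `default_custom_model_spec` is a hand-written JSON string, and since `json` is already imported in that notebook, the spec can instead be built with `json.dumps` over a plain dict, which avoids quoting mistakes. The values below mirror the ones used in the pipeline.

```python
# Optional: build the custom model spec from a dict rather than a hand-written
# JSON string; the values mirror the ones used in the pipeline above.
import json

default_custom_model_spec = json.dumps({
    "name": "image-segmenter",
    "image": "codait/max-image-segmenter:latest",
    "port": "5000",
})
print(default_custom_model_spec)   # an equivalent JSON string for the component
```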
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/nerc/cmip6/models/ukesm1-0-mmh/seaice.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Seaice\nMIP Era: CMIP6\nInstitute: NERC\nSource ID: UKESM1-0-MMH\nTopic: Seaice\nSub-Topics: Dynamics, Thermodynamics, Radiative Processes. \nProperties: 80 (63 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:27\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'nerc', 'ukesm1-0-mmh', 'seaice')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties --&gt; Model\n2. Key Properties --&gt; Variables\n3. Key Properties --&gt; Seawater Properties\n4. Key Properties --&gt; Resolution\n5. Key Properties --&gt; Tuning Applied\n6. Key Properties --&gt; Key Parameter Values\n7. Key Properties --&gt; Assumptions\n8. Key Properties --&gt; Conservation\n9. Grid --&gt; Discretisation --&gt; Horizontal\n10. Grid --&gt; Discretisation --&gt; Vertical\n11. Grid --&gt; Seaice Categories\n12. Grid --&gt; Snow On Seaice\n13. Dynamics\n14. Thermodynamics --&gt; Energy\n15. Thermodynamics --&gt; Mass\n16. Thermodynamics --&gt; Salt\n17. Thermodynamics --&gt; Salt --&gt; Mass Transport\n18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\n19. Thermodynamics --&gt; Ice Thickness Distribution\n20. Thermodynamics --&gt; Ice Floe Size Distribution\n21. Thermodynamics --&gt; Melt Ponds\n22. Thermodynamics --&gt; Snow Processes\n23. Radiative Processes \n1. Key Properties --&gt; Model\nName of seaice model used.\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of sea ice model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Variables\nList of prognostic variable in the sea ice model.\n2.1. Prognostic\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of prognostic variables in the sea ice component.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.key_properties.variables.prognostic') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sea ice temperature\" \n# \"Sea ice concentration\" \n# \"Sea ice thickness\" \n# \"Sea ice volume per grid cell area\" \n# \"Sea ice u-velocity\" \n# \"Sea ice v-velocity\" \n# \"Sea ice enthalpy\" \n# \"Internal ice stress\" \n# \"Salinity\" \n# \"Snow temperature\" \n# \"Snow depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Seawater Properties\nProperties of seawater relevant to sea ice\n3.1. Ocean Freezing Point\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS-10\" \n# \"Constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Ocean Freezing Point Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant seawater freezing point, specify this value.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Resolution\nResolution of the sea ice grid\n4.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Number Of Horizontal Gridpoints\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Tuning Applied\nTuning applied to sea ice model component\n5.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. 
In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Target\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Simulations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\n*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.4. Metrics Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any observed metrics used in tuning model/parameters", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.5. Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nWhich variables were changed during the tuning process?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Key Parameter Values\nValues of key parameters\n6.1. Typical Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nWhat values were specificed for the following parameters if used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ice strength (P*) in units of N m{-2}\" \n# \"Snow conductivity (ks) in units of W m{-1} K{-1} \" \n# \"Minimum thickness of ice created in leads (h0) in units of m\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.2. Additional Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf you have any additional paramterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Assumptions\nAssumptions made in the sea ice model\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral overview description of any key assumptions made in this model.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.key_properties.assumptions.description') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. On Diagnostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nNote any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Missing Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Key Properties --&gt; Conservation\nConservation in the sea ice component\n8.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nProvide a general description of conservation methodology.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Properties\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProperties conserved in sea ice by the numerical schemes.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.properties') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Mass\" \n# \"Salt\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Budget\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nFor each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Was Flux Correction Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes conservation involved flux correction?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "8.5. Corrected Conserved Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any variables which are conserved by more than the numerical scheme alone.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Grid --&gt; Discretisation --&gt; Horizontal\nSea ice discretisation in the horizontal\n9.1. 
Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGrid on which sea ice is horizontal discretised?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ocean grid\" \n# \"Atmosphere Grid\" \n# \"Own Grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.2. Grid Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the type of sea ice grid?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Structured grid\" \n# \"Unstructured grid\" \n# \"Adaptive grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.3. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the advection scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite differences\" \n# \"Finite elements\" \n# \"Finite volumes\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.4. Thermodynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step in the sea ice model thermodynamic component in seconds.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.5. Dynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step in the sea ice model dynamic component in seconds.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.6. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional horizontal discretisation details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Grid --&gt; Discretisation --&gt; Vertical\nSea ice vertical properties\n10.1. Layering\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Zero-layer\" \n# \"Two-layers\" \n# \"Multi-layers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.2. Number Of Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using multi-layers specify how many.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "10.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional vertical grid details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Grid --&gt; Seaice Categories\nWhat method is used to represent sea ice categories ?\n11.1. Has Mulitple Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSet to true if the sea ice model has multiple sea ice categories.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "11.2. Number Of Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify how many.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Category Limits\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify each of the category limits.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.4. Ice Thickness Distribution Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the sea ice thickness distribution scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.5. Other\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the sea ice model does not use sea ice categories specify any additional details. For example models that paramterise the ice thickness distribution ITD (i.e there is no explicit ITD) but there is assumed distribution and fluxes are computed accordingly.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.other') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12. Grid --&gt; Snow On Seaice\nSnow on sea ice details\n12.1. Has Snow On Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs snow on ice represented in this model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. Number Of Snow Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels of snow on ice?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12.3. Snow Fraction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how the snow fraction on sea ice is determined", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.4. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional details related to snow on ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Dynamics\nSea Ice Dynamics\n13.1. Horizontal Transport\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of horizontal advection of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.horizontal_transport') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. Transport In Thickness Space\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice transport in thickness space (i.e. in thickness categories)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.3. Ice Strength Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhich method of sea ice strength formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Hibler 1979\" \n# \"Rothrock 1975\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.4. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich processes can redistribute sea ice (including thickness)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.redistribution') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rafting\" \n# \"Ridging\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.5. Rheology\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRheology, what is the ice deformation formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.rheology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Free-drift\" \n# \"Mohr-Coloumb\" \n# \"Visco-plastic\" \n# \"Elastic-visco-plastic\" \n# \"Elastic-anisotropic-plastic\" \n# \"Granular\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Thermodynamics --&gt; Energy\nProcesses related to energy in sea ice thermodynamics\n14.1. 
Enthalpy Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the energy formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice latent heat (Semtner 0-layer)\" \n# \"Pure ice latent and sensible heat\" \n# \"Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)\" \n# \"Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Thermal Conductivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat type of thermal conductivity is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice\" \n# \"Saline ice\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.3. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of heat diffusion?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Conduction fluxes\" \n# \"Conduction and radiation heat fluxes\" \n# \"Conduction, radiation and latent heat transport\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.4. Basal Heat Flux\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod by which basal ocean heat flux is handled?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heat Reservoir\" \n# \"Thermal Fixed Salinity\" \n# \"Thermal Varying Salinity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.5. Fixed Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.6. Heat Content Of Precipitation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which the heat content of precipitation is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.7. Precipitation Effects On Salinity\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15. 
Thermodynamics --&gt; Mass\nProcesses related to mass in sea ice thermodynamics\n15.1. New Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which new sea ice is formed in open water.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Ice Vertical Growth And Melt\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs the vertical growth and melt of sea ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Ice Lateral Melting\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice lateral melting?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Floe-size dependent (Bitz et al 2001)\" \n# \"Virtual thin ice melting (for single-category)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.4. Ice Surface Sublimation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs sea ice surface sublimation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.5. Frazil Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method of frazil ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Thermodynamics --&gt; Salt\nProcesses related to salt in sea ice thermodynamics.\n16.1. Has Multiple Sea Ice Salinities\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "16.2. Sea Ice Salinity Thermal Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes sea ice salinity impact the thermal properties of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17. Thermodynamics --&gt; Salt --&gt; Mass Transport\nMass transport of salt\n17.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the mass transport of salt calculation?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "17.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\nSalt thermodynamics\n18.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the thermodynamic calculation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "18.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19. Thermodynamics --&gt; Ice Thickness Distribution\nIce thickness distribution details.\n19.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice thickness distribution represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Virtual (enhancement of thermal conductivity, thin ice melting)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20. Thermodynamics --&gt; Ice Floe Size Distribution\nIce floe-size distribution details.\n20.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice floe-size represented?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Parameterised\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.2. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nPlease provide further details on any parameterisation of floe-size.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Thermodynamics --&gt; Melt Ponds\nCharacteristics of melt ponds.\n21.1. Are Included\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre melt ponds included in the sea ice model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "21.2. Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat method of melt pond formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flocco and Feltham (2010)\" \n# \"Level-ice melt ponds\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "21.3. Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat do melt ponds have an impact on?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Albedo\" \n# \"Freshwater\" \n# \"Heat\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22. Thermodynamics --&gt; Snow Processes\nThermodynamic processes in snow on sea ice\n22.1. Has Snow Aging\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has a snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.2. Snow Aging Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.3. Has Snow Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has snow ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.4. 
Snow Ice Formation Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow ice formation scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.5. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the impact of ridging on snow cover?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.6. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the heat diffusion through snow methodology in sea ice thermodynamics?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Single-layered heat diffusion\" \n# \"Multi-layered heat diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23. Radiative Processes\nSea Ice Radiative Processes\n23.1. Surface Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod used to handle surface albedo.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Parameterized\" \n# \"Multi-band albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Ice Radiation Transmission\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod by which solar radiation through sea ice is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Exponential attenuation\" \n# \"Ice radiation transmission per category\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
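For readers filling in this sea-ice questionnaire, the sketch below shows the pattern the cells above repeat, applied to one ENUM and one INTEGER property. The `DOC` object and the `DOC.set_id()` / `DOC.set_value()` calls are the ones already used throughout the notebook; the specific values chosen here (an elastic-visco-plastic rheology and a 3600 s thermodynamic time step) are purely illustrative placeholders, not a description of any particular model.

```python
# Illustrative only: DOC, set_id() and set_value() come from this notebook's own
# setup cells; the concrete values below are hypothetical placeholders.

# ENUM property (13.5 Rheology) - the value must be one of the listed valid choices.
DOC.set_id('cmip6.seaice.dynamics.rheology')
DOC.set_value("Elastic-visco-plastic")

# INTEGER property (9.4 Thermodynamics Time Step) - pass a bare number, not a quoted string.
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
DOC.set_value(3600)
```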
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
kastnerkyle/kastnerkyle.github.io-nikola
blogsite/posts/robust-matrix-decomposition.ipynb
bsd-3-clause
[ "Matrix factorization is a very interesting area of machine learning research. Formulating a problem as a 2D matrix $X$ to be decomposed into multiple matrices, which combine to return an approximation of $X$, can lead to state of the art results for many interesting problems. This core concept is the focus of compressive sensing, matrix completion, sparse coding, robust PCA, dictionary learning, and many other algorithms. One major website which shows many different types of matrix decomposition algorithms is the Matrix Factorization Jungle, run by Igor Carron. There has been a heavy focus on random projections in recent algorithms, which can often lead to increased stability and computationally efficient solutions.\n<!-- TEASER_END -->\n\nBelow is a link to the GoDec algorithm output, as applied to the \"Hall\" video (shown below) found in this zip file, which is a surveillance tape taken from a mall. Using the GoDec algorithm, the background is almost completely subtracted from the noisy elements of people walking, while still capturing periodic background elements as part of the background. I have written code for both the GoDec and Robust PCA algorithms in numpy based on their Matlab equivalents. There are many datasets which can be found here, and we will set up a simple download function for ease-of-access. Special thanks to @kuantkid for the PyRPCA repo, which was the inspiration to start and extend this work, and especially the idea of creating a demo video from PNGs which is PRETTY. DANG. AWESOME.\nInterstellar Overdrive", "from IPython.display import YouTubeVideo\nYouTubeVideo('JgfK46RA8XY')", "First we want to download a video, so that we can compare the algorithmic result against the original video. The file is downloaded, if it does not already exist in the working directory. Next, it will create a directory of the same name, and unzip the file contents (Campus.zip to Campus/filename).", "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\ntry:\n from urllib2 import urlopen\nexcept ImportError:\n from urllib.request import urlopen\nfrom scipy.io import loadmat, savemat\nimport os\n\next = {\"water\":'WaterSurface.zip',\n \"fountain\":'Fountain.zip',\n \"campus\":'Campus.zip',\n \"escalator\": 'Escalator.zip',\n \"curtain\": 'Curtain.zip',\n \"lobby\": 'Lobby.zip',\n \"mall\": 'ShoppingMall.zip',\n \"hall\": 'hall.zip',\n \"bootstrap\": 'Bootstrap.zip'}\n\nexample = \"mall\"\n\n\ndef progress_bar_downloader(url, fname, progress_update_every=5):\n #from http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http-using-python/22776#22776\n u = urlopen(url)\n f = open(fname, 'wb')\n meta = u.info()\n file_size = int(meta.get(\"Content-Length\"))\n print(\"Downloading: %s Bytes: %s\" % (fname, file_size))\n file_size_dl = 0\n block_sz = 8192\n p = 0\n while True:\n buffer = u.read(block_sz)\n if not buffer:\n break\n file_size_dl += len(buffer)\n f.write(buffer)\n if (file_size_dl * 100. / file_size) > p:\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ file_size)\n print(status)\n p += progress_update_every\n f.close()\n\n\ndef get_video_clip(d):\n #Download files from http://perception.i2r.a-star.edu.sg/bk_model/bk_index.html\n if os.path.exists('./' + d):\n print('Video file %s already downloaded, continuing' % d)\n return\n else:\n print('Video file %s not found, downloading' % d)\n progress_bar_downloader(r'http://perception.i2r.a-star.edu.sg/BK_Model_TestData/' + d, d)\n\n \ndef bname(x): return x.split('.')[0]\n \nget_video_clip(ext[example])\n\nif not os.path.exists('./' + bname(ext[example])):\n os.makedirs(bname(ext[example]))\n os.system('unzip ' + ext[example] + ' -d ' + bname(ext[example]))", "The code below will read in all the .bmp images downloaded and unzipped from the website, as well as converting to grayscale, scaling the result between 0 and 1. Eventually, I plan to do a \"full-color\" version of this testing, but for now the greyscale will have to suffice.", "from scipy import misc\nimport numpy as np\nfrom glob import glob\n\ndef rgb2gray(rgb):\n r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n return gray / 255.\n\nfdir = bname(ext[example])\nnames = sorted(glob(fdir + \"/*.bmp\"))\nd1, d2, channels = misc.imread(names[0]).shape\nd1 = 128\nd2 = 160\nnum = len(names)\nX = np.zeros((d1, d2, num))\nfor n, i in enumerate(names):\n X[:, :, n] = misc.imresize(rgb2gray(misc.imread(i).astype(np.double)) / 255., (d1, d2))\n \nX = X.reshape(d1 * d2, num)\nclip = 100\nprint(X.shape)\nprint(d1)\nprint(d2)", "Robust PCA\nRobust Principal Component Analysis (PCA) is an extension of PCA. Rather than attempting to solve $X = L$, where $L$ is typically a low-rank approximation ($N \\times M$, vs. $N \\times P$, $M < P$), Robust PCA solves the factorization problem $X = L + S$, where $L$ is a low-rank approximation, and $S$ is a sparse component. By separating the factorization into two separate matrix components, Robust PCA makes a much better low-rank estimate $L$ on many problems.\nThere are a variety of algorithms to solve this optimization problem. The code below is an implementation of the Inexact Augmented Lagrangian Multiplier algorithm for Robust PCA which is identical to the equivalent MATLAB code (download), or as near as I could make it. The functionality seems equivalent, and for relevant details please see the paper. This algorithm was chosen because according to the timing results at the bottom of this page, it was both the fastest and most accurate of the formulas listed. Though it appears to be fairly slow in our testing, it is fully believable that this is an implementation issue, since this code has not been specifically optimized for numpy. 
Due to this limitation, we clip the algorithm to the first few frames to save time.", "import numpy as np\nfrom numpy.linalg import norm, svd\n\ndef inexact_augmented_lagrange_multiplier(X, lmbda=.01, tol=1e-3,\n maxiter=100, verbose=True):\n \"\"\"\n Inexact Augmented Lagrange Multiplier\n \"\"\"\n Y = X\n norm_two = norm(Y.ravel(), 2)\n norm_inf = norm(Y.ravel(), np.inf) / lmbda\n dual_norm = np.max([norm_two, norm_inf])\n Y = Y / dual_norm\n A = np.zeros(Y.shape)\n E = np.zeros(Y.shape)\n dnorm = norm(X, 'fro')\n mu = 1.25 / norm_two\n rho = 1.5\n sv = 10.\n n = Y.shape[0]\n itr = 0\n while True:\n Eraw = X - A + (1 / mu) * Y\n Eupdate = np.maximum(Eraw - lmbda / mu, 0) + np.minimum(Eraw + lmbda / mu, 0)\n U, S, V = svd(X - Eupdate + (1 / mu) * Y, full_matrices=False)\n svp = (S > 1 / mu).shape[0]\n if svp < sv:\n sv = np.min([svp + 1, n])\n else:\n sv = np.min([svp + round(.05 * n), n])\n Aupdate = np.dot(np.dot(U[:, :svp], np.diag(S[:svp] - 1 / mu)), V[:svp, :])\n A = Aupdate\n E = Eupdate\n Z = X - A - E\n Y = Y + mu * Z\n mu = np.min([mu * rho, mu * 1e7])\n itr += 1\n if ((norm(Z, 'fro') / dnorm) < tol) or (itr >= maxiter):\n break\n if verbose:\n print(\"Finished at iteration %d\" % (itr)) \n return A, E\n\nsz = clip\nA, E = inexact_augmented_lagrange_multiplier(X[:, :sz])\nA = A.reshape(d1, d2, sz) * 255.\nE = E.reshape(d1, d2, sz) * 255.\n#Refer to them by position desired for video demo later \nsavemat(\"./IALM_background_subtraction.mat\", {\"1\": A, \"2\": E})\nprint(\"RPCA complete\")", "GoDec\nThe code below contains an implementation of the GoDec algorithm, which attempts to solve the problem $X = L + S + G$, with $L$ low-rank, $S$ sparse, and $G$ as a component of Gaussian noise. By allowing the decomposition to expand to 3 matrix components, the algorithm is able to more effectively differentiate the sparse component from the low-rank.", "import numpy as np\nfrom numpy.linalg import norm\nfrom scipy.linalg import qr\n\ndef wthresh(a, thresh):\n #Soft wavelet threshold\n res = np.abs(a) - thresh\n return np.sign(a) * ((res > 0) * res)\n\n#Default threshold of .03 is assumed to be for input in the range 0-1...\n#original matlab had 8 out of 255, which is about .03 scaled to 0-1 range\ndef go_dec(X, thresh=.03, rank=2, power=0, tol=1e-3,\n max_iter=100, random_seed=0, verbose=True):\n m, n = X.shape\n if m < n:\n X = X.T\n m, n = X.shape\n L = X\n S = np.zeros(L.shape)\n itr = 0\n random_state = np.random.RandomState(random_seed) \n while True:\n Y2 = random_state.randn(n, rank)\n for i in range(power + 1):\n Y1 = np.dot(L, Y2)\n Y2 = np.dot(L.T, Y1);\n Q, R = qr(Y2, mode='economic')\n L_new = np.dot(np.dot(L, Q), Q.T)\n T = L - L_new + S\n L = L_new\n S = wthresh(T, thresh)\n T -= S\n err = norm(T.ravel(), 2)\n if (err < tol) or (itr >= max_iter):\n break \n L += T\n itr += 1\n #Is this even useful in soft GoDec? May be a display issue...\n G = X - L - S\n if m < n:\n L = L.T\n S = S.T\n G = G.T\n if verbose:\n print(\"Finished at iteration %d\" % (itr))\n return L, S, G\n\nsz = clip\nL, S, G = go_dec(X[:, :sz])\nL = L.reshape(d1, d2, sz) * 255.\nS = S.reshape(d1, d2, sz) * 255.\nG = G.reshape(d1, d2, sz) * 255.\nsavemat(\"./GoDec_background_subtraction.mat\", {\"1\": L, \"2\": S, \"3\": G, })\nprint(\"GoDec complete\")", "A Momentary Lapse of Reason\nNow it is time to do something a little unreasonable - we can actually take all of this data, reshape it into a series of images, and plot it as a video inside the IPython notebook! 
The first step is to generate the frames for the video as .png files, as shown below.", "import os\nimport sys\nimport matplotlib.pyplot as plt\nfrom scipy.io import loadmat\nimport numpy as np\nfrom matplotlib import cm\nimport matplotlib\n\n#demo inspired by / stolen from @kuantkid on Github - nice work!\ndef mlabdefaults():\n matplotlib.rcParams['lines.linewidth'] = 1.5\n matplotlib.rcParams['savefig.dpi'] = 300 \n matplotlib.rcParams['font.size'] = 22\n matplotlib.rcParams['font.family'] = \"Times New Roman\"\n matplotlib.rcParams['legend.fontsize'] = \"small\"\n matplotlib.rcParams['legend.fancybox'] = True\n matplotlib.rcParams['lines.markersize'] = 10\n matplotlib.rcParams['figure.figsize'] = 8, 5.6\n matplotlib.rcParams['legend.labelspacing'] = 0.1\n matplotlib.rcParams['legend.borderpad'] = 0.1\n matplotlib.rcParams['legend.borderaxespad'] = 0.2\n matplotlib.rcParams['font.monospace'] = \"Courier New\"\n matplotlib.rcParams['savefig.dpi'] = 200\n \ndef make_video(alg, cache_path='/tmp/matrix_dec_tmp'):\n name = alg\n if not os.path.exists(cache_path):\n os.mkdir(cache_path)\n #If you generate a big \n if not os.path.exists('%s/%s_tmp'%(cache_path, name)):\n os.mkdir(\"%s/%s_tmp\"%(cache_path, name))\n mat = loadmat('./%s_background_subtraction.mat'%(name))\n org = X.reshape(d1, d2, X.shape[1]) * 255.\n fig = plt.figure()\n ax = fig.add_subplot(111)\n usable = [x for x in sorted(mat.keys()) if \"_\" not in x][0]\n sz = min(org.shape[2], mat[usable].shape[2])\n for i in range(sz):\n ax.cla()\n ax.axis(\"off\")\n ax.imshow(np.hstack([mat[x][:, :, i] for x in sorted(mat.keys()) if \"_\" not in x] + \\\n [org[:, :, i]]), cm.gray)\n fname_ = '%s/%s_tmp/_tmp%03d.png'%(cache_path, name, i)\n if (i % 25) == 0:\n print('Completed frame', i, 'of', sz, 'for method', name)\n fig.tight_layout()\n fig.savefig(fname_, bbox_inches=\"tight\")\n #Write out an mp4 and webm video from the png files. -r 5 means 5 frames a second\n #libx264 is h.264 encoding, -s 160x130 is the image size\n #You may need to sudo apt-get install libavcodec\n plt.close()\n\n num_arrays = na = len([x for x in mat.keys() if \"_\" not in x])\n cdims = (na * d1, d2)\n cmd_h264 = \"ffmpeg -y -r 10 -i '%s/%s_tmp/_tmp%%03d.png' -c:v libx264 \" % (cache_path, name) + \\\n \"-s %dx%d -preset ultrafast -pix_fmt yuv420p %s_animation.mp4\" % (cdims[0], cdims[1], name)\n cmd_vp8 = \"ffmpeg -y -r 10 -i '%s/%s_tmp/_tmp%%03d.png' -c:v libvpx \" % (cache_path, name) + \\\n \"-s %dx%d -preset ultrafast -pix_fmt yuv420p %s_animation.webm\" % (cdims[0], cdims[1], name)\n os.system(cmd_h264)\n os.system(cmd_vp8)\n \nif __name__ == \"__main__\":\n mlabdefaults()\n all_methods = ['IALM', 'GoDec']\n for name in all_methods:\n make_video(name);\n\nprint(\"Background is generated from this file:\", example)", "Echoes\nThe code below will display HTML5 video for each of the videos generated in the previos step, and embed it in the IPython notebook. There are \"echoes\" of people, which are much more pronounced in the Robust PCA video than the GoDec version, likely due to the increased flexibility of an independent Gaussian term. 
Overall, the effect is pretty cool though not mathematically as good as the GoDec result.", "from IPython.display import HTML\nfrom base64 import b64encode\n\ndef html5_video(alg, frames):\n #This *should* support all browsers...\n framesz = 250\n info = {\"mp4\": {\"ext\":\"mp4\", \"encoded\": '', \"size\":(frames * framesz, framesz)}}\n html_output = []\n for k in info.keys():\n f = open(\"%s_animation.%s\" % (alg, info[k][\"ext\"]), \"rb\").read()\n encoded = b64encode(f).decode('ascii')\n video_tag = '<video width=\"500\" height=\"250\" autoplay=\"autoplay\" ' + \\\n 'loop src=\"data:video/%s;base64,%s\">' % (k, encoded)\n html_output.append(video_tag)\n return HTML(data=''.join(html_output))", "If these videos freeze for some reason, just hit refresh and they should start playing.", "html5_video(\"IALM\", 3)\n\nhtml5_video(\"GoDec\", 4)", "The Final Cut\nThough the results are definitely not identical to the video results, they are similar enough to the benchmark that I am satisfied for now. Future work in this area will involve more decomposition algorithms, dictionary learning, and matrix completion. Eventually, I would like to get this into scikit-learn format, and post as a gist or contribute to the codebase.\nkk" ]
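As a quick way to convince yourself that the two decomposition routines above behave sensibly, here is a minimal synthetic check: build a matrix that is exactly low-rank plus sparse and see how well each method recovers the low-rank part. It assumes `inexact_augmented_lagrange_multiplier` and `go_dec` from the cells above are already in scope; the problem size, rank, corruption level and the 1/sqrt(max(n, m)) choice of `lmbda` (the value usually quoted in the robust PCA literature) are just reasonable defaults for a toy test, not tuned settings.

```python
import numpy as np

# Toy test: X = (rank-3 matrix) + (5% sparse, large-magnitude corruption).
rng = np.random.RandomState(0)
n, m, true_rank = 200, 100, 3

low_rank = rng.randn(n, true_rank).dot(rng.randn(true_rank, m))
sparse = np.zeros((n, m))
mask = rng.rand(n, m) < 0.05
sparse[mask] = 10 * rng.randn(mask.sum())
X_synth = low_rank + sparse

# Robust PCA via the inexact ALM implementation defined above.
A, E = inexact_augmented_lagrange_multiplier(
    X_synth, lmbda=1.0 / np.sqrt(max(n, m)), verbose=False)

# GoDec with the true rank supplied.
L, S, G = go_dec(X_synth, rank=true_rank, thresh=0.1, verbose=False)

# Report how close each low-rank estimate is to the ground truth.
rel_err = lambda est, ref: np.linalg.norm(est - ref) / np.linalg.norm(ref)
print("IALM  low-rank relative error:", rel_err(A, low_rank))
print("GoDec low-rank relative error:", rel_err(L, low_rank))
```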
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
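For reference, the optimisation problem that robust PCA methods of this kind target is usually written as the principal component pursuit programme

$$\min_{L,\,S}\ \|L\|_* + \lambda\,\|S\|_1 \quad \text{subject to} \quad L + S = X,$$

where $\|L\|_*$ is the nuclear norm (the sum of singular values, a convex surrogate for rank) and $\|S\|_1$ is the entrywise $\ell_1$ norm that promotes sparsity. The inexact augmented Lagrangian code above is one solver for this programme; note that its default `lmbda=0.01` differs from the $1/\sqrt{\max(m, n)}$ weighting commonly recommended in the literature, so the parameter may need adjusting per dataset.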
open2c/bioframe
docs/tutorials/tutorial_assign_motifs_to_peaks.ipynb
mit
[ "How to: assign TF Motifs to ChIP-seq peaks\nThis tutorial demonstrates one way to assign CTCF motifs to CTCF ChIP-seq peaks using bioframe.", "import bioframe\nimport numpy as np\nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom scipy.stats import pearsonr, spearmanr\n\nbase_dir = '/tmp/bioframe_tutorial_data/'\nassembly = 'GRCh38'", "Load CTCF ChIP-seq peaks for HFF from ENCODE\nThis approach makes use of the narrowPeak schema for bioframe.read_table .", "ctcf_peaks = bioframe.read_table(\"https://www.encodeproject.org/files/ENCFF401MQL/@@download/ENCFF401MQL.bed.gz\", schema='narrowPeak')\nctcf_peaks[0:5]", "Get CTCF motifs from JASPAR", "### CTCF motif: http://jaspar.genereg.net/matrix/MA0139.1/\njaspar_url = 'http://expdata.cmmt.ubc.ca/JASPAR/downloads/UCSC_tracks/2022/hg38/'\njaspar_motif_file = 'MA0139.1.tsv.gz'\nctcf_motifs = bioframe.read_table(jaspar_url+jaspar_motif_file,schema='jaspar',skiprows=1) \nctcf_motifs[0:4]", "Overlap peaks & motifs", "df_peaks_motifs = bioframe.overlap(ctcf_peaks,ctcf_motifs, suffixes=('_1','_2'), return_index=True)", "There are often multiple motifs overlapping one ChIP-seq peak, and a substantial number of peaks without motifs:", "# note that counting motifs per peak can also be handled directly with bioframe.count_overlaps\n# but since we re-use df_peaks_motifs below we instead use the pandas operations directly\nmotifs_per_peak = df_peaks_motifs.groupby([\"index_1\"])[\"index_2\"].count().values\n\nplt.hist(motifs_per_peak,np.arange(0,np.max(motifs_per_peak)))\nplt.xlabel('number of overlapping motifs per peak')\nplt.ylabel('number of peaks')\nplt.semilogy();\n\nprint(f'fraction of peaks without motifs {np.round(np.sum(motifs_per_peak==0)/len(motifs_per_peak),2)}')", "assign the strongest motif to each peak", "# since idxmax does not currently take NA, fill with -1\ndf_peaks_motifs['pval_2'] = df_peaks_motifs['pval_2'].fillna(-1) \nidxmax_peaks_motifs = df_peaks_motifs.groupby([\"chrom_1\", \"start_1\",\"end_1\"])[\"pval_2\"].idxmax().values\ndf_peaks_maxmotif = df_peaks_motifs.loc[idxmax_peaks_motifs]\ndf_peaks_maxmotif['pval_2'].replace(-1,np.nan,inplace=True)", "stronger peaks tend to have stronger motifs:", "plt.rcParams['font.size']=12\ndf_peaks_maxmotif['fc_1'] = df_peaks_maxmotif['fc_1'].values.astype('float')\nplt.scatter(df_peaks_maxmotif['fc_1'].values, \n df_peaks_maxmotif['pval_2'].values, 5, alpha=0.5,lw=0)\nplt.xlabel('ENCODE CTCF peak strength, fc')\nplt.ylabel('JASPAR CTCF motif strength \\n (-log10 pval *100)')\nplt.title('corr: '+str(np.round(df_peaks_maxmotif['fc_1'].corr(df_peaks_maxmotif['pval_2']),2)));", "We can also ask the reverse question: how many motifs overlap a ChIP-seq peak?", "df_motifs_peaks = bioframe.overlap(ctcf_motifs,ctcf_peaks,how='left', suffixes=('_1','_2'))\n\nm = df_motifs_peaks.sort_values('pval_1')\nplt.plot( m['pval_1'].values[::-1] ,\n np.cumsum(pd.isnull(m['chrom_2'].values[::-1])==0)/np.arange(1,len(m)+1))\nplt.xlabel('pval')\nplt.ylabel('probability motif overlaps a peak');\n", "filter peaks overlapping blacklisted regions\ndo any of our peaks overlap blacklisted genomic regions?", "blacklist = bioframe.read_table('https://www.encodeproject.org/files/ENCFF356LFX/@@download/ENCFF356LFX.bed.gz',\n schema='bed3')\nblacklist[0:3]", "there appears to be a small spike in the number of peaks close to blacklist regions", "closest_to_blacklist = bioframe.closest(ctcf_peaks,blacklist)\nplt.hist(closest_to_blacklist['distance'].astype('Float64').astype('float'),np.arange(0,1e4,100));", "to be 
safe, let's remove anything +/- 1kb from a blacklisted region", "# first let's select the columns we want for our final dataframe of peaks with motifs\ndf_peaks_maxmotif = df_peaks_maxmotif[\n ['chrom_1','start_1','end_1','fc_1',\n 'chrom_2','start_2','end_2','pval_2','strand_2']]\n# then rename columns for convenience when subtracting\nfor i in df_peaks_maxmotif.keys():\n if '_1' in i: df_peaks_maxmotif.rename(columns={i:i.split('_')[0]},inplace=True)\n\n# now subtract, expanding the blacklist by 1kb \ndf_peaks_maxmotif_clean = bioframe.subtract(df_peaks_maxmotif,bioframe.expand(blacklist,1000))", "there it is! we now have a dataframe of CTCF ChIP-seq peak positions and the strongest \nmotif underlying each peak, conservatively filtered for proximity to \nblacklisted regions", "df_peaks_maxmotif_clean.iloc[7:15]" ]
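The notebook mentions above that the motif-per-peak counts could also be obtained directly with `bioframe.count_overlaps`; the sketch below shows that alternative. It assumes `ctcf_peaks` and `ctcf_motifs` are the dataframes loaded earlier and that `count_overlaps` reports its result in a `count` column, which is its default behaviour in recent bioframe releases (check your installed version if the column name differs).

```python
# Alternative to the overlap + groupby counting above, using the helper that the
# notebook itself points to. Assumes the default output column name 'count'.
peaks_with_counts = bioframe.count_overlaps(ctcf_peaks, ctcf_motifs)

frac_without_motif = (peaks_with_counts['count'] == 0).mean()
print(f'fraction of peaks without motifs {round(frac_without_motif, 2)}')
```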
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
uliang/First-steps-with-the-Python-language
Day 1 - Unit 1.1.ipynb
mit
[ "from __future__ import print_function, division", "1. Your first steps with Python\n\n1.1 Introduction\nPython is a general purpose programming language. It is used extensively for scientific computing, data analytics and visualization, web development and software development. It has a wide user base and excellent library support. \nThere are many ways to use and interact with the Python language. The first way is to access it directly from the command prompt and calling python &lt;script&gt;.py. This runs a script written in Python and does whatever you have programmed the computer to do. But scripts have to be written and how do we actually write Python scripts? \nActually Python scripts are just .txt files. So you could just open a .txt file and write a script, saving the file with a .py extension. The downsides of this approach is obvious to anyone working with Windows. Usually, Python source code is written-not with Microsoft Word- but with and Integrated Development Environment. An IDE combines a text editor with a running Python console to test code and actually do work with Python without switching from one program to another. If you learnt the C, or C++ language, you will be familiar with Vim. Other popular IDE's for Python are Pycharm, Spyder and the Jupyter Notebook. \nIn this course, we will use the Jupyter Notebook as our IDE because of its ease of use ability to execute code cell by cell. It integrates with markdown so that one can annotate and document your code on the fly! All in all, it is an excellent tool for teaching and learning Python before one migrates to more advanced tools like Spyder for serious scripting and development work. \n1.2 Your best friends.\nIn order to get the most from Python, your best source of reference is the \nPython documentation. Getting good at Python is a matter using it regularly and familiarizing yourself with the keywords, constructs and commonly used idioms. \nLearn to use the Shift-Tab when coding. This activates a hovering tooltip that provides documentation for keywords, functions and even variables that you have declared in your environment. This convenient tooltip and be expanded into a pop-up window on your browser for easy reference. Use this often to reference function signatures, documentation and general help. \nJupyter notebook comes with Tab completion. This quality of life assists you in typing code by listing possible autocompletion options so that you don't have to type everything out! Use Tab completion as often as you can. This makes coding faster and less tedious. Tab completion also allows you to check out various methods on classes which comes in handy when learning a library for the first time (like matplotlib or seaborn). \nFinally ask Google. Once you have acquired enough \"vocabulary\", you can begin to query Google with your problem. And more often that not, somehow has experienced the same conundrum and left a message on Stackexchange. Browsing the solutions listed there is a powerful way to learn programming skills. \n1.3 The learning objectives for this unit\nThe learning objectives of this first unit are: \n\nGetting around the Jupyter notebook. \nLearning how to print(\"Hello world!\") \nUsing and coding with basic Python objects: int, str, float and bool. \nUsing the type function. \nWhat are variables and valid variable names.\nUsing the list object and list methods. \nLearning how to access items in list. Slicing and indexing. \n\n2. 
Getting around the Jupyter notebook\n\n2.1 Cells and colors, just remember, green is for go\nAll code is written in cells. Cells are where code blocks go. You execute a cell by pressing Shift-Enter or pressing the \"play\" button. Or you could just click on the drop down menu and select \"Run cell\" but who would want to do that! \nIn general, cells have two uses: One for writing \"live\" Python code which can be executed and one more to write documentation using markdown. To toggle between the two cell types, press Escape to exit from \"edit\" mode. The edges of the cell should turn blue. Now you are in \"command\" mode. Escape actually activates \"command\" mode. Enter activates \"edit\" mode. With the cell border coloured blue, press M to enter into markdown mode. You should see the In [ ]: prompt dissappear. Press Enter to change the border to green. This means you can now \"edit\" markdown. How does one change from markdown to a live coding cell? In \"command\" mode (remember blue border) press Y. Now the cell is \"hot\". When you Shift-Enter, you will execute code. If you happen to write markdown when in a \"coding\" cell, the Python kernel will shout at you. (Means raise an error message)\n2.1.1 Practise makes perfect\nNow its time for you to try. In the cell below, try switching to Markdown. Press Enter to activate \"edit\" mode and type some text in the cell. Press Shift-Enter and you should see the output rendered in html. Note that this is not coding yet", "# change this cell into a Markdown cell. Then type something here and execute it (Shift-Enter)", "2.2 Your first script\nIt is a time honoured tradition that your very first program should be to print \"Hello world!\" How is this achieved in Python?", "'''Make sure you are in \"edit\" mode and that this cell is for Coding ( You should see the In [ ]:)\non the left of the cell. '''\n\nprint(\"Hello world!\")", "Notice that Hello world! is printed at the bottom of the cell as an output. In general, this is how output of a python code is displayed to you. \nprint is a special function in Python. It's purpose is to display output to the console. Notice that we pass an argument-in this case a string \"Hello world!\"- to the function. All arguments passed to the function must be enclosed in round brackets and this signals to the Python interpreter to execute a function named print with the argument \"Hello world!\". \n2.2.1 Self introductions\nYour next exercise is to print your own name to the console. Remember to enclose your name in \" \" or ' '", "# print your name in this cell. ", "2.3 Commenting\nCommenting is a way to annotate and document code. There are two ways to do this: Inline using the # character or by using ''' &lt;documentation block&gt; ''', the latter being multi-line and hence used mainly for documenting functions or classes. Comments enclosed using ''' '''' style commenting are actually registed in Jupyter notebook and can be accessed from the Shift-Tab tooltip! \nOne should use # style commenting very sparingly. By right, code should be clear enough that # inline comments are not needed. \nHowever, # has a very important function. It is used for debugging and trouble-shooting. This is because commented code sections are never executed when you execute a cell (Shift-Enter) \n3. Python's building blocks\n\nPython is an Object Oriented Programming language. That means to all of python is made out of objects which are instances of classes. 
The main point here is that I am going to introduce 4 basic objects of Python which form the backbone of any program or script. \n\nIntegers or int. \nStrings or str. You've met one of these: \"Hello world!\". For those who know about character encoding, it is highly encouraged to code Python with UTF-8 encoding. \nFloat or float. Basically the computer version of real numbers. \nBooleans or bool. In Python, true and false are indicated by the reserved keywords True and False. Take note of the capitalized first letter.\n\n3.1 Numbers\nYou can't call yourself a scientific computing language without the ability to deal with numbers. The basic arithmetic operations for numbers are exactly as you expect it to be", "# Addition \n5+3\n\n# Subtraction\n8-9\n\n# Multiplication \n3*12\n\n# Division\n48/12", "Note the floating point answer. In previous versions of Python, / meant floor division. This is no longer the case in Python 3", "# Exponentiation. Limited precision though! \n\n16**0.5\n\n# Residue class modulo n \n\n5%2", "In the above 5%2 means return me the remainder after 5 is divided by 2 (which is indeed 1). \n3.1.1 Precedence\nA note on arithmetic precedence. As one expects, () have the highest precedence, following by * and /. Addition and subtraction have the lowest precedence.", "# Guess the output before executing this cell. Come on, don't cheat! \n\n6%(1+3)", "It is interesting to note that the % operator is not distributive. \n3.1.2 Variables\nIn general, one does not have to declare variables in python before using it. We merely need to assign numbers to variables. In the computer, this means that a certain place in memory has been allocated to store that particular number. Assignment to variables is executed by the = operator. The equal sign in Python is the binary comparison == operator. \nPython is case sensitive. So a variable name A is different from a. Variables cannot begin with numbers and cannot have empty spaces between them. So my variable is not a valid variable. Usually what is done is to write my_variable\nAfter assigning numbers to variables, the variable can be used to represent the number in any arithmetic operation.", "# Assignment \n\nx=1\ny=2\n\nx+y\n\nx/y", "Notice that after assignment, I can access the variables in a different cell. However, if you reassign a variable to a different number, the old values for that variable are overwritten.", "x=5\nx+y-2", "Now try clicking back to the cell x+y and re-executing it. What do you the answer will be? \nEven though that cell was above our reassignment cell, nevertheless re-executing that cell means executing that block of code that the latest values for that variable. It is for this reason that one must be very careful with the order of execution of code blocks. In order to help us keep track of the order of execution, each cell has a counter next to it. Notice the In [n]. Higher values of n indicates more recent executions. \nVariables can also be reassigned", "# For example \n\nx = x+1\nprint(x)", "So what happened here? Well, if we recall x originally was assigned 5. Therefore x+1 would give us 6. This value is then reassigned to the exact same location in memory represented by the variable x. So now that piece of memory contains the value 6. We then use the print function to display the content of x. 
\nAs this is a often used pattern, Python has a convenience syntax for this kind assignment", "# reset x to 5 \n\nx=5 \nx += 1\nprint(x)\n\nx = 5 \n\n#What do you think the values of x will be for x -= 1, x *= 2 or x /= 2? \n# Test it out in the space below\n\n\n\nprint(x)", "3.1.3 Floating point precision\nAll of the above applies equally to floating point numbers (or real numbers). However, we must be mindful of floating point precision.", "0.1+0.2", "The following exerpt from the Python documentation explains what is happening quite clearly. \nTo be fair, even our decimal system is inadequate to represent rational numbers like 1/3, 1/11 and so on. \n3.2 Strings\nStrings are basically text. These are enclosed in ' ' or \" \". The reason for having two ways of denoting strings is because we may need to nest a string within a string like in 'The quick brown fox \"jumped\" over the lazy old dog'. This is especially useful when setting up database queries and the like.", "# Noting the difference between printing quoted variables (strings) and printing the variable itself.\nx = 5\n\nprint(x)\nprint('x')", "In the second print function, the text 'x' is printed while in the first print function, it is the contents of x which is printed to the console. \n3.2.1 String formatting\nStrings can be assigned to variables just like numbers. And these can be recalled in a print function.", "my_name = 'Tang U-Liang'\nprint(my_name)\n\n# String formatting: Using the % \nage = 35\nprint('Hello doctor, my name is %s. I am %d years old. I weigh %.1f kg' % (my_name, age, 70.25))\n\n# or using .format method\n\nprint(\"Hi, I'm {name}. Please register {name} for this conference\".format(name=my_name))", "When using % to indicate string substitution, take note of the common formatting \"placeholders\" \n\n%s to substitue strings. \n%d for printing integer substitutions\n%.1f means to print a floating point number up to 1 decimal place. Note that there is no rounding\n\nThe utility of the .format method arises when the same string needs to printed in various places in a larger body of text. This avoids duplicating code. Also did you notice I used double quotation. Why? \nMore about string formats can be found in this excellent blog post \n3.2.2 Weaving strings into one beautiful tapestry of text\nBesides the .format and % operation on text, we can concatenate strings using + operator. However, strings cannot be changed once declared and assigned to variables. This property is called immutability", "fruit = 'Apple'\ndrink = 'juice'\n\nprint(fruit+drink) # concatenation \n\n#Don't like the lack of spacing between words? \n\nprint(fruit+' '+drink)", "Use [] to access specific letters in the string. Python uses 0 indexing. So the first letter is accessed by my_string[0] while my_string[1] accesses the second letter.", "print(fruit[0])\nprint(fruit[1])", "Slicing is a way of get specific subsets of the string. If you let $x_n$ denote the $n+1$-th letter (note zero indexing) in a string (and by letter this includes whitespace characters as well!) then writing my_string[i:j] returns a subset $$x_i, x_{i+1}, \\ldots, x_{j-1}$$ of letters in a string. That means the slice [i:j] takes all subsets of letters starting from index i and stops one index before the index indicated by j. \n0 indexing and stopping point convention frequently trips up first time users. So take special note of this convention. 
0 indexing is used throughout Python especially in matplotlib and pandas.", "favourite_drink = fruit+' '+drink\nprint(\"Printing the first to 3rd letter.\") \nprint(favourite_drink[0:3])\nprint(\"\\nNow I want to print the second to seventh letter:\")\nprint(favourite_drink[1:7])", "Notice the use of \\n in the second print function. This is called a newline character which does exactly what its name says. Also in the third print function notice the separation between e and j. It is actually not separated. The sixth letter is a whitespace character ' '. \nSlicing also utilizes arithmetic progressions to return even more specific subsets of strings. So [i:j:k] means that the slice will return $$ x_{i}, x_{i+k}, x_{i+2k}, \\ldots, x_{i+mk}$$ where $m$ is the largest (resp. smallest) integer such that $i+mk \\leq j-1$ (resp. $i+mk \\geq j+1$ if $i\\geq j$)", "print(favourite_drink[0:7:2])\n\n# Here's a trick, try this out\nprint(favourite_drink[3:0:-1])", "So what happened above? Well [3:0:-1] means that starting from the 4-th letter $x_3$ which is 'l' return a substring including $x_{2}, x_{1}$ as well. Note that the progression does not include $x_0 =$ 'A' because the stopping point is non-inclusive of j. \nThe slice [:j] or [i:] means take substrings starting from the beginning up to the $j$-th letter (i.e. the $x_{j-1}$ letter) and substring starting from the $i+1$-th (i.e. the $x_{i}$) letter to the end of the string.\n3.2.3 A mini challenge\nPrint the string favourite_drink in reverse order. How would you do it?", "# Write your answer here and check it with the output below \n\n\n", "Answer: eciuj elppA\n3.3 The type function\nAll objects in Python are instances of classes. It is useful sometimes to find out what type of object we are looking at, especially if it has been assigned to a variable. For this we use the type function.", "x = 5.0\ntype(x)\n\ntype(favourite_drink)\n\ntype(True)\n\ntype(500)", "4. list, here's where the magic begins\n\nLists are the fundamental data structure in Python. These are analogous to arrays in C or Java. If you use R, lists are analogous to vectors (and not R's list)\nDeclaring a list is as simple as using square brackets [ ] to enclose a list of objects (or variables) separated by commas.", "# Here's a list called staff containing his name, his age and current remuneration \n\nstaff = ['Andy', 28, 980.15]", "4.1 Properties of list objects and indexing\nOne of the fundamental properties we can ask about lists is how many objects they contain. We use the len (short for length) function to do that.", "len(staff)", "Perhaps you want to recover that staff member's name. It's in the first position of the list.", "staff[0] ", "Notice that Python still outputs to console even though we did not use the print function. Actually the print function prints a particularly \"nice\" string representation of the object, which is why Andy is printed without the quotation marks if print was used. \nCan you find me Andy's age now?", "# type your answer here and run the cell \n\n", "The same slicing rules for strings apply to lists as well. If we wanted Andy's age and wage, we would type staff[1:3]", "staff[1:3]", "This returns us a sub-list containing Andy's age and remuneration. \n4.2 Nested lists\nLists can also contain other lists. This ability to have a nested structure in lists gives them flexibility.", "nested_list = ['apples', 'banana', [1.50, 0.40]]", "Notice that if I type nested_list[2], Python will return me the list [1.50, 0.40]. 
This can be accessed again using indexing (or slicing notation) [ ].", "# Accesing items from within a nested list structure. \n\nprint(nested_list[2])\n\n# Assigning nested_list[2] to a variable. The variable price represents a list\n\nprice = nested_list[2]\nprint(type(price))\n\n# Getting the smaller of the two floats \nprint(nested_list[2][1]) \n", "4.3 List methods\nRight now, let us look at four very useful list methods. Methods are basically operations which modify lists. These are: \n\npop which allows us to remove an item in a list. \n\nSo for example if $x_0, x_1, \\ldots, x_n$ are items in a list, calling my_list.pop(r) will modify the list so that it contains only $$x_0, \\ldots, x_{r-1}, x_{r+1},\\ldots, x_n$$ while returning the element $x_r$. \n\nappend which adds items to the end of the list. \n\nLet's say $x_{n+1}$ is the new object you wish to append to the end of the list. Calling the method my_list.append(x_n+1) will modify the list inplace so that the list will now contain $$x_0, \\ldots, x_n, x_{n+1}$$ Note that append does not return any output! \n\ninsert which as the name suggests, allows us to add items to a list in a particular index location\n\nWhen using this, type my_list.insert(r, x_{n+1}) with the second argument to the method the object you wish to insert and r the position (still 0 indexed) where this object ought to go in that list. This method modifies the list inplace and does not return any output. After calling the insert method, the list now contains $$x_0,\\ldots, x_{r-1}, x_{n+1}, x_{r}, \\ldots, x_n$$ This means that my_list[r] = $x_{n+1}$ while my_list[r+1] = $x_{r}$\n\n+ is used to concatenate two lists. If you have two lists and want to join them together producing a union of two (or more lists), use this binary operator. \n\nThis works by returning a union of two lists. So $$[ x_1,\\ldots, x_n] + [y_1,\\ldots, y_m]$$ is the list containing $$ x_1,\\ldots, x_n,y_1, \\ldots, y_m$$ This change is not permanent unless you assign the result of the operation to another variable.", "# append \n\nstaff.append('Finance')\nprint(staff)\n\n# pop away the information about his salary\n\nandys_salary = staff.pop(2)\nprint(andys_salary)\nprint(staff)\n\n# oops, made a mistake, I want to reinsert information about his salary\n\nstaff.insert(3, andys_salary)\nprint(staff)\n\ncontacts = [99993535, \"andy@company.com\"]\n\nstaff = staff+contacts # reassignment of the concatenated list back to staff\nprint(staff)", "4.3.1 Your first programming challenge\nMove information for Andy's email to the second position (i.e. index 1) in the list staff in one line of code", "staff = ['Andy', 28, 'Finance', 980.15, 99993535, 'andy@company.com']\n\nstaff\n\n# type your answer here\n\n\nprint(staff)", "Answer: ['Andy', 'andy@company.com', 28, 'Finance', 980.15, 99993535]\nConcluding remarks\n\nObviously there are much, much more that can be said about lists. But we have to move on. In the next unit, we will learn how to control program flow with for and if and a new data structure called dictionaries." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
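The tutorial above states the expected outputs for its two mini challenges (reversing `favourite_drink`, and moving Andy's email to index 1 in one line) but leaves the one-liners to the reader. Here is a hedged sketch of one possible way to solve both, using only the slicing and list methods introduced in the notebook; it is not the only valid answer.

```python
# One possible solution to the two mini challenges above (a sketch, not the only answer).

# Challenge 1: print favourite_drink in reverse order.
favourite_drink = 'Apple juice'
print(favourite_drink[::-1])    # a step of -1 walks the string backwards -> 'eciuj elppA'

# Challenge 2: move Andy's email to index 1 in one line.
staff = ['Andy', 28, 'Finance', 980.15, 99993535, 'andy@company.com']
staff.insert(1, staff.pop(-1))  # pop returns the email, insert places it at position 1
print(staff)                    # ['Andy', 'andy@company.com', 28, 'Finance', 980.15, 99993535]
```

Both lines reproduce exactly the "Answer" outputs given in the notebook.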
saashimi/code_guild
interactive-coding-challenges/stacks_queues/sort_stack/sort_stack_challenge.ipynb
mit
[ "<small><i>This notebook was prepared by Donne Martin. Source and license info is on GitHub.</i></small>\nChallenge Notebook\nProblem: Sort a stack. You can use another stack as a buffer.\n\nConstraints\nTest Cases\nAlgorithm\nCode\nUnit Test\nSolution Notebook\n\nConstraints\n\nWhen sorted, should the largest element be at the top or bottom?\nTop\n\n\nCan you have duplicate values like 5, 5?\nYes\n\n\nCan we assume we already have a stack class that can be used for this problem?\nYes\n\n\n\nTest Cases\n\nEmpty stack -> None\nOne element stack\nTwo or more element stack (general case)\nAlready sorted stack\n\nAlgorithm\nRefer to the Solution Notebook. If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.\nCode", "%run ../stack/stack.py\n%load ../stack/stack.py\n\nclass MyStack(Stack):\n\n def sort(self):\n # TODO: Implement me\n pass", "Unit Test\nThe following unit test is expected to fail until you solve the challenge.", "# %load test_sort_stack.py\nfrom random import randint\nfrom nose.tools import assert_equal\n\n\nclass TestSortStack(object):\n\n def get_sorted_stack(self, numbers):\n stack = MyStack()\n for x in numbers:\n stack.push(x)\n sorted_stack = stack.sort()\n return sorted_stack\n\n def test_sort_stack(self):\n print('Test: Empty stack')\n sorted_stack = self.get_sorted_stack([])\n assert_equal(sorted_stack.pop(), None)\n\n print('Test: One element stack')\n sorted_stack = self.get_sorted_stack([1])\n assert_equal(sorted_stack.pop(), 1)\n\n print('Test: Two or more element stack (general case)')\n num_items = 10\n numbers = [randint(0, 10) for x in range(num_items)]\n sorted_stack = self.get_sorted_stack(numbers)\n sorted_numbers = []\n for _ in range(num_items):\n sorted_numbers.append(sorted_stack.pop())\n assert_equal(sorted_numbers, sorted(numbers, reverse=True))\n\n print('Success: test_sort_stack')\n\n\ndef main():\n test = TestSortStack()\n test.test_sort_stack()\n\n\nif __name__ == '__main__':\n main()", "Solution Notebook\nReview the Solution Notebook for a discussion on algorithms and code solutions." ]
[ "markdown", "code", "markdown", "code", "markdown" ]
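The challenge notebook above deliberately leaves `MyStack.sort` unimplemented. As a hedged illustration of the standard "sort with one buffer stack" idea (not necessarily the approach taken in the course's solution notebook), here is a sketch using plain Python lists as stand-in stacks, since the course's `Stack` class is not shown here.

```python
def sort_stack(stack):
    """Sort a stack (a list used as a stack, top = end of list) so that the
    largest element ends up on top, using a single buffer stack."""
    buffer = []
    while stack:
        temp = stack.pop()
        # Move back every buffered element larger than temp, so that temp is
        # inserted above only smaller-or-equal values.
        while buffer and buffer[-1] > temp:
            stack.append(buffer.pop())
        buffer.append(temp)
    return buffer  # bottom = smallest, top = largest

print(sort_stack([3, 1, 4, 1, 5]))  # [1, 1, 3, 4, 5] -> popping yields 5, 4, 3, 1, 1
```

Popping the returned stack yields the elements in descending order, which is what the unit test above compares against `sorted(numbers, reverse=True)`.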
rhiever/scipy_2015_sklearn_tutorial
notebooks/03.2 Methods - Unsupervised Preprocessing.ipynb
cc0-1.0
[ "Example from Image Processing", "%matplotlib inline\nimport matplotlib.pyplot as plt", "Using PCA to extract features\nNow we'll take a look at unsupervised learning on a facial recognition example.\nThis uses a dataset available within scikit-learn consisting of a\nsubset of the Labeled Faces in the Wild\ndata. Note that this is a relatively large download (~200MB) so it may\ntake a while to execute.", "from sklearn import datasets\nlfw_people = datasets.fetch_lfw_people(min_faces_per_person=70, resize=0.4,\n data_home='datasets')\nlfw_people.data.shape", "Let's visualize these faces to see what we're working with:", "fig = plt.figure(figsize=(8, 6))\n# plot several images\nfor i in range(15):\n ax = fig.add_subplot(3, 5, i + 1, xticks=[], yticks=[])\n ax.imshow(lfw_people.images[i], cmap=plt.cm.bone)", "We'll do a typical train-test split on the images before performing unsupervised learning:", "from sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(lfw_people.data, lfw_people.target, random_state=0)\n\nprint(X_train.shape, X_test.shape)", "Feature Reduction Using Principal Component Analysis\nWe can use PCA to reduce the original 1850 features of the face images to a manageable\nsize, while maintaining most of the information in the dataset. Here it is useful to use a variant\nof PCA called RandomizedPCA, which is an approximation of PCA that can be much faster for large\ndatasets.", "from sklearn import decomposition\npca = decomposition.RandomizedPCA(n_components=150, whiten=True)\npca.fit(X_train)", "One interesting part of PCA is that it computes the \"mean\" face, which can be\ninteresting to examine:", "plt.imshow(pca.mean_.reshape((50, 37)), cmap=plt.cm.bone)", "The principal components measure deviations about this mean along orthogonal axes.\nIt is also interesting to visualize these principal components:", "print(pca.components_.shape)\n\nfig = plt.figure(figsize=(16, 6))\nfor i in range(30):\n ax = fig.add_subplot(3, 10, i + 1, xticks=[], yticks=[])\n ax.imshow(pca.components_[i].reshape((50, 37)), cmap=plt.cm.bone)", "The components (\"eigenfaces\") are ordered by their importance from top-left to bottom-right.\nWe see that the first few components seem to primarily take care of lighting\nconditions; the remaining components pull out certain identifying features:\nthe nose, eyes, eyebrows, etc.\nWith this projection computed, we can now project our original training\nand test data onto the PCA basis:", "X_train_pca = pca.transform(X_train)\nX_test_pca = pca.transform(X_test)\n\nprint(X_train_pca.shape)\nprint(X_test_pca.shape)", "These projected components correspond to factors in a linear combination of\ncomponent images such that the combination approaches the original face." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
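The eigenfaces notebook above uses `sklearn.cross_validation` and `RandomizedPCA`, both of which were removed in later scikit-learn releases (the randomized solver now lives behind `PCA(svd_solver='randomized')`, and the split helper moved to `sklearn.model_selection`). A minimal hedged sketch of the equivalent calls on a current scikit-learn, using synthetic data of the same width as the LFW images as a stand-in:

```python
# Minimal sketch of the same steps with the post-0.18 scikit-learn API.
# The random matrix stands in for lfw_people.data; shapes are illustrative.
import numpy as np
from sklearn.model_selection import train_test_split   # replaces sklearn.cross_validation
from sklearn.decomposition import PCA                   # RandomizedPCA was folded into PCA

X = np.random.RandomState(0).rand(400, 1850)
X_train, X_test = train_test_split(X, random_state=0)

pca = PCA(n_components=150, whiten=True, svd_solver='randomized')
pca.fit(X_train)
X_train_pca = pca.transform(X_train)
print(X_train_pca.shape)   # (300, 150)
```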
mne-tools/mne-tools.github.io
0.18/_downloads/a35e576fa66929a73782579dc334f91a/plot_time_frequency_mixed_norm_inverse.ipynb
bsd-3-clause
[ "%matplotlib inline", "Compute MxNE with time-frequency sparse prior\nThe TF-MxNE solver is a distributed inverse method (like dSPM or sLORETA)\nthat promotes focal (sparse) sources (such as dipole fitting techniques)\n[1] [2]. The benefit of this approach is that:\n\nit is spatio-temporal without assuming stationarity (sources properties\n can vary over time)\nactivations are localized in space, time and frequency in one step.\nwith a built-in filtering process based on a short time Fourier\n transform (STFT), data does not need to be low passed (just high pass\n to make the signals zero mean).\nthe solver solves a convex optimization problem, hence cannot be\n trapped in local minima.\n\nReferences\n.. [1] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski\n \"Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with\n non-stationary source activations\",\n Neuroimage, Volume 70, pp. 410-422, 15 April 2013.\n DOI: 10.1016/j.neuroimage.2012.12.051\n.. [2] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski\n \"Functional Brain Imaging with M/EEG Using Structured Sparsity in\n Time-Frequency Dictionaries\",\n Proceedings Information Processing in Medical Imaging\n Lecture Notes in Computer Science, Volume 6801/2011, pp. 600-611, 2011.\n DOI: 10.1007/978-3-642-22092-0_49", "# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>\n# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>\n#\n# License: BSD (3-clause)\n\nimport numpy as np\n\nimport mne\nfrom mne.datasets import sample\nfrom mne.minimum_norm import make_inverse_operator, apply_inverse\nfrom mne.inverse_sparse import tf_mixed_norm, make_stc_from_dipoles\nfrom mne.viz import (plot_sparse_source_estimates,\n plot_dipole_locations, plot_dipole_amplitudes)\n\nprint(__doc__)\n\ndata_path = sample.data_path()\nsubjects_dir = data_path + '/subjects'\nfwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'\nave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'\ncov_fname = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'\n\n# Read noise covariance matrix\ncov = mne.read_cov(cov_fname)\n\n# Handling average file\ncondition = 'Left visual'\nevoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))\nevoked = mne.pick_channels_evoked(evoked)\n# We make the window slightly larger than what you'll eventually be interested\n# in ([-0.05, 0.3]) to avoid edge effects.\nevoked.crop(tmin=-0.1, tmax=0.4)\n\n# Handling forward solution\nforward = mne.read_forward_solution(fwd_fname)", "Run solver", "# alpha parameter is between 0 and 100 (100 gives 0 active source)\nalpha = 40. # general regularization parameter\n# l1_ratio parameter between 0 and 1 promotes temporal smoothness\n# (0 means no temporal regularization)\nl1_ratio = 0.03 # temporal regularization parameter\n\nloose, depth = 0.2, 0.9 # loose orientation & depth weighting\n\n# Compute dSPM solution to be used as weights in MxNE\ninverse_operator = make_inverse_operator(evoked.info, forward, cov,\n loose=loose, depth=depth)\nstc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. 
/ 9.,\n method='dSPM')\n\n# Compute TF-MxNE inverse solution with dipole output\ndipoles, residual = tf_mixed_norm(\n evoked, forward, cov, alpha=alpha, l1_ratio=l1_ratio, loose=loose,\n depth=depth, maxit=200, tol=1e-6, weights=stc_dspm, weights_min=8.,\n debias=True, wsize=16, tstep=4, window=0.05, return_as_dipoles=True,\n return_residual=True)\n\n# Crop to remove edges\nfor dip in dipoles:\n dip.crop(tmin=-0.05, tmax=0.3)\nevoked.crop(tmin=-0.05, tmax=0.3)\nresidual.crop(tmin=-0.05, tmax=0.3)", "Plot dipole activations", "plot_dipole_amplitudes(dipoles)\n\n# Plot dipole location of the strongest dipole with MRI slices\nidx = np.argmax([np.max(np.abs(dip.amplitude)) for dip in dipoles])\nplot_dipole_locations(dipoles[idx], forward['mri_head_t'], 'sample',\n subjects_dir=subjects_dir, mode='orthoview',\n idx='amplitude')\n\n# # Plot dipole locations of all dipoles with MRI slices\n# for dip in dipoles:\n# plot_dipole_locations(dip, forward['mri_head_t'], 'sample',\n# subjects_dir=subjects_dir, mode='orthoview',\n# idx='amplitude')", "Show the evoked response and the residual for gradiometers", "ylim = dict(grad=[-120, 120])\nevoked.pick_types(meg='grad', exclude='bads')\nevoked.plot(titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim,\n proj=True, time_unit='s')\n\nresidual.pick_types(meg='grad', exclude='bads')\nresidual.plot(titles=dict(grad='Residuals: Gradiometers'), ylim=ylim,\n proj=True, time_unit='s')", "Generate stc from dipoles", "stc = make_stc_from_dipoles(dipoles, forward['src'])", "View in 2D and 3D (\"glass\" brain like 3D plot)", "plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),\n opacity=0.1, fig_name=\"TF-MxNE (cond %s)\"\n % condition, modes=['sphere'], scale_factors=[1.])\n\ntime_label = 'TF-MxNE time=%0.2f ms'\nclim = dict(kind='value', lims=[10e-9, 15e-9, 20e-9])\nbrain = stc.plot('sample', 'inflated', 'rh', views='medial',\n clim=clim, time_label=time_label, smoothing_steps=5,\n subjects_dir=subjects_dir, initial_time=150, time_unit='ms')\nbrain.add_label(\"V1\", color=\"yellow\", scalar_thresh=.5, borders=True)\nbrain.add_label(\"V2\", color=\"red\", scalar_thresh=.5, borders=True)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
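The TF-MxNE example above fixes `alpha=40` and `l1_ratio=0.03` by hand, while noting that `alpha` runs from 0 (no regularization) to 100 (no active source). One natural, if slow, extension is to sweep `alpha` and compare how much of the evoked signal each solution leaves in the residual. This is only an illustrative sketch, not part of the original example: it assumes it is run in place of the single solver call above (before the final cropping/channel picking), reuses only objects created there, and uses a rough variance ratio as a diagnostic.

```python
# Hedged sketch: sweep the regularization strength and report the fraction of
# evoked signal variance left in the residual (smaller = more signal explained).
import numpy as np

for alpha in (20., 40., 60.):
    _, res = tf_mixed_norm(
        evoked, forward, cov, alpha=alpha, l1_ratio=0.03, loose=loose,
        depth=depth, maxit=200, tol=1e-6, weights=stc_dspm, weights_min=8.,
        debias=True, wsize=16, tstep=4, window=0.05,
        return_as_dipoles=True, return_residual=True)
    unexplained = np.sum(res.data ** 2) / np.sum(evoked.data ** 2)
    print("alpha=%g: %.1f%% of the variance left unexplained"
          % (alpha, 100 * unexplained))
```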
phanrahan/magmathon
notebooks/tutorial/icestick/TFF.ipynb
mit
[ "Toggle Flip-Flop\nIn this example we create a toggle flip-flop (TFF) from a d-flip-flop (DFF) and an xor gate. In Magma, finite state machines can be constructed by composing combinational logic with register primitives, such as a DFF or Register.", "import magma as m\nm.set_mantle_target(\"ice40\")", "As before, we can use a native Python function to organize the definition of our TFF into a reusable component.", "from mantle import DFF\n\nclass TFF(m.Circuit):\n IO = ['O', m.Out(m.Bit)] + m.ClockInterface()\n @classmethod\n def definition(io):\n # instance a dff to hold the state of the toggle flip-flop - this needs to be done first\n dff = DFF()\n # compute the next state as the not of the old state ff.O\n io.O <= dff(~dff.O)\n \ndef tff():\n return TFF()()", "Then we simply call this function inside our definition of the IceStick main.", "from loam.boards.icestick import IceStick\n\nicestick = IceStick()\nicestick.Clock.on()\nicestick.J3[0].rename('J3').output().on()\n\nmain = icestick.DefineMain()\nmain.J3 <= tff()\nm.EndDefine()", "We'll compile and build our program using the standard flow.", "m.compile(\"build/tff\", main)\n\n%%bash\ncd build\nyosys -q -p 'synth_ice40 -top main -blif tff.blif' tff.v\narachne-pnr -q -d 1k -o tff.txt -p tff.pcf tff.blif \nicepack tff.txt tff.bin\n#iceprog tff.bin", "Let's inspect the generated verilog.", "%cat build/tff.v", "We can verify our implementation is function correctly by using a logic analyzer.", "%cat build/tff.pcf", "" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
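Since the hardware TFF above simply feeds the inverse of the stored bit back into the DFF, its behaviour can be illustrated with a few lines of ordinary Python before committing to the FPGA flow. This is only a conceptual software model of the state machine; it assumes nothing about the magma/mantle APIs beyond the wiring shown above and is not part of the magma toolchain.

```python
# Conceptual software model of the toggle flip-flop: on every clock tick the
# stored bit is replaced by its inverse, so the output alternates 0/1.
class SoftTFF:
    def __init__(self):
        self.state = 0                  # models the DFF contents

    def tick(self):
        self.state = 1 - self.state     # models io.O <= dff(~dff.O)
        return self.state

tff_model = SoftTFF()
print([tff_model.tick() for _ in range(8)])   # [1, 0, 1, 0, 1, 0, 1, 0]
```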
chaitra8/ml_lab_ecsc_306
labwork/lab2/sci-learn/non_linear_regression.ipynb
apache-2.0
[ "%matplotlib inline", "===================================================================\nSupport Vector Regression (SVR) using linear and non-linear kernels\n===================================================================\nToy example of 1D regression using linear, polynomial and RBF kernels.", "print(__doc__)\n\nimport numpy as np\nfrom sklearn.svm import SVR\nimport matplotlib.pyplot as plt", "Generate sample data", "X = np.sort(5 * np.random.rand(40, 1), axis=0)\ny = np.sin(X).ravel()", "Add noise to targets", "y[::5] += 3 * (0.5 - np.random.rand(8))", "Fit regression model", "svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)\nsvr_lin = SVR(kernel='linear', C=1e3)\nsvr_poly = SVR(kernel='poly', C=1e3, degree=2)\ny_rbf = svr_rbf.fit(X, y).predict(X)\ny_lin = svr_lin.fit(X, y).predict(X)\ny_poly = svr_poly.fit(X, y).predict(X)", "look at the results", "lw = 2\nplt.scatter(X, y, color='darkorange', label='data')\nplt.hold('on')\nplt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')\nplt.plot(X, y_lin, color='c', lw=lw, label='Linear model')\nplt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')\nplt.xlabel('data')\nplt.ylabel('target')\nplt.title('Support Vector Regression')\nplt.legend()\nplt.show()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
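Two hedged notes on the SVR notebook above: `plt.hold('on')` was deprecated and later removed from matplotlib (axes retain artists by default, so the line can simply be dropped on recent releases), and the three kernel fits can be compared numerically rather than only by eye. The sketch below assumes the notebook's cells have been run so that `y`, `y_rbf`, `y_lin` and `y_poly` are in scope.

```python
# Hedged sketch: score the three kernel fits numerically.
from sklearn.metrics import mean_squared_error, r2_score

for name, pred in [('RBF', y_rbf), ('Linear', y_lin), ('Polynomial', y_poly)]:
    print('%-10s MSE=%.3f  R^2=%.3f'
          % (name, mean_squared_error(y, pred), r2_score(y, pred)))
```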
RaspberryJamBe/ipython-notebooks
notebooks/nl-be/101 - Intro - Python leren kennen en IPython gebruiken.ipynb
cc0-1.0
[ "Hm, zullen we maar beginnen?\nDeze applicatie heet IPython en je kan er Python code mee uitvoeren (Python is een soort programmeertaal; een manier om aan een computer uit te leggen wat die computer voor jou moet doen).\nSelecteer de cel met de som hieronder door erin te klikken (er komt dan een groene kader omheen staan) en druk op Shift+Enter of op de Play (<i class=\"fa-play fa\"></i>) knop hierboven om de code in de cel uit te voeren:", "5+11", "De code is georganiseerd in cellen en als je wil kan je kan de code in de cellen aanpassen en opnieuw uitvoeren. Pas hierboven de som aan en voer ze opnieuw uit.\n... doe maar, ik wacht hier even ...\n\"Pffff, dat kan ik ook met eender welke rekenmachine\"\nKlopt, maar dit is nog maar het begin; laat ons eens iets anders proberen:", "print(\"Hallo allemaal!\")", "IPython zal, als het commando een resultaat geeft, deze \"output\" onder de cell uitprinten.\nEn als je iets vergeet of fout typt, wordt-ie boos:", "print(\"Dit lukt dus niet\"", "Er wordt dan door Python geprobeerd om uit te leggen wat er mis gaat, maar dat is niet altijd 100% duidelijk. Kan je uitvissen wat er hierboven misloopt?\n-Tip: het \"Hallo allemaal\" commando kan misschien helpen, het is maar een kleine vergetelheid, maar een computer kan van iets dergelijks helemaal in de war raken-\nOK, wat kan Python nog?\nDingen onthouden\ndaar zijn computers goed in (zolang je de stekker niet uittrekt...)\nEen variabele noemen we dat. Die moet je wel een naam geven, anders vind je ze niet meer terug:", "a = 'Dit is een tekst' # tekst moet je tussen aanhalingstekens '...' zetten\na = \"Dit is een tekst\" # maar het mogen ook dubbele aanhalingstekens \"...\" zijn (als je ze maar niet door mekaar haalt)\n# oh, ja en alles wat achter een # staat is commentaar, dat slaat Python gewoon over\n\nb = 13\nc = 273.15 # voor decimale cijfers, geen komma's, maar punten!", "Zie, je geen resultaat, dus IPython print niets uit, maar de variabelen zitten wel in het geheugen, kijk maar:", "print(a, b, c)", "Methodes en \"dot notation\" (punt notatie)\nsommige \"dingen\" of objecten die je in Python gebruikt krijgen een soort superpowers mee in de vorm van methodes die je kan aanroepen.\nDit doe je door een punt achter het object te zetten en dan de methode te typen (opgelet, voor het aanroepen van een functie moet je altijd haakjes achter de functienaam zetten, desnoods zonder iets tussen):", "# bvb een tekst in hoofdletters omzetten:\na.upper()", "Door na het punt op de <TAB> toets te drukken, zal IPython een lijst van beschikbare methodes laten zien; zet je cursor achter het punt en type <TAB> om het uit te proberen:", "a.", "Lijstjes onthouden\nAls je een lijst van objecten wil opslaan, kan dat met vierkante haakjes:", "minions = ['Dave','Stuart','Jerry','Jorge']\nprint(minions[2])\n#opgelet, elementnummers beginnen te tellen bij 0, daarom wordt de derde minion in de lijst geprint!", "Maar als je eigenlijk de favoriete ijsjes van de minions wil opslaan, gebruik je best een dictionary (Engels voor \"woordenboek\", omdat het je toelaat om dingen op te zoeken op basis van een index / sleutel):", "minion_ijsjes = {\n 'Dave':'aardbei', # 'Dave' is hier de sleutel, 'aardbei' is de ermee gekoppelde waarde \n 'Stuart':'vanille',\n 'Jerry':['mokka', 'vanille'], # Inderdaad, we kunnen dit nog veel ingewikkelder maken :-)\n 'Jorge':'chocolade'\n}\nprint(minion_ijsjes['Jerry'])\n\n# en begrijp je deze?\nprint(minion_ijsjes[minions[2]])", "Loopen\n-ja, dat moet echt met dubbele 'oo' en je spreekt het uit als 
'loepen'-\nKan ik in plaats van maar één ook de ijsjes van alle minions printen?\nnota tussendoor: range() is een functie om lijstjes van nummers te maken.", "print(range(4))", "Dit kunnen we gebruiken om 4 keer dezelfde print te herhalen, maar telkens met één nummer hoger", "for nummer in range(4):\n print(minions[nummer])", "Opgelet! de \"whitespace\" (witruimte) vóór het print commando is van belang! Zonder deze spaties zou Python niet weten wat er binnen en wat er buiten de loop valt:", "for nummer in range(4):\n print(nummer)\n print(minions[nummer])", "Is niet hetzelfde als:", "for nummer in range(4):\n print(nummer)\nprint(minions[nummer])", "We kunnen ook iets één keer herhalen voor elke minion in ons minions lijstje.", "for minion in minions:\n print(minion)", "Of voor elke minion in ons minions lijstje de minion printen plus zijn ijsje(s) uit de minion_ijsjes dictionary", "for minion in minions:\n print(minion, minion_ijsjes[minion])", "While is een gelijkaardig soort loop. \"while\" is Engels voor \"terwijl\" en het betekent dat de instructies in de loop uitgevoerd zullen worden terwijl aan een bepaalde voorwaarde voldaan is.\nBijvoorbeeld met een dobbelsteen gooien tot er 6 gegooid wordt:", "import random # om de random module te kunnen gebruiken; import wordt verder nog uitgelegd\n\nworp = 0 # een worp van 0 kan niet, maar we moeten ergens beginnen...\nwhile worp < 6:\n worp = random.randint(1,6) # een willekeurig getal van 1 tot 6\n print(worp)", "Als je het een paar keer probeert kan je zien dat het echt willekeurig is (met Ctrl+Enter wordt de cel uitgevoerd terwijl de cursor blijft staan)\nEen speciaal geval is de \"while True:\" constructie; aangezien aan True (Engels voor \"Waar\") altijd voldaan wordt, blijft dit voor eeuwig loopen, tenzij je de executie manueel onderbreekt door op de Stop knop (<i class=\"fa-stop fa\"></i>) in de menubalk te drukken of Kernel > Interrupt te kiezen uit het dropdown menu.", "import time # om de time module te kunnen gebruiken; import wordt verder nog uitgelegd\n\nwhile True:\n print('.'), # met de komma voorkom je dat er na elk punt een nieuwe lijn gestart wordt\n time.sleep(0.2) # 0.2 seconden pauzeren", "Druk op de Stop knop (<i class=\"fa-stop fa\"></i>) om de executie te beëindigen.\nDeze methode wordt soms gebruikt om een programma te starten dat moet blijven lopen.\nVoor gevorderden: Je kan foutmeldingen neutralizeren en een dergelijke Kernel Interrupt dus op een elegante manier opvangen zonder dat IPython lelijke KeyboardInterrupts op het scherm toont.\nDat gebeurt met een \"try except\" constructie en wel op de volgende manier:", "import time\n\nwhile True:\n try: # probeer de code uit te voeren...\n print('.'),\n time.sleep(0.2)\n except KeyboardInterrupt: # ... 
en als een KeyboardInterrupt fout optreedt, toon ze dan niet, maar:\n print('\\nEinde') # print 'Einde' (op een nieuwe lijn)\n break # verlaat de while lus", "Voorwaarden\nMet een \"if\" uitdrukking kunnen we beïnvloeden hoe de uitvoering van de code verloopt.\n\"if\" betekent \"als\" in het Engels en het laat ons toe om de computer iets wel of niet te laten doen, afhankelijk wat we erachter zetten.", "punten = 85\nif punten > 90:\n print('Schitterend')\nelif punten > 80:\n print('Zeer goed')\nelif punten > 60:\n print('Goed')\nelse:\n print('Hm')", "Bijvoorbeeld: die Jerry is me toch wat gulzig, dus:", "for minion in minions:\n if minion == 'Jerry':\n print('--Gulzigaard--')\n else:\n print(minion, minion_ijsjes[minion])", "Echt programmeren\nWe kunnen in Python ook zelf functies maken en die vervolgens gebruiken; dat helpt om de code ordelijk te houden en bepaalde stukjes code maar één maal te moeten schrijven / corrigeren / onderhouden.", "def begroet(naam):\n print('Dag ' + naam)", "Zullen we onze nieuwe functie eens uitproberen?", "begroet('Mariette')", "En nog een extraatje\nwe kunnen strings (tekst variabelen) gebruiken als template om teksten samen te stellen zoals de Samenvoegen functonaliteit in tekstverwerkers zoals Microsoft Word.\nDat gebeurt met de format() functie:\n'Dag {}'.format(naam)\nmaakt dat de accolades in de tekst vervangen worden door de waarde van de variabele", "def begroet(naam):\n print('Dag {}'.format(naam))\n\nbegroet('Willy')", "Python libraries (Bibliotheken)\nNatuurlijk zijn er al heel wat mensen die Python code geschreven hebben en veel van die code is beschikbaar in de vorm van libraries die je kant en klaar kan installeren.\nAls zo'n library geïnstalleerd hebt, kan je ze importeren en de functies ervan beginnen gebruiken:", "import math\nprint(\"PI: {}\".format(math.pi))\nprint(\"sin(PI/2): {}\".format(math.cos(math.pi)))", "requests is een bibliotheek om webpagina's in te laden; hier bezoeken we een openweathermap en drukken een deel van de weersvoorspelling voor Mechelen af.", "import requests\nr = requests.get('http://api.openweathermap.org/data/2.5/weather?q=Mechelen').json()\nprint(r['weather'][0]['description'])", "Of een quote uit de online database van iheartquotes.com", "import requests\nr = requests.get('http://www.iheartquotes.com/api/v1/random')\nprint(r.text)", "Er is natuurlijk nog veel meer te leren, maar dit geeft je al een goede basis om Python code te begrijpen en zelf één en ander in mekaar te knutselen. Laat het weten als je suggesties of opmerkingen hebt!\nveel plezier met Python!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
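The Dutch introduction above ends by fetching weather data from OpenWeatherMap without credentials; that endpoint now rejects keyless requests (an `appid` parameter is required), and the iheartquotes.com API it also calls appears to be offline. A hedged sketch of the weather request with a key; `YOUR_API_KEY` is a placeholder you would have to replace with your own key, and the failure branch keeps the snippet runnable either way.

```python
# Hedged sketch: the OpenWeatherMap call with an API key.
# 'YOUR_API_KEY' is a placeholder, not a real credential.
import requests

params = {'q': 'Mechelen', 'appid': 'YOUR_API_KEY'}
r = requests.get('http://api.openweathermap.org/data/2.5/weather', params=params)
if r.ok:
    print(r.json()['weather'][0]['description'])
else:
    print('Request failed with status code:', r.status_code)
```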
sdpython/ensae_teaching_cs
_doc/notebooks/td2a_ml/ml_lasso_rf_grid_search_correction.ipynb
mit
[ "Hyperparamètres, LassoRandomForestRregressor et grid_search (correction)\nLe notebook explore l'optimisation des hyper paramaètres du modèle LassoRandomForestRegressor, et fait varier le nombre d'arbre et le paramètres alpha.", "from jyquickhelper import add_notebook_menu\nadd_notebook_menu()\n\n%matplotlib inline", "Données", "from sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\ndata = load_boston()\nX, y = data.data, data.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)", "Premiers modèles", "from sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import r2_score\n\nrf = RandomForestRegressor()\nrf.fit(X_train, y_train)\nr2_score(y_test, rf.predict(X_test))", "Pour le modèle, il suffit de copier coller le code écrit dans ce fichier lasso_random_forest_regressor.py.", "from ensae_teaching_cs.ml.lasso_random_forest_regressor import LassoRandomForestRegressor\nlrf = LassoRandomForestRegressor()\nlrf.fit(X_train, y_train)\nr2_score(y_test, lrf.predict(X_test))", "Le modèle a réduit le nombre d'arbres.", "len(lrf.estimators_)", "Grid Search\nOn veut trouver la meilleure paire de paramètres (n_estimators, alpha). scikit-learn implémente l'objet GridSearchCV qui effectue de nombreux apprentissage avec toutes les valeurs de paramètres qu'il reçoit. Voici tous les paramètres qu'on peut changer :", "lrf.get_params()\n\nparams = {\n 'lasso_estimator__alpha': [0.25, 0.5, 0.75, 1., 1.25, 1.5],\n 'rf_estimator__n_estimators': [20, 40, 60, 80, 100, 120]\n}\n\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.model_selection import GridSearchCV\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=ConvergenceWarning)\n\ngrid = GridSearchCV(estimator=LassoRandomForestRegressor(),\n param_grid=params, verbose=1)\ngrid.fit(X_train, y_train)", "Les meilleurs paramètres sont les suivants :", "grid.best_params_", "Et le modèle a gardé un nombre réduit d'arbres :", "len(grid.best_estimator_.estimators_)\n\nr2_score(y_test, grid.predict(X_test))", "Evolution de la performance en fonction des paramètres", "grid.cv_results_\n\nimport numpy\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\nfig = plt.figure(figsize=(14, 6))\nax = fig.add_subplot(131, projection='3d')\nxs = numpy.array([el['lasso_estimator__alpha'] for el in grid.cv_results_['params']])\nys = numpy.array([el['rf_estimator__n_estimators'] for el in grid.cv_results_['params']])\nzs = numpy.array(grid.cv_results_['mean_test_score'])\nax.scatter(xs, ys, zs)\nax.set_title(\"3D...\")\n\nax = fig.add_subplot(132)\nfor x in sorted(set(xs)):\n y2 = ys[xs == x]\n z2 = zs[xs == x]\n ax.plot(y2, z2, label=\"alpha=%1.2f\" % x, lw=x*2)\nax.legend();\n\nax = fig.add_subplot(133)\nfor y in sorted(set(ys)):\n x2 = xs[ys == y]\n z2 = zs[ys == y]\n ax.plot(x2, z2, label=\"n_estimators=%d\" % y, lw=y/40)\nax.legend();", "Il semble que la valeur de alpha importe peu mais qu'un grand nombre d'arbres a un impact positif. Cela dit, il faut ne pas oublier l'écart-type de ces variations qui n'est pas négligeable." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
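The grid-search notebook above loads its data with `load_boston`, which was removed from scikit-learn in version 1.2. On current versions the same exercise can be re-run on another regression dataset; here is a hedged sketch using the California housing data as one possible drop-in (subsampled to keep the fits quick), with the rest of the notebook (`LassoRandomForestRegressor`, `GridSearchCV` over `lasso_estimator__alpha` and `rf_estimator__n_estimators`) unchanged.

```python
# Hedged sketch: replace the removed load_boston with another regression dataset.
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split

data = fetch_california_housing()
X, y = data.data[:2000], data.target[:2000]   # subsample to keep the grid search fast
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
print(X_train.shape, X_test.shape)
```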
arcyfelix/Courses
17-09-17-Python-for-Financial-Analysis-and-Algorithmic-Trading/.ipynb_checkpoints/2 - Numpy Exercises-checkpoint.ipynb
apache-2.0
[ "<a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>\n\n<center>Copyright Pierian Data 2017</center>\n<center>For more information, visit us at www.pieriandata.com</center>\nNumPy Exercises\nNow that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks and then you'll be asked some more complicated questions.\n IMPORTANT NOTE! Make sure you don't run the cells directly above the example output shown, otherwise you will end up writing over the example output! \nImport NumPy as np", "import numpy as np", "Create an array of 10 zeros", "# CODE HERE\nnp.zeros(10)", "Create an array of 10 ones", "# CODE HERE\nnp.ones(10)", "Create an array of 10 fives", "# CODE HERE\nnp.ones(10) * 5", "Create an array of the integers from 10 to 50", "# CODE HERE\nnp.arange(10, 51)", "Create an array of all the even integers from 10 to 50", "# CODE HERE\nnp.arange(10, 51, 2)", "Create a 3x3 matrix with values ranging from 0 to 8", "# CODE HERE\nnp.arange(9).reshape(3,3)", "Create a 3x3 identity matrix", "# CODE HERE\nnp.eye(3)", "Use NumPy to generate a random number between 0 and 1", "# CODE HERE\nnp.random.randn(1)", "Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution", "# CODE HERE\nnp.random.randn(25)", "Create the following matrix:", "np.arange(1, 101).reshape(10, 10) / 100", "Create an array of 20 linearly spaced points between 0 and 1:", "np.linspace(0, 1, 20)", "Numpy Indexing and Selection\nNow you will be given a few matrices, and be asked to replicate the resulting matrix outputs:", "# HERE IS THE GIVEN MATRIX CALLED MAT\n# USE IT FOR THE FOLLOWING TASKS\nmat = np.arange(1,26).reshape(5,5)\nmat\n\n# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW\n# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T\n# BE ABLE TO SEE THE OUTPUT ANY MORE\n\nmat[2:, ]\n\n# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW\n# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T\n# BE ABLE TO SEE THE OUTPUT ANY MORE\n\nmat[3, -1]\n\n# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW\n# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T\n# BE ABLE TO SEE THE OUTPUT ANY MORE\n\nmat[:3, 1].reshape(3, 1)\n\n# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW\n# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T\n# BE ABLE TO SEE THE OUTPUT ANY MORE\n\nmat[-1, :]\n\n# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW\n# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T\n# BE ABLE TO SEE THE OUTPUT ANY MORE\n\nmat[-2:, :]", "Now do the following\nGet the sum of all the values in mat", "# CODE HERE\nnp.sum(mat)", "Get the standard deviation of the values in mat", "# CODE HERE\nnp.std(mat)", "Get the sum of all the columns in mat", "# CODE HERE\nnp.sum(mat, axis = 0)", "Bonus Question\nWe worked a lot with random data with numpy, but is there a way we can insure that we always get the same random numbers? Click Here for a Hint", "# My favourite number is 7\nnp.random.seed(7)", "Great Job!\nEasy / Woj" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
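A hedged note on one of the filled-in answers above: `np.random.randn` draws from a standard normal distribution, so its values can be negative or greater than 1; for "a random number between 0 and 1" the uniform generator `np.random.rand` is the usual choice, while `randn(25)` is the right call for the standard-normal exercise. A small sketch of the difference, reusing the notebook's favourite seed:

```python
import numpy as np

np.random.seed(7)
print(np.random.rand(1))    # uniform on [0, 1) -- matches "between 0 and 1"
print(np.random.randn(1))   # standard normal  -- can fall outside [0, 1]
print(np.random.randn(25))  # 25 standard-normal samples, as the next exercise asks
```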
mne-tools/mne-tools.github.io
0.13/_downloads/plot_topo_compare_conditions.ipynb
bsd-3-clause
[ "%matplotlib inline", "Compare evoked responses for different conditions\nIn this example, an Epochs object for visual and\nauditory responses is created. Both conditions\nare then accessed by their respective names to\ncreate a sensor layout plot of the related\nevoked responses.", "# Authors: Denis Engemann <denis.engemann@gmail.com>\n# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>\n\n# License: BSD (3-clause)\n\n\nimport matplotlib.pyplot as plt\nimport mne\n\nfrom mne.viz import plot_evoked_topo\nfrom mne.datasets import sample\n\nprint(__doc__)\n\ndata_path = sample.data_path()", "Set parameters", "raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\nevent_id = 1\ntmin = -0.2\ntmax = 0.5\n\n# Setup for reading the raw data\nraw = mne.io.read_raw_fif(raw_fname)\nevents = mne.read_events(event_fname)\n\n# Set up pick list: MEG + STI 014 - bad channels (modify to your needs)\ninclude = [] # or stim channels ['STI 014']\n# bad channels in raw.info['bads'] will be automatically excluded\n\n# Set up amplitude-peak rejection values for MEG channels\nreject = dict(grad=4000e-13, mag=4e-12)\n\n# pick MEG channels\npicks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,\n include=include, exclude='bads')\n\n# Create epochs including different events\nevent_id = {'audio/left': 1, 'audio/right': 2,\n 'visual/left': 3, 'visual/right': 4}\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax,\n picks=picks, baseline=(None, 0), reject=reject)\n\n# Generate list of evoked objects from conditions names\nevokeds = [epochs[name].average() for name in ('left', 'right')]", "Show topography for two different conditions", "colors = 'yellow', 'green'\ntitle = 'MNE sample data - left vs right (A/V combined)'\n\nplot_evoked_topo(evokeds, color=colors, title=title)\n\nconditions = [e.comment for e in evokeds]\nfor cond, col, pos in zip(conditions, colors, (0.025, 0.07)):\n plt.figtext(0.99, pos, cond, color=col, fontsize=12,\n horizontalalignment='right')\n\nplt.show()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
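Beyond overlaying the left and right evoked responses, the two conditions above can also be contrasted directly. A hedged sketch computing a left-minus-right difference wave with `mne.combine_evoked` and plotting it on the same sensor layout; it assumes a reasonably recent MNE with list-of-weights support and that the objects from the cells above (`evokeds`, `plot_evoked_topo`) are in scope.

```python
# Hedged sketch: build a left-minus-right difference wave and plot its topography.
difference = mne.combine_evoked(evokeds, weights=[1, -1])
plot_evoked_topo(difference, color='red',
                 title='MNE sample data - left minus right (A/V combined)')
```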
mnschmit/LMU-Syntax-nat-rlicher-Sprachen
09-notebook-solution.ipynb
apache-2.0
[ "Übungsblatt 9\nPräsenzaufgaben\nAufgabe 1 &nbsp;&nbsp;&nbsp; Unifikation I", "from nltk.featstruct import FeatStruct", "Gegeben seien folgende Merkmalstrukturen:", "f1 = FeatStruct(\n '[Vorname=Max, Nachname=Mustermann,' + \n 'Privat=[Strasse=Hauptstrasse, Ort=[Muenchen]]]'\n)\nf2 = FeatStruct(\n '[Arbeit=[Strasse=\"Oettingenstrasse\", Ort=(1)[\"Muenchen\"]],' +\n 'Privat=[Ort->(1)]]')\nf3 = FeatStruct(\n '[Strasse=\"Hauptstrasse\"]'\n)\nf4 = FeatStruct(\n '[Privat=[Strasse=\"Hauptstrasse\", Ort=[\"Passau\"]]]'\n)", "Unifizieren Sie:\n- f1 mit f2", "print(f1.unify(f2).__repr__())", "f2 mit f4", "print(f2.unify(f4).__repr__())", "Aufgabe 2 &nbsp;&nbsp;&nbsp; Typhierarchie im NLTK\nGegeben sei folgende Typhierarchie:\n$$\\bot \\sqsubseteq \\text{Genitiv}$$\n$$\\bot \\sqsubseteq \\text{nicht-Genitiv}$$\n$$\\text{nicht-Genitiv} \\sqsubseteq \\text{Nominativ-Akkusativ}$$\n$$\\text{nicht-Genitiv} \\sqsubseteq \\text{Dativ}$$\n$$\\text{Nominativ-Akkusativ} \\sqsubseteq \\text{Nominativ}$$\n$$\\text{Nominativ-Akkusativ} \\sqsubseteq \\text{Akkusativ}$$\nImplementieren Sie mithilfe der Klasse HierarchicalFeature,\ndie Sie sich von der Kurs-Website herunterladen können, ein Feature CASE, das der vorgegebenen Typhierarchie entspricht.\nNutzen Sie dieses Feature dann, um Übergenerierung in folgender Grammatik zu vermeiden:", "grammar = \"\"\"\nS -> NP[*CASE*=nom] VP\nNP[*CASE*=?x] -> DET[*CASE*=?x,GEN=?y] NOM[*CASE*=?x,GEN=?y]\nNOM[*CASE*=?x,GEN=?y] -> N[*CASE*=?x,GEN=?y] NP[*CASE*=gen]\nNOM[*CASE*=?x,GEN=?y] -> N[*CASE*=?x,GEN=?y]\nVP -> V\n\nV -> \"schläft\"\nDET[*CASE*=nomakk,GEN=fem] -> \"die\"\nDET[*CASE*=nomakk,GEN=neut] -> \"das\"\nDET[*CASE*=gen,GEN=mask] -> \"des\"\nDET[*CASE*=gen,GEN=neut] -> \"des\"\nDET[*CASE*=nom,GEN=mask] -> \"der\"\nDET[*CASE*=gen,GEN=fem] -> \"der\"\n\nN[*CASE*=nongen,GEN=mask] -> \"Mann\"\nN[*CASE*=nongen,GEN=fem] -> \"Frau\"\nN[*CASE*=nongen,GEN=neut] -> \"Kind\"\nN[*CASE*=gen,GEN=fem] -> \"Frau\"\nN[*CASE*=gen,GEN=mask] -> \"Mannes\"\nN[*CASE*=gen,GEN=neut] -> \"Kindes\"\n\"\"\"\n\nfrom IPython.display import display\nimport nltk\nfrom typed_features import HierarchicalFeature, TYPE", "Hier muss die Typhierarchie in Form eines Dictionary definiert werden:", "type_hierarchy = {\n \"gen\": [],\n \"nongen\": [\"nomakk\", \"dat\"],\n \"nomakk\": [\"nom\", \"akk\"],\n \"nom\": [],\n \"dat\": [],\n \"akk\": []\n}\n\nCASE = HierarchicalFeature(\"CASE\", type_hierarchy)\ncompiled_grammar = nltk.grammar.FeatureGrammar.fromstring(\n grammar, features=(CASE, TYPE)\n)\nparser = nltk.FeatureEarleyChartParser(compiled_grammar)", "Folgendes sollte funktionieren:", "for t in parser.parse(\"das Kind der Frau schläft\".split()):\n display(t)", "Folgendes sollte leer sein:", "list(parser.parse(\"des Mannes schläft\".split()))", "Folgendes sollte wieder funktionieren. Betrachten Sie aufmerksam die Merkmale im Syntaxbaum.", "for t in parser.parse(\"der Mann der Frau schläft\".split()):\n display(t)", "Hausaufgaben\nAufgabe 3 &nbsp;&nbsp;&nbsp; Unifikation II\nEs seien wieder die Merkmalstrukturen aus Aufgabe 1 gegeben.\nUnifizieren Sie:\n- f1 mit f4", "print(f1.unify(f4).__repr__())", "f2 mit f3", "print(f2.unify(f3).__repr__())", "Aufgabe 4 &nbsp;&nbsp;&nbsp; Weniger Redundanz dank besonderer Merkmale\nBeseitigen Sie die Redundanz in den lexikalischen Regeln (Zeilen 8 - 32) der folgenden Grammatik durch eine Typhierarchie (wo dies nötig ist). 
Achten Sie darauf, die Menge der akzeptierten Sätze weder zu verkleinern noch zu vergrößern!\nAnzugeben sind die neuen Grammatikregeln, sowie Ihre Typhierarchie (z. B. in graphischer Form).", "case_hierarchy = {\n \"nongen\": [\"nomakk\", \"dat\"],\n \"gendat\": [\"gen\", \"dat\"],\n \"nomakk\": [\"nom\", \"akk\"],\n \"nom\": [],\n \"gen\": [],\n \"dat\": [],\n \"akk\": []\n}\ngen_hierarchy = {\n \"maskneut\": [\"mask\", \"neut\"],\n \"mask\": [],\n \"fem\": [],\n \"neut\": []\n}\n\nredundant_grammar = \"\"\"\nS -> NP[*KAS*=nom] VP\n\nNP[*KAS*=?y] -> DET[*GEN*=?x,*KAS*=?y] NOM[*GEN*=?x,*KAS*=?y]\nNOM[*GEN*=?x,*KAS*=?y] -> N[*GEN*=?x,*KAS*=?y] NP[*KAS*=gen]\nNOM[*GEN*=?x,*KAS*=?y] -> N[*GEN*=?x,*KAS*=?y]\n\nDET[*GEN*=mask,*KAS*=nom] -> \"der\"\nDET[*GEN*=maskneut,*KAS*=gen] -> \"des\"\nDET[*GEN*=maskneut,*KAS*=dat] -> \"dem\"\nDET[*GEN*=mask,*KAS*=akk] -> \"den\"\n\nDET[*GEN*=fem,*KAS*=nomakk] -> \"die\"\nDET[*GEN*=fem,*KAS*=gendat] -> \"der\"\n\nDET[*GEN*=neut,*KAS*=nomakk] -> \"das\"\n\nN[*GEN*=mask,*KAS*=nongen] -> \"Mann\"\nN[*GEN*=mask,*KAS*=gen] -> \"Mannes\"\nN[*GEN*=fem] -> \"Frau\"\nN[*GEN*=neut,*KAS*=nongen] -> \"Buch\"\nN[*GEN*=neut,*KAS*=gen] -> \"Buches\"\n\nVP -> V NP[*KAS*=dat] NP[*KAS*=akk]\nV -> \"gibt\" | \"schenkt\"\n\"\"\"\n\nCASE = HierarchicalFeature(\"KAS\", case_hierarchy)\nGEN = HierarchicalFeature(\"GEN\", gen_hierarchy)\ncompiled_grammar = nltk.grammar.FeatureGrammar.fromstring(\n redundant_grammar, features=(CASE, GEN, TYPE)\n)\nparser = nltk.FeatureEarleyChartParser(compiled_grammar)\n\npos_sentences = [\n \"der Mann gibt der Frau das Buch\",\n \"die Frau des Mannes gibt dem Mann der Frau das Buch des Buches\"\n]", "Testen Sie mit Ihren eigenen Negativbeispielen!", "neg_sentences = [\n \"des Mannes gibt der Frau das Buch\",\n \"Mann gibt der Frau das Buch\",\n \"der Mann gibt der Frau Buch\",\n \"der Frau gibt dem Buch den Mann\",\n \"das Buch der Mann gibt der Frau das Buch\"\n]\n\nimport sys\n\ndef test_grammar(parser, sentences): \n for i, sent in enumerate(sentences, 1):\n print(\"Satz {}: {}\".format(i, sent))\n sys.stdout.flush()\n results = parser.parse(sent.split())\n analyzed = False\n for tree in results:\n display(tree)\n analyzed = True\n if not analyzed:\n print(\"Keine Analyse möglich\", file=sys.stderr)\n sys.stderr.flush()\n\ntest_grammar(parser, pos_sentences)\n\ntest_grammar(parser, neg_sentences)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
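The unification exercises above rely on NLTK's structure sharing: a reentrant node updated along one path is visible along every path that points to it, and `FeatStruct.unify` returns `None` when two structures clash. Here is a hedged standalone sketch of that behaviour using stock NLTK with illustrative structures of my own (not the exercise's f1–f4), in the same reentrancy notation `(1)[...]` / `->(1)` used in the notebook.

```python
# Hedged sketch: structure sharing in NLTK feature structures.
from nltk.featstruct import FeatStruct

shared = FeatStruct('[Arbeit=[Ort=(1)[Stadt=Muenchen]], Privat=[Ort->(1)]]')
extra = FeatStruct('[Privat=[Ort=[Land=Bayern]]]')

result = shared.unify(extra)
print(result)
# Because Privat.Ort and Arbeit.Ort are the same node, the new Land feature
# is visible under both paths after unification:
print(result['Arbeit', 'Ort', 'Land'])   # Bayern
```

A failed unification (e.g. two incompatible values for the shared Ort node) would instead make `unify` return `None`, which is the check the Hausaufgaben ask you to reason about.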
tensorflow/docs-l10n
site/ja/guide/tf_numpy.ipynb
apache-2.0
[ "Copyright 2020 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "TensorFlow の NumPy API\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td> <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/tf_numpy\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\">TensorFlow.org で表示</a> </td>\n <td> <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/guide/tf_numpy.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Google Colab で実行</a> </td>\n <td> <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ja/guide/tf_numpy.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub でソースを表示</a> </td>\n <td> <a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/guide/tf_numpy.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">ノートブックをダウンロード</a> </td>\n</table>\n\n概要\nTensorFlow では、tf.experimental.numpyを利用してNumPy API のサブセットを実装します。これにより、TensorFlow により高速化された NumPy コードを実行し、TensorFlow のすべて API にもアクセスできます。\nセットアップ", "import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.experimental.numpy as tnp\nimport timeit\n\nprint(\"Using TensorFlow version %s\" % tf.__version__)", "NumPy 動作の有効化\ntnp を NumPy として使用するには、TensorFlow の NumPy の動作を有効にしてください。", "tnp.experimental_enable_numpy_behavior()", "この呼び出しによって、TensorFlow での型昇格が可能になり、リテラルからテンソルに変換される場合に、型推論も Numpy の標準により厳格に従うように変更されます。\n注意: この呼び出しは、tf.experimental.numpy モジュールだけでなく、TensorFlow 全体の動作を変更します。\nTensorFlow NumPy ND 配列\nND 配列と呼ばれる tf.experimental.numpy.ndarray は、特定のデバイスに配置されたある dtype の多次元の密な配列を表します。tf.Tensor のエイリアスです。ndarray.T、ndarray.reshape、ndarray.ravel などの便利なメソッドについては、ND 配列クラスをご覧ください。\nまず、ND 配列オブジェクトを作成してから、さまざまなメソッドを呼び出します。", "# Create an ND array and check out different attributes.\nones = tnp.ones([5, 3], dtype=tnp.float32)\nprint(\"Created ND array with shape = %s, rank = %s, \"\n \"dtype = %s on device = %s\\n\" % (\n ones.shape, ones.ndim, ones.dtype, ones.device))\n\n# `ndarray` is just an alias to `tf.Tensor`.\nprint(\"Is `ones` an instance of tf.Tensor: %s\\n\" % isinstance(ones, tf.Tensor))\n\n# Try commonly used member functions.\nprint(\"ndarray.T has shape %s\" % str(ones.T.shape))\nprint(\"narray.reshape(-1) has shape %s\" % ones.reshape(-1).shape)", "型昇格\nTensorFlow NumPy API には、リテラルを ND 配列に変換するためと ND 配列入力で型昇格を実行するための明確に定義されたセマンティクスがあります。詳細については、np.result_type をご覧ください。\nTensorFlow API は tf.Tensor 入力を変更せずそのままにし、それに対して型昇格を実行しませんが、TensorFlow NumPy API は NumPy 型昇格のルールに従って、すべての入力を昇格します。次の例では、型昇格を行います。まず、さまざまな型の ND 配列入力で加算を実行し、出力の型を確認します。これらの型昇格は、TensorFlow API では行えません。", "print(\"Type promotion for operations\")\nvalues = [tnp.asarray(1, dtype=d) for d in\n (tnp.int32, tnp.int64, tnp.float32, tnp.float64)]\nfor i, v1 in enumerate(values):\n for v2 in values[i + 1:]:\n print(\"%s + %s => %s\" % \n (v1.dtype.name, v2.dtype.name, (v1 + v2).dtype.name))", 
"最後に、ndarray.asarray を使ってリテラルをND 配列に変換し、結果の型を確認します。", "print(\"Type inference during array creation\")\nprint(\"tnp.asarray(1).dtype == tnp.%s\" % tnp.asarray(1).dtype.name)\nprint(\"tnp.asarray(1.).dtype == tnp.%s\\n\" % tnp.asarray(1.).dtype.name)", "リテラルを ND 配列に変換する際、NumPy は tnp.int64 や tnp.float64 といった幅広い型を優先します。一方、tf.convert_to_tensor は、tf.int32 と tf.float32 の型を優先して定数を tf.Tensor に変換します。TensorFlow NumPy API は、整数に関しては NumPy の動作に従っています。浮動小数点数については、experimental_enable_numpy_behavior の prefer_float32 引数によって、tf.float64 よりも tf.float32 を優先するかどうかを制御することができます(デフォルトは False です)。以下に例を示します。", "tnp.experimental_enable_numpy_behavior(prefer_float32=True)\nprint(\"When prefer_float32 is True:\")\nprint(\"tnp.asarray(1.).dtype == tnp.%s\" % tnp.asarray(1.).dtype.name)\nprint(\"tnp.add(1., 2.).dtype == tnp.%s\" % tnp.add(1., 2.).dtype.name)\n\ntnp.experimental_enable_numpy_behavior(prefer_float32=False)\nprint(\"When prefer_float32 is False:\")\nprint(\"tnp.asarray(1.).dtype == tnp.%s\" % tnp.asarray(1.).dtype.name)\nprint(\"tnp.add(1., 2.).dtype == tnp.%s\" % tnp.add(1., 2.).dtype.name)", "ブロードキャスティング\nTensorFlow と同様に、NumPy は「ブロードキャスト」値の豊富なセマンティクスを定義します。詳細については、NumPy ブロードキャストガイドを確認し、これを TensorFlow ブロードキャストセマンティクスと比較してください。", "x = tnp.ones([2, 3])\ny = tnp.ones([3])\nz = tnp.ones([1, 2, 1])\nprint(\"Broadcasting shapes %s, %s and %s gives shape %s\" % (\n x.shape, y.shape, z.shape, (x + y + z).shape))", "インデックス\nNumPy は、非常に洗練されたインデックス作成ルールを定義しています。NumPy インデックスガイドを参照してください。以下では、インデックスとして ND 配列が使用されていることに注意してください。", "x = tnp.arange(24).reshape(2, 3, 4)\n\nprint(\"Basic indexing\")\nprint(x[1, tnp.newaxis, 1:3, ...], \"\\n\")\n\nprint(\"Boolean indexing\")\nprint(x[:, (True, False, True)], \"\\n\")\n\nprint(\"Advanced indexing\")\nprint(x[1, (0, 0, 1), tnp.asarray([0, 1, 1])])\n\n# Mutation is currently not supported\ntry:\n tnp.arange(6)[1] = -1\nexcept TypeError:\n print(\"Currently, TensorFlow NumPy does not support mutation.\")", "サンプルモデル\n次に、モデルを作成して推論を実行する方法を見てみます。この簡単なモデルは、relu レイヤーとそれに続く線形射影を適用します。後のセクションでは、TensorFlow のGradientTapeを使用してこのモデルの勾配を計算する方法を示します。", "class Model(object):\n \"\"\"Model with a dense and a linear layer.\"\"\"\n\n def __init__(self):\n self.weights = None\n\n def predict(self, inputs):\n if self.weights is None:\n size = inputs.shape[1]\n # Note that type `tnp.float32` is used for performance.\n stddev = tnp.sqrt(size).astype(tnp.float32)\n w1 = tnp.random.randn(size, 64).astype(tnp.float32) / stddev\n bias = tnp.random.randn(64).astype(tnp.float32)\n w2 = tnp.random.randn(64, 2).astype(tnp.float32) / 8\n self.weights = (w1, bias, w2)\n else:\n w1, bias, w2 = self.weights\n y = tnp.matmul(inputs, w1) + bias\n y = tnp.maximum(y, 0) # Relu\n return tnp.matmul(y, w2) # Linear projection\n\nmodel = Model()\n# Create input data and compute predictions.\nprint(model.predict(tnp.ones([2, 32], dtype=tnp.float32)))", "TensorFlow NumPy および NumPy\nTensorFlow NumPy は、完全な NumPy 仕様のサブセットを実装します。シンボルは、今後追加される予定ですが、近い将来にサポートされなくなる体系的な機能があります。これらには、NumPy C API サポート、Swig 統合、Fortran ストレージ優先順位、ビュー、stride_tricks、およびいくつかのdtype(np.recarrayや<code> np.object</code>)が含まれます。詳細については、 <a>TensorFlow NumPy API ドキュメント</a>をご覧ください。\nNumPy 相互運用性\nTensorFlow ND 配列は、NumPy 関数と相互運用できます。これらのオブジェクトは、__array__インターフェースを実装します。NumPy はこのインターフェースを使用して、関数の引数を処理する前にnp.ndarray値に変換します。\n同様に、TensorFlow NumPy 関数は、np.ndarray などのさまざまなタイプの入力を受け入れることができます。これらの入力は、<code>ndarray.asarray</code> を呼び出すことにより、ND 配列に変換されます。\nND 配列をnp.ndarrayとの間で変換すると、実際のデータコピーがトリガーされる場合があります。詳細については、バッファコピーのセクションを参照してください。", "# ND array passed into NumPy 
function.\nnp_sum = np.sum(tnp.ones([2, 3]))\nprint(\"sum = %s. Class: %s\" % (float(np_sum), np_sum.__class__))\n\n# `np.ndarray` passed into TensorFlow NumPy function.\ntnp_sum = tnp.sum(np.ones([2, 3]))\nprint(\"sum = %s. Class: %s\" % (float(tnp_sum), tnp_sum.__class__))\n\n# It is easy to plot ND arrays, given the __array__ interface.\nlabels = 15 + 2 * tnp.random.randn(1, 1000)\n_ = plt.hist(labels)", "バッファコピー\nTensorFlow NumPy を NumPy コードと混在させると、データコピーがトリガーされる場合があります。これは、TensorFlow NumPy のメモリアライメントに関する要件が NumPy の要件よりも厳しいためです。\nnp.ndarrayが TensorFlow Numpy に渡されると、アライメント要件を確認し、必要に応じてコピーがトリガーされます。ND 配列 CPU バッファを NumPy に渡す場合、通常、バッファはアライメント要件を満たし、NumPy はコピーを作成する必要はありません。\nND 配列は、ローカル CPU メモリ以外のデバイスに配置されたバッファを参照できます。このような場合、NumPy 関数を呼び出すと、必要に応じてネットワークまたはデバイス全体でコピーが作成されます。\nこのため、NumPy API 呼び出しとの混合は通常、注意して行い、ユーザーはデータのコピーのオーバーヘッドに注意する必要があります。TensorFlow NumPy 呼び出しを TensorFlow 呼び出しとインターリーブすることは一般的に安全であり、データのコピーを避けられます。 詳細については、TensorFlow の相互運用性のセクションをご覧ください。\n演算子の優先順位\nTensorFlow NumPy は、NumPy よりも優先順位の高い__array_priority__を定義します。つまり、ND 配列とnp.ndarrayの両方を含む演算子の場合、前者が優先されます。np.ndarray入力は ND 配列に変換され、演算子の TensorFlow NumPy 実装が呼び出されます。", "x = tnp.ones([2]) + np.ones([2])\nprint(\"x = %s\\nclass = %s\" % (x, x.__class__))", "TF NumPy と TensorFlow\nTensorFlow NumPy は TensorFlow の上に構築されているため、TensorFlow とシームレスに相互運用できます。\ntf.Tensor と ND 配列\nND 配列は tf.Tensor のエイリアスであるため、実際のデータのコピーを呼び出さずに混在させることが可能です。", "x = tf.constant([1, 2])\nprint(x)\n\n# `asarray` and `convert_to_tensor` here are no-ops.\ntnp_x = tnp.asarray(x)\nprint(tnp_x)\nprint(tf.convert_to_tensor(tnp_x))\n\n# Note that tf.Tensor.numpy() will continue to return `np.ndarray`.\nprint(x.numpy(), x.numpy().__class__)", "TensorFlow 相互運用性\nND 配列は tf.Tensor のエイリアスにすぎないため、TensorFlow API に渡すことができます。前述のように、このような相互運用では、アクセラレータやリモートデバイスに配置されたデータであっても、データのコピーは行われません。\n逆に言えば、tf.Tensor オブジェクトを、データのコピーを実行せずに、tf.experimental.numpy API に渡すことができます。", "# ND array passed into TensorFlow function.\ntf_sum = tf.reduce_sum(tnp.ones([2, 3], tnp.float32))\nprint(\"Output = %s\" % tf_sum)\n\n# `tf.Tensor` passed into TensorFlow NumPy function.\ntnp_sum = tnp.sum(tf.ones([2, 3]))\nprint(\"Output = %s\" % tnp_sum)", "勾配とヤコビアン: tf.GradientTape\nTensorFlow の GradientTape は、TensorFlow と TensorFlow NumPy コードを介してバックプロパゲーションに使用できます。\nサンプルモデルセクションで作成されたモデルを使用して、勾配とヤコビアンを計算します。", "def create_batch(batch_size=32):\n \"\"\"Creates a batch of input and labels.\"\"\"\n return (tnp.random.randn(batch_size, 32).astype(tnp.float32),\n tnp.random.randn(batch_size, 2).astype(tnp.float32))\n\ndef compute_gradients(model, inputs, labels):\n \"\"\"Computes gradients of squared loss between model prediction and labels.\"\"\"\n with tf.GradientTape() as tape:\n assert model.weights is not None\n # Note that `model.weights` need to be explicitly watched since they\n # are not tf.Variables.\n tape.watch(model.weights)\n # Compute prediction and loss\n prediction = model.predict(inputs)\n loss = tnp.sum(tnp.square(prediction - labels))\n # This call computes the gradient through the computation above.\n return tape.gradient(loss, model.weights)\n\ninputs, labels = create_batch()\ngradients = compute_gradients(model, inputs, labels)\n\n# Inspect the shapes of returned gradients to verify they match the\n# parameter shapes.\nprint(\"Parameter shapes:\", [w.shape for w in model.weights])\nprint(\"Gradient shapes:\", [g.shape for g in gradients])\n# Verify that gradients are of type ND array.\nassert isinstance(gradients[0], tnp.ndarray)\n\n# Computes a batch of jacobians. 
Each row is the jacobian of an element in the\n# batch of outputs w.r.t. the corresponding input batch element.\ndef prediction_batch_jacobian(inputs):\n with tf.GradientTape() as tape:\n tape.watch(inputs)\n prediction = model.predict(inputs)\n return prediction, tape.batch_jacobian(prediction, inputs)\n\ninp_batch = tnp.ones([16, 32], tnp.float32)\noutput, batch_jacobian = prediction_batch_jacobian(inp_batch)\n# Note how the batch jacobian shape relates to the input and output shapes.\nprint(\"Output shape: %s, input shape: %s\" % (output.shape, inp_batch.shape))\nprint(\"Batch jacobian shape:\", batch_jacobian.shape)", "トレースコンパイル: tf.function\nTensorflow の tf.function は、コードを「トレースコンパイル」し、これらのトレースを最適化してパフォーマンスを大幅に向上させます。グラフと関数の概要を参照してください。\nまた、tf.function を使用して、TensorFlow NumPy コードを最適化することもできます。以下は、スピードアップを示す簡単な例です。tf.function コードの本文には、TensorFlow NumPy API への呼び出しが含まれていることに注意してください。", "inputs, labels = create_batch(512)\nprint(\"Eager performance\")\ncompute_gradients(model, inputs, labels)\nprint(timeit.timeit(lambda: compute_gradients(model, inputs, labels),\n number=10) * 100, \"ms\")\n\nprint(\"\\ntf.function compiled performance\")\ncompiled_compute_gradients = tf.function(compute_gradients)\ncompiled_compute_gradients(model, inputs, labels) # warmup\nprint(timeit.timeit(lambda: compiled_compute_gradients(model, inputs, labels),\n number=10) * 100, \"ms\")", "ベクトル化:tf.vectorized_map\nTensorFlow には、並列ループのベクトル化のサポートが組み込まれているため、10 倍から 100 倍のスピードアップが可能です。これらのスピードアップは、tf.vectorized_map API を介して実行でき、TensorFlow NumPy にも適用されます。\nw.r.t. (対応する入力バッチ要素)バッチで各出力の勾配を計算すると便利な場合があります。このような計算は、以下に示すように tf.vectorized_map を使用して効率的に実行できます。", "@tf.function\ndef vectorized_per_example_gradients(inputs, labels):\n def single_example_gradient(arg):\n inp, label = arg\n return compute_gradients(model,\n tnp.expand_dims(inp, 0),\n tnp.expand_dims(label, 0))\n # Note that a call to `tf.vectorized_map` semantically maps\n # `single_example_gradient` over each row of `inputs` and `labels`.\n # The interface is similar to `tf.map_fn`.\n # The underlying machinery vectorizes away this map loop which gives\n # nice speedups.\n return tf.vectorized_map(single_example_gradient, (inputs, labels))\n\nbatch_size = 128\ninputs, labels = create_batch(batch_size)\n\nper_example_gradients = vectorized_per_example_gradients(inputs, labels)\nfor w, p in zip(model.weights, per_example_gradients):\n print(\"Weight shape: %s, batch size: %s, per example gradient shape: %s \" % (\n w.shape, batch_size, p.shape))\n\n# Benchmark the vectorized computation above and compare with\n# unvectorized sequential computation using `tf.map_fn`.\n@tf.function\ndef unvectorized_per_example_gradients(inputs, labels):\n def single_example_gradient(arg):\n inp, label = arg\n return compute_gradients(model,\n tnp.expand_dims(inp, 0),\n tnp.expand_dims(label, 0))\n\n return tf.map_fn(single_example_gradient, (inputs, labels),\n fn_output_signature=(tf.float32, tf.float32, tf.float32))\n\nprint(\"Running vectorized computation\")\nprint(timeit.timeit(lambda: vectorized_per_example_gradients(inputs, labels),\n number=10) * 100, \"ms\")\n\nprint(\"\\nRunning unvectorized computation\")\nper_example_gradients = unvectorized_per_example_gradients(inputs, labels)\nprint(timeit.timeit(lambda: unvectorized_per_example_gradients(inputs, labels),\n number=10) * 100, \"ms\")", "デバイスに配置する\nTensorFlow NumPy は、CPU、GPU、TPU、およびリモートデバイスに演算を配置できます。デバイスにおける配置には標準の TensorFlow メカニズムを使用します。以下の簡単な例は、すべてのデバイスを一覧表示してから、特定のデバイスに計算を配置する方法を示しています。\nここでは取り上げませんが、TensorFlow 
には、デバイス間で計算を複製し、集合的な削減を実行するための API もあります。\nデバイスをリストする\n使用するデバイスを見つけるには、tf.config.list_logical_devices およびtf.config.list_physical_devices を使用します。", "print(\"All logical devices:\", tf.config.list_logical_devices())\nprint(\"All physical devices:\", tf.config.list_physical_devices())\n\n# Try to get the GPU device. If unavailable, fallback to CPU.\ntry:\n device = tf.config.list_logical_devices(device_type=\"GPU\")[0]\nexcept IndexError:\n device = \"/device:CPU:0\"", "演算の配置:tf.device\nデバイスに演算を配置するには、tf.device スコープでデバイスを呼び出します。", "print(\"Using device: %s\" % str(device))\n# Run operations in the `tf.device` scope.\n# If a GPU is available, these operations execute on the GPU and outputs are\n# placed on the GPU memory.\nwith tf.device(device):\n prediction = model.predict(create_batch(5)[0])\n\nprint(\"prediction is placed on %s\" % prediction.device)", "デバイス間での ND 配列のコピー: tnp.copy\n特定のデバイススコープで tnp.copy を呼び出すと、データがそのデバイスに既に存在しない限り、そのデバイスにデータがコピーされます。", "with tf.device(\"/device:CPU:0\"):\n prediction_cpu = tnp.copy(prediction)\nprint(prediction.device)\nprint(prediction_cpu.device)", "パフォーマンスの比較\nTensorFlow NumPy は、CPU、GPU、TPU にディスパッチできる高度に最適化された TensorFlow カーネルを使用します。TensorFlow は、演算の融合など、多くのコンパイラ最適化も実行し、パフォーマンスとメモリを向上します。詳細については、Grappler を使用した TensorFlow グラフの最適化をご覧ください。\nただし、TensorFlow では、NumPy と比較してディスパッチ演算のオーバーヘッドが高くなります。小規模な演算(約 10 マイクロ秒未満)で構成されるワークロードの場合、これらのオーバーヘッドがランタイムを支配する可能性があり、NumPy はより優れたパフォーマンスを提供する可能性があります。その他の場合、一般的に TensorFlow を使用するとパフォーマンスが向上するはずです。\n以下のベンチマークを実行して、さまざまな入力サイズでの NumPy と TensorFlow Numpy のパフォーマンスを比較します。", "def benchmark(f, inputs, number=30, force_gpu_sync=False):\n \"\"\"Utility to benchmark `f` on each value in `inputs`.\"\"\"\n times = []\n for inp in inputs:\n def _g():\n if force_gpu_sync:\n one = tnp.asarray(1)\n f(inp)\n if force_gpu_sync:\n with tf.device(\"CPU:0\"):\n tnp.copy(one) # Force a sync for GPU case\n\n _g() # warmup\n t = timeit.timeit(_g, number=number)\n times.append(t * 1000. / number)\n return times\n\n\ndef plot(np_times, tnp_times, compiled_tnp_times, has_gpu, tnp_times_gpu):\n \"\"\"Plot the different runtimes.\"\"\"\n plt.xlabel(\"size\")\n plt.ylabel(\"time (ms)\")\n plt.title(\"Sigmoid benchmark: TF NumPy vs NumPy\")\n plt.plot(sizes, np_times, label=\"NumPy\")\n plt.plot(sizes, tnp_times, label=\"TF NumPy (CPU)\")\n plt.plot(sizes, compiled_tnp_times, label=\"Compiled TF NumPy (CPU)\")\n if has_gpu:\n plt.plot(sizes, tnp_times_gpu, label=\"TF NumPy (GPU)\")\n plt.legend()\n\n# Define a simple implementation of `sigmoid`, and benchmark it using\n# NumPy and TensorFlow NumPy for different input sizes.\n\ndef np_sigmoid(y):\n return 1. / (1. + np.exp(-y))\n\ndef tnp_sigmoid(y):\n return 1. / (1. 
+ tnp.exp(-y))\n\n@tf.function\ndef compiled_tnp_sigmoid(y):\n return tnp_sigmoid(y)\n\nsizes = (2 ** 0, 2 ** 5, 2 ** 10, 2 ** 15, 2 ** 20)\nnp_inputs = [np.random.randn(size).astype(np.float32) for size in sizes]\nnp_times = benchmark(np_sigmoid, np_inputs)\n\nwith tf.device(\"/device:CPU:0\"):\n tnp_inputs = [tnp.random.randn(size).astype(np.float32) for size in sizes]\n tnp_times = benchmark(tnp_sigmoid, tnp_inputs)\n compiled_tnp_times = benchmark(compiled_tnp_sigmoid, tnp_inputs)\n\nhas_gpu = len(tf.config.list_logical_devices(\"GPU\"))\nif has_gpu:\n with tf.device(\"/device:GPU:0\"):\n tnp_inputs = [tnp.random.randn(size).astype(np.float32) for size in sizes]\n tnp_times_gpu = benchmark(compiled_tnp_sigmoid, tnp_inputs, 100, True)\nelse:\n tnp_times_gpu = None\nplot(np_times, tnp_times, compiled_tnp_times, has_gpu, tnp_times_gpu)", "参考資料\n\nTensorFlow NumPy: 分散型画像分類のチュートリアル\nTensorFlow NumPy: Keras と分散ストラテジー\nTrax と TensorFlow NumPy を使用したセンチメント分析" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ekergy/jupyter_notebooks
curso/6-ML.ipynb
gpl-3.0
[ "Machine Learning\nLo que ha sido siempre la Estadística de toda la vida, que durante los 80 se llamó Inteligencia Artificial\n\n\nUna serie de técnicas de análisis y manipulación de datos que sirven para obtener patrones predecibles en datos reales.\n\n\nML Supervisado\n\n\nLos datos vienen con atributos objetivo. Queremos modelar o simplificar los datos para entender la relación entre datos y atributos y conocer mejor el problema. \n\n\nAlgoritmos de regresión y clasificación.\n\n\n\n\nML No supervisado\n\n\nQueremos encontrar estructura en nuestros datos.\n\n\nClustering, estimación de densidad...\n\n\n\n\nScikit-learn\n\n\nhttp://scikit-learn.org\n\n\nPython cuenta con una serie de módulos llamados scikits.\n\n\nScikit-learn (sklearn) es el más conocido.\n\n\nEs un toolkit bastante completo, rápido y una referencia en ML.\n\n\nUn buen tutorial del que he sacado parte de este ejemplo\n\n\nComprimir una imagen\nTomemos por ejemplo esta imagen de ejemplo de un templo en China", "import matplotlib.pyplot as plt\n%matplotlib inline\nimport numpy as np\nfrom sklearn.datasets import load_sample_image\nchina = load_sample_image(\"china.jpg\")\nfig = plt.figure(1)\nax = fig.add_subplot(1,1,1)\nax.imshow(china)", "Queremos comprimir esta imagen para reducir el tamaño que cuesta almacenarlo en memoria. Una de las estrategias de compresión es reducir la paleta de colore\ns.\nEn cualquier imagen, la paleta de de colores es una combinación de 256 tonos de rojo, verde y azul; entonces el espacio de colores tiene 3 dimensiones y $256^3$ (unos 16.7 millones) colores posibles. El color (0,0,0) es el negro, mientras que el (255,255,255) es el blanco.\nUna estrategia para comprimir imagenes parte de la base que nuestro sentido de la vista no percibe todos los colores por igual ni la naturaleza usa todos los colores a la vez. Hay colores que nuesto cerebro no percibe bien (especialmente si somos hombres) y hay colores poco frecuentes, como tonos puros de azul o rojo.\nEntonces se puede reducir el número de colores posibles de $256^3$ a menos de 100 sin que nuestra percepción encuentre la imagen aberrante. En los albores de la computación se utilizaban paleatas para ahorrar memoria y poder representar gráficos de manera más eficiente. Era el caso de la SEGA Master System, una consola que apareció en el año 1986, y que disponía de esta paleta de 32 colores.\n\nEl objetivo es obtener una paleta lo suficientemente buena como para que basten 32 colores. Es una práctica habitual en machine learning: obtener los casos más significativos (los colores de una paleta para una imagen) de entre todas las posibilidades (los 16 millones y pico de colores posibles)\nPrimero vamos a explorar la imagen sólo como si fueran un montón de datos. Cualquier imagen es un array con 3 dimensiones, una para la dirección horizontal, otra para la dirección vertical y una tercera para los 3 colores. El primer paso es obviar las dimensiones espaciales y convertir la imagen en una tira de numeros", "iso = china.reshape(-1,3)\nprint(iso.shape)\nprint(iso.nbytes)", "Como se ha dicho anteriormente, hay colores más o menos posibles. Sabiendo que tenemos tres posibles canales, representaremos todos los píxeles en función de dónde están situados en el espacio de color. 
Para ello los proyectaremos en las combinaciones de dos canales rojo-verde, rojo-azul y verde-azul.", "fig = plt.figure(2)\nrg = fig.add_subplot(2,2,1)\nrb = fig.add_subplot(2,2,2)\ngb = fig.add_subplot(2,2,3)\n\nrg.plot(iso[::5,0], iso[::5,1], 'b.', markersize=1)\nrg.set_title('Red-Green channel', fontsize=10)\nrb.plot(iso[::5,0], iso[::5,2], 'b.', markersize=1)\nrb.set_title('Red-Blue channel', fontsize=10)\ngb.plot(iso[::5,1], iso[::5,2], 'b.', markersize=1)\ngb.set_title('Green-Blue channel', fontsize=10)\n\nfig.tight_layout()", "Como se puede ver, la mayoría de píxeles siguen un patrón desde el negro al blanco, pasando por combinaciones que tienden al gris (iguales cantidades de rojo verde y azul). Los colores más poco frecuentes son los rojos puros y los verdes puros.\nUna paleta eficiente se conseguirá resumiendo todos estos píxeles en unos cuantos colores característicos, que se aproximan a los más frecuentes. El algoritmo que permite realizar esta tarea se llama KMeans.\nSe trata de un algoritmo de clustering que en Scikit-Learn se usa como sigue. Primero se importa el modelo y luego se configuran el número de centroides deseados. Cada centroide será un color característico. Es también un algoritmo bastante pesado que requiere bastante esfuerzo de cálculo, así que pasaremos un -1 al parámetro n_jobs para que use todos los colores disponibles.\nAl utilizar el método fit_predict el modelo calculará todos los centroides y dará para cada píxel el centroide más cercano (labels)", "from sklearn.cluster import KMeans\nmodel = KMeans(32, n_jobs=-1)\nlabels = model.fit_predict(iso)\ncolors = model.cluster_centers_", "A continuación representaremos sobre la anterior figura los centroides como puntos en rojo. Como se aprecia perfectamente, hay mayor densidad de centroides donde hay colores más probables.", "fig = plt.figure(3)\nrg = fig.add_subplot(2,2,1)\nrb = fig.add_subplot(2,2,2)\ngb = fig.add_subplot(2,2,3)\n\nrg.plot(iso[::5,0], iso[::5,1], 'b.', markersize=1)\nrg.set_title('Red-Green channel', fontsize=10)\nrb.plot(iso[::5,0], iso[::5,2], 'b.', markersize=1)\nrb.set_title('Red-Blue channel', fontsize=10)\ngb.plot(iso[::5,1], iso[::5,2], 'b.', markersize=1)\ngb.set_title('Green-Blue channel', fontsize=10)\n\nrg.plot(colors[:,0], colors[:,1], 'r.')\nrb.plot(colors[:,0], colors[:,2], 'r.')\ngb.plot(colors[:,1], colors[:,2], 'r.')\n\nfig.tight_layout()", "Finalmente podemos reconstruir la imagen utilizando los valores ajustados al modelo, para ello tenemos que pasar de la representación bidimensional que hemos utilizaro para el modelo a la tridimensional que requiere la imagen.*", "new_image = colors[labels].reshape(china.shape).astype(np.uint8)\nfig = plt.figure(4)\nax = fig.add_subplot(1,1,1)\nax.imshow(new_image)", "Distinguir entre el Iris Virginica y el Iris Versicolor\nVolvemos al Iris, la flor preferida del Machine Learning.", "import pandas as pd\niris = pd.read_csv('data/iris.csv')\niris.head()", "Un problema clásico de predicción es poder distinguir entre la Iris Virginica y la Iris Versicolor. Los datos tomados para cada flor son la longitud y la anchura del sépalo y el pétalo respectivamente. 
Distinguir la setosa de la virginica y versicolor es sencillo, puesto que la setosa tiene un sépalo claramente más corto y más ancho que las otras dos variedades.", "fig = plt.figure(5)\nax = fig.add_subplot(1,1,1)\nfor s, c in zip(iris.groupby('Name'), ['r', 'w', 'b']):\n s[1].plot.scatter(x='SepalWidth',\n y='SepalLength',\n c=c,\n s=50*s[1]['PetalLength'],\n ax=ax,\n label=s[0])\n \nplt.xlabel('Sepal width')\nplt.ylabel('Sepal length')", "En cambio, no parece que haya una manera obvia de distinguir la versicolor de la virginica por sus propiedades. Los pétalos y los sépalos tienen un aspecto parecido: cuando son largos son anchos y al contrario. Entonces no es trivial entrenar un modelo que prediga, dadas las características de una flor, su variedad.\nLo que sucede es que estamos explorando sólo unas cuantas combinaciones dentro del espacio de posibles medidas. Si proyectamos las medidas como en el caso anterior nos limitamos a combinaciones entre dos de los cuatro parámetros. Esta limitación existe sólo en nuestros cerebros porque tenemos serias dificultades para visualizar espacios con más de tres dimensiones. En el caso de estos datos, el número de dimensiones asciende a 4.\nPero hay algoritmos que permiten resolver este entuerto. La pregunta es la siguiente. ¿Hay algún subespacio de dos dimensiones (una proyección), combinación de las 4 dimensiones, que permita separar las características de la virginica de la setosa? La respuesta, si es afirmativa, se puede encontrar con la descomposición en componentes principales (Principal Component Analysis o PCA).\nPara Scikit-Learn, PCA es un algoritmo de descomposición. Le cargamos las medidas como una matriz de 4 filas y una columna por cada medida", "from sklearn.decomposition import PCA\ndata = np.vstack((iris.SepalLength.as_matrix(),\n iris.SepalWidth.as_matrix(),\n iris.PetalLength.as_matrix(),\n iris.PetalWidth.as_matrix())).T\n\npca = PCA(n_components=2)\nX_r = pca.fit(data).transform(data)\nprint('Components', pca.components_)\nprint('Explained variance', pca.explained_variance_ratio_)", "Lo que obtenemos es que las dos medidas que separan bien la virginica de la versicolor son\n$$ m_1 = 0.36 s_l + -0.08 s_w + 0.86 p_l + 0.36 p_w $$\n$$ m_2 = -0.66 s_l + -0.73 s_w + 0.18 p_l + 0.07 p_w $$\ndonde $s_l$ y $s_w$ son la longitud y la anchura del sépalo y $p_l$ y $p_w$ son la longitud y la anchura del pétalo respectivamente.\nRepresentando todas las mediciones utilizando estas dos nuevas variables obtenemos que sí es posible (aproximadamente) separar la virginica de la versicolor.", "fig = plt.figure(6)\nax = fig.add_subplot(1,1,1)\nprojected = pd.DataFrame(\n {'Axis1': X_r[:,0],\n 'Axis2': X_r[:,1],\n 'Name': iris.Name.as_matrix()\n }\n)\n\nfor (group, data), c in zip(projected.groupby('Name'), 'rwb'):\n plt.scatter(data.Axis1, data.Axis2, c=c, label=group)\n\n \nax.set_xlabel(r'$m_1$', fontsize=18)\nax.set_ylabel(r'$m_2$', fontsize=18)\nplt.legend()\nplt.title('PCA of IRIS dataset')\n", "En estas nuevas medidas derivadas, la combinación de $m_1$ y $m_2$ de la virginica es proporcionalmente mayor que la versicolor. 
En este nuevo subespacio la setosa es aún más fácil de distinguir, especialmente tomando la medida $m_1$.\nPodemos también volver a utilizar el algoritmo KMeans par clasificar automáticamente las tres variedades.", "data = np.vstack((projected.Axis1.as_matrix(),\n projected.Axis2.as_matrix())).T\n\nmodel = KMeans(3, n_jobs=-1)\n\nlabels = model.fit_predict(data)\nlabel_name_map = {\n 1: 'Iris-setosa',\n 2: 'Iris-versicolor',\n 0: 'Iris-virginica'\n } \nprojected['Label'] = [label_name_map[l] for l in labels]\n\nfig = plt.figure(7)\nax = fig.add_subplot(1,1,1)\n\nright = 0\nwrong = 0\nfor i, (ax1, ax2, name, label) in projected.iterrows():\n if name != label:\n ax.scatter(ax1, ax2, color='r')\n wrong += 1\n elif name == label:\n ax.scatter(ax1, ax2, color='b')\n right += 1\n \nprint('Accuracy', right/(wrong+right))\n \nplt.title('Clustering error')", "En rojo se ven los errores de clasificación de las tres variedades que comete el KMeans, con un porcentaje de acierto de casi el 90%. No es desastroso, pero es claramente mejorable." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
Rauf-Kurbanov/au_dl_course
seminar_1/homework_task1.ipynb
gpl-3.0
[ "import tensorflow as tf\nimport numpy as np\nimport math", "Hint: Use dtype=tf.float64 if you want to have same precision as numpy for testing<br>\nHint: You migth wanna use tf.InterativeSession for convenience\n1a: Create two random 0-d tensors x and y of any distribution. <br>\nCreate a TensorFlow object that returns x + y if x > y, and x - y otherwise. <br>\nHint: look up tf.cond() <br>\nI do the first problem for you <br>", "def task_1a_np(x, y):\n return np.where(x > y, x + y, x - y)\n\nX = tf.placeholder(tf.float64)\nY = tf.placeholder(tf.float64)\nout = tf.cond(tf.greater(X, Y), lambda: tf.add(X, Y), lambda: tf.subtract(X, Y))\n\nwith tf.Session() as sess:\n for xx, yy in np.random.uniform(size=(50, 2)):\n actual = sess.run(out, feed_dict={X:xx, Y:yy})\n expected = task_1a_np(xx, yy)\n if actual != expected:\n print('Fail')\n # something something\n else:\n print('Success')", "1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).<br>\nReturn x + y if x < y, x - y if x > y, 0 otherwise.<br>\nHint: Look up tf.case().<br>", "def task_1b_np(x, y):\n return np.select(condlist=[x < y, x > y],\n choicelist=[x + y, x - y],\n default=0)", "1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]] <br>\nand y as a tensor of zeros with the same shape as x. <br>\nReturn a boolean tensor that yields Trues if x equals y element-wise. <br>\nHint: Look up tf.equal(). <br>", "def task_1c_np():\n x = np.array([[0, -2, -1], [0, 1, 2]])\n y = np.zeros_like(x)\n return x == y", "1d:<br>\nGet the indices of elements in x whose values are greater than 30.<br>\nHint: Use tf.where().<br>\nThen extract elements whose values are greater than 30.<br>\nHint: Use tf.gather().<br>", "def task_1d_np(x):\n return x[x > 30].reshape(-1, 1)", "1e: Create a diagnoal 2-d tensor of size 6 x 6 with the diagonal values of 1,<br>\n2, ..., 6<br>\nHint: Use tf.range() and tf.diag().<br>", "def task_1e_np():\n return np.diag(np.arange(1, 7))", "1f: Create a random 2-d tensor of size 10 x 10 from any distribution.<br>\nCalculate its determinant.<br>\nHint: Look at tf.matrix_determinant().<br>", "def task_1f_np(x):\n return np.linalg.det(x)", "1g: Create tensor x with value [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9].<br>\nReturn the unique elements in x<br>\nHint: use tf.unique(). Keep in mind that tf.unique() returns a tuple.<br>", "def task_1g_np():\n x = [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9]\n _, idx = np.unique(x, return_index=True)\n return np.take(x, sorted(idx))", "1h: Create two tensors x and y of shape 300 from any normal distribution,<br>\nas long as they are from the same distribution.<br>\nUse tf.cond() to return:<br>\n- The mean squared error of (x - y) if the average of all elements in (x - y)<br>\n is negative, or<br>\n- The sum of absolute value of all elements in the tensor (x - y) otherwise.<br>\nHint: see the Huber loss function in the lecture slides 3.<br>", "def task_1h_np(x, y):\n average = np.mean(x - y)\n mse = np.mean((x - y) ** 2)\n asum = np.sum(np.abs(x - y))\n return mse if average < 0 else asum" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
folivetti/BIGDATA
Spark/Lab04.ipynb
mit
[ "Lab 5b - k-Means para Quantização de Atributos\nOs algoritmos de agrupamento de dados, além de serem utilizados em análise exploratória para extrair padrões de similaridade entre os objetos, pode ser utilizado para compactar o espaço de dados.\nNeste notebook vamos utilizar nossa base de dados de Sentiment Movie Reviews para os experimentos. Primeiro iremos utilizar a técnica word2vec que aprende uma transformação dos tokens de uma base em um vetor de atributos. Em seguida, utilizaremos o algoritmo k-Means para compactar a informação desses atributos e projetar cada objeto em um espaço de atributos de tamanho fixo.\nAs células-exercícios iniciam com o comentário # EXERCICIO e os códigos a serem completados estão marcados pelos comentários &lt;COMPLETAR&gt;.\n Nesse notebook: \nParte 1: Word2Vec\nParte 2: k-Means para quantizar os atributos\nParte 3: Aplicando um k-NN\nParte 0: Preliminares\nPara este notebook utilizaremos a base de dados Movie Reviews que será utilizada para o segundo projeto.\nA base de dados tem os campos separados por '\\t' e o seguinte formato:\n\"id da frase\",\"id da sentença\",\"Frase\",\"Sentimento\"\nPara esse laboratório utilizaremos apenas o campo \"Frase\".", "import os\nimport numpy as np\n\ndef parseRDD(point):\n \"\"\" Parser for the current dataset. It receives a data point and return\n a sentence (third field).\n Args:\n point (str): input data point\n Returns:\n str: a string\n \"\"\" \n data = point.split('\\t')\n return (int(data[0]),data[2])\n\ndef notempty(point):\n \"\"\" Returns whether the point string is not empty\n Args:\n point (str): input string\n Returns:\n bool: True if it is not empty\n \"\"\" \n return len(point[1])>0\n\nfilename = os.path.join(\"Data\",\"MovieReviews2.tsv\")\nrawRDD = sc.textFile(filename,100)\nheader = rawRDD.take(1)[0]\n\ndataRDD = (rawRDD\n #.sample(False, 0.1, seed=42)\n .filter(lambda x: x!=header)\n .map(parseRDD)\n .filter(notempty)\n #.sample( False, 0.1, 42 )\n )\n\nprint ('Read {} lines'.format(dataRDD.count()))\nprint ('Sample line: {}'.format(dataRDD.takeSample(False, 1)[0]))", "Parte 1: Word2Vec\nA técnica word2vec aprende através de uma rede neural semântica uma representação vetorial de cada token em um corpus de tal forma que palavras semanticamente similares sejam similares na representação vetorial.\nO PySpark contém uma implementação dessa técnica, para aplicá-la basta passar um RDD em que cada objeto representa um documento e cada documento é representado por uma lista de tokens na ordem em que aparecem originalmente no corpus. 
Após o processo de treinamento, podemos transformar um token utilizando o método transform para transformar cada token em uma representaçã vetorial.\nNesse ponto, cada objeto de nossa base será representada por uma matriz de tamanho variável.\n(1a) Gerando RDD de tokens\nUtilize a função de tokenização tokenize do Lab4d para gerar uma RDD wordsRDD contendo listas de tokens da nossa base original.", "# EXERCICIO\nimport re\n\nsplit_regex = r'\\W+'\n\nstopfile = os.path.join(\"Data\",\"stopwords.txt\")\nstopwords = set(sc.textFile(stopfile).collect())\n\ndef tokenize(string):\n \"\"\" An implementation of input string tokenization that excludes stopwords\n Args:\n string (str): input string\n Returns:\n list: a list of tokens without stopwords\n \"\"\"\n return <COMPLETAR>\n\nwordsRDD = dataRDD.map(lambda x: tokenize(x[1]))\n\nprint (wordsRDD.take(1)[0])\n\n# TEST Tokenize a String (1a)\nassert wordsRDD.take(1)[0]==[u'quiet', u'introspective', u'entertaining', u'independent', u'worth', u'seeking'], 'lista incorreta!'", "(1b) Aplicando transformação word2vec\nCrie um modelo word2vec aplicando o método fit na RDD criada no exercício anterior.\nPara aplicar esse método deve ser fazer um pipeline de métodos, primeiro executando Word2Vec(), em seguida aplicando o método setVectorSize() com o tamanho que queremos para nosso vetor (utilize tamanho 5), seguido de setSeed() para a semente aleatória, em caso de experimentos controlados (utilizaremos 42) e, finalmente, fit() com nossa wordsRDD como parâmetro.", "# EXERCICIO\nfrom pyspark.mllib.feature import Word2Vec\n\nmodel = Word2Vec().<COMPLETAR>\n\nprint (model.transform(u'entertaining'))\nprint (list(model.findSynonyms(u'entertaining', 2)))\n\ndist = np.abs(model.transform(u'entertaining')-np.array([0.0136831374839,0.00371457682922,-0.135785803199,0.047585401684,0.0414853096008])).mean()\nassert dist<1e-6, 'valores incorretos'\nassert list(model.findSynonyms(u'entertaining', 1))[0][0] == 'god', 'valores incorretos'", "(1c) Gerando uma RDD de matrizes\nComo primeiro passo, precisamos gerar um dicionário em que a chave são as palavras e o valor é o vetor representativo dessa palavra.\nPara isso vamos primeiro gerar uma lista uniqueWords contendo as palavras únicas do RDD words, removendo aquelas que aparecem menos do que 5 vezes $^1$. 
Em seguida, criaremos um dicionário w2v que a chave é um token e o valor é um np.array do vetor transformado daquele token$^2$.\nFinalmente, vamos criar uma RDD chamada vectorsRDD em que cada registro é representado por uma matriz onde cada linha representa uma palavra transformada.\n1\nNa versão 1.3 do PySpark o modelo Word2Vec utiliza apenas os tokens que aparecem mais do que 5 vezes no corpus, na versão 1.4 isso é parametrizado.\n2\nNa versão 1.4 do PySpark isso pode ser feito utilizando o método `getVectors()", "# EXERCICIO\nuniqueWords = (wordsRDD\n .<COMPLETAR>\n .<COMPLETAR>\n .<COMPLETAR>\n .<COMPLETAR>\n .collect()\n )\n\nprint ('{} tokens únicos'.format(len(uniqueWords)))\n\nw2v = {}\nfor w in uniqueWords:\n w2v[w] = <COMPLETAR>\nw2vb = sc.broadcast(w2v) # acesse como w2vb.value[w] \nprint ('Vetor entertaining: {}'.format( w2v[u'entertaining']))\n\nvectorsRDD = (wordsRDD\n .<COMPLETAR>\n )\nrecs = vectorsRDD.take(2)\nfirstRec, secondRec = recs[0], recs[1]\nprint (firstRec.shape, secondRec.shape)\n\n# TEST Tokenizing the small datasets (1c)\nassert len(uniqueWords) == 3388, 'valor incorreto'\nassert np.mean(np.abs(w2v[u'entertaining']-[0.0136831374839,0.00371457682922,-0.135785803199,0.047585401684,0.0414853096008]))<1e-6,'valor incorreto'\nassert secondRec.shape == (10,5)", "Parte 2: k-Means para quantizar os atributos\nNesse momento é fácil perceber que não podemos aplicar nossas técnicas de aprendizado supervisionado nessa base de dados:\n\n\nA regressão logística requer um vetor de tamanho fixo representando cada objeto\n\n\nO k-NN necessita uma forma clara de comparação entre dois objetos, que métrica de similaridade devemos aplicar?\n\n\nPara resolver essa situação, vamos executar uma nova transformação em nossa RDD. Primeiro vamos aproveitar o fato de que dois tokens com significado similar são mapeados em vetores similares, para agrupá-los em um atributo único.\nAo aplicarmos o k-Means nesse conjunto de vetores, podemos criar $k$ pontos representativos e, para cada documento, gerar um histograma de contagem de tokens nos clusters gerados.\n(2a) Agrupando os vetores e criando centros representativos\nComo primeiro passo vamos gerar um RDD com os valores do dicionário w2v. Em seguida, aplicaremos o algoritmo k-Means com $k = 200$ e $seed = 42$.", "# EXERCICIO\nfrom pyspark.mllib.clustering import KMeans\n\nvectors2RDD = sc.parallelize(np.array(list(w2v.values())),1)\nprint ('Sample vector: {}'.format(vectors2RDD.take(1)))\n\nmodelK = KMeans.<COMPLETAR>\n\nclustersRDD = vectors2RDD.<COMPLETAR>\nprint ('10 first clusters allocation: {}'.format(clustersRDD.take(10)))\n\n# TEST Amazon record with the most tokens (1d)\nassert clustersRDD.take(10)==[142, 83, 42, 0, 87, 52, 190, 17, 56, 0], 'valor incorreto'", "(2b) Transformando matriz de dados em vetores quantizados\nO próximo passo consiste em transformar nosso RDD de frases em um RDD de pares (id, vetor quantizado). Para isso vamos criar uma função quantizador que receberá como parâmetros o objeto, o modelo de k-means, o valor de k e o dicionário word2vec.\nPara cada ponto, vamos separar o id e aplicar a função tokenize na string. Em seguida, transformamos a lista de tokens em uma matriz word2vec. 
Finalmente, aplicamos cada vetor dessa matriz no modelo de k-Means, gerando um vetor de tamanho $k$ em que cada posição $i$ indica quantos tokens pertencem ao cluster $i$.", "# EXERCICIO\ndef quantizador(point, model, k, w2v):\n key = <COMPLETAR>\n words = <COMPLETAR>\n matrix = np.array( <COMPLETAR> )\n features = np.zeros(k)\n for v in matrix:\n c = <COMPLETAR>\n features[c] += 1\n return (key, features)\n \nquantRDD = dataRDD.map(lambda x: quantizador(x, modelK, 500, w2v))\n\nprint (quantRDD.take(1))\n\n# TEST Implement a TF function (2a)\nassert quantRDD.take(1)[0][1].sum() == 5, 'valores incorretos'" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
vadim-ivlev/STUDY
handson-data-science-python/DataScience-Python3/.ipynb_checkpoints/histograms.ipynb-checkpoint.ipynb
mit
[ "New to Plotly?\nPlotly's Python library is free and open source! Get started by downloading the client and reading the primer.\n<br>You can set up Plotly to work in online or offline mode, or in jupyter notebooks.\n<br>We also have a quick-reference cheatsheet (new!) to help you get started!\nVersion Check\nNote: Histograms are available in version <b>1.9.12+</b><br>\nRun pip install plotly --upgrade to update your Plotly version", "import plotly\nplotly.__version__", "Basic Histogram", "import plotly.plotly as py\nimport plotly.graph_objs as go\n\nimport numpy as np\nx = np.random.randn(500)\n\ndata = [\n go.Histogram(\n x=x\n )\n]\npy.iplot(data)", "Normalized Histogram", "import plotly.plotly as py\nimport plotly.graph_objs as go\n\nimport numpy as np\nx = np.random.randn(500)\n\ndata = [\n go.Histogram(\n x=x,\n histnorm='probability'\n )\n]\npy.iplot(data)", "Horizontal Histogram", "import plotly.plotly as py\nimport plotly.graph_objs as go\n\nimport numpy as np\ny = np.random.randn(500)\n\ndata = [\n go.Histogram(\n y=y\n )\n]\npy.iplot(data)\n", "Overlaid Histgram", "import plotly.plotly as py\nimport plotly.graph_objs as go\n\nimport numpy as np\nx0 = np.random.randn(500)\nx1 = np.random.randn(500)+1\n\ntrace1 = go.Histogram(\n x=x0,\n opacity=0.75\n)\ntrace2 = go.Histogram(\n x=x1,\n opacity=0.75\n)\ndata = [trace1, trace2]\nlayout = go.Layout(\n barmode='overlay'\n)\nfig = go.Figure(data=data, layout=layout)\npy.iplot(fig)\n", "Stacked Histograms ###", "import plotly.plotly as py\nimport plotly.graph_objs as go\n\nimport numpy as np\nx0 = np.random.randn(500)\nx1 = np.random.randn(500)+1\n\ntrace1 = go.Histogram(\n x=x0\n)\ntrace2 = go.Histogram(\n x=x1\n)\ndata = [trace1, trace2]\nlayout = go.Layout(\n barmode='stack'\n)\nfig = go.Figure(data=data, layout=layout)\npy.iplot(fig)\n", "Colored and Styled Histograms", "import plotly.plotly as py\nimport plotly.graph_objs as go\n\nimport numpy as np\nx0 = np.random.randn(500)\nx1 = np.random.randn(500)+1\n\ntrace1 = go.Histogram(\n x=x0,\n histnorm='count',\n name='control',\n autobinx=False,\n xbins=dict(\n start=-3.2,\n end=2.8,\n size=0.2\n ),\n marker=dict(\n color='fuchsia',\n line=dict(\n color='grey',\n width=0\n )\n ),\n opacity=0.75\n)\ntrace2 = go.Histogram(\n x=x1,\n name='experimental',\n autobinx=False,\n xbins=dict(\n start=-1.8,\n end=4.2,\n size=0.2\n ),\n marker=dict(\n color='rgb(255, 217, 102)'\n ),\n opacity=0.75\n)\ndata = [trace1, trace2]\nlayout = go.Layout(\n title='Sampled Results',\n xaxis=dict(\n title='Value'\n ),\n yaxis=dict(\n title='Count'\n ),\n barmode='overlay',\n bargap=0.25,\n bargroupgap=0.3\n)\nfig = go.Figure(data=data, layout=layout)\npy.iplot(fig)\n", "Reference\nSee https://plot.ly/python/reference/#histogram for more information and chart attribute options!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
griffinfoster/fundamentals_of_interferometry
2_Mathematical_Groundwork/2_y_exercises.ipynb
gpl-2.0
[ "Outline\nGlossary\n2. Mathematical Groundwork\nPrevious: 2.x Further reading and references\nNext: 3. Positional Astronomy\n\n\n\n\nImport standard modules:", "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom IPython.display import HTML \n#HTML('../style/course.css') #apply general CSS\nHTML('../style/code_toggle.html')", "Import section specific modules:", "pass", "2.y. Exercises<a id='math:sec:exercises'></a><!--\\label{math:sec:exercises}-->\nWe provide a small set of exercises suitable for an interferometry course.\n2.y.1. Fourier transforms and convolution: Fourier transform of the triangle function<a id='math:sec:exercises_fourier_triangle'></a><!--\\label{math:sec:exercises_fourier_triangle}-->\nConsider the triangle function given below.", "def plotviewgraph(fig, ax, xmin = 0, xmax = 1., ymin = 0., ymax = 1.):\n \"\"\"\n Prepare a viewvgraph for plotting a function\n \n Parameters:\n fig: Matplotlib figure\n ax: Matplotlib subplot\n xmin (float): Minimum of range\n xmax (float): Maximum of range\n ymin (float): Minimum of function\n ymax (float): Maximum of function\n\n return: axis and vertical and horizontal tick length\n \"\"\"\n \n # Axis ranges\n ax.axis([xmin-0.1*(xmax-xmin), xmax+0.1*(xmax-xmin), -0.2*(ymax-ymin), ymax])\n ax.axis('off')\n\n # get width and height of axes object to compute, see https://3diagramsperpage.wordpress.com/2014/05/25/arrowheads-for-axis-in-matplotlib/\n\n # matching arrowhead length and width\n dps = fig.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(dps)\n width, height = bbox.width, bbox.height\n \n # manual arrowhead width and length\n hw = 1./15.*(ymax-ymin) \n hl = 1./30.*(xmax-xmin)\n lw = 1. # axis line width\n ohg = 0.3 # arrow overhang\n \n # compute matching arrowhead length and width\n yhw = hw/(ymax-ymin)*(xmax-xmin)* height/width \n yhl = hl/(xmax-xmin)*(ymax-ymin)* width/height\n\n # Draw arrows\n ax.arrow(xmin-0.1*(xmax-xmin),0, 1.2*(xmax-xmin),0, fc='k', ec='k', lw = lw, \n head_width=hw, head_length=hl, overhang = ohg, \n length_includes_head= True, clip_on = False)\n ax.arrow(0,ymin-0.1*(ymax-ymin), 0., 1.4*(ymax-ymin), fc='k', ec='k', lw = lw, \n head_width=yhw, head_length=yhl, overhang = ohg, \n length_includes_head= True, clip_on = False)\n \n # Draw ticks for A, -A, and B\n twv = 0.01*height # vertical tick width\n twh = twv*(xmax-xmin)/(ymax-ymin)/ width*height\n \n return twv, twh\n\ndef plottriangle():\n \n A = 1.\n B = 1.\n \n # Start the plot, create a figure instance and a subplot\n fig = plt.figure(figsize=(20,5))\n ax = fig.add_subplot(111)\n \n twv, twh = plotviewgraph(fig, ax, xmin = -A, xmax = A, ymin = 0., ymax = B)\n \n ticx = [[-A,'-A'],[A,'A']]\n \n for tupel in ticx:\n ax.plot([tupel[0],tupel[0]],[-twv, twv], 'k-')\n ax.text(tupel[0], 0.-twh, tupel[1], fontsize = 24, horizontalalignment = 'left', verticalalignment = 'top', color = 'black')\n \n ticy = [[B,'B']]\n for tupel in ticy:\n ax.plot([-twh, twh], [tupel[0], tupel[0]], 'k-')\n ax.text(0.+twv, tupel[0], tupel[1], fontsize = 24, horizontalalignment = 'left', verticalalignment = 'bottom', color = 'black')\n\n \n # Plot the function\n ax.plot([-A,0.,A],[0., B, 0.], 'r-', lw = 2)\n\n # Annotate axes\n ax.text(0.-twh, 1.2*(B), r'$f(x)$', fontsize = 24, horizontalalignment = 'right', verticalalignment = 'bottom', color = 'black')\n ax.text(1.2*B, 0., r'$x$', fontsize = 24, horizontalalignment = 'left', verticalalignment = 'top', color = 'black')\n\n \n # Show amplitude\n# plt.annotate(s='', 
xy=(mu+2*sigma,0.), xytext=(mu+2*sigma,a), \\\n# arrowprops=dict(color = 'magenta', arrowstyle='<->'))\n# ax.text(mu+2*sigma+sigma/10., a/2, '$a$', fontsize = 12, horizontalalignment = 'left', \\\n# verticalalignment = 'center', color = 'magenta')\n\n \nplottriangle()\n# <a id='math:fig:triangle'></a><!--\\label{math:fig:triangle}-->", "Figure 2.y.1: Triangle function with width $2A$ and amplitude $B$.<a id='math:fig:triangle'></a><!--\\label{math:fig:triangle}-->\n<b>Assignments:</b>\n<ol type=\"A\">\n <li>What can you tell about the complex part of the Fourier transform of $f$ using the symmetry of the function?</li>\n <li>Write down the function $f$ in two ways, once as a piece-wise defined function, once as a convolution of the rectangle function with itself.</li>\n <li>Calculate the Fourier transform, making use of expressing f as a convolution of a boxcar function with itself and using the convolution theorem.</li>\n</ol>\n\n2.y.1.1 Fourier transform of the triangle function: example answer to assignment 1.<a id='math:sec:exercises_fourier_triangle_a'></a><!--\\label{math:sec:exercises_fourier_triangle_a}-->\n<b>What can you tell about the complex part and the symmetry of the Fourier transform of $f$ using the symmetry of the function?</b>\nThe function is real-valued ($f^(x)\\,=\\,f(x)$) and even ($f(x)\\,=\\,f(-x)$), so it is Hermetian ($f^(x)\\,=\\,f(-x)$, see definition here &#10142; <!--\\ref{math:sec:fourier_transforms_of_real_valued_and_hermetian_functions}-->). According to Sect. 2.4.6 &#10142;<!--\\ref{math:sec:fourier_transforms_of_real_valued_and_hermetian_functions}-->, this means that the Fourier transform is a <b>real-valued</b> function (because it is the Fourier transform of a Hermetian function) and also Hermetian (because it is the Fourier transform of a real-valued function). Hence it is also <b>even</b> ($f^(x)\\,=\\,f(x) \\,\\land\\, f^(x)\\,=\\,f(-x)\\,\\Rightarrow\\,f(x)\\,=\\,f(-x)$). Real-valued means that the complex part of $f$ is $0$.\n2.y.1.2 Fourier transform of the triangle function: example answer to assignment 2.<a id='math:sec:exercises_fourier_triangle_b'></a><!--\\label{math:sec:exercises_fourier_triangle_b}-->\n<b>Write down the function $f$ in two ways, once as a piece-wise defined function, once as a convolution of the rectangle function with itself.</b>\nPart one is straightforward:\n<a id='math:eq:y_001'></a><!--\\label{math:eq:y_001}-->$$\n\\begin{align}\nf(x) &= \\left {\n \\begin{array}{lll}\n B-\\frac{B}{A}|x| & {\\rm for} & |x| \\leq A\\\n 0 & {\\rm for} & |x| > A\n\\end{array}\\right .\n\\end{align}\n$$\nThe solution to part two, using the definition as given in Sect. 2.4.6 &#10142;<!--\\ref{math:sec:boxcar_and_rectangle_function}-->\n<a id='math:eq:y_002'></a><!--\\label{math:eq:y_002}-->\n$$\n\\begin{align}\n f(x) \\,&=\\,\\frac{B}{A}\\cdot \\Pi_{-\\frac{A}{2},\\frac{A}{2}}\\circ \\Pi_{-\\frac{A}{2},\\frac{A}{2}}(x)\\\n&=\\,\\frac{B}{A}\\cdot\\Pi_A\\circ \\Pi_A\\,\\,\\, {\\rm , where} \\,\\,\\,\\Pi_A(x) \\,=\\,\\Pi(\\frac{x}{A})\\\n\\end{align}\n$$\nrequires a little calculation, but is straightforward. 
Using the definition of the boxcar function &#10142; <!--\\ref{math:sec:boxcar_and_rectangle_function}--> and the definition of the convolution &#10142; <!--\\ref{math:sec:definition_of_the_convolution}-->, one can see:\n<a id='math:eq:y_003'></a><!--\\label{math:eq:y_003}-->\n$$\n\\begin{align}\n\\Pi_{-\\frac{A}{2},\\frac{A}{2}}\\circ \\Pi_{-\\frac{A}{2},\\frac{A}{2}}(x)\\,& =\\, \\int_{-\\infty}^{\\infty}\\Pi_{-\\frac{A}{2},\\frac{A}{2}}(t)\\Pi_{-\\frac{A}{2},\\frac{A}{2}}(x-t)\\,dt\\\n& =\\, \\int_{-\\frac{A}{2}}^{\\frac{A}{2}}\\Pi_{-\\frac{A}{2},\\frac{A}{2}}(x-t)\\,dt\\\n& \\underset{u\\,=\\,x-t}{=} \\, \\int_{u(-\\frac{A}{2})}^{u(\\frac{A}{2})}\\Pi_{-\\frac{A}{2},\\frac{A}{2}}(u)\\frac{dx}{du}\\,du\\\n& =\\, \\int_{x+\\frac{A}{2}}^{x-\\frac{A}{2}}\\Pi_{-\\frac{A}{2},\\frac{A}{2}}(u)\\cdot(-1)du\\\n& =\\, \\int_{x-\\frac{A}{2}}^{x+\\frac{A}{2}}\\Pi_{-\\frac{A}{2},\\frac{A}{2}}(u)du\\\n\\end{align}\n$$\nand, accordingly\n<a id='math:eq:y_004'></a><!--\\label{math:eq:y_004}-->\n\\begin{align}\n|x| \\,>\\, A \\,&\\Rightarrow\\,\\Pi_{-\\frac{A}{2},\\frac{A}{2}}\\circ \\Pi_{-\\frac{A}{2},\\frac{A}{2}}(x)\\, =\\, 0\\\n-A\\,\\leq\\,x\\,\\leq 0\\,&\\Rightarrow \\,\\Pi_{-\\frac{A}{2},\\frac{A}{2}}\\circ \\Pi_{-\\frac{A}{2},\\frac{A}{2}}(x)\\,=\\,\\int_{-\\frac{A}{2}}^{x+\\frac{A}{2}}du\\,=\\,A+x\\\n0\\,\\leq\\,x\\,\\leq A\\,&\\Rightarrow \\,\\Pi_{\\frac{A}{2},\\frac{A}{2}}\\circ \\Pi_{-\\frac{A}{2},\\frac{A}{2}}(x)\\,=\\,\\int_{x-\\frac{A}{2}}^{\\frac{A}{2}}du\\,=\\,A-x\\\n\\end{align}\nThis is identical to above piece-wise definition &#10549;.\n2.y.1.3 Fourier transform of the triangle function: example answer to assignment 3.<a id='math:sec:exercises_fourier_triangle_c'></a><!--\\label{math:sec:exercises_fourier_triangle_c}-->\nWe know that (convolution theorem &#10142;<!--\\ref{math:sec:convolution_theorem}-->, similarity theorem &#10142;<!--\\ref{math:sec:similarity_theorem}-->, definition of the triangle function &#10549;<!--\\ref{math:eq:y_002}-->, Fourier transform of the rectangle boxcar function &#10142;<!--\\ref{math:sec:convolution_theorem}-->):\n<a id='math:eq:y_005'></a><!--\\label{math:eq:y_005}-->$$\n\\begin{align}\n\\mathscr{F}{h\\circ g}\\,&=\\,\\mathscr{F}{h}\\cdot\\mathscr{F}{g}\\\ng\\,=\\,h(ax) \\,&\\Rightarrow\\, \\mathscr{F}{g}(s) = \\frac{1}{|a|}\\mathscr{F}{h}(\\frac{s}{a})\\\nf(x) \\,&=\\, \\frac{B}{A}\\Pi_A\\circ\\Pi_A(x)\\\n\\Pi_A(x)\\,&=\\,\\Pi(\\frac{x}{A})\\\n\\mathscr{F}{\\Pi}(s) \\,&=\\,{\\rm sinc}(s) \\\n\\end{align}\n$$\nThis makes our calculations a lot shorter.\n<a id='math:eq:y_006'></a><!--\\label{math:eq:y_006}-->$$\n\\begin{align}\n\\mathscr{F}{f}(s)\\,&=\\,\\mathscr{F}{\\frac{B}{A}\\Pi_A\\circ\\Pi_A}(s)\\\n&=\\,\\frac{B}{A}\\mathscr{F}{\\Pi_A}(s)\\cdot\\mathscr{F}{\\Pi_A}(s)\\\n&=\\,\\frac{B}{A}\\mathscr{F}{A\\Pi}(As)\\cdot\\mathscr{F}{A\\Pi_A}(As)\\\n&=\\,AB\\,\\mathscr{F}{\\Pi}(As)\\cdot\\mathscr{F}{\\Pi}(As)\\\n&=\\,AB\\,{\\rm sinc}(As)\\cdot{\\rm sinc}(As)\\\n&=\\,AB\\,{\\rm sinc}^2(As)\\\n&=\\,AB\\,\\frac{sin^2 A\\pi s}{A^2\\pi^2 s^2}\\\n\\end{align}$$\nSo the solution looks like this:", "def plotfftriangle():\n \n A = 1.\n B = 1.\n \n # Start the plot, create a figure instance and a subplot\n fig = plt.figure(figsize=(20,5))\n ax = fig.add_subplot(111)\n\n \n twv, twh = plotviewgraph(fig, ax, xmin = -3./A, xmax = 3./A, ymin = -0.3, ymax = B) \n ticx = [[-A,r'$-\\frac{1}{A}$'],[A,'A']]\n \n ticx = [[-3.*A, r'$\\frac{-3}{A}$'], [-2.*A, r'$\\frac{-2}{A}$'], [-1./A, r'$\\frac{-1}{A}$'], [1./A, r'$\\frac{1}{A}$'], [2./A, r'$\\frac{2}{A}$'], [3./A, 
r'$\\frac{3}{A}$']]\n for tupel in ticx:\n ax.plot([tupel[0],tupel[0]],[-twv, twv], 'k-')\n ax.text(tupel[0], 0.-2.*twh, tupel[1], fontsize = 24, horizontalalignment = 'center', verticalalignment = 'top', color = 'black')\n \n ticx = [[0.,r'$0$']]\n for tupel in ticx:\n ax.plot([tupel[0],tupel[0]],[-twv, twv], 'k-')\n ax.text(tupel[0]+twh, 0.-2.*twh, tupel[1], fontsize = 24, horizontalalignment = 'left', verticalalignment = 'top', color = 'black')\n\n \n ticy = [[B,r'$\\frac{B}{A}$']]\n for tupel in ticy:\n ax.plot([-twh, twh], [tupel[0], tupel[0]], 'k-')\n ax.text(0.+twv, tupel[0], tupel[1], fontsize = 24, horizontalalignment = 'left', verticalalignment = 'bottom', color = 'black')\n\n # Plot the function\n x = np.linspace(-4.*A, 4.*A, 900)\n y = np.power(np.sinc(x),2)\n\n # Annotate axes\n ax.text(0.-A/20, 1.2*(B), r'$f(x)$', fontsize = 24, horizontalalignment = 'right', verticalalignment = 'bottom', color = 'black')\n ax.text(1.2*3.*A, 0., r'$x$', fontsize = 24, horizontalalignment = 'left', verticalalignment = 'top', color = 'black')\n \n ax.plot(x, y, 'r-', lw = 2)\n \nplotfftriangle()\n# <a id='math:fig:fftriangle'></a><!--\\label{math:fig:fftriangle}-->", "Figure 2.y.2: Triangle function with width $2A$ and amplitude $B$.<a id='math:fig:ft_of_triangle'></a><!--\\label{math:fig:ft_of_triangle}-->\n2.y.2. Fourier transforms and convolution: Convolution of two functions with finite support<a id='math:sec:exercises_convolution_of_two_functions_with_finite_support'></a><!--\\label{math:sec:exercises_convolution_of_two_functions_with_finite_support}-->\nConsider the two functions given below:", "def plotrectntria():\n \n A = 1.\n B = 1.4\n \n # Start the plot, create a figure instance and a subplot\n fig = plt.figure(figsize=(20,5))\n ax = fig.add_subplot(121)\n \n twv, twh = plotviewgraph(fig, ax, xmin = 0., xmax = 3.*A, ymin = 0., ymax = 3.) \n\n ticx = [[1.*A, r'$A$'], [2.*A, r'$2A$'], [3.*A, r'$3A$']]\n for tupel in ticx:\n ax.plot([tupel[0],tupel[0]],[-twv, twv], 'k-')\n ax.text(tupel[0], 0.-2.*twh, tupel[1], fontsize = 24, horizontalalignment = 'center', verticalalignment = 'top', color = 'black')\n \n ticx = [[0.,r'$0$']]\n for tupel in ticx:\n ax.plot([-tupel[0],-tupel[0]],[-twv, twv], 'k-')\n ax.text(tupel[0]+twh, 0.-2.*twh, tupel[1], fontsize = 24, horizontalalignment = 'left', verticalalignment = 'top', color = 'black')\n\n \n ticy = [[1,r'$1$'], [2.,r'$2$'], [3.,r'$3$']]\n for tupel in ticy:\n ax.plot([-twh, twh], [tupel[0], tupel[0]], 'k-')\n ax.text(0.-twv, tupel[0], tupel[1], fontsize = 24, horizontalalignment = 'right', verticalalignment = 'center', color = 'black')\n\n ticy = [[B, r'$B$']]\n for tupel in ticy:\n ax.plot([-twh, twh], [tupel[0], tupel[0]], 'k-')\n ax.text(0.+twv, tupel[0], tupel[1], fontsize = 24, horizontalalignment = 'left', verticalalignment = 'bottom', color = 'black')\n\n # Plot the function\n x = [A, A, 2*A, 2*A]\n y = [0., B, B, 0.]\n ax.plot(x, y, 'r-', lw = 2)\n\n x = [0., A]\n y = [B, B]\n ax.plot(x, y, 'k--', lw = 1)\n\n # Annotate axes\n ax.text(0.-3.*twh, 1.2*3., r'$g(x)$', fontsize = 24, horizontalalignment = 'right', verticalalignment = 'bottom', color = 'black')\n ax.text(1.1*3.*A, 0., r'$x$', fontsize = 24, horizontalalignment = 'left', verticalalignment = 'top', color = 'black')\n \n ###################\n \n ax = fig.add_subplot(122)\n\n twv, twh = plotviewgraph(fig, ax, xmin = 0., xmax = 3.*A, ymin = 0., ymax = 3.) 
\n\n ticx = [[1.*A, r'$A$'], [2.*A, r'$2A$'], [3.*A, r'$3A$']]\n for tupel in ticx:\n ax.plot([tupel[0],tupel[0]],[-twv, twv], 'k-')\n ax.text(tupel[0], 0.-2.*twh, tupel[1], fontsize = 24, horizontalalignment = 'center', verticalalignment = 'top', color = 'black')\n \n ticx = [[0.,r'$0$']]\n for tupel in ticx:\n ax.plot([-tupel[0],-tupel[0]],[-twv, twv], 'k-')\n ax.text(tupel[0]+twh, 0.-2.*twh, tupel[1], fontsize = 24, horizontalalignment = 'left', verticalalignment = 'top', color = 'black')\n\n \n ticy = [[1,r'$1$'], [2.,r'$2$'], [3.,r'$3$']]\n for tupel in ticy:\n ax.plot([-twh, twh], [tupel[0], tupel[0]], 'k-')\n ax.text(0.-twv, tupel[0], tupel[1], fontsize = 24, horizontalalignment = 'right', verticalalignment = 'center', color = 'black')\n\n\n # Plot the function\n x = [A, A, 2*A, 3*A, 3*A]\n y = [0., 1., 3., 1., 0.]\n ax.plot(x, y, 'r-', lw = 2)\n\n x = [0., A]\n y = [1., 1.]\n ax.plot(x, y, 'k--', lw = 1)\n\n x = [0., 2*A]\n y = [3., 3.]\n ax.plot(x, y, 'k--', lw = 1)\n\n # Annotate axes\n ax.text(0.-3.*twh, 1.2*3., r'$f(x)$', fontsize = 24, horizontalalignment = 'right', verticalalignment = 'bottom', color = 'black')\n ax.text(1.1*3.*A, 0., r'$x$', fontsize = 24, horizontalalignment = 'left', verticalalignment = 'top', color = 'black')\n\nplotrectntria()\n# <a id='math:fig:two_fs_with_finite_support'></a><!--\\label{math:fig:two_fs_with_finite_support}-->", "Figure 2.y.3: Triangle function with width $2A$ and amplitude $B$.<a id='math:fig:two_fs_with_finite_support'></a><!--\\label{math:fig:two_fs_with_finite_support}-->\n<b>Assignments:</b>\n<ol type=\"A\">\n <li>Write down the functions g and h.</li>\n <li>Calculate their convolution.</li>\n</ol>\n\n2.y.2.1 Convolution of two functions with finite support: example answer to assignment 1.<a id='math:sec:exercises_convolution_of_two_functions_with_finite_support_a'></a><!--\\label{math:sec:exercises_convolution_of_two_functions_with_finite_support_a}-->\n<b>Write down the functions g and h.</b>\n<a id='math:eq:y_007'></a><!--\\label{math:eq:y_007}-->$$\n\\begin{align}\nh(x) &= \\left {\n \\begin{array}{lll}\n B & {\\rm for} & A \\leq x \\leq 2A\\\n 0 & {\\rm else}\n\\end{array}\\right .\\\ng(x) &= \\left {\n \\begin{array}{lll}\n g_1(x)\\,=\\,\\frac{2}{A}\\left(x-\\frac{A}{2}\\right) & {\\rm for} & A \\leq x \\leq 2A\\\n g_2(x)\\,=\\,-\\frac{2}{A}\\left(x-\\frac{7A}{2}\\right) & {\\rm for} & 2A \\leq x \\leq 3A\\\n 0 & {\\rm else}\n\\end{array}\\right .\\\n\\end{align}\n$$\n2.y.2.2 Convolution of two functions with finite support: example answer to assignment 2.<a id='math:sec:exercises_convolution_of_two_functions_with_finite_support_b'></a><!--\\label{math:sec:exercises_convolution_of_two_functions_with_finite_support_b}-->\nWe have to evaluate the integral (see definition of the convolution &#10142; <!--\\ref{math:sec:definition_of_the_convolution}-->):\n<a id='math:eq:y_008'></a><!--\\label{math:eq:y_008}-->$$\ng\\circ h(x) \\, = \\, \\int_{-\\infty}^{\\infty}g(x-t)h(t)\\,dt\n$$\nTo do so, we calculate the integral for ranges of $x$, depending on the supports (ranges where the function in non-zero) of $g(x-t)$ and $h(t)$, or $h_1(t)$ and $g_2(t)$ respectively.\nAs an aid, rewrite above functions &#10549;<!--\\ref{math:eq:y_008}-->:\n<a id='math:eq:y_009'></a><!--\\label{math:eq:y_009}-->$$\n\\begin{align}\ng(x-t) &= \\left {\n \\begin{array}{lll}\n B & {\\rm for} & -2A+x \\leq t \\leq -A+x\\\n 0 & {\\rm else}\n\\end{array}\\right .\\\nh(t) &= \\left {\n \\begin{array}{lll}\n 
h_1(t)\\,=\\,\\frac{2}{A}\\left(t-\\frac{A}{2}\\right) & {\\rm for} & A \\leq t \\leq 2A\\\n h_2(t)\\,=\\,-\\frac{2}{A}\\left(t-\\frac{7A}{2}\\right) & {\\rm for} & 2A \\leq t \\leq 3A\\\n 0 & {\\rm else}\n\\end{array}\\right .\\\n\\end{align}\n$$\nCase 1:\n<a id='math:eq:y_010'></a><!--\\label{math:eq:y_010}-->$$\n\\begin{align}\nx \\,&<\\, 2A\\qquad\\,\\Rightarrow\\\ng\\circ h(x) \\, &= \\, \\int_{-\\infty}^{A}g(x-t)h(t)\\,dt\\\n&=\\, 0\n\\end{align}\n$$\nCase 2:\n<a id='math:eq:y_011'></a><!--\\label{math:eq:y_011}-->$$\n\\begin{align}\n2A \\,&\\leq x \\,<\\, 3A\\qquad\\Rightarrow\\\ng\\circ h(x) \\, &= \\, \\int_{-\\infty}^{\\infty}g(x-t)h(t)\\,dt\\\n&=\\, \\int_{A}^{x-A}B\\,h_1(t)\\,dt\\,\\\n&=\\,\\int_{A}^{x-A}\\frac{2B}{A}\\left(t-\\frac{A}{2}\\right)\\,dt\\\n&=\\,\\frac{B}{A}\\left(x^2-3Ax+2A^2\\right)\\\n\\end{align}$$\nCase 3:\n<a id='math:eq:y_012'></a><!--\\label{math:eq:y_012}-->$$\n\\begin{align}\n3A \\,&\\leq\\, x \\,<\\, 4A\\qquad\\Rightarrow\\\ng\\circ h(x) \\, &=\\, \\int_{x-2A}^{2A}B\\,h_1(t)\\,dt+ \\int_{2A}^{x-A}B\\,h_2(t)\\,dt\\\n&=\\,\\int_{x-2A}^{2A}\\frac{2B}{A}\\left(t-\\frac{A}{2}\\right)\\,dt- \\int_{2A}^{x-A}\\frac{2B}{A}\\left(t-\\frac{7A}{2}\\right)\\,dt\\\n&=\\,\\frac{B}{A}\\left(-2x^2+14Ax-22A^2\\right)\\\n\\end{align}\n$$\nCase 4:\n<a id='math:eq:y_013'></a><!--\\label{math:eq:y_013}-->$$\n\\begin{align}\n4A \\,&\\leq x \\,<\\, 5A\\qquad\\Rightarrow\\\ng\\circ h(x) \\, &=\\, \\int_{x-2A}^{3A}B\\,h_2(t)\\,dt\\,=\\,\\int_{x-2A}^{3A}-\\frac{2B}{A}\\left(t-\\frac{7A}{2}\\right)\\,dt\\\n&=\\,\\frac{B}{A}\\left(x^2-11Ax+30A^2\\right)\\\n\\end{align}\n$$\nCase 5:\n<a id='math:eq:y_014'></a><!--\\label{math:eq:y_014}-->$$\n\\begin{align}\n5A&\\,\\leq\\,x\\qquad\\,\\Rightarrow\\\ng\\circ h(x) \\, &= \\, \\int_{3A}^{\\infty}g(x-t)h(t)\\,dt\\\n&=\\, 0\n\\end{align}\n$$\nSummarising, the convolution of g and h results in the following composite function:\n<a id='math:eq:y_014'></a><!--\\label{math:eq:y_014}-->$$\n\\begin{align}\ng\\circ h(x) \\, &= \n \\frac{B}{A}\\left{\\begin{array}{lll}\n 0 & {\\rm for} & x < 2A \\\n x^2-3Ax+2A^2 & {\\rm for} & 2A \\leq x < 3A\\\n -2x^2+14Ax-22A^2 & {\\rm for} & 3A \\leq x < 4A\\\n x^2-11Ax+30A^2 & {\\rm for} & 4A \\leq x < 5A\\\n 0 & {\\rm for} & 5A \\leq x \\\n\\end{array}\\right .\\\n\\end{align}$$", "def rectntriaconv(A,B,x):\n \n xn = x[x < (2*A)]\n yn = xn*0.\n y = yn\n \n xn = x[(x == 2*A) | (x > 2*A) & (x < 3*A)]\n yn = (B/A)*(np.power(xn,2)-3*A*xn+2*np.power(A,2))\n y = np.append(y,yn)\n \n xn = x[(x == 3*A) | (x > 3*A) & (x < 4*A)]\n yn = (B/A)*((-2*np.power(xn,2))+14*A*xn-22*np.power(A,2))\n y = np.append(y,yn)\n \n xn = x[(x == 4*A) | (x > 4*A) & (x < 5*A)]\n yn = (B/A)*(np.power(xn,2)-11*A*xn+30*np.power(A,2))\n y = np.append(y,yn)\n \n xn = x[(x == 5*A) | (x > 5*A)]\n yn = xn*0.\n y = np.append(y,yn)\n\n return y\n\ndef plotrectntriaconv():\n A = 1.\n B = 1.4\n \n # Start the plot, create a figure instance and a subplot\n fig = plt.figure(figsize=(20,5))\n ax = fig.add_subplot(121)\n \n twv, twh = plotviewgraph(fig, ax, xmin = 0., xmax = 6.*A, ymin = 0., ymax = 2.5*A*B) \n\n ticx = [[1.*A, r'$A$'], [2.*A, r'$2A$'], [3.*A, r'$3A$'], [4.*A, r'$4A$'], [5.*A, r'$5A$'], [6.*A, r'$6A$']]\n for tupel in ticx:\n ax.plot([tupel[0],tupel[0]],[-twv, twv], 'k-')\n ax.text(tupel[0], 0.-2.*twh, tupel[1], fontsize = 24, horizontalalignment = 'center', verticalalignment = 'top', color = 'black')\n \n ticx = [[0.,r'$0$']]\n for tupel in ticx:\n ax.plot([-tupel[0],-tupel[0]],[-twv, twv], 'k-')\n ax.text(tupel[0]+twh, 0.-2.*twh, 
tupel[1], fontsize = 24, horizontalalignment = 'left', verticalalignment = 'top', color = 'black')\n \n ticy = [[2*A*B, r'$2AB$'], [2.5*A*B, r'$\\frac{5}{2}AB$']]\n for tupel in ticy:\n ax.plot([-twh, twh], [tupel[0], tupel[0]], 'k-')\n ax.text(0.+5*twv, tupel[0], tupel[1], fontsize = 24, horizontalalignment = 'left', verticalalignment = 'bottom', color = 'black')\n\n # Plot the function\n x = np.linspace(0., 7.*A, 900)\n y = rectntriaconv(A,B,x)\n ax.plot(x, y, 'r-', lw = 2)\n\n # Plot a few lines\n x = [0., 4*A]\n y = [2.*A*B, 2.*A*B]\n ax.plot(x, y, 'k--', lw = 1)\n\n x = [0., 3.5*A]\n y = [2.5*A*B, 2.5*A*B]\n ax.plot(x, y, 'k--', lw = 1)\n\n x = [3.*A, 3.*A]\n y = [0., 2.*A*B]\n ax.plot(x, y, 'k--', lw = 1)\n\n x = [4.*A, 4.*A]\n y = [0., 2.*A*B]\n ax.plot(x, y, 'k--', lw = 1)\n\n \n # Annotate axes\n ax.text(0.-3.*twh, 1.25*2.5*A*B, r'$g\\circ h(x)$', fontsize = 24, horizontalalignment = 'right', verticalalignment = 'bottom', color = 'black')\n ax.text(1.1*6.*A, 0., r'$x$', fontsize = 24, horizontalalignment = 'left', verticalalignment = 'top', color = 'black')\n\nplotrectntriaconv()\n# <a id='math:fig:two_fs_wfs'></a><!--\\label{math:fig:two_fs_wfs}-->", "Figure 2.2.3: Convolution of the two functions g and h from Fig. 2.y.3 &#10549; .<!--\\ref{math:fig:two_fs_with_finite_support}-->.\n\n\nNext: 3. Positional Astronomy" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
phoebe-project/phoebe2-docs
2.2/examples/legacy_spots.ipynb
gpl-3.0
[ "Comparing Spots in PHOEBE 2 vs PHOEBE Legacy\nSetup\nLet's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).", "!pip install -I \"phoebe>=2.2,<2.3\"", "As always, let's do imports and initialize a logger and a new bundle. See Building a System for more details.", "%matplotlib inline\n\nimport phoebe\nfrom phoebe import u # units\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nlogger = phoebe.logger()\n\nb = phoebe.default_binary()", "Adding Spots and Compute Options", "b.add_spot(component='primary', relteff=0.8, radius=20, colat=45, colon=90, feature='spot01')\n\nb.add_dataset('lc', times=np.linspace(0,1,101))\n\nb.add_compute('phoebe', irrad_method='none', compute='phoebe2')\n\nb.add_compute('legacy', irrad_method='none', compute='phoebe1')", "Let's use the external atmospheres available for both phoebe1 and phoebe2", "b.set_value_all('atm', 'extern_planckint')\n\nb.set_value_all('ld_mode', 'manual')\nb.set_value_all('ld_func', 'logarithmic')\nb.set_value_all('ld_coeffs', [0.0, 0.0])\n\nb.run_compute('phoebe2', model='phoebe2model')\n\nb.run_compute('phoebe1', model='phoebe1model')", "Plotting", "afig, mplfig = b.plot(legend=True, ylim=(1.95, 2.05), show=True)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
INM-6/Python-Module-of-the-Week
session20_NEST/jupyter_notebooks/1_first_steps.ipynb
mit
[ "PyNEST - First Steps\nModeling networks of spiking neurons using NEST\nPython Module of the Week, 03.05.2019\nAlexander van Meegen\nThis notebook guides through your first steps using NEST. It shows\n* how to get help\n* how to create and simulate a single neuron\n* how to visualize the output\nEssentially, this is a reproduction of the 'Hello World!' notebook with added explanations. \nFor more details see part 1 of the official PyNEST tutorial.", "# populate namespace with pylab functions and stuff\n%pylab inline\n\nimport nest # import NEST module", "Getting help", "# information about functions with Python's help() ...\nhelp(nest.Models)\n\n# ... or IPython's question mark\nnest.Models?\n\n# list neuron models\nnest.Models()\n\n# choose LIF neuron with exponential synaptic currents: 'iaf_psc_exp'\n# look in documentation for model description\n# or (if not compiled with MPI)\nnest.help('iaf_psc_exp')", "Creating a neuron", "# before creating a new network,\n# reset the simulation kernel / remove all nodes\nnest.ResetKernel()\n\n# create the neuron\nneuron = nest.Create('iaf_psc_exp')\n\n# investigate the neuron\n\n# Create() just returns a list (tuple) with handles to the new nodes\n# (handles = integer numbers called ids)\nneuron\n\n# current dynamical state/parameters of the neuron\n# note that the membrane voltage is at -70 mV\nnest.GetStatus(neuron)", "Creating a spikegenerator", "# create a spike generator\nspikegenerator = nest.Create('spike_generator')\n\n# check out 'spike_times' in its parameters\nnest.GetStatus(spikegenerator)\n\n# set the spike times at 10 and 50 ms\nnest.SetStatus(spikegenerator, {'spike_times': [10., 50.]})", "Creating a voltmeter", "# create a voltmeter for recording\nvoltmeter = nest.Create('voltmeter')\n\n# investigate the voltmeter\nvoltmeter\n\n# see that it records membrane voltage, senders, times\nnest.GetStatus(voltmeter)", "Connecting", "# investigate Connect() function\nnest.Connect?\n\n# connect spike generator and voltmeter to the neuron\nnest.Connect(spikegenerator, neuron, syn_spec={'weight': 1e3})\n\nnest.Connect(voltmeter, neuron)", "Simulating", "# run simulation for 100 ms\nnest.Simulate(100.)\n\n# look at nest's KernelStatus:\n# network_size (root node, neuron, spike generator, voltmeter)\n# num_connections\n# time (simulation duration)\nnest.GetKernelStatus()\n\n# note that voltmeter has recorded 99 events\nnest.GetStatus(voltmeter)\n\n# read out recording time and voltage from voltmeter\ntimes = nest.GetStatus(voltmeter)[0]['events']['times']\nvoltages = nest.GetStatus(voltmeter)[0]['events']['V_m']", "Plotting", "# plot results\n# units can be found in documentation\npylab.plot(times, voltages, label='Neuron 1')\npylab.xlabel('Time (ms)')\npylab.ylabel('Membrane potential (mV)')\npylab.title('Membrane potential')\npylab.legend()\n\n# create the same plot with NEST's build-in plotting function\nimport nest.voltage_trace\n\nnest.voltage_trace.from_device(voltmeter)", "Bored?\n\nTry to make the neuron spike (maybe use 0_hello_world.ipynb)\nConnect another neuron to the first neuron recieving that spike\nCheck out the official PyNEST tutorials, in particular\npart 1: Neurons and simple neural networks\npart 2: Populations of neurons" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.23/_downloads/61268d5dc873438a743241ad21a989fd/decoding_rsa_sgskip.ipynb
bsd-3-clause
[ "%matplotlib inline", "Representational Similarity Analysis\nRepresentational Similarity Analysis is used to perform summary statistics\non supervised classifications where the number of classes is relatively high.\nIt consists in characterizing the structure of the confusion matrix to infer\nthe similarity between brain responses and serves as a proxy for characterizing\nthe space of mental representations\n:footcite:Shepard1980,LaaksoCottrell2000,KriegeskorteEtAl2008.\nIn this example, we perform RSA on responses to 24 object images (among\na list of 92 images). Subjects were presented with images of human, animal\nand inanimate objects :footcite:CichyEtAl2014. Here we use the 24 unique\nimages of faces and body parts.\n<div class=\"alert alert-info\"><h4>Note</h4><p>this example will download a very large (~6GB) file, so we will not\n build the images below.</p></div>", "# Authors: Jean-Remi King <jeanremi.king@gmail.com>\n# Jaakko Leppakangas <jaeilepp@student.jyu.fi>\n# Alexandre Gramfort <alexandre.gramfort@inria.fr>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\nfrom pandas import read_csv\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.manifold import MDS\n\nimport mne\nfrom mne.io import read_raw_fif, concatenate_raws\nfrom mne.datasets import visual_92_categories\n\n\nprint(__doc__)\n\ndata_path = visual_92_categories.data_path()\n\n# Define stimulus - trigger mapping\nfname = op.join(data_path, 'visual_stimuli.csv')\nconds = read_csv(fname)\nprint(conds.head(5))", "Let's restrict the number of conditions to speed up computation", "max_trigger = 24\nconds = conds[:max_trigger] # take only the first 24 rows", "Define stimulus - trigger mapping", "conditions = []\nfor c in conds.values:\n cond_tags = list(c[:2])\n cond_tags += [('not-' if i == 0 else '') + conds.columns[k]\n for k, i in enumerate(c[2:], 2)]\n conditions.append('/'.join(map(str, cond_tags)))\nprint(conditions[:10])", "Let's make the event_id dictionary", "event_id = dict(zip(conditions, conds.trigger + 1))\nevent_id['0/human bodypart/human/not-face/animal/natural']", "Read MEG data", "n_runs = 4 # 4 for full data (use less to speed up computations)\nfname = op.join(data_path, 'sample_subject_%i_tsss_mc.fif')\nraws = [read_raw_fif(fname % block, verbose='error')\n for block in range(n_runs)] # ignore filename warnings\nraw = concatenate_raws(raws)\n\nevents = mne.find_events(raw, min_duration=.002)\n\nevents = events[events[:, 2] <= max_trigger]", "Epoch data", "picks = mne.pick_types(raw.info, meg=True)\nepochs = mne.Epochs(raw, events=events, event_id=event_id, baseline=None,\n picks=picks, tmin=-.1, tmax=.500, preload=True)", "Let's plot some conditions", "epochs['face'].average().plot()\nepochs['not-face'].average().plot()", "Representational Similarity Analysis (RSA) is a neuroimaging-specific\nappelation to refer to statistics applied to the confusion matrix\nalso referred to as the representational dissimilarity matrices (RDM).\nCompared to the approach from Cichy et al. we'll use a multiclass\nclassifier (Multinomial Logistic Regression) while the paper uses\nall pairwise binary classification task to make the RDM.\nAlso we use here the ROC-AUC as performance metric while the\npaper uses accuracy. 
Finally here for the sake of time we use\nRSA on a window of data while Cichy et al. did it for all time\ninstants separately.", "# Classify using the average signal in the window 50ms to 300ms\n# to focus the classifier on the time interval with best SNR.\nclf = make_pipeline(StandardScaler(),\n LogisticRegression(C=1, solver='liblinear',\n multi_class='auto'))\nX = epochs.copy().crop(0.05, 0.3).get_data().mean(axis=2)\ny = epochs.events[:, 2]\n\nclasses = set(y)\ncv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)\n\n# Compute confusion matrix for each cross-validation fold\ny_pred = np.zeros((len(y), len(classes)))\nfor train, test in cv.split(X, y):\n # Fit\n clf.fit(X[train], y[train])\n # Probabilistic prediction (necessary for ROC-AUC scoring metric)\n y_pred[test] = clf.predict_proba(X[test])", "Compute confusion matrix using ROC-AUC", "confusion = np.zeros((len(classes), len(classes)))\nfor ii, train_class in enumerate(classes):\n for jj in range(ii, len(classes)):\n confusion[ii, jj] = roc_auc_score(y == train_class, y_pred[:, jj])\n confusion[jj, ii] = confusion[ii, jj]", "Plot", "labels = [''] * 5 + ['face'] + [''] * 11 + ['bodypart'] + [''] * 6\nfig, ax = plt.subplots(1)\nim = ax.matshow(confusion, cmap='RdBu_r', clim=[0.3, 0.7])\nax.set_yticks(range(len(classes)))\nax.set_yticklabels(labels)\nax.set_xticks(range(len(classes)))\nax.set_xticklabels(labels, rotation=40, ha='left')\nax.axhline(11.5, color='k')\nax.axvline(11.5, color='k')\nplt.colorbar(im)\nplt.tight_layout()\nplt.show()", "Confusion matrix related to mental representations have been historically\nsummarized with dimensionality reduction using multi-dimensional scaling [1].\nSee how the face samples cluster together.", "fig, ax = plt.subplots(1)\nmds = MDS(2, random_state=0, dissimilarity='precomputed')\nchance = 0.5\nsummary = mds.fit_transform(chance - confusion)\ncmap = plt.get_cmap('rainbow')\ncolors = ['r', 'b']\nnames = list(conds['condition'].values)\nfor color, name in zip(colors, set(names)):\n sel = np.where([this_name == name for this_name in names])[0]\n size = 500 if name == 'human face' else 100\n ax.scatter(summary[sel, 0], summary[sel, 1], s=size,\n facecolors=color, label=name, edgecolors='k')\nax.axis('off')\nax.legend(loc='lower right', scatterpoints=1, ncol=2)\nplt.tight_layout()\nplt.show()", "References\n.. footbibliography::" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
agile-geoscience/striplog
docs/tutorial/12_Calculate_sand_proportion.ipynb
apache-2.0
[ "Calculate sand proportion\nWe'd like to compute a running-window sand log, given some striplog.\nThese are some sand beds:", "text = \"\"\"top,base,comp number\n24.22,24.17,20\n24.02,23.38,19\n22.97,22.91,18\n22.67,22.62,17\n21.23,21.17,16\n19.85,19.8,15\n17.9,17.5,14\n17.17,15.5,13\n15.18,14.96,12\n14.65,13.93,11\n13.4,13.05,10\n11.94,11.87,9\n10.17,10.11,8\n7.54,7.49,7\n6,5.95,6\n5.3,5.25,5\n4.91,3.04,4\n2.92,2.6,3\n2.22,2.17,2\n1.9,1.75,1\"\"\"", "Make a striplog", "from striplog import Striplog, Component\n\ns = Striplog.from_csv(text=text)\n\ns.plot(aspect=5)\n\ns[0]", "Make a sand flag log\nWe'll make a log version of the striplog:", "start, stop, step = 0, 25, 0.01\n\nL = s.to_log(start=start, stop=stop, step=step)\n\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize=(15, 2))\nplt.plot(L)", "Convolve with running window\nConvolution with a boxcar filter computes the mean in a window.", "import numpy as np\n\nwindow_length = 2.5 # metres.\n\nN = int(window_length / step)\nboxcar = 100 * np.ones(N) / N\n\nz = np.linspace(start, stop, L.size)\nprop = np.convolve(L, boxcar, mode='same')\n\nplt.plot(z, prop)\nplt.grid(c='k', alpha=0.2)\nplt.ylim(-5, 105)", "Write out as CSV\nHere's the proportion log we made:", "z_prop = np.stack([z, prop], axis=1)\nz_prop.shape", "Save it with NumPy (or you could build up a Pandas DataFrame)...", "np.savetxt('prop.csv', z_prop, delimiter=',', header='elev,perc', comments='', fmt='%1.3f')", "Check the file looks okay with a quick command line check (! sends commands to the shell).", "!head prop.csv", "Plot everything together", "fig, ax = plt.subplots(figsize=(5, 10), ncols=3, sharey=True)\n\n# Plot the striplog.\ns.plot(ax=ax[0])\nax[0].set_title('Striplog')\n\n# Fake a striplog by plotting the log... it looks nice!\nax[1].fill_betweenx(z, 0.5, 0, color='grey')\nax[1].fill_betweenx(z, L, 0, color='gold', lw=0)\nax[1].set_title('Faked with log')\n\n# Plot the sand proportion log.\nax[2].plot(prop, z, 'r', lw=1)\nax[2].set_title(f'% sand, {window_length} m')", "Make a histogram of thicknesses", "thicks = [iv.thickness for iv in s]\n\n_ = plt.hist(thicks, bins=51)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
lesonkorenac/dataquest-projects
2. Data Analysis and Visualization/Analyzing Thanksgiving Dinner/Thanksgiving survey.ipynb
mit
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\ndata = pd.read_csv(\"thanksgiving.csv\", encoding=\"Latin-1\")\n\ndata.columns\n\ncelebrate = data[data[\"Do you celebrate Thanksgiving?\"] == \"Yes\"].copy()\n\ndef display_counts(counts, title, labels = None):\n print(counts)\n \n labels = labels or counts.index\n \n value_range = range(0, len(counts))\n ax = plt.subplot(111) \n ax.bar(value_range, counts.values)\n plt.title(title)\n plt.xticks(value_range, labels, rotation = 'vertical')\n plt.show()\n\ntypical_main_dish_key = \"What is typically the main dish at your Thanksgiving dinner?\"\n\ndisplay_counts(celebrate[typical_main_dish_key].value_counts(), \"Typical Thanksgiving main dishes counts\")\n\ndisplay_counts(celebrate[celebrate[typical_main_dish_key] == \"Tofurkey\"]\n [\"Do you typically have gravy?\"].value_counts(),\n \"Gravy with Tofurkey\")\n\ndef get_notnull_values(series):\n return series[pd.notnull(series)]\n\n\ndef get_pie_question(pie_type):\n return \"Which type of pie is typically served at your Thanksgiving dinner? Please select all that apply. - \" + pie_type\n\npie_types = [\"Apple\", \"Pumpkin\", \"Pecan\"]\npie_keys = list(map(get_pie_question, pie_types))\n\npie_type_counts = celebrate[pie_keys].apply(lambda column: len(get_notnull_values(column)))\n\ndisplay_counts(pie_type_counts, \"Pie type counts\", pie_types)\n\n\ndef get_notnull_values(series):\n return series[pd.notnull(series)]\n\ndef get_values_except(series, except_values):\n result = series\n for item in except_values:\n result = result[result != item]\n return result\n\ndef extract_column_data(data, column_name, extract_function, except_values = []):\n not_null_values = get_notnull_values(data[column_name])\n considered_values = get_values_except(not_null_values, except_values)\n return considered_values.apply(extract_function)\n\ndef display_statistics(data, chart_title):\n print(data.describe())\n display_counts(data.value_counts().sort_index(), chart_title)\n\n\ndef get_age_from(age_string):\n return int(age_string.split(\" \")[0].split(\"+\")[0])\n\nint_age = extract_column_data(celebrate, \"Age\", get_age_from)\ncelebrate[\"age\"] = int_age\n\ndisplay_statistics(int_age, \"Thanksgiving participation by age\")", "Age statistics flaws\nStatistics shown above are misleading since age categories are represented by single number that does not adequately describe each bracket.", "def get_money_from(money_string):\n return int(money_string.split(\" \")[0].split(\"$\")[1].replace(\",\", \"\"))\n\nint_income = extract_column_data(celebrate,\n \"How much total combined money did all members of your HOUSEHOLD earn last year?\",\n get_money_from,\n except_values=[\"Prefer not to answer\"])\ncelebrate[\"income\"] = int_income\n\ndisplay_statistics(int_income, \"Thanksgiving by households last year earnings\")", "Household earnings statistics flaws\nThere are same problems as in age statistics.", "travel = celebrate[\"How far will you travel for Thanksgiving?\"]\n\ndisplay_counts(travel.loc[int_income[int_income < 15000].index].value_counts(), \"Low income travel\")\ndisplay_counts(travel.loc[int_income[int_income >= 15000].index].value_counts(), \"High income travel\")", "Travel by income\nHypothesis that people with lower income travel more, because they might be younger does not seem to be valid (assumption that younger people have lower income may be wrong, we could use values from age instead).", "def thanksgiving_and_friends(data, aggregated_column):\n 
return data.pivot_table(index=\"Have you ever tried to meet up with hometown friends on Thanksgiving night?\",\n columns='Have you ever attended a \"Friendsgiving?\"',\n values=aggregated_column)\n\n\nprint(thanksgiving_and_friends(celebrate, \"age\"))\n\nprint(thanksgiving_and_friends(celebrate, \"income\"))", "Friendsgiving\nFriendsgiving appears to be more common for younger people and for people with lower income." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
MD2Korg/CerebralCortex
jupyter_demo/mprov_example.ipynb
bsd-2-clause
[ "Import required libs", "from util.dependencies import *\nfrom cerebralcortex.algorithms.gps.clustering import cluster_gps\nfrom cerebralcortex.kernel import Kernel", "Create CC object to setup required parameters\nPlease enable mprov param in '/cc_conf/cerebralcortex.yml'. mprov: pennprov. You would need to create a user on mprov server first and set the username and password in the '/cc_conf/cerebralcortex.yml'.", "CC = Kernel(\"/home/jovyan/cc_conf/\", study_name=\"default\")", "Generate synthetic GPS data", "ds_gps = gen_location_datastream(user_id=\"bfb2ca0c-e19c-3956-9db2-5459ccadd40c\", stream_name=\"gps--org.md2k.phonesensor--phone\")", "Create windows into 60 seconds chunks", "windowed_gps_ds=ds_gps.window(windowDuration=60)\ngps_clusters=cluster_gps(windowed_gps_ds)", "Print Data", "gps_clusters.show(10)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
JackDi/phys202-2015-work
assignments/assignment11/OptimizationEx01.ipynb
mit
[ "Optimization Exercise 1\nImports", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.optimize as opt", "Hat potential\nThe following potential is often used in Physics and other fields to describe symmetry breaking and is often known as the \"hat potential\":\n$$ V(x) = -a x^2 + b x^4 $$\nWrite a function hat(x,a,b) that returns the value of this function:", "# YOUR CODE HERE\ndef hat(x,a,b):\n v=-1*a*x**2+b*x**4\n return v\n\nassert hat(0.0, 1.0, 1.0)==0.0\nassert hat(0.0, 1.0, 1.0)==0.0\nassert hat(1.0, 10.0, 1.0)==-9.0", "Plot this function over the range $x\\in\\left[-3,3\\right]$ with $b=1.0$ and $a=5.0$:", "x=np.linspace(-3,3)\nb=1.0\na=5.0\nplt.plot(x,hat(x,a,b))\n\n\n# YOUR CODE HERE\nx0=-2\na = 5.0\nb = 1.0\ny=opt.minimize(hat,x0,(a,b))\ny.x\n\nassert True # leave this to grade the plot", "Write code that finds the two local minima of this function for $b=1.0$ and $a=5.0$.\n\nUse scipy.optimize.minimize to find the minima. You will have to think carefully about how to get this function to find both minima.\nPrint the x values of the minima.\nPlot the function as a blue line.\nOn the same axes, show the minima as red circles.\nCustomize your visualization to make it beatiful and effective.", "# YOUR CODE HERE\nx0=-2\na = 5.0\nb = 1.0\ni=0\ny.x\nmini=[]\n\nx=np.linspace(-3,3)\nfor i in x:\n y=opt.minimize(hat,i,(a,b))\n z=int(y.x *100000)\n if np.any(mini[:] == z):\n i=i+1\n else: \n mini=np.append(mini,z)\n\nmini=mini/100000\nmini\n\n\nplt.plot(x,hat(x,a,b),label=\"Hat Function\")\nplt.plot(mini[0],hat(mini[0],a,b),'ro',label=\"Minima\")\nplt.plot(mini[1],hat(mini[1],a,b),'ro')\nplt.xlabel=(\"X-Axis\")\nplt.ylabel=(\"Y-Axis\")\nplt.title(\"Graph of Function and its Local Minima\")\nplt.legend()\n\nassert True # leave this for grading the plot", "To check your numerical results, find the locations of the minima analytically. Show and describe the steps in your derivation using LaTeX equations. Evaluate the location of the minima using the above parameters.\n\\begin{equation}\nV(x) = -a x^2 + b x^4 \\\n\\frac{dV}{dt}= -2ax + 4bx^3 \\\na=5.0 \\ b=1.0 \\\nV(x) = -5.0 x^2 + 1.0 x^4 \\\n\\frac{dV}{dt}= -10x + 4x^3 \\\n\\frac{dV}{dt}= -10x + 4x^3 =0 \\ when \\ x= +- \\sqrt{\\frac{5}{2}}\\\n(According\\ to \\ Wolfram \\ Alpha)\\\n\\sqrt{\\frac{5}{2}}=1.58\\\n\\end{equation}" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
GoogleCloudPlatform/asl-ml-immersion
notebooks/recommendation_systems/solutions/3_als_bqml_hybrid.ipynb
apache-2.0
[ "Hybrid Recommendations with the Movie Lens Dataset\nNote: It is recommended that you complete the companion als_bqml.ipynb notebook before continuing with this als_bqml_hybrid.ipynb notebook. This is, however, not a requirement for this lab as you have the option to bring over the dataset + trained model. If you already have the movielens dataset and trained model you can skip the \"Import the dataset and trained model\" section.\nLearning Objectives\n\nKnow extract user and product factors from a BigQuery Matrix Factorizarion Model\nKnow how to format inputs for a BigQuery Hybrid Recommendation Model", "PROJECT = !(gcloud config get-value core/project)\nPROJECT = PROJECT[0]\n\n%env PROJECT=$PROJECT", "Import the dataset and trained model\nIn the previous notebook, you imported 20 million movie recommendations and trained an ALS model with BigQuery ML.\nWe are going to use the same tables, but if this is a new environment, please run the below commands to copy over the clean data.\nFirst create the BigQuery dataset and copy over the data", "!bq mk movielens\n\n%%bash\nrm -r bqml_data\nmkdir bqml_data\ncd bqml_data\ncurl -O 'http://files.grouplens.org/datasets/movielens/ml-20m.zip'\nunzip ml-20m.zip\nyes | bq rm -r $PROJECT:movielens\nbq --location=US mk --dataset \\\n --description 'Movie Recommendations' \\\n $PROJECT:movielens\nbq --location=US load --source_format=CSV \\\n --autodetect movielens.ratings ml-20m/ratings.csv\nbq --location=US load --source_format=CSV \\\n --autodetect movielens.movies_raw ml-20m/movies.csv", "And create a cleaned movielens.movies table.", "%%bigquery --project $PROJECT\nCREATE OR REPLACE TABLE movielens.movies AS\n SELECT * REPLACE(SPLIT(genres, \"|\") AS genres)\n FROM movielens.movies_raw", "Next, copy over the trained recommendation model. Note that if you're project is in the EU you will need to change the location from US to EU below. Note that as of the time of writing you cannot copy models across regions with bq cp.", "%%bash\nbq --location=US cp \\\ncloud-training-demos:movielens.recommender \\\nmovielens.recommender", "Next, ensure the model still works by invoking predictions for movie recommendations:", "%%bigquery --project $PROJECT\nSELECT * FROM\nML.PREDICT(MODEL `movielens.recommender`, (\n SELECT \n movieId, title, 903 AS userId\n FROM movielens.movies, UNNEST(genres) g\n WHERE g = 'Comedy'\n))\nORDER BY predicted_rating DESC\nLIMIT 5", "Incorporating user and movie information\nThe matrix factorization approach does not use any information about users or movies beyond what is available from the ratings matrix. However, we will often have user information (such as the city they live, their annual income, their annual expenditure, etc.) and we will almost always have more information about the products in our catalog. How do we incorporate this information in our recommendation model?\nThe answer lies in recognizing that the user factors and product factors that result from the matrix factorization approach end up being a concise representation of the information about users and products available from the ratings matrix. We can concatenate this information with other information we have available and train a regression model to predict the rating.\nObtaining user and product factors\nWe can get the user factors or product factors from ML.WEIGHTS. 
For example to get the product factors for movieId=96481 and user factors for userId=54192, we would do:", "%%bigquery --project $PROJECT\nSELECT \n processed_input,\n feature,\n TO_JSON_STRING(factor_weights) AS factor_weights,\n intercept\nFROM ML.WEIGHTS(MODEL `movielens.recommender`)\nWHERE\n (processed_input = 'movieId' AND feature = '96481')\n OR (processed_input = 'userId' AND feature = '54192')", "Multiplying these weights and adding the intercept is how we get the predicted rating for this combination of movieId and userId in the matrix factorization approach.\nThese weights also serve as a low-dimensional representation of the movie and user behavior. We can create a regression model to predict the rating given the user factors, product factors, and any other information we know about our users and products.\nCreating input features\nThe MovieLens dataset does not have any user information, and has very little information about the movies themselves. To illustrate the concept, therefore, let’s create some synthetic information about users:", "%%bigquery --project $PROJECT\nCREATE OR REPLACE TABLE movielens.users AS\nSELECT\n userId,\n RAND() * COUNT(rating) AS loyalty,\n CONCAT(SUBSTR(CAST(userId AS STRING), 0, 2)) AS postcode\nFROM\n movielens.ratings\nGROUP BY userId", "Input features about users can be obtained by joining the user table with the ML weights and selecting all the user information and the user factors from the weights array.", "%%bigquery --project $PROJECT\nWITH userFeatures AS (\n SELECT \n u.*,\n (SELECT ARRAY_AGG(weight) FROM UNNEST(factor_weights)) AS user_factors\n FROM movielens.users u\n JOIN ML.WEIGHTS(MODEL movielens.recommender) w\n ON processed_input = 'userId' AND feature = CAST(u.userId AS STRING)\n)\n\nSELECT * FROM userFeatures\nLIMIT 5", "Similarly, we can get product features for the movies data, except that we have to decide how to handle the genre since a movie could have more than one genre. 
If we decide to create a separate training row for each genre, then we can construct the product features using.", "%%bigquery --project $PROJECT\nWITH productFeatures AS (\n SELECT \n p.* EXCEPT(genres),\n g, (SELECT ARRAY_AGG(weight) FROM UNNEST(factor_weights))\n AS product_factors\n FROM movielens.movies p, UNNEST(genres) g\n JOIN ML.WEIGHTS(MODEL movielens.recommender) w\n ON processed_input = 'movieId' AND feature = CAST(p.movieId AS STRING)\n)\n\nSELECT * FROM productFeatures\nLIMIT 5", "Combining these two WITH clauses and pulling in the rating corresponding the movieId-userId combination (if it exists in the ratings table), we can create the training dataset.\nTODO 1: Combine the above two queries to get the user factors and product factor for each rating.", "%%bigquery --project $PROJECT\nCREATE OR REPLACE TABLE movielens.hybrid_dataset AS\n\n WITH userFeatures AS (\n SELECT \n u.*,\n (SELECT ARRAY_AGG(weight) FROM UNNEST(factor_weights))\n AS user_factors\n FROM movielens.users u\n JOIN ML.WEIGHTS(MODEL movielens.recommender) w\n ON processed_input = 'userId' AND feature = CAST(u.userId AS STRING)\n ),\n\n productFeatures AS (\n SELECT \n p.* EXCEPT(genres),\n g, (SELECT ARRAY_AGG(weight) FROM UNNEST(factor_weights))\n AS product_factors\n FROM movielens.movies p, UNNEST(genres) g\n JOIN ML.WEIGHTS(MODEL movielens.recommender) w\n ON processed_input = 'movieId' AND feature = CAST(p.movieId AS STRING)\n )\n\n SELECT\n p.* EXCEPT(movieId),\n u.* EXCEPT(userId),\n rating \n FROM productFeatures p, userFeatures u\n JOIN movielens.ratings r\n ON r.movieId = p.movieId AND r.userId = u.userId", "One of the rows of this table looks like this:", "%%bigquery --project $PROJECT\nSELECT *\nFROM movielens.hybrid_dataset\nLIMIT 1", "Essentially, we have a couple of attributes about the movie, the product factors array corresponding to the movie, a couple of attributes about the user, and the user factors array corresponding to the user. These form the inputs to our “hybrid” recommendations model that builds off the matrix factorization model and adds in metadata about users and movies.\nTraining hybrid recommendation model\nAt the time of writing, BigQuery ML can not handle arrays as inputs to a regression model. Let’s, therefore, define a function to convert arrays to a struct where the array elements are its fields:", "%%bigquery --project $PROJECT\nCREATE OR REPLACE FUNCTION movielens.arr_to_input_16_users(u ARRAY<FLOAT64>)\nRETURNS \n STRUCT<\n u1 FLOAT64,\n u2 FLOAT64,\n u3 FLOAT64,\n u4 FLOAT64,\n u5 FLOAT64,\n u6 FLOAT64,\n u7 FLOAT64,\n u8 FLOAT64,\n u9 FLOAT64,\n u10 FLOAT64,\n u11 FLOAT64,\n u12 FLOAT64,\n u13 FLOAT64,\n u14 FLOAT64,\n u15 FLOAT64,\n u16 FLOAT64\n > AS (STRUCT(\n u[OFFSET(0)],\n u[OFFSET(1)],\n u[OFFSET(2)],\n u[OFFSET(3)],\n u[OFFSET(4)],\n u[OFFSET(5)],\n u[OFFSET(6)],\n u[OFFSET(7)],\n u[OFFSET(8)],\n u[OFFSET(9)],\n u[OFFSET(10)],\n u[OFFSET(11)],\n u[OFFSET(12)],\n u[OFFSET(13)],\n u[OFFSET(14)],\n u[OFFSET(15)]\n));", "which gives:", "%%bigquery --project $PROJECT\nSELECT movielens.arr_to_input_16_users(u).*\nFROM (SELECT\n [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.] 
AS u)", "We can create a similar function named movielens.arr_to_input_16_products to convert the product factor array into named columns.\nTODO 2: Create a function that returns named columns from a size 16 product factor array.", "%%bigquery --project $PROJECT\nCREATE OR REPLACE FUNCTION movielens.arr_to_input_16_products(p ARRAY<FLOAT64>)\nRETURNS \n STRUCT<\n p1 FLOAT64,\n p2 FLOAT64,\n p3 FLOAT64,\n p4 FLOAT64,\n p5 FLOAT64,\n p6 FLOAT64,\n p7 FLOAT64,\n p8 FLOAT64,\n p9 FLOAT64,\n p10 FLOAT64,\n p11 FLOAT64,\n p12 FLOAT64,\n p13 FLOAT64,\n p14 FLOAT64,\n p15 FLOAT64,\n p16 FLOAT64\n > AS (STRUCT(\n p[OFFSET(0)],\n p[OFFSET(1)],\n p[OFFSET(2)],\n p[OFFSET(3)],\n p[OFFSET(4)],\n p[OFFSET(5)],\n p[OFFSET(6)],\n p[OFFSET(7)],\n p[OFFSET(8)],\n p[OFFSET(9)],\n p[OFFSET(10)],\n p[OFFSET(11)],\n p[OFFSET(12)],\n p[OFFSET(13)],\n p[OFFSET(14)],\n p[OFFSET(15)]\n));", "Then, we can tie together metadata about users and products with the user factors and product factors obtained from the matrix factorization approach to create a regression model to predict the rating:", "%%bigquery --project $PROJECT\nCREATE OR REPLACE MODEL movielens.recommender_hybrid \nOPTIONS(model_type='linear_reg', input_label_cols=['rating'])\nAS\n\nSELECT\n * EXCEPT(user_factors, product_factors),\n movielens.arr_to_input_16_users(user_factors).*,\n movielens.arr_to_input_16_products(product_factors).*\nFROM\n movielens.hybrid_dataset", "There is no point looking at the evaluation metrics of this model because the user information we used to create the training dataset was fake (not the RAND() in the creation of the loyalty column) -- we did this exercise in order to demonstrate how it could be done. And of course, we could train a dnn_regressor model and optimize the hyperparameters if we want a more sophisticated model. But if we are going to go that far, it might be better to consider using Auto ML tables.\nCopyright 2022 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
t-vi/pytorch-tvmisc
wasserstein-distance/Pytorch_Wasserstein.ipynb
mit
[ "An efficient implementation of the Sinkhorn algorithm for the GPU\nThomas Viehmann &#116;&#118;&#64;&#109;&#97;&#116;&#104;&#105;&#110;&#102;&#46;&#101;&#117;\nRecently the Wasserstein distance has seen new applications in machine learning and deep learning. It commonly replaces the Kullback-Leibler divergence (also often dubbed cross-entropy loss in the Deep Learning context). In contrast to the latter, Wasserstein distances not only consider the values probability distribution or density at any given point, but also incorporating spatial information in terms of the underlying metric regarding these differences. Intuitively, it yields a smaller distance if probability mass moved to a nearby point or region and a larger distance if probability mass moved far away.\nThere are two predominant variants of Wasserstein distance approximations used in machine learning:\n- Stochastically optimised online estimates of the Wasserstein distance. This is the concept underpinning many of the GAN applications using a (heuristic approximation of) the Wasserstein distance as a discriminator. Starting from the Wasserstein GAN as an improvement over the KL-based DCGAN, with improvements to how to estimate the Wasserstein distance in WGAN-GP, and SN-GAN.\n- Direct computation of the Wasserstein distance as a replacement for the cross-entropy loss in mini-batch training. This is commonly done using the entropy regularised Wasserstein distance and the Sinkhorn iterations Cuturi. In the context of deep learning this has been proposed by Frogner et al., but there is also earlier work in image retrieval using the (non-regularised) Wasserstein distance, see e.g. Y. Rubner et al. A comprehensive treatment is given in Peyré and Cuturi's book, R. Flamary's Python Optimal Transport library provides implementations for many algorithms in this area.\nThis code is concerned with this latter use of the Wasserstein distance. One of the challenges is the numerical stability of the Sinkhorn iteration and carrying that over to mini-batch computations efficiently. While the ingredients appear to be readily available, it seems that they have not been put together in recent implementations we observed.\nThe following is the code for Thomas Viehmann: \nImplementation of batched Sinkhorn iterations for entropy-regularized Wasserstein loss, arXiv 1907.01729. If you use the code in academic work, please cite this paper.\nThe paper has a self-contained writeup of the key calculations to derive the algorithm.\nFirst we need the some imports.", "import math\nimport time\nimport torch\nimport torch.utils.cpp_extension\n%matplotlib inline\n\nfrom matplotlib import pyplot\nimport matplotlib.transforms\n\nimport ot # for comparison\n", "The kernel\nThe following GPU kernel computes\n$$\n \\log v_{bj} := \\log \\nu_{bj} - \\operatorname{logsumexp}{i} (-\\frac{1}{\\lambda} c{ij} + \\log u_{bi}).\n$$\nThis has two key properties that shape our implementation:\n- The overall reduction structure is akin to a matrix multiplication, i.e. memory accesses to $c_{ij}$ and $\\log u_{bi}$\n to compute the result $\\log v_{bj}$, with the additional input $\\log \\nu$ following the same access pattern as the result. We parallelize in the independent dimensions ($b$ and $j$) and split the reduction over $i$ amongst multiple threads then combine their intermediate results. 
We have not employed tiling, which is commonly used to speed up the memory accesses for matrix multiplication.\n\nIn our implementation, the stabilisation of the logsumexp calculation is carried out in an online fashion, i.e. computing the stabilisation and the reduction result in a single pass, similar to the Welford algorithm for the variance.\n\nI explain a bit about the reduction (in particular the bits about WARP_SHFL_XOR) in this blog post.", "cuda_source = \"\"\"\n\n#include <torch/extension.h>\n#include <ATen/core/TensorAccessor.h>\n#include <ATen/cuda/CUDAContext.h>\n\nusing at::RestrictPtrTraits;\nusing at::PackedTensorAccessor;\n\n#if defined(__HIP_PLATFORM_HCC__)\nconstexpr int WARP_SIZE = 64;\n#else\nconstexpr int WARP_SIZE = 32;\n#endif\n\n// The maximum number of threads in a block\n#if defined(__HIP_PLATFORM_HCC__)\nconstexpr int MAX_BLOCK_SIZE = 256;\n#else\nconstexpr int MAX_BLOCK_SIZE = 512;\n#endif\n\n// Returns the index of the most significant 1 bit in `val`.\n__device__ __forceinline__ int getMSB(int val) {\n return 31 - __clz(val);\n}\n\n// Number of threads in a block given an input size up to MAX_BLOCK_SIZE\nstatic int getNumThreads(int nElem) {\n#if defined(__HIP_PLATFORM_HCC__)\n int threadSizes[5] = { 16, 32, 64, 128, MAX_BLOCK_SIZE };\n#else\n int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE };\n#endif\n for (int i = 0; i != 5; ++i) {\n if (nElem <= threadSizes[i]) {\n return threadSizes[i];\n }\n }\n return MAX_BLOCK_SIZE;\n}\n\n\ntemplate <typename T>\n__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)\n{\n#if CUDA_VERSION >= 9000\n return __shfl_xor_sync(mask, value, laneMask, width);\n#else\n return __shfl_xor(value, laneMask, width);\n#endif\n}\n\n// While this might be the most efficient sinkhorn step / logsumexp-matmul implementation I have seen,\n// this is awfully inefficient compared to matrix multiplication and e.g. NVidia cutlass may provide\n// many great ideas for improvement\ntemplate <typename scalar_t, typename index_t>\n__global__ void sinkstep_kernel(\n // compute log v_bj = log nu_bj - logsumexp_i 1/lambda dist_ij - log u_bi\n // for this compute maxdiff_bj = max_i(1/lambda dist_ij - log u_bi)\n // i = reduction dim, using threadIdx.x\n PackedTensorAccessor<scalar_t, 2, RestrictPtrTraits, index_t> log_v,\n const PackedTensorAccessor<scalar_t, 2, RestrictPtrTraits, index_t> dist,\n const PackedTensorAccessor<scalar_t, 2, RestrictPtrTraits, index_t> log_nu,\n const PackedTensorAccessor<scalar_t, 2, RestrictPtrTraits, index_t> log_u,\n const scalar_t lambda) {\n\n using accscalar_t = scalar_t;\n\n __shared__ accscalar_t shared_mem[2 * WARP_SIZE];\n\n index_t b = blockIdx.y;\n index_t j = blockIdx.x;\n int tid = threadIdx.x;\n\n if (b >= log_u.size(0) || j >= log_v.size(1)) {\n return;\n }\n // reduce within thread\n accscalar_t max = -std::numeric_limits<accscalar_t>::infinity();\n accscalar_t sumexp = 0;\n \n if (log_nu[b][j] == -std::numeric_limits<accscalar_t>::infinity()) {\n if (tid == 0) {\n log_v[b][j] = -std::numeric_limits<accscalar_t>::infinity();\n }\n return;\n }\n\n for (index_t i = threadIdx.x; i < log_u.size(1); i += blockDim.x) {\n accscalar_t oldmax = max;\n accscalar_t value = -dist[i][j]/lambda + log_u[b][i];\n max = max > value ? 
max : value;\n if (oldmax == -std::numeric_limits<accscalar_t>::infinity()) {\n // sumexp used to be 0, so the new max is value and we can set 1 here,\n // because we will come back here again\n sumexp = 1;\n } else {\n sumexp *= exp(oldmax - max);\n sumexp += exp(value - max); // if oldmax was not -infinity, max is not either...\n }\n }\n\n // now we have one value per thread. we'll make it into one value per warp\n // first warpSum to get one value per thread to\n // one value per warp\n for (int i = 0; i < getMSB(WARP_SIZE); ++i) {\n accscalar_t o_max = WARP_SHFL_XOR(max, 1 << i, WARP_SIZE);\n accscalar_t o_sumexp = WARP_SHFL_XOR(sumexp, 1 << i, WARP_SIZE);\n if (o_max > max) { // we're less concerned about divergence here\n sumexp *= exp(max - o_max);\n sumexp += o_sumexp;\n max = o_max;\n } else if (max != -std::numeric_limits<accscalar_t>::infinity()) {\n sumexp += o_sumexp * exp(o_max - max);\n }\n }\n \n __syncthreads();\n // this writes each warps accumulation into shared memory\n // there are at most WARP_SIZE items left because\n // there are at most WARP_SIZE**2 threads at the beginning\n if (tid % WARP_SIZE == 0) {\n shared_mem[tid / WARP_SIZE * 2] = max;\n shared_mem[tid / WARP_SIZE * 2 + 1] = sumexp;\n }\n __syncthreads();\n if (tid < WARP_SIZE) {\n max = (tid < blockDim.x / WARP_SIZE ? shared_mem[2 * tid] : -std::numeric_limits<accscalar_t>::infinity());\n sumexp = (tid < blockDim.x / WARP_SIZE ? shared_mem[2 * tid + 1] : 0);\n }\n for (int i = 0; i < getMSB(WARP_SIZE); ++i) {\n accscalar_t o_max = WARP_SHFL_XOR(max, 1 << i, WARP_SIZE);\n accscalar_t o_sumexp = WARP_SHFL_XOR(sumexp, 1 << i, WARP_SIZE);\n if (o_max > max) { // we're less concerned about divergence here\n sumexp *= exp(max - o_max);\n sumexp += o_sumexp;\n max = o_max;\n } else if (max != -std::numeric_limits<accscalar_t>::infinity()) {\n sumexp += o_sumexp * exp(o_max - max);\n }\n }\n\n if (tid == 0) {\n log_v[b][j] = (max > -std::numeric_limits<accscalar_t>::infinity() ?\n log_nu[b][j] - log(sumexp) - max : \n -std::numeric_limits<accscalar_t>::infinity());\n }\n}\n\ntemplate <typename scalar_t>\ntorch::Tensor sinkstep_cuda_template(const torch::Tensor& dist, const torch::Tensor& log_nu, const torch::Tensor& log_u,\n const double lambda) {\n TORCH_CHECK(dist.is_cuda(), \"need cuda tensors\");\n TORCH_CHECK(dist.device() == log_nu.device() && dist.device() == log_u.device(), \"need tensors on same GPU\");\n TORCH_CHECK(dist.dim()==2 && log_nu.dim()==2 && log_u.dim()==2, \"invalid sizes\");\n TORCH_CHECK(dist.size(0) == log_u.size(1) &&\n dist.size(1) == log_nu.size(1) &&\n log_u.size(0) == log_nu.size(0), \"invalid sizes\");\n auto log_v = torch::empty_like(log_nu);\n using index_t = int32_t;\n \n auto log_v_a = log_v.packed_accessor<scalar_t, 2, RestrictPtrTraits, index_t>();\n auto dist_a = dist.packed_accessor<scalar_t, 2, RestrictPtrTraits, index_t>();\n auto log_nu_a = log_nu.packed_accessor<scalar_t, 2, RestrictPtrTraits, index_t>();\n auto log_u_a = log_u.packed_accessor<scalar_t, 2, RestrictPtrTraits, index_t>();\n \n auto stream = at::cuda::getCurrentCUDAStream();\n\n int tf = getNumThreads(log_u.size(1));\n dim3 blocks(log_v.size(1), log_u.size(0));\n dim3 threads(tf);\n \n sinkstep_kernel<<<blocks, threads, 2*WARP_SIZE*sizeof(scalar_t), stream>>>(\n log_v_a, dist_a, log_nu_a, log_u_a, static_cast<scalar_t>(lambda)\n );\n\n return log_v;\n}\n\ntorch::Tensor sinkstep_cuda(const torch::Tensor& dist, const torch::Tensor& log_nu, const torch::Tensor& log_u,\n const double lambda) {\n return 
AT_DISPATCH_FLOATING_TYPES(log_u.scalar_type(), \"sinkstep\", [&] {\n return sinkstep_cuda_template<scalar_t>(dist, log_nu, log_u, lambda);\n });\n}\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n m.def(\"sinkstep\", &sinkstep_cuda, \"sinkhorn step\");\n}\n\n\"\"\"", "Incorporating it in PyTorch\nWe make this into a PyTorch extension module and add a convenience function (and \"manual\" implementation for the CPU).", "wasserstein_ext = torch.utils.cpp_extension.load_inline(\"wasserstein\", cpp_sources=\"\", cuda_sources=cuda_source,\n extra_cuda_cflags=[\"--expt-relaxed-constexpr\"] )\n\ndef sinkstep(dist, log_nu, log_u, lam: float):\n # dispatch to optimized GPU implementation for GPU tensors, slow fallback for CPU\n if dist.is_cuda:\n return wasserstein_ext.sinkstep(dist, log_nu, log_u, lam)\n assert dist.dim() == 2 and log_nu.dim() == 2 and log_u.dim() == 2\n assert dist.size(0) == log_u.size(1) and dist.size(1) == log_nu.size(1) and log_u.size(0) == log_nu.size(0)\n log_v = log_nu.clone()\n for b in range(log_u.size(0)):\n log_v[b] -= torch.logsumexp(-dist/lam+log_u[b, :, None], 0)\n return log_v", "We use this update step in a building block for the Sinkhorn iteration:", "class SinkhornOT(torch.autograd.Function):\n @staticmethod\n def forward(ctx, mu, nu, dist, lam=1e-3, N=100):\n assert mu.dim() == 2 and nu.dim() == 2 and dist.dim() == 2\n bs = mu.size(0)\n d1, d2 = dist.size()\n assert nu.size(0) == bs and mu.size(1) == d1 and nu.size(1) == d2\n log_mu = mu.log()\n log_nu = nu.log()\n log_u = torch.full_like(mu, -math.log(d1))\n log_v = torch.full_like(nu, -math.log(d2))\n for i in range(N):\n log_v = sinkstep(dist, log_nu, log_u, lam)\n log_u = sinkstep(dist.t(), log_mu, log_v, lam)\n\n # this is slight abuse of the function. it computes (diag(exp(log_u))*Mt*exp(-Mt/lam)*diag(exp(log_v))).sum()\n # in an efficient (i.e. no bxnxm tensors) way in log space\n distances = (-sinkstep(-dist.log()+dist/lam, -log_v, log_u, 1.0)).logsumexp(1).exp()\n ctx.log_v = log_v\n ctx.log_u = log_u\n ctx.dist = dist\n ctx.lam = lam\n return distances\n\n @staticmethod\n def backward(ctx, grad_out):\n return grad_out[:, None] * ctx.log_u * ctx.lam, grad_out[:, None] * ctx.log_v * ctx.lam, None, None, None\n", "We also define a function to get the coupling itself:", "def get_coupling(mu, nu, dist, lam=1e-3, N=1000):\n assert mu.dim() == 2 and nu.dim() == 2 and dist.dim() == 2\n bs = mu.size(0)\n d1, d2 = dist.size()\n assert nu.size(0) == bs and mu.size(1) == d1 and nu.size(1) == d2\n log_mu = mu.log()\n log_nu = nu.log()\n log_u = torch.full_like(mu, -math.log(d1))\n log_v = torch.full_like(nu, -math.log(d2))\n for i in range(N):\n log_v = sinkstep(dist, log_nu, log_u, lam)\n log_u = sinkstep(dist.t(), log_mu, log_v, lam)\n return (log_v[:, None, :]-dist/lam+log_u[:, :, None]).exp()", "We define some test distributions. 
These are similar to examples from Python Optimal Transport.", "# some test distribution densities\nn = 100\nlam = 1e-3\nx = torch.linspace(0, 100, n)\nmu1 = torch.distributions.Normal(20., 10.).log_prob(x).exp()\nmu2 = torch.distributions.Normal(60., 30.).log_prob(x).exp()\nmu3 = torch.distributions.Normal(40., 20.).log_prob(x).exp()\nmu1 /= mu1.sum()\nmu2 /= mu2.sum()\nmu3 /= mu3.sum()\nmu123 = torch.stack([mu1, mu2, mu3], dim=0)\nmu231 = torch.stack([mu2, mu3, mu1], dim=0)\ncost = (x[None, :]-x[:, None])**2\ncost /= cost.max()\npyplot.plot(mu1, label=\"$\\mu_1$\")\npyplot.plot(mu2, label=\"$\\mu_2$\")\npyplot.plot(mu3, label=\"$\\mu_3$\")\npyplot.legend();", "We run a sanity check for the distance:\n(This will take longer than you might expect, as it computes a rather large gradient numerically, but it finishes in $<1$ minute on a GTX 1080)", "t = time.time()\ndevice = \"cuda\"\nres = torch.autograd.gradcheck(lambda x: SinkhornOT.apply(x.softmax(1), \n mu231.to(device=device, dtype=torch.double),\n cost.to(device=device, dtype=torch.double),\n lam, 500),\n (mu123.log().to(device=device, dtype=torch.double).requires_grad_(),))\nprint(\"OK? {} took {:.0f} sec\".format(res, time.time()-t))", "We might also check that sinkstep is the same on GPU and CPU (Kai Zhao pointed out that this was not the case for an earlier versions of this notebook, thank you, and indeed, there was a bug in the CPU implementation.)", "res_cpu = sinkstep(cost.cpu(), mu123.log().cpu(), mu231.log().cpu(), lam)\nres_gpu = sinkstep(cost.to(device), mu123.log().to(device), mu231.log().to(device), lam).cpu()\nassert (res_cpu - res_gpu).abs().max() < 1e-5", "We can visiualize the coupling along with the marginals:", "coupling = get_coupling(mu123.cuda(), mu231.cuda(), cost.cuda())\npyplot.figure(figsize=(10,10))\npyplot.subplot(2, 2, 1)\npyplot.plot(mu2.cpu())\npyplot.subplot(2, 2, 4)\npyplot.plot(mu1.cpu(), transform=matplotlib.transforms.Affine2D().rotate_deg(270) + pyplot.gca().transData)\npyplot.subplot(2, 2, 3)\npyplot.imshow(coupling[0].cpu());\n", "This looks a lot like the coupling form Python Optimal Transport and in fact all three match results computed with POT:", "o_coupling12 = torch.tensor(ot.bregman.sinkhorn_stabilized(mu1.cpu(), mu2.cpu(), cost.cpu(), reg=1e-3))\no_coupling23 = torch.tensor(ot.bregman.sinkhorn_stabilized(mu2.cpu(), mu3.cpu(), cost.cpu(), reg=1e-3))\no_coupling31 = torch.tensor(ot.bregman.sinkhorn_stabilized(mu3.cpu(), mu1.cpu(), cost.cpu(), reg=1e-3))\npyplot.imshow(o_coupling12)\no_coupling = torch.stack([o_coupling12, o_coupling23, o_coupling31], dim=0)\n(o_coupling.float() - coupling.cpu()).abs().max().item()", "Performance comparison to existing implementations\nWe copy the code of Dazac's recent blog post in order to compare performance.\nDazac uses early stopping, but this comes at the cost of introducing a synchronization point after each iteration. 
I modified the code to take the distance matrix as an argument.", "# Copyright 2018 Daniel Dazac\n# MIT Licensed\n# License and source: https://github.com/dfdazac/wassdistance/\nclass SinkhornDistance(torch.nn.Module):\n r\"\"\"\n Given two empirical measures each with :math:`P_1` locations\n :math:`x\\in\\mathbb{R}^{D_1}` and :math:`P_2` locations :math:`y\\in\\mathbb{R}^{D_2}`,\n outputs an approximation of the regularized OT cost for point clouds.\n Args:\n eps (float): regularization coefficient\n max_iter (int): maximum number of Sinkhorn iterations\n reduction (string, optional): Specifies the reduction to apply to the output:\n 'none' | 'mean' | 'sum'. 'none': no reduction will be applied,\n 'mean': the sum of the output will be divided by the number of\n elements in the output, 'sum': the output will be summed. Default: 'none'\n Shape:\n - Input: :math:`(N, P_1, D_1)`, :math:`(N, P_2, D_2)`\n - Output: :math:`(N)` or :math:`()`, depending on `reduction`\n \"\"\"\n def __init__(self, eps, max_iter, reduction='none'):\n super(SinkhornDistance, self).__init__()\n self.eps = eps\n self.max_iter = max_iter\n self.reduction = reduction\n\n def forward(self, mu, nu, C):\n u = torch.zeros_like(mu)\n v = torch.zeros_like(nu)\n # To check if algorithm terminates because of threshold\n # or max iterations reached\n actual_nits = 0\n # Stopping criterion\n thresh = 1e-1\n\n # Sinkhorn iterations\n for i in range(self.max_iter):\n u1 = u # useful to check the update\n u = self.eps * (torch.log(mu+1e-8) - torch.logsumexp(self.M(C, u, v), dim=-1)) + u\n v = self.eps * (torch.log(nu+1e-8) - torch.logsumexp(self.M(C, u, v).transpose(-2, -1), dim=-1)) + v\n err = (u - u1).abs().sum(-1).mean()\n\n actual_nits += 1\n if err.item() < thresh:\n break\n\n U, V = u, v\n # Transport plan pi = diag(a)*K*diag(b)\n pi = torch.exp(self.M(C, U, V))\n # Sinkhorn distance\n cost = torch.sum(pi * C, dim=(-2, -1))\n self.actual_nits = actual_nits\n if self.reduction == 'mean':\n cost = cost.mean()\n elif self.reduction == 'sum':\n cost = cost.sum()\n\n return cost, pi, C\n\n def M(self, C, u, v):\n \"Modified cost for logarithmic updates\"\n \"$M_{ij} = (-c_{ij} + u_i + v_j) / \\epsilon$\"\n return (-C + u.unsqueeze(-1) + v.unsqueeze(-2)) / self.eps\n\n @staticmethod\n def ave(u, u1, tau):\n \"Barycenter subroutine, used by kinetic acceleration through extrapolation.\"\n return tau * u + (1 - tau) * u1\n\nn = 100\nx = torch.linspace(0, 100, n)\nmu1 = torch.distributions.Normal(20., 10.).log_prob(x).exp()\nmu2 = torch.distributions.Normal(60., 30.).log_prob(x).exp()\nmu1 /= mu1.sum()\nmu2 /= mu2.sum()\nmu1, mu2, cost = mu1.cuda(), mu2.cuda(), cost.cuda()\nsinkhorn = SinkhornDistance(eps=1e-3, max_iter=200)\ndef x():\n mu1_ = mu1.detach().requires_grad_()\n dist, P, C = sinkhorn(mu1_, mu2, cost)\n gr, = torch.autograd.grad(dist, mu1_)\n torch.cuda.synchronize()\n\ndist, P, C = sinkhorn(mu1.cuda(), mu2.cuda(), cost.cuda())\ntorch.cuda.synchronize()\nx()\n%timeit x()\npyplot.imshow(P.cpu())\nsinkhorn.actual_nits\n\n\ndef y():\n mu1_ = mu1.detach().requires_grad_()\n l = SinkhornOT.apply(mu1_.unsqueeze(0), mu2.unsqueeze(0), cost, 1e-3, 200)\n gr, = torch.autograd.grad(l.sum(), mu1_)\n torch.cuda.synchronize()\ny()\n%timeit y()", "With this problem size and forward + backward, we achieve a speedup factor of approximately 6.5 when doing about 3 times as many iterations.\nBarycenters\nWe can also do barycenters. Let's go 2d to do so. 
I use relative small $N$ because at the time of writing, my GPU is partially occupied by a long-running training.", "N = 50\na, b, c = torch.zeros(3, N, N, device=\"cuda\")\nx = torch.linspace(-5, 5, N, device=\"cuda\")\na[N//5:-N//5, N//5:-N//5] = 1\nb[(x[None]**2+x[:,None]**2 > 4) & (x[None]**2+x[:,None]**2 < 9)] = 1\nc[((x[None]-2)**2+(x[:,None]-2)**2 < 4) | ((x[None]+2)**2+(x[:,None]+2)**2 < 4)] = 1\npyplot.imshow(c.cpu(), cmap=pyplot.cm.gray_r)\ncoords = torch.stack([x[None, :].expand(N, N), x[:, None].expand(N, N)], 2).view(-1, 2)\ndist = ((coords[None]-coords[:, None])**2).sum(-1)\ndist /= dist.max()\na = (a / a.sum()).view(1, -1)\nb = (c / b.sum()).view(1, -1)\nc = (c / c.sum()).view(1, -1)\nSinkhornOT.apply(a, b, dist, 1e-3, 200)\n\n\ndef get_barycenter(mu, dist, weights, lam=1e-3, N=1000):\n assert mu.dim() == 2 and dist.dim() == 2 and weights.dim() == 1\n bs = mu.size(0)\n d1, d2 = dist.size()\n assert mu.size(1) == d1 and d1 == d2 and weights.size(0) == bs\n log_mu = mu.log()\n log_u = torch.full_like(mu, -math.log(d1))\n zeros = torch.zeros_like(log_u)\n for i in range(N):\n log_v = sinkstep(dist.t(), log_mu, log_u, lam)\n log_u = sinkstep(dist, zeros, log_v, lam)\n a = torch.sum(-weights[:, None] * log_u, dim=0, keepdim=True)\n log_u += a\n return (log_v[:, None, :]-dist/lam+log_u[:, :, None]).exp()\n", "It's fast enough to just use baricenters for interpolation:", "res = []\nfor i in torch.linspace(0, 1, 10):\n res.append(get_barycenter(torch.cat([a, b, c], 0), dist, torch.tensor([i*0.9, (1-i)*0.9, 0], device=\"cuda\"), N=100))\n\npyplot.figure(figsize=(15,5))\npyplot.imshow(torch.cat([r[0].sum(1).view(N, N).cpu() for r in res], 1), cmap=pyplot.cm.gray_r)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
california-civic-data-coalition/python-calaccess-notebooks
calaccess-exploration/decoding-filing-periods.ipynb
mit
[ "Decoding Filing Periods\nThe raw data tables mix together filings from different reporting periods (e.g. quarterlys vs. semi-annual vs. pre-elections). But we need these filings to be sorted (or at least sortable) so that or users, for example, can compare the performance of two candidates in the same reporting period.\nThere are two vectors at play here:\n1. The \"Statement Type\", as described in CAL-ACCESS parlance, which indicates the length of time covered by the filing and how close it was filed to the election.\n2. The actual time interval the filing covers, denoted by a start date and an end date.\nThis notebook is pulling data from the downloads-website's dev database, which was last updated on...", "from calaccess_processed.models.tracking import ProcessedDataVersion\n\nProcessedDataVersion.objects.latest()", "Will also need to execute some raw SQL, so I'll import a helper function in order to make the results more readable:", "from project import sql_to_agate", "Let's start by examining the distinct values of the statement type on CVR_CAMPAIGN_DISCLOSURE_CD. And let's narrow the scope to only the Form 460 filings.", "sql_to_agate(\n \"\"\"\n SELECT UPPER(\"STMT_TYPE\"), COUNT(*)\n FROM \"CVR_CAMPAIGN_DISCLOSURE_CD\"\n WHERE \"FORM_TYPE\" = 'F460'\n GROUP BY 1\n ORDER BY COUNT(*) DESC;\n \"\"\"\n).print_table()", "Not all of these values are defined, as previously noted in our docs:\n* PR might be pre-election\n* QS is pro probably quarterly statement\n* YE might be...I don't know \"Year-end\"?\n* S is probably semi-annual\nMaybe come back later and look at the actual filings. There aren't that many.\nThere's another similar-named column on FILER_FILINGS_CD, but this seems to be a completely different thing:", "sql_to_agate(\n \"\"\"\n SELECT FF.\"STMNT_TYPE\", LU.\"CODE_DESC\", COUNT(*)\n FROM \"FILER_FILINGS_CD\" FF\n JOIN \"LOOKUP_CODES_CD\" LU\n ON FF.\"STMNT_TYPE\" = LU.\"CODE_ID\"\n AND LU.\"CODE_TYPE\" = 10000\n GROUP BY 1, 2;\n \"\"\"\n).print_table()", "One of the tables that caught my eye is FILING_PERIOD_CD, which appears to have a row for each quarterly filing period:", "sql_to_agate(\n \"\"\"\n SELECT *\n FROM \"FILING_PERIOD_CD\"\n \"\"\"\n).print_table()", "Every period is described as a quarter, and the records are equally divided among them:", "sql_to_agate(\n \"\"\"\n SELECT \"PERIOD_DESC\", COUNT(*)\n FROM \"FILING_PERIOD_CD\"\n GROUP BY 1;\n \"\"\"\n).print_table()", "The difference between every START_DATE and END_DATE is actually a three-month interval:", "sql_to_agate(\n \"\"\"\n SELECT \"END_DATE\" - \"START_DATE\" AS duration, COUNT(*)\n FROM \"FILING_PERIOD_CD\"\n GROUP BY 1;\n \"\"\"\n).print_table()", "And they have covered every year between 1973 and 2334 (how optimistic!):", "sql_to_agate(\n \"\"\"\n SELECT DATE_PART('year', \"START_DATE\")::int as year, COUNT(*)\n FROM \"FILING_PERIOD_CD\"\n GROUP BY 1\n ORDER BY 1 DESC;\n \"\"\"\n).print_table()", "Filings are linked to filing periods via FILER_FILINGS_CD.PERIOD_ID. 
While that column is not always populated, it is if you limit your results to just the Form 460 filings:", "sql_to_agate(\n \"\"\"\n SELECT ff.\"PERIOD_ID\", fp.\"START_DATE\", fp.\"END_DATE\", fp.\"PERIOD_DESC\", COUNT(*)\n FROM \"FILER_FILINGS_CD\" ff\n JOIN \"CVR_CAMPAIGN_DISCLOSURE_CD\" cvr\n ON ff.\"FILING_ID\" = cvr.\"FILING_ID\"\n AND ff.\"FILING_SEQUENCE\" = cvr.\"AMEND_ID\"\n AND cvr.\"FORM_TYPE\" = 'F460'\n JOIN \"FILING_PERIOD_CD\" fp\n ON ff.\"PERIOD_ID\" = fp.\"PERIOD_ID\"\n GROUP BY 1, 2, 3, 4\n ORDER BY fp.\"START_DATE\" DESC;\n \"\"\"\n).print_table()", "Also, is Schwarzenegger running this cycle? Who else could be filing from so far into the future?\nAAANNNNYYYway...Also need to check to make sure the join between FILER_FILINGS_CD and CVR_CAMPAIGN_DISCLOSURE_CD isn't filtering out too many filings:", "sql_to_agate(\n \"\"\"\n SELECT cvr.\"FILING_ID\", cvr.\"FORM_TYPE\", cvr.\"FILER_NAML\"\n FROM \"CVR_CAMPAIGN_DISCLOSURE_CD\" cvr\n LEFT JOIN \"FILER_FILINGS_CD\" ff\n ON cvr.\"FILING_ID\" = ff.\"FILING_ID\"\n AND cvr.\"AMEND_ID\" = ff.\"FILING_SEQUENCE\" \n WHERE cvr.\"FORM_TYPE\" = 'F460'\n AND (ff.\"FILING_ID\" IS NULL OR ff.\"FILING_SEQUENCE\" IS NULL)\n ORDER BY cvr.\"FILING_ID\";\n \"\"\"\n).print_table(max_column_width=60)", "So only a handful, mostly local campaigns or just nonsense test data.\nSo another important thing to check is how well these the dates from the filing period look-up records line up with the dates on the Form 460 filing records. It would be bad if the CVR_CAMPAIGN_DISCLOSURE_CD.FROM_DATE were before FILING_PERIOD_CD.START_DATE or if CVR_CAMPAIGN_DISCLOSURE_CD.THRU_DATE were after FILING_PERIOD_CD.END_DATE.", "sql_to_agate(\n \"\"\"\n SELECT \n CASE \n WHEN cvr.\"FROM_DATE\" < fp.\"START_DATE\" THEN 'filing from_date before period start_date'\n WHEN cvr.\"THRU_DATE\" > fp.\"END_DATE\" THEN 'filing thru_date after period end_date'\n ELSE 'okay'\n END as test,\n COUNT(*) \n FROM \"CVR_CAMPAIGN_DISCLOSURE_CD\" cvr\n JOIN \"FILER_FILINGS_CD\" ff\n ON cvr.\"FILING_ID\" = ff.\"FILING_ID\"\n AND cvr.\"AMEND_ID\" = ff.\"FILING_SEQUENCE\"\n JOIN \"FILING_PERIOD_CD\" fp\n ON ff.\"PERIOD_ID\" = fp.\"PERIOD_ID\"\n WHERE cvr.\"FORM_TYPE\" = 'F460'\n GROUP BY 1;\n \"\"\"\n).print_table(max_column_width=60)", "So half of the time, the THRU_DATE on the filing is later than the FROM_DATE on the filing period. How big of a difference can exist between these two dates?", "sql_to_agate(\n \"\"\"\n SELECT \n cvr.\"THRU_DATE\" - fp.\"END_DATE\" as date_diff,\n COUNT(*) \n FROM \"CVR_CAMPAIGN_DISCLOSURE_CD\" cvr\n JOIN \"FILER_FILINGS_CD\" ff\n ON cvr.\"FILING_ID\" = ff.\"FILING_ID\"\n AND cvr.\"AMEND_ID\" = ff.\"FILING_SEQUENCE\"\n JOIN \"FILING_PERIOD_CD\" fp\n ON ff.\"PERIOD_ID\" = fp.\"PERIOD_ID\"\n WHERE cvr.\"FORM_TYPE\" = 'F460'\n AND cvr.\"THRU_DATE\" > fp.\"END_DATE\"\n GROUP BY 1\n ORDER BY COUNT(*) DESC;\n \"\"\"\n).print_table(max_column_width=60)", "Ugh. Looks like, in most of the problem cases, the from date can be a whole quarter later than the end date of the filing period. 
Let's take a closer look at these...", "sql_to_agate(\n \"\"\"\n SELECT \n cvr.\"FILING_ID\",\n cvr.\"AMEND_ID\",\n cvr.\"FROM_DATE\",\n cvr.\"THRU_DATE\",\n fp.\"START_DATE\",\n fp.\"END_DATE\"\n FROM \"CVR_CAMPAIGN_DISCLOSURE_CD\" cvr\n JOIN \"FILER_FILINGS_CD\" ff\n ON cvr.\"FILING_ID\" = ff.\"FILING_ID\"\n AND cvr.\"AMEND_ID\" = ff.\"FILING_SEQUENCE\"\n JOIN \"FILING_PERIOD_CD\" fp\n ON ff.\"PERIOD_ID\" = fp.\"PERIOD_ID\"\n WHERE cvr.\"FORM_TYPE\" = 'F460'\n AND 90 < cvr.\"THRU_DATE\" - fp.\"END_DATE\" \n AND cvr.\"THRU_DATE\" - fp.\"END_DATE\" < 93\n ORDER BY cvr.\"THRU_DATE\" DESC;\n \"\"\"\n).print_table(max_column_width=60)", "So, actually, this sort of makes sense: Quarterly filings are for three month intervals, while the semi-annual filings are for six month intervals. And FILING_PERIOD_CD only has records for three month intervals. Let's test this theory by getting the distinct CVR_CAMPAIGN_DISCLOSURE_CD.STMT_TYPE values from these records:", "sql_to_agate(\n \"\"\"\n SELECT UPPER(cvr.\"STMT_TYPE\"), COUNT(*)\n FROM \"CVR_CAMPAIGN_DISCLOSURE_CD\" cvr\n JOIN \"FILER_FILINGS_CD\" ff\n ON cvr.\"FILING_ID\" = ff.\"FILING_ID\"\n AND cvr.\"AMEND_ID\" = ff.\"FILING_SEQUENCE\"\n JOIN \"FILING_PERIOD_CD\" fp\n ON ff.\"PERIOD_ID\" = fp.\"PERIOD_ID\"\n WHERE cvr.\"FORM_TYPE\" = 'F460'\n AND 90 < cvr.\"THRU_DATE\" - fp.\"END_DATE\" \n AND cvr.\"THRU_DATE\" - fp.\"END_DATE\" < 93\n GROUP BY 1\n ORDER BY COUNT(*) DESC;\n \"\"\"\n).print_table(max_column_width=60)", "At least this is mostly true." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
keras-team/keras-io
examples/keras_recipes/ipynb/tensorflow_numpy_models.ipynb
apache-2.0
[ "Writing Keras Models With TensorFlow NumPy\nAuthor: lukewood<br>\nDate created: 2021/08/28<br>\nLast modified: 2021/08/28<br>\nDescription: Overview of how to use the TensorFlow NumPy API to write Keras models.\nIntroduction\nNumPy is a hugely successful Python linear algebra library.\nTensorFlow recently launched tf_numpy, a\nTensorFlow implementation of a large subset of the NumPy API.\nThanks to tf_numpy, you can write Keras layers or models in the NumPy style!\nThe TensorFlow NumPy API has full integration with the TensorFlow ecosystem.\nFeatures such as automatic differentiation, TensorBoard, Keras model callbacks,\nTPU distribution and model exporting are all supported.\nLet's run through a few examples.\nSetup\nTensorFlow NumPy requires TensorFlow 2.5 or later.", "import tensorflow as tf\nimport tensorflow.experimental.numpy as tnp\nimport keras\nimport keras.layers as layers\nimport numpy as np", "Optionally, you can call tnp.experimental_enable_numpy_behavior() to enable type promotion in TensorFlow.\nThis allows TNP to more closely follow the NumPy standard.", "tnp.experimental_enable_numpy_behavior()", "To test our models we will use the Boston housing prices regression dataset.", "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(\n path=\"boston_housing.npz\", test_split=0.2, seed=113\n)\n\n\ndef evaluate_model(model: keras.Model):\n [loss, percent_error] = model.evaluate(x_test, y_test, verbose=0)\n print(\"Mean absolute percent error before training: \", percent_error)\n model.fit(x_train, y_train, epochs=200, verbose=0)\n [loss, percent_error] = model.evaluate(x_test, y_test, verbose=0)\n print(\"Mean absolute percent error after training:\", percent_error)\n", "Subclassing keras.Model with TNP\nThe most flexible way to make use of the Keras API is to subclass the\nkeras.Model class. Subclassing the Model class\ngives you the ability to fully customize what occurs in the training loop. This makes\nsubclassing Model a popular option for researchers.\nIn this example, we will implement a Model subclass that performs regression over the\nboston housing dataset using the TNP API. 
Note that differentiation and gradient\ndescent is handled automatically when using the TNP API alongside keras.\nFirst let's define a simple TNPForwardFeedRegressionNetwork class.", "\nclass TNPForwardFeedRegressionNetwork(keras.Model):\n def __init__(self, blocks=None, **kwargs):\n super(TNPForwardFeedRegressionNetwork, self).__init__(**kwargs)\n if not isinstance(blocks, list):\n raise ValueError(f\"blocks must be a list, got blocks={blocks}\")\n self.blocks = blocks\n self.block_weights = None\n self.biases = None\n\n def build(self, input_shape):\n current_shape = input_shape[1]\n self.block_weights = []\n self.biases = []\n for i, block in enumerate(self.blocks):\n self.block_weights.append(\n self.add_weight(\n shape=(current_shape, block), trainable=True, name=f\"block-{i}\"\n )\n )\n self.biases.append(\n self.add_weight(shape=(block,), trainable=True, name=f\"bias-{i}\")\n )\n current_shape = block\n\n self.linear_layer = self.add_weight(\n shape=(current_shape, 1), name=\"linear_projector\", trainable=True\n )\n\n def call(self, inputs):\n activations = inputs\n for w, b in zip(self.block_weights, self.biases):\n activations = tnp.matmul(activations, w) + b\n # ReLu activation function\n activations = tnp.maximum(activations, 0.0)\n\n return tnp.matmul(activations, self.linear_layer)\n", "Just like with any other Keras model we can utilize any supported optimizer, loss,\nmetrics or callbacks that we want.\nLet's see how the model performs!", "model = TNPForwardFeedRegressionNetwork(blocks=[3, 3])\nmodel.compile(\n optimizer=\"adam\",\n loss=\"mean_squared_error\",\n metrics=[keras.metrics.MeanAbsolutePercentageError()],\n)\nevaluate_model(model)", "Great! Our model seems to be effectively learning to solve the problem at hand.\nWe can also write our own custom loss function using TNP.", "\ndef tnp_mse(y_true, y_pred):\n return tnp.mean(tnp.square(y_true - y_pred), axis=0)\n\n\nkeras.backend.clear_session()\nmodel = TNPForwardFeedRegressionNetwork(blocks=[3, 3])\nmodel.compile(\n optimizer=\"adam\",\n loss=tnp_mse,\n metrics=[keras.metrics.MeanAbsolutePercentageError()],\n)\nevaluate_model(model)", "Implementing a Keras Layer Based Model with TNP\nIf desired, TNP can also be used in layer oriented Keras code structure. 
Let's\nimplement the same model, but using a layered approach!", "\ndef tnp_relu(x):\n return tnp.maximum(x, 0)\n\n\nclass TNPDense(keras.layers.Layer):\n def __init__(self, units, activation=None):\n super().__init__()\n self.units = units\n self.activation = activation\n\n def build(self, input_shape):\n self.w = self.add_weight(\n name=\"weights\",\n shape=(input_shape[1], self.units),\n initializer=\"random_normal\",\n trainable=True,\n )\n self.bias = self.add_weight(\n name=\"bias\",\n shape=(self.units,),\n initializer=\"random_normal\",\n trainable=True,\n )\n\n def call(self, inputs):\n outputs = tnp.matmul(inputs, self.w) + self.bias\n if self.activation:\n return self.activation(outputs)\n return outputs\n\n\ndef create_layered_tnp_model():\n return keras.Sequential(\n [\n TNPDense(3, activation=tnp_relu),\n TNPDense(3, activation=tnp_relu),\n TNPDense(1),\n ]\n )\n\n\nmodel = create_layered_tnp_model()\nmodel.compile(\n optimizer=\"adam\",\n loss=\"mean_squared_error\",\n metrics=[keras.metrics.MeanAbsolutePercentageError()],\n)\nmodel.build((None, 13,))\nmodel.summary()\n\nevaluate_model(model)", "You can also seamlessly switch between TNP layers and native Keras layers!", "\ndef create_mixed_model():\n return keras.Sequential(\n [\n TNPDense(3, activation=tnp_relu),\n # The model will have no issue using a normal Dense layer\n layers.Dense(3, activation=\"relu\"),\n # ... or switching back to tnp layers!\n TNPDense(1),\n ]\n )\n\n\nmodel = create_mixed_model()\nmodel.compile(\n optimizer=\"adam\",\n loss=\"mean_squared_error\",\n metrics=[keras.metrics.MeanAbsolutePercentageError()],\n)\nmodel.build((None, 13,))\nmodel.summary()\n\nevaluate_model(model)", "The Keras API offers a wide variety of layers. The ability to use them alongside NumPy\ncode can be a huge time saver in projects.\nDistribution Strategy\nTensorFlow NumPy and Keras integrate with\nTensorFlow Distribution Strategies.\nThis makes it simple to perform distributed training across multiple GPUs,\nor even an entire TPU Pod.", "gpus = tf.config.list_logical_devices(\"GPU\")\nif gpus:\n strategy = tf.distribute.MirroredStrategy(gpus)\nelse:\n # We can fallback to a no-op CPU strategy.\n strategy = tf.distribute.get_strategy()\nprint(\"Running with strategy:\", str(strategy.__class__.__name__))\n\nwith strategy.scope():\n model = create_layered_tnp_model()\n model.compile(\n optimizer=\"adam\",\n loss=\"mean_squared_error\",\n metrics=[keras.metrics.MeanAbsolutePercentageError()],\n )\n model.build((None, 13,))\n model.summary()\n evaluate_model(model)", "TensorBoard Integration\nOne of the many benefits of using the Keras API is the ability to monitor training\nthrough TensorBoard. 
Using the TensorFlow NumPy API alongside Keras allows you to easily\nleverage TensorBoard.", "keras.backend.clear_session()", "To load the TensorBoard from a Jupyter notebook, you can run the following magic:\n%load_ext tensorboard", "models = [\n (TNPForwardFeedRegressionNetwork(blocks=[3, 3]), \"TNPForwardFeedRegressionNetwork\"),\n (create_layered_tnp_model(), \"layered_tnp_model\"),\n (create_mixed_model(), \"mixed_model\"),\n]\nfor model, model_name in models:\n model.compile(\n optimizer=\"adam\",\n loss=\"mean_squared_error\",\n metrics=[keras.metrics.MeanAbsolutePercentageError()],\n )\n model.fit(\n x_train,\n y_train,\n epochs=200,\n verbose=0,\n callbacks=[keras.callbacks.TensorBoard(log_dir=f\"logs/{model_name}\")],\n )", "To load the TensorBoard from a Jupyter notebook you can use the %tensorboard magic:\n%tensorboard --logdir logs\nThe TensorBoard monitor metrics and examine the training curve.\n\nThe TensorBoard also allows you to explore the computation graph used in your models.\n\nThe ability to introspect into your models can be valuable during debugging.\nConclusion\nPorting existing NumPy code to Keras models using the tensorflow_numpy API is easy!\nBy integrating with Keras you gain the ability to use existing Keras callbacks, metrics\nand optimizers, easily distribute your training and use Tensorboard.\nMigrating a more complex model, such as a ResNet, to the TensorFlow NumPy API would be a\ngreat follow up learning exercise.\nSeveral open source NumPy ResNet implementations are available online." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
karlstroetmann/Artificial-Intelligence
Python/5 Linear Regression/Linear-Regression-Rounding.ipynb
gpl-2.0
[ "%%HTML\n<style>\n.container { width:100% }\n</style>", "Linear Regression: Rounding and Subclassing\nIn this notebook we investigate the influence of <em style=\"color:blue;\">rounding</em> and <em style=\"color:blue;\">subclassing</em> on linear regression. To begin, we import all the libraries we need.", "import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sklearn.linear_model as lm", "We will work with artificially generated data. The independent variable X is a numpy array \nof $\\texttt{N}=400$ random numbers that have a <em style=\"color:blue;\">normal</em> distribution with \nmean $\\mu = 10$ and standard deviation $1$. The data is created from random numbers.\nIn order to be able to reproduce our results, we use the method numpy.random.seed.", "np.random.seed(1)\nN = 400 \n𝜇 = 10\nX = np.random.randn(N) + 𝜇", "The dependent variable Y is created by adding some noise to the independent variable X. This noise is \n<em style=\"color:blue;\">normally</em> distributed with mean $0$ and standard deviation $0.5$.", "noise = 0.5 * np.random.randn(len(X))\nY = X + noise", "We build a linear model for X and Y.", "model = lm.LinearRegression()", "In order to use SciKit-Learn we have to reshape the array X into a matrix.", "X = np.reshape(X, (len(X), 1))", "We train the model and compute its score.", "M = model.fit(X, Y)\nM.score(X, Y)", "In order to plot the data together with the linear model, we extract the coefficients.", "ϑ0 = M.intercept_\nϑ1 = M.coef_[0]", "We plot Y versus X and the linear regression line.", "xMax = np.max(X) + 0.2\nxMin = np.min(X) - 0.2\n%matplotlib inline\nplt.figure(figsize=(15, 10))\nsns.set(style='darkgrid')\nplt.scatter(X, Y, c='b') # 'b' is blue color\nplt.xlabel('X values')\nplt.ylabel('true values + noise')\nplt.title('Influence of rounding on explained variance')\nplt.show(plt.plot([xMin, xMax], [ϑ0 + ϑ1 * xMin, ϑ0 + ϑ1 * xMax], c='r'))", "As we want to study the effect of <em style=\"color:blue;\">rounding</em>, the values of the dependent variable X are rounded to the nearest integer. To this end, the values are transformed to another unit, rounded and then transformed back to the original unkit. This way we can investigate how the performance of linear regression degrades if the precision of the measurements of the independent variable is low.", "X = np.round(X * 0.8) / 0.8", "We create a new <em style=\"color:blue;\">linear model</em>, fit it to the data and compute its score.", "model = lm.LinearRegression()\nM = model.fit(X, Y)\nM.score(X, Y)", "We can see that the performance of the linear model has degraded considerably.", "ϑ0 = M.intercept_\nϑ1 = M.coef_[0]\nxMax = max(X) + 0.2\nxMin = min(X) - 0.2\nplt.figure(figsize=(12, 10))\nsns.set(style='darkgrid')\nplt.scatter(X, Y, c='b')\nplt.plot([xMin, xMax], [ϑ0 + ϑ1 * xMin, ϑ0 + ϑ1 * xMax], c='r')\nplt.xlabel('rounded X values')\nplt.ylabel('true X values + noise')\nplt.title('Influence of rounding on explained variance')\nplt.show()", "Next, we investigate the effect of <em style=\"color:blue;\">subclassing</em>. We will only keep those values such that $X > 11$.", "X.shape\n\nselectorX = (X > 11)\nselectorY = np.reshape(selectorX, (N,))\nXS = X[selectorX]\nXS = np.reshape(XS, (len(XS), 1))\nYS = Y[selectorY]", "Again, we fit a linear model.", "model = lm.LinearRegression()\nM = model.fit(XS, YS)\nM.score(XS, YS)", "We see that the performance of linear regression has degraded considerably. 
Let's plot this.", "ϑ0 = M.intercept_\nϑ1 = M.coef_[0]\nxMax = max(XS) + 0.2\nxMin = min(XS) - 0.2\nplt.figure(figsize=(12, 10))\nsns.set(style='darkgrid')\nplt.scatter(XS, YS, c='b')\nplt.plot([xMin, xMax], [ϑ0 + ϑ1 * xMin, ϑ0 + ϑ1 * xMax], c='r')\nplt.xlabel('rounded X values (X > 11)')\nplt.ylabel('true X values + noise')\nplt.title('Influence of subclassing on explained variance')\nplt.show()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
betatim/BlackBox
examples/bayesian-optimization.ipynb
bsd-3-clause
[ "Bayesian optimization with skopt\nGilles Louppe, Manoj Kumar July 2016.", "import numpy as np\nnp.random.seed(123)\n\n%matplotlib inline\nimport matplotlib.pyplot as plt", "Problem statement\nWe are interested in solving $$x^* = \\arg \\min_x f(x)$$ under the constraints that\n\n$f$ is a black box for which no closed form is known (nor its gradients);\n$f$ is expensive to evaluate;\nand evaluations of $y = f(x)$ may be noisy.\n\nDisclaimer. If you do not have these constraints, then there is certainly a better optimization algorithm than Bayesian optimization.\nBayesian optimization loop\nFor $t=1:T$:\n\n\nGiven observations $(x_i, y_i=f(x_i))$ for $i=1:t$, build a probabilistic model for the objective $f$. Integrate out all possible true functions, using Gaussian process regression.\n\n\noptimize a cheap acquisition/utility function $u$ based on the posterior distribution for sampling the next point.\n $$x_{t+1} = \\arg \\min_x u(x)$$\n Exploit uncertainty to balance exploration against exploitation.\n\n\nSample the next observation $y_{t+1}$ at $x_{t+1}$.\n\n\nAcquisition functions\nAcquisition functions $\\text{u}(x)$ specify which sample $x$ should be tried next:\n\nExpected improvement (default): $-\\text{EI}(x) = -\\mathbb{E} [f(x) - f(x_t^+)] $;\nLower confidence bound: $\\text{LCB}(x) = \\mu_{GP}(x) + \\kappa \\sigma_{GP}(x)$;\nProbability of improvement: $-\\text{PI}(x) = -P(f(x) \\geq f(x_t^+) + \\kappa) $;\n\nwhere $x_t^+$ is the best point observed so far.\nIn most cases, acquisition functions provide knobs (e.g., $\\kappa$) for\ncontrolling the exploration-exploitation trade-off.\n- Search in regions where $\\mu_{GP}(x)$ is high (exploitation)\n- Probe regions where uncertainty $\\sigma_{GP}(x)$ is high (exploration)\nToy example\nLet assume the following noisy function $f$:", "noise_level = 0.1\n\ndef f(x, noise_level=noise_level):\n return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2)) + np.random.randn() * noise_level", "Note. 
In skopt, functions $f$ are assumed to take as input a 1D vector $x$ represented as an array-like and to return a scalar $f(x)$.", "# Plot f(x) + contours\nx = np.linspace(-2, 2, 400).reshape(-1, 1)\nfx = [f(x_i, noise_level=0.0) for x_i in x]\nplt.plot(x, fx, \"r--\", label=\"True (unknown)\")\nplt.fill(np.concatenate([x, x[::-1]]),\n np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx], \n [fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])),\n alpha=.2, fc=\"r\", ec=\"None\")\nplt.legend()\nplt.grid()\nplt.show()", "Bayesian optimization based on gaussian process regression is implemented in skopt.gp_minimize and can be carried out as follows:", "from skopt import gp_minimize\n\nres = gp_minimize(f, # the function to minimize\n [(-2.0, 2.0)], # the bounds on each dimension of x\n acq_func=\"EI\", # the acquisition function\n n_calls=15, # the number of evaluations of f \n n_random_starts=5, # the number of random initialization points\n noise=0.1**2, # the noise level (optional)\n random_state=123) # the random seed", "Accordingly, the approximated minimum is found to be:", "\"x^*=%.4f, f(x^*)=%.4f\" % (res.x[0], res.fun)", "For further inspection of the results, attributes of the res named tuple provide the following information:\n\nx [float]: location of the minimum.\nfun [float]: function value at the minimum.\nmodels: surrogate models used for each iteration.\nx_iters [array]: location of function evaluation for each\n iteration.\nfunc_vals [array]: function value for each iteration.\nspace [Space]: the optimization space.\nspecs [dict]: parameters passed to the function.", "print(res)", "Together these attributes can be used to visually inspect the results of the minimization, such as the convergence trace or the acquisition function at the last iteration:", "from skopt.plots import plot_convergence\nplot_convergence(res);", "Let us now visually examine\n\nThe approximation of the fit gp model to the original function.\nThe acquistion values that determine the next point to be queried.", "from skopt.acquisition import gaussian_ei\n\nplt.rcParams[\"figure.figsize\"] = (8, 14)\n\nx = np.linspace(-2, 2, 400).reshape(-1, 1)\nx_gp = res.space.transform(x.tolist())\nfx = np.array([f(x_i, noise_level=0.0) for x_i in x])\n\n# Plot the 5 iterations following the 5 random points\nfor n_iter in range(5):\n gp = res.models[n_iter]\n curr_x_iters = res.x_iters[:5+n_iter]\n curr_func_vals = res.func_vals[:5+n_iter]\n\n # Plot true function.\n plt.subplot(5, 2, 2*n_iter+1)\n plt.plot(x, fx, \"r--\", label=\"True (unknown)\")\n plt.fill(np.concatenate([x, x[::-1]]),\n np.concatenate([fx - 1.9600 * noise_level, \n fx[::-1] + 1.9600 * noise_level]),\n alpha=.2, fc=\"r\", ec=\"None\")\n\n # Plot GP(x) + contours\n y_pred, sigma = gp.predict(x_gp, return_std=True)\n plt.plot(x, y_pred, \"g--\", label=r\"$\\mu_{GP}(x)$\")\n plt.fill(np.concatenate([x, x[::-1]]),\n np.concatenate([y_pred - 1.9600 * sigma, \n (y_pred + 1.9600 * sigma)[::-1]]),\n alpha=.2, fc=\"g\", ec=\"None\")\n\n # Plot sampled points\n plt.plot(curr_x_iters, curr_func_vals,\n \"r.\", markersize=8, label=\"Observations\")\n \n # Adjust plot layout\n plt.grid()\n\n if n_iter == 0:\n plt.legend(loc=\"best\", prop={'size': 6}, numpoints=1)\n \n if n_iter != 4:\n plt.tick_params(axis='x', which='both', bottom='off', \n top='off', labelbottom='off') \n\n # Plot EI(x)\n plt.subplot(5, 2, 2*n_iter+2)\n acq = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))\n plt.plot(x, acq, \"b\", label=\"EI(x)\")\n plt.fill_between(x.ravel(), 
-2.0, acq.ravel(), alpha=0.3, color='blue')\n \n next_x = res.x_iters[5+n_iter]\n next_acq = gaussian_ei(res.space.transform([next_x]), gp, y_opt=np.min(curr_func_vals))\n plt.plot(next_x, next_acq, \"bo\", markersize=6, label=\"Next query point\")\n \n # Adjust plot layout\n plt.ylim(0, 0.1)\n plt.grid()\n \n if n_iter == 0:\n plt.legend(loc=\"best\", prop={'size': 6}, numpoints=1)\n \n if n_iter != 4:\n plt.tick_params(axis='x', which='both', bottom='off', \n top='off', labelbottom='off') \n\nplt.show()", "The first column shows the following:\n\nThe true function.\nThe approximation to the original function by the gaussian process model\nHow sure the GP is about the function.\n\nThe second column shows the acquisition function values after every surrogate model is fit. It is possible that we do not choose the global minimum but a local minimum depending on the minimizer used to minimize the acquisition function.\nAt the points closer to the points previously evaluated at, the variance dips to zero. \nFinally, as we increase the number of points, the GP model approaches the actual function. The final few points are clustered around the minimum because the GP does not gain anything more by further exploration:", "plt.rcParams[\"figure.figsize\"] = (6, 4)\n\n# Plot f(x) + contours\nx = np.linspace(-2, 2, 400).reshape(-1, 1)\nx_gp = res.space.transform(x.tolist())\n\nfx = [f(x_i, noise_level=0.0) for x_i in x]\nplt.plot(x, fx, \"r--\", label=\"True (unknown)\")\nplt.fill(np.concatenate([x, x[::-1]]),\n np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx], \n [fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])),\n alpha=.2, fc=\"r\", ec=\"None\")\n\n# Plot GP(x) + contours\ngp = res.models[-1]\ny_pred, sigma = gp.predict(x_gp, return_std=True)\n\nplt.plot(x, y_pred, \"g--\", label=r\"$\\mu_{GP}(x)$\")\nplt.fill(np.concatenate([x, x[::-1]]),\n np.concatenate([y_pred - 1.9600 * sigma, \n (y_pred + 1.9600 * sigma)[::-1]]),\n alpha=.2, fc=\"g\", ec=\"None\")\n\n# Plot sampled points\nplt.plot(res.x_iters, \n res.func_vals, \n \"r.\", markersize=15, label=\"Observations\")\n\nplt.title(r\"$x^* = %.4f, f(x^*) = %.4f$\" % (res.x[0], res.fun))\nplt.legend(loc=\"best\", prop={'size': 8}, numpoints=1)\nplt.grid()\n\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
merryjman/astronomy
Motion.ipynb
gpl-3.0
[ "Motion Analysis\nIn this example, you'll analyze the 1-D motion of an object. Don't be afraid to run code without knowing what every line does. A great way to learn is to:\n- run some code\n- see what it does\n- edit it\n- see what changed\nProgrammers often begin with a working program, then edit/modify it to do what they want.", "# First, we'll \"import\" the software packages needed.\nimport pandas as pd\nimport numpy as np\n%matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\ninline_rc = dict(mpl.rcParams)\n\n# Starting a line with a hashtag tells the program not to read the line.\n# That way we can write \"comments\" to humans trying to figure out what the code does.\n# Blank lines don't do anything either, but they can make the code easier to read.", "Raw data", "# Whenever you type \"something =\" it defines a new variable, \"something\", \n# and sets it equal to whatever follows the equals sign. That could be a number, \n# another variable, or in this case an entire table of numbers.\n\n# enter raw data\ndata = pd.DataFrame.from_items([\n ('time (s)', [0,1,2,3]), \n ('position (m)', [0,2,4,6])\n ])\n# display data table\ndata", "Plotting the data", "# set variables = data['column label']\ntime = data['time (s)']\npos = data['position (m)']\n\n# Uncomment the next line to make it look like a graph from xkcd.com\n# plt.xkcd()\n# to make normal-looking plots again execute:\n# mpl.rcParams.update(inline_rc)\n\n# this makes a scatterplot of the data\n# plt.scatter(x values, y values)\nplt.scatter(time, pos)\nplt.title(\"Constant Speed?\")\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Position (cm)\")\nplt.autoscale(tight=True)\n\n# calculate a trendline equation\n# np.polyfit( x values, y values, polynomial order)\ntrend = np.polyfit(time, pos, 1)\n\n# plot trendline\n# plt.plot(x values, y values, other parameters)\nplt.plot(time, np.poly1d(trend)(time), label='trendline')\nplt.legend(loc='upper left')\n\n# display the trendline's coefficients (slope, y-int)\ntrend", "Calculate and plot velocity", "# create a new empty column\ndata['velocity (m/s)'] = ''\ndata\n\n# np.diff() calculates the difference between a value and the one after it\nvel = np.diff(pos) / np.diff(time)\n\n# fill the velocity column with values from the formula\ndata['velocity (m/s)'] = pd.DataFrame.from_items([('', vel)])\n\n# display the data table\ndata\n\n# That last velocity value will cause problems for further coding\n# Make a new table using only rows 0 through 2\ndata2 = data.loc[0:2,['time (s)', 'velocity (m/s)']]\ndata2\n\n# set new variables to plot\ntime2 = data2['time (s)']\nvel2 = data2['velocity (m/s)']\n\n\n# plot data just like before\nplt.scatter(time2, vel2)\nplt.title(\"Constant Speed?\")\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Velocity (m)\")\nplt.autoscale(tight=True)\n\n# calculate trendline equation like before\ntrend2 = np.polyfit(time2, vel2, 1)\n\n# plot trendline like before\nplt.plot(time2, np.poly1d(trend2)(time2), label='trendline')\nplt.legend(loc='lower left')\n\n# display the trendline's coefficients (slope, y-int)\ntrend2", "Part Two\nChoose one of the following:\n- Mess with the data: edit the original raw data to add more data points and vary them slightly to make it appear like realistic measurements of constant speed motion.\n- Constant a: edit/add to the raw data to represent an object moving with constant acceleration.\n- More math: create new cells below and add an \"acceleration\" collumn to the data table, then plot it." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
XinyiGong/pymks
notebooks/cahn_hilliard.ipynb
mit
[ "Cahn-Hilliard Example\nThis example demonstrates how to use PyMKS to solve the Cahn-Hilliard equation. The first section provides some background information about the Cahn-Hilliard equation as well as details about calibrating and validating the MKS model. The example demonstrates how to generate sample data, calibrate the influence coefficients and then pick an appropriate number of local states when state space is continuous. The MKS model and a spectral solution of the Cahn-Hilliard equation are compared on a larger test microstructure over multiple time steps.\nCahn-Hilliard Equation\nThe Cahn-Hilliard equation is used to simulate microstructure evolution during spinodial decomposition and has the following form,\n$$ \\dot{\\phi} = \\nabla^2 \\left( \\phi^3 - \\phi \\right) - \\gamma \\nabla^4 \\phi $$\nwhere $\\phi$ is a conserved ordered parameter and $\\sqrt{\\gamma}$ represents the width of the interface. In this example, the Cahn-Hilliard equation is solved using a semi-implicit spectral scheme with periodic boundary conditions, see Chang and Rutenberg for more details.", "%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n", "Modeling with MKS\nIn this example the MKS equation will be used to predict microstructure at the next time step using \n$$p[s, 1] = \\sum_{r=0}^{S-1} \\alpha[l, r, 1] \\sum_{l=0}^{L-1} m[l, s - r, 0] + ...$$\nwhere $p[s, n + 1]$ is the concentration field at location $s$ and at time $n + 1$, $r$ is the convolution dummy variable and $l$ indicates the local states varable. $\\alpha[l, r, n]$ are the influence coefficients and $m[l, r, 0]$ the microstructure function given to the model. $S$ is the total discretized volume and $L$ is the total number of local states n_states choosen to use.\nThe model will march forward in time by recussively replacing discretizing $p[s, n]$ and substituing it back for $m[l, s - r, n]$.\nCalibration Datasets\nUnlike the elastostatic examples, the microstructure (concentration field) for this simulation doesn't have discrete phases. The microstructure is a continuous field that can have a range of values which can change over time, therefore the first order influence coefficients cannot be calibrated with delta microstructures. Instead a large number of simulations with random initial conditions are used to calibrate the first order influence coefficients using linear regression.\nThe function make_cahn_hilliard from pymks.datasets provides an interface to generate calibration datasets for the influence coefficients. To use make_cahn_hilliard, we need to set the number of samples we want to use to calibrate the influence coefficients using n_samples, the size of the simulation domain using size and the time step using dt.", "import pymks\nfrom pymks.datasets import make_cahn_hilliard\n\nn = 41\nn_samples = 400\ndt = 1e-2\nnp.random.seed(99)\nX, y = make_cahn_hilliard(n_samples=n_samples, size=(n, n), dt=dt)\n", "The function make_cahnHilliard generates n_samples number of random microstructures, X, and the associated updated microstructures, y, after one time step y. The following cell plots one of these microstructures along with its update.", "from pymks.tools import draw_concentrations\n\ndraw_concentrations((X[0], y[0]), labels=('Input Concentration', 'Output Concentration'))\n", "Calibrate Influence Coefficients\nAs mentioned above, the microstructures (concentration fields) does not have discrete phases. 
This leaves the number of local states in local state space as a free hyper parameter. In previous work it has been shown that as you increase the number of local states, the accuracy of MKS model increases (see Fast et al.), but as the number of local states increases, the difference in accuracy decreases. Some work needs to be done in order to find the practical number of local states that we will use. \nOptimizing the Number of Local States\nLet's split the calibrate dataset into testing and training datasets. The function train_test_split for the machine learning python module sklearn provides a convenient interface to do this. 80% of the dataset will be used for training and the remaining 20% will be used for testing by setting test_size equal to 0.2. The state of the random number generator used to make the split can be set using random_state.", "import sklearn\nfrom sklearn.cross_validation import train_test_split\n\nsplit_shape = (X.shape[0],) + (np.product(X.shape[1:]),)\nX_train, X_test, y_train, y_test = train_test_split(X.reshape(split_shape), y.reshape(split_shape),\n test_size=0.5, random_state=3)\n", "We are now going to calibrate the influence coefficients while varying the number of local states from 2 up to 20. Each of these models will then predict the evolution of the concentration fields. Mean square error will be used to compared the results with the testing dataset to evaluate how the MKS model's performance changes as we change the number of local states. \nFirst we need to import the class MKSLocalizationModel from pymks.", "from pymks import MKSLocalizationModel\nfrom pymks.bases import PrimitiveBasis\n", "Next we will calibrate the influence coefficients while varying the number of local states and compute the mean squared error. The following demonstrates how to use Scikit-learn's GridSearchCV to optimize n_states as a hyperparameter. Of course, the best fit is always with a larger value of n_states. Increasing this parameter does not overfit the data.", "from sklearn.grid_search import GridSearchCV\n\nparameters_to_tune = {'n_states': np.arange(2, 11)}\nprim_basis = PrimitiveBasis(2, [-1, 1])\nmodel = MKSLocalizationModel(prim_basis)\ngs = GridSearchCV(model, parameters_to_tune, cv=5, fit_params={'size': (n, n)})\ngs.fit(X_train, y_train)\n\n\nprint(gs.best_estimator_)\nprint(gs.score(X_test, y_test))\n\nfrom pymks.tools import draw_gridscores\n\ndraw_gridscores(gs.grid_scores_, 'n_states',\n score_label='R-squared', param_label='L-Number of Local States')\n", "As expected the accuracy of the MKS model monotonically increases as we increase n_states, but accuracy doesn't improve significantly as n_states gets larger than signal digits. \nIn order to save on computation costs let's set calibrate the influence coefficients with n_states equal to 6, but realize that if we need slightly more accuracy the value can be increased.", "model = MKSLocalizationModel(basis=PrimitiveBasis(6, [-1, 1]))\nmodel.fit(X, y)\n", "Here are the first 4 influence coefficients.", "from pymks.tools import draw_coeff\n\ndraw_coeff(model.coeff[...,:4])\n", "Predict Microstructure Evolution\nWith the calibrated influence coefficients, we are ready to predict the evolution of a concentration field. In order to do this, we need to have the Cahn-Hilliard simulation and the MKS model start with the same initial concentration phi0 and evolve in time. 
In order to do the Cahn-Hilliard simulation we need an instance of the class CahnHilliardSimulation.", "from pymks.datasets.cahn_hilliard_simulation import CahnHilliardSimulation\nnp.random.seed(191)\n\nphi0 = np.random.normal(0, 1e-9, (1, n, n))\nch_sim = CahnHilliardSimulation(dt=dt)\nphi_sim = phi0.copy()\nphi_pred = phi0.copy()\n", "In order to move forward in time, we need to feed the concentration back into the Cahn-Hilliard simulation and the MKS model.", "time_steps = 10\n\nfor ii in range(time_steps):\n ch_sim.run(phi_sim)\n phi_sim = ch_sim.response\n phi_pred = model.predict(phi_pred)\n", "Let's take a look at the concentration fields.", "from pymks.tools import draw_concentrations_compare\n\ndraw_concentrations((phi_sim[0], phi_pred[0]), labels=('Simulation', 'MKS'))\n", "The MKS model was able to capture the microstructure evolution with 6 local states. \nResizing the Coefficients to use on Larger Systems\nNow let's try and predict a larger simulation by resizing the coefficients and provide a larger initial concentratio field.", "m = 3 * n\nmodel.resize_coeff((m, m))\n\nphi0 = np.random.normal(0, 1e-9, (1, m, m))\nphi_sim = phi0.copy()\nphi_pred = phi0.copy()\n", "Once again we are going to march forward in time by feeding the concentration fields back into the Cahn-Hilliard simulation and the MKS model.", "for ii in range(1000):\n ch_sim.run(phi_sim)\n phi_sim = ch_sim.response\n phi_pred = model.predict(phi_pred)\n", "Let's take a look at the results.", "from pymks.tools import draw_concentrations_compare\n\ndraw_concentrations_compare((phi_sim[0], phi_pred[0]), labels=('Simulation', 'MKS'))\n", "The MKS model with resized influence coefficients was able to reasonably predict the structure evolution for a larger concentration field." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
google/trax
trax/examples/trax_data_Explained.ipynb
apache-2.0
[ "<a href=\"https://colab.research.google.com/github/SauravMaheshkar/trax/blob/SauravMaheshkar-example-1/examples/trax_data_Explained.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "#@title\n# Copyright 2020 Google LLC.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# https://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n## Install the Latest Version of Trax\n!pip install --upgrade trax", "Notebook Author: @SauravMaheshkar\nIntroduction", "import trax", "Serial Fn\nIn Trax, we use combinators to build input pipelines, much like building deep learning models. The Serial combinator applies layers serially using function composition and uses stack semantics to manage data. \nTrax has the following definition for a Serial combinator.\n\ndef Serial(*fns):\n def composed_fns(generator=None):\n for f in fastmath.tree_flatten(fns):\n generator = f(generator)\n return generator\n return composed_fns\n\nThe Serial function has the following structure:\n\nIt takes as input arbitrary number of functions\nConvert the structure into lists\nIterate through the list and apply the functions Serially\n\n\nThe fastmath.tree_flatten() function, takes a tree as a input and returns a flattened list. This way we can use various generator functions like Tokenize and Shuffle, and apply them serially by 'iterating' through the list. \nInitially, we've defined generator to None. Thus, in the first iteration we have no input and thus the first step executes the first function in our tree structure. In the next iteration, the generator variable is updated to be the output of the next function in the list.\nLog Function\n\n```\ndef Log(n_steps_per_example=1, only_shapes=True):\n def log(stream):\n counter = 0\n for example in stream:\n item_to_log = example\n if only_shapes:\n item_to_log = fastmath.nested_map(shapes.signature, example)\n if counter % n_steps_per_example == 0:\n logging.info(str(item_to_log))\n print(item_to_log)\n counter += 1\n yield example\n return log\n\nEvery Deep Learning Framework needs to have a logging component for efficient debugging. \ntrax.data.Log generator uses the absl package for logging. It uses a fastmath.nested_map function that maps a certain function recursively inside a object. 
In the case depicted below, the function maps the shapes.signature recursively inside the input stream, thus giving us the shapes of the various objects in our stream.\n--\nThe following two cells show the difference between when we set the only_shapes variable to False", "data_pipeline = trax.data.Serial(\n trax.data.TFDS('imdb_reviews', keys=('text', 'label'), train=True),\n trax.data.Tokenize(vocab_dir='gs://trax-ml/vocabs/', vocab_file='en_8k.subword', keys=[0]),\n trax.data.Log(only_shapes=False)\n )\nexample = data_pipeline()\nprint(next(example))\n\ndata_pipeline = trax.data.Serial(\n trax.data.TFDS('imdb_reviews', keys=('text', 'label'), train=True),\n trax.data.Tokenize(vocab_dir='gs://trax-ml/vocabs/', vocab_file='en_8k.subword', keys=[0]),\n trax.data.Log(only_shapes=True)\n )\nexample = data_pipeline()\nprint(next(example))", "Shuffling our datasets\nTrax offers two generator functions to add shuffle functionality in our input pipelines. \n\nThe shuffle function shuffles a given stream\nThe Shuffle function returns a shuffle function instead\n\nshuffle\n\n```\ndef shuffle(samples, queue_size):\n if queue_size < 1:\n raise ValueError(f'Arg queue_size ({queue_size}) is less than 1.')\n if queue_size == 1:\n logging.warning('Queue size of 1 results in no shuffling.')\n queue = []\n try:\n queue.append(next(samples))\n i = np.random.randint(queue_size)\n yield queue[i]\n queue[i] = sample\n except StopIteration:\n logging.warning(\n 'Not enough samples (%d) to fill initial queue (size %d).',\n len(queue), queue_size)\n np.random.shuffle(queue)\n for sample in queue:\n yield sample\n\nThe shuffle function takes two inputs, the data stream and the queue size (minimum number of samples within which the shuffling takes place). Apart from the usual warnings, for negative and unity queue sizes, this generator function shuffles the given stream using np.random.randint() by randomly picks out integers using the queue_size as a range and then shuffle this new stream again using the np.random.shuffle()", "sentence = ['Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?',\n 'But I must explain to you how all this mistaken idea of denouncing pleasure and praising pain was born and I will give you a complete account of the system, and expound the actual teachings of the great explorer of the truth, the master-builder of human happiness. No one rejects, dislikes, or avoids pleasure itself, because it is pleasure, but because those who do not know how to pursue pleasure rationally encounter consequences that are extremely painful. 
Nor again is there anyone who loves or pursues or desires to obtain pain of itself, because it is pain, but because occasionally circumstances occur in which toil and pain can procure him some great pleasure. To take a trivial example, which of us ever undertakes laborious physical exercise, except to obtain some advantage from it? But who has any right to find fault with a man who chooses to enjoy a pleasure that has no annoying consequences, or one who avoids a pain that produces no resultant pleasure?',\n 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum',\n 'At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat.']\n\ndef sample_generator(x):\n for i in x:\n yield i\n\nexample_shuffle = list(trax.data.inputs.shuffle(sample_generator(sentence), queue_size = 2))\nexample_shuffle", "Shuffle\n\n```\ndef Shuffle(queue_size=1024): \n return lambda g: shuffle(g, queue_size)\n\nThis function returns the aforementioned shuffle function and is mostly used in input pipelines.\nBatch Generators\nbatch\nThis function, creates batches for the input generator function.\n\n```\ndef batch(generator, batch_size):\n if batch_size <= 0:\n raise ValueError(f'Batch size must be positive, but is {batch_size}.')\n buf = []\n for example in generator:\n buf.append(example)\n if len(buf) == batch_size:\n batched_example = tuple(np.stack(x) for x in zip(*buf))\n yield batched_example\n buf = []\n\nIt keeps adding objects from the generator into a list until the size becomes equal to the batch_size and then creates batches using the np.stack() function.\nIt also raises an error for non-positive batch_sizes.\nBatch\n\n```\n def Batch(batch_size):\n return lambda g: batch(g, batch_size)\n\nThis Function returns the aforementioned batch function with given batch size.\nPad to Maximum Dimensions\nThis function is used to pad a tuple of tensors to a joint dimension and return their batch.\nFor example, in this case a pair of tensors (1,2) and ( (3,4) , (5,6) ) is changed to (1,2,0) and ( (3,4) , (5,6) , 0)", "import numpy as np\n\ntensors = np.array([(1.,2.),\n ((3.,4.),(5.,6.))])\npadded_tensors = trax.data.inputs.pad_to_max_dims(tensors=tensors, boundary=3)\npadded_tensors", "Creating Buckets\nFor training Recurrent Neural Networks, with large vocabulary a method called Bucketing is usually applied. 
\nThe usual technique of using padding ensures that all occurences within a mini-batch are of the same length. But this reduces the inter-batch variability and intuitively puts similar sentences into the same batch therefore, reducing the overall robustness of the system. \nThus, we use Bucketing where multiple buckets are created depending on the length of the sentences and these occurences are assigned to buckets on the basis of which bucket corresponds to it's length. We need to ensure that the bucket sizes are large for adding some variablity to the system.\nbucket_by_length\n\n```\ndef bucket_by_length(generator, length_fn, boundaries, batch_sizes,strict_pad_on_len=False):\n buckets = [[] for _ in range(len(batch_sizes))]\n boundaries = boundaries + [math.inf] \n for example in generator:\n length = length_fn(example)\n bucket_idx = min([i for i, b in enumerate(boundaries) if length <= b])\n buckets[bucket_idx].append(example)\n if len(buckets[bucket_idx]) == batch_sizes[bucket_idx]:\n batched = zip(*buckets[bucket_idx])\n boundary = boundaries[bucket_idx]\n boundary = None if boundary == math.inf else boundary\n padded_batch = tuple(\n pad_to_max_dims(x, boundary, strict_pad_on_len) for x in batched)\n yield padded_batch\n buckets[bucket_idx] = []\n\n\nThis function can be summarised as:\n\n\nCreate buckets as per the lengths given in the batch_sizes array\n\n\nAssign sentences into buckets if their length matches the bucket size\n\n\nIf padding is required, we use the pad_to_max_dims function\n\n\n\nParameters\n\ngenerator: The input generator function\nlength_fn: A custom length function for determing the length of functions, not necessarily len()\nboundaries: A python list containing corresponding bucket boundaries\nbatch_sizes: A python list containing batch sizes\nstrict_pad_on_len: – A python boolean variable (True or False). If set to true then the function pads on the length dimension, where dim[0] is strictly a multiple of boundary.\n\nBucketByLength\n\n```\ndef BucketByLength(boundaries, batch_sizes,length_keys=None, length_axis=0, strict_pad_on_len=False):\n length_keys = length_keys or [0, 1]\n length_fn = lambda x: _length_fn(x, length_axis, length_keys)\n return lambda g: bucket_by_length(g, length_fn, boundaries, batch_sizes, strict_pad_on_len)\n\n\nThis function, is usually used inside input pipelines(combinators) and uses the afforementioned bucket_by_length. 
It applies a predefined length_fn which chooses the maximum shape on length_axis over length_keys.\nIt's use is illustrated below", "data_pipeline = trax.data.Serial(\n trax.data.TFDS('imdb_reviews', keys=('text', 'label'), train=True),\n trax.data.Tokenize(vocab_dir='gs://trax-ml/vocabs/', vocab_file='en_8k.subword', keys=[0]),\n trax.data.BucketByLength(boundaries=[32, 128, 512, 2048],\n batch_sizes=[512, 128, 32, 8, 1],\n length_keys=[0]),\n trax.data.Log(only_shapes=True)\n )\nexample = data_pipeline()\nprint(next(example))", "Filter by Length\n\n```\ndef FilterByLength(max_length,length_keys=None, length_axis=0):\n length_keys = length_keys or [0, 1]\n length_fn = lambda x: _length_fn(x, length_axis, length_keys)\n def filtered(gen):\n for example in gen:\n if length_fn(example) <= max_length:\n yield example\n return filtered\n\n\nThis function used the same predefined length_fn to only include those instances which are less than the given max_length parameter.", "Filtered = trax.data.Serial(\n trax.data.TFDS('imdb_reviews', keys=('text', 'label'), train=True),\n trax.data.Tokenize(vocab_dir='gs://trax-ml/vocabs/', vocab_file='en_8k.subword', keys=[0]),\n trax.data.BucketByLength(boundaries=[32, 128, 512, 2048],\n batch_sizes=[512, 128, 32, 8, 1],\n length_keys=[0]),\n trax.data.FilterByLength(max_length=2048, length_keys=[0]),\n trax.data.Log(only_shapes=True)\n )\nfiltered_example = Filtered()\nprint(next(filtered_example))", "Adding Loss Weights\nadd_loss_weights\n\n```\ndef add_loss_weights(generator, id_to_mask=None):\n for example in generator:\n if len(example) > 3 or len(example) < 2:\n assert id_to_mask is None, 'Cannot automatically mask this stream.'\n yield example\n else:\n if len(example) == 2:\n weights = np.ones_like(example[1]).astype(np.float32)\n else:\n weights = example[2].astype(np.float32)\n mask = 1.0 - np.equal(example[1], id_to_mask).astype(np.float32)\n weights *= mask\n yield (example[0], example[1], weights)\n\n\nThis function essentially adds a loss mask (tensor of ones of the same shape) to the input stream. \nMasking is essentially a way to tell sequence-processing layers that certain timesteps in an input are missing, and thus should be skipped when processing the data.\nThus, it adds 'weights' to the system. \n\nParameters\n\ngenerator: The input data generator\nid_to_mask: The value with which to mask. Can be used as &lt;PAD&gt; in NLP.\n\n```\ntrain_generator = trax.data.inputs.add_loss_weights(\n data_generator(batch_size, x_train, y_train,vocab['<PAD>'], True),\n id_to_mask=vocab['<PAD>'])\n```\nFor example, in this case I used the add_loss_weights function to add padding while implementing Named Entity Recogntion using the Reformer Architecture. You can read more about the project here.\nAddLossWeights\nThis function performs the afforementioned add_loss_weights to the data stream. \n\n```\ndef AddLossWeights(id_to_mask=None):\n return lambda g: add_loss_weights(g,id_to_mask=id_to_mask)", "data_pipeline = trax.data.Serial(\n trax.data.TFDS('imdb_reviews', keys=('text', 'label'), train=True),\n trax.data.Tokenize(vocab_dir='gs://trax-ml/vocabs/', vocab_file='en_8k.subword', keys=[0]),\n trax.data.Shuffle(),\n trax.data.FilterByLength(max_length=2048, length_keys=[0]),\n trax.data.BucketByLength(boundaries=[ 32, 128, 512, 2048],\n batch_sizes=[512, 128, 32, 8, 1],\n length_keys=[0]),\n trax.data.AddLossWeights(),\n trax.data.Log(only_shapes=True)\n )\n\nexample = data_pipeline()\nprint(next(example))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
csaladenes/aviation
code/airport_dest_parser2.ipynb
mit
[ "import pandas as pd, json, numpy as np\nimport matplotlib.pyplot as plt\nfrom bs4 import BeautifulSoup\n%matplotlib inline", "Load airports of each country", "L=json.loads(file('../json/L.json','r').read())\nM=json.loads(file('../json/M.json','r').read())\nN=json.loads(file('../json/N.json','r').read())\n\nimport requests\n\nAP={}\nfor c in M:\n if c not in AP:AP[c]={}\n for i in range(len(L[c])):\n AP[c][N[c][i]]=L[c][i]", "record schedules for 2 weeks, then augment count with weekly flight numbers.\nseasonal and seasonal charter will count as once per week for 3 months, so 12/52 per week. TGM separate, since its history is in the past.\nparse Departures", "baseurl='https://www.airportia.com/'\nimport requests, urllib2\n\ndef urlgetter(url):\n s = requests.Session()\n cookiesopen = s.get(url)\n cookies=str(s.cookies)\n fcookies=[[k[:k.find('=')],k[k.find('=')+1:k.find(' for ')]] for k in cookies[cookies.find('Cookie '):].split('Cookie ')[1:]]\n #push token\n opener = urllib2.build_opener()\n for k in fcookies:\n opener.addheaders.append(('Cookie', k[0]+'='+k[1]))\n #read html\n return s.get(url).content", "good dates", "SD={}\nSC=json.loads(file('../json/SC2.json','r').read())\n\nfor h in range(2,5):#len(AP.keys())):\n c=AP.keys()[h]\n #country not parsed yet\n if c in SC:\n if c not in SD:\n SD[c]=[]\n print h,c\n airportialinks=AP[c]\n sch={}\n #all airports of country, where there is traffic\n for i in airportialinks:\n if i in SC[c]:\n print i,\n if i not in sch:sch[i]={}\n url=baseurl+airportialinks[i]\n m=urlgetter(url)\n for d in range (3,7):\n #date not parsed yet\n if d not in sch[i]:\n url=baseurl+airportialinks[i]+'departures/201704'+str(d)\n m=urlgetter(url)\n soup = BeautifulSoup(m, \"lxml\")\n #if there are flights at all\n if len(soup.findAll('table'))>0:\n sch[i][d]=pd.read_html(m)[0] \n else: print '--W-',d,\n SD[c]=sch\n print ", "Save", "cnc_path='../../universal/countries/'\ncnc=pd.read_excel(cnc_path+'cnc.xlsx').set_index('Name')\n\nMDF=pd.DataFrame()\n\nfor c in SD:\n sch=SD[c]\n mdf=pd.DataFrame()\n for i in sch:\n for d in sch[i]:\n df=sch[i][d].drop(sch[i][d].columns[3:],axis=1).drop(sch[i][d].columns[0],axis=1)\n df['From']=i\n df['Date']=d\n mdf=pd.concat([mdf,df])\n mdf=mdf.replace('Hahn','Frankfurt')\n mdf=mdf.replace('Hahn HHN','Frankfurt HHN')\n mdf['City']=[i[:i.rfind(' ')] for i in mdf['To']]\n mdf['Airport']=[i[i.rfind(' ')+1:] for i in mdf['To']]\n file('../countries/'+cnc.T.loc[c]['ISO2'].lower()+\"/json/mdf_dest.json\",'w').write(json.dumps(mdf.reset_index().to_json()))\n MDF=pd.concat([MDF,mdf])\n\nMDF.reset_index().to_json('../json/MDF.json')" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
AllenDowney/ThinkStats2
solutions/chap08soln.ipynb
gpl-3.0
[ "Chapter 8\nExamples and Exercises from Think Stats, 2nd Edition\nhttp://thinkstats2.com\nCopyright 2016 Allen B. Downey\nMIT License: https://opensource.org/licenses/MIT", "from os.path import basename, exists\n\n\ndef download(url):\n filename = basename(url)\n if not exists(filename):\n from urllib.request import urlretrieve\n\n local, _ = urlretrieve(url, filename)\n print(\"Downloaded \" + local)\n\n\ndownload(\"https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkstats2.py\")\ndownload(\"https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkplot.py\")\n\nimport numpy as np\n\nimport thinkstats2\nimport thinkplot", "The estimation game\nRoot mean squared error is one of several ways to summarize the average error of an estimation process.", "def RMSE(estimates, actual):\n \"\"\"Computes the root mean squared error of a sequence of estimates.\n\n estimate: sequence of numbers\n actual: actual value\n\n returns: float RMSE\n \"\"\"\n e2 = [(estimate-actual)**2 for estimate in estimates]\n mse = np.mean(e2)\n return np.sqrt(mse)", "The following function simulates experiments where we try to estimate the mean of a population based on a sample with size n=7. We run iters=1000 experiments and collect the mean and median of each sample.", "import random\n\ndef Estimate1(n=7, iters=1000):\n \"\"\"Evaluates RMSE of sample mean and median as estimators.\n\n n: sample size\n iters: number of iterations\n \"\"\"\n mu = 0\n sigma = 1\n\n means = []\n medians = []\n for _ in range(iters):\n xs = [random.gauss(mu, sigma) for _ in range(n)]\n xbar = np.mean(xs)\n median = np.median(xs)\n means.append(xbar)\n medians.append(median)\n\n print('Experiment 1')\n print('rmse xbar', RMSE(means, mu))\n print('rmse median', RMSE(medians, mu))\n \nEstimate1()", "Using $\\bar{x}$ to estimate the mean works a little better than using the median; in the long run, it minimizes RMSE. But using the median is more robust in the presence of outliers or large errors.\nEstimating variance\nThe obvious way to estimate the variance of a population is to compute the variance of the sample, $S^2$, but that turns out to be a biased estimator; that is, in the long run, the average error doesn't converge to 0.\nThe following function computes the mean error for a collection of estimates.", "def MeanError(estimates, actual):\n \"\"\"Computes the mean error of a sequence of estimates.\n\n estimate: sequence of numbers\n actual: actual value\n\n returns: float mean error\n \"\"\"\n errors = [estimate-actual for estimate in estimates]\n return np.mean(errors)", "The following function simulates experiments where we try to estimate the variance of a population based on a sample with size n=7. We run iters=1000 experiments and two estimates for each sample, $S^2$ and $S_{n-1}^2$.", "def Estimate2(n=7, iters=1000):\n mu = 0\n sigma = 1\n\n estimates1 = []\n estimates2 = []\n for _ in range(iters):\n xs = [random.gauss(mu, sigma) for i in range(n)]\n biased = np.var(xs)\n unbiased = np.var(xs, ddof=1)\n estimates1.append(biased)\n estimates2.append(unbiased)\n\n print('mean error biased', MeanError(estimates1, sigma**2))\n print('mean error unbiased', MeanError(estimates2, sigma**2))\n \nEstimate2()", "The mean error for $S^2$ is non-zero, which suggests that it is biased. 
The mean error for $S_{n-1}^2$ is close to zero, and gets even smaller if we increase iters.\nThe sampling distribution\nThe following function simulates experiments where we estimate the mean of a population using $\\bar{x}$, and returns a list of estimates, one from each experiment.", "def SimulateSample(mu=90, sigma=7.5, n=9, iters=1000):\n xbars = []\n for j in range(iters):\n xs = np.random.normal(mu, sigma, n)\n xbar = np.mean(xs)\n xbars.append(xbar)\n return xbars\n\nxbars = SimulateSample()", "Here's the \"sampling distribution of the mean\" which shows how much we should expect $\\bar{x}$ to vary from one experiment to the next.", "cdf = thinkstats2.Cdf(xbars)\nthinkplot.Cdf(cdf)\nthinkplot.Config(xlabel='Sample mean',\n ylabel='CDF')", "The mean of the sample means is close to the actual value of $\\mu$.", "np.mean(xbars)", "An interval that contains 90% of the values in the sampling disrtribution is called a 90% confidence interval.", "ci = cdf.Percentile(5), cdf.Percentile(95)\nci", "And the RMSE of the sample means is called the standard error.", "stderr = RMSE(xbars, 90)\nstderr", "Confidence intervals and standard errors quantify the variability in the estimate due to random sampling.\nEstimating rates\nThe following function simulates experiments where we try to estimate the mean of an exponential distribution using the mean and median of a sample.", "def Estimate3(n=7, iters=1000):\n lam = 2\n\n means = []\n medians = []\n for _ in range(iters):\n xs = np.random.exponential(1.0/lam, n)\n L = 1 / np.mean(xs)\n Lm = np.log(2) / thinkstats2.Median(xs)\n means.append(L)\n medians.append(Lm)\n\n print('rmse L', RMSE(means, lam))\n print('rmse Lm', RMSE(medians, lam))\n print('mean error L', MeanError(means, lam))\n print('mean error Lm', MeanError(medians, lam))\n \nEstimate3()", "The RMSE is smaller for the sample mean than for the sample median.\nBut neither estimator is unbiased.\nExercises\nExercise: Suppose you draw a sample with size n=10 from an exponential distribution with λ=2. Simulate this experiment 1000 times and plot the sampling distribution of the estimate L. 
Compute the standard error of the estimate and the 90% confidence interval.\nRepeat the experiment with a few different values of n and make a plot of standard error versus n.", "# Solution\n\ndef SimulateSample(lam=2, n=10, iters=1000):\n \"\"\"Sampling distribution of L as an estimator of exponential parameter.\n\n lam: parameter of an exponential distribution\n n: sample size\n iters: number of iterations\n \"\"\"\n def VertLine(x, y=1):\n thinkplot.Plot([x, x], [0, y], color='0.8', linewidth=3)\n\n estimates = []\n for _ in range(iters):\n xs = np.random.exponential(1.0/lam, n)\n lamhat = 1.0 / np.mean(xs)\n estimates.append(lamhat)\n\n stderr = RMSE(estimates, lam)\n print('standard error', stderr)\n\n cdf = thinkstats2.Cdf(estimates)\n ci = cdf.Percentile(5), cdf.Percentile(95)\n print('confidence interval', ci)\n VertLine(ci[0])\n VertLine(ci[1])\n\n # plot the CDF\n thinkplot.Cdf(cdf)\n thinkplot.Config(xlabel='estimate',\n ylabel='CDF',\n title='Sampling distribution')\n\n return stderr\n\nSimulateSample()\n\n# Solution\n\n# My conclusions:\n\n# 1) With sample size 10:\n\n# standard error 0.762510819389\n# confidence interval (1.2674054394352277, 3.5377353792673705)\n\n# 2) As sample size increases, standard error and the width of\n# the CI decrease:\n\n# 10 0.90 (1.3, 3.9)\n# 100 0.21 (1.7, 2.4)\n# 1000 0.06 (1.9, 2.1)\n\n# All three confidence intervals contain the actual value, 2.", "Exercise: In games like hockey and soccer, the time between goals is roughly exponential. So you could estimate a team’s goal-scoring rate by observing the number of goals they score in a game. This estimation process is a little different from sampling the time between goals, so let’s see how it works.\nWrite a function that takes a goal-scoring rate, lam, in goals per game, and simulates a game by generating the time between goals until the total time exceeds 1 game, then returns the number of goals scored.\nWrite another function that simulates many games, stores the estimates of lam, then computes their mean error and RMSE.\nIs this way of making an estimate biased?", "def SimulateGame(lam):\n \"\"\"Simulates a game and returns the estimated goal-scoring rate.\n\n lam: actual goal scoring rate in goals per game\n \"\"\"\n goals = 0\n t = 0\n while True:\n time_between_goals = random.expovariate(lam)\n t += time_between_goals\n if t > 1:\n break\n goals += 1\n\n # estimated goal-scoring rate is the actual number of goals scored\n L = goals\n return L\n\n# Solution\n\n# The following function simulates many games, then uses the\n# number of goals scored as an estimate of the true long-term\n# goal-scoring rate.\n\ndef Estimate6(lam=2, m=1000000):\n\n estimates = []\n for i in range(m):\n L = SimulateGame(lam)\n estimates.append(L)\n\n print('Experiment 4')\n print('rmse L', RMSE(estimates, lam))\n print('mean error L', MeanError(estimates, lam))\n \n pmf = thinkstats2.Pmf(estimates)\n thinkplot.Hist(pmf)\n thinkplot.Config(xlabel='Goals scored', ylabel='PMF')\n \nEstimate6()\n\n# Solution\n\n# My conclusions:\n\n# 1) RMSE for this way of estimating lambda is 1.4\n\n# 2) The mean error is small and decreases with m, so this estimator\n# appears to be unbiased.\n\n# One note: If the time between goals is exponential, the distribution\n# of goals scored in a game is Poisson.\n\n# See https://en.wikipedia.org/wiki/Poisson_distribution", "Exercise: In this chapter we used $\\bar{x}$ and median to estimate µ, and found that $\\bar{x}$ yields lower MSE. 
Also, we used $S^2$ and $S_{n-1}^2$ to estimate σ, and found that $S^2$ is biased and $S_{n-1}^2$ unbiased.\nRun similar experiments to see if $\\bar{x}$ and median are biased estimates of µ. Also check whether $S^2$ or $S_{n-1}^2$ yields a lower MSE.", "# Solution\n\ndef Estimate4(n=7, iters=100000):\n \"\"\"Mean error for xbar and median as estimators of population mean.\n\n n: sample size\n iters: number of iterations\n \"\"\"\n mu = 0\n sigma = 1\n\n means = []\n medians = []\n for _ in range(iters):\n xs = [random.gauss(mu, sigma) for i in range(n)]\n xbar = np.mean(xs)\n median = np.median(xs)\n means.append(xbar)\n medians.append(median)\n\n print('Experiment 1')\n print('mean error xbar', MeanError(means, mu))\n print('mean error median', MeanError(medians, mu))\n \nEstimate4()\n\n# Solution\n\ndef Estimate5(n=7, iters=100000):\n \"\"\"RMSE for biased and unbiased estimators of population variance.\n\n n: sample size\n iters: number of iterations\n \"\"\"\n mu = 0\n sigma = 1\n\n estimates1 = []\n estimates2 = []\n for _ in range(iters):\n xs = [random.gauss(mu, sigma) for i in range(n)]\n biased = np.var(xs)\n unbiased = np.var(xs, ddof=1)\n estimates1.append(biased)\n estimates2.append(unbiased)\n\n print('Experiment 2')\n print('RMSE biased', RMSE(estimates1, sigma**2))\n print('RMSE unbiased', RMSE(estimates2, sigma**2))\n\nEstimate5()\n\n# Solution\n\n# My conclusions:\n\n# 1) xbar and median yield lower mean error as m increases, so neither\n# one is obviously biased, as far as we can tell from the experiment.\n\n# 2) The biased estimator of variance yields lower RMSE than the unbiased\n# estimator, by about 10%. And the difference holds up as m increases." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
JCardenasRdz/Insights-into-the-I-SPY-clinical-trial
2-Inferential_Stats.ipynb
mit
[ "<h1><center> Inferential statistics on the I-SPY1 Clinical Trial</center></h1>", "# import custom modules wrote by julio\nimport seaborn as sns\nimport pandas as pd\n%matplotlib inline\n#from capstone_01 import clean_data\nfrom ispy1 import inferential_statistics\n\n# reload modules without restartign the kernel (makes development easier)\n# import importlib\n#importlib.reload(inferential_statistics);", "0. load clean data", "df = pd.read_csv('./data/I-SPY_1_clean_data.csv')\ndf.head(2)", "1. Inferential_statistics: Categorical vs Categorical (Chi-2 test)\n1. 1 Effect of categorical predictors on Pathological complete response (PCR)", "# example of contingency table\ninferential_statistics.contingency_table('PCR', 'ER+',df)\n\n# Perform chi-2 test on all categorical variables\npredictors = ['White', 'ER+', 'PR+', 'HR+','Right_Breast']\noutcome = 'PCR'\ninferential_statistics.categorical_data(outcome, predictors, df)", "<h3><center> 1.1.2 Conclusion: Only `ER+` , `PR+`, and `HR+` have an effect on `PCR`</center></h3>\n\n1. 2 Effect of categorical predictors on Survival (Alive)", "predictors = ['White', 'ER+', 'PR+', 'HR+','Right_Breast','PCR']\noutcome = 'Alive'\ninferential_statistics.categorical_data(outcome, predictors, df)", "<h3><center> 1.2.2 Conclusion: Only `ER+` and `HR+` have an effect on `Alive`</center></h3>\n\n2. Inferential_statistics: Continous vs Categorical (ANOVA)\n2.1 Effect of Age on PCR", "predictor= ['age']\noutcome = 'PCR'\nanova_table, OLS = inferential_statistics.linear_models(df, outcome, predictor);\nsns.boxplot(x= outcome, y=predictor[0], data=df, palette=\"Set3\");", "2.2 Effect of Age on Survival", "predictor= ['age']\noutcome = 'Alive'\nanova_table, OLS = inferential_statistics.linear_models(df, outcome, predictor);\nsns.boxplot(x= outcome, y=predictor[0], data=df, palette=\"Set3\");", "2.3 Explore interactions between age, survival, and PCR", "# create a boxplot to visualize this interaction\nax = sns.boxplot(x= 'PCR', y='age', hue ='Alive',data=df, palette=\"Set3\");\nax.set_title('Interactions between age, survival, and PCR');\n\n# create dataframe only for patients with PCR = Yes\ndf_by_PCR = df.loc[df.PCR=='No',:]\ndf_by_PCR.head()\n\n# Anova age vs Alive\npredictor= ['age']\noutcome = 'Alive'\nanova_table, OLS = inferential_statistics.linear_models(df_by_PCR, outcome, predictor);\n\n# estimate the effect size\nmri_features = ['age']\noutcome = 'Alive'\n# Effect Size\ninferential_statistics.effect_size( df_by_PCR, mri_features, outcome)", "Conclusion.\n\nage has an important effect on Alive for patients with PCR = Yes\nTo quantitify this effect a logistic regression is needed\n\n2.4 Effect of MRI measurements on PCR ANOVA", "R = inferential_statistics.anova_MRI('PCR', df);", "Estimate the effect size", "mri_features = ['MRI_LD_Baseline', 'MRI_LD_1_3dAC', 'MRI_LD_Int_Reg', 'MRI_LD_PreSurg']\noutcome = 'PCR'\n# Effect Size\ninferential_statistics.effect_size( df, mri_features, outcome)", "2.5 Effect of MRI measurements on Survival ANOVA", "outcome = 'Alive'\nR = inferential_statistics.anova_MRI(outcome, df);\n\nmri_features = ['MRI_LD_Baseline', 'MRI_LD_1_3dAC', 'MRI_LD_Int_Reg', 'MRI_LD_PreSurg']\noutcome = 'Alive'\n# Effect Size\ninferential_statistics.effect_size( df, mri_features, outcome)", "stratify analysis by PCR", "# predictors and outcomes\npredictors= ['MRI_LD_Baseline', 'MRI_LD_1_3dAC', 'MRI_LD_Int_Reg', 'MRI_LD_PreSurg']\n\n# split data and run anova\nPCR_outcomes = ['No','Yes']\n\nfor out in PCR_outcomes:\n df_by_PCR = df.loc[df.PCR == 
out,:]\n print('Outcome = Alive' + ' | ' + 'PCR = ' + out)\n # Anova\n anova_table, OLS = inferential_statistics.linear_models(df_by_PCR, 'Alive', predictors);\n # Effect Size\n print(inferential_statistics.effect_size( df_by_PCR, predictors, 'Alive'))\n print('\\n' * 2)", "Conclusion\n\nThe largest tumor dimension measured at baseline (MRI_LD_Baseline) is not a statistically different between patients who achieved complete pathological response (PCR)and those who did not. While all other MRI measurements are statistically different between PCR = Yes, and PCR = No\nAll MRI measurements of the tumor dimension are different between patients who are Alive at the end of the trial and those who did not. These results do not indicate anything about the size of these effects. An statistically significant effect is not always clinically significant\nThe estimated effect sizes are very small, and most likley not clinically significant", "## 3. Inferential_statistics: Continous vs Categorical (ANOVA)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
bzamecnik/ml
instrument-classification/analyze_instrument_ranges.ipynb
mit
[ "%matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom music21.duration import Duration\nfrom music21.instrument import Instrument\nfrom music21.note import Note, Rest\nfrom music21.stream import Stream\nfrom music21.tempo import MetronomeMark\nimport numpy as np\nimport os\nimport scipy.io.wavfile\n\nfrom generate_audio_samples import make_instrument, write_midi\nfrom midi2audio import FluidSynth\n\nmpl.rc('figure', figsize=(20, 10))\n\nmidi_notes = np.arange(128)\ninstruments = np.arange(128)\n\ndef sweep_instrument(instrument_id, output_dir):\n s = Stream()\n duration = Duration(1.0)\n s.append(make_instrument(instrument_id))\n s.append(MetronomeMark(number=120))\n for midi_number in midi_notes:\n s.append(Note(midi=midi_number, duration=duration))\n s.append(Rest(duration=duration))\n os.makedirs(output_dir, exist_ok=True)\n midi_file, audio_file = [\n output_dir + '/instrument_{0:03d}.{1}'.format(instrument_id, ext)\n for ext in ['midi', 'wav']]\n write_midi(s, midi_file)\n print('instrument:', audio_file)\n FluidSynth().midi_to_audio(midi_file, audio_file)\n\ndef sweep_instruments(output_dir):\n for instrument_id in instruments:\n sweep_instrument(instrument_id, output_dir)\n\naudio_dir = 'data/working/instrument-ranges'\nsweep_instruments(audio_dir)\n\ndef analyze_instrument_rms(i, audio_dir):\n \"\"\"\n Compute the RMS of each note in the synthesized signal for a single instrument.\n \"\"\"\n fs, x = scipy.io.wavfile.read('{0}/instrument_{1:03d}.wav'.format(audio_dir, i))\n # convert from stereo to mono\n x = x.mean(axis=1)\n # cut the leading rest\n x = x[fs // 2:]\n # align the ending\n x = x[:len(x) // fs * fs]\n # split the notes\n x_notes = x.reshape(-1, fs)\n # RMS for each note\n x_notes_rms = np.sqrt((x_notes**2).mean(axis=1))\n return x_notes_rms\n\nplt.plot(analyze_instrument_rms(1, audio_dir), '.-')\nplt.title('power for each note')\nplt.xlabel('MIDI tone')\nplt.ylabel('RMS')\nplt.xlim(0,127);\n\ndef analyze_rms_for_all_instruments(audio_dir):\n \"\"\"\n Compute a matrix of RMS for each instrument and note.\n \"\"\"\n return np.vstack([analyze_instrument_rms(i, audio_dir) for i in instruments])\n\nx_rms_instruments_notes = analyze_rms_for_all_instruments(audio_dir)\n\nplt.imshow(x_rms_instruments_notes, interpolation='none')\nplt.suptitle('MIDI instruments range - RMS power')\nplt.xlabel('MIDI note')\nplt.ylabel('MIDI instrument')\nplt.savefig('data/working/instrument_ranges_rms.png');\n\nnp.save('data/working/instrument_ranges_rms.npy', x_rms_instruments_notes)", "There's a peak at value around 1.0 which represents quiet.", "plt.hist(x_rms_instruments_notes[x_rms_instruments_notes <= 1].flatten(), 200);\n\nplt.hist(x_rms_instruments_notes[x_rms_instruments_notes > 1].flatten(), 200);", "The range of instruments split into quiet (black) and sounding (white) regions. We can limit the pitches to the sounding ones.", "plt.imshow(x_rms_instruments_notes > 1, interpolation='none', cmap='gray')\nplt.grid(True)\nplt.suptitle('MIDI instruments range - RMS power')\nplt.xlabel('MIDI note')\nplt.ylabel('MIDI instrument')\nplt.savefig('data/working/instrument_ranges_binary.png');" ]
[ "code", "markdown", "code", "markdown", "code" ]
swirlingsand/deep-learning-foundations
gans/gan_mnist/Intro_to_GANs_Exercises.ipynb
mit
[ "Generative Adversarial Network\nIn this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!\nGANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:\n\nPix2Pix \nCycleGAN\nA whole list\n\nThe idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistiguishable from real data to the discriminator.\n\nThe general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to contruct it's fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can foold the discriminator.\nThe output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates an real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.", "%matplotlib inline\n\nimport pickle as pkl\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nprint(tf.__version__)\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data')", "Model Inputs\nFirst we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.\n\nExercise: Finish the model_inputs function below. Create the placeholders for inputs_real and inputs_z using the input sizes real_dim and z_dim respectively.", "def model_inputs(real_dim, z_dim):\n inputs_real = tf.placeholder(tf.float32, shape = (None, real_dim), name=\"inputs_real\")\n inputs_z = tf.placeholder(tf.float32, shape = (None, z_dim), name =\"inputs_z\")\n \n return inputs_real, inputs_z", "Generator network\n\nHere we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.\nVariable Scope\nHere we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.\nWe could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. 
So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.\nTo use tf.variable_scope, you use a with statement:\npython\nwith tf.variable_scope('scope_name', reuse=False):\n # code here\nHere's more from the TensorFlow documentation to get another look at using tf.variable_scope.\nLeaky ReLU\nTensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one . For this you can use take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:\n$$\nf(x) = max(\\alpha * x, x)\n$$\nTanh Output\nThe generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.\n\nExercise: Implement the generator network in the function below. You'll need to return the tanh output. Make sure to wrap your code in a variable scope, with 'generator' as the scope name, and pass the reuse keyword argument from the function to tf.variable_scope.", "def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):\n ''' Build the generator network.\n \n Arguments\n ---------\n z : Input tensor for the generator\n out_dim : Shape of the generator output\n n_units : Number of units in hidden layer\n reuse : Reuse the variables with tf.variable_scope\n alpha : leak parameter for leaky ReLU\n \n Returns\n -------\n out, logits: \n '''\n with tf.variable_scope('Generator', reuse=reuse):\n # Hidden layer\n h1 = tf.layers.dense(z, n_units, activation = None)\n # Leaky ReLU\n h1 = tf.maximum( (alpha * h1), h1)\n \n # Logits and tanh output\n logits = tf.layers.dense(h1, out_dim, activation = None)\n out = tf.tanh(logits)\n \n return out", "Discriminator\nThe discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.\n\nExercise: Implement the discriminator network in the function below. Same as above, you'll need to return both the logits and the sigmoid output. 
Make sure to wrap your code in a variable scope, with 'discriminator' as the scope name, and pass the reuse keyword argument from the function arguments to tf.variable_scope.", "def discriminator(x, n_units=128, reuse=False, alpha=0.01):\n ''' Build the discriminator network.\n \n Arguments\n ---------\n x : Input tensor for the discriminator\n n_units: Number of units in hidden layer\n reuse : Reuse the variables with tf.variable_scope\n alpha : leak parameter for leaky ReLU\n \n Returns\n -------\n out, logits: \n '''\n with tf.variable_scope('Discriminator', reuse=reuse):\n # Hidden layer\n h1 = tf.layers.dense(x, n_units, activation = None)\n # Leaky ReLU\n h1 = tf.maximum ( (alpha * h1), h1)\n \n logits = tf.layers.dense(h1, 1, activation = None)\n out = tf.sigmoid(logits)\n \n return out, logits", "Hyperparameters", "# Size of input image to discriminator\ninput_size = 784 # 28x28 MNIST images flattened\n# Size of latent vector to generator\nz_size = 784\n# Sizes of hidden layers in generator and discriminator\ng_hidden_size = 256\nd_hidden_size = 256\n# Leak factor for leaky ReLU\nalpha = 0.01\n# Label smoothing \nsmooth = 0.1", "Build network\nNow we're building the network from the functions defined above.\nFirst is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.\nThen, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.\nThen the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).\n\nExercise: Build the network from the functions you defined earlier.", "tf.reset_default_graph()\n# Create our input placeholders\ninput_real, input_z = model_inputs(input_size, z_size)\n\n# Generator network here\ng_model = generator(input_z, input_size)\n# g_model is the generator output\n\n# Disriminator network here\nd_model_real, d_logits_real = discriminator(input_real)\nd_model_fake, d_logits_fake = discriminator(g_model, reuse=True)", "Discriminator and Generator Losses\nNow we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will by sigmoid cross-entropys, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like \npython\ntf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\nFor the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)\nThe discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. 
These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.\nFinally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images.\n\nExercise: Calculate the losses for the discriminator and the generator. There are two discriminator losses, one for real images and one for fake images. For the real image loss, use the real logits and (smoothed) labels of ones. For the fake image loss, use the fake logits with labels of all zeros. The total discriminator loss is the sum of those two losses. Finally, the generator loss again uses the fake logits from the discriminator, but this time the labels are all ones because the generator wants to fool the discriminator.", "# Calculate losses\n\n# One's like for real labels for Discriminator \nreal_labels = tf.ones_like(d_logits_real) * (1 - smooth)\n\nd_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n logits = d_logits_real, labels=real_labels))\n\n# Zeros's like for real labels for Discriminator \nfake_labels = tf.zeros_like(d_logits_real)\n\nd_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n logits = d_logits_fake, labels= fake_labels))\n\n\nd_loss = d_loss_real + d_loss_fake\n\n\n# One's like for fake labels for generator\ngenerated_labels = tf.ones_like(d_logits_fake)\n\ng_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = d_logits_fake, \n labels = generated_labels))", "Optimizers\nWe want to update the generator and discriminator variables separately. So we need to get the variables for each part build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.\nFor the generator optimizer, we only want to generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables to start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance). \nWe can do something similar with the discriminator. All the variables in the discriminator start with discriminator.\nThen, in the optimizer we pass the variable lists to var_list in the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.\n\nExercise: Below, implement the optimizers for the generator and discriminator. First you'll need to get a list of trainable variables, then split that list into two lists, one for the generator variables and another for the discriminator variables. 
Finally, using AdamOptimizer, create an optimizer for each network that update the network variables separately.", "# Optimizers\nlearning_rate = 0.002\n\n# Get the trainable_variables, split into G and D parts\nt_vars = tf.trainable_variables()\ng_vars = [var for var in t_vars if var.name.startswith('Generator')]\nd_vars = [var for var in t_vars if var.name.startswith('Discriminator')]\n\nd_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list = d_vars)\ng_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list = g_vars)", "Training", "batch_size = 100\nepochs = 80\nsamples = []\nlosses = []\nsaver = tf.train.Saver(var_list = g_vars)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n \n # Get images, reshape and rescale to pass to D\n batch_images = batch[0].reshape((batch_size, 784))\n batch_images = batch_images*2 - 1\n \n # Sample random noise for G\n batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n \n # Run optimizers\n _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})\n _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})\n \n # At the end of each epoch, get the losses and print them out\n train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})\n train_loss_g = g_loss.eval({input_z: batch_z})\n \n print(\"Epoch {}/{}...\".format(e+1, epochs),\n \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n \"Generator Loss: {:.4f}\".format(train_loss_g),\n \"Difference Loss: {:.4f}...\".format(train_loss_d-train_loss_g),\n ) \n # Save losses to view after training\n losses.append((train_loss_d, train_loss_g))\n \n # Sample from generator as we're training for viewing afterwards\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, reuse=True),\n feed_dict={input_z: sample_z})\n samples.append(gen_samples)\n saver.save(sess, './checkpoints/generator.ckpt')\n\n# Save training generator samples\nwith open('train_samples.pkl', 'wb') as f:\n pkl.dump(samples, f)", "Results with 128 hidden units\nEpoch 72/100... Discriminator Loss: 1.2292... Generator Loss: 1.0937 Difference Loss: 0.1355...\nEpoch 73/100... Discriminator Loss: 1.1977... Generator Loss: 1.0838 Difference Loss: 0.1139...\nEpoch 74/100... Discriminator Loss: 1.0160... Generator Loss: 1.4791 Difference Loss: -0.4632...\nEpoch 75/100... Discriminator Loss: 1.1122... Generator Loss: 1.0486 Difference Loss: 0.0637...\nEpoch 76/100... Discriminator Loss: 1.0662... Generator Loss: 1.5303 Difference Loss: -0.4641...\nEpoch 77/100... Discriminator Loss: 1.1943... Generator Loss: 1.1728 Difference Loss: 0.0215...\nEpoch 78/100... Discriminator Loss: 1.1579... Generator Loss: 1.3853 Difference Loss: -0.2274...\nEpoch 79/100... Discriminator Loss: 1.1481... Generator Loss: 1.1773 Difference Loss: -0.0292...\nEpoch 80/100... Discriminator Loss: 1.1529... 
Generator Loss: 1.6801 Difference Loss: -0.5272...\nTraining loss\nHere we'll check out the training losses for the generator and discriminator.", "%matplotlib inline\n\nimport matplotlib.pyplot as plt\n\n# With 128 hidden\nfig, ax = plt.subplots()\nlosses = np.array(losses)\nplt.plot(losses.T[0], label='Discriminator')\nplt.plot(losses.T[1], label='Generator')\nplt.title(\"Training Losses\")\nplt.legend()\n\n# With 256 hidden\nfig, ax = plt.subplots()\nlosses = np.array(losses)\nplt.plot(losses.T[0], label='Discriminator')\nplt.plot(losses.T[1], label='Generator')\nplt.title(\"Training Losses\")\nplt.legend()", "Generator samples from training\nHere we can view samples of images from the generator. First we'll look at images taken while training.", "def view_samples(epoch, samples):\n fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n \n return fig, axes\n\n# Load samples from generator taken while training\nwith open('train_samples.pkl', 'rb') as f:\n samples = pkl.load(f)\n\nplt.imshow(mnist.train.images[3].reshape(28,28), cmap='Greys_r')\n", "These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.", "# with 128\n_ = view_samples(-1, samples)\n\n# with 256\n_ = view_samples(-1, samples)", "Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!", "# with 256\nrows, cols = 10, 6\nfig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)\n\nfor sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):\n for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):\n ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n\n# with 128\nrows, cols = 10, 6\nfig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)\n\nfor sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):\n for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):\n ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)", "It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.\nSampling from the generator\nWe can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!", "saver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, reuse=True),\n feed_dict={input_z: sample_z})\nview_samples(0, [gen_samples])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
gmarceaucaron/ecole-apprentissage-profond
notebooks/classification/lasagne_nn.ipynb
bsd-3-clause
[ "This notebook explains how to define a classification task for vision with Lasagne and theano.\nTo execute a cell: Ctrl-Enter.\nThe code was executed with the default configuration of Theano: floatX=float64, device=cpu and the configuration for GPU floatX=float32,device=cuda.\nTested with:\n- Python 3.6.2, \n- Theano 0.10.0beta1.dev,\n- Lasagne 0.2.dev1,\n- cuDNN version 6021,\n- GeForce GTX TITAN Black", "import os\nos.environ['THEANO_FLAGS'] = 'floatX=float32,device=cuda,mode=FAST_RUN'\nimport numpy as np\nimport theano\nimport theano.tensor as T\nimport lasagne\nseed = 1\nlasagne.random.set_rng(np.random.RandomState(seed))", "Hyperparameters\nThe following are hyperparameters that will have an impact on the learning algorithm.", "# Architecture\nN_HIDDEN = [800,800]\nNON_LINEARITY = lasagne.nonlinearities.rectify\n\n# Dropout parameters\n#DROP_INPUT = 0.2\n#DROP_HIDDEN = 0.5\nDROP_INPUT = None\nDROP_HIDDEN = None\n\n# Number of epochs to train the net\nNUM_EPOCHS = 50\n\n# Optimization learning rate\nLEARNING_RATE = 0.01\n\n# Batch Size\nBATCH_SIZE = 128\n\n# Optimizer\neta = theano.shared(lasagne.utils.floatX(LEARNING_RATE))\nmy_optimizer = lambda loss, params: lasagne.updates.nesterov_momentum(\n loss, params, learning_rate=eta, momentum=0.9)", "An optimizer can be seen as a function that takes a gradient, obtained by backpropagation, and returns an update to be applied to the current parameters. Other optimizers can be found in: optimizer reference. In order to be able to change the learning rate dynamically, we must use a shared variable that will be accessible afterwards.\nDataset\nIn this example, we are using the celebrated MNIST dataset. The following are functions that download the MNIST dataset, resize it into a convenient numpy array for images of size (n_example, n_channel, img_width, img_height) and split the dataset into a train set (50k images) and a validation set (10k images). 
The pixels are normalized by 255.", "import os\ndef load_mnist():\n \"\"\"\n A dataloader for MNIST\n\n \"\"\"\n \n from urllib.request import urlretrieve\n \n def download(filename, source='http://yann.lecun.com/exdb/mnist/'):\n print(\"Downloading %s\" % filename)\n urlretrieve(source + filename, filename)\n\n # We then define functions for loading MNIST images and labels.\n # For convenience, they also download the requested files if needed.\n import gzip\n\n def load_mnist_images(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the inputs in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n # The inputs are vectors now, we reshape them to monochrome 2D images,\n # following the shape convention: (examples, channels, rows, columns)\n data = data.reshape(-1, 1, 28, 28)\n # The inputs come as bytes, we convert them to float32 in range [0,1].\n return data / np.float32(255)\n\n def load_mnist_labels(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the labels in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n # The labels are vectors of integers now, that's exactly what we want.\n return data\n\n # We can now download and read the training and test set images and labels.\n X_train = load_mnist_images('train-images-idx3-ubyte.gz')\n y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')\n X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')\n y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')\n\n # We reserve the last 10000 training examples for validation.\n X_train, X_val = X_train[:-10000], X_train[-10000:]\n y_train, y_val = y_train[:-10000], y_train[-10000:]\n\n # We just return all the arrays in order, as expected in main().\n # (It doesn't matter how we do this as long as we can read them again.)\n return X_train, y_train, X_val, y_val, X_test, y_test\n\n# Load the dataset\nprint(\"Loading data...\")\nX_train, y_train, X_val, y_val, X_test, y_test = load_mnist()\nn_train = X_train.shape[0]\ninput_shape = X_train[0].shape\nprint(input_shape)\ninput_shape = (None, input_shape[0], input_shape[1], input_shape[2])\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\nn_img_row = 3\nn_img_col = 3\nplt.rcParams['figure.figsize'] = (12,12) # Make the figures a bit bigger\nfor i in range(n_img_row*n_img_col):\n plt.subplot(n_img_row,n_img_col,i+1)\n plt.axis('off')\n idx = np.random.randint(n_train)\n plt.imshow(X_train[idx][0], cmap='gray')\n plt.title(\"Label {}\".format(y_train[idx]))", "The following auxiliary function creates a minibatch in a 3D tensor (batch_size, img_width, img_height).", "def iterate_minibatches(inputs, targets, batchsize, shuffle=False):\n \"\"\"\n Return a minibatch of images with the associated targets\n\n Keyword arguments:\n :type inputs: numpy.ndarray\n :param inputs: the dataset of images\n :type targets: numpy.ndarray\n :param targets: the targets associated to the dataset\n :type batchsize: int\n :param batchsize: the number of datapoints in the minibatch\n :type shuffle: bool\n :param shuffle: a flag if we want to shuffle the dataset\n \"\"\"\n \n assert len(inputs) == len(targets)\n if shuffle:\n indices = np.arange(len(inputs))\n np.random.shuffle(indices)\n for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batchsize]\n else:\n excerpt = slice(start_idx, start_idx + 
batchsize)\n yield inputs[excerpt], targets[excerpt]", "Model definition\nThe next two functions are general functions for creating multi-layer perceptron (mlp) and convolutional neural networks (cnn).", "def create_mlp(\n input_shape,\n input_var=None,\n nonlinearity = lasagne.nonlinearities.rectify,\n n_hidden=[800], \n drop_input=.2,\n drop_hidden=.5):\n \"\"\"\n A generic function for creating a multi-layer perceptron.\n If n_hidden is given as a list, then depth is ignored.\n \n :type input_shape: tuple\n :param input_shape: a tuple containing the shape of the input\n :type input_var: theano.tensor.var.TensorVariable\n :param input_var: a theano symbolic variable, created automatically if None\n :type nonlinearity: lasagne.nonlinearities\n :param nonlinearity: a nonlinearity function that follows all dense layers\n :type n_hidden: list\n :param n_hidden: number of hidden units per layer\n :type drop_input: float\n :param drop_input: the probability of dropout for the input\n :type drop_hidden: float\n :param drop_hidden: the probability of dropout for the hidden units\n \"\"\"\n\n # if input_shape is None, then the mlp is used on top of an existing model\n if input_shape:\n \n # if input_var is None, lasagne create \n # automatically the associated theano variable\n network = lasagne.layers.InputLayer(\n shape=input_shape,\n input_var=input_var)\n \n if drop_input:\n network = lasagne.layers.dropout(\n incoming=network,\n p=drop_input)\n else:\n network = input_var\n \n for i in range(len(n_hidden)):\n network = lasagne.layers.DenseLayer(\n incoming=network, \n num_units=n_hidden[i],\n nonlinearity=nonlinearity\n )\n if drop_hidden:\n network = lasagne.layers.dropout(\n incoming=network, \n p=drop_hidden\n )\n\n network = lasagne.layers.DenseLayer(\n incoming=network, \n num_units=10, \n nonlinearity=lasagne.nonlinearities.softmax\n )\n return network\n\n# Create a network\ninput_var = T.tensor4('inputs')\ntarget_var = T.ivector('targets')\n\nnetwork = create_mlp(\n input_shape,\n input_var=input_var,\n nonlinearity=NON_LINEARITY,\n n_hidden=N_HIDDEN, \n drop_input=DROP_INPUT, \n drop_hidden=DROP_HIDDEN)", "Optimization\nIn the following, we want to maximize the probability to output the right digit given the image. To do this, we retrieve the output of our model, which is a softmax (probability distribution) over the 10 digits, and we compare it to the actual target. Finally, since we are using minibatches of size BATCH_SIZE, we compute the mean over the examples of the minibatch.", "# Create a loss expression for training\nprediction = lasagne.layers.get_output(network)\nloss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()\nparams = lasagne.layers.get_all_params(network, trainable=True)\nupdates = my_optimizer(loss, params)\n\n# Compile a function performing a training step on a mini-batch (by giving\n# the updates dictionary) and returning the corresponding training loss:\ntrain_fn = theano.function([input_var, target_var], loss, updates=updates)\n\n# Create a loss expression for validation/testing. 
The crucial difference\n# here is that we do a deterministic forward pass through the network,\n# disabling dropout layers.\nvalid_prediction = lasagne.layers.get_output(network, deterministic=True)\nvalid_loss = lasagne.objectives.categorical_crossentropy(valid_prediction, target_var).mean()\n\n# We also create an expression for the classification accuracy:\nvalid_acc = lasagne.objectives.categorical_accuracy(valid_prediction, target_var).mean()\n\n# Compile a second function computing the validation loss and accuracy:\nvalid_fn = theano.function([input_var, target_var], [valid_loss, valid_acc])", "Training loop\nThe following training loop is minimal and often insufficient for real-world purposes.\nThe idea here is to show the minimal requirements for training a neural network.\nAlso, we plot to show the evolution of the train and validation losses.|", "#%matplotlib notebook\nplt.rcParams['figure.figsize'] = (4,4) # Make the figures a bit bigger\n\nimport time\ndef train(\n train_fn,\n X_train,\n y_train,\n valid_fn,\n X_valid,\n y_valid,\n num_epochs=50,\n batchsize=100):\n \n ###################\n # code for plotting\n ###################\n fig,ax = plt.subplots(1,1)\n ax.set_xlabel('Epoch')\n ax.set_ylabel('NLL')\n ax.set_xlim(0,50)\n ax.set_ylim(0,0.5)\n \n train_log = []\n valid_log = []\n ###################\n \n n_train_batches = X_train.shape[0] // batchsize # Warning last examples are not used\n n_valid_batches = X_valid.shape[0] // batchsize\n \n for epoch in range(num_epochs):\n \n train_loss = 0\n for inputs, targets in iterate_minibatches(X_train, y_train, batchsize, shuffle=True):\n train_loss += train_fn(inputs, targets)\n\n valid_loss = 0\n for inputs, targets in iterate_minibatches(X_valid, y_valid, batchsize, shuffle=False):\n loss,_ = valid_fn(inputs, targets)\n valid_loss += loss\n \n ###################\n # code for plotting\n ###################\n train_log.append(train_loss/n_train_batches)\n valid_log.append(valid_loss/n_valid_batches)\n #print(train_loss/n_train_batches, valid_loss/n_valid_batches)\n if ax.lines:\n ax.lines[0].set_xdata(range(0,epoch+1))\n ax.lines[0].set_ydata(train_log)\n ax.lines[1].set_xdata(range(0,epoch+1))\n ax.lines[1].set_ydata(valid_log)\n else:\n ax.plot(epoch, train_log[epoch], 'b', label='train')\n ax.plot(epoch, valid_log[epoch], 'r', label='valid')\n ax.legend()\n ax.grid()\n fig.canvas.draw()\n time.sleep(0.1)\n ###################\n\ntrain(train_fn, X_train, y_train, valid_fn, X_val, y_val)", "The following training loop contains features that are interesting to consider:\n- early-stopping\n- logging and filenames\n- checkpointing\n- adaptive step-size (optional)\nThe first three are the most important ones.", "import time\nimport pickle\ndef train(\n train_fn,\n X_train,\n y_train,\n valid_fn,\n X_valid,\n y_valid,\n num_epochs=100,\n batchsize=64):\n \n print(\"Starting training...\")\n train_loss_array = []\n valid_loss_array = []\n \n # early-stopping parameters\n n_iter = 0\n n_train_batches = X_train.shape[0] // batchsize # Warning last examples are not used\n n_valid_batches = X_valid.shape[0] // batchsize\n patience = 10 * n_train_batches # look as this many examples regardless\n patience_increase = 2. 
# wait this much longer when a new best is\n # found\n improvement_threshold = 0.995 # a relative improvement of this much is\n # considered significant\n validation_frequency = min(n_train_batches, patience // 2)\n # go through this many\n # minibatche before checking the network\n # on the validation set; in this case we\n # check every epoch\n\n best_valid_loss = np.inf\n best_iter = 0\n test_score = 0.\n\n epoch = 0\n done_looping = False\n \n if not isinstance(N_HIDDEN, list):\n n_hidden = [N_HIDDEN] * DEPTH\n else:\n n_hidden = N_HIDDEN\n \n exp_log_filename = 'mlp_lr-{}_arch-{}_bs-{}_seed-{}.log'.format(\n LEARNING_RATE, \n '-'.join(str(i) for i in n_hidden),\n batchsize,\n seed\n )\n with open(exp_log_filename, 'w') as f:\n log_line = '{} \\t\\t{} \\t\\t{} \\t\\t{} \\n'.format('epoch', 'train_loss', 'valid_loss', 'valid_acc')\n f.write(log_line)\n \n while epoch < num_epochs and not done_looping:\n \n train_loss = 0\n start_time = time.time()\n for inputs, targets in iterate_minibatches(X_train, y_train, batchsize, shuffle=True):\n train_loss += train_fn(inputs, targets)\n\n # And a full pass over the validation data:\n valid_loss = 0\n valid_acc = 0\n for inputs, targets in iterate_minibatches(X_valid, y_valid, batchsize, shuffle=False):\n loss, acc = valid_fn(inputs, targets)\n valid_loss += loss\n valid_acc += acc\n\n # Then we print the results for this epoch:\n avg_train_loss = train_loss / n_train_batches\n avg_valid_loss = valid_loss / n_valid_batches\n avg_valid_acc = valid_acc / n_valid_batches * 100\n print(\"Epoch {} of {} took {:.3f}s\".format(\n epoch + 1, num_epochs, time.time() - start_time))\n print(\" training loss:\\t\\t{:.6f}\".format(avg_train_loss))\n print(\" validation loss:\\t\\t{:.6f}\".format(avg_valid_loss))\n print(\" validation accuracy:\\t\\t{:.2f} %\".format(avg_valid_acc))\n train_loss_array.append(avg_train_loss)\n valid_loss_array.append(avg_valid_loss)\n \n with open(exp_log_filename, 'a') as f:\n log_line = '{} \\t\\t{:.6f} \\t\\t{:.6f} \\t\\t{:.2f} \\n'.format(epoch, avg_train_loss, avg_valid_loss, avg_valid_acc)\n f.write(log_line)\n \n # if we got the best validation score until now\n n_iter += n_train_batches\n if valid_loss < best_valid_loss:\n\n #improve patience if loss improvement is good enough\n if valid_loss < best_valid_loss * improvement_threshold:\n patience = max(patience, n_iter * patience_increase)\n\n best_valid_loss = valid_loss\n best_iter = n_iter\n\n # save the best model\n with open('best_model.pkl', 'wb') as f:\n all_params_values = lasagne.layers.get_all_param_values(network)\n pickle.dump(all_params_values, f)\n eta.set_value(lasagne.utils.floatX(eta.get_value() * 1.2))\n \n if patience <= n_iter:\n done_looping = True\n break\n \n else:\n all_params_values = pickle.load(open('best_model.pkl','rb'))\n lasagne.layers.set_all_param_values(network, all_params_values)\n eta.set_value(lasagne.utils.floatX(eta.get_value() * 0.5))\n \n epoch += 1\n\ntrain_log, valid_log = train(train_fn, X_train, y_train, valid_fn, X_val, y_val)\n\n!ls\n\n!tail mlp_lr-0.01_arch-800-800_bs-64_seed-1.log\n\n# load the saved model\nall_params_values = pickle.load(open('best_model.pkl','rb'))\nlasagne.layers.set_all_param_values(network, all_params_values)\n\n# After training, we compute the test error.\ntest_loss, test_acc = valid_fn(X_test, y_test)\nprint(\"Final results:\")\nprint(\" test loss:\\t\\t\\t {:.6f}\".format(np.asscalar(test_loss)))\nprint(\" test accuracy:\\t\\t {:.2f} %\".format(np.asscalar(test_acc*100)))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
MinnowBoard/fishbowl-notebooks
Slideshow.ipynb
mit
[ "Slideshow Project\nThis complicated project uses all the major components of the other projects to create a system that will display images and show information about the images all on the electronics connected to the Minnowboard.\nReview the wiki page at http://wiki.minnowboard.org/Projects/Maker_Slideshow for hardware requirements and setup.\nStart by importing all the necessary packages.", "import time\nimport sys\nimport os\nfrom PIL import Image\nfrom pyDrivers.ada_lcd import *\nimport pyDrivers.ILI9341 as TFT\nimport Adafruit_GPIO as GPIO\nimport Adafruit_GPIO.SPI as SPI", "Now we'll start by invoking the GPIO class, which will identify our board and initialize the pins. We will use two pins for input for scrolling through the slideshow. We default to the spidev device at <code>/dev/spidev0.0</code> for the minnow\nAdditionally, the Data/Command and Reset pins are defined for the TFT LCD display.", "myGPIO = GPIO.get_platform_gpio()\n\nmyGPIO.setup(12,GPIO.IN)\nmyGPIO.setup(16,GPIO.IN)\n\nlcd = ADA_LCD()\nlcd.clear()\n\nSPI_PORT = 0\nSPI_DEVICE = 0\nSPEED = 16000000\nDC = 10\nRST = 14 \n", "The following functions collect all the images in the specified directory and place them into a list. It will filter out all the non-image files in the directory. It will fail if no images are found.", "imageList = []\nrawList = os.listdir(\"/notebooks\")\nfor i in range(0,len(rawList)):\n if (rawList[i].lower().endswith(('.png', '.jpg', '.jpeg', '.gif'))==True):\n imageList.append(\"/notebooks\" + \"/\" + rawList[i])\n \nif len(imageList)==0:\n print \"No images found!\"\n exit(1)\n \ncount = 0\n\nprint imageList", "Now we'll initialize the TFT LCD display and clear it.", "disp = TFT.ILI9341(DC, rst=RST, spi=SPI.SpiDev(SPI_PORT,SPI_DEVICE,SPEED))\ndisp.begin()", "This long infinite loop will work like so:\n<b>Clear the char LCD, write name of new image</b>\n<b>Wait for a button press</b>\n<b>Try to open an image</b>\n<b>Display the image on the TFT LCD</b>\n--If we fail to open the file, print an error message to the LCD display--\n----If we failed, open up the next file in the list. If we're at the end, restart at the beginning ----", "while True:\n \n lcd.clear()\n time.sleep(0.25)\n message = \" Image \" + str(count+1) + \" of \" + str(len(imageList)) + \"\\n\" + imageList[count][len(sys.argv[1]):]\n lcd.message(message)\n lcd.scroll()\n try:\n image = Image.open(imageList[count])\n except(IOError):\n lcd.clear()\n time.sleep(0.25)\n message = \" ERR: \" + str(count+1) + \" of \" + str(len(imageList)) + \"\\n\" + imageList[count][len(sys.argv[1]):]\n lcd.scroll()\n lcd.message(message)\n if(count == len(imageList)-1):\n image = Image.open(imageList[0])\n else:\n image = Image.open(imageList[count+1])\n\n image = image.rotate(90).resize((240, 320))\n disp.display(image)\n \n try:\n while True:\n if (myGPIO.input(12) != 1 and count != 0):\n count = count - 1\n break\n if (myGPIO.input(16) != 1 and count != len(imageList)-1):\n count = count + 1 \n break \n except (KeyboardInterrupt):\n lcd.clear()\n lcd.message(\"Terminated\")\n print\n exit(0)\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
UWSEDS/LectureNotes
Spring2019/06a_Objects/Building Software With Objects.ipynb
bsd-2-clause
[ "Why Objects?\n\nProvide modularity and reuse through hierarchical structures\n\nObject oriented programming is a different way of thinking.\nProgramming With Objects", "from IPython.display import Image\nImage(filename='Classes_vs_Objects.png') ", "Initial concepts\n\nAn object is a container of data (attributes) and code (methods)\nA class is a template for creating objects\n\nReuse is provided by:\n\nreusing the same class to create many objects\n\"inheriting\" data and code from other classes", "# Definiting a Car class\nclass Car(object):\n pass\n\ncar = Car()", "Attributes", "from IPython.display import Image\nImage(filename='ClassAttributes.png') ", "Attributes are data associated with an object (instance) or class. Object attributes (and methods) are specified by using \"self\". Instance attributes and methods are accessed using the dot \".\" operator.", "class Car(object):\n \n # The following method is called when the class\n # is created or \"constructed\". The variables \"self.x\" refers\n # to the variable \"x\" in a created object.\n def __init__(self, color, car_type, speed):\n self.color = color\n self.car_type = car_type\n self.speed = speed\n\ncar1 = Car(\"blue\", \"sedan\", \"very slow\")\ncar2 = Car(\"red\", \"sedan\", \"not so slow\")\n\nprint(car1.speed, car2.speed)\n\nclass Car(object):\n \n # The following method is called when the class\n # is created or \"constructed\". The variables \"self.x\" refers\n # to the variable \"x\" in a created object.\n def __init__(self, color, car_type, speed, sunroof=True):\n self.color = color\n self.car_type = car_type\n if isinstance(speed, int):\n self.speed = speed\n else:\n raise ValueError(\"Bad speed value.\")\n self.sunroof = sunroof\n\ncar = Car(\"blue\", \"sedan\", 100)\n\n# Creating an object for a class with arguments in the __init__ method\ncar = Car(\"Blue\", \"HatchBack\", 100)\ncar.color\n\n# Creating an object for a class with arguments in the __init__ method\njoe_car = Car(\"Blue\", \"Sedan\", 100)\ndave_car = Car(\"Red\", \"Sports\", 150)\nprint (\"Type of joe_car is %s. Type of dave_car is %s\"% (type(joe_car), type(dave_car)))\n\n# Accessed instance attributes\njoe_car = Car(\"Blue\", \"Sedan\", 100)\nprint (\"Type of joe_car has (color, type, speed)=%s.\" % str((joe_car.color, joe_car.car_type, joe_car.speed)))", "EXERCISE: Change the constructor for Car to include the attribute \"doors\".\nInstance Methods", "from IPython.display import Image\nImage(filename='InstanceMethods.png') \n\n#Class diagram\nfrom IPython.display import Image\nImage(filename='SingleClassDiagram.png', width=200, height=200) ", "A class diagram provides a more compact representation of a class. There are three sections.\n- Class name\n- Attributes\n- Methods\nInstance methods\n- functions associated with the objects constructed for a class\n- provide a way to transform data in objects\n- use instance attributes (references to variables beginning with \"self.\")", "class Car(object):\n \n def __init__(self, color, car_type, speed):\n \"\"\"\n :param str color:\n :param str car_type:\n :param int speed:\n \"\"\"\n self.color = color\n self.car_type = car_type\n self.speed = speed\n \n def start(self):\n print (\"%s %s started!\" % (self.color, self.car_type))\n \n def stop(self):\n pass\n \n def turn(self, direction):\n \"\"\"\n :parm str direction: left or right\n \"\"\"\n pass\n\ncar = Car(\"Blue\", \"Sedan\", 100)\ncar.start()", "EXERCISE: Implement the stop and turn methods. 
Run the methods.\nInheritance\nInheritance is a common way that classes reuse data and code from other classes. A child class or derived class gets attributes and methods from its parent class.\nProgrammatically:\n- Specify inheritance in the class statement\n- Constructor for derived class (class that inherits) has access to the constructor of its parent.\nInheritance is represented in diagrams as an arrow from the child class to its parent class.", "from IPython.display import Image\nImage(filename='SimpleClassHierarchy.png', width=400, height=400) \n\n# Code for inheritance\nclass Sedan(Car):\n # Sedan inherits from car\n \n def __init__(self, color, speed):\n \"\"\"\n :param str color:\n :param int speed:\n \"\"\"\n super().__init__(color, \"Sedan\", speed)\n \n def play_cd(self):\n print (\"Playing cd in %s sedan\" % self.color)\n\nsedan = Sedan(\"Yellow\", 1e6)\nsedan.color\n\nsedan.car_type\n\nsedan.car_type\n\njoe_car = Sedan(\"Blue\", 100)\nprint (\"Type of joe_car has (color, type, speed)=%s.\" % str((joe_car.color, joe_car.car_type, joe_car.speed)))", "Exercise: Implement SportsCar and create dave_car from SportsCar. Print attributes of dave_car.", "from IPython.display import Image\nImage(filename='ClassInheritance.png', width=400, height=400) ", "Subclasses can have their own methods.\nExercise: Add the play_cd() method to Sedan and the play_bluetooth() method to SportsCar. Construct a test to run these methods.\nWhat Else?\n\nClass attributes\nClass methods\n\nObject Oriented Design\nA design methodology must specify:\n- Components: What they do and how to build them\n- Interactions: How the components interact to implement use cases\nObject oriented design\n- Components are specified by class diagrams.\n- Interactions are specified by interaction diagrams.\nClass diagram for the ATM system", "from IPython.display import Image\nImage(filename='ATMClassDiagram.png', width=400, height=400) ", "The diamond arrow is a \"has-a\" relationship. For example, the Controller has-a ATMInput. This means that a Controller object has an instance variable for an ATMInput object.\nInteraction Diagram for the ATM System\nAn interaction diagram specifies how components interact to achieve a use case. \nInteractions are from one object to another object, indicating that the first object calls a method in the second object.\nRules for drawing lines in an interaction diagram:\n- The calling object must know about the called object.\n- The called object must have the method invoked by the calling object.", "from IPython.display import Image\nImage(filename='ATMAuthentication.png', width=800, height=800) ", "Look at Objects/ATMDiagrams.pdf for a solution.\nWhat Else in Design?\n\nOther diagrams: state diagrams, package diagrams, ...\nObject oriented design patterns\n\nComplex Example of Class Hierarchy", "from IPython.display import Image\nImage(filename='SciSheetsCoreClasses.png', width=300, height=30) " ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
SIMEXP/Projects
NSC2006/labo8_filtrage/labo 8 Introduction au filtrage reponses.ipynb
mit
[ "Laboratoire d'introduction au filtrage - Corrigé\n\nCours NSC-2006, année 2015\nMéthodes quantitatives en neurosciences \nPierre Bellec, Yassine Ben Haj Ali\n\nObjectifs:\nCe laboratoire a pour but de vous initier au filtrage de signaux temporels avec Matlab. Nous allons travailler avec un signal simulé qui contient plusieurs sources, une d'intérêt et d'autres qui sont du bruit. \n - Nous allons tout d'abord nous familiariser avec les différentes sources de signal, en temps et en fréquence. \n - Nous allons ensuite chercher un filtrage qui permette d'éliminer le bruit sans altérer de maniére forte le signal. \n - Enfin, nous évaluerons l'impact d'une perte de résolution temporelle sur notre capacité à débruiter le signal, lié au phénomène de repliement de fréquences (aliasing). \nPour réaliser ce laboratoire, il est nécessaire de récupérer la\nressource suivante sur studium:\n\nlabo7_filtrage.zip: cette archive contient plusieurs codes et jeux de données. SVP décompressez l'archive et copiez les fichiers dans votre répertoire de travail Matlab.\n\nDe nombreuses portions du labo consiste à modifier un code réalisé dans une autre question. Il est donc fortement conseillé d'ouvrir un nouveau fichier dans l'éditeur matlab, et d'exécuter le code depuis l'éditeur, de façon à pouvoir copier des paragraphes de code rapidement. Ne pas tenir compte et ne pas exécuter cette partie du code:", "%matplotlib inline\nfrom pymatbridge import Octave\noctave = Octave()\noctave.start()\n%load_ext pymatbridge", "Section 1: Exemple de signaux, temps et fréquence\n1. Commençons par générer un signal d'intérêt:", "%%matlab\n\n%% Définition du signal d'intêret\n% fréquence du signal\nfreq = 1; \n% on crée des blocs off/on de 15 secondes\nbloc = repmat([zeros(1,15*freq) ones(1,15*freq)],[1 10]); \n% les temps d'acquisition\nech = (0:(1/freq):(length(bloc)/freq)-(1/freq)); \n% ce paramètre fixe le pic de la réponse hémodynamique\npic = 5; \n% noyau de réponse hémodynamique\nnoyau = [linspace(0,1,(pic*freq)+1) linspace(1,-0.3,(pic*freq)/2) linspace(-0.3,0,(pic*freq)/2)]; \nnoyau = [zeros(1,length(noyau)-1) noyau]; \n% normalisation du noyau\nnoyau = noyau/sum(abs(noyau)); \n% convolution du bloc avec le noyau\nsignal = conv(bloc,noyau,'same'); \n% on fixe la moyenne de la réponse à zéro\nsignal = signal - mean(signal); ", "Représentez noyau et signal en temps, à l'aide de la commande plot. Utiliser les temps d'acquisition corrects, et labéliser les axes (xlabel, ylabel). Comment est généré signal? reconnaissez vous le processus employé? Est ce que le signal est périodique? si oui, quelle est sa période? Peut-on trouver la réponse dans le code?", "%%matlab\n%% représentation en temps\n% Nouvelle figure\nfigure\n% On commence par tracer le noyau\nplot(noyau,'-bo')\n% Nouvelle figure\nfigure\n% On trace le signal, en utilisant ech pour spécifier les échantillons temporels\nplot(ech,signal)\n% Les fonctions xlim et ylim permettent d'ajuster les valeurs min/max des axes\nxlim([-1 max(ech)+1])\nylim([-0.6 0.7])\n% Les fonctions xlabel et ylabel permettent de labéliser les axes\nxlabel('Temps (s)')\nylabel('a.u')", "A partir du graphe du noyau, on reconnait la fonction de réponse hémodynamique utilisée lors du laboratoire sur la convolution. Le signal est généré par convolution du noyau avec un vecteur bloc (ligne 18 du bloc de code initial). On voit que bloc est créé en assemblant deux vecteurs de 15 zéros et de 15 uns (ligne 7). Le signal est donc périodique. 
Comme la fréquence d'acquisition est de 1 Hz (ligne 9 définissant les échantillons temporels, on voit un pas de 1/freq, avec freq=1, ligne 5), la période du signal est de 30 secondes, soit une fréquence de 0.033Hz. On peut confirmer cela visuellement sur le graphe.\n2. Représenter le contenu fréquentiel de signal avec la commande Analyse_Frequence_Puissance.\nUtilisez la commande ylim pour ajuster les limites de l'axe y et pouvoir bien observer le signal. Notez que l'axe y (puissance) est en échelle log (dB). Quelles sont les fréquences principales contenues dans le signal? Etait-ce attendu?", "%%matlab\n%% représentation en fréquences\n% Nouvelle figure\nfigure\n% La fonction utilise le signal comme premier argument, et les échantillons temporels comme deuxième\nAnalyse_Frequence_Puissance(signal,ech);\n% On ajuste l'échelle de l'axe y.\nylim([10^(-10) 1])", "Comme attendu, étant donné le caractère périodique de période 30s du signal, la fréquence principale est de 0.033 Hz. Les pics suivants sont situés à 0.1 Hz et 0.166 Hz.\n3. Répétez les questions 1.1 et 1.2 avec un bruit dit blanc, généré ci dessous.", "%%matlab\n\n%% définition du bruit blanc\nbruit = 0.05*randn(size(signal));", "Pourquoi est ce que ce bruit porte ce nom?", "%%matlab\n% Ce code n'est pas commenté, car essentiellement identique\n% à ceux présentés en question 1.1. et 1.2.\n%% représentation en temps\nfigure\nplot(ech,bruit)\nylim([-0.6 0.7])\nxlabel('Temps (s)')\nylabel('a.u')\n\n%% représentation en fréquences\nfigure\nAnalyse_Frequence_Puissance(bruit,ech);\nylim([10^(-10) 1])", "Le vecteur bruit est généré à l'aide de la fonction randn, qui est un générateur pseudo-aléatoires d'échantillons indépendants Gaussiens. Le spectre de puissance représente l'amplitude de la contribution de chaque fréquence au signal. On peut également décomposer une couleur en fréquences. Quand toutes les fréquences sont présentes, et en proportion similaire, on obtient du blanc. Le bruit Gaussien a un spectre de puissance plat (hormis de petites variations aléatoires), ce qui lui vaut son surnom de bruit blanc.\n4. Bruit respiratoire.\nRépétez les les questions 1.1 et 1.2 avec un bruit dit respiratoire, généré ci dessous.", "%%matlab\n\n%% définition du signal de respiration\n% fréquence de la respiration\nfreq_resp = 0.3; \n% un modéle simple (cosinus) des fluctuations liées à la respiration\nresp = cos(2*pi*freq_resp*ech/freq); \n% fréquence de modulation lente de l'amplitude respiratoire\nfreq_mod = 0.01; \n% modulation de l'amplitude du signal lié à la respiration\nresp = resp.*(ones(size(resp))-0.1*cos(2*pi*freq_mod*ech/freq)); \n% on force une moyenne nulle, et une amplitude max de 0.1\nresp = 0.1*(resp-mean(resp)); \n\n%%matlab\n\n% Ce code n'est pas commenté, car essentiellement identique\n% à ceux présentés en question 1.1. et 1.2.\n\n%% représentation en temps\nfigure\nplot(ech,resp)\nxlim([-1 max(ech)/2+1])\nxlabel('Temps (s)')\nylabel('a.u')\n\n%% représentation en fréquences\nfigure\n[ech_f,signal_f,signal_af,signal_pu] = Analyse_Frequence_Puissance(resp,ech);\nset(gca,'yscale','log');\nylim([10^(-35) 1])", "Est ce une simulation raisonnable de variations liées à la respiration? pourquoi?\nOn voit que ce signal est essentiellement composé de fréquences autour de 0.3Hz. Cela était déjà apparent avec l'introduction d'un cosinus de fréquence 0.3Hz dans la génération (ligne 7). L'amplitude de ce cosinus est elle-même modulée par un autre cosinus, plus lent (ligne 11). 
D'aprés wikipedia, un adulte respire de 16 à 20 fois par minutes, soit une fréquence de 0.26 à 0.33Hz (en se ramenant en battements par secondes). Cette simulation utilise donc une fréquence raisonnable pour simuler la respiration. \n5. Ligne de base.\nRépétez les les questions 1.1 et 1.2 avec une dérive de la ligne de base, telle que générée ci dessous.", "%%matlab\n\n%% définition de la ligne de base\nbase = 0.1*(ech-mean(ech))/mean(ech);\n\n%%matlab\n\n% Ce code n'est pas commenté, car essentiellement identique\n% à ceux présentés en question 1.1. et 1.2.\n\n%% représentation en temps\nfigure\nplot(ech,base)\nxlim([-1 max(ech)+1])\nylim([-0.6 0.7])\nxlabel('Temps (s)')\nylabel('a.u')\n\n%% représentation en fréquence\nfigure\n[ech_f,base_f,base_af,base_pu] = Analyse_Frequence_Puissance(base,ech);\nylim([10^(-10) 1])", "Le vecteur base est une fonction linéaire du temps (ligne 4). En représentation fréquentielle, il s'agit d'un signal essentiellement basse fréquence. \n6. Mélange de signaux.\nOn va maintenant mélanger nos différentes signaux, tel qu'indiqué ci-dessous. Représentez les trois mélanges en temps et en fréquence, superposé au signal d'intérêt sans aucun bruit (variable signal). Pouvez-vous reconnaitre la contribution de chaque source dans le mélange fréquentiel? Est ce que les puissances de fréquences s'additionnent systématiquement?", "%%matlab\n\n%% Mélanges de signaux\ny_sr = signal + resp;\ny_srb = signal + resp + bruit;\ny_srbb = signal + resp + bruit + base;\n\n%%matlab\n\n% Ce code n'est pas commenté, car essentiellement identique\n% à ceux présentés en question 1.1. et 1.2.\n% notez tout de même l'utilisation d'un hold on pour superposer la variable `signal` (sans bruit)\n% au mélange de signaux.\n\ny = y_sr;\n% représentation en temps\nfigure\nplot(ech,y)\nhold on\nplot(ech,signal,'r')\nxlim([-1 301])\nylim([-0.8 0.8])\nxlabel('Temps (s)')\nylabel('a.u')\n\n% représentation en fréquence\nfigure\nAnalyse_Frequence_Puissance(y,ech);\nylim([10^(-10) 1])", "On reconnait clairement la série de pics qui composent la variable signal auquelle vient se rajouter les fréquences de la variable resp, à 0.3 Hz. Notez que les spectres de puissance ne s'additionnent pas nécessairement, cela dépend si, à une fréquence donnée, les signaux que l'on additionne sont ou non en phase.", "%%matlab\n\n% Idem au code précédent, y_sr est remplacé par y_srb dans la ligne suivante.\ny = y_srb;\n% représentation en temps\nfigure\nplot(ech,y)\nhold on\nplot(ech,signal,'r')\nxlim([-1 301])\nylim([-0.8 0.8])\nxlabel('Temps (s)')\nylabel('a.u')\n\n% représentation en fréquence\nfigure\n[freq_f,y_f,y_af,y_pu] = Analyse_Frequence_Puissance(y,ech);\nylim([10^(-10) 1])", "L'addition du bruit blanc ajoute des variations aléatoires dans la totalité du spectre et, hormis les pics du spectre associé à signal, il devient difficile de distinguer la contribution de resp.", "%%matlab\n% Idem au code précédent, y_srb est remplacé par y_srbb dans la ligne suivante.\ny = y_srbb;\n% représentation en temps\nfigure\nplot(ech,y)\nhold on\nplot(ech,signal,'r')\nxlim([-1 301])\nylim([-0.8 0.8])\nxlabel('Temps (s)')\nylabel('a.u')\n\n% représentation en fréquence\nfigure\n[freq_f,y_f,y_af,y_pu] = Analyse_Frequence_Puissance(y,ech);\nylim([10^(-10) 1])", "Section 2: Optimisation de filtre\n2.1. 
Nous allons commencer par appliquer un filtre de moyenne mobile, avec le signal le plus simple (y_sr).\nPour cela on crée un noyau et on applique une convolution, comme indiqué ci dessous.", "%%matlab\n%%définition d'un noyau de moyenne mobile\n% taille de la fenêtre pour la moyenne mobile, en nombre d'échantillons temporels\ntaille = ceil(3*freq);\n% le noyau, défini sur une fenêtre identique aux signaux précédents\nnoyau = [zeros(1,(length(signal)-taille-1)/2) ones(1,taille) zeros(1,(length(signal)-taille-1)/2)];\n% normalisation du moyau\nnoyau = noyau/sum(abs(noyau));\n% convolution avec le noyau (filtrage)\ny_f = conv(y_sr,noyau,'same');", "Représentez le noyau en fréquence (avec Analyse_Frequence_Puissance), commentez sur l'impact fréquentiel de la convolution. Faire un deuxième graphe représentant le signal d'intérêt superposé au signal filtré.", "%%matlab\n\n%% Représentation fréquentielle du filtre\nfigure\n% représentation fréquentielle du noyau\nAnalyse_Frequence_Puissance(noyau,ech);\nylim([10^(-10) 1])\n\n%% représentation du signal filtré\nfigure\n% signal aprés filtrage\nplot(ech,y_f,'k')\nhold on\n% signal sans bruit\nplot(ech,signal,'r')\n\n%% erreur résiduelle\nerr = sqrt(mean((signal-y_f).^2))", "On voit que cette convolution supprime exactement la fréquence correspondant à la largeur du noyau (3 secondes). Il se trouve que cette fréquence est aussi trés proche de la fréquence respiratoire choisie! Visuellement, le signal filtré est très proche du signal original. La mesure d'erreur (tel que demandée dans la question 2.2. ci dessous est de 3%.\n2.2 Répétez la question 2.1 avec un noyau plus gros.\nCommentez qualitativement sur la qualité du débruitage.", "%%matlab\n\n% taille de la fenêtre pour la moyenne mobile, en nombre d'échantillons temporels\n% On passe de 3 à 7\n% ATTENTION: sous matlab, ce code ne marche qu'avec des noyaux de taille impaire\ntaille = ceil(6*freq);\n% le noyau, défini sur une fenêtre identique aux signaux précédents\nnoyau = [zeros(1,(length(signal)-taille-1)/2) ones(1,taille) zeros(1,(length(signal)-taille-1)/2)];\n% normalisation du moyau\nnoyau = noyau/sum(abs(noyau));\n% convolution avec le noyau (filtrage)\ny_f = conv(y_sr,noyau,'same');\n\n%% Représentation fréquentielle du filtre\nfigure\nAnalyse_Frequence_Puissance(noyau,ech);\nylim([10^(-10) 1])\n\n%% représentation du signal filtré\nfigure\nplot(ech,y_f,'k')\nhold on\nplot(ech,signal,'r')\n\n%% erreur résiduelle\nerr = sqrt(mean((signal-y_f).^2))", "On voit que ce noyau, en plus de supprimer une fréquence légèrement au dessus de 0.3 Hz, supprime aussi une fréquence proche de 0.16 Hz. C'était l'un des pics que l'on avait identifié dans le spectre de signal. De fait, dans la représentation temporelle, on voit que le signal filtré (en noir) est dégradé: les fluctuations rapides du signal rouge sont perdues. Et effectivement, on a maintenant une erreur résiduelle de 7.6%, supérieure au 3% du filtre précédent.\n2.3 Nous allons maintenant appliquer des filtres de Butterworth.\nCes filtres sont disponibles dans des fonctions que vous avez déjà utilisé lors du laboratoire sur la transformée de Fourier:\n - FiltrePasseHaut.m: suppression des basses fréquences.\n - FiltrePasseBas.m: suppression des hautes fréquences.\nLe filtre de Butterworth n'utilise pas explicitement un noyau de convolution. 
Mais comme il s'agit d'un systéme linéaire invariant dans le temps, on peut toujours récupérer le noyau en regardant la réponse à une impulsion finie unitaire.", "%%matlab\n\n%% Définition d'une implusion finie unitaire\nimpulsion = zeros(size(signal));\nimpulsion(round(length(impulsion)/2))=1;\nnoyau = FiltrePasseHaut(impulsion,freq,0.1);", "Représentez le noyau en temps et en fréquence. Quelle est la fréquence de coupure du filtre?", "%%matlab\n\n%% représentation temporelle\nfigure\nplot(ech,noyau)\nxlabel('Temps (s)')\nylabel('a.u')\n\n%% représentation fréquentielle\nfigure\nAnalyse_Frequence_Puissance(noyau,ech);\nset(gca,'yscale','log');", "On observe une réduction importante de l'amplitude des fréquences inférieures à 0.1 Hz, qui correspond donc à la fréquence de coupure du filtre.\n2.4. Application du filtre de Butterworth.\nL'exemple ci dessous filtre le signal avec un filtre passe bas, avec une fréquence de coupure de 0.1. Faire un graphe représentant le signal d'intérêt (signal) superposé au signal filtré. Calculez l'erreur résiduelle, et comparez au filtre par moyenne mobile évalué précédemment.", "%%matlab\ny = y_sr;\ny_f = FiltrePasseBas(y,freq,0.1);\n\n%%représentation du signal filtré\nplot(ech,signal,'r')\nhold on\nplot(ech,y_f,'k')\nerr = sqrt(mean((signal-y_f).^2))", "Avec une fréquence de coupure de 0.1 Hz, on perd de nombreux pics de signal, notamment celui situé à 0.16Hz. Effectivement dans la représentation en temps on voit que les variations rapides de signal sont perdues, et l'erreur résiduelle est de 6%. \n2.5. Optimisation du filtre de Butterworth.\nTrouvez une combinaison de filtre passe-haut et de filtre passe-bas de Butterworth qui permette d'améliorer l'erreur résiduelle par rapport au filtre de moyenne mobile. Faire un graphe représentant le signal d'intérêt (signal) superposé au signal filtré, et un second avec le signal d'intérêt superposé au signal bruité, pour référence.", "%%matlab\ny = y_sr;\n\n%% filtre de Butterworth\n% on combine une passe-haut et un passe-bas, de maniére à retirer uniquement les fréquences autour de 0.3 Hz\ny_f = FiltrePasseHaut(y,freq,0.35);\ny_f = y_f+FiltrePasseBas(y,freq,0.25);\n\n%% représentation du signal filtré\nfigure\nplot(ech,signal,'r')\nhold on\nplot(ech,y_f,'k')\nerr = sqrt(mean((signal-y_f).^2))\n\n%% représentation du signal brut\nfigure\nplot(ech,signal,'r')\nhold on\nplot(ech,y,'k')", "En combinant une filtre passe-haut et un passe-bas, de maniére à retirer uniquement les fréquences autour de 0.3 Hz, on peut restaurer le signal de maniére trés précise, et obtenir une erreur proche de 1%, meilleure que le 3% obtenu par moyenne mobile." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
sdpython/ensae_teaching_cs
_doc/notebooks/sklearn_ensae_course/01_data_manipulation.ipynb
mit
[ "2A.ML101.1: Introduction to data manipulation with scientific Python\nIn this section we'll go through the basics of the scientific Python stack for data manipulation: using numpy and matplotlib.\nSource: Course on machine learning with scikit-learn by Gaël Varoquaux\nYou can skip this section if you already know the scipy stack.\nTo learn the scientific Python ecosystem: http://scipy-lectures.org", "# Start pylab inline mode, so figures will appear in the notebook\n%matplotlib inline", "Numpy Arrays\nManipulating numpy arrays is an important part of doing machine learning\n(or, really, any type of scientific computation) in Python. This will likely\nbe review for most: we'll quickly go through some of the most important features.", "import numpy as np\n\n# Generating a random array\nX = np.random.random((3, 5)) # a 3 x 5 array\n\nprint(X)\n\n# Accessing elements\n\n# get a single element\nprint(X[0, 0])\n\n# get a row\nprint(X[1])\n\n# get a column\nprint(X[:, 1])\n\n# Transposing an array\nprint(X.T)\n\n# Turning a row vector into a column vector\ny = np.linspace(0, 12, 5)\nprint(y)\n\n# make into a column vector\nprint(y[:, np.newaxis])", "There is much, much more to know, but these few operations are fundamental to what we'll\ndo during this tutorial.\nScipy Sparse Matrices\nWe won't make very much use of these in this tutorial, but sparse matrices are very nice\nin some situations. For example, in some machine learning tasks, especially those associated\nwith textual analysis, the data may be mostly zeros. Storing all these zeros is very\ninefficient. We can create and manipulate sparse matrices as follows:", "from scipy import sparse\n\n# Create a random array with a lot of zeros\nX = np.random.random((10, 5))\nprint(X)\n\n# set the majority of elements to zero\nX[X < 0.7] = 0\nprint(X)\n\n# turn X into a csr (Compressed-Sparse-Row) matrix\nX_csr = sparse.csr_matrix(X)\nprint(X_csr)\n\n# convert the sparse matrix to a dense array\nprint(X_csr.toarray())", "Matplotlib\nAnother important part of machine learning is visualization of data. The most common\ntool for this in Python is matplotlib. It is an extremely flexible package, but\nwe will go over some basics here.\nFirst, something special to IPython notebook. We can turn on the \"IPython inline\" mode,\nwhich will make plots show up inline in the notebook.", "%matplotlib inline\n\n# Here we import the plotting functions\nimport matplotlib.pyplot as plt\n\n# plotting a line\nx = np.linspace(0, 10, 100)\nplt.plot(x, np.sin(x));\n\n# scatter-plot points\nx = np.random.normal(size=500)\ny = np.random.normal(size=500)\nplt.scatter(x, y);\n\n# showing images\nx = np.linspace(1, 12, 100)\ny = x[:, np.newaxis]\n\nim = y * np.sin(x) * np.cos(y)\nprint(im.shape)\n\n# imshow - note that origin is at the top-left by default!\nplt.imshow(im);\n\n# Contour plot - note that origin here is at the bottom-left by default!\nplt.contour(im);", "There are many, many more plot types available. 
One useful way to explore these is by\nlooking at the matplotlib gallery: http://matplotlib.org/gallery.html\nYou can test these examples out easily in the notebook: simply copy the Source Code\nlink on each page, and put it in a notebook using the %load magic.\nFor example:", "# %load http://matplotlib.org/mpl_examples/pylab_examples/ellipse_collection.py\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.collections import EllipseCollection\n\nx = np.arange(10)\ny = np.arange(15)\nX, Y = np.meshgrid(x, y)\n\nXY = np.hstack((X.ravel()[:, np.newaxis], Y.ravel()[:, np.newaxis]))\n\nww = X/10.0\nhh = Y/15.0\naa = X*9\n\n\nfig, ax = plt.subplots()\n\nec = EllipseCollection(ww, hh, aa, units='x', offsets=XY,\n transOffset=ax.transData)\nec.set_array((X + Y).ravel())\nax.add_collection(ec)\nax.autoscale_view()\nax.set_xlabel('X')\nax.set_ylabel('y')\ncbar = plt.colorbar(ec)\ncbar.set_label('X+Y');" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
root-mirror/training
NCPSchool2021/RDataFrame/02-rdataframe-collections.ipynb
gpl-2.0
[ "Working with collections and object selections\nRDataFrame reads collections as the special type ROOT::RVec (e.g. a branch containing an array of floating point numbers can be read as a ROOT::RVec&lt;float&gt;). C-style arrays (with variable or static size), std::vectors and most other collection types can be read this way. When reading ROOT data, column values of type ROOT::RVec&lt;T&gt; perform no copy of the underlying array.\nRVec is a container similar to std::vector (and can be used just like a std::vector) but it also offers a rich interface to operate on the array elements in a vectorised fashion, similarly to Python's NumPy arrays.", "import ROOT\n\ntreename = \"myDataset\"\nfilename = \"https://github.com/root-project/root/raw/master/tutorials/dataframe/df017_vecOpsHEP.root\"\ndf = ROOT.RDataFrame(treename, filename)\n\nprint(f\"Columns in the dataset: {df.GetColumnNames()}\")", "To quickly inspect the data we can export it as a dictionary of numpy arrays thanks to the AsNumpy RDataFrame method. Note that for each row, E is an array of values:", "npy_dict = df.AsNumpy([\"E\"])\n\nfor row, vec in enumerate(npy_dict[\"E\"]):\n print(f\"\\nRow {row} contains:\\n{vec}\")", "Define a new column with operations on RVecs", "df1 = df.Define(\"good_pt\", \"sqrt(px*px + py*py)[E>100]\")", "sqrt(px*px + py*py)[E&gt;100]:\n* px, py and E are columns the elements of which are RVecs\n* Operations on RVecs like sum, product, sqrt preserve the dimensionality of the array\n* [E&gt;100] selects the elements of the array that satisfy the condition\n* E &gt; 100: boolean expressions on RVecs such as E &gt; 100 return a mask, that is an array with information on which values pass the selection (e.g. [0, 1, 0, 0] if only the second element satisfies the condition)\nNow we can plot the newly defined column values in a histogram", "c = ROOT.TCanvas()\nh = df1.Histo1D((\"pt\", \"pt\", 16, 0, 4), \"good_pt\")\nh.Draw()\nc.Draw()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
arsenovic/galgebra
examples/ipython/colored_christoffel_symbols.ipynb
bsd-3-clause
[ "This example is kindly contributed by FreddyBaudine for reproducing pygae/galgebra#26 and pygae/galgebra#30 with modifications by utensil.\nPlease note before Python code, there's an invisible markdown cell with the following code to enable color and define some colors from http://latexcolor.com/:\nmarkdown\n$$\n\\require{color}\n\\definecolor{airforceblue}{rgb}{0.36, 0.54, 0.66}\n\\definecolor{applegreen}{rgb}{0.55, 0.71, 0.0}\n\\definecolor{atomictangerine}{rgb}{1.0, 0.6, 0.4}\n$$\n$$\n\\require{color}\n\\definecolor{airforceblue}{rgb}{0.36, 0.54, 0.66}\n\\definecolor{applegreen}{rgb}{0.55, 0.71, 0.0}\n\\definecolor{atomictangerine}{rgb}{1.0, 0.6, 0.4}\n$$", "from __future__ import print_function\nimport sys\nfrom galgebra.printer import Format, xpdf\nFormat()\nfrom sympy import symbols, sin, pi, latex, Array, permutedims\nfrom galgebra.ga import Ga\n\nfrom IPython.display import Math", "Base manifold (three dimensional)\nMetric tensor (cartesian coordinates - norm = False)", "from sympy import cos, sin, symbols\ng3coords = (x,y,z) = symbols('x y z')\ng3 = Ga('ex ey ez', g = [1,1,1], coords = g3coords,norm=False) # Create g3\n(e_x,e_y,e_z) = g3.mv()\n\nMath(r'g =%s' % latex(g3.g))", "Two dimensioanal submanifold - Unit sphere\nBasis not normalised", "sp2coords = (theta, phi) = symbols(r'{\\color{airforceblue}\\theta} {\\color{applegreen}\\phi}', real = True)\nsp2param = [sin(theta)*cos(phi), sin(theta)*sin(phi), cos(theta)]\n\nsp2 = g3.sm(sp2param, sp2coords, norm = False) # submanifold\n\n(etheta, ephi) = sp2.mv() # sp2 basis vectors\n(rtheta, rphi) = sp2.mvr() # sp2 reciprocal basis vectors\n\nsp2grad = sp2.grad\n\nsph_map = [1, theta, phi] # Coordinate map for sphere of r = 1\n\nMath(r'(\\theta,\\phi)\\rightarrow (r,\\theta,\\phi) = %s' % latex(sph_map))\n\nMath(r'e_\\theta \\cdot e_\\theta = %s' % (etheta|etheta))\n\nMath(r'e_\\phi \\cdot e_\\phi = %s' % (ephi|ephi))\n\nMath('g = %s' % latex(sp2.g))\n\nMath(r'g^{-1} = %s' % latex(sp2.g_inv))", "Christoffel symbols of the first kind:", "Cf1 = sp2.Christoffel_symbols(mode=1)\nCf1 = permutedims(Array(Cf1), (2, 0, 1))\n\nMath(r'\\Gamma_{1, \\alpha, \\beta} = %s \\quad \\Gamma_{2, \\alpha, \\beta} = %s ' % (latex(Cf1[0, :, :]), latex(Cf1[1, :, :])))\n\nCf2 = sp2.Christoffel_symbols(mode=2)\nCf2 = permutedims(Array(Cf2), (2, 0, 1))\n\nMath(r'\\Gamma^{1}_{\\phantom{1,}\\alpha, \\beta} = %s \\quad \\Gamma^{2}_{\\phantom{2,}\\alpha, \\beta} = %s ' % (latex(Cf2[0, :, :]), latex(Cf2[1, :, :])))\n\nF = sp2.mv('F','vector',f=True) #scalar function\nf = sp2.mv('f','scalar',f=True) #vector function\n\nMath(r'\\nabla = %s' % sp2grad)\n\nMath(r'\\nabla f = %s' % (sp2.grad * f))\n\nMath(r'F = %s' % F)\n\nMath(r'\\nabla F = %s' % (sp2.grad * F))", "One dimensioanal submanifold\nBasis not normalised", "cir_th = phi = symbols(r'{\\color{atomictangerine}\\phi}',real = True)\ncir_map = [pi/8, phi]\n\nMath(r'(\\phi)\\rightarrow (\\theta,\\phi) = %s' % latex(cir_map))\n\ncir1d = sp2.sm( cir_map , (cir_th,), norm = False) # submanifold\n\ncir1dgrad = cir1d.grad\n\n(ephi) = cir1d.mv()\n\nMath(r'e_\\phi \\cdot e_\\phi = %s' % latex(ephi[0] | ephi[0]))\n\nMath('g = %s' % latex(cir1d.g))\n\nh = cir1d.mv('h','scalar',f= True)\n\nH = cir1d.mv('H','vector',f= True)\n\nMath(r'\\nabla = %s' % cir1dgrad)\n\nMath(r'\\nabla h = %s' %(cir1d.grad * h).simplify())\n\nMath('H = %s' % H)\n\nMath(r'\\nabla H = %s' % (cir1d.grad * H).simplify())" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
kialio/gsfcpyboot
Day_01/01_Pandas/1. Introduction to Pandas.ipynb
mit
[ "Introduction to Pandas\npandas is a Python package providing fast, flexible, and expressive data structures designed to work with relational or labeled data both. It is a fundamental high-level building block for doing practical, real world data analysis in Python. \npandas is well suited for:\n\nTabular data with heterogeneously-typed columns, as in an SQL table or Excel spreadsheet\nOrdered and unordered (not necessarily fixed-frequency) time series data.\nArbitrary matrix data (homogeneously typed or heterogeneous) with row and column labels\nAny other form of observational / statistical data sets. The data actually need not be labeled at all to be placed into a pandas data structure\n\nKey features:\n\nEasy handling of missing data\nSize mutability: columns can be inserted and deleted from DataFrame and higher dimensional objects\nAutomatic and explicit data alignment: objects can be explicitly aligned to a set of labels, or the data can be aligned automatically\nPowerful, flexible group by functionality to perform split-apply-combine operations on data sets\nIntelligent label-based slicing, fancy indexing, and subsetting of large data sets\nIntuitive merging and joining data sets\nFlexible reshaping and pivoting of data sets\nHierarchical labeling of axes\nRobust IO tools for loading data from flat files, Excel files, databases, and HDF5\nTime series functionality: date range generation and frequency conversion, moving window statistics, moving window linear regressions, date shifting and lagging, etc.", "from IPython.core.display import HTML\nHTML(\"<iframe src=http://pandas.pydata.org width=800 height=350></iframe>\")\n\nimport pandas as pd\nimport numpy as np\n\n# Set some Pandas options\npd.set_option('html', False)\npd.set_option('max_columns', 30)\npd.set_option('max_rows', 20)", "Pandas Data Structures\nSeries\nA Series is a single vector of data (like a NumPy array) with an index that labels each element in the vector.", "counts = pd.Series([632, 1638, 569, 115])\ncounts", "If an index is not specified, a default sequence of integers is assigned as the index. A NumPy array comprises the values of the Series, while the index is a pandas Index object.", "counts.values\n\ncounts.index", "We can assign meaningful labels to the index, if they are available:", "bacteria = pd.Series([632, 1638, 569, 115], \n index=['Firmicutes', 'Proteobacteria', 'Actinobacteria', 'Bacteroidetes'])\n\nbacteria", "These labels can be used to refer to the values in the Series.", "bacteria['Actinobacteria']\n\nbacteria[[name.endswith('bacteria') for name in bacteria.index]]\n\n[name.endswith('bacteria') for name in bacteria.index]", "Notice that the indexing operation preserved the association between the values and the corresponding indices.\nWe can still use positional indexing if we wish.", "bacteria[0]", "We can give both the array of values and the index meaningful labels themselves:", "bacteria.name = 'counts'\nbacteria.index.name = 'phylum'\nbacteria", "NumPy's math functions and other operations can be applied to Series without losing the data structure.", "np.log(bacteria)\n\nbacteria", "We can also filter according to the values in the Series:", "bacteria[bacteria>1000]", "A Series can be thought of as an ordered key-value store. 
In fact, we can create one from a dict:", "bacteria_dict = {'Firmicutes': 632, 'Proteobacteria': 1638, 'Actinobacteria': 569, 'Bacteroidetes': 115}\npd.Series(bacteria_dict)", "Notice that the Series is created in key-sorted order.\nIf we pass a custom index to Series, it will select the corresponding values from the dict, and treat indices without corresponding values as missing. Pandas uses the NaN (not a number) type for missing values.", "bacteria2 = pd.Series(bacteria_dict, index=['Cyanobacteria','Firmicutes','Proteobacteria','Actinobacteria'])\nbacteria2\n\nbacteria2.isnull()", "Critically, the labels are used to align data when used in operations with other Series objects:", "bacteria + bacteria2", "Contrast this with NumPy arrays, where arrays of the same length will combine values element-wise; adding Series combines values with the same label in the resulting series. Notice also that the missing values were propagated by addition.\nDataFrame\nInevitably, we want to be able to store, view and manipulate data that is multivariate, where for every index there are multiple fields or columns of data, often of varying data type.\nA DataFrame is a tabular data structure, encapsulating multiple series, like columns in a spreadsheet. Data are stored internally as a 2-dimensional object, but the DataFrame allows us to represent and manipulate higher-dimensional data.", "data = pd.DataFrame({'value':[632, 1638, 569, 115, 433, 1130, 754, 555],\n 'patient':[1, 1, 1, 1, 2, 2, 2, 2],\n 'phylum':['Firmicutes', 'Proteobacteria', 'Actinobacteria', \n 'Bacteroidetes', 'Firmicutes', 'Proteobacteria', 'Actinobacteria', 'Bacteroidetes']})\ndata", "Notice the DataFrame is sorted by column name. We can change the order by indexing them in the order we desire:", "newdata = data[['phylum','value','patient']]\n\nnewdata", "A DataFrame has a second index, representing the columns:", "data.columns\n\nnewdata.columns", "If we wish to access columns, we can do so either by dict-like indexing or by attribute:", "data['value']\n\ndata.value\n\ntype(data.value)\n\ntype(data[['value']])", "Notice this is different than with Series, where dict-like indexing retrieved a particular element (row). If we want access to a row in a DataFrame, we index its ix attribute.", "data.ix[3]", "Alternatively, we can create a DataFrame with a dict of dicts:", "data = pd.DataFrame({0: {'patient': 1, 'phylum': 'Firmicutes', 'value': 632},\n 1: {'patient': 1, 'phylum': 'Proteobacteria', 'value': 1638},\n 2: {'patient': 1, 'phylum': 'Actinobacteria', 'value': 569},\n 3: {'patient': 1, 'phylum': 'Bacteroidetes', 'value': 115},\n 4: {'patient': 2, 'phylum': 'Firmicutes', 'value': 433},\n 5: {'patient': 2, 'phylum': 'Proteobacteria', 'value': 1130},\n 6: {'patient': 2, 'phylum': 'Actinobacteria', 'value': 754},\n 7: {'patient': 2, 'phylum': 'Bacteroidetes', 'value': 555}})\n\ndata", "We probably want this transposed:", "data = data.T\ndata", "It's important to note that the Series returned when a DataFrame is indexed is merely a view on the DataFrame, and not a copy of the data itself. 
So you must be cautious when manipulating this data:", "vals = data.value\nvals\n\nvals[5] = 0\nvals\n\ndata\n\nvals = data.value.copy()\nvals[5] = 1000\ndata", "We can create or modify columns by assignment:", "data.value[3] = 14\ndata\n\ndata['year'] = 2013\ndata", "But note, we cannot use the attribute indexing method to add a new column:", "data.treatment = 1\ndata\n\ndata.treatment", "Specifying a Series as a new columns cause its values to be added according to the DataFrame's index:", "treatment = pd.Series([0]*4 + [1]*2)\ntreatment\n\ndata['treatment'] = treatment\ndata", "Other Python data structures (ones without an index) need to be the same length as the DataFrame:", "month = ['Jan', 'Feb', 'Mar', 'Apr']\ndata['month'] = month\n\ndata['month'] = ['Jan']*len(data)\ndata", "We can use del to remove columns, in the same way dict entries can be removed:", "del data['month']\ndata", "We can extract the underlying data as a simple ndarray by accessing the values attribute:", "data.values", "Notice that because of the mix of string and integer (and NaN) values, the dtype of the array is object. The dtype will automatically be chosen to be as general as needed to accomodate all the columns.", "df = pd.DataFrame({'foo': [1,2,3], 'bar':[0.4, -1.0, 4.5]})\ndf.values", "Pandas uses a custom data structure to represent the indices of Series and DataFrames.", "data.index", "Index objects are immutable:", "data.index[0] = 15", "This is so that Index objects can be shared between data structures without fear that they will be changed.", "bacteria2.index = bacteria.index\n\nbacteria2", "Importing data\nA key, but often under-appreciated, step in data analysis is importing the data that we wish to analyze. Though it is easy to load basic data structures into Python using built-in tools or those provided by packages like NumPy, it is non-trivial to import structured data well, and to easily convert this input into a robust data structure:\ngenes = np.loadtxt(\"genes.csv\", delimiter=\",\", dtype=[('gene', '|S10'), ('value', '&lt;f4')])\n\nPandas provides a convenient set of functions for importing tabular data in a number of formats directly into a DataFrame object. These functions include a slew of options to perform type inference, indexing, parsing, iterating and cleaning automatically as data are imported.\nLet's start with some more bacteria data, stored in csv format.", "!cat data/microbiome.csv", "This table can be read into a DataFrame using read_csv:", "mb = pd.read_csv(\"data/microbiome.csv\")\nmb", "Notice that read_csv automatically considered the first row in the file to be a header row.\nWe can override default behavior by customizing some the arguments, like header, names or index_col.", "pd.read_csv(\"data/microbiome.csv\", header=None).head()", "read_csv is just a convenience function for read_table, since csv is such a common format:", "mb = pd.read_table(\"data/microbiome.csv\", sep=',')", "The sep argument can be customized as needed to accomodate arbitrary separators. 
For example, we can use a regular expression to define a variable amount of whitespace, which is unfortunately very common in some data formats: \nsep='\\s+'\n\nFor a more useful index, we can specify the first two columns, which together provide a unique index to the data.", "mb = pd.read_csv(\"data/microbiome.csv\", index_col=['Taxon','Patient'])\nmb.head()", "This is called a hierarchical index, which we will revisit later in the tutorial.\nIf we have sections of data that we do not wish to import (for example, known bad data), we can populate the skiprows argument:", "pd.read_csv(\"data/microbiome.csv\", skiprows=[3,4,6]).head()", "Conversely, if we only want to import a small number of rows from, say, a very large data file we can use nrows:", "pd.read_csv(\"data/microbiome.csv\", nrows=4)", "Alternately, if we want to process our data in reasonable chunks, the chunksize argument will return an iterable object that can be employed in a data processing loop. For example, our microbiome data are organized by bacterial phylum, with 15 patients represented in each:", "data_chunks = pd.read_csv(\"data/microbiome.csv\", chunksize=15)\n\nmean_tissue = {chunk.Taxon[0]:chunk.Tissue.mean() for chunk in data_chunks}\n \nmean_tissue", "Most real-world data is incomplete, with values missing due to incomplete observation, data entry or transcription error, or other reasons. Pandas will automatically recognize and parse common missing data indicators, including NA and NULL.", "!cat data/microbiome_missing.csv\n\npd.read_csv(\"data/microbiome_missing.csv\").head(20)", "Above, Pandas recognized NA and an empty field as missing data.", "pd.isnull(pd.read_csv(\"data/microbiome_missing.csv\")).head(20)", "Unfortunately, there will sometimes be inconsistency with the conventions for missing data. In this example, there is a question mark \"?\" and a large negative number where there should have been a positive integer. We can specify additional symbols with the na_values argument:", "pd.read_csv(\"data/microbiome_missing.csv\", na_values=['?', -99999]).head(20)", "These can be specified on a column-wise basis using an appropriate dict as the argument for na_values.\nMicrosoft Excel\nSince so much financial and scientific data ends up in Excel spreadsheets (regrettably), Pandas' ability to directly import Excel spreadsheets is valuable. This support is contingent on having one or two dependencies (depending on what version of Excel file is being imported) installed: xlrd and openpyxl (these may be installed with either pip or easy_install).\nImporting Excel data to Pandas is a two-step process. First, we create an ExcelFile object using the path of the file:", "mb_file = pd.ExcelFile('data/microbiome/MID1.xls')\nmb_file", "Then, since modern spreadsheets consist of one or more \"sheets\", we parse the sheet with the data of interest:", "mb1 = mb_file.parse(\"Sheet 1\", header=None)\nmb1.columns = [\"Taxon\", \"Count\"]\nmb1.head()", "There is now a read_excel convenience function in Pandas that combines these steps into a single call:", "mb2 = pd.read_excel('data/microbiome/MID2.xls', sheetname='Sheet 1', header=None)\nmb2.head()", "There are several other data formats that can be imported into Python and converted into DataFrames, with the help of buitl-in or third-party libraries. These include JSON, XML, HDF5, relational and non-relational databases, and various web APIs. 
These are beyond the scope of this tutorial, but are covered in Python for Data Analysis.\nPandas Fundamentals\nThis section introduces the new user to the key functionality of Pandas that is required to use the software effectively.\nFor some variety, we will leave our digestive tract bacteria behind and employ some baseball data.", "baseball = pd.read_csv(\"data/baseball.csv\", index_col='id')\nbaseball.head()", "Notice that we specified the id column as the index, since it appears to be a unique identifier. We could try to create a unique index ourselves by combining player and year:", "player_id = baseball.player + baseball.year.astype(str)\nbaseball_newind = baseball.copy()\nbaseball_newind.index = player_id\nbaseball_newind.head()", "This looks okay, but let's check:", "baseball_newind.index.is_unique", "So, indices need not be unique. Our choice is not unique because some players change teams within years.", "pd.Series(baseball_newind.index).value_counts()", "The most important consequence of a non-unique index is that indexing by label will return multiple values for some labels:", "baseball_newind.ix['wickmbo012007']", "We will learn more about indexing below.\nWe can create a truly unique index by combining player, team and year:", "player_unique = baseball.player + baseball.team + baseball.year.astype(str)\nbaseball_newind = baseball.copy()\nbaseball_newind.index = player_unique\nbaseball_newind.head()\n\nbaseball_newind.index.is_unique", "We can create meaningful indices more easily using a hierarchical index; for now, we will stick with the numeric id field as our index.\nManipulating indices\nReindexing allows users to manipulate the data labels in a DataFrame. It forces a DataFrame to conform to the new index, and optionally, fill in missing data if requested.\nA simple use of reindex is to alter the order of the rows:", "baseball.reindex(baseball.index[::-1]).head()", "Notice that the id index is not sequential. Say we wanted to populate the table with every id value. 
We could specify and index that is a sequence from the first to the last id numbers in the database, and Pandas would fill in the missing data with NaN values:", "id_range = range(baseball.index.values.min(), baseball.index.values.max())\nbaseball.reindex(id_range).head()", "Missing values can be filled as desired, either with selected values, or by rule:", "baseball.reindex(id_range, method='ffill', columns=['player','year']).head()\n\nbaseball.reindex(id_range, fill_value='mr.nobody', columns=['player']).head()", "Keep in mind that reindex does not work if we pass a non-unique index series.\nWe can remove rows or columns via the drop method:", "baseball.shape\n\nbaseball.drop([89525, 89526])\n\nbaseball.drop(['ibb','hbp'], axis=1)", "Indexing and Selection\nIndexing works analogously to indexing in NumPy arrays, except we can use the labels in the Index object to extract values in addition to arrays of integers.", "# Sample Series object\nhits = baseball_newind.h\nhits\n\n# Numpy-style indexing\nhits[:3]\n\n# Indexing by label\nhits[['womacto01CHN2006','schilcu01BOS2006']]", "We can also slice with data labels, since they have an intrinsic order within the Index:", "hits['womacto01CHN2006':'gonzalu01ARI2006']\n\nhits['womacto01CHN2006':'gonzalu01ARI2006'] = 5\nhits", "In a DataFrame we can slice along either or both axes:", "baseball_newind[['h','ab']]\n\nbaseball_newind[baseball_newind.ab>500]", "The indexing field ix allows us to select subsets of rows and columns in an intuitive way:", "baseball_newind.ix['gonzalu01ARI2006', ['h','X2b', 'X3b', 'hr']]\n\nbaseball_newind.ix[['gonzalu01ARI2006','finlest01SFN2006'], 5:8]\n\nbaseball_newind.ix[:'myersmi01NYA2006', 'hr']", "Similarly, the cross-section method xs (not a field) extracts a single column or row by label and returns it as a Series:", "baseball_newind.xs('myersmi01NYA2006')", "Operations\nDataFrame and Series objects allow for several operations to take place either on a single object, or between two or more objects.\nFor example, we can perform arithmetic on the elements of two objects, such as combining baseball statistics across years:", "hr2006 = baseball[baseball.year==2006].xs('hr', axis=1)\nhr2006.index = baseball.player[baseball.year==2006]\n\nhr2007 = baseball[baseball.year==2007].xs('hr', axis=1)\nhr2007.index = baseball.player[baseball.year==2007]\n\nhr2006 = pd.Series(baseball.hr[baseball.year==2006].values, index=baseball.player[baseball.year==2006])\nhr2007 = pd.Series(baseball.hr[baseball.year==2007].values, index=baseball.player[baseball.year==2007])\n\nhr_total = hr2006 + hr2007\nhr_total", "Pandas' data alignment places NaN values for labels that do not overlap in the two Series. In fact, there are only 6 players that occur in both years.", "hr_total[hr_total.notnull()]", "While we do want the operation to honor the data labels in this way, we probably do not want the missing values to be filled with NaN. 
We can use the add method to calculate player home run totals by using the fill_value argument to insert a zero for home runs where labels do not overlap:", "hr2007.add(hr2006, fill_value=0)", "Operations can also be broadcast between rows or columns.\nFor example, if we subtract the maximum number of home runs hit from the hr column, we get how many fewer than the maximum were hit by each player:", "baseball.hr - baseball.hr.max()", "Or, looking at things row-wise, we can see how a particular player compares with the rest of the group with respect to important statistics", "baseball.ix[89521][\"player\"]\n\nstats = baseball[['h','X2b', 'X3b', 'hr']]\ndiff = stats - stats.xs(89521)\ndiff[:10]", "We can also apply functions to each column or row of a DataFrame", "stats.apply(np.median)\n\nstat_range = lambda x: x.max() - x.min()\nstats.apply(stat_range)", "Lets use apply to calculate a meaningful baseball statistics, slugging percentage:\n$$SLG = \\frac{1B + (2 \\times 2B) + (3 \\times 3B) + (4 \\times HR)}{AB}$$\nAnd just for fun, we will format the resulting estimate.", "slg = lambda x: (x['h']-x['X2b']-x['X3b']-x['hr'] + 2*x['X2b'] + 3*x['X3b'] + 4*x['hr'])/(x['ab']+1e-6)\nbaseball.apply(slg, axis=1).apply(lambda x: '%.3f' % x)", "Sorting and Ranking\nPandas objects include methods for re-ordering data.", "baseball_newind.sort_index().head()\n\nbaseball_newind.sort_index(ascending=False).head()\n\nbaseball_newind.sort_index(axis=1).head()", "We can also use order to sort a Series by value, rather than by label.", "baseball.hr.order(ascending=False)", "For a DataFrame, we can sort according to the values of one or more columns using the by argument of sort_index:", "baseball[['player','sb','cs']].sort_index(ascending=[False,True], by=['sb', 'cs']).head(10)", "Ranking does not re-arrange data, but instead returns an index that ranks each value relative to others in the Series.", "baseball.hr.rank()", "Ties are assigned the mean value of the tied ranks, which may result in decimal values.", "pd.Series([100,100]).rank()", "Alternatively, you can break ties via one of several methods, such as by the order in which they occur in the dataset:", "baseball.hr.rank(method='first')", "Calling the DataFrame's rank method results in the ranks of all columns:", "baseball.rank(ascending=False).head()\n\nbaseball[['r','h','hr']].rank(ascending=False).head()", "Exercise\nCalculate on base percentage for each player, and return the ordered series of estimates.\n$$OBP = \\frac{H + BB + HBP}{AB + BB + HBP + SF}$$\nHierarchical indexing\nIn the baseball example, I was forced to combine 3 fields to obtain a unique index that was not simply an integer value. A more elegant way to have done this would be to create a hierarchical index from the three fields.", "baseball_h = baseball.set_index(['year', 'team', 'player'])\nbaseball_h.head(10)", "This index is a MultiIndex object that consists of a sequence of tuples, the elements of which is some combination of the three columns used to create the index. Where there are multiple repeated values, Pandas does not print the repeats, making it easy to identify groups of values.", "baseball_h.index[:10]\n\nbaseball_h.index.is_unique\n\nbaseball_h.ix[(2007, 'ATL', 'francju01')]", "Recall earlier we imported some microbiome data using two index columns. 
This created a 2-level hierarchical index:", "mb = pd.read_csv(\"data/microbiome.csv\", index_col=['Taxon','Patient'])\n\nmb.head(10)\n\nmb.index", "With a hierarchical index, we can select subsets of the data based on a partial index:", "mb.ix['Proteobacteria']", "Hierarchical indices can be created on either or both axes. Here is a trivial example:", "frame = pd.DataFrame(np.arange(12).reshape(( 4, 3)), \n index =[['a', 'a', 'b', 'b'], [1, 2, 1, 2]], \n columns =[['Ohio', 'Ohio', 'Colorado'], ['Green', 'Red', 'Green']])\n\nframe", "If you want to get fancy, both the row and column indices themselves can be given names:", "frame.index.names = ['key1', 'key2']\nframe.columns.names = ['state', 'color']\nframe", "With this, we can do all sorts of custom indexing:", "frame.ix['a']['Ohio']\n\nframe.ix['b', 2]['Colorado']", "Additionally, the order of the set of indices in a hierarchical MultiIndex can be changed by swapping them pairwise:", "mb.swaplevel('Patient', 'Taxon').head()", "Data can also be sorted by any index level, using sortlevel:", "mb.sortlevel('Patient', ascending=False).head()", "Missing data\nThe occurrence of missing data is so prevalent that it pays to use tools like Pandas, which seamlessly integrates missing data handling so that it can be dealt with easily, and in the manner required by the analysis at hand.\nMissing data are represented in Series and DataFrame objects by the NaN floating point value. However, None is also treated as missing, since it is commonly used as such in other contexts (e.g. NumPy).", "foo = pd.Series([np.nan, -3, None, 'foobar'])\nfoo\n\nfoo.isnull()", "Missing values may be dropped or indexed out:", "bacteria2\n\nbacteria2.dropna()\n\nbacteria2[bacteria2.notnull()]", "By default, dropna drops entire rows in which one or more values are missing.", "data\n\ndata.dropna()", "This can be overridden by passing the how='all' argument, which only drops a row when every field is a missing value.", "data.dropna(how='all')", "This can be customized further by specifying how many values need to be present before a row is dropped via the thresh argument.", "data.ix[7, 'year'] = np.nan\ndata\n\ndata.dropna(thresh=4)", "This is typically used in time series applications, where there are repeated measurements that are incomplete for some subjects.\nIf we want to drop missing values column-wise instead of row-wise, we use axis=1.", "data.dropna(axis=1)", "Rather than omitting missing data from an analysis, in some cases it may be suitable to fill the missing value in, either with a default value (such as zero) or a value that is either imputed or carried forward/backward from similar data points. We can do this programmatically in Pandas with the fillna method.", "bacteria2.fillna(0)\n\ndata.fillna({'year': 2013, 'treatment':2})", "Notice that fillna by default returns a new object with the desired filling behavior, rather than changing the Series or DataFrame in place (in general, we like to do this, by the way!).", "data", "We can alter values in-place using inplace=True.", "_ = data.year.fillna(2013, inplace=True)\ndata", "Missing values can also be interpolated, using any one of a variety of methods:", "bacteria2.fillna(method='bfill')\n\nbacteria2.fillna(bacteria2.mean())", "Data summarization\nWe often wish to summarize data in Series or DataFrame objects, so that they can more easily be understood or compared with similar data. 
The NumPy package contains several functions that are useful here, but several summarization or reduction methods are built into Pandas data structures.", "baseball.sum()", "Clearly, sum is more meaningful for some columns than others. For methods like mean for which application to string variables is not just meaningless, but impossible, these columns are automatically excluded:", "baseball.mean()", "The important difference between NumPy's functions and Pandas' methods is that the latter have built-in support for handling missing data.", "bacteria2\n\nbacteria2.mean()", "Sometimes we may not want to ignore missing values, and allow the nan to propagate.", "bacteria2.mean(skipna=False)", "Passing axis=1 will summarize over rows instead of columns, which only makes sense in certain situations.", "extra_bases = baseball[['X2b','X3b','hr']].sum(axis=1)\nextra_bases.order(ascending=False)", "A useful summarization that gives a quick snapshot of multiple statistics for a Series or DataFrame is describe:", "baseball.describe()", "describe can detect non-numeric data and sometimes yield useful information about it.", "baseball.player.describe()", "We can also calculate summary statistics across multiple columns, for example, correlation and covariance.\n$$cov(x,y) = \\sum_i (x_i - \\bar{x})(y_i - \\bar{y})$$", "baseball.hr.cov(baseball.X2b)", "$$corr(x,y) = \\frac{cov(x,y)}{(n-1)s_x s_y} = \\frac{\\sum_i (x_i - \\bar{x})(y_i - \\bar{y})}{\\sqrt{\\sum_i (x_i - \\bar{x})^2 \\sum_i (y_i - \\bar{y})^2}}$$", "baseball.hr.corr(baseball.X2b)\n\nbaseball.ab.corr(baseball.h)\n\nbaseball.corr()", "If we have a DataFrame with a hierarchical index (or indices), summary statistics can be applied with respect to any of the index levels:", "mb.head()\n\nmb.sum(level='Taxon')", "Writing Data to Files\nAs well as being able to read several data input formats, Pandas can also export data to a variety of storage formats. We will bring your attention to just a couple of these.", "mb.to_csv(\"mb.csv\")", "The to_csv method writes a DataFrame to a comma-separated values (csv) file. You can specify custom delimiters (via sep argument), how missing values are written (via na_rep argument), whether the index is written (via index argument), whether the header is included (via header argument), among other options.\nAn efficient way of storing data to disk is in binary format. Pandas supports this using Python’s built-in pickle serialization.", "baseball.to_pickle(\"baseball_pickle\")", "The complement to to_pickle is the read_pickle function, which restores the pickle to a DataFrame or Series:", "pd.read_pickle(\"baseball_pickle\")", "As Wes warns in his book, it is recommended that binary storage of data via pickle only be used as a temporary storage format, in situations where speed is relevant. This is because there is no guarantee that the pickle format will not change with future versions of Python." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
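The covariance and correlation formulas in the pandas notebook above can be checked by hand on a small pair of Series. The following is an illustrative sketch, not part of the original notebook; it uses a toy pair of Series rather than the `baseball` columns and only assumes pandas:

```python
import pandas as pd

# Toy stand-ins for two numeric columns such as baseball.hr and baseball.X2b
x = pd.Series([2.0, 4.0, 6.0, 8.0, 10.0])
y = pd.Series([1.0, 3.0, 2.0, 5.0, 4.0])

cov_xy = x.cov(y)    # sample covariance (n-1 in the denominator)
corr_xy = x.corr(y)  # Pearson correlation

# The correlation equals the covariance divided by the product of the sample standard deviations
assert abs(corr_xy - cov_xy / (x.std() * y.std())) < 1e-12
print(corr_xy)
```

The same identity underlies the `baseball.hr.corr(baseball.X2b)` cell in the notebook above.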
XInterns/IPL-Sparkers
src/Match Outcome Prediction with IPL Data (Gursahej).ipynb
mit
[ "Predicting the Outcome of Cricket Matches\nIntroduction\nIn this project, we shall build a model which predicts the outcome of cricket matches in the Indian Premier League using data about matches and deliveries.\nData Mining:\n\nSeason : 2008 - 2015 (8 Seasons)\nTeams : DD, KKR, MI, RCB, KXIP, RR, CSK (7 Teams)\nNeglect matches that have inconsistencies such as No Result, Tie, D/L Method, etc.\n\nPossible Features:\n\nAverage Batsman Rating (top 5) \nAverage Bowler Rating (top 4)\nPlayer of the match frequency \nPrevious Encounter - Win by runs, Win by Wickets \nRecent form (Last 5 Games)\nVenue - Home, Away, Neutral", "# The %... is an iPython thing, and is not part of the Python language.\n# In this case we're just telling the plotting library to draw things on\n# the notebook, instead of on a separate window.\n%matplotlib inline \n#this line above prepares IPython notebook for working with matplotlib\n\n# See all the \"as ...\" contructs? They're just aliasing the package names.\n# That way we can call methods like plt.plot() instead of matplotlib.pyplot.plot().\n\nimport numpy as np # imports a fast numerical programming library\nimport scipy as sp #imports stats functions, amongst other things\nimport matplotlib as mpl # this actually imports matplotlib\nimport matplotlib.cm as cm #allows us easy access to colormaps\nimport matplotlib.pyplot as plt #sets up plotting under plt\nimport pandas as pd #lets us handle data as dataframes\n#sets up pandas table display\npd.set_option('display.width', 500)\npd.set_option('display.max_columns', 100)\npd.set_option('display.notebook_repr_html', True)\nimport seaborn as sns #sets up styles and gives us more plotting options\nfrom __future__ import division", "Data Mining", "# Reading in the data\nallmatches = pd.read_csv(\"../data/matches.csv\")\nalldeliveries = pd.read_csv(\"../data/deliveries.csv\")\nallmatches.head(10)\n\n# Selecting Seasons 2008 - 2015\nmatches_seasons = allmatches.loc[allmatches['season'] != 2016]\ndeliveries_seasons = alldeliveries.loc[alldeliveries['match_id'] < 518]\n\n# Selecting teams DD, KKR, MI, RCB, KXIP, RR, CSK\nmatches_teams = matches_seasons.loc[(matches_seasons['team1'].isin(['Kolkata Knight Riders', \\\n'Royal Challengers Bangalore', 'Delhi Daredevils', 'Chennai Super Kings', 'Rajasthan Royals', \\\n'Mumbai Indians', 'Kings XI Punjab'])) & (matches_seasons['team2'].isin(['Kolkata Knight Riders', \\\n'Royal Challengers Bangalore', 'Delhi Daredevils', 'Chennai Super Kings', 'Rajasthan Royals', \\\n'Mumbai Indians', 'Kings XI Punjab']))]\nmatches_team_matchids = matches_teams.id.unique()\ndeliveries_teams = deliveries_seasons.loc[deliveries_seasons['match_id'].isin(matches_team_matchids)]\nprint \"Teams selected:\\n\"\nfor team in matches_teams.team1.unique():\n print team\n\n# Neglect matches with inconsistencies like 'No Result' or 'D/L Applied'\nmatches = matches_teams.loc[(matches_teams['result'] == 'normal') & (matches_teams['dl_applied'] == 0)]\nmatches_matchids = matches.id.unique()\ndeliveries = deliveries_teams.loc[deliveries_teams['match_id'].isin(matches_matchids)]\n\n# Verifying consistency between datasets\n(matches.id.unique() == deliveries.match_id.unique()).all()", "Building Features", "# Team Strike rates for first 5 batsmen in the team (Higher the better)\n\ndef getMatchDeliveriesDF(match_id):\n return deliveries.loc[deliveries['match_id'] == match_id]\n\ndef getInningsOneBatsmen(match_deliveries):\n return match_deliveries.loc[match_deliveries['inning'] == 1].batsman.unique()[0:5]\n\ndef 
getInningsTwoBatsmen(match_deliveries):\n return match_deliveries.loc[match_deliveries['inning'] == 2].batsman.unique()[0:5]\n\ndef getBatsmanStrikeRate(batsman, match_id):\n onstrikedeliveries = deliveries.loc[(deliveries['match_id'] < match_id) & (deliveries['batsman'] == batsman)]\n total_runs = onstrikedeliveries['batsman_runs'].sum()\n total_balls = onstrikedeliveries.shape[0]\n if total_balls != 0: \n return (total_runs/total_balls) * 100\n else:\n return None\n\n\ndef getTeamStrikeRate(batsmen, match_id):\n strike_rates = []\n for batsman in batsmen:\n bsr = getBatsmanStrikeRate(batsman, match_id)\n if bsr != None:\n strike_rates.append(bsr)\n return np.mean(strike_rates)\n\ndef getAverageStrikeRates(match_id):\n match_deliveries = getMatchDeliveriesDF(match_id)\n innOneBatsmen = getInningsOneBatsmen(match_deliveries)\n innTwoBatsmen = getInningsTwoBatsmen(match_deliveries)\n teamOneSR = getTeamStrikeRate(innOneBatsmen, match_id)\n teamTwoSR = getTeamStrikeRate(innTwoBatsmen, match_id)\n return teamOneSR, teamTwoSR\n\n# Testing Functionality\ngetAverageStrikeRates(517)\n\n\n# Bowler Rating : Wickets/Run (Higher the Better)\n# Team 1: Batting First; Team 2: Fielding First\n\ndef getInningsOneBowlers(match_deliveries):\n return match_deliveries.loc[match_deliveries['inning'] == 1].bowler.unique()[0:4]\n\ndef getInningsTwoBowlers(match_deliveries):\n return match_deliveries.loc[match_deliveries['inning'] == 2].bowler.unique()[0:4]\n\ndef getBowlerWPR(bowler, match_id):\n balls = deliveries.loc[(deliveries['match_id'] < match_id) & (deliveries['bowler'] == bowler)]\n total_runs = balls['total_runs'].sum()\n total_wickets = balls.loc[balls['dismissal_kind'].isin(['caught', 'bowled', 'lbw', \\\n 'caught and bowled', 'stumped'])].shape[0]\n if total_runs != 0:\n return (total_wickets/total_runs) * 100\n else:\n return total_wickets\n\ndef getTeamWPR(bowlers, match_id):\n totalWPRs = []\n for bowler in bowlers:\n totalWPRs.append(getBowlerWPR(bowler, match_id))\n return np.mean(totalWPRs)\n\ndef getAverageWPR(match_id):\n match_deliveries = getMatchDeliveriesDF(match_id)\n innOneBowlers = getInningsOneBowlers(match_deliveries)\n innTwoBowlers = getInningsTwoBowlers(match_deliveries)\n teamOneWPR = getTeamWPR(innTwoBowlers, match_id)\n teamTwoWPR = getTeamWPR(innOneBowlers, match_id)\n return teamOneWPR, teamTwoWPR\n\n#Testing Functionality \ngetAverageWPR(517)\n\n# Man of the Match Awards for players of both Teams \n\ndef getInningsOneAllBatsmen(match_deliveries):\n return match_deliveries.loc[match_deliveries['inning'] == 1].batsman.unique()\n\ndef getInningsTwoAllBatsmen(match_deliveries):\n return match_deliveries.loc[match_deliveries['inning'] == 2].batsman.unique()\n\ndef getInningsOneAllBowlers(match_deliveries):\n return match_deliveries.loc[match_deliveries['inning'] == 2].bowler.unique()\n\ndef getInningsTwoAllBowlers(match_deliveries):\n return match_deliveries.loc[match_deliveries['inning'] == 1].bowler.unique()\n\ndef getTeam(batsmen,bowlers):\n p = []\n p = np.append(p, batsmen)\n for i in bowlers:\n if i not in batsmen:\n p = np.append(p, i)\n return p\n\ndef getPlayerMVPAwards(player, match_id):\n return matches.loc[(matches[\"player_of_match\"] == player) & (matches['id'] < match_id)].shape[0]\n\ndef getTeamMVPAwards(team, match_id):\n mvpAwards = 0\n for player in team:\n mvpAwards = mvpAwards + getPlayerMVPAwards(player,match_id)\n \n return mvpAwards\n\ndef bothTeamMVPAwards(match_id):\n matchDeliveries = getMatchDeliveriesDF(match_id)\n innOneBatsmen = 
getInningsOneAllBatsmen(matchDeliveries)\n innTwoBatsmen = getInningsTwoAllBatsmen(matchDeliveries)\n innOneBowlers = getInningsTwoAllBowlers(matchDeliveries)\n innTwoBowlers = getInningsOneAllBowlers(matchDeliveries)\n team1 = getTeam(innOneBatsmen, innTwoBowlers)\n \n team2 = getTeam(innTwoBatsmen, innOneBowlers)\n team1Awards = getTeamMVPAwards(team1,match_id)\n team2Awards = getTeamMVPAwards(team2,match_id)\n return team1Awards, team2Awards\n\n \n\n \n\n\n\n#Testing Functionality\nbothTeamMVPAwards(517)\n\n#Function to generate squad rating \n\ndef generateSquadRating(match_id):\n gameday_teams = deliveries.loc[(deliveries['match_id'] == match_id)].batting_team.unique()\n teamOne = gameday_teams[0]\n teamTwo = gameday_teams[1]\n teamOneSR, teamTwoSR = getAverageStrikeRates(match_id)\n teamOneWPR, teamTwoWPR = getAverageWPR(match_id)\n teamOneMVPs, teamTwoMVPs = bothTeamMVPAwards(match_id)\n print \"Comparing squads for {} vs {}\".format(teamOne,teamTwo)\n print \"\\nAverage Strike Rate for Batsmen in {} : {}\".format(teamOne,teamOneSR)\n print \"\\nAverage Strike Rate for Batsmen in {} : {}\".format(teamTwo,teamTwoSR)\n print \"\\nBowler Rating (W/R) for {} : {}\".format(teamOne,teamOneWPR)\n print \"\\nBowler Rating (W/R) for {} : {}\".format(teamTwo,teamTwoWPR)\n print \"\\nNumber of MVP Awards in {} : {}\".format(teamOne,teamOneMVPs)\n print \"\\nNumber of MVP Awards in {} : {}\".format(teamTwo,teamTwoMVPs)\n\n#Testing Functionality\ngenerateSquadRating(517)\n\n## 2nd Feature : Previous Encounter\n# Won by runs and won by wickets (Higher the better)\n\ndef getTeam1(match_id):\n return matches.loc[matches[\"id\"] == match_id].team1.unique()\n\ndef getTeam2(match_id):\n return matches.loc[matches[\"id\"] == match_id].team2.unique()\n\ndef getPreviousEncDF(match_id):\n team1 = getTeam1(match_id)\n team2 = getTeam2(match_id)\n return matches.loc[(matches[\"id\"] < match_id) & (((matches[\"team1\"].isin(team1)) & (matches[\"team2\"].isin(team2))) | ((matches[\"team1\"].isin(team2)) & (matches[\"team2\"].isin(team1))))]\ndef getTeamWBR(match_id, team):\n WBR = 0\n DF = getPreviousEncDF(match_id)\n winnerDF = DF.loc[DF[\"winner\"] == team]\n WBR = winnerDF['win_by_runs'].sum() \n return WBR\n\n\ndef getTeamWBW(match_id, team):\n WBW = 0 \n DF = getPreviousEncDF(match_id)\n winnerDF = DF.loc[DF[\"winner\"] == team]\n WBW = winnerDF['win_by_wickets'].sum()\n return WBW \n \ndef getTeamWinPerc(match_id):\n dF = getPreviousEncDF(match_id)\n timesPlayed = dF.shape[0]\n team1 = getTeam1(match_id)[0].strip(\"[]\")\n timesWon = dF.loc[dF[\"winner\"] == team1].shape[0]\n if timesPlayed != 0:\n winPerc = (timesWon/timesPlayed) * 100\n else:\n winPerc = 0\n return winPerc\n\ndef getBothTeamStats(match_id):\n DF = getPreviousEncDF(match_id)\n team1 = getTeam1(match_id)[0].strip(\"[]\")\n team2 = getTeam2(match_id)[0].strip(\"[]\")\n timesPlayed = DF.shape[0]\n timesWon = DF.loc[DF[\"winner\"] == team1].shape[0]\n WBRTeam1 = getTeamWBR(match_id, team1)\n WBRTeam2 = getTeamWBR(match_id, team2)\n WBWTeam1 = getTeamWBW(match_id, team1)\n WBWTeam2 = getTeamWBW(match_id, team2)\n\n print \"Out of {} times in the past {} have won {} times({}%) from {}\".format(timesPlayed, team1, timesWon, getTeamWinPerc(match_id), team2)\n print \"{} won by {} total runs and {} total wickets.\".format(team1, WBRTeam1, WBWTeam1)\n print \"{} won by {} total runs and {} total wickets.\".format(team2, WBRTeam2, WBWTeam2)\n\n\n\n\n#Testing functionality \ngetBothTeamStats(517)\n\n\n#3rd Feature: Recent Form (Win Percentage of 3 
previous matches of a team in the same season)\n#Higher the better\n\ndef getMatchYear(match_id):\n return matches.loc[matches[\"id\"] == match_id].season.unique()\n\ndef getTeam1DF(match_id, year):\n team1 = getTeam1(match_id)\n return matches.loc[(matches[\"id\"] < match_id) & (matches[\"season\"] == year) & ((matches[\"team1\"].isin(team1)) | (matches[\"team2\"].isin(team1)))].tail(3)\n\ndef getTeam2DF(match_id, year):\n team2 = getTeam2(match_id)\n return matches.loc[(matches[\"id\"] < match_id) & (matches[\"season\"] == year) & ((matches[\"team1\"].isin(team2)) | (matches[\"team2\"].isin(team2)))].tail(3)\n\ndef getTeamWinPercentage(match_id):\n win = 0\n total = 0\n year = int(getMatchYear(match_id))\n team1 = getTeam1(match_id)[0].strip(\"[]\")\n team2 = getTeam2(match_id)[0].strip(\"[]\")\n team1DF = getTeam1DF(match_id, year)\n team2DF = getTeam2DF(match_id, year)\n team1TotalMatches = team1DF.shape[0]\n team1WinMatches = team1DF.loc[team1DF[\"winner\"] == team1].shape[0]\n team2TotalMatches = team2DF.shape[0]\n team2WinMatches = team2DF.loc[team2DF[\"winner\"] == team2].shape[0]\n if (team1TotalMatches != 0) and (team2TotalMatches !=0):\n winPercTeam1 = ((team1WinMatches / team1TotalMatches) * 100) \n winPercTeam2 = ((team2WinMatches / team2TotalMatches) * 100) \n elif (team1TotalMatches != 0) and (team2TotalMatches ==0):\n winPercTeam1 = ((team1WinMatches / team1TotalMatches) * 100) \n winPercTeam2 = 0\n elif (team1TotalMatches == 0) and (team2TotalMatches !=0):\n winPercTeam1 = 0\n winPercTeam2 = ((team2WinMatches / team2TotalMatches) * 100) \n else:\n winPercTeam1 = 0\n winPercTeam2 = 0\n \n return winPercTeam1, winPercTeam2\n \n \ndef displayTeamWin(match_id):\n year = int(getMatchYear(match_id))\n team1 = getTeam1(match_id)[0].strip(\"[]\")\n team2 = getTeam2(match_id)[0].strip(\"[]\")\n P,Q = getTeamWinPercentage(match_id)\n print \"In the season of {}, {} has a win percentage of {}% and {} has a win percentage of {}% \".format(year, team1, P, team2, Q)\n\n#Function to implement all features\ndef getAllFeatures(match_id):\n generateSquadRating(match_id)\n print (\"\\n\")\n getBothTeamStats(match_id)\n print(\"\\n\")\n displayTeamWin(match_id)\n\n\n#Testing Functionality\ngetAllFeatures(517)", "Adding Columns", "#Create Column for Team 1 Winning Status (1 = Won, 0 = Lost)\n\nmatches['team1Winning'] = np.where(matches['team1'] == matches['winner'], 1, 0)\n\n#New Column for Difference of Average Strike rates (First Team SR - Second Team SR) [Negative value means Second team is better]\n\nfirstTeamSR = []\nsecondTeamSR = []\nfor i in matches['id'].unique():\n P, Q = getAverageStrikeRates(i)\n firstTeamSR.append(P), secondTeamSR.append(Q)\nfirstSRSeries = pd.Series(firstTeamSR)\nsecondSRSeries = pd.Series(secondTeamSR)\nmatches[\"Avg_SR_Difference\"] = firstSRSeries.values - secondSRSeries.values \n\n#New Column for Difference of Wickets Per Run (First Team WPR - Second Team WPR) [Negative value means Second team is better]\n\nfirstTeamWPR = []\nsecondTeamWPR = []\nfor i in matches['id'].unique():\n R, S = getAverageWPR(i)\n firstTeamWPR.append(R), secondTeamWPR.append(S)\nfirstWPRSeries = pd.Series(firstTeamWPR)\nsecondWPRSeries = pd.Series(secondTeamWPR)\nmatches[\"Avg_WPR_Difference\"] = firstWPRSeries.values - secondWPRSeries.values \n\n#New column for difference of MVP Awards (Negative value means Second team is better)\n\nfirstTeamMVP = []\nsecondTeamMVP = []\nfor i in matches['id'].unique():\n T, U = bothTeamMVPAwards(i)\n firstTeamMVP.append(T), 
secondTeamMVP.append(U)\nfirstMVPSeries = pd.Series(firstTeamMVP)\nsecondMVPSeries = pd.Series(secondTeamMVP)\nmatches[\"Total_MVP_Difference\"] = firstMVPSeries.values - secondMVPSeries.values \n\n#New column for win percentage of Team1 in previous encounter \n\nfirstTeamWP = []\nfor i in matches['id'].unique():\n WP = getTeamWinPerc(i)\n firstTeamWP.append(WP)\nfirstWPSeries = pd.Series(firstTeamWP)\nmatches[\"Prev_Enc_Team1_WinPerc\"] = firstWPSeries.values\n\n\n\n\n#New column for Recent form(Win Percentage in the current season) of 1st Team compared to 2nd Team(Negative means 2nd team has higher win percentage)\n\nfirstTeamRF = []\nsecondTeamRF = []\nfor i in matches['id'].unique():\n K, L = getTeamWinPercentage(i)\n firstTeamRF.append(K), secondTeamRF.append(L)\nfirstRFSeries = pd.Series(firstTeamRF)\nsecondRFSeries = pd.Series(secondTeamRF)\nmatches[\"Total_RF_Difference\"] = firstRFSeries.values - secondRFSeries.values \n\n#Testing \nmatches.tail(20)", "Visualisation", "#Graph for Strike Rate \nmatches.boxplot(column = 'Avg_SR_Difference', by='team1Winning', showfliers= False)\n\n#Graph for WPR Difference\nmatches.boxplot(column = 'Avg_WPR_Difference', by='team1Winning', showfliers= False)\n\n# Graph for MVP Difference\nmatches.boxplot(column = 'Total_MVP_Difference', by='team1Winning', showfliers= False)\n\n#Graph for Previous encounters Win Percentage of Team #1\nmatches.boxplot(column = 'Prev_Enc_Team1_WinPerc', by='team1Winning', showfliers= False)\n\n# Graph for Recent form(Win Percentage in the same season)\nmatches.boxplot(column = 'Total_RF_Difference', by='team1Winning', showfliers= False)", "Predictions for the data", "from sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import metrics\nfrom patsy import dmatrices\n\ny, X = dmatrices('team1Winning ~ 0 + Avg_SR_Difference + Avg_WPR_Difference + Total_MVP_Difference + Prev_Enc_Team1_WinPerc + \\\n Total_RF_Difference', matches, return_type=\"dataframe\")\ny_arr = np.ravel(y)", "Training and testing on Entire Data", "# instantiate a logistic regression model, and fit with X and y\nmodel = LogisticRegression()\nmodel = model.fit(X, y_arr)\n# check the accuracy on the training set\nprint \"Accuracy is\", model.score(X, y_arr)*100, \"%\"", "Splitting train and test using train_test_split", "# evaluate the model by splitting into train and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y_arr, random_state = 0)\n\n# Logistic Regression on train_test_split\nmodel2 = LogisticRegression()\nmodel2.fit(X_train, y_train)\n# predict class labels for the test set\npredicted = model2.predict(X_test)\n# generate evaluation metrics\nprint \"Accuracy is \", metrics.accuracy_score(y_test, predicted)*100, \"%\"\n\n# KNN Classification on train_test_split\nk_range = list(range(1, 61))\nk_score = []\nfor k in k_range:\n knn = KNeighborsClassifier(n_neighbors = k)\n knn.fit(X_train, y_train)\n y_pred = knn.predict(X_test)\n k_score.append(metrics.accuracy_score(y_test, y_pred))\nplt.plot(k_range, k_score)\n\n# Best values of k in train_test_split\nknn = KNeighborsClassifier(n_neighbors = 50)\nknn.fit(X_train, y_train)\ny_pred = knn.predict(X_test)\nprint \"Accuracy is \", metrics.accuracy_score(y_test, y_pred)*100, \"%\" ", "Splitting Training Set (2008-2013) and Test Set (2013-2015) based 
on Seasons", "#Splitting\nX_timetrain = X.loc[X.index < 398]\nY_timetrain = y.loc[y.index < 398]\nY_timetrain_arr = np.ravel(Y_timetrain)\nX_timetest = X.loc[X.index >= 398]\nY_timetest = y.loc[y.index >= 398]\nY_timetest_arr = np.ravel(Y_timetest)\n\n# Logistic Regression on time-based split sets\nmodel3 = LogisticRegression()\nmodel3.fit(X_timetrain, Y_timetrain_arr)\ntimepredicted = model3.predict(X_timetest)\nprint \"Accuracy is \", metrics.accuracy_score(Y_timetest_arr, timepredicted)*100, \"%\"\n\n# KNN Classification on time-based split sets\nk_range = list(range(1, 61))\nk_score = []\nfor k in k_range:\n knn = KNeighborsClassifier(n_neighbors = k)\n knn.fit(X_timetrain, Y_timetrain_arr)\n y_pred = knn.predict(X_timetest)\n k_score.append(metrics.accuracy_score(Y_timetest_arr, y_pred))\nplt.plot(k_range, k_score) \n\n# Best values of k in time-based split data\nknn1 = KNeighborsClassifier(n_neighbors = 31)\nknn1.fit(X_timetrain, Y_timetrain_arr)\ny_pred = knn1.predict(X_timetest)\nprint \"Accuracy is \", metrics.accuracy_score(Y_timetest_arr, y_pred)*100, \"%\"", "Support Vector Machines", "clf = svm.SVC(gamma=0.001, C=10)\nclf.fit(X_timetrain, Y_timetrain_arr)\nclf_pred = clf.predict(X_timetest)\nprint \"Accuracy is \", metrics.accuracy_score(Y_timetest_arr, clf_pred)*100, \"%\"", "Random Forests", "rfc = RandomForestClassifier(n_jobs = -1, random_state = 1)\nrfc.fit(X_timetrain, Y_timetrain_arr)\nrfc_pred = rfc.predict(X_timetest)\nprint \"Accuracy is \", metrics.accuracy_score(Y_timetest_arr, rfc_pred)*100, \"%\"\n\nfi = zip(X.columns, rfc.feature_importances_)\nprint \"Feature Importance according to Random Forests Model\\n\"\nfor i in fi:\n print i[0], \":\", i[1]", "Naive Bayes Classifier", "gclf = GaussianNB()\ngclf.fit(X_timetrain, Y_timetrain_arr)\ngclf_pred = gclf.predict(X_timetest)\nprint \"Accuracy is \", metrics.accuracy_score(Y_timetest_arr, gclf_pred) *100, \"%\"" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
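The IPL notebook above fits its classifiers one at a time on the time-based split; the same comparison can be written as a single loop over estimators. The sketch below is illustrative only: the random arrays stand in for the notebook's `X_timetrain` / `X_timetest` matrices and are not the actual IPL features.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

rng = np.random.RandomState(0)
# Hypothetical stand-ins for the notebook's time-based train/test split
X_train, y_train = rng.randn(300, 5), rng.randint(0, 2, 300)
X_test, y_test = rng.randn(100, 5), rng.randint(0, 2, 100)

models = {
    "logistic regression": LogisticRegression(),
    "knn (k=31)": KNeighborsClassifier(n_neighbors=31),
    "naive bayes": GaussianNB(),
}
for name, model in models.items():
    model.fit(X_train, y_train)
    acc = accuracy_score(y_test, model.predict(X_test))
    print("{}: accuracy {:.1f}%".format(name, acc * 100))
```

On random data the accuracies hover around 50%, which is exactly the point: any lift above that on the real features comes from the engineered differences (strike rate, WPR, MVP counts, recent form).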
quoniammm/mine-tensorflow-examples
fastAI/deeplearning1/nbs/char-rnn.ipynb
mit
[ "from theano.sandbox import cuda\ncuda.use('gpu2')\n\n%matplotlib inline\nimport utils; reload(utils)\nfrom utils import *\nfrom __future__ import division, print_function\n\nfrom keras.layers import TimeDistributed, Activation\nfrom numpy.random import choice", "Setup\nWe haven't really looked into the detail of how this works yet - so this is provided for self-study for those who are interested. We'll look at it closely next week.", "path = get_file('nietzsche.txt', origin=\"https://s3.amazonaws.com/text-datasets/nietzsche.txt\")\ntext = open(path).read().lower()\nprint('corpus length:', len(text))\n\n!tail {path} -n25\n\n#path = 'data/wiki/'\n#text = open(path+'small.txt').read().lower()\n#print('corpus length:', len(text))\n\n#text = text[0:1000000]\n\nchars = sorted(list(set(text)))\nvocab_size = len(chars)+1\nprint('total chars:', vocab_size)\n\nchars.insert(0, \"\\0\")\n\n''.join(chars[1:-6])\n\nchar_indices = dict((c, i) for i, c in enumerate(chars))\nindices_char = dict((i, c) for i, c in enumerate(chars))\n\nidx = [char_indices[c] for c in text]\n\nidx[:10]\n\n''.join(indices_char[i] for i in idx[:70])", "Preprocess and create model", "maxlen = 40\nsentences = []\nnext_chars = []\nfor i in range(0, len(idx) - maxlen+1):\n sentences.append(idx[i: i + maxlen])\n next_chars.append(idx[i+1: i+maxlen+1])\nprint('nb sequences:', len(sentences))\n\nsentences = np.concatenate([[np.array(o)] for o in sentences[:-2]])\nnext_chars = np.concatenate([[np.array(o)] for o in next_chars[:-2]])\n\nsentences.shape, next_chars.shape\n\nn_fac = 24\n\nmodel=Sequential([\n Embedding(vocab_size, n_fac, input_length=maxlen),\n LSTM(512, input_dim=n_fac,return_sequences=True, dropout_U=0.2, dropout_W=0.2,\n consume_less='gpu'),\n Dropout(0.2),\n LSTM(512, return_sequences=True, dropout_U=0.2, dropout_W=0.2,\n consume_less='gpu'),\n Dropout(0.2),\n TimeDistributed(Dense(vocab_size)),\n Activation('softmax')\n ]) \n\nmodel.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())", "Train", "def print_example():\n seed_string=\"ethics is a basic foundation of all that\"\n for i in range(320):\n x=np.array([char_indices[c] for c in seed_string[-40:]])[np.newaxis,:]\n preds = model.predict(x, verbose=0)[0][-1]\n preds = preds/np.sum(preds)\n next_char = choice(chars, p=preds)\n seed_string = seed_string + next_char\n print(seed_string)\n\nmodel.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, nb_epoch=1)\n\nprint_example()\n\nmodel.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, nb_epoch=1)\n\nprint_example()\n\nmodel.optimizer.lr=0.001\n\nmodel.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, nb_epoch=1)\n\nprint_example()\n\nmodel.optimizer.lr=0.0001\n\nmodel.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, nb_epoch=1)\n\nprint_example()\n\nmodel.save_weights('data/char_rnn.h5')\n\nmodel.optimizer.lr=0.00001\n\nmodel.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, nb_epoch=1)\n\nprint_example()\n\nmodel.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, nb_epoch=1)\n\nprint_example()\n\nprint_example()\n\nmodel.save_weights('data/char_rnn.h5')" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
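The char-rnn notebook's `print_example` helper samples the next character directly from the softmax output with `choice(chars, p=preds)`. A common refinement, not used in the notebook above, is temperature scaling, which sharpens or flattens the predicted distribution before sampling; a minimal NumPy sketch of the idea:

```python
import numpy as np

def sample_with_temperature(preds, temperature=1.0, rng=np.random):
    """Sample an index from a probability vector rescaled by a temperature.

    temperature < 1 makes the distribution more peaked (more conservative text),
    temperature > 1 flattens it (more surprising text).
    """
    preds = np.asarray(preds, dtype=np.float64)
    logits = np.log(preds + 1e-12) / temperature
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    return rng.choice(len(probs), p=probs)

# Toy example: a distribution over 4 characters
print(sample_with_temperature([0.1, 0.2, 0.3, 0.4], temperature=0.5))
```

Plugging such a function into the sampling loop in place of the plain `choice(chars, p=preds)` call is a small change, but it gives direct control over how adventurous the generated text is.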
tuanavu/coursera-university-of-washington
machine_learning/2_regression/assignment/week4/week-4-ridge-regression-assignment-1-exercise.ipynb
mit
[ "Regression Week 4: Ridge Regression (interpretation)\nIn this notebook, we will run ridge regression multiple times with different L2 penalties to see which one produces the best fit. We will revisit the example of polynomial regression as a means to see the effect of L2 regularization. In particular, we will:\n* Use a pre-built implementation of regression (GraphLab Create) to run polynomial regression\n* Use matplotlib to visualize polynomial regressions\n* Use a pre-built implementation of regression (GraphLab Create) to run polynomial regression, this time with L2 penalty\n* Use matplotlib to visualize polynomial regressions under L2 regularization\n* Choose best L2 penalty using cross-validation.\n* Assess the final fit using test data.\nWe will continue to use the House data from previous notebooks. (In the next programming assignment for this module, you will implement your own ridge regression learning algorithm using gradient descent.)\nFire up graphlab create", "import sys\nsys.path.append('C:\\Anaconda2\\envs\\dato-env\\Lib\\site-packages')\nimport graphlab", "Polynomial regression, revisited\nWe build on the material from Week 3, where we wrote the function to produce an SFrame with columns containing the powers of a given input. Copy and paste the function polynomial_sframe from Week 3:", "def polynomial_sframe(feature, degree):\n # assume that degree >= 1\n # initialize the SFrame:\n poly_sframe = graphlab.SFrame()\n # and set poly_sframe['power_1'] equal to the passed feature\n poly_sframe['power_1'] = feature\n # first check if degree > 1\n if degree > 1:\n # then loop over the remaining degrees:\n # range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree\n for power in range(2, degree+1): \n # first we'll give the column a name:\n name = 'power_' + str(power)\n # then assign poly_sframe[name] to the appropriate power of feature\n poly_sframe[name] = feature ** power\n return poly_sframe", "Let's use matplotlib to visualize what a polynomial regression looks like on the house data.", "import matplotlib.pyplot as plt\n%matplotlib inline\n\nsales = graphlab.SFrame('kc_house_data.gl/')", "As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices.", "sales = sales.sort(['sqft_living','price'])", "Let us revisit the 15th-order polynomial model using the 'sqft_living' input. Generate polynomial features up to degree 15 using polynomial_sframe() and fit a model with these features. When fitting the model, use an L2 penalty of 1e-5:", "l2_small_penalty = 1e-5", "Note: When we have so many features and so few data points, the solution can become highly numerically unstable, which can sometimes lead to strange unpredictable results. Thus, rather than using no regularization, we will introduce a tiny amount of regularization (l2_penalty=1e-5) to make the solution numerically stable. (In lecture, we discussed the fact that regularization can also help with numerical stability, and here we are seeing a practical example.)\nWith the L2 penalty specified above, fit the model and print out the learned weights.\nHint: make sure to add 'price' column to the new SFrame before calling graphlab.linear_regression.create(). 
Also, make sure GraphLab Create doesn't create its own validation set by using the option validation_set=None in this call.", "poly15_data = polynomial_sframe(sales['sqft_living'], 15) # use equivalent of `polynomial_sframe`\npoly15_features = poly15_data.column_names() # get the name of the features\npoly15_data['price'] = sales['price'] # add price to the data since it's the target\n\nmodel1 = graphlab.linear_regression.create(poly15_data, target = 'price', \n features = poly15_features, l2_penalty=l2_small_penalty,\n validation_set=None,verbose=False)\nmodel1.get(\"coefficients\")", "QUIZ QUESTION: What's the learned value for the coefficient of feature power_1?\nObserve overfitting\nRecall from Week 3 that the polynomial fit of degree 15 changed wildly whenever the data changed. In particular, when we split the sales data into four subsets and fit the model of degree 15, the result came out to be very different for each subset. The model had a high variance. We will see in a moment that ridge regression reduces such variance. But first, we must reproduce the experiment we did in Week 3.\nFirst, split the data into split the sales data into four subsets of roughly equal size and call them set_1, set_2, set_3, and set_4. Use .random_split function and make sure you set seed=0.", "(semi_split1, semi_split2) = sales.random_split(.5,seed=0)\n(set_1, set_2) = semi_split1.random_split(0.5, seed=0)\n(set_3, set_4) = semi_split2.random_split(0.5, seed=0)", "Next, fit a 15th degree polynomial on set_1, set_2, set_3, and set_4, using 'sqft_living' to predict prices. Print the weights and make a plot of the resulting model.\nHint: When calling graphlab.linear_regression.create(), use the same L2 penalty as before (i.e. l2_small_penalty). Also, make sure GraphLab Create doesn't create its own validation set by using the option validation_set = None in this call.", "def get_poly_model(set_data, l2_penalty):\n poly15_data = polynomial_sframe(set_data['sqft_living'], 15)\n poly15_features = poly15_data.column_names() # get the name of the features\n poly15_data['price'] = set_data['price'] # add price to the data since it's the target\n model15 = graphlab.linear_regression.create(poly15_data, target = 'price', features = poly15_features, \n l2_penalty=l2_penalty,\n validation_set=None,verbose=False)\n return poly15_data, model15\n \n\ndef get_coef(set_data, l2_penalty):\n poly15_data, model15 = get_poly_model(set_data, l2_penalty)\n return model15.get(\"coefficients\")\n\ndef plot_fitted_line(set_data, l2_penalty):\n poly15_data, model15 = get_poly_model(set_data, l2_penalty)\n return plt.plot(poly15_data['power_1'],poly15_data['price'],'.',\n poly15_data['power_1'], model15.predict(poly15_data),'-')\n\nset_1_coef = get_coef(set_1, l2_small_penalty)\nprint set_1_coef[set_1_coef['name'] == 'power_1']\nplot_fitted_line(set_1, l2_small_penalty)\n\nset_2_coef = get_coef(set_2, l2_small_penalty)\nprint set_2_coef[set_2_coef['name'] == 'power_1']\nplot_fitted_line(set_2, l2_small_penalty)\n\nset_3_coef = get_coef(set_3, l2_small_penalty)\nprint set_3_coef[set_3_coef['name'] == 'power_1']\nplot_fitted_line(set_3, l2_small_penalty)\n\nset_4_coef = get_coef(set_4, l2_small_penalty)\nprint set_4_coef[set_4_coef['name'] == 'power_1']\nplot_fitted_line(set_4, l2_small_penalty)", "The four curves should differ from one another a lot, as should the coefficients you learned.\nQUIZ QUESTION: For the models learned in each of these training sets, what are the smallest and largest values you learned for the coefficient 
of feature power_1? (For the purpose of answering this question, negative numbers are considered \"smaller\" than positive numbers. So -5 is smaller than -3, and -3 is smaller than 5 and so forth.)\nRidge regression comes to the rescue\nGenerally, whenever we see weights change so much in response to change in data, we believe the variance of our estimate to be large. Ridge regression aims to address this issue by penalizing \"large\" weights. (Weights of model15 looked quite small, but they are not that small because 'sqft_living' input is in the order of thousands.)\nWith the argument l2_penalty=1e5, fit a 15th-order polynomial model on set_1, set_2, set_3, and set_4. Other than the change in the l2_penalty parameter, the code should be the same as the experiment above. Also, make sure GraphLab Create doesn't create its own validation set by using the option validation_set = None in this call.", "l2_new_penalty = 1e5\n\nset_1_coef = get_coef(set_1, l2_new_penalty)\nprint set_1_coef[set_1_coef['name'] == 'power_1']\nplot_fitted_line(set_1, l2_new_penalty)\n\nset_2_coef = get_coef(set_2, l2_new_penalty)\nprint set_2_coef[set_2_coef['name'] == 'power_1']\nplot_fitted_line(set_2, l2_new_penalty)\n\nset_3_coef = get_coef(set_3, l2_new_penalty)\nprint set_3_coef[set_3_coef['name'] == 'power_1']\nplot_fitted_line(set_3, l2_new_penalty)\n\nset_4_coef = get_coef(set_4, l2_new_penalty)\nprint set_4_coef[set_4_coef['name'] == 'power_1']\nplot_fitted_line(set_4, l2_new_penalty)", "These curves should vary a lot less, now that you applied a high degree of regularization.\nQUIZ QUESTION: For the models learned with the high level of regularization in each of these training sets, what are the smallest and largest values you learned for the coefficient of feature power_1? (For the purpose of answering this question, negative numbers are considered \"smaller\" than positive numbers. So -5 is smaller than -3, and -3 is smaller than 5 and so forth.)\nSelecting an L2 penalty via cross-validation\nJust like the polynomial degree, the L2 penalty is a \"magic\" parameter we need to select. We could use the validation set approach as we did in the last module, but that approach has a major disadvantage: it leaves fewer observations available for training. Cross-validation seeks to overcome this issue by using all of the training set in a smart way.\nWe will implement a kind of cross-validation called k-fold cross-validation. The method gets its name because it involves dividing the training set into k segments of roughly equal size. Similar to the validation set method, we measure the validation error with one of the segments designated as the validation set. The major difference is that we repeat the process k times as follows:\nSet aside segment 0 as the validation set, fit a model on the rest of the data, and evaluate it on this validation set<br>\nSet aside segment 1 as the validation set, fit a model on the rest of the data, and evaluate it on this validation set<br>\n...<br>\nSet aside segment k-1 as the validation set, fit a model on the rest of the data, and evaluate it on this validation set\nAfter this process, we compute the average of the k validation errors, and use it as an estimate of the generalization error. Notice that all observations are used for both training and validation, as we iterate over segments of data. \nTo estimate the generalization error well, it is crucial to shuffle the training data before dividing them into segments. GraphLab Create has a utility function for shuffling a given SFrame. 
We reserve 10% of the data as the test set and shuffle the remainder. (Make sure to use seed=1 to get consistent answer.)", "(train_valid, test) = sales.random_split(.9, seed=1)\ntrain_valid_shuffled = graphlab.toolkits.cross_validation.shuffle(train_valid, random_seed=1)", "Once the data is shuffled, we divide it into equal segments. Each segment should receive n/k elements, where n is the number of observations in the training set and k is the number of segments. Since the segment 0 starts at index 0 and contains n/k elements, it ends at index (n/k)-1. The segment 1 starts where the segment 0 left off, at index (n/k). With n/k elements, the segment 1 ends at index (n*2/k)-1. Continuing in this fashion, we deduce that the segment i starts at index (n*i/k) and ends at (n*(i+1)/k)-1.\nWith this pattern in mind, we write a short loop that prints the starting and ending indices of each segment, just to make sure you are getting the splits right.", "n = len(train_valid_shuffled)\nk = 10 # 10-fold cross-validation\n\nfor i in xrange(k):\n start = (n*i)/k\n end = (n*(i+1))/k-1 \n print i, (start, end)", "Let us familiarize ourselves with array slicing with SFrame. To extract a continuous slice from an SFrame, use colon in square brackets. For instance, the following cell extracts rows 0 to 9 of train_valid_shuffled. Notice that the first index (0) is included in the slice but the last index (10) is omitted.", "train_valid_shuffled[0:10] # rows 0 to 9", "Now let us extract individual segments with array slicing. Consider the scenario where we group the houses in the train_valid_shuffled dataframe into k=10 segments of roughly equal size, with starting and ending indices computed as above.\nExtract the fourth segment (segment 3) and assign it to a variable called validation4.", "print len(train_valid_shuffled)\n\n# start = (n*i)/k\n# end = (n*(i+1))/k-1\n# validation4 = train_valid_shuffled[(n*3)/k : (n*(3+1))/k-1] #5818, 7757\nvalidation4 = train_valid_shuffled[5818 : 7757]", "To verify that we have the right elements extracted, run the following cell, which computes the average price of the fourth segment. When rounded to nearest whole number, the average should be $536,234.", "print int(round(validation4['price'].mean(), 0))", "After designating one of the k segments as the validation set, we train a model using the rest of the data. To choose the remainder, we slice (0:start) and (end+1:n) of the data and paste them together. SFrame has append() method that pastes together two disjoint sets of rows originating from a common dataset. For instance, the following cell pastes together the first and last two rows of the train_valid_shuffled dataframe.", "n = len(train_valid_shuffled)\nfirst_two = train_valid_shuffled[0:2]\nlast_two = train_valid_shuffled[n-2:n]\nprint first_two.append(last_two)", "Extract the remainder of the data after excluding fourth segment (segment 3) and assign the subset to train4.", "first_part = train_valid_shuffled[0:5817]\nlast_part = train_valid_shuffled[7758:]\ntrain4 = first_part.append(last_part)\nprint len(train4)", "To verify that we have the right elements extracted, run the following cell, which computes the average price of the data with fourth segment excluded. When rounded to nearest whole number, the average should be $539,450.", "print int(round(train4['price'].mean(), 0))", "Now we are ready to implement k-fold cross-validation. Write a function that computes k validation errors by designating each of the k segments as the validation set. 
It accepts as parameters (i) k, (ii) l2_penalty, (iii) dataframe, (iv) name of output column (e.g. price) and (v) list of feature names. The function returns the average validation error using k segments as validation sets.\n\nFor each i in [0, 1, ..., k-1]:\nCompute starting and ending indices of segment i and call 'start' and 'end'\nForm validation set by taking a slice (start:end+1) from the data.\nForm training set by appending slice (end+1:n) to the end of slice (0:start).\nTrain a linear model using training set just formed, with a given l2_penalty\nCompute validation error using validation set just formed", "import numpy as np\ndef k_fold_cross_validation(k, l2_penalty, data, output_name, features_list): \n rss_sum = 0\n n = len(data)\n for i in xrange(k):\n start = (n*i)/k\n end = (n*(i+1))/k-1\n validation_set = data[start:end+1]\n training_set = data[0:start].append(data[end+1:n]) \n model = graphlab.linear_regression.create(training_set, target = output_name, features = features_list, \n l2_penalty=l2_penalty,\n validation_set=None,verbose=False)\n predictions = model.predict(validation_set)\n residuals = validation_set['price'] - predictions\n rss = sum(residuals * residuals)\n rss_sum += rss\n validation_error = rss_sum / k # average = sum / size or you can use np.mean(list_of_validation_error)\n return validation_error ", "Once we have a function to compute the average validation error for a model, we can write a loop to find the model that minimizes the average validation error. Write a loop that does the following:\n* We will again be aiming to fit a 15th-order polynomial model using the sqft_living input\n* For l2_penalty in [10^1, 10^1.5, 10^2, 10^2.5, ..., 10^7] (to get this in Python, you can use this Numpy function: np.logspace(1, 7, num=13).)\n * Run 10-fold cross-validation with l2_penalty\n* Report which L2 penalty produced the lowest average validation error.\nNote: since the degree of the polynomial is now fixed to 15, to make things faster, you should generate polynomial features in advance and re-use them throughout the loop. 
Make sure to use train_valid_shuffled when generating polynomial features!", "poly_data = polynomial_sframe(train_valid_shuffled['sqft_living'], 15)\nmy_features = poly_data.column_names()\npoly_data['price'] = train_valid_shuffled['price']\n\nval_err_dict = {}\nfor l2_penalty in np.logspace(1, 7, num=13):\n val_err = k_fold_cross_validation(10, l2_penalty, poly_data, 'price', my_features) \n print l2_penalty#, val_err\n val_err_dict[l2_penalty] = val_err\nprint val_err_dict\n\nimport pprint \npprint.pprint(val_err_dict)\n\nprint min(val_err_dict.items(), key=lambda x: x[1]) \n\nmin_val = min(val_err_dict.itervalues())\nprint min_val\n\nprint min(val_err_dict, key=val_err_dict.get)", "QUIZ QUESTIONS: What is the best value for the L2 penalty according to 10-fold validation?\nYou may find it useful to plot the k-fold cross-validation errors you have obtained to better understand the behavior of the method.", "l2_penalty = graphlab.SArray(val_err_dict.keys())\nvalidation_error = graphlab.SArray(val_err_dict.values())\n\nsf = graphlab.SFrame({'l2_penalty':l2_penalty,'validation_error':validation_error})\nprint sf\n\n# Plot the l2_penalty values in the x axis and the cross-validation error in the y axis.\n# Using plt.xscale('log') will make your plot more intuitive.\n\nplt.plot(sf['l2_penalty'],sf['validation_error'],'k.')\nplt.xscale('log')", "Once you found the best value for the L2 penalty using cross-validation, it is important to retrain a final model on all of the training data using this value of l2_penalty. This way, your final model will be trained on the entire dataset.", "poly_data = polynomial_sframe(train_valid_shuffled['sqft_living'], 15)\nfeatures_list = poly_data.column_names()\npoly_data['price'] = train_valid_shuffled['price']\nl2_penalty_best = 1000.0\n\nmodel = graphlab.linear_regression.create(poly_data, target='price',\n features=features_list,\n l2_penalty=l2_penalty_best,\n validation_set=None)", "QUIZ QUESTION: Using the best L2 penalty found above, train a model using all training data. What is the RSS on the TEST data of the model you learn with this L2 penalty?", "poly_test = polynomial_sframe(test['sqft_living'], 15)\npredictions = model.predict(poly_test)\nerrors = predictions-test['price']\nrss = (errors*errors).sum()\nprint rss" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
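The penalty search in the ridge-regression notebook above is written against GraphLab Create. For readers without GraphLab, the same idea — sweep L2 penalties and keep the one with the best 10-fold cross-validation error — can be sketched with scikit-learn. This is an illustrative sketch on synthetic data, not a re-run of the assignment; the feature scaling is only there to keep the degree-15 powers numerically manageable:

```python
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

rng = np.random.RandomState(1)
sqft = np.sort(rng.uniform(500, 5000, 200))
price = 50 * sqft + rng.normal(0, 5e4, size=200)  # toy stand-in for the house data
X = (sqft / sqft.max()).reshape(-1, 1)            # scale to [0, 1] before taking powers

best_penalty, best_score = None, -np.inf
for l2_penalty in np.logspace(1, 7, num=13):
    model = make_pipeline(PolynomialFeatures(degree=15), Ridge(alpha=l2_penalty))
    # 10-fold CV scored by negative MSE, so higher is better
    score = cross_val_score(model, X, price,
                            cv=KFold(n_splits=10, shuffle=True, random_state=1),
                            scoring="neg_mean_squared_error").mean()
    if score > best_score:
        best_penalty, best_score = l2_penalty, score

print(best_penalty)
```

Note that scikit-learn's `alpha` plays the role of GraphLab's `l2_penalty`, but the data here are synthetic and the two libraries scale their objectives differently, so the selected value should not be read as the quiz answer.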
qwertzuhr/2015_Data_Analyst_Project_3
.ipynb_checkpoints/Data Analysis Project 3 - Data Wrangle OpenStreetMaps Data-checkpoint.ipynb
agpl-3.0
[ "Data Analyst Project 3\nData Wrangle (Retrieve, Analyze and Clean) OpenStreetMaps Data from the City of Dresden\nby Benjamin Söllner, benjamin.soellner@gmail.com\nbased on the Udacity.com Data Wrangling With MongoDB\n<img src=\"city_dresden_json.png\" alt=\"The city of Dresden as a JSON object illustration\" width=\"400\" height=\"312\" style=\"display: inline; margin: 6pt;\" />\nAbstract\nThis paper describes the process of downloading, analyzing and cleaning an OpenStreetMap data set of my former home town as a student: Dresden, a state capital in eastern Germany, a baroque town beautifully located on the banks of the river Elbe and home to a high-tech conglomerate from the micro-electronics sector called Silicon Saxony.\nIn this paper, first, the pipeline (and Python script) to perform retrieval, analysis and cleaning of the data is introduced (chapter The Approach) and results of the analysis stage are presented (chapter Overview of the Data). During the analysis, interesting facts about Dresden are uncovered, like the most popular religion, sport, beer, cuisine or leisure activity.\nFor the cleaning stage (chapter Problems Encountered in the Map), canonicalizing phone numbers present in the data set and unifying cuisine classifications were the challenges of choice. Some other cleaning techniques like cleaning street names and post codes were tried, but proved not fruitful. The paper is finally concluded with some further ideas for data set cleaning (chapter Other Ideas about the Data Set).\nThe Approach\nI implemented retrieving / storing / analysing and cleaning in a Python script. The script can be used like this:\n```\npython project.py\nUsage:\n python project.py -d Download & unpack bz2 file to OSM file (experimental)\n python project.py -p Process OSM file and write JSON file\n python project.py -w Write JSON file to MongoDB\n python project.py -z Download and install the zipcode helpers\"\n python project.py -f Audit format / structure of data\n python project.py -s Audit statistics of data\n python project.py -q Audit quality of data\n python project.py -Z Audit quality of data: Zipcodes - (see -z option)\n python project.py -c Clean data in MongoDB\n python project.py -C Clean data debug mode - don't actually write to DB\n```\nDifferent options can be combined, so python project.py -dpwfsqc will do the whole round trip. During the process, I re-used most of the code and data format developed during the \"Data Wrangling With MongoDB\" Udacity course. For example, the data format used for storing the data (-p and -w option) is completely based on Lesson 6 - with some fine-tuning.\nSome output of the script is shown on the terminal, some is written to local files. If a file is written, this is indicated in the terminal output. A sample of the script's terminal output is provided in the output_*.txt files included in the submission.\nData Format\nTry it out: Use python project.py -f to obtain the data for this chapter. This is a long-running process which might take a few hours to complete! There is an output file written to Project/data/audit_format_map.csv which can be beautified into an Excel spreadsheet.\n\nFirst, the data format was audited, which consisted of going through all the documents and aggregating the occurrence of any attributes and the prevalence of their types (string, integer, float and other). 
For this, batches of 1000 documents are retrieved from the collection and combed through by the Python code while a pandas DataFrame keeps track of the counters. Since there are 1,360,000 elements, this process takes many hours; an alternative would be to run the query natively in JavaScript code on the MongoDB shell or to issue the command as a BSON command.\nThe overview of the format showed no obvious big problems with the data at first glance but provided some valuable insights:\n\nOne area of improvement could be the phone number, which is scattered across multiple data fields (address:phone, phone and phone_mobile) and was identified as a potential candidate for cleaning (see Auditing Phone Numbers and Cleaning Phone Numbers).\nSome values are present in the dataset sometimes as string, other times as numeric: The XML parsing process takes care that each value is, whenever parsable, stored as integer or float. For attributes like street numbers, mixed occurrences may be in the data set.\nThis automatic parsing of int or float turned out to be not always useful: a problem is leading zeros, which in certain cases hold semantics. For German phone numbers, a leading zero signifies the start of an area code (0) or the start of a country code (00). For German postcodes, a leading zero in a postcode represents the German state of Saxony. As an outcome of this insight, I changed the parsing routine of the XML data to only parse values as numeric if they do not contain a leading zero (not s.startswith(\"0\")).\nI checked some of the less common values for sanity. E.g., there is a parameter dogshit which appears three times. As it turns out, this is not a prank by map editors who document dog feces they find in the area, but an indication of whether a public trash can contains a dispenser of plastic bags for relevant situations.\nOverview of the Data\nTry it out: Use python project.py -s to obtain the data for this chapter. See Sample Output in file Project/output_project.py_-s.txt.\nA couple of basic MongoDB queries were run to explore the data set based on the knowledge of its format from the previous chapter. The queries produce mostly rankings of values for certain data fields. Some of them are subsequently also visualized in a ggplot graph (png file), relying on the skill set gained in Udacity's Intro to Data Science course, Lesson 4: Data Visualization, although not too much effort was put into making the graphs look particularly beautiful. The graphs are located in Project/data/stats_*.png.\nFilesize, Number of Nodes and Ways\nThe total file size of the OSM export is 281,778,428 bytes; there are 208,813 nodes and 1,146,807 ways in the dataset.", "from Project.notebook_stub import project_coll\nimport pprint\n\n# Query used - see function Project.audit_stats_map.stats_general\npipeline = [\n {\"$group\": {\"_id\": \"$type\", \"count\": {\"$sum\": 1}}},\n {\"$match\": {\"_id\": {\"$in\": [\"node\", \"way\"]}}}\n ]\nl = list(project_coll.aggregate(pipeline))\npprint.pprint(l)", "Users Involved\nThere were about 1,634 users involved in creating the data set; the top 10 users account for 40% of the created data. There is no direct evidence from the user names that any of them are bot-like users. This could be determined by further research. 
Many users (over 60%) have made fewer than 10 entries.", "from Project.notebook_stub import project_coll\nimport pprint\n\n# Query used - see function: Project.audit_stats_map.stats_users(...):\npipeline = [\n {\"$match\": {\"created.user\": {\"$exists\": True}}},\n {\"$group\": {\"_id\": \"$created.user\", \"count\": {\"$sum\": 1}}},\n {\"$sort\": {\"count\": -1}}\n ]\nl = list(project_coll.aggregate(pipeline))\nprint str(len(l)) + \" users were involved:\"\npprint.pprint(l[1:5]+[\"...\"]+l[-5:])", "Types of Amenities\nThe attribute amenity inspired me to do further research into which kinds of buildings / objects / facilities are stored in the Open Street Map data in larger quantities in order to do more detailed research on those objects. Especially Restaurants, Pubs and Churches / Places of Worship were investigated further (as can be seen below).", "from Project.notebook_stub import project_coll\nimport pprint\n\n# Query used - see function: Project.audit_stats_map.stats_amenities(...):\npipeline = [\n {\"$match\": {\"amenity\": {\"$exists\": True}}},\n {\"$group\": {\"_id\": \"$amenity\", \"count\": {\"$sum\": 1}}},\n {\"$sort\": {\"count\": -1}}\n ]\nl = list(project_coll.aggregate(pipeline))\npprint.pprint(l[1:10]+['...'])", "Popular Leisure Activities\nThe attribute leisure shows the types of leisure activities one can do in Dresden and inspired me to investigate popular sports in the city more closely (leisure=sports_center or leisure=stadium).", "from Project.notebook_stub import project_coll\nimport pprint\n\n# Query used - see function: Project.audit_stats_map.stats_amenities(...):\npipeline = [\n {\"$match\": {\"leisure\": {\"$exists\": True}}},\n {\"$group\": {\"_id\": \"$leisure\", \"count\": {\"$sum\": 1}}},\n {\"$sort\": {\"count\": -1}}\n ]\nl = list(project_coll.aggregate(pipeline))\npprint.pprint(l[1:10]+['...'])", "Religions in Places of Worship\nGrouping and sorting by the occurrences of the religion attribute for all amenities classified as place_of_worship or community_center gives us an indication of how prevalent religions are in our city: obviously, christian is the most prevalent here.", "from Project.notebook_stub import project_coll\nimport pprint\n\n# Query used - see function: Project.audit_stats_map.stats_religions(...):\npipeline = [\n {\"$match\": {\"amenity\":{\"$in\": [\"place_of_worship\",\"community_center\"]}}},\n {\"$group\": {\"_id\": \"$religion\", \"count\": {\"$sum\": 1}}},\n {\"$sort\": {\"count\": -1}}\n ]\nl = list(project_coll.aggregate(pipeline))\npprint.pprint(l)", "Cuisines in Restaurants\nWe can list the types of cuisines in restaurants (elements with attribute amenity matching restaurant) and sort them in descending order. We can notice certain inconsistencies or overlaps in the classifications of this data: e.g., a kebab cuisine may very well also be classified as an arab cuisine or may, in fact, be a sub- or super-classification of this cuisine. 
One could, e.g., eliminate or cluster together especially occurrences of cuisines which are less common, but without a formal taxonomy of all cuisines, I decided that it is probably best to leave the data as-is in order not to sacrifice preciseness for consistency.", "from Project.notebook_stub import project_coll\nimport pprint\n\n# Query used - see function: Project.audit_stats_map.stats_cuisines(...):\npipeline = [\n {\"$match\": {\"amenity\": \"restaurant\"}},\n {\"$group\": {\"_id\": \"$cuisine\", \"count\": {\"$sum\": 1}}},\n {\"$sort\": {\"count\": -1}}\n ]\nl = list(project_coll.aggregate(pipeline))\npprint.pprint(l[1:10]+['...'])", "Beers in Pubs\nGermans do love their beers and the dataset shows that certain pubs, restaurants or bars are sponsored by certain beer brands (often advertised at the pub's entrance). We can analyze the prevalence of beer brands by grouping and sorting by occurrence of the attribute brewery for all amenities classified as such establishments. Most popular are Radeberger, a very popular local beer, Feldschlösschen, a Swiss beer, and Dresdner Felsenkeller, a very local niche beer.", "from Project.notebook_stub import project_coll\nimport pprint\n\n# Query used - see function: Project.audit_stats_map.stats_beers(...):\npipeline = [\n {\"$match\": {\"amenity\": {\"$in\":[\"pub\",\"bar\",\"restaurant\"]}}},\n {\"$group\": {\"_id\": \"$brewery\", \"count\": {\"$sum\": 1}}},\n {\"$sort\": {\"count\": -1}}\n ]\nl = list(project_coll.aggregate(pipeline))\npprint.pprint(l)", "Popular Sports\nTo investigate which sports are popular, we can group and sort by the (occurrence of the) sport attribute for all elements classified as sports_centre or stadium in their leisure attribute. Unsurprisingly for a German city, we notice that 9pin (bowling) and soccer are the most popular sports, followed by climbing, an activity very much enjoyed by people in Dresden, presumably because of the nearby sandstone mountains of the national park Sächsische Schweiz.", "from Project.notebook_stub import project_coll\nimport pprint\n\n# Query used - see function: Project.audit_stats_map.stats_sports(...):\npipeline = [\n {\"$match\": {\"leisure\": {\"$in\": [\"sports_centre\",\"stadium\"]}}},\n {\"$group\": {\"_id\": \"$sport\", \"count\": {\"$sum\": 1}}},\n {\"$sort\": {\"count\": -1}}\n ]\nl = list(project_coll.aggregate(pipeline))\npprint.pprint(l[1:5]+['...'])", "Where to Dance in Dresden\nI am a passionate social dancer, so a list of dance schools in Dresden should not be absent from this investigation. We can quickly grab all elements which have the leisure attribute set to dance.", "from Project.notebook_stub import project_coll\nimport pprint\n\n# Query used - see function: Project.audit_stats_map.stats_dances(...):\nl = list(project_coll.distinct(\"name\", {\"leisure\": \"dance\"}))\npprint.pprint(l[1:10]+['...'])", "Problems Encountered in the Map / Data Quality\nTry it out: Use python project.py -q to obtain the data from this chapter. See Sample Output in file Project/output_project.py_-q.txt. The script also writes a CSV file to Project/data/audit_buildings.csv, which is also beautified into an Excel file.\nLeading Zeros\nAs already discussed, during the parsing stage, we are using an optimistic approach of parsing any numerical value as integer or float if it is parsable as such. However, we noticed that we should not do this if leading zeros are present, as those hold semantics for phone numbers and zip codes. 
For all other values, this optimistic parsing gives us a much smaller representation of the data in MongoDB and in memory.\nNormalizing / Cleaning Cuisines\nAs hinted at in the section Cuisines in Restaurants, the classification of cuisines is inconsistent. There are two problems with this value:\n\nThere are multiple values separated by ';' which makes the parameter hard to parse. We overcome this by creating a parameter cuisineTags which stores the cuisine classifications as an array:\n\npython\n db.eval('''db.osmnodes.find({\n \"cuisine\": {\"$exists\": true},\n \"amenity\": \"restaurant\"\n }).snapshot().forEach(function(val, idx) {\n val.cuisineTags = val.cuisine.split(';');\n db.osmnodes.save(val)\n })\n ''')\n\nSome values are inconsistently used; therefore, we unify them with a mapping table and a subsequent MongoDB update:\n\n```python\n cuisines_synonyms = {\n 'german': ['regional', 'schnitzel', 'buschenschank'],\n 'portuguese': ['Portugiesisches_Restaurant_&_Weinbar'],\n 'italian': ['pizza', 'pasta'],\n 'mediterranean': ['fish', 'seafood'],\n 'japanese': ['sushi'],\n 'turkish': ['kebab'],\n 'american': ['steak_house']\n }\n # not mapped:\n # greek, asian, chinese, indian, international, vietnamese, thai, spanish, arabic\n # sudanese, russian, korean, hungarian, syrian, vegan, soup, croatian, african\n # balkan, mexican, french, cuban, lebanese\nfor target in cuisines_synonyms:\n db.osmnodes.update( {\n \"cuisine\": {\"$exists\": True},\n \"amenity\": \"restaurant\",\n \"cuisineTags\": {\"$in\": cuisines_synonyms[target]}\n }, {\n \"$pullAll\": { \"cuisineTags\": cuisines_synonyms[target] },\n \"$addToSet\": { \"cuisineTags\": [ target ] }\n }, multi=False )\n ```\nThis allows us to convert a restaurant with the MongoDB representation\n{..., \"cuisine\": \"pizza;kebab\", ...} \nto the alternative representation\n{..., \"cuisine\": \"pizza;kebab\", \"cuisineTags\": [\"italian\", \"turkish\"], ...}\nAuditing Phone Numbers\nPhone numbers are scattered over different attributes (address.phone, phone and mobile_phone) and come in different styles of formatting (like +49 351 123 45 vs. 0049-351-12345). First, we retrieve a list of all phone numbers. Since the goal is to later store the normalized phone number back into the attribute phone, this value has to be read first, and only if it is empty should mobile_phone or address.phone be used.", "from Project.notebook_stub import project_coll\n\n# Query used - see function: Project.audit_quality_map.audit_phone_numbers(...):\npipeline = [\n {\"$match\": {\"$or\": [\n {\"phone\": {\"$exists\": True}},\n {\"mobile_phone\": {\"$exists\": True}},\n {\"address.phone\": {\"$exists\": True}}\n ]}},\n {\"$project\": {\n \"_id\": 1,\n \"phone\": {\"$ifNull\": [\"$phone\", {\"$ifNull\": [\"$mobile_phone\", \"$address.phone\"]}]}\n }}\n ]\nl = project_coll.aggregate(pipeline)\n\n# Output too long... See the file Project/output_project.py_-q.txt", "Cleaning Phone Numbers\nTry it out: Use python project.py -C to clean in debug mode. See Sample Output in file Project/output_project.py_-C.txt. 
The script also writes a CSV file to Project/data/clean_phones.csv, which is also beautified into an Excel File.\nCleaning the phone numbers involves:\n* unifying the different phone attributes (phone, address.phone and mobile_phone) - this is already taken care of by extracting the phone numbers during the audit stage\n* if possible, canonicalizing the phone number notations by parsing them using a regular expression:\npython\nphone_regex = re.compile(ur'^(\\(?([\\+|\\*]|00) *(?P&lt;country&gt;[1-9][0-9]*)\\)?)?' + # country code\n ur'[ \\/\\-\\.]*\\(?0?\\)?[ \\/\\-\\.]*' + # separator\n ur'(\\(0?(?P&lt;area1&gt;[1-9][0-9 ]*)\\)|0?(?P&lt;area2&gt;[1-9][0-9]*))?' + # area code\n ur'[ \\/\\-\\.]*' + # separator\n ur'(?P&lt;number&gt;([0-9]+ *[\\/\\-.]? *)*)$', # number\n re.UNICODE)\nThe regular expression is resilient to various separators (\"/\", \"-\", \" \", \"(0)\") and bracket notation of phone numbers. It is not resilient to some unicode characters or to written lists of phone numbers which are designed to be interpreted by humans (using separators like \",\", \"/-\" or \"oder\", lit. or). During the cleaning stage, an output is written listing which phone numbers could not be parsed. This contains only a tiny fraction of the phone numbers (9, or 0.5%), which would be easily cleanable by hand.\nThe following objects couldn't be parsed:\n normalized\n55f57294b1c8a72c34523897 +49 35207 81429 or 81469\n55f57299b1c8a72c345272cd +49 351 8386837, +49 176 67032256\n55f572c2b1c8a72c34546689 0351 4810426\n55f572c3b1c8a72c34546829 +49 351 8902284 or 2525375\n55f572fdb1c8a72c34574963 +49 351 4706625, +49 351 0350602\n55f573bdb1c8a72c3460bdb3 +49 351 87?44?44?00\n55f573bdb1c8a72c3460c066 0162 2648953, 0162 2439168\n55f573edb1c8a72c346304b1 03512038973, 03512015831\n55f5740eb1c8a72c34649008 0351 4455193 / -118\nIf the phone number was parsable, the country code, area code and the rest of the phone number are separated and subsequently strung together into a canonical form. The data to be transformed is stored in a Pandas DataFrame. By using the option -C instead of -c, the execution of the transformation can be suppressed and the DataFrame instead be written to a CSV file, which might be further beautified into an Excel File in order to test or debug the transformation before writing it to the database with the -c option.
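As an illustration of that last step, here is a minimal sketch of how the named groups of the regular expression above could be strung together into a canonical notation. It reuses the compiled phone_regex from above; the helper name normalize_phone and the choice of +49 as the default country code are my own assumptions, not the project's actual implementation:

```python
def normalize_phone(raw, default_country='49'):
    """Canonicalize a phone number using the named groups of phone_regex."""
    match = phone_regex.match(raw.strip())
    if match is None:
        return None  # left for manual cleaning, as described above
    country = match.group('country') or default_country  # assume Germany if absent
    area = (match.group('area1') or match.group('area2') or '').replace(' ', '')
    number = ''.join(ch for ch in match.group('number') if ch.isdigit())
    parts = ['+' + country, area, number]
    return ' '.join(p for p in parts if p)

# e.g. normalize_phone('0049-351-12345') and normalize_phone('+49 351 123 45')
# should both yield '+49 351 12345'
```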
\n\nAuditing Street Names (Spoiler Alert: No Cleaning Necessary)\nAuditing the map's street names analogous to how it was done in the Data Wrangling course was done as follows: Check whether 'weird' street names occur which do not end in a suffix like street (in German -straße or Straße, depending on whether it is a compound word or not). It is assumed that, in that case, they would most likely end in an abbreviation like str.. For this we use a regular expression querying all streets <u>not</u> ending with a particular suffix like [Ss]traße (street), [Ww]eg (way) etc. This is accomplished by a chain of \"negative lookbehind\" expressions ((?&lt;!...)) which must all in sequence evaluate to \"true\" in order to flag a street name as non-conforming.", "from Project.notebook_stub import project_coll\n\n# Query used - see function: Project.audit_quality_map.audit_streets(...):\nexpectedStreetPattern = \\\n u\"^.*(?<![Ss]tra\\u00dfe)(?<![Ww]eg)(?<![Aa]llee)(?<![Rr]ing)(?<![Bb]erg)\" + \\\n u\"(?<![Pp]ark)(?<![Hh]\\u00f6he)(?<![Pp]latz)(?<![Bb]r\\u00fccke)(?<![Gg]rund)$\"\nl = list(project_coll.distinct(\"name\", {\n \"type\": \"way\",\n \"name\": {\"$regex\": expectedStreetPattern}\n }))\n# Output too long... See the file Project/output_project.py_-q.txt", "Skimming through the list, it was noticeable that the nature of the German language (and how street names work in Germany) results in the fact that there are many small places without a suffix like \"street\" but with \"their own thing\" (like Am Hang lit. 'At The Slope', Beerenhut lit. 'Berry Hat', Im Grunde lit. 'In The Ground'). The street names can therefore not be processed just by looking at the suffixes - I tried something different...\nCross Auditing Street Names with Street Addresses (Spoiler Alert: No Cleaning Necessary)\nI did not want to trust the street names of the data set fully yet. Next, I tried figuring out if street names of buildings were consistent with street names of objects in close proximity. Therefore, a JavaScript query is run directly on the database server returning all buildings together with the nearby objects having an address.street parameter. This should allow us to cross-audit whether objects in close proximity do have the same street names.", "from Project.notebook_stub import project_db\n\n# Query used - see function: Project.audit_quality_map.audit_buildings(...):\nbuildings_with_streets = project_db.eval('''\n db.osmnodes.ensureIndex({pos:\"2dsphere\"});\n result = [];\n db.osmnodes.find(\n {\"building\": {\"$exists\": true}, \"address.street\": {\"$exists\": true}, \"pos\": {\"$exists\": true}},\n {\"address.street\": \"\", \"pos\": \"\"}\n ).forEach(function(val, idx) {\n val.nearby = db.osmnodes.distinct(\"address.street\",\n {\"_id\": {\"$ne\": val._id}, \"pos\": {\"$near\": {\"$geometry\": {\"type\": \"Point\", \"coordinates\": val.pos}, \"$maxDistance\": 50, \"$minDistance\": 0}}}\n );\n result.push(val);\n })\n return result;\n ''')\n\n# Output too long... See the file Project/output_project.py_-q.txt", "The resulting objects are then iterated through, and the best and worst fitting nearby street names are each identified using the Levenshtein distance. For each object, a row is created in a DataFrame which is subsequently exported to a csv file Project/data/audit_buildings.csv that was manually beautified into an Excel File. \n\nAs can be seen, street names of nearby objects mostly match those of the building itself (Levenshtein distance is zero). If they deviate greatly, they are totally different street names in the same area and not just \"typos\" or non-conforming abbreviations.
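To make the comparison step concrete, here is a minimal sketch of how each building's own street name could be scored against its nearby street names. The small edit-distance helper and the way result rows are collected are my own illustration; the project itself uses a Levenshtein library and pandas, as described above:

```python
def edit_distance(a, b):
    """Plain Levenshtein distance, small enough to inline for illustration."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                  # deletion
                           cur[j - 1] + 1,               # insertion
                           prev[j - 1] + (ca != cb)))    # substitution
        prev = cur
    return prev[-1]

rows = []
for val in buildings_with_streets:
    own = val['address']['street']
    nearby = val.get('nearby', [])
    if not nearby:
        continue
    distances = sorted(edit_distance(own, other) for other in nearby)
    rows.append({'street': own, 'best': distances[0], 'worst': distances[-1]})
```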
\nAuditing Zip Codes (Spoiler Alert: No Cleaning Necessary)\nTry it out: Use python project.py -Z which runs the auditing script for zipcodes. See Sample Output in file Project/output_project.py_-Z.txt. To be able to run this script correctly, the zipcode data from Geonames.org needs to be downloaded and installed first using the -z option (see output in `Project/output_project.py_-Z.txt).\nThis part of the auditing process makes use of an additional data set from Geonames.org to resolve and audit the zip codes in the data set. During the \"installation process\" (option -z) the zipcode data (provided as a tab-separated file) is downloaded and, line-by-line, stored to a (separate) MongoDB collection. However, we are only interested in the columns \"zipcode\" (2) and \"place\" (3).\nDuring the auditing stage (option -Z) we first get a list of all used zipcodes using the following query:\npython\npipeline = [\n { \"$match\": {\"address.postcode\": {\"$exists\": 1}} },\n { \"$group\": {\"_id\": \"$address.postcode\", \"count\": {\"$sum\": 1}} },\n { \"$sort\": {\"count\": 1} }\n ]\nThe zipcodes are then all looked up in the zipcode collection using the $in-operator. The data obtained is joined back into the original result.\npython\nzipcodeObjects = zipcodeColl.find( {\"zipcode\": {\"$in\": [z[\"_id\"] for z in zipcodeList]}} )\nThe following output shows that the lesser-used zipcodes are from the Dresden metropolitan area, not Dresden itself:", "from Project.audit_zipcode_map import audit_zipcode_map\nfrom Project.notebook_stub import project_server, project_port\nimport pprint\n\nzipcodeJoined = audit_zipcode_map(project_server, project_port, quiet=True)\npprint.pprint(zipcodeJoined[1:10]+['...'])", "Other Ideas about the Data Set\nAuditing City Names for Correctness and Completeness\nThe Geonames.org data could help us to validate the entered city names or add them where missing. One could compare the address.city attribute of any OSM element with all of the 4 hierarchical names of the Geonames.org document which belongs to the zipcode referred to by address.postcode.\n\nIf no address.city is present in the OSM element at all, the lowermost value in the Geonames.org hierarchy could be added and the data therefore enhanced.\nIf the value of the OSM element does not match any name of the Geonames.org data, the element could be flagged for manual processing.\n\nCost: Relatively easily implementable, but out of scope for this project. We should, however, strive to implement the related query in native BSON code in order to not hit the database with a separate zipcode-to-Geonames-element mapping request for each OSM element.\nBenefit: Potentially high, depending on how many cities are not entered at all (quick win) or entered correctly (some additional manual work required).
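A minimal client-side sketch of what such a check could look like is given below. It assumes the zipcode collection created by the -z installation step (with the fields zipcode and place described above) and the osmnodes collection; the function and variable names are mine, and the project does not currently implement this:

```python
from Project.notebook_stub import project_coll  # the osmnodes collection

def audit_city_names(project_coll, zipcode_coll):
    """Compare address.city against the Geonames place for its postcode.

    zipcode_coll is a hypothetical handle to the Geonames collection
    installed with the -z option.
    """
    enhanced, flagged = [], []
    query = {"address.postcode": {"$exists": True}}
    for node in project_coll.find(query, {"address.city": 1, "address.postcode": 1}):
        address = node.get("address", {})
        geo = zipcode_coll.find_one({"zipcode": address.get("postcode")})
        if geo is None:
            continue  # postcode unknown to Geonames -> a separate problem
        city = address.get("city")
        if city is None:
            enhanced.append((node["_id"], geo["place"]))       # value could be added
        elif city != geo["place"]:
            flagged.append((node["_id"], city, geo["place"]))  # flag for manual review
    return enhanced, flagged
```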
\nCuisine Taxonomy\nThe taxonomy of cuisines could be further formalized to contain super- and subsets of cuisines (e.g. each \"italian\" cuisine is also an \"international\" cuisine). With domain knowledge, coarsely classified restaurants could potentially also be sub-classified.\nCost: High. The creation of a proper cuisine taxonomy would require substantial knowledge of the subject matter of cuisines and the subtle differences in culinary art. Also, rather than a tree-based classification, some \"fusion\" kitchens might overlap: any simplification or unification we carry out here comes at the cost of sacrificing detail.\nBenefit: Medium-high in certain use cases; a higher number of restaurants with a certain classification lets us better find a restaurant to our taste and compare various restaurants with each other. \nOther Open Questions\nOverall, the data set of Dresden is pretty neat and tidy. Compared to other, larger cities (e.g., in India), I might have had an easier job. Further open questions or ideas (out of scope for this report) include:\n\nThe users might be analyzed further: Why are so many nodes (many thousands) created by so few users? Are bots at work? Why are so many users only contributing with very few edits? (Maybe gamification - leaderboards for who has the most edits during the past week - would help.)\nOne could audit for completeness by parsing several sample websites for addresses and trying to retrieve those addresses in the Open Street Map data.\nOne could feed the normalized phone data back into Open Street Map by either using a Web Service or using the XML format.\n\nReferences\n\nProject Rubric\nSample Project\nLesson & Problem Set 6 of Udacity's Data Wrangling with OpenDB Class\nProject Evaluation & Submission\nPython CSV Reader Documentation\nPython ElementTree Documentation\nMongoDB Aggregation Framework Operators\nMongoDB: Indexes\nRegex Lookarounds\nMongoDB University\nBZip2 Module\nMapZen Metro Extracts\nMongoDB Extended JSON\nRetrieving URLs\nUsing the Levenshtein distance\nZipcode Helper from Geonames.org (cc-by)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
davidzchen/tensorflow
tensorflow/lite/g3doc/performance/post_training_integer_quant_16x8.ipynb
apache-2.0
[ "Copyright 2020 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Post-training integer quantization with int16 activations\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/lite/performance/post_training_quant_16x8\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/post_training_quant_16x8.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/post_training_quant_16x8.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/tensorflow/lite/g3doc/performance/post_training_quant_16x8.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nOverview\nTensorFlow Lite now supports\nconverting activations to 16-bit integer values and weights to 8-bit integer values during model conversion from TensorFlow to TensorFlow Lite's flat buffer format. We refer to this mode as the \"16x8 quantization mode\". This mode can improve accuracy of the quantized model significantly, when activations are sensitive to the quantization, while still achieving almost 3-4x reduction in model size. Moreover, this fully quantized model can be consumed by integer-only hardware accelerators. \nSome examples of models that benefit from this mode of the post-training quantization include: \n* super-resolution, \n* audio signal processing such\nas noise cancelling and beamforming, \n* image de-noising, \n* HDR reconstruction\nfrom a single image\nIn this tutorial, you train an MNIST model from scratch, check its accuracy in TensorFlow, and then convert the model into a Tensorflow Lite flatbuffer using this mode. At the end you check the accuracy of the converted model and compare it to the original float32 model. 
Note that this example demonstrates the usage of this mode and doesn't show benefits over other available quantization techniques in TensorFlow Lite.\nBuild an MNIST model\nSetup", "import logging\nlogging.getLogger(\"tensorflow\").setLevel(logging.DEBUG)\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport pathlib", "Check that the 16x8 quantization mode is available", "tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8", "Train and export the model", "# Load MNIST dataset\nmnist = keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# Normalize the input image so that each pixel value is between 0 to 1.\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n# Define the model architecture\nmodel = keras.Sequential([\n keras.layers.InputLayer(input_shape=(28, 28)),\n keras.layers.Reshape(target_shape=(28, 28, 1)),\n keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.Flatten(),\n keras.layers.Dense(10)\n])\n\n# Train the digit classification model\nmodel.compile(optimizer='adam',\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\nmodel.fit(\n train_images,\n train_labels,\n epochs=1,\n validation_data=(test_images, test_labels)\n)", "For the example, you trained the model for just a single epoch, so it only trains to ~96% accuracy.\nConvert to a TensorFlow Lite model\nUsing the Python TFLiteConverter, you can now convert the trained model into a TensorFlow Lite model.\nNow, convert the model using TFliteConverter into default float32 format:", "converter = tf.lite.TFLiteConverter.from_keras_model(model)\ntflite_model = converter.convert()", "Write it out to a .tflite file:", "tflite_models_dir = pathlib.Path(\"/tmp/mnist_tflite_models/\")\ntflite_models_dir.mkdir(exist_ok=True, parents=True)\n\ntflite_model_file = tflite_models_dir/\"mnist_model.tflite\"\ntflite_model_file.write_bytes(tflite_model)", "To instead quantize the model to 16x8 quantization mode, first set the optimizations flag to use default optimizations. Then specify that 16x8 quantization mode is the required supported operation in the target specification:", "converter.optimizations = [tf.lite.Optimize.DEFAULT]\nconverter.target_spec.supported_ops = [tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8]", "As in the case of int8 post-training quantization, it is possible to produce a fully integer quantized model by setting converter options inference_input(output)_type to tf.int16.\nSet the calibration data:", "mnist_train, _ = tf.keras.datasets.mnist.load_data()\nimages = tf.cast(mnist_train[0], tf.float32) / 255.0\nmnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1)\ndef representative_data_gen():\n for input_value in mnist_ds.take(100):\n # Model has only one input so each data point has one element.\n yield [input_value]\nconverter.representative_dataset = representative_data_gen", "Finally, convert the model as usual. 
Note, by default the converted model will still use float input and outputs for invocation convenience.", "tflite_16x8_model = converter.convert()\ntflite_model_16x8_file = tflite_models_dir/\"mnist_model_quant_16x8.tflite\"\ntflite_model_16x8_file.write_bytes(tflite_16x8_model)", "Note how the resulting file is approximately 1/3 the size.", "!ls -lh {tflite_models_dir}", "Run the TensorFlow Lite models\nRun the TensorFlow Lite model using the Python TensorFlow Lite Interpreter.\nLoad the model into the interpreters", "interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))\ninterpreter.allocate_tensors()\n\ninterpreter_16x8 = tf.lite.Interpreter(model_path=str(tflite_model_16x8_file))\ninterpreter_16x8.allocate_tensors()", "Test the models on one image", "test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)\n\ninput_index = interpreter.get_input_details()[0][\"index\"]\noutput_index = interpreter.get_output_details()[0][\"index\"]\n\ninterpreter.set_tensor(input_index, test_image)\ninterpreter.invoke()\npredictions = interpreter.get_tensor(output_index)\n\nimport matplotlib.pylab as plt\n\nplt.imshow(test_images[0])\ntemplate = \"True:{true}, predicted:{predict}\"\n_ = plt.title(template.format(true= str(test_labels[0]),\n predict=str(np.argmax(predictions[0]))))\nplt.grid(False)\n\ntest_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)\n\ninput_index = interpreter_16x8.get_input_details()[0][\"index\"]\noutput_index = interpreter_16x8.get_output_details()[0][\"index\"]\n\ninterpreter_16x8.set_tensor(input_index, test_image)\ninterpreter_16x8.invoke()\npredictions = interpreter_16x8.get_tensor(output_index)\n\nplt.imshow(test_images[0])\ntemplate = \"True:{true}, predicted:{predict}\"\n_ = plt.title(template.format(true= str(test_labels[0]),\n predict=str(np.argmax(predictions[0]))))\nplt.grid(False)", "Evaluate the models", "# A helper function to evaluate the TF Lite model using \"test\" dataset.\ndef evaluate_model(interpreter):\n input_index = interpreter.get_input_details()[0][\"index\"]\n output_index = interpreter.get_output_details()[0][\"index\"]\n\n # Run predictions on every image in the \"test\" dataset.\n prediction_digits = []\n for test_image in test_images:\n # Pre-processing: add batch dimension and convert to float32 to match with\n # the model's input data format.\n test_image = np.expand_dims(test_image, axis=0).astype(np.float32)\n interpreter.set_tensor(input_index, test_image)\n\n # Run inference.\n interpreter.invoke()\n\n # Post-processing: remove batch dimension and find the digit with highest\n # probability.\n output = interpreter.tensor(output_index)\n digit = np.argmax(output()[0])\n prediction_digits.append(digit)\n\n # Compare prediction results with ground truth labels to calculate accuracy.\n accurate_count = 0\n for index in range(len(prediction_digits)):\n if prediction_digits[index] == test_labels[index]:\n accurate_count += 1\n accuracy = accurate_count * 1.0 / len(prediction_digits)\n\n return accuracy\n\nprint(evaluate_model(interpreter))", "Repeat the evaluation on the 16x8 quantized model:", "# NOTE: This quantization mode is an experimental post-training mode,\n# it does not have any optimized kernels implementations or\n# specialized machine learning hardware accelerators. Therefore,\n# it could be slower than the float interpreter.\nprint(evaluate_model(interpreter_16x8))", "In this example, you have quantized a model to 16x8 with no difference in the accuracy, but with the 3x reduced size." ]
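As a follow-up to the note earlier in this notebook about fully integer models: the sketch below shows how the converter options mentioned there could be set so that the model's inputs and outputs are int16 as well. This is an untested illustration that simply restates those options; the variable names mirror the ones used in the tutorial:

```python
# A sketch of the fully integer-quantized variant (16-bit activations and I/O).
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]
converter.representative_dataset = representative_data_gen
# The options referred to in the text above; they make the runtime interface
# integer-only instead of the default float32 inputs and outputs.
converter.inference_input_type = tf.int16
converter.inference_output_type = tf.int16
tflite_16x8_int_io_model = converter.convert()
```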
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
fraserw/PyMOP
tutorial/trippytutorial.ipynb
gpl-2.0
[ "TRIPPy examples\nIntroduction: SExtractor and emcee\nTo perform photometry and source subtraction, in addition to having a good PSF (which trippy will generate) one needs three very important parameters: x, y, and m, or source position and amplitude. \nWhen one has the PSF and TSF already generated, one can run a fitting routine to solve for these. For this purpose, we use emcee. emcee is an MCMC routine which allows for good estimates of (x,y,m) and their uncertainties. We use a likelihood definition as the natural log likelihood of the exponential flux, basically exactly what you'd expect. If you are uncertain of what this means, or care for more detail, please go read the emcee documentation.\nIf the PSF or TSF is not yet known, to get a centroid (x,y), we need to use some other software. We haven't included this inside trippy because there is no point in reinventing a wheel that has already been nearly perfected. For this purpose, we use the venerable SExtractor. All jokes on its name aside, sextractor does exactly what we need, as well as we would ever need it to be done. \nTrippy includes a module trippy.scamp with functions defined in scamp.py and makeParFiles.py that mearly provide convenient wrappers to call sextractor. This has been done in a couple other packages, but not in a way that satisfies me. Hence my own implementation. A couple details to note: makeParFiles creates all the parameter files in the working directory (eg. makeParFiles.writeConv()), and scamp is responsible for sextractor execution and catalog reading (scamp.runSex() and scamp.getCatalog). Catalogs are stored in FITS_LDAC format. This choice was done to facilitate execution of the sextractor sister program scamp, though we won't need to know what that means for full use of trippy. If you are unfamiliar with sextractor and its use, don't adopt trippy as a blackbox. RTFM!\nWith that out of the way, on to actual business.\nThe trippy tutorial\nThe first thing to do is import all the necessary packages. Note that this notebook assumes you have the optional packages installed, as well as SExtractor available on your command line.\nNOTE: proper use of psfStarChooser requires plot interaction. So for this tutorial you'd best comment out the first line, %matplotlib inline. But for my web presentation, I leave inline.", "#%matplotlib inline\nimport numpy as num, astropy.io.fits as pyf,pylab as pyl\nfrom trippy import psf, pill, psfStarChooser\nfrom trippy import scamp,MCMCfit\nimport scipy as sci\nfrom os import path\nimport os\nfrom astropy.visualization import interval, ZScaleInterval", "The function trim catalog is a convenience function to simply return only those sources that are well enough isolated for PSF generation. It rejects any sources within 30 pixels of another source, any sources with peak pixel above 70,000, and any sources that sextractor has flagged for what ever reason. We may fold this into psfStarChooser in the future.", "def trimCatalog(cat):\n good=[]\n for i in range(len(cat['XWIN_IMAGE'])):\n try:\n a = int(cat['XWIN_IMAGE'][i])\n b = int(cat['YWIN_IMAGE'][i])\n m = num.max(data[b-4:b+5,a-4:a+5])\n except: pass\n dist = num.sort(((cat['XWIN_IMAGE']-cat['XWIN_IMAGE'][i])**2+(cat['YWIN_IMAGE']-cat['YWIN_IMAGE'][i])**2)**0.5)\n d = dist[1]\n if cat['FLAGS'][i]==0 and d>30 and m<70000:\n good.append(i)\n good=num.array(good)\n outcat = {}\n for i in cat:\n outcat[i] = cat[i][good]\n return outcat", "Get the image this tutorial assumes you have. 
If wget fails then you are likely on a mac, and should just download it manually", "inputFile='Polonskaya.fits'\nif not path.isfile(inputFile):\n os.system('wget -O Polonskaya.fits http://www.canfar.phys.uvic.ca/vospace/nodes/fraserw/Polonskaya.fits?view=data')\nelse:\n print(\"We already have the file.\")\n ", "First load the fits image and get out the header, data, and exposure time.", "with pyf.open(inputFile) as han:\n data = han[0].data\n header = han[0].header\n EXPTIME = header['EXPTIME']", "Next run sextractor on the images, and use trimCatalog to create a trimmed-down list of isolated sources. \nmakeParFiles handles the creation of all the sextractor files, including the .sex file which we call example.sex, the default.conv, the param file which is saved as def.param.\n.runSex creates example.cat which is read by .getCatalog. getCatalog takes as input the catalog name and the parameter file \"def.param\".\nThe parameters that are actually used by psfStarChooser and psf.genLookupTable are XWIN_IMAGE, YWIN_IMAGE, FLUX_AUTO, and FLUXERR_AUTO, which are the x,y coordinates, the flux, and the flux uncertainty estimate respectively. The latter two are used in the SNR cut that psfStarChooser makes.", "scamp.makeParFiles.writeSex('example.sex',\n minArea=3.,\n threshold=5.,\n zpt=27.8,\n aperture=20.,\n min_radius=2.0,\n catalogType='FITS_LDAC',\n saturate=55000)\nscamp.makeParFiles.writeConv()\nscamp.makeParFiles.writeParam(numAps=1) #numAps is the number of apertures that you want to use. Here we use 1\n\nscamp.runSex('example.sex', inputFile ,options={'CATALOG_NAME':'example.cat'},verbose=False)\ncatalog = trimCatalog(scamp.getCatalog('example.cat',paramFile='def.param'))", "Finally, find the source closest to 811, 4005, which is the bright asteroid 2006 Polonskaya. Also, set the rate and angle of motion. These were found from JPL Horizons. The 1 degree increase is to account for the slight rotation of the image.\nNote: in this image, the asteroid is near (4005,811) and we apply a distance sort to the catalog to find the correct catalog entry, and the source centroid, which we store in (xt,yt).\nSetting the important asteroid parameters. xt,yt contain the location of the asteroid itself (near 811,4005), rate and angle are the rate and angle of trailing, in \"/hr and degrees. We find the actual centroid as the location closest to that point.", "dist = ((catalog['XWIN_IMAGE']-811)**2+(catalog['YWIN_IMAGE']-4005)**2)**0.5\nargs = num.argsort(dist)\nxt = catalog['XWIN_IMAGE'][args][0]\nyt = catalog['YWIN_IMAGE'][args][0]\n\nrate = 18.4588 # \"/hr\nangle = 31.11+1.1 # degrees counter clockwise from horizontal, right",
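As a quick sanity check on these numbers, the trail length that this rate implies on the detector can be worked out directly. This little calculation is just for illustration (using the 0.185 "/pix pixel scale quoted later) and anticipates the arithmetic used below when the PSF dimensions are chosen:

```python
# Length of the trail, in pixels, that Polonskaya leaves during the exposure:
# rate ["/hr] * exposure time [hr] / pixel scale ["/pix]
trail_length_pix = rate * (EXPTIME / 3600.) / 0.185
print('Trail length is roughly {:.1f} pixels.'.format(trail_length_pix))
# For this ~480 s image that comes out near 13-14 pixels, which is why a
# 61 pixel wide PSF (a few times larger than trailing plus seeing) is adopted below.
```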
"Now use psfStarChooser to select the PSF stars. The first and second parameters to starChooser are the fitting box width in pixels, and the SNR minimum required for a star to be considered as a potential PSF star. \nOptional but important inputs are autoTrim and noVisualSelection. The former, when True, uses bgFinder.fraserMode to attempt to determine what FWHM corresponds to actual stars, and rejects all sources with FWHM outside +-0.5 pixels of the modal value. noVisualSelection determines if manual input is required. When set to false, all stars are considered. Until you know the software, I suggest you use noVisualSelection=True for manual selection, and autoTrim=False to see all sources in the plot window.\nFor each star provided to psfStarChooser, it will print a line to screen of x,y and best fit alpha, beta, and FWHM of the moffat profile fit.\nThen psfStarChooser will pop up a multipanel window. Top left: histogram of fit chi values. Top right: chi vs. FWHM for each fitted source. Middle right: histogram of FWHM. Bottom right: image display of the currently selected source. Bottom left: Radial profiles of all sources displayed in the top right scatter plot.\nThe point of this window is to select only good stars for PSF generation, done by zooming to the good sources, and rejecting those that are bad.\nUse the zoom tool to select the region containing the stars. In this image, that's a cluster at FWHM~3.5 pixels.\nLeft and right clicks will select a source, now surrounded by a diamond, displaying the radial profile bottom left, and the actual image bottom right.\nRight click will oscillate between accepted source and rejected source (blue and red respectively). \nKeyboard functionality is now also implemented. Use the left/right arrow keys (or a/d) to cycle through each source, and the up/down keys (or w/d) to mark a source as rejected (red) or accepted (blue). This is probably the fastest way to cycle through sources. Note that for some mac python installs, key presses won't be recognized inside a pylab window. To solve this, invoke your trippy script with pythonw instead of python.\nWhen the window is closed, only those sources shown as blue points, and within the zoom of the top right plot will be used to generate the PSF.\nThe array goodFits is returned for convenience and contains the moffat fit details of each accepted source. Each entry is [FWHM, chi, alpha, beta, x, y, local background value].\nThe array goodMeds is just the median of goodFits, and provides the median moffat alpha and beta of the selected stars.\nNote on a couple starChooser options: \n--bgRadius is the radius outside of which the image background level is sampled. The fitting is relatively insensitive to this value; however, if you happen to know what the FWHM is approximately, then the best fitting results can be had with bgRadius>~3xFWHM in pixels.\n--ftol is the least squares fitting tolerance parameter passed to the scipy least squares fitter. Increasing this number can result in dramatic performance improvements. Default is 1.4e-8 to provide an extremely accurate fit. Good enough fits can be had with 1.e-7 or even 1.e-6 if one has a need for speed.\n--repFact defaults to 5. If you want to run faster but still preserve most accuracy in the fitting procedure, use repFact = 3\n--quickFit = True will provide the fastest moffat fitting. The speed improvement over quickFit = False is dramatic, but results in slightly less accurate moffat fit parameters. For the majority of use cases, where the number of good psf stars is more than a few, the degradation in PSF accuracy will not be appreciable because of the fact that a lookup table is used. 
But the user should confirm this by comparing PSFs generated in both circumstances.\n--printStarInfo = True will display an inset in the starchooser plot that shows the parameters of the selected source, such as alpha, beta, and FWHM, among others.", "starChooser=psfStarChooser.starChooser(data,\n catalog['XWIN_IMAGE'],catalog['YWIN_IMAGE'],\n catalog['FLUX_AUTO'],catalog['FLUXERR_AUTO'])\n(goodFits,goodMeds,goodSTDs) = starChooser(30,200,noVisualSelection=False,autoTrim=True, \n bgRadius=15, quickFit = False,\n printStarInfo = True,\n repFact = 5, ftol=1.49012e-08)\nprint(goodFits)\nprint(goodMeds)", "Generate the PSF. We want a 61 pixel wide PSF, adopt a repFactor of 10, and use the mean star fits chosen above.\nAlways use odd values for the dimensions. Even values (e.g. 60 instead of 61) result in off-centered lookup tables.\nRepfactors of 5 and 10 have been tested thoroughly. Larger is pointless, smaller is inaccurate. 5 is faster than 10, 10 is more accurate than 5.\nThe PSF has to be wide/tall enough to handle the trailing length and the seeing disk. For Polonskaya, the larger of the two is the trailing length, at ~19\"/hr*480s/3600/0.185\"/pix = 14 pixels. Choose something a few times larger. Also, stick with odd width PSFs, as the even ones have some funny centroid stuff that I haven't fully sorted out.\nThe full PSF is created with instantiation, and running both genLookupTable and genPSF.", "goodPSF = psf.modelPSF(num.arange(61),num.arange(61), alpha=goodMeds[2],beta=goodMeds[3],repFact=10)\ngoodPSF.genLookupTable(data,goodFits[:,4],goodFits[:,5],verbose=False)\nfwhm = goodPSF.FWHM() ###this is the FWHM with lookuptable included\nfwhm = goodPSF.FWHM(fromMoffatProfile=True) ###this is the pure moffat FWHM.\n\nprint(\"Full width at half maximum {:5.3f} (in pix).\".format(fwhm))\n\nzscale = ZScaleInterval()\n(z1, z2) = zscale.get_limits(goodPSF.lookupTable)\nnormer = interval.ManualInterval(z1,z2)\npyl.imshow(normer(goodPSF.lookupTable))\npyl.show()", "Now generate the TSF, which we call the line/long PSF interchangeably through the code...\nRate is in units of length/time and pixScale is in units of length/pixel; time and length are in units of your choice. Sanity suggests arcseconds and hours. Then rate is in \"/hr and pixScale in \"/pix. Angle is in degrees counter clockwise from horizontal between +-90 degrees.\nThis can be rerun to create a TSF with different rate/angle of motion, though keep in mind that the psf class only contains one longPSF (one rate/angle) at any given time.", "goodPSF.line(rate,angle,EXPTIME/3600.,pixScale=0.185,useLookupTable=True)", "Now calculate aperture corrections for the PSF and TSF. Store for values of r=1.4*FWHM.\nNote that the precision of the aperture correction depends lightly on the sampling from the compute functions. 10 is generally enough to preserve 1% precision in the .roundAperCorr() and lineAperCorr() functions which use linear interpolation to get the value one actually desires.\nNOTE: Set useLookupTable=False if one wants to calculate from the moffat profile alone. Generally, not accurate for small apertures however.", "goodPSF.computeRoundAperCorrFromPSF(psf.extent(0.8*fwhm,4*fwhm,10),display=False,\n displayAperture=False,\n useLookupTable=True)\nroundAperCorr = goodPSF.roundAperCorr(1.4*fwhm)\n\ngoodPSF.computeLineAperCorrFromTSF(psf.extent(0.1*fwhm,4*fwhm,10),\n l=(EXPTIME/3600.)*rate/0.185,a=angle,display=False,displayAperture=False)\nlineAperCorr = goodPSF.lineAperCorr(1.4*fwhm)\nprint(lineAperCorr,roundAperCorr)\n", "Store the PSF. 
In TRIPPy v1.0 we introduced a new psf save format which decreases the storage requirements by roughly half, at the cost of increased CPU time when restoring the stored PSF. The difference is that the moffat component of the PSF was originally saved in the fits file's first extension. This is no longer saved, as it's pretty quick to calculate. \nDefault behaviour is the old PSF format, but the new format can be flagged with psfV2=True as shown below.", "goodPSF.psfStore('psf.fits', psfV2=True)", "If we've already done the above once, we could avoid doing it again by restoring the previously constructed PSF with the following commented-out code.", "#goodPSF = psf.modelPSF(restore='psf.fits')", "And we could generate a new line psf by recalling .line with a new rate and angle", "#goodPSF.line(new_rate,new_angle,EXPTIME/3600.,pixScale=0.185,useLookupTable=True)", "Now let's do some pill aperture photometry. Instantiate the class, then call the object you created to get photometry of Polonskaya. Again assume repFact=10.\npillPhot takes as input the same coordinates as outputted by sextractor.\nThe first example is of a round star whose coordinates I have manually taken from above. The second example is for the asteroid itself.\nNew feature! The input radii can either be singletons like in the example below, or a numpy array of radii. If photometry of the same source using multiple radii is needed, the numpy array is much much faster than passing individual singletons.\nenableBGselection=True will cause a popup display of the source, in which one can zoom to a section with no background source.\nThe default background selection technique is \"smart\". See bgFinder documentation for what that means. If you want to change this away from 'fraserMode', take a look at the options in bgFinder.\ndisplay=True to see the image subsection\nr is the radius of the pill, l is the length, a is the angle. skyRadius is the radius of a larger pill aperture. The pixels in this larger aperture, but outside the smaller aperture, are ignored. Anything outside the larger pill, but inside +-width, is used for background estimation.\ntrimBGHighPix is mostly unimportant if mode=smart. But if you want to use a mean or median for some reason, then this value is used to reject pixels with values trimBGhighPix standard deviations above the mean of the cutout.", "#initiate the pillPhot object\nphot = pill.pillPhot(data,repFact=10)\n#get photometry, assume ZPT=26.0\n#enableBGselection=True allows you to zoom in on a good background region in the aperture display window\n#trimBGhighPix is a sigma cut to get rid of the cosmic rays. They get marked as blue in the display window\n#background is selected inside the box and outside the skyRadius value\n#mode is the background mode selection. Options are median, mean, histMode (JJ's jjkmode technique), fraserMode (ask me about it), gaussFit, and \"smart\". Smart does a gaussian fit first, and if the gaussian fit value is discrepant compared to the expectation from the background std, it resorts to the fraserMode. 
\"smart\" seems quite robust to nearby bright sources\n\n#examples of round sources\nphot(goodFits[0][4], goodFits[0][5],radius=3.09*1.1,l=0.0,a=0.0,\n skyRadius=4*3.09,width=6*3.09,\n zpt=26.0,exptime=EXPTIME,enableBGSelection=True,display=True,\n backupMode=\"fraserMode\",trimBGHighPix=3.)\n\n\n#example of a trailed source\nphot(xt,yt,radius=fwhm*1.4,l=(EXPTIME/3600.)*rate/0.185,a=angle,\n skyRadius=4*fwhm,width=6*fwhm,\n zpt=26.0,exptime=EXPTIME,enableBGSelection=True,display=True,\n backupMode=\"smart\",trimBGHighPix=3.)", "The SNR function calculates the SNR of the aperture,as well as provide an estiamte of the magnitude/flux uncertainties. Select useBGstd=True if you wish to use the background noise level instead of sqrt of the background level in your uncertainty estimate. Note: currently, this uncertainty estimate is approximate, good to a few percent. Future improvements will be made to get this a bit more accurate.\nIf the photometry radius was an array, then so are the products created using the SNR function.\nverbose=True puts some nice terminal output in your face. These values can be accessed with their internal names.", "phot.SNR(verbose=True)\n\n#get those values\nprint(phot.magnitude)\nprint(phot.dmagnitude)\nprint(phot.sourceFlux)\nprint(phot.snr)\nprint(phot.bg)", "Let's get aperture corrections measured directly from a star.", "phot.computeRoundAperCorrFromSource(goodFits[0,4],goodFits[0,5],num.linspace(1*fwhm,4*fwhm,10),\n skyRadius=5*fwhm, width=6*fwhm,displayAperture=False,display=True)\nprint('Round aperture correction for a 4xFWHM aperture is {:.3f}.'.format(phot.roundAperCorr(1.4*fwhm)))", "Finally, let's do some PSF source subtraction. This is only possible with emcee and sextractor installed.\nFirst get the cutout. This makes everything faster later. Also, remove the background, just because.\nThis also provides an example of how to use zscale now built into trippy and astropy.visualization to display an astronomy image using the zscale scaling.", "Data = data[int(yt)-200:int(yt)+200,int(xt)-200:int(xt)+200]-phot.bg\n\nzscale = ZScaleInterval()\n(z1, z2) = zscale.get_limits(Data)\nnormer = interval.ManualInterval(z1,z2)\n\npyl.imshow(normer(Data))\npyl.show()\n", "Now instantiate the MCMCfitter class, and then perform the fit. Verbose=False will not put anything to terminal. Setting to true will dump the result of each step. Only good idea if you insist on seeing what's happening. Do you trust black boxes?\nSet useLinePSF to True if you are fitting a trailed source, False if a point source. \nSet useErrorMap to True if you care to use an estimate of the poisson noise in each pixel during your fit. This produces honest confidence ranges.\nI personally like nWalkers=nBurn=nStep=40. To get a reasonable fit however, that's overkill. But to get the best... your mileage will vary.\nThis will take a while on a computer. ~1 minute on a modern i5 processor, much longer if you computer is a few years old. You can reduce the number of walkers, nBurn and nStep to ~10 each if you are impatient. This will drop the run time by ~4x", "fitter = MCMCfit.MCMCfitter(goodPSF,Data)\nfitter.fitWithModelPSF(200+xt-int(xt)-1,200+yt-int(yt)-1, m_in=1000.,\n fitWidth=10, \n nWalkers=20, nBurn=20, nStep=20, \n bg=phot.bg, useLinePSF=True, verbose=False,useErrorMap=False)", "Now get the fits results, including best fit and confidence region using the input value. 
0.67 for 1-sigma is shown", "(fitPars, fitRange) = fitter.fitResults(0.67)\nprint(fitPars)\nprint(fitRange)", "Finally, lets produce the model best fit image, and perform a subtraction. Plant will plant a fake source with the given input x,y,amplitude into the input data. If returnModel=True, then no source is planted, but the model image that would have been planted is returned.\nremove will do the opposite of plant given input data (it actually just calls plant).", "modelImage = goodPSF.plant(fitPars[0],fitPars[1],fitPars[2],Data,addNoise=False,useLinePSF=True,returnModel=True)\npyl.imshow(normer(modelImage))\npyl.show()", "Now show the image and the image with model removed for comparison.", "removed = goodPSF.remove(fitPars[0],fitPars[1],fitPars[2],Data,useLinePSF=True)\n\npyl.imshow(normer(removed))\npyl.show()" ]
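One quick, optional way to judge how clean that subtraction is (not part of the tutorial above, just a numpy-only sketch) is to compare the pixel scatter in a small box around the fitted position before and after removal:

```python
# Compare the scatter near the source before and after subtraction.
# fitPars[0], fitPars[1] are the fitted x, y inside the cutout from above.
x0, y0 = int(fitPars[0]), int(fitPars[1])
box_before = Data[y0-15:y0+15, x0-15:x0+15]
box_after = removed[y0-15:y0+15, x0-15:x0+15]
print('std before: {:.2f}, std after: {:.2f}'.format(num.std(box_before),
                                                     num.std(box_after)))
# If the TSF is a good match, the residual scatter should be close to the
# background noise level measured earlier (phot.bg was already subtracted
# from this cutout).
```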
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
hetland/python4geosciences
materials/7_shapefiles.ipynb
mit
[ "Shapefiles on maps", "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport cartopy\nimport cartopy.io.shapereader as shpreader\n\nimport shapely.geometry", "What is a shapefile?\nA shapefile contains spatial information in a particular format and is used commonly in GIS applications. It typically contains information like the polygons describing counties, countries, or other political boundaries; lakes, rivers, or bays; or land and coastline. A shapefile record has a geometry, which contains the points that make up the objects, and attributes, which store information like the name of the record.\nShapefiles are commonly available online through local or federal agencies for geometric data on public lands and waterways.\nRead and examine records from Natural Earth\nWe saw in the maps notebook how easy it is to access shapefiles through Natural Earth and cartopy. Here we go into more detail.\nWe can read in a dataset from Natural Earth with the following lines. \nNote\nIf we didn't re-read this each time this cell was run, we could only run through the records once. Once the states have been iterated over, the pointer is at the end of them and there are none left to show. This is like reading all of the lines of a file and reaching the end.", "# how we tell cartopy which data we want, from the list at the end of the maps notebook\nshapename = 'admin_1_states_provinces_lakes_shp'\n\n# Set up reader for this file\nstates_shp = shpreader.natural_earth(category='cultural', resolution='110m', name=shapename)\nreader = shpreader.Reader(states_shp)\n\n# Read in the data from the file into the \"states\" generator which we can iterate/loop over\nstates = reader.records()", "Information about the states is in variable states and is a generator. Without going into too much detail about generators, they are used in loops and we can see two ways to access the individual records (or states in this case) in the next few cells.\nLet's look at a few of the states by looking at the generator as a list:", "list(states)[:2]", "Note\nEach time you access the states, you will need to rerun the cell above that reads in the records in the first place.\nOr in its natural state, we can step through the records of the generator using next after rereading in the records. The following cell shows the first record, which contains a single state.", "next(states)", "Now the next.", "next(states)", "We can save one to a variable name so that we can examine it more carefully:", "state = next(states)\nstate", "We are seeing the attributes of the record, unique to this file, which we can access more specifically as follows:", "state.attributes", "... 
and then each attribute individually as in a dictionary:", "state.attributes['name']", "We can also access the geometry of the record:", "state.geometry\n\nstate.geometry.centroid.xy # this is in lon/lat", "and properties of the geometry like the area and centroid location:", "state.geometry.area # what are the units of this area?", "Pull out specific records\nFind states that start with \"A\":", "pc = cartopy.crs.PlateCarree()\n\n\n# how we tell cartopy which data we want, from the list at the end of the maps notebook\nshapename = 'admin_1_states_provinces_lakes_shp'\n\n# Set up reader for this file\nstates_shp = shpreader.natural_earth(category='cultural', resolution='110m', name=shapename)\nreader = shpreader.Reader(states_shp)\n\n# Read in the data from the file into the \"states\" generator which we can iterate/loop over\nstates = reader.records()\n\nAstates = [] # initialize list to save states that start with \"A\"\n\nfig = plt.figure()\nax = fig.add_subplot(1,1,1, projection=cartopy.crs.Mercator())\nax.set_extent([-170,-80,20,75], pc)\nfor state in states:\n\n if state.attributes['name'][0] == 'A':\n print(state.attributes['name'])\n ax.add_geometries([state.geometry], pc,\n facecolor='k', alpha=0.4)\n # save state\n Astates.append(state)\n", "How could you change this loop to check for states in a specific region of the country?\nTransforming geometry between projections\nShapefiles are often in geographic coordinates (lon/lat), and they come out of Natural Earth as lon/lat. \nHere we change a state's projection from PlateCarree (pc) to LambertConformal. We use the project_geometry method in the projection we want to transform to (lc in this case), and input the current projection of the shape into the method (pc in this case).", "state.geometry # we can see the shape in PlateCarree\n\nlc = cartopy.crs.LambertConformal()\nstatelc = lc.project_geometry(state.geometry, cartopy.crs.PlateCarree())\nstatelc # this is now the geometry of the record only, without attributes\n # the shape has changed in the new projection", "Reading your own shapes and using cartopy\nYou can read in shapefiles outside of the Natural Earth dataset and use them on maps with cartopy. Here we look at shipping lanes in the northwest Gulf of Mexico. You can get to the shapes or polygons themselves two different ways using cartopy. The first uses the feature interface that we've been using (with add_feature), but limits our ability to access attributes of the files. 
The second gives more access.\n1st approach for using a generic shapefile:\nWe start with a map:", "proj = cartopy.crs.LambertConformal()\npc = cartopy.crs.PlateCarree()\nland_10m = cartopy.feature.NaturalEarthFeature('physical', 'land', '10m', edgecolor='face')\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, projection=proj)\nax.set_extent([-98, -87, 25, 31], pc)\nax.add_feature(land_10m, facecolor='0.8')", "We then set up to read in shipping lane data, which is in the data directory:", "fname = '../data/fairway/fairway.shp'\nshipping_lanes = cartopy.feature.ShapelyFeature(shpreader.Reader(fname).geometries(),\n cartopy.crs.PlateCarree(), facecolor='none')", "Now we can just add the shipping lanes onto our map!", "fig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, projection=proj)\nax.set_extent([-98, -87, 25, 31], cartopy.crs.PlateCarree())\nax.add_feature(land_10m, facecolor='0.8')\n\n# shipping lanes\nax.add_feature(shipping_lanes, edgecolor='r', linewidth=0.5)", "2nd approach for using a generic shapefile", "fig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, projection=proj)\nax.set_extent([-98, -87, 25, 31], cartopy.crs.PlateCarree())\nax.add_feature(land_10m, facecolor='0.8')\n\nfname = '../data/fairway/fairway.shp'\nax.add_geometries(cartopy.io.shapereader.Reader(fname).geometries(),\n pc, edgecolor='darkcyan')\n", "Great Circle Distance\nHow do you find an airplane's flight path? The shortest line between two places on earth is not necessarily a straight line in the projection you are using. The shortest distance is called the Great Circle distance and it is the shortest distance between two places on a sphere. \nFor example, here is the shortest path between Boston and Tokyo. It is a straight line in this rather globe-like projection because it preserves this property.\n\nHowever, this link shows the flight path in a different projection. 
Not so straight anymore.\nHere are previously-saved latitude and longitude points along the great circle line between the LA and Newark airports (calculated using the pyproj package which is great but beyond the scope of this notebook).\nIn particular, the LA and Newark airports have the following coordinates and are in the first and last elements of the two arrays.\nLAX: 33.9425° N, 118.4081° W\nEWR: 40.6925° N, 74.1686° W", "lons = [-118.4081, -116.53656281803954, -114.63494404602989, -112.70342143546311,\n -110.74234511851722, -108.75224911337924, -106.73386144433508, -104.6881124356053,\n -102.6161407277617, -100.51929657411526, -98.3991420049751, -96.25744750245255,\n -94.09618490844686, -91.91751639275596, -89.72377943401308, -87.51746790832203,\n -85.30120953200326, -83.07774005710772, -80.84987476165341, -78.62047790110475,\n -76.39243088444343, -74.1686]\nlats = [33.9425, 34.62185468395183, 35.27195983702588, 35.89163680795418, 36.47971217805657,\n 37.03502459436787, 37.5564322473648, 38.042820934293715, 38.493112624072936,\n 38.9062744137114, 39.281327740305926, 39.61735768834621, 39.9135222108212,\n 40.169061066104604, 40.38330426236194, 40.55567979862256, 40.68572049769913,\n 40.773069741323866, 40.81748594212188, 40.818845619619054, 40.77714498701483, 40.6925]\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, projection=cartopy.crs.Mercator())\nax.set_extent([-128, -60, 24, 50], cartopy.crs.PlateCarree())\nax.add_feature(cartopy.feature.LAND, facecolor='0.9')\nax.add_feature(cartopy.feature.OCEAN, facecolor='w')\n\n# add end points\nax.plot(lons, lats, transform=cartopy.crs.PlateCarree())\n", "Make your own Shape from points\nYou can create your own Shape geometry from coordinate locations or x,y points, so that you can interact with it in a similar manner as from a shapefile. Once you have a Shape, you can change projections and look at geometric properties of the Shape, as we did above for a single state.", "# use lons and lats of the great circle path from above\nline = shapely.geometry.LineString(zip(lons, lats))\nline", "We can look at properties like the length of the line, though keep in mind that any properties will be calculated in the projection being used. In this case, the line is in geographic coordinates, so the length is also in geographic coordinates, not in meters.", "line.length", "Exercise\n\nConvert the line between these two cities to another projection, calculate the length, and compare with the actual distance. Which projection should you use for this calculation and why? 
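One possible way to attack this exercise (a sketch, not the only answer) is to project the line into a metric projection centred on the route and compare its length with the geodesic distance from pyproj, which the flight-path points above were originally computed with:

```python
import pyproj

# Project the lon/lat line into an azimuthal equidistant projection centred
# roughly on the midpoint of the route, so lengths come out in metres.
# (This projection is only strictly distance-preserving through its centre,
# so treat the result as an approximation.)
aeqd = cartopy.crs.AzimuthalEquidistant(central_longitude=-96, central_latitude=38)
line_m = aeqd.project_geometry(line, pc)
print('projected length: {:.0f} km'.format(line_m.length / 1000))

# Geodesic (true great-circle) distance between the two airports for comparison.
geod = pyproj.Geod(ellps='WGS84')
_, _, dist = geod.inv(lons[0], lats[0], lons[-1], lats[-1])
print('geodesic distance: {:.0f} km'.format(dist / 1000))
```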
\n\n\nOther shape options include:\n\nPolygon\nLineString\nMultiLineString\nMultiPoint\nMultiPolygon\nPoint\n\nand some basic information about working with shapes separately from maps and shapefiles is available in notebook ST_shapes.ipynb.\nStates Flown Over\nConsider the following: \nWhat states do you travel over when you fly from LA (airport code LAX) to NYC (airport code EWR)?\nFirst, a plot of the problem:", "fig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, projection=cartopy.crs.Mercator())\nax.set_extent([-128, -60, 24, 50], cartopy.crs.PlateCarree())\nax.add_feature(cartopy.feature.LAND, facecolor='0.9')\nax.add_feature(cartopy.feature.OCEAN, facecolor='w')\n\n# add states\n# can plot states like this, but doesn't allow access to metadata\nshapename = 'admin_1_states_provinces_lakes_shp'\nstates = cartopy.feature.NaturalEarthFeature(category='cultural', scale='110m', facecolor='none', name=shapename)\nax.add_feature(states, edgecolor='gray')\n\n# add end points\nax.plot([lons[0], lons[-1]], [lats[0], lats[-1]], 'ro', transform=pc)\n\n# add the flight path as a shape\nax.add_geometries([line], pc, facecolor='none', edgecolor='k')", "Shape intersections\nAn easy way to find what states the flight path intersects is looking for intersections of the Shapes.", "# Set up reader for this file\nstates_shp = shpreader.natural_earth(category='cultural', resolution='110m', name=shapename)\nreader = shpreader.Reader(states_shp)\n\n# Read in the data from the file into the \"states\" generator which we can iterate over\nstates = reader.records()\n\n# Note that if we didn't re-read this each time this cell was run, we could only run it once.\n# Once the states have been iterated over, the pointer is at the end of them and there are\n# none left to show. This is like reading all of the lines of a file and reaching the end.\n\n# Remake map here\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, projection=cartopy.crs.Mercator())\nax.set_extent([-128, -60, 24, 50], cartopy.crs.PlateCarree())\nax.add_feature(cartopy.feature.LAND, facecolor='0.9')\nax.add_feature(cartopy.feature.OCEAN, facecolor='w')\n\n# add end points\nax.plot([lons[0], lons[-1]], [lats[0], lats[-1]], 'ro', transform=pc)\n\n# add the flight path as a shape\nax.add_geometries([line], pc, facecolor='none', edgecolor='k')\n\n# Loop through states and see if they intersect flight path\n# deal with shapes differently if want to dig into them more\n\nvisible_states = [] # initialize for storing states\n\nfor state in states:\n # pick a default color for the land with a black outline,\n # this will change if the flight intersects with a state\n facecolor = '0.9'\n edgecolor = 'black'\n\n if state.geometry.intersects(line):\n facecolor = 'red'\n # also save to list if intersects\n visible_states.append(state.attributes['name'])\n\n ax.add_geometries([state.geometry], pc,\n facecolor=facecolor, edgecolor=edgecolor, alpha=0.4)\n\nprint(visible_states)", "Exercise\n\nWhat additional states could a passenger in this airplane see? Assume he or she can see 100km from the airplane's position, on either side of the plane.\nMake a buffer away from the flight path.\n * What should the units of the projection be? \n * First you will need to convert projections. \n * What is a good choice for a projection and why?\n * Once you set up your buffer, add it to the map.\n\n\n\nExercise (continued)\n\nWhat additional states could a passenger in this airplane see? 
Assume he or she can see 100km from the airplane's position, on either side of the plane.\nWhat states are visible?\n * Save the names of the visible states and print them out.\n * Use a different color on the map for the states that are visible from the plane but not actually flown over.\n\n\n\nExercise (continued)\n\nFind the length of the flight track and the area of the flight buffer region.\nWhat projection should we use to get a good approximation of the real values?\n * Compare your computed length with the actual great-circle distance between the airports.\n * Compare your buffer region's area with a rough estimate, for example the path length times the 200 km corridor width. A sketch of one possible approach to these exercises follows below." ]
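The sketch below is one possible way to set up the buffer exercises. The 100 km value comes from the exercise statement; the Albers equal-area projection (a reasonable choice for areas over the continental US) and the variable names are choices made here, not part of the original notebook. It reuses `line`, `shpreader`, `states_shp`, and `visible_states` from the cells above.

```python
# Work in a metric projection so that buffer distances and areas are in metres
proj = cartopy.crs.AlbersEqualArea(central_longitude=-96, central_latitude=37.5)
pc_crs = cartopy.crs.PlateCarree()

line_proj = proj.project_geometry(line, pc_crs)
corridor = line_proj.buffer(100000)  # 100 km on either side of the path
print('Corridor area: %.0f km^2' % (corridor.area / 1e6))

# States that intersect the corridor but were not flown over directly
seen_states = []
for state in shpreader.Reader(states_shp).records():
    state_proj = proj.project_geometry(state.geometry, pc_crs)
    if state_proj.intersects(corridor):
        seen_states.append(state.attributes['name'])

print(sorted(set(seen_states) - set(visible_states)))
```

The corridor polygon can also be passed to `ax.add_geometries` (in the `proj` coordinate system) to draw it on the map.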
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
SamLau95/nbinteract
notebooks/Using_Interact.ipynb
bsd-3-clause
[ "Using Interact\nThe interact function (ipywidgets.interact) automatically creates user interface (UI) controls for exploring code and data interactively. It is the easiest way to get started using IPython's widgets.", "from __future__ import print_function\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets", "Basic interact\nAt the most basic level, interact autogenerates UI controls for function arguments, and then calls the function with those arguments when you manipulate the controls interactively. To use interact, you need to define a function that you want to explore. Here is a function that prints its only argument x.", "def f(x):\n return x", "When you pass this function as the first argument to interact along with an integer keyword argument (x=10), a slider is generated and bound to the function parameter.", "interact(f, x=10);", "When you move the slider, the function is called, which prints the current value of x.\nIf you pass True or False, interact will generate a checkbox:", "interact(f, x=True);", "If you pass a string, interact will generate a text area.", "interact(f, x='Hi there!');", "interact can also be used as a decorator. This allows you to define a function and interact with it in a single shot. As this example shows, interact also works with functions that have multiple arguments.", "@interact(x=True, y=1.0)\ndef g(x, y):\n return (x, y)", "Fixing arguments using fixed\nThere are times when you may want to explore a function using interact, but fix one or more of its arguments to specific values. This can be accomplished by wrapping values with the fixed function.", "def h(p, q):\n return (p, q)", "When we call interact, we pass fixed(20) for q to hold it fixed at a value of 20.", "interact(h, p=5, q=fixed(20));", "Notice that a slider is only produced for p as the value of q is fixed.\nWidget abbreviations\nWhen you pass an integer-valued keyword argument of 10 (x=10) to interact, it generates an integer-valued slider control with a range of [-10,+3*10]. In this case, 10 is an abbreviation for an actual slider widget:\npython\nIntSlider(min=-10,max=30,step=1,value=10)\nIn fact, we can get the same result if we pass this IntSlider as the keyword argument for x:", "interact(f, x=widgets.IntSlider(min=-10,max=30,step=1,value=10));", "This examples clarifies how interact proceses its keyword arguments:\n\nIf the keyword argument is a Widget instance with a value attribute, that widget is used. Any widget with a value attribute can be used, even custom ones.\nOtherwise, the value is treated as a widget abbreviation that is converted to a widget before it is used.\n\nThe following table gives an overview of different widget abbreviations:\n<table class=\"table table-condensed table-bordered\">\n <tr><td><strong>Keyword argument</strong></td><td><strong>Widget</strong></td></tr> \n <tr><td>`True` or `False`</td><td>Checkbox</td></tr> \n <tr><td>`'Hi there'`</td><td>Text</td></tr>\n <tr><td>`value` or `(min,max)` or `(min,max,step)` if integers are passed</td><td>IntSlider</td></tr>\n <tr><td>`value` or `(min,max)` or `(min,max,step)` if floats are passed</td><td>FloatSlider</td></tr>\n <tr><td>`['orange','apple']` or `{'one':1,'two':2}`</td><td>Dropdown</td></tr>\n</table>\nNote that a dropdown is used if a list or a dict is given (signifying discrete choices), and a slider is used if a tuple is given (signifying a range).\nYou have seen how the checkbox and textarea widgets work above. 
Here, more details about the different abbreviations for sliders and dropdowns are given.\nIf a 2-tuple of integers is passed (min,max), an integer-valued slider is produced with those minimum and maximum values (inclusively). In this case, the default step size of 1 is used.", "interact(f, x=(0,4));", "If a 3-tuple of integers is passed (min,max,step), the step size can also be set.", "interact(f, x=(0,8,2));", "A float-valued slider is produced if the elements of the tuples are floats. Here the minimum is 0.0, the maximum is 10.0 and step size is 0.1 (the default).", "interact(f, x=(0.0,10.0));", "The step size can be changed by passing a third element in the tuple.", "interact(f, x=(0.0,10.0,0.01));", "For both integer and float-valued sliders, you can pick the initial value of the widget by passing a default keyword argument to the underlying Python function. Here we set the initial value of a float slider to 5.5.", "@interact(x=(0.0,20.0,0.5))\ndef h(x=5.5):\n return x", "Dropdown menus are constructed by passing a list of strings. In this case, the strings are both used as the names in the dropdown menu UI and passed to the underlying Python function.", "interact(f, x=['apples','oranges']);", "If you want a dropdown menu that passes non-string values to the Python function, you can pass a list of (label, value) pairs.", "interact(f, x=[('one', 10), ('two', 20)]);", "interactive\nIn addition to interact, IPython provides another function, interactive, that is useful when you want to reuse the widgets that are produced or access the data that is bound to the UI controls.\nNote that unlike interact, the return value of the function will not be displayed automatically, but you can display a value inside the function with IPython.display.display.\nHere is a function that returns the sum of its two arguments and displays them. The display line may be omitted if you don't want to show the result of the function.", "from IPython.display import display\ndef f(a, b):\n display(a + b)\n return a+b", "Unlike interact, interactive returns a Widget instance rather than immediately displaying the widget.", "w = interactive(f, a=10, b=20)", "The widget is an interactive, a subclass of VBox, which is a container for other widgets.", "type(w)", "The children of the interactive are two integer-valued sliders and an output widget, produced by the widget abbreviations above.", "w.children", "To actually display the widgets, you can use IPython's display function.", "display(w)", "At this point, the UI controls work just like they would if interact had been used. You can manipulate them interactively and the function will be called. However, the widget instance returned by interactive also gives you access to the current keyword arguments and return value of the underlying Python function. \nHere are the current keyword arguments. If you rerun this cell after manipulating the sliders, the values will have changed.", "w.kwargs", "Here is the current return value of the function.", "w.result", "Disabling continuous updates\nWhen interacting with long running functions, realtime feedback is a burden instead of being helpful. See the following example:", "def slow_function(i):\n print(int(i),list(x for x in range(int(i)) if \n str(x)==str(x)[::-1] and \n str(x**2)==str(x**2)[::-1]))\n return\n\n%%time\nslow_function(1e6)", "Notice that the output is updated even while dragging the mouse on the slider. 
This is not useful for long running functions due to lagging:", "from ipywidgets import FloatSlider\ninteract(slow_function,i=FloatSlider(min=1e5, max=1e7, step=1e5));", "There are two ways to mitigate this. You can either only execute on demand, or restrict execution to mouse release events.\ninteract_manual\nThe interact_manual function provides a variant of interaction that allows you to restrict execution so it is only done on demand. A button is added to the interact controls that allows you to trigger an execute event.", "interact_manual(slow_function,i=FloatSlider(min=1e5, max=1e7, step=1e5));", "continuous_update\nIf you are using slider widgets, you can set the continuous_update kwarg to False. continuous_update is a kwarg of slider widgets that restricts executions to mouse release events.", "interact(slow_function,i=FloatSlider(min=1e5, max=1e7, step=1e5, continuous_update=False));", "interactive_output\ninteractive_output provides additional flexibility: you can control how the UI elements are laid out.\nUnlike interact, interactive, and interact_manual, interactive_output does not generate a user interface for the widgets. This is powerful, because it means you can create a widget, put it in a box, and then pass the widget to interactive_output, and have control over the widget and its layout.", "a = widgets.IntSlider()\nb = widgets.IntSlider()\nc = widgets.IntSlider()\nui = widgets.HBox([a, b, c])\ndef f(a, b, c):\n print((a, b, c))\n\nout = widgets.interactive_output(f, {'a': a, 'b': b, 'c': c})\n\ndisplay(ui, out)", "Arguments that are dependent on each other\nArguments that are dependent on each other can be expressed manually using observe. See the following example, where one variable is used to describe the bounds of another. For more information, please see the widget events example notebook.", "x_widget = FloatSlider(min=0.0, max=10.0, step=0.05)\ny_widget = FloatSlider(min=0.5, max=10.0, step=0.05, value=5.0)\n\ndef update_x_range(*args):\n x_widget.max = 2.0 * y_widget.value\ny_widget.observe(update_x_range, 'value')\n\ndef printer(x, y):\n print(x, y)\ninteract(printer,x=x_widget, y=y_widget);", "Flickering and jumping output\nOn occasion, you may notice interact output flickering and jumping, causing the notebook scroll position to change as the output is updated. The interactive control has a layout, so we can set its height to an appropriate value (currently chosen manually) so that it will not change size as it is updated.", "%matplotlib inline\nfrom ipywidgets import interactive\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef f(m, b):\n plt.figure(2)\n x = np.linspace(-10, 10, num=1000)\n plt.plot(x, m * x + b)\n plt.ylim(-5, 5)\n plt.show()\n\ninteractive_plot = interactive(f, m=(-2.0, 2.0), b=(-3, 3, 0.5))\noutput = interactive_plot.children[-1]\noutput.layout.height = '350px'\ninteractive_plot" ]
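A third option, besides `interact_manual` and `continuous_update=False`, is to debounce the slider events so the function only runs once the user has stopped moving the slider. The `debounce` helper below is written here for illustration and is not part of ipywidgets; it reuses `slow_function` and `FloatSlider` from the cells above. Note that, depending on the notebook frontend, output printed from the timer thread may not appear in the cell output; the event-handling pattern is the point of the sketch.

```python
from threading import Timer
from ipywidgets import FloatSlider

def debounce(wait):
    """Call the wrapped function only after `wait` seconds without new events."""
    def decorator(fn):
        timer = None
        def debounced(*args, **kwargs):
            nonlocal timer
            if timer is not None:
                timer.cancel()          # drop the previously scheduled call
            timer = Timer(wait, fn, args=args, kwargs=kwargs)
            timer.start()
        return debounced
    return decorator

slider = FloatSlider(min=1e5, max=1e7, step=1e5)

@debounce(0.5)
def on_value_change(change):
    slow_function(change['new'])

slider.observe(on_value_change, 'value')
slider
```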
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
gabrielhpbc/CD
Projeto 1 - CD.ipynb
mit
[ "Ciência dos Dados - PROJETO 1\nGabriel Heusi Pereira Bueno de Camargo\nTítulo\nO comportamento da segurança alimentar no território brasileiro.\n\nIntrodução\nA diversidade do território brasileiro se apresenta em diversos fatores, entre eles há um negativo que deve ser destacado: a insegurança alimentar, ou melhor, a segurança alimentar restrita para uma parcela baixa da população. A partir disso buscou-se realizar uma análise de como isso se comporta, observando a diferença entre as regiões, entre a zona urbana e rural e ainda um destaque para a distribuição desse problema de acordo com a renda familiar. Dessa forma com auxílio de uma ferramenta do IBGE, a PNAD, os dados serão analisados e explicados ao longo da análise para então uma conclusão ao final. Vale destacar ainda que será tratado com a base de dados de 2009 e 2013, a última divulgada que involve questões de âmbito alimentar. A orientação do projeto como um todo vai ao encontro de responder a seguinte pergunta sobre segurança alimentar: ao comparar as pesquisas de 2009 e 2013, qual é a faixa de renda familiar em que se concentra maior número de pessoas que já passaram por situação de insegurança alimentar? A comparação será feita apenas para as 2 regiões que demonstram maior disparidade desse problema entre zona rural e urbana.", "%matplotlib inline\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom numpy import zeros_like\n\nprint('Esperamos trabalhar no diretório')\nprint(os.getcwd())\n\nbase = pd.read_csv('DOM2013.csv',sep=',')\nbase9 = pd.read_csv('DOM2009.csv',sep=',')", "MUDANÇA DA VARIÁVEL INICIAL QUE MOSTRA O ANO DE PESQUISA.", "base.V0101=base.V0101.astype(\"int\")\nbase9.V0101=base9.V0101.astype(\"int\")", "DEFINIÇÃO DAS REGIÕES E TRANSFORMAÇÃO EM UMA CATEGORIA;", "base.loc[(base.UF<18),\"REGIAO\"]=\"NORTE\"\nbase.loc[(base.UF>20)&(base.UF<30),\"REGIAO\"]=\"NORDESTE\"\nbase.loc[(base.UF>30)&(base.UF<36),\"REGIAO\"]=\"SUDESTE\"\nbase.loc[(base.UF>35)&(base.UF<44),\"REGIAO\"]=\"SUL\"\nbase.loc[(base.UF>43)&(base.UF<54),\"REGIAO\"]=\"CENTRO-OESTE\"\nbase.REGIAO=base.REGIAO.astype(\"category\")\n\nbase9.loc[(base9.UF<18),\"REGIAO\"]=\"NORTE\"\nbase9.loc[(base9.UF>20)&(base9.UF<30),\"REGIAO\"]=\"NORDESTE\"\nbase9.loc[(base9.UF>30)&(base9.UF<36),\"REGIAO\"]=\"SUDESTE\"\nbase9.loc[(base9.UF>35)&(base9.UF<44),\"REGIAO\"]=\"SUL\"\nbase9.loc[(base9.UF>43)&(base9.UF<54),\"REGIAO\"]=\"CENTRO-OESTE\"\nbase9.REGIAO=base9.REGIAO.astype(\"category\")", "DIVISÃO EM ZONA RURAL E URBANA, A SEGUNDA VARIÁVEL DE ANÁLISE", "base.loc[(base.V4105<4),\"ZONA\"]=\"Urbana\"\nbase.loc[(base.V4105>3),\"ZONA\"]=\"Rural\"\nbase.ZONA=base.ZONA.astype(\"category\")\n\nbase9.loc[(base9.V4105<4),\"ZONA\"]=\"Urbana\"\nbase9.loc[(base9.V4105>3),\"ZONA\"]=\"Rural\"\nbase9.ZONA=base9.ZONA.astype(\"category\")", "CRIACÃO DA VARIÁVEL INSEGURANÇA ALIMENTAR:\nA SEGUIR MODIFICA-SE AS VARIÁVEIS (PERGUNTAS SOBRE INSEGURANÇA ALIMENTAR) CRIANDO UMA ÚNICA CHAMADA \"INSEGURANÇA ALIMENTAR\". O MOTIVO PARA ISSO É QUE AS 4 PERGUNTAS FEITAS REPRESENTAM SITUAÇÕES DE DIFICULDADE PARA SE ALIMENTAR, PORTANTO PARA SE CONSIDERAR UMA PESSOA QUE PASSOU POR SITUAÇÃO DE DIFICULDADE ALIMENTAR DEVE SE TER PELO MENOS UMA PERGUNTA RESPONDIDA COM \"SIM\". 
HÁ AINDA A CARACTERIZACAO PARA CATEGORIA DAS 4 PERGUNTAS.", "base.loc[(base.V2103==1) | (base.V2105==1) | (base.V2107==1) | (base.V2109==1),'Insegurança_Alimentar'] = 'Sim'\nbase.loc[(base.V2103==3) & (base.V2105==3) & (base.V2107==3) & (base.V2109==3),'Insegurança_Alimentar'] = 'Não'\nbase.V2103=base.V2103.astype(\"category\")\nbase.V2105=base.V2105.astype(\"category\")\nbase.V2107=base.V2107.astype(\"category\")\nbase.V2109=base.V2109.astype(\"category\")\n\nbase9.loc[(base9.V2103==1) | (base9.V2105==1) | (base9.V2107==1) | (base9.V2109==1),'Insegurança_Alimentar'] = 'Sim'\nbase9.loc[(base9.V2103==3) & (base9.V2105==3) & (base9.V2107==3) & (base9.V2109==3),'Insegurança_Alimentar'] = 'Não'\nbase9.V2103=base9.V2103.astype(\"category\")\nbase9.V2105=base9.V2105.astype(\"category\")\nbase9.V2107=base9.V2107.astype(\"category\")\nbase9.V2109=base9.V2109.astype(\"category\")", "CRIAÇÃO DO \"PROBLEMA ALIMENTAR\":\nEM SEQUÊNCIA HÁ MAIS 4 PERGUNTAS DESTINADAS APENAS ÀQUELES QUE APRESENTARAM INSEGURANÇA ALIMENTAR. PORTANTO UTILIZOU-SE O MESMO\nPROCESSO DO QUADRO ACIMA. ESSAS PERGUNTAS REFLETEM ALGUNS PROBLEMAS PELOS QUAIS AS PESSOAS PODERIAM TER PASSADO CASO RESPONDESSEM PELO MENOS UM SIM NAS 4 PERGUNTAS INICIAIS.", "base.loc[(base.V2113==1) | (base.V2115==1) | (base.V2117==1) | (base.V2121==1),'Problema_Alimentar'] = 'Sim'\nbase.loc[(base.V2113==3) & (base.V2115==3) & (base.V2117==3) & (base.V2121==3),'Problema_Alimentar'] = 'Não'\nbase.V2113=base.V2113.astype(\"category\")\nbase.V2115=base.V2115.astype(\"category\")\nbase.V2117=base.V2117.astype(\"category\")\nbase.V2121=base.V2121.astype(\"category\")\n\nbase9.loc[(base9.V2111==1) | (base9.V2113==1) | (base9.V2115==1) | (base9.V2117==1) | (base9.V2119==1) | (base9.V2120==1) | (base9.V2121==1),'Problema_Alimentar'] = 'Sim'\nbase9.loc[(base9.V2111==3) & (base9.V2113==3) & (base9.V2115==3) & (base9.V2117==3) & (base9.V2119==3) & (base9.V2120==3) & (base9.V2121==3),'Problema_Alimentar'] = 'Não'\nbase9.V2113=base9.V2113.astype(\"category\")\nbase9.V2115=base9.V2115.astype(\"category\")\nbase9.V2117=base9.V2117.astype(\"category\")\nbase9.V2117=base9.V2119.astype(\"category\")\nbase9.V2121=base9.V2120.astype(\"category\")\nbase9.V2121=base9.V2121.astype(\"category\")", "FILTRAGEM INICIAL:\nTRANSFORMACÃO DAS SIGLAS EM NOME DAS VARIÁVEIS DE INTERESSE E POSTERIOR FILTRO PARA RETIRAR PESSOAS QUE NAO RESPONDERAM (NaN)\nAS 4 PERGUNTAS INICAIS E RENDA. 
VALE DESTACAR QUE NAO SE UTILIZOU PARA A VARIÁVEL \"PROBLEMA_ALIMENTAR\" POIS AQUELES QUE NÃO \nTIVERAM INSEGURANÇA ALIMENTAR NÃO FORAM CHEGARAM A SER QUESTIONADOS SOBRE E PORTANTO PERDERIA-SE DADOS.", "base=base.loc[:,[\"V0101\",\"REGIAO\",\"ZONA\",\"V4614\",'Insegurança_Alimentar',\"Problema_Alimentar\"]]\nbase.columns=[\"ANO\",\"REGIAO\",\"ZONA\",\"RENDA\",'Insegurança_Alimentar',\"Problema_Alimentar\"]\nbase=base.dropna(subset=[\"RENDA\",\"Insegurança_Alimentar\"])\nbase", "TABELA 1 - 2013", "writer = pd.ExcelWriter('Tabela1-2013.xlsx',engine='xlsxwriter')\nbase.to_excel(writer,sheet_name=\"Projeto_1\")\nwriter.save()\n\nbase9=base9.loc[:,[\"V0101\",\"REGIAO\",\"ZONA\",\"V4614\",'Insegurança_Alimentar',\"Problema_Alimentar\"]]\nbase9.columns=[\"ANO\",\"REGIAO\",\"ZONA\",\"RENDA\",'Insegurança_Alimentar',\"Problema_Alimentar\"]\nbase9=base9.dropna(subset=[\"RENDA\",\"Insegurança_Alimentar\"])\nbase9", "TABELA 1 - 2009", "writer = pd.ExcelWriter('Tabela1-2009.xlsx',engine='xlsxwriter')\nbase9.to_excel(writer,sheet_name=\"Projeto_1\")\nwriter.save()", "PRIMEIRA OBSERVAÇÃO:\nOCORRÊNCIA DE PESSOAS QUE JÁ PASSARAM POR SITUAÇÕES DE INSEGURANÇA ALIMENTAR (\"Sim\") PARA POSTERIORMENTE ANALISAR AINDA A DIFERENÇA ENTRE AS REGIÕES E ZONAS.", "g1 = (base.Insegurança_Alimentar.value_counts(sort=False, normalize=True)*100).round(decimals=1)\nplot = g1.plot(kind='bar',title='DIFICULDADE ALIMENTAR 2013 (G1)',figsize=(5, 5),color=('b','g'))\nprint(g1,\"\\n\")\n\ng2 = (base9.Insegurança_Alimentar.value_counts(sort=False, normalize=True)*100).round(decimals=1)\nplot = g2.plot(kind='bar',title='DIFICULDADE ALIMENTAR 2009 (G2)',figsize=(5, 5),color=('b','g'))\nprint(g2,\"\\n\")", "APROFUNDAMENTO NAS REGIÕES:\nGRÁFICO DE FREQUÊNCIA SEGUIDO DE UMA TABELA QUE POTENCIALIZA A ANÁLISE DOS VALORES, JÁ QUE MOSTRA OS VALORES ABSOLUTOS E VISA BUSCAR MAIOR COMPREENSÃO E COERÊNCIA DOS VALORES.", "tb1= (pd.crosstab(base.REGIAO,base.Insegurança_Alimentar,margins=True,rownames=[\"REGIÃO\"],colnames=[\"Insegurança Alimentar\"],normalize='index')*100).round(decimals=1)\nplot = tb1.plot(kind=\"bar\",title=\"Distribuição Regional de Insegurança Alimentar 2013 (G3)\")\n\nabs1=pd.crosstab(base.REGIAO,base.Insegurança_Alimentar, margins=True, rownames=['REGIÃO'], colnames=['INSEGURANÇA ALIMENTAR'])\nabs1=abs1.loc[['NORTE','NORDESTE','SUDESTE','SUL','CENTRO-OESTE']]\nabs1", "Nesse caso pode-se observar uma clara coerência entre os dados percentuais e absolutos, isso porque as regiões Norte e Nordeste mostram a maior frequência e número de pessoas que já passaram por situação de insegurança alimentar.", "tb19= (pd.crosstab(base9.REGIAO,base9.Insegurança_Alimentar,margins=True,rownames=[\"REGIÃO\"],colnames=[\"Insegurança Alimentar\"],normalize='index')*100).round(decimals=1)\nplot = tb19.plot(kind=\"bar\",title=\"Distribuição Regional de Insegurança Alimentar 2009 (G4)\")\n\nabs19=pd.crosstab(base9.REGIAO,base9.Insegurança_Alimentar, margins=True, rownames=['REGIÃO'], colnames=['INSEGURANÇA ALIMENTAR'])\nabs19=abs19.loc[['NORTE','NORDESTE','SUDESTE','SUL','CENTRO-OESTE']]\nabs19", "OBSERVAÇÃO DA SITUAÇÃO NA ZONA URBANA E RURAL:\nASSIM COMO NA CELULA SUPERIOR, UM GRÁFICO INICIAL PERCENTUAL SEGUIDO DE UMA TABELA CONTENDO VALORES ABSOLUTOS QUE POSSIBILITAM OBSERVAR A DIFERENÇA ENTRE AS DUAS ZONAS", "tb2 = (pd.crosstab(base.ZONA,base.Insegurança_Alimentar,margins=True,rownames=[\"ZONA\"],colnames=[\"Insegurança Alimentar\"],normalize='index')*100).round(decimals=1)\nplot = tb2.plot(kind=\"bar\",title=\"Distribuição em Zonas de 
Insegurança Alimentar 2013 (G5)\")\n\nabs2=pd.crosstab(base.ZONA,base.Insegurança_Alimentar, margins=True, rownames=['ZONA'], colnames=['INSEGURANÇA ALIMENTAR'])\nabs2=abs2.loc[['Rural','Urbana']]\nabs2\n\n\ntb29 = (pd.crosstab(base9.ZONA,base9.Insegurança_Alimentar,margins=True,rownames=[\"ZONA\"],colnames=[\"Insegurança Alimentar\"],normalize='index')*100).round(decimals=1)\nplot = tb29.plot(kind=\"bar\",title=\"Distribuição em Zonas de Insegurança Alimentar 2009 (G6)\")\n\nabs29=pd.crosstab(base9.ZONA,base9.Insegurança_Alimentar, margins=True, rownames=['ZONA'], colnames=['INSEGURANÇA ALIMENTAR'])\nabs29=abs29.loc[['Rural','Urbana']]\nabs29\n", "CRUZAMENTO DE DADOS:\nSUB-DIVISÃO MAIS COMPLEXA, CADA ZONA DIVIDIDA POR ESTADO E A FREQUÊNCIA DE CADA UM DESSES, O OBJETIVO DESTE GRÁFICO É ANALISAR EM UMA ÚNICA IMAGEM AS DIFERENÇAS NOTÁVEIS ENTRE OS FATORES TERRITORIAIS ANALISADOS E ASSIM FOCAR DIRETAMENTE NAS REGIÕES QUE PRECISAM DA ANÁLISE PARA RESPONDER A PERGUNTA", "ct1=(pd.crosstab([base.REGIAO, base.ZONA],base.Insegurança_Alimentar, normalize='index')*100).round(decimals=1)\nct1\nprint(ct1,'\\n')\nplot = ct1.plot(kind='bar',title=\"Análise de Insegurança Alimentar 2013 (G7)\")\nax = plt.subplot(111)\nbox = ax.get_position()\nax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nplt.ylabel('Freq.Relativa (em %)')\nplt.show()\n\nct2=(pd.crosstab([base9.REGIAO, base9.ZONA],base9.Insegurança_Alimentar, normalize='index')*100).round(decimals=1)\nct2\nprint(ct2,'\\n')\nplot = ct2.plot(kind='bar',title=\"Análise de Insegurança Alimentar 2009 (G8)\")\nax = plt.subplot(111)\nbox = ax.get_position()\nax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nplt.ylabel('Freq.Relativa (em %)')\nplt.show()", "SEQUÊNCIA DE ANÁLISE PARA CADA ANO:\nObservando os dois últimos gráficos pode-se perceber precisamente as duas regiões que apresentam maior disparidade entre zona urbana e rural. No caso de 2013 (1°gráfico) Norte e Nordeste são as duas regiões que serão analisadas a fim de responder a pergunta-guia do projeto, já na situação de 2009 apresenta-se o Centro-Oeste e o Nordeste. 
\nANÁLISE QUANTITATIVA:\nOBSERVAR COMO SE COMPORTA A INSEGURANÇA ALIMENTAR DE ACORDO COM A RENDA FAMILIAR.\nO PRIMEIRO HISTOGRAMA DEMONSTRA A FREQUÊNCIA ENTRE AQUELES QUE RESPONDERAM PELO MENOS UM \"Sim\" NAS 4 PERGUNTAS INICIAIS E SÃO CONSIDERADOS PORTANTO, EM INSEGURANÇA ALIMENTAR.", "faixa = np.arange(0,7350,350)\nfrenda = pd.cut(base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO==\"NORTE\")], bins=faixa, right=False)\nt1 = (frenda.value_counts(sort=False, normalize=True)*100).round(decimals=1)\nprint(t1,\"\\n\")\n\nplot = base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO==\"NORTE\")].plot.hist(bins=faixa,title=\"Histograma - Insegurança Alimentar - NORTE - 2013 (H1)\", weights=zeros_like(base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO==\"NORTE\")])+1./base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO==\"NORTE\")].size*100, figsize=(6, 6), alpha=0.5)\nplt.ylabel('Frequência relativa (em %)')\nplt.xlabel('Renda (em reais)')\nplt.show()\n\n\nfaixa = np.arange(0,7350,350)\nfrenda2 = pd.cut(base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO==\"NORDESTE\")], bins=faixa, right=False)\nt2 = (frenda2.value_counts(sort=False, normalize=True)*100).round(decimals=1)\nprint(t2,\"\\n\")\n\nplot = base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO==\"NORDESTE\")].plot.hist(bins=faixa,title=\"Histograma - Insegurança Alimentar - NORDESTE - 2013(H2)\", weights=zeros_like(base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO==\"NORDESTE\")])+1./base.RENDA[(base.Insegurança_Alimentar=='Sim')&(base.REGIAO==\"NORDESTE\")].size*100, figsize=(6, 6), alpha=0.5,color=\"red\")\nplt.ylabel('Frequência relativa (em %)')\nplt.xlabel('Renda (em reais)')\nplt.show()\n\nfrenda9 = pd.cut(base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base.REGIAO==\"CENTRO-OESTE\")], bins=faixa, right=False)\nt19 = (frenda9.value_counts(sort=False, normalize=True)*100).round(decimals=1)\nprint(t19,\"\\n\")\n\nplot = base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO==\"CENTRO-OESTE\")].plot.hist(bins=faixa,title=\"Histograma - Insegurança Alimentar - CENTRO-OESTE - 2009(H3)\", weights=zeros_like(base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO==\"CENTRO-OESTE\")])+1./base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO==\"CENTRO-OESTE\")].size*100, figsize=(6, 6), alpha=0.5,color=\"chocolate\")\nplt.ylabel('Frequência relativa (em %)')\nplt.xlabel('Renda (em reais)')\nplt.show()\n\nfrenda29 = pd.cut(base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO==\"NORDESTE\")], bins=faixa, right=False)\nt29 = (frenda29.value_counts(sort=False, normalize=True)*100).round(decimals=1)\nprint(t29,\"\\n\")\n\nplot = base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO==\"NORDESTE\")].plot.hist(bins=faixa,title=\"Histograma - Insegurança Alimentar - NORDESTE - 2009(H4)\", weights=zeros_like(base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO==\"NORDESTE\")])+1./base9.RENDA[(base9.Insegurança_Alimentar=='Sim')&(base9.REGIAO==\"NORDESTE\")].size*100, figsize=(6, 6), alpha=0.5,color=\"darkslategray\")\nplt.ylabel('Frequência relativa (em %)')\nplt.xlabel('Renda (em reais)')\nplt.show()", "ANÁLISE INICIAL E NOVA FILTRAGEM:\nCOM A PRECISÃO DOS VALORES MOSTRADOS ACIMA, PODE-SE OBSERVAR ONDE HÁ MAIOR CONCENTRAÇÃO EM CADA UMA DAS REGIÕES DE INTERESSE DE ACORDO COM A DISPARIDADE ANALISADA ANTERIORAMENTE NOS GRÁFICOS. 
DESSA FORMA A PARTIR DE AGORA A ANÁLISE SE CENTRARÁ APENAS ÀQUELES QUE PASSARAM POR SITUACÃO DE INSEGURANÇA ABRINDO PARA UMA NOVA VARIÁVEL, CHAMADA DE PROBLEMA ALIMENTAR E PAUTADA EM PERGUNTAS QUE DEMONSTRAM FALTA DE COMIDA OU ALIMENTAÇÃO RESTRITA POR CONTA DE FALTA DE DINHEIRO.", "base=base[(base.Insegurança_Alimentar==\"Sim\")]\nbase", "TABELA 2 - 2013", "writer = pd.ExcelWriter('Tabela2-2013.xlsx',engine='xlsxwriter')\nbase.to_excel(writer,sheet_name=\"Projeto_1\")\nwriter.save()\n\nbase9=base9[(base9.Insegurança_Alimentar==\"Sim\")]\nbase9", "TABELA 2 - 2009", "writer = pd.ExcelWriter('Tabela2-2009.xlsx',engine='xlsxwriter')\nbase9.to_excel(writer,sheet_name=\"Projeto_1\")\nwriter.save()", "Caracterização dos problemas alimentares:\nOs próximos gráficos tem como objetivo avaliar, além do comportamento da variável \"problema alimentar\" de acordo com a renda mensal familiar comparar com a distribuição de \"insegurança alimentar\" ou seja se a distribuição analisada anteriormente se mantém de certa maneira nessa variável que por sinal é dependente da inicial, \"insegurança alimentar\".", "frenda3 = pd.cut(base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO==\"NORTE\")], bins=faixa, right=False)\nt3 = (frenda3.value_counts(sort=False, normalize=True)*100).round(decimals=1)\nprint(t3,\"\\n\")\n\nplot = base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO==\"NORTE\")].plot.hist(bins=faixa,title=\"Problema Alimentar - NORTE - 2013 (H5)\", weights=zeros_like(base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO==\"NORTE\")])+1./base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO==\"NORTE\")].size*100, figsize=(6, 6), alpha=0.5,color=\"purple\")\nplt.ylabel('Frequência relativa (em %)')\nplt.xlabel('Renda (em reais)')\nplt.show()\n\nfrenda4 = pd.cut(base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO==\"NORDESTE\")], bins=faixa, right=False)\nt4 = (frenda4.value_counts(sort=False, normalize=True)*100).round(decimals=1)\nprint(t4,\"\\n\")\n\nplot = base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO==\"NORDESTE\")].plot.hist(bins=faixa,title=\"Problema Alimentar - NORDESTE - 2013(H6)\", weights=zeros_like(base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO==\"NORDESTE\")])+1./base.RENDA[(base.Problema_Alimentar=='Sim')&(base.REGIAO==\"NORDESTE\")].size*100, figsize=(6, 6), alpha=0.5,color=\"darkgreen\")\nplt.ylabel('Frequência relativa (em %)')\nplt.xlabel('Renda (em reais)')\nplt.show()\n\nfrenda39 = pd.cut(base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base.REGIAO==\"CENTRO-OESTE\")], bins=faixa, right=False)\nt39 = (frenda39.value_counts(sort=False, normalize=True)*100).round(decimals=1)\nprint(t39,\"\\n\")\n\nplot = base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO==\"CENTRO-OESTE\")].plot.hist(bins=faixa,title=\"Problema Alimentar - CENTRO-OESTE - 2009(H7)\", weights=zeros_like(base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO==\"CENTRO-OESTE\")])+1./base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO==\"CENTRO-OESTE\")].size*100, figsize=(6, 6), alpha=0.5,color=\"black\")\nplt.ylabel('Frequência relativa (em %)')\nplt.xlabel('Renda (em reais)')\nplt.show()\n\nfrenda49 = pd.cut(base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base.REGIAO==\"CENTRO-OESTE\")], bins=faixa, right=False)\nt49 = (frenda49.value_counts(sort=False, normalize=True)*100).round(decimals=1)\nprint(t49,\"\\n\")\n\nplot = base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO==\"NORDESTE\")].plot.hist(bins=faixa,title=\"Problema Alimentar - NORDESTE - 
2009(H8) \", weights=zeros_like(base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO==\"NORDESTE\")])+1./base9.RENDA[(base9.Problema_Alimentar=='Sim')&(base9.REGIAO==\"NORDESTE\")].size*100, figsize=(6, 6), alpha=0.5,color=\"orange\")\nplt.ylabel('Frequência relativa (em %)')\nplt.xlabel('Renda (em reais)')\nplt.show()", "Em comparação com os primeiros histogramas pode-se dizer que se mantém o ideal da distribuição, ou seja os primeiros 4 intervalos (350 cada) concentram a maior parte das pessoas que já passaram por alguma situação de insegurança alimentar e também apresentaram algum tipo de problema com alimentação.\nConclusão\n Inicialmente deve-se destacar que as variáveis selecionadas foram apenas aquelas que existem nos 2 anos de pesquisa para que assim houvesse maior coerência no momento de comparação. Em termos de efeito conclusivo é válido observar primeiramente uma melhora percentual nos números de insegurança alimentar no país, os primeiros gráficos (G1 E G2) mostram isso a ponto de que em 2009 31,7% da população já havia passado por alguma situação de insegurança alimentar e 2013 mostrou que 23,8% apresentou essa falta de segurança. \n Em termos direcionados a pergunta-guia, o gráfico G3 já apresenta as regiões com maiores problemas quanto à insegurança alimentar e coincidentemente as mesmas que apresentam a maior divergência nos valores entre zona urbana e rural, lembrando que trata-se dos dados de 2013. Em relação a 2009, G4 nâo apresenta grande coerência entre os dois tipos de valores, porém o Nordeste já se mostra em destaque negativo pelos altíssimos números. Partindo disso deve-se partir para o foco diretamente, os gráficos G7 e G8 apresentam enfim as duas regiões de cada análise, primeiramente Norte e Nordeste, como já dito anteriormente, no ano de 2013, e Centro-Oeste e Nordeste em 2009. A partir daí vale observar os histogramas relacionados àqueles que tiveram insegurança alimentar de acordo com a renda mensal familiar, sendo assim H1,H2,H3 e H4 apresentam um certo padrão de concentração dos entrevistados nas 4 primeiras faixas de renda, sendo um intervalo de 350 entre cada uma. Isso mostra que basicamente a população mais pobre com rendas até aproximadamente 1500 sofrem mais com falta de alimentos nessas regiões de análise, que retomando apresentam maior disparidade entre zona urbana e rural para insegurança e segurança alimentar.\n Com o aprofundamento das questões, partiu-se para \"Problema Alimentar\" que pode ser descrito como problemas relacionados a falta de comida ou alimentação restrita devido basicamente a falta de capacidade monetária, e novamente pode-se observar uma concentração na margem esquerda (menor renda familiar), ou seja isso mostra que apesar de uma melhora ao longo dos 4 anos entre as duas pesquisas manteve-se um padrão negativo nas regiões, apesar do centro-oeste ter melhorado seus valores percentuais a região Norte se mostrou em decadência e \"tomou\" esse lugar no que se observou em 2013 tanto para \"problema\" quanto \"insegurança\" alimentar.\n Há uma possível demonstração de que as áreas centrais ainda se mostrem mais desenvolvidas e cada vez mais distantes de regiões periféricas, primeiro que na maioria dos casos observados a região urbana apresentava maiores problemas percentuais para acesso a alimentação e as próprias regiões de maior problema nessa questão, a manutenção do Nordeste e a passagem do Centro-Oeste justamente para outra região mais distante do centro desenvolvido do país, o Norte." ]
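To answer the guiding question programmatically rather than by eye, the income bracket with the highest concentration can be pulled straight from the binned frequency tables computed above (`t1` and `t2` for 2013, `t19` and `t29` for 2009). A small sketch, using only variables already defined in the notebook:

```python
# Income bracket (faixa de renda) holding the largest share of insecure households
for label, freq in [('NORTE 2013', t1), ('NORDESTE 2013', t2),
                    ('CENTRO-OESTE 2009', t19), ('NORDESTE 2009', t29)]:
    print(label, '->', freq.idxmax(), '(', freq.max(), '%)')
```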
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
phasedchirp/Assorted-Data-Analysis
exercises/SlideRule-DS-Intensive/Inferential Statistics/sliderule_dsi_inferential_statistics_exercise_2.ipynb
gpl-2.0
[ "Examining racial discrimination in the US job market\nBackground\nRacial discrimination continues to be pervasive in cultures throughout the world. Researchers examined the level of racial discrimination in the United States labor market by randomly assigning identical résumés black-sounding or white-sounding names and observing the impact on requests for interviews from employers.\nData\nIn the dataset provided, each row represents a resume. The 'race' column has two values, 'b' and 'w', indicating black-sounding and white-sounding. The column 'call' has two values, 1 and 0, indicating whether the resume received a call from employers or not.\nNote that the 'b' and 'w' values in race are assigned randomly to the resumes.\nExercise\nYou will perform a statistical analysis to establish whether race has a significant impact on the rate of callbacks for resumes.\nAnswer the following questions in this notebook below and submit to your Github account. \n\nWhat test is appropriate for this problem? Does CLT apply?\nWhat are the null and alternate hypotheses?\nCompute margin of error, confidence interval, and p-value.\nDiscuss statistical significance.\n\nYou can include written notes in notebook cells using Markdown: \n - In the control panel at the top, choose Cell > Cell Type > Markdown\n - Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet\nResources\n\nExperiment information and data source: http://www.povertyactionlab.org/evaluation/discrimination-job-market-united-states\nScipy statistical methods: http://docs.scipy.org/doc/scipy/reference/stats.html \nMarkdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet", "%matplotlib inline\nfrom __future__ import division\nimport matplotlib\nmatplotlib.rcParams['figure.figsize'] = (15.0,5.0)\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\n\ndata = pd.io.stata.read_stata('data/us_job_market_discrimination.dta')\nprint \"Total count: \",len(data)\nprint \"race == 'b': \",len(data[data.race=='b'])\nprint \"race == 'w': \",len(data[data.race=='w'])\n\ndata.head()\n\n# number of callbacks and proportion of callbacks\nprint \"Callback count for black-sounding names: \",sum(data[data.race=='b'].call)\nprint \"Callback proportion for black-sounding names: \",sum(data[data.race=='b'].call)/len(data[data.race=='b'])\nprint \"Callback count for white-sounding names: \",sum(data[data.race=='w'].call)\nprint \"Callback proportion for white-sounding names: \",sum(data[data.race=='w'].call)/len(data[data.race=='w'])", "The outcome variable here is binary, so this might be treated in several ways. First, it might be possible to apply the normal approximation to the binomial distribution. In this case, the distribution proportions is $\\mathcal{N}(np,np(1-p))$\nThere are a number of guidelines as to whether this is a suitable approximation (see Wikipedia for a list of such conditions), some of which include:\n\nn > 20 (or 30)\nnp > 5, np(1-p) > 5 (or 10)\n\nBut these conditions can be roughly summed up as not too small of a sample and an estimated proportion far enough from 0 and 1 that the distribution isn't overly skewed. 
If the normal approximation is reasonable, a z-test can be used with the following standard error calculation:\n$$SE = \\sqrt{\\hat{p}(1-\\hat{p})\\left(\\frac{1}{n_1}+\\frac{1}{n_2}\\right)}$$\nwhere $$\\hat{p}=\\frac{n_1 p_1+n_2 p_2}{n_1+n_2}$$\ngiving\n$$z = \\frac{p_1-p_2}{SE}$$", "xb = sum(data[data.race=='b'].call)\nnb = len(data[data.race=='b'])\nxw = sum(data[data.race=='w'].call)\nnw = len(data[data.race=='w'])\npHat = (nb*(xb/nb) + nw*(xw/nw))/(nb+nw)\nse = np.sqrt(pHat*(1-pHat)*(1/nb + 1/nw))\nz = (xb/nb -xw/nw)/se\nprint \"z-score:\",round(z,3),\"p =\", round(stats.norm.sf(abs(z))*2,6)", "So, the difference in probability of a call-back is statistically significant here.\nPlotting the distribution for call-backs with black-sounding names, it looks fairly symmetrical and well-behaved, so it's quite likely that the normal approximation is fairly reasonable here.", "pb = xb/nb\nx = np.arange(110,210)\nmatplotlib.pyplot.vlines(x,0,stats.binom.pmf(x,nb,pb))", "Alternatives\nBecause the normal distribution is only an approximation, the assumptions don't always work out for a particular data set. There are several methods for calculating confidence intervals around the estimated proportion. For example, with a significance level of $\\alpha$, the Jeffreys interval is defined as the $\\frac{\\alpha}{2}$ and $1-\\frac{\\alpha}{2}$ quantiles of a beta$(x+\\frac{1}{2}, n-x+\\frac{1}{2})$ distribution. Using scipy:", "intervalB = (stats.beta.ppf(0.025,xb+0.5,nb-xb+0.5),stats.beta.ppf(0.975,xb+0.5,nb-xb+0.5))\nintervalW = (stats.beta.ppf(0.025,xw+0.5,nw-xw+0.5),stats.beta.ppf(0.975,xw+0.5,nw-xw+0.5))\nprint \"Interval for black-sounding names: \",map(lambda x: round(x,3),intervalB)\nprint \"Interval for white-sounding names: \",map(lambda x: round(x,3),intervalW)", "The complete lack of overlap in the intervals here implies a significant difference with $p\\lt 0.05$ (Cumming & Finch, 2005).
Given that this particular interval can be interpreted as a Bayesian credible interval, this is a fairly comfortable conclusion.\nCalculating credible intervals using Markov Chain Monte Carlo\nSlightly different method of calculating approximately the same thing (the beta distribution used above the posterior distribution given given the observations with a Jeffreys prior):", "import pystan\n\nmodelCode = '''\ndata {\n int<lower=0> N; \n int<lower=1,upper=2> G[N];\n int<lower=0,upper=1> y[N];\n} \nparameters {\n real<lower=0,upper=1> theta[2];\n} \nmodel {\n # beta(0.5,0.5) prior\n theta ~ beta(0.5,0.5);\n # bernoulli likelihood\n # This could be modified to use a binomial with successes and counts instead\n for (i in 1:N) \n y[i] ~ bernoulli(theta[G[i]]);\n}\ngenerated quantities {\n real diff;\n // difference in proportions:\n diff <- theta[1]-theta[2];\n}\n'''\n\nmodel = pystan.StanModel(model_code=modelCode)\n\ndataDict = dict(N=len(data),G=np.where(data.race=='b',1,2),y=map(int,data.call))\nfit = model.sampling(data=dataDict)\n\nprint fit\n\nsamples = fit.extract(permuted=True)\nMCMCIntervalB = np.percentile(samples['theta'].transpose()[0],[2.5,97.5])\nMCMCIntervalW = np.percentile(samples['theta'].transpose()[1],[2.5,97.5])\nfit.plot().show()", "Estimating rough 95% credible intervals:", "print map(lambda x: round(x,3),MCMCIntervalB)\nprint map(lambda x: round(x,3),MCMCIntervalW)", "So, this method gives a result that fits quite nicely with previous results, while allowing more flexible specification of priors.\nInterval for sampled differences in proportions:", "print map(lambda x: round(x,3),np.percentile(samples['diff'],[2.5,97.5]))", "And this interval does not include 0, so that we're left fairly confident that black-sounding names get less call-backs, although the estimated differences in proportions are fairly small (significant in the technical sense isn't really the right word to describe this part).\nAccounting for additional factors:\nA next step here would be to check whether other factors influence the proportion of call-backs. This can be done using logistic regression, although there will be a limit to the complexity of the model to be fit, given that the proportion of call-backs is quite small, potentially leading to small cell-counts and unstable estimates (one rule of thumb being n>30 per cell is reasonably safe).", "data.columns\n\n# The data is balanced by design, and this mostly isn't a problem for relatively simple models.\n# For example:\npd.crosstab(data.computerskills,data.race)\n\nimport statsmodels.formula.api as smf", "Checking to see if computer skills have a significant effect on call-backs:", "glm = smf.Logit.from_formula(formula=\"call~race+computerskills\",data=data).fit()\nglm.summary()", "The effect might be described as marginal, but probably best not to over-interpret. But maybe the combination of race and computer skills makes a difference? Apparently not in this data (not even an improvement to the model log-likelihood or other measures of model fit):", "glm2 = smf.Logit.from_formula(formula=\"call~race*computerskills\",data=data).fit()\nglm2.summary()", "But, there's still rather a lot of stuff left to explore in this data." ]
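As a sanity check on the hand-rolled z-test, the same comparison can be run through scipy's exact and chi-square contingency-table tests, which do not rely on the normal approximation. This is a sketch reusing `xb`, `nb`, `xw`, `nw` and the `stats` import from above; both `fisher_exact` and `chi2_contingency` are long-standing `scipy.stats` functions.

```python
# Cross-check of the two-proportion z-test with exact and chi-square tests.
# The 2x2 table is [callbacks, no callbacks] for black- and white-sounding names.
table = [[int(xb), int(nb - xb)],
         [int(xw), int(nw - xw)]]

odds_ratio, p_fisher = stats.fisher_exact(table)
chi2, p_chi2, dof, expected = stats.chi2_contingency(table)

print "Fisher exact p = %.6f" % p_fisher
print "Chi-square p = %.6f (chi2 = %.2f)" % (p_chi2, chi2)
```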
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.17/_downloads/f44d9c0360e7806c2f8988ccd7a3b432/plot_point_spread.ipynb
bsd-3-clause
[ "%matplotlib inline", "Corrupt known signal with point spread\nThe aim of this tutorial is to demonstrate how to put a known signal at a\ndesired location(s) in a :class:mne.SourceEstimate and then corrupt the\nsignal with point-spread by applying a forward and inverse solution.", "import os.path as op\n\nimport numpy as np\nfrom mayavi import mlab\n\nimport mne\nfrom mne.datasets import sample\n\nfrom mne.minimum_norm import read_inverse_operator, apply_inverse\nfrom mne.simulation import simulate_stc, simulate_evoked", "First, we set some parameters.", "seed = 42\n\n# parameters for inverse method\nmethod = 'sLORETA'\nsnr = 3.\nlambda2 = 1.0 / snr ** 2\n\n# signal simulation parameters\n# do not add extra noise to the known signals\nnave = np.inf\nT = 100\ntimes = np.linspace(0, 1, T)\ndt = times[1] - times[0]\n\n# Paths to MEG data\ndata_path = sample.data_path()\nsubjects_dir = op.join(data_path, 'subjects')\nfname_fwd = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis-meg-oct-6-fwd.fif')\nfname_inv = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis-meg-oct-6-meg-fixed-inv.fif')\n\nfname_evoked = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis-ave.fif')", "Load the MEG data", "fwd = mne.read_forward_solution(fname_fwd)\nfwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True,\n use_cps=False)\nfwd['info']['bads'] = []\ninv_op = read_inverse_operator(fname_inv)\n\nraw = mne.io.RawFIF(op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_raw.fif'))\nevents = mne.find_events(raw)\nevent_id = {'Auditory/Left': 1, 'Auditory/Right': 2}\nepochs = mne.Epochs(raw, events, event_id, baseline=(None, 0), preload=True)\nepochs.info['bads'] = []\nevoked = epochs.average()\n\nlabels = mne.read_labels_from_annot('sample', subjects_dir=subjects_dir)\nlabel_names = [l.name for l in labels]\nn_labels = len(labels)", "Estimate the background noise covariance from the baseline period", "cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)", "Generate sinusoids in two spatially distant labels", "# The known signal is all zero-s off of the two labels of interest\nsignal = np.zeros((n_labels, T))\nidx = label_names.index('inferiorparietal-lh')\nsignal[idx, :] = 1e-7 * np.sin(5 * 2 * np.pi * times)\nidx = label_names.index('rostralmiddlefrontal-rh')\nsignal[idx, :] = 1e-7 * np.sin(7 * 2 * np.pi * times)", "Find the center vertices in source space of each label\nWe want the known signal in each label to only be active at the center. We\ncreate a mask for each label that is 1 at the center vertex and 0 at all\nother vertices in the label. 
This mask is then used when simulating\nsource-space data.", "hemi_to_ind = {'lh': 0, 'rh': 1}\nfor i, label in enumerate(labels):\n # The `center_of_mass` function needs labels to have values.\n labels[i].values.fill(1.)\n\n # Restrict the eligible vertices to be those on the surface under\n # consideration and within the label.\n surf_vertices = fwd['src'][hemi_to_ind[label.hemi]]['vertno']\n restrict_verts = np.intersect1d(surf_vertices, label.vertices)\n com = labels[i].center_of_mass(subject='sample',\n subjects_dir=subjects_dir,\n restrict_vertices=restrict_verts,\n surf='white')\n\n # Convert the center of vertex index from surface vertex list to Label's\n # vertex list.\n cent_idx = np.where(label.vertices == com)[0][0]\n\n # Create a mask with 1 at center vertex and zeros elsewhere.\n labels[i].values.fill(0.)\n labels[i].values[cent_idx] = 1.", "Create source-space data with known signals\nPut known signals onto surface vertices using the array of signals and\nthe label masks (stored in labels[i].values).", "stc_gen = simulate_stc(fwd['src'], labels, signal, times[0], dt,\n value_fun=lambda x: x)", "Plot original signals\nNote that the original signals are highly concentrated (point) sources.", "kwargs = dict(subjects_dir=subjects_dir, hemi='split', smoothing_steps=4,\n time_unit='s', initial_time=0.05, size=1200,\n views=['lat', 'med'])\nclim = dict(kind='value', pos_lims=[1e-9, 1e-8, 1e-7])\nfigs = [mlab.figure(1), mlab.figure(2), mlab.figure(3), mlab.figure(4)]\nbrain_gen = stc_gen.plot(clim=clim, figure=figs, **kwargs)", "Simulate sensor-space signals\nUse the forward solution and add Gaussian noise to simulate sensor-space\n(evoked) data from the known source-space signals. The amount of noise is\ncontrolled by nave (higher values imply less noise).", "evoked_gen = simulate_evoked(fwd, stc_gen, evoked.info, cov, nave,\n random_state=seed)\n\n# Map the simulated sensor-space data to source-space using the inverse\n# operator.\nstc_inv = apply_inverse(evoked_gen, inv_op, lambda2, method=method)", "Plot the point-spread of corrupted signal\nNotice that after applying the forward- and inverse-operators to the known\npoint sources that the point sources have spread across the source-space.\nThis spread is due to the minimum norm solution so that the signal leaks to\nnearby vertices with similar orientations so that signal ends up crossing the\nsulci and gyri.", "figs = [mlab.figure(5), mlab.figure(6), mlab.figure(7), mlab.figure(8)]\nbrain_inv = stc_inv.plot(figure=figs, **kwargs)", "Exercises\n\nChange the method parameter to either dSPM or MNE to explore the\n effect of the inverse method.\nTry setting evoked_snr to a small, finite value, e.g. 3., to see the\n effect of noise." ]
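A minimal sketch of the two exercises, reusing the objects defined above. The exercise text mentions `evoked_snr`, but in this version of the script the noise level is controlled by the `nave` argument, so that is what the sketch varies; `nave=30` is an arbitrary choice here, and the printed peak amplitudes are just a quick, crude summary of how the estimates change.

```python
# Exercise sketch: rerun the inverse step with different methods, and with a
# noisier simulated evoked (finite nave) to see how the point spread changes.
evoked_noisy = simulate_evoked(fwd, stc_gen, evoked.info, cov, nave=30,
                               random_state=seed)

for inv_method in ('sLORETA', 'dSPM', 'MNE'):
    stc_clean = apply_inverse(evoked_gen, inv_op, lambda2, method=inv_method)
    stc_noisy = apply_inverse(evoked_noisy, inv_op, lambda2, method=inv_method)
    print(inv_method,
          float(np.abs(stc_clean.data).max()),
          float(np.abs(stc_noisy.data).max()))
```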
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
gfeiden/Notebook
Daily/20160910_low_mass_polytrope_lum.ipynb
mit
[ "Determining initial $T_{\\rm eff}$ and luminosity for DMESTAR seed polytropes\nCurrently, we are having difficulty with models in the mass range of $0.14 M_{\\odot}$ -- $0.22 M_{\\odot}$ not converging after an initial relaxation. There are several potential candidates for why the models are not converging. The first is FreeEOS is running with a set of plasma properties (pressure, temperature) that are outside of it's typical working range. I suspect this is not the case, as lower mass models converge properly, despite having cooler temperatures. Other potential candidates are the seed luminosity and $T_{\\rm eff}$ supplied to $\\texttt{newpoly}$ for computation of an initial polytrope model that DMESTAR then relaxes before a full stellar evolution run. To test this idea, we can compare model properties for the seed polytropes with the final relaxed quantities determined by DMESTAR.", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np", "Current seed values\nScripts used to generate a new polytrope for DMESTAR models rely on a piece-wise function to generate an appropriate combination of $T_{\\rm eff}$ and luminosity for a model based on the requested stellar mass and solar composition. That piece-wise function is \n\\begin{align}\n \\log(T) & = 3.64 & M \\ge 3.9 \\\n \\log(L) & = 0.2\\cdot (M - 5.0) + 2.6 & \\\n & \\\n \\log(T) & = -0.028\\cdot M + 3.875 & 3.9 > M \\ge 3.0 \\\n \\log(L) & = 0.55 \\cdot M + 0.1 & \\ \n & \\\n \\log(T) & = 0.039\\cdot M + 3.5765 & 3.0 > M \\ge 1.5 \\\n \\log(L) & = 1.7 & \\\n & \\\n \\log(T) & = 0.039\\cdot M + 3.5765 & 1.5 > M \\ge 0.23 \\\n \\log(L) & = 0.85\\cdot M + 0.4 & \\\n & \\\n \\log(T) & = 0.614\\cdot M + 3.3863 & 0.23 > M \\\n \\log(L) & = -0.16877\\cdot M - 0.117637 & \\\n\\end{align}\nWhile models with masses below $0.23 M$ are found to converge, the greatest issues occur right in the vicinity of the final piecewise condition. We can view this graphically,", "fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n\nmasses = np.arange(0.08, 5.0, 0.02)\n\n# compute and plot temperature relationship\np1 = [3.64 for m in masses if m >= 3.9]\np2 = [-0.028*m + 3.875 for m in masses if 3.9 > m >= 3.0]\np3 = [0.039*m + 3.5765 for m in masses if 3.0 > m >= 0.23]\np4 = [0.614*m + 3.3863 for m in masses if m < 0.23]\ntr = p4 + p3 + p2 + p1\n\nax[0].set_xlabel(\"initial mass [Msun]\")\nax[0].set_ylabel(\"log(T / K)\")\n\nax[0].plot(masses, tr, '-', c='#dc143c', lw=3)\n\n# plot luminosity relationship\n# compute and plot temperature relationship\np1 = [0.2*(m - 5.0) + 2.6 for m in masses if m >= 3.9]\np2 = [0.55*m + 0.1 for m in masses if 3.9 > m >= 3.0]\np3 = [1.7 for m in masses if 3.0 > m >= 1.5]\np4 = [0.85*m + 0.4 for m in masses if 1.5 > m >= 0.23]\np5 = [-0.16877*m - 0.117637 for m in masses if m < 0.23]\nlr = p5 + p4 + p3 + p2 + p1\n\nax[1].set_xlabel(\"initial mass [Msun]\")\nax[1].set_ylabel(\"log(L / Lsun)\")\n\nax[1].plot(masses, lr, '-', c='#dc143c', lw=3)", "Relaxed model values\nWe can compare the relationship(s) quoted above with model values for temperature and luminosity after the model has relaxed to a stable configuration. This takes only a couple time steps to achieve, so we will look at the model relationship during the third time step for all models with masses between 0.08 and 5.0 Msun. 
Models are taken from a recent study where we used the most up-to-date version of the Dartmouth models for young stars (Feiden 2016).", "model_directory = \"../../papers/MagneticUpperSco/models/trk/std/\"\n\n# get all file names\nfrom os import listdir\nall_fnames = listdir(model_directory)\n\n# sort out only those file names that end in .trk\nfnames = [f for f in all_fnames if f[-4:] == \".trk\"]\n\n# sort numerically\nfnames = sorted(fnames)", "To select which model time step is most representative of a relaxed model, we can step through the first 50 iterations to find if there are any noticable jumps in model properties.", "fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n\nmodel_props = np.empty((len(fnames), 3))\nfor j in range(0, 50):\n for i, f in enumerate(fnames):\n model_props[i, 0] = float(f[1:5])/1000.0\n \n try:\n trk = np.genfromtxt(model_directory + f, usecols=(0, 1, 2, 3))\n except ValueError: \n model_props[i, 1] = 0.0 # temperature\n model_props[i, 2] = 0.0 # luminosity\n continue\n \n model_props[i, 1] = trk[j, 1] # temperature\n model_props[i, 2] = trk[j, 3] # luminosity\n\n ax[0].semilogx(model_props[:,0], model_props[:,1], '-', c='#008b8b', lw=3)\n ax[1].semilogx(model_props[:,0], model_props[:,2], '-', c='#008b8b', lw=3)\n", "We can now iterate through these filenames and save the third timestep to an array.", "model_props = np.empty((len(fnames), 3))\nfor i, f in enumerate(fnames):\n model_props[i, 0] = float(f[1:5])/1000.0\n \n try:\n trk = np.genfromtxt(model_directory + f, usecols=(0, 1, 2, 3))\n except ValueError: \n model_props[i, 1] = 0.0 # temperature\n model_props[i, 2] = 0.0 # luminosity\n continue\n \n model_props[i, 1] = trk[1, 1] # temperature\n model_props[i, 2] = trk[1, 3] # luminosity", "Plotting these two relations, we can compare against the function used to generate the polytrope seed model.", "fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n\nmasses = np.arange(0.08, 5.0, 0.02)\n\nax[0].set_xlabel(\"initial mass [Msun]\")\nax[0].set_ylabel(\"log(T / K)\")\n\nax[0].semilogx(model_props[:,0], model_props[:,1], '-', c='#008b8b', lw=3)\nax[0].semilogx(masses, tr, '-', c='#dc143c', lw=3)\n\nax[1].set_xlabel(\"initial mass [Msun]\")\nax[1].set_ylabel(\"log(L / Lsun)\")\n\nax[1].semilogx(model_props[:,0], model_props[:,2], '-', c='#008b8b', lw=3)\nax[1].semilogx(masses, lr, '-', c='#dc143c', lw=3)", "There are clear discrepancies, particularly in the low-mass regime. However, we note there are significant differences in relaxed effective temperatures starting around 1.5 solar masses. Luminosities tend to trace the relaxed models quite well until approximately 0.4 Msun. Since these are logarithmic values, noticeable differences are quite sizeable when it comes to model adjustments during runtime. 
It's quite likely that corrections will exceed tolerances in the allowed parameter adjustments during a model's evolution.\nEffective temperature", "tp1 = np.array([line for line in model_props if line[0] < 0.23])\ntp2 = np.array([line for line in model_props if 0.23 <= line[0] < 1.5])\n\ntpoly1 = np.polyfit(tp1[:,0], tp1[:,1], 2)\ntpoly2 = np.polyfit(tp2[:,0], tp2[:,1], 3)\n\nfig, ax = plt.subplots(1, 1, figsize=(8, 4))\n\nax.semilogx(tp1[:,0], tp1[:,1], '-', c='#008b8b', lw=3)\nax.semilogx(tp2[:,0], tp2[:,1], '-', c='#008b8b', lw=3)\nax.semilogx(tp1[:,0], tpoly1[0]*tp1[:,0]**2 + tpoly1[1]*tp1[:,0] + tpoly1[2], '--', c='black', lw=3)\nax.semilogx(tp2[:,0], tpoly2[0]*tp2[:,0]**3 + tpoly2[1]*tp2[:,0]**2 + tpoly2[2]*tp2[:,0] + tpoly2[3], '--', c='black', lw=3)", "Luminosity\nAbove 1.5 Msun, there appear to be very little deviations of the true model sequence from the initial seed model sequence. We can thus leave this parameteriztion alone. Below 1.5 Msun, we can alter the shape of the relationship down to 0.23 Msun. In addition, we can prescribe a new shape to the relationship for objects with masses below 0.23 Msun.", "p1 = np.array([line for line in model_props if line[0] < 0.23])\np2 = np.array([line for line in model_props if 0.23 <= line[0] < 1.5])\n\npoly1 = np.polyfit(p1[:,0], p1[:,2], 2)\npoly2 = np.polyfit(p2[:,0], p2[:,2], 2)\n\nfig, ax = plt.subplots(1, 1, figsize=(8, 4))\n\nax.semilogx(p1[:,0], p1[:,2], '-', c='#008b8b', lw=3)\nax.semilogx(p2[:,0], p2[:,2], '-', c='#008b8b', lw=3)\nax.semilogx(p1[:,0], poly1[0]*p1[:,0]**2 + poly1[1]*p1[:,0] + poly1[2], '--', c='black', lw=3)\nax.semilogx(p2[:,0], poly2[0]*p2[:,0]**2 + poly2[1]*p2[:,0] + poly2[2], '--', c='black', lw=3)", "Implementation\nThese new fits better represent the relaxed models, but will they work when implemented as seed values?", "print \"log(T) and log(L) Coefficients for the lowest mass objects: \\n\", tpoly1, poly1\nprint \"\\n\\nlog(T) and log(L) Coefficients for low mass objects: \\n\", tpoly2, poly2", "The new parameterization had no influence over model convergence.\nUPDATE\nError appears to have been due to a value (JCORE) being read or set to zero erroneously. Since it is used to define an array index later in the routine (STARIN), a memory reference error was thrown and SEGFAULT initiated. A temporary fix has been issued." ]
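To turn the fitted coefficients into something the polytrope seed script could use, they can be wrapped in a single piecewise evaluator. This is only a sketch: the low-mass branches use the `polyfit` coefficients computed above, while the branches for masses at or above 1.5 Msun keep the original parameterization quoted at the top of the notebook.

```python
def seed_polytrope(mass):
    """Return (log_Teff, log_L) seed values for a given mass in Msun (sketch)."""
    if mass < 0.23:
        return np.polyval(tpoly1, mass), np.polyval(poly1, mass)
    elif mass < 1.5:
        return np.polyval(tpoly2, mass), np.polyval(poly2, mass)
    elif mass < 3.0:
        return 0.039*mass + 3.5765, 1.7
    elif mass < 3.9:
        return -0.028*mass + 3.875, 0.55*mass + 0.1
    else:
        return 3.64, 0.2*(mass - 5.0) + 2.6

print(seed_polytrope(0.18))  # one of the problematic masses (0.14-0.22 Msun)
```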
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
timothydmorton/usrp-sciprog
day4/Newton-Method.ipynb
mit
[ "Optimization\nWe're often interested in the best-fitting model to some data. On Day 3, we introduced the concept of a likelihood and least-squares fitting. For linear models, we can do that in one step because the problem is uniquely determined. Today we will introduce how to fit functions that have non-linear parameters.\nGradients!\nWe will assume the we know the likelihood $\\mathcal{L}$ (it's often Gaussian), which means that we have a function that is maximized with the choice of good parameters. The function we normally work with is\n$$\nf(x) = - \\log\\mathcal{L}(x\\mid\\mathcal{D})\n$$\nwhich we then minimize. The log is there to remove the exponentials in many likelihoods. For example, for the ordinary least-squares solution, $f=\\chi^2$.\n\nMind the sign!\nIt's very common to write down a model, optimize it, and then get some nonsense fit from the Minimum-Likelihood™ parameters.\n\nThe variable $x$ stands for the parameter we want to find the optimal value for. Notice that we don't require it to have any specific relation (for instance linear). Instead, we will demand that $f(x)$ represents a well-behaved function: we can expect derivatives of $f$ to exist everywhere in the region of interest. We can thus write down the Taylor series expansion for $f$ about some point $x_0$:\n$$\nf (x) = f (x_0) + g(x_0) (x - x_0) + \\frac{1}{2} H(x_0) (x - x_0)^2 + \\mathcal{O}((x-x_0)^3)\n$$\nwhere $g$ is the gradient, i.e. $g \\equiv df(x)/dx$, and the Hessian $H$ is $H \\equiv d^2 f(x) / dx^2$.\nAlthough we don't know anything a priori about the convergence of this series, it is clear that as the distance $x - x_0$ becomes smaller, the higher-order terms become less important.\nThe first term of the above series is constant, so it will not tell much about where to look for a minimum. The second term is proportional to the gradient, telling in which direction the function is decreasing fastest, but it doesn't tell us what step size to take.\nA first-order gradient descent method thus is typically a fixed-point iteration of the kind\n$$\nx_{t+1} = x_t - \\lambda_t g(x_t)\n$$\nAt iteration $t$, it goes downhill by a certain amount $\\lambda_t$, which yet needs to be determined; setting it properly may require experience in the dark arts.\nThe third, or quadratic term describes a parabolic behavior and is therefore the lowest-order term to predict a minimum. Unlike $g$, we can expect $H$ to be roughly constant over small regions because it's variations are of higher-order (and in the case of a true parabola: identically zero).\nThus second-order gradient descent (also called Newton methods) have fixed-point iterations of the form\n$$\nx_{t+1} = x_t - H^{-1}(x_t) g(x_t)\n$$\nWe'll see why in a minute. This means that the optimal step size for a quadratic approximation of the function $f$ is given by the inverse curvature of $f$. 
That sounds intuitive enough, but let's have a picture anyway.", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# two parabolae\nf = lambda x, c: c*x**2\nc = 1, 0.5\n\n# initial point\nx_ = -0.75\ny_ = [ f(x_, ci) for ci in c ]\n\n# compute gradient and hessian\ng = [ 2*c[i]*x_ for i in range(2) ]\nH = [ 2*c[i] for i in range(2) ]\n\n# Newton step\nx__ = [ x_ - 1/H[i]*g[i] for i in range(2) ]\ny__ = [ f(xi, ci) for xi, ci in zip(x__, c) ]\n\n# plotting\nx = np.linspace(-1,1,100)\nfig, axes = plt.subplots(1, 2, figsize=(12,6))\naxes[0].plot(x, f(x, c[0]))\naxes[1].plot(x, f(x, c[1]))\naxes[0].axis('off')\naxes[1].axis('off')\naxes[0].scatter([x_, x__[0]], [y_[0], y__[0]], c=['k', 'w'], ec='k', s=100, zorder=10)\naxes[0].plot([x_, x_, x__[0]], [y_[0], y__[0], y__[0]], c='k')\naxes[1].scatter([x_, x__[1]], [y_[1], y__[1]], c=['k', 'w'], ec='k', s=100, zorder=10)\naxes[1].plot([x_, x_, x__[1]], [y_[1], y__[1], y__[1]], c='k')\naxes[0].text(0,0.25,'$f(x)=x^2,\\,H=2$', ha='center', size=16)\naxes[1].text(0,0.25,'$f(x)=x^2/2,\\,H=1$', ha='center', size=16)\naxes[0].text(x_/2, x__[0]-0.01, '$\\Delta x$', ha='center', va='top', size=16)\naxes[1].text(x_/2, x__[1]-0.01, '$\\Delta x$', ha='center', va='top', size=16)\naxes[0].text(x_-0.01, y_[0]/2, '$\\Delta y$', ha='right', va='center', size=16)\naxes[1].text(x_-0.01, y_[1]/2, '$\\Delta y$', ha='right', va='center', size=16)\naxes[0].set_ylim(-0.2,1.1)\naxes[1].set_ylim(-0.2,1.1)\nfig.tight_layout()", "Despite having different slopes at the starting position (filled circle), the Newton scheme performs only a single step (open circle) to move to the exact minimum, from any starting position, if the function is quadratic. This is even more useful because \n\nAny smooth function close to its minimum looks like a quadratic function!\n\nThat's a consequence of the Taylor expansion because the first-order term $g$ vanishes close to the minimum, so all deviations from the quadratic form are of order 3 or higher in $x-x_0$.\nSo, why doesn't everyone compute the Hessian for optimization. Well, it's typically expensive to compute a second derivative. And in $d$ dimensions (one for each parameter), the Hessian is a matrix with $d(d+1)/2$ elements. This is why there are several quasi-Newton methods like BFGS, that accumulate information from previous iterations into an estimate of $H$.\nNewton's Method for finding a root\nNewton's method was initially designed to find the root of a function, not its minimum. So, let's find out how these two are connected.\nThe central idea is to approximate $f$ by its tangent at some initial position $x_0$:\n$$\ny = f(x_0) + g(x_0) (x-x_0)\n$$\nAs we can see in this animation from Wikipedia, the $x$-intercept of this line is then closer to the root than the starting position $x_0$:\n\nThat is, we need to solve the linear relation\n$$\nf(x_0) + g(x_0) (x-x_0) = 0\n$$\nfor $x$ to get the updated position. In 1D: $x_1 = x_0 - f(x_0)/g(x_0)$. Repeating this sequence\n$$\nx_{t+1} = x_t - \\frac{f(x_t)}{g(x_t)}\n$$\nwill yield a fixed point, which is the root of $f$ if one exists in the vicinity of $x_0$.", "def newtons_method(f, df, x0, tol=1E-6):\n x_n = x0 \n while abs(f(x_n)) > tol:\n x_n = x_n - f(x_n)/df(x_n)\n return x_n", "Minimizing a function\nAs the maximum and minimum of a function are defined by $f'(x) = 0$, we can use Newton's method to find extremal points by applying it to the first derivative. 
That's the origin for the Newton update formula above:\n$$\nx_{t+1} = x_t - H^{-1}(x_t) \ g(x_t)\n$$\nLet's try this with a simple function with a known minimum:", "# define a test function\ndef f(x):\n return (x-3)**2 - 9\n\ndef df(x):\n return 2*(x-3)\n\ndef df2(x):\n return 2.\n\nroot = newtons_method(f, df, x0=0.1)\nprint (\"root {0}, f(root) = {1}\".format(root, f(root)))\n\nminimum = newtons_method(df, df2, x0=0.1)\nprint (\"minimum {0}, f'(minimum) = {1}\".format(minimum, df(minimum)))", "There is an important qualifier in the statement about fixed points: a root needs to exist in the vicinity of $x_0$! Let's see what happens if that's not the case:", "def g(x):\n return (x-3)**2 + 1\ndg = df # same derivatives for f and g\nnewtons_method(g, dg, x0=0.1)", "As long as you don't interrupt the execution of this cell (Tip: click \"Interrupt Kernel\"), newtons_method will never terminate or come back with a result.\nWith a little more defensive programming we can make sure that the function will terminate after a given number of iterations:", "def newtons_method2(f, df, x0, tol=1E-6, maxiter=100000):\n x_n = x0 \n for _ in range(maxiter):\n x_n = x_n - f(x_n)/df(x_n)\n \n if abs(f(x_n)) < tol:\n return x_n\n \n raise RuntimeError(\"Failed to find a minimum within {} iterations \".format(maxiter))\n\nnewtons_method2(g, dg, x0=0.1)", "Using scipy.optimize\nscipy comes with a pretty feature-rich optimization package, for one- and multi-dimensional optimization. As so often, it's better (as in faster and more reliable) to leverage existing and battle-tested code than to try to implement it yourself.\nExercise 1:\nFind the minimum of f with scipy.optimize.minimize_scalar. Look up the various arguments to the function in the documentation (either online or by typing scipy.optimize.minimize_scalar?) and choose appropriate inputs. When done, visualize your result to confirm its correctness.\nExercise 2:\nTo make this more interesting, we'll create a new multi-dimensional function that resembles f:", "def h(x, p):\n return np.sum(np.abs(x-3)**p, axis=-1) - 9", "In 2D, find the minimum of h for p=2 with scipy.optimize.minimize. Note that you have not been given a derivative of h. You can choose to compute it analytically, or see if minimize has options that allow you to work without.\nWhen done, visualize your result to confirm its correctness." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
vvishwa/deep-learning
batch-norm/Batch_Normalization_Exercises.ipynb
mit
[ "Batch Normalization – Practice\nBatch normalization is most useful when building deep neural networks. To demonstrate this, we'll create a convolutional neural network with 20 convolutional layers, followed by a fully connected layer. We'll use it to classify handwritten digits in the MNIST dataset, which should be familiar to you by now.\nThis is not a good network for classfying MNIST digits. You could create a much simpler network and get better results. However, to give you hands-on experience with batch normalization, we had to make an example that was:\n1. Complicated enough that training would benefit from batch normalization.\n2. Simple enough that it would train quickly, since this is meant to be a short exercise just to give you some practice adding batch normalization.\n3. Simple enough that the architecture would be easy to understand without additional resources.\nThis notebook includes two versions of the network that you can edit. The first uses higher level functions from the tf.layers package. The second is the same network, but uses only lower level functions in the tf.nn package.\n\nBatch Normalization with tf.layers.batch_normalization\nBatch Normalization with tf.nn.batch_normalization\n\nThe following cell loads TensorFlow, downloads the MNIST dataset if necessary, and loads it into an object named mnist. You'll need to run this cell before running anything else in the notebook.", "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True, reshape=False)", "Batch Normalization using tf.layers.batch_normalization<a id=\"example_1\"></a>\nThis version of the network uses tf.layers for almost everything, and expects you to implement batch normalization using tf.layers.batch_normalization \nWe'll use the following function to create fully connected layers in our network. We'll create them with the specified number of neurons and a ReLU activation function.\nThis version of the function does not include batch normalization.", "\"\"\"\nDO NOT MODIFY THIS CELL\n\"\"\"\ndef fully_connected(prev_layer, num_units):\n \"\"\"\n Create a fully connectd layer with the given layer as input and the given number of neurons.\n \n :param prev_layer: Tensor\n The Tensor that acts as input into this layer\n :param num_units: int\n The size of the layer. That is, the number of units, nodes, or neurons.\n :returns Tensor\n A new fully connected layer\n \"\"\"\n layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)\n return layer", "We'll use the following function to create convolutional layers in our network. They are very basic: we're always using a 3x3 kernel, ReLU activation functions, strides of 1x1 on layers with odd depths, and strides of 2x2 on layers with even depths. 
We aren't bothering with pooling layers at all in this network.\nThis version of the function does not include batch normalization.", "\"\"\"\nDO NOT MODIFY THIS CELL\n\"\"\"\ndef conv_layer(prev_layer, layer_depth):\n \"\"\"\n Create a convolutional layer with the given layer as input.\n \n :param prev_layer: Tensor\n The Tensor that acts as input into this layer\n :param layer_depth: int\n We'll set the strides and number of feature maps based on the layer's depth in the network.\n This is *not* a good way to make a CNN, but it helps us create this example with very little code.\n :returns Tensor\n A new convolutional layer\n \"\"\"\n strides = 2 if layer_depth % 3 == 0 else 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)\n return conv_layer", "Run the following cell, along with the earlier cells (to load the dataset and define the necessary functions). \nThis cell builds the network without batch normalization, then trains it on the MNIST dataset. It displays loss and accuracy data periodically while training.", "\"\"\"\nDO NOT MODIFY THIS CELL\n\"\"\"\ndef train(num_batches, batch_size, learning_rate):\n # Build placeholders for the input samples and labels \n inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])\n labels = tf.placeholder(tf.float32, [None, 10])\n \n # Feed the inputs into a series of 20 convolutional layers \n layer = inputs\n for layer_i in range(1, 20):\n layer = conv_layer(layer, layer_i)\n\n # Flatten the output from the convolutional layers \n orig_shape = layer.get_shape().as_list()\n layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])\n\n # Add one fully connected layer\n layer = fully_connected(layer, 100)\n\n # Create the output layer with 1 node for each \n logits = tf.layers.dense(layer, 10)\n \n # Define loss and training operations\n model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\n \n # Create operations to test accuracy\n correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n # Train and test the network\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for batch_i in range(num_batches):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n\n # train this batch\n sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})\n \n # Periodically check the validation or training loss and accuracy\n if batch_i % 100 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,\n labels: mnist.validation.labels})\n print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n elif batch_i % 25 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})\n print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n\n # At the end, score the final accuracy for both the validation and test sets\n acc = sess.run(accuracy, {inputs: mnist.validation.images,\n labels: mnist.validation.labels})\n print('Final validation accuracy: {:>3.5f}'.format(acc))\n acc = sess.run(accuracy, {inputs: mnist.test.images,\n labels: mnist.test.labels})\n print('Final test accuracy: {:>3.5f}'.format(acc))\n \n # Score the first 100 test images individually. 
This won't work if batch normalization isn't implemented correctly.\n correct = 0\n for i in range(100):\n correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],\n labels: [mnist.test.labels[i]]})\n\n print(\"Accuracy on 100 samples:\", correct/100)\n\n\nnum_batches = 800\nbatch_size = 64\nlearning_rate = 0.002\n\ntf.reset_default_graph()\nwith tf.Graph().as_default():\n train(num_batches, batch_size, learning_rate)", "With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)\nUsing batch normalization, you'll be able to train this same network to over 90% in that same number of batches.\nAdd batch normalization\nWe've copied the previous three cells to get you started. Edit these cells to add batch normalization to the network. For this exercise, you should use tf.layers.batch_normalization to handle most of the math, but you'll need to make a few other changes to your network to integrate batch normalization. You may want to refer back to the lesson notebook to remind yourself of important things, like how your graph operations need to know whether or not you are performing training or inference. \nIf you get stuck, you can check out the Batch_Normalization_Solutions notebook to see how we did things.\nTODO: Modify fully_connected to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps.", "def fully_connected(prev_layer, num_units, is_training):\n \"\"\"\n Create a fully connectd layer with the given layer as input and the given number of neurons.\n \n :param prev_layer: Tensor\n The Tensor that acts as input into this layer\n :param num_units: int\n The size of the layer. That is, the number of units, nodes, or neurons.\n :returns Tensor\n A new fully connected layer\n \"\"\"\n layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)\n\n layer = tf.layers.batch_normalization(layer, training=is_training)\n layer = tf.nn.relu(layer)\n return layer", "TODO: Modify conv_layer to add batch normalization to the convolutional layers it creates. Feel free to change the function's parameters if it helps.", "def conv_layer(prev_layer, layer_depth, is_training):\n \"\"\"\n Create a convolutional layer with the given layer as input.\n \n :param prev_layer: Tensor\n The Tensor that acts as input into this layer\n :param layer_depth: int\n We'll set the strides and number of feature maps based on the layer's depth in the network.\n This is *not* a good way to make a CNN, but it helps us create this example with very little code.\n :returns Tensor\n A new convolutional layer\n \"\"\"\n strides = 2 if layer_depth % 3 == 0 else 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)\n conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)\n conv_layer = tf.nn.relu(conv_layer)\n return conv_layer", "TODO: Edit the train function to support batch normalization. 
You'll need to make sure the network knows whether or not it is training, and you'll need to make sure it updates and uses its population statistics correctly.", "def train(num_batches, batch_size, learning_rate):\n # Build placeholders for the input samples and labels \n inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])\n labels = tf.placeholder(tf.float32, [None, 10])\n \n #boolean to hold if network is training\n is_training = tf.placeholder(tf.bool)\n # Feed the inputs into a series of 20 convolutional layers \n layer = inputs\n for layer_i in range(1, 20):\n layer = conv_layer(layer, layer_i, is_training)\n\n # Flatten the output from the convolutional layers \n orig_shape = layer.get_shape().as_list()\n layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])\n\n # Add one fully connected layer\n layer = fully_connected(layer, 100, is_training)\n\n # Create the output layer with 1 node for each \n logits = tf.layers.dense(layer, 10)\n \n # Define loss and training operations\n model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\n \n # Create operations to test accuracy\n correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n # Train and test the network\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for batch_i in range(num_batches):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n\n # train this batch\n sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training:True})\n \n # Periodically check the validation or training loss and accuracy\n if batch_i % 100 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,\n labels: mnist.validation.labels,\n is_training:False})\n print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n elif batch_i % 25 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training:False})\n print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n\n # At the end, score the final accuracy for both the validation and test sets\n acc = sess.run(accuracy, {inputs: mnist.validation.images,\n labels: mnist.validation.labels,\n is_training:False})\n print('Final validation accuracy: {:>3.5f}'.format(acc))\n acc = sess.run(accuracy, {inputs: mnist.test.images,\n labels: mnist.test.labels,\n is_training:False})\n print('Final test accuracy: {:>3.5f}'.format(acc))\n \n # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.\n correct = 0\n for i in range(100):\n correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],\n labels: [mnist.test.labels[i]],\n is_training:False})\n\n print(\"Accuracy on 100 samples:\", correct/100)\n\n\nnum_batches = 800\nbatch_size = 64\nlearning_rate = 0.002\n\ntf.reset_default_graph()\nwith tf.Graph().as_default():\n train(num_batches, batch_size, learning_rate)", "With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output: Accuracy on 100 samples. If this value is low while everything else looks good, that means you did not implement batch normalization correctly. 
Specifically, it means you either did not calculate the population mean and variance while training, or you are not using those values during inference.\nBatch Normalization using tf.nn.batch_normalization<a id=\"example_2\"></a>\nMost of the time you will be able to use higher level functions exclusively, but sometimes you may want to work at a lower level. For example, if you ever want to implement a new feature – something new enough that TensorFlow does not already include a high-level implementation of it, like batch normalization in an LSTM – then you may need to know these sorts of things.\nThis version of the network uses tf.nn for almost everything, and expects you to implement batch normalization using tf.nn.batch_normalization.\nOptional TODO: You can run the next three cells before you edit them just to see how the network performs without batch normalization. However, the results should be pretty much the same as you saw with the previous example before you added batch normalization. \nTODO: Modify fully_connected to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps.\nNote: For convenience, we continue to use tf.layers.dense for the fully_connected layer. By this point in the class, you should have no problem replacing that with matrix operations between the prev_layer and explicit weights and biases variables.", "def fully_connected(prev_layer, num_units, is_training):\n \"\"\"\n Create a fully connectd layer with the given layer as input and the given number of neurons.\n \n :param prev_layer: Tensor\n The Tensor that acts as input into this layer\n :param num_units: int\n The size of the layer. That is, the number of units, nodes, or neurons.\n :returns Tensor\n A new fully connected layer\n \"\"\"\n layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)\n gamma = tf.Variable(tf.ones([num_units]))\n beta = tf.Variable(tf.zeros([num_units]))\n\n pop_mean = tf.Variable(tf.zeros([num_units]), trainable=False)\n pop_variance = tf.Variable(tf.ones([num_units]), trainable=False)\n\n epsilon = 1e-3\n \n def batch_norm_training():\n batch_mean, batch_variance = tf.nn.moments(layer, [0])\n\n decay = 0.99\n train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))\n train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))\n\n with tf.control_dependencies([train_mean, train_variance]):\n return tf.nn.batch_normalization(layer, batch_mean, batch_variance, beta, gamma, epsilon)\n \n def batch_norm_inference():\n return tf.nn.batch_normalization(layer, pop_mean, pop_variance, beta, gamma, epsilon)\n\n batch_normalized_output = tf.cond(is_training, batch_norm_training, batch_norm_inference)\n return tf.nn.relu(batch_normalized_output)\n ", "TODO: Modify conv_layer to add batch normalization to the fully connected layers it creates. 
Feel free to change the function's parameters if it helps.\nNote: Unlike in the previous example that used tf.layers, adding batch normalization to these convolutional layers does require some slight differences to what you did in fully_connected.", "def conv_layer(prev_layer, layer_depth, is_training):\n \"\"\"\n Create a convolutional layer with the given layer as input.\n \n :param prev_layer: Tensor\n The Tensor that acts as input into this layer\n :param layer_depth: int\n We'll set the strides and number of feature maps based on the layer's depth in the network.\n This is *not* a good way to make a CNN, but it helps us create this example with very little code.\n :returns Tensor\n A new convolutional layer\n \"\"\"\n strides = 2 if layer_depth % 3 == 0 else 1\n\n in_channels = prev_layer.get_shape().as_list()[3]\n out_channels = layer_depth*4\n \n weights = tf.Variable(\n tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))\n \n bias = tf.Variable(tf.zeros(out_channels))\n\n conv_layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME')\n conv_layer = tf.nn.bias_add(conv_layer, bias)\n gamma = tf.Variable(tf.ones([out_channels]))\n beta = tf.Variable(tf.zeros([out_channels]))\n\n pop_mean = tf.Variable(tf.zeros([out_channels]), trainable=False)\n pop_variance = tf.Variable(tf.ones([out_channels]), trainable=False)\n\n epsilon = 1e-3\n \n def batch_norm_training():\n # Important to use the correct dimensions here to ensure the mean and variance are calculated \n # per feature map instead of for the entire layer\n batch_mean, batch_variance = tf.nn.moments(conv_layer, [0,1,2], keep_dims=False)\n\n decay = 0.99\n train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))\n train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))\n\n with tf.control_dependencies([train_mean, train_variance]):\n return tf.nn.batch_normalization(conv_layer, batch_mean, batch_variance, beta, gamma, epsilon)\n \n def batch_norm_inference():\n return tf.nn.batch_normalization(conv_layer, pop_mean, pop_variance, beta, gamma, epsilon)\n\n batch_normalized_output = tf.cond(is_training, batch_norm_training, batch_norm_inference)\n return tf.nn.relu(batch_normalized_output)", "TODO: Edit the train function to support batch normalization. 
You'll need to make sure the network knows whether or not it is training.", "def train(num_batches, batch_size, learning_rate):\n # Build placeholders for the input samples and labels \n inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])\n labels = tf.placeholder(tf.float32, [None, 10])\n \n # boolean variable if network is training\n is_training = tf.placeholder(tf.bool)\n # Feed the inputs into a series of 20 convolutional layers \n layer = inputs\n for layer_i in range(1, 20):\n layer = conv_layer(layer, layer_i, is_training)\n\n # Flatten the output from the convolutional layers \n orig_shape = layer.get_shape().as_list()\n layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])\n\n # Add one fully connected layer\n layer = fully_connected(layer, 100, is_training)\n\n # Create the output layer with 1 node for each \n logits = tf.layers.dense(layer, 10)\n \n # Define loss and training operations\n model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\n \n # Create operations to test accuracy\n correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n # Train and test the network\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for batch_i in range(num_batches):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n\n # train this batch\n sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training:True})\n \n # Periodically check the validation or training loss and accuracy\n if batch_i % 100 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,\n labels: mnist.validation.labels,\n is_training:False})\n print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n elif batch_i % 25 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys,\n is_training:False})\n print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n\n # At the end, score the final accuracy for both the validation and test sets\n acc = sess.run(accuracy, {inputs: mnist.validation.images,\n labels: mnist.validation.labels, is_training:False})\n print('Final validation accuracy: {:>3.5f}'.format(acc))\n acc = sess.run(accuracy, {inputs: mnist.test.images,\n labels: mnist.test.labels, is_training:False})\n print('Final test accuracy: {:>3.5f}'.format(acc))\n \n # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.\n correct = 0\n for i in range(100):\n correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],\n labels: [mnist.test.labels[i]], is_training:False})\n\n print(\"Accuracy on 100 samples:\", correct/100)\n\n\nnum_batches = 800\nbatch_size = 64\nlearning_rate = 0.002\n\ntf.reset_default_graph()\nwith tf.Graph().as_default():\n train(num_batches, batch_size, learning_rate)", "Once again, the model with batch normalization should reach an accuracy over 90%. There are plenty of details that can go wrong when implementing at this low level, so if you got it working - great job! If not, do not worry, just look at the Batch_Normalization_Solutions notebook to see what went wrong." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.15/_downloads/plot_eeg_erp.ipynb
bsd-3-clause
[ "%matplotlib inline", "EEG processing and Event Related Potentials (ERPs)\nFor a generic introduction to the computation of ERP and ERF\nsee tut_epoching_and_averaging. Here we cover the specifics\nof EEG, namely:\n- setting the reference\n- using standard montages :func:`mne.channels.Montage`\n- Evoked arithmetic (e.g. differences)", "import mne\nfrom mne.datasets import sample", "Setup for reading the raw data", "data_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\nraw = mne.io.read_raw_fif(raw_fname, preload=True)\nraw.set_eeg_reference('average', projection=True) # set EEG average reference", "Let's restrict the data to the EEG channels", "raw.pick_types(meg=False, eeg=True, eog=True)", "By looking at the measurement info you will see that we have now\n59 EEG channels and 1 EOG channel", "print(raw.info)", "In practice it's quite common to have some EEG channels that are actually\nEOG channels. To change a channel type you can use the\n:func:mne.io.Raw.set_channel_types method. For example\nto treat an EOG channel as EEG you can change its type using", "raw.set_channel_types(mapping={'EOG 061': 'eeg'})\nprint(raw.info)", "And to change the nameo of the EOG channel", "raw.rename_channels(mapping={'EOG 061': 'EOG'})", "Let's reset the EOG channel back to EOG type.", "raw.set_channel_types(mapping={'EOG': 'eog'})", "The EEG channels in the sample dataset already have locations.\nThese locations are available in the 'loc' of each channel description.\nFor the first channel we get", "print(raw.info['chs'][0]['loc'])", "And it's actually possible to plot the channel locations using\n:func:mne.io.Raw.plot_sensors.", "raw.plot_sensors()\nraw.plot_sensors('3d') # in 3D", "Setting EEG montage\nIn the case where your data don't have locations you can set them\nusing a :class:mne.channels.Montage. MNE comes with a set of default\nmontages. To read one of them do:", "montage = mne.channels.read_montage('standard_1020')\nprint(montage)", "To apply a montage on your data use the set_montage method.\nfunction. 
Here don't actually call this function as our demo dataset\nalready contains good EEG channel locations.\nNext we'll explore the definition of the reference.\nSetting EEG reference\nLet's first remove the reference from our Raw object.\nThis explicitly prevents MNE from adding a default EEG average reference\nrequired for source localization.", "raw_no_ref, _ = mne.set_eeg_reference(raw, [])", "We next define Epochs and compute an ERP for the left auditory condition.", "reject = dict(eeg=180e-6, eog=150e-6)\nevent_id, tmin, tmax = {'left/auditory': 1}, -0.2, 0.5\nevents = mne.read_events(event_fname)\nepochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,\n reject=reject)\n\nevoked_no_ref = mne.Epochs(raw_no_ref, **epochs_params).average()\ndel raw_no_ref # save memory\n\ntitle = 'EEG Original reference'\nevoked_no_ref.plot(titles=dict(eeg=title))\nevoked_no_ref.plot_topomap(times=[0.1], size=3., title=title)", "Average reference: This is normally added by default, but can also\nbe added explicitly.", "raw_car, _ = mne.set_eeg_reference(raw, 'average', projection=True)\nevoked_car = mne.Epochs(raw_car, **epochs_params).average()\ndel raw_car # save memory\n\ntitle = 'EEG Average reference'\nevoked_car.plot(titles=dict(eeg=title))\nevoked_car.plot_topomap(times=[0.1], size=3., title=title)", "Custom reference: Use the mean of channels EEG 001 and EEG 002 as\na reference", "raw_custom, _ = mne.set_eeg_reference(raw, ['EEG 001', 'EEG 002'])\nevoked_custom = mne.Epochs(raw_custom, **epochs_params).average()\ndel raw_custom # save memory\n\ntitle = 'EEG Custom reference'\nevoked_custom.plot(titles=dict(eeg=title))\nevoked_custom.plot_topomap(times=[0.1], size=3., title=title)", "Evoked arithmetics\nTrial subsets from Epochs can be selected using 'tags' separated by '/'.\nEvoked objects support basic arithmetic.\nFirst, we create an Epochs object containing 4 conditions.", "event_id = {'left/auditory': 1, 'right/auditory': 2,\n 'left/visual': 3, 'right/visual': 4}\nepochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,\n reject=reject)\nepochs = mne.Epochs(raw, **epochs_params)\n\nprint(epochs)", "Next, we create averages of stimulation-left vs stimulation-right trials.\nWe can use basic arithmetic to, for example, construct and plot\ndifference ERPs.", "left, right = epochs[\"left\"].average(), epochs[\"right\"].average()\n\n# create and plot difference ERP\nmne.combine_evoked([left, -right], weights='equal').plot_joint()", "This is an equal-weighting difference. If you have imbalanced trial numbers,\nyou could also consider either equalizing the number of events per\ncondition (using\n:meth:epochs.equalize_event_counts &lt;mne.Epochs.equalize_event_counts&gt;).\nAs an example, first, we create individual ERPs for each condition.", "aud_l = epochs[\"auditory\", \"left\"].average()\naud_r = epochs[\"auditory\", \"right\"].average()\nvis_l = epochs[\"visual\", \"left\"].average()\nvis_r = epochs[\"visual\", \"right\"].average()\n\nall_evokeds = [aud_l, aud_r, vis_l, vis_r]\nprint(all_evokeds)", "This can be simplified with a Python list comprehension:", "all_evokeds = [epochs[cond].average() for cond in sorted(event_id.keys())]\nprint(all_evokeds)\n\n# Then, we construct and plot an unweighted average of left vs. 
right trials\n# this way, too:\nmne.combine_evoked(all_evokeds,\n weights=(0.25, -0.25, 0.25, -0.25)).plot_joint()", "Often, it makes sense to store Evoked objects in a dictionary or a list -\neither different conditions, or different subjects.", "# If they are stored in a list, they can be easily averaged, for example,\n# for a grand average across subjects (or conditions).\ngrand_average = mne.grand_average(all_evokeds)\nmne.write_evokeds('/tmp/tmp-ave.fif', all_evokeds)\n\n# If Evokeds objects are stored in a dictionary, they can be retrieved by name.\nall_evokeds = dict((cond, epochs[cond].average()) for cond in event_id)\nprint(all_evokeds['left/auditory'])\n\n# Besides for explicit access, this can be used for example to set titles.\nfor cond in all_evokeds:\n all_evokeds[cond].plot_joint(title=cond)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
quantopian/research_public
notebooks/lectures/Random_Variables/answers/notebook.ipynb
apache-2.0
[ "Exercises: Random Variables - Answer Key\nBy Christopher van Hoecke, Max Margenot, and Delaney Mackenzie\nLecture Link :\nhttps://www.quantopian.com/lectures/random-variables\nIMPORTANT NOTE:\nThis lecture corresponds to the Random Variables lecture, which is part of the Quantopian lecture series. This homework expects you to rely heavily on the code presented in the corresponding lecture. Please copy and paste regularly from that lecture when starting to work on the problems, as trying to do them from scratch will likely be too difficult.\nPart of the Quantopian Lecture Series:\n\nwww.quantopian.com/lectures\ngithub.com/quantopian/research_public\n\n\nKey Concepts", "# Useful Functions\nclass DiscreteRandomVariable:\n def __init__(self, a=0, b=1):\n self.variableType = \"\"\n self.low = a\n self.high = b\n return\n def draw(self, numberOfSamples):\n samples = np.random.randint(self.low, self.high, numberOfSamples)\n return samples\n \nclass BinomialRandomVariable(DiscreteRandomVariable):\n def __init__(self, numberOfTrials = 10, probabilityOfSuccess = 0.5):\n self.variableType = \"Binomial\"\n self.numberOfTrials = numberOfTrials\n self.probabilityOfSuccess = probabilityOfSuccess\n return\n def draw(self, numberOfSamples):\n samples = np.random.binomial(self.numberOfTrials, self.probabilityOfSuccess, numberOfSamples)\n return samples\n \ndef factorial(n):return reduce(lambda x,y:x*y,[1]+range(1,n+1))\n\n# Useful Libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.stats as stats\nfrom statsmodels.stats import stattools\nfrom __future__ import division", "Exercise 1 : Uniform Distribution\n\nPlot the histogram of 10 tosses with a fair coin (let 1 be heads and 2 be tails). \nPlot the histogram of 1000000 tosses of a fair coin", "# Histograms with 10 tosses. \ncointoss = DiscreteRandomVariable(1, 3)\nplt.hist(cointoss.draw(10), align = 'mid')\n\nplt.xlabel('Value')\nplt.ylabel('Occurences')\nplt.legend(['Coin Tosses']);\n\n# Histograms with 1000000 tosses. \ncointoss = DiscreteRandomVariable(1, 3)\nplt.hist(cointoss.draw(1000000), align = 'mid')\n\nplt.xlabel('Value')\nplt.ylabel('Occurences')\nplt.legend(['Coin Tosses']);", "Exercise 2 : Binomial Distributions.\n\nGraph the histogram of 1000000 samples from a binomial distribution of probability 0.25 and $n = 20$\nFind the value that occurs the most often\nCalculate the probability of the value that occurs the most often occurring. Use the factorial(x) function to find factorials", "# Binomial distribution with p=0.25 and n=20\nbinomialdistribution = BinomialRandomVariable(20, 0.25)\nbins = np.arange(0,21,1)\nn, bins, patches = plt.hist(binomialdistribution.draw(1000000), bins=bins)\n\nplt.title('Binomial Distribution with p=0.25 and n=20')\nplt.xlabel('Value')\nplt.ylabel('Occurrences')\nplt.legend(['Die Rolls']);\n\n# Finding x which occurs most often\nelem = np.argmax(n)\nprint 'Maximum occurance for x =', elem\n\n# Calculating the probability of finding x. \nn = 20\np = 0.5\nx = elem\nn_factorial = factorial(n)\nx_factorial = factorial(x)\nn_x_factorial = factorial(n-x)\nfact = n_factorial / (n_x_factorial * x_factorial)\nprobability = fact * (p**x) * ((1-p)**(n-x))\nprint 'proabability of x = %d' % x, probability", "Exercise 3 : Normal Distributions\na. Graphing\nGraph a normal distribution using the Probability Density Function bellow, with a mean of 0 and standard deviation of 5. 
\n$$f(x) = \\frac{1}{\\sigma\\sqrt{2\\pi}}e^{-\\frac{(x - \\mu)^2}{2\\sigma^2}}$$", "# Graphing a normal distribution pdf. \nmu = 0\nsigma = 5\nx = np.linspace(-30, 30, 200)\ny = (1/(sigma * np.sqrt(2 * 3.14159))) * np.exp(-(x - mu)*(x - mu) / (2 * sigma * sigma))\nplt.plot(x, y)\nplt.title('Graph of PDF with mu = 0 and sigma = 5')\nplt.xlabel('Value')\nplt.ylabel('Probability');", "b. Confidence Intervals.\n\nCalculate the first, second, and third confidence intervals. \nPlot the PDF and the first, second, and third confidence intervals.", "# finding the 1st, 2nd, and third confidence intervals. \nfirst_ci = (-sigma, sigma)\nsecond_ci = (-2*sigma, 2*sigma)\nthird_ci = (-3*sigma, 3*sigma)\n\nprint '1-sigma -> mu +/-', sigma\nprint '2-sigma -> mu +/-', second_ci[1]\nprint '3-sigma -> mu +/-', third_ci[1]\n\nplt.axvline(first_ci[0], linestyle='dashdot', label='68% of observations', color = 'blue')\nplt.axvline(first_ci[1], linestyle='dashdot', label='68% of observations', color = 'blue')\nplt.axvline(second_ci[0], linestyle='dashdot', label='95% of observations', color = 'red')\nplt.axvline(second_ci[1],linestyle='dashdot', color = 'red')\nplt.axvline(third_ci[0], linestyle='dashdot', label='99% of observations', color = 'green')\nplt.axvline(third_ci[1], linestyle='dashdot', color = 'green')\nplt.plot(x,y)\nplt.title('Graph of PDF with 3 confidence intervals.')\n\nplt.legend();", "Exercise 4: Financial Applications:\nFit the returns of SPY from 2016-01-01 to 2016-05-01 to a normal distribution. \n- Fit the returns to a normal distribution by clacluating the values of $\\mu$ and $\\sigma$\n- Plot the returns and the distribution, along with 3 confidence intervals. \n- Use the Jarque-Bera test to check for normality.", "# Collect prices and returns. \nprices = get_pricing('SPY', start_date = '2016-01-01', end_date='2016-05-01', \n fields = 'price')\nreturns = prices.pct_change()[1:]\n\n# Calculating the mean and standard deviation. \nsample_mean = np.mean(returns)\nsample_std_dev = np.std(returns)\n\nx = np.linspace(-(sample_mean + 4 * sample_std_dev), (sample_mean + 4 * sample_std_dev), len(returns))\nsample_distribution = ((1/(sample_std_dev * 2 * np.pi)) * \n np.exp(-(x - sample_mean)*(x - sample_mean) / (2 * sample_std_dev * sample_std_dev)))\n\n# Plotting histograms and confidence intervals. \nplt.hist(returns, range=(returns.min(), returns.max()), normed = True);\nplt.plot(x, sample_distribution)\n\nplt.axvline(sample_std_dev, linestyle='dashed', color='red', label='1st Confidence Interval')\nplt.axvline(-sample_std_dev, linestyle='dashed', color='red')\nplt.axvline(2*sample_std_dev, linestyle='dashed', color='k', label='2st Confidence Interval')\nplt.axvline(-2*sample_std_dev, linestyle='dashed', color='k')\nplt.axvline(3*sample_std_dev, linestyle='dashed', color='green', label='3st Confidence Interval')\nplt.axvline(-3*sample_std_dev, linestyle='dashed', color='green')\n\nplt.legend();\n\n# Run the JB test for normality. \ncutoff = 0.01\n_, p_value, skewness, kurtosis = stattools.jarque_bera(returns)\nprint \"The JB test p-value is: \", p_value\nprint \"We reject the hypothesis that the data are normally distributed \", p_value < cutoff\nprint \"The skewness of the returns is: \", skewness\nprint \"The kurtosis of the returns is: \", kurtosis", "Congratulations on completing the Random Variables answer key!\nAs you learn more about writing trading models and the Quantopian platform, enter a daily Quantopian Contest. 
Your strategy will be evaluated for a cash prize every day.\nStart by going through the Writing a Contest Algorithm tutorial.\nThis presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. (\"Quantopian\"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
hail-is/hail
hail/python/hail/docs/tutorials/06-joins.ipynb
mit
[ "Table Joins Tutorial\nThis tutorial walks through some ways to join Hail tables. We'll use a simple movie dataset to illustrate. The movie dataset comes in multiple parts. Here are a few questions we might naturally ask about the dataset:\n\nWhat is the mean rating per genre?\nWhat is the favorite movie for each occupation?\nWhat genres are most preferred by women vs men?\n\nWe'll use joins to combine datasets in order to answer these questions. \nLet's initialize Hail, fetch the tutorial data, and load three tables: users, movies, and ratings.", "import hail as hl\n\nhl.utils.get_movie_lens('data/')\n\nusers = hl.read_table('data/users.ht')\nmovies = hl.read_table('data/movies.ht')\nratings = hl.read_table('data/ratings.ht')", "The Key to Understanding Joins\nTo understand joins in Hail, we need to revisit one of the crucial properties of tables: the key.\nA table has an ordered list of fields known as the key. Our users table has one key, the id field. We can see all the fields, as well as the keys, of a table by calling describe().", "users.describe()", "key is a struct expression of all of the key fields, so we can refer to the key of a table without explicitly specifying the names of the key fields.", "users.key.describe()", "Keys need not be unique or non-missing, although in many applications they will be both.\nWhen tables are joined in Hail, they are joined based on their keys. In order to join two tables, they must share the same number of keys, same key types (i.e. string vs integer), and the same order of keys.\nLet's look at a simple example of a join. We'll use the Table.parallelize() method to create two small tables, t1 and t2.", "t1 = hl.Table.parallelize([\n {'a': 'foo', 'b': 1},\n {'a': 'bar', 'b': 2},\n {'a': 'bar', 'b': 2}],\n hl.tstruct(a=hl.tstr, b=hl.tint32),\n key='a')\nt2 = hl.Table.parallelize([\n {'t': 'foo', 'x': 3.14},\n {'t': 'bar', 'x': 2.78},\n {'t': 'bar', 'x': -1},\n {'t': 'quam', 'x': 0}],\n hl.tstruct(t=hl.tstr, x=hl.tfloat64),\n key='t')\n\nt1.show()\n\nt2.show()", "Now, we can join the tables.", "j = t1.annotate(t2_x = t2[t1.a].x)\nj.show()", "Let's break this syntax down. \nt2[t1.a] is an expression referring to the row of table t2 with value t1.a. So this expression will create a map between the keys of t1 and the rows of t2. You can view this mapping directly:", "t2[t1.a].show()", "Since we only want the field x from t2, we can select it with t2[t1.a].x. Then we add this field to t1 with the anntotate_rows() method. The new joined table j has a field t2_x that comes from the rows of t2. The tables could be joined, because they shared the same number of keys (1) and the same key type (string). The keys do not need to share the same name. Notice that the rows with keys present in t2 but not in t1 do not show up in the final result. This join syntax performs a left join. Tables also have a SQL-style inner/left/right/outer join() method.\nThe magic of keys is that they can be used to create a mapping, like a Python dictionary, between the keys of one table and the row values of another table: table[expr] will refer to the row of table that has a key value of expr. 
If the row is not unique, one such row is chosen arbitrarily.\nHere's a subtle bit: if expr is an expression indexed by a row of table2, then table[expr] is also an expression indexed by a row of table2.\nAlso note that while they look similar, table['field'] and table1[table2.key] are doing very different things!\ntable['field'] selects a field from the table, while table1[table2.key] creates a mapping between the keys of table2 and the rows of table1.", "t1['a'].describe()\n\nt2[t1.a].describe()", "Joining Tables\nNow that we understand the basics of how joins work, let's use a join to compute the average movie rating per genre.\nWe have a table ratings, which contains user_id, movie_id, and rating fields. Group by movie_id and aggregate to get the mean rating of each movie.", "t = (ratings.group_by(ratings.movie_id) \n .aggregate(rating = hl.agg.mean(ratings.rating)))\nt.describe()", "To get the mean rating by genre, we need to join in the genre field from the movies table.", "t = t.annotate(genres = movies[t.movie_id].genres)\nt.describe()\n\nt.show()", "We want to group the ratings by genre, but they're packed up in an array. To unpack the genres, we can use explode. \nexplode creates a new row for each element in the value of the field, which must be a collection (array or set).", "t = t.explode(t.genres)\nt.show()", "Finally, we can get group by genre and aggregate to get the mean rating per genre.", "t = (t.group_by(t.genres)\n .aggregate(rating = hl.agg.mean(t.rating)))\nt.show(n=100)", "Let's do another example. This time, we'll see if we can determine what the highest rated movies are, on average, for each occupation. We start by joining the two tables movies and users.", "movie_data = ratings.annotate(\n movie = movies[ratings.movie_id].title,\n occupation = users[ratings.user_id].occupation)\n\nmovie_data.show()", "Next, we'll use group_by along with the aggregator hl.agg.mean to determine the average rating of each movie by occupation. Remember that the group_by operation is always associated with an aggregation.", "ratings_by_job = movie_data.group_by(\n movie_data.occupation, movie_data.movie).aggregate(\n mean = hl.agg.mean(movie_data.rating))\n\nratings_by_job.show()", "Now we can use another group_by to determine the highest rated movie, on average, for each occupation.\nThe syntax here needs some explaining. The second step in the cell below is just to clean up the table created by the preceding step. If you examine the intermediate result (for example, by giving a new name to the output of the first step), you will see that there are two columns corresponding to occupation, occupation and val.occupation. This is an artifact of the aggregator syntax and the fact that we are retaining the entire row from ratings_by_job. So in the second line, we use select to keep those columns that we want, and also rename them to drop the val. syntax. Since occupation is a key of this table, we don't need to select for it.", "highest_rated = ratings_by_job.group_by(\n ratings_by_job.occupation).aggregate(\n val = hl.agg.take(ratings_by_job.row,1, ordering = -ratings_by_job.mean)[0]\n)\n\nhighest_rated = highest_rated.select(movie = highest_rated.val.movie,\n mean = highest_rated.val.mean)\n\nhighest_rated.show()", "Let's try to get a deeper understanding of this result. Notice that every movie displayed has an average rating of 5, which means that every person gave these movies the highest rating. Is that unlikely? 
We can determine how many people rated each of these movies by working backwards and filtering our original movie_data table by fields in highest_rated.\nNote that in the second line below, we are taking advantage of the fact that Hail tables are keyed.", "highest_rated = highest_rated.key_by(\n highest_rated.occupation, highest_rated.movie)\n\ncounts_temp = movie_data.filter(\n hl.is_defined(highest_rated[movie_data.occupation, movie_data.movie]))\n\ncounts = counts_temp.group_by(counts_temp.occupation, counts_temp.movie).aggregate(\n counts = hl.agg.count())\n\ncounts.show()", "So it looks like the highest rated movies, when computed naively, mostly have a single viewer rating them. To get a better understanding of the data, we can recompute this list but only include movies which have more than 1 viewer (left as an exercise). \nExercises\n\nWhat is the favorite movie for each occupation, conditional on there being more than one viewer?\nWhat genres are rated most differently by men and women?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
streety/biof509
Wk09-Advanced-ML-tasks-Deep-Learning.ipynb
mit
[ "Week 8 - Advanced Machine Learning\nDuring the course we have covered a variety of different tasks and algorithms. These were chosen for their broad applicability and ease of use with many important techniques and areas of study skipped. The goal of this class is to provide a brief overview of some of the latest advances and areas that could not be covered due to our limited time.\nDeep learning\n\nGlosser.ca via wikimedia. \nAlthough a neural network has been added to scikit learn relatively recently it only runs on the CPU making the large neural networks now popular prohibitively slow. Fortunately, there are a number of different packages available for python that can run on a GPU. \nTheano is the GPGPU equivalent of numpy. It implements all the core functionality needed to build a deep neural network, and run it on the GPGPU, but does not come with an existing implementation.\nA variety of packages have been built on top of Theano that enable neural networks to be implemented in a relatively straightforward manner. Parrallels can be draw with the relationship between numpy and scikit learn. Pylearn2 was perhaps the first major package built on Theano but has now been superseded by a number of new packages, including blocks, keras, and lasagne.\nYou may have also heard of TensorFlow that was released by Google a year or two ago. TensorFlow lies somewhere between the low-level Theano and the high-level packages such as blocks, keras, and lasagne. Currently only keras supports TensorFlow as an alternative backend. Keras will also be included with TensorFlow soon.\nInstalling these packages with support for executing code on the GPU is more challenging than simply conda install ... or pip install .... In addition to installing these packages it is also necessary to install the CUDA packages. 
\nBeyond the advances due to the greater computational capacity available on the GPU there have been a number of other important approaches utilized:\n\nConvolutional neural nets\nRecurrent neural nets\nDropout\nEarly stopping\nData augmentation\n\n\nAphex34 via wikimedia.", "import matplotlib.pyplot as plt\n%matplotlib inline\nplt.gray()\nfrom keras.datasets import mnist\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nfig, axes = plt.subplots(3,5, figsize=(12,8))\nfor i, ax in enumerate(axes.flatten()):\n ax.imshow(X_train[i], interpolation='nearest')\nplt.show()\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\nfrom keras.utils import np_utils\n\nbatch_size = 512\nnb_classes = 10\nnb_epoch = 3\n\nX_train = X_train.reshape(X_train.shape[0], 1, 28, 28)\nX_test = X_test.reshape(X_test.shape[0], 1, 28, 28)\nX_train = X_train.astype(\"float32\")\nX_test = X_test.astype(\"float32\")\nX_train /= 255\nX_test /= 255\n\n# convert class vectors to binary class matrices\nY_train = np_utils.to_categorical(y_train, nb_classes)\nY_test = np_utils.to_categorical(y_test, nb_classes)\n\n# CAUTION: Without utilizing a GPU even this very short example is incredibly slow to run.\n\nmodel = Sequential()\n\n#model.add(Convolution2D(8, 1, 3, 3, input_shape=(1,28,28), activation='relu'))\nmodel.add(Convolution2D(4, 3, 3, input_shape=(1,28,28), activation='relu'))\n#model.add(Convolution2D(4, 3, 3, activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(4, input_dim=4*28*28*0.25, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(nb_classes, input_dim=4, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])\n\nmodel.fit(X_train[:1024], Y_train[:1024], batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, \n validation_data=(X_test, Y_test))\n\nscore = model.evaluate(X_test, Y_test, verbose=0)\nprint('Test score:', score)\n\npredictions = model.predict_classes(X_test)\n\nfig, axes = plt.subplots(3,5, figsize=(12,8))\nfor i, ax in enumerate(axes.flatten()):\n ax.imshow(X_test[predictions == 7][i].reshape((28,28)), interpolation='nearest')\nplt.show()", "The performance here is very poor. We really need to train with more samples and for more epochs.", "from sklearn.metrics import confusion_matrix\n\ncm = confusion_matrix(y_test, predictions)\n\nnp.fill_diagonal(cm, 0)\n\nplt.bone()\nplt.matshow(cm)\nplt.colorbar()\nplt.ylabel('True label')\nplt.xlabel('Predicted label')", "Building a model on the NIH HPC\nThe high performance computing group at NIH provides GPU equipped nodes on their GPU partition. This is an easy way to begin leveraging GPUs without the startup costs and maintainance requirements. \nUtilizing these nodes is relatively straightforward as long as you can connect to the Biowulf cluster.\nBuilding a model on the Amazon EC2 service\nIf you don't have access to the NIH HPC resources and don't want to buy a GPU there are a number of cloud services with GPU enabled machines available for rent. Probably the most well known service is Amazon Web services, and specifically their EC2 service.\nAnyone (willing to pay) can use these services.\nUsing the Bitfusion AMI increases the cost but simplifies the startup process. 
Installation of the required drivers and software requires multiple steps, and this image has everything we need pre-configured." ]
[ "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.12/_downloads/plot_linear_model_patterns.ipynb
bsd-3-clause
[ "%matplotlib inline", "Linear classifier on sensor data with plot patterns and filters\nDecoding, a.k.a MVPA or supervised machine learning applied to MEG and EEG\ndata in sensor space. Fit a linear classifier with the LinearModel object\nproviding topographical patterns which are more neurophysiologically\ninterpretable [1] than the classifier filters (weight vectors).\nThe patterns explain how the MEG and EEG data were generated from the\ndiscriminant neural sources which are extracted by the filters.\nNote patterns/filters in MEG data are more similar than EEG data\nbecause the noise is less spatially correlated in MEG than EEG.\n[1] Haufe, S., Meinecke, F., Görgen, K., Dähne, S., Haynes, J.-D.,\nBlankertz, B., & Bießmann, F. (2014). On the interpretation of\nweight vectors of linear models in multivariate neuroimaging.\nNeuroImage, 87, 96–110. doi:10.1016/j.neuroimage.2013.10.067", "# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>\n# Romain Trachel <trachelr@gmail.com>\n#\n# License: BSD (3-clause)\n\nimport mne\nfrom mne import io\nfrom mne.datasets import sample\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\n\n# import a linear classifier from mne.decoding\nfrom mne.decoding import LinearModel\n\nprint(__doc__)\n\ndata_path = sample.data_path()", "Set parameters", "raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\ntmin, tmax = -0.2, 0.5\nevent_id = dict(aud_l=1, vis_l=3)\n\n# Setup for reading the raw data\nraw = io.read_raw_fif(raw_fname, preload=True)\nraw.filter(2, None, method='iir') # replace baselining with high-pass\nevents = mne.read_events(event_fname)\n\n# Read epochs\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,\n decim=4, baseline=None, preload=True)\n\nlabels = epochs.events[:, -1]\n\n# get MEG and EEG data\nmeg_epochs = epochs.copy().pick_types(meg=True, eeg=False)\nmeg_data = meg_epochs.get_data().reshape(len(labels), -1)\neeg_epochs = epochs.copy().pick_types(meg=False, eeg=True)\neeg_data = eeg_epochs.get_data().reshape(len(labels), -1)", "Decoding in sensor space using a LogisticRegression classifier", "clf = LogisticRegression()\nsc = StandardScaler()\n\n# create a linear model with LogisticRegression\nmodel = LinearModel(clf)\n\n# fit the classifier on MEG data\nX = sc.fit_transform(meg_data)\nmodel.fit(X, labels)\n# plot patterns and filters\nmodel.plot_patterns(meg_epochs.info, title='MEG Patterns')\nmodel.plot_filters(meg_epochs.info, title='MEG Filters')\n\n# fit the classifier on EEG data\nX = sc.fit_transform(eeg_data)\nmodel.fit(X, labels)\n# plot patterns and filters\nmodel.plot_patterns(eeg_epochs.info, title='EEG Patterns')\nmodel.plot_filters(eeg_epochs.info, title='EEG Filters')" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jwjohnson314/data-801
notebooks/more_data_structures.ipynb
mit
[ "More on data structures\nIterable vs. Iterators\nLists are examples of iterable data structures, which means that you can iterate over the actual objects in these data structures.", "# iterating over a list by object\n\nx = ['bob', 'sue', 'mary']\n\nfor name in x: \n print(name.upper() + ' WAS HERE')\n\n# alternatively, you could iterate over position\n\nfor i in range(len(x)):\n print(x[i].upper() + ' WAS HERE')\n\ndir(x) # ignore the __ methods for now", "generators return their contents 'lazily'. This leaves a minimal memory footprint, at the cost of making the generator nonreusable.", "y = (x*x for x in [1, 2, 3])\n\ntype(y)\n\ndir(y)\n\ny.send??\n\ny[5]\n\nnext(y)\n\ny.send(1)\n\nnext(y) # run this cell twice - what happens?", "'range' is something like a generator, but with special properties because of its intended use case (in 'for' loops or similar structures.", "z = range(10, 5, -1)\n\ndir(range)\n\n# let's filter that list a little\n[x for x in dir(range) if not x.startswith('_')]\n\nz.start\n\nlen(z) # __ function - overloaded operator", "From the docs (https://docs.python.org/3/library/stdtypes.html#typesseq-range): The advantage of the range type over a regular list or tuple is that a range object will always take the same (small) amount of memory, no matter the size of the range it represents (as it only stores the start, stop and step values, calculating individual items and subranges as needed).\nRange objects implement the collections.abc.Sequence ABC, and provide features such as containment tests, element index lookup, slicing and support for negative indices (see Sequence Types — list, tuple, range):", "for i in z:\n print(i)", "zips produced iterators from pairs:", "GPA = zip(['bob', 'sue', 'mary'], [2.3, 4.0, 3.7])\n\ntype(GPA)\n\ndir(GPA)\n\nnext(GPA)\n\nnext(GPA)[1]", "More on Dicts\nThe dict data structure shows up all over Python.", "dict?", "from assignment:", "GPA_2 = dict(bob=2.0, sue=3.4, mary=4.0)", "from iterator:", "names = ['bob', 'mary', 'sue', 'lisa']\ngpas = [3.2, 4.0, 3.1, 2.8]\n\nGPA_3 = dict(zip(names, gpas))\nGPA_3", "In function definitions:", "# explicitly named arguments are also positional\n# Anything after * in a function is a positional argument - tuple\n# Anything after ** is a named argument \n# the latter are unpacked as dicts\n\ndef arg_explainer(x, y, *args, **kwargs):\n print('-'*30)\n print('x is %d, even though you didn\\'t specify it, because of its position.' % x)\n print('same with y, which is %d.' %y)\n if args:\n print('-'*30)\n print('type(*args) = %s' % type(args))\n print('these are the *args arguments: ')\n for arg in args:\n print(arg)\n else:\n print('-'*30)\n print('no *args today!')\n if kwargs:\n print('-'*30)\n print('type(**kwargs) == %s' % type(kwargs))\n for key in kwargs:\n print(key, kwargs[key])\n else:\n print('-'*30)\n print('no **kwargs today!')\n print('-'*30)\n\narg_explainer(2, 4, 3, 7, 8, 9, 10, plot=True, sharey=True, rotate=False)", "In function calls:", "my_kwargs = {'plot': False, 'sharey': True}\n\narg_explainer(1, 2, **my_kwargs)", "This allows, for instance, matplotlibs plot function to accept a huge range of different plotting options, or few to none at all.", "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n?plt.plot\n\nx = np.linspace(-5, 5, 100)\ny1 = np.sin(x)\ny2 = np.cos(x)\n\nplt.plot(x, y1) # all of these arguments are *args\nplt.plot(x, y2, color='red', label='just on the cosine, for no reason at all') # starting w/ color, **kwargs\nplt.legend(loc='center');" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/end_to_end_ml/labs/sample_babyweight.ipynb
apache-2.0
[ "Creating a Sampled Dataset\nLearning Objectives\n\nSetup up the environment\nSample the natality dataset to create train, eval, test sets\nPreprocess the data in Pandas dataframe\n\nIntroduction\nIn this notebook, we'll read data from BigQuery into our notebook to preprocess the data within a Pandas dataframe for a small, repeatable sample.\nWe will set up the environment, sample the natality dataset to create train, eval, test splits, and preprocess the data in a Pandas dataframe.\nEach learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the solution notebook.\nSet up environment variables and load necessary libraries", "!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst\n\n!pip install --user google-cloud-bigquery==1.25.0", "Note: Restart your kernel to use updated packages.\nKindly ignore the deprecation warnings and incompatibility errors related to google-cloud-storage.\nImport necessary libraries.", "from google.cloud import bigquery\nimport pandas as pd", "Lab Task #1: Set up environment variables so that we can use them throughout the notebook", "%%bash\n# TODO 1\n# TODO -- Your code here.\necho \"Your current GCP Project Name is: \"$PROJECT\n\nPROJECT = \"cloud-training-demos\" # Replace with your PROJECT", "Create ML datasets by sampling using BigQuery\nWe'll begin by sampling the BigQuery data to create smaller datasets. Let's create a BigQuery client that we'll use throughout the lab.", "bq = bigquery.Client(project = PROJECT)", "We need to figure out the right way to divide our hash values to get our desired splits. To do that we need to define some values to hash within the module. Feel free to play around with these values to get the perfect combination.", "modulo_divisor = 100\ntrain_percent = 80.0\neval_percent = 10.0\n\ntrain_buckets = int(modulo_divisor * train_percent / 100.0)\neval_buckets = int(modulo_divisor * eval_percent / 100.0)", "We can make a series of queries to check if our bucketing values result in the correct sizes of each of our dataset splits and then adjust accordingly. Therefore, to make our code more compact and reusable, let's define a function to return the head of a dataframe produced from our queries up to a certain number of rows.", "def display_dataframe_head_from_query(query, count=10):\n \"\"\"Displays count rows from dataframe head from query.\n \n Args:\n query: str, query to be run on BigQuery, results stored in dataframe.\n count: int, number of results from head of dataframe to display.\n Returns:\n Dataframe head with count number of results.\n \"\"\"\n df = bq.query(\n query + \" LIMIT {limit}\".format(\n limit=count)).to_dataframe()\n\n return df.head(count)", "For our first query, we're going to use the original query above to get our label, features, and columns to combine into our hash which we will use to perform our repeatable splitting. There are only a limited number of years, months, days, and states in the dataset. Let's see what the hash values are. We will need to include all of these extra columns to hash on to get a fairly uniform spread of the data. 
Feel free to try less or more in the hash and see how it changes your results.", "# Get label, features, and columns to hash and split into buckets\nhash_cols_fixed_query = \"\"\"\nSELECT\n weight_pounds,\n is_male,\n mother_age,\n plurality,\n gestation_weeks,\n year,\n month,\n CASE\n WHEN day IS NULL THEN\n CASE\n WHEN wday IS NULL THEN 0\n ELSE wday\n END\n ELSE day\n END AS date,\n IFNULL(state, \"Unknown\") AS state,\n IFNULL(mother_birth_state, \"Unknown\") AS mother_birth_state\nFROM\n publicdata.samples.natality\nWHERE\n year > 2000\n AND weight_pounds > 0\n AND mother_age > 0\n AND plurality > 0\n AND gestation_weeks > 0\n\"\"\"\n\ndisplay_dataframe_head_from_query(hash_cols_fixed_query)", "Using COALESCE would provide the same result as the nested CASE WHEN. This is preferable when all we want is the first non-null instance. To be precise the CASE WHEN would become COALESCE(wday, day, 0) AS date. You can read more about it here.\nNext query will combine our hash columns and will leave us just with our label, features, and our hash values.", "data_query = \"\"\"\nSELECT\n weight_pounds,\n is_male,\n mother_age,\n plurality,\n gestation_weeks,\n FARM_FINGERPRINT(\n CONCAT(\n CAST(year AS STRING),\n CAST(month AS STRING),\n CAST(date AS STRING),\n CAST(state AS STRING),\n CAST(mother_birth_state AS STRING)\n )\n ) AS hash_values\nFROM\n ({CTE_hash_cols_fixed})\n\"\"\".format(CTE_hash_cols_fixed=hash_cols_fixed_query)\n\ndisplay_dataframe_head_from_query(data_query)", "The next query is going to find the counts of each of the unique 657484 hash_values. This will be our first step at making actual hash buckets for our split via the GROUP BY.", "# Get the counts of each of the unique hash of our splitting column\nfirst_bucketing_query = \"\"\"\nSELECT\n hash_values,\n COUNT(*) AS num_records\nFROM\n ({CTE_data})\nGROUP BY\n hash_values\n\"\"\".format(CTE_data=data_query)\n\ndisplay_dataframe_head_from_query(first_bucketing_query)", "The query below performs a second layer of bucketing where now for each of these bucket indices we count the number of records.", "# Get the number of records in each of the hash buckets\nsecond_bucketing_query = \"\"\"\nSELECT\n ABS(MOD(hash_values, {modulo_divisor})) AS bucket_index,\n SUM(num_records) AS num_records\nFROM\n ({CTE_first_bucketing})\nGROUP BY\n ABS(MOD(hash_values, {modulo_divisor}))\n\"\"\".format(\n CTE_first_bucketing=first_bucketing_query, modulo_divisor=modulo_divisor)\n\ndisplay_dataframe_head_from_query(second_bucketing_query)", "The number of records is hard for us to easily understand the split, so we will normalize the count into percentage of the data in each of the hash buckets in the next query.", "# Calculate the overall percentages\npercentages_query = \"\"\"\nSELECT\n bucket_index,\n num_records,\n CAST(num_records AS FLOAT64) / (\n SELECT\n SUM(num_records)\n FROM\n ({CTE_second_bucketing})) AS percent_records\nFROM\n ({CTE_second_bucketing})\n\"\"\".format(CTE_second_bucketing=second_bucketing_query)\n\ndisplay_dataframe_head_from_query(percentages_query)", "We'll now select the range of buckets to be used in training.", "# Choose hash buckets for training and pull in their statistics\ntrain_query = \"\"\"\nSELECT\n *,\n \"train\" AS dataset_name\nFROM\n ({CTE_percentages})\nWHERE\n bucket_index >= 0\n AND bucket_index < {train_buckets}\n\"\"\".format(\n CTE_percentages=percentages_query,\n train_buckets=train_buckets)\n\ndisplay_dataframe_head_from_query(train_query)", "We'll do the same by selecting the range of 
buckets to be used evaluation.", "# Choose hash buckets for validation and pull in their statistics\neval_query = \"\"\"\nSELECT\n *,\n \"eval\" AS dataset_name\nFROM\n ({CTE_percentages})\nWHERE\n bucket_index >= {train_buckets}\n AND bucket_index < {cum_eval_buckets}\n\"\"\".format(\n CTE_percentages=percentages_query,\n train_buckets=train_buckets,\n cum_eval_buckets=train_buckets + eval_buckets)\n\ndisplay_dataframe_head_from_query(eval_query)", "Lastly, we'll select the hash buckets to be used for the test split.", "# Choose hash buckets for testing and pull in their statistics\ntest_query = \"\"\"\nSELECT\n *,\n \"test\" AS dataset_name\nFROM\n ({CTE_percentages})\nWHERE\n bucket_index >= {cum_eval_buckets}\n AND bucket_index < {modulo_divisor}\n\"\"\".format(\n CTE_percentages=percentages_query,\n cum_eval_buckets=train_buckets + eval_buckets,\n modulo_divisor=modulo_divisor)\n\ndisplay_dataframe_head_from_query(test_query)", "In the below query, we'll UNION ALL all of the datasets together so that all three sets of hash buckets will be within one table. We added dataset_id so that we can sort on it in the query after.", "# Union the training, validation, and testing dataset statistics\nunion_query = \"\"\"\nSELECT\n 0 AS dataset_id,\n *\nFROM\n ({CTE_train})\nUNION ALL\nSELECT\n 1 AS dataset_id,\n *\nFROM\n ({CTE_eval})\nUNION ALL\nSELECT\n 2 AS dataset_id,\n *\nFROM\n ({CTE_test})\n\"\"\".format(CTE_train=train_query, CTE_eval=eval_query, CTE_test=test_query)\n\ndisplay_dataframe_head_from_query(union_query)", "Lastly, we'll show the final split between train, eval, and test sets. We can see both the number of records and percent of the total data. It is really close to that we were hoping to get.", "# Show final splitting and associated statistics\nsplit_query = \"\"\"\nSELECT\n dataset_id,\n dataset_name,\n SUM(num_records) AS num_records,\n SUM(percent_records) AS percent_records\nFROM\n ({CTE_union})\nGROUP BY\n dataset_id,\n dataset_name\nORDER BY\n dataset_id\n\"\"\".format(CTE_union=union_query)\n\ndisplay_dataframe_head_from_query(split_query)", "Now that we know that our splitting values produce a good global splitting on our data, here's a way to get a well-distributed portion of the data in such a way that the train, eval, test sets do not overlap and takes a subsample of our global splits.\nLab Task #2: Sample the natality dataset", "# TODO 2\n# TODO -- Your code here.\n# every_n allows us to subsample from each of the hash values\n# This helps us get approximately the record counts we want\nprint(\"There are {} examples in the train dataset.\".format(len(train_df)))\nprint(\"There are {} examples in the validation dataset.\".format(len(eval_df)))\nprint(\"There are {} examples in the test dataset.\".format(len(test_df)))", "Preprocess data using Pandas\nWe'll perform a few preprocessing steps to the data in our dataset. Let's add extra rows to simulate the lack of ultrasound. That is we'll duplicate some rows and make the is_male field be Unknown. Also, if there is more than child we'll change the plurality to Multiple(2+). While we're at it, we'll also change the plurality column to be a string. We'll perform these operations below. 
\nLet's start by examining the training dataset as is.", "train_df.head()", "Also, notice that there are some very important numeric fields that are missing in some rows (the count in Pandas doesn't count missing data)", "train_df.describe()", "It is always crucial to clean raw data before using in machine learning, so we have a preprocessing step. We'll define a preprocess function below. Note that the mother's age is an input to our model so users will have to provide the mother's age; otherwise, our service won't work. The features we use for our model were chosen because they are such good predictors and because they are easy enough to collect.\nLab Task #3: Preprocess the data in Pandas dataframe", " # TODO 3\n # TODO -- Your code here.\n\n # Modify plurality field to be a string\n twins_etc = dict(zip([1,2,3,4,5],\n [\"Single(1)\",\n \"Twins(2)\",\n \"Triplets(3)\",\n \"Quadruplets(4)\",\n \"Quintuplets(5)\"]))\n df[\"plurality\"].replace(twins_etc, inplace=True)\n\n # Clone data and mask certain columns to simulate lack of ultrasound\n no_ultrasound = df.copy(deep=True)\n\n # Modify is_male\n no_ultrasound[\"is_male\"] = \"Unknown\"\n \n # Modify plurality\n condition = no_ultrasound[\"plurality\"] != \"Single(1)\"\n no_ultrasound.loc[condition, \"plurality\"] = \"Multiple(2+)\"\n\n # Concatenate both datasets together and shuffle\n return pd.concat(\n [df, no_ultrasound]).sample(frac=1).reset_index(drop=True)", "Let's process the train, eval, test set and see a small sample of the training data after our preprocessing:", "train_df = preprocess(train_df)\neval_df = preprocess(eval_df)\ntest_df = preprocess(test_df)\n\ntrain_df.head()\n\ntrain_df.tail()", "Let's look again at a summary of the dataset. Note that we only see numeric columns, so plurality does not show up.", "train_df.describe()", "Write to .csv files\nIn the final versions, we want to read from files, not Pandas dataframes. So, we write the Pandas dataframes out as csv files. Using csv files gives us the advantage of shuffling during read. This is important for distributed training because some workers might be slower than others, and shuffling the data helps prevent the same data from being assigned to the slow workers.", "# Define columns\ncolumns = [\"weight_pounds\",\n \"is_male\",\n \"mother_age\",\n \"plurality\",\n \"gestation_weeks\"]\n\n# Write out CSV files\ntrain_df.to_csv(\n path_or_buf=\"train.csv\", columns=columns, header=False, index=False)\neval_df.to_csv(\n path_or_buf=\"eval.csv\", columns=columns, header=False, index=False)\ntest_df.to_csv(\n path_or_buf=\"test.csv\", columns=columns, header=False, index=False)\n\n%%bash\nwc -l *.csv\n\n%%bash\nhead *.csv\n\n%%bash\ntail *.csv", "Lab Summary:\nIn this lab, we set up the environment, sampled the natality dataset to create train, eval, test splits, and preprocessed the data in a Pandas dataframe.\nCopyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
elliekinz/Disease-ontology
wiki_pubmed_fuzzy/.ipynb_checkpoints/Wiki-PubMed-Fuzzy-checkpoint.ipynb
apache-2.0
[ "import sys\nsys.path.append('./../')\n%load_ext autoreload\n%autoreload 2\n\nfrom ontology import get_ontology\n\nontology = get_ontology('../data/doid.obo')\nname2doid = {term.name: term.id for term in ontology.get_terms()}\ndoid2name = {term.id: term.name for term in ontology.get_terms()}\n\nimport numpy as np\nimport re", "Wiki links from obo descriptions", "import wiki\nlst = wiki.get_links_from_ontology(ontology)\nprint r'example:{:}'.format(repr(lst[10]))", "urllib2 to read page in html", "page = wiki.get_html(lst[101])\npage[:1000]", "Fuzzy logic", "import fuzzywuzzy.process as fuzzy_process\nfrom fuzzywuzzy import fuzz\n\nstring = \"ventricular arrhythmia\"\nnames = np.sort(name2doid.keys())\nprint fuzzy_process.extractOne(string, names, scorer=fuzz.token_set_ratio)\n\nstring = \"Complete remission of hairy cell leukemia variant (HCL-v) complicated by red cell aplasia post treatment with rituximab.\"\nprint fuzzy_process.extractOne(string, names, scorer=fuzz.partial_ratio)", "Wikipedia search engine: headers", "query = \"ventricular arrhythmia\"\n\ntop = wiki.get_top_headers(query)\ntop\n\nfor header in top:\n results = fuzzy_process.extractOne(header, names, scorer=fuzz.token_set_ratio)\n print results\n\npage = wikipedia.WikipediaPage(title='Cell_proliferation')\npage.summary", "[name for name in names if len(re.split(' ', name)) > 3]\npub-med", "import pubmed\n\nquery = 'hcl-v'\ntitles = pubmed.get(query)\ntitles_len = [len(title) for title in titles] \nfor i, string in enumerate(titles):\n print(\"%d) %s\" % (i+1, string))\n print fuzzy_process.extractOne(string, names, scorer=fuzz.partial_ratio)\n print ", "def find_synonym(s_ref, s):\n last = s_ref.find('(' + s + ')')\n if last == -1:\n return None\nn_upper = len(''.join([c for c in s if c.isupper()]))\nfirst = [(i,c) for i, c in enumerate(s_ref[:last]) if c.isupper()][-n_upper][0]\nreturn s_ref[first:last-1]\n\nprint find_synonym('Wolff-Parkinson-White syndrome (WPW) and athletes: Darwin at play?',\n 'WPW')\nsynonyms", "import utils\n\nprint utils.find_synonym('Wolff-Parkinson-White syndrome (WPW) and athletes: Darwin at play?', 'WPW')\nprint utils.find_synonym('Complete remission of hairy cell leukemia variant (HCL-v)...', 'hcl-v')", "Assymetric distance", "s_ref = 'artery disease'\ns = 'nonartery'\nprint utils.assym_dist(s, s_ref)", "Length statistics", "print 'Mean term name length:', np.mean([len(term.name) for term in ontology.get_terms()])\nprint 'Mean article title length:', np.mean(titles_len)", "Unique words", "words = [re.split(' |-', term.name) for term in ontology.get_terms()]\nwords = np.unique([l for sublist in words for l in sublist if len(l) > 0])\nwords = [w for w in words if len(w) >= 4]\nwords[:10]", "Threading", "from threading import Thread\nfrom time import sleep\n\nfrom ontology import get_ontology\n\nquery_results = None\ndef fn_get_q(query):\n global query_results\n query_results = fuzzy_process.extractOne(query, names, scorer=fuzz.ratio)\n return True\n\nwiki_results = None\ndef fn_get_wiki(query):\n global wiki_results\n header = wiki.get_top_headers(query, 1)[0]\n wiki_results = fuzzy_process.extractOne(header, names, scorer=fuzz.ratio)\n #sleep(0.1)\n return True\n\npubmed_results = None\ndef fn_get_pubmed(query):\n global pubmed_results\n string = pubmed.get(query, topK=1)\n\n if string is not None:\n string = string[0]\n print string\n pubmed_results = fuzzy_process.extractOne(string, names, scorer=fuzz.partial_ratio)\n return True\n else:\n return False\n\n'''main'''\n## from bot\nquery = 
'valve disease'\n\ndef find_answer(query):\n query = query.lower()\n \n # load ontology\n ontology = get_ontology('../data/doid.obo')\n name2doid = {term.name: term.id for term in ontology.get_terms()}\n doid2name = {term.id: term.name for term in ontology.get_terms()}\n \n ## exact match\n if query in name2doid.keys():\n doid = name2doid[query]\n else:\n # exact match -- no\n th_get_q = Thread(target = fn_get_q, args = (query,))\n th_get_wiki = Thread(target = fn_get_wiki, args = (query,))\n th_get_pubmed = Thread(target = fn_get_pubmed, args = (query,))\n\n th_get_q.start()\n th_get_wiki.start()\n th_get_pubmed.start()\n\n\n ## search engine query --> vertices, p=100(NLP??); synonyms\n\n ## new thread for synonyms???\n\n ## synonyms NLP\n\n ## new thread for NLP\n\n ## tree search on vertices (returned + synonyms)\n\n ## sleep ?\n\n th_get_q.join()\n print query_results\n\n th_get_wiki.join()\n print wiki_results\n\n th_get_pubmed.join()\n print pubmed_results\n\n ## final answer\n ## draw graph\n\n doid = None\n \n graph = None\n return doid, graph\n" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jamesfolberth/NGC_STEM_camp_AWS
notebook_solutions/ML_morning_JTN/02_Logistic_Regression_and_Text_Models.ipynb
bsd-3-clause
[ "Lecture 3: Logistic Regression and Text Models\n\n<img src=\"figs/logregwordcloud.png\",width=1000,height=50>\nProblem 1: Logistic Regression for 2D Continuous Features\n\nIn the video lecture you saw some examples of using logistic regression to do binary classification on text data (SPAM vs HAM) and on 1D continuous data. In this problem we'll look at logistic regression for 2D continuous data. The data we'll use are <a href=\"https://www.math.umd.edu/~petersd/666/html/iris_with_labels.jpg\">sepal</a> measurements from the ubiquitous iris dataset. \n<p>\n<img style=\"float:left; width:450px\" src=\"https://upload.wikimedia.org/wikipedia/commons/9/9f/Iris_virginica.jpg\">\n</p>\nThe two features of our model will be the sepal length and sepal width. Execute the following cell to see a plot of the data. The blue points correspond to the sepal measurements of the Iris Setosa (left) and the red points correspond to the sepal measurements of the Iris Versicolour (right).", "import matplotlib.pyplot as plt\n%matplotlib inline \n\nfrom sklearn import datasets\n\niris = datasets.load_iris()\nX_train = iris.data[iris.target != 2, :2] # first two features and\ny_train = iris.target[iris.target != 2] # first two labels only \n\nfig = plt.figure(figsize=(8,8))\nmycolors = {\"blue\": \"steelblue\", \"red\": \"#a76c6e\", \"green\": \"#6a9373\"}\nplt.scatter(X_train[:, 0], X_train[:, 1], s=100, alpha=0.9, c=[mycolors[\"red\"] if yi==1 else mycolors[\"blue\"] for yi in y_train])\nplt.xlabel('sepal length', fontsize=16)\nplt.ylabel('sepal width', fontsize=16);", "We'll train a logistic regression model of the form \n$$\np(y = 1 ~|~ {\\bf x}; {\\bf w}) = \\frac{1}{1 + \\textrm{exp}[-(w_0 + w_1x_1 + w_2x_2)]}\n$$\nusing sklearn's logistic regression classifier as follows", "from sklearn.linear_model import LogisticRegression # import from sklearn \nlogreg = LogisticRegression() # initialize classifier \nlogreg.fit(X_train, y_train); # train on training data ", "Q: Determine the parameters ${\\bf w}$ fit by the model. It might be helpful to consult the documentation for the classifier on the <a href=\"http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html\">sklearn website</a>. Hint: The classifier stores the coefficients and bias term separately. \nQ: In general, what does the Logistic Regression decision boundary look like for data with two features? \nQ: Modify the code below to plot the decision boundary along with the data.", "import numpy as np\nimport math\n\nfig = plt.figure(figsize=(8,8))\nplt.scatter(X_train[:, 0], X_train[:, 1], s=100, c=[mycolors[\"red\"] if yi==1 else mycolors[\"blue\"] for yi in y_train])\nplt.xlabel('Sepal length')\nplt.ylabel('Sepal width')\nx_min, x_max = np.min(X_train[:,0])-0.1, np.max(X_train[:,0])+0.1\ny_min, y_max = np.min(X_train[:,1])-0.1, np.max(X_train[:,1])+0.1\nplt.xlim(x_min, x_max)\nplt.ylim(y_min, y_max)\n\nx1 = np.linspace(x_min, x_max, 100)\nw0 = logreg.intercept_\nw1 = logreg.coef_[0][0]\nw2 = logreg.coef_[0][1]\nx2 = (-w0 - w1*x1)/w2#TODO \nplt.plot(x1, x2, color=\"gray\");", "Problem 2: The Bag-of-Words Text Model\n\nThe remainder of today's exercise will consider the problem of predicting the semantics of text. In particular, later we'll look at predicting whether movie reviews are positive or negative just based on their text. \nBefore we can utilize text as features in a learning model, we need a concise mathematical way to represent things like words, phrases, sentences, etc. 
The most common text models are based on the so-called <a href=\"https://en.wikipedia.org/wiki/Vector_space_model\">Vector Space Model</a> (VSM) where individual words in a document are associated with entries of a vector: \n$$\n\\textrm{\"The sky is blue\"} \\quad \\Rightarrow \\quad \n\\left[\n\\begin{array}{c}\n0 \\\n1 \\ \n0 \\\n0 \\\n1\n\\end{array}\n\\right]\n$$\nThe first step in creating a VSM is to define a vocabulary, $V$, of words that you will include in your model. This vocabulary can be determined by looking at all (or most) of the words in the training set, or even by including a fixed vocabulary based on the english language. A vector representation of a document like a movie review is then a vector with length $|V|$ where each entry in the vector maps uniquely to a word in the vocabulary. A vector encoding of a document would then be a vector that is nonzero in positions corresponding to words present in the document and zero everywhere else. How you fill in the nonzero entries depends on the model you're using. Two simple conventions are the Bag-of-Words model and the binary model. \nIn the binary model we simply set an entry of the vector to $1$ if the associate word appears at least once in the document. In the more common Bag-of-Words model we set an entry of the vector equal to the frequency with which the word appears in the document. Let's see if we can come up with a simple implementation of the Bag-of-Words model in Python, and then later we'll see how sklearn can do the heavy lifting for us. \nConsider a training set containing three documents, specified as follows \n$\\texttt{Training Set}:$\n$\\texttt{d1}: \\texttt{new york times}$\n$\\texttt{d2}: \\texttt{new york post}$\n$\\texttt{d3}: \\texttt{los angeles times}$\nFirst we'll define the vocabulary based on the words in the test set. It is $V = { \\texttt{angeles}, \\texttt{los}, \\texttt{new}, \\texttt{post}, \\texttt{times}, \\texttt{york}}$. \nWe need to define an association between the particular words in the vocabulary and the specific entries in our vectors. Let's define this association in the order that we've listed them above. We can store this mapping as a Python dictionary as follows:", "V = {\"angeles\": 0, \"los\": 1, \"new\": 2, \"post\": 3, \"times\": 4, \"york\": 5}", "Let's also store the documents in a list as follows:", "D = [\"the new york times\", \"the new york post\", \"the los angeles times\"]", "To be consistent with sklearn conventions, we'll encode the documents as row-vectors stored in a matrix. In this case, each row of the matrix corresponds to a document, and each column corresponds to a term in the vocabulary. For our example this gives us a matrix $M$ of shape $3 \\times 6$. The $(d,t)$-entry in $M$ is then the number of times the term $t$ appears in document $d$\nQ: Your first task is to write some simple Python code to construct the term-frequency matrix $M$", "M = np.zeros((len(D),len(V)))\n\nfor ii, doc in enumerate(D): \n for term in doc.split(): \n if(term in V): #only print if the term is in our dictionary\n M[ii,V[term]] += 1 #TODO\n \nprint(M)", "Hopefully your code returns the matrix \n$$M = \n\\left[\n\\begin{array}{ccccccc}\n0 & 0 & 1 & 0 & 1 & 1 \\\n0 & 0 & 1 & 1 & 0 & 1 \\\n1 & 1 & 0 & 0 & 1 & 0 \\\n\\end{array}\n\\right]$$. \nNote that the entry in the (2,0) position is $1$ because the first word (angeles) appears once in the third document. \nOK, let's see how we can construct the same term-frequency matrix in sklearn. 
We will use something called the <a href=\"http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html\">CountVectorizer</a> to accomplish this. Let's see some code and then we'll explain how it functions. \nTo avoid common words, such as \"the\", in our analysis, we will remove any word from a list of common english words in our analysis. We can do so by typing \nstop_words = 'english'\n\nin the CountVectorizer call.", "from sklearn.metrics.pairwise import euclidean_distances\n\nfrom sklearn.feature_extraction.text import CountVectorizer # import CountVectorizer \nvectorizer = CountVectorizer(stop_words = 'english') # initialize the vectorizer\nX = vectorizer.fit_transform(D,) # fit to training data and transform to matrix ", "The $\\texttt{fit_transform}$ method actually does two things. It fits the model to the training data by building a vocabulary. It then transforms the text in $D$ into matrix form. \nIf we wish to see the vocabulary you can do it like so", "print(vectorizer.vocabulary_)", "Note that this is the same vocabulary and indexing that we definfed ourselves (just in a different order). Hopefully that means we'll get the same term-frequency matrix. We can print $X$ and check", "print(X.todense())", "Yep, they're the same! Notice that we had to convert $X$ to a dense matrix for printing. This is because CountVectorizer actually returns a sparse matrix. This is a very good thing since most vectors in a text model will be extremely sparse, since most documents will only contain a handful of words from the vocabulary. \nOK, let's see how we can use the CountVectorizer to transform the test documents into their own term-frequency matrix.", "#get a sense of how different the vectors are\n\nfor f in X:\n print(euclidean_distances(X[0],f))\n\n", "OK, now suppose that we have a query document not included in the training set that we want to vectorize.", "d4 = [\"new york new tribune\"]", "We've already fit the CountVectorizer to the training set, so all we need to do is transform the test set documents into a term-frequency vector using the same conventions. Since we've already fit the model, we do the transformation with the $\\texttt{transform}$ method:", "x4 = vectorizer.transform(d4)", "Let's print it and see what it looks like", "print(x4.todense())", "Notice that the query document included the word $\\texttt{new}$ twice, which corresponds to the entry in the $(0,2)$-position. \nQ: What's missing from $x4$ that we might expect to see from the query document? \n<br>\nProblem 3: Term Frequency - Inverse Document Frequency\n\nThe Bag-of-Words model for text classification is very popular, but let's see if we can do better. Currently we're weighting every word in the corpus by it's frequency. It turns out that in text classification there are often features that are not particularly useful predictors for the document class, either because they are too common or too uncommon. Stop-words are extremely common, low-information words like \"a\", \"the\", \"as\", etc. Removing these from documents is typically the first thing done in peparing data for document classification. \nQ: Can you think of a situation where it might be useful to keep stop words in the corpus? \nOther words that tend to be uninformative predictors are words that appear very very rarely. In particular, if they do not appear frequently enough in the training data then it is difficult for a classification algorithm to weight them heavily in the classification process. 
\nIn general, the words that tend to be useful predictors are the words that appear frequently, but not too frequently. Consider the following frequency graph for a corpus. \n<img src=\"figs/feat_freq.png\",width=400,height=50>\nThe features in column A appear too frequently to be very useful, and the features in column C appear too rarely. One first-pass method of feature selection in text classification would be to discard the words from columns A and C, and build a classifier with only features from column B.\nAnother common model for identifying the useful terms in a document is the Term Frequency - Inverse Document Frequency (tf-idf) model. Here we won't throw away any terms, but we'll replace their Bag-of-Words frequency counts with tf-idf scores which we describe below. \nThe tf-idf score is the product of two statistics, term frequency and inverse document frequency\n$$\\texttt{tfidf(d,t)} = \\texttt{tf(d,t)} \\times \\texttt{idf(t)}$$\nThe term frequency $\\texttt{tf(d,t)}$ is a measure of the frequency with which term $t$ appears in document $d$. The inverse document frequency $\\texttt{idf(t)}$ is a measure of how much information the word provides, that is, whether the term is common or rare across all documents. By multiplying the two quantities together, we obtain a representation of term $t$ in document $d$ that weighs how common the term is in the document with how common the word is in the entire corpus. You can imagine that the words that get the highest associated values are terms that appear many times in a small number of documents. \nThere are many ways to compute the composite terms $\\texttt{tf}$ and $\\texttt{idf}$. For simplicity, we'll define $\\texttt{tf(d,t)}$ to be the number of times term $t$ appears in document $d$ (i.e., Bag-of-Words). We will define the inverse document frequency as follows: \n$$\n\\texttt{idf(t)} = \\ln ~ \\frac{\\textrm{total # documents}}{\\textrm{1 + # documents with term }t}\n = \\ln ~ \\frac{|D|}{|d: ~ t \\in d |}\n$$\nNote that we could have a potential problem if a term comes up that is not in any of the training documents, resulting in a divide by zero. This might happen if you use a canned vocabulary instead of constructing one from the training documents. To guard against this, many implementations will use add-one smoothing in the denominator (this is what sklearn does). \n$$\n\\texttt{idf(t)} = \\ln ~ \\frac{\\textrm{total # documents}}{\\textrm{1 + # documents with term }t}\n = \\ln ~ \\frac{|D|}{1 + |d: ~ t \\in d |}\n$$\nQ: Compute $\\texttt{idf(t)}$ (without smoothing) for each of the terms in the training documents from the previous problem\nQ: Compute the td-ifd matrix for the training set", "idf = np.array([np.log(3), np.log(3), np.log(3./2), np.log(3), np.log(3./2), np.log(3./2)])\nXtfidf = np.dot(X.todense(), np.diag(idf))", "Hopefully you got something like the following: \n$$\nX_{tfidf} = \n\\left[\n\\begin{array}{ccccccccc}\n0. & 0. & 0.40546511 & 0. & 0.40546511 & 0.40546511 \\\n0. & 0. & 0.40546511 & 1.09861229 & 0. & 0.40546511 \\\n1.09861229 & 1.09861229 & 0. & 0. & 0.40546511 & 0. \n\\end{array}\n\\right]\n$$\nThe final step in any VSM method is the normalization of the vectors. This is done so that very long documents to not completely overpower the small and medium length documents.", "row_norms = np.array([np.linalg.norm(row) for row in Xtfidf])\nX_tfidf_n = np.dot(np.diag(1./row_norms), Xtfidf)\n\nprint(X_tfidf_n)", "Let's see what we get when we use sklearn. 
Sklearn has a vectorizer called TfidfVectorizer which is similar to CountVectorizer, but it computes tf-idf scores.", "from sklearn.feature_extraction.text import TfidfVectorizer\ntfidf = TfidfVectorizer()\nY = tfidf.fit_transform(D)\nprint(Y.todense())", "Note that these are not quite the same, because sklearn's implementation of tf-idf uses the add-one smoothing in the denominator for idf. \nOkay, now let's see if we can use TFIDF analysis on real text documents!\nRun the following code to use this analysis on his inauguration speech from 2009. It will output what TFIDF thinks are the most important words from each paragraph\nQ: Is the analysis able to pick out the most important words correctly? Why does it sometimes pick the wrong words?\nQ: You can do the same analysis for his 2012 State of the Union Speech by replacing the first line of code with \"obama_SOU_2012.txt\". How does the analysis do here?\nQ: Find some other piece of text on your own and do the same analysis here by saving it in .txt file and entering the name of this file in the first line of code. You can find a big source of speeches http://www.americanrhetoric.com/newtop100speeches.htm.", "#load in text\nObamaText = open(\"data/obama_SOU_2012.txt\").readlines()\n\n#create TFIDF matrix\nX = vectorizer.fit_transform(ObamaText)\nD_tot = X.shape[0]\nXtfidf = np.zeros(X.shape)\n\nfor i,col in enumerate(X.T): #loop over rows of X (i.e. paragraphs of text)\n \n #number of lines the word appears in (no need for smoothing here)\n freq = np.count_nonzero(col.todense()) \n #compute theidf\n idf = math.log(D_tot/(freq))\n #calculate the tf-idf\n Xtfidf[:,i:i+1] = X[:,i].todense()*idf\n\n#normalize Xtfidf matrix\nrow_norms = np.array([np.linalg.norm(row) for row in Xtfidf])\nXtfidf_norm = np.dot(np.diag(1./row_norms),Xtfidf)\n\n#create a list from the dictionary\nV_words, V_nums = vectorizer.vocabulary_.keys(), vectorizer.vocabulary_.values()\nV_reverse = zip(V_nums,V_words)\nV_reverse_dict = dict(V_reverse)\n\n#loop through the paragraphs of the text and print most important word\nfor i,row in enumerate(Xtfidf_norm):\n row_str = \" \"\n row_str = row_str + V_reverse_dict[np.argmax(row)]\n #top_words_ind = np.argsort(row)[-5:]\n #for ii in top_words_ind:\n # row_str = row_str + V_reverse_dict[ii] + \" \"\n \n print(\"The top word in paragraph \" + str(i) + \" is \" + row_str)\n \n", "<br>\nProblem 4: Classifying Semantics in Movie Reviews\n\n\nThe data for this problem was taken from the <a href=\"https://www.kaggle.com/c/word2vec-nlp-tutorial/details/part-1-for-beginners-bag-of-words\">Bag of Words Meets Bag of Popcorn</a> Kaggle competition\n\nIn this problem you will use the text from movie reviews to predict whether the reviewer felt positively or negatively about the movie using Bag-of-Words and tf-idf. 
I've partially cleaned the data and stored it in files called $\\texttt{labeledTrainData.tsv}$ and $\\texttt{labeledTestData.tsv}$ in the data directory.", "import csv \n\ndef read_and_clean_data(fname, remove_stops=True):\n \n with open('data/stopwords.txt', 'rt') as f:\n stops = [line.rstrip('\\n') for line in f]\n \n with open(fname,'rt') as tsvin:\n reader = csv.reader(tsvin, delimiter='\\t')\n labels = []; text = [] \n for ii, row in enumerate(reader):\n labels.append(int(row[0]))\n words = row[1].lower().split()\n words = [w for w in words if not w in stops] if remove_stops else words \n text.append(\" \".join(words))\n \n return text, labels\n\ntext_train, labels_train = read_and_clean_data('data/labeledTrainData.tsv', remove_stops=True)\ntext_test, labels_test = read_and_clean_data('data/labeledTestData.tsv', remove_stops=True)", "The current parameters are set to not remove stop words from the text so that it's a bit easier to explore. \nLook at a few of the reviews stored in $\\texttt{text_train}$ as well as their associated labels in $\\texttt{labels_train}$. Can you figure out which label refers to a positive review and which refers to a negative review?", "labels_train[:4]", "The first review is labeled $1$ and has the following text:", "text_train[1]", "The fourth review is labeled $0$ and has the following text:", "text_train[0]", "Hopefully it's obvious that label 1 corresponds to positive reviews and label 0 to negative reviews! \nOK, the first thing we'll do is train a logistic regression classifier using the Bag-of-Words model, and see what kind of accuracy we can get. To get started, we need to vectorize the text into mathematical features that we can use. We'll use CountVectorizer to do the job. (Before starting, I'm going to reload the data and remove the stop words this time)", "text_train, labels_train = read_and_clean_data('data/labeledTrainData.tsv', remove_stops=True)\ntext_test, labels_test = read_and_clean_data('data/labeledTestData.tsv', remove_stops=True)\n\ncvec = CountVectorizer()\nX_bw_train = cvec.fit_transform(text_train)\ny_train = np.array(labels_train)\nX_bw_test = cvec.transform(text_test)\ny_test = np.array(labels_test)", "Q: How many different words are in the vocabulary? \nOK, now we'll train a logistic regression classifier on the training set, and test the accuracy on the test set. To do this we'll need to load some kind of accuracy metric from sklearn.", "from sklearn.metrics import accuracy_score\nbwLR = LogisticRegression()\nbwLR.fit(X_bw_train, y_train)\npred_bwLR = bwLR.predict(X_bw_test)\n\nprint(\"Logistic Regression accuracy with Bag-of-Words: \" + str(accuracy_score(y_test, pred_bwLR)))", "OK, so we got an accuracy of around 81% using Bag-of-Words. Now lets do the same tests but this time with tf-idf features.", "tvec = TfidfVectorizer()\nX_tf_train = tvec.fit_transform(text_train)\nX_tf_test = tvec.transform(text_test)\n\ntfLR = LogisticRegression()\ntfLR.fit(X_tf_train, y_train)\npred_tfLR = tfLR.predict(X_tf_test)\n\nprint(\"Logistic Regression accuracy with tf-idf: \" + str(accuracy_score(y_test, pred_tfLR)))", "WOOHOO! With tf-idf features we got around 85% accuracy, which is a 4% improvement. (If you're scoffing at this, wait until you get some more experience working with real-world data. 4% improvement is pretty awesome). \nQ: Which words are the strongest predictors for a positive review and which words are the strongest predictors for negative reviews? 
I'm not going to give you the answer to this one because it's the same question we'll ask on the next homework assignment. But if you figure this out you'll have a great head start! \n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\nNotebook Solutions\n\n<br><br><br>\nProblem 1: Logistic Regression for 2D Continuous Features\n\nIn the video lecture you saw some examples of using logistic regression to do binary classification on text data (SPAM vs HAM) and on 1D continuous data. In this problem we'll look at logistic regression for 2D continuous data. The data we'll use are <a href=\"https://www.math.umd.edu/~petersd/666/html/iris_with_labels.jpg\">sepal</a> measurements from the ubiquitous iris dataset. \n<!---\n<img style=\"float:left; width:450px\" src=\"https://upload.wikimedia.org/wikipedia/commons/9/9f/Iris_virginica.jpg\",width=300,height=50>\n-->\n\n<img style=\"float:left; width:450px\" src=\"http://www.twofrog.com/images/iris38a.jpg\",width=300,height=50>\n<!---\n<img style=\"float:right; width:490px\" src=\"https://upload.wikimedia.org/wikipedia/commons/4/41/Iris_versicolor_3.jpg\",width=300,height=50>\n-->\n<img style=\"float:right; width:490px\" src=\"http://blazingstargardens.com/wp-content/uploads/2016/02/Iris-versicolor-Blue-Flag-Iris1.jpg\",width=300,height=62>\nThe two features of our model will be the sepal length and sepal width. Execute the following cell to see a plot of the data. The blue points correspond to the sepal measurements of the Iris Setosa (left) and the red points correspond to the sepal measurements of the Iris Versicolour (right).", "import matplotlib.pyplot as plt\n%matplotlib inline \n\nfrom sklearn import datasets\n\niris = datasets.load_iris()\nX_train = iris.data[iris.target != 2, :2] # first two features and\ny_train = iris.target[iris.target != 2] # first two labels only \n\nfig = plt.figure(figsize=(8,8))\nmycolors = {\"blue\": \"steelblue\", \"red\": \"#a76c6e\", \"green\": \"#6a9373\"}\nplt.scatter(X_train[:, 0], X_train[:, 1], s=100, alpha=0.9, c=[mycolors[\"red\"] if yi==1 else mycolors[\"blue\"] for yi in y_train])\nplt.xlabel('sepal length', fontsize=16)\nplt.ylabel('sepal width', fontsize=16);", "We'll train a logistic regression model of the form \n$$\np(y = 1 ~|~ {\\bf x}; {\\bf w}) = \\frac{1}{1 + \\textrm{exp}[-(w_0 + w_1x_1 + w_2x_2)]}\n$$\nusing sklearn's logistic regression classifier as follows", "from sklearn.linear_model import LogisticRegression # import from sklearn \nlogreg = LogisticRegression() # initialize classifier \nlogreg.fit(X_train, y_train); # train on training data ", "Q: Determine the parameters ${\\bf w}$ fit by the model. It might be helpful to consult the documentation for the classifier on the <a href=\"http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html\">sklearn website</a>. Hint: The classifier stores the coefficients and bias term separately. \nA: The bias term is stored in logreg.intercept_ . The remaining coefficients are stored in logreg.coef_ . For this problem we have \n$$\nw_0 =-0.599, \\quad w_1 = 2.217, \\quad \\textrm{and} \\quad w_2 = -3.692\n$$\nQ: In general, what does the Logistic Regression decision boundary look like for data with two features? \nA: The decision boundary for Logistic Regresion for data with two features is a line. 
To see this, remember that the decision boundary is made up of $(x_1, x_2)$ points such that $\\textrm{sigm}({\\bf w}^T{\\bf x}) = 0.5$. We then have \n$$\n\\frac{1}{1 + \\textrm{exp}[-(w_0 + w_1x_1 + w_2x_2)]} = \\frac{1}{2} ~~\\Rightarrow ~~ w_0 + w_1x_1 + w_2x_2 = 0 ~~\\Rightarrow~~ x_2 = -\\frac{w_1}{w_2}x_1 - \\frac{w_0}{w_2}\n$$\nSo the decision boundary is a line with slope $-w_1/w_2$ and intercept $-w_0/w_2$. \nQ: Modify the code below to plot the decision boundary along with the data.", "import numpy as np\n\nfig = plt.figure(figsize=(8,8))\nplt.scatter(X_train[:, 0], X_train[:, 1], s=100, c=[mycolors[\"red\"] if yi==1 else mycolors[\"blue\"] for yi in y_train])\nplt.xlabel('Sepal length')\nplt.ylabel('Sepal width')\nx_min, x_max = np.min(X_train[:,0])-0.1, np.max(X_train[:,0])+0.1\ny_min, y_max = np.min(X_train[:,1])-0.1, np.max(X_train[:,1])+0.1\nplt.xlim(x_min, x_max)\nplt.ylim(y_min, y_max)\n\nx1 = np.linspace(x_min, x_max, 100)\nw0 = logreg.intercept_\nw1 = logreg.coef_[0][0]\nw2 = logreg.coef_[0][1]\nx2 = -(w0/w2) - (w1/w2)*x1 #TODO \nplt.plot(x1, x2, color=\"gray\");", "Problem 2: The Bag-of-Words Text Model\n\nThe remainder of today's exercise will consider the problem of predicting the semantics of text. In particular, later we'll look at predicting whether movie reviews are positive or negative just based on their text. \nBefore we can utilize text as features in a learning model, we need a concise mathematical way to represent things like words, phrases, sentences, etc. The most common text models are based on the so-called <a href=\"https://en.wikipedia.org/wiki/Vector_space_model\">Vector Space Model</a> (VSM) where individual words in a document are associated with entries of a vector: \n$$\n\\textrm{\"The sky is blue\"} \\quad \\Rightarrow \\quad \n\\left[\n\\begin{array}{c}\n0 \\\n1 \\ \n0 \\\n0 \\\n1\n\\end{array}\n\\right]\n$$\nThe first step in creating a VSM is to define a vocabulary, $V$, of words that you will include in your model. This vocabulary can be determined by looking at all (or most) of the words in the training set, or even by including a fixed vocabulary based on the english language. A vector representation of a document like a movie review is then a vector with length $|V|$ where each entry in the vector maps uniquely to a word in the vocabulary. A vector encoding of a document would then be a vector that is nonzero in positions corresponding to words present in the document and zero everywhere else. How you fill in the nonzero entries depends on the model you're using. Two simple conventions are the Bag-of-Words model and the binary model. \nIn the binary model we simply set an entry of the vector to $1$ if the associate word appears at least once in the document. In the more common Bag-of-Words model we set an entry of the vector equal to the frequency with which the word appears in the document. Let's see if we can come up with a simple implementation of the Bag-of-Words model in Python, and then later we'll see how sklearn can do the heavy lifting for us. \nConsider a training set containing three documents, specified as follows \n$\\texttt{Training Set}:$\n$\\texttt{d1}: \\texttt{new york times}$\n$\\texttt{d2}: \\texttt{new york post}$\n$\\texttt{d3}: \\texttt{los angeles times}$\nFirst we'll define the vocabulary based on the words in the test set. It is $V = { \\texttt{angeles}, \\texttt{los}, \\texttt{new}, \\texttt{post}, \\texttt{times}, \\texttt{york}}$. 
\nWe need to define an association between the particular words in the vocabulary and the specific entries in our vectors. Let's define this association in the order that we've listed them above. We can store this mapping as a Python dictionary as follows:", "V = {\"angeles\": 0, \"los\": 1, \"new\": 2, \"post\": 3, \"times\": 4, \"york\": 5}", "Let's also store the documents in a list as follows:", "D = [\"new york times\", \"new york post\", \"los angeles times\"]", "To be consistent with sklearn conventions, we'll encode the documents as row-vectors stored in a matrix. In this case, each row of the matrix corresponds to a document, and each column corresponds to a term in the vocabulary. For our example this gives us a matrix $M$ of shape $3 \\times 6$. The $(d,t)$-entry in $M$ is then the number of times the term $t$ appears in document $d$\nQ: Your first task is to write some simple Python code to construct the term-frequency matrix $M$", "M = np.zeros((len(D),len(V)))\n\nfor ii, doc in enumerate(D): \n for term in doc.split(): \n M[ii, V[term]] += 1\n \nprint(M)", "Hopefully your code returns the matrix \n$$M = \n\\left[\n\\begin{array}{ccccccc}\n0 & 0 & 1 & 0 & 1 & 1 \\\n0 & 0 & 1 & 1 & 0 & 1 \\\n1 & 1 & 0 & 0 & 1 & 0 \\\n\\end{array}\n\\right]$$. \nNote that the entry in the (2,0) position is $1$ because the first word (angeles) appears once in the third document. \nOK, let's see how we can construct the same term-frequency matrix in sklearn. We will use something called the <a href=\"http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html\">CountVectorizer</a> to accomplish this. Let's see some code and then we'll explain how it functions.", "from sklearn.feature_extraction.text import CountVectorizer # import CountVectorizer \nvectorizer = CountVectorizer() # initialize the vectorizer\nX = vectorizer.fit_transform(D) # fit to training data and transform to matrix ", "The $\\texttt{fit_transform}$ method actually does two things. It fits the model to the training data by building a vocabulary. It then transforms the text in $D$ into matrix form. \nIf we wish to see the vocabulary you can do it like so", "print(vectorizer.vocabulary_)", "Note that this is the same vocabulary and indexing that we definfed ourselves. Hopefully that means we'll get the same term-frequency matrix. We can print $X$ and check", "print(X.todense())", "Yep, they're the same! Notice that we had to convert $X$ to a dense matrix for printing. This is because CountVectorizer actually returns a sparse matrix. This is a very good thing since most vectors in a text model will be extremely sparse, since most documents will only contain a handful of words from the vocabulary. \nOK, now suppose that we have a query document not included in the training set that we want to vectorize.", "d4 = [\"new york new tribune\"]", "We've already fit the CountVectorizer to the training set, so all we need to do is transform the test set documents into a term-frequency vector using the same conventions. Since we've already fit the model, we do the transformation with the $\\texttt{transform}$ method:", "x4 = vectorizer.transform(d4)", "Let's print it and see what it looks like", "print(x4.todense())", "Notice that the query document included the word $\\texttt{new}$ twice, which corresponds to the entry in the $(0,2)$-position. \nQ: What's missing from $x4$ that we might expect to see from the query document? \nA: The word $\\texttt{tribune}$ do not appear in vector $x4$ at all. 
This is because it did not occur in the training set, which means it is not present in the VSM vocabulary. This should not bother us too much. Most reasonable text data sets will have most of the important words present in the training set and thus in the vocabulary. On the other hand, the throw-away words that are present only in the test set are probably useless anyway, since the learning model is trained based on the text in the training set, and thus won't be able to do anything intelligent with words the model hasn't seen yet. \n<br>\nProblem 3: Term Frequency - Inverse Document Frequency\n\nThe Bag-of-Words model for text classification is very popular, but let's see if we can do better. Currently we're weighting every word in the corpus by it's frequency. It turns out that in text classification there are often features that are not particularly useful predictors for the document class, either because they are too common or too uncommon. Stop-words are extremely common, low-information words like \"a\", \"the\", \"as\", etc. Removing these from documents is typically the first thing done in peparing data for document classification. \nQ: Can you think of a situation where it might be useful to keep stop words in the corpus? \nA: If you plan to use bi-grams or tri-grams as features. Bi-grams are pairs of words that appear side-by-side in a document, e.g. \"he went\", \"went to\", \"to the\", \"the store\". \nOther words that tend to be uninformative predictors are words that appear very very rarely. In particular, if they do not appear frequently enough in the training data then it is difficult for a classification algorithm to weight them heavily in the classification process. \nIn general, the words that tend to be useful predictors are the words that appear frequently, but not too frequently. Consider the following frequency graph for a corpus. \n<img src=\"figs/feat_freq.png\",width=400,height=50>\nThe features in column A appear too frequently to be very useful, and the features in column C appear too rarely. One first-pass method of feature selection in text classification would be to discard the words from columns A and C, and build a classifier with only features from column B.\nAnother common model for identifying the useful terms in a document is the Term Frequency - Inverse Document Frequency (tf-idf) model. Here we won't throw away any terms, but we'll replace their Bag-of-Words frequency counts with tf-idf scores which we describe below. \nThe tf-idf score is the product of two statistics, term frequency and inverse document frequency\n$$\\texttt{tfidf(d,t)} = \\texttt{tf(d,t)} \\times \\texttt{idf(t)}$$\nThe term frequency $\\texttt{tf(d,t)}$ is a measure of the frequency with which term $t$ appears in document $d$. The inverse document frequency $\\texttt{idf(t)}$ is a measure of how much information the word provides, that is, whether the term is common or rare across all documents. By multiplying the two quantities together, we obtain a representation of term $t$ in document $d$ that weighs how common the term is in the document with how common the word is in the entire corpus. You can imagine that the words that get the highest associated values are terms that appear many times in a small number of documents. \nThere are many ways to compute the composite terms $\\texttt{tf}$ and $\\texttt{idf}$. For simplicity, we'll define $\\texttt{tf(d,t)}$ to be the number of times term $t$ appears in document $d$ (i.e., Bag-of-Words). 
We will define the inverse document frequency as follows: \n$$\n\\texttt{idf(t)} = \\ln ~ \\frac{\\textrm{total # documents}}{\\textrm{# documents with term }t}\n = \\ln ~ \\frac{|D|}{|d: ~ t \\in d |}\n$$\nNote that we could have a potential problem if a term comes up that is not in any of the training documents, resulting in a divide by zero. This might happen if you use a canned vocabulary instead of constructing one from the training documents. To guard against this, many implementations will use add-one smoothing in the denominator (this is what sklearn does). \n$$\n\\texttt{idf(t)} = \\ln ~ \\frac{\\textrm{total # documents}}{\\textrm{1 + # documents with term }t}\n = \\ln ~ \\frac{|D|}{1 + |d: ~ t \\in d |}\n$$\nQ: Compute $\\texttt{idf(t)}$ (without smoothing) for each of the terms in the training documents from the previous problem\nA: \n$\n\\texttt{idf}(\\texttt{angeles}) = \\ln ~ \\frac{3}{1} = \\ln ~ \\frac{3}{1} = 1.10\n$\n$\n\\texttt{idf}(\\texttt{los}) = \\ln ~ \\frac{3}{1} = \\ln ~ \\frac{3}{1} = 1.10\n$\n$\n\\texttt{idf}(\\texttt{new}) = \\ln ~ \\frac{3}{2} = \\ln ~ \\frac{3}{2} = 0.41\n$\n$\n\\texttt{idf}(\\texttt{post}) = \\ln ~ \\frac{3}{1} = \\ln ~ \\frac{3}{1} = 1.10\n$\n$\n\\texttt{idf}(\\texttt{times}) = \\ln ~ \\frac{3}{2} = \\ln ~ \\frac{3}{2} = 0.41\n$\n$\n\\texttt{idf}(\\texttt{york}) = \\ln ~ \\frac{3}{2} = \\ln ~ \\frac{3}{2} = 0.41\n$\nQ: Compute the td-ifd matrix for the training set \nA: There are several ways to do this. One way would be to multiply the term-frequency matrix on the right with a diagonal matrix with the idf-values on the main diagonal", "idf = np.array([np.log(3), np.log(3), np.log(3./2), np.log(3), np.log(3./2), np.log(3./2)])\nXtfidf = np.dot(X.todense(), np.diag(idf))\n\nprint(Xtfidf)", "Hopefully you got something like the following: \n$$\nX_{tfidf} = \n\\left[\n\\begin{array}{ccccccccc}\n0. & 0. & 0.40546511 & 0. & 0.40546511 & 0.40546511 \\\n0. & 0. & 0.40546511 & 1.09861229 & 0. & 0.40546511 \\\n1.09861229 & 1.09861229 & 0. & 0. & 0.40546511 & 0. \n\\end{array}\n\\right]\n$$\nThe final step in any VSM method is the normalization of the vectors. This is done so that very long documents to not completely overpower the small and medium length documents.", "row_norms = np.array([np.linalg.norm(row) for row in Xtfidf])\nX_tfidf_n = np.dot(np.diag(1./row_norms), Xtfidf)\n\nprint(X_tfidf_n)", "Let's see what we get when we use sklearn. Sklearn has a vectorizer called TfidfVectorizer which is similar to CountVectorizer, but it computes tf-idf scores.", "from sklearn.feature_extraction.text import TfidfVectorizer\ntfidf = TfidfVectorizer()\nY = tfidf.fit_transform(D)\nprint(Y.todense())", "Note that these are not quite the same, becuase sklearn's implementation of tf-idf uses the add-one smoothing in the denominator for idf. \n<br>\nProblem 4: Classifying Semantics in Movie Reviews\n\n\nThe data for this problem was taken from the <a href=\"https://www.kaggle.com/c/word2vec-nlp-tutorial/details/part-1-for-beginners-bag-of-words\">Bag of Words Meets Bag of Popcorn</a> Kaggle competition\n\nIn this problem you will use the text from movie reviews to predict whether the reviewer felt positively or negatively about the movie using Bag-of-Words and tf-idf. 
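(Before loading the review data, one optional check on the toy example from Problem 3 — a hypothetical aside, not required for this problem: the fitted TfidfVectorizer exposes the idf values it actually used, which makes the smoothing difference explicit.)

```python
# Aside: inspect sklearn's fitted idf values from the Problem 3 toy example.
print(tfidf.idf_)                 # sklearn's idf per term (includes smoothing and a +1 offset)
print(tfidf.get_feature_names())  # term order; use get_feature_names_out() in newer sklearn
```

Now, back to the movie reviews.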
I've partially cleaned the data and stored it in files called $\\texttt{labeledTrainData.tsv}$ and $\\texttt{labeledTestData.tsv}$ in the data directory.", "import csv \n\ndef read_and_clean_data(fname, remove_stops=True):\n \n with open('data/stopwords.txt', 'r') as f:\n stops = [line.rstrip('\\n') for line in f]\n \n with open(fname,'r') as tsvin:\n reader = csv.reader(tsvin, delimiter='\\t')\n labels = []; text = [] \n for ii, row in enumerate(reader):\n labels.append(int(row[0]))\n words = row[1].lower().split()\n words = [w for w in words if not w in stops] if remove_stops else words \n text.append(\" \".join(words))\n \n return text, labels\n\ntext_train, labels_train = read_and_clean_data('data/labeledTrainData.tsv', remove_stops=False)\ntext_test, labels_test = read_and_clean_data('data/labeledTestData.tsv', remove_stops=False)", "The current parameters are set to not remove stop words from the text so that it's a bit easier to explore. \nQ: Look at a few of the reviews stored in $\\texttt{text_train}$ as well as their associated labels in $\\texttt{labels_train}$. Can you figure out which label refers to a positive review and which refers to a negative review? \nA:", "labels_train[:4]", "The first review is labeled $1$ and has the following text:", "text_train[0]", "The fourth review is labeled $0$ and has the following text:", "text_train[3]", "Hopefully it's obvious that label 1 corresponds to positive reviews and label 0 to negative reviews! \nOK, the first thing we'll do is train a logistic regression classifier using the Bag-of-Words model, and see what kind of accuracy we can get. To get started, we need to vectorize the text into mathematical features that we can use. We'll use CountVectorizer to do the job. (Before starting, I'm going to reload the data and remove the stop words this time)", "text_train, labels_train = read_and_clean_data('data/labeledTrainData.tsv', remove_stops=True)\ntext_test, labels_test = read_and_clean_data('data/labeledTestData.tsv', remove_stops=True)\n\ncvec = CountVectorizer()\nX_bw_train = cvec.fit_transform(text_train)\ny_train = np.array(labels_train)\nX_bw_test = cvec.transform(text_test)\ny_test = np.array(labels_test)", "Q: How many different words are in the vocabulary?", "X_bw_train.shape", "A: It looks like around 17,800 distinct words \nOK, now we'll train a logistic regression classifier on the training set, and test the accuracy on the test set. To do this we'll need to load some kind of accuracy metric from sklearn.", "from sklearn.metrics import accuracy_score\nbwLR = LogisticRegression()\nbwLR.fit(X_bw_train, y_train)\npred_bwLR = bwLR.predict(X_bw_test)\n\nprint(\"Logistic Regression accuracy with Bag-of-Words: \", accuracy_score(y_test, pred_bwLR))", "OK, so we got an accuracy of around 81% using Bag-of-Words. Now lets do the same tests but this time with tf-idf features.", "tvec = TfidfVectorizer()\nX_tf_train = tvec.fit_transform(text_train)\nX_tf_test = tvec.transform(text_test)\n\ntfLR = LogisticRegression()\ntfLR.fit(X_tf_train, y_train)\npred_tfLR = tfLR.predict(X_tf_test)\n\nprint(\"Logistic Regression accuracy with tf-idf: \", accuracy_score(y_test, pred_tfLR))", "WOOHOO! With tf-idf features we got around 85% accuracy, which is a 4% improvement. (If you're scoffing at this, wait until you get some more experience working with real-world data. 4% improvement is pretty awesome). \nQ: Which words are the strongest predictors for a positive review and which words are the strongest predictors for negative reviews? 
I'm not going to give you the answer to this one because it's the same question we'll ask on the next homework assignment. But if you figure this out you'll have a great head start! \n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>\n<br><br><br>", "from IPython.core.display import HTML\nHTML(\"\"\"\n<style>\n.MathJax nobr>span.math>span{border-left-width:0 !important};\n</style>\n\"\"\")" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mohanprasath/Course-Work
numpy/numpy_exercises_from_kyubyong/Discrete_Fourier_Transform.ipynb
gpl-3.0
[ "from __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom datetime import date\ndate.today()\n\nauthor = \"kyubyong. https://github.com/Kyubyong/numpy_exercises\"\n\nnp.__version__", "Complex Numbers\nQ1. Return the angle of a in radian.", "a = 1+1j\noutput = ...\nprint(output)", "Q2. Return the real part and imaginary part of a.", "a = np.array([1+2j, 3+4j, 5+6j])\nreal = ...\nimag = ...\nprint(\"real part=\", real)\nprint(\"imaginary part=\", imag)", "Q3. Replace the real part of a with 9, the imaginary part with [5, 7, 9].", "a = np.array([1+2j, 3+4j, 5+6j])\n...\n...\nprint(a)", "Q4. Return the complex conjugate of a.", "a = 1+2j\noutput = ...\nprint(output)", "Discrete Fourier Transform\nQ5. Compuete the one-dimensional DFT of a.", "a = np.exp(2j * np.pi * np.arange(8))\noutput = ...\nprint(output)\n", "Q6. Compute the one-dimensional inverse DFT of the output in the above question.", "print(\"a=\", a)\ninversed = ...\nprint(\"inversed=\", a)", "Q7. Compute the one-dimensional discrete Fourier Transform for real input a.", "a = [0, 1, 0, 0]\noutput = ...\nprint(output)\nassert output.size==len(a)//2+1 if len(a)%2==0 else (len(a)+1)//2\n\n# cf.\noutput2 = np.fft.fft(a)\nprint(output2)", "Q8. Compute the one-dimensional inverse DFT of the output in the above question.", "inversed = ...\nprint(\"inversed=\", a)", "Q9. Return the DFT sample frequencies of a.", "signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=np.float32)\nfourier = np.fft.fft(signal)\nn = signal.size\nfreq = ...\nprint(freq)", "Window Functions", "fig = plt.figure(figsize=(19, 10))\n\n# Hamming window\nwindow = np.hamming(51)\nplt.plot(np.bartlett(51), label=\"Bartlett window\")\nplt.plot(np.blackman(51), label=\"Blackman window\")\nplt.plot(np.hamming(51), label=\"Hamming window\")\nplt.plot(np.hanning(51), label=\"Hanning window\")\nplt.plot(np.kaiser(51, 14), label=\"Kaiser window\")\nplt.xlabel(\"sample\")\nplt.ylabel(\"amplitude\")\nplt.legend()\nplt.grid()\n\nplt.show()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
aje/POT
notebooks/plot_otda_classes.ipynb
mit
[ "%matplotlib inline", "OT for domain adaptation\nThis example introduces a domain adaptation in a 2D setting and the 4 OTDA\napproaches currently supported in POT.", "# Authors: Remi Flamary <remi.flamary@unice.fr>\n# Stanislas Chambon <stan.chambon@gmail.com>\n#\n# License: MIT License\n\nimport matplotlib.pylab as pl\nimport ot", "Generate data", "n_source_samples = 150\nn_target_samples = 150\n\nXs, ys = ot.datasets.get_data_classif('3gauss', n_source_samples)\nXt, yt = ot.datasets.get_data_classif('3gauss2', n_target_samples)", "Instantiate the different transport algorithms and fit them", "# EMD Transport\not_emd = ot.da.EMDTransport()\not_emd.fit(Xs=Xs, Xt=Xt)\n\n# Sinkhorn Transport\not_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1)\not_sinkhorn.fit(Xs=Xs, Xt=Xt)\n\n# Sinkhorn Transport with Group lasso regularization\not_lpl1 = ot.da.SinkhornLpl1Transport(reg_e=1e-1, reg_cl=1e0)\not_lpl1.fit(Xs=Xs, ys=ys, Xt=Xt)\n\n# Sinkhorn Transport with Group lasso regularization l1l2\not_l1l2 = ot.da.SinkhornL1l2Transport(reg_e=1e-1, reg_cl=2e0, max_iter=20,\n verbose=True)\not_l1l2.fit(Xs=Xs, ys=ys, Xt=Xt)\n\n# transport source samples onto target samples\ntransp_Xs_emd = ot_emd.transform(Xs=Xs)\ntransp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=Xs)\ntransp_Xs_lpl1 = ot_lpl1.transform(Xs=Xs)\ntransp_Xs_l1l2 = ot_l1l2.transform(Xs=Xs)", "Fig 1 : plots source and target samples", "pl.figure(1, figsize=(10, 5))\npl.subplot(1, 2, 1)\npl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')\npl.xticks([])\npl.yticks([])\npl.legend(loc=0)\npl.title('Source samples')\n\npl.subplot(1, 2, 2)\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')\npl.xticks([])\npl.yticks([])\npl.legend(loc=0)\npl.title('Target samples')\npl.tight_layout()", "Fig 2 : plot optimal couplings and transported samples", "param_img = {'interpolation': 'nearest', 'cmap': 'spectral'}\n\npl.figure(2, figsize=(15, 8))\npl.subplot(2, 4, 1)\npl.imshow(ot_emd.coupling_, **param_img)\npl.xticks([])\npl.yticks([])\npl.title('Optimal coupling\\nEMDTransport')\n\npl.subplot(2, 4, 2)\npl.imshow(ot_sinkhorn.coupling_, **param_img)\npl.xticks([])\npl.yticks([])\npl.title('Optimal coupling\\nSinkhornTransport')\n\npl.subplot(2, 4, 3)\npl.imshow(ot_lpl1.coupling_, **param_img)\npl.xticks([])\npl.yticks([])\npl.title('Optimal coupling\\nSinkhornLpl1Transport')\n\npl.subplot(2, 4, 4)\npl.imshow(ot_l1l2.coupling_, **param_img)\npl.xticks([])\npl.yticks([])\npl.title('Optimal coupling\\nSinkhornL1l2Transport')\n\npl.subplot(2, 4, 5)\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',\n label='Target samples', alpha=0.3)\npl.scatter(transp_Xs_emd[:, 0], transp_Xs_emd[:, 1], c=ys,\n marker='+', label='Transp samples', s=30)\npl.xticks([])\npl.yticks([])\npl.title('Transported samples\\nEmdTransport')\npl.legend(loc=\"lower left\")\n\npl.subplot(2, 4, 6)\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',\n label='Target samples', alpha=0.3)\npl.scatter(transp_Xs_sinkhorn[:, 0], transp_Xs_sinkhorn[:, 1], c=ys,\n marker='+', label='Transp samples', s=30)\npl.xticks([])\npl.yticks([])\npl.title('Transported samples\\nSinkhornTransport')\n\npl.subplot(2, 4, 7)\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',\n label='Target samples', alpha=0.3)\npl.scatter(transp_Xs_lpl1[:, 0], transp_Xs_lpl1[:, 1], c=ys,\n marker='+', label='Transp samples', s=30)\npl.xticks([])\npl.yticks([])\npl.title('Transported samples\\nSinkhornLpl1Transport')\n\npl.subplot(2, 4, 8)\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',\n label='Target 
samples', alpha=0.3)\npl.scatter(transp_Xs_l1l2[:, 0], transp_Xs_l1l2[:, 1], c=ys,\n marker='+', label='Transp samples', s=30)\npl.xticks([])\npl.yticks([])\npl.title('Transported samples\\nSinkhornL1l2Transport')\npl.tight_layout()\n\npl.show()" ]
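A natural follow-up, not part of the original example and assuming scikit-learn is available, is to score each adaptation method by fitting a simple classifier on the transported source samples and evaluating it on the labelled target samples:

```python
# Sketch: 1-NN accuracy on the target domain after each transport method.
from sklearn.neighbors import KNeighborsClassifier

for name, Xs_transp in [('EMD', transp_Xs_emd),
                        ('Sinkhorn', transp_Xs_sinkhorn),
                        ('Sinkhorn + lpl1', transp_Xs_lpl1),
                        ('Sinkhorn + l1l2', transp_Xs_l1l2)]:
    knn = KNeighborsClassifier(n_neighbors=1).fit(Xs_transp, ys)
    print('{}: target accuracy = {:.3f}'.format(name, knn.score(Xt, yt)))
```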
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
pmorissette/bt
examples/Strategy_Combination.ipynb
mit
[ "This notebook creates a parent strategy(combined) with 2 child strategies(Equal Weight, Inv Vol).\nAlternatively, it creates the 2 child strategies, runs the backtest, combines the results, and creates a parent strategy using both of the backtests.", "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport ffn\nimport bt ", "Create fake data", "rf = 0.04\nnp.random.seed(1)\nmus = np.random.normal(loc=0.05,scale=0.02,size=5) + rf\nsigmas = (mus - rf)/0.3 + np.random.normal(loc=0.,scale=0.01,size=5)\n\nnum_years = 10\nnum_months_per_year = 12\nnum_days_per_month = 21\nnum_days_per_year = num_months_per_year*num_days_per_month\n\nrdf = pd.DataFrame(\n index = pd.date_range(\n start=\"2008-01-02\",\n periods=num_years*num_months_per_year*num_days_per_month,\n freq=\"B\"\n ),\n columns=['foo','bar','baz','fake1','fake2']\n)\n\nfor i,mu in enumerate(mus):\n sigma = sigmas[i]\n rdf.iloc[:,i] = np.random.normal(\n loc=mu/num_days_per_year,\n scale=sigma/np.sqrt(num_days_per_year),\n size=rdf.shape[0]\n )\npdf = np.cumprod(1+rdf)*100\npdf.iloc[0,:] = 100\n\npdf.plot()\n\nstrategy_names = np.array(\n [\n 'Equal Weight',\n 'Inv Vol'\n ]\n)\n\nrunMonthlyAlgo = bt.algos.RunMonthly(\n run_on_first_date=True,\n run_on_end_of_period=True\n)\nselectAllAlgo = bt.algos.SelectAll()\nrebalanceAlgo = bt.algos.Rebalance()\n\nstrats = []\ntests = []\n\nfor i,s in enumerate(strategy_names):\n if s == \"Equal Weight\":\n wAlgo = bt.algos.WeighEqually()\n elif s == \"Inv Vol\":\n wAlgo = bt.algos.WeighInvVol()\n \n strat = bt.Strategy(\n s,\n [\n runMonthlyAlgo,\n selectAllAlgo,\n wAlgo,\n rebalanceAlgo\n ]\n )\n strats.append(strat)\n \n t = bt.Backtest(\n strat,\n pdf,\n integer_positions = False,\n progress_bar=False\n )\n tests.append(t)\n\n\ncombined_strategy = bt.Strategy(\n 'Combined',\n algos = [\n runMonthlyAlgo,\n selectAllAlgo,\n bt.algos.WeighEqually(),\n rebalanceAlgo\n ],\n children = [x.strategy for x in tests]\n)\n\ncombined_test = bt.Backtest(\n combined_strategy,\n pdf,\n integer_positions = False,\n progress_bar = False\n)\n\nres = bt.run(combined_test)\n\nres.prices.plot()\n\nres.get_security_weights().plot()", "In order to get the weights of each strategy, you can run each strategy, get the prices for each strategy, combine them into one price dataframe, run the combined strategy on the new data set.", "strategy_names = np.array(\n [\n 'Equal Weight',\n 'Inv Vol'\n ]\n)\n\nrunMonthlyAlgo = bt.algos.RunMonthly(\n run_on_first_date=True,\n run_on_end_of_period=True\n)\nselectAllAlgo = bt.algos.SelectAll()\nrebalanceAlgo = bt.algos.Rebalance()\n\nstrats = []\ntests = []\nresults = []\n\nfor i,s in enumerate(strategy_names):\n if s == \"Equal Weight\":\n wAlgo = bt.algos.WeighEqually()\n elif s == \"Inv Vol\":\n wAlgo = bt.algos.WeighInvVol()\n \n strat = bt.Strategy(\n s,\n [\n runMonthlyAlgo,\n selectAllAlgo,\n wAlgo,\n rebalanceAlgo\n ]\n )\n strats.append(strat)\n \n t = bt.Backtest(\n strat,\n pdf,\n integer_positions = False,\n progress_bar=False\n )\n tests.append(t)\n \n res = bt.run(t)\n results.append(res)\n\n\nfig, ax = plt.subplots(nrows=1,ncols=1)\nfor i,r in enumerate(results):\n r.plot(ax=ax)\n\nmerged_prices_df = bt.merge(results[0].prices,results[1].prices)\n\ncombined_strategy = bt.Strategy(\n 'Combined',\n algos = [\n runMonthlyAlgo,\n selectAllAlgo,\n bt.algos.WeighEqually(),\n rebalanceAlgo\n ]\n)\n\ncombined_test = bt.Backtest(\n combined_strategy,\n merged_prices_df,\n integer_positions = False,\n progress_bar = False\n)\n\nres = 
bt.run(combined_test)\n\nres.plot()\n\nres.get_security_weights().plot()" ]
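One additional sanity check — a sketch using only pandas plus the merged price frame already built above — is to compare the two child strategies directly before trusting the combined result:

```python
# How similar are the Equal Weight and Inv Vol sleeves day to day?
print(merged_prices_df.pct_change().corr())   # correlation of daily strategy returns
merged_prices_df.plot()                       # overlay the two equity curves
```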
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
WomensCodingCircle/CodingCirclePython
Lesson08_Dictionaries/Dictionary.ipynb
mit
[ "Dictionaries\nA dictionary is datatype that contains a series of key-value pairs. It is similar to a list except for that the indices of the values can be strings, tuples, etc. not just integers. It is also different in that it is unordered. You cannot expect to get the keys in the same order out as you put them in.\nTo create a dictionary:\nmy_dict = { key1: value1, key2: value2 }\n\nCreating an empty dictionary\nmy_dict = {} \nmy_dict = dict()", "fruit_season = {\n 'raspberry': 'May',\n 'apple' : 'September',\n 'peach' : 'July',\n 'grape' : 'August'\n} \n\nprint(type(fruit_season))\nprint(fruit_season)", "To access a value, you index into it similarly to a list using square brackets.\nvalue_of_key1 = my_dict['key1']", "raspberry_season = fruit_season['raspberry']\nprint(raspberry_season)", "Trying to access a key not in the dictionary throws an error", "print(fruit_season['mangos'])", "To add an item to the dictionary set the value equal to the indexed keys\ndict['new_key'] = value", "fruit_season['strawberry'] = 'May'\nprint(fruit_season)", "To delete a key, use the del keyword\ndel dict['key to delete']", "del fruit_season['strawberry']\nprint(fruit_season)", "Rules on keys\nKeys in dictionary must be unique. If you try to make a duplicate key, the data will be overwritten\nKeys must be hashable. What this means is they must come from immutable values and be comparable. You can use strings, numbers, tuples, sets, (most) objects. You cannot use lists or dictionaries as keys.", "duplicate_fruit_season = {\n 'raspberry': 'May',\n 'raspberry': 'June',\n} \nprint(duplicate_fruit_season)\n\nmutable_key = {\n ['watermelon', 'cantaloupe', 'honeydew']: 'July'\n}\n\n# The solution is to use a tuple instead\nimmutable_key = {\n ('watermelon', 'cantelope', 'honeydew'): 'July'\n}", "TRY IT\nCreate a dictionary called vegetable_season with Eggplant-> July and Onion -> May\nDictionary Operators\nThe in operator returns a boolean for whether the key is in the dictionary or not.\nkey in dictionary", "print('raspberry' in fruit_season)\nprint('mangos' in fruit_season)", "You can use this in if statement", "if 'pineapple' in fruit_season:\n print('Lets eat tropical fruit')\nelse:\n print(\"Temperate fruit it is.\")", "TRY IT\nCheck if 'broccoli' is in vegetable_season. If so, print 'Yum, little trees!'\nDictionaries and Loops\nYou can use a for in loop to loop through dictionaries\nfor key in dictionary:\n print key", "for fruit in fruit_season:\n print(\"{0} is best in {1} (at least in Virginia)\".format(fruit.title(), fruit_season[fruit]))", "Dictionary Methods\nYou can use the keys, values, or items methods to return lists of keys, values, or key-value tuples respectively.\nYou can then use these for sorting or for looping", "print(list(fruit_season.keys()))\nprint(list(fruit_season.values()))\nprint(list(fruit_season.items()))\n\nfor key, value in list(fruit_season.items()):\n print(\"In {0} eat a {1}\".format(value, key))\n\nprint(sorted(fruit_season.keys()))", "TRY IT\nLoop through the sorted keys of the vegetable_season dictionary. For each key, print the month it is in season\nMore complex dictionaries\nDictionary keys and values can be almost anything. The keys must be hashable which means it cannot change. 
That means that lists and dictionaries cannot be keys (but strings, tuples, and integers can).\nValues can be just about anything, though.", "my_complicated_dictionary = {\n (1, 2, 3): 6,\n 'weevil': {\n 'e': 2,\n 'i': 1,\n 'l': 1,\n 'v': 1,\n 'w': 1,\n },\n 9: [3, 3]\n}\nprint(my_complicated_dictionary)", "Let's use this to create a more realistic fruit season dictionary", "true_fruit_season = {\n 'raspberry': ['May', 'June'],\n 'apple': ['September', 'October', 'November', 'December'],\n 'peach': ['July', 'August'],\n 'grape': ['August', 'September', 'October']\n} \n\nprint(true_fruit_season)\n\nmonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']\n\nfor month in months:\n print(('It is {0}'.format(month)))\n for fruit, season in list(true_fruit_season.items()):\n if month in season:\n print((\"\\tEat {0}\".format(fruit)))", "TRY IT\nAdd a key to the true_fruit_season for 'watermelons' the season is July, August, and September\nProject: Acrostic\nCreate an acrostic poem generator.\nYou will create a function that takes a name and generates an acrostic poem\n\nCreate a dictionary that has each of the capital letters as keys and an adjective that start with the letter as the value and store in variable named adjectives. (Reference: http://www.enchantedlearning.com/wordlist/adjectives.shtml)\nCreate a function called acrostic that takes one parameter name.\nIn the acrostic function capitalize the name (use the upper method)\nFor each letter in the name\nGet the adjective corresponding to that letter and store in a variable called current_adj\nPrint out Letter-current_adj\n\n Challenge instead of just one adjective have each letter's value be a list of adjectives. Use the random module to select a random adjective instead of always selecting the same one.\nBonus Material\nAuto generating the dictionary for the acrostic:", "# If you have a list of adjectives\nmy_dict = {}\n\n# Imaging this is the full alphabet\nfor i in ['A', 'B', 'C']:\n my_dict[i] = []\n \n \nfor i in ['Adoreable', 'Acceptable', 'Bad', 'Cute', 'Basic', 'Dumb']:\n first_char = i[0]\n if first_char in my_dict:\n my_dict[first_char].append(i)\nprint(my_dict)\n\n# Generating from a file\nmy_dict = {}\n\nfor i in ['A', 'B', 'C']:\n my_dict[i] = []\n \n# adjectives.txt has one adjective per line\nwith open('adjectives.txt') as fh:\n for line in fh:\n word = line.rstrip().title()\n first_char = word[0]\n if first_char in my_dict:\n my_dict[first_char].append(word)\n \nprint(my_dict['A'])" ]
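One possible sketch of the acrostic project described above (the adjective choices here are made up; any mapping from capital letters to adjectives works):

```python
adjectives = {'A': 'Adventurous', 'B': 'Brave', 'C': 'Curious', 'D': 'Daring',
              'E': 'Energetic', 'I': 'Inventive', 'J': 'Jolly', 'L': 'Lively',
              'M': 'Mighty', 'N': 'Nimble', 'O': 'Optimistic', 'R': 'Radiant',
              'S': 'Sincere', 'T': 'Thoughtful'}

def acrostic(name):
    name = name.upper()
    for letter in name:
        current_adj = adjectives.get(letter, '???')  # '???' if no adjective is stored
        print(letter + '-' + current_adj)

acrostic('Rosa')
```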
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
qutip/qutip-notebooks
docs/guide/Eseries.ipynb
lgpl-3.0
[ "Eseries Class\nContents\n\nExponential-Series Representation of Quantum Objects\nApplications of Exponential-Series", "%matplotlib inline\nimport numpy as np\nfrom pylab import *\nfrom qutip import *", "<a id='exponential'></a>\nExponential-Series Representation of Time-Dependent Quantum Objects\nThe eseries object in QuTiP is a representation of an exponential-series expansion of time-dependent quantum objects (a concept borrowed from the quantum optics toolbox). \nAn exponential series is parameterized by its amplitude coefficients $c_i$ and rates $r_i$, so that the series takes the form \n$E(t) = \\sum_i c_i e^{r_i t}$. The coefficients are typically quantum objects (i.e. states, operators, etc.), so that the value of the eseries also is a quantum object, and the rates can be either real or complex numbers (describing decay rates and oscillation frequencies, respectively). Note that all amplitude coefficients in an exponential series must be of the same dimensions and composition. \nIn QuTiP, an exponential series object is constructed by creating an instance of the class eseries:", "es1 = eseries(sigmax(), 1j)", "where the first argument is the amplitude coefficient (here, the sigma-X operator), and the second argument is the rate. The eseries in this example represents the time-dependent operator $\\sigma_x e^{i t}$. To add more terms to an eseries object we simply add objects using the + operator:", "omega = 1.0\nes2 = (eseries(0.5 * sigmax(), 1j * omega) + eseries(0.5 * sigmax(), -1j * omega))", "The eseries in this example represents the operator $0.5 \\sigma_x e^{i\\omega t} + 0.5 \\sigma_x e^{-i\\omega t}$, which is the exponential series representation of $\\sigma_x \\cos(\\omega t)$. Alternatively, we can also specify a list of amplitudes and rates when the eseries is created:", "es2 = eseries([0.5 * sigmax(), 0.5 * sigmax()], [1j * omega, -1j * omega])", "We can inspect the structure of an eseries object by printing it to the standard output console:", "es2", "and we can evaluate it at time $t$ by using the esval function or the value method:", " esval(es2, 0.0) # equivalent to es2.value(0.0)\n\nes2.value(0)", "or for a list of times [0.0, 1.0 * pi, 2.0 * pi]:", "times = [0.0, 1.0 * np.pi, 2.0 * np.pi]\nesval(es2, times)\n\nes2.value(times)", "To calculate the expectation value of an time-dependent operator represented by an eseries, we use the expect function. For example, consider the operator $\\sigma_x \\cos(\\omega t) + \\sigma_z\\sin(\\omega t)$, and say we would like to know the expectation value of this operator for a spin in its excited state (rho = fock_dm(2,1) produce this state):", "es3 = (eseries([0.5*sigmaz(), 0.5*sigmaz()], [1j, -1j]) + \n eseries([-0.5j*sigmax(), 0.5j*sigmax()], [1j, -1j]))\n\nrho = fock_dm(2, 1)\nes3_expect = expect(rho, es3)\nes3_expect", "Note the expectation value of the eseries object, expect(rho, es3), itself is an eseries, but with amplitude coefficients that are c-numbers instead of quantum operators. To evaluate the c-number eseries at the times times we use es3_expect.value(times) or equivalently esval(es3_expect, times).", "es3_expect.value([0.0, pi/2])", "<a id='applications'></a>\nApplications of Exponential Series\nThe exponential series formalism can be useful for the time-evolution of quantum systems. 
One approach to calculating the time evolution of a quantum system is to diagonalize its Hamiltonian (or Liouvillian, for dissipative systems) and to express the propagator (e.g., $\\exp(-iHt) \\rho \\exp(iHt)$) as an exponential series. \nThe QuTiP function ode2es and essolve use this method to evolve quantum systems in time. The exponential series approach is particularly suitable for cases when the same system is to be evolved for many different initial states, since the diagonalization only needs to be performed once (as opposed to e.g. the ode solver that would need to be ran independently for each initial state).\nAs an example, consider a spin-1/2 with a Hamiltonian pointing in the $\\sigma_z$ direction, and that is subject to noise causing relaxation. For a spin originally is in the up state, we can create an eseries object describing its dynamics by using the es2ode function:", "psi0 = basis(2,1)\nH = sigmaz()\nL = liouvillian(H, [sqrt(1.0) * destroy(2)])\nes = ode2es(L, psi0)", "The ode2es function diagonalizes the Liouvillian $L$ and creates an exponential series with the correct eigenfrequencies and amplitudes for the initial state \n$\\psi_0$ (psi0).\nWe can examine the resulting eseries object by printing a text representation:", "es", "or by evaluating it and arbitrary points in time (here at 0.0 and 1.0):", "es.value([0.0, 1.0])", "and the expectation value of the exponential series can be calculated using the expect function:", " es_expect = expect(sigmaz(), es)", "The result es_expect is now an exponential series with c-numbers as amplitudes, which easily can be evaluated at arbitrary times:", "times = linspace(0.0, 10.0, 100)\nsz_expect = es_expect.value(times)\nplot(times, sz_expect, lw=2)\nxlabel(\"Time\", fontsize=14)\nylabel(\"Expectation value of sigma-z\", fontsize=14)\nshow()\n\nfrom IPython.core.display import HTML\ndef css_styling():\n styles = open(\"../styles/guide.css\", \"r\").read()\n return HTML(styles)\ncss_styling()" ]
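As one more small example using the same eseries API (a sketch following the es3 construction shown earlier): the operator sigma-y multiplied by sin(wt), written as an exponential series.

```python
# sigma_y * sin(w*t) as an exponential series, via sin(wt) = (exp(iwt) - exp(-iwt)) / 2i
w = 1.0
es_sin = eseries([-0.5j * sigmay(), 0.5j * sigmay()], [1j * w, -1j * w])
es_sin.value([0.0, np.pi / 2])   # the zero operator at t=0, sigma_y at t=pi/2
```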
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Michaelt293/ANZSMS-Programming-workshop
Python_tutorial.ipynb
cc0-1.0
[ "ANZSMS 2015 Python Programming Workshop\nPrint Function\nThe print function prints output to the command-line interface (terminal in OS X/Linux, powershell in Windows). In the following example, we print the text string \"Hello, world!\"", "print(\"Hello, world!\")", "Variables, numbers and math basic operations\nTake the following statement, x = 33. In this statement, x is a variable with a value of 33 (integer literal). As its name suggests, variables can be reassigned when executing a program.\nSimple data types in Python include: integers, floating point numbers, strings and Boolean (True/False) values. In the following examples, the variables Ag and Au are assigned to integer values. The variables are then reassigned to floating point numbers and some simple math operations demonstrated.\nAu and Ag variables assigned to integer values\nIntegers are whole numbers, i.e., 1, 0, -20 etc.", "Ag = 107\n\nAu = 197\n\ntype(Ag)\n\ntype(Au)", "Au and Ag variables reassigned to floating point values\nFloats use a decimal point or exponential notation. Note that values such as 20.0, 1.0, 0.0 etc. are floats, not integers. Also, floating point values may not be a true representation of the number since computer use a binary (base-2) number system. This leads to floating point errors when working with floating point values.", "Ag = 106.9\n\nAu = 197.0\n\ntype(Ag)\n\ntype(Au)", "Math operations\nIn Python 3, math operations behave as you would expect. In Python 2, division is the same as floor division when working with integers!", "Au + Ag # addition (Note: You can add comments to your code by using the # symbol)\n\nAu - Ag # subtraction\n\nAu * 5 # multiplication\n\nAg ** 2 # exponential - mass of silver squared in this case\n\nAu / Ag # division\n\nAu // Ag # floor division", "Type conversion\nWe can convert a float to an integer or an integer to a float. Conversions of string representations of numbers to actual numbers (integers and floats) is also common in Python programming.", "integer_Ag = int(Ag)\n\ninteger_Ag\n\ntype(integer_Ag)", "The modulo operator (%)\nThe modulo (%) operator gives the remainder from division. This is commonly used to check whether a number is odd or even. In mass spectrometry, we could use this to test whether an ion has an odd number of nitrogen atoms. For example, \nmz = 114; if mz % 2 == 0:; print(\"Ion has an odd number of nitrogens\")", "modulo = integer_Ag % 2\n\nif modulo == 0:\n print(\"Ion is even\")", "Strings and indexing\nIn the following example, we have a file name represented as a string. From this string, we can use to indexing to select certain characters or substrings from the file name string.\nStrings\nString are simply strings of characters (letters, numbers, symbols etc.). Strings are indicated using either single quotes ('This is a string') or double quotes (\"This is another string\"). Multiline strings can be made using triple quotes (i.e. \"\"\"Multiline string\"\"\").", "MS2_spectrum = \"Liver_MS2_406.raw\"\n\nMS2_spectrum", "Indexing\nSingle characters are indexed using square brackets after the variable name. In Python, the first character is 0. 
Characters may also be index from the end of the string (starting at -1).", "MS2_spectrum[0]\n\nMS2_spectrum[-1]", "Substrings can be indexed using string[start:end]", "sample = MS2_spectrum[0:5]\n\nsample # Note: The character at position 5 is not included in the substring.\n\nion = MS2_spectrum[10:13] \n\nion\n\nfile_format = MS2_spectrum[-3:]\n\nfile_format", "Type conversion - string to float", "type(ion)\n\nfloat(ion)", "Collection data types\nLists\nLists are mutable (i.e., modifiable), ordered collections of items. Lists are created by enclosing a collection of items with square brackets. An empty list may also be created simply by assigning [] to a variable, i.e., empty_list = [].", "MS_files = [\"MS_spectrum\", \"MS2_405\", \"MS2_471\", \"MS2_495\"]\n\nMS_files", "Indexing in lists is the same as for strings", "MS_files[2]", "Several list 'methods' exist for manipulating lists", "MS_files.remove(\"MS2_405\")\n\nMS_files\n\nMS_files.append(\"MS3_225\")\n\nMS_files", "Tuples\nTuples are immutable (i.e., can't modified after their creation), ordered collections of items and are the simplist collection data type. Tuples are created by enclosing a collection of items by parentheses).", "Fe_isotopes = (53.9, 55.9, 56.9, 57.9)\n\nFe_isotopes", "Indexing", "Fe_isotopes[0]", "Dictionaries\nDictionaries are mutable, unordered collections of key: value pairs. Dictionaries are created created by enclosing key: value pairs with curly brackets. Importantly, keys must be hashable. This means, for example, that lists can't be used as keys since the items inside a list may be modified.", "carbon_isotopes = {\"12\": 0.9893, \"13\": 0.0107}", "Fetching the value for a certain key", "carbon_isotopes[\"12\"]", "Dictionary methods", "carbon_isotopes.keys()\n\ncarbon_isotopes.values()\n\ncarbon_isotopes.items()", "Sets\nSets are another data type which are like an unordered list with no dublicates. They are especially useful for finding all the unique items from a list as shown below.", "phospholipids = [\"PA(16:0/18:1)\", \"PA(16:0/18:2)\", \"PC(14:0/16:0)\", \"PC(16:0/16:1)\", \"PC(16:1/16:2)\"]\n# Lets assume we apply a function that finds the type of phospholipid name to \nphospholipid_fatty_acids = [\"16:0\", \"18:1\", \"16:0\", \"18:2\", \"14:0\", \"16:0\", \"16:0\", \"16:1\", \"16:1\", \"16:2\"]\n\nunique_fatty_acids = set(phospholipid_fatty_acids)\n\nunique_fatty_acids \n\nnum_unique_fa = len(unique_fatty_acids)\n\nnum_unique_fa", "Boolean operators\nBoolean operators asses the truth or falseness of a statement.", "Ag > Au\n\nAg < Au\n\nAg == 106.9\n\nAu >= 100\n\nAg <= Au and Ag > 200\n\nAg <= Au or Ag > 200", "Conditional statements\nCode is only executed if the conditional statement is evaluated as True. In the following example, Ag has a value of greater than 100 and therefore only the \"Ag is greater than 100 Da\" string is printed. A colon follows the conditional statement and the following code block is indented by 4 spaces (always use 4 spaces rather than tabs - errors will resulting when mixing tabs with spaces!). Note, the elif and else statements are optional.", "if Ag < 100:\n print(\"Ag is less than 100 Da\")\nelif Ag > 100:\n print(\"Ag is greater than 100 Da.\")\nelse:\n print(\"Ag is equal to 100 Da.\")", "While loops\nWhile loops repeat the execution of a code block while a condition is evaulated as True. When using while loops, be careful not to make an infinite loop where the conditional statement never evaluates as False. 
(Note: You could, however, use 'break' to break from an infinite loop.)", "mass_spectrometers = 0\nwhile mass_spectrometers < 5:\n print(\"Ask for money\")\n mass_spectrometers = mass_spectrometers + 1\n # Comment: This can be written as mass_spectrometers += 1\n print(\"Number of mass spectrometers equals\", mass_spectrometers)\n \nprint(\"\\nNow we need more lab space\")", "For loops\nFor loops iterate over each item of collection data types (lists, tuples, dictionaries and sets). For loops can also be used to loop over the characters of a string. In fact, this fact will be utilised later to evaluate each amino acid residue of a peptide string.", "lipid_masses = [674.5, 688.6, 690.6, 745.7]\n\nNa = 23.0\n\nlipid_Na_adducts = []\nfor mass in lipid_masses:\n lipid_Na_adducts.append(mass + Na)\n\nlipid_Na_adducts", "List comprehension\nThe following is a list comprehension which performs the same operation of the for loop above but in less lines of code.", "adducts_comp = [mass + Na for mass in lipid_masses]\n\nadducts_comp", "We could also add a predicate to a list comprehension. Here, we calculate the mass of lipids less than 700 Da.", "adducts_comp = [mass + Na for mass in lipid_masses if mass < 700]\n\nadducts_comp ", "While and for loops with conditional statements\nBoth while and for loops can be combined with conditional statements for greater control of flow within a program.", "mass_spectrometers = 0\nwhile mass_spectrometers < 5:\n mass_spectrometers += 1\n print(\"Number of mass spectrometers equals\", mass_spectrometers)\n if mass_spectrometers == 1:\n print(\"Woohoo, the first of many!\")\n elif mass_spectrometers == 5:\n print(\"That'll do for now.\")\n else:\n print(\"More!!\")\n\nfor MS_file in MS_files:\n if \"spectrum\" in MS_file:\n print(\"MS file:\", MS_file)\n elif \"MS2\" in MS_file:\n print(\"MS2 file:\", MS_file)\n else:\n print(\"MS3 file:\", MS_file)", "Exercise: Calculate peptide masses\nIn the following example, we will calculate the mass of a peptide from a string containing one letter amino acid residue codes. For example, peptide = \"GASPV\". To do this, we will first need a dictionary containing the one letter codes as keys and the masses of the amino acid residues as values. We will then need to create a variable to store the mass of the peptide and use a for loop to iterate over each amino acid residue in the peptide.", "amino_dict = {\n 'G': 57.02147,\n 'A': 71.03712,\n 'S': 87.03203,\n 'P': 97.05277,\n 'V': 99.06842,\n 'T': 101.04768,\n 'C': 103.00919,\n 'I': 113.08407,\n 'L': 113.08407,\n 'N': 114.04293,\n 'D': 115.02695,\n 'Q': 128.05858,\n 'K': 128.09497,\n 'E': 129.0426,\n 'M': 131.04049,\n 'H': 137.05891,\n 'F': 147.06842,\n 'R': 156.10112,\n 'Y': 163.06333,\n 'W': 186.07932,\n }\n\n# Data modified from http://www.its.caltech.edu/~ppmal/sample_prep/work3.html\n\npeptide_name = \"SCIENCE\"\n\nmass = 18.010565\nfor amino_acid in peptide_name:\n mass += amino_dict[amino_acid]\n\nmass", "Functions\nFunctions perform a specified task when called during the execution of a program. Functions reduce the amount of code that needs to be written and greatly improves code readability. (Note: readability matters!) The for loop created above is better placed in a function so that the for loop doesn't need to be re-written everytime we wish to calculate the mass of a peptide. 
Pay careful attention to the syntax below.", "def peptide_mass(peptide):\n mass = 18.010565\n for amino_acid in peptide:\n mass += amino_dict[amino_acid]\n return mass\n\npeptide_mass(peptide_name)", "User input\nA simple means to gather user inputted data is to use input. This will prompt the user to enter data which may be used within the program. In the example below, we prompt the user to enter a peptide name. The peptide name is then used for the function call to calculate the peptide's mass.", "user_peptide = input(\"Enter peptide name: \")\n\npeptide_mass(user_peptide)" ]
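A possible extension (not part of the original workshop material) is to validate the user's input before calculating a mass, combining the membership test and conditionals covered earlier:

```python
def is_valid_peptide(peptide):
    """Return True if every character is a recognised one-letter amino acid code."""
    for amino_acid in peptide:
        if amino_acid not in amino_dict:
            print(amino_acid, "is not a recognised amino acid code")
            return False
    return True

user_peptide = input("Enter peptide name: ")
if is_valid_peptide(user_peptide):
    print(peptide_mass(user_peptide))
else:
    print("Please use one-letter amino acid codes only.")
```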
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
UWSEDS/LectureNotes
week_4/Exceptions.ipynb
bsd-2-clause
[ "import numpy as np", "Exceptions\nAn exception is an event, which occurs during the execution of a program, that disrupts the normal flow of the program's instructions.\nYou've already seen some exceptions in the Debugging lesson.\n* \nMany programs want to know about exceptions when they occur. For example, if the input to a program is a file path. If the user inputs an invalid or non-existent path, the program generates an exception. It may be desired to provide a response to the user in this case.\nIt may also be that programs will generate exceptions. This is a way of indicating that there is an error in the inputs provided. In general, this is the preferred style for dealing with invalid inputs or states inside a python function rather than having an error return.\nCatching Exceptions\nPython provides a way to detect when an exception occurs. This is done by the use of a block of code surrounded by a \"try\" and \"except\" statement.", "def divide(numerator, denominator):\n result = numerator/denominator\n print(\"result = %f\" % result)\n\ndivide(1.0, 0)\n\ndef divide1(numerator, denominator):\n try:\n GARBAGE\n result = numerator/denominator\n print(\"result = %f\" % result)\n except (ZeroDivisionError, NameError) as err:\n import pdb; pdb.set_trace()\n print(\"You can't divide by 0! or use GARBAGE.\")\n\ndivide1(1.0, 'a')\n\nprint(err)\n\ndivide1(1.0, 2)\n\ndivide1(\"x\", 2)\n\ndef divide2(numerator, denominator):\n try:\n result = numerator / denominator\n print(\"result = %f\" % result)\n except (ZeroDivisionError, TypeError) as err:\n print(\"Got an exception: %s\" % err)\n\ndivide2(1, \"X\")\n\n#divide2(\"x, 2)", "Why didn't we catch this SyntaxError?", "# Handle division by 0 by using a small number\nSMALL_NUMBER = 1e-3\ndef divide3(numerator, denominator):\n try:\n result = numerator/denominator\n except ZeroDivisionError:\n result = numerator/SMALL_NUMBER\n print(\"result = %f\" % result)\n except Exception as err:\n print(\"Different error than division by zero:\", err)\n\ndivide3(1,0)\n\ndivide3(\"1\",0)", "What do you do when you get an exception?\nFirst, you can feel relieved that you caught a problematic element of your software! Yes, relieved. Silent fails are much worse. (Again, another plug for testing.)\nGenerating Exceptions\nWhy generate exceptions? (Don't I have enough unintentional errors?)", "import pandas as pd\ndef validateDF(df):\n \"\"\"\"\n :param pd.DataFrame df: should have a column named \"hours\"\n \"\"\"\n if not \"hours\" in df.columns:\n raise ValueError(\"DataFrame should have a column named 'hours'.\")\n\ndf = pd.DataFrame({'hours': range(10) })\nvalidateDF(df)\n\nclass SeattleCrimeError(Exception):\n pass\n\nb = False\nif not b:\n raise SeattleCrimeError(\"There's been a crime!\")", "Class exercise\nFor the entropy function, create a new functions that\nthrows an exception if the argument is not a valid probability distribution." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
chicago-justice-project/article-tagging
lib/notebooks/senteval_budgeting.ipynb
mit
[ "import sys\nimport json\nsys.path.append('..')\nimport tagnews\nimport matplotlib.pyplot as plt\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\npd.set_option('display.width', 150)\npd.set_option('max.columns', 15)", "Determine the costs of processing existing articles\nBased on complete data files from through 2019-09-07.\nEach 1000 words of an article submitted is one \"unit\", rounded up.\n1,496,665 units total = $2487 to process at once, or 300 months in free batches of 5k...", "crimetags = tagnews.CrimeTags()\n\ndf_all = tagnews.load_data()\ndf_all['read_date'] = df_all['created'].str.slice(0, 10)\n### Limiting it to last two years because the data volume is unstable before that\ndf = df_all.loc[df_all['read_date'] >= '2017-01-01']\ndel df_all\n### Number of units to process title and article through Google Cloud API\ndf['n_chars'] = df['title'].str.len() + df['bodytext'].str.len()\ndf['n_units'] = np.ceil(df['n_chars']/1000.)\n\ndef calculate_google_nlp_price(total_units, verbose=True):\n '''Cost to run entity sentiment analysis on a given number of \n units in a single month through in Google Cloud API.\n https://cloud.google.com/natural-language/#natural-language-api-pricing\n \n First 5000 = free\n 5k-1M = $2 per 1000 units\n 1M-5M = $1 per 1000 units\n 5M-20M = $0.5 per 1000 units\n '''\n free_units = min(5e3, total_units)\n first_tier_units = min(1e6-5e3, total_units-free_units)\n second_tier_units = min(5e6-1e6, total_units-free_units-first_tier_units)\n third_tier_units = max(0, total_units-free_units-first_tier_units-second_tier_units)\n units = [free_units, first_tier_units, second_tier_units, third_tier_units]\n costs = [0, 2., 1., 0.5]\n total_cost = sum([c*np.ceil(u/1e3) for (c, u) in zip(costs, units)])\n if verbose:\n print('{:.0f} units: {:.0f}*0 + {:.0f}*$2 + {:.0f}*$1 + {:.0f}*$0.50 = ${:.2f}'\n .format(total_units,\n np.ceil(free_units/1e3),\n np.ceil(first_tier_units/1e3),\n np.ceil(second_tier_units/1e3),\n np.ceil(third_tier_units/1e3),\n total_cost))\n return total_cost\n\nunits = df['n_units'].sum()\ncost = calculate_google_nlp_price(units)\n\nunits_per_day = (df\n .groupby('read_date')\n .agg({'url': 'count',\n 'n_units': 'sum'})\n )\nprint(units_per_day.index.min(), units_per_day.index.max())\n\n### Number of units coming in per day\n### Typically ranges from 800-2000 daily, so definitely >5000 monthly\nf1, ax1 = plt.subplots(1, figsize=[15, 6])\nax1.plot(range(units_per_day.shape[0]), units_per_day['n_units'], label='# units')", "Relevance scoring/binning", "### Full dataset takes up too much memory, so dropping all but the most recent now\n### This keeps 276122 of the original 1.5e6, or a little less than 1/5th of the total\ndf2 = df.loc[df['read_date'] >= '2019-03-01']\ndel df\n\nnew_units = df2['n_units'].sum()\ndownscale = new_units/units\nprint(new_units, downscale)\n\n### Assign a made-up CPD relevance score\n\n### Words associated with CPD\ncop_words = [\n \"cpd\",\n \"police\",\n \"officer\",\n \"cop\",\n \"officers\",\n \"pigs\",\n \"policeofficer\",\n ]\n### Count number of times relevant words appear in title or text\ndf2['cop_word_counts'] = 0\nfor w in cop_words:\n df2['cop_word_counts'] += df2['bodytext'].str.lower().str.count(w)\n df2['cop_word_counts'] += df2['title'].str.lower().str.count(w)\n\ndf2['cop_word_counts'].describe()\n\n### Does the word count measure the same thing as the CPD_model column? 
\n### No, doesn't look very correlated actually...\nf1, ax1 = plt.subplots(1, figsize=[14,6])\nax1.scatter(df2['cop_word_counts'], df2['CPD_model'], alpha=0.3, s=5)\nax1.set_xlabel('cop word count')\nax1.set_ylabel('CPD_model')\n\n### See examples that use the relevant words but didn't score highly in CPD_model\n### Some definitely look relevant (e.g. article 650870)\nrelevant_but_zero = df2.loc[(df2['CPD_model']==0) & ((df2['CPD']==0))].sort_values('cop_word_counts', ascending=False)\nprint(relevant_but_zero.loc[650870, 'title'])\nprint(relevant_but_zero.loc[650870, 'bodytext'])\n\n### Basic relevance score:\n### - 50% human tagged \"CPD\"\n### - 25% \"CPD_model\"\n### - 25% usage of above words\ndf2['CPD_relevance'] = ( 0.5*df2['CPD'] # upweight because it means more\n + 0.25*df2['CPD_model']\n + 0.25*(df2['cop_word_counts']/(2*len(cop_words))).clip(upper=1.)\n )\n### 55% have relevance = 0\n### \ndf['relevance_tier'] = 0\ndf.head()\n\n### What number/fraction have score > 0?\nprint(df2.loc[df2['CPD_relevance']>0, 'n_units'].sum(), (df2['CPD_relevance']>0).mean())\n### What number/fraction have score = 0?\nprint(df2.loc[df2['CPD_relevance']==0, 'n_units'].sum(), (df2['CPD_relevance']==0).mean())\n\n### About half of scores are 0\n### What is the distribution of the nonzero ones?\nnonzero_scores = df2.loc[df2['CPD_relevance']>0].sort_values('CPD_relevance', ascending=False)\n\nf1, ax1 = plt.subplots(1, figsize=[14, 6])\nax1.hist(nonzero_scores['CPD_relevance'], bins=20)\n\n5000*downscale\n\n### Divide this sample into groups of 900 rows each, in order to get\n### sizes needed for bins that would be ~5000 each.\n### This ould actually be a bit too big, but you get the general idea\n### Bins would have to get progressively smaller as we go down to stay equal in number\nnonzero_scores['CPD_relevance'].iloc[[i*900 for i in range(1, int(np.ceil(nonzero_scores.shape[0]/900)))]]" ]
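A hedged sketch of one way to finish that binning: sort by relevance and accumulate units so that each tier holds roughly 5000 units, i.e. one free monthly batch (column names follow the ones defined above):

```python
# Assign processing tiers of ~5000 units each, most relevant articles first.
ranked = nonzero_scores.sort_values('CPD_relevance', ascending=False)
ranked['relevance_tier'] = (ranked['n_units'].cumsum() // 5000).astype(int) + 1
print(ranked.groupby('relevance_tier')['n_units'].sum().head())  # ~5000 units per tier
```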
[ "code", "markdown", "code", "markdown", "code" ]
folivetti/BIGDATA
Spark/Lab2_Spark_PySpark.ipynb
mit
[ "Spark + Python = PySpark\nEsse notebook introduz os conceitos básicos do Spark através de sua interface com a linguagem Python. Como aplicação inicial faremos o clássico examplo de contador de palavras . Com esse exemplo é possível entender a lógica de programação funcional para as diversas tarefas de exploração de dados distribuídos.\nPara isso utilizaremos o livro texto Trabalhos completos de William Shakespeare obtidos do Projeto Gutenberg. Veremos que esse mesmo algoritmo pode ser empregado em textos de qualquer tamanho.\n Esse notebook contém: \nParte 1: Criando uma base RDD e RDDs de tuplas\nParte 2: Manipulando RDDs de tuplas\nParte 3: Encontrando palavras únicas e calculando médias\nParte 4: Aplicar contagem de palavras em um arquivo\nParte 5: Similaridade entre Objetos\nPara os exercícios é aconselhável consultar a documentação da API do PySpark\n Part 1: Criando e Manipulando RDDs \nNessa parte do notebook vamos criar uma base RDD a partir de uma lista com o comando parallelize.\n (1a) Criando uma base RDD \nPodemos criar uma base RDD de diversos tipos e fonte do Python com o comando sc.parallelize(fonte, particoes), sendo fonte uma variável contendo os dados (ex.: uma lista) e particoes o número de partições para trabalhar em paralelo.", "ListaPalavras = ['gato', 'elefante', 'rato', 'rato', 'gato']\npalavrasRDD = sc.parallelize(ListaPalavras, 4)\nprint type(palavrasRDD)", "(1b) Plural \nVamos criar uma função que transforma uma palavra no plural adicionando uma letra 's' ao final da string. Em seguida vamos utilizar a função map() para aplicar a transformação em cada palavra do RDD.\nEm Python (e muitas outras linguagens) a concatenação de strings é custosa. Uma alternativa melhor é criar uma nova string utilizando str.format().\nNota: a string entre os conjuntos de três aspas representa a documentação da função. Essa documentação é exibida com o comando help(). Vamos utilizar a padronização de documentação sugerida para o Python, manteremos essa documentação em inglês.", "# EXERCICIO\ndef Plural(palavra):\n \"\"\"Adds an 's' to `palavra`.\n\n Args:\n palavra (str): A string.\n\n Returns:\n str: A string with 's' added to it.\n \"\"\"\n return <COMPLETAR>\n\nprint Plural('gato')\n\nhelp(Plural)\n\nassert Plural('rato')=='ratos', 'resultado incorreto!'\nprint 'OK'", "(1c) Aplicando a função ao RDD \nTransforme cada palavra do nosso RDD em plural usando map()\nEm seguida, utilizaremos o comando collect() que retorna a RDD como uma lista do Python.", "# EXERCICIO\npluralRDD = palavrasRDD.<COMPLETAR>\nprint pluralRDD.collect()\n\nassert pluralRDD.collect()==['gatos','elefantes','ratos','ratos','gatos'], 'valores incorretos!'\nprint 'OK'", "Nota: utilize o comando collect() apenas quando tiver certeza de que a lista caberá na memória. Para gravar os resultados de volta em arquivo texto ou base de dados utilizaremos outro comando.\n (1d) Utilizando uma função lambda \nRepita a criação de um RDD de plurais, porém utilizando uma função lambda.", "# EXERCICIO\npluralLambdaRDD = palavrasRDD.<COMPLETAR>\nprint pluralLambdaRDD.collect()\n\nassert pluralLambdaRDD.collect()==['gatos','elefantes','ratos','ratos','gatos'], 'valores incorretos!'\nprint 'OK'", "(1e) Tamanho de cada palavra \nAgora use map() e uma função lambda para retornar o número de caracteres em cada palavra. 
Utilize collect() para armazenar o resultado em forma de listas na variável destino.", "# EXERCICIO\npluralTamanho = (pluralRDD\n <COMPLETAR>\n )\nprint pluralTamanho\n\nassert pluralTamanho==[5,9,5,5,5], 'valores incorretos'\nprint \"OK\"", "(1f) RDDs de pares e tuplas \nPara contar a frequência de cada palavra de maneira distribuída, primeiro devemos atribuir um valor para cada palavra do RDD. Isso irá gerar um base de dados (chave, valor). Desse modo podemos agrupar a base através da chave, calculando a soma dos valores atribuídos. No nosso caso, vamos atribuir o valor 1 para cada palavra.\nUm RDD contendo a estrutura de tupla chave-valor (k,v) é chamada de RDD de tuplas ou pair RDD.\nVamos criar nosso RDD de pares usando a transformação map() com uma função lambda().", "# EXERCICIO\npalavraPar = palavrasRDD.<COMPLETAR>\nprint palavraPar.collect()\n\nassert palavraPar.collect() == [('gato',1),('elefante',1),('rato',1),('rato',1),('gato',1)], 'valores incorretos!'\nprint \"OK\"", "Parte 2: Manipulando RDD de tuplas \nVamos manipular nossa RDD para contar as palavras do texto.\n (2a) Função groupByKey() \nA função groupByKey() agrupa todos os valores de um RDD através da chave (primeiro elemento da tupla) agregando os valores em uma lista.\nEssa abordagem tem um ponto fraco pois:\n\n\nA operação requer que os dados distribuídos sejam movidos em massa para que permaneçam na partição correta.\n\n\nAs listas podem se tornar muito grandes. Imagine contar todas as palavras do Wikipedia: termos comuns como \"a\", \"e\" formarão uma lista enorme de valores que pode não caber na memória do processo escravo.", "# EXERCICIO\npalavrasGrupo = palavraPar.groupByKey()\nfor chave, valor in palavrasGrupo.collect():\n print '{0}: {1}'.format(chave, list(valor))\n\nassert sorted(palavrasGrupo.mapValues(lambda x: list(x)).collect()) == [('elefante', [1]), ('gato',[1, 1]), ('rato',[1, 2])],\n 'Valores incorretos!'\nprint \"OK\"", "(2b) Calculando as contagens \nApós o groupByKey() nossa RDD contém elementos compostos da palavra, como chave, e um iterador contendo todos os valores correspondentes aquela chave.\nUtilizando a transformação map() e a função sum(), contrua um novo RDD que consiste de tuplas (chave, soma).", "# EXERCICIO\ncontagemGroup = palavrasGrupo.<COMPLETAR>\nprint contagemGroup.collect()\n\nassert sorted(contagemGroup.collect())==[('elefante',1), ('gato',2), ('rato',2)], 'valores incorretos!'\nprint \"OK\"", "(2c) reduceByKey \nUm comando mais interessante para a contagem é o reduceByKey() que cria uma nova RDD de tuplas.\nEssa transformação aplica a transformação reduce() vista na aula anterior para os valores de cada chave. 
Dessa forma, a função de transformação pode ser aplicada em cada partição local para depois ser enviada para redistribuição de partições, reduzindo o total de dados sendo movidos e não mantendo listas grandes na memória.", "# EXERCICIO\ncontagem = palavraPar.<COMPLETAR>\nprint contagem.collect()\n\nassert sorted(contagem.collect())==[('elefante',1), ('gato',2), ('rato',2)], 'valores incorretos!'\nprint \"OK\"", "(2d) Agrupando os comandos \nA forma mais usual de realizar essa tarefa, partindo do nosso RDD palavrasRDD, é encadear os comandos map e reduceByKey em uma linha de comando.", "# EXERCICIO\ncontagemFinal = (palavrasRDD\n <COMPLETAR>\n <COMPLETAR>\n )\nprint contagemFinal.collect()\n\nassert sorted(contagemFinal)==[('elefante',1), ('gato',2), ('rato',2)], 'valores incorretos!'\nprint \"OK\"", "Parte 3: Encontrando as palavras únicas e calculando a média de contagem \n (3a) Palavras Únicas \nCalcule a quantidade de palavras únicas do RDD. Utilize comandos de RDD da API do PySpark e alguma das últimas RDDs geradas nos exercícios anteriores.", "# EXERCICIO\npalavrasUnicas = <COMPLETAR>\nprint palavrasUnicas\n\nassert palavrasUnicas==3, 'valor incorreto!'\nprint \"OK\"", "(3b) Calculando a Média de contagem de palavras \nEncontre a média de frequência das palavras utilizando o RDD contagem.\nNote que a função do comando reduce() é aplicada em cada tupla do RDD. Para realizar a soma das contagens, primeiro é necessário mapear o RDD para um RDD contendo apenas os valores das frequências (sem as chaves).", "# EXERCICIO\n# add é equivalente a lambda x,y: x+y\nfrom operator import add\ntotal = (contagemFinal\n <COMPLETAR>\n <COMPLETAR>\n )\nmedia = total / float(palavrasUnicas)\nprint total\nprint round(media, 2)\n\nassert round(media, 2)==1.67, 'valores incorretos!'\nprint \"OK\"", "Parte 4: Aplicar nosso algoritmo em um arquivo \n (4a) Função contaPalavras \nPara podermos aplicar nosso algoritmo genéricamente em diversos RDDs, vamos primeiro criar uma função para aplicá-lo em qualquer fonte de dados. Essa função recebe de entrada um RDD contendo uma lista de chaves (palavras) e retorna um RDD de tuplas com as chaves e a contagem delas nessa RDD", "# EXERCICIO\ndef contaPalavras(chavesRDD):\n \"\"\"Creates a pair RDD with word counts from an RDD of words.\n\n Args:\n chavesRDD (RDD of str): An RDD consisting of words.\n\n Returns:\n RDD of (str, int): An RDD consisting of (word, count) tuples.\n \"\"\"\n return (chavesRDD\n <COMPLETAR>\n <COMPLETAR>\n )\n\nprint contaPalavras(palavrasRDD).collect()\n\nassert sorted(contaPalavras(palavrasRDD).collect())==[('elefante',1), ('gato',2), ('rato',2)], 'valores incorretos!'\nprint \"OK\"", "(4b) Normalizando o texto \nQuando trabalhamos com dados reais, geralmente precisamos padronizar os atributos de tal forma que diferenças sutis por conta de erro de medição ou diferença de normatização, sejam desconsideradas. Para o próximo passo vamos padronizar o texto para:\n\n\nPadronizar a capitalização das palavras (tudo maiúsculo ou tudo minúsculo).\n\n\nRemover pontuação.\n\n\nRemover espaços no início e no final da palavra.\n\n\nCrie uma função removerPontuacao que converte todo o texto para minúscula, remove qualquer pontuação e espaços em branco no início ou final da palavra. 
Para isso, utilize a biblioteca re para remover todo texto que não seja letra, número ou espaço, encadeando com as funções de string para remover espaços em branco e converter para minúscula (veja Strings).", "# EXERCICIO\nimport re\ndef removerPontuacao(texto):\n \"\"\"Removes punctuation, changes to lower case, and strips leading and trailing spaces.\n\n Note:\n Only spaces, letters, and numbers should be retained. Other characters should should be\n eliminated (e.g. it's becomes its). Leading and trailing spaces should be removed after\n punctuation is removed.\n\n Args:\n texto (str): A string.\n\n Returns:\n str: The cleaned up string.\n \"\"\"\n return re.sub(r'[^A-Za-z0-9 ]', '', texto).strip().lower()\nprint removerPontuacao('Ola, quem esta ai??!')\nprint removerPontuacao(' Sem espaco e_sublinhado!')\n\nassert removerPontuacao(' O uso de virgulas, embora permitido, nao deve contar. ')=='o uso de virgulas embora permitido nao deve contar', 'string incorreta!'\nprint \"OK\"", "(4c) Carregando arquivo texto \nPara a próxima parte vamos utilizar o livro Trabalhos completos de William Shakespeare do Projeto Gutenberg.\nPara converter um texto em uma RDD, utilizamos a função textFile() que recebe como entrada o nome do arquivo texto que queremos utilizar e o número de partições.\nO nome do arquivo texto pode se referir a um arquivo local ou uma URI de arquivo distribuído (ex.: hdfs://).\nVamos também aplicar a função removerPontuacao() para normalizar o texto e verificar as 15 primeiras linhas com o comando take().", "# Apenas execute a célula\nimport os.path\nimport urllib\n\nurl = 'http://www.gutenberg.org/cache/epub/100/pg100.txt' # url do livro\n\narquivo = os.path.join('Data','Aula02','shakespeare.txt') # local de destino: 'Data/Aula02/shakespeare.txt'\n\nif os.path.isfile(arquivo): # verifica se já fizemos download do arquivo\n print 'Arquivo já existe!'\nelse:\n try:\n urllib.urlretrieve(url, arquivo) # salva conteúdo da url em arquivo\n except IOError:\n print 'Impossível fazer o download: {0}'.format(url)\n\n# lê o arquivo com textFile e aplica a função removerPontuacao \nshakesRDD = (sc\n .textFile(arquivo, 8)\n .map(removerPontuacao)\n )\n\n# zipWithIndex gera tuplas (conteudo, indice) onde indice é a posição do conteudo na lista sequencial\n# Ex.: sc.parallelize(['gato','cachorro','boi']).zipWithIndex() ==> [('gato',0), ('cachorro',1), ('boi',2)]\n# sep.join() junta as strings de uma lista através do separador sep. Ex.: ','.join(['a','b','c']) ==> 'a,b,c'\nprint '\\n'.join(shakesRDD\n .zipWithIndex()\n .map(lambda (linha, num): '{0}: {1}'.format(num,linha))\n .take(15)\n )", "(4d) Extraindo as palavras \nAntes de poder usar nossa função Before we can use the contaPalavras(), temos ainda que trabalhar em cima da nossa RDD:\n\n\nPrecisamos gerar listas de palavras ao invés de listas de sentenças.\n\n\nEliminar linhas vazias.\n\n\nAs strings em Python tem o método split() que faz a separação de uma string por separador. 
No nosso caso, queremos separar as strings por espaço.\nUtilize a função map() para gerar um novo RDD como uma lista de palavras.", "# EXERCICIO\nshakesPalavrasRDD = shakesRDD.<COMPLETAR>\ntotal = shakesPalavrasRDD.count()\nprint shakesPalavrasRDD.take(5)\nprint total", "Conforme deve ter percebido, o uso da função map() gera uma lista para cada linha, criando um RDD contendo uma lista de listas.\nPara resolver esse problema, o Spark possui uma função análoga chamada flatMap() que aplica a transformação do map(), porém achatando o retorno em forma de lista para uma lista unidimensional.", "# EXERCICIO\nshakesPalavrasRDD = shakesRDD.flatMap(lambda x: x.split())\ntotal = shakesPalavrasRDD.count()\nprint shakesPalavrasRDD.top(5)\nprint total\n\nassert total==927631 or total == 928908, \"valor incorreto de palavras!\"\nprint \"OK\"\nassert shakesPalavrasRDD.top(5)==[u'zwaggerd', u'zounds', u'zounds', u'zounds', u'zounds'],'lista incorreta de palavras'\nprint \"OK\"", "(4e) Remover linhas vazias \nPara o próximo passo vamos filtrar as linhas vazias com o comando filter(). Uma linha vazia é uma string sem nenhum conteúdo.", "# EXERCICIO\nshakesLimpoRDD = shakesPalavrasRDD.<COMPLETAR>\ntotal = shakesLimpoRDD.count()\nprint total\n\nassert total==882996, 'valor incorreto!'\nprint \"OK\"", "(4f) Contagem de palavras \nAgora que nossa RDD contém uma lista de palavras, podemos aplicar nossa função contaPalavras().\nAplique a função em nossa RDD e utilize a função takeOrdered para imprimir as 15 palavras mais frequentes.\ntakeOrdered() pode receber um segundo parâmetro que instrui o Spark em como ordenar os elementos. Ex.:\ntakeOrdered(15, key=lambda x: -x): ordem decrescente dos valores de x", "# EXERCICIO\ntop15 = <COMPLETAR>\nprint '\\n'.join(map(lambda (w, c): '{0}: {1}'.format(w, c), top15))\n\nassert top15 == [(u'the', 27361), (u'and', 26028), (u'i', 20681), (u'to', 19150), (u'of', 17463),\n (u'a', 14593), (u'you', 13615), (u'my', 12481), (u'in', 10956), (u'that', 10890),\n (u'is', 9134), (u'not', 8497), (u'with', 7771), (u'me', 7769), (u'it', 7678)],'valores incorretos!'\nprint \"OK\"", "Parte 5: Similaridade entre Objetos \nNessa parte do laboratório vamos aprender a calcular a distância entre atributos numéricos, categóricos e textuais.\n (5a) Vetores no espaço Euclidiano \nQuando nossos objetos são representados no espaço Euclidiano, medimos a similaridade entre eles através da p-Norma definida por:\n$$d(x,y,p) = (\\sum_{i=1}^{n}{|x_i - y_i|^p})^{1/p}$$\nAs normas mais utilizadas são $p=1,2,\\infty$ que se reduzem em distância absoluta, Euclidiana e máxima distância:\n$$d(x,y,1) = \\sum_{i=1}^{n}{|x_i - y_i|}$$\n$$d(x,y,2) = (\\sum_{i=1}^{n}{|x_i - y_i|^2})^{1/2}$$\n$$d(x,y,\\infty) = \\max(|x_1 - y_1|,|x_2 - y_2|, ..., |x_n - y_n|)$$", "import numpy as np\n\n# Vamos criar uma função pNorm que recebe como parâmetro p e retorna uma função que calcula a pNorma\ndef pNorm(p):\n \"\"\"Generates a function to calculate the p-Norm between two points.\n\n Args:\n p (int): The integer p.\n\n Returns:\n Dist: A function that calculates the p-Norm.\n \"\"\"\n\n def Dist(x,y):\n return np.power(np.power(np.abs(x-y),p).sum(),1/float(p))\n return Dist\n\n# Vamos criar uma RDD com valores numéricos\nnumPointsRDD = sc.parallelize(enumerate(np.random.random(size=(10,100))))\n\n# EXERCICIO\n# Procure dentre os comandos do PySpark, um que consiga fazer o produto cartesiano da base com ela mesma\ncartPointsRDD = numPointsRDD.<COMPLETAR>\n\n# Aplique um mapa para transformar nossa RDD em uma RDD de tuplas 
((id1,id2), (vetor1,vetor2))\n# DICA: primeiro utilize o comando take(1) e imprima o resultado para verificar o formato atual da RDD\ncartPointsParesRDD = cartPointsRDD.<COMPLETAR>\n\n\n# Aplique um mapa para calcular a Distância Euclidiana entre os pares\nEuclid = pNorm(2)\ndistRDD = cartPointsParesRDD.<COMPLETAR>\n\n# Encontre a distância máxima, mínima e média, aplicando um mapa que transforma (chave,valor) --> valor\n# e utilizando os comandos internos do pyspark para o cálculo da min, max, mean\nstatRDD = distRDD.<COMPLETAR>\n\nminv, maxv, meanv = statRDD.<COMPLETAR>, statRDD.<COMPLETAR>, statRDD.<COMPLETAR>\nprint minv, maxv, meanv\n\nassert (minv.round(2), maxv.round(2), meanv.round(2))==(0.0, 4.70, 3.65), 'Valores incorretos'\nprint \"OK\"", "(5b) Valores Categóricos \nQuando nossos objetos são representados por atributos categóricos, eles não possuem uma similaridade espacial. Para calcularmos a similaridade entre eles podemos primeiro transformar nosso vetor de atrbutos em um vetor binário indicando, para cada possível valor de cada atributo, se ele possui esse atributo ou não.\nCom o vetor binário podemos utilizar a distância de Hamming definida por:\n$$ H(x,y) = \\sum_{i=1}^{n}{x_i != y_i} $$\nTambém é possível definir a distância de Jaccard como:\n$$ J(x,y) = \\frac{\\sum_{i=1}^{n}{x_i == y_i} }{\\sum_{i=1}^{n}{\\max(x_i, y_i}) } $$", "# Vamos criar uma função para calcular a distância de Hamming\ndef Hamming(x,y):\n \"\"\"Calculates the Hamming distance between two binary vectors.\n\n Args:\n x, y (np.array): Array of binary integers x and y.\n\n Returns:\n H (int): The Hamming distance between x and y.\n \"\"\"\n return (x!=y).sum()\n\n# Vamos criar uma função para calcular a distância de Jaccard\ndef Jaccard(x,y):\n \"\"\"Calculates the Jaccard distance between two binary vectors.\n\n Args:\n x, y (np.array): Array of binary integers x and y.\n\n Returns:\n J (int): The Jaccard distance between x and y.\n \"\"\"\n return (x==y).sum()/float( np.maximum(x,y).sum() )\n\n# Vamos criar uma RDD com valores categóricos\ncatPointsRDD = sc.parallelize(enumerate([['alto', 'caro', 'azul'],\n ['medio', 'caro', 'verde'],\n ['alto', 'barato', 'azul'],\n ['medio', 'caro', 'vermelho'],\n ['baixo', 'barato', 'verde'],\n ]))\n\n# EXERCICIO\n# Crie um RDD de chaves únicas utilizando flatMap\nchavesRDD = (catPointsRDD\n .<COMPLETAR>\n .<COMPLETAR>\n .<COMPLETAR>\n )\n\nchaves = dict((v,k) for k,v in enumerate(chavesRDD.collect()))\nnchaves = len(chaves)\nprint chaves, nchaves\n\nassert chaves=={'alto': 0, 'medio': 1, 'baixo': 2, 'barato': 3, 'azul': 4, 'verde': 5, 'caro': 6, 'vermelho': 7}, 'valores incorretos!'\nprint \"OK\"\n\nassert nchaves==8, 'número de chaves incorreta'\nprint \"OK\"\n\ndef CreateNP(atributos,chaves): \n \"\"\"Binarize the categorical vector using a dictionary of keys.\n\n Args:\n atributos (list): List of attributes of a given object.\n chaves (dict): dictionary with the relation attribute -> index\n\n Returns:\n array (np.array): Binary array of attributes.\n \"\"\"\n \n array = np.zeros(len(chaves))\n for atr in atributos:\n array[ chaves[atr] ] = 1\n return array\n\n# Converte o RDD para o formato binário, utilizando o dict chaves\nbinRDD = catPointsRDD.map(lambda rec: (rec[0],CreateNP(rec[1], chaves)))\nbinRDD.collect()\n\n# EXERCICIO\n# Procure dentre os comandos do PySpark, um que consiga fazer o produto cartesiano da base com ela mesma\ncartBinRDD = binRDD.<COMPLETAR>\n\n# Aplique um mapa para transformar nossa RDD em uma RDD de tuplas ((id1,id2), 
(vetor1,vetor2))\n# DICA: primeiro utilize o comando take(1) e imprima o resultado para verificar o formato atual da RDD\ncartBinParesRDD = cartBinRDD.<COMPLETAR>\n\n\n# Aplique um mapa para calcular a Distância de Hamming e Jaccard entre os pares\nhamRDD = cartBinParesRDD.<COMPLETAR>\njacRDD = cartBinParesRDD.<COMPLETAR>\n\n# Encontre a distância máxima, mínima e média, aplicando um mapa que transforma (chave,valor) --> valor\n# e utilizando os comandos internos do pyspark para o cálculo da min, max, mean\nstatHRDD = hamRDD.<COMPLETAR>\nstatJRDD = jacRDD.<COMPLETAR>\n\nHmin, Hmax, Hmean = statHRDD.<COMPLETAR>, statHRDD.<COMPLETAR>, statHRDD.<COMPLETAR>\nJmin, Jmax, Jmean = statJRDD.<COMPLETAR>, statJRDD.<COMPLETAR>, statJRDD.<COMPLETAR>\n\nprint \"\\t\\tMin\\tMax\\tMean\"\nprint \"Hamming:\\t{:.2f}\\t{:.2f}\\t{:.2f}\".format(Hmin, Hmax, Hmean )\nprint \"Jaccard:\\t{:.2f}\\t{:.2f}\\t{:.2f}\".format( Jmin, Jmax, Jmean )\n\nassert (Hmin.round(2), Hmax.round(2), Hmean.round(2)) == (0.00,6.00,3.52), 'valores incorretos'\nprint \"OK\"\nassert (Jmin.round(2), Jmax.round(2), Jmean.round(2)) == (0.33,2.67,1.14), 'valores incorretos'\nprint \"OK\"" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
gregorjerse/rt2
2015_2016/lab13/Extending values on vertices-template.ipynb
gpl-3.0
[ "Extending values on vertices to a discrete gradient vector field\nDuring extension algorithm one has to compute lover_link for every vertex in the complex. So let us implement search for the lower link first. It requires quite a lot of code: first we find a star, then link and finally lower link for the given simplex.", "from itertools import combinations, chain\n\ndef simplex_closure(a): \n \"\"\"Returns the generator that iterating over all subsimplices (of all dimensions) in the closure\n of the simplex a. The simplex a is also included.\n \"\"\"\n return chain.from_iterable([combinations(a, l) for l in range(1, len(a) + 1)])\n \ndef closure(K):\n \"\"\"Add all missing subsimplices to K in order to make it a simplicial complex.\"\"\"\n return list({s for a in K for s in simplex_closure(a)})\n\ndef contained(a, b):\n \"\"\"Returns True is a is a subsimplex of b, False otherwise.\"\"\"\n return all((v in b for v in a))\n\ndef star(s, cx):\n \"\"\"Return the set of all simplices in the cx that contais simplex s.\n \"\"\"\n return {p for p in cx if contained(s, p)}\n\ndef intersection(s1, s2):\n \"\"\"Return the intersection of s1 and s2.\"\"\"\n return list(set(s1).intersection(s2))\n\ndef link(s, cx):\n \"\"\"Returns link of the simplex s in the complex cx.\n \"\"\"\n # Link consists of all simplices from the closed star that have \n # empty intersection with s.\n return [c for c in closure(star(s, cx)) if not intersection(s, c)]\n\ndef simplex_value(s, f, aggregate):\n \"\"\"Return the value of f on vertices of s\n aggregated by the aggregate function.\n \"\"\"\n return aggregate([f[v] for v in s])\n\ndef lower_link(s, cx, f):\n \"\"\"Return the lower link of the simplex s in the complex cx.\n The dictionary f is the mapping from vertices (integers)\n to the values on vertices.\n \"\"\"\n sval = simplex_value(s, f, min)\n return [s for s in link(s, cx) \n if simplex_value(s, f, max) < sval]", "Let us test the above function on the simple example: full triangle with values 0, 1 and 2 on the vertices labeled with 1, 2 and 3.", "K = closure([(1, 2, 3)])\nf = {1: 0, 2: 1, 3: 2}\nfor v in (1, 2, 3):\n print\"{0}: {1}\".format((v,), lower_link((v,), K, f))", "Now let us implement an extension algorithm. We are leaving out the cancelling step for clarity.", "def join(a, b):\n \"\"\"Return the join of 2 simplices a and b.\"\"\"\n return tuple(sorted(set(a).union(b)))\n\ndef extend(K, f):\n \"\"\"Extend the field to the complex K.\n Function on vertices is given in f.\n Returns the pair V, C, where V is the dictionary containing discrete gradient vector field\n and C is the list of all critical cells.\n \"\"\"\n V = dict()\n C = []\n for v in (s for s in K if len(s)==1):\n # Add your own code\n pass\n return V, C", "Let us test the algorithm on the example from the previous step (full triangle).", "K = closure([(1, 2, 3)])\nf = {1: 0, 2: 1, 3: 2}\nextend(K, f)\n\nK = closure([(1, 2, 3), (2, 3, 4)])\nf = {1: 0, 2: 1, 3: 2, 4: 0}\nextend(K, f)\n\nK = closure([(1, 2, 3), (2, 3, 4)])\nf = {1: 0, 2: 1, 3: 2, 4: 3}\nextend(K, f)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
vicolab/ml-pyxis
examples/torch-dataset.ipynb
mit
[ "PyTorch dataset interface\nIn this example we will look at how a pyxis LMDB can be used with PyTorch's torch.utils.data.Dataset and torch.utils.data.DataLoader.", "from __future__ import print_function\n\nimport numpy as np\n\nimport pyxis as px", "As usual, we will begin by creating a small dataset to test with. It will consist of 10 samples, where each input observation has four features and targets are scalar values.", "nb_samples = 10\n\nX = np.outer(np.arange(1, nb_samples + 1, dtype=np.uint8), np.arange(1, 4 + 1, dtype=np.uint8))\ny = np.arange(nb_samples, dtype=np.uint8)\n\nfor i in range(nb_samples):\n print('Input: {} -> Target: {}'.format(X[i], y[i]))", "The data is written using a with statement.", "with px.Writer(dirpath='data', map_size_limit=10, ram_gb_limit=1) as db:\n db.put_samples('input', X, 'target', y)", "To be sure the data was stored correctly, we will read the data back - again using a with statement.", "with px.Reader('data') as db:\n print(db)", "Working with PyTorch", "try:\n import torch\n import torch.utils.data\nexcept ImportError:\n raise ImportError('Could not import the PyTorch library `torch` or '\n '`torch.utils.data`. Please refer to '\n 'https://pytorch.org/ for installation instructions.')", "In pyxis.torch we have implemented a wrapper around torch.utils.data.Dataset called pyxis.torch.TorchDataset. This object is not imported into the pyxis name space because it relies on PyTorch being installed. As such, we first need to import pyxis.torch:", "import pyxis.torch as pxt", "pyxis.torch.TorchDataset has a single constructor argument: dirpath, i.e. the location of the pyxis LMDB.", "dataset = pxt.TorchDataset('data')", "The pyxis.torch.TorchDataset object has only three methods: __len__, __getitem__, and __repr__, each of which you can see an example of below:", "len(dataset)\n\ndataset[0]\n\ndataset", "pyxis.torch.TorchDataset can be directly combined with torch.utils.data.DataLoader to create an iterator type object:", "use_cuda = True and torch.cuda.is_available()\nkwargs = {\"num_workers\": 4, \"pin_memory\": True} if use_cuda else {}\n\nloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=False, **kwargs)\n\nfor i, d in enumerate(loader):\n print('Batch:', i)\n print('\\t', d['input'])\n print('\\t', d['target'])", "As with the built-in iterators in pyxis.iterators, we recommend you inherit from pyxis.torch.TorchDataset and alter __getitem__ to include your own data transformations." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
erinspace/share_tutorials
SHARE_Curation_Associates_Overview.ipynb
apache-2.0
[ "Jupyter Notebooks and the SHARE API\n\nLearn About\n- Jupyter Notebooks and Python\n- Making API Calls\n- Using the SHARE Search API and related tools\n\nHow YOU Can Use Jupyter Notebooks\n\nLearn Python and experiment with new code\nSend your code to others for them to use\nNicely document your code using a combination of text and code blocks\n\nMany great resources on the web\nJupyter/iPython Documentation\nhttp://jupyter.readthedocs.io/en/latest\nCollections of Interesting Notebooks\nhttps://github.com/ipython/ipython/wiki/A-gallery-of-interesting-IPython-Notebooks\nInstallation\nGet started by installing python on your system!\nhttps://osf.io/zk9xa/wiki\nUsing Jupyter for Making API Calls\n\nYou can use Jupyter to run any code in python (or 40+ other supported languages!)\nThis workshop will focus on making calls to APIs on the web, and soon, making calls to the SHARE Search API\n\nAPI\n\nApplication Programming Interface\n\nCan refer to any way to for a computer to interact with a source of data\n\n\nAPIs can oftentimes be accessed over the web", "import json\nimport requests\n\niss_url = 'http://api.open-notify.org/iss-now.json'\n\ndata = requests.get(iss_url).json()\nprint(json.dumps(data, indent=4))\n\n# Lattitude and Longitude of C'Ville\nLAT = 38.0293\nLON = 78.4767\n\niss_url = 'http://api.open-notify.org/iss-pass.json?lat={}&lon={}'.format(LAT, LON)\n\nprint(iss_url)\n\ndata = requests.get(iss_url).json()\nprint(json.dumps(data, indent=4))", "Parsing the Data\nWe got some datetimes back from the API -- but what do these mean?! \n\nWe can use python to find out!\nLets use a new library, arrow, to parse that.\nhttp://crsmithdev.com/arrow/", "import arrow", "open your terminal\npip install arrow", "from arrow.arrow import Arrow\n\n\nfor item in data['response']:\n datetime = Arrow.fromtimestamp(item['risetime'])\n print(\n 'The ISS will be visable over Charlottesville on {} at {} for {} seconds.'.format(\n datetime.date(),\n datetime.time(),\n item['duration']\n )\n )", "", "pokeapi = 'http://pokeapi.co/api/v2/generation/1/'\n\npokedata = requests.get(pokeapi).json()\n\n# Take that data, print out a nicely formatted version of the first 5 results\nprint(json.dumps(pokedata['pokemon_species'][:5], indent=4))\n\n# Let's get more info about the first pokemon on the list\n# By following the chain of linked data\n\n# Narrow down the url we'd like to get\nbulbasaur_url = pokedata['pokemon_species'][0]['url']\n\n# request data from that URL\nbulbasaur_data = requests.get(bulbasaur_url).json()\n\n# Let's remove the 'flavor text' because that's really long\ndel bulbasaur_data['flavor_text_entries']\n\nbulbasaur_data", "Some Great APIs YOU can use!\n\nTwitter\nGoogle Maps\nTwillio\nYelp\nSpotify\nGenius\n\n...and so many more!\nMany require some kind of authentication, so aren't as simple as the ISS, or PokeAPI.\nAccess an OAI-PMH Feed!\nMany institutions have an OAI-PMH based API.\nThis is great because they all have a unified way of interacting with the data in the repositories, just with different host urls.\nYou can create common code that will interact with most OAI-PMH feeds with only changing the base access URL.", "from furl import furl\n\n\nvt_url = furl('http://vtechworks.lib.vt.edu/oai/request')\n\nvt_url.args['verb'] = 'ListRecords'\nvt_url.args['metadataPrefix'] = 'oai_dc'\n\nvt_url.url\n\ndata = requests.get(vt_url.url)\n\ndata.content", "Let's parse this!\nconda install lxml", "from lxml import etree\n\netree_element = 
etree.XML(data.content)\n\netree_element\n\netree_element.getchildren()\n\n# A little namespace parsing and cleanup\nnamespaces = etree_element.nsmap\nnamespaces['ns0'] = etree_element.nsmap[None]\ndel namespaces[None]\n\nrecords = etree_element.xpath('//ns0:record', namespaces=namespaces)\n\nrecords[:10]\n\n# What's inside one of these records?\none_record = records[0]\none_record.getchildren()\n\n# We want to check out the \"metadata\" element, which is the second in the list\n# Let's make sure to get those namespaces too\n# Here's a cool trick to join 2 dictionaries in python 3!\nnamespaces = {**namespaces, **one_record[1][0].nsmap}\ndel namespaces[None]\n\n# Now we have namespaces we can use!\nnamespaces\n\n# Use those namespaces to get titles\ntitles = records[0].xpath('//dc:title/node()', namespaces=namespaces)\ntitles[:10]", "SHARE Search API\nAlso a fantastic resource!\nOne Way to Access Data\nInstead of writing custom code to parse both data coming from JSON and XML APIs\nThe SHARE Search Schema\nThe SHARE search API is built on a tool called elasticsearch. It lets you search a subset of SHARE's normalized metadata in a simple format.\nHere are the fields available in SHARE's elasticsearch endpoint:\n- 'title'\n- 'language'\n- 'subject'\n- 'description'\n- 'date'\n- 'date_created'\n- 'date_modified\n- 'date_updated'\n- 'date_published'\n- 'tags'\n- 'links'\n- 'awards'\n- 'venues'\n- 'sources'\n- 'contributors'\n\nYou can see a formatted version of the base results from the API by visiting the SHARE Search API URL.", "SHARE_SEARCH_API = 'https://staging-share.osf.io/api/search/abstractcreativework/_search'\n\nfrom furl import furl\n\nsearch_url = furl(SHARE_SEARCH_API)\nsearch_url.args['size'] = 3\nrecent_results = requests.get(search_url.url).json()\n\nrecent_results = recent_results['hits']['hits']\n\nrecent_results\n\nprint('The request URL is {}'.format(search_url.url))\nprint('----------')\nfor result in recent_results:\n print(\n '{} -- from {}'.format(\n result['_source']['title'],\n result['_source']['sources']\n )\n )", "Sending a Query to the SHARE Search API\nFirst, we'll define a function to do the hard work for us.\nIt will take 2 parameters, a URL, and a query to send to the search API.", "import json\n\ndef query_share(url, query):\n # A helper function that will use the requests library,\n # pass along the correct headers, and make the query we want\n\n headers = {'Content-Type': 'application/json'}\n data = json.dumps(query)\n return requests.post(url, headers=headers, data=data).json()\n\nsearch_url.args = None # reset the args so that we remove our old query arguments.\nsearch_url.url # Show the URL that we'll be requesting to make sure the args were cleared\n\ntags_query = {\n \"query\": {\n \"exists\": {\n \"field\": \"tags\"\n }\n }\n}\n\n\nmissing_tags_query = {\n \"query\": {\n \"bool\": {\n \"must_not\": {\n \"exists\": {\n \"field\": \"tags\"\n }\n }\n } \n }\n}\n\nwith_tags = query_share(search_url.url, tags_query)\nmissing_tags = query_share(search_url.url, missing_tags_query)\n\ntotal_results = requests.get(search_url.url).json()['hits']['total']\n\nwith_tags_percent = (float(with_tags['hits']['total'])/total_results)*100\nmissing_tags_percent = (float(missing_tags['hits']['total'])/total_results)*100\n\n\nprint(\n '{} results out of {}, or {}%, have tags.'.format(\n with_tags['hits']['total'],\n total_results,\n format(with_tags_percent, '.2f')\n )\n)\n\nprint(\n '{} results out of {}, or {}%, do NOT have tags.'.format(\n 
missing_tags['hits']['total'],\n total_results,\n format(missing_tags_percent, '.2f')\n )\n)\n\nprint('------------')\nprint('As a little sanity check....')\nprint('{} + {} = {}%'.format(with_tags_percent, missing_tags_percent, format(with_tags_percent + missing_tags_percent, '.2f')))", "Other SHARE APIs\nSHARE has a host of other APIs that provide direct access to the data stored in SHARE.\nYou can read more about the SHARE Data Models here: http://share-research.readthedocs.io/en/latest/share_models.html", "SHARE_API = 'https://staging-share.osf.io/api/'\n\nshare_endpoints = requests.get(SHARE_API).json()\n\nshare_endpoints", "Visit the API In Your Browser\nYou can visit https://staging-share.osf.io/api/ and see the data formatted in \"pretty printed\" JSON\nSHARE Providers API\nAccess the information about the providers that SHARE harvests from", "SHARE_PROVIDERS = 'https://staging-share.osf.io/api/providers/'\n\ndata = requests.get(SHARE_PROVIDERS).json()\n \ndata", "We can print that out a little nicer\nUsing a loop and using the lookups that'd we'd like!", "print('Here are the first 10 Providers:')\nfor source in data['results']:\n print(\n '{}\\n{}\\n{}\\n'.format(\n source['long_title'],\n source['home_page'],\n source['provider_name']\n )\n )" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
xiph/rav1e
doc/regress_log-bitrate_wrt_log-quantizer.ipynb
bsd-2-clause
[ "Rate-control Empirical Analysis\nSimple linear regression\nWe performed a simple linear regression of the bitrate with respect to the quantizer,\noperating on the logarithm of both.\nThe data set used was all of the video clips on https://media.xiph.org/video/derf/\nas well as subset3 (for extra I-frame data).\nTo enable processing an arbitrarily large data set, an online regression algorithm was implemented.\nIn practice, 440MB of text formatted data were sufficient.\nThe raw final state of the online regression for each segment follows.", "%matplotlib inline\nfrom IPython.display import set_matplotlib_formats\nset_matplotlib_formats('svg')\nfrom matplotlib import pyplot as plt\nplt.rcParams['svg.fonttype'] = 'none'\n\nfrom glob import glob\nimport numpy as np\nfrom pprint import pprint\nimport tarfile\nfrom tqdm import tqdm_notebook\n\n# Klotz, Jerome H. \"UPDATING SIMPLE LINEAR REGRESSION.\"\n# Statistica Sinica 5, no. 1 (1995): 399-403.\n# http://www.jstor.org/stable/24305577\ndef online_simple_regression(accumulator, x, y):\n Ax_, Ay_, Sxy, Sxx, n_, minx, maxx = accumulator or (0, 0, 0, 0, 0, None, None)\n\n first = n_ == 0\n n = n_ + x.size\n rt_n, rt_n_ = np.sqrt((n, n_), dtype=np.float128)\n\n Ax = (Ax_*n_ + x.sum(dtype=np.float128))/n\n Ay = (Ay_*n_ + y.sum(dtype=np.float128))/n\n \n minx = x.min() if first else min(minx, x.min())\n maxx = x.max() if first else max(maxx, x.max())\n \n X = Ax if first else (Ax_*rt_n_ + Ax*rt_n)/(rt_n_ + rt_n)\n Y = Ay if first else (Ay_*rt_n_ + Ay*rt_n)/(rt_n_ + rt_n)\n\n Sxx += np.sum((x - X)**2)\n Sxy += np.sum((x - X)*(y - Y))\n\n return Ax, Ay, Sxy, Sxx, n, minx, maxx\n\ndef conv_px(s):\n w, h = s.split(b'x')\n return int(w)*int(h)\n\nconv_fti = [b'I', b'P', b'B0', b'B1'].index\n\ndef collect(filename, queues):\n px, log_target_q, byte_size, frame_type = np.loadtxt(\n filename, dtype=np.int64, delimiter=',',\n converters={1: conv_px, 4: conv_fti},\n skiprows=1, usecols=range(1, 5), unpack=True)\n\n blog64q57_ibpp = np.round((\n np.log2(px, dtype=np.float128) - np.log2(byte_size*8, dtype=np.float128)\n )*2**57).astype(np.int64)\n \n # These are the fixed point found by repeating this whole process\n boundaries = [\n [0, 381625*2**40, 655352*2**40, 967797*2**40],\n [0, 356802*2**40, 848173*2**40, 967797*2**40],\n [0, 288436*2**40, 671307*2**40, 967797*2**40],\n [0, 264708*2**40, 622760*2**40, 967797*2**40]\n ]\n\n for fti in np.unique(frame_type):\n buckets = list(zip(boundaries[fti][:-1], boundaries[fti][1:]))\n for bi, bucket in enumerate(buckets):\n low, high = bucket\n idx = (frame_type==fti) & (log_target_q >= low) & (log_target_q < high)\n if np.sum(idx, dtype=int) == 0: continue\n b = (bi << 2) | fti\n x, y = log_target_q[idx], blog64q57_ibpp[idx]\n queue = queues.get(b, ([], []))\n queue[0].append(x)\n queue[1].append(y)\n queues[b] = queue\n\ndef aggregate(queues, partials):\n for b, queue in queues.items():\n x, y = np.concatenate(queue[0]), np.concatenate(queue[1])\n partials[b] = online_simple_regression(partials.get(b, None), x, y)\n queues.clear()\n\npartials = dict()\n# https://ba.rr-dav.id.au/data/rav1e/rc-data.tar.xz\nwith tarfile.open('rc-data.tar.xz', 'r:xz') as tf:\n queues, last_name = dict(), None\n for ti in tqdm_notebook(tf, total=1077*255, leave=False):\n name = ti.name.split('/')[0]\n if last_name and name != last_name:\n aggregate(queues, partials)\n last_name = name\n collect(tf.extractfile(ti), queues)\n aggregate(queues, partials)\npprint(partials)", "Fixed-point approximation\nThe regression results are converted 
to a fixed-point representation,\nwith the exponent in Q6 and the scale in Q3.", "plt.figure(figsize=(7, 6))\nplt.axis('equal')\nplt.xticks([0, 10])\nplt.yticks([0, 10])\nplt.minorticks_on()\nplt.grid(b=True, which='major')\nplt.grid(b=True, which='minor', alpha=0.2)\n\nsegments = dict()\nfor b, accumulator in partials.items():\n Ax, Ay, Sxy, Sxx, n, minx, maxx = accumulator\n\n\n fti = b & 3\n beta = Sxy/Sxx\n alpha = Ay - beta*Ax\n exp = int(np.round(beta*2**6))\n beta_ = exp/2**6\n alpha_ = Ay - beta_*Ax\n scale = int(np.round(np.exp2(3 - alpha_/2**57)))\n label = ['I', 'P', 'B0', 'B1'][fti]\n print('%2s: exp=%d scale=%d bucket=%d' % (label, exp, scale, b>>2))\n\n xs, ys = segments.get(label, ([], []))\n xs = [minx/2**57, maxx/2**57]\n ys = [xs[0]*beta_ + alpha_/2**57, xs[1]*beta_ + alpha_/2**57]\n xs_, ys_ = segments.get(label, ([], []))\n xs_.extend(xs)\n ys_.extend(ys)\n segments[label] = (xs_, ys_)\n\nbest = dict()\nfor label, xy in segments.items():\n plt.plot(xy[0], xy[1], label=label)\n \nplt.legend();", "The endpoints of each linear regression, rounding only the exponent, are detailed in the following output.\nWe use a cubic interpolation of these points to adjust the segment boundaries.", "pprint(segments)", "Piecewise-linear fit\nWe applied a 3-segment piecewise-linear fit. The boundaries were aligned to integer values of pixels-per-bit,\nwhile optimizing for similarity to a cubic interpolation of the control points\n(log-quantizer as a function of log-bitrate).", "plt.figure(figsize=(7, 6))\nplt.axis('equal')\nplt.xticks([0, 10])\nplt.yticks([0, 10])\nplt.minorticks_on()\nplt.grid(b=True, which='major')\nplt.grid(b=True, which='minor', alpha=0.2)\n\nfrom scipy import optimize\n\nfor ft, xy in segments.items():\n f = np.poly1d(np.polyfit(np.array(xy[1]).astype(float), np.array(xy[0]).astype(float), 3))\n ys = np.linspace(min(xy[1]), max(xy[1]), 20)\n def cost(X):\n y0 = np.array([ys[0], X[0], X[1], ys[-1]]).astype(float)\n x0 = f(y0)\n f0 = np.where(ys<X[0],\n np.poly1d(np.polyfit(y0[:2], x0[:2], 1))(ys),\n np.where(ys<X[1],\n np.poly1d(np.polyfit(y0[1:3], x0[1:3], 1))(ys),\n np.poly1d(np.polyfit(y0[2:], x0[2:], 1))(ys)))\n return ((f0-f(ys))**2).sum()\n X = optimize.fmin(cost, [2, 5], disp=0)\n X = np.log2(np.ceil(np.exp2(X)))\n print(ft, np.exp2(X), np.round(f(X)*2**17))\n y0 = [ys.min(), X[0], X[1], ys.max()]\n x0 = f(y0)\n plt.plot(x0, y0, '.--', lw=1, c='grey')\n plt.plot(f(ys), ys, label=ft)\n\nplt.legend();" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
emiliom/stuff
MMW_API_landproperties_demo.ipynb
cc0-1.0
[ "Model My Watershed (MMW) API Demo\nEmilio Mayorga, University of Washington, Seattle. 2018-5-10,17. Demo put together using as a starting point instructions from Azavea from October 2017.\nIntroduction\nThe Model My Watershed API allows you to delineate watersheds and analyze geo-data for watersheds and arbitrary areas. You can read more about the work at WikiWatershed or use the web app.\nMMW users can discover their API keys through the user interface, and test the MMW geoprocessing API on either the live or staging apps. An Account page with the API key is available from either app (live or staging). To see it, go to the app, log in, and click on \"Account\" in the dropdown that appears when you click on your username in the top right. Your key is different between staging and production. For testing with the live (production) API and key, go to https://app.wikiwatershed.org/api/docs/\nThe API can be tested from the command line using curl. This example uses the production API to test the watershed endpoint:\nbash\ncurl -H \"Content-Type: application/json\" -H \"Authorization: Token YOUR_API_KEY\" -X POST \n -d '{ \"location\": [39.67185,-75.76743] }' https://app.wikiwatershed.org/api/watershed/\nMMW API: Obtain land properties based on \"analyze\" geoprocessing on AOI (small box around a point)\n1. Set up", "import json\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\ndef requests_retry_session(\n retries=3,\n backoff_factor=0.3,\n status_forcelist=(500, 502, 504),\n session=None,\n):\n session = session or requests.Session()\n retry = Retry(\n total=retries,\n read=retries,\n connect=retries,\n backoff_factor=backoff_factor,\n status_forcelist=status_forcelist,\n )\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n return session", "MMW production API endpoint base url.", "api_url = \"https://app.wikiwatershed.org/api/\"", "The job is not completed instantly and the results are not returned directly by the API request that initiated the job. The user must first issue an API request to confirm that the job is complete, then fetch the results. The demo presented here performs automated retries (checks) until the server confirms the job is completed, then requests the JSON results and converts (deserializes) them into a Python dictionary.", "def get_job_result(api_url, s, jobrequest):\n url_tmplt = api_url + \"jobs/{job}/\"\n get_url = url_tmplt.format\n \n result = ''\n while not result:\n get_req = requests_retry_session(session=s).get(get_url(job=jobrequest['job']))\n result = json.loads(get_req.content)['result']\n \n return result\n\ns = requests.Session()\n\nAPIToken = 'Token 0501d9a98b8170a41d57df8ce82c000c477c621a' # HIDE THE API TOKEN\n\ns.headers.update({\n 'Authorization': APIToken,\n 'Content-Type': 'application/json'\n})", "2. Construct AOI GeoJSON for job request\nParameters passed to the \"analyze\" API requests.", "from shapely.geometry import box, MultiPolygon\n\nwidth = 0.0004 # Looks like using a width smaller than 0.0002 causes a problem with the API?\n\n# GOOS: (-88.5552, 40.4374) elev 240.93. Agriculture Site—Goose Creek (Corn field) Site (GOOS) at IML CZO\n# SJER: (-119.7314, 37.1088) elev 403.86. 
San Joaquin Experimental Reserve Site (SJER) at South Sierra CZO\nlon, lat = -119.7314, 37.1088\n\nbbox = box(lon-0.5*width, lat-0.5*width, lon+0.5*width, lat+0.5*width)\n\npayload = MultiPolygon([bbox]).__geo_interface__\n\njson_payload = json.dumps(payload)\n\npayload", "3. Issue job requests, fetch job results when done, then examine results. Repeat for each request type", "# convenience function, to simplify the request calls, below\ndef analyze_api_request(api_name, s, api_url, json_payload):\n post_url = \"{}analyze/{}/\".format(api_url, api_name)\n post_req = requests_retry_session(session=s).post(post_url, data=json_payload)\n jobrequest_json = json.loads(post_req.content)\n # Fetch and examine job result\n result = get_job_result(api_url, s, jobrequest_json)\n return result", "Issue job request: analyze/land/", "result = analyze_api_request('land', s, api_url, json_payload)", "Everything below is just exploration of the results. Examine the content of the results (as JSON, and Python dictionaries)", "type(result), result.keys()", "result is a dictionary with one item, survey. This item in turn is a dictionary with 3 items: displayName, name, categories. The first two are just labels. The data are in the categories item.", "result['survey'].keys()\n\ncategories = result['survey']['categories']\n\nlen(categories), categories[1]\n\nland_categories_nonzero = [d for d in categories if d['coverage'] > 0]\n\nland_categories_nonzero", "Issue job request: analyze/terrain/", "result = analyze_api_request('terrain', s, api_url, json_payload)", "result is a dictionary with one item, survey. This item in turn is a dictionary with 3 items: displayName, name, categories. The first two are just labels. The data are in the categories item.", "categories = result['survey']['categories']\n\nlen(categories), categories\n\n[d for d in categories if d['type'] == 'average']", "Issue job request: analyze/climate/", "result = analyze_api_request('climate', s, api_url, json_payload)", "result is a dictionary with one item, survey. This item in turn is a dictionary with 3 items: displayName, name, categories. The first two are just labels. The data are in the categories item.", "categories = result['survey']['categories']\n\nlen(categories), categories[:2]\n\nppt = [d['ppt'] for d in categories]\ntmean = [d['tmean'] for d in categories]\n\n# ppt is in cm, right?\nsum(ppt)\n\nimport calendar\nimport numpy as np\n\ncalendar.mdays\n\n# Annual tmean needs to be weighted by the number of days per month\nsum(np.asarray(tmean) * np.asarray(calendar.mdays[1:]))/365" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
CRPropa/CRPropa3
doc/pages/example_notebooks/extragalactic_fields/MHD_models.v4.ipynb
gpl-3.0
[ "3D MHD models\nThis notebook explains how to use cubic results of 3D MHD models on a uniform grid in CRPropa.\nSupplied data\nThe fields need to be supplied in a raw binary file that contains only single floats, arranged as follows: Starting with the cell values (Bx,By,Bz for magnetic field or rho for density) at the origin of the box, the code continues to read along z, then y and finally x.\nOn https://crpropa.github.io/CRPropa3/ under \"Additional resources\" you can find a number of MHD models used with CRPropa in the literature. \nNote:\nThe parameters used for the following example refer to the MHD model by Hackstein et al. (2018), as provided under \"Additional resources\". However, CRPropa does in general not take any warranty on the accuracy of any of those external data files.\nNote that in some previous version of this notebook the used MHD model has not been representing the results from Hackstein et al. (2018). This has been due to two issues: (1.) the size of the grid has not taken the dimensionless Hubble parameter into account and (2.) the X- and Z-coordinates of the available data files have been transposed. But since 20.05.2022 both of these issues have been fixed and the following example can be used to include the MHD model data from Hackstein et al. (2018).", "from crpropa import *\n\n## settings for MHD model (must be set according to model)\nfilename_bfield = \"clues_primordial.dat\" ## filename of the magnetic field\ngridOrigin = Vector3d(0,0,0) ## origin of the 3D data, preferably at boxOrigin\ngridSize = 1024 ## size of uniform grid in data points\nh = 0.677 ## dimensionless Hubble parameter\nsize = 249.827/h *Mpc ## physical edgelength of volume in Mpc\nb_factor = 1. ## global renormalization factor for the field\n\n## settings of simulation\nboxOrigin = Vector3d( 0, 0, 0,) ## origin of the full box of the simulation\nboxSize = Vector3d( size, size, size ) ## end of the full box of the simulation\n\n## settings for computation\nminStep = 10.*kpc ## minimum length of single step of calculation\nmaxStep = 4.*Mpc ## maximum length of single step of calculation\ntolerance = 1e-2 ## tolerance for error in iterative calculation of propagation step\n\nspacing = size/(gridSize) ## resolution, physical size of single cell\n\nm = ModuleList()\n\n\n## instead of computing propagation without Lorentz deflection via\n# m.add(SimplePropagation(minStep,maxStep))\n\n## initiate grid to hold field values\nvgrid = Grid3f( gridOrigin, gridSize, spacing )\n## load values to the grid\nloadGrid( vgrid, filename_bfield, b_factor )\n## use grid as magnetic field\nbField = MagneticFieldGrid( vgrid )\n## add propagation module to the simulation to activate deflection in supplied field\nm.add(PropagationCK( bField, tolerance, minStep, maxStep))\n#m.add(DeflectionCK( bField, tolerance, minStep, maxStep)) ## this was used in older versions of CRPropa\n", "to make use of periodicity of the provided data grid, use", "m.add( PeriodicBox( boxOrigin, boxSize ) )", "to not follow particles forever, use", "m.add( MaximumTrajectoryLength( 400*Mpc ) ) ", "Uniform injection\nThe most simple scenario of UHECR sources is a uniform distribution of their sources. 
This can be realized via use of", "source = Source()\nsource.add( SourceUniformBox( boxOrigin, boxSize )) ", "Injection following density field\nThe distribution of gas density can be used as a probability density function for the injection of particles from random positions.", "filename_density = \"mass-density_clues.dat\" ## filename of the density field\n\nsource = Source()\n## initialize grid to hold field values\nmgrid = ScalarGrid( gridOrigin, gridSize, spacing )\n## load values to grid\nloadGrid( mgrid, filename_density )\n## add source module to simulation\nsource.add( SourceDensityGrid( mgrid ) )", "Mass Halo injection\nAlternatively, for the CLUES models, we also provide a list of mass halo positions. These positions can be used as sources with the same properties by use of the following", "import numpy as np\nfilename_halos = 'clues_halos.dat'\n\n# read data from file\ndata = np.loadtxt(filename_halos, unpack=True, skiprows=39)\nsX = data[0] \nsY = data[1] \nsZ = data[2] \nmass_halo = data[5] \n\n## find only those mass halos inside the provided volume (see Hackstein et al. 2018 for more details)\nXdown= sX >= 0.25 \nXup= sX <= 0.75 \nYdown= sY >= 0.25 \nYup= sY <= 0.75 \nZdown= sZ >= 0.25 \nZup= sZ <= 0.75 \ninsider= Xdown*Xup*Ydown*Yup*Zdown*Zup \n\n## transform relative positions to physical positions within given grid\nsX = (sX[insider]-0.25)*2*size\nsY = (sY[insider]-0.25)*2*size\nsZ = (sZ[insider]-0.25)*2*size\n\n## collect all sources in the multiple sources container\nsmp = SourceMultiplePositions()\nfor i in range(0,len(sX)):\n pos = Vector3d( sX[i], sY[i], sZ[i] )\n smp.add( pos, 1. )\n \n## add collected sources\nsource = Source()\nsource.add( smp )", "additional source properties", "## use isotropic emission from all sources\nsource.add( SourceIsotropicEmission() )\n\n## set particle type to be injected\nA, Z = 1, 1 # proton\nsource.add( SourceParticleType( nucleusId(A,Z) ) )\n\n## set injected energy spectrum\nEmin, Emax = 1*EeV, 1000*EeV\nspecIndex = -1\nsource.add( SourcePowerLawSpectrum( Emin, Emax, specIndex ) ) ", "Observer\nTo register particles, an observer has to be defined. In the provided constrained simulations the position of the Milky Way is, by definition, in the center of the volume.", "filename_output = 'data/output_MW.txt'\n\nobsPosition = Vector3d(0.5*size,0.5*size,0.5*size) # position of observer, MW is in center of constrained simulations\nobsSize = 800*kpc ## physical size of observer sphere\n\n\n## initialize observer that registers particles that enter into sphere of given size around its position\nobs = Observer()\nobs.add( ObserverSmallSphere( obsPosition, obsSize ) )\n## write registered particles to output file\nobs.onDetection( TextOutput( filename_output ) )\n## choose to not further follow particles paths once detected\nobs.setDeactivateOnDetection(True)\n## add observer to module list\nm.add(obs)\n", "finally run the simulation by", "N = 1000\n\nm.showModules() ## optional, see summary of loaded modules\nm.setShowProgress(True) ## optional, see progress during runtime\nm.run(source, N, True) ## perform simulation with N particles injected from source" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
KIPAC/StatisticalMethods
tutorials/old/GPRegression.ipynb
gpl-2.0
[ "Week 8 Tutorial\nGaussian Process Regression\nIn this example, we return to the \"straight line\" problem, generate some mock data, and investigate a \"model-free model\", a Gaussian Process, for them. The idea is to find a flexible model that can interpolate between the data we have, in order to predict future data lying in the gaps, or beyond the observed domain.\nRequirements\nYou will need to pip install scikit-learn and check that you have v0.18 or higher as a result.", "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nplt.rcParams['figure.figsize'] = (10.0, 10.0)\nplt.rcParams['savefig.dpi'] = 200\n\nclass SolutionMissingError(Exception):\n def __init__(self):\n Exception.__init__(self,\"You need to complete the solution for this code to work!\")\ndef REPLACE_WITH_YOUR_SOLUTION():\n raise SolutionMissingError", "The Data\nLet's generate a simple Cepheids-like dataset: observations of $y$ with reported uncertainties $\\sigma_y$, at given $x$ values.", "import numpy as np\nimport pylab as plt\n\nxlimits = [0,350]\nylimits = [0,250]\n\ndef generate_data(seed=None):\n \"\"\"\n Generate a 30-point data set, with x and sigma_y as standard, but with\n y values given by\n\n y = a_0 + a_1 * x + a_2 * x**2 + a_3 * x**3 + noise\n \"\"\"\n Ndata = 30\n\n xbar = 0.5*(xlimits[0] + xlimits[1])\n xstd = 0.25*(xlimits[1] - xlimits[0])\n\n if seed is not None:\n np.random.seed(seed=seed)\n\n x = xbar + xstd * np.random.randn(Ndata)\n\n meanerr = 0.025*(xlimits[1] - xlimits[0])\n\n sigmay = meanerr + 0.3 * meanerr * np.abs(np.random.randn(Ndata))\n\n a = np.array([37.2,0.93,-0.002,0.0])\n y = a[0] + a[1] * x + a[2] * x**2 + a[3] * x**3 + sigmay*np.random.randn(len(x))\n\n return x,y,sigmay\n\ndef plot_yerr(x, y, sigmay):\n \"\"\"\n Plot an (x,y,sigma) dataset as a set of points with error bars \n \"\"\"\n plt.errorbar(x, y, yerr=sigmay, fmt='.', ms=7, lw=1, color='k')\n plt.xlabel('$x$', fontsize=16)\n plt.ylabel('$y$', fontsize=16)\n plt.xlim(*xlimits)\n plt.ylim(*ylimits)\n return\n\n(x, y, sigmay) = generate_data(seed=13)\n\nplot_yerr(x, y, sigmay)", "Fitting a Gaussian Process\nLet's follow Jake VanderPlas' example, to see how to work with the scikit-learn v0.18 Gaussian Process regression model.", "from sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import RBF as SquaredExponential", "Defining a GP\nFirst we define a kernel function, for populating the covariance matrix of our GP. To avoid confusion, a Gaussian kernel is referred to as a \"squared exponential\" (or a \"radial basis function\", RBF). The squared exponential kernel has one hyper-parameter, the length scale that is the Gaussian width.", "h = 10.0\n\nkernel = SquaredExponential(length_scale=h, length_scale_bounds=(0.01, 1000.0))\ngp0 = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)", "Now, let's draw some samples from the unconstrained process, o equivalently, the prior. Each sample is a function $y(x)$, which we evaluate on a grid. We'll need to assert a value for the kernel hyperparameter $h$, which dictates the correlation length between the datapoints. 
That will allow us to compute a mean function (which for simplicity we'll set to the mean observed $y$ value), and a covariance matrix that captures the correlations between datapoints.", "np.random.seed(1)\nxgrid = np.atleast_2d(np.linspace(0, 399, 100)).T\nprint(\"y(x) will be predicted on a grid of length\", len(xgrid))\n\n# Draw three sample y(x) functions:\ndraws = gp0.sample_y(xgrid, n_samples=3)\n\nprint(\"Drew 3 samples, stored in an array with shape \", draws.shape)", "Let's plot these, to see what our prior looks like.", "# Start a 4-panel figure:\nfig = plt.figure(figsize=(10,10))\n\n# Plot our three prior draws:\nax = fig.add_subplot(221)\nax.plot(xgrid, draws[:,0], '-r')\nax.plot(xgrid, draws[:,1], '-g')\nax.plot(xgrid, draws[:,2], '-b', label='Rescaled prior sample $y(x)$')\nax.set_xlim(0, 399)\nax.set_ylim(-5, 5)\nax.set_xlabel('$x$')\nax.set_ylabel('$y(x)$')\nax.legend(fontsize=8);", "Each predicted $y(x)$ is drawn from a Gaussian of unit variance, and with off-diagonal elements determined by the covariance function. \nTry changing h to see what happens to the smoothness of the predictions. \n\nGo back up to the cell where h is assigned, and re-run that cell and the subsequent ones.\n\nFor our data to be well interpolated by this Gaussian Process, it will need to be rescaled such that it has zero mean and unit variance. There are standard methods for doing this, but we'll do this rescaling here for transparency - and so we know what to add back in later!", "class Rescale():\n def __init__(self, y, err):\n self.original_data = y\n self.original_err = err\n self.mean = np.mean(y)\n self.std = np.std(y)\n self.transform()\n return\n def transform(self):\n self.y = (self.original_data - self.mean) / self.std\n self.err = self.original_err / self.std\n return()\n def invert(self, scaled_y, scaled_err):\n return (scaled_y * self.std + self.mean, scaled_err * self.std) \n\nrescaled = Rescale(y, sigmay)\nprint('Mean, variance of original data: ',np.round(np.mean(y)), np.round(np.var(y)))\nprint('Mean, variance of rescaled data: ',np.round(np.mean(rescaled.y)), np.round(np.var(rescaled.y)))", "Check that we can undo the scaling, for any y and sigmay:", "y2, sigmay2 = rescaled.invert(rescaled.y, rescaled.err)\nprint('Mean, variance of inverted, rescaled data: ',np.round(np.mean(y2)), np.round(np.var(y2)))\nprint('Maximum differences in y, sigmay, after round trip: ',np.max(np.abs(y2 - y)), np.max(np.abs(sigmay2 - sigmay)))", "Constraining the GP\nNow, using the same covariance function, lets \"fit\" the GP by constraining each draw from the GP to go through our data points, and optimizing the length scale hyperparameter h. \nLet's first look at how this would work for two data points with no uncertainty.", "# Choose two of our (rescaled) datapoints:\nx1 = np.array([x[10], x[12]])\nrescaled_y1 = np.array([rescaled.y[10], rescaled.y[12]])\nrescaled_sigmay1 = np.array([rescaled.err[10], rescaled.err[12]])\n\n# Instantiate a GP model, with initial length_scale h=10:\nkernel = SquaredExponential(length_scale=10.0, length_scale_bounds=(0.01, 1000.0))\ngp1 = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)\n\n# Fit it to our two noiseless datapoints:\ngp1.fit(x1[:, None], rescaled_y1)\n\n# We have fit for the length scale parameter: print the result here:\nparams = gp1.kernel_.get_params()\nprint('Best-fit kernel length scale =', params['length_scale'],'cf. 
input',10.0)\n\n# Now predict y(x) everywhere on our xgrid: \nrescaled_ygrid1, rescaled_ygrid1_err = gp1.predict(xgrid, return_std=True)\n\n# And undo scaling, of both y(x) on our grid, and our two constraining data points:\nygrid1, ygrid1_err = rescaled.invert(rescaled_ygrid1, rescaled_ygrid1_err)\ny1, sigmay1 = rescaled.invert(rescaled_y1, rescaled_sigmay1)\n\nax = fig.add_subplot(222)\nax.plot(xgrid, ygrid1, '-', color='gray', label='Posterior mean $y(x)$')\nax.fill(np.concatenate([xgrid, xgrid[::-1]]),\n np.concatenate([(ygrid1 - ygrid1_err), (ygrid1 + ygrid1_err)[::-1]]),\n alpha=0.3, fc='gray', ec='None', label='68% confidence interval')\nax.plot(x1, y1, '.k', ms=6, label='Noiseless constraints')\nax.set_xlim(0, 399)\nax.set_ylim(0, 399)\nax.set_xlabel('$x$')\nfig", "In the absence of information, the GP tends to produce $y(x)$ that fluctuate around the prior mean function, which we chose to be a constant. Let's draw some samples from the posterior PDF, and overlay them.", "draws = gp1.sample_y(xgrid, n_samples=3)\nfor k in range(3):\n draws[:,k], dummy = rescaled.invert(draws[:,k], np.zeros(len(xgrid)))\n\nax.plot(xgrid, draws[:,0], '-r')\nax.plot(xgrid, draws[:,1], '-g')\nax.plot(xgrid, draws[:,2], '-b', label='Posterior sample $y(x)$')\nax.legend(fontsize=8)\nfig", "See how the posterior sample $y(x)$ functions all pass through the constrained points.\nIncluding Observational Uncertainties\nThe mechanism for including uncertainties is a little esoteric: scikit-learn wants to be given a \"nugget,\" called alpha, to multiply the diagonal elements of the covariance matrix.", "# Choose two of our datapoints:\nx2 = np.array([x[10], x[12]])\nrescaled_y2 = np.array([rescaled.y[10], rescaled.y[12]])\nrescaled_sigmay2 = np.array([rescaled.err[10], rescaled.err[12]])\n\n# Instantiate a GP model, including observational errors:\nkernel = SquaredExponential(length_scale=10.0, length_scale_bounds=(0.01, 1000.0))\ngp2 = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9, \n alpha=(rescaled_sigmay2 / rescaled_y2) ** 2,\n random_state=0)\n\n# Fit it to our two noisy datapoints:\ngp2.fit(x2[:, None], rescaled_y2)\n\n# We have fit for the length scale parameter: print the result here:\nparams = gp2.kernel_.get_params()\nprint('Best-fit kernel length scale =', params['length_scale'],'cf. 
input',10.0)\n\n# Now predict y(x) everywhere on our xgrid: \nrescaled_ygrid2, rescaled_ygrid2_err = gp2.predict(xgrid, return_std=True)\n\n# And undo scaling:\nygrid2, ygrid2_err = rescaled.invert(rescaled_ygrid2, rescaled_ygrid2_err)\ny2, sigmay2 = rescaled.invert(rescaled_y2, rescaled_sigmay2)\n\n# Draw three posterior sample y(x):\ndraws = gp2.sample_y(xgrid, n_samples=3)\nfor k in range(3):\n draws[:,k], dummy = rescaled.invert(draws[:,k], np.zeros(len(xgrid)))\n\nax = fig.add_subplot(223)\n\ndef gp_plot(ax, xx, yy, ee, datax, datay, datae, samples, legend=True):\n ax.cla()\n ax.plot(xx, yy, '-', color='gray', label='Posterior mean $y(x)$')\n ax.fill(np.concatenate([xx, xx[::-1]]),\n np.concatenate([(yy - ee), (yy + ee)[::-1]]),\n alpha=0.3, fc='gray', ec='None', label='68% confidence interval')\n ax.errorbar(datax, datay, datae, fmt='.k', ms=6, label='Noisy constraints')\n ax.set_xlim(0, 399)\n ax.set_ylim(0, 399)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y(x)$')\n ax.plot(xgrid, samples[:,0], '-r')\n ax.plot(xgrid, samples[:,1], '-g')\n ax.plot(xgrid, samples[:,2], '-b', label='Posterior sample $y(x)$')\n if legend: ax.legend(fontsize=8)\n return\n\ngp_plot(ax, xgrid, ygrid2, ygrid2_err, x2, y2, sigmay2, draws, legend=True)\nfig", "Now, the posterior sample $y(x)$ functions pass through the constraints within the errors.\nUsing all the Data\nNow let's extend the above example to use all of our datapoints. This additional information should pull the predictions further away from the initial mean function. We'll also compute the marginal log likelihood of the best fit hyperparameter, in case we want to compare this choice of kernel with another one (in the exercises, for example).", "# Use all of our datapoints:\nx3 = x\nrescaled_y3 = rescaled.y\nrescaled_sigmay3 = rescaled.err\n\n# Instantiate a GP model, including observational errors:\nkernel = SquaredExponential(length_scale=10.0, length_scale_bounds=(0.01, 1000.0))\n# Could comment this out, and then import and use an \n# alternative kernel here. \n\ngp3 = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9, \n alpha=(rescaled_sigmay3 / rescaled_y3) ** 2,\n random_state=0)\n\n# Fit it to our noisy datapoints:\ngp3.fit(x3[:, None], rescaled_y3)\n\n# Now predict y(x) everywhere on our xgrid: \nrescaled_ygrid3, rescaled_ygrid3_err = gp3.predict(xgrid, return_std=True)\n\n# And undo scaling:\nygrid3, ygrid3_err = rescaled.invert(rescaled_ygrid3, rescaled_ygrid3_err)\ny3, sigmay3 = rescaled.invert(rescaled_y3, rescaled_sigmay3)\n\n# We have fitted the length scale parameter - print the result here:\nparams = gp3.kernel_.get_params()\nprint('Kernel: {}'.format(gp3.kernel_))\nprint('Best-fit kernel length scale =', params['length_scale'],'cf. input',10.0)\nprint('Marginal log-Likelihood: {:.3f}'.format(gp3.log_marginal_likelihood(gp3.kernel_.theta)))\n\n# Draw three posterior sample y(x):\ndraws = gp3.sample_y(xgrid, n_samples=3)\nfor k in range(3):\n draws[:,k], dummy = rescaled.invert(draws[:,k], np.zeros(len(xgrid)))\n\nax = fig.add_subplot(224)\n\ngp_plot(ax, xgrid, ygrid3, ygrid3_err, x3, y3, sigmay3, draws, legend=True)\nfig\n\n# fig.savefig('../../lessons/graphics/mfm_gp_example_pjm.png')", "We now see the Gaussian Process model providing a smooth interpolation between the points. The posterior samples show fluctuations, but all are plausible under our assumptions.\nExercises\n\nTry a different kernel function, from the list given in the scikit-learn docs here. \"Matern\" could be a good choice. 
Do you get a higher value of the marginal log likelihood when you fit this model? Under what circumstances would this marginal log likelihood approximate the Bayesian Evidence well?\n\n\n\nExtend the analysis above to do a posterior predictive model check of your GP inference. You'll need to generate new replica datasets from posterior draws from the fitted GP. Use the discrepancy measure $T(\\theta,d) = -2 \\log L(\\theta;d)$. Does your GP provide an adequate fit to the data? Could it be over-fitting? What could you do to prevent this? There's some starter code for you below.\n\n\n1. Alternative kernel\nGo back to the gp3 cell, and try something new...\nKernel: RBF\nBest-fit kernel length scale = \nMarginal log-Likelihood:\nKernel: ???\nBest-fit kernel length scale = \nMarginal log-Likelihood:\n2. Posterior Predictive Model Check\nFor this we need to draw models from our GP, and then generate a dataset from each one. We'll do this in the function below.", "def generate_replica_data(xgrid, ygrid, seed=None):\n \"\"\"\n Generate a 30-point data set, with x and sigma_y as standard, but with\n y values given by the \"lookup tables\" (gridded function) provided.\n \"\"\"\n Ndata = 30\n\n xbar = 0.5*(xlimits[0] + xlimits[1])\n xstd = 0.25*(xlimits[1] - xlimits[0])\n\n if seed is not None:\n np.random.seed(seed=seed)\n\n x = xbar + xstd * np.random.randn(Ndata)\n\n meanerr = 0.025*(xlimits[1] - xlimits[0])\n\n sigmay = meanerr + 0.3 * meanerr * np.abs(np.random.randn(Ndata))\n\n # Look up values of y given input lookup grid\n y = np.zeros(Ndata)\n for k in range(Ndata):\n y[k] = np.interp(x[k], np.ravel(xgrid), ygrid)\n # Add noise:\n y += sigmay*np.random.randn(len(x))\n\n return x,y,sigmay\n\n\ndef discrepancy(y_model, y_obs, s_obs):\n \"\"\"\n Compute discrepancy measure comparing model y and \n observed/replica y (with its uncertainty). \n \n T = -2 log L\n \"\"\"\n T = REPLACE_WITH_YOUR_SOLUTION()\n return T\n\n# Draw 1000 sample models:\nNsamples = 1000\ndraws = gp3.sample_y(xgrid, n_samples=Nsamples)\nx_rep, y_rep, sigmay_rep = np.zeros([30,Nsamples]), np.zeros([30,Nsamples]), np.zeros([30,Nsamples])\n# Difference in discrepancy measure, for plotting\ndT = np.zeros(Nsamples)\n\n# For each sample model, draw a replica dataset and accumulate test statistics:\ny_model = np.zeros(30)\nfor k in range(Nsamples):\n draws[:,k], dummy = rescaled.invert(draws[:,k], np.zeros(len(xgrid)))\n ygrid = draws[:,k]\n x_rep[:,k], y_rep[:,k], sigmay_rep[:,k] = generate_replica_data(xgrid, ygrid, seed=None)\n dT[k] = REPLACE_WITH_YOUR_SOLUTION()\n\n\n# Plot P(T[y_rep]-T[y_obs]|y_obs) as a histogram:\n\nplt.hist(dT, density=True)\nplt.xlabel(\"$T(y_{rep})-T(y_{obs})$\")\nplt.ylabel(\"Posterior predictive probability density\");" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
nitin-cherian/LifeLongLearning
Python/Python Morsels/multimax/my_try/multimax.ipynb
mit
[ "def multimax(iterable, key=None):\n \"\"\"\n Function that takes an iterable and returns all\n maximum values found in the iterable\n \"\"\"\n input = list(iterable)\n if key:\n return [it for it in input if key(it) == key(max(input, key=key))]\n return [it for it in input if it == max(input)]\n\nmultimax([1, 2, 4, 3])\n\nmultimax([1, 4, 2, 4, 3])\n\nmultimax([1, 1, 1])", "Bonus1: Make sure the function returns an empty list if the iterable is empty", "multimax([])", "Bonus2: Make sure the function works well with iterator such as files, generators etc", "numbers = [1, 3, 8, 5, 4, 10, 6]\nodds = (n for n in numbers if n % 2 == 1)\nmultimax(odds)", "Bonus3: The multimax function accept a keyword argument called \"key\" that is a function which will be used to determine the key by which to compare values as maximums. For example the key function could be used to find the longest words in a list of words", "words = [\"cheese\", \"shop\", \"ministry\", \"of\", \"silly\", \"walks\", \"argument\", \"clinic\"]\n\nmultimax(words, key=len)\n\nwords = [\"cheese\", \"shop\", \"ministry\", \"of\", \"silly\", \"walks\", \"argument\", \"clinic\"]\nmax(words, key=len)\n\nwords = [\"cheese\", \"shop\", \"argument\", \"of\", \"silly\", \"walks\", \"ministry\", \"clinic\"]\nmax(words, key=len)", "Unitests", "import unittest\n\n\nclass MultiMaxTests(unittest.TestCase):\n\n \"\"\"Tests for multimax.\"\"\"\n\n def test_single_max(self):\n self.assertEqual(multimax([1, 2, 4, 3]), [4])\n\n def test_two_max(self):\n self.assertEqual(multimax([1, 4, 2, 4, 3]), [4, 4])\n\n def test_all_max(self):\n self.assertEqual(multimax([1, 1, 1, 1, 1]), [1, 1, 1, 1, 1])\n\n def test_lists(self):\n inputs = [[0], [1], [], [0, 1], [1]]\n expected = [[1], [1]]\n self.assertEqual(multimax(inputs), expected)\n\n def test_order_maintained(self):\n inputs = [\n (3, 2),\n (2, 1),\n (3, 2),\n (2, 0),\n (3, 2),\n ]\n expected = [\n inputs[0],\n inputs[2],\n inputs[4],\n ]\n outputs = multimax(inputs)\n self.assertEqual(outputs, expected)\n self.assertIs(outputs[0], expected[0])\n self.assertIs(outputs[1], expected[1])\n self.assertIs(outputs[2], expected[2])\n\n # To test the Bonus part of this exercise, comment out the following line\n # @unittest.expectedFailure\n def test_empty(self):\n self.assertEqual(multimax([]), [])\n\n # To test the Bonus part of this exercise, comment out the following line\n # @unittest.expectedFailure\n def test_iterator(self):\n numbers = [1, 4, 2, 4, 3]\n squares = (n**2 for n in numbers)\n self.assertEqual(multimax(squares), [16, 16])\n\n # To test the Bonus part of this exercise, comment out the following line\n # @unittest.expectedFailure\n def test_key_function(self):\n words = [\"alligator\", \"animal\", \"apple\", \"artichoke\", \"avalanche\"]\n outputs = [\"alligator\", \"artichoke\", \"avalanche\"]\n self.assertEqual(multimax(words, key=len), outputs)\n\n\nif __name__ == \"__main__\":\n unittest.main(argv=['first-arg-is-ignored'], exit=False)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
akimbekov/Stock_prediction_using_ML_and_Deep_learning
.ipynb_checkpoints/Project-checkpoint.ipynb
mit
[ "Outline is based on this sentence:\nFor example, a project that performs various binary classification methods on a social science dataset you may want to focus on data munging, method selection, method evaluation, feature extraction, and presentation of analysis.\n<center> <h1> Prediction of stock raise/fall using state-of-the-art Machine Learning and Deep Learning methods. </h1>\n<h2> STA208 Final project </h2>\n\n<center><img src=\"https://stockmarketvideo.com/wp-content/uploads/2016/04/stock-market-prediction.jpg\" alt=\"Drawing\" style=\"width: 200px;\"></center>\nSample outline:\n\nIntroduction\nstock price prediction challanges, relation to news, why ML and Deep learning approach might work\ndata munging and feature extraction\nscraping of news data from motleyfool.com\ngetting the sentiment scores using the dictionary of positive and negative words\ndesigning a \"sentiment\" feature, word cloud for one POSITIVE and one NEGATIVE articles\nquandl.api to get the data, etc...\nmethod selection, evaluation, and comparison\nbaseline classification methods, logistic regression, SVM, random forest\nfeed forward neural nets, recurrent neural nets (description also)\ntuning the neural net parameters, etc.\nresults and conclusion\ncomparison of accuracy results, confusion matrix, ROC, PR, curves, etc.\n\n1. Introduction\nWhether one trades in Stocks, Index, Currencies, Commodities, a person would like to know questions like:\n\nWhat is the market trend? Will it continue?\nWhether market will close higher or lower compared to its opening levels? \nWhat could be expected high, low, close levels?\n\nThere could be many more such questions. The first challenge is to know the trend and direction of the market. If there was a crystal ball that could provide meaningful prediction in advance on the trend and direction of the markets that could help take correct trading position to make profitable trades.\nPredictive analytics based on historical price data using Data Mining, Machine Learning and Artificial Intelligence can provide prediction in advance on whether the next day market will close higher or lower compared to its opening levels. \nWe chose to investigate whether there is a connection between the sentiment in the news for a given day and the resulting market value changes for Apple, Inc on the same day. \n2. Data Munging and Feature Extraction\nTo get the news data related to Apple, Inc., we webscraped related news with term Apple from the Motley Fool of the last three years.\nNote: we commented out lines of codes where the scraping part is done. 
The scrapped data is available in github repo with name \"mfool.csv\".", "#data munging and feature extraction packages\nimport requests\nimport requests_ftp\nimport requests_cache\nimport lxml\nimport itertools\nimport pandas as pd\nimport re\nimport numpy as np\nimport seaborn as sns\nimport string\nfrom bs4 import BeautifulSoup\nfrom collections import Counter\nfrom matplotlib import pyplot as plt\nfrom wordcloud import WordCloud\nplt.style.use('ggplot')\nplt.rcParams['figure.figsize'] = [10, 8]\n\n#machine learning from scikit-learn\nfrom sklearn.metrics import classification_report,confusion_matrix, precision_recall_curve, roc_curve, auc\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\n\n#Deep learning from Tensor Flow\n#feed forward neural network\nimport tensorflow as tf\nfrom tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier\nfrom tensorflow.contrib.layers import real_valued_column\n\n#recurrent neural nets\nfrom tensorflow.contrib.layers.python.layers.initializers import xavier_initializer\nfrom tensorflow.contrib import rnn\n\ndef motley_page_links(page):\n \"\"\"\n Given a page number, it returns all article links.\n \n Input: a page number (default = 1)\n Output: a list with links on the given page\n \"\"\"\n \n response = requests.get(\n 'https://www.fool.com/search/solr.aspx?page={}&q=apple&sort=date&source=isesitbut0000001'.format(page))\n response.raise_for_status()\n html = response.text\n parsed_html = BeautifulSoup(html, 'lxml')\n\n div_with_links = parsed_html.find_all(name = 'dl',\n attrs = {'class' : 'results'})\n links = []\n for link in div_with_links[0].find_all('a', href = True):\n links.append(link['href'])\n \n return links\n\ndef motley_all_links(no_pages = 1):\n \"\"\"\n Given number of pages, it returns all the links \n from \"no_pages\"\n \n Input: number of pages (default = 1)\n Output: a list with links from the pages\n \"\"\"\n all_links = []\n for page in range(1, (no_pages + 1)):\n all_links.extend(motley_page_links(page))\n \n return all_links\n\ndef motley_article_info(url):\n \"\"\"\n Given an article url, it returns title, date, content\n and url of that article.\n \n Input: article url\n Ouput: a dictionary with 'title', 'date',\n 'article', and 'url' as keys.\n \"\"\"\n \n response = requests.get(url)\n response.raise_for_status()\n html = response.text\n parsed_html = BeautifulSoup(html, 'lxml')\n content = parsed_html.find_all(name = 'div',\n attrs = {'class' : 'full_article'})\n\n date = parsed_html.find_all(name = 'div', attrs = {'class' : 'publication-date'})[0].text.strip()\n title = parsed_html.find_all('h1')[0].text\n article = ' '.join([t.text for t in content[0].find_all('p')])\n \n return {'title' : title,\n 'date' : date,\n 'article' : article,\n 'url' : url}\n\ndef motley_df(no_pages):\n \"\"\"\n Creates DataFrame for the articles in url\n with author, text, title, and url as column\n names.\n \n Input: A url, number of pages\n Output: DataFrame with 4 columns: author,\n text, title, and url.\n \"\"\"\n \n #get all links in the specified number of pages\n #from url\n links = motley_all_links(no_pages)\n \n #create dataframe for each link and\n #combine them into one dataframe\n article_df = pd.DataFrame(index = [999999], columns=['article', 'date', 'title', 'url'])\n for i, link in enumerate(links):\n try:\n append_to = pd.DataFrame(motley_article_info(link), index = [i])\n article_df = article_df.append(append_to)\n 
except:\n pass\n \n article_df = article_df.drop(999999)\n return article_df\n\n#df = motley_df(1000)\n#convert_to_csv(df, \"mfool.csv\")", "2.\nsentiment scoring", "motley = pd.read_csv('mfool.csv')\n\nnegative = pd.read_csv('negative-words.txt', sep = ' ', header = None)\npositive = pd.read_csv('positive-words.txt', sep=' ', header=None)\n\ndef score_word(word):\n \"\"\"\n returns -1 if negative meaning, +1 if positive meaning,\n else 0\n \n input: a word\n ouput: -1, 0, or + 1\n \"\"\"\n if word.lower() in negative.values:\n return -1\n elif word.lower() in positive.values:\n return +1\n return 0\n\ndef get_scores(article):\n \"\"\"\n returns sentiment scores for a given article\n \n input: an article\n output: sentiment score\n \"\"\"\n wordsArticle = article.split(' ')\n scores = [score_word(word) for word in wordsArticle]\n return sum(scores)\n\nmotley['sentiment'] = motley['article'].apply(get_scores)\n\nplt.hist(motley.sentiment, bins=50)\nplt.xlabel('sentiment scores')\nplt.ylabel('frequency')\nplt.title('Distribution of sentiment scores of articles');\n\n# motley.to_csv('motley_with_s_scores.csv', encoding='utf-8')\n\nmost_positive_article = motley['article'][motley['sentiment'] == np.max(motley['sentiment'])].values[0]\nwc = WordCloud().generate(most_positive_article)\nplt.imshow(wc)\nplt.axis('off');\n\nmost_negative_article = motley['article'][motley['sentiment'] == np.min(motley['sentiment'])].values[0]\nwc = WordCloud().generate(most_negative_article)\nplt.imshow(wc)\nplt.axis('off');", "3.\nmerging data sets\nAPPLE stock data was obtained using Quandl API at \"https://www.quandl.com/api/v3/datasets/WIKI/AAPL.csv\"", "path = \"../datasets/\"\naapl = pd.read_csv(path+'WIKI_PRICES_AAPL.csv')\nfool = pd.read_csv(path+'motley_with_s_scores.csv')\n\ndef format_df(stock_df, news_df, word):\n \"\"\"\n merges stock_df and news_df on \"date\"\n column\n \n input: stock df, news df, word\n output: merged df\n \"\"\"\n \n stock_df['diff'] = stock_df['close']-stock_df['open']\n news_df['Count'] = news_df['article'].apply(lambda x: x.count(word))\n news_df.loc[news_df['Count'] <= 5, 'sentiment'] = 0\n news_df['date'] = pd.to_datetime(news_df['date'])\n news_df['date'] = news_df['date'].dt.strftime('%Y-%m-%d')\n news_df = news_df.groupby(['date'], as_index = False).sum()\n news_df = news_df[['date', 'sentiment', 'Count']]\n merged_df = pd.merge(news_df, stock_df)\n merged_df['bin_sentiment'] = pd.cut(merged_df['sentiment'], [-np.inf, -0.001, 0.001, np.inf], labels = [-1, 0, 1])\n merged_df['bin_diff'] = pd.cut(merged_df['diff'], [-np.inf, -0.001, 0.001, np.inf], labels = [-1, 0, 1])\n return merged_df\n\nmerged_df = format_df(aapl, fool, 'Apple')\nmerged_df.head()\n#merged_df.to_csv('merged_df.csv', encoding='utf-8')", "3. 
Methods selection, evaluation", "def plot_ROC(y_test, scores, label, color):\n \"\"\"\n plots ROC curve\n input: y_test, scores, and title\n output: ROC curve\n \"\"\"\n false_pr, true_pr, _ = roc_curve(y_test, scores[:, 1])\n roc_auc = auc(false_pr, true_pr)\n plt.plot(false_pr, true_pr, lw = 3,\n label='{}: area={:10.4f})'.format(label, roc_auc), color = color)\n plt.plot([0, 1], [0, 1], color='black', lw=1, linestyle='--')\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.legend(loc=\"best\")\n plt.ylim([0.0, 1.05])\n plt.xlim([0.0, 1.0])\n plt.title('ROC')\n\ndef plot_PR(y_test, scores, label, color):\n \"\"\"\n plots PR curve\n input: y_test, scores, title\n output: Precision-Recall curve\n \"\"\"\n precision, recall, _ = precision_recall_curve(y_test, scores[:, 1])\n plt.plot(recall, precision,lw = 2,\n label='{}'.format(label), color = color)\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n \n plt.legend(loc=\"best\")\n plt.ylim([0.0, 1.05])\n plt.xlim([0.0, 1.0])\n plt.title('PR')\n \ndef plot_confusionmatrix(ytrue, ypred):\n \"\"\"\n \n plots confusion matrix heatmap and prints out\n classification report\n \n input: ytrue (actual value), ypred(predicted value)\n output: confusion matrix heatmap and classification report\n \n \"\"\"\n \n print (classification_report(ytrue, ypred))\n \n print ('##################################################################')\n \n cnf_matrix = confusion_matrix(ytrue, ypred)\n sns.heatmap(cnf_matrix, cmap='coolwarm_r', annot = True, linewidths=.5, fmt = '.4g')\n plt.title('Confusion matrix')\n plt.xlabel('Prediction')\n plt.ylabel('Actual');\n\napple = pd.read_csv(path + 'merged_df.csv')\napple.head()\nprint (apple.shape)\n\napple.plot('date', 'diff');", "There is exterme fluctuation betweeen opening and closing prices of Apple, Inc. 
(as expected).\nLet's choose the features and label (bin_diff) and make the dataframe ready for machine learning and deep learning.", "aapl = apple.copy()[['date', 'sentiment', 'bin_diff']]\naapl.head()\n\nplt.hist(aapl['bin_diff']);", "Let's drop the observation with \"0\" and make it binary classification.", "aapl = aapl[aapl['bin_diff'] != 0]", "Also, to make the models work properly, from now on, we re-code loss category from -1 to 0.", "label = aapl['bin_diff'] == 1\nlabel = label.astype(int)", "let's look at the features and standardize them.", "InputDF = aapl.copy().drop('bin_diff', axis = 1)\nInputDF = InputDF.set_index('date')\n\nInputDF.head()\n\nInputDF = InputDF.apply(lambda x:(x -x.mean())/x.std())\n\nInputDF.head()\n\ntest_size = 600\nxtrain, xtest = InputDF.iloc[:test_size, :], InputDF.iloc[test_size:, :]\nytrain, ytest = label[:test_size], label[test_size:]", "Logistic regression", "logreg = LogisticRegression()\nlogreg_model = logreg.fit(xtrain, ytrain)\nlogpred = logreg_model.predict(xtest)\nlogscores = logreg_model.predict_proba(xtest)\n\nplot_confusionmatrix(ytest, logpred)\n\nplot_ROC(ytest, logscores, 'Logistic regression', 'r')\n\nplot_PR(ytest, logscores, 'Logistic regression', 'b')", "Support Vector Machines", "svm = SVC(probability=True)\nsvm_model = svm.fit(xtrain, ytrain)\nsvmpred = svm_model.predict(xtest)\nsvmscores = svm_model.predict_proba(xtest)\n\nplot_confusionmatrix(ytest, svmpred)\n\nplot_ROC(ytest, svmscores, 'SVM', 'r')\n\nplot_PR(ytest, svmscores, 'SVM', 'b')", "Random Forest Tree Classifiers", "rf = RandomForestClassifier()\nrf_model = rf.fit(xtrain, ytrain)\nrfpred = rf.predict(xtest)\nrfscores = rf.predict_proba(xtest)\n\nplot_confusionmatrix(ytest, rfpred)\n\nplot_ROC(ytest, logscores, 'Random Forest', 'r')\n\nplot_PR(ytest, logscores, 'Random Forest', 'b')", "Feed Forward Neural Network", "num_features = len(InputDF.columns)\ndropout=0.2\nhidden_1_size = 25\nhidden_2_size = 5\nnum_classes = label.nunique()\nNUM_EPOCHS=20\nBATCH_SIZE=1\nlr=0.0001\nnp.random.RandomState(52);\n\nval = (InputDF[:-test_size].values, label[:-test_size].values)\ntrain = (InputDF[-test_size:].values, label[-test_size:].values)\nNUM_TRAIN_BATCHES = int(len(train[0])/BATCH_SIZE)\nNUM_VAL_BATCHES = int(len(val[1])/BATCH_SIZE)\n\nclass Model():\n def __init__(self):\n global_step = tf.contrib.framework.get_or_create_global_step()\n self.input_data = tf.placeholder(dtype=tf.float32,shape=[None,num_features])\n self.target_data = tf.placeholder(dtype=tf.int32,shape=[None])\n self.dropout_prob = tf.placeholder(dtype=tf.float32,shape=[])\n with tf.variable_scope(\"ff\"):\n droped_input = tf.nn.dropout(self.input_data,keep_prob=self.dropout_prob)\n \n layer_1 = tf.contrib.layers.fully_connected(\n num_outputs=hidden_1_size,\n inputs=droped_input,\n )\n layer_2 = tf.contrib.layers.fully_connected(\n num_outputs=hidden_2_size,\n inputs=layer_1,\n )\n self.logits = tf.contrib.layers.fully_connected(\n num_outputs=num_classes,\n activation_fn =None,\n inputs=layer_2,\n )\n with tf.variable_scope(\"loss\"):\n \n self.losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = self.logits, \n labels = self.target_data)\n mask = (1-tf.sign(1-self.target_data)) #Don't give credit for flat days\n mask = tf.cast(mask,tf.float32)\n self.loss = tf.reduce_sum(self.losses)\n \n with tf.name_scope(\"train\"):\n opt = tf.train.AdamOptimizer(lr)\n gvs = opt.compute_gradients(self.loss)\n self.train_op = opt.apply_gradients(gvs, global_step=global_step)\n \n with 
tf.name_scope(\"predictions\"):\n self.probs = tf.nn.softmax(self.logits)\n self.predictions = tf.argmax(self.probs, 1)\n correct_pred = tf.cast(tf.equal(self.predictions, tf.cast(self.target_data,tf.int64)),tf.float64)\n self.accuracy = tf.reduce_mean(correct_pred)\n\nwith tf.Graph().as_default():\n model = Model()\n input_ = train[0]\n target = train[1]\n losses = []\n with tf.Session() as sess:\n init = tf.initialize_all_variables()\n sess.run([init])\n epoch_loss =0\n for e in range(NUM_EPOCHS):\n if epoch_loss >0 and epoch_loss <1:\n break\n epoch_loss =0\n for batch in range(0,NUM_TRAIN_BATCHES):\n \n start = batch*BATCH_SIZE\n end = start + BATCH_SIZE \n feed = {\n model.input_data:input_[start:end],\n model.target_data:target[start:end],\n model.dropout_prob:0.9\n }\n \n _,loss,acc = sess.run(\n [\n model.train_op,\n model.loss,\n model.accuracy,\n ]\n ,feed_dict=feed\n )\n epoch_loss+=loss\n losses.append(epoch_loss)\n #print('step - {0} loss - {1} acc - {2}'.format((1+batch+NUM_TRAIN_BATCHES*e),epoch_loss,acc))\n \n \n print('################ done training ################')\n final_preds =np.array([])\n final_scores =None\n for batch in range(0,NUM_VAL_BATCHES):\n \n start = batch*BATCH_SIZE\n end = start + BATCH_SIZE \n feed = {\n model.input_data:val[0][start:end],\n model.target_data:val[1][start:end],\n model.dropout_prob:1\n }\n \n acc,preds,probs = sess.run(\n [\n model.accuracy,\n model.predictions,\n model.probs\n ]\n ,feed_dict=feed\n )\n #print(acc)\n final_preds = np.concatenate((final_preds,preds),axis=0)\n if final_scores is None:\n final_scores = probs\n else:\n final_scores = np.concatenate((final_scores,probs),axis=0)\n print ('################ done testing ################')\n prediction_conf = final_scores[np.argmax(final_scores, 1)]\n\nplt.scatter(np.linspace(0, 1, len(losses)), losses);\nplt.title('Validation loss with epoch')\nplt.ylabel('Validation Loss')\nplt.xlabel('epoch progression');\n\nplot_confusionmatrix(ytest, final_preds)\n\nplot_ROC(ytest, final_scores, 'Feed forward neural net', 'r')\n\nplot_PR(ytest, final_probs, 'Feed forward neural net', 'b')", "Recursive Neural Nets", "RNN_HIDDEN_SIZE=4\nFIRST_LAYER_SIZE=50\nSECOND_LAYER_SIZE=10\nNUM_LAYERS=2\nBATCH_SIZE=1\nNUM_EPOCHS=25\nlr=0.0003\nNUM_TRAIN_BATCHES = int(len(train[0])/BATCH_SIZE)\nNUM_VAL_BATCHES = int(len(val[1])/BATCH_SIZE)\nATTN_LENGTH=30\nbeta=0\nnp.random.RandomState(52);\n\nclass RNNModel():\n def __init__(self):\n global_step = tf.contrib.framework.get_or_create_global_step()\n self.input_data = tf.placeholder(dtype=tf.float32,shape=[BATCH_SIZE,num_features])\n self.target_data = tf.placeholder(dtype=tf.int32,shape=[BATCH_SIZE])\n self.dropout_prob = tf.placeholder(dtype=tf.float32,shape=[])\n \n def makeGRUCells():\n base_cell = rnn.GRUCell(num_units=RNN_HIDDEN_SIZE,) \n layered_cell = rnn.MultiRNNCell([base_cell] * NUM_LAYERS,state_is_tuple=False) \n attn_cell =tf.contrib.rnn.AttentionCellWrapper(cell=layered_cell,attn_length=ATTN_LENGTH,state_is_tuple=False)\n return attn_cell\n \n self.gru_cell = makeGRUCells()\n self.zero_state = self.gru_cell.zero_state(1, tf.float32)\n \n self.start_state = tf.placeholder(dtype=tf.float32,shape=[1,self.gru_cell.state_size])\n \n \n\n with tf.variable_scope(\"ff\",initializer=xavier_initializer(uniform=False)):\n droped_input = tf.nn.dropout(self.input_data,keep_prob=self.dropout_prob)\n \n layer_1 = tf.contrib.layers.fully_connected(\n num_outputs=FIRST_LAYER_SIZE,\n inputs=droped_input,\n \n )\n layer_2 = tf.contrib.layers.fully_connected(\n 
num_outputs=RNN_HIDDEN_SIZE,\n inputs=layer_1,\n \n )\n \n \n split_inputs = tf.reshape(droped_input,shape=[1,BATCH_SIZE,num_features],name=\"reshape_l1\") # Each item in the batch is a time step, iterate through them\n split_inputs = tf.unstack(split_inputs,axis=1,name=\"unpack_l1\")\n states =[]\n outputs =[]\n with tf.variable_scope(\"rnn\",initializer=xavier_initializer(uniform=False)) as scope:\n state = self.start_state\n for i, inp in enumerate(split_inputs):\n if i >0:\n scope.reuse_variables()\n \n output, state = self.gru_cell(inp, state)\n states.append(state)\n outputs.append(output)\n self.end_state = states[-1]\n outputs = tf.stack(outputs,axis=1) # Pack them back into a single tensor\n outputs = tf.reshape(outputs,shape=[BATCH_SIZE,RNN_HIDDEN_SIZE])\n self.logits = tf.contrib.layers.fully_connected(\n num_outputs=num_classes,\n inputs=outputs,\n activation_fn=None\n )\n\n \n with tf.variable_scope(\"loss\"):\n self.penalties = tf.reduce_sum([beta*tf.nn.l2_loss(var) for var in tf.trainable_variables()])\n\n \n self.losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = self.logits,\n labels = self.target_data)\n self.loss = tf.reduce_sum(self.losses + beta*self.penalties)\n \n with tf.name_scope(\"train_step\"):\n opt = tf.train.AdamOptimizer(lr)\n gvs = opt.compute_gradients(self.loss)\n self.train_op = opt.apply_gradients(gvs, global_step=global_step)\n \n with tf.name_scope(\"predictions\"):\n self.probs = tf.nn.softmax(self.logits)\n self.predictions = tf.argmax(self.probs, 1)\n correct_pred = tf.cast(tf.equal(self.predictions, tf.cast(self.target_data,tf.int64)),tf.float64)\n self.accuracy = tf.reduce_mean(correct_pred)", "Training the RNN", "with tf.Graph().as_default():\n model = RNNModel()\n input_ = train[0]\n target = train[1]\n losses = []\n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run([init])\n loss = 2000\n \n for e in range(NUM_EPOCHS):\n state = sess.run(model.zero_state)\n epoch_loss =0\n for batch in range(0,NUM_TRAIN_BATCHES):\n start = batch*BATCH_SIZE\n end = start + BATCH_SIZE \n feed = {\n model.input_data:input_[start:end],\n model.target_data:target[start:end],\n model.dropout_prob:0.5,\n model.start_state:state\n }\n _,loss,acc,state = sess.run(\n [\n model.train_op,\n model.loss,\n model.accuracy,\n model.end_state\n ]\n ,feed_dict=feed\n )\n epoch_loss+=loss\n losses.append(epoch_loss)\n #print('step - {0} loss - {1} acc - {2}'.format((e),epoch_loss,acc))\n print('################ done training ################')\n \n final_preds =np.array([])\n final_scores = None\n for batch in range(0,NUM_VAL_BATCHES):\n start = batch*BATCH_SIZE\n end = start + BATCH_SIZE \n feed = {\n model.input_data:val[0][start:end],\n model.target_data:val[1][start:end],\n model.dropout_prob:1,\n model.start_state:state\n }\n acc,preds,state, probs = sess.run(\n [\n model.accuracy,\n model.predictions,\n model.end_state,\n model.probs\n ]\n ,feed_dict=feed\n )\n #print(acc)\n assert len(preds) == BATCH_SIZE\n final_preds = np.concatenate((final_preds,preds),axis=0)\n if final_scores is None:\n final_scores = probs\n else:\n final_scores = np.concatenate((final_scores,probs),axis=0)\n print('################ done testing ################')\n\nplt.scatter(np.linspace(0, 1, len(losses)), losses);\nplt.title('Validation loss with epoch')\nplt.ylabel('Validation Loss')\nplt.xlabel('epoch progression');\n\nplot_confusionmatrix(ytest, final_preds)\n\nplot_ROC(ytest, final_scores, 'Feed forward neural net', 'r')\n\nplot_PR(ytest, 
final_scores, 'Feed forward neural net', 'b')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ML4DS/ML4all
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
mit
[ "Spectral Clustering Algorithms\nNotebook version: 1.1 (Nov 17, 2017)\n\nAuthor: Jesús Cid Sueiro (jcid@tsc.uc3m.es)\n Jerónimo Arenas García (jarenas@tsc.uc3m.es)\n\nChanges: v.1.0 - First complete version. \n v.1.1 - Python 3 version", "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\n# use seaborn plotting defaults\nimport seaborn as sns; sns.set()\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets.samples_generator import make_blobs, make_circles\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics.pairwise import rbf_kernel\nfrom sklearn.cluster import SpectralClustering\n\n# For the graph representation\nimport networkx as nx", "1. Introduction\nThe key idea of spectral clustering algorithms is to search for groups of connected data. I.e, rather than pursuing compact clusters, spectral clustering allows for arbitrary shape clusters.\nThis can be illustrated with two artifitial datasets that we will use along this notebook.\n1.1. Gaussian clusters:\nThe first one consists of 4 compact clusters generated from a Gaussian distribution. This is the kind of dataset that are best suited to centroid-based clustering algorithms like $K$-means. If the goal of the clustering algorithm is to minimize the intra-cluster distances and find a representative prototype or centroid for each cluster, $K$-means may be a good option.", "N = 300\nnc = 4\nXs, ys = make_blobs(n_samples=N, centers=nc,\n random_state=6, cluster_std=0.60, shuffle = False)\nX, y = shuffle(Xs, ys, random_state=0)\n\nplt.scatter(X[:, 0], X[:, 1], s=30);\nplt.axis('equal')\nplt.show()", "Note that we have computed two data matrices: \n\n${\\bf X}$, which contains the data points in an arbitray ordering\n${\\bf X}_s$, where samples are ordered by clusters, according to the cluster id array, ${\\bf y}$.\n\nNote that both matrices contain the same data (rows) but in different order. The sorted matrix will be useful later for illustration purposes, but keep in mind that, in a real clustering application, vector ${\\bf y}$ is unknown (learning is not supervised), and only a data matrix with an arbitrary ordering (like ${\\bf X}$) will be available. \n1.2. Concentric rings\nThe second dataset contains two concentric rings. One could expect from a clustering algorithm to identify two different clusters, one per each ring of points. If this is the case, $K$-means or any other algorithm focused on minimizing distances to some cluster centroids is not a good choice.", "X2s, y2s = make_circles(n_samples=N, factor=.5, noise=.05, shuffle=False)\nX2, y2 = shuffle(X2s, y2s, random_state=0)\nplt.scatter(X2[:, 0], X2[:, 1], s=30)\nplt.axis('equal')\nplt.show()", "Note, again, that we have computed both the sorted (${\\bf X}_{2s}$) and the shuffled (${\\bf X}_2$) versions of the dataset in the code above.\nExercise 1:\nUsing the code of the previous notebook, run the $K$-means algorithm with 4 centroids for the two datasets. In the light of your results, why do you think $K$-means does not work well for the second dataset?", "# <SOL>\n# </SOL>", "Spectral clustering algorithms are focused on connectivity: clusters are determined by maximizing some measure of intra-cluster connectivity and maximizing some form of inter-cluster connectivity.\n2. The affinity matrix\n2.1. Similarity function\nTo implement a spectral clustering algorithm we must specify a similarity measure between data points. 
In this session, we will use the rbf kernel, that computes the similarity between ${\\bf x}$ and ${\\bf y}$ as:\n$$\\kappa({\\bf x},{\\bf y}) = \\exp(-\\gamma \\|{\\bf x}-{\\bf y}\\|^2)$$\nOther similarity functions can be used, like the kernel functions implemented in Scikit-learn (see the <a href=http://scikit-learn.org/stable/modules/metrics.html> metrics </a> module).\n2.2. Affinity matrix\nFor a dataset ${\\cal S} = {{\\bf x}^{(0)},\\ldots,{\\bf x}^{(N-1)}}$, the $N\\times N$ affinity matrix ${\\bf K}$ contains the similarity measure between each pair of samples. Thus, its components are\n$$K_{ij} = \\kappa\\left({\\bf x}^{(i)}, {\\bf x}^{(j)}\\right)$$\nThe following fragment of code illustrates all pairs of distances between any two points in the dataset.", "gamma = 0.5\nK = rbf_kernel(X, X, gamma=gamma)", "2.3. Visualization\nWe can visualize the affinity matrix as an image, by translating component values into pixel colors or intensities.", "plt.imshow(K, cmap='hot')\nplt.colorbar()\nplt.title('RBF Affinity Matrix for gamma = ' + str(gamma))\nplt.grid('off')\nplt.show()", "Despite the apparent randomness of the affinity matrix, it contains some hidden structure, that we can uncover by visualizing the affinity matrix computed with the sorted data matrix, ${\\bf X}_s$.", "Ks = rbf_kernel(Xs, Xs, gamma=gamma)\n\nplt.imshow(Ks, cmap='hot')\nplt.colorbar()\nplt.title('RBF Affinity Matrix for gamma = ' + str(gamma))\nplt.grid('off')\nplt.show()", "Note that, despite their completely different appearance, both affinity matrices contain the same values, but with a different order of rows and columns.\nFor this dataset, the sorted affinity matrix is almost block diagonal. Note, also, that the block-wise form of this matrix depends on parameter $\\gamma$.\nExercise 2:\nModify the selection of $\\gamma$, and check the effect of this in the appearance of the sorted similarity matrix. Write down the values for which you consider that the structure of the matrix better resembles the number of clusters in the datasets.\nOut from the diagonal block, similarities are close to zero. We can enforze a block diagonal structure be setting to zero the small similarity values. \nFor instance, by thresholding ${\\bf K}s$ with threshold $t$, we get the truncated (and sorted) affinity matrix\n$$\n\\overline{K}{s,ij} = K_{s,ij} \\cdot \\text{u}(K_{s,ij} - t)\n$$\n(where $\\text{u}()$ is the step function) which is block diagonal.\nExercise 3:\nCompute the truncated and sorted affinity matrix with $t=0.001$", "t = 0.001\n# Kt = <FILL IN> # Truncated affinity matrix\n# Kst = <FILL IN> # Truncated and sorted affinity matrix\n# </SOL>", "3. Affinity matrix and data graph\nAny similarity matrix defines a weighted graph in such a way that the weight of the edge linking ${\\bf x}^{(i)}$ and ${\\bf x}^{(j)}$ is $K_{ij}$.\nIf $K$ is a full matrix, the graph is fully connected (there is and edge connecting every pair of nodes). But we can get a more interesting sparse graph by setting to zero the edges with a small weights. \nFor instance, let us visualize the graph for the truncated affinity matrix $\\overline{\\bf K}$ with threshold $t$. You can also check the effect of increasing or decreasing $t$.", "G = nx.from_numpy_matrix(Kt)\ngraphplot = nx.draw(G, X, node_size=40, width=0.5,)\nplt.axis('equal')\nplt.show()", "Note that, for this dataset, the graph connects edges from the same cluster only. 
Therefore, the number of diagonal blocks in $\overline{\bf K}_s$ is equal to the number of connected components in the graph.\nNote, also, that the graph does not depend on the sample ordering in the data matrix: the graphs for any matrix ${\bf K}$ and its sorted version ${\bf K}_s$ are the same.\n4. The Laplacian matrix\nThe <a href = https://en.wikipedia.org/wiki/Laplacian_matrix>Laplacian matrix</a> of a given affinity matrix ${\bf K}$ is given by\n$${\bf L} = {\bf D} - {\bf K}$$\nwhere ${\bf D}$ is the diagonal degree matrix given by\n$$D_{ii}=\sum_{j=1}^{n} K_{ij}$$\n4.1. Properties of the Laplacian matrix\nThe Laplacian matrix of any symmetric matrix ${\bf K}$ has several interesting properties:\nP1.\n\n${\bf L}$ is symmetric and positive semidefinite. Therefore, all its eigenvalues $\lambda_0,\ldots, \lambda_{N-1}$ are non-negative. Recall that each eigenvector ${\bf v}$ with eigenvalue $\lambda$ satisfies\n$${\bf L} \cdot {\bf v} = \lambda {\bf v}$$\n\nP2.\n\n${\bf L}$ has at least one eigenvector with zero eigenvalue: indeed, for ${\bf v} = {\bf 1}_N = (1, 1, \ldots, 1)^\intercal$ we get\n$${\bf L} \cdot {\bf 1}_N = {\bf 0}_N$$\nwhere ${\bf 0}_N$ is the $N$-dimensional all-zero vector.\n\nP3.\n\nIf ${\bf K}$ is block diagonal, its Laplacian is block diagonal.\n\nP4.\n\nIf ${\bf L}$ is block diagonal with blocks ${\bf L}_0, {\bf L}_1, \ldots, {\bf L}_{c-1}$, then it has at least $c$ orthogonal eigenvectors with zero eigenvalue: indeed, each block ${\bf L}_i$ is the Laplacian matrix of the graph containing the samples in the $i$-th connected component, therefore, according to property P2,\n$${\bf L}_i \cdot {\bf 1}_{N_i} = {\bf 0}_{N_i}$$\nwhere $N_i$ is the number of samples in the $i$-th connected component.\nTherefore, if $${\bf v}_i = \left(\begin{array}{l} \n{\bf 0}_{N_0} \\\n\vdots \\\n{\bf 0}_{N_{i-1}} \\\n{\bf 1}_{N_i} \\\n{\bf 0}_{N_{i+1}} \\\n\vdots \\\n{\bf 0}_{N_{c-1}}\n\end{array}\n\right)\n$$ \nthen\n$${\bf L} \cdot {\bf v}_{i} = {\bf 0}_{N}$$\n\nWe can compute the Laplacian matrix for the given dataset and visualize the eigenvalues:", "Dst = np.diag(np.sum(Kst, axis=1))\nLst = Dst - Kst\n\n# Next, we compute the eigenvalues of the matrix\nw = np.linalg.eigvalsh(Lst)\nplt.figure()\nplt.plot(w, marker='.');\nplt.title('Eigenvalues of the matrix')\nplt.show()", "Exercise 4:\nVerify that ${\bf 1}_N$ is an eigenvector with zero eigenvalue. To do so, compute ${\bf L}_{st} \cdot {\bf 1}_N$ and verify that its <a href= https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html>euclidean norm</a> is close to zero (it may be not exactly zero due to finite precision errors).\nVerify that vectors ${\bf v}_i$ defined above (that you can compute using vi = (ys==i)) also have zero eigenvalue.", "# <SOL>\n# </SOL>", "Exercise 5:\nVerify that the spectral properties of the Laplacian matrix computed from ${\bf K}_{st}$ still apply using the unsorted matrix, ${\bf K}_t$: compute ${\bf L}_{t} \cdot {\bf v}'_{i}$, where ${\bf v}'_i$ is a binary vector with components equal to 1 at the positions corresponding to samples in cluster $i$ (that you can compute using vi = (y==i)), and verify that its euclidean norm is close to zero.", "# <SOL>\n# </SOL>", "Note that the position of 1's in eigenvectors ${\bf v}_i$ points out the samples in the $i$-th connected component.
This suggest the following tentative clustering algorithm:\n\nCompute the affinity matrix\nCompute the laplacian matrix\nCompute $c$ orthogonal eigenvectors with zero eigenvalue\nIf $v_{in}=1$, assign ${\\bf x}^{(n)}$ to cluster $i$. \n\nThis is the grounding idea of some spectral clustering algorithms. In this precise form, this algorithm does not usually work, for several reasons that we will discuss next, but with some modifications it becomes a powerfull method.\n4.2. Computing eigenvectors of the Laplacian Matrix\nOne of the reasons why the algorithm above may not work is that vectors ${\\bf v}'0, \\ldots,{\\bf v}'{c-1}$ are not the only zero eigenvectors or ${\\bf L}_t$: any linear combination of them is also a zero eigenvector. Eigenvector computation algorithms may return a different set of orthogonal eigenvectors.\nHowever, one can expect that eigenvector should have similar component in the positions corresponding to samples in the same connected component.", "wst, vst = np.linalg.eigh(Lst)\n\nfor n in range(nc):\n plt.plot(vst[:,n], '.-')", "4.3. Non block diagonal matrices.\nAnother reason to modify our tentative algorithm is that, in more realistic cases, the affinity matrix may have an imperfect block diagonal structure. In such cases, the smallest eigenvalues may be nonzero and eigenvectors may be not exactly piecewise constant.\nExercise 6\nPlot the eigenvector profile for the shuffled and not thresholded affinity matrix, ${\\bf K}$.", "# <SOL>\n# </SOL>", "Note that, despite the eigenvector components can not be used as a straighforward cluster indicator, they are strongly informative of the clustering structure. \n\nAll points in the same cluster have similar values of the corresponding eigenvector components $(v_{n0}, \\ldots, v_{n,c-1})$.\nPoints from different clusters have different values of the corresponding eigenvector components $(v_{n0}, \\ldots, v_{n,c-1})$.\n\nTherfore we can define vectors ${\\bf z}^{(n)} = (v_{n0}, \\ldots, v_{n,c-1})$ and apply a centroid based algorithm (like $K$-means) to identify all points with similar eigenvector components. The corresponding samples in ${\\bf X}$ become the final clusters of the spectral clustering algorithm. \nOne possible way to identify the cluster structure is to apply a $K$-means algorithm over the eigenvector coordinates. The steps of the spectral clustering algorithm become the following\n5. A spectral clustering (graph cutting) algorithm\n5.1. The steps of the spectral clustering algorithm.\nSummarizing, the steps of the spectral clustering algorithm for a data matrix ${\\bf X}$ are the following:\n\nCompute the affinity matrix, ${\\bf K}$. 
Optionally, truncate the smallest components to zero.\nCompute the laplacian matrix, ${\\bf L}$\nCompute the $c$ orthogonal eigenvectors with smallest eigenvalues, ${\\bf v}0,\\ldots,{\\bf v}{c-1}$\nConstruct the sample set ${\\bf Z}$ with rows ${\\bf z}^{(n)} = (v_{0n}, \\ldots, v_{c-1,n})$\nApply the $K$-means algorithms over ${\\bf Z}$ with $K=c$ centroids.\nAssign samples in ${\\bf X}$ to clusters: if ${\\bf z}^{(n)}$ is assigned by $K$-means to cluster $i$, assign sample ${\\bf x}^{(n)}$ in ${\\bf X}$ to cluster $i$.\n\nExercise 7:\nIn this exercise we will apply the spectral clustering algorithm to the two-rings dataset ${\\bf X}_2$, using $\\gamma = 20$, $t=0.1$ and $c = 2$ clusters.\n\nComplete step 1, and plot the graph induced by ${\\bf K}$", "# <SOL>\n# </SOL>", "Complete step 2, 3 and 4, and draw a scatter plot of the samples in ${\\bf Z}$", "# <SOL>\n# </SOL>", "Complete step 5", "est = KMeans(n_clusters=2)\nclusters = est.fit_predict(Z2t)", "Finally, complete step 6 and show, in a scatter plot, the result of the clustering algorithm", "plt.scatter(X2[:, 0], X2[:, 1], c=clusters, s=50, cmap='rainbow')\nplt.axis('equal')\nplt.show()", "5.2. Scikit-learn implementation.\nThe <a href=http://scikit-learn.org/stable/modules/generated/sklearn.cluster.SpectralClustering.html> spectral clustering algorithm </a> in Scikit-learn requires the number of clusters to be specified. It works well for a small number of clusters but is not advised when using many clusters and/or data.\nFinally, we are going to run spectral clustering on both datasets. Spend a few minutes figuring out the meaning of parameters of the Spectral Clustering implementation of Scikit-learn:\nhttp://scikit-learn.org/stable/modules/generated/sklearn.cluster.SpectralClustering.html\nNote that there is not equivalent parameter to our threshold $t$, which has been useful for the graph representations. However, playing with $\\gamma$ should be enough to get a good clustering.\nThe following piece of code executes the algorithm with an 'rbf' kernel. You can manually adjust the number of clusters and the parameter of the kernel to study the behavior of the algorithm. When you are done, you can also:\n\nModify the code to allow for kernels different than the 'rbf'\nRepeat the analysis for the second dataset (two_rings)", "n_clusters = 4\ngamma = .1 # Warning do not exceed gamma=100\nSpClus = SpectralClustering(n_clusters=n_clusters,affinity='rbf',\n gamma=gamma)\nSpClus.fit(X)\n\nplt.scatter(X[:, 0], X[:, 1], c=SpClus.labels_.astype(np.int), s=50, \n cmap='rainbow')\nplt.axis('equal')\nplt.show()\n\nnc = 2\ngamma = 50 #Warning do not exceed gamma=300\n\nSpClus = SpectralClustering(n_clusters=nc, affinity='rbf', gamma=gamma)\nSpClus.fit(X2)\n\nplt.scatter(X2[:, 0], X2[:, 1], c=SpClus.labels_.astype(np.int), s=50, \n cmap='rainbow')\nplt.axis('equal')\nplt.show()\n\nnc = 5\nSpClus = SpectralClustering(n_clusters=nc, affinity='nearest_neighbors')\nSpClus.fit(X2)\n\nplt.scatter(X2[:, 0], X2[:, 1], c=SpClus.labels_.astype(np.int), s=50, \n cmap='rainbow')\nplt.axis('equal')\nplt.show()", "5.2. Other clustering algorithms.\n5.2.1. Agglomerative Clustering algorithms\nBottom-up approach:\n\nAt the beginning, each data point is a different cluster\nAt each step of the algorithm two clusters are merged, according to certain performance criterion\nAt the end of the algorithm, all points belong to the root node\n\nIn practice, this creates a hierarchical tree, that can be visualized with a dendogram. 
We can cut the tree at different levels, in each case obtaining a different number of clusters.\n<img src=https://www.mathworks.com/help/stats/dendrogram_partial.png> \nCriteria for merging clusters\nWe merge the two closest clusters, where the distance between clusters is defined as:\n\nSingle: Distance between clusters is the minimum of the distances between any two points in the clusters\nComplete: Maximal distance between any two points in each cluster\nAverage: Average distance between points in both clusters\nCentroid: Distance between the (Euclidean) centroids of both clusters\nWard: We merge centroids so that the overall increment of {\\em within-cluster} variance is minimum. \n\nPython implementations\nHierarchical clustering may lead to clusters of very different sizes. Complete linkage is the worst strategy, while Ward gives the most regular sizes. However, the affinity (or distance used in clustering) cannot be varied with Ward, thus for non Euclidean metrics, average linkage is a good alternative. \nThere are at least three different implementations of the algorithm:\n\nScikit-learn: Only implements complete',ward', and `average' linkage methods. Allows for the definition of connectivity constraints\nScipy\nfastcluster: Similar to Scipy, but more efficient with respect to computation and memory." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
idc9/law-net
vertex_metrics_experiment/procedural_v_substantive_scotus.ipynb
mit
[ "repo_directory = '/Users/iaincarmichael/Dropbox/Research/law/law-net/'\n\ndata_dir = '/Users/iaincarmichael/data/courtlistener/'\n\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\n\n\n# graph package\nimport igraph as ig\n\n# our code\nsys.path.append(repo_directory + 'code/')\nfrom setup_data_dir import setup_data_dir, make_subnetwork_directory\nfrom pipeline.download_data import download_bulk_resource, download_master_edgelist, download_scdb\nfrom helpful_functions import case_info\n\nfrom stats.viz import *\nfrom stats.dim_reduction import *\nfrom stats.linear_model import *\n\nsys.path.append(repo_directory + 'vertex_metrics_experiment/code/')\nfrom rankscore_experiment_sort import *\nfrom rankscore_experiment_LR import *\nfrom make_tr_edge_df import *\n\n\n# which network to download data for\nnetwork_name = 'scotus' # 'federal', 'ca1', etc\n\n\n# some sub directories that get used\nraw_dir = data_dir + 'raw/'\nsubnet_dir = data_dir + network_name + '/'\ntext_dir = subnet_dir + 'textfiles/'\n\n\n# jupyter notebook settings\n%load_ext autoreload\n%autoreload 2\n%matplotlib inline\n\nG = ig.Graph.Read_GraphML(subnet_dir + network_name +'_network.graphml')", "compute metrics", "%time d_pagerank = G.pagerank()\n\n%time u_pagerank = G.as_undirected().pagerank()\n\n%time d_betweenness = G.betweenness(directed=True)\n\n%time u_betweenness = G.as_undirected().betweenness(directed=False)\n\n%time d_closeness = G.closeness(mode=\"IN\", normalized=True)\n\n%time u_closeness = G.as_undirected().closeness(normalized=True)\n\n%time d_eigen = G.eigenvector_centrality()\n\n%time u_eigen = G.as_undirected().eigenvector_centrality()\n\n%time hubs = G.hub_score()\n\n%time authorities = G.authority_score()\n\nindegree = G.indegree()\n\noutdegree = G.outdegree()\n\ndegree = G.degree()\n\ndf = pd.DataFrame(index=G.vs['name'])\n\ndf['year'] = G.vs['year']\n\ndf['indegree'] = indegree\ndf['outdegree'] = outdegree\ndf['degree'] = degree\ndf['d_pagerank'] = d_pagerank\ndf['u_pagerank'] = u_pagerank\ndf['d_betweenness'] = d_betweenness\ndf['u_betweenness'] = u_betweenness\ndf['d_closeness'] = d_closeness\ndf['u_closeness'] = u_closeness\ndf['d_eigen'] = d_eigen\ndf['u_eigen'] = u_eigen\ndf['hubs'] = hubs\ndf['authorities'] = authorities\n\nall_metrics = ['indegree', 'outdegree', 'degree',\n 'd_pagerank', 'u_pagerank',\n 'd_betweenness', 'u_betweenness',\n 'd_closeness', 'u_closeness',\n 'd_eigen', 'u_eigen',\n 'hubs', 'authorities']", "issue area\nProcedural\n- 1 Criminal Procedure\n- 4 Due Process\n- 6 Attorneys\n- 9 Judicial Power\nSubstantive\n- 2 Civil Rights\n- 3 First Amendment\n- 5 Privacy\n- 7 Unions\n- 8 Economic Activity\n- 12 Federal Taxation\n- 14 Private Action\nOther\n- 0 Missing\n- 10 Federalism\n- 11 Interstate Relations\n- 13 Miscellaneous\nhypothesis\n\nbetweeness/closeness favor procedural cases\neivenvector metrics (eigenvector centrality, hubs, authorities) favor substantive cases", "# map types to issues\ntype_to_issue = {'procedural': [1, 4, 6, 9],\n 'substantive': [2, 3, 5, 7, 8, 12, 14],\n 'other': [10, 11, 13, 0]}\n\n# map issues to type\nissue_to_type = {i: '' for i in range(13 + 1)}\nfor t in type_to_issue.keys():\n for i in type_to_issue[t]:\n issue_to_type[i] = t\n\n# create type\nG.vs['issueArea'] = [int(i) for i in G.vs['issueArea']]\nG.vs['type'] = [issue_to_type[i] for i in G.vs['issueArea']]\n\n# add to data frame\ndf['issueArea'] = G.vs['issueArea']\ndf['type'] = G.vs['type']\n\n# get type subsets\ndf_sub = df[df['type'] == 'substantive']\ndf_pro = 
df[df['type'] == 'procedural']\ndf_oth = df[df['type'] == 'other']\n\nprint 'num substantive: %d' % df_sub.shape[0]\nprint 'num procedural: %d' % df_pro.shape[0]\nprint 'num other: %d' % df_oth.shape[0]\n\ndf.to_csv(subnet_dir + 'issue_area/metrics.csv', index=True)", "compare metric vs. issue type", "df.columns\n\n\nmetric = 'authorities'\n\nbins = np.linspace(min(df[metric]), max(df[metric]), 100)\n\n# substantive\nplt.hist(df_sub[metric],\n bins=bins,\n color='red',\n label='substantive (mean: %1.5f)' % np.mean(df_sub[metric]))\n\n# procedural\nplt.hist(df_pro[metric],\n bins=bins,\n color='blue',\n label='procedural (mean: %1.5f)' % np.mean(df_pro[metric]))\n\n# other\nplt.hist(df_oth[metric],\n bins=bins,\n color='green',\n label='other (mean: %1.5f)' % np.mean(df_oth[metric]))\n\nplt.xlim([0, .2])\nplt.ylim([0, 2000])\n\nplt.xlabel(metric)\nplt.legend(loc='upper right')\n\n# look at propotion of top cases of each type\nT = 100\n\ntop_cases = df.sort_values(by=metric, ascending=False).iloc[0:T]['type']\ntop_breakdown = top_cases.value_counts(normalize=True)\n\n# compare to proportion of all cases\nall_breakdown = df['type'].value_counts(normalize=True)\n\ndiff = top_breakdown - all_breakdown\n\ndiff", "permutation test\nRank cases by metric then look at the proportion of the top T (=100) cases that are substantive.", "metric= 'indegree'\n\ndf_pro_sub = df[df['type'] != 'other']\n\nT = 100\n\n# observed proportion of top cases that are substantive\nobs_top_breakdown = df_pro_sub.\\\n sort_values(by=metric, ascending=False).\\\n iloc[0:T]['type'].\\\n value_counts(normalize=True)\n \nobs_prop_sub = obs_top_breakdown['substantive']\n\nR = 1000\n\n\nperm_prop_sub = [0] * R\nfor r in range(R):\n \n # randomly select T cases\n perm_indices = np.random.choice(range(df_pro_sub.shape[0]), replace=False, size=T)\n \n # compute the type breakdown of the T cases\n perm_breakdown = df_pro_sub.\\\n iloc[perm_indices]['type'].\\\n value_counts(normalize=True)\n \n # proportion of T cases that are substantive\n perm_prop_sub[r] = perm_breakdown['substantive']\n \nperm_prop_sub = np.array(perm_prop_sub)\npval = 1 - np.mean(perm_prop_sub < obs_prop_sub)\n\nplt.title('permutation test substantive vs. procedural (pval: %1.3f)' % pval)\nplt.hist(perm_prop_sub,\n color='blue',\n label='permutation')\n\nplt.axvline(obs_prop_sub,\n color='red',\n label='obs')\n\nplt.xlabel(metric)", "Results\nhubs, authorities, u_eigen, d_eign, d_betweeness, u_betweeness are significant (confirming hypothesis)\nTODO: recompute u_closeness\nPC plot", "df_pro_sub = df[df['type'] != 'other']\n\nU, D, V = get_PCA(df_pro_sub[all_metrics], scale=True)\n\nplot_2class_scores(U,\n classes = df_pro_sub['type'],\n start=6,\n n_comp=5)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]