Dataset fields per notebook-cell record: markdown (0–37k chars), code (1–33.3k chars), path (8–215 chars), repo_name (6–77 chars), license (15 classes).
Converting fragments to kde object
!cd $workDir; \ SIPSim fragment_kde \ ampFrags_wRand.pkl \ > ampFrags_wRand_kde.pkl
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Adding diffusion
!cd $workDir; \ SIPSim diffusion \ ampFrags_wRand_kde.pkl \ --np $nprocs \ > ampFrags_wRand_kde_dif.pkl
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Making an incorp config file
!cd $workDir; \ SIPSim incorpConfigExample \ --percTaxa 0 \ --percIncorpUnif 100 \ > PT0_PI100.config
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Adding isotope incorporation to BD distribution
!cd $workDir; \ SIPSim isotope_incorp \ ampFrags_wRand_kde_dif.pkl \ PT0_PI100.config \ --comm comm.txt \ --np $nprocs \ > ampFrags_wRand_kde_dif_incorp.pkl
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Calculating BD shift from isotope incorporation
!cd $workDir; \ SIPSim BD_shift \ ampFrags_wRand_kde_dif.pkl \ ampFrags_wRand_kde_dif_incorp.pkl \ --np $nprocs \ > ampFrags_wRand_kde_dif_incorp_BD-shift.txt
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Simulating gradient fractions
!cd $workDir; \ SIPSim gradient_fractions \ comm.txt \ > fracs.txt
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Simulating an OTU table
!cd $workDir; \ SIPSim OTU_table \ ampFrags_wRand_kde_dif_incorp.pkl \ comm.txt \ fracs.txt \ --abs 1e9 \ --np $nprocs \ > OTU_abs1e9.txt
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Plotting taxon abundances
%%R -i workDir setwd(workDir) # loading file tbl = read.delim('OTU_abs1e9.txt', sep='\t') %%R ## BD for G+C of 0 or 100 BD.GCp0 = 0 * 0.098 + 1.66 BD.GCp100 = 1 * 0.098 + 1.66 %%R -w 800 -h 300 # plotting absolute abundances tbl.s = tbl %>% group_by(library, BD_mid) %>% summarize(total_count = sum(count)) ## plot p = ggplot(tbl.s, aes(BD_mid, total_count)) + geom_area(stat='identity', alpha=0.3, position='dodge') + geom_histogram(stat='identity') + geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) + labs(x='Buoyant density') + theme_bw() + theme( text = element_text(size=16) ) p %%R -w 800 -h 300 # plotting number of taxa at each BD tbl.nt = tbl %>% filter(count > 0) %>% group_by(library, BD_mid) %>% summarize(n_taxa = n()) ## plot p = ggplot(tbl.nt, aes(BD_mid, n_taxa)) + geom_area(stat='identity', alpha=0.3, position='dodge') + geom_histogram(stat='identity') + geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) + labs(x='Buoyant density', y='Number of taxa') + theme_bw() + theme( text = element_text(size=16), legend.position = 'none' ) p %%R -w 800 -h 250 # plotting relative abundances ## plot p = ggplot(tbl, aes(BD_mid, count, fill=taxon)) + geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) + labs(x='Buoyant density') + theme_bw() + theme( text = element_text(size=16), legend.position = 'none' ) p + geom_area(stat='identity', position='dodge', alpha=0.5) %%R -w 800 -h 250 p + geom_area(stat='identity', position='fill')
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Subsampling from the OTU table
dist,loc,scale = seq_per_fraction !cd $workDir; \ SIPSim OTU_subsample \ --dist $dist \ --dist_params mean:$loc,sigma:$scale \ --walk 2 \ --min_size 10000 \ --max_size 200000 \ OTU_abs1e9.txt \ > OTU_abs1e9_sub.txt
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Testing/Plotting seq count distribution of subsampled fraction samples
%%R -h 300 -i workDir setwd(workDir) tbl = read.csv('OTU_abs1e9_sub.txt', sep='\t') tbl.s = tbl %>% group_by(library, fraction) %>% summarize(total_count = sum(count)) %>% ungroup() %>% mutate(library = as.character(library)) ggplot(tbl.s, aes(total_count)) + geom_density(fill='blue') %%R -h 300 -w 600 setwd(workDir) tbl.s = tbl %>% group_by(fraction, BD_min, BD_mid, BD_max) %>% summarize(total_count = sum(count)) ggplot(tbl.s, aes(BD_mid, total_count)) + geom_point() + geom_line() + labs(x='Buoyant density', y='Total sequences') + theme_bw() + theme( text = element_text(size=16) )
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Getting list of target taxa
%%R -i workDir inFile = paste(c(workDir, 'target_genome_index.txt'), collapse='/') tbl.target = read.delim(inFile, sep='\t', header=F) colnames(tbl.target) = c('OTUId', 'genome_file', 'genome_ID', 'X', 'Y', 'Z') tbl.target = tbl.target %>% distinct(OTUId) cat('Number of target OTUs: ', tbl.target$OTUId %>% unique %>% length, '\n') cat('----------\n') tbl.target %>% head(n=3)
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Plotting abundance distributions
%%R -w 800 -h 250 # plotting relative abundances tbl = tbl %>% group_by(fraction) %>% mutate(rel_abund = count / sum(count)) ## plot p = ggplot(tbl, aes(BD_mid, count, fill=taxon)) + geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) + labs(x='Buoyant density') + theme_bw() + theme( text = element_text(size=16), legend.position = 'none' ) p + geom_area(stat='identity', position='dodge', alpha=0.5) %%R -w 800 -h 250 p = ggplot(tbl, aes(BD_mid, rel_abund, fill=taxon)) + geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) + labs(x='Buoyant density') + theme_bw() + theme( text = element_text(size=16), legend.position = 'none' ) p + geom_area(stat='identity')
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Abundance distribution of just target taxa
%%R targets = tbl.target$OTUId %>% as.vector %>% unique tbl.f = tbl %>% filter(taxon %in% targets) tbl.f %>% head %%R -w 800 -h 250 # plotting absolute abundances ## plot p = ggplot(tbl.f, aes(BD_mid, count, fill=taxon)) + geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) + labs(x='Buoyant density') + theme_bw() + theme( text = element_text(size=16), legend.position = 'none' ) p + geom_area(stat='identity', position='dodge', alpha=0.5) %%R -w 800 -h 250 # plotting relative abundances p = ggplot(tbl.f, aes(BD_mid, rel_abund, fill=taxon)) + geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) + labs(x='Buoyant density') + theme_bw() + theme( text = element_text(size=16), legend.position = 'none' ) p + geom_area(stat='identity')
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Plotting 'true' taxon abundance distribution (from priming exp dataset)
%%R -i metaDataFile # loading priming_exp metadata file meta = read.delim(metaDataFile, sep='\t') meta %>% head(n=4) %%R -i otuTableFile # loading priming_exp OTU table tbl.otu.true = read.delim(otuTableFile, sep='\t') %>% select(OTUId, starts_with('X12C.700.28')) tbl.otu.true %>% head(n=3) %%R # editing table tbl.otu.true.w = tbl.otu.true %>% gather('sample', 'count', 2:ncol(tbl.otu.true)) %>% mutate(sample = gsub('^X', '', sample)) %>% group_by(sample) %>% mutate(rel_abund = count / sum(count)) %>% ungroup() %>% filter(count > 0) tbl.otu.true.w %>% head(n=5) %%R tbl.true.j = inner_join(tbl.otu.true.w, meta, c('sample' = 'Sample')) tbl.true.j %>% as.data.frame %>% head(n=3) %%R -w 800 -h 300 -i workDir # plotting number of taxa at each BD tbl = read.csv('OTU_abs1e9_sub.txt', sep='\t') tbl.nt = tbl %>% filter(count > 0) %>% group_by(library, BD_mid) %>% summarize(n_taxa = n()) ## plot p = ggplot(tbl.nt, aes(BD_mid, n_taxa)) + geom_area(stat='identity', alpha=0.5) + geom_point() + geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) + labs(x='Buoyant density', y='Number of taxa') + theme_bw() + theme( text = element_text(size=16), legend.position = 'none' ) p %%R -w 700 -h 350 tbl.true.j.s = tbl.true.j %>% filter(count > 0) %>% group_by(sample, Density) %>% summarize(n_taxa = sum(count > 0)) ggplot(tbl.true.j.s, aes(Density, n_taxa)) + geom_area(stat='identity', alpha=0.5) + geom_point() + geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) + labs(x='Buoyant density', y='Number of taxa') + theme_bw() + theme( text = element_text(size=16), legend.position = 'none' )
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Plotting total counts for each sample
%%R -h 300 -w 600 tbl.true.j.s = tbl.true.j %>% group_by(sample, Density) %>% summarize(total_count = sum(count)) ggplot(tbl.true.j.s, aes(Density, total_count)) + geom_point() + geom_line() + labs(x='Buoyant density', y='Total sequences') + theme_bw() + theme( text = element_text(size=16) )
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Plotting abundance distribution of target OTUs
%%R tbl.true.j.f = tbl.true.j %>% filter(OTUId %in% targets) %>% arrange(OTUId, Density) %>% group_by(sample) tbl.true.j.f %>% head(n=3) %>% as.data.frame %%R -w 800 -h 250 # plotting relative abundances ## plot ggplot(tbl.true.j.f, aes(Density, rel_abund, fill=OTUId)) + geom_area(stat='identity') + geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) + labs(x='Buoyant density') + theme_bw() + theme( text = element_text(size=16), legend.position = 'none' )
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Combining true and simulated OTU tables for target taxa
%%R tbl.f.e = tbl.f %>% mutate(library = 'simulation') %>% rename('density' = BD_mid) %>% select(-BD_min, -BD_max) tbl.true.e = tbl.true.j.f %>% select('taxon' = OTUId, 'fraction' = sample, 'density' = Density, count, rel_abund) %>% mutate(library = 'true') tbl.sim.true = rbind(tbl.f.e, tbl.true.e) %>% as.data.frame tbl.f.e = data.frame() tbl.true.e = data.frame() tbl.sim.true %>% head(n=3) %%R # check cat('Number of target taxa: ', tbl.sim.true$taxon %>% unique %>% length, '\n')
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Abundance distributions of each target taxon
%%R -w 900 -h 3500 tbl.sim.true.f = tbl.sim.true %>% ungroup() %>% filter(density >= 1.677) %>% filter(density <= 1.761) %>% group_by(taxon) %>% mutate(mean_rel_abund = mean(rel_abund)) %>% ungroup() tbl.sim.true.f$taxon = reorder(tbl.sim.true.f$taxon, -tbl.sim.true.f$mean_rel_abund) ggplot(tbl.sim.true.f, aes(density, rel_abund, color=library)) + geom_point() + geom_line() + theme_bw() + facet_wrap(~ taxon, ncol=4, scales='free_y') %%R tbl.otu.true.w %>% filter(OTUId == 'OTU.1') %>% as.data.frame()
ipynb/bac_genome/priming_exp/validation_sample/.ipynb_checkpoints/X12C.700.45.01_fracRichness-checkpoint.ipynb
nick-youngblut/SIPSim
mit
Version 1.0 Use the code from the previous while-loop example: you only need to add that, when it bumps into something, the robot moves backwards, turns a little (towards your preferred side), and stops.
while not touch(): forward() backward() sleep(1) left() sleep(1) stop()
task/navigation_teacher.ipynb
ecervera/mindstorms-nb
mit
Version 2.0 The robot's manoeuvre is supposed to let it avoid the obstacle and therefore go forward again. How can we program that? We need to repeat the whole block of instructions of the behaviour, including the loop. No problem: programming languages allow putting one loop inside another, which is called nested loops. Use a for loop to repeat the previous code 5 times.
for ...: while ...: ... ... for i in range(5): while not touch(): forward() backward() sleep(1) left() sleep(1) stop()
task/navigation_teacher.ipynb
ecervera/mindstorms-nb
mit
Version 3.0 <img src="img/interrupt.png" align="right"> And what if, instead of repeating 10 or 20 times, we want the robot to keep going until we stop it ourselves? We can do that with an infinite loop, and we will tell the program to stop with the interrupt kernel button. In Python, an infinite loop is written like this: python while True: statement When the program is interrupted, the instruction that was running at that moment is abandoned, and the robot needs to be stopped. In Python, this process is called an exception and it is handled like this: python try: while True: statement # the behaviour will go here except KeyboardInterrupt: statement # here we will stop the robot Use an infinite loop to repeat the robot's behaviour until you stop it.
try: while True: while not touch(): forward() backward() sleep(1) left() sleep(1) except KeyboardInterrupt: stop()
task/navigation_teacher.ipynb
ecervera/mindstorms-nb
mit
Version 4.0 The robot's behaviour, always turning to the same side, is a bit predictable, don't you think? Let's introduce an element of chance: programming languages have random number generators, which are like the dice of computers. Run the following code several times with Ctrl+Enter and check the results.
from random import random random()
task/navigation_teacher.ipynb
ecervera/mindstorms-nb
mit
The random function is like rolling a die, but instead of giving a value from 1 to 6, it gives a real number between 0 and 1. The robot can then use that value to decide whether to turn left or right. How? If the value is greater than 0.5 it turns to one side, and otherwise to the other. It will therefore turn at random, with a 50% probability for each side. Add the random turning decision to the code of the previous version:
try: while True: while not touch(): forward() backward() sleep(1) if random() > 0.5: left() else: right() sleep(1) except KeyboardInterrupt: stop()
task/navigation_teacher.ipynb
ecervera/mindstorms-nb
mit
Let's recap Before continuing, disconnect the robot:
disconnect()
task/navigation_teacher.ipynb
ecervera/mindstorms-nb
mit
Let's load in the datasets
book1 = pd.read_csv('datasets/game_of_thrones_network/asoiaf-book1-edges.csv') book2 = pd.read_csv('datasets/game_of_thrones_network/asoiaf-book2-edges.csv') book3 = pd.read_csv('datasets/game_of_thrones_network/asoiaf-book3-edges.csv') book4 = pd.read_csv('datasets/game_of_thrones_network/asoiaf-book4-edges.csv') book5 = pd.read_csv('datasets/game_of_thrones_network/asoiaf-book5-edges.csv')
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
The resulting DataFrame book1 has 5 columns: Source, Target, Type, weight, and book. Source and Target are the two nodes linked by an edge. A network can have directed or undirected edges, and in this network all the edges are undirected. The weight attribute of every edge tells us the number of interactions that the characters have had over the book, and the book column tells us the book number.
book1.head()
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
Once we have the data loaded as a pandas DataFrame, it's time to create a network. We create a graph for each book. It's possible to create one MultiGraph instead of 5 graphs, but it is easier to play with different graphs.
G_book1 = nx.Graph() G_book2 = nx.Graph() G_book3 = nx.Graph() G_book4 = nx.Graph() G_book5 = nx.Graph()
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
Let's populate the graph with edges from the pandas DataFrame.
for row in book1.iterrows(): G_book1.add_edge(row[1]['Source'], row[1]['Target'], weight=row[1]['weight'], book=row[1]['book']) for row in book2.iterrows(): G_book2.add_edge(row[1]['Source'], row[1]['Target'], weight=row[1]['weight'], book=row[1]['book']) for row in book3.iterrows(): G_book3.add_edge(row[1]['Source'], row[1]['Target'], weight=row[1]['weight'], book=row[1]['book']) for row in book4.iterrows(): G_book4.add_edge(row[1]['Source'], row[1]['Target'], weight=row[1]['weight'], book=row[1]['book']) for row in book5.iterrows(): G_book5.add_edge(row[1]['Source'], row[1]['Target'], weight=row[1]['weight'], book=row[1]['book']) books = [G_book1, G_book2, G_book3, G_book4, G_book5]
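As an aside, the same graphs can likely be built without the explicit iterrows loop: a minimal sketch using nx.from_pandas_edgelist (a standard NetworkX helper; the column names 'Source', 'Target', 'weight', and 'book' are the ones used above).

import networkx as nx

# Build the book-1 graph directly from the edge-list DataFrame;
# edge_attr copies the weight and book columns onto each edge.
G_book1_alt = nx.from_pandas_edgelist(book1, source='Source', target='Target',
                                      edge_attr=['weight', 'book'])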
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
Let's have a look at these edges.
list(G_book1.edges(data=True))[16] list(G_book1.edges(data=True))[400]
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
Finding the most important node, i.e. character, in these networks. Is it Jon Snow, Tyrion, Daenerys, or someone else? Let's see! Network Science offers us many different metrics to measure the importance of a node in a network, as we saw in the first part of the tutorial. Note that there is no "correct" way of calculating the most important node in a network; every metric has a different meaning. First, let's measure the importance of a node in a network by looking at the number of neighbors it has, that is, the number of nodes it is connected to. For example, an influential account on Twitter, where the follower-followee relationship forms the network, is an account which has a high number of followers. This measure of importance is called degree centrality. Using this measure, let's extract the top ten important characters from the first book (books[0]) and the fifth book (books[4]).
deg_cen_book1 = nx.degree_centrality(books[0]) deg_cen_book5 = nx.degree_centrality(books[4]) sorted(deg_cen_book1.items(), key=lambda x:x[1], reverse=True)[0:10] sorted(deg_cen_book5.items(), key=lambda x:x[1], reverse=True)[0:10] # Plot a histogram of degree centrality plt.hist(list(nx.degree_centrality(G_book4).values())) plt.show() d = {} for i, j in dict(nx.degree(G_book4)).items(): if j in d: d[j] += 1 else: d[j] = 1 x = np.log2(list((d.keys()))) y = np.log2(list(d.values())) plt.scatter(x, y, alpha=0.9) plt.show()
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
Exercise Create a new centrality measure, weighted_degree(Graph, weight), which takes in a Graph and a weight attribute and returns a weighted degree dictionary. Weighted degree is calculated by summing the weights of all edges of a node. Find the top five characters according to this measure.
def weighted_degree(G, weight): result = dict() for node in G.nodes(): weight_degree = 0 for n in G.edges([node], data=True): weight_degree += n[2]['weight'] result[node] = weight_degree return result plt.hist(list(weighted_degree(G_book1, 'weight').values())) plt.show() sorted(weighted_degree(G_book1, 'weight').items(), key=lambda x:x[1], reverse=True)[0:10]
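As a follow-up note (not part of the original exercise): NetworkX's degree view accepts a weight argument that sums edge weights per node, so the sketch below should reproduce the ranking from the hand-written weighted_degree above.

# Weighted degree via the built-in degree view; 'weight' is the edge attribute used above.
w_deg = dict(G_book1.degree(weight='weight'))
sorted(w_deg.items(), key=lambda x: x[1], reverse=True)[0:5]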
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
Let's do this for betweenness centrality and check whether it makes any difference. Haha, evil laugh
# First check unweighted, just the structure sorted(nx.betweenness_centrality(G_book1).items(), key=lambda x:x[1], reverse=True)[0:10] # Let's care about interactions now sorted(nx.betweenness_centrality(G_book1, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10]
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
PageRank The billion-dollar algorithm: PageRank works by counting the number and quality of links to a page to determine a rough estimate of how important the website is. The underlying assumption is that more important websites are likely to receive more links from other websites.
# by default weight attribute in pagerank is weight, so we use weight=None to find the unweighted results sorted(nx.pagerank_numpy(G_book1, weight=None).items(), key=lambda x:x[1], reverse=True)[0:10] sorted(nx.pagerank_numpy(G_book1, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10]
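A hedged note on the API: nx.pagerank_numpy has been removed from recent NetworkX releases, so on newer versions something like the sketch below, using the generic nx.pagerank entry point with the same weight argument, should give comparable results.

# Unweighted (weight=None) and weighted PageRank with the generic entry point.
sorted(nx.pagerank(G_book1, weight=None).items(), key=lambda x: x[1], reverse=True)[0:10]
sorted(nx.pagerank(G_book1, weight='weight').items(), key=lambda x: x[1], reverse=True)[0:10]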
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
Is there a correlation between these techniques? Exercise Find the correlation between these four techniques. pagerank betweenness_centrality weighted_degree degree centrality
cor = pd.DataFrame.from_records([nx.pagerank_numpy(G_book1, weight='weight'), nx.betweenness_centrality(G_book1, weight='weight'), weighted_degree(G_book1, 'weight'), nx.degree_centrality(G_book1)]) # cor.T cor.T.corr()
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
Evolution of importance of characters over the books According to degree centrality, the most important character in the first book is Eddard Stark, but he is not even in the top 10 of the fifth book. The importance changes over the course of the five books, because, you know, stuff happens ;) Let's look at the evolution of the degree centrality of a couple of characters, like Eddard Stark, Jon Snow, and Tyrion, who showed up in the top 10 of degree centrality in the first book. We create a dataframe with character columns and books as the index, where every entry is the degree centrality of the character in that particular book, and plot the evolution of the degree centrality of Eddard Stark, Jon Snow, and Tyrion. We can see that the importance of Eddard Stark in the network dies off, and with Jon Snow there is a drop in the fourth book but a sudden rise in the fifth book.
evol = [nx.degree_centrality(book) for book in books] evol_df = pd.DataFrame.from_records(evol).fillna(0) evol_df[['Eddard-Stark', 'Tyrion-Lannister', 'Jon-Snow']].plot() set_of_char = set() for i in range(5): set_of_char |= set(list(evol_df.T[i].sort_values(ascending=False)[0:5].index)) set_of_char
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
Exercise Plot the evolution of weighted degree centrality of the above mentioned characters over the 5 books, and repeat the same exercise for betweenness centrality.
evol_df[list(set_of_char)].plot(figsize=(29,15)) evol = [nx.betweenness_centrality(graph, weight='weight') for graph in [G_book1, G_book2, G_book3, G_book4, G_book5]] evol_df = pd.DataFrame.from_records(evol).fillna(0) set_of_char = set() for i in range(5): set_of_char |= set(list(evol_df.T[i].sort_values(ascending=False)[0:5].index)) evol_df[list(set_of_char)].plot(figsize=(19,10))
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
So what's up with Stannis Baratheon?
nx.draw(nx.barbell_graph(5, 1), with_labels=True) sorted(nx.degree_centrality(G_book5).items(), key=lambda x:x[1], reverse=True)[:5] sorted(nx.betweenness_centrality(G_book5).items(), key=lambda x:x[1], reverse=True)[:5]
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
Community detection in Networks A network is said to have community structure if the nodes of the network can be easily grouped into (potentially overlapping) sets of nodes such that each set of nodes is densely connected internally. We will use the Louvain community detection algorithm to find the modules in our graph.
plt.figure(figsize=(15, 15)) partition = community.best_partition(G_book1) size = float(len(set(partition.values()))) pos = nx.kamada_kawai_layout(G_book1) count = 0 colors = ['red', 'blue', 'yellow', 'black', 'brown', 'purple', 'green', 'pink'] for com in set(partition.values()): list_nodes = [nodes for nodes in partition.keys() if partition[nodes] == com] nx.draw_networkx_nodes(G_book1, pos, list_nodes, node_size = 20, node_color = colors[count]) count = count + 1 nx.draw_networkx_edges(G_book1, pos, alpha=0.2) plt.show() d = {} for character, par in partition.items(): if par in d: d[par].append(character) else: d[par] = [character] d nx.draw(nx.subgraph(G_book1, d[3])) nx.draw(nx.subgraph(G_book1, d[1])) nx.density(G_book1) nx.density(nx.subgraph(G_book1, d[4])) nx.density(nx.subgraph(G_book1, d[4]))/nx.density(G_book1)
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
Exercise Find the most important node in the partitions according to degree centrality of the nodes.
max_d = {} deg_book1 = nx.degree_centrality(G_book1) for group in d: temp = 0 for character in d[group]: if deg_book1[character] > temp: max_d[group] = character temp = deg_book1[character] max_d
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
A bit about power law in networks
G_random = nx.erdos_renyi_graph(100, 0.1) nx.draw(G_random) G_ba = nx.barabasi_albert_graph(100, 2) nx.draw(G_ba) # Plot a histogram of degree centrality plt.hist(list(nx.degree_centrality(G_random).values())) plt.show() plt.hist(list(nx.degree_centrality(G_ba).values())) plt.show() G_random = nx.erdos_renyi_graph(2000, 0.2) G_ba = nx.barabasi_albert_graph(2000, 20) d = {} for i, j in dict(nx.degree(G_random)).items(): if j in d: d[j] += 1 else: d[j] = 1 x = np.log2(list((d.keys()))) y = np.log2(list(d.values())) plt.scatter(x, y, alpha=0.9) plt.show() d = {} for i, j in dict(nx.degree(G_ba)).items(): if j in d: d[j] += 1 else: d[j] = 1 x = np.log2(list((d.keys()))) y = np.log2(list(d.values())) plt.scatter(x, y, alpha=0.9) plt.show()
archive/7-game-of-thrones-case-study-instructor.ipynb
ericmjl/Network-Analysis-Made-Simple
mit
Exercise Import the classifier object `sklearn.svm.SVC`, initialize it, fit it with the training data (no need to split a second time), and evaluate the quality of the created classifier using score(). Pipelining and cross-validation It's common to want to preprocess the data somehow, or in general to have several steps. This can be done easily with the Pipeline class. There are typically parameters involved, and you might want to select the best possible parameters.
from sklearn.decomposition import PCA # pca is a subspace method that projects the data into a lower-dimensional space from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier pca = PCA(n_components=2) knn = KNeighborsClassifier(n_neighbors=3) from sklearn.pipeline import Pipeline pipeline = Pipeline([("pca", pca), ("kneighbors", knn)]) parameters_grid = dict( pca__n_components=[1,2,3,4], kneighbors__n_neighbors=[1,2,3,4,5,6] ) grid_search = GridSearchCV(pipeline, parameters_grid) grid_search.fit(train_X, train_Y) grid_search.best_estimator_ # you can now test agains the held out part grid_search.best_estimator_.score(test_X, test_Y)
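For the SVC part of the exercise above, a minimal sketch, assuming the train/test split variables train_X, train_Y, test_X, test_Y from earlier in the notebook are still defined:

from sklearn.svm import SVC

# Initialize the classifier, fit it on the training split,
# and evaluate it on the held-out test split.
svc = SVC()
svc.fit(train_X, train_Y)
svc.score(test_X, test_Y)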
notebooks/examples/Extra Scikit-learn.ipynb
csc-training/python-introduction
mit
Implementing an Earley Parser A Grammar for Grammars Earley's algorithm has two inputs: - a grammar $G$ and - a string $s$. It then checks whether the string $s$ can be parsed with the given grammar. In order to input the grammar in a natural way, we first have to develop a parser for grammars. An example grammar that we want to parse is stored in the file simple.g.
!cat simple.g
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
We use <span style="font-variant:small-caps;">Antlr</span> to develop a parser for this Grammar. The pure grammar to parse this type of grammar is stored in the file Pure.g4.
!cat Pure.g4
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The annotated grammar is stored in the file Grammar.g4.
!cat -n Grammar.g4
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
We start by generating both scanner and parser.
!antlr4 -Dlanguage=Python3 Grammar.g4 from GrammarLexer import GrammarLexer from GrammarParser import GrammarParser import antlr4
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The function parse_grammar takes a filename as its argument and returns the grammar that is stored in the given file. The grammar is represented as a list of rules. Each rule is represented as a tuple. The example below will clarify this structure.
def parse_grammar(filename): input_stream = antlr4.FileStream(filename) lexer = GrammarLexer(input_stream) token_stream = antlr4.CommonTokenStream(lexer) parser = GrammarParser(token_stream) grammar = parser.start() return grammar.g parse_grammar('simple.g')
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
Earley's Algorithm Given a context-free grammar $G = \langle V, \Sigma, R, S \rangle$ and a string $s = x_1x_2 \cdots x_n \in \Sigma^*$ of length $n$, an *Earley item* is a pair of the form $$\langle A \rightarrow \alpha \bullet \beta, k \rangle$$ such that - $(A \rightarrow \alpha \beta) \in R\quad$ and - $k \in \{0,1,\cdots,n\}$. The class EarleyItem represents a single Earley item. - mVariable is the variable $A$, - mAlpha is $\alpha$, - mBeta is $\beta$, and - mIndex is $k$. Since we later have to store objects of class EarleyItem in sets, we have to implement the functions - __eq__, - __ne__, - __hash__. It is easiest to implement __hash__ by first converting the object into a string. Hence we also implement the function __repr__, that converts an EarleyItem into a string.
class EarleyItem(): def __init__(self, variable, alpha, beta, index): self.mVariable = variable self.mAlpha = alpha self.mBeta = beta self.mIndex = index def __eq__(self, other): return isinstance(other, EarleyItem) and \ self.mVariable == other.mVariable and \ self.mAlpha == other.mAlpha and \ self.mBeta == other.mBeta and \ self.mIndex == other.mIndex def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self.__repr__()) def __repr__(self): alphaStr = ' '.join(self.mAlpha) betaStr = ' '.join(self.mBeta) return f'<{self.mVariable} → {alphaStr} • {betaStr}, {self.mIndex}>'
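For illustration (a hypothetical rule, not necessarily one from simple.g): an item for the rule expr → expr '+' product, with the dot after the first symbol and index 0, could be built and printed as follows.

# Hypothetical Earley item for expr → expr '+' product, dot after 'expr', index 0.
item = EarleyItem('expr', ('expr',), ("'+'", 'product'), 0)
print(item)  # <expr → expr • '+' product, 0>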
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
Given an Earley item self, the function isComplete checks whether the Earley item self has the form $$\langle A \rightarrow \alpha \bullet, k \rangle,$$ i.e. whether the $\bullet$ is at the end of the grammar rule.
def isComplete(self): return self.mBeta == () EarleyItem.isComplete = isComplete del isComplete
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The function sameVar(self, C) checks whether the item following the dot is the same as the variable given as argument, i.e. sameVar(self, C) returns True if self is an Earley item of the form $$\langle A \rightarrow \alpha \bullet C\beta, k \rangle.$$
def sameVar(self, C): return len(self.mBeta) > 0 and self.mBeta[0] == C EarleyItem.sameVar = sameVar del sameVar
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The function scan(self, t) checks whether the item following the dot matches the token t, i.e. scan(self, t) returns True if self is an Earley item of the form $$\langle A \rightarrow \alpha \bullet t\beta, k \rangle.$$ The argument $t$ can either be the name of a token or a literal.
def scan(self, t): if len(self.mBeta) > 0: return self.mBeta[0] == t or self.mBeta[0] == "'" + t + "'" return False EarleyItem.scan = scan del scan
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
Given an Earley item, this function returns the name of the variable following the dot. If there is no variable following the dot, the function returns None. The function can distinguish variables from token names because variable names consist only of lower case letters.
def nextVar(self): if len(self.mBeta) > 0: var = self.mBeta[0] if var[0] != "'" and var.islower(): return var return None EarleyItem.nextVar = nextVar del nextVar
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The function moveDot(self) moves the $\bullet$ in the Earley item self, where self has the form $$\langle A \rightarrow \alpha \bullet \beta, k \rangle$$ over the next variable, token, or literal in $\beta$. It assumes that $\beta$ is not empty.
def moveDot(self): return EarleyItem(self.mVariable, self.mAlpha + (self.mBeta[0],), self.mBeta[1:], self.mIndex) EarleyItem.moveDot = moveDot del moveDot
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The class Grammar represents a context free grammar. It stores a list of the rules of the grammar. Each grammar rule of the form $$ a \rightarrow \beta $$ is stored as the tuple $(a,) + \beta$. The start symbol is assumed to be the variable on the left hand side of the first rule. To distinguish syntactical variables from tokens, variables contain only lower case letters, while tokens either contain only upper case letters or they start and end with a single quote character "'".
class Grammar(): def __init__(self, Rules): self.mRules = Rules
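To make the storage convention concrete (again a hypothetical rule, not necessarily the contents of simple.g): a rule expr → expr '+' product would be stored as the tuple below, i.e. (head,) + body with literals keeping their single quotes.

# (head,) + body for the rule expr → expr '+' product
rule = ('expr', 'expr', "'+'", 'product')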
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The function startItem returns the Earley item $$ \langle\hat{S} \rightarrow \bullet S, 0\rangle $$ where $S$ is the start variable of the given grammar and $\hat{S}$ is a new variable.
def startItem(self): return EarleyItem('Start', (), (self.startVar(),), 0) Grammar.startItem = startItem del startItem
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The function finishItem returns the Earley item $$ \langle\hat{S} \rightarrow S \bullet, 0\rangle $$ where $S$ is the start variable of the given grammar and $\hat{S}$ is a new variable.
def finishItem(self): return EarleyItem('Start', (self.startVar(),), (), 0) Grammar.finishItem = finishItem del finishItem
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The function startVar returns the start variable of the grammar. It is assumed that the first grammar rule starts with the start variable of the grammar.
def startVar(self): return self.mRules[0][0] Grammar.startVar = startVar del startVar
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The function toString creates a readable presentation of the grammar rules.
def toString(self): result = '' for head, *body in self.mRules: result += f'{head}: {body};\n' return result Grammar.__str__ = toString del toString
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The class EarleyParser implements the parsing algorithm of Jay Earley. The class maintains the following member variables: - mGrammar is the grammar that is used to parse the given token string. - mString is the list of tokens and literals that has to be parsed. As a hack, the first element of this list is None. Therefore, mString[i] is the ith token. - mStateList is a list of sets of Earley items. If $n$ is the length of the given token string (excluding the first element None), then $Q_i = \texttt{mStateList}[i]$. The idea is that the set $Q_i$ is the set of those Earley items that the parser could be in after it has read the tokens mString[1], $\cdots$, mString[i]. $Q_0$ is initialized as follows: $$ Q_0 = \bigl\{\langle\hat{S} \rightarrow \bullet S, 0\rangle\bigr\}. $$ The Earley items are interpreted as follows: If we have $$ \langle C \rightarrow \alpha \bullet \beta, k\rangle \in Q_i, $$ then we know the following: - After having read the tokens mString[:k+1] the parser tries to parse the variable $C$ in the token string mString[k+1:]. - After having read the token string mString[k+1:i+1] the parser has already recognized $\alpha$ and now needs to recognize $\beta$ in the token string mString[i+1:] in order to parse the variable $C$.
class EarleyParser(): def __init__(self, grammar, TokenList): self.mGrammar = grammar self.mString = [None] + TokenList # dirty hack so mString[1] is first token self.mStateList = [set() for i in range(len(TokenList)+1)] print('Grammar:\n') print(self.mGrammar) print(f'Input: {self.mString}\n') self.mStateList[0] = { self.mGrammar.startItem() }
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The method parse implements Earley's algorithm. For all states $Q_1$, $\cdots$, $Q_n$ we proceed as follows: - We apply the completion operation followed by the prediction operation. This is done until no more states are added to $Q_i$. (The inner while loop is not necessary if the grammar does not contain $\varepsilon$-rules.) - Finally, the scanning operation is applied to $Q_i$. After $Q_i$ has been computed, we proceed to compute $Q_{i+1}$. Parsing is successful iff $$ \langle\hat{S} \rightarrow S \bullet, 0\rangle \in Q_n $$
def parse(self): "run Earley's algorithm" n = len(self.mString) - 1 # mString[0] = None for i in range(0, n+1): if i + 1 <= n: next_token = self.mString[i+1] else: next_token = 'EOF' print('_' * 80) print(f'next token = {next_token}') print('_' * 80) change = True while change: change = self.complete(i) change = self.predict(i) or change self.scan(i) # print states print(f'\nQ{i}:') Qi = self.mStateList[i] for item in Qi: print(item) if i + 1 <= n: print(f'\nQ{i+1}:') Qip1 = self.mStateList[i+1] for item in Qip1: print(item) if self.mGrammar.finishItem() in self.mStateList[-1]: print('Parsing successful!') else: print('Parsing failed!') EarleyParser.parse = parse del parse
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The method complete(self, i) applies the completion operation to the state $Q_i$: If we have - $\langle C \rightarrow \gamma \bullet, j\rangle \in Q_i$ and - $\langle A \rightarrow \beta \bullet C \delta, k\rangle \in Q_j$, then the parser tried to parse the variable $C$ after having read mString[:j+1] and we know that $$ C \Rightarrow^* \texttt{mString[j+1:i+1]}, $$ i.e. the parser has recognized $C$ after having read mString[j+1:i+1]. Therefore the parser should proceed to recognize $\delta$ in state $Q_i$, so we add the Earley item $\langle A \rightarrow \beta C \bullet \delta, k\rangle$ to the set $Q_i$: $$\langle C \rightarrow \gamma \bullet, j\rangle \in Q_i \wedge \langle A \rightarrow \beta \bullet C \delta, k\rangle \in Q_j \;\rightarrow\; Q_i := Q_i \cup \bigl\{ \langle A \rightarrow \beta C \bullet \delta, k\rangle \bigr\} $$
def complete(self, i): change = False added = True Qi = self.mStateList[i] while added: added = False newQi = set() for item in Qi: if item.isComplete(): C = item.mVariable j = item.mIndex Qj = self.mStateList[j] for newItem in Qj: if newItem.sameVar(C): moved = newItem.moveDot() newQi.add(moved) if not (newQi <= Qi): change = True added = True print("completion:") for newItem in newQi: if newItem not in Qi: print(f'{newItem} added to Q{i}') self.mStateList[i] |= newQi Qi = self.mStateList[i] return change EarleyParser.complete = complete del complete
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The method self.predict(i) applies the prediction operation to the state $Q_i$: If $\langle A \rightarrow \beta \bullet C \delta, k \rangle \in Q_i$, then the parser tries to recognize $C\delta$ after having read mString[:i+1]. To this end it has to parse $C$ in the string mString[i+1:]. Therefore, if $C \rightarrow \gamma$ is a rule of our grammar, we add the Earley item $\langle C \rightarrow \bullet \gamma, i\rangle$ to the set $Q_i$: $$ \langle A \rightarrow \beta \bullet C \delta, k\rangle \in Q_i \wedge (C \rightarrow \gamma) \in R \;\rightarrow\; Q_i := Q_i \cup \bigl\{ \langle C \rightarrow \bullet\gamma, i\rangle\bigr\}. $$ As the right hand side $\gamma$ might start with a variable, the function uses a fixed-point iteration until no more Earley items are added to $Q_i$.
def predict(self, i): change = False added = True Qi = self.mStateList[i] while added: added = False newQi = set() for item in Qi: c = item.nextVar() if c != None: for rule in self.mGrammar.mRules: if c == rule[0]: newQi.add(EarleyItem(c, (), rule[1:], i)) if not (newQi <= Qi): change = True added = True print("prediction:") for newItem in newQi: if newItem not in Qi: print(f'{newItem} added to Q{i}') self.mStateList[i] |= newQi Qi = self.mStateList[i] return change EarleyParser.predict = predict del predict
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The function self.scan(i) applies the scanning operation to the state $Q_i$. If $\langle A \rightarrow \beta \bullet a \gamma, k\rangle \in Q_i$ and $a$ is a token, then the parser tries to recognize the right hand side of the grammar rule $$ A \rightarrow \beta a \gamma$$ and after having read mString[k+1:i+1] it has already recognized $\beta$. If we now have mString[i+1] == a, then the parser still has to recognize $\gamma$ in mString[i+2:]. Therefore, the Earley item $\langle A \rightarrow \beta a \bullet \gamma, k\rangle$ is added to the set $Q_{i+1}$: $$\langle A \rightarrow \beta \bullet a \gamma, k\rangle \in Q_i \wedge x_{i+1} = a \;\rightarrow\; Q_{i+1} := Q_{i+1} \cup \bigl\{ \langle A \rightarrow \beta a \bullet \gamma, k\rangle \bigr\} $$
def scan(self, i): Qi = self.mStateList[i] n = len(self.mString) - 1 # remember mStateList[0] == None if i + 1 <= n: a = self.mString[i+1] for item in Qi: if item.scan(a): self.mStateList[i+1].add(item.moveDot()) print('scanning:') print(f'{item.moveDot()} added to Q{i+1}') EarleyParser.scan = scan del scan import re
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The function tokenize transforms the string s into a list of tokens. See below for an example.
def tokenize(s): '''Transform the string s into a list of tokens. The string s is supposed to represent an arithmetic expression. ''' lexSpec = r'''([ \t]+) | # blanks and tabs ([1-9][0-9]*|0) | # number ([()]) | # parentheses ([-+*/]) | # arithmetical operators (.) # unrecognized character ''' tokenList = re.findall(lexSpec, s, re.VERBOSE) result = [] for ws, number, parenthesis, operator, error in tokenList: if ws: # skip blanks and tabs continue elif number: result += [ 'NUMBER' ] elif parenthesis: result += [ parenthesis ] elif operator: result += [ operator ] else: result += [ f'ERROR({error})'] return result tokenize('1 + 2 * 3')
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The function test takes two arguments. - file is the name of a file containing a grammar, - word is a string that should be parsed. word is first tokenized. Then the resulting token list is parsed using Earley's algorithm.
def test(file, word): Rules = parse_grammar(file) grammar = Grammar(Rules) TokenList = tokenize(word) ep = EarleyParser(grammar, TokenList) ep.parse() test('simple.g', '1 + 2 * 3')
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
The command below cleans the directory. If you are running Windows, you have to replace rm with del.
!rm GrammarLexer.* GrammarParser.* Grammar.tokens GrammarListener.py Grammar.interp !rm -r __pycache__ !ls
ANTLR4-Python/Earley-Parser/Earley-Parser.ipynb
Danghor/Formal-Languages
gpl-2.0
Univariate normal Generate data
from scipy.stats import norm data = norm(10, 2).rvs(20) data n = len(data) xbar = np.mean(data) s2 = np.var(data) n, xbar, s2
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Grid algorithm
mus = np.linspace(8, 12, 101) prior_mu = Pmf(1, mus) prior_mu.index.name = 'mu' sigmas = np.linspace(0.01, 5, 100) ps = sigmas**-2 prior_sigma = Pmf(ps, sigmas) prior_sigma.index.name = 'sigma' from utils import make_joint prior = make_joint(prior_mu, prior_sigma) from utils import normalize def update_norm(prior, data): """Update the prior based on data. prior: joint distribution of mu and sigma data: sequence of observations """ X, Y, Z = np.meshgrid(prior.columns, prior.index, data) likelihood = norm(X, Y).pdf(Z).prod(axis=2) posterior = prior * likelihood normalize(posterior) return posterior posterior = update_norm(prior, data) from utils import marginal posterior_mu_grid = marginal(posterior, 0) posterior_sigma_grid = marginal(posterior, 1) posterior_mu_grid.plot() decorate(title='Posterior distribution of mu') posterior_sigma_grid.plot(color='C1') decorate(title='Posterior distribution of sigma')
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Update Mostly following notation in Murphy, Conjugate Bayesian analysis of the Gaussian distribution
m0 = 0 kappa0 = 0 alpha0 = 0 beta0 = 0 m_n = (kappa0 * m0 + n * xbar) / (kappa0 + n) m_n kappa_n = kappa0 + n kappa_n alpha_n = alpha0 + n/2 alpha_n beta_n = beta0 + n*s2/2 + n * kappa0 * (xbar-m0)**2 / (kappa0 + n) / 2 beta_n def update_normal(prior, summary): m0, kappa0, alpha0, beta0 = prior n, xbar, s2 = summary m_n = (kappa0 * m0 + n * xbar) / (kappa0 + n) kappa_n = kappa0 + n alpha_n = alpha0 + n/2 beta_n = (beta0 + n*s2/2 + n * kappa0 * (xbar-m0)**2 / (kappa0 + n) / 2) return m_n, kappa_n, alpha_n, beta_n prior = 0, 0, 0, 0 summary = n, xbar, s2 update_normal(prior, summary)
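Restating what the update code above computes, in the same notation:

$$
m_n = \frac{\kappa_0 m_0 + n\bar{x}}{\kappa_0 + n}, \qquad
\kappa_n = \kappa_0 + n, \qquad
\alpha_n = \alpha_0 + \frac{n}{2}, \qquad
\beta_n = \beta_0 + \frac{n s^2}{2} + \frac{\kappa_0\, n\, (\bar{x} - m_0)^2}{2(\kappa_0 + n)}
$$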
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Posterior distribution of sigma
from scipy.stats import invgamma dist_sigma2 = invgamma(alpha_n, scale=beta_n) dist_sigma2.mean() dist_sigma2.std() sigma2s = np.linspace(0.01, 20, 101) ps = dist_sigma2.pdf(sigma2s) posterior_sigma2_invgammas = Pmf(ps, sigma2s) posterior_sigma2_invgammas.normalize() posterior_sigma2_invgammas.plot() decorate(xlabel='$\sigma^2$', ylabel='PDF', title='Posterior distribution of variance') sigmas = np.sqrt(sigma2s) posterior_sigma_invgammas = Pmf(ps, sigmas) posterior_sigma_invgammas.normalize() posterior_sigma_grid.make_cdf().plot(color='gray', label='grid') posterior_sigma_invgammas.make_cdf().plot(color='C1', label='invgamma') decorate(xlabel='$\sigma$', ylabel='PDF', title='Posterior distribution of standard deviation') posterior_sigma_invgammas.mean(), posterior_sigma_grid.mean() posterior_sigma_invgammas.std(), posterior_sigma_grid.std() 2 / np.sqrt(2 * (n-1))
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Posterior distribution of mu
from scipy.stats import t as student_t def make_student_t(df, loc, scale): return student_t(df, loc=loc, scale=scale) df = 2 * alpha_n precision = alpha_n * kappa_n / beta_n dist_mu = make_student_t(df, m_n, 1/np.sqrt(precision)) dist_mu.mean() dist_mu.std() np.sqrt(4/n) mus = np.linspace(8, 12, 101) ps = dist_mu.pdf(mus) posterior_mu_student = Pmf(ps, mus) posterior_mu_student.normalize() posterior_mu_student.plot() decorate(xlabel='$\mu$', ylabel='PDF', title='Posterior distribution of mu') posterior_mu_grid.make_cdf().plot(color='gray', label='grid') posterior_mu_student.make_cdf().plot(label='invgamma') decorate(xlabel='$\mu$', ylabel='CDF', title='Posterior distribution of mu') def make_posterior_mu(m_n, kappa_n, alpha_n, beta_n): df = 2 * alpha_n loc = m_n precision = alpha_n * kappa_n / beta_n dist_mu = make_student_t(df, loc, 1/np.sqrt(precision)) return dist_mu
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Posterior joint distribution
mu_mesh, sigma2_mesh = np.meshgrid(mus, sigma2s) joint = (dist_sigma2.pdf(sigma2_mesh) * norm(m_n, sigma2_mesh/kappa_n).pdf(mu_mesh)) joint_df = pd.DataFrame(joint, columns=mus, index=sigma2s) from utils import plot_contour plot_contour(joint_df) decorate(xlabel='$\mu$', ylabel='$\sigma^2$', title='Posterior joint distribution')
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Sampling from posterior predictive
sample_sigma2 = dist_sigma2.rvs(1000) sample_mu = norm(m_n, sample_sigma2 / kappa_n).rvs() sample_pred = norm(sample_mu, np.sqrt(sample_sigma2)).rvs() cdf_pred = Cdf.from_seq(sample_pred) cdf_pred.plot() sample_pred.mean(), sample_pred.var()
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Analytic posterior predictive
df = 2 * alpha_n precision = alpha_n * kappa_n / beta_n / (kappa_n+1) dist_pred = make_student_t(df, m_n, 1/np.sqrt(precision)) xs = np.linspace(2, 16, 101) ys = dist_pred.cdf(xs) plt.plot(xs, ys, color='gray', label='student t') cdf_pred.plot(label='sample') decorate(title='Predictive distribution') def make_posterior_pred(m_n, kappa_n, alpha_n, beta_n): df = 2 * alpha_n loc = m_n precision = alpha_n * kappa_n / beta_n / (kappa_n+1) dist_pred = make_student_t(df, loc, 1/np.sqrt(precision)) return dist_pred
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Multivariate normal Generate data
mean = [10, 20] sigma_x = 2 sigma_y = 3 rho = 0.3 cov = rho * sigma_x * sigma_y Sigma = [[sigma_x**2, cov], [cov, sigma_y**2]] Sigma from scipy.stats import multivariate_normal n = 20 data = multivariate_normal(mean, Sigma).rvs(n) data n = len(data) n xbar = np.mean(data, axis=0) xbar S = np.cov(data.transpose()) S np.corrcoef(data.transpose()) stds = np.sqrt(np.diag(S)) stds corrcoef = S / np.outer(stds, stds) corrcoef def unpack_cov(S): stds = np.sqrt(np.diag(S)) corrcoef = S / np.outer(stds, stds) return stds[0], stds[1], corrcoef[0][1] sigma_x, sigma_y, rho = unpack_cov(S) sigma_x, sigma_y, rho def pack_cov(sigma_x, sigma_y, rho): cov = sigma_x * sigma_y * rho return np.array([[sigma_x**2, cov], [cov, sigma_y**2]]) pack_cov(sigma_x, sigma_y, rho) S
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Update
m_0 = 0 Lambda_0 = 0 nu_0 = 0 kappa_0 = 0 m_n = (kappa_0 * m_0 + n * xbar) / (kappa_0 + n) m_n xbar diff = (xbar - m_0) D = np.outer(diff, diff) D Lambda_n = Lambda_0 + S + n * kappa_0 * D / (kappa_0 + n) Lambda_n S nu_n = nu_0 + n nu_n kappa_n = kappa_0 + n kappa_n
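Restating the multivariate update computed above (same symbols as in the code; here $S$ is the sample covariance returned by np.cov and $\bar{x}$ is the sample mean):

$$
m_n = \frac{\kappa_0 m_0 + n\bar{x}}{\kappa_0 + n}, \qquad
\Lambda_n = \Lambda_0 + S + \frac{\kappa_0\, n}{\kappa_0 + n}(\bar{x} - m_0)(\bar{x} - m_0)^T, \qquad
\nu_n = \nu_0 + n, \qquad
\kappa_n = \kappa_0 + n
$$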
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Posterior distribution of covariance
from scipy.stats import invwishart def make_invwishart(nu, Lambda): d, _ = Lambda.shape return invwishart(nu, scale=Lambda * (nu - d - 1)) dist_cov = make_invwishart(nu_n, Lambda_n) dist_cov.mean() S sample_Sigma = dist_cov.rvs(1000) np.mean(sample_Sigma, axis=0) res = [unpack_cov(Sigma) for Sigma in sample_Sigma] sample_sigma_x, sample_sigma_y, sample_rho = np.transpose(res) sample_sigma_x.mean(), sample_sigma_y.mean(), sample_rho.mean() unpack_cov(S) Cdf.from_seq(sample_sigma_x).plot(label=r'$\sigma_x$') Cdf.from_seq(sample_sigma_y).plot(label=r'$\sigma_y$') decorate(xlabel='Standard deviation', ylabel='CDF', title='Posterior distribution of standard deviation') Cdf.from_seq(sample_rho).plot() decorate(xlabel='Coefficient of correlation', ylabel='CDF', title='Posterior distribution of correlation')
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Evaluate the Inverse Wishart PDF
num = 51 sigma_xs = np.linspace(0.01, 10, num) sigma_ys = np.linspace(0.01, 10, num) rhos = np.linspace(-0.3, 0.9, num) index = pd.MultiIndex.from_product([sigma_xs, sigma_ys, rhos], names=['sigma_x', 'sigma_y', 'rho']) joint = Pmf(0, index) joint.head() dist_cov.pdf(S) for sigma_x, sigma_y, rho in joint.index: Sigma = pack_cov(sigma_x, sigma_y, rho) joint.loc[sigma_x, sigma_y, rho] = dist_cov.pdf(Sigma) joint.normalize() from utils import pmf_marginal posterior_sigma_x = pmf_marginal(joint, 0) posterior_sigma_y = pmf_marginal(joint, 1) marginal_rho = pmf_marginal(joint, 2) posterior_sigma_x.mean(), posterior_sigma_y.mean(), marginal_rho.mean() unpack_cov(S) posterior_sigma_x.plot(label='$\sigma_x$') posterior_sigma_y.plot(label='$\sigma_y$') decorate(xlabel='Standard deviation', ylabel='PDF', title='Posterior distribution of standard deviation') posterior_sigma_x.make_cdf().plot(color='gray') posterior_sigma_y.make_cdf().plot(color='gray') Cdf.from_seq(sample_sigma_x).plot(label=r'$\sigma_x$') Cdf.from_seq(sample_sigma_y).plot(label=r'$\sigma_y$') decorate(xlabel='Standard deviation', ylabel='CDF', title='Posterior distribution of standard deviation') marginal_rho.make_cdf().plot(color='gray') Cdf.from_seq(sample_rho).plot() decorate(xlabel='Coefficient of correlation', ylabel='CDF', title='Posterior distribution of correlation')
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Posterior distribution of mu
m_n sample_mu = [multivariate_normal(m_n, Sigma/kappa_n).rvs() for Sigma in sample_Sigma] sample_mu0, sample_mu1 = np.transpose(sample_mu) sample_mu0.mean(), sample_mu1.mean() xbar sample_mu0.std(), sample_mu1.std() 2 / np.sqrt(n), 3 / np.sqrt(n) Cdf.from_seq(sample_mu0).plot(label=r'$\mu_0$ sample') Cdf.from_seq(sample_mu1).plot(label=r'$\mu_1$ sample') decorate(xlabel=r'$\mu$', ylabel='CDF', title=r'Posterior distribution of $\mu$')
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Multivariate student t Let's use this implementation
from scipy.special import gammaln def multistudent_pdf(x, mean, shape, df): return np.exp(logpdf(x, mean, shape, df)) def logpdf(x, mean, shape, df): p = len(mean) vals, vecs = np.linalg.eigh(shape) logdet = np.log(vals).sum() valsinv = np.array([1.0/v for v in vals]) U = vecs * np.sqrt(valsinv) dev = x - mean maha = np.square(dev @ U).sum(axis=-1) t = 0.5 * (df + p) A = gammaln(t) B = gammaln(0.5 * df) C = p/2. * np.log(df * np.pi) D = 0.5 * logdet E = -t * np.log(1 + (1./df) * maha) return A - B - C - D + E d = len(m_n) x = m_n mean = m_n df = nu_n - d + 1 shape = Lambda_n / kappa_n multistudent_pdf(x, mean, shape, df) mu0s = np.linspace(8, 12, 91) mu1s = np.linspace(18, 22, 101) mu_mesh = np.dstack(np.meshgrid(mu0s, mu1s)) mu_mesh.shape ps = multistudent_pdf(mu_mesh, mean, shape, df) joint = pd.DataFrame(ps, columns=mu0s, index=mu1s) normalize(joint) plot_contour(joint) from utils import marginal posterior_mu0_student = marginal(joint, 0) posterior_mu1_student = marginal(joint, 1) posterior_mu0_student.make_cdf().plot(color='gray', label=r'$\mu_0 multi t$') posterior_mu1_student.make_cdf().plot(color='gray', label=r'$\mu_1 multi t$') Cdf.from_seq(sample_mu0).plot(label=r'$\mu_0$ sample') Cdf.from_seq(sample_mu1).plot(label=r'$\mu_1$ sample') decorate(xlabel=r'$\mu$', ylabel='CDF', title=r'Posterior distribution of $\mu$')
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Compare to analytic univariate distributions
prior = 0, 0, 0, 0 summary = n, xbar[0], S[0][0] summary params = update_normal(prior, summary) params dist_mu0 = make_posterior_mu(*params) dist_mu0.mean(), dist_mu0.std() mu0s = np.linspace(7, 12, 101) ps = dist_mu0.pdf(mu0s) posterior_mu0 = Pmf(ps, index=mu0s) posterior_mu0.normalize() prior = 0, 0, 0, 0 summary = n, xbar[1], S[1][1] summary params = update_normal(prior, summary) params dist_mu1 = make_posterior_mu(*params) dist_mu1.mean(), dist_mu1.std() mu1s = np.linspace(17, 23, 101) ps = dist_mu1.pdf(mu1s) posterior_mu1 = Pmf(ps, index=mu1s) posterior_mu1.normalize() posterior_mu0.make_cdf().plot(label=r'$\mu_0$ uni t', color='gray') posterior_mu1.make_cdf().plot(label=r'$\mu_1$ uni t', color='gray') Cdf.from_seq(sample_mu0).plot(label=r'$\mu_0$ sample') Cdf.from_seq(sample_mu1).plot(label=r'$\mu_1$ sample') decorate(xlabel=r'$\mu$', ylabel='CDF', title=r'Posterior distribution of $\mu$')
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Sampling from posterior predictive
sample_pred = [multivariate_normal(mu, Sigma).rvs() for mu, Sigma in zip(sample_mu, sample_Sigma)] sample_x0, sample_x1 = np.transpose(sample_pred) sample_x0.mean(), sample_x1.mean() sample_x0.std(), sample_x1.std() prior = 0, 0, 0, 0 summary = n, xbar[0], S[0][0] params = update_normal(prior, summary) dist_x0 = make_posterior_pred(*params) dist_x0.mean(), dist_x0.std() x0s = np.linspace(2, 18, 101) ps = dist_x0.pdf(x0s) pred_x0 = Pmf(ps, index=x0s) pred_x0.normalize() prior = 0, 0, 0, 0 summary = n, xbar[1], S[1][1] params = update_normal(prior, summary) dist_x1 = make_posterior_pred(*params) dist_x1.mean(), dist_x1.std() x1s = np.linspace(10, 30, 101) ps = dist_x1.pdf(x1s) pred_x1 = Pmf(ps, index=x1s) pred_x1.normalize() pred_x0.make_cdf().plot(label=r'$x_0$ student t', color='gray') pred_x1.make_cdf().plot(label=r'$x_1$ student t', color='gray') Cdf.from_seq(sample_x0).plot(label=r'$x_0$ sample') Cdf.from_seq(sample_x1).plot(label=r'$x_1$ sample') decorate(xlabel='Quantity', ylabel='CDF', title='Posterior predictive distributions')
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Comparing to the multivariate student t
d = len(m_n) x = m_n mean = m_n df = nu_n - d + 1 shape = Lambda_n * (kappa_n+1) / kappa_n multistudent_pdf(x, mean, shape, df) x0s = np.linspace(0, 20, 91) x1s = np.linspace(10, 30, 101) x_mesh = np.dstack(np.meshgrid(x0s, x1s)) x_mesh.shape ps = multistudent_pdf(x_mesh, mean, shape, df) joint = pd.DataFrame(ps, columns=x0s, index=x1s) normalize(joint) plot_contour(joint) from utils import marginal posterior_x0_student = marginal(joint, 0) posterior_x1_student = marginal(joint, 1) posterior_x0_student.make_cdf().plot(color='gray', label=r'$x_0$ multi t') posterior_x1_student.make_cdf().plot(color='gray', label=r'$x_1$ multi t') Cdf.from_seq(sample_x0).plot(label=r'$x_0$ sample') Cdf.from_seq(sample_x1).plot(label=r'$x_1$ sample') decorate(xlabel='Quantity', ylabel='CDF', title='Posterior predictive distributions')
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Bayesian linear regression

Generate data
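As a brief added note (not part of the original notebook), the synthetic data in the next cell follow a simple linear model; the simulation uses $\beta_0 = 5$ (inter), $\beta_1 = 2$ (slope), and $\sigma = 3$:

$$y_i = \beta_0 + \beta_1 x_i + \varepsilon_i, \qquad \varepsilon_i \sim \mathcal{N}(0, \sigma^2).$$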
inter, slope = 5, 2
sigma = 3
n = 20
xs = norm(0, 3).rvs(n)
xs = np.sort(xs)
ys = inter + slope * xs + norm(0, sigma).rvs(20)

plt.plot(xs, ys, 'o');

import statsmodels.api as sm

X = sm.add_constant(xs)
X

model = sm.OLS(ys, X)
results = model.fit()
results.summary()

beta_hat = results.params
beta_hat

# k = results.df_model
k = 2

s2 = results.resid @ results.resid / (n - k)
s2

s2 = results.ssr / (n - k)
s2

np.sqrt(s2)
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Grid algorithm
beta0s = np.linspace(2, 8, 71)
prior_inter = Pmf(1, beta0s, name='inter')
prior_inter.index.name = 'Intercept'

beta1s = np.linspace(1, 3, 61)
prior_slope = Pmf(1, beta1s, name='slope')
prior_slope.index.name = 'Slope'

sigmas = np.linspace(1, 6, 51)
ps = sigmas**-2
prior_sigma = Pmf(ps, sigmas, name='sigma')
prior_sigma.index.name = 'Sigma'
prior_sigma.normalize()
prior_sigma.plot()

from utils import make_joint

def make_joint3(pmf1, pmf2, pmf3):
    """Make a joint distribution with three parameters.

    pmf1: Pmf object
    pmf2: Pmf object
    pmf3: Pmf object

    returns: Pmf representing a joint distribution
    """
    joint2 = make_joint(pmf2, pmf1).stack()
    joint3 = make_joint(pmf3, joint2).stack()
    return Pmf(joint3)

prior3 = make_joint3(prior_slope, prior_inter, prior_sigma)
prior3.head()

from utils import normalize

def update_optimized(prior, data):
    """Posterior distribution of regression parameters
    `slope`, `inter`, and `sigma`.

    prior: Pmf representing the joint prior
    data: DataFrame with columns `x` and `y`

    returns: Pmf representing the joint posterior
    """
    xs = data['x']
    ys = data['y']
    sigmas = prior.columns
    likelihood = prior.copy()

    for slope, inter in prior.index:
        expected = slope * xs + inter
        resid = ys - expected
        resid_mesh, sigma_mesh = np.meshgrid(resid, sigmas)
        densities = norm.pdf(resid_mesh, 0, sigma_mesh)
        likelihood.loc[slope, inter] = densities.prod(axis=1)

    posterior = prior * likelihood
    normalize(posterior)
    return posterior

data = pd.DataFrame(dict(x=xs, y=ys))

from utils import normalize

posterior = update_optimized(prior3.unstack(), data)
normalize(posterior)

from utils import marginal

posterior_sigma_grid = marginal(posterior, 0)
posterior_sigma_grid.plot(label='grid')
decorate(title='Posterior distribution of sigma')

joint_posterior = marginal(posterior, 1).unstack()
plot_contour(joint_posterior)

posterior_beta0_grid = marginal(joint_posterior, 0)
posterior_beta1_grid = marginal(joint_posterior, 1)

posterior_beta0_grid.make_cdf().plot(label=r'$\beta_0$')
posterior_beta1_grid.make_cdf().plot(label=r'$\beta_1$')
decorate(title='Posterior distributions of parameters')
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Posterior distribution of sigma

According to Gelman et al., the posterior distribution of $\sigma^2$ is scaled inverse $\chi^2$ with $\nu = n - k$ and scale $s^2$. According to Wikipedia, that is equivalent to inverse gamma with parameters $\nu/2$ and $\nu s^2/2$.
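A quick numerical sanity check of that equivalence (added here as a sketch, not part of the original notebook; `nu_demo` and `s2_demo` are hypothetical stand-ins for $n-k$ and $s^2$):

# Draws from a scaled inverse chi-squared, nu*s2 / chi2(nu),
# should match invgamma(nu/2, scale=nu*s2/2).
from scipy.stats import chi2, invgamma

nu_demo, s2_demo = 18, 9.0
draws = nu_demo * s2_demo / chi2(nu_demo).rvs(100_000)
dist = invgamma(nu_demo / 2, scale=nu_demo * s2_demo / 2)
draws.mean(), dist.mean()   # both close to nu*s2/(nu-2) = 10.125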
nu = n - k
nu/2, nu*s2/2

from scipy.stats import invgamma

dist_sigma2 = invgamma(nu/2, scale=nu*s2/2)
dist_sigma2.mean()

sigma2s = np.linspace(0.01, 30, 101)
ps = dist_sigma2.pdf(sigma2s)
posterior_sigma2_invgamma = Pmf(ps, sigma2s)
posterior_sigma2_invgamma.normalize()
posterior_sigma2_invgamma.plot()

sigmas = np.sqrt(sigma2s)
posterior_sigma_invgamma = Pmf(ps, sigmas)
posterior_sigma_invgamma.normalize()

posterior_sigma_invgamma.mean(), posterior_sigma_grid.mean()

posterior_sigma_grid.make_cdf().plot(color='gray', label='grid')
posterior_sigma_invgamma.make_cdf().plot(label='invgamma')
decorate(title='Posterior distribution of sigma')
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Posterior distribution of sigma, updatable version

Per the Wikipedia page: https://en.wikipedia.org/wiki/Bayesian_linear_regression
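For reference (an added note, not in the original), the conjugate update that the next cell implements, starting from the improper prior $\Lambda_0 = 0$, $\mu_0 = 0$, $a_0 = b_0 = 0$, is:

$$\Lambda_n = \Lambda_0 + X^T X, \qquad \mu_n = \Lambda_n^{-1}\left(\Lambda_0 \mu_0 + X^T X \hat{\beta}\right),$$
$$a_n = a_0 + \frac{n}{2}, \qquad b_n = b_0 + \frac{1}{2}\left(y^T y + \mu_0^T \Lambda_0 \mu_0 - \mu_n^T \Lambda_n \mu_n\right).$$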
Lambda_0 = np.zeros((k, k))
Lambda_n = Lambda_0 + X.T @ X
Lambda_n

from scipy.linalg import inv

mu_0 = np.zeros(k)
mu_n = inv(Lambda_n) @ (Lambda_0 @ mu_0 + X.T @ X @ beta_hat)
mu_n

a_0 = 0
a_n = a_0 + n / 2
a_n

b_0 = 0
b_n = b_0 + (ys.T @ ys + mu_0.T @ Lambda_0 @ mu_0 - mu_n.T @ Lambda_n @ mu_n) / 2
b_n

a_n, nu/2
b_n, nu * s2 / 2
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Sampling the posterior of the parameters
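As a summary (an added note, not in the original), the sampling scheme in the next cell draws from the joint posterior under the noninformative prior, with $\nu = n - k$ and $V_\beta = (X^T X)^{-1}$:

$$\sigma^2 \sim \textrm{Inv-Gamma}\!\left(\tfrac{\nu}{2}, \tfrac{\nu s^2}{2}\right), \qquad \beta \mid \sigma^2 \sim \mathcal{N}\!\left(\hat{\beta},\, \sigma^2 V_\beta\right).$$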
sample_sigma2 = dist_sigma2.rvs(1000)
sample_sigma = np.sqrt(sample_sigma2)

from scipy.linalg import inv

V_beta = inv(X.T @ X)
V_beta

sample_beta = [multivariate_normal(beta_hat, V_beta * sigma2).rvs()
               for sigma2 in sample_sigma2]

np.mean(sample_beta, axis=0)
beta_hat

np.std(sample_beta, axis=0)
results.bse

sample_beta0, sample_beta1 = np.transpose(sample_beta)

Cdf.from_seq(sample_beta0).plot(label=r'$\beta_0$')
Cdf.from_seq(sample_beta1).plot(label=r'$\beta_1$')
decorate(title='Posterior distributions of the parameters')
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Posterior using multivariate Student t
x = beta_hat
mean = beta_hat
df = n - k
shape = V_beta * s2
multistudent_pdf(x, mean, shape, df)

low, high = sample_beta0.min(), sample_beta0.max()
low, high

beta0s = np.linspace(0.9*low, 1.1*high, 101)

low, high = sample_beta1.min(), sample_beta1.max()
beta1s = np.linspace(0.9*low, 1.1*high, 91)

beta0_mesh, beta1_mesh = np.meshgrid(beta0s, beta1s)
beta_mesh = np.dstack(np.meshgrid(beta0s, beta1s))
beta_mesh.shape

ps = multistudent_pdf(beta_mesh, mean, shape, df)
ps.shape

joint = pd.DataFrame(ps, columns=beta0s, index=beta1s)

from utils import normalize

normalize(joint)

from utils import plot_contour

plot_contour(joint)
decorate(xlabel=r'$\beta_0$', ylabel=r'$\beta_1$')

from utils import marginal

marginal_beta0_student = marginal(joint, 0)
marginal_beta1_student = marginal(joint, 1)

posterior_beta0_grid.make_cdf().plot(color='gray', label=r'grid $\beta_0$')
posterior_beta1_grid.make_cdf().plot(color='gray', label=r'grid $\beta_1$')
marginal_beta0_student.make_cdf().plot(label=r'student $\beta_0$', color='gray')
marginal_beta1_student.make_cdf().plot(label=r'student $\beta_1$', color='gray')
Cdf.from_seq(sample_beta0).plot(label=r'sample $\beta_0$')
Cdf.from_seq(sample_beta1).plot(label=r'sample $\beta_1$')
decorate()
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Sampling the predictive distribution
t = [X @ beta + norm(0, sigma).rvs(n)
     for beta, sigma in zip(sample_beta, sample_sigma)]
predictions = np.array(t)
predictions.shape

low, median, high = np.percentile(predictions, [5, 50, 95], axis=0)

plt.plot(xs, ys, 'o')
plt.plot(xs, median)
plt.fill_between(xs, low, high, color='C1', alpha=0.3)
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Modeling the predictive distribution
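For context (an added note, not in the original), the closed form that the cell below evaluates is the posterior predictive for a new design matrix $\tilde{X}$ under the noninformative prior, a multivariate Student t:

$$\tilde{y} \mid y \sim t_{\,n-k}\!\left(\tilde{X}\hat{\beta},\; s^2\!\left(I + \tilde{X}(X^T X)^{-1}\tilde{X}^T\right)\right).$$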
xnew = [1, 2, 3]
Xnew = sm.add_constant(xnew)
Xnew

t = [Xnew @ beta + norm(0, sigma).rvs(len(xnew))
     for beta, sigma in zip(sample_beta, sample_sigma)]
predictions = np.array(t)
predictions.shape

x0, x1, x2 = predictions.T

Cdf.from_seq(x0).plot()
Cdf.from_seq(x1).plot()
Cdf.from_seq(x2).plot()

mu_new = Xnew @ beta_hat
mu_new

cov_new = s2 * (np.eye(len(xnew)) + Xnew @ V_beta @ Xnew.T)
cov_new

x = mu_new
mean = mu_new
df = n - k
shape = cov_new
multistudent_pdf(x, mean, shape, df)

y1s = np.linspace(0, 20, 51)
y0s = np.linspace(0, 20, 61)
y2s = np.linspace(0, 20, 71)
mesh = np.stack(np.meshgrid(y0s, y1s, y2s), axis=-1)
mesh.shape

ps = multistudent_pdf(mesh, mean, shape, df)
ps.shape

ps /= ps.sum()
ps.sum()

p1s = ps.sum(axis=1).sum(axis=1)
p1s.shape

p0s = ps.sum(axis=0).sum(axis=1)
p0s.shape

p2s = ps.sum(axis=0).sum(axis=0)
p2s.shape

pmf_y0 = Pmf(p0s, y0s)
pmf_y1 = Pmf(p1s, y1s)
pmf_y2 = Pmf(p2s, y2s)

pmf_y0.mean(), pmf_y1.mean(), pmf_y2.mean()

pmf_y0.make_cdf().plot(color='gray')
pmf_y1.make_cdf().plot(color='gray')
pmf_y2.make_cdf().plot(color='gray')
Cdf.from_seq(x0).plot()
Cdf.from_seq(x1).plot()
Cdf.from_seq(x2).plot()

stop
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Leftovers

Related discussion saved for the future: https://stats.stackexchange.com/questions/78177/posterior-covariance-of-normal-inverse-wishart-not-converging-properly
from scipy.stats import chi2

class NormalInverseWishartDistribution(object):

    def __init__(self, mu, lmbda, nu, psi):
        self.mu = mu
        self.lmbda = float(lmbda)
        self.nu = nu
        self.psi = psi
        self.inv_psi = np.linalg.inv(psi)

    def sample(self):
        sigma = np.linalg.inv(self.wishartrand())
        return (np.random.multivariate_normal(self.mu, sigma / self.lmbda), sigma)

    def wishartrand(self):
        dim = self.inv_psi.shape[0]
        chol = np.linalg.cholesky(self.inv_psi)
        foo = np.zeros((dim, dim))
        for i in range(dim):
            for j in range(i+1):
                if i == j:
                    foo[i, j] = np.sqrt(chi2.rvs(self.nu - (i+1) + 1))
                else:
                    foo[i, j] = np.random.normal(0, 1)
        return np.dot(chol, np.dot(foo, np.dot(foo.T, chol.T)))

    def posterior(self, data):
        n = len(data)
        mean_data = np.mean(data, axis=0)
        sum_squares = np.sum([np.array(np.matrix(x - mean_data).T * np.matrix(x - mean_data))
                              for x in data], axis=0)
        mu_n = (self.lmbda * self.mu + n * mean_data) / (self.lmbda + n)
        lmbda_n = self.lmbda + n
        nu_n = self.nu + n
        dev = mean_data - self.mu
        psi_n = (self.psi + sum_squares +
                 self.lmbda * n / (self.lmbda + n) * np.array(dev.T @ dev))
        return NormalInverseWishartDistribution(mu_n, lmbda_n, nu_n, psi_n)

x = NormalInverseWishartDistribution(np.array([0, 0]) - 3, 1, 3, np.eye(2))
samples = [x.sample() for _ in range(100)]
data = [np.random.multivariate_normal(mu, cov) for mu, cov in samples]
y = NormalInverseWishartDistribution(np.array([0, 0]), 1, 3, np.eye(2))
z = y.posterior(data)

print('mu_n: {0}'.format(z.mu))
print('psi_n: {0}'.format(z.psi))

from scipy.linalg import inv
from scipy.linalg import cholesky

def wishartrand(nu, Lambda):
    d, _ = Lambda.shape
    chol = cholesky(Lambda)
    foo = np.empty((d, d))
    for i in range(d):
        for j in range(i+1):
            if i == j:
                foo[i, j] = np.sqrt(chi2.rvs(nu - (i+1) + 1))
            else:
                foo[i, j] = np.random.normal(0, 1)
    return np.dot(chol, np.dot(foo, np.dot(foo.T, chol.T)))

sample = [wishartrand(nu_n, Lambda_n) for i in range(1000)]
np.mean(sample, axis=0)
Lambda_n
examples/normal.ipynb
AllenDowney/ThinkBayes2
mit
Keys for each of the columns in the orbit (Keplerian state) report.
utc = 0
sma = 1
ecc = 2
inc = 3
raan = 4
aop = 5
ma = 6
ta = 7
dslwp/DSLWP-B deorbit.ipynb
daniestevez/jupyter_notebooks
gpl-3.0
Plot the orbital parameters which vary significantly between different tracking files.
#fig1 = plt.figure(figsize=[15, 8], facecolor='w')
fig_peri = plt.figure(figsize=[15, 8], facecolor='w')
fig_peri_deorbit = plt.figure(figsize=[15, 8], facecolor='w')
fig_apo = plt.figure(figsize=[15, 8], facecolor='w')
fig3 = plt.figure(figsize=[15, 8], facecolor='w')
fig4 = plt.figure(figsize=[15, 8], facecolor='w')
fig4_rap = plt.figure(figsize=[15, 8], facecolor='w')
fig5 = plt.figure(figsize=[15, 8], facecolor='w')
fig6 = plt.figure(figsize=[15, 8], facecolor='w')

#sub1 = fig1.add_subplot(111)
sub_peri = fig_peri.add_subplot(111)
sub_peri_deorbit = fig_peri_deorbit.add_subplot(111)
sub_apo = fig_apo.add_subplot(111)
sub3 = fig3.add_subplot(111)
sub4 = fig4.add_subplot(111)
sub4_rap = fig4_rap.add_subplot(111)
sub5 = fig5.add_subplot(111)
sub6 = fig6.add_subplot(111)
subs = [sub_peri, sub_peri_deorbit, sub_apo, sub3, sub4, sub4_rap, sub5, sub6]

for file in ['orbit_deorbit.txt', 'orbit_deorbit2.txt', 'orbit_deorbit3.txt']:
    orbit = load_orbit_file(file)
    t = Time(mjd2unixtimestamp(orbit[:,utc]), format='unix')
    #sub1.plot(t.datetime, orbit[:,sma])
    sub_peri.plot(t.datetime, orbit[:,sma]*(1-orbit[:,ecc]))
    deorbit_sel = (mjd2unixtimestamp(orbit[:,utc]) >= 1564012800) & (mjd2unixtimestamp(orbit[:,utc]) <= 1564963200)
    if np.any(deorbit_sel):
        sub_peri_deorbit.plot(t[deorbit_sel].datetime, orbit[deorbit_sel,sma]*(1-orbit[deorbit_sel,ecc]))
    sub_apo.plot(t.datetime, orbit[:,sma]*(1+orbit[:,ecc]))
    sub3.plot(t.datetime, orbit[:,ecc])
    sub4.plot(t.datetime, orbit[:,aop])
    sub4_rap.plot(t.datetime, np.fmod(orbit[:,aop] + orbit[:,raan], 360))
    sub5.plot(t.datetime, orbit[:,inc])
    sub6.plot(t.datetime, orbit[:,raan])

sub_peri.axhline(y=1737, color='red')
sub_peri_deorbit.axhline(y=1737, color='red')

month_locator = mdates.MonthLocator()
day_locator = mdates.DayLocator()
for sub in subs:
    sub.set_xlabel('Time')
    sub.xaxis.set_major_locator(month_locator)
    sub.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
    sub.xaxis.set_tick_params(rotation=45)
sub_peri_deorbit.xaxis.set_major_locator(day_locator)
sub_peri_deorbit.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))

#sub1.set_ylabel('SMA (km)')
sub_peri.set_ylabel('Periapsis radius (km)')
sub_peri_deorbit.set_ylabel('Periapsis radius (km)')
sub_apo.set_ylabel('Apoapsis radius (km)')
sub3.set_ylabel('ECC')
sub4.set_ylabel('AOP (deg)')
sub4_rap.set_ylabel('RAOP (deg)')
sub5.set_ylabel('INC (deg)')
sub6.set_ylabel('RAAN (deg)')

#sub1.set_title('Semi-major axis')
sub_peri.set_title('Periapsis radius')
sub_peri_deorbit.set_title('Periapsis radius')
sub_apo.set_title('Apoapsis radius')
sub3.set_title('Eccentricity')
sub4.set_title('Argument of periapsis')
sub4_rap.set_title('Right ascension of periapsis')
sub5.set_title('Inclination')
sub6.set_title('Right ascension of ascending node')

for sub in subs:
    sub.legend(['Before periapsis lowering', 'After periapsis lowering', 'Latest ephemeris'])
sub_peri.legend(['Before periapsis lowering', 'After periapsis lowering', 'Latest ephemeris', 'Lunar radius']);
sub_peri_deorbit.legend(['Before periapsis lowering', 'After periapsis lowering', 'Latest ephemeris', 'Lunar radius']);
dslwp/DSLWP-B deorbit.ipynb
daniestevez/jupyter_notebooks
gpl-3.0
Try running a few tests on a subset of users; the keys are our unique user IDs. We proceed as follows for each user ID (a compact sketch of the full per-user loop follows this list):

1. Create a user dataframe with the following columns: (review_text, review rating, business_id)
2. Create a list of unique business IDs for that user
3. Connect to the MongoDB server and pull all of the reviews for the restaurants that the user has reviewed
4. Create a restaurant dataframe with the following columns: (review_text, biz rating, business_id)
5. Do an 80/20 training/test split, randomizing over the set of the user's reviewed restaurants
6. Train the LSI model on the set of training reviews, get the number of topics used in fitting
7. Set up the FeatureUnion with the desired features, then fit according to the train reviews and transform the train reviews
8.
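A compact, hypothetical sketch of the per-user loop (the helper names `make_user_df`, `yml.make_biz_df`, and `yml.fit_lsi` come from the cells below; `uid` stands for one user ID, and the FeatureUnion step is only indicated because its features are defined elsewhere):

# Steps 1-7 for a single user id; reuses helpers from the following cells.
user_specific_reviews = users_dict[uid]
user_df = make_user_df(user_specific_reviews)                  # step 1
business_ids = list(set(user_df['biz_id']))                    # step 2
restreview = {biz: list(reviews.find({'business_id': biz}))    # step 3
              for biz in business_ids}
restaurant_df = yml.make_biz_df(uid, restreview)               # step 4
# step 5: split business_ids into training_set / test_set (see the cell below)
# step 6: lsi, topics, dictionary = yml.fit_lsi(train_reviews)
# step 7: build a FeatureUnion, fit on train_reviews, then transform them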
##### Test Machine Learning Algorithms
ip = 'Insert IP here'
conn = MongoClient(ip, 27017)
conn.database_names()

db = conn.get_database('cleaned_data')
reviews = db.get_collection('restaurant_reviews')
machine_learning/User_Sample_test_draft_ed.ipynb
georgetown-analytics/yelp-classification
mit
1. Create a user dataframe with the following columns: (review_text, review rating, business_id)
useridlist = []
for user in users_dict.keys():
    useridlist.append(user)
print(useridlist[1])

def make_user_df(user_specific_reviews):
    #Input: user_specific_reviews, a list of reviews for a specific user
    #Output: A dataframe with the columns (review_text, rating, biz_id)
    user_reviews = []
    user_ratings = []
    business_ids = []
    for review in user_specific_reviews:
        user_reviews.append(review['text'])
        user_ratings.append(review['stars'])
        business_ids.append(review['business_id'])
    ###WE SHOULD MAKE OUR OWN PUNCTUATION RULES
    #https://www.tutorialspoint.com/python/string_translate.htm
    #I'm gonna have to go and figure out what this does -ed
    #user_reviews = [review.encode('utf-8').translate(None, string.punctuation) for review in user_reviews]
    user_df = pd.DataFrame({'review_text': user_reviews, 'rating': user_ratings, 'biz_id': business_ids})
    return user_df

#test to make sure users_dict and make_user_df work
user_specific_reviews = users_dict[useridlist[0]]
x = make_user_df(user_specific_reviews)
x.head()
machine_learning/User_Sample_test_draft_ed.ipynb
georgetown-analytics/yelp-classification
mit
2. Create a list of unique business IDs for that user
business_ids = list(set(user['biz_id']))
machine_learning/User_Sample_test_draft_ed.ipynb
georgetown-analytics/yelp-classification
mit
3. Connect to the MongoDB server and pull all of the reviews for the restaurants that the user has reviewed
restreview = {}
for i in range(0, len(business_ids)):
    rlist = []
    for obj in reviews.find({'business_id': business_ids[i]}):
        rlist.append(obj)
    restreview[business_ids[i]] = rlist
machine_learning/User_Sample_test_draft_ed.ipynb
georgetown-analytics/yelp-classification
mit
4. Create a restaurant dataframe with the following columns: (review_text, biz rating, business_id)
restaurant_df = yml.make_biz_df(user, restreview)
machine_learning/User_Sample_test_draft_ed.ipynb
georgetown-analytics/yelp-classification
mit
5. Do an 80/20 training/test split, randomizing over the set of the user's reviewed restaurants
#Create a training and test sample from the user-reviewed restaurants
split_samp = .30
random_int = random.randint(1, len(business_ids)-1)
len_random = int(len(business_ids) * split_samp)
test_set = business_ids[random_int:random_int+len_random]
training_set = business_ids[0:random_int] + business_ids[random_int+len_random:len(business_ids)]

train_reviews, train_ratings = [], []

#Create a list of training reviews and training ratings
for rest_id in training_set:
    train_reviews.extend(list(user_df[user_df['biz_id'] == rest_id]['review_text']))
    train_ratings.extend(list(user_df[user_df['biz_id'] == rest_id]['rating']))

#Transform the star labels into a binary class problem, 0 if rating is < 4 else 1
train_labels = [1 if x >= 4 else 0 for x in train_ratings]
machine_learning/User_Sample_test_draft_ed.ipynb
georgetown-analytics/yelp-classification
mit
6. Train the LSI model on the set of training reviews, get the number of topics used in fitting
#this is just for my understanding of how the model is working under the hood
def fit_lsi(train_reviews):
    #Input: train_reviews is a list of reviews that will be used to train the LSI feature transformer
    #Output: A trained LSI model and the transformed training reviews
    texts = [[word for word in review.lower().split() if (word not in stop_words)]
             for review in train_reviews]
    dictionary = corpora.Dictionary(texts)
    corpus = [dictionary.doc2bow(text) for text in texts]
    numpy_matrix = matutils.corpus2dense(corpus, num_terms=10000)
    singular_values = np.linalg.svd(numpy_matrix, full_matrices=False, compute_uv=False)
    mean_sv = sum(list(singular_values))/len(singular_values)
    topics = int(mean_sv)
    tfidf = models.TfidfModel(corpus)
    corpus_tfidf = tfidf[corpus]
    lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=topics)
    return lsi, topics, dictionary

#Fit LSI model and return number of LSI topics
lsi, topics, dictionary = yml.fit_lsi(train_reviews)
machine_learning/User_Sample_test_draft_ed.ipynb
georgetown-analytics/yelp-classification
mit