```python #In this program, we calculate the largest water cluster on Earth in the presence of different levels of water #Libraries from scipy.ndimage.filters import gaussian_filter import matplotlib.pyplot as plt from netCDF4 import Dataset import numpy as np import os from tqdm import tqdm from scipy.ndimage import measurements import time import sympy import concurrent.futures ``` ```python #This function gives the different resolution of the earth map. we use it to extract the orginal resolution. #the code has been written by def Etopo(lon_area, lat_area, resolution): ### Input # resolution: resolution of topography for both of longitude and latitude [deg] # (Original resolution is 0.0167 deg) # lon_area and lat_area: the region of the map which you want like [100, 130], [20, 25] ### ### Output # Mesh type longitude, latitude, and topography data ### # Read NetCDF data data = Dataset("ETOPO1_Bed_c_gdal.grd", "r") # Get data lon_range = data.variables['x_range'][:] lat_range = data.variables['y_range'][:] topo_range = data.variables['z_range'][:] spacing = data.variables['spacing'][:] dimension = data.variables['dimension'][:] z = data.variables['z'][:] lon_num = dimension[0] lat_num = dimension[1] # Prepare array lon_input = np.zeros(lon_num); lat_input = np.zeros(lat_num) for i in range(lon_num): lon_input[i] = lon_range[0] + i * spacing[0] for i in range(lat_num): lat_input[i] = lat_range[0] + i * spacing[1] # Create 2D array lon, lat = np.meshgrid(lon_input, lat_input) # Convert 2D array from 1D array for z value topo = np.reshape(z, (lat_num, lon_num)) # Skip the data for resolution if ((resolution < spacing[0]) | (resolution < spacing[1])): print('Set the highest resolution') else: skip = int(resolution/spacing[0]) lon = lon[::skip,::skip] lat = lat[::skip,::skip] topo = topo[::skip,::skip] topo = topo[::-1] # Select the range of map range1 = np.where((lon>=lon_area[0]) & (lon<=lon_area[1])) lon = lon[range1]; lat = lat[range1]; topo = topo[range1] range2 = np.where((lat>=lat_area[0]) & (lat<=lat_area[1])) lon = lon[range2]; lat = lat[range2]; topo = topo[range2] # Convert 2D again lon_num = len(np.unique(lon)) lat_num = len(np.unique(lat)) lon = np.reshape(lon, (lat_num, lon_num)) lat = np.reshape(lat, (lat_num, lon_num)) topo = np.reshape(topo, (lat_num, lon_num)) return lon, lat, topo ``` ```python # convert degrees to radians def degree2radians(degree): return degree*np.pi/180 ``` ```python #defining the orginal resoluton of the data resolution = 0.0167 # resolution = 1 lon_area = [-180., 180.] lat_area = [-90., 90.] # Get mesh-shape topography data lon_topo, lat_topo, topo = Etopo(lon_area, lat_area, resolution) lon_topo = degree2radians(lon_topo) lat_topo = degree2radians(lat_topo) ``` ```python #The Course graining part. 
we define the size of the coarse-graining window with N and coarse-grain the data with the function rebin N = 10 def rebin(a, shape): sh = shape[0],a.shape[0]//shape[0],shape[1],a.shape[1]//shape[1] return a.reshape(sh).mean(-1).mean(1) Width = int(10800/N) Length = int(21600/N) topo = rebin(topo,(Width, Length)) lat_topo = rebin(lat_topo,(Width, Length)) lon_topo = rebin(lon_topo,(Width, Length)) ``` ```python #we extract all the elevations and sort them in ascending order indexes = np.argsort(topo.flatten()) height = np.sort(topo.flatten()) #defining the parallelization variables l = len(height) d = sympy.divisors(l) num_iter = d[int(len(d)/2)] num_proc = d[int(len(d)/2) - 1] ``` ```python #water clusters total_landmass = np.zeros(len(height)) big_cluster = np.zeros(len(height)) total_lat = np.sum(np.abs(np.cos(lat_topo))) all_pos = np.shape(lat_topo)[0]*np.shape(lat_topo)[1] squares1 = np.zeros((num_proc, num_iter)) squares2 = np.zeros((num_proc, num_iter)) def water_function(ii): total_landmass = np.zeros(num_iter) big_cluster = np.zeros(num_iter) iso_height = np.zeros(num_iter) iterr = 0 for i in range(ii*num_iter, ii*num_iter + num_iter): sphere = 0 new_topo=np.zeros(height.shape, bool) new_topo[indexes[:(i+1)]] = True new_topo = new_topo.reshape((Width, Length)) pp = (i+1)/(all_pos) lw, num = measurements.label(new_topo) for l in range(len(lw)): if(lw[l][0] != 0 and lw[l][-1] != 0 and lw[l][-1] != lw[l][0]): lw[lw == lw[l][0]] = lw[l][-1] biggest_size = 0 elements = np.unique(lw) elements = elements[elements != 0] unique_clustersize = np.zeros(len(elements)) iterat = 0 for j in elements: unique_clustersize[iterat] = np.sum(lw == j) iterat +=1 elements = elements[unique_clustersize.argsort()] a = 0 if len(elements)>2: for n in elements[[-1,-2,-3]]: mask = (lw == n) a = np.sum(np.abs(np.cos((mask*lat_topo)[(mask*lat_topo) != 0]))) if a>biggest_size: biggest_size = a elif len(elements) == 2: for n in elements[[-1,-2]]: mask = (lw == n) a = np.sum(np.abs(np.cos((mask*lat_topo)[(mask*lat_topo) != 0]))) if a>biggest_size: biggest_size = a else: mask = (lw == elements[0]) biggest_size = np.sum(np.abs(np.cos((mask*lat_topo)[(mask*lat_topo) != 0]))) total_landmass[iterr] = pp big_cluster[iterr] = float(biggest_size / total_lat) iterr += 1 return ii, big_cluster, total_landmass, iso_height with concurrent.futures.ProcessPoolExecutor() as executor: for row, result1, result2, _ in executor.map(water_function, range(num_proc)): print(row) squares1[row] = result1 squares2[row] = result2 np.save("/home/complex/c++/Earth/big_cluster_" + str(N) + ".npy", squares1.flatten()) np.save("/home/complex/c++/Earth/total_landmass_" + str(N) + ".npy", squares2.flatten()) ```
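Once the parallel run finishes, the two `.npy` files written above contain the percolation curve. The sketch below is a follow-up added here for illustration (not part of the original notebook): it simply reloads those arrays, assuming the same save paths and the same value of `N`, and plots the area-weighted size of the largest water cluster against the flooded fraction of the grid.

```python
# Minimal follow-up sketch: load the arrays saved above and plot the percolation curve.
# Assumes the save paths and the value of N used in the previous cell.
import numpy as np
import matplotlib.pyplot as plt

N = 10
big_cluster = np.load("/home/complex/c++/Earth/big_cluster_" + str(N) + ".npy")
total_landmass = np.load("/home/complex/c++/Earth/total_landmass_" + str(N) + ".npy")

plt.figure()
plt.plot(total_landmass, big_cluster)
plt.xlabel("fraction of flooded cells")
plt.ylabel("relative size of the largest water cluster")
plt.show()
```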
# Polynomial Unconstrained Boolean Optimization Problem in MBQC <em> Copyright (c) 2021 Institute for Quantum Computing, Baidu Inc. All Rights Reserved. </em> In the tutorial [Measurement-based Quantum Approximate Optimization Algorithm](QAOA_EN.ipynb), we give a brief introduction to the **polynomial unconstrained boolean optimization (PUBO) problem** and propose the **measurement-based quantum approximate optimization algorithm (MB-QAOA)** to solve it. For interested readers, please refer to the previous tutorial for more information. In this tutorial, we will showcase two specific examples as practical demonstrations of MB-QAOA. The first one is a concrete PUBO problem, while the second one is a **maximum cut (MaxCut)** problem. ## Example: PUBO Problem Let us first briefly review what a PUBO problem is. Consider a polynomial of $n$ variables $x = \{x_1,\cdots,x_n\}$, $$ C(x) = \sum_{\lambda \in \Lambda } \alpha_{\lambda} \prod_{j \in \lambda} x_j,\tag{1} $$ where $x_i \in \{0,1\}$ is a boolean variable, $\underset{j \in \lambda}{\prod} x_j$ is a monomial, $\lambda \subseteq [n]:= \{1, 2, ..., n\}$ is a set of indexes, $\Lambda$ is the set of index sets, $\alpha_\lambda$ is the real coefficient of monomial. In PUBO, $C(x)$ is called the objective polynomial. We hope to find an optimal solution $x^* = \{x_1^*, x_2^*, ..., x_n^*\} $ maximizing the value of objective polynomial. That is to find $$ x^* = \underset{x}{\text{argmax}} \ C(x).\tag{2} $$ For code implementation, we require that a standard polynomial is input as a list whose first item is the number of variables and the second item is a dictionary of all the monomials ('cons' stands for the constant item). In the dictionary, we make monomial variables split with ',' as keys and the corresponding coefficients as values. For example, suppose we want to input a polynomial $x_1 + x_2 - x_3 + x_1 x_2 - x_1 x_2 x_3 + 0.5$, we need to code as follows: ```python # Number of variables var_num = 3 # Polynomial as a dictionary poly_dict = {'x_1': 1, 'x_2': 1, 'x_3': -1, 'x_1,x_2': 1, 'x_1,x_2,x_3': -1, 'cons':0.5} # Construct the list required polynomial = [var_num, poly_dict] ``` **Note:** As the variables are boolean, the power of variables in a monomial should be no greater than 1. That is, each variable should appear at most once in a key of the dictionary. For instance, it is not a valid to input something like {'x_1,x_1,x_2': 1}. Also, we set variable subscripts by consecutive numbers starting from '1' to be consistent with math conventions. A polynomial like $x_1 x_2 + x_6$ will raise an error automatically. A valid polynomial should be like $x_1x_2 + x_3$. For convenience, we provide a function `is_poly_valid` in `pubo` to check the validity of the user's input. If the polynomial is valid, it will print a statement "The polynomial is valid.". Otherwise, an error will be raised. ```python from paddle_quantum.mbqc.QAOA.pubo import is_poly_valid ``` We also provide a function `random_poly` to generate a random boolean polynomial with a given number of variables. ```python from paddle_quantum.mbqc.QAOA.pubo import random_poly ``` **Note:** The randomly generated polynomial is not always valid and we still need to check the validity before calculation. 
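To make the dictionary format above concrete, here is a small, plain-Python sketch (independent of the `paddle_quantum` helpers; the function `evaluate_poly` is introduced here only for illustration) that evaluates the example objective for every assignment of the three boolean variables. For this polynomial the maximum value 3.5 is attained at $x = (1, 1, 0)$.

```python
# Plain-Python illustration of the dictionary-based polynomial format.
# `evaluate_poly` is a hypothetical helper written for this example only.
from itertools import product

def evaluate_poly(polynomial, assignment):
    var_num, poly_dict = polynomial
    value = 0.0
    for key, coeff in poly_dict.items():
        if key == 'cons':
            value += coeff
            continue
        # 'x_1,x_2' -> product of the corresponding bits of the assignment
        term = 1
        for var in key.split(','):
            term *= assignment[int(var.split('_')[1]) - 1]
        value += coeff * term
    return value

polynomial = [3, {'x_1': 1, 'x_2': 1, 'x_3': -1, 'x_1,x_2': 1, 'x_1,x_2,x_3': -1, 'cons': 0.5}]
for x in product([0, 1], repeat=polynomial[0]):
    print(x, evaluate_poly(polynomial, x))
```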
### Code implementation to solve PUBO problem ```python # Import time module from time import perf_counter # Import sympy module for symbolic calculation from sympy import symbols # Import paddle module from paddle import seed, optimizer # Import pubo module from paddle_quantum.mbqc.QAOA.pubo import dict_to_symbol,is_poly_valid,brute_force_search # Import qaoa module from paddle_quantum.mbqc.QAOA.qaoa import MBQC_QAOA_Net, get_solution_string ``` We define a function ``mbqc_pubo`` which takes in the objective polynomial and returns an optimal solution. **The core part of ``mbqc_pubo`` is the ``MBQC_QAOA_Net`` class**, which integrates MB-QAOA and the optimization net. Please refer to [Measurement-Based Quantum Approximate Optimization Algorithm](QAOA_EN.ipynb) for more details. Here we directly call the function. ```python # Define the PUBO main function def mbqc_pubo(OBJ_POLY, DEPTH, SEED, LR, ITR, EPOCH, SHOTS=1024): # Symbolize the polynomial obj_poly = dict_to_symbol(OBJ_POLY) var_num, poly_symbol = obj_poly # Print the QAOA depth print("QAOA depth is:", DEPTH) # Start timing start_time = perf_counter() # Instantiate an MB-QAOA training net seed(SEED) mbqc_net = MBQC_QAOA_Net(DEPTH) # Choose the Adam optimizer (or SGD optimizer) opt = optimizer.Adam(learning_rate=LR, parameters=mbqc_net.parameters()) # Start training for epoch in range(EPOCH): # Update parameters for each iter for itr in range(1, ITR + 1): # Train with mbqc_net and return the loss loss, state_out = mbqc_net(poly=obj_poly) # Propagate loss backwards and optimize the parameters loss.backward() opt.minimize(loss) opt.clear_grad() if itr % 10 == 0: print("iter:", itr, " loss_MBQC:", "%.4f" % loss.numpy()) # Stop timing and print the running time end_time = perf_counter() print("MBQC running time is: ", end_time - start_time) # Print the optimized parameters print("Optimal parameter gamma: ", mbqc_net.gamma.numpy()) print("Optimal parameter beta: ", mbqc_net.beta.numpy()) # Decode the solution from the quantum state solution_str = get_solution_string(state_out, SHOTS) # Evaluate the corresponding value relation = {symbols('x_' + str(j + 1)): int(solution_str[j]) for j in range(var_num)} value = poly_symbol.evalf(subs=relation) # Return the solution and its corresponding value opt = [solution_str, value] return opt ``` To check the correctness of the training result, we provide a `brute_force_search` function in `pubo` that finds a global optimal value by brute force search. We can compare the training result with the optimal one. ```python from paddle_quantum.mbqc.QAOA.pubo import brute_force_search ``` ### Main function After defining the main function, let's input the parameters to run the code!
```python # Define the main function def main(): # Choose the example x_1 + x_2 - x_3 + x_1*x_2 -x_1*x_2*x_3 + 0.5 var_num = 3 poly_dict = {'x_1': 1, 'x_2': 1, 'x_3': -1, 'x_1,x_2': 1, 'x_1,x_2,x_3': -1, 'cons':0.5} polynomial = [var_num, poly_dict] # Print the input polynomial print("The input polynomial is: ", polynomial) # We can also randomly generate an objective function # polynomial = random_poly(var_num) # Check the validity of the input polynomial is_poly_valid(polynomial) # Starting training and obtain the result mbqc_result = mbqc_pubo( OBJ_POLY=polynomial, # Objective Function DEPTH=6, # QAOA Depth SEED=1024, # Plant Seed LR=0.1, # Learning Rate ITR=120, # Training Iters EPOCH=1 # Epoch Times ) # Print the result from MBQC model print("Optimal solution by MBQC: ", mbqc_result[0]) print("Optimal value by MBQC: ", mbqc_result[1]) # Compute the optimal result by brute-force search and print the result brute_result = brute_force_search(polynomial) print("Optimal solution by brute force search: ", brute_result[0]) print("Optimal value by brute force search: ", brute_result[1]) # Compare the training result with the optimal one print("Difference between optimal values from MBQC and brute force search: ", mbqc_result[1] - brute_result[1]) if __name__ == '__main__': main() ``` ## Example: MaxCut ### Graph and cut Maximum cut problem(MaxCut Problem)is a combinatorial optimization problem in graph theory, with plenty of applications in e.g. statistic physics and circuit design. In graph theory, a graph is represented by $G = (V, E)$, where $V$ is a set of vertices and $E$ is a set of edges. For example, a square can be characterized by the graph $G = (V,E)$ with $V = [1,2,3,4]$ and $E = [(1,2),(2,3),(3,4),(1,4)]$. For code implementation, we can use the `plot_graph` function in `maxcut` to plot a graph. ```python from paddle_quantum.mbqc.QAOA.maxcut import plot_graph V = [1,2,3,4] E = [(1,2),(2,3),(3,4),(1,4)] G = [V, E] plot_graph(G,"A square") ``` A cut in the graph is a partition separating the vertices set $V$ into two complementary subsets $S_0$ and $S_1$. If two vertices of an edge in the graph are separated into different subsets, we score a goal. The size of a cut is defined by the total scores that we get. Then the MaxCut problem is to find a cut of graph with maximal size. As for the above square $G$, one of the optimal solutions to the MaxCut problem is to put $1$ and $3$ into subset $S_0$ and put $2$ and $4$ into subset $S_1$. ### Transformation to a PUBO problem A MaxCut problem can be transformed into a PUBO problem. Assume the graph to be cut $G = (V, E)$ has $n=|V|$ vertices and $m =|E|$ edges, we can transform the MaxCut problem into a PUBO problem of $n$ variables. Each variable $x_v$ corresponds to a vertex $v \in V$ in the graph $G$, with its domain $x_v \in \{0,1\}$ corresponding to its belonging to subset $S_0$ or subset $S_1$. So, each value of the string $x = \{x_1,\cdots,x_n\}$ corresponds to a cut. As a valid edge to score a goal is the one whose vertices $u$ and $v$ belong to different subsets, given a cut $x$, its size can be defined as: $$ C(x) = \sum_{(u,v) \in E} (x_u \oplus x_v),\tag{3} $$ where $\oplus$ represents XOR operation. Then the MaxCut problem is equivalent to solve the optimization $\underset{x}{\max} \ C(x)$. Since $C(x)$ can be written as a polynomial: $$ C(x) = \sum_{(u, v) \in E} (x_u + x_v - 2 x_u x_v).\tag{4} $$ this optimization is essentially a quadratic PUBO problem of $n$ variables. 
We hope to find an optimal solution $x^{*}$ maximizing the value of objective polynomial, that is, $$ x^* = \underset{x}{\text{argmax}} \left( \sum_{(u, v) \in E} (x_u + x_v - 2 x_u x_v) \right).\tag{5} $$ We provide a function `graph_to_poly` in `maxcut` which takes in the graph to be cut and returns the equivalent objective polynomial in PUBO. ```python # Import maxcut module from paddle_quantum.mbqc.QAOA.maxcut import graph_to_poly # Input the vertices and edges V = [1,2,3,4] E = [(1,2),(2,3),(3,4),(1,4)] # Construct the graph to be cut G = [V, E] # Transform the graph to the equivalent polynomial poly = graph_to_poly(G) print("The equivalent objective polynomial is:\n", poly) ``` ### Code implementation to solve MaxCut problem Once obtaining the objective polynomial, we can follow the same process as the previous example and solve the MaxCut problem as a special case of PUBO. The complete code implementation is as follows: ```python # Import symbol calculaion module from sympy import symbols # Import paddle module from paddle import seed, optimizer # Import qaoa module from paddle_quantum.mbqc.QAOA.qaoa import MBQC_QAOA_Net, get_solution_string # Import maxcut module from paddle_quantum.mbqc.QAOA.maxcut import plot_graph, graph_to_poly, plot_solution ``` We define the main function for MaxCut that takes in the graph to be cut and returns the optimal training results. ```python # Define the MaxCut main function def mbqc_maxcut(GRAPH, DEPTH, SEED, LR, ITR, EPOCH, SHOTS=1024): # Plot the graph to be cut plot_graph(graph=GRAPH, title="Graph to be cut") # Obtain the objective polynomial polynomial = graph_to_poly(GRAPH) print("Corresponding objective polynomial of the graph is:", polynomial[1]) # Start timing start_time = perf_counter() # Instantiate a MB-QAOA training net seed(SEED) mbqc_net = MBQC_QAOA_Net(DEPTH) # Choose Adams optimizer (or SGD optimizer) opt = optimizer.Adam(learning_rate=LR, parameters=mbqc_net.parameters()) # Start training for epoch in range(EPOCH): # Update parameters for each iter for itr in range(1, ITR + 1): # Train with mbqc_net and return the loss loss, state_out = mbqc_net(poly=polynomial) # Propagate loss backwards and optimize the parameters loss.backward() opt.minimize(loss) opt.clear_grad() if itr % 10 == 0: print("iter:", itr, " loss_MBQC:", "%.4f" % loss.numpy()) # Stop timing and print the running time end_time = perf_counter() print("MBQC running time: ", end_time - start_time) # Print the optimized parameters print("Optimal parameter gamma: ", mbqc_net.gamma.numpy()) print("Optimal parameter beta: ", mbqc_net.beta.numpy()) # Decode the MaxCut solution from the final state mbqc_solution = get_solution_string(state_out, SHOTS) # Plot the MaxCut solution plot_solution(GRAPH, mbqc_solution) # Evaluate the number of cuts var_num, poly_symbol = polynomial relation = {symbols('x_' + str(j + 1)): int(mbqc_solution[j]) for j in range(var_num)} mbqc_value = int(poly_symbol.evalf(subs=relation)) mbqc_opt = [mbqc_solution, mbqc_value] return mbqc_opt ``` ### Main function After defining the main function, let's input the parameters to run the code! 
```python def main(): # A graph to be cut V = [1, 2, 3, 4] E = [(1, 2), (2, 3), (3, 4), (4, 1)] G = [V, E] # MaxCut under MBQC mbqc_result = mbqc_maxcut( GRAPH=G, # Graph to be cut DEPTH=6, # Depth SEED=1024, # Plant Seed LR=0.1, # Learning Rate ITR=120, # Training Iters EPOCH=1, # Epoch Times SHOTS=1024 # Shots for decoding the solution ) # Print the result from MBQC model print("Optimal solution by MBQC: ", mbqc_result[0]) print("Optimal value by MBQC: ", mbqc_result[1]) if __name__ == '__main__': main() ``` Now, we have completed the demonstration of two examples. The implementation of MB-QAOA indicates a great potential of MBQC in the field of quantum machine learning. Apparently, MBQC model can realize quantities of algorithms far beyond QAOA. We therefore are looking forward to exploring more on this and to show some unparalleled advantages in practice. --- ## References [1] Farhi, Edward, et al. "A quantum approximate optimization algorithm." [arXiv preprint arXiv:1411.4028 (2014).](https://arxiv.org/abs/1411.4028)
# Numerical Solution of the Helmholtz Equation using the Finite Element Method This notebook illustrates the numerical solution of the wave equation for harmonic excitation using the so called [Finite Element Method](https://en.wikipedia.org/wiki/Finite_element_method) (FEM). The method aims at an approximate solution by subdividing the area of interest into smaller parts with simpler geometry, linking these parts together and applying methods from the calculus of variations to solve the problem numerically. The FEM is a well established method for the numerical approximation of the solution of partial differential equations (PDEs). The solutions of PDEs are often known analytically only for rather simple geometries. FEM based simulations allow to gain insights into other more complex cases. ## Problem Statement The inhomogeneous linear [wave equation](https://en.wikipedia.org/wiki/Wave_equation) is given as \begin{equation} \Delta p(\mathbf{x}, t) - \frac{1}{c^2} \frac{\partial^2}{\partial t^2} p(\mathbf{x}, t) = - q(\mathbf{x}, t) , \end{equation} where $p(\mathbf{x}, t)$ denotes the sound pressure at position $\mathbf{x}$, $c$ the speed of sound and $q(\mathbf{x}, t)$ the inhomogeneity. For an harmonic excitation $q(\mathbf{x}, t) = \Re \{ Q(\mathbf{x}, \omega) \mathrm{e}^{\mathrm{j} \omega t} \}$ with frequency $\omega = 2 \pi f$ we choose the Ansatz $p(\mathbf{x}, t) = \Re \{ P(\mathbf{x}, \omega) \mathrm{e}^{\mathrm{j} \omega t} \}$ for the sound pressure. Introduction of the complex quantities into the wave equation yields \begin{equation} \Delta P(\mathbf{x}, \omega) \mathrm{e}^{\mathrm{j} \omega t} + \frac{\omega^2}{c^2} P(\mathbf{x}, \omega) \mathrm{e}^{\mathrm{j} \omega t} = - Q(\mathbf{x}, \omega) \mathrm{e}^{\mathrm{j} \omega t} , \end{equation} and canceling out the $\mathrm{e}^{\mathrm{j} \omega t}$ terms yields the [Helmholtz equation](https://en.wikipedia.org/wiki/Helmholtz_equation) \begin{equation} \Delta P(\mathbf{x}, \omega) + \frac{\omega^2}{c^2} P(\mathbf{x}, \omega) = - Q(\mathbf{x}, \omega) . \end{equation} We aim for a numerical solution of the Helmholtz equation on the domain $V$ with respect to the homogeneous Dirichlet boundary condition \begin{equation} P(\mathbf{x}, \omega) = 0 \qquad \text{for } x \in \partial V \end{equation} or the homogeneous Neumann boundary condition \begin{equation} \frac{\partial}{\partial n} P(\mathbf{x}, \omega) = 0 \qquad \text{for } x \in \partial V , \end{equation} where $\partial V $ denotes the boundary of $V$. ## Variational Formulation The FEM is based on expressing the partial differential equation (PDE) to be solved in its [variational](https://en.wikipedia.org/wiki/Calculus_of_variations) or weak form. The first step towards this formulation is to multiply the Helmholtz equation by the test function $V(\mathbf{x}, \omega)$ \begin{equation} \Delta P(\mathbf{x}, \omega) \cdot V(\mathbf{x}, \omega) + \frac{\omega^2}{c^2} P(\mathbf{x}, \omega) \cdot V(\mathbf{x}, \omega) = - Q(\mathbf{x}, \omega) \cdot V(\mathbf{x}, \omega) , \end{equation} followed by integration over the domain $V$ \begin{equation} \int_V \Delta P(\mathbf{x}, \omega) \cdot V(\mathbf{x}, \omega) \mathrm{d}x + \frac{\omega^2}{c^2} \int_V P(\mathbf{x}, \omega) \cdot V(\mathbf{x}, \omega) \mathrm{d}x = - \int_V Q(\mathbf{x}, \omega) \cdot V(\mathbf{x}, \omega) \mathrm{d}x , \end{equation} where $\mathrm{d}x$ denotes a suitably chosen differential element for integration. 
Application of [Green's first identity](https://en.wikipedia.org/wiki/Green%27s_identities) yields \begin{equation} {-} \int_V \nabla P(\mathbf{x}, \omega) \cdot \nabla V(\mathbf{x}, \omega) \mathrm{d}x + \int_{\partial V} V(\mathbf{x}, \omega) \frac{\partial}{\partial n} P(\mathbf{x}, \omega) \mathrm{d}s + \frac{\omega^2}{c^2} \int_V P(\mathbf{x}, \omega) V(\mathbf{x}, \omega) \mathrm{d}x = - \int_V Q(\mathbf{x}, \omega) V(\mathbf{x}, \omega) \mathrm{d}x . \end{equation} This way the differential order of the first integral is lowered which is advisable for application of the FEM. The second integral vanishes as * the variation formulation requires $V(\mathbf{x}, \omega) = 0$ on $\partial V$ where $P(\mathbf{x}, \omega)$ is known - here by the pure Dirichlet boundary condition - or * due to a pure homogeneous Neumann boundary condition $\frac{\partial}{\partial n} P(\mathbf{x}, \omega)$ on $\partial V$. This results in the variational/weak formulation of the Helmholtz equation \begin{equation} {-} \int_V \nabla P(\mathbf{x}, \omega) \cdot \nabla V(\mathbf{x}, \omega) \mathrm{d}x + \frac{\omega^2}{c^2} \int_V P(\mathbf{x}, \omega) V(\mathbf{x}, \omega) \mathrm{d}x = - \int_V Q(\mathbf{x}, \omega) V(\mathbf{x}, \omega) \mathrm{d}x . \end{equation} It is common to express the integral equation above in terms of the bilinear $a(P, V)$ and linear $L(V)$ forms \begin{equation} a(P, V) = \frac{\omega^2}{c^2} \int_V P(\mathbf{x}, \omega) V(\mathbf{x}, \omega) \mathrm{d}x - \int_V \nabla P(\mathbf{x}, \omega) \cdot \nabla V(\mathbf{x}, \omega) \mathrm{d}x , \end{equation} \begin{equation} L(V) = - \int_V Q(\mathbf{x}, \omega) V(\mathbf{x}, \omega) \mathrm{d}x , \end{equation} where \begin{equation} a(P, V) = L(V) . \end{equation} ## Numerical Solution The numerical solution of the variational problem is based on [FEniCS](https://fenicsproject.org/), an open-source framework for numerical solution of PDEs. Its high-level Python interface `dolfin` is used in the following to define the problem and compute its solution. The implementation is based on the variational formulation derived above. It is common in the FEM to denote the solution of the problem by $u$ and the test function by $v$. The definition of the problem in FEniCS is very close to the mathematical formulation of the problem. We limit ourselves to real-valued $P(\mathbf{x}, \omega)$ due to the assumption of Dirichlet or Neumann boundary conditions. For the subsequent examples the solution of the inhomogeneous wave equation for a point source $Q(\mathbf{x}) = \delta(\mathbf{x}-\mathbf{x_s})$ at position $\mathbf{x_s}$ is computed using the FEM. A function is defined for this purpose, accompanied by a plotting routine for the resulting sound field. 
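Before running the FEniCS implementation below, it can be useful to do a rough, hand-written resolution check (added here; it is not part of the original notebook). A common rule of thumb in acoustic FEM is to resolve one wavelength with several elements; the numbers below assume the rectangular-room example used later (a 5 m × 4 m domain, a 200 × 200 mesh, $f = 1000$ Hz and $c = 343$ m/s).

```python
# Back-of-the-envelope mesh resolution check (assumed values, matching the example below).
c = 343.0           # speed of sound in m/s
f = 1000.0          # excitation frequency in Hz
wavelength = c / f  # approximately 0.343 m

h = 5.0 / 200       # approximate element edge length for the 200 x 200 rectangle mesh
print("wavelength:", wavelength, "m")
print("elements per wavelength:", wavelength / h)  # roughly 14
```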
```python import dolfin import mshr import matplotlib.pyplot as plt %matplotlib inline def FEM_Helmholtz(mesh, frequency, xs, neumann_bc=True, c=343): '''numerical solution of the Helmholtz equation using the FEM''' # squared wavenumber k2 = dolfin.Constant(2*dolfin.pi*frequency/c)**2 # define function space V = dolfin.FunctionSpace(mesh, "CG", 2) # define boundary conditions if neumann_bc: bcs = None else: bcs = dolfin.DirichletBC(V, dolfin.Constant(0), "on_boundary") # define variational problem u = dolfin.TrialFunction(V) v = dolfin.TestFunction(V) a = k2 * dolfin.inner(u, v) * dolfin.dx - dolfin.inner(dolfin.nabla_grad(u), dolfin.nabla_grad(v)) * dolfin.dx L = dolfin.Constant(0) * v * dolfin.dx A, b = dolfin.assemble_system(a, L, bcs) # define inhomogenity delta = dolfin.PointSource(V, xs, -1) # to account for negative sign in inhomogeneity delta.apply(b) # compute solution u = dolfin.Function(V) dolfin.solve(A, u.vector(), b) return u def plot_soundfield(u): '''plot solution of FEM-based simulation''' fig = plt.figure(figsize=(10,10)) fig = dolfin.plot(u) plt.title(r'$P(\mathbf{x}, \omega)$') plt.xlabel(r'$x$ / m') plt.ylabel(r'$y$ / m') plt.colorbar(fig, fraction=0.038, pad=0.04); ``` ### Sound Field in a Rectangular Room with Sound-Hard Boundaries The two-dimensional sound field in a rectangular room whose height is very small compared to the wavelength and with rigid boundaries (Neumann boundary condition) is computed for the frequency $f=1000$ Hz and source position $x_s = (1.2,3.2)$ m. ```python # define geometry and mesh mesh = dolfin.RectangleMesh(dolfin.Point(0,0), dolfin.Point(5,4), 200, 200, "right/left") # compute solution u = FEM_Helmholtz(mesh, 1000, dolfin.Point(1.2,3.2)) # plot sound field plot_soundfield(u) plot_soundfield(abs(u)) plt.title(r'$|P(\mathbf{x}, \omega)|$'); ``` ### Sound Field in Two Coupled Rectangular Rooms The two-dimensional sound field in two coupled rectangular rooms with sound-hard boundaries (Neumann boundary condition) is computed for the frequency $f=1000$ Hz and source position $x_s = (2,0.5)$ m. First the geometry is defined and plotted, for which the mesh is generated with a low number of elements for ease of illustration. A higher resolution is then used for the simulations. ```python # define geometry and mesh domain = mshr.Rectangle(dolfin.Point(0, 0), dolfin.Point(3,4)) + \ mshr.Rectangle(dolfin.Point(3, 1.5), dolfin.Point(3.5, 2.5)) + \ mshr.Rectangle(dolfin.Point(3.5, 0), dolfin.Point(6, 4)) mesh2 = mshr.generate_mesh(domain, 20) dolfin.plot(mesh2); ``` ```python # compute solution mesh2 = mshr.generate_mesh(domain, 100) u = FEM_Helmholtz(mesh2, 1000, dolfin.Point(2, .5)) # plot sound field plot_soundfield(u) plot_soundfield(abs(u)) plt.title(r'$|P(\mathbf{x}, \omega)|$'); ``` **Copyright** This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT).
# Free Body Diagram for particles Renato Naville Watanabe ```python import numpy as np import matplotlib.pyplot as plt %matplotlib notebook ``` ### Steps to draw a free-body diagram 1 - Draw separately each object considered in the problem. How you separate depends on what questions you want to answer. 2 - Identify the forces acting on each object. If you are analyzing more than one object, remember the third Newton law (action and reaction), and identify where the reaction of a force is being applied. 3 - Draw all the identified forces, representing them as vectors. The vectors should be represented with the origin in the object. In the case of particles, the origin should be in the center of the particle. 4 - If necessary, you should represent the reference frame in the free-body diagram. 5 - After this, you can solve the problem using the Second Newton Law (see, e.g., [Newton Laws](https://nbviewer.jupyter.org/github/BMClab/bmc/blob/master/Notebooks/newtonLawForParticles.ipynb)) to find the motion of the particle. ## Basic elements and forces ### Spring A spring is an element used to represent a force proportional to some length or displacement. It produces a force in the same direction as the vector linking the spring extremities and opposite to its length or displacement from an equilibrium length. Frequently it has a linear relation, but it could be nonlinear as well. The force exerted by the spring on one of the extremities is: \begin{equation} \vec{\bf{F}} = - k(||\vec{\bf{r}}||-l_0)\frac{\vec{\bf{r}}}{||\vec{\bf{r}}||} = -k\vec{\bf{r}} +kl_0\frac{\vec{\bf{r}}}{||\vec{\bf{r}}||} = -k\left(1-\frac{l_0}{||\vec{\bf{r}}||}\right)\vec{\bf{r}} \end{equation} where $\vec{\bf{r}}$ is the vector linking the extremity applying the force to the other extremity and $l_0$ is the equilibrium length of the spring. Since the spring element is a massless element, the forces at both extremities have the same absolute value and opposite directions. ### Damping A damping element is used to represent a force proportional to the velocity of displacement. It produces a force in the same direction as the vector linking the element extremities and opposite to its velocity. Frequently it has a linear relation, but it could be nonlinear as well. The force exerted by the damping element on one of its extremities is: \begin{equation} \vec{\bf{F}} = - b||\vec{\bf{v}}||\frac{\vec{\bf{v}}}{||\vec{\bf{v}}||} = -b\vec{\bf{v}} = -b\frac{d\vec{\bf{r}}}{dt} \end{equation} where $\vec{\bf{r}}$ is the vector linking the extremity applying the force to the other extremity. For the same reason as the spring, the forces at both extremities have the same absolute value and opposite directions. ### Gravity The gravity force acts on two masses, each one attracting the other: \begin{equation} \vec{{\bf{F}}} = - G\frac{m_1m_2}{||\vec{\bf{r}}||^2}\frac{\vec{\bf{r}}}{||\vec{\bf{r}}||} \end{equation} where $G = 6.67\times10^{-11}\,Nm^2/kg^2$ and $\vec{\bf{r}}$ is a vector with length equal to the distance between the masses and pointing towards the other mass. Note that the forces acting on each mass have the same absolute value. Since the mass of the Earth is $m_1=5.9736\times10^{24}\,kg$ and its radius is $6.371\times10^6\,m$ (see [this notebook](https://nbviewer.jupyter.org/github/BMClab/bmc/blob/master/Notebooks/KineticsFundamentalConcepts.ipynb), for details), the gravity force on a mass $m$ at the Earth's surface is: \begin{equation} \vec{{\bf{F}}} = m\vec{\bf{g}} \end{equation} with the absolute value of $\vec{\bf{g}}$ approximately equal to 9.81 $m/s^2$, pointing towards the center of the Earth.
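As a quick numerical check of this last statement (added here for illustration), plugging the Earth values quoted above into $G\,m_1/r^2$ indeed gives a value close to $9.8\,m/s^2$:

```python
# Gravitational acceleration at the Earth's surface from the constants quoted above.
G = 6.67e-11          # N m^2 / kg^2
m_earth = 5.9736e24   # kg
r_earth = 6.371e6     # m

g = G * m_earth / r_earth**2
print(g)  # approximately 9.8 m/s^2
```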
Below, we show some examples of how to draw the free-body diagram and obtain the equations of motion to solve the problems. ## 1) No force acting on the particle The most trivial situation is a particle with no force acting on it. The free-body diagram is below, with no force vectors acting on the particle. <figure> In this situation, the resultant force is: \begin{equation} \vec{\bf{F}} = \vec{\bf{0}} \end{equation} And the second Newton law for this particle is: \begin{equation} m\frac{d^2\vec{\bf{r}}}{dt^2} = \vec{\bf{0}} \rightarrow \frac{d^2\vec{\bf{r}}}{dt^2} = \vec{\bf{0}} \end{equation} The motion of the particle can be found by integrating twice with respect to time, giving: \begin{equation} \vec{\bf{r}} = \vec{\bf{v_0}}t + \vec{\bf{r_0}} \end{equation} The particle continues to change its position with the same velocity it had at the beginning of the analysis. This could be predicted by the first Newton law. ## 2) Gravity force acting on the particle Now, we will consider a ball with the gravity force acting on it. The free-body diagram is depicted below. <figure> The only force acting on the ball is the gravitational force: \begin{equation} \vec{\bf{F_g}} = - mg \hat{\bf{j}} \end{equation} So, we write the Second Newton Law: \begin{equation} \vec{\bf{F_g}} = m \frac{d^2\vec{\bf{r}}}{dt^2} \rightarrow - mg \hat{\bf{j}} = m \frac{d^2\vec{\bf{r}}}{dt^2} \rightarrow - g \hat{\bf{j}} =\frac{d^2\vec{\bf{r}}}{dt^2} \end{equation} Now, we can separate the equation into two components (x and y): \begin{equation} 0 = \frac{d^2x}{dt^2} \end{equation} and \begin{equation} - g = \frac{d^2y}{dt^2} \end{equation} These equations were solved in detail in [this Notebook about Newton laws](https://nbviewer.jupyter.org/github/BMClab/bmc/blob/master/Notebooks/newtonLawForParticles.ipynb). ## 3) Ground reaction force Now, we will analyze the situation of a particle in contact with the ground. To simplify the analysis, only the vertical movement will be considered. <figure> The forces acting on the particle are the ground reaction force (often called the normal force) and the gravity force. The free-body diagram of the particle is below: <figure> So, the resultant force on the particle is: \begin{equation} \vec{\bf{F}} = \overrightarrow{\bf{GRF}} + m\vec{\bf{g}} = \overrightarrow{\bf{GRF}} - mg\hat{\bf{j}} \end{equation} Considering only the y direction: \begin{equation} F = GRF - mg \end{equation} Applying the second Newton law to the particle: \begin{equation} m \frac{d^2y}{dt^2} = GRF - mg \end{equation} Note that since we have no information about how the force GRF varies over time, we cannot solve this equation. To find the position of the particle over time, one would have to measure the ground reaction force. See [the notebook on Vertical jump](http://nbviewer.jupyter.org/github/BMClab/bmc/blob/master/notebooks/VerticalJump.ipynb) for an application of this model. ## 4) Linear spring in horizontal movement The example below is a mass attached to a spring. The other extremity of the spring is fixed. <figure> The only force acting on the mass is from the spring. Below is the free-body diagram of the mass. <figure> Since the movement is horizontal, we can neglect the gravity force.
\begin{equation} \vec{\bf{F}} = -k\left(1-\frac{l_0}{||\vec{\bf{r}}||}\right)\vec{\bf{r}} \end{equation} Applying the second Newton law to the mass: \begin{equation} m\frac{d^2\vec{\bf{r}}}{dt^2} = -k\left(1-\frac{l_0}{||\vec{\bf{r}}||}\right)\vec{\bf{r}} \rightarrow \frac{d^2\vec{\bf{r}}}{dt^2} = -\frac{k}{m}\left(1-\frac{l_0}{||\vec{\bf{r}}||}\right)\vec{\bf{r}} \end{equation} Since the movement is unidimensional, we can deal with it scalarly: \begin{equation} \frac{d^2x}{dt^2} = -\frac{k}{m}\left(1-\frac{l_0}{x}\right)x = -\frac{k}{m}(x-l_0) \end{equation} To solve this equation numerically, we must break the equations into two first-order differential equation: \begin{equation} \frac{dv_x}{dt} = -\frac{k}{m}(x-l_0) \end{equation} \begin{equation} \frac{dx}{dt} = v_x \end{equation} In the numerical solution below, we will use $k = 40 N/m$, $m = 2 kg$, $l_0 = 0.5 m$ and the mass starts from the position $x = 0.8m$ and at rest. ```python k = 40 m = 2 l0 = 0.5 x0 = 0.8 v0 = 0 x = x0 v = v0 dt = 0.001 t = np.arange(0, 3, dt) r = np.array([x]) for i in t[1:]: dxdt = v dvxdt = -k/m*(x-l0) x = x + dt*dxdt v = v + dt*dvxdt r = np.vstack((r,np.array([x]))) plt.figure() plt.plot(t,r) plt.show() ``` ## 5) Linear spring in bidimensional movement in horizontal plane This example has two masses attached to the spring. To solve the motion of both masses, we must draw a free-body diagram for each one of the masses. <figure> The only force acting on each mass is the force due to the spring. Since the movement is happening in the horizontal plane, the gravity force can be neglected. <figure> So, the forces acting on mass 1 is: \begin{equation} \vec{\bf{F_1}} = k\left(||\vec{\bf{r_2}}-\vec{\bf{r_1}}||-l_0\right)\frac{(\vec{\bf{r_2}}-\vec{\bf{r_1}})}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||} \end{equation} and the forces acting on mass 2 is: \begin{equation} \vec{\bf{F_2}} =k\left(||\vec{\bf{r_2}}-\vec{\bf{r_1}}||-l_0\right)\frac{(\vec{\bf{r_1}}-\vec{\bf{r_2}})}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||} \end{equation} Applying the second Newton law for the masses: \begin{equation} m_1\frac{d^2\vec{\bf{r_1}}}{dt^2} = k\left(||\vec{\bf{r_2}}-\vec{\bf{r_1}}||-l_0\right)\frac{(\vec{\bf{r_2}}-\vec{\bf{r_1}})}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||} \rightarrow \frac{d^2\vec{\bf{r_1}}}{dt^2} = -\frac{k}{m_1}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)\vec{\bf{r_1}}+\frac{k}{m_1}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)\vec{\bf{r_2}} \rightarrow \frac{d^2x_1\hat{\bf{i}}}{dt^2}+\frac{d^2y_1\hat{\bf{j}}}{dt^2} = -\frac{k}{m_1}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)(x_1\hat{\bf{i}}+y_1\hat{\bf{j}})+\frac{k}{m_1}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)(x_2\hat{\bf{i}}+y_2\hat{\bf{j}}) \end{equation} \begin{equation} m_2\frac{d^2\vec{\bf{r_2}}}{dt^2} = k\left(||\vec{\bf{r_2}}-\vec{\bf{r_1}}||-l_0\right)\frac{(\vec{\bf{r_1}}-\vec{\bf{r_2}})}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||} \rightarrow \frac{d^2\vec{\bf{r_2}}}{dt^2} = -\frac{k}{m_2}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)\vec{\bf{r_2}}+\frac{k}{m_2}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)\vec{\bf{r_1}} \rightarrow \frac{d^2x_2\hat{\bf{i}}}{dt^2}+\frac{d^2y_2\hat{\bf{j}}}{dt^2} = -\frac{k}{m_2}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)(x_2\hat{\bf{i}}+y_2\hat{\bf{j}})+\frac{k}{m_2}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)(x_1\hat{\bf{i}}+y_1\hat{\bf{j}}) \end{equation} Now, we can separate the equations for each of the coordinates: 
\begin{equation} \frac{d^2x_1}{dt^2} = -\frac{k}{m_1}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)x_1+\frac{k}{m_1}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)x_2=-\frac{k}{m_1}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(x_1-x_2) \end{equation} \begin{equation} \frac{d^2y_1}{dt^2} = -\frac{k}{m_1}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)y_1+\frac{k}{m_1}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)y_2=-\frac{k}{m_1}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(y_1-y_2) \end{equation} \begin{equation} \frac{d^2x_2}{dt^2} = -\frac{k}{m_2}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)x_2+\frac{k}{m_2}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)x_1=-\frac{k}{m_2}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(x_2-x_1) \end{equation} \begin{equation} \frac{d^2y_2}{dt^2} = -\frac{k}{m_2}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)y_2+\frac{k}{m_2}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)y_1=-\frac{k}{m_2}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(y_2-y_1) \end{equation} To solve these equations numerically, you must break these equations into first-order equations: \begin{equation} \frac{dv_{x_1}}{dt} = -\frac{k}{m_1}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(x_1-x_2) \end{equation} \begin{equation} \frac{dv_{y_1}}{dt} = -\frac{k}{m_1}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(y_1-y_2) \end{equation} \begin{equation} \frac{dv_{x_2}}{dt} = -\frac{k}{m_2}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(x_2-x_1) \end{equation} \begin{equation} \frac{dv_{y_2}}{dt} = -\frac{k}{m_2}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(y_2-y_1) \end{equation} \begin{equation} \frac{dx_1}{dt} = v_{x_1} \end{equation} \begin{equation} \frac{dy_1}{dt} = v_{y_1} \end{equation} \begin{equation} \frac{dx_2}{dt} = v_{x_2} \end{equation} \begin{equation} \frac{dy_2}{dt} = v_{y_2} \end{equation} Note that if you did not wanted to know the details about the motion of each mass, but only the motion of the center of mass of the masses-spring system, you could have modeled the whole system as a single particle. To solve the equations numerically, we will use the $m_1=1 kg$, $m_2 = 2 kg$, $l_0 = 0.5 m$, $k = 90 N/m$ and $x_{1_0} = 0 m$, $x_{2_0} = 0 m$, $y_{1_0} = 1 m$, $y_{2_0} = -1 m$, $v_{x1_0} = -2 m/s$, $v_{x2_0} = 0 m$, $v_{y1_0} = 0 m$, $v_{y2_0} = 0 m$. 
```python x01 = 0 y01= 0.5 x02 = 0 y02 = -0.5 vx01 = 0.1 vy01 = 0 vx02 = -0.1 vy02 = 0 x1= x01 y1 = y01 x2= x02 y2 = y02 vx1= vx01 vy1 = vy01 vx2= vx02 vy2 = vy02 r1 = np.array([x1,y1]) r2 = np.array([x2,y2]) k = 30 m1 = 1 m2 = 1 l0 = 0.5 dt = 0.0001 t = np.arange(0,5,dt) for i in t[1:]: dvx1dt = -k/m1*(x1-x2)*(1-l0/np.sqrt((x2-x1)**2+(y2-y1)**2)) dvx2dt = -k/m2*(x2-x1)*(1-l0/np.sqrt((x2-x1)**2+(y2-y1)**2)) dvy1dt = -k/m1*(y1-y2)*(1-l0/np.sqrt((x2-x1)**2+(y2-y1)**2)) dvy2dt = -k/m2*(y2-y1)*(1-l0/np.sqrt((x2-x1)**2+(y2-y1)**2)) dx1dt = vx1 dx2dt = vx2 dy1dt = vy1 dy2dt = vy2 x1 = x1 + dt*dx1dt x2 = x2 + dt*dx2dt y1 = y1 + dt*dy1dt y2 = y2 + dt*dy2dt vx1 = vx1 + dt*dvx1dt vx2 = vx2 + dt*dvx2dt vy1 = vy1 + dt*dvy1dt vy2 = vy2 + dt*dvy2dt r1 = np.vstack((r1,np.array([x1,y1]))) r2 = np.vstack((r2,np.array([x2,y2]))) springLength = np.sqrt((r1[:,0]-r2[:,0])**2+(r1[:,1]-r2[:,1])**2) plt.figure() plt.plot(t, springLength) plt.show() ``` ## 6)Particle with gravity and linear air resistance Below is the free-body diagram of a particle with the gravity force and a linear drag force due to the air resistance. <figure> the forces being applied in the ball are: \begin{equation} \vec{\bf{F}} = -mg \hat{\bf{j}} - b\vec{\bf{v}} = -mg \hat{\bf{j}} - b\frac{d\vec{\bf{r}}}{dt} = -mg \hat{\bf{j}} - b\left(\frac{dx}{dt}\hat{\bf{i}}+\frac{dy}{dt}\hat{\bf{j}}\right) = - b\frac{dx}{dt}\hat{\bf{i}} - \left(mg + b\frac{dy}{dt}\right)\hat{\bf{j}} \end{equation} Writing down the Second Newton Law: \begin{equation} \vec{\bf{F}} = m \frac{d^2\vec{\bf{r}}}{dt^2} \rightarrow - b\frac{dx}{dt}\hat{\bf{i}} - \left(mg + b\frac{dy}{dt}\right)\hat{\bf{j}} = m\left(\frac{d^2x}{dt^2}\hat{\bf{i}}+\frac{d^2y}{dt^2}\hat{\bf{j}}\right) \end{equation} Now, we can separate into one equation for each coordinate: \begin{equation} - b\frac{dx}{dt} = m\frac{d^2x}{dt^2} -\rightarrow \frac{d^2x}{dt^2} = -\frac{b}{m} \frac{dx}{dt} \end{equation} \begin{equation} -mg - b\frac{dy}{dt} = m\frac{d^2y}{dt^2} \rightarrow \frac{d^2y}{dt^2} = -\frac{b}{m}\frac{dy}{dt} - g \end{equation} These equations were solved in [this notebook](https://nbviewer.jupyter.org/github/BMClab/bmc/blob/master/Notebooks/newtonLawForParticles.ipynb). ## 7) Particle with gravity and nonlinear air resistance Below, is the free-body diagram of a particle with the gravity force and a drag force due to the air resistance proportional to the square of the particle velocity. 
<figure> The forces being applied in the ball are: \begin{equation} \vec{\bf{F}} = -mg \hat{\bf{j}} - bv^2\hat{\bf{e_t}} = -mg \hat{\bf{j}} - b (v_x^2+v_y^2) \frac{v_x\hat{\bf{i}}+v_y\hat{\bf{j}}}{\sqrt{v_x^2+v_y^2}} = -mg \hat{\bf{j}} - b \sqrt{v_x^2+v_y^2} \,(v_x\hat{\bf{i}}+v_y\hat{\bf{j}}) = -mg \hat{\bf{j}} - b \sqrt{\left(\frac{dx}{dt} \right)^2+\left(\frac{dy}{dt} \right)^2} \,\left(\frac{dx}{dt} \hat{\bf{i}}+\frac{dy}{dt}\hat{\bf{j}}\right) \end{equation} Writing down the Second Newton Law: \begin{equation} \vec{\bf{F}} = m \frac{d^2\vec{\bf{r}}}{dt^2} \rightarrow -mg \hat{\bf{j}} - b \sqrt{\left(\frac{dx}{dt} \right)^2+\left(\frac{dy}{dt} \right)^2} \,\left(\frac{dx}{dt} \hat{\bf{i}}+\frac{dy}{dt}\hat{\bf{j}}\right) = m\left(\frac{d^2x}{dt^2}\hat{\bf{i}}+\frac{d^2y}{dt^2}\hat{\bf{j}}\right) \end{equation} Now, we can separate into one equation for each coordinate: \begin{equation} - b \sqrt{\left(\frac{dx}{dt} \right)^2+\left(\frac{dy}{dt} \right)^2} \,\frac{dx}{dt} = m\frac{d^2x}{dt^2} \rightarrow \frac{d^2x}{dt^2} = - \frac{b}{m} \sqrt{\left(\frac{dx}{dt} \right)^2+\left(\frac{dy}{dt} \right)^2} \,\frac{dx}{dt} \end{equation} \begin{equation} -mg - b \sqrt{\left(\frac{dx}{dt} \right)^2+\left(\frac{dy}{dt} \right)^2} \,\frac{dy}{dt} = m\frac{d^2y}{dt^2} \rightarrow \frac{d^2y}{dt^2} = - \frac{b}{m} \sqrt{\left(\frac{dx}{dt} \right)^2+\left(\frac{dy}{dt} \right)^2} \,\frac{dy}{dt} -g \end{equation} These equations were solved numerically in [this notebook](https://nbviewer.jupyter.org/github/BMClab/bmc/blob/master/Notebooks/newtonLawForParticles.ipynb). ## 8) Linear spring and damping on bidimensional horizontal movement This situation is very similar to the example of horizontal movement with one spring and two masses, with a damper added in parallel to the spring. <figure> Now, the forces acting on each mass are the force due to the spring and the force due to the damper. 
<figure> So, the forces acting on mass 1 is: \begin{equation} \vec{\bf{F_1}} = b\frac{d(\vec{\bf{r_2}}-\vec{\bf{r_1}})}{dt} + k\left(||\vec{\bf{r_2}}-\vec{\bf{r_1}}||-l_0\right)\frac{(\vec{\bf{r_2}}-\vec{\bf{r_1}})}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||} = b\frac{d(\vec{\bf{r_2}}-\vec{\bf{r_1}})}{dt} + k\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)(\vec{\bf{r_2}}-\vec{\bf{r_1}}) \end{equation} and the forces acting on mass 2 is: \begin{equation} \vec{\bf{F_2}} = b\frac{d(\vec{\bf{r_1}}-\vec{\bf{r_2}})}{dt} + k\left(||\vec{\bf{r_2}}-\vec{\bf{r_1}}||-l_0\right)\frac{(\vec{\bf{r_1}}-\vec{\bf{r_2}})}{||\vec{\bf{r_1}}-\vec{\bf{r_2}}||}= b\frac{d(\vec{\bf{r_1}}-\vec{\bf{r_2}})}{dt} + k\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)(\vec{\bf{r_1}}-\vec{\bf{r_2}}) \end{equation} Applying the second Newton law for the masses: \begin{equation} m_1\frac{d^2\vec{\bf{r_1}}}{dt^2} = b\frac{d(\vec{\bf{r_2}}-\vec{\bf{r_1}})}{dt}+k\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)(\vec{\bf{r_2}}-\vec{\bf{r_1}}) \rightarrow \frac{d^2\vec{\bf{r_1}}}{dt^2} = -\frac{b}{m_1}\frac{d\vec{\bf{r_1}}}{dt} -\frac{k}{m_1}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)\vec{\bf{r_1}} + \frac{b}{m_1}\frac{d\vec{\bf{r_2}}}{dt}+\frac{k}{m_1}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)\vec{\bf{r_2}} \rightarrow \frac{d^2x_1\hat{\bf{i}}}{dt^2}+\frac{d^2y_1\hat{\bf{j}}}{dt^2} = -\frac{b}{m_1}\left(\frac{dx_1\hat{\bf{i}}}{dt}+\frac{dy_1\hat{\bf{j}}}{dt}\right)-\frac{k}{m_1}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)(x_1\hat{\bf{i}}+y_1\hat{\bf{j}})+\frac{b}{m_1}\left(\frac{dx_2\hat{\bf{i}}}{dt}+\frac{dy_2\hat{\bf{j}}}{dt}\right)+\frac{k}{m_1}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)(x_2\hat{\bf{i}}+y_2\hat{\bf{j}}) = -\frac{b}{m_1}\left(\frac{dx_1\hat{\bf{i}}}{dt}+\frac{dy_1\hat{\bf{j}}}{dt}-\frac{dx_2\hat{\bf{i}}}{dt}-\frac{dy_2\hat{\bf{j}}}{dt}\right)-\frac{k}{m_1}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(x_1\hat{\bf{i}}+y_1\hat{\bf{j}}-x_2\hat{\bf{i}}-y_2\hat{\bf{j}}) \end{equation} \begin{equation} m_2\frac{d^2\vec{\bf{r_2}}}{dt^2} = b\frac{d(\vec{\bf{r_1}}-\vec{\bf{r_2}})}{dt}+k\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)(\vec{\bf{r_1}}-\vec{\bf{r_2}}) \rightarrow \frac{d^2\vec{\bf{r_2}}}{dt^2} = -\frac{b}{m_2}\frac{d\vec{\bf{r_2}}}{dt} -\frac{k}{m_2}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)\vec{\bf{r_2}} + \frac{b}{m_2}\frac{d\vec{\bf{r_1}}}{dt}+\frac{k}{m_2}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)\vec{\bf{r_1}} \rightarrow \frac{d^2x_2\hat{\bf{i}}}{dt^2}+\frac{d^2y_2\hat{\bf{j}}}{dt^2} = -\frac{b}{m_2}\left(\frac{dx_2\hat{\bf{i}}}{dt}+\frac{dy_2\hat{\bf{j}}}{dt}\right)-\frac{k}{m_2}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)(x_2\hat{\bf{i}}+y_2\hat{\bf{j}})+\frac{b}{m_2}\left(\frac{dx_1\hat{\bf{i}}}{dt}+\frac{dy_1\hat{\bf{j}}}{dt}\right)+\frac{k}{m_2}\left(1-\frac{l_0}{||\vec{\bf{r_2}}-\vec{\bf{r_1}}||}\right)(x_1\hat{\bf{i}}+y_1\hat{\bf{j}})=-\frac{b}{m_2}\left(\frac{dx_2\hat{\bf{i}}}{dt}+\frac{dy_2\hat{\bf{j}}}{dt}-\frac{dx_1\hat{\bf{i}}}{dt}-\frac{dy_1\hat{\bf{j}}}{dt}\right)-\frac{k}{m_2}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(x_2\hat{\bf{i}}+y_2\hat{\bf{j}}-x_1\hat{\bf{i}}-y_1\hat{\bf{j}}) \end{equation} Now, we can separate the equations for each of the coordinates: \begin{equation} \frac{d^2x_1}{dt^2} = 
-\frac{b}{m_1}\left(\frac{dx_1}{dt}-\frac{dx_2}{dt}\right)-\frac{k}{m_1}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(x_1-x_2) \end{equation} \begin{equation} \frac{d^2y_1}{dt^2} = -\frac{b}{m_1}\left(\frac{dy_1}{dt}-\frac{dy_2}{dt}\right)-\frac{k}{m_1}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(y_1-y_2) \end{equation} \begin{equation} \frac{d^2x_2}{dt^2} = -\frac{b}{m_2}\left(\frac{dx_2}{dt}-\frac{dx_1}{dt}\right)-\frac{k}{m_2}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(x_2-x_1) \end{equation} \begin{equation} \frac{d^2y_2}{dt^2} = -\frac{b}{m_2}\left(\frac{dy_2}{dt}-\frac{dy_1}{dt}\right)-\frac{k}{m_2}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(y_2-y_1) \end{equation} If you want to solve these equations numerically, you must break these equations into first-order equations: \begin{equation} \frac{dv_{x_1}}{dt} = -\frac{b}{m_1}\left(v_{x_1}-v_{x_2}\right)-\frac{k}{m_1}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(x_1-x_2) \end{equation} \begin{equation} \frac{dv_{y_1}}{dt} = -\frac{b}{m_1}\left(v_{y_1}-v_{y_2}\right)-\frac{k}{m_1}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(y_1-y_2) \end{equation} \begin{equation} \frac{dv_{x_2}}{dt} = -\frac{b}{m_2}\left(v_{x_2}-v_{x_1}\right)-\frac{k}{m_2}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(x_2-x_1) \end{equation} \begin{equation} \frac{dv_{y_2}}{dt} = -\frac{b}{m_2}\left(v_{y_2}-v_{y_1}\right)-\frac{k}{m_2}\left(1-\frac{l_0}{\sqrt{(x_2-x_1)^2+(y_2-y_1)^2}}\right)(y_2-y_1) \end{equation} \begin{equation} \frac{dx_1}{dt} = v_{x_1} \end{equation} \begin{equation} \frac{dy_1}{dt} = v_{y_1} \end{equation} \begin{equation} \frac{dx_2}{dt} = v_{x_2} \end{equation} \begin{equation} \frac{dy_2}{dt} = v_{y_2} \end{equation} To solve the equations numerically, we will use the $m_1=1 kg$, $m_2 = 2 kg$, $l_0 = 0.5 m$, $k = 10 N/m$, $b = 0.6 Ns/m$ and $x_{1_0} = 0 m$, $x_{2_0} = 0 m$, $y_{1_0} = 1 m$, $y_{2_0} = -1 m$, $v_{x1_0} = -2 m/s$, $v_{x2_0} = 1 m$, $v_{y1_0} = 0 m$, $v_{y2_0} = 0 m$. ```python x01 = 0 y01= 1 x02 = 0 y02 = -1 vx01 = -2 vy01 = 0 vx02 = 1 vy02 = 0 x1= x01 y1 = y01 x2= x02 y2 = y02 vx1= vx01 vy1 = vy01 vx2= vx02 vy2 = vy02 r1 = np.array([x1,y1]) r2 = np.array([x2,y2]) k = 10 m1 = 1 m2 = 2 b = 0.6 l0 = 0.5 dt = 0.001 t = np.arange(0,5,dt) for i in t[1:]: dvx1dt = -b/m1*(vx1-vx2) -k/m1*(1-l0/np.sqrt((x2-x1)**2+(y2-y1)**2))*(x1-x2) dvx2dt = -b/m2*(vx2-vx1) -k/m2*(1-l0/np.sqrt((x2-x1)**2+(y2-y1)**2))*(x2-x1) dvy1dt = -b/m1*(vy1-vy2) -k/m1*(1-l0/np.sqrt((x2-x1)**2+(y2-y1)**2))*(y1-y2) dvy2dt = -b/m2*(vy2-vy1) -k/m2*(1-l0/np.sqrt((x2-x1)**2+(y2-y1)**2))*(y2-y1) dx1dt = vx1 dx2dt = vx2 dy1dt = vy1 dy2dt = vy2 x1 = x1 + dt*dx1dt x2 = x2 + dt*dx2dt y1 = y1 + dt*dy1dt y2 = y2 + dt*dy2dt vx1 = vx1 + dt*dvx1dt vx2 = vx2 + dt*dvx2dt vy1 = vy1 + dt*dvy1dt vy2 = vy2 + dt*dvy2dt r1 = np.vstack((r1,np.array([x1,y1]))) r2 = np.vstack((r2,np.array([x2,y2]))) springDampLength = np.sqrt((r1[:,0]-r2[:,0])**2+(r1[:,1]-r2[:,1])**2) plt.figure() plt.plot(t, springDampLength) plt.show() plt.figure() plt.plot(r1[:,0], r1[:,1],'r.') plt.plot(r2[:,0], r2[:,1],'b.') plt.plot((m1*r1[:,0]+m2*r2[:,0])/(m1+m2), (m1*r1[:,1]+m2*r2[:,1])/(m1+m2),'g.') plt.xlim(-2,2) plt.ylim(-2,2) plt.show() ``` ## 9) Simple muscle model The diagram below shows a simple muscle model. The spring in the left represents the tendinous tissues and the spring in the right represents the elastic properties of the muscle fibers. 
The damping is present to model the viscous properties of the muscle fibers, the element CE is the contractile element (force production) and the mass $m$ is the muscle mass. The length $L_{MT}$ is the length of the muscle plus the tendon. In our model $L_{MT}$ is constant, but it could be a function of the joint angle. <figure> The length of the tendon will be denoted by $l_T(t)$ and the muscle length, by $l_{m}(t)$. Both lengths are related by each other by the following expression: \begin{equation} l_t(t) + l_m(t) = L_{MT} \end{equation} The free-body diagram of the muscle mass is depicted below. <figure> The resultant force being applied in the muscle mass is: $\vec{\bf{F}} = -k_T(||\vec{\bf{r_m}}||-l_{t_0})\frac{\vec{\bf{r_m}}}{||\vec{\bf{r_m}}||} + b\frac{d(L_{MT}\hat{\bf{i}} - \vec{\bf{r_{m}}})}{dt} + k_m (||L_{MT}\hat{\bf{i}} - \vec{\bf{r_{m}}}||-l_{m_0})\frac{L_{MT}\hat{\bf{i}} - \vec{\bf{r_{m}}}}{||L_{MT}\hat{\bf{i}} - \vec{\bf{r_{m}}}||} +\vec{\bf{F}}{\bf{_{CE}}}(t)$ Since the model is unidimensional, we can assume that the force $\vec{\bf{F}}\bf{_{CE}}(t)$ is in the x direction, so the analysis will be done only in this direction. $F = -k_T(l_t-l_{t_0}) + b\frac{d(L_{MT} - l_t)}{dt} + k_m (l_m-l_{m_0}) + F_{CE}(t) = -k_T(l_t-l_{t_0}) -b\frac{dl_t}{dt} + k_m (L_{MT}-l_t-l_{m_0}) + F_{CE}(t) = -b\frac{dl_t}{dt}-(k_T+k_m)l_t+F_{CE}(t)+k_Tl_{t_0}+k_m(L_{MT}-l_{m_0})$ Applying the second Newton law: $m\frac{d^2l_t}{dt^2} = -b\frac{dl_t}{dt}-(k_T+k_m)l_t+F_{CE}(t)+k_Tl_{t_0}+k_m(L_{MT}-l_{m_0})$ To solve this equation, we must break the equation into two first-order differential equations: \begin{equation} \frac{dvt}{dt} = - \frac{b}{m}v_t - \frac{k_T+k_m}{m}l_t +\frac{F_{CE}(t)}{m} + \frac{k_T}{m}l_{t_0}+\frac{k_m}{m}(L_{MT}-l_{m_0}) \end{equation} \begin{equation} \frac{d l_t}{dt} = v_t \end{equation} Now, we can solve these equations by using some numerical method. To obtain the solution, we will use the damping factor of the muscle as $b = 10\,Ns/m$, the muscle mass is $m = 2 kg$, the stiffness of the tendon as $k_t=1000\,N/m$ and the elastic element of the muscle as $k_m=1500\,N/m$. The tendon-length is $L_{MT} = 0.35\,m$, and the tendon equilibrium length is $l_{t0} = 0.28\,m$ and the muscle fiber equilibrium length is $l_{m0} = 0.07\,m$. Both the tendon and the muscle fiber are at their equilibrium lengths and at rest. Also, we will model the force of the contractile element as a Heaviside step of $90\,N$ (90 N beginning at $t=0$), but normally it is modeled as a function of $l_m$ and $v_m$ having a neural activation signal as input. ```python m = 2 b = 10 km = 1500 kt = 1000 lt0 = 0.28 lm0 = 0.07 Lmt = 0.35 vt0 = 0 dt = 0.0001 t = np.arange(0, 10, dt) Fce = 90 lt = lt0 vt = vt0 ltp = np.array([lt0]) lmp = np.array([lm0]) Ft = np.array([0]) for i in range(1,len(t)): dvtdt = -b/m*vt-(kt+km)/m*lt + Fce/m + kt/m*lt0 +km/m*(Lmt-lm0) dltdt = vt vt = vt + dt*dvtdt lt = lt + dt*dltdt Ft = np.vstack((Ft,np.array(kt*(lt-lt0)))) ltp = np.vstack((ltp,np.array(lt))) lmp = np.vstack((lmp,np.array(Lmt - lt))) plt.figure() plt.plot(t,Ft) plt.xlabel('t') plt.ylabel('Tendon force (N)') plt.show() plt.figure() plt.plot(t,ltp) plt.plot(t,lmp) plt.xlabel('t') plt.ylabel('Length (m)') plt.show() ``` ## Problems Solve the problems 3.3.9, 3.3.20, 10.1.6, 12.1.6(a,b,c,d,f), 12.1.7, 12.1.10 (a,b) from Ruina and Pratap book ## References - Ruina A, Rudra P (2015) [Introduction to Statics and Dynamics](http://ruina.tam.cornell.edu/Book/index.html). Oxford University Press. 
- Nigg & Herzog (2006) [Biomechanics of the Musculo-skeletal System](https://books.google.com.br/books?id=hOIeAQAAIAAJ&dq=editions:ISBN0470017678). 3rd Edition. Wiley. ```python ```
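As an optional cross-check of the explicit Euler integration used for the muscle model above, the same pair of first-order equations can be handed to SciPy's general-purpose initial-value solver. This is only a minimal sketch: it assumes the parameter values stated in the text, and the name `muscle_rhs` is illustrative rather than part of the original code.

```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp

# Parameter values from the muscle model section
m, b, kt, km = 2.0, 10.0, 1000.0, 1500.0
Lmt, lt0, lm0, Fce = 0.35, 0.28, 0.07, 90.0

def muscle_rhs(t, state):
    """First-order form of the muscle equation: state = [lt, vt]."""
    lt, vt = state
    dvtdt = (-b*vt - (kt + km)*lt + Fce + kt*lt0 + km*(Lmt - lm0)) / m
    return [vt, dvtdt]

# Start from the equilibrium lengths, at rest, and integrate for 10 s
sol = solve_ivp(muscle_rhs, (0, 10), [lt0, 0.0], t_eval=np.linspace(0, 10, 5000))

plt.plot(sol.t, kt*(sol.y[0] - lt0))
plt.xlabel('t')
plt.ylabel('Tendon force (N)')
plt.show()
```

The resulting tendon force curve should agree closely with the Euler result above, which is a quick way to check the step size used there.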
bb4ef9feecc63c866fd214ea860ea42f35a7ece8
138,295
ipynb
Jupyter Notebook
notebooks/FBDParticles.ipynb
ahmadhassan01/bmc
3114b7d3ecd1f7c678fac0c04e8e139ac2898992
[ "MIT" ]
null
null
null
notebooks/FBDParticles.ipynb
ahmadhassan01/bmc
3114b7d3ecd1f7c678fac0c04e8e139ac2898992
[ "MIT" ]
null
null
null
notebooks/FBDParticles.ipynb
ahmadhassan01/bmc
3114b7d3ecd1f7c678fac0c04e8e139ac2898992
[ "MIT" ]
null
null
null
151.639254
26,696
0.840428
true
11,823
Qwen/Qwen-72B
1. YES 2. YES
0.763484
0.822189
0.627728
__label__eng_Latn
0.618575
0.296753
# Hawkes Process w/ tensorflow See [`arxiv:1507.02822`](https://arxiv.org/pdf/1507.02822.pdf). The conditional intensity function is parametrized as: $\mu(t) = \mu_0 + \alpha \sum_{t_i < t} e^{-\beta(t - t_i)}$ ```python %matplotlib inline ``` ```python import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tqdm import tqdm_notebook as tqdm ``` ```python def simulate(mu0, alpha, beta, tmax): """Simulate the Hawkes process by thinning.""" epsilon = 1e-6 events = [np.random.exponential(1.0 / mu0)] # TODO: reuse the sum when simulating while events[-1] < tmax: bound = intensity(mu0, alpha, beta, events, events[-1] + epsilon) tnew = events[-1] + np.random.exponential(1.0 / bound) u = np.random.uniform(0.0, bound) mu = intensity(mu0, alpha, beta, events, tnew) if u < mu and tnew > tmax: break elif u < mu: events += [tnew] return np.asarray(events) def intensity(mu0, alpha, beta, events, t): events = np.asarray(events).reshape(-1) t = np.asarray(t).reshape(-1) delta = t[:, None] - events[None, :] return mu0 + alpha * np.sum((delta > 0) * np.exp(-beta * delta), axis=1) ``` ```python mu0 = 10 alpha = 0.5 beta = 2 traces = [ simulate(mu0, alpha, beta, 40) for _ in range(100) ] ``` ```python min_len = min(len(t) for t in traces) max_len = max(len(t) for t in traces) #traces = [t[:min_len] for t in traces] ``` ```python padded = np.zeros((len(traces), max_len), dtype=np.float32) masks = np.ones((len(traces), max_len), dtype=np.float32) for idx, t in enumerate(traces): padded[idx, :len(t)] = t masks[idx, len(t):] = 0 ``` ```python time = np.linspace(0, 1.1 * traces[0].max(), 100) plt.eventplot(traces[0], lineoffsets=0, color='k', alpha=0.2) plt.plot(time, intensity(mu0, alpha, beta, traces[0], time) - mu0) ``` ## tensorflow ```python def unpacked(f): return lambda *ps: f(*[i for p in ps for i in p]) ``` ```python tf.reset_default_graph() events_ = tf.placeholder(tf.float32, shape=[None, None]) mask_ = tf.placeholder(tf.float32, shape=[None, None]) mu0_ = tf.Variable(np.random.uniform(0.5, 10.0), dtype=tf.float32) mu0_ = tf.nn.softplus(mu0_) alpha_ = tf.Variable(0.0, dtype=tf.float32) alpha_ = tf.nn.softplus(alpha_) beta_ = tf.Variable(np.random.uniform(1.0, 10.0), dtype=tf.float32) beta_ = tf.nn.softplus(beta_) # build loglikelihood tmax_ = tf.reduce_max(mask_ * events_, axis=1) # Notes: # use initial value prev_a to ensure A(0) = 0 # prevent overflows in scan, due to 0 in padding scan_events_ = mask_ * events_ + (1.0 - mask_) * tf.expand_dims(tmax_, axis=1) a_, _ = tf.scan( unpacked(lambda prev_a, prev_event, current_event: [ tf.exp(-beta_ * (current_event - prev_event)) * (1 + prev_a), current_event, ]), [tf.transpose(scan_events_)], [-tf.ones_like(tmax_), tf.zeros_like(tmax_)], ) a_ = tf.transpose(a_) ll_ = tf.reduce_mean( tf.reduce_sum(mask_ * tf.log(mu0_ + alpha_ * a_), axis=1) + -mu0_ * tmax_ + alpha_ / beta_ * tf.reduce_sum(mask_ * (tf.exp(-beta_ * (tf.expand_dims(tmax_, axis=1) - events_)) - 1), axis=1) ) optimizer_ = tf.train.RMSPropOptimizer(0.1) train_ = optimizer_.minimize(-ll_) ``` ```python %%time with tf.Session() as sess: sess.run(tf.global_variables_initializer()) optimizer_ = tf.contrib.opt.ScipyOptimizerInterface(-ll_) optimizer_.minimize(sess, {events_: padded, mask_: masks}) lbfgs_params = sess.run([mu0_, alpha_, beta_]) lbfgs_params = dict(zip(['mu0', 'alpha', 'beta'], lbfgs_params)) ``` INFO:tensorflow:Optimization terminated with: Message: b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH' Objective function value: -977.540161 Number of iterations: 18
Number of functions evaluations: 38 CPU times: user 23.8 s, sys: 17.9 s, total: 41.7 s Wall time: 21 s ```python %%time with tf.Session() as sess: sess.run(tf.global_variables_initializer()) status = tqdm(range(101)) for idx in status: _, ll = sess.run([train_, ll_], {events_: padded, mask_: masks}) rmsp_params = sess.run([mu0_, alpha_, beta_]) rmsp_params = dict(zip(['mu0', 'alpha', 'beta'], rmsp_params)) if idx % 50 == 0: status.write('ll: {ll} mu0: {mu0:.1f} alpha: {alpha:.1f} beta: {beta:.1f}'.format(ll=ll, **rmsp_params)) ``` ll: 928.3585815429688 mu0: 3.0 alpha: 0.9 beta: 1.2 ll: 974.2868041992188 mu0: 5.5 alpha: 0.7 beta: 1.1 ll: 976.4176025390625 mu0: 7.7 alpha: 0.6 beta: 1.2 CPU times: user 1min 13s, sys: 1min 11s, total: 2min 24s Wall time: 1min 11s # Mixture of Poisson Point Processes $$ \begin{align} P(N) &= \frac{\Lambda^N}{N!}e^{-\Lambda} \\ P(t_i) &= \frac{\lambda(t_i)}{\Lambda} \end{align} $$ $$ \begin{align} \lambda_1(t) &= \lambda_1 \\ \lambda_2(t) &= \gamma \lambda_2 e^{-\gamma t / T} \end{align} $$ $$ \begin{align} \Lambda_1 &= \lambda_1 T \\ \Lambda_2 &= \lambda_2 T (1 - e^{-\gamma}) \approx \lambda_2 T \end{align} $$ $$ \log L = \sum_i z_{i,1} \log \lambda_1(t_i) + \sum_i z_{i,2} \log \lambda_2(t_i) - \lambda_1 T - \lambda_2 T $$ $$ \begin{align} \frac{\partial}{\partial \lambda_1} \log L = \sum_i z_{i,1} \frac{1}{\lambda_1} - T \\ \frac{\partial}{\partial \lambda_2} \log L = \sum_i z_{i,2} \frac{1}{\lambda_2} - T \\ \frac{\partial}{\partial \gamma} \log L = \sum_i z_{i,2} \frac{1}{\gamma} - \sum_i z_{i,2} \frac{t_i}{T} \end{align} $$ $$ \begin{align} \lambda_1 &= \frac{\sum_i z_{i,1}}{T} \\ \lambda_2 &= \frac{\sum_i z_{i,2}}{T} \\ \gamma &= \frac{\sum_i z_{i,2} T}{\sum_i z_{i,2} t_i} \end{align} $$ $$ \begin{align} z_{i,1} = \frac{\lambda_1(t_i)}{\lambda_1(t_i) + \lambda_2(t_i)} \\ z_{i,2} = \frac{\lambda_2(t_i)}{\lambda_1(t_i) + \lambda_2(t_i)} \end{align} $$ ```python ```
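The update equations above translate directly into a small EM-style loop. The sketch below is one possible implementation under the assumptions already made (observation window $[0, T]$, responsibilities $z_{i,1}, z_{i,2}$, and $\Lambda_2 \approx \lambda_2 T$); the function and variable names are illustrative.

```python
import numpy as np

def fit_poisson_mixture(events, T, gamma0=5.0, n_iter=100):
    """EM for the two-component Poisson point process mixture derived above."""
    events = np.asarray(events, dtype=float)
    lam1 = lam2 = 0.5 * len(events) / T   # start from an even split of the events
    gamma = gamma0
    for _ in range(n_iter):
        # E-step: responsibilities from the two intensities
        rate1 = lam1 * np.ones_like(events)
        rate2 = gamma * lam2 * np.exp(-gamma * events / T)
        z2 = rate2 / (rate1 + rate2)
        z1 = 1.0 - z2
        # M-step: closed-form updates from the stationarity conditions above
        lam1 = z1.sum() / T
        lam2 = z2.sum() / T
        gamma = z2.sum() * T / (z2 * events).sum()
    return lam1, lam2, gamma
```

As usual with EM, the result can depend on the initial value of `gamma`, so it is worth trying a few starting points.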
44c2fb7e5161b418269fc98f3e9976250389f401
36,869
ipynb
Jupyter Notebook
BuildingBlocks/PoissonPointProcess.ipynb
chmp/misc-exp
2edc2ed598eb59f4ccb426e7a5c1a23343a6974b
[ "MIT" ]
6
2017-10-31T20:54:37.000Z
2020-10-23T19:03:00.000Z
BuildingBlocks/PoissonPointProcess.ipynb
chmp/misc-exp
2edc2ed598eb59f4ccb426e7a5c1a23343a6974b
[ "MIT" ]
7
2020-03-24T16:14:34.000Z
2021-03-18T20:51:37.000Z
BuildingBlocks/PoissonPointProcess.ipynb
chmp/misc-exp
2edc2ed598eb59f4ccb426e7a5c1a23343a6974b
[ "MIT" ]
1
2019-07-29T07:55:49.000Z
2019-07-29T07:55:49.000Z
90.587224
26,066
0.820228
true
2,068
Qwen/Qwen-72B
1. YES 2. YES
0.874077
0.737158
0.644333
__label__eng_Latn
0.205158
0.335333
<a href="https://colab.research.google.com/github/knazari/Advanced_Robotic_2022/blob/main/Week5/AR_w5_ProMP.ipynb" target="_parent"></a> ```python from __future__ import division import numpy as np import matplotlib.pyplot as plt import numpy.matlib as mat ``` **Probabilistic Movement Primitives** =================================== In this notebook we will write a class called **ProMP** which includes all the information required to define a ProMP as well as important methods which allow the manipulation of the MPs to show the potential of the probabilistic approach. <br/><br/> _______________________________________________________________________________ <br/><br/> For stroke-based movements, the mean of a ProMP is given by a weighted sum of $N$ Gaussians. These Gaussian basis functions have the following form: \begin{equation} b_i(t):=\mathrm{exp}\left(-\frac{(t-c_i)^2}{2h}\right),\;i=1,\ldots,N \tag{1} \end{equation} These functions are commonly normalized to improve regression \begin{equation} \phi_i(t):=\frac{b_i(t)}{\sum_{j=1}^Nb_j(t)} \tag{2} \end{equation} The function $z(t)$ allows temporal modulation, so that our basis functions are given by: \begin{equation} \phi_i(z)=\phi_i(z(t)) \tag{3} \end{equation} We work in discrete time, so we define a time step d$t$. By default, we work with a time lapse of 1s, therefore, if $T$ steps are needed, d$t=1/T$. We then introduce $\mathbf{\Phi}_t:=(\phi_1(t),\ldots,\phi_N(t))\in R^{N\times 1}$, which includes the evaluation of all basis functions in a time step. The **BasisFuncGauss** function creates and evaluates $N$ basis Gaussian functions with a bandwidth $h$. Input $f$ refers to the linear modulation factor, so that the modulation is given by $z(t):=ft$. Then, the Gaussian functions $\phi_i(t)$ are evaluated as $\phi_i(ft)$. $dt$ represents the time step. The functions have modes at $c_i$, evenly spread along $[0,1/f]$. The output of this function is a $T\times N$array for which every row corresponds to the vector $\mathbf{\Phi}_t$ for every time step. <br/><br/> <font color='red'>In the code cell below, write instructions that will normalize the basis functions in $F$ in line 19 (use eq(2)).</font> ```python def BasisFuncGauss(N, h, f, dt): """ Evaluates Gaussian basis functions in [0,1/f] N = number of basis functions h = bandwidth dt = time step f = modulation factor """ tf = 1/f; T = int(round(tf/dt+1)) Phi = np.zeros((T,N)) for z in range(0,T): t = z*dt phi = np.zeros((1, N)) for k in range(1,N+1): c = (k-1)/(N-1) phi[0,k-1] = np.exp(-(f*t - c)*(f*t - c)/(2*h)) Phi[z,:N] = phi[0, :N] # -- INSERT YOUR CODE HERE -- return Phi #[TxN] ``` In a ProMP, at time step $t$, a joint variable $q$ is modeled as: \begin{equation} q_t = \mathbf{\Phi}_t^{\mathrm{T}}\boldsymbol w + \epsilon_q \tag{4} \end{equation} where $\epsilon_q$ adds zero-mean Gaussian observation noise with variance $\Sigma_q$. It follows that the probability of observing $q_t$ is represented by: \begin{equation} p(q_t|\boldsymbol w)=\mathcal{N}\left(q_t\,\big|\,\mathbf{\Phi}^{\mathrm{T}}_t\boldsymbol w, \mathbf{\Sigma}_q\right) \tag{5} \end{equation} Since $\Sigma_q$ is the same for every time step, the values $q_t$ are taken from independent and identical distributions, i.i.d. 
Hence, the probability of observing a trajectory $\tau:=\{q_1,\ldots,q_T\}$ is given by: \begin{equation} p(\tau|\boldsymbol w):=\prod_{t=1}^Tp(q_t|\boldsymbol w) \tag{6} \end{equation} However, since parameters $\boldsymbol w$ are to be learnt from data, we also assume such parameters are taken from a distribution $\boldsymbol w\sim p(\boldsymbol w|\theta)=\mathcal{N}(\boldsymbol w|\mu_{\boldsymbol w},\mathbf{\Sigma}_{\boldsymbol w})$. We therefore would like to have a predictive distribution of $q_t$ which does not depend on $\boldsymbol w$, but on $\theta:=(\mu_{\boldsymbol w},\mathbf{\Sigma}_{\boldsymbol w})$. This is done by marginalizing $\boldsymbol w$ out in the distribution as follows: \begin{equation} p(q_t|\theta) = \int\mathcal{N}(q_t\,\big|\,\mathbf{\Phi}^{\mathrm{T}}_t\boldsymbol w,\,\Sigma_q)\mathcal{N}(\boldsymbol w\,\big|\,\mu_{\boldsymbol w},\,\Sigma_{\boldsymbol w})\mathrm{d}\boldsymbol w = \mathcal{N}(q_t\,\big|\,\mathbf{\Phi}^{\mathrm{T}}_t\mu_{\boldsymbol w},\,\Sigma_q+\mathbf{\Phi}^{\mathrm{T}}_t\mathbf{\Sigma}_{\boldsymbol w}\mathbf{\Phi}_t) \tag{7} \end{equation} In the *ProMP* class, a *ProMP* object is initialized by the number of basis functions $N$, the bandwidth in the basis functions $h$, the time step d$t$, the covariance of the original distributions on $q$, *covQ* ($\Sigma_q$), the mean of the weights, $Wm$ ($\mu_{\boldsymbol w}$), and the covariance of the weights, *covW* ($\mathbf{\Sigma}_{\boldsymbol w}$). <br/><br/> When initialized, the class also defines other convenience variables. *Phi* is the $T\times N$ matrix containing the basis functions evaluated for every time step and with a default modulation factor equal to 1; $Qm$ ($\mu_{q,t}$) contains the mean value of $q$ for every time step; and *cov* is the covariance of the marginal distribution on $q$ for every time step. <font color='red'>In the code cell below, write instructions that will calculate the covariance matrix $cov$ in line 13 (use eq(7)).</font> ```python def __init__(self, N, h, dt, covQ, Wm, covW): self.N = N self.h = h self.dt = dt self.covQ = covQ self.Wm = Wm #[Nx1] self.covW = covW #[NxN] self.Phi = BasisFuncGauss(N,h,1,dt) #[TxN] self.T,_ = self.Phi.shape self.Qm = np.matmul(self.Phi,self.Wm) #[Tx1] self.cov = np.zeros((self.T,1)) #[Tx1] for i in range(0,self.T): self.cov[i,0] = # -- write your code here -- ``` One of the big advantages of the ProMPs approach is the inclusion of new via-points. Say we wish to add a new via-point $q_{t^{*}}^{*}$ at time $t^{*}$ which is to be observed with an uncertainty of $\Sigma_{q^{*}}$. We simply have to obtain a posterior distribution over $\boldsymbol w$ by updating our prior distribution with the new information given by $q_{t^{*}}^{*}$ and $\Sigma_{q^{*}}$.
We thus apply Bayes theorem as follows: \begin{equation} p(\boldsymbol w|q_{t^{*}}^{*},\Sigma_{q^{*}})\propto\mathcal{N}\left(q_{t^{*}}^{*}\,\big|\,\mathbf{\Phi}^{\mathrm{T}}_{t^{*}}\boldsymbol w,\,\Sigma_{q^{*}}\right)p(\boldsymbol w) \tag{8} \end{equation} The result of applying the Bayes theorem leads to the following mean and variance of the posterior distribution: \begin{equation} \mu_{\boldsymbol w}^{[\mathrm{new}]} = \mu_{\boldsymbol w}+\mathbf{\Sigma}_{\boldsymbol w}\mathbf{\Phi}_{t^{*}}(\Sigma_q^{*}+\mathbf{\Phi}_{t^{*}}^{\mathrm{T}}\mathbf{\Sigma}_{\boldsymbol w}\mathbf{\Phi}_{t^{*}})^{-1}(q_{t^{*}}^{*}-\mathbf{\Phi}^{\mathrm{T}}_{t^{*}}\mu_{\boldsymbol w}) \tag{9} \end{equation} \begin{equation} \mathbf{\Sigma}_{\boldsymbol w}^{[\mathrm{new}]} = \mathbf{\Sigma}_{\boldsymbol w}-\mathbf{\Sigma}_{\boldsymbol w}\mathbf{\Phi}_{t^{*}}(\Sigma_q^{*}+\mathbf{\Phi}_{t^{*}}^{\mathrm{T}}\mathbf{\Sigma}_{\boldsymbol w}\mathbf{\Phi}_{t^{*}})^{-1}\mathbf{\Phi}_{t^{*}}^{\mathrm{T}}\mathbf{\Sigma}_{\boldsymbol w} \end{equation} The *condition* method conditions a ProMP to the new observation *Qstar* ($q^{*}_{t^{*}}$) at time *tstar* ($t^{*}$) with precision of observation *covQstar* ($\Sigma_{q^{*}}$). <font color='red'>Complete the below code cell to compute *Wm* and *covW* follwing Eq. (9). Use variable *Phit* ($\Phi_{t^{*}}$) which is defined inside the function..</font> ```python def condition(self, tstar, Qstar, covQstar): Phit = np.transpose(self.Phi[tstar-1:tstar,:]) self.Wm = # -- INSERT YOUR CODE HERE --- self.covW = # -- INSERT YOUR CODE HERE --- self.Qm = np.matmul(self.Phi,self.Wm) for i in range(0,self.T): self.cov[i,0] = self.covQ + np.matmul(np.array([self.Phi[i,:]]),np.matmul(self.covW,np.transpose(np.array([self.Phi[i,:]])))) ``` Temporal modulation is carried out by means of the previously defined phase function $z(t)$. Member function *modulate* allows temporal modulation of a ProMP. Only linear time modulation is supported by this class. Modulation is given by the *factor* ($f$) input variable which is originally set to 1 when a ProMP is initialized. The phase function is then given by $z(t)=ft$. Note that $dt$ is constant throughout the code, hence, temporal modulation modifies $T$. ```python def modulate(self, factor): self.Phi = BasisFuncGauss(self.N,self.h,factor,self.dt) #[TxN] self.T,_ = self.Phi.shape # new T self.Qm = np.matmul(self.Phi,self.Wm) #[Tx1] self.cov = np.zeros((self.T,1)) #[Tx1] for i in range(0,self.T): self.cov[i,0] = self.covQ + np.matmul(np.array([self.Phi[i,:]]),np.matmul(self.covW,np.transpose(np.array([self.Phi[i,:]])))) ``` The *printMP* method inside the *ProMP* class plots the mean *Qm* ($\mu_q$) against time. It also plots two standard deviations above and below $\mu_q$ in order to show the marginal distribution at every time step. The *name* value refers to the title of the plot. <font color='red'>To complete the code cell bellow, compute the $T\times 1$ arrays *upper* and *lower* which contain two standard deviations above and below the mean $\mu_q$, respectively, for every time step. Remember that we store the covariance at every time step in the member variable *cov*.</font> ```python def printMP(self, name): t = np.arange(0, self.T*self.dt, self.dt) upper = # -- INSERT YOUR CODE HERE --- lower = # -- INSERT YOUR CODE HERE --- plt.plot(t,self.Qm) plt.fill_between(t, upper[:,0], lower[:,0], color = 'k', alpha = 0.1) plt.title(name) plt.show() ``` We would like to be able to blend different MPs into a single movement. 
For example, given two ProMPs, each having an important via-point, we wish to blend them so that our new ProMP crosses both via-points. We can do this by activation and deactivation of the ProMPs. We define the activation functions $\alpha^{[1]}(t)$ and $\alpha^{[2]}(t)$, with values $\alpha_{t}^{[1]}, \alpha_{t}^{[2]}\in[0,1]$ at each time step $t=1,\ldots,T$. When the value of an activation function reaches 0, the corresponding ProMP is fully deactivated. Conversely, when the value reaches 1, the ProMP is fully activated. With these tools, the result of blending two ProMPs with predictive distributions $p^{[i]}(q_t)=\mathcal{N}(q_t\,|\,\mu_{q,t}^{[i]},\Sigma_t^{[i]})$, $i=1,2$, yields a new ProMP described by $p^{*}(q_t)=\mathcal{N}(q_t\,|\,\mu_{q,t}^{*},\Sigma_t^{*})$, where: \begin{equation} \Sigma_t^{*} = (\frac{\alpha_t^{[1]}}{\Sigma_t^{[1]}} +\frac{\alpha_t^{[2]}}{\Sigma_t^{[2]}})^{-1} \tag{10} \end{equation} \begin{equation} \mu_{q,t}^{*} = \Sigma_t^{*}(\frac{\alpha_t^{[1]}}{\Sigma_t^{[1]}}\mu_{q,t}^{[1]} + \frac{\alpha_t^{[2]}}{\Sigma_t^{[2]}}\mu_{q,t}^{[2]}) \end{equation} The **blend** function blends two **ProMP** objects, **MP1** and **MP2**, according to the activation functions **alpha1** ($\alpha_t^{[1]}$) and **alpha2** ($\alpha_t^{[2]}$). Vectors **alpha1** and **alpha2** contain the values of the activation functions for every time step. It is assumed that both ProMPs have the same value for **T**, hence, **alpha1** and **alpha2** must be arrays of **T** elements. <font color='red'>Using Eq. (10), complete the code below by writing the expressions for $cov12$ and $Qm12$, which correspond to the covariance $\Sigma_t^{*}$ and mean $\mu_{q,t}^{*}$ of the resulting ProMP at every time step. In order to be compatible with the $ProMP$ class, $cov12$ and $Qm12$ must be arrays with shape $(T,1)$.</font> ```python def blend(MP1,MP2,alpha1,alpha2): """ blends two MPs MP1, MP2 = ProMP objects to blend alpha1, alpha2 = activation functions for each respective MP [Tx1] """ a1 = np.transpose(np.array([alpha1])) #[Tx1] a2 = np.transpose(np.array([alpha2])) #[Tx1] cov12 = # -- INSERT YOUR CODE HERE --- Qm12 = # -- INSERT YOUR CODE HERE --- M12 = ProMP(MP1.N,MP1.h,MP1.dt,MP1.covQ,np.zeros((MP1.N,1)),np.zeros((MP1.N,MP1.N))) M12.cov = cov12 M12.Qm = Qm12 return M12 ``` Now that we have defined the methods, we can assemble the complete ProMP class. Insert the methods you wrote above in the corresponding sections.
```python class ProMP: """ ProMP class N = number of basis functions h = bandwidth of basis functions dt = time step covQ = variance of original p(Q) Wm = mean of weights [Nx1] covW = variance of weights [NxN] internal: Phi = basis functions evaluated for every step [TxN] Qm = mean of Q [Nx1] cov = variance of p(Q|Wm) for every step [Tx1] methods: condition (conditions an MP for a new viapoint) tstar = step of viapoint Qstar = value of Q of the viapoint covTstar = uncertainty of observation modulate (linear time modulation of the MP) factor = factor of linear modulation, z = factor*t printMP (plots an MP showing a standar deviation above and below) name = title of the plot """ def __init__(self, N, h, dt, covQ, Wm, covW): self.N = N self.h = h self.dt = dt self.covQ = covQ self.Wm = Wm #[Nx1] self.covW = covW #[NxN] self.Phi = BasisFuncGauss(N,h,1,dt) #[TxN] self.T,_ = self.Phi.shape self.Qm = np.matmul(self.Phi,self.Wm) #[Tx1] self.cov = np.zeros((self.T,1)) #[Tx1] for i in range(0,self.T): self.cov[i,0] = # -- write your code here -- def condition(self, tstar, Qstar, covQstar): Phit = np.transpose(self.Phi[tstar-1:tstar,:]) self.Wm = # -- INSERT YOUR CODE HERE --- self.covW = # -- INSERT YOUR CODE HERE --- self.Qm = np.matmul(self.Phi,self.Wm) for i in range(0,self.T): self.cov[i,0] = self.covQ + np.matmul(np.array([self.Phi[i,:]]),np.matmul(self.covW,np.transpose(np.array([self.Phi[i,:]])))) def modulate(self, factor): self.Phi = BasisFuncGauss(self.N,self.h,factor,self.dt) #[TxN] self.T,_ = self.Phi.shape # new T self.Qm = np.matmul(self.Phi,self.Wm) #[Tx1] self.cov = np.zeros((self.T,1)) #[Tx1] for i in range(0,self.T): self.cov[i,0] = self.covQ + np.matmul(np.array([self.Phi[i,:]]),np.matmul(self.covW,np.transpose(np.array([self.Phi[i,:]])))) def printMP(self, name): t = np.arange(0, self.T*self.dt, self.dt) upper = # -- INSERT YOUR CODE HERE --- lower = # -- INSERT YOUR CODE HERE --- plt.plot(t,self.Qm) plt.fill_between(t, upper[:,0], lower[:,0], color = 'k', alpha = 0.1) plt.title(name) plt.show() ``` Now we will use the samples of weights $\boldsymbol w$ obtained in 5 observations. So we assume that the learning process has been completed. With this data, a ProMP for a single joint variable $q$ is created. We will carry out the following experiments: * Condition the ProMP to a first via-point * Condition the resulting ProMP from the previous experiment to a second via-point * Obtain two ProMPs after conditioning the original MP to each via-point, separately, and blend the two results * Time-modulate the original ProMP to different phases Array $Wsamples$ includes the values of the $N = 15$ weights after 5 observations. $\mu_{\boldsymbol w}$ is computed from these observations and stored in $Wmean$. Similarly, $\mathbf{\Sigma}_{\boldsymbol w}$ is saved in $Wcov$. Using these values, three identical objects of the class $ProMP$ are defined. 
```python # 15 weights obtained from 5 observations Wsamples = np.array([[0.0141,0.0130,0.0038,0.0029,0.0143], [0.0044,0.2025,0.0178,0.0703,0.0143], [0.0388,0.1042,0.0531,0.0854,0.1479], [0.0025,0.0321,0.0235,0.0495,0.0086], [0.0810,0.0178,0.1500,0.0310,0.0843], [0.0658,0.1258,0.0488,0.1650,0.1398], [0.1059,0.0821,0.0116,0.2260,0.0531], [0.0032,0.0952,0.0305,0.2220,0.0025], [0.2031,0.1665,0.1430,0.0842,0.0656], [0.0491,0.1543,0.1232,0.1505,0.0049], [0.1914,0.0525,0.0783,0.0009,0.0292], [0.0584,0.1035,0.0830,0.0305,0.1452], [0.0157,0.1713,0.2550,0.0695,0.0051], [0.2106,0.0630,0.0942,0.0086,0.1512], [0.0959,0.2093,0.1388,0.0566,0.0819]]) Wmean = np.transpose([np.mean(Wsamples, axis=1)]) Wcov = np.cov(Wsamples) N,_ = Wsamples.shape T = 100 dt = 1/(T-1) # Define MPs MP1 = ProMP(N,0.02,dt,1e-6,Wmean,Wcov) MP2 = ProMP(N,0.02,dt,1e-6,Wmean,Wcov) MP3 = ProMP(N,0.02,dt,1e-6,Wmean,Wcov) ``` $MP2$ is conditioned to point $Qstar1$, at time step $T$, with an uncertainty of the observation equal to $covQstar1$. The desired via-point, the original mean and the new conditioned mean are then plotted. ```python # New desired point tstar1 = 100 Qstar1 = MP2.Qm[100-1]+ 0.03 covQstar1 = 1e-6 MP2.condition(tstar1,Qstar1,covQstar1) t = np.arange(0, 1+dt, dt) # Plot original mean, and mean of conditioned MP plt.figure() plt.plot(t,MP1.Qm, 'r--', t, MP2.Qm, 'k') plt.plot(tstar1/(MP1.T), Qstar1,'ro') plt.title('Coditioning for point 1') plt.show() # Print MP conditioned to point 1 MP2.printMP('Coditioning for point 1') ``` The resulting MP is now conditioned to a second via-point, $Qstar2$, at time step $tstar2$. The resulting ProMP must cross both points. ```python # Second desired point tstar2 = 30 Qstar2 = 0.11 covQstar2 = 1e-6 MP2.condition(tstar2,Qstar2,covQstar2) plt.figure() plt.plot(t,MP1.Qm, 'r--', t, MP2.Qm, 'k') plt.plot(tstar1/(MP1.T), Qstar1,'ro') plt.plot(tstar2/(MP1.T), Qstar2,'ro') plt.title('Coditioning for point 1 and point 2') plt.show() MP2.printMP('Coditioning for point 1 and point 2') ``` MPs $MP1$ and $MP2$ are conditioned to points 1 and 2, respectively. Then both are blended. The result of the blending must have both points as via-points. <font color='red'>To complete the code, come up with an expression for the $T-$element array ***alpha1*** which contains the values $\alpha_t^{[1]}$ of the activation function for **MP1**. Since tstar1 = 100 and tstar2 = 30, $\alpha_t^{[1]}$ must be equal (or close) to 1 at $t = 100$ but must be equal (or close) to 0 at $t = 30$, and there must be a smooth transition between these values. $\alpha_t^{[2]}$ is simply computed by $alpha2 = -alpha1+1$ in the following line of the code. For this, find suitable values for $A$, $B$ and $C$ in the following function: \begin{equation} \alpha^{[1]}(t):=A\mathrm{tanh}\left(B\left(\frac{t-C}{T}\right)\right)+A \end{equation} The figure below shows an example of such a function using $B=30$, but you can play with this value and see how it affects the result. 
</font> The program then plots the ProMPs with the following results: ```python # Blending: MP1 is conditioned to point 1, MP2 is conditioned to point 2, MP12 is the result of blending both MP1 = ProMP(N,0.02,dt,1e-6,Wmean,Wcov) MP2 = ProMP(N,0.02,dt,1e-6,Wmean,Wcov) MP1.condition(tstar1,Qstar1,covQstar1) MP2.condition(tstar2,Qstar2,covQstar2) MP1.printMP('MP1: Coditioning for point 1') MP2.printMP('MP2: Coditioning for point 2') # Activation functions for blending alpha1 = # -- INSERT YOUR CODE HERE -- alpha2 = -alpha1+1 MP12 = blend(MP1,MP2,alpha1,alpha2) MP12.printMP('blending of MP1 and MP2') ``` Given three initially identical MPs, MP1, MP2 and MP3, MP2 is modulated with a factor of 0.75, i.e. $z(t)=0.75t$, and MP3 with a factor 1.5, i.e. $z(t)=1.5t$. MP1 is left unmodified. ```python #time modulation MP1 = ProMP(N,0.02,dt,1e-6,Wmean,Wcov) MP2 = ProMP(N,0.02,dt,1e-6,Wmean,Wcov) MP3 = ProMP(N,0.02,dt,1e-6,Wmean,Wcov) MP2.modulate(0.75) MP3.modulate(1.5) t1 = np.arange(0, MP1.T*dt, dt) t2 = np.arange(0, MP2.T*dt, dt) t3 = np.arange(0, MP3.T*dt, dt) # Plot original mean, and mean of conditioned MP plt.figure() plt.plot(t1,MP1.Qm, 'k') plt.plot(t2,MP2.Qm, 'b') plt.plot(t3,MP3.Qm, 'g') plt.title('Time modulation') plt.show() MP1.printMP('modulation factor = 1') MP2.printMP('modulation factor = 0.75') MP3.printMP('modulation factor = 1.5') ```
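Finally, a quick way to inspect a learned ProMP is to draw individual weight vectors from $\mathcal{N}(\mu_{\boldsymbol w}, \mathbf{\Sigma}_{\boldsymbol w})$ and push them through the basis functions, as in Eq. (4). The sketch below reuses the `BasisFuncGauss`, `N`, `dt`, `Wmean` and `Wcov` objects defined above; it is only an illustrative addition, not one of the exercises.

```python
# Sample trajectories q = Phi w with w ~ N(Wmean, Wcov)
Phi = BasisFuncGauss(N, 0.02, 1, dt)                                  # [T x N]
t = np.arange(Phi.shape[0]) * dt
samples = np.random.multivariate_normal(Wmean[:, 0], Wcov, size=10)  # [10 x N]

plt.figure()
for w in samples:
    plt.plot(t, Phi @ w, color='k', alpha=0.3)
plt.plot(t, Phi @ Wmean[:, 0], color='r', label='mean')
plt.legend()
plt.title('Trajectories sampled from the ProMP')
plt.show()
```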
525fd00d2405218e19e94a5b524d4c2c7bf7a999
30,156
ipynb
Jupyter Notebook
Week5/AR_w5_ProMP.ipynb
knazari/Advanced_Robotic_2022
17b21c917dd9d7522b867ca8ef4c21c142bb05e3
[ "Apache-2.0" ]
null
null
null
Week5/AR_w5_ProMP.ipynb
knazari/Advanced_Robotic_2022
17b21c917dd9d7522b867ca8ef4c21c142bb05e3
[ "Apache-2.0" ]
null
null
null
Week5/AR_w5_ProMP.ipynb
knazari/Advanced_Robotic_2022
17b21c917dd9d7522b867ca8ef4c21c142bb05e3
[ "Apache-2.0" ]
null
null
null
46.465331
892
0.51794
true
6,468
Qwen/Qwen-72B
1. YES 2. YES
0.863392
0.7773
0.671114
__label__eng_Latn
0.94611
0.397554
# SymPy `SymPy` is a package for symbolic calculations in python, similar to *Mathematica*. It works with expressions containing symbols. ```python from sympy import * init_printing() ``` Symbols are basic bricks used to construct expressions. Each symbol has a name used for printing expressions. Objects of the class `Symbol` should be created and assigned to python variables in order to be used in expressions. The symbol name and the name of the variable to which this symbol is assigned are two independent things, and one may write `abc=Symbol('xyz')`. But then one has to write `abc` in input expressions, while `SymPy` will write `xyz` in output ones, producing unnecessary confusion. The python variable name should better be the same as the symbol name. In languages specifically designed for symbolic calculations, such as *Mathematica*, if a variable to which nothing has been assigned is used, it automatically means a symbol with the same name. Python has not been designed for symbolic calculations. If you use a variable to which nothing has been assigned, you will get an error message. Symbol objects have to be created explicitly. ```python x=Symbol('x') ``` ```python a=x**2-1 a ``` Several symbols can be defined at once. The string is split at spaces. ```python y,z=symbols('y z') ``` Let's substitute $y+1$ for $x$. ```python a.subs(x,y+1) ``` ## Polynomials and rational functions `SymPy` does not expand brackets automatically. The function `expand` is used for this. ```python a=(x+y-z)**6 a ``` ```python a=expand(a) a ``` Degree of the polynomial $a$ in $x$. ```python degree(a,x) ``` Let's collect terms with same power of $x$ together. ```python collect(a,x) ``` Any polynomial with integer coefficients can be factorized into polynomials with integer coefficients (which cannot be factorized further). There exist efficient algorithms to do this. ```python a=factor(a) a ``` `SymPy` does not automatically cancel ratios of polynomials by their greatest common divisor. The function `cancel` is used for this. ```python a=(x**3-y**3)/(x**2-y**2) a ``` ```python cancel(a) ``` `SymPy` does not automatically bring sums of rational expressions to common denominator. The function `together` is used for this. ```python a=y/(x-y)+x/(x+y) a ``` ```python together(a) ``` The function `simplify` tries to rewrite an expression *in a simplest form*. This concept is not well defined (different forms may be considered simplest in different contexts), and there exists no algorithm for such simplification. The function `simplify` works heuristically, and it is not possible to guess in advance what simplifications it will try. It is very convenient in interactive sessions in order to check if it will succeed in rewriting an expression in some reasonably good form. But it is not desirable to use it in programs. There one should better use more specialized functions which perform well defined expression transformations. ```python simplify(a) ``` Partial fraction decomposition with respect to $x$. ```python apart(a,x) ``` Let's substitute some values for the symbils $x$ and $y$. ```python a=a.subs({x:1,y:2}) a ``` And how much is it numerically? ```python a.n() ``` ## Elementary functions `SymPy` automatically applies simplifications of elementary functions which are correct everywhere. ```python sin(-x) ``` ```python cos(pi/4),tan(5*pi/6) ``` `SymPy` can work with floating point numbers having arbitrarily high precision. Here is $\pi$ with 100 significant digits. 
```python pi.n(200) ``` `E` is the base of natural logarithms. ```python log(1),log(E) ``` ```python exp(log(x)),log(exp(x)) ``` Why not $x$? Try $x=2\pi i$. ```python sqrt(0) ``` ```python sqrt(x)**4,sqrt(x**4) ``` Why not $x^2$? Try $x=i$. Symbols can have certain properties. E.g., they can be positive. Then `SymPy` can simplify square roots better. ```python p,q=symbols('p q',positive=True) sqrt(p**2) ``` ```python sqrt(12*x**2*y),sqrt(12*p**2*y) ``` Let the symbol $n$ be integer (`I` is the imaginary unit). ```python n=Symbol('n',integer=True) exp(2*pi*I*n) ``` The method `rewrite` tries to rewrite an expression in terms of a given function. ```python cos(x).rewrite(exp),exp(I*x).rewrite(cos) ``` ```python asin(x).rewrite(log) ``` The function `trigsimp` tries to rewrite a trigonometric expression *in a simplest form*. In programs it is better to use more specialized functions. ```python trigsimp(2*sin(x)**2+3*cos(x)**2) ``` The function `expand_trig` expands sines and cosines of sums and multiple angles. ```python expand_trig(sin(x-y)),expand_trig(sin(2*x)) ``` The inverse transformation, rewriting products and powers of sines and cosines into expressions linear in these functions, is needed more often. Suppose we work with a truncated Fourier series. ```python a1,a2,b1,b2=symbols('a1 a2 b1 b2') a=a1*cos(x)+a2*cos(2*x)+b1*sin(x)+b2*sin(2*x) a ``` We want to square it and get a truncated Fourier series again. ```python a=(a**2).rewrite(exp).expand().rewrite(cos).expand() a ``` ```python a.collect([cos(x),cos(2*x),cos(3*x),sin(x),sin(2*x),sin(3*x)]) ``` The function `expand_log` transforms logarithms of products and powers (of positive quantities) into sums of logarithms; `logcombine` performs the inverse transformation. ```python a=expand_log(log(p*q**2)) a ``` ```python logcombine(a) ``` The function `expand_power_exp` rewrites powers whose exponents are sums via products of powers. ```python expand_power_exp(x**(p+q)) ``` The function `expand_power_base` rewrites powers whose bases are products via products of powers. ```python expand_power_base((x*y)**n) ``` The function `powsimp` performs the inverse transformations. ```python powsimp(exp(x)*exp(2*y)),powsimp(x**n*y**n) ``` New symbolic functions can be introduced. They may have an arbitrary numbers of arguments. ```python f=Function('f') f(x)+f(x,y) ``` ## Expression structure Internally expressions are are trees. The function `srepr` returns a string representing this tree. ```python srepr(x+1) ``` "Add(Symbol('x'), Integer(1))" ```python srepr(x-1) ``` "Add(Symbol('x'), Integer(-1))" ```python srepr(x-y) ``` "Add(Symbol('x'), Mul(Integer(-1), Symbol('y')))" ```python srepr(2*x*y/3) ``` "Mul(Rational(2, 3), Symbol('x'), Symbol('y'))" ```python srepr(x/y) ``` "Mul(Symbol('x'), Pow(Symbol('y'), Integer(-1)))" One may use the functions `Add`, `Mul`, `Pow`, etc. instead of the binary operations `+`, `*`, `**`, etc. ```python Mul(x,Pow(y,-1)) ``` ```python srepr(f(x,y)) ``` "Function('f')(Symbol('x'), Symbol('y'))" The attribute `func` is the top-level function of an expression, and `args` is the list of its agruments. ```python a=2*x*y**2 a.func ``` sympy.core.mul.Mul ```python a.args ``` ```python for i in a.args: print(i) ``` 2 x y**2 The function `subs` substitutes an expression for a symbol. ```python a.subs(y,2) ``` It can perform substitutions for several symbols. To this end, one calls it with a list of tuples or a dictionary. 
```python a.subs([(x,pi),(y,2)]) ``` ```python a.subs({x:pi,y:2}) ``` It can substitute not only for a symbol but also for a subexpression - a function with arguments. ```python a=f(x)+f(y) a.subs(f(y),1) ``` ```python (2*x*y*z).subs(x*y,z) ``` ```python (x+x**2+x**3+x**4).subs(x**2,y) ``` Substitutions are performed sequentially. In this case, first $x$ is replaced by $y$ producing $y^3+y^2$; then $y$ is replaced by $x$ in this result. ```python a=x**2+y**3 a.subs([(x,y),(y,x)]) ``` Interchanging these substitutions leads to a different result. ```python a.subs([(y,x),(x,y)]) ``` But if one calls `subs` with the keyword parameter `simultaneous=True`, all substitutions are preformed simultaneously. In this way one can, e.g., interchange $x$ and $y$. ```python a.subs([(x,y),(y,x)],simultaneous=True) ``` A function can be replaced by another function. ```python g=Function('g') a=f(x)+f(y) a.subs(f,g) ``` The method `replace` searches for subexpressions matching a pattern (with wildcards) and replaces them by a given expression. ```python a=Wild('a') (f(x)+f(x+y)).replace(f(a),a**2) ``` ```python (f(x,x)+f(x,y)).replace(f(a,a),a**2) ``` ```python a=x**2+y**2 a.replace(x,x+1) ``` Only a complete subtree can match a pattern, not a subset of factors in a product or a smaller power in a larger one. ```python a=2*x*y*z a.replace(x*y,z) ``` ```python (x+x**2+x**3+x**4).replace(x**2,y) ``` ## Solving equations ```python a,b,c,d,e,f=symbols('a b c d e f') ``` An equation is represented by the function `Eq` with two arguments. The function `solve` returns a list of solutions. ```python solve(Eq(a*x,b),x) ``` Instead of equations, one may pass just expressions to `solve`; they mean equations `<expression>=0`. ```python solve(a*x+b,x) ``` A square equation has 2 solutions. ```python solve(a*x**2+b*x+c,x) ``` A system of linear equations. ```python solve([a*x+b*y-e,c*x+d*y-f],[x,y]) ``` The function `roots` returns roots of a polynomial together with their multiplicities. ```python roots(x**3-3*x+2,x) ``` The function `solve_poly_system` solves systems of polynomial equations by constructing their Gröbner bases. ```python p1=x**2+y**2-1 p2=4*x*y-1 solve_poly_system([p1,p2],x,y) ``` ## Series ```python exp(x).series(x,0,5) ``` A series can start from a negative power. ```python cot(x).series(x,n=5) ``` And even run over half-integer powers. ```python sqrt(x*(1-x)).series(x,n=5) ``` ```python log(gamma(1+x)).series(x,n=6).rewrite(zeta) ``` Let's prepare 3 series. ```python sinx=series(sin(x),x,0,8) sinx ``` ```python cosx=series(cos(x),x,n=8) cosx ``` ```python tanx=series(tan(x),x,n=8) tanx ``` Products and ratios of series are not calculated automatically, the function `series` should be applied to them. ```python series(tanx*cosx,n=8) ``` ```python series(sinx/cosx,n=8) ``` And this series should be equal to 1. But since `sinx` and `cosx` are known only with a limited accuracy, we obtain 1 with the same accuracy. ```python series(sinx**2+cosx**2,n=8) ``` Here the leading terms have canceled, and the result can be obtained only with a lower accuracy. ```python series((1-cosx)/x**2,n=6) ``` Series can be differentiated and integrated. ```python diff(sinx,x) ``` ```python integrate(cosx,x) ``` A series (starting from a small term) can be substituted for an expansion variable in another series. Here are $\sin(\tan(x))$ and $\tan(\sin(x))$. 
```python st=series(sinx.subs(x,tanx),n=8) st ``` ```python ts=series(tanx.subs(x,sinx),n=8) ts ``` ```python series(ts-st,n=8) ``` It is not possible to substitute a numerical value for the expansion variable in a series (and hence to plot it). To this end one has to remove the $\mathcal{O}$ term first, transforming a series into a polynomial. ```python sinx.removeO() ``` ## Derivatives ```python a=x*sin(x+y) diff(a,x) ``` ```python diff(a,y) ``` The second derivative in $x$ and the first one in $y$. ```python diff(a,x,2,y) ``` Expressions with undefined functions can be differentiated. ```python a=x*f(x**2) b=diff(a,x) b ``` What's this? ```python print(b) ``` 2*x**2*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1,), (x**2,)) + f(x**2) The function `Derivative` represents an unevaluated derivative. It can be evaluated by the method `doit`. ```python a=Derivative(sin(x),x) Eq(a,a.doit()) ``` ## Integrals ```python integrate(1/(x*(x**2-2)**2),x) ``` ```python integrate(1/(exp(x)+1),x) ``` ```python integrate(log(x),x) ``` ```python integrate(x*sin(x),x) ``` ```python integrate(x*exp(-x**2),x) ``` ```python a=integrate(x**x,x) a ``` This is an unevaluated integral. ```python print(a) ``` Integral(x**x, x) ```python a=Integral(sin(x),x) Eq(a,a.doit()) ``` Definite integrals. ```python integrate(sin(x),(x,0,pi)) ``` `oo` means $\infty$. ```python integrate(exp(-x**2),(x,0,oo)) ``` ```python integrate(log(x)/(1-x),(x,0,1)) ``` ## Summing series ```python summation(1/n**2,(n,1,oo)) ``` ```python summation((-1)**n/n**2,(n,1,oo)) ``` ```python summation(1/n**4,(n,1,oo)) ``` An unevaluated sum is denoted `Sum`. ```python a=Sum(x**n/factorial(n),(n,0,oo)) Eq(a,a.doit()) ``` ## Limits ```python limit((tan(sin(x))-sin(tan(x)))/x**7,x,0) ``` This limit is easy: just expand the numerator and the denominator into series. Things become more difficult if $x=0$ is an essential singularity. Let's calculate one-sided limits. ```python limit((tan(sin(x))-sin(tan(x)))/(x**7+exp(-1/x)),x,0,'+') ``` ```python limit((tan(sin(x))-sin(tan(x)))/(x**7+exp(-1/x)),x,0,'-') ``` ## Differential equations ```python t=Symbol('t') x=Function('x') p=Function('p') ``` First order. ```python dsolve(diff(x(t),t)+x(t),x(t)) ``` Second order. ```python dsolve(diff(x(t),t,2)+x(t),x(t)) ``` A system of first-order equations. ```python dsolve((diff(x(t),t)-p(t),diff(p(t),t)+x(t))) ``` ## Linear algebra ```python a,b,c,d,e,f=symbols('a b c d e f') ``` A matrix can be constructed from a list of lists. ```python M=Matrix([[a,b,c],[d,e,f]]) M ``` ```python M.shape ``` A row matrix. ```python Matrix([[1,2,3]]) ``` A column matrix. ```python Matrix([1,2,3]) ``` A matrix can be constructed from a function. ```python def g(i,j): return Rational(1,i+j+1) Matrix(3,3,g) ``` Or from an undefined function. ```python g=Function('g') M=Matrix(3,3,g) M ``` ```python M[1,2] ``` ```python M[1,2]=0 M ``` ```python M[2,:] ``` ```python M[:,1] ``` ```python M[0:2,1:3] ``` A unit matrix. ```python eye(3) ``` A zero matrix. ```python zeros(3) ``` ```python zeros(2,3) ``` A diagonal matrix. ```python diag(1,2,3) ``` ```python M=Matrix([[a,1],[0,a]]) diag(1,M,2) ``` Operations with matrices. 
```python A=Matrix([[a,b],[c,d]]) B=Matrix([[1,2],[3,4]]) A+B ``` ```python A*B,B*A ``` ```python A*B-B*A ``` ```python simplify(A**(-1)) ``` ```python det(A) ``` ### Eigenvalues and eigenvectors ```python x=Symbol('x',real=True) ``` ```python M=Matrix([[(1-x)**3*(3+x),4*x*(1-x**2),-2*(1-x**2)*(3-x)], [4*x*(1-x**2),-(1+x)**3*(3-x),2*(1-x**2)*(3+x)], [-2*(1-x**2)*(3-x),2*(1-x**2)*(3+x),16*x]]) M ``` ```python det(M) ``` This means that this matrix has a null space (this matrix transforms vectors from this subspace into 0). Let's find a basis of this subspace. ```python v=M.nullspace() len(v) ``` It is one-dimensional. ```python v=simplify(v[0]) v ``` Let's check. ```python simplify(M*v) ``` The eigenvalues and their multiplicities. ```python M.eigenvals() ``` If both eigenvalues and corresponding eigenvectors are needed, the method `eigenvects` is used. It returns a list of tuples. In each tuple the zeroth element is an eigenvalue, the first one is its multiplicity, and the last one is a list of corresponding basis eigenvectors (their number is the multiplicity). ```python v=M.eigenvects() len(v) ``` ```python for i in range(len(v)): v[i][2][0]=simplify(v[i][2][0]) v ``` Let's check. ```python for i in range(len(v)): z=M*v[i][2][0]-v[i][0]*v[i][2][0] pprint(simplify(z)) ``` ⎡0⎤ ⎢ ⎥ ⎢0⎥ ⎢ ⎥ ⎣0⎦ ⎡0⎤ ⎢ ⎥ ⎢0⎥ ⎢ ⎥ ⎣0⎦ ⎡0⎤ ⎢ ⎥ ⎢0⎥ ⎢ ⎥ ⎣0⎦ ### Jordan normal form ```python M=Matrix([[Rational(13,9),-Rational(2,9),Rational(1,3),Rational(4,9),Rational(2,3)], [-Rational(2,9),Rational(10,9),Rational(2,15),-Rational(2,9),-Rational(11,15)], [Rational(1,5),-Rational(2,5),Rational(41,25),-Rational(2,5),Rational(12,25)], [Rational(4,9),-Rational(2,9),Rational(14,15),Rational(13,9),-Rational(2,15)], [-Rational(4,15),Rational(8,15),Rational(12,25),Rational(8,15),Rational(34,25)]]) M ``` The method `M.jordan_form()` returns a couple of matrices, the transformation matrix $P$ and the Jordan form $J$: $M = P J P^{-1}$. ```python P,J=M.jordan_form() J ``` ```python P=simplify(P) P ``` Let's check. ```python Z=P*J*P**(-1)-M simplify(Z) ``` ## Plots `SymPy` uses `matplotlib`. However, it distributes $x$ points adaptively, not uniformly. ```python %matplotlib inline ``` A single function. ```python plot(sin(x)/x,(x,-10,10)) ``` Several functions. ```python plot(x,x**2,x**3,(x,0,2)) ``` Some additional plotting functions can be imported from `sympy.plotting`. ```python from sympy.plotting import (plot_parametric,plot_implicit, plot3d,plot3d_parametric_line, plot3d_parametric_surface) ``` A parametric plot - a Lissajous curve. ```python t=Symbol('t') plot_parametric(sin(2*t),cos(3*t),(t,0,2*pi), title='Lissajous',xlabel='x',ylabel='y') ``` An implicit plot - a circle. ```python plot_implicit(x**2+y**2-1,(x,-1,1),(y,-1,1)) ``` A surface. If it is not inline but in a separaye window, you can rotate it with your mouse. ```python plot3d(x*y,(x,-2,2),(y,-2,2)) ``` Several surfaces. ```python plot3d(x**2+y**2,x*y,(x,-2,2),(y,-2,2)) ``` A parametric space curve - a spiral. ```python a=0.1 plot3d_parametric_line(cos(t),sin(t),a*t,(t,0,4*pi)) ``` A parametric surface - a torus. ```python u,v=symbols('u v') a=0.3 plot3d_parametric_surface((1+a*cos(u))*cos(v), (1+a*cos(u))*sin(v),a*sin(u), (u,0,2*pi),(v,0,2*pi)) ```
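The plotting helpers above sample $x$ adaptively. When you want full control over the sampling, or need to evaluate an expression on `numpy` arrays, a symbolic expression can be turned into a fast numerical function with `lambdify`. The snippet below is a small illustrative addition rather than part of the gallery above.

```python
from sympy import Symbol, sin, lambdify
import numpy as np
import matplotlib.pyplot as plt

x = Symbol('x')
f = lambdify(x, sin(x)/x, 'numpy')   # numerical version of sin(x)/x

xs = np.linspace(-10, 10, 400)       # even sampling of [-10, 10] (x = 0 is not hit)
plt.plot(xs, f(xs))
plt.show()
```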
183af3da10348f4e367d312987ef698f255c7c88
712,436
ipynb
Jupyter Notebook
Gallery/sympy.ipynb
RonSheely/jupyter-notebooks
8d9b37503e59675d392f9b7c3a5a71a1c73a5373
[ "MIT" ]
null
null
null
Gallery/sympy.ipynb
RonSheely/jupyter-notebooks
8d9b37503e59675d392f9b7c3a5a71a1c73a5373
[ "MIT" ]
null
null
null
Gallery/sympy.ipynb
RonSheely/jupyter-notebooks
8d9b37503e59675d392f9b7c3a5a71a1c73a5373
[ "MIT" ]
null
null
null
132.570897
114,478
0.863905
true
5,422
Qwen/Qwen-72B
1. YES 2. YES
0.921922
0.853913
0.787241
__label__eng_Latn
0.955014
0.667357
<a id='top'></a> # Complex vibration modes Complex vibration modes arise in experimental research and numerical simulations when non proportional damping is adopted. In such cases a state space formulation of the second order differential dynamic equilibrium equation is the preferred way to address the problem. This notebook is inspired by one of [Pete Avitabile's Modal Space articles](http://macl.caeds.eng.uml.edu/umlspace/mspace.html), namely the one discussing the difference between [complex modes and real normal modes](http://sem.org/ArtDownLoad/msmj02.pdf). Additional information about state space formulation in structural dynamics can be found for example [here](http://dspace.mit.edu/bitstream/handle/1721.1/38777/35332523.pdf). ## Table of contents [Preamble](#Preamble) [Dynamic equilibrium equation](#Dynamic-equilibrium-equation) [State space formulation](#State-space-formulation) [Dynamic system setup](#Dynamic-system-setup) [Undamped system](#Undamped-system) [Proportionally damped system](#Proportionally-damped-system) [Non proportionally damped system](#Non-proportionally-damped-system) [Conclusions](#Conclusions) [Odds and ends](#Odds-and-ends) ## Preamble We will start by setting up the computational environment for this notebook. We will need numpy and scipy for the numerical simulations and matplotlib for the plots: ```python import sys import numpy as np import scipy as sp import matplotlib as mpl print('System: {}'.format(sys.version)) print('numpy version: {}'.format(np.__version__)) print('scipy version: {}'.format(sp.__version__)) print('matplotlib version: {}'.format(mpl.__version__)) ``` System: 3.6.1 |Anaconda 4.4.0 (64-bit)| (default, May 11 2017, 13:25:24) [MSC v.1900 64 bit (AMD64)] numpy version: 1.12.1 scipy version: 0.19.0 matplotlib version: 2.0.2 We will also need a couple of specific modules and a little "IPython magic" to show the plots: ```python from numpy import linalg as LA import matplotlib.pyplot as plt %matplotlib inline ``` [Back to top](#top) ## Dynamic equilibrium equation In structural dynamics the second order differential dynamic equilibrium equation can be written in terms of generalized coordinates (d[isplacement]) and their first (v[elocity]) and second (a[cceleration]) time derivatives: \begin{equation} \mathbf{M} \times \mathbf{a(t)} + \mathbf{C} \times \mathbf{v(t)} + \mathbf{K} \times \mathbf{d(t)} = \mathbf{F(t)} \end{equation} where: > $\mathbf{M}$ is the mass matrix > $\mathbf{C}$ is the damping matrix > $\mathbf{K}$ is the stiffness matrix > $\mathbf{a(t)}$ is the acceleration vector > $\mathbf{v(t)}$ is the velocity vector > $\mathbf{d(t)}$ is the displacement vector > $\mathbf{F(t)}$ is the force input vector All these matrices are of size $NDOF \times NDOF$, where $NDOF$ is the number of generalized degrees of freedom of the dynamic system.
[Back to top](#top) ## State space formulation In a [state space formulation](http://en.wikipedia.org/wiki/State-space_representation) the second order differential dynamic equilibrium equation is turned into a system of first order differential dynamic equilibrium equations: \begin{equation} \begin{matrix} \mathbf{\dot{x}(t)} = \mathbf{A} \cdot \mathbf{x(t)} + \mathbf{B} \cdot \mathbf{u(t)} \\ \mathbf{y(t)} = \mathbf{C} \cdot \mathbf{x(t)} + \mathbf{D} \cdot \mathbf{u(t)} \end{matrix} \end{equation} where > $\mathbf{A}$ is the system matrix > $\mathbf{B}$ is the imput matrix > $\mathbf{C}$ is the output matrix > $\mathbf{D}$ is the feedthrough matrix > $\mathbf{x(t)}$ is the state vector > $\mathbf{y(t)}$ is the output vector > $\mathbf{u(t)}$ is the input vector The state vector, of size $2 \times NDOF$ by $1$, has the following form: \begin{equation} \mathbf{x(t)} = \left[ \begin{matrix} \mathbf{u(t)} \\ \mathbf{\dot{u}(t)} \end{matrix} \right] \end{equation} The system matrix, of size $2 \times NDOF$ by $2 \times NDOF$, is built using the M, C and K matrices: \begin{equation*} \mathbf{A} = \left[ \begin{matrix} \mathbf{0} & \mathbf{I} \\ -\mathbf{M}^{-1} \cdot \mathbf{K} & -\mathbf{M}^{-1} \cdot \mathbf{C} \end{matrix} \right] \end{equation*} The loading matrix, of size $2 \times NDOF$ by $1$, is composed of 0's and 1's. [Back to top](#top) ## Dynamic system setup In this example we will use the folowing mass and stiffness matrices: ```python MM = np.matrix(np.diag([2,3])) print(MM) ``` [[2 0] [0 3]] ```python KK = np.matrix([[2, -1],[-1, 1]]) print(KK) ``` [[ 2 -1] [-1 1]] Let us perform the eigenanalysis of the (undamped) second order differential dynamic equilibrium equation for later comparison of results: ```python W2, F1 = LA.eig(LA.solve(MM,KK)) # eigenanalysis ix = np.argsort(np.absolute(W2)) # sort eigenvalues in ascending order W2 = W2[ix] # sorted eigenvalues F1 = F1[:,ix] # sorted eigenvectors print(np.round_(W2, 4)) print(np.round_(F1, 4)) ``` [ 0.1396 1.1937] [[ 0.5025 0.9325] [ 0.8646 -0.3613]] The angular frequencies are computed as the square root of the eigenvalues: ```python print(np.sqrt(W2)) ``` [ 0.37365812 1.09257171] The modal vectors, the columns of the modal matrix, have unit norm: ```python print(LA.norm(F1, axis=0)) ``` [ 1. 1.] [Back to top](#top) ## Undamped system In the undamped system, the damping matrix is all zeros: ```python C0 = np.matrix(np.zeros_like(MM)) print(C0) ``` [[0 0] [0 0]] The system matrix is the following: ```python A = np.bmat([[np.zeros_like(MM), np.identity(MM.shape[0])], [LA.solve(-MM,KK), LA.solve(-MM,C0)]]) print(A) ``` [[ 0. 0. 1. 0. ] [ 0. 0. 0. 1. ] [-1. 0.5 0. 0. ] [ 0.33333333 -0.33333333 0. 0. ]] Performing the eigenanalysis on this matrix yields the following complex valued results: ```python w0, v0 = LA.eig(A) ix = np.argsort(np.absolute(w0)) w0 = w0[ix] v0 = v0[:,ix] print(np.round_(w0, 4)) print(np.round_(v0, 4)) ``` [ 0.+0.3737j 0.-0.3737j -0.+1.0926j -0.-1.0926j] [[ 0.4707+0.j 0.4707-0.j -0.0000-0.6296j -0.0000+0.6296j] [ 0.8099+0.j 0.8099-0.j 0.0000+0.2439j 0.0000-0.2439j] [-0.0000+0.1759j -0.0000-0.1759j 0.6878+0.j 0.6878-0.j ] [-0.0000+0.3026j -0.0000-0.3026j -0.2665-0.j -0.2665+0.j ]] As we can see, the eigenvalues come in complex conjugate pairs. 
Therefore we can take for instance only the ones in the [upper half-plane](http://en.wikipedia.org/wiki/Upper_half-plane): ```python print(np.round_(w0[[0,2]], 4)) ``` [ 0.+0.3737j -0.+1.0926j] In this case, since damping is zero, the real part of the complex eigenvalues is also zero (apart from [round-off errors](http://en.wikipedia.org/wiki/Round-off_error)) and the imaginary part is equal to the angular frequency computed previously for the dynamic system: ```python print(w0[[0,2]].imag) ``` [ 0.37365812 1.09257171] The columns of the modal matrix, the modal vectors, also come in conjugate pairs. Each vector has unit norm as in the dynamic system: ```python print(LA.norm(v0[:,[0,2]], axis=0)) ``` [ 1. 1.] Moreover, we can check that the modal matrix is composed of four blocks, each with $NDOF \times NDOF$ dimension. Some column reordering is necessary in order to match both modal matrices: ```python AA = v0[:2,[0,2]] AB = AA.conjugate() BA = np.multiply(AA,w0[[0,2]]) BB = BA.conjugate() v0_new = np.bmat([[AA, AB], [BA, BB]]) print(np.round_(v0_new[:,[0,2,1,3]], 4)) ``` [[ 0.4707+0.j 0.4707-0.j -0.0000-0.6296j -0.0000+0.6296j] [ 0.8099+0.j 0.8099-0.j 0.0000+0.2439j 0.0000-0.2439j] [-0.0000+0.1759j -0.0000-0.1759j 0.6878+0.j 0.6878-0.j ] [ 0.0000+0.3026j 0.0000-0.3026j -0.2665+0.j -0.2665-0.j ]] To help visualize the complex valued modal vectors we will do a polar plot of the corresponding amplitudes and angles: ```python fig, ax = plt.subplots(1, 2, subplot_kw=dict(polar=True)) for mode in range(2): ax[mode].set_title('Mode #{}'.format(mode+1)) for dof in range(2): r = np.array([0, np.absolute(v0[dof,2*mode])]) t = np.array([0, np.angle(v0[dof,2*mode])]) ax[mode].plot(t, r, 'o-', label='DOF #{}'.format(dof+1)) plt.legend(loc='lower left', bbox_to_anchor=(1., 0.)) plt.show() ``` [Back to top](#top) ## Proportionally damped system In a proportionally damped system, the damping matrix is proportional to the mass and stiffness matrices: \begin{equation*} \mathbf{C} = \alpha \times \mathbf{M} + \beta \times \mathbf{K} \end{equation*} Let us assume $\alpha$ to be 0.1 and $\beta$ to be 0.1. This yields the following damping matrix: ```python C1 = 0.1*MM+0.1*KK print(C1) ``` [[ 0.4 -0.1] [-0.1 0.4]] This damping matrix is orthogonal because the mass and stiffness matrices are also orthogonal: ```python print(np.round_(F1.T*C1*F1, 4)) ``` [[ 0.3131 0. ] [ 0. 0.4674]] The system matrix is the following: ```python A = np.bmat([[np.zeros_like(MM), np.identity(MM.shape[0])], [LA.solve(-MM,KK), LA.solve(-MM,C1)]]) print(A) ``` [[ 0. 0. 1. 0. ] [ 0. 0. 0. 1. ] [-1. 0.5 -0.2 0.05 ] [ 0.33333333 -0.33333333 0.03333333 -0.13333333]] The eigenanalysis yields the eigenvalues and eigenvectors: ```python w1, v1 = LA.eig(A) ix = np.argsort(np.absolute(w1)) w1 = w1[ix] v1 = v1[:,ix] print(np.round_(w1, 4)) print(np.round_(v1, 4)) ``` [-0.0570+0.3693j -0.0570-0.3693j -0.1097+1.0871j -0.1097-1.0871j] [[-0.4707+0.j -0.4707-0.j 0.0632+0.6264j 0.0632-0.6264j] [-0.8099+0.j -0.8099-0.j -0.0245-0.2427j -0.0245+0.2427j] [ 0.0268-0.1738j 0.0268+0.1738j -0.6878+0.j -0.6878-0.j ] [ 0.0461-0.2991j 0.0461+0.2991j 0.2665-0.j 0.2665+0.j ]] As we can see, the eigenvalues come in complex conjugate pairs. 
Let us take only the ones in the [upper half-plane](http://en.wikipedia.org/wiki/Upper_half-plane): ```python print(np.round_(w1[[0,2]], 4)) ``` [-0.0570+0.3693j -0.1097+1.0871j] These complex eigenvalues can be decomposed into angular frequency and damping coefficient: ```python zw = -w1.real # damping coefficient time angular frequency wD = w1.imag # damped angular frequency zn = 1./np.sqrt(1.+(wD/-zw)**2) # the minus sign is formally correct! wn = zw/zn # undamped angular frequency print('Angular frequency: {}'.format(wn[[0,2]])) print('Damping coefficient: {}'.format(zn[[0,2]])) ``` Angular frequency: [ 0.37365812 1.09257171] Damping coefficient: [ 0.15249507 0.10039217] The columns of the modal matrix, the modal vectors, also come in conjugate pairs, each vector having unit norm: ```python print(LA.norm(v1[:,[0,2]], axis=0)) ``` [ 1. 1.] Moreover, the modal matrix is composed of four blocks, each with $NDOF \times NDOF$ dimension. Some column reordering is necessary in order to match both modal matrices: ```python AA = v1[:2,[0,2]] AB = AA.conjugate() BA = np.multiply(AA,w1[[0,2]]) BB = BA.conjugate() v1_new = np.bmat([[AA, AB], [BA, BB]]) print(np.round_(v1_new[:,[0,2,1,3]], 4)) ``` [[-0.4707+0.j -0.4707-0.j 0.0632+0.6264j 0.0632-0.6264j] [-0.8099+0.j -0.8099-0.j -0.0245-0.2427j -0.0245+0.2427j] [ 0.0268-0.1738j 0.0268+0.1738j -0.6878+0.j -0.6878-0.j ] [ 0.0461-0.2991j 0.0461+0.2991j 0.2665-0.j 0.2665+0.j ]] We will visualize again the complex valued modal vectors with a polar plot of the corresponding amplitudes and angles: ```python fig, ax = plt.subplots(1, 2, subplot_kw=dict(polar=True)) for mode in range(2): ax[mode].set_title('Mode #{}'.format(mode+1)) for dof in range(2): r = np.array([0, np.absolute(v1[dof,2*mode])]) t = np.array([0, np.angle(v1[dof,2*mode])]) ax[mode].plot(t, r, 'o-', label='DOF #{}'.format(dof+1)) plt.legend(loc='lower left', bbox_to_anchor=(1., 0.)) plt.show() ``` [Back to top](#top) ## Non proportionally damped system In non proportionally damped systems the damping matrix is not proportional neither to the mass matrix nor the stiffness matrix. Let us consider the following damping matrix: ```python C2 = np.matrix([[0.4, -0.1],[-0.1, 0.1]]) print(C2) ``` [[ 0.4 -0.1] [-0.1 0.1]] Non proportinal damping carries the fact that the damping matrix is not orthogonal anymore: ```python print(np.round_(F1.T*C2*F1, 4)) ``` [[ 0.0889 0.0937] [ 0.0937 0.4282]] The system matrix is the following: ```python A = np.bmat([[np.zeros_like(MM), np.identity(MM.shape[0])], [LA.solve(-MM,KK), LA.solve(-MM,C2)]]) print(A) ``` [[ 0. 0. 1. 0. ] [ 0. 0. 0. 1. ] [-1. 0.5 -0.2 0.05 ] [ 0.33333333 -0.33333333 0.03333333 -0.03333333]] The eigenanalysis yields the eigenvalues and eigenvectors: ```python w2, v2 = LA.eig(A) ix = np.argsort(np.absolute(w2)) w2 = w2[ix] v2 = v2[:,ix] print(np.round_(w2, 4)) print(np.round_(v2, 4)) ``` [-0.0162+0.3736j -0.0162-0.3736j -0.1005+1.0872j -0.1005-1.0872j] [[ 0.4703-0.0167j 0.4703+0.0167j 0.0580+0.6276j 0.0580-0.6276j] [ 0.8099+0.j 0.8099-0.j -0.0473-0.2382j -0.0473+0.2382j] [-0.0014+0.1759j -0.0014-0.1759j -0.6881+0.j -0.6881-0.j ] [-0.0131+0.3026j -0.0131-0.3026j 0.2638-0.0275j 0.2638+0.0275j]] As we can see, the eigenvalues come in complex conjugate pairs. 
Again, let us take only the ones in the [upper half-plane](http://en.wikipedia.org/wiki/Upper_half-plane): ```python print(np.round_(w2[[0,2]], 4)) ``` [-0.0162+0.3736j -0.1005+1.0872j] These complex eigenvalues can be decomposed into angular frequency and damping coefficient much as in the proportional damping case: ```python zw = -w2.real # damping coefficient times angular frequency wD = w2.imag # damped angular frequency zn = 1./np.sqrt(1.+(wD/-zw)**2) # the minus sign is formally correct! wn = zw/zn # undamped angular frequency print('Angular frequency: {}'.format(wn[[0,2]])) print('Damping coefficient: {}'.format(zn[[0,2]])) ``` Angular frequency: [ 0.37392474 1.09179266] Damping coefficient: [ 0.04326203 0.09204121] Again, the columns of the modal matrix, the modal vectors, come in conjugate pairs, and each vector has unit norm: ```python print(LA.norm(v2[:,[0,2]], axis=0)) ``` [ 1. 1.] Moreover, the modal matrix is composed of four blocks, each with $NDOF \times NDOF$ dimension. Some column reordering is necessary in order to match both modal matrices: ```python AA = v2[:2,[0,2]] AB = AA.conjugate() BA = np.multiply(AA,w2[[0,2]]) BB = BA.conjugate() v2_new = np.bmat([[AA, AB], [BA, BB]]) print(np.round_(v2_new[:,[0,2,1,3]], 4)) ``` [[ 0.4703-0.0167j 0.4703+0.0167j 0.0580+0.6276j 0.0580-0.6276j] [ 0.8099+0.j 0.8099-0.j -0.0473-0.2382j -0.0473+0.2382j] [-0.0014+0.1759j -0.0014-0.1759j -0.6881-0.j -0.6881+0.j ] [-0.0131+0.3026j -0.0131-0.3026j 0.2638-0.0275j 0.2638+0.0275j]] Once more we will visualize the complex valued modal vectors through a polar plot of the corresponding amplitudes and angles: ```python fig, ax = plt.subplots(1, 2, subplot_kw=dict(polar=True)) for mode in range(2): ax[mode].set_title('Mode #{}'.format(mode+1)) for dof in range(2): r = np.array([0, np.absolute(v2[dof,2*mode])]) t = np.array([0, np.angle(v2[dof,2*mode])]) ax[mode].plot(t, r, 'o-', label='DOF #{}'.format(dof+1)) plt.legend(loc='lower left', bbox_to_anchor=(1., 0.)) plt.show() ``` [Back to top](#top) ## Conclusions Several conclusions can be drawn from this very simple example. First of all, damping changes the vibration frequencies and mode shapes. Furthermore, the polar plots show clearly that: 1. In the undamped system the vibration modes are fully "orthogonal"; 2. In the proportionally damped system the vibration modes are no longer "orthogonal"; 3. In the non proportionally damped system each DOF has a different phase angle. [Back to top](#top) ## Odds and ends This notebook was created by Paulo Xavier Candeias. [Back to top](#top)
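The frequency/damping extraction used in the two damped cases above is repeated verbatim, so it can be convenient to wrap it in a small helper; this is just a suggested refactoring, and the name `poles_to_modal` is illustrative.

```python
import numpy as np

def poles_to_modal(w):
    """Convert state space eigenvalues (poles) into undamped angular
    frequencies and damping coefficients."""
    w = np.asarray(w)
    zw = -w.real                  # damping coefficient times angular frequency
    wD = w.imag                   # damped angular frequency
    wn = np.sqrt(zw**2 + wD**2)   # undamped angular frequency
    zn = zw/wn                    # damping coefficient
    return wn, zn
```

Calling `poles_to_modal(w2[[0,2]])` should reproduce the angular frequencies and damping coefficients printed in the non proportionally damped case.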
5873ed68b54a069907da3f19238679802104a6ce
192,412
ipynb
Jupyter Notebook
complex_vibration_modes_SS.ipynb
pxcandeias/py-notebooks
1557bfeb61f301c075be88fa35936b9aa3964862
[ "MIT" ]
3
2016-11-12T10:35:42.000Z
2021-04-09T00:20:37.000Z
complex_vibration_modes_SS.ipynb
pxcandeias/py-notebooks
1557bfeb61f301c075be88fa35936b9aa3964862
[ "MIT" ]
null
null
null
complex_vibration_modes_SS.ipynb
pxcandeias/py-notebooks
1557bfeb61f301c075be88fa35936b9aa3964862
[ "MIT" ]
null
null
null
175.398359
55,000
0.890064
true
5,636
Qwen/Qwen-72B
1. YES 2. YES
0.884039
0.810479
0.716495
__label__eng_Latn
0.839009
0.50299
$\nabla ^2 \mathbf{E} + k^2 \mathbf{E} = 0$ $\nabla ^2 \mathbf{H} + k^2 \mathbf{H} = 0$ $ k = \sqrt{\omega ^2 \mu \varepsilon - i \omega \mu \sigma } $ $k_{ground} \simeq (1-i) \sqrt{ \frac{\omega \mu \sigma}{2} }$ $k_{air} \simeq \omega \sqrt{ \mu_0 \varepsilon_0}$ $\begin{split}\left(\begin{matrix} E_{x} \\ E_{y} \end{matrix} \right) = \left(\begin{matrix} \hat{Z}_{xx} & \hat{Z}_{xy} \\ \hat{Z}_{yx} & \hat{Z}_{yy} \end{matrix} \right) \left(\begin{matrix} H_x \\ H_y \end{matrix} \right)\end{split}$ $\begin{split} H_z = \left(\begin{matrix} T_{zx} & T_{zy} \end{matrix} \right) \left(\begin{matrix} H_x \\ H_y \end{matrix} \right)\end{split}$ $\delta = \sqrt{ \frac{2}{\omega \mu \sigma}} \simeq \frac{500}{\sqrt{\sigma f}}$ $ \phi_d = \sum \left(\frac{d^{obs}_i-d^{pred}_i}{\varepsilon} \right) ^2 $ $\phi = \phi_d +\beta \phi_m$ ```latex %%latex \begin{align} \curl \vf{E} + i \omega \vf{B} &= 0 \\ \curl \frac{1}{\mu} \vf{B} - \sigma \vf{E} &= \vf{s}_E \end{align} ``` \begin{align} \curl \vf{E} + i \omega \vf{B} &= 0 \\ \curl \frac{1}{\mu} \vf{B} - \sigma \vf{E} &= \vf{s}_E \end{align} ```python ```
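As a small numerical illustration of the skin depth expression above (a hedged sketch that is not part of the original poster material; the conductivity value is made up), the approximation $\delta \simeq 500/\sqrt{\sigma f}$ can be compared against the exact formula $\delta = \sqrt{2/(\omega \mu \sigma)}$:

```python
import numpy as np

mu_0 = 4e-7 * np.pi          # permeability of free space [H/m]
sigma = 0.01                 # illustrative ground conductivity [S/m]
freqs = np.array([0.01, 0.1, 1.0, 10.0, 100.0])   # frequencies [Hz]

delta_exact = np.sqrt(2.0 / (2.0 * np.pi * freqs * mu_0 * sigma))
delta_approx = 500.0 / np.sqrt(sigma * freqs)

for f, d_e, d_a in zip(freqs, delta_exact, delta_approx):
    print('f = {:7.2f} Hz : delta = {:9.0f} m, approx = {:9.0f} m'.format(f, d_e, d_a))
```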
d96da85269151a311e94904a95d49599115a9e09
3,065
ipynb
Jupyter Notebook
SciPy2016/MTwork/Poster/.ipynb_checkpoints/Equations-checkpoint.ipynb
simpeg/simpegExamples
38b8064fb854d809f72b7f1ca8b8096bca696af1
[ "MIT" ]
1
2021-08-07T13:46:54.000Z
2021-08-07T13:46:54.000Z
SciPy2016/MTwork/Poster/.ipynb_checkpoints/Equations-checkpoint.ipynb
simpeg/simpegExamples
38b8064fb854d809f72b7f1ca8b8096bca696af1
[ "MIT" ]
1
2016-07-27T22:20:36.000Z
2016-07-27T22:20:36.000Z
SciPy2016/MTwork/Poster/.ipynb_checkpoints/Equations-checkpoint.ipynb
simpeg/simpegExamples
38b8064fb854d809f72b7f1ca8b8096bca696af1
[ "MIT" ]
null
null
null
22.210145
268
0.460685
true
507
Qwen/Qwen-72B
1. YES 2. YES
0.92079
0.760651
0.700399
__label__kor_Hang
0.069015
0.465594
_Note: This Python notebook assumes that this is your first time to see/work on a Python code. It also assumes that you are new to recommendation systems, but familiar with it as a high-level concept._ # VANILLA COLLABORTIVE FILTERING _Prepared for EMBA 2022 by EF Legara_ --- Here, we use Python libraries to help us with some of our computations and data visualization. Think of them as a collection of pre-written formulae that we can utilize so we don't have to explicitly write the formulae ourselves. The following libraries are used. You can click on the link to find out more about them. - [pandas](https://pandas.pydata.org/) - Python data analysis library - [matplotlib](https://matplotlib.org/) - a plotting library for Python - [seaborn](https://seaborn.pydata.org/) - for statistical visualization - [numpy](https://numpy.org/) - for numerical computing in Python - [sklearn](https://scikit-learn.org/stable/) - ML in Python Below is the "code cell" that we run to load the libraries. ```python import pandas as pd import matplotlib.pyplot as plt import numpy as np from sklearn.metrics.pairwise import cosine_similarity import seaborn as sns %matplotlib inline ``` --- ## Loading the Data _This data was generated from the EMBA 2019 class._ `df` is the variable we use for our dataframe or table. You may use other variable names, of course. Here, we load the data stored in an MS in Excel file named `movie recom.xlsx` to a table that we call `df` (a dataframe). ```python df = pd.read_excel('./data/movie recom.xlsx') ``` Let's take a look at what's inside `df`. The `.head()` function just tells Python to print the first five rows of the dataframe. ```python df.head() ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>Name</th> <th>Matrix</th> <th>Inception</th> <th>Titanic</th> <th>Amélie</th> <th>Love Actually</th> <th>Terminator</th> <th>Elysium</th> <th>Avatar</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>Person 1</td> <td>5</td> <td>5</td> <td>4</td> <td>0</td> <td>0</td> <td>3</td> <td>0</td> <td>5</td> </tr> <tr> <th>1</th> <td>Person 2</td> <td>5</td> <td>5</td> <td>1</td> <td>4</td> <td>3</td> <td>4</td> <td>2</td> <td>4</td> </tr> <tr> <th>2</th> <td>Person 3</td> <td>0</td> <td>0</td> <td>3</td> <td>4</td> <td>5</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>Person 4</td> <td>4</td> <td>4</td> <td>4</td> <td>0</td> <td>3</td> <td>3</td> <td>2</td> <td>5</td> </tr> <tr> <th>4</th> <td>Person 5</td> <td>1</td> <td>0</td> <td>3</td> <td>0</td> <td>0</td> <td>3</td> <td>2</td> <td>4</td> </tr> </tbody> </table> </div> The table above currently has nine columns which include the **Name** column and the eight other columns for the movies. For ease of analysis, we can set the _index_ of each row in the table using the **Name** of clients or users. You can think of the index as the row number or row name. ```python df = df.set_index('Name') ``` Here's how are dataframe looks like. 
```python df.head() ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>Matrix</th> <th>Inception</th> <th>Titanic</th> <th>Amélie</th> <th>Love Actually</th> <th>Terminator</th> <th>Elysium</th> <th>Avatar</th> </tr> <tr> <th>Name</th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>Person 1</th> <td>5</td> <td>5</td> <td>4</td> <td>0</td> <td>0</td> <td>3</td> <td>0</td> <td>5</td> </tr> <tr> <th>Person 2</th> <td>5</td> <td>5</td> <td>1</td> <td>4</td> <td>3</td> <td>4</td> <td>2</td> <td>4</td> </tr> <tr> <th>Person 3</th> <td>0</td> <td>0</td> <td>3</td> <td>4</td> <td>5</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>Person 4</th> <td>4</td> <td>4</td> <td>4</td> <td>0</td> <td>3</td> <td>3</td> <td>2</td> <td>5</td> </tr> <tr> <th>Person 5</th> <td>1</td> <td>0</td> <td>3</td> <td>0</td> <td>0</td> <td>3</td> <td>2</td> <td>4</td> </tr> </tbody> </table> </div> As you can see above, **Name** is not a column (or variable) in the table anymore, but instead an _index_ (or a row name). The resulting dataframe now purely consists of values that are numerical; in this case, the ratings given by each person (row name or index) to the set of movies (columns). We can view the values of the pandas dataframe by running the code cell below. ```python df.values ``` array([[5, 5, 4, 0, 0, 3, 0, 5], [5, 5, 1, 4, 3, 4, 2, 4], [0, 0, 3, 4, 5, 0, 0, 0], [4, 4, 4, 0, 3, 3, 2, 5], [1, 0, 3, 0, 0, 3, 2, 4], [5, 5, 2, 3, 0, 3, 1, 4], [5, 5, 2, 0, 1, 3, 0, 5], [5, 5, 4, 3, 3, 4, 0, 3], [4, 5, 4, 0, 4, 4, 5, 4], [5, 5, 3, 0, 3, 4, 4, 5], [5, 5, 5, 5, 0, 2, 0, 3], [4, 5, 3, 2, 5, 3, 2, 1], [5, 5, 5, 4, 5, 3, 3, 4], [4, 5, 4, 0, 4, 4, 1, 4], [4, 5, 2, 0, 1, 3, 3, 5], [4, 3, 3, 0, 4, 3, 0, 4], [5, 5, 4, 5, 3, 2, 0, 4], [4, 0, 2, 0, 4, 4, 0, 3], [1, 3, 3, 0, 3, 3, 0, 5], [4, 0, 5, 0, 4, 4, 4, 5]]) These are essentially the numbers in our dataframe, but displayed as an $N$-by-$N$ array of numbers, where $N=20$ is the total number of users and where each `[ ... ]` lists the ratings a person has given to the movies he/she has seen. The order, of course, matters. The first element or number in the list `[ ... ]` corresponds to the movie *The Matrix*, while the last corresponds to *Avatar*. Let's then have a look at how Person 1 (`p1`) and Person 2 (`p2`) rated the movies. ```python p1 = list(df.loc['Person 1']) p2 = list(df.loc['Person 2']) print('p1 [The Matrix, Inception, ..., Elysium, Avatar]:', p1) print('p2: ', p2) ``` p1 [The Matrix, Inception, ..., Elysium, Avatar]: [5, 5, 4, 0, 0, 3, 0, 5] p2: [5, 5, 1, 4, 3, 4, 2, 4] Indeed, the values reflect the ratings in our raw data for persons 1 and 2. 
```python df.head(n=2) ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>Matrix</th> <th>Inception</th> <th>Titanic</th> <th>Amélie</th> <th>Love Actually</th> <th>Terminator</th> <th>Elysium</th> <th>Avatar</th> </tr> <tr> <th>Name</th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>Person 1</th> <td>5</td> <td>5</td> <td>4</td> <td>0</td> <td>0</td> <td>3</td> <td>0</td> <td>5</td> </tr> <tr> <th>Person 2</th> <td>5</td> <td>5</td> <td>1</td> <td>4</td> <td>3</td> <td>4</td> <td>2</td> <td>4</td> </tr> </tbody> </table> </div> --- ## Matrix Visualization Let's explore if we can also already identify some patterns by just visualizing the raw data of ratings. We can use seaborn's `heatmap` function to visualize our dataframe `df`. ```python # the choice of colormap cmap = sns.cm.rocket_r # indicate figure size plt.figure(figsize=(10,8)) # draw heatmap g = sns.heatmap(df.values); # set tick labes for both x & y axes g.set_xticklabels(df.columns, rotation=90); g.set_yticklabels(df.index, rotation=0); ``` That is actually still a bit tough to read. We cannot accurately tell yet if there are strong connections, correlations, and/or similarities between movies/users. --- ## Collaborative Filtering ### User-based CF Now, let's implement [**collaborative filtering**](https://en.wikipedia.org/wiki/Collaborative_filtering). With collaborative filtering, we can explore how users are similar to each other based on the ratings they gave movies using the [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity) measure. If you can recall, \begin{equation} \cos (X,Y) = \frac{\sum_{i=1}^{n} X_i Y_i}{\sqrt {\sum_{i=1}^{n}X_i^2}\sqrt {\sum_{i=1}^{n}Y_i^2}}. \end{equation} That is, the similarity of Person $X$ and Person $Y$, in terms of their taste in movies, can be computed by the ratings they gave to the movies ($1$ to $n=8$) where movie 1 corresponds to _The Matrix_, movie 2 to _Inception_, movie 3 to _Titanic_, and so on and so forth. We define a function `compute_cosine_similarity( )` below that computes the cosine similarity given the above equation. ```python def compute_cosine_similarity (p1, p2): num_movies = len(p1) sumprod = 0 for i in range(num_movies): sumprod = sumprod + (p1[i] * p2[i]) numerator = sumprod denominator = np.sqrt(sum([r1*r1 for r1 in p1])) * np.sqrt(sum([r2*r2 for r2 in p2])) return numerator/denominator ``` Let's use the function above to check how similar (or not) _Person 1_ and _Person 2_ are. ```python p1 = list(df.loc['Person 1']) p2 = list(df.loc['Person 2']) print ('Person 1 and Person 2 are {:.2f}% similar.'.format(compute_cosine_similarity(p1, p2)*100)) ``` Person 1 and Person 2 are 81.26% similar. Check similarity between Person 4 and Person 3. ```python p1 = list(df.loc['Person 4']) p2 = list(df.loc['Person 3']) print ('Person 4 and Person 3 are {:.2f}% similar.'.format(compute_cosine_similarity(p1, p2)*100)) ``` Person 4 and Person 3 are 39.18% similar. #### Using a Python library As mentioned at the beginning, the good news is that we don't have to manually perform this pairwise computation of cosine similarity. We didn't even need to write the function ourselves. 
Python has a function via the `sklearn` library that computes the cosine similarity of the whole matrix for us. It's a one liner as you can see in the code cell below. ```python user_similarity_matrix = cosine_similarity(df.values) ``` The result is a 20 x 20 matrix where the numbers of rows and columns correspond to the number of users (in this case, 20) in the data. The element in each cell of the matrix gives us the cosine similarity between two persons. ```python user_similarity_matrix.size ``` 400 ```python user_similarity_matrix ``` array([[1. , 0.81262362, 0.16970563, 0.92338052, 0.73658951, 0.92219816, 0.97519805, 0.89077845, 0.81566396, 0.88548292, 0.85605599, 0.7362357 , 0.80833162, 0.90329585, 0.92219816, 0.87757241, 0.83984125, 0.70420284, 0.83820084, 0.72117107], [0.81262362, 1. , 0.45434411, 0.86281799, 0.60522753, 0.94150762, 0.87139535, 0.93221259, 0.85360419, 0.89586351, 0.84445278, 0.8916428 , 0.9258201 , 0.87188993, 0.88141139, 0.85104977, 0.92296269, 0.75009757, 0.76802458, 0.71684223], [0.16970563, 0.45434411, 1. , 0.39175718, 0.20380987, 0.26983141, 0.16489697, 0.52828266, 0.39691115, 0.30357866, 0.46563307, 0.61591788, 0.6466323 , 0.4395538 , 0.16489697, 0.47356802, 0.60676739, 0.47078588, 0.43105272, 0.46358632], [0.92338052, 0.86281799, 0.39175718, 1. , 0.80501129, 0.85915255, 0.92440465, 0.90409231, 0.95383309, 0.97272271, 0.78177899, 0.86175089, 0.9214786 , 0.9765879 , 0.94615534, 0.95960518, 0.85229309, 0.82758732, 0.92512561, 0.89365259], [0.73658951, 0.60522753, 0.20380987, 0.80501129, 1. , 0.64499491, 0.67894201, 0.62883731, 0.75838508, 0.77340406, 0.5724164 , 0.49813548, 0.66679486, 0.71543897, 0.76380977, 0.70262025, 0.57008771, 0.69707851, 0.79311554, 0.88484517], [0.92219816, 0.94150762, 0.26983141, 0.85915255, 0.64499491, 1. , 0.93258427, 0.92391739, 0.79952449, 0.87224365, 0.92736078, 0.79139995, 0.86548464, 0.84423998, 0.91011236, 0.80782688, 0.91925919, 0.65145034, 0.74040926, 0.65523412], [0.97519805, 0.87139535, 0.16489697, 0.92440465, 0.67894201, 0.93258427, 1. , 0.89345857, 0.82741488, 0.91016729, 0.80770132, 0.76941662, 0.81355557, 0.91630925, 0.94382022, 0.90574529, 0.84184789, 0.74645352, 0.84810515, 0.70487307], [0.89077845, 0.93221259, 0.52828266, 0.90409231, 0.62883731, 0.92391739, 0.89345857, 1. , 0.84846992, 0.87383999, 0.91906812, 0.92369422, 0.94629488, 0.93962636, 0.84269388, 0.9179821 , 0.96180895, 0.79714108, 0.82717961, 0.7445818 ], [0.81566396, 0.85360419, 0.39691115, 0.95383309, 0.75838508, 0.79952449, 0.82741488, 0.84846992, 1. , 0.98058068, 0.70130676, 0.90037213, 0.91662704, 0.93706146, 0.92967964, 0.88108325, 0.77662155, 0.76361125, 0.8353986 , 0.8871553 ], [0.88548292, 0.89586351, 0.30357866, 0.97272271, 0.77340406, 0.87224365, 0.91016729, 0.87383999, 0.98058068, 1. , 0.74043756, 0.87182912, 0.90556796, 0.94693149, 0.97653365, 0.90886009, 0.80833162, 0.7901857 , 0.85194275, 0.87121613], [0.85605599, 0.84445278, 0.46563307, 0.78177899, 0.5724164 , 0.92736078, 0.80770132, 0.91906812, 0.70130676, 0.74043756, 1. , 0.77063086, 0.86794777, 0.77665255, 0.75784322, 0.73865061, 0.95321997, 0.56610073, 0.66904135, 0.59912476], [0.7362357 , 0.8916428 , 0.61591788, 0.86175089, 0.49813548, 0.79139995, 0.76941662, 0.92369422, 0.90037213, 0.87182912, 0.77063086, 1. , 0.94826761, 0.91653063, 0.79139995, 0.87407914, 0.87087481, 0.75677794, 0.75065008, 0.73810763], [0.80833162, 0.9258201 , 0.6466323 , 0.9214786 , 0.66679486, 0.86548464, 0.81355557, 0.94629488, 0.91662704, 0.90556796, 0.86794777, 0.94826761, 1. 
, 0.91993984, 0.84817495, 0.89566859, 0.94660211, 0.77360839, 0.81919184, 0.83354383], [0.90329585, 0.87188993, 0.4395538 , 0.9765879 , 0.71543897, 0.84423998, 0.91630925, 0.93962636, 0.93706146, 0.94693149, 0.77665255, 0.91653063, 0.91993984, 1. , 0.90601364, 0.97574355, 0.86005887, 0.8456508 , 0.92515071, 0.83691715], [0.92219816, 0.88141139, 0.16489697, 0.94615534, 0.76380977, 0.91011236, 0.94382022, 0.84269388, 0.92967964, 0.97653365, 0.75784322, 0.79139995, 0.84817495, 0.90601364, 1. , 0.85678609, 0.79346582, 0.69216599, 0.83464317, 0.78429539], [0.87757241, 0.85104977, 0.47356802, 0.95960518, 0.70262025, 0.80782688, 0.90574529, 0.9179821 , 0.88108325, 0.90886009, 0.73865061, 0.87407914, 0.89566859, 0.97574355, 0.85678609, 1. , 0.85381497, 0.91663438, 0.92387682, 0.85436615], [0.83984125, 0.92296269, 0.60676739, 0.85229309, 0.57008771, 0.91925919, 0.84184789, 0.96180895, 0.77662155, 0.80833162, 0.95321997, 0.87087481, 0.94660211, 0.86005887, 0.79346582, 0.85381497, 1. , 0.70128687, 0.77676265, 0.68398557], [0.70420284, 0.75009757, 0.47078588, 0.82758732, 0.69707851, 0.65145034, 0.74645352, 0.79714108, 0.76361125, 0.7901857 , 0.56610073, 0.75677794, 0.77360839, 0.8456508 , 0.69216599, 0.91663438, 0.70128687, 1. , 0.7967743 , 0.87539793], [0.83820084, 0.76802458, 0.43105272, 0.92512561, 0.79311554, 0.74040926, 0.84810515, 0.82717961, 0.8353986 , 0.85194275, 0.66904135, 0.75065008, 0.81919184, 0.92515071, 0.83464317, 0.92387682, 0.77676265, 0.7967743 , 1. , 0.80883632], [0.72117107, 0.71684223, 0.46358632, 0.89365259, 0.88484517, 0.65523412, 0.70487307, 0.7445818 , 0.8871553 , 0.87121613, 0.59912476, 0.73810763, 0.83354383, 0.83691715, 0.78429539, 0.85436615, 0.68398557, 0.87539793, 0.80883632, 1. ]]) Okay. That's a bit tough to read. We can use a heatmap instead. 
We may view it as a dataframe ```python pd.DataFrame(user_similarity_matrix, index = df.index, columns = list(df.index)) ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>Person 1</th> <th>Person 2</th> <th>Person 3</th> <th>Person 4</th> <th>Person 5</th> <th>Person 6</th> <th>Person 7</th> <th>Person 8</th> <th>Person 9</th> <th>Person 10</th> <th>Person 11</th> <th>Person 12</th> <th>Person 13</th> <th>Person 14</th> <th>Person 15</th> <th>Person 16</th> <th>Person 17</th> <th>Person 18</th> <th>Person 19</th> <th>Person 20</th> </tr> <tr> <th>Name</th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>Person 1</th> <td>1.000000</td> <td>0.812624</td> <td>0.169706</td> <td>0.923381</td> <td>0.736590</td> <td>0.922198</td> <td>0.975198</td> <td>0.890778</td> <td>0.815664</td> <td>0.885483</td> <td>0.856056</td> <td>0.736236</td> <td>0.808332</td> <td>0.903296</td> <td>0.922198</td> <td>0.877572</td> <td>0.839841</td> <td>0.704203</td> <td>0.838201</td> <td>0.721171</td> </tr> <tr> <th>Person 2</th> <td>0.812624</td> <td>1.000000</td> <td>0.454344</td> <td>0.862818</td> <td>0.605228</td> <td>0.941508</td> <td>0.871395</td> <td>0.932213</td> <td>0.853604</td> <td>0.895864</td> <td>0.844453</td> <td>0.891643</td> <td>0.925820</td> <td>0.871890</td> <td>0.881411</td> <td>0.851050</td> <td>0.922963</td> <td>0.750098</td> <td>0.768025</td> <td>0.716842</td> </tr> <tr> <th>Person 3</th> <td>0.169706</td> <td>0.454344</td> <td>1.000000</td> <td>0.391757</td> <td>0.203810</td> <td>0.269831</td> <td>0.164897</td> <td>0.528283</td> <td>0.396911</td> <td>0.303579</td> <td>0.465633</td> <td>0.615918</td> <td>0.646632</td> <td>0.439554</td> <td>0.164897</td> <td>0.473568</td> <td>0.606767</td> <td>0.470786</td> <td>0.431053</td> <td>0.463586</td> </tr> <tr> <th>Person 4</th> <td>0.923381</td> <td>0.862818</td> <td>0.391757</td> <td>1.000000</td> <td>0.805011</td> <td>0.859153</td> <td>0.924405</td> <td>0.904092</td> <td>0.953833</td> <td>0.972723</td> <td>0.781779</td> <td>0.861751</td> <td>0.921479</td> <td>0.976588</td> <td>0.946155</td> <td>0.959605</td> <td>0.852293</td> <td>0.827587</td> <td>0.925126</td> <td>0.893653</td> </tr> <tr> <th>Person 5</th> <td>0.736590</td> <td>0.605228</td> <td>0.203810</td> <td>0.805011</td> <td>1.000000</td> <td>0.644995</td> <td>0.678942</td> <td>0.628837</td> <td>0.758385</td> <td>0.773404</td> <td>0.572416</td> <td>0.498135</td> <td>0.666795</td> <td>0.715439</td> <td>0.763810</td> <td>0.702620</td> <td>0.570088</td> <td>0.697079</td> <td>0.793116</td> <td>0.884845</td> </tr> <tr> <th>Person 6</th> <td>0.922198</td> <td>0.941508</td> <td>0.269831</td> <td>0.859153</td> <td>0.644995</td> <td>1.000000</td> <td>0.932584</td> <td>0.923917</td> <td>0.799524</td> <td>0.872244</td> <td>0.927361</td> <td>0.791400</td> <td>0.865485</td> <td>0.844240</td> <td>0.910112</td> <td>0.807827</td> <td>0.919259</td> <td>0.651450</td> <td>0.740409</td> <td>0.655234</td> </tr> <tr> <th>Person 7</th> <td>0.975198</td> <td>0.871395</td> <td>0.164897</td> <td>0.924405</td> <td>0.678942</td> <td>0.932584</td> <td>1.000000</td> <td>0.893459</td> <td>0.827415</td> <td>0.910167</td> 
<td>0.807701</td> <td>0.769417</td> <td>0.813556</td> <td>0.916309</td> <td>0.943820</td> <td>0.905745</td> <td>0.841848</td> <td>0.746454</td> <td>0.848105</td> <td>0.704873</td> </tr> <tr> <th>Person 8</th> <td>0.890778</td> <td>0.932213</td> <td>0.528283</td> <td>0.904092</td> <td>0.628837</td> <td>0.923917</td> <td>0.893459</td> <td>1.000000</td> <td>0.848470</td> <td>0.873840</td> <td>0.919068</td> <td>0.923694</td> <td>0.946295</td> <td>0.939626</td> <td>0.842694</td> <td>0.917982</td> <td>0.961809</td> <td>0.797141</td> <td>0.827180</td> <td>0.744582</td> </tr> <tr> <th>Person 9</th> <td>0.815664</td> <td>0.853604</td> <td>0.396911</td> <td>0.953833</td> <td>0.758385</td> <td>0.799524</td> <td>0.827415</td> <td>0.848470</td> <td>1.000000</td> <td>0.980581</td> <td>0.701307</td> <td>0.900372</td> <td>0.916627</td> <td>0.937061</td> <td>0.929680</td> <td>0.881083</td> <td>0.776622</td> <td>0.763611</td> <td>0.835399</td> <td>0.887155</td> </tr> <tr> <th>Person 10</th> <td>0.885483</td> <td>0.895864</td> <td>0.303579</td> <td>0.972723</td> <td>0.773404</td> <td>0.872244</td> <td>0.910167</td> <td>0.873840</td> <td>0.980581</td> <td>1.000000</td> <td>0.740438</td> <td>0.871829</td> <td>0.905568</td> <td>0.946931</td> <td>0.976534</td> <td>0.908860</td> <td>0.808332</td> <td>0.790186</td> <td>0.851943</td> <td>0.871216</td> </tr> <tr> <th>Person 11</th> <td>0.856056</td> <td>0.844453</td> <td>0.465633</td> <td>0.781779</td> <td>0.572416</td> <td>0.927361</td> <td>0.807701</td> <td>0.919068</td> <td>0.701307</td> <td>0.740438</td> <td>1.000000</td> <td>0.770631</td> <td>0.867948</td> <td>0.776653</td> <td>0.757843</td> <td>0.738651</td> <td>0.953220</td> <td>0.566101</td> <td>0.669041</td> <td>0.599125</td> </tr> <tr> <th>Person 12</th> <td>0.736236</td> <td>0.891643</td> <td>0.615918</td> <td>0.861751</td> <td>0.498135</td> <td>0.791400</td> <td>0.769417</td> <td>0.923694</td> <td>0.900372</td> <td>0.871829</td> <td>0.770631</td> <td>1.000000</td> <td>0.948268</td> <td>0.916531</td> <td>0.791400</td> <td>0.874079</td> <td>0.870875</td> <td>0.756778</td> <td>0.750650</td> <td>0.738108</td> </tr> <tr> <th>Person 13</th> <td>0.808332</td> <td>0.925820</td> <td>0.646632</td> <td>0.921479</td> <td>0.666795</td> <td>0.865485</td> <td>0.813556</td> <td>0.946295</td> <td>0.916627</td> <td>0.905568</td> <td>0.867948</td> <td>0.948268</td> <td>1.000000</td> <td>0.919940</td> <td>0.848175</td> <td>0.895669</td> <td>0.946602</td> <td>0.773608</td> <td>0.819192</td> <td>0.833544</td> </tr> <tr> <th>Person 14</th> <td>0.903296</td> <td>0.871890</td> <td>0.439554</td> <td>0.976588</td> <td>0.715439</td> <td>0.844240</td> <td>0.916309</td> <td>0.939626</td> <td>0.937061</td> <td>0.946931</td> <td>0.776653</td> <td>0.916531</td> <td>0.919940</td> <td>1.000000</td> <td>0.906014</td> <td>0.975744</td> <td>0.860059</td> <td>0.845651</td> <td>0.925151</td> <td>0.836917</td> </tr> <tr> <th>Person 15</th> <td>0.922198</td> <td>0.881411</td> <td>0.164897</td> <td>0.946155</td> <td>0.763810</td> <td>0.910112</td> <td>0.943820</td> <td>0.842694</td> <td>0.929680</td> <td>0.976534</td> <td>0.757843</td> <td>0.791400</td> <td>0.848175</td> <td>0.906014</td> <td>1.000000</td> <td>0.856786</td> <td>0.793466</td> <td>0.692166</td> <td>0.834643</td> <td>0.784295</td> </tr> <tr> <th>Person 16</th> <td>0.877572</td> <td>0.851050</td> <td>0.473568</td> <td>0.959605</td> <td>0.702620</td> <td>0.807827</td> <td>0.905745</td> <td>0.917982</td> <td>0.881083</td> <td>0.908860</td> <td>0.738651</td> <td>0.874079</td> 
<td>0.895669</td> <td>0.975744</td> <td>0.856786</td> <td>1.000000</td> <td>0.853815</td> <td>0.916634</td> <td>0.923877</td> <td>0.854366</td> </tr> <tr> <th>Person 17</th> <td>0.839841</td> <td>0.922963</td> <td>0.606767</td> <td>0.852293</td> <td>0.570088</td> <td>0.919259</td> <td>0.841848</td> <td>0.961809</td> <td>0.776622</td> <td>0.808332</td> <td>0.953220</td> <td>0.870875</td> <td>0.946602</td> <td>0.860059</td> <td>0.793466</td> <td>0.853815</td> <td>1.000000</td> <td>0.701287</td> <td>0.776763</td> <td>0.683986</td> </tr> <tr> <th>Person 18</th> <td>0.704203</td> <td>0.750098</td> <td>0.470786</td> <td>0.827587</td> <td>0.697079</td> <td>0.651450</td> <td>0.746454</td> <td>0.797141</td> <td>0.763611</td> <td>0.790186</td> <td>0.566101</td> <td>0.756778</td> <td>0.773608</td> <td>0.845651</td> <td>0.692166</td> <td>0.916634</td> <td>0.701287</td> <td>1.000000</td> <td>0.796774</td> <td>0.875398</td> </tr> <tr> <th>Person 19</th> <td>0.838201</td> <td>0.768025</td> <td>0.431053</td> <td>0.925126</td> <td>0.793116</td> <td>0.740409</td> <td>0.848105</td> <td>0.827180</td> <td>0.835399</td> <td>0.851943</td> <td>0.669041</td> <td>0.750650</td> <td>0.819192</td> <td>0.925151</td> <td>0.834643</td> <td>0.923877</td> <td>0.776763</td> <td>0.796774</td> <td>1.000000</td> <td>0.808836</td> </tr> <tr> <th>Person 20</th> <td>0.721171</td> <td>0.716842</td> <td>0.463586</td> <td>0.893653</td> <td>0.884845</td> <td>0.655234</td> <td>0.704873</td> <td>0.744582</td> <td>0.887155</td> <td>0.871216</td> <td>0.599125</td> <td>0.738108</td> <td>0.833544</td> <td>0.836917</td> <td>0.784295</td> <td>0.854366</td> <td>0.683986</td> <td>0.875398</td> <td>0.808836</td> <td>1.000000</td> </tr> </tbody> </table> </div> From the dataframe above, we can see that Person 1 is 81.26% similar to Person 2 while Person 3 is only 39.18% similar to Person 4. We can display the dataframe above as a heatmap. The darker the cell color is the more similar two persons are. ```python plt.figure(figsize=(10,10)) g = sns.heatmap(user_similarity_matrix, cmap = cmap) g.set_xticklabels(df.index, rotation=90); g.set_yticklabels(df.index, rotation=0); ``` ### Item-based CF How about instead of user-based, we use item-based? The method/approach is exactly the same. We'll just transpose our original dataframe; i.e., our rows are now the movie names, while the columns are the individual users. 
```python df_t = df.transpose() ``` ```python df_t ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th>Name</th> <th>Person 1</th> <th>Person 2</th> <th>Person 3</th> <th>Person 4</th> <th>Person 5</th> <th>Person 6</th> <th>Person 7</th> <th>Person 8</th> <th>Person 9</th> <th>Person 10</th> <th>Person 11</th> <th>Person 12</th> <th>Person 13</th> <th>Person 14</th> <th>Person 15</th> <th>Person 16</th> <th>Person 17</th> <th>Person 18</th> <th>Person 19</th> <th>Person 20</th> </tr> </thead> <tbody> <tr> <th>Matrix</th> <td>5</td> <td>5</td> <td>0</td> <td>4</td> <td>1</td> <td>5</td> <td>5</td> <td>5</td> <td>4</td> <td>5</td> <td>5</td> <td>4</td> <td>5</td> <td>4</td> <td>4</td> <td>4</td> <td>5</td> <td>4</td> <td>1</td> <td>4</td> </tr> <tr> <th>Inception</th> <td>5</td> <td>5</td> <td>0</td> <td>4</td> <td>0</td> <td>5</td> <td>5</td> <td>5</td> <td>5</td> <td>5</td> <td>5</td> <td>5</td> <td>5</td> <td>5</td> <td>5</td> <td>3</td> <td>5</td> <td>0</td> <td>3</td> <td>0</td> </tr> <tr> <th>Titanic</th> <td>4</td> <td>1</td> <td>3</td> <td>4</td> <td>3</td> <td>2</td> <td>2</td> <td>4</td> <td>4</td> <td>3</td> <td>5</td> <td>3</td> <td>5</td> <td>4</td> <td>2</td> <td>3</td> <td>4</td> <td>2</td> <td>3</td> <td>5</td> </tr> <tr> <th>Amélie</th> <td>0</td> <td>4</td> <td>4</td> <td>0</td> <td>0</td> <td>3</td> <td>0</td> <td>3</td> <td>0</td> <td>0</td> <td>5</td> <td>2</td> <td>4</td> <td>0</td> <td>0</td> <td>0</td> <td>5</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>Love Actually</th> <td>0</td> <td>3</td> <td>5</td> <td>3</td> <td>0</td> <td>0</td> <td>1</td> <td>3</td> <td>4</td> <td>3</td> <td>0</td> <td>5</td> <td>5</td> <td>4</td> <td>1</td> <td>4</td> <td>3</td> <td>4</td> <td>3</td> <td>4</td> </tr> <tr> <th>Terminator</th> <td>3</td> <td>4</td> <td>0</td> <td>3</td> <td>3</td> <td>3</td> <td>3</td> <td>4</td> <td>4</td> <td>4</td> <td>2</td> <td>3</td> <td>3</td> <td>4</td> <td>3</td> <td>3</td> <td>2</td> <td>4</td> <td>3</td> <td>4</td> </tr> <tr> <th>Elysium</th> <td>0</td> <td>2</td> <td>0</td> <td>2</td> <td>2</td> <td>1</td> <td>0</td> <td>0</td> <td>5</td> <td>4</td> <td>0</td> <td>2</td> <td>3</td> <td>1</td> <td>3</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>4</td> </tr> <tr> <th>Avatar</th> <td>5</td> <td>4</td> <td>0</td> <td>5</td> <td>4</td> <td>4</td> <td>5</td> <td>3</td> <td>4</td> <td>5</td> <td>3</td> <td>1</td> <td>4</td> <td>4</td> <td>5</td> <td>4</td> <td>4</td> <td>3</td> <td>5</td> <td>5</td> </tr> </tbody> </table> </div> ```python movie_similarity_matrix = cosine_similarity(df_t) plt.figure(figsize=(10,10)) g = sns.heatmap(movie_similarity_matrix, cmap = cmap, annot=True) g.set_xticklabels(df_t.index, rotation=90); g.set_yticklabels(df_t.index, rotation=0); ``` See if you can confirm the following statements: - Among the other movies in the list and according to the preferences of EMBA 2019 students, _Avatar_ is most similar to _Terminator_. - Among the other movies in the list and according to the preferences of EMBA 2019 students, _The Matrix_ is most similar to both _Inception_ and _Terminator_. --- ## Going back to the Users Can we maybe cluster the users instead based on the ratings they gave the eight (8) movies, at most? Definitely. 
And one of the most basic methods we can use is [k-means clustering](https://en.wikipedia.org/wiki/K-means_clustering). With $k$-means, we need to indicate the number of clusters $k$ we want our users to be grouped into. Let us once again use `sklearn` to implement k-means. ```python from sklearn.cluster import KMeans ``` ```python # setting k = 4 means that we're expecting 4 groupings in the EMBA 2019 cohort based # on their movie ratings. k = 4 kmeans = KMeans(n_clusters=k).fit(df.values) ``` ```python cluster_labels = kmeans.labels_ ``` Below, we print the groupings. ```python groupings = {} for c in range(k): groupings[c] = [] persons = list(df.index) i = 0 for i in range(len(persons)): groupings[cluster_labels[i]].append(persons[i]) for group, people in groupings.items(): print ('Group #{}: {}'.format(group, people)) ``` Group #0: ['Person 1', 'Person 4', 'Person 7', 'Person 9', 'Person 10', 'Person 14', 'Person 15', 'Person 16', 'Person 19'] Group #1: ['Person 2', 'Person 6', 'Person 8', 'Person 11', 'Person 12', 'Person 13', 'Person 17'] Group #2: ['Person 5', 'Person 18', 'Person 20'] Group #3: ['Person 3'] $k$-means is actually one of the most basic and (arguably) one of the more popular _unsupervised_ machine learning algorithms out there. ### Assigning _new_ users to existing groups. With a model generated using our data, we can now assign (or "predict") new users to the generated clusters. Say, for example, our new user has given our movies the following ratings: - The Matrix: 0 - Inception: 0 - Titanic: 3 - Amelie: 2 - Love Actually: 5 - Terminator: 5 - Elysium: 5 - Avatar: 3 ```python kmeans.predict([[0,0,3,2,5,5,5,3]]) ``` array([2], dtype=int32) What if we have another new user? - The Matrix: 5 - Inception: 5 - Titanic: 5 - Amelie: 2 - Love Actually: 5 - Terminator: 1 - Elysium: 1 - Avatar: 3 ```python kmeans.predict([[5,5,5,2,5,1,1,3]]) ``` array([1], dtype=int32) And another one? ```python kmeans.predict([[1,1,5,2,5,1,1,3]]) ``` array([3], dtype=int32) --- ## Summary and Some Notes - We showed how to load an MS Excel file into a dataframe. - We implemented collaborative filtering in Python: - user-user - item-item - We introduced (for demo) $k$-means clustering, an unsupervised ML model, to cluster users based on what they've watched and their taste in movies. - For the $k$-means clustering demo above, we explicitly chose the size of $k$. There are, of course, ways to systematically find the "right" number of clusters (e.g., using inertia plots), but they are beyond the scope of this intro to Python and the vanilla collaborative filtering method. - Once you learn more about the math/science behind $k$-means, you'll also realize that the membership of the clusters may change from run to run. This is because $k$-means starts by _randomly_ placing $k$ centroids and then assigns each data point to a cluster based on its distance from those centroids.
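As a possible follow-up exercise (a hedged sketch that was not part of the class demo), the user similarity matrix computed earlier can already be turned into simple recommendations: for a movie a person has not seen (rating of 0), we score it with a similarity-weighted average of the other users' ratings. The function name `recommend_for` and the weighting scheme are illustrative choices only.

```python
def recommend_for(person, n=3):
    """Suggest up to n unseen movies (rating == 0) for `person` using a
    similarity-weighted average of the other users' ratings."""
    row = list(df.index).index(person)
    sim = pd.Series(user_similarity_matrix[row], index=df.index).drop(person)
    scores = {}
    for movie in df.columns:
        if df.loc[person, movie] == 0:            # only score unseen movies
            ratings = df[movie].drop(person)
            scores[movie] = (sim * ratings).sum() / sim.sum()
    return pd.Series(scores).sort_values(ascending=False).head(n)

recommend_for('Person 1')
```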
797245e37b40c5282f962700b440627ea98fadb5
189,058
ipynb
Jupyter Notebook
Collaborative Filtering.ipynb
eflegara/Business-Analytics
15a56027925fbb310cb434fb9a866aaf08a8ac34
[ "MIT" ]
1
2021-04-25T13:46:14.000Z
2021-04-25T13:46:14.000Z
Collaborative Filtering.ipynb
eflegara/Business-Analytics
15a56027925fbb310cb434fb9a866aaf08a8ac34
[ "MIT" ]
null
null
null
Collaborative Filtering.ipynb
eflegara/Business-Analytics
15a56027925fbb310cb434fb9a866aaf08a8ac34
[ "MIT" ]
null
null
null
88.262372
54,624
0.748881
true
15,036
Qwen/Qwen-72B
1. YES 2. YES
0.760651
0.746139
0.567551
__label__eng_Latn
0.314381
0.156941
```python from sympy import Symbol, Matrix, symbols, sin, lambdify import time import numpy as np from collections import OrderedDict ``` ```python id_wind=False bo=False ``` R = Matrix([[1, 0,0],[0,np.cos(10*np.pi/180),-np.sin(10*np.pi/180)],[0,np.sin(10*np.pi/180), np.cos(10*np.pi/180)]]) c=Matrix([0,0,0]) v_B=Matrix([10,0,0]) v_W=Matrix([0,0,0]) Om=Matrix([0,0,0]) cro=Matrix([0,1,0]) GenDirectForceWing(Om, c, v_B, v_W, R, cro) ```python for used_logged_v_in_model in [True, False]: t0=time.time() vlog_i,vlog_j,vlog_k=symbols("vlog_i,vlog_j,vlog_k",real=True) v_log=Matrix([[vlog_i], [vlog_j], [vlog_k]]) vpred_i,vpred_j,vpred_k=symbols("vpred_i,vpred_j,vpred_k",real=True) v_pred=Matrix([[vpred_i], [vpred_j], [vpred_k]]) v_i,v_j,v_k=(vlog_i,vlog_j,vlog_k) if used_logged_v_in_model else (vpred_i,vpred_j,vpred_k) v_B=Matrix([[v_i], [v_j], [v_k]]) print("\nElapsed : %f s , Prev step time: -1 s \\ Generating first symbols ..."%(time.time()-t0)) dt=Symbol('dt',positive=True,real=True) CL_1_sa = Symbol('C^{sa}_{L,1}',real=True) # Coeff rechercher CD_0_sa = Symbol('C^{sa}_{D,0}',real=True) # Coeff rechercher CD_1_sa = Symbol('C^{sa}_{D,1}',real=True) # Coeff rechercher CD_0_fp = Symbol('C^{fp}_{D,0}',real=True) # Coeff rechercher CD_1_fp = Symbol('C^{fp}_{D,1}',real=True) # Coeff rechercher k_0 = Symbol('k_0', real=True) # coeff rechercher k_1 = Symbol('k_1', real=True) # coeff rechercher k_2 = Symbol('k_2', real=True) # coeff rechercher delta_s = Symbol('delta_s', real=True) # Coeff rechercher : largeur du stall alpha_s = Symbol('alpha_s',real=True) # Coeff rechercher B_B = Matrix([[1,0,0], [0,1,0], [0,0,1]]) # Base dans le repère body omega1, omega2, omega3 = symbols('\omega_1, \omega_2, \omega_3', real=True) Omega = Matrix([omega1, omega2, omega3]) # Vecteur de rotation r00, r01, r02, r10, r11, r12, r20, r21, r22 = symbols('r_{00}, r_{01}, r_{02}, r_{10}, r_{11}, r_{12}, r_{20}, r_{21}, r_{22}', real=True) R = Matrix([[r00,r01,r02], [r10,r11, r12], [r20, r21, r22]]) # Matrice de rotation # Vb1,Vb2,Vb3=symbols('V_{b1} V_{b2} V_{b3}',real=True) #v_B = Matrix([Vb1, Vb2, Vb3]) # Vitesse du corps (repère drone) Vw1,Vw2,Vw3=symbols('V_{w1} V_{w2} V_{w3}',real=True) # Vitesse du vent dans le repère NED v_W = Matrix([Vw1, Vw2, Vw3]) xcp, ycp, zcp = symbols('x_{cp}, y_{cp}, z_{cp}') X_cp = Matrix([xcp, ycp, zcp]) # Position du centre de poussé d'un corps dans le repère body C_t, C_q, C_h=symbols('C_t,C_q, C_h',real=True) # Coefficient de poussée des moteurs, coefficient de couple des moteurs motor_axis_in_body_frame = Matrix([1,0,0]) # Axe des moteurs, ici placé en mode avion omega_rotor = symbols('\omega_{rotor}', real=True) # Vitesse de rotation des moteurs crossward_B = B_B[:,1] c45, s45 = np.cos(45*np.pi/180), np.sin(45*np.pi/180) r = np.array(((1,0, 0),(0,c45,-s45),(0,s45, c45))) r_neg = np.array(((1,0, 0), (0,c45, s45),(0,-s45, c45))) R_list_sympy = [R, R, R * r, R *r_neg, R] # Liste des matrices de rotation de chaque surface portante du drone, seul les éléments de la queue (element 3 et 4) ne sont pas dans le repère inertiel. 
cp1x,cp1y, cp1z, cp2x,cp2y,cp2z, cp3x,cp3y,cp3z, cp4x,cp4y,cp4z,cp5x,cp5y,cp5z = symbols('cp1x,cp1y, cp1z, cp2x,cp2y,cp2z, cp3x,cp3y,cp3z, cp4x,cp4y,cp4z,cp5x,cp5y,cp5z', real=True) cp_list = [Matrix([cp1x,cp1y, cp1z]), Matrix([cp2x,cp2y,cp2z]), Matrix([cp3x,cp3y,cp3z]), Matrix([cp4x,cp4y,cp4z]), Matrix([cp5x,cp5y,cp5z])] A1, A2, A3 = symbols('A_1 A_2 A_3', real=True) Aire_list = [A1, A1, A2, A2, A3] # Liste des 1/2 * rho * S pour chaque surface cp_list_rotor = [Matrix([0.713,0.475,0]), Matrix([0.713,-0.475,0])] spinning_sense_list = [1,-1] ##### Listes des angles (d'attaque et de contrôles) pour faire la somme des forces en une seule équations alpha1, alpha2, alpha3, alpha4, alpha5 = symbols('alpha_1, alpha_2, alpha_3, alpha_4, alpha_5', real=True) alpha_list =Matrix([alpha1, alpha2, alpha3, alpha4, alpha5]) alpha0 = Symbol('alpha_0', real=True) alpha0_list = Matrix([alpha0 , alpha0 ,0, 0, alpha0]) delta0_1, delta0_2, delta0_3, delta0_4, delta0_5 = symbols('delta_0_1, delta_0_2, delta_0_3, delta_0_4, delta_0_5', real=True) delta0_list = Matrix([delta0_1, delta0_2, delta0_3, delta0_4, delta0_5]) t1=time.time() print("Elapsed : %f s , Prev step time: -1 s \\ Generating dynamics function ..."%(t1-t0)) def compute_alpha(dragDirection, liftDirection, frontward_Body, VelinLDPlane, R=None): if R is None : calpha= np.vdot(dragDirection, frontward_Body) else : calpha= np.vdot(dragDirection, R@frontward_Body) absalpha= -np.arccos(calpha) if R is None : signalpha = np.sign(np.vdot(liftDirection, frontward_Body)) else: signalpha = np.sign(np.vdot(liftDirection, R@frontward_Body)) if np.linalg.norm(VelinLDPlane)>1e-7 : alpha = signalpha*absalpha else : alpha=0 if abs(alpha)>0.5*np.pi: if alpha>0 :alpha=alpha-np.pi else: alpha=alpha+np.pi return alpha ##################################################### génération des équations pour Cd et Cl (utiliser pour générer les équations symbolique pour chaque surface portantes) #################################################### def compute_cl_cd(a, a_0, a_s, d_0, d_s, cl1sa, cd1fp, k0, k1, k2, cd0fp, cd0sa, cd1sa): if bo == True: CL_sa = 1/2 * cl1sa * (2*(a + (d_0))) CD_sa = cd1sa * (a + d_0)*(a + d_0) C_L = CL_sa + (d_0) C_D = CD_sa else: CL_sa = 1/2 * cl1sa * sin(2*(a + (k1*d_0) + a_0)) CD_sa = cd0sa + cd1sa * sin(a + (k0*d_0) + a_0)*sin(a + (k0*d_0) + a_0) CL_fp = 1/2 * cd1fp * sin(2*(a+ (k1*d_0) + a_0)) CD_fp = cd0fp + cd1fp * sin(a + (k0*d_0) + a_0)*sin(a + (k0*d_0) + a_0) puiss=5 x=((a+a_0)**2/a_s**2)**puiss s = 1.0 - x/(x + 100+200*d_s) C_L = CL_fp + s*(CL_sa - CL_fp) + k2 * sin(d_0) C_D = CD_fp + s*(CD_sa - CD_fp) return C_L, C_D def GenDirectForceWing(Omega, cp, vB, vW, R, crossward_Body): if bo==True: # Cette fonction permet d'obtenir les directions des efforts de portances et de trainé en fonction des vitesses, et de l'orientation dans le repère NED. 
Air_speed_earth = vB -vW VelinLDPlane = Air_speed_earth - Air_speed_earth.dot(R*crossward_Body) * R*crossward_Body dragDirection = -VelinLDPlane / VelinLDPlane.norm() liftDirection = -(R*crossward_Body).cross(dragDirection) else: Air_speed_earth = vB - vW Air_speed_body = (R.T* Air_speed_earth) - cp.cross(Omega) VelinLDPlane = Air_speed_body - Air_speed_body.dot(crossward_Body.T) * crossward_Body dragDirection = -VelinLDPlane / VelinLDPlane.norm() liftDirection = -crossward_Body.cross(dragDirection) return VelinLDPlane, dragDirection, liftDirection def GenForceWing(A, VelinLDPlane, dragDirection, liftDirection, Cd, Cl, cp): # Cette fonction permet de générer les forces aerodynamique d'une aile dans son repère. D = A * VelinLDPlane.norm()**2 * dragDirection * Cd L = A * VelinLDPlane.norm()**2 * liftDirection * Cl F_wing = L+D Torque_wing = cp.cross(F_wing) return F_wing, Torque_wing def Generate_Sum_Force_wing(A_list, Omega, cp_list, R_list, vB, vW, Cd_list, Cl_list, crossward_body, r_queue, r_queue_neg): # Cette function permet de généer l'équation complète de la somme des forces pour les différentes surfaces portantes p = 0 Sum_Force_Wing = Matrix([0,0,0]) Sum_Torque_Wing = Matrix([0,0,0]) for i in cp_list: VelinLDPlane, dragDirection, liftDirection= GenDirectForceWing(Omega, i, vB, vW, R_list[p], crossward_body) if bo==True: F_wing, Torque_wing = GenForceWing(A_list[p], VelinLDPlane, dragDirection, liftDirection, Cd_list[p], Cl_list[p], i) Sum_Force_Wing = Sum_Force_Wing + F_wing Sum_Torque_Wing = Sum_Torque_Wing + Torque_wing p+=1 else: if p == 2 : # Comme la fonction GenForceWing donne les efforts des ailes dans leur repère propre, on doit passer par les matrice de rotation pour les ailes de la queue F_wing, Torque_wing = GenForceWing(A_list[p], VelinLDPlane, dragDirection, liftDirection, Cd_list[p], Cl_list[p], Matrix([0,0,0])) Sum_Force_Wing = Sum_Force_Wing + r_queue.T @ F_wing Sum_Torque_Wing = Sum_Torque_Wing + i.cross(r_queue.T @ F_wing) elif p == 3 : F_wing, Torque_wing = GenForceWing(A_list[p], VelinLDPlane, dragDirection, liftDirection, Cd_list[p], Cl_list[p], Matrix([0,0,0])) Sum_Force_Wing = Sum_Force_Wing + r_queue_neg.T @ F_wing Sum_Torque_Wing = Sum_Torque_Wing + i.cross(r_queue_neg.T @ F_wing) else: F_wing, Torque_wing = GenForceWing(A_list[p], VelinLDPlane, dragDirection, liftDirection, Cd_list[p], Cl_list[p], i) Sum_Force_Wing = Sum_Force_Wing + F_wing Sum_Torque_Wing = Sum_Torque_Wing + Torque_wing p+=1 return Sum_Force_Wing, Sum_Torque_Wing def GenForceMoteur(Omega, ct, cq, omega_rotor, cp, vB, vW, ch, R, motor_axis_in_body_frame, spinning_sense): ## Cette fonction calcule les effort produit par un rotor sur le drone en fonction de son sens de rotation et de sa localisation, les efforts sont donnés ## dans le repère inertiel. 
l'axe des moteur est placé suivant l'axe x du drone (mode avion seulement) if bo==True: Air_speed_earth = vB - vW Axial_speed = Air_speed_earth.dot(R*motor_axis_in_body_frame) lat_speed = Air_speed_earth - (Axial_speed * (R*motor_axis_in_body_frame)) T = ct*omega_rotor**2 T_vec = T * R*motor_axis_in_body_frame torque_at_body_center = cp.cross(T_vec.T) else: Air_speed_earth = vB - vW air_speed_in_rotor_frame = (R.T* Air_speed_earth) - cp.cross(Omega) Axial_speed = air_speed_in_rotor_frame.dot(motor_axis_in_body_frame) lat_speed = air_speed_in_rotor_frame - (Axial_speed * (motor_axis_in_body_frame)) T = ct*omega_rotor**2 H = ch * omega_rotor T_vec = T * motor_axis_in_body_frame - H * lat_speed torque = - omega_rotor * cq * lat_speed torque = - spinning_sense * cq * T * motor_axis_in_body_frame torque_at_body_center = torque + cp.cross(T_vec.T) return T_vec, torque_at_body_center def Generate_Sum_Force_Moteur(Omega, ct, cq, omega_rotor, cp_list_rotor, vB, vW, ch, R, motor_axis_in_body_frame_list, spinning_sense_list): # Calcul des forces des moteurs sur le drone, génère toutes les forces, ainsi que le couple appliqué au centre de gravité du drone, dans le repère inertiel p = 0 Sum_Force_Rotor = Matrix([0,0,0]) Sum_Torque_Rotor = Matrix([0,0,0]) for cp in cp_list_rotor: F_rotor, Q_rotor = GenForceMoteur(Omega, ct, cq, omega_rotor, cp, vB, vW, ch, R, motor_axis_in_body_frame_list, spinning_sense_list[p]) Sum_Force_Rotor = Sum_Force_Rotor + F_rotor Sum_Torque_Rotor = Sum_Torque_Rotor + Q_rotor p+=1 return Sum_Force_Rotor, Sum_Torque_Rotor def Compute_list_coeff(alpha_list, alpha_0_list, alpha_s, delta_0_list, delta_s, CL_1_sa, CD_1_fp, k_0, k_1, k_2, CD_0_fp, CD_0_sa, CD_1_sa): Cd_list = Matrix([0 for i in range(len(alpha_list))]) Cl_list = Matrix([0 for i in range(len(alpha_list))]) for count, alpha in enumerate(alpha_list): Cl_list[count], Cd_list[count] = compute_cl_cd(alpha, alpha_0_list[count], alpha_s, delta_0_list[count], delta_s, CL_1_sa, CD_1_fp, k_0, k_1, k_2, CD_0_fp, CD_0_sa, CD_1_sa) return Cl_list , Cd_list "liftdrag forces" t2=time.time() print("Elapsed : %f s , Prev step time: %f s \\ Solving lifrdrag model ..."%(t2-t0,t2-t1)) # Ces équation servent uniquement pour le calcul de l'angle d'attaque pour le calcul des coeffficient aérodynamique Cd et Cl, il n'interviennent pas directement dans le calcul des efforts. 
VelinLDPlane, dragDirection, liftDirection= GenDirectForceWing(Omega, X_cp, v_B, v_W, R, crossward_B) Cl_list, Cd_list = Compute_list_coeff(alpha_list, alpha0_list, alpha_s, delta0_list, delta_s, CL_1_sa, CD_1_fp, k_0, k_1, k_2, CD_0_fp, CD_0_sa, CD_1_sa) Sum_F_wing_complete, Sum_T_wing_complete = Generate_Sum_Force_wing(Aire_list, Omega, cp_list, R_list_sympy, v_B, v_W, Cd_list, Cl_list, crossward_B, r, r_neg) t3=time.time() print("Elapsed : %f s , Prev step time: %f s \\ Solving rotor model ..."%(t3-t0,t3-t2)) ##################### Sommes des efforts des moteurs Sum_F_rotor_complete, Sum_T_rotor_complete = Generate_Sum_Force_Moteur(Omega, C_t, C_q, omega_rotor, cp_list_rotor, v_B, v_W, C_h, R, motor_axis_in_body_frame, spinning_sense_list) Sum_F_rotor_complete.simplify() Sum_T_rotor_complete.simplify() Effort_Aero_complete = [Sum_F_wing_complete + Sum_F_rotor_complete , Sum_T_wing_complete + Sum_T_rotor_complete] t35=time.time() print("Elapsed : %f s , Prev step time: %f s \\ Solving Dynamics ..."%(t35-t0,t35-t3)) theta=[] for i in (CD_0_sa, CD_0_fp, CD_1_sa, CL_1_sa, CD_1_fp, k_0, k_1, k_2, C_t): theta.append(i) if id_wind: theta.append(Vw1) theta.append(Vw2) #theta.append(Vw3) theta = Matrix([theta]) ########## Equation du gradient utilisé en simulation #################### VelinLDPlane_function = lambdify((Omega, X_cp, v_B, v_W, R), VelinLDPlane, 'numpy') dragDirection_function = lambdify((Omega, X_cp, v_B, v_W, R), dragDirection, 'numpy') liftDirection_function = lambdify((Omega, X_cp, v_B, v_W, R), liftDirection, 'numpy') # Effort_Aero_complete_function = lambdify((Aire_list, Omega, R, v_B, v_W, cp_list, alpha_list, alpha0, alpha_s, delta0_list, delta_s, \ # CL_1_sa, CD_1_fp, k_0, k_1, k_2, CD_0_fp, CD_0_sa, CD_1_sa, C_t, C_q, C_h, omega_rotor), Effort_Aero_complete, 'numpy') t37=time.time() print("Elapsed : %f s , Prev step time: %f s \\ Generating costs ..."%(t37-t0,t37-t35)) m=symbols('m', real=True) g1,g2,g3=symbols('g1,g2,g3', real=True) g=Matrix([g1,g2,g3]) #Génération des équations finales pour la gradient du csout et des RMS error if bo==True: forces = (Sum_F_wing_complete + Sum_F_rotor_complete) + m*g else: forces = R@(Sum_F_wing_complete + Sum_F_rotor_complete) + m*g # torque = R@(Sum_T_wing_complete + Sum_T_rotor_complete) new_acc = forces/m new_v = v_B + new_acc*dt alog_i,alog_j,alog_k=symbols("alog_i,alog_j,alog_k",real=True) alog=Matrix([[alog_i],[alog_j],[alog_k]]) err_a=Matrix(alog-new_acc) err_v=Matrix(v_log-new_v) # cost_scaler_a=symbols('C_sa',real=True,positive=True) # cost_scaler_v=symbols('C_sv',real=True,positive=True) # sqerr_a=Matrix([1.0/cost_scaler_a*(err_a[0,0]**2+err_a[1,0]**2+err_a[2,0]**2)]) # sqerr_v=Matrix([1.0/cost_scaler_v*(err_v[0,0]**2+err_v[1,0]**2+err_v[2,0]**2)]) sqerr_a=Matrix([err_a[0,0]**2+err_a[1,0]**2+err_a[2,0]**2]) sqerr_v=Matrix([err_v[0,0]**2+err_v[1,0]**2+err_v[2,0]**2]) t38=time.time() print("Elapsed : %f s , Prev step time: %f s \\ Generating Jacobian ..."%(t38-t0,t38-t37)) Ja=sqerr_a.jacobian(theta) Jv=sqerr_v.jacobian(theta) Y=Matrix([new_acc,new_v,sqerr_a,sqerr_v,Ja.T,Jv.T]) X =(alog,v_log,dt, Aire_list, Omega, R, v_pred, v_W, cp_list, alpha_list, alpha0, alpha_s, delta0_list, delta_s, \ CL_1_sa, CD_1_fp, k_0, k_1, k_2, CD_0_fp, CD_0_sa, CD_1_sa, C_t, C_q, C_h, omega_rotor, \ g, m) function_physical_model =[VelinLDPlane_function, \ dragDirection_function, \ liftDirection_function,\ compute_alpha] model_func=lambdify(X,Y, modules='numpy') import dill as dill dill.settings['recurse'] = True dill.dump([model_func, 
function_physical_model], open("./.Funcs/model_func_"+str(used_logged_v_in_model)+"simple_"+str(bo), "wb")) print("Elapsed : %f s , Prev step time: %f s \\ Finished ..."%(time.time()-t0,time.time()-t3)) ``` Elapsed : 0.000357 s , Prev step time: -1 s \ Generating first symbols ... Elapsed : 0.052193 s , Prev step time: -1 s \ Generating dynamics function ... Elapsed : 0.052314 s , Prev step time: 0.000121 s \ Solving lifrdrag model ... Elapsed : 1.550169 s , Prev step time: 1.497855 s \ Solving rotor model ... Elapsed : 2.237926 s , Prev step time: 0.687757 s \ Solving Dynamics ... Elapsed : 3.460129 s , Prev step time: 1.222203 s \ Generating costs ... Elapsed : 3.558026 s , Prev step time: 0.097897 s \ Generating Jacobian ... Elapsed : 282.040830 s , Prev step time: 280.490661 s \ Finished ... Elapsed : 0.000273 s , Prev step time: -1 s \ Generating first symbols ... Elapsed : 0.011859 s , Prev step time: -1 s \ Generating dynamics function ... Elapsed : 0.011914 s , Prev step time: 0.000055 s \ Solving lifrdrag model ... Elapsed : 0.907588 s , Prev step time: 0.895674 s \ Solving rotor model ... Elapsed : 1.219560 s , Prev step time: 0.311972 s \ Solving Dynamics ... Elapsed : 1.936768 s , Prev step time: 0.717208 s \ Generating costs ... Elapsed : 1.940360 s , Prev step time: 0.003592 s \ Generating Jacobian ... Elapsed : 285.224708 s , Prev step time: 284.317121 s \ Finished ... ```python ```
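The overall pattern used above — build a symbolic squared-error cost, take its Jacobian with respect to the parameter vector `theta`, then `lambdify` everything for fast numerical evaluation — can be hard to see through the full aircraft model. Here is a minimal, self-contained sketch of the same sympy workflow on a toy one-dimensional model (the toy model and parameter names are made up for illustration):

```python
from sympy import symbols, Matrix, lambdify

# toy model: predicted acceleration a_pred = c1*v + c2*v**2, parameters theta = (c1, c2)
c1, c2, v, a_log = symbols('c1 c2 v a_log', real=True)
theta = Matrix([c1, c2])

a_pred = c1*v + c2*v**2
sqerr = Matrix([(a_log - a_pred)**2])   # squared prediction error
J = sqerr.jacobian(theta)               # gradient of the cost w.r.t. the parameters

# one callable returning [cost, dcost/dc1, dcost/dc2], usable inside a descent loop
cost_and_grad = lambdify((a_log, v, c1, c2), Matrix([sqerr, J.T]), 'numpy')
print(cost_and_grad(1.2, 10.0, 0.05, 0.001))
```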
116f8cd4be700920a5c1a8826e7559e55831901f
23,256
ipynb
Jupyter Notebook
2_5_model_avion_generator.ipynb
altlnt/id_modele_reel
f67fdc66a207108b1fb6af0a7197bf590997cfbd
[ "MIT" ]
null
null
null
2_5_model_avion_generator.ipynb
altlnt/id_modele_reel
f67fdc66a207108b1fb6af0a7197bf590997cfbd
[ "MIT" ]
null
null
null
2_5_model_avion_generator.ipynb
altlnt/id_modele_reel
f67fdc66a207108b1fb6af0a7197bf590997cfbd
[ "MIT" ]
null
null
null
55.636364
237
0.537754
true
6,057
Qwen/Qwen-72B
1. YES 2. YES
0.879147
0.689306
0.606001
__label__kor_Hang
0.157907
0.246273
```python %matplotlib inline ``` Neural Transfer with PyTorch ============================ **Author**: `Alexis Jacq <https://alexis-jacq.github.io>`_ Introduction ------------ Welcome! This tutorial explains how to implement the `Neural-Style <https://arxiv.org/abs/1508.06576>`__ algorithm developed by Leon A. Gatys, Alexander S. Ecker and Matthias Bethge. Neural what? ~~~~~~~~~~~~ The Neural-Style, or Neural-Transfer, is an algorithm that takes as input a content-image (e.g. a turtle), a style-image (e.g. artistic waves) and returns the content of the content-image as if it was 'painted' using the artistic style of the style-image: .. figure:: /_static/img/neural-style/neuralstyle.png :alt: content1 How does it work? ~~~~~~~~~~~~~~~~~ The principle is simple: we define two distances, one for the content ($D_C$) and one for the style ($D_S$). $D_C$ measures how different the content is between two images, while $D_S$ measures how different the style is between two images. Then, we take a third image, the input (e.g. white noise), and we transform it in order to both minimize its content-distance with the content-image and its style-distance with the style-image. OK. How does it work? ^^^^^^^^^^^^^^^^^^^^^ Well, going further requires some mathematics. Let $C_{nn}$ be a pre-trained deep convolutional neural network and $X$ be any image. $C_{nn}(X)$ is the network fed by $X$ (containing feature maps at all layers). Let $F_{XL} \in C_{nn}(X)$ be the feature maps at depth layer $L$, all vectorized and concatenated in one single vector. We simply define the content of $X$ at layer $L$ by $F_{XL}$. Then, if $Y$ is another image of the same size as $X$, we define the distance of content at layer $L$ as follows: \begin{align}D_C^L(X,Y) = \|F_{XL} - F_{YL}\|^2 = \sum_i (F_{XL}(i) - F_{YL}(i))^2\end{align} Where $F_{XL}(i)$ is the $i^{th}$ element of $F_{XL}$. The style is a bit less trivial to define. Let $F_{XL}^k$ with $k \leq K$ be the vectorized $k^{th}$ feature map among the $K$ feature maps at layer $L$. The style $G_{XL}$ of $X$ at layer $L$ is defined by the Gram product of all vectorized feature maps $F_{XL}^k$ with $k \leq K$. In other words, $G_{XL}$ is a $K$\ x\ $K$ matrix and the element $G_{XL}(k,l)$ at the $k^{th}$ row and $l^{th}$ column of $G_{XL}$ is the inner product between $F_{XL}^k$ and $F_{XL}^l$ : \begin{align}G_{XL}(k,l) = \langle F_{XL}^k, F_{XL}^l\rangle = \sum_i F_{XL}^k(i) . F_{XL}^l(i)\end{align} Where $F_{XL}^k(i)$ is the $i^{th}$ element of $F_{XL}^k$. We can see $G_{XL}(k,l)$ as a measure of the correlation between feature maps $k$ and $l$. In that way, $G_{XL}$ represents the correlation matrix of the feature maps of $X$ at layer $L$. Note that the size of $G_{XL}$ only depends on the number of feature maps, not on the size of $X$.
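To make this definition concrete, here is a tiny numerical sketch (not part of the original tutorial) that computes $G_{XL}$ for a layer with $K=2$ feature maps, each flattened to a vector of length $4$; the numbers are made up:

```python
import torch

# two flattened feature maps F^1 and F^2 stacked as rows (K=2, N=4)
F_hat = torch.Tensor([[1., 0., 2., 1.],
                      [0., 1., 1., 3.]])

G = torch.mm(F_hat, F_hat.t())   # G[k, l] = <F^k, F^l>
print(G)                         # [[ 6.,  5.], [ 5., 11.]]
```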
Then, if $Y$ is another image *of any size*, we define the distance of style at layer $L$ as follows: \begin{align}D_S^L(X,Y) = \|G_{XL} - G_{YL}\|^2 = \sum_{k,l} (G_{XL}(k,l) - G_{YL}(k,l))^2\end{align} In order to minimize in one shot $D_C(X,C)$ between a variable image $X$ and target content-image $C$ and $D_S(X,S)$ between $X$ and target style-image $S$, both computed at several layers, we compute and sum the gradients (derivative with respect to $X$) of each distance at each desired layer: \begin{align}\nabla_{\textit{total}}(X,S,C) = \sum_{L_C} w_{CL_C} \cdot \nabla_{\textit{content}}^{L_C}(X,C) + \sum_{L_S} w_{SL_S} \cdot \nabla_{\textit{style}}^{L_S}(X,S)\end{align} Where $L_C$ and $L_S$ are respectively the desired (arbitrarily chosen) layers for content and style, and $w_{CL_C}$ and $w_{SL_S}$ are the (arbitrarily chosen) weights associated with the style or the content at each desired layer. Then, we run a gradient descent over $X$: \begin{align}X \leftarrow X - \alpha \nabla_{\textit{total}}(X,S,C)\end{align} OK, that's enough math. If you want to go deeper (how to compute the gradients) **we encourage you to read the original paper** by Leon A. Gatys et al., where everything is explained much better and much more clearly. For our implementation in PyTorch, we already have everything we need: indeed, with PyTorch, all the gradients are automatically and dynamically computed for you (while you use functions from the library). This is why the implementation of this algorithm is very comfortable with PyTorch. PyTorch implementation ---------------------- If you are not sure you understand all the mathematics above, you will probably get it by implementing it. If you are discovering PyTorch, we recommend you first read this :doc:`Introduction to PyTorch </beginner/deep_learning_60min_blitz>`. Packages ~~~~~~~~ We will rely on the following packages: - ``torch``, ``torch.nn``, ``numpy`` (indispensable packages for neural networks with PyTorch) - ``torch.autograd.Variable`` (dynamic computation of the gradient wrt a variable) - ``torch.optim`` (efficient gradient descents) - ``PIL``, ``PIL.Image``, ``matplotlib.pyplot`` (load and display images) - ``torchvision.transforms`` (transform PIL images into torch tensors) - ``torchvision.models`` (train or load pre-trained models) - ``copy`` (to deep copy the models; system package) ```python from __future__ import print_function import torch import torch.nn as nn from torch.autograd import Variable import torch.optim as optim from PIL import Image import matplotlib.pyplot as plt import torchvision.transforms as transforms import torchvision.models as models import copy ``` Cuda ~~~~ If you have a GPU on your computer, it is preferable to run the algorithm on it, especially if you want to try larger networks (like VGG). For this, we have ``torch.cuda.is_available()`` that returns ``True`` if your computer has an available GPU. Then, we can use the ``.cuda()`` method, which moves the allocations associated with a module from the CPU to the GPU. When we want to move this module back to the CPU (e.g. to use numpy), we use the ``.cpu()`` method. Finally, ``.type(dtype)`` will be used to convert a ``torch.FloatTensor`` into a ``torch.cuda.FloatTensor`` to feed GPU processes. ```python use_cuda = torch.cuda.is_available() dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor ``` Load images ~~~~~~~~~~~ In order to simplify the implementation, let's start by importing a style and a content image of the same dimensions.
We then scale them to the desired output image size (128 or 512 in the example, depending on GPU availability) and transform them into torch tensors, ready to feed a neural network: .. Note:: Here are links to download the images required to run the tutorial: `picasso.jpg <http://pytorch.org/tutorials/_static/img/neural-style/picasso.jpg>`__ and `dancing.jpg <http://pytorch.org/tutorials/_static/img/neural-style/dancing.jpg>`__. Download these two images and add them to a directory named ``images``. ```python # desired size of the output image imsize = 512 if use_cuda else 128 # use small size if no gpu loader = transforms.Compose([ transforms.Scale(imsize), # scale imported image transforms.ToTensor()]) # transform it into a torch tensor def image_loader(image_name): image = Image.open(image_name) image = Variable(loader(image)) # fake batch dimension required to fit network's input dimensions image = image.unsqueeze(0) return image style_img = image_loader("images/picasso.jpg").type(dtype) content_img = image_loader("images/dancing.jpg").type(dtype) assert style_img.size() == content_img.size(), \ "we need to import style and content images of the same size" ``` Imported PIL images have values between 0 and 255. Transformed into torch tensors, their values are between 0 and 1. This is an important detail: neural networks from the torch library are trained with 0-1 tensor images. If you try to feed the networks with 0-255 tensor images, the activated feature maps will not make sense. This is not the case with pre-trained networks from the Caffe library: they are trained with 0-255 tensor images. Display images ~~~~~~~~~~~~~~ We will use ``plt.imshow`` to display images. So we need to first reconvert them into PIL images: ```python unloader = transforms.ToPILImage() # reconvert into PIL image plt.ion() def imshow(tensor, title=None): image = tensor.clone().cpu() # we clone the tensor so as not to modify it image = image.view(3, imsize, imsize) # remove the fake batch dimension image = unloader(image) plt.imshow(image) if title is not None: plt.title(title) plt.pause(0.001) # pause a bit so that plots are updated plt.figure() imshow(style_img.data, title='Style Image') plt.figure() imshow(content_img.data, title='Content Image') ``` Content loss ~~~~~~~~~~~~ The content loss is a function that takes as input the feature maps $F_{XL}$ at a layer $L$ in a network fed by $X$ and returns the weighted content distance $w_{CL} \cdot D_C^L(X,C)$ between this image and the content image. Hence, the weight $w_{CL}$ and the target content $F_{CL}$ are parameters of the function. We implement this function as a torch module with a constructor that takes these parameters as input. The distance $\|F_{XL} - F_{YL}\|^2$ is the Mean Squared Error between the two sets of feature maps, which can be computed using the criterion ``nn.MSELoss`` stated as a third parameter. We will add our content losses at each desired layer as additive modules of the neural network. That way, each time we feed the network with an input image $X$, all the content losses will be computed at the desired layers and, thanks to autograd, all the gradients will be computed. For that, we just need to make the ``forward`` method of our module return the input: the module becomes a ''transparent layer'' of the neural network. The computed loss is saved as a parameter of the module. Finally, we define a fake ``backward`` method, which just calls the backward method of ``nn.MSELoss`` in order to reconstruct the gradient.
This method returns the computed loss: this will be useful when running the gradient descent in order to display the evolution of style and content losses. ```python class ContentLoss(nn.Module): def __init__(self, target, weight): super(ContentLoss, self).__init__() # we 'detach' the target content from the tree used self.target = target.detach() * weight # to dynamically compute the gradient: this is a stated value, # not a variable. Otherwise the forward method of the criterion # will throw an error. self.weight = weight self.criterion = nn.MSELoss() def forward(self, input): self.loss = self.criterion(input * self.weight, self.target) self.output = input return self.output def backward(self, retain_graph=True): self.loss.backward(retain_graph=retain_graph) return self.loss ``` .. Note:: **Important detail**: this module, although it is named ``ContentLoss``, is not a true PyTorch Loss function. If you want to define your content loss as a PyTorch Loss, you have to create a PyTorch autograd Function and to recompute/implement the gradient by the hand in the ``backward`` method. Style loss ~~~~~~~~~~ For the style loss, we need first to define a module that compute the gram produce $G_{XL}$ given the feature maps $F_{XL}$ of the neural network fed by $X$, at layer $L$. Let $\hat{F}_{XL}$ be the re-shaped version of $F_{XL}$ into a $K$\ x\ $N$ matrix, where $K$ is the number of feature maps at layer $L$ and $N$ the lenght of any vectorized feature map $F_{XL}^k$. The $k^{th}$ line of $\hat{F}_{XL}$ is $F_{XL}^k$. We let you check that $\hat{F}_{XL} \cdot \hat{F}_{XL}^T = G_{XL}$. Given that, it becomes easy to implement our module: ```python class GramMatrix(nn.Module): def forward(self, input): a, b, c, d = input.size() # a=batch size(=1) # b=number of feature maps # (c,d)=dimensions of a f. map (N=c*d) features = input.view(a * b, c * d) # resise F_XL into \hat F_XL G = torch.mm(features, features.t()) # compute the gram product # we 'normalize' the values of the gram matrix # by dividing by the number of element in each feature maps. return G.div(a * b * c * d) ``` The longer is the feature maps dimension $N$, the bigger are the values of the gram matrix. Therefore, if we don't normalize by $N$, the loss computed at the first layers (before pooling layers) will have much more importance during the gradient descent. We dont want that, since the most interesting style features are in the deepest layers! Then, the style loss module is implemented exactly the same way than the content loss module, but we have to add the ``gramMatrix`` as a parameter: ```python class StyleLoss(nn.Module): def __init__(self, target, weight): super(StyleLoss, self).__init__() self.target = target.detach() * weight self.weight = weight self.gram = GramMatrix() self.criterion = nn.MSELoss() def forward(self, input): self.output = input.clone() self.G = self.gram(input) self.G.mul_(self.weight) self.loss = self.criterion(self.G, self.target) return self.output def backward(self, retain_graph=True): self.loss.backward(retain_graph=retain_graph) return self.loss ``` Load the neural network ~~~~~~~~~~~~~~~~~~~~~~~ Now, we have to import a pre-trained neural network. As in the paper, we are going to use a pretrained VGG network with 19 layers (VGG19). PyTorch's implementation of VGG is a module divided in two child ``Sequential`` modules: ``features`` (containing convolution and pooling layers) and ``classifier`` (containing fully connected layers). 
We are just interested by ``features``: ```python cnn = models.vgg19(pretrained=True).features # move it to the GPU if possible: if use_cuda: cnn = cnn.cuda() ``` A ``Sequential`` module contains an ordered list of child modules. For instance, ``vgg19.features`` contains a sequence (Conv2d, ReLU, Maxpool2d, Conv2d, ReLU...) aligned in the right order of depth. As we said in *Content loss* section, we wand to add our style and content loss modules as additive 'transparent' layers in our network, at desired depths. For that, we construct a new ``Sequential`` module, in wich we are going to add modules from ``vgg19`` and our loss modules in the right order: ```python # desired depth layers to compute style/content losses : content_layers_default = ['conv_4'] style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5'] def get_style_model_and_losses(cnn, style_img, content_img, style_weight=1000, content_weight=1, content_layers=content_layers_default, style_layers=style_layers_default): cnn = copy.deepcopy(cnn) # just in order to have an iterable access to or list of content/syle # losses content_losses = [] style_losses = [] model = nn.Sequential() # the new Sequential module network gram = GramMatrix() # we need a gram module in order to compute style targets # move these modules to the GPU if possible: if use_cuda: model = model.cuda() gram = gram.cuda() i = 1 for layer in list(cnn): if isinstance(layer, nn.Conv2d): name = "conv_" + str(i) model.add_module(name, layer) if name in content_layers: # add content loss: target = model(content_img).clone() content_loss = ContentLoss(target, content_weight) model.add_module("content_loss_" + str(i), content_loss) content_losses.append(content_loss) if name in style_layers: # add style loss: target_feature = model(style_img).clone() target_feature_gram = gram(target_feature) style_loss = StyleLoss(target_feature_gram, style_weight) model.add_module("style_loss_" + str(i), style_loss) style_losses.append(style_loss) if isinstance(layer, nn.ReLU): name = "relu_" + str(i) model.add_module(name, layer) if name in content_layers: # add content loss: target = model(content_img).clone() content_loss = ContentLoss(target, content_weight) model.add_module("content_loss_" + str(i), content_loss) content_losses.append(content_loss) if name in style_layers: # add style loss: target_feature = model(style_img).clone() target_feature_gram = gram(target_feature) style_loss = StyleLoss(target_feature_gram, style_weight) model.add_module("style_loss_" + str(i), style_loss) style_losses.append(style_loss) i += 1 if isinstance(layer, nn.MaxPool2d): name = "pool_" + str(i) model.add_module(name, layer) # *** return model, style_losses, content_losses ``` .. Note:: In the paper they recommend to change max pooling layers into average pooling. With AlexNet, that is a small network compared to VGG19 used in the paper, we are not going to see any difference of quality in the result. However, you can use these lines instead if you want to do this substitution: :: # avgpool = nn.AvgPool2d(kernel_size=layer.kernel_size, # stride=layer.stride, padding = layer.padding) # model.add_module(name,avgpool) Input image ~~~~~~~~~~~ Again, in order to simplify the code, we take an image of the same dimensions than content and style images. This image can be a white noise, or it can also be a copy of the content-image. 
```python input_img = content_img.clone() # if you want to use a white noise instead uncomment the below line: # input_img = Variable(torch.randn(content_img.data.size())).type(dtype) # add the original input image to the figure: plt.figure() imshow(input_img.data, title='Input Image') ``` Gradient descent ~~~~~~~~~~~~~~~~ As Leon Gatys, the author of the algorithm, suggested `here <https://discuss.pytorch.org/t/pytorch-tutorial-for-neural-transfert-of-artistic-style/336/20?u=alexis-jacq>`__, we will use L-BFGS algorithm to run our gradient descent. Unlike training a network, we want to train the input image in order to minimise the content/style losses. We would like to simply create a PyTorch L-BFGS optimizer, passing our image as the variable to optimize. But ``optim.LBFGS`` takes as first argument a list of PyTorch ``Variable`` that require gradient. Our input image is a ``Variable`` but is not a leaf of the tree that requires computation of gradients. In order to show that this variable requires a gradient, a possibility is to construct a ``Parameter`` object from the input image. Then, we just give a list containing this ``Parameter`` to the optimizer's constructor: ```python def get_input_param_optimizer(input_img): # this line to show that input is a parameter that requires a gradient input_param = nn.Parameter(input_img.data) optimizer = optim.LBFGS([input_param]) return input_param, optimizer ``` **Last step**: the loop of gradient descent. At each step, we must feed the network with the updated input in order to compute the new losses, we must run the ``backward`` methods of each loss to dynamically compute their gradients and perform the step of gradient descent. The optimizer requires as argument a "closure": a function that reevaluates the model and returns the loss. However, there's a small catch. The optimized image may take its values between $-\infty$ and $+\infty$ instead of staying between 0 and 1. In other words, the image might be well optimized and have absurd values. In fact, we must perform an optimization under constraints in order to keep having right vaues into our input image. There is a simple solution: at each step, to correct the image to maintain its values into the 0-1 interval. ```python def run_style_transfer(cnn, content_img, style_img, input_img, num_steps=300, style_weight=1000, content_weight=1): """Run the style transfer.""" print('Building the style transfer model..') model, style_losses, content_losses = get_style_model_and_losses(cnn, style_img, content_img, style_weight, content_weight) input_param, optimizer = get_input_param_optimizer(input_img) print('Optimizing..') run = [0] while run[0] <= num_steps: def closure(): # correct the values of updated input image input_param.data.clamp_(0, 1) optimizer.zero_grad() model(input_param) style_score = 0 content_score = 0 for sl in style_losses: style_score += sl.backward() for cl in content_losses: content_score += cl.backward() run[0] += 1 if run[0] % 50 == 0: print("run {}:".format(run)) print('Style Loss : {:4f} Content Loss: {:4f}'.format( style_score.data[0], content_score.data[0])) print() return style_score + content_score optimizer.step(closure) # a last correction... input_param.data.clamp_(0, 1) return input_param.data ``` Finally, run the algorithm ```python output = run_style_transfer(cnn, content_img, style_img, input_img) plt.figure() imshow(output, title='Output Image') # sphinx_gallery_thumbnail_number = 4 plt.ioff() plt.show() ```
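As an optional aside (not part of the original tutorial), here is a minimal sketch for saving the stylized result to disk. It assumes the ``unloader`` transform and the ``output`` tensor defined above are still in scope, and the filename is arbitrary:

```python
# Sketch only: persist the result. `output` has shape (1, 3, H, W) with values in [0, 1],
# so we move it to the CPU, drop the fake batch dimension, and convert back to a PIL image.
result = unloader(output.cpu().squeeze(0))
result.save("styled_output.png")  # hypothetical filename
```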
ce13b4ebb39147c64e7e2b1bd22d792d34a852f4
26,797
ipynb
Jupyter Notebook
neural_style_tutorial.ipynb
bazitur/dlschl_project
c05b70f4262819389d46d3e16a70fe9dac4ae644
[ "Apache-2.0" ]
null
null
null
neural_style_tutorial.ipynb
bazitur/dlschl_project
c05b70f4262819389d46d3e16a70fe9dac4ae644
[ "Apache-2.0" ]
null
null
null
neural_style_tutorial.ipynb
bazitur/dlschl_project
c05b70f4262819389d46d3e16a70fe9dac4ae644
[ "Apache-2.0" ]
null
null
null
96.740072
5,359
0.640333
true
5,467
Qwen/Qwen-72B
1. YES 2. YES
0.808067
0.815232
0.658763
__label__eng_Latn
0.990601
0.368857
```python import seaborn as sns import matplotlib.pyplot as plt import pandas as pd import numpy as np #sns.set_theme(style="whitegrid") sns.set(rc={'figure.figsize':(16,13)}) ``` ```python # Absolute error def AE(y, y_hat, t, T): resid = np.abs(y - y_hat) return resid ############################################# # Temporal decay loss def EP_td(y, y_hat, t, T, losstype="loss", weight=1): decay = ((T-t)/T) resid = np.abs(y - y_hat) if losstype=="loss": EP = resid**decay if losstype=="penalty": EP = resid+ weight*(resid**decay) return EP # Progressive temporal decay loss def EP_prog_td(y, y_hat, t, T, losstype="loss", weight=1): decay = 1/t resid = np.abs(y - y_hat) if losstype=="loss": EP = resid*decay if losstype=="penalty": EP = resid+ weight*(resid*decay) return EP ``` ### Previous formulations \begin{equation}MAE_{PtD} = \frac{1}{N}\sum_{i=1}^{N}\frac{1}{T}\sum_{t=1}^{T_i}\mid y_{t}^i - \hat{y}_{t}^i\mid ^{\frac{T_i-t}{T_i}}\end{equation} and \begin{equation}MAE_{MtD} = \frac{1}{N}\sum_{i=1}^{N}\frac{1}{T}\sum_{t=1}^{T_i}\frac{\mid y_{t}^i - \hat{y}_{t}^i\mid}{t} \end{equation} ```python """ Missing the "balance" weight to adjust accuracy versus earliness, and missing a more flexible rate of decay form """ ``` '\nMissing the "balance" weight to adjust accuracy versus earliness,\nand missing a more flexible rate of decay form\n' ## Useful formulas for exponentials: <b>Radioactive decay example: </b> \begin{equation} X_{t+1} = \frac{X_{t}}{2} \end{equation} Half-life = 1 year <b>Bounded exponential growth (Logistic equation):</b> \begin{equation} X_{t+1} = rX_{t}\left(1-\frac{X_{t}}{max(X)}\right) \end{equation} ```python ``` # New loss formulation Basic components: \begin{equation} MAE + \beta f(MAE,\alpha) \end{equation} Where $\beta$ is the weighting parameter of the earliness component, versus the $\gamma$ parameter that weight normal accuracy. $\alpha$ acts as a decay-rate parameter specifying to which degree the temporal decay should prioritize the earliest events in a sequence/trace. The earliness/penalty component consist of: \begin{equation} f(MAE,\alpha) = \frac{1}{N}\sum_{i=1}^{N}\frac{1}{T}\sum_{t=1}^{T_i}\frac{\mid y_{t}^i - \hat{y}_{t}^i\mid}{\alpha t} \end{equation} and thus the full loss becomes: \begin{equation} MAE_{td}: \frac{1}{N}\sum_{i=1}^{N}\frac{1}{T}\sum_{t=1}^{T_i}\gamma\mid y_{t}^i - \hat{y}_{t}^i\mid + \beta \frac{\mid y_{t}^i - \hat{y}_{t}^i\mid}{\alpha t} \end{equation} However, the first batches of experiments show that adding the accuracy component only leads to inferior performance compared to baseline MAE. 
Current implementation of MAE_td is thus as simple as: \begin{equation} MAE_{td}: \frac{1}{N}\sum_{i=1}^{N}\frac{1}{T}\sum_{t=1}^{T_i}\frac{\mid y_{t}^i - \hat{y}_{t}^i\mid}{\alpha t} \end{equation} ```python # Temporal decay loss def EP_td(y, y_hat, t, T, losstype="penalty", alpha=1, beta=1, gamma=1): resid = np.abs(y - y_hat) EP = gamma*resid + beta*(resid/t*alpha) return EP ``` ```python def generate_obs(metric, T = 10, alpha = 1, beta = 1, gamma=1, loss_comp="loss"): y = np.linspace(start=250,stop=50, num=10,endpoint=True) y_hat = np.linspace(start=250,stop=50, num=10,endpoint=True)+50 t = np.array([1,2,3,4,5,6,7,8,9,10]) #placeholder resid = [0]*len(t) if metric == "AE": resid = AE(y, y_hat, t, T) if metric == "EP_td": resid = EP_td(y, y_hat, t, T, losstype=loss_comp, alpha=alpha, beta=beta, gamma=gamma) #if metric == "EP_prog_td": # resid = EP_prog_td(y, y_hat, t, T, losstype=loss_comp, weight=weight_1) df = pd.DataFrame({"y":y, "y_hat":y_hat, "t":t, "resid":resid, "loss":[metric]*T, "alpha":[alpha]*T, "beta":[beta]*T, "gamma":[gamma]*T}) return df """ Test the data generator with no loss function """ generate_obs("EP_td", T = 10, alpha = 1, beta = 1, gamma=1, loss_comp="loss") ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>y</th> <th>y_hat</th> <th>t</th> <th>resid</th> <th>loss</th> <th>alpha</th> <th>beta</th> <th>gamma</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>250.000000</td> <td>300.000000</td> <td>1</td> <td>100.000000</td> <td>EP_td</td> <td>1</td> <td>1</td> <td>1</td> </tr> <tr> <th>1</th> <td>227.777778</td> <td>277.777778</td> <td>2</td> <td>75.000000</td> <td>EP_td</td> <td>1</td> <td>1</td> <td>1</td> </tr> <tr> <th>2</th> <td>205.555556</td> <td>255.555556</td> <td>3</td> <td>66.666667</td> <td>EP_td</td> <td>1</td> <td>1</td> <td>1</td> </tr> <tr> <th>3</th> <td>183.333333</td> <td>233.333333</td> <td>4</td> <td>62.500000</td> <td>EP_td</td> <td>1</td> <td>1</td> <td>1</td> </tr> <tr> <th>4</th> <td>161.111111</td> <td>211.111111</td> <td>5</td> <td>60.000000</td> <td>EP_td</td> <td>1</td> <td>1</td> <td>1</td> </tr> <tr> <th>5</th> <td>138.888889</td> <td>188.888889</td> <td>6</td> <td>58.333333</td> <td>EP_td</td> <td>1</td> <td>1</td> <td>1</td> </tr> <tr> <th>6</th> <td>116.666667</td> <td>166.666667</td> <td>7</td> <td>57.142857</td> <td>EP_td</td> <td>1</td> <td>1</td> <td>1</td> </tr> <tr> <th>7</th> <td>94.444444</td> <td>144.444444</td> <td>8</td> <td>56.250000</td> <td>EP_td</td> <td>1</td> <td>1</td> <td>1</td> </tr> <tr> <th>8</th> <td>72.222222</td> <td>122.222222</td> <td>9</td> <td>55.555556</td> <td>EP_td</td> <td>1</td> <td>1</td> <td>1</td> </tr> <tr> <th>9</th> <td>50.000000</td> <td>100.000000</td> <td>10</td> <td>55.000000</td> <td>EP_td</td> <td>1</td> <td>1</td> <td>1</td> </tr> </tbody> </table> </div> ```python metrics_of_interest = ["AE","EP_td"] #,"EP_prog_td" #accuracy/earliness balance betas = [1] #[0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75] gammas = [1] #[0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75] #decay rate parameter alphas = [0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75] data = [] for balance1 in betas: for balance2 in gammas: for rate in alphas: for metric in metrics_of_interest: obs = generate_obs(metric, beta = balance1, gamma=balance2, alpha = rate) data.append(obs) residuals = pd.concat(data) 
residuals.head() ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>y</th> <th>y_hat</th> <th>t</th> <th>resid</th> <th>loss</th> <th>alpha</th> <th>beta</th> <th>gamma</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>250.000000</td> <td>300.000000</td> <td>1</td> <td>50.0</td> <td>AE</td> <td>0.0</td> <td>1</td> <td>1</td> </tr> <tr> <th>1</th> <td>227.777778</td> <td>277.777778</td> <td>2</td> <td>50.0</td> <td>AE</td> <td>0.0</td> <td>1</td> <td>1</td> </tr> <tr> <th>2</th> <td>205.555556</td> <td>255.555556</td> <td>3</td> <td>50.0</td> <td>AE</td> <td>0.0</td> <td>1</td> <td>1</td> </tr> <tr> <th>3</th> <td>183.333333</td> <td>233.333333</td> <td>4</td> <td>50.0</td> <td>AE</td> <td>0.0</td> <td>1</td> <td>1</td> </tr> <tr> <th>4</th> <td>161.111111</td> <td>211.111111</td> <td>5</td> <td>50.0</td> <td>AE</td> <td>0.0</td> <td>1</td> <td>1</td> </tr> </tbody> </table> </div> ```python g = sns.FacetGrid(residuals, col="alpha", col_wrap=4, height=4, size=3, hue="loss", aspect=1) # g.map(sns.barplot, 't','resid') plt.savefig("loss_functions.png") plt.show() ``` ## Multiple settings ```python metrics_of_interest = ["EP_td"] #,"EP_prog_td" #accuracy/earliness balance betas = [1] #[0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75] gammas = [0, 0.5, 1] #[0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75] #decay rate parameter alphas = [0.01, 0.5, 1.0]#, 1.75] data = [] for balance1 in betas: for balance2 in gammas: for rate in alphas: for metric in metrics_of_interest: obs = generate_obs(metric, beta = balance1, gamma=balance2, alpha = rate) data.append(obs) residuals = pd.concat(data) residuals#.head(10) ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>y</th> <th>y_hat</th> <th>t</th> <th>resid</th> <th>loss</th> <th>alpha</th> <th>beta</th> <th>gamma</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>250.000000</td> <td>300.000000</td> <td>1</td> <td>0.500000</td> <td>EP_td</td> <td>0.01</td> <td>1</td> <td>0.0</td> </tr> <tr> <th>1</th> <td>227.777778</td> <td>277.777778</td> <td>2</td> <td>0.250000</td> <td>EP_td</td> <td>0.01</td> <td>1</td> <td>0.0</td> </tr> <tr> <th>2</th> <td>205.555556</td> <td>255.555556</td> <td>3</td> <td>0.166667</td> <td>EP_td</td> <td>0.01</td> <td>1</td> <td>0.0</td> </tr> <tr> <th>3</th> <td>183.333333</td> <td>233.333333</td> <td>4</td> <td>0.125000</td> <td>EP_td</td> <td>0.01</td> <td>1</td> <td>0.0</td> </tr> <tr> <th>4</th> <td>161.111111</td> <td>211.111111</td> <td>5</td> <td>0.100000</td> <td>EP_td</td> <td>0.01</td> <td>1</td> <td>0.0</td> </tr> <tr> <th>...</th> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> </tr> <tr> <th>5</th> <td>138.888889</td> <td>188.888889</td> <td>6</td> <td>58.333333</td> <td>EP_td</td> <td>1.00</td> <td>1</td> <td>1.0</td> </tr> <tr> <th>6</th> <td>116.666667</td> <td>166.666667</td> <td>7</td> <td>57.142857</td> <td>EP_td</td> <td>1.00</td> <td>1</td> <td>1.0</td> </tr> <tr> <th>7</th> <td>94.444444</td> <td>144.444444</td> <td>8</td> <td>56.250000</td> <td>EP_td</td> <td>1.00</td> <td>1</td> <td>1.0</td> </tr> <tr> <th>8</th> 
<td>72.222222</td> <td>122.222222</td> <td>9</td> <td>55.555556</td> <td>EP_td</td> <td>1.00</td> <td>1</td> <td>1.0</td> </tr> <tr> <th>9</th> <td>50.000000</td> <td>100.000000</td> <td>10</td> <td>55.000000</td> <td>EP_td</td> <td>1.00</td> <td>1</td> <td>1.0</td> </tr> </tbody> </table> <p>90 rows × 8 columns</p> </div> ```python g = sns.FacetGrid(residuals, col="gamma", row="alpha", hue="loss") g.map(sns.barplot, 't','resid') plt.savefig('loss_weights.png', dpi=300) plt.show() ``` ```python ``` <Figure size 1152x936 with 0 Axes> ```python metrics_of_interest = ["EP_td"] #,"EP_prog_td" balance_weights = np.linspace(start=0.5,stop=2, num=4,endpoint=True) #[ 0.5, 1.0, 1.5] rates = np.linspace(start=0.5, stop=2, num=4, endpoint=True) #[1.0, 1.25, 1.5] data = [] for rate in rates: for balance in balance_weights: for metric in metrics_of_interest: obs = generate_obs(metric, balance = balance, rate = rate) data.append(obs) residuals = pd.concat(data) residuals.head(20) ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>y</th> <th>y_hat</th> <th>t</th> <th>resid</th> <th>loss</th> <th>balance</th> <th>rate</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>250.000000</td> <td>300.000000</td> <td>1</td> <td>62.500000</td> <td>EP_td</td> <td>0.5</td> <td>0.5</td> </tr> <tr> <th>1</th> <td>227.777778</td> <td>277.777778</td> <td>2</td> <td>56.250000</td> <td>EP_td</td> <td>0.5</td> <td>0.5</td> </tr> <tr> <th>2</th> <td>205.555556</td> <td>255.555556</td> <td>3</td> <td>54.166667</td> <td>EP_td</td> <td>0.5</td> <td>0.5</td> </tr> <tr> <th>3</th> <td>183.333333</td> <td>233.333333</td> <td>4</td> <td>53.125000</td> <td>EP_td</td> <td>0.5</td> <td>0.5</td> </tr> <tr> <th>4</th> <td>161.111111</td> <td>211.111111</td> <td>5</td> <td>52.500000</td> <td>EP_td</td> <td>0.5</td> <td>0.5</td> </tr> <tr> <th>5</th> <td>138.888889</td> <td>188.888889</td> <td>6</td> <td>52.083333</td> <td>EP_td</td> <td>0.5</td> <td>0.5</td> </tr> <tr> <th>6</th> <td>116.666667</td> <td>166.666667</td> <td>7</td> <td>51.785714</td> <td>EP_td</td> <td>0.5</td> <td>0.5</td> </tr> <tr> <th>7</th> <td>94.444444</td> <td>144.444444</td> <td>8</td> <td>51.562500</td> <td>EP_td</td> <td>0.5</td> <td>0.5</td> </tr> <tr> <th>8</th> <td>72.222222</td> <td>122.222222</td> <td>9</td> <td>51.388889</td> <td>EP_td</td> <td>0.5</td> <td>0.5</td> </tr> <tr> <th>9</th> <td>50.000000</td> <td>100.000000</td> <td>10</td> <td>51.250000</td> <td>EP_td</td> <td>0.5</td> <td>0.5</td> </tr> <tr> <th>0</th> <td>250.000000</td> <td>300.000000</td> <td>1</td> <td>75.000000</td> <td>EP_td</td> <td>1.0</td> <td>0.5</td> </tr> <tr> <th>1</th> <td>227.777778</td> <td>277.777778</td> <td>2</td> <td>62.500000</td> <td>EP_td</td> <td>1.0</td> <td>0.5</td> </tr> <tr> <th>2</th> <td>205.555556</td> <td>255.555556</td> <td>3</td> <td>58.333333</td> <td>EP_td</td> <td>1.0</td> <td>0.5</td> </tr> <tr> <th>3</th> <td>183.333333</td> <td>233.333333</td> <td>4</td> <td>56.250000</td> <td>EP_td</td> <td>1.0</td> <td>0.5</td> </tr> <tr> <th>4</th> <td>161.111111</td> <td>211.111111</td> <td>5</td> <td>55.000000</td> <td>EP_td</td> <td>1.0</td> <td>0.5</td> </tr> <tr> <th>5</th> <td>138.888889</td> <td>188.888889</td> <td>6</td> <td>54.166667</td> <td>EP_td</td> <td>1.0</td> <td>0.5</td> </tr> <tr> <th>6</th> <td>116.666667</td> 
<td>166.666667</td> <td>7</td> <td>53.571429</td> <td>EP_td</td> <td>1.0</td> <td>0.5</td> </tr> <tr> <th>7</th> <td>94.444444</td> <td>144.444444</td> <td>8</td> <td>53.125000</td> <td>EP_td</td> <td>1.0</td> <td>0.5</td> </tr> <tr> <th>8</th> <td>72.222222</td> <td>122.222222</td> <td>9</td> <td>52.777778</td> <td>EP_td</td> <td>1.0</td> <td>0.5</td> </tr> <tr> <th>9</th> <td>50.000000</td> <td>100.000000</td> <td>10</td> <td>52.500000</td> <td>EP_td</td> <td>1.0</td> <td>0.5</td> </tr> </tbody> </table> </div> ```python g = sns.FacetGrid(residuals, col="balance",row="rate", hue="loss") g.map(sns.barplot, 't','resid') ``` ```python ``` ```python ``` ```python ``` ```python ``` ```python ``` ```python ``` ```python ``` ```python ```
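As a possible next step (not implemented in this notebook), the per-step `EP_td` penalty above could be averaged over a whole trace to obtain the $MAE_{td}$ value for one sequence. A minimal sketch, assuming `numpy` is imported as `np` and that `y` and `y_hat` are arrays of shape `(T,)` with time indices starting at 1, mirroring `EP_td`:

```python
# Sketch only: trace-level MAE_td, reusing the same alpha/beta/gamma parameterization as EP_td.
def mae_td(y, y_hat, alpha=1.0, beta=1.0, gamma=1.0):
    T = len(y)
    t = np.arange(1, T + 1)                                 # prefix index, as in generate_obs
    resid = np.abs(y - y_hat)
    per_step = gamma * resid + beta * (resid / t * alpha)   # same per-step form as EP_td
    return per_step.mean()                                  # 1/T sum over the trace
```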
4e7e6acb1c89cb712a92f851e32f3126d6293361
128,787
ipynb
Jupyter Notebook
analysis/Temporal loss parameterized version.ipynb
Mikeriess/ML4PM_Temp_losses
6b31c33c942c2473f237d5e29153aebc2921be47
[ "MIT" ]
null
null
null
analysis/Temporal loss parameterized version.ipynb
Mikeriess/ML4PM_Temp_losses
6b31c33c942c2473f237d5e29153aebc2921be47
[ "MIT" ]
null
null
null
analysis/Temporal loss parameterized version.ipynb
Mikeriess/ML4PM_Temp_losses
6b31c33c942c2473f237d5e29153aebc2921be47
[ "MIT" ]
null
null
null
103.693237
42,764
0.794614
true
7,014
Qwen/Qwen-72B
1. YES 2. YES
0.835484
0.721743
0.603005
__label__kor_Hang
0.207284
0.239312
<a href="https://colab.research.google.com/github/leehanchung/cs224w/blob/main/notebooks/XCS224W_Colab3.ipynb" target="_parent"></a> # **CS224W - Colab 3** In Colab 2 we constructed GNN models by using PyTorch Geometric's built in GCN layer, `GCNConv`. In this Colab we will go a step deeper and implement the **GraphSAGE** ([Hamilton et al. (2017)](https://arxiv.org/abs/1706.02216)) and **GAT** ([Veličković et al. (2018)](https://arxiv.org/abs/1710.10903)) layers directly. Then we will run and test our models on the CORA dataset, a standard citation network benchmark dataset. Next, we will use [DeepSNAP](https://snap.stanford.edu/deepsnap/), a Python library assisting efficient deep learning on graphs, to split the graphs in different settings and apply dataset transformations. Lastly, using DeepSNAP's transductive link prediction dataset spliting functionality, we will construct a simple GNN model for the task of edge property predition (link prediction). **Note**: Make sure to **sequentially run all the cells in each section** so that the intermediate variables / packages will carry over to the next cell Have fun and good luck on Colab 3 :) # Device We recommend using a GPU for this Colab. Please click `Runtime` and then `Change runtime type`. Then set the `hardware accelerator` to **GPU**. ## Installation ```python # Install torch geometric import os if 'IS_GRADESCOPE_ENV' not in os.environ: !pip uninstall torch-scatter --y !pip uninstall torch-sparse --y !pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.9.0+cu111.html !pip install torch-sparse -f https://pytorch-geometric.com/whl/torch-1.9.0+cu111.html !pip install torch-geometric !pip install -q git+https://github.com/snap-stanford/deepsnap.git ``` Found existing installation: torch-scatter 2.0.8 Uninstalling torch-scatter-2.0.8: Successfully uninstalled torch-scatter-2.0.8 Found existing installation: torch-sparse 0.6.12 Uninstalling torch-sparse-0.6.12: Successfully uninstalled torch-sparse-0.6.12 Looking in links: https://pytorch-geometric.com/whl/torch-1.9.0+cu111.html Collecting torch-scatter Using cached https://data.pyg.org/whl/torch-1.9.0%2Bcu111/torch_scatter-2.0.8-cp37-cp37m-linux_x86_64.whl (10.4 MB) Installing collected packages: torch-scatter Successfully installed torch-scatter-2.0.8 Looking in links: https://pytorch-geometric.com/whl/torch-1.9.0+cu111.html Collecting torch-sparse Using cached https://data.pyg.org/whl/torch-1.9.0%2Bcu111/torch_sparse-0.6.12-cp37-cp37m-linux_x86_64.whl (3.7 MB) Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from torch-sparse) (1.4.1) Requirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.7/dist-packages (from scipy->torch-sparse) (1.19.5) Installing collected packages: torch-sparse Successfully installed torch-sparse-0.6.12 Requirement already satisfied: torch-geometric in /usr/local/lib/python3.7/dist-packages (2.0.1) Requirement already satisfied: yacs in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (0.1.8) Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (2.23.0) Requirement already satisfied: googledrivedownloader in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (0.4) Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (4.62.3) Requirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (1.1.5) Requirement already satisfied: pyparsing in 
/usr/local/lib/python3.7/dist-packages (from torch-geometric) (2.4.7) Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (1.19.5) Requirement already satisfied: scikit-learn in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (0.22.2.post1) Requirement already satisfied: PyYAML in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (3.13) Requirement already satisfied: rdflib in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (6.0.2) Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (1.4.1) Requirement already satisfied: networkx in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (2.6.3) Requirement already satisfied: jinja2 in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (2.11.3) Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2->torch-geometric) (2.0.1) Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->torch-geometric) (2018.9) Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->torch-geometric) (2.8.2) Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas->torch-geometric) (1.15.0) Requirement already satisfied: isodate in /usr/local/lib/python3.7/dist-packages (from rdflib->torch-geometric) (0.6.0) Requirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from rdflib->torch-geometric) (57.4.0) Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->torch-geometric) (2.10) Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->torch-geometric) (2021.5.30) Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->torch-geometric) (1.24.3) Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->torch-geometric) (3.0.4) Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->torch-geometric) (1.0.1) ```python import torch_geometric torch_geometric.__version__ ``` '2.0.1' # 1) GNN Layers ## Implementing Layer Modules In Colab 2, we implemented a GCN model for node and graph classification tasks. However, for that notebook we took advantage of PyG's built in GCN module. For Colab 3, we provide a build upon a general Graph Neural Network Stack, into which we will be able to plugin our own module implementations: GraphSAGE and GAT. We will then use our layer implemenations to complete node classification on the CORA dataset, a standard citation network benchmark. In this dataset, nodes correspond to documents and edges correspond to undirected citations. Each node or document in the graph is assigned a class label and features based on the documents binarized bag-of-words representation. Specifically, the Cora graph has 2708 nodes, 5429 edges, 7 prediction classes, and 1433 features per node. ## GNN Stack Module Below is the implementation of a general GNN stack, where we can plugin any GNN layer, such as **GraphSage**, **GAT**, etc. This module is provided for you. Your implementations of the **GraphSage** and **GAT** layers will function as components in the GNNStack Module. 
```python import torch import torch_scatter import torch.nn as nn import torch.nn.functional as F import torch_geometric.nn as pyg_nn import torch_geometric.utils as pyg_utils from torch import Tensor from typing import Union, Tuple, Optional from torch_geometric.typing import (OptPairTensor, Adj, Size, NoneType, OptTensor) from torch.nn import Parameter, Linear from torch_sparse import SparseTensor, set_diag from torch_geometric.nn.conv import MessagePassing from torch_geometric.utils import remove_self_loops, add_self_loops, softmax class GNNStack(torch.nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, args, emb=False): super(GNNStack, self).__init__() conv_model = self.build_conv_model(args.model_type) self.convs = nn.ModuleList() self.convs.append(conv_model(input_dim, hidden_dim)) assert (args.num_layers >= 1), 'Number of layers is not >=1' for l in range(args.num_layers-1): self.convs.append(conv_model(args.heads * hidden_dim, hidden_dim)) # post-message-passing self.post_mp = nn.Sequential( nn.Linear(args.heads * hidden_dim, hidden_dim), nn.Dropout(args.dropout), nn.Linear(hidden_dim, output_dim)) self.dropout = args.dropout self.num_layers = args.num_layers self.emb = emb def build_conv_model(self, model_type): if model_type == 'GraphSage': return GraphSage elif model_type == 'GAT': # When applying GAT with num heads > 1, you need to modify the # input and output dimension of the conv layers (self.convs), # to ensure that the input dim of the next layer is num heads # multiplied by the output dim of the previous layer. # HINT: In case you want to play with multiheads, you need to change the for-loop that builds up self.convs to be # self.convs.append(conv_model(hidden_dim * num_heads, hidden_dim)), # and also the first nn.Linear(hidden_dim * num_heads, hidden_dim) in post-message-passing. return GAT def forward(self, data): x, edge_index, batch = data.x, data.edge_index, data.batch for i in range(self.num_layers): x = self.convs[i](x, edge_index) x = F.relu(x) x = F.dropout(x, p=self.dropout,training=self.training) x = self.post_mp(x) if self.emb == True: return x return F.log_softmax(x, dim=1) def loss(self, pred, label): return F.nll_loss(pred, label) ``` ## Creating Our Own Message Passing Layer Now let's start implementing our own message passing layers! Working through this part will help us become acutely familiar with the behind the scenes work of implementing Pytorch Message Passing Layers, allowing us to build our own GNN models. To do so, we will work with and implement 3 critcal functions needed to define a PyG Message Passing Layer: `forward`, `message`, and `aggregate`. Before diving head first into the coding details, let us quickly review the key components of the message passing process. To do so, we will focus on a single round of messsage passing with respect to a single central node $x$. Before message passing, $x$ is associated with a feature vector $x^{l-1}$, and the goal of message passing is to update this feature vector as $x^l$. To do so, we implement the following steps: 1) each neighboring node $v$ passes its current message $v^{l-1}$ across the edge $(x, v)$ - 2) for the node $x$, we aggregate all of the messages of the neighboring nodes (for example through a sum or mean) - and 3) we transform the aggregated information by for example applying linear and non-linear transformations. 
Altogether, the message passing process is applied such that every node $u$ in our graph updates its embedding by acting as the central node $x$ in step 1-3 described above. Now, we extending this process to that of a single message passing layer, the job of a message passing layer is to update the current feature representation or embedding of each node in a graph by propagating and transforming information within the graph. Overall, the general paradigm of a message passing layers is: 1) pre-processing -> 2) **message passing** / propagation -> 3) post-processing. The `forward` fuction that we will implement for our message passing layer captures this execution logic. Namely, the `forward` function handles the pre and post-processing of node features / embeddings, as well as initiates message passing by calling the `propagate` function. The `propagate` function encapsulates the message passing process! It does so by calling three important functions: 1) `message`, 2) `aggregate`, and 3) `update`. Our implementation will vary slightly from this, as we will not explicitly implement `update`, but instead place the logic for updating node embeddings after message passing and within the `forward` function. To be more specific, after information is propagated (message passing), we can further transform the node embeddings outputed by `propagate`. Therefore, the output of `forward` is exactly the node embeddings after one GNN layer. Lastly, before starting to implement our own layer, let us dig a bit deeper into each of the functions described above: 1. ``` def propagate(edge_index, x=(x_i, x_j), extra=(extra_i, extra_j), size=size): ``` Calling `propagate` initiates the message passing process. Looking at the function parameters, we highlight a couple of key parameters. - `edge_index` is passed to the forward function and captures the edge structure of the graph. - `x=(x_i, x_j)` represents the node features that will be used in message passing. In order to explain why we pass the tuple `(x_i, x_j)`, we first look at how our edges are represented. For every edge $(i, j) \in \mathcal{E}$, we can differentiate $i$ as the source or central node ($x_{central}$) and j as the neighboring node ($x_{neighbor}$). Taking the example of message passing above, for a central node $u$ we will aggregate and transform all of the messages associated with the nodes $v$ s.t. $(u, v) \in \mathcal{E}$ (i.e. $v \in \mathcal{N}_{u}$). Thus we see, the subscripts `_i` and `_j` allow us to specifcally differenciate features associated with central nodes (i.e. nodes recieving message information) and neighboring nodes (i.e. nodes passing messages). This is definitely a somewhat confusing concept; however, one key thing to remember / wrap your head around is that depending on the perspective, a node $x$ acts as a central node or a neighboring node. In fact, in undirected graphs we store both edge directions (i.e. $(i, j)$ and $(j, i)$). From the central node perspective, `x_i`, x is collecting neighboring information to update its embedding. From a neighboring node perspective, `x_j`, x is passing its message information along the edge connecting it to a different central node. - `extra=(extra_i, extra_j)` represents additional information that we can associate with each node beyond its current feature embedding. In fact, we can include as many additional parameters of the form `param=(param_i, param_j)` as we would like. 
Again, we highlight that indexing with `_i` and `_j` allows us to differentiate central and neighboring nodes. The output of the `propagate` function is a matrix of node embeddings after the message passing process and has shape $[N, d]$. 2. ``` def message(x_j, ...): ``` The `message` function is called by propagate and constructs the messages from neighboring nodes $j$ to central nodes $i$ for each edge $(i, j)$ in *edge_index*. This function can take any argument that was initially passed to `propagate`. Furthermore, we can again differentiate central nodes and neighboring nodes by appending `_i` or `_j` to the variable name, .e.g. `x_i` and `x_j`. Looking more specifically at the variables, we have: - `x_j` represents a matrix of feature embeddings for all neighboring nodes passing their messages along their respective edge (i.e. all nodes $j$ for edges $(i, j) \in \mathcal{E}$). Thus, its shape is $[|\mathcal{E}|, d]$! - In implementing GAT we will see how to access additional variables passed to propagate Critically, we see that the output of the `message` function is a matrix of neighboring node embeddings ready to be aggregated, having shape $[|\mathcal{E}|, d]$. 3. ``` def aggregate(self, inputs, index, dim_size = None): ``` Lastly, the `aggregate` function is used to aggregate the messages from neighboring nodes. Looking at the parameters we highlight: - `inputs` represents a matrix of the messages passed from neighboring nodes (i.e. the output of the `message` function). - `index` has the same shape as `inputs` and tells us the central node that corresponding to each of the rows / messages $j$ in the `inputs` matrix. Thus, `index` tells us which rows / messages to aggregate for each central node. The output of `aggregate` is of shape $[N, d]$. For additional resources refer to the PyG documentation for implementing custom message passing layers: https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html ## GraphSage Implementation For our first GNN layer, we will implement the well known GraphSage ([Hamilton et al. (2017)](https://arxiv.org/abs/1706.02216)) layer! For a given *central* node $v$ with current embedding $h_v^{l-1}$, the message passing update rule to tranform $h_v^{l-1} \rightarrow h_v^l$ is as follows: \begin{equation} h_v^{(l)} = W_l\cdot h_v^{(l-1)} + W_r \cdot AGG(\{h_u^{(l-1)}, \forall u \in N(v) \}) \end{equation} where $W_1$ and $W_2$ are learanble weight matrices and the nodes $u$ are *neighboring* nodes. Additionally, we use mean aggregation for simplicity: \begin{equation} AGG(\{h_u^{(l-1)}, \forall u \in N(v) \}) = \frac{1}{|N(v)|} \sum_{u\in N(v)} h_u^{(l-1)} \end{equation} One thing to note is that we're adding a **skip connection** to our GraphSage implementation through the term $W_l\cdot h_v^{(l-1)}$. Before implementing this update rule, we encourage you to think about how different parts of the formulas above correspond with the functions outlined earlier: 1) `forward`, 2) `message`, and 3) `aggregate`. As a hint, we are given what the aggregation function is (i.e. mean aggregation)! Now the question remains, what are the messages passed by each neighbor nodes and when do we call the `propagate` function? Note: in this case the message function or messages are actually quite simple. Additionally, remember that the `propagate` function encapsulates the operations of / the outputs of the combined `message` and `aggregate` functions. Lastly, $\ell$-2 normalization of the node embeddings is applied after each iteration. 
<font color='red'>For the following questions, DON'T refer to any existing implementations online.</font> ```python class GraphSage(MessagePassing): def __init__(self, in_channels, out_channels, normalize = True, bias = False, **kwargs): super(GraphSage, self).__init__(**kwargs) self.in_channels = in_channels self.out_channels = out_channels self.normalize = normalize self.lin_l = None self.lin_r = None ############################################################################ # TODO: Your code here! # Define the layers needed for the message and update functions below. # self.lin_l is the linear transformation that you apply to embedding # for central node. # self.lin_r is the linear transformation that you apply to aggregated # message from neighbors. # Our implementation is ~2 lines, but don't worry if you deviate from this. self.lin_l = nn.Linear(self.in_channels, self.out_channels) self.lin_r = nn.Linear(self.in_channels, self.out_channels) ############################################################################ self.reset_parameters() def reset_parameters(self): self.lin_l.reset_parameters() self.lin_r.reset_parameters() def forward(self, x, edge_index, size = None): """""" out = None ############################################################################ # TODO: Your code here! # Implement message passing, as well as any post-processing (our update rule). # 1. Call propagate function to conduct the message passing. # 1.1 See the description of propagate above or the following link for more information: # https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html # 1.2 We will only use the representation for neighbor nodes (x_j), so by default # we pass the same representation for central and neighbor nodes as x=(x, x). # 2. Update our node embedding with skip connection. # 3. If normalize is set, do L-2 normalization (defined in # torch.nn.functional) # # Our implementation is ~5 lines, but don't worry if you deviate from this. x_propagate = self.propagate(edge_index, x=(x, x), size=size) x = self.lin_l(x) + x_propagate if self.normalize: x = F.normalize(x) out = x ############################################################################ return out def message(self, x_j): out = None ############################################################################ # TODO: Your code here! # Implement your message function here. # Hint: Look at the formulation of the mean aggregation function, focusing on # what message each neighboring node passes. # # Our implementation is ~1 lines, but don't worry if you deviate from this. out = self.lin_r(x_j) ############################################################################ return out def aggregate(self, inputs, index, dim_size = None): out = None # The axis along which to index number of nodes. node_dim = self.node_dim ############################################################################ # TODO: Your code here! # Implement your aggregate function here. # See here as how to use torch_scatter.scatter: # https://pytorch-scatter.readthedocs.io/en/latest/functions/scatter.html#torch_scatter.scatter # # Our implementation is ~1 lines, but don't worry if you deviate from this. out = torch_scatter.scatter(inputs, index, dim=node_dim, reduce='mean') ############################################################################ return out ``` ## GAT Implementation Attention mechanisms have become the state-of-the-art in many sequence-based tasks such as machine translation and learning sentence representations. 
One of the major benefits of attention-based mechanisms is their ability to focus on the most relevant parts of the input to make decisions. In this problem, we will see how attention mechanisms can be used to perform node classification over graph-structured data through the use of Graph Attention Networks (GATs) ([Veličković et al. (2018)](https://arxiv.org/abs/1710.10903)).

The building block of the Graph Attention Network is the graph attention layer, which is a variant of the aggregation function. Let $N$ be the number of nodes and $F$ be the dimension of the feature vector for each node. The input to each graph attentional layer is a set of node features: $\mathbf{h} = \{\overrightarrow{h_1}, \overrightarrow{h_2}, \dots, \overrightarrow{h_N}\}$, $\overrightarrow{h_i} \in \mathbb{R}^F$. The output of each graph attentional layer is a new set of node features, which may have a new dimension $F'$: $\mathbf{h'} = \{\overrightarrow{h_1'}, \overrightarrow{h_2'}, \dots, \overrightarrow{h_N'}\}$, with $\overrightarrow{h_i'} \in \mathbb{R}^{F'}$.

We will now describe how this transformation is performed for each graph attention layer. First, a shared linear transformation parametrized by the weight matrix $\mathbf{W} \in \mathbb{R}^{F' \times F}$ is applied to every node. Next, we perform self-attention on the nodes. We use a shared attention function $a$:

\begin{equation} a : \mathbb{R}^{F'} \times \mathbb{R}^{F'} \rightarrow \mathbb{R} \end{equation}

that computes the attention coefficients capturing the importance of node $j$'s features to node $i$:

\begin{equation} e_{ij} = a(\mathbf{W_l}\overrightarrow{h_i}, \mathbf{W_r} \overrightarrow{h_j}) \end{equation}

The most general formulation of self-attention allows every node to attend to all other nodes, which drops all structural information. However, to utilize graph structure in the attention mechanism, we use **masked attention**. In masked attention, we only compute attention coefficients $e_{ij}$ for nodes $j \in \mathcal{N}_i$, where $\mathcal{N}_i$ is some neighborhood of node $i$ in the graph.

To easily compare coefficients across different nodes, we normalize the coefficients across $j$ using a softmax function:

\begin{equation} \alpha_{ij} = \text{softmax}_j(e_{ij}) = \frac{\exp(e_{ij})}{\sum_{k \in \mathcal{N}_i} \exp(e_{ik})} \end{equation}

For this problem, our attention mechanism $a$ will be a single-layer feedforward neural network parametrized by weight vectors $\overrightarrow{a_l} \in \mathbb{R}^{F'}$ and $\overrightarrow{a_r} \in \mathbb{R}^{F'}$, followed by a LeakyReLU nonlinearity (with negative input slope 0.2). Let $\cdot^T$ represent transposition and $||$ represent concatenation. The coefficients computed by our attention mechanism may be expressed as:

\begin{equation} \alpha_{ij} = \frac{\exp\Big(\text{LeakyReLU}\Big(\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i} + \overrightarrow{a_r}^T\mathbf{W_r}\overrightarrow{h_j}\Big)\Big)}{\sum_{k\in \mathcal{N}_i} \exp\Big(\text{LeakyReLU}\Big(\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i} + \overrightarrow{a_r}^T\mathbf{W_r}\overrightarrow{h_k}\Big)\Big)} \end{equation}

For the following questions, we denote `alpha_l` = $\alpha_l = [...,\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i},...] \in \mathbb{R}^n$ and `alpha_r` = $\alpha_r = [..., \overrightarrow{a_r}^T \mathbf{W_r} \overrightarrow{h_j}, ...] \in \mathbb{R}^n$.
At every layer of GAT, after the attention coefficients are computed for that layer, the aggregation function can be computed by a weighted sum of neighborhood messages, where weights are specified by $\alpha_{ij}$. Now, we use the normalized attention coefficients to compute a linear combination of the features corresponding to them. These aggregated features will serve as the final output features for every node. \begin{equation} h_i' = \sum_{j \in \mathcal{N}_i} \alpha_{ij} \mathbf{W_r} \overrightarrow{h_j}. \end{equation} At this point, we have covered a lot of information! Before reading further about multi-head attention, we encourage you to go again through the excersize of thinking about what components of the attention mechanism correspond with the different funcitons: 1) `forward`, 2) `message`, and 3 `aggregate`. - Hint 1: Our aggregation is very similar to that of GraphSage except now we are using sum aggregation - Hint 2: The terms we aggregate over again represent the individual message that each neighbor node j sends. Thus, we see that $\alpha_{ij}$ is part of the message each node sends and is thus computed during the message step. This makes sense since an attention weight is associated with each edge in the graph. - Hint 3: Look at the terms in the definition of $\alpha_{ij}$. What values do we want to pre-process and pass as parameters to the `propagate` function. The parameters of `message(..., x_j, alpha_j, alpha_i, ...)` should give a good hint. ### Multi-Head Attention To stabilize the learning process of self-attention, we use multi-head attention. To do this we use $K$ independent attention mechanisms, or ``heads'' compute output features as in the above equations. Then, we concatenate these output feature representations: \begin{equation} \overrightarrow{h_i}' = ||_{k=1}^K \Big(\sum_{j \in \mathcal{N}_i} \alpha_{ij}^{(k)} \mathbf{W_r}^{(k)} \overrightarrow{h_j}\Big) \end{equation} where $||$ is concentation, $\alpha_{ij}^{(k)}$ are the normalized attention coefficients computed by the $k$-th attention mechanism $(a^k)$, and $\mathbf{W}^{(k)}$ is the corresponding input linear transformation's weight matrix. Note that for this setting, $\mathbf{h'} \in \mathbb{R}^{KF'}$. ```python class GAT(MessagePassing): def __init__(self, in_channels, out_channels, heads = 2, negative_slope = 0.2, dropout = 0., **kwargs): super(GAT, self).__init__(node_dim=0, **kwargs) self.in_channels = in_channels self.out_channels = out_channels self.heads = heads self.negative_slope = negative_slope self.dropout = dropout self.lin_l = None self.lin_r = None self.att_l = None self.att_r = None ############################################################################ # TODO: Your code here! # Define the layers needed for the message functions below. # self.lin_l is the linear transformation that you apply to embeddings # BEFORE message passing. # # Pay attention to dimensions of the linear layers, since we're using # multi-head attention. # Our implementation is ~1 lines, but don't worry if you deviate from this. self.lin_l = nn.Linear(self.in_channels, self.heads * self.out_channels) ############################################################################ self.lin_r = self.lin_l ############################################################################ # TODO: Your code here! # Define the attention parameters \overrightarrow{a_l/r}^T in the above intro. # You have to deal with multi-head scenarios. 
        # Use nn.Parameter instead of nn.Linear
        # Our implementation is ~2 lines, but don't worry if you deviate from this.

        self.att_l = nn.Parameter(torch.randn(heads, self.out_channels))
        self.att_r = nn.Parameter(torch.randn(heads, self.out_channels))

        ############################################################################

        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.lin_l.weight)
        nn.init.xavier_uniform_(self.lin_r.weight)
        nn.init.xavier_uniform_(self.att_l)
        nn.init.xavier_uniform_(self.att_r)

    def forward(self, x, edge_index, size = None):

        H, C = self.heads, self.out_channels

        ############################################################################
        # TODO: Your code here!
        # Implement message passing, as well as any pre- and post-processing (our update rule).
        # 1. First apply linear transformation to node embeddings, and split that
        #    into multiple heads. We use the same representations for source and
        #    target nodes, but apply different linear weights (W_l and W_r)
        # 2. Calculate alpha vectors for central nodes (alpha_l) and neighbor nodes (alpha_r).
        # 3. Call propagate function to conduct the message passing.
        #    3.1 Remember to pass alpha = (alpha_l, alpha_r) as a parameter.
        #    3.2 See there for more information: https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html
        # 4. Transform the output back to the shape of N * d.
        # Our implementation is ~5 lines, but don't worry if you deviate from this.

        # x_l dims: N x H x C
        x_l = self.lin_l(x).view(-1, H, C)
        # x_r dims: N x H x C
        x_r = self.lin_r(x).view(-1, H, C)
        # alpha_l dims: 1 x H x C * N x H x C
        alpha_l = self.att_l.unsqueeze(0) * x_l
        # alpha_r dims: 1 x H x C * N x H x C
        alpha_r = self.att_r.unsqueeze(0) * x_r
        out = self.propagate(edge_index, x=(x_l, x_r), alpha=(alpha_l, alpha_r))
        out = out.view(-1, H * C)

        ############################################################################

        return out

    def message(self, x_j, alpha_j, alpha_i, index, ptr, size_i):

        ############################################################################
        # TODO: Your code here!
        # Implement your message function. Putting the attention in message
        # instead of in update is a little tricky.
        # 1. Calculate the final attention weights using alpha_i and alpha_j,
        #    and apply leaky Relu.
        # 2. Calculate softmax over the neighbor nodes for all the nodes. Use
        #    torch_geometric.utils.softmax instead of the one in Pytorch.
        # 3. Apply dropout to attention weights (alpha).
        # 4. Multiply embeddings and attention weights. As a sanity check, the output
        #    should be of shape E * H * d.
        # 5. ptr (LongTensor, optional): If given, computes the softmax based on
        #    sorted inputs in CSR representation. You can simply pass it to softmax.
        # Our implementation is ~5 lines, but don't worry if you deviate from this.

        alpha_ij = F.leaky_relu(alpha_i + alpha_j, negative_slope=self.negative_slope)
        if ptr is None:
            alpha_ij = softmax(alpha_ij, index)
        else:
            alpha_ij = softmax(alpha_ij, ptr)
        alpha_ij = F.dropout(alpha_ij, p=self.dropout, training=self.training)
        out = x_j * alpha_ij

        ############################################################################

        return out

    def aggregate(self, inputs, index, dim_size = None):

        ############################################################################
        # TODO: Your code here!
        # Implement your aggregate function here.
        # See here as how to use torch_scatter.scatter: https://pytorch-scatter.readthedocs.io/en/latest/_modules/torch_scatter/scatter.html
        # Pay attention to "reduce" parameter is different from that in GraphSage.
# Our implementation is ~1 lines, but don't worry if you deviate from this. out = torch_scatter.scatter(inputs, index, dim=self.node_dim, reduce='sum') ############################################################################ return out ``` ## Building Optimizers This function has been implemented for you. **For grading purposes please use the default Adam optimizer**, but feel free to play with other types of optimizers on your own. ```python import torch.optim as optim def build_optimizer(args, params): weight_decay = args.weight_decay filter_fn = filter(lambda p : p.requires_grad, params) if args.opt == 'adam': optimizer = optim.Adam(filter_fn, lr=args.lr, weight_decay=weight_decay) elif args.opt == 'sgd': optimizer = optim.SGD(filter_fn, lr=args.lr, momentum=0.95, weight_decay=weight_decay) elif args.opt == 'rmsprop': optimizer = optim.RMSprop(filter_fn, lr=args.lr, weight_decay=weight_decay) elif args.opt == 'adagrad': optimizer = optim.Adagrad(filter_fn, lr=args.lr, weight_decay=weight_decay) if args.opt_scheduler == 'none': return None, optimizer elif args.opt_scheduler == 'step': scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.opt_decay_step, gamma=args.opt_decay_rate) elif args.opt_scheduler == 'cos': scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.opt_restart) return scheduler, optimizer ``` ## Training and Testing Here we provide you with the functions to train and test. **Please do not modify this part for grading purposes.** ```python import time import networkx as nx import numpy as np import torch import torch.optim as optim from tqdm import trange import pandas as pd import copy from torch_geometric.datasets import TUDataset from torch_geometric.datasets import Planetoid from torch_geometric.data import DataLoader import torch_geometric.nn as pyg_nn import matplotlib.pyplot as plt def train(dataset, args): print("Node task. test set size:", np.sum(dataset[0]['test_mask'].numpy())) print() test_loader = loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False) # build model model = GNNStack(dataset.num_node_features, args.hidden_dim, dataset.num_classes, args) scheduler, opt = build_optimizer(args, model.parameters()) # train losses = [] test_accs = [] best_acc = 0 best_model = None for epoch in trange(args.epochs, desc="Training", unit="Epochs"): total_loss = 0 model.train() for batch in loader: opt.zero_grad() pred = model(batch) label = batch.y pred = pred[batch.train_mask] label = label[batch.train_mask] loss = model.loss(pred, label) loss.backward() opt.step() total_loss += loss.item() * batch.num_graphs total_loss /= len(loader.dataset) losses.append(total_loss) if epoch % 10 == 0: test_acc = test(test_loader, model) test_accs.append(test_acc) if test_acc > best_acc: best_acc = test_acc best_model = copy.deepcopy(model) else: test_accs.append(test_accs[-1]) return test_accs, losses, best_model, best_acc, test_loader def test(loader, test_model, is_validation=False, save_model_preds=False, model_type=None): test_model.eval() correct = 0 # Note that Cora is only one graph! 
for data in loader: with torch.no_grad(): # max(dim=1) returns values, indices tuple; only need indices pred = test_model(data).max(dim=1)[1] label = data.y mask = data.val_mask if is_validation else data.test_mask # node classification: only evaluate on nodes in test set pred = pred[mask] label = label[mask] if save_model_preds: print ("Saving Model Predictions for Model Type", model_type) data = {} data['pred'] = pred.view(-1).cpu().detach().numpy() data['label'] = label.view(-1).cpu().detach().numpy() df = pd.DataFrame(data=data) # Save locally as csv df.to_csv('CORA-Node-' + model_type + '.csv', sep=',', index=False) correct += pred.eq(label).sum().item() total = 0 for data in loader.dataset: total += torch.sum(data.val_mask if is_validation else data.test_mask).item() return correct / total class objectview(object): def __init__(self, d): self.__dict__ = d ``` ## Let's Start the Training! We will be working on the CORA dataset on node-level classification. This part is implemented for you. **For grading purposes, please do not modify the default parameters.** However, feel free to play with different configurations just for fun! **Submit your best accuracy and loss on Gradescope.** ```python if 'IS_GRADESCOPE_ENV' not in os.environ: for args in [ {'model_type': 'GraphSage', 'dataset': 'cora', 'num_layers': 2, 'heads': 1, 'batch_size': 32, 'hidden_dim': 32, 'dropout': 0.5, 'epochs': 500, 'opt': 'adam', 'opt_scheduler': 'none', 'opt_restart': 0, 'weight_decay': 5e-3, 'lr': 0.01}, ]: args = objectview(args) for model in ['GraphSage', 'GAT']: args.model_type = model # Match the dimension. if model == 'GAT': args.heads = 2 else: args.heads = 1 if args.dataset == 'cora': dataset = Planetoid(root='/tmp/cora', name='Cora') else: raise NotImplementedError("Unknown dataset") test_accs, losses, best_model, best_acc, test_loader = train(dataset, args) print("Maximum test set accuracy: {0}".format(max(test_accs))) print("Minimum loss: {0}".format(min(losses))) # Run test for our best model to save the predictions! test(test_loader, best_model, is_validation=False, save_model_preds=True, model_type=model) print() plt.title(dataset.name) plt.plot(losses, label="training loss" + " - " + args.model_type) plt.plot(test_accs, label="test accuracy" + " - " + args.model_type) plt.legend() plt.show() ``` ## Question 1.1: What is the maximum accuracy obtained on the test set for GraphSage? (10 points) Running the cell above will show the results of your best model and save your best model's predictions to a file named *CORA-Node-GraphSage.csv*. As we have seen before, you can view this file by clicking on the *Folder* icon on the left side panel. When you submit your assignment, you will have to download this file and attach it to your submission. ## Question 1.2: What is the maximum accuracy obtained on the test set for GAT? (10 points) Running the training cell above will also save your best GAT model predictions as *CORA-Node-GAT.csv*. When you submit your assignment, you will have to download this file and attach it to your submission. # 2) DeepSNAP Basics In previous Colabs, we have seen graph class (NetworkX) and tensor (PyG) representations of graphs. The graph class `nx.Graph` provides rich analysis and manipulation functionalities, such as computing the clustering coefficient and PageRank vector for a graph. When working with PyG, we were then introduced to a tensor-based representation of graphs (i.e. edge tensor `edge_index` and node attribute tensors `x` and `y`).
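To make the contrast concrete, here is a minimal, hypothetical illustration (not part of the assignment) of the same small graph written both ways; the edge list, node features, and labels below are invented purely for this example.

```python
import torch
import networkx as nx

# 1) Graph-class representation: convenient for analysis and manipulation.
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (2, 3)])
print(nx.clustering(G))   # per-node clustering coefficients
print(nx.pagerank(G))     # PageRank vector

# 2) Tensor representation: what a GNN consumes for message passing.
#    Each undirected edge is stored in both directions.
edge_index = torch.tensor([[0, 1, 1, 2, 2, 0, 2, 3],
                           [1, 0, 2, 1, 0, 2, 3, 2]], dtype=torch.long)
x = torch.rand(4, 5)               # made-up node features
y = torch.randint(0, 2, (4,))      # made-up node labels
print(edge_index.shape, x.shape, y.shape)
```

The graph object is convenient to analyze and manipulate, while the tensors are what the message-passing layers actually operate on.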
In this section, we present DeepSNAP, a package that combines the benefits of both graph representations and offers a full pipeline for GNN training / validation / testing. Namely, DeepSNAP includes a graph class representation to allow for more efficient graph manipulation and analysis, in addition to a tensor based representation for efficient message passing computation. In general, [DeepSNAP](https://github.com/snap-stanford/deepsnap) is a Python library to assist efficient deep learning on graphs. DeepSNAP enables flexible graph manipulation, standard graph learning pipelines, heterogeneous graphs, and overall represents a simple graph learning API. In more detail: 1. DeepSNAP allows for sophisticated graph manipulations, such as feature computation, pretraining, subgraph extraction, etc. during/before training. 2. DeepSNAP standardizes the pipelines for node, edge, and graph-level prediction tasks under inductive or transductive settings. Specifically, DeepSNAP removes previously non-trivial / repetitive design choices left to the user, such as how to split datasets. DeepSNAP thus greatly saves repetitive, often non-trivial coding effort and enables fair model comparison. 3. Many real-world graphs are heterogeneous in nature (i.e. include different node types or edge types). However, most packages lack complete support for heterogeneous graphs, including data storage and flexible message passing. DeepSNAP provides an efficient and flexible heterogeneous graph that supports both node and edge heterogeneity. In this next section, we will focus on working with DeepSNAP for graph manipulation and dataset splitting. [DeepSNAP](https://github.com/snap-stanford/deepsnap) is a newly released project and it is still under development. If you find any bugs or have any improvement ideas, feel free to raise issues or create pull requests on GitHub directly :) ## Setup ```python import torch import networkx as nx import matplotlib.pyplot as plt from deepsnap.graph import Graph from deepsnap.batch import Batch from deepsnap.dataset import GraphDataset from torch_geometric.datasets import Planetoid, TUDataset from torch.utils.data import DataLoader def visualize(G, color_map=None, seed=123): if color_map is None: color_map = '#c92506' plt.figure(figsize=(8, 8)) nodes = nx.draw_networkx_nodes(G, pos=nx.spring_layout(G, seed=seed), \ label=None, node_color=color_map, node_shape='o', node_size=150) edges = nx.draw_networkx_edges(G, pos=nx.spring_layout(G, seed=seed), alpha=0.5) if color_map is not None: plt.scatter([],[], c='#c92506', label='Nodes with label 0', edgecolors="black", s=140) plt.scatter([],[], c='#fcec00', label='Nodes with label 1', edgecolors="black", s=140) plt.legend(prop={'size': 13}, handletextpad=0) nodes.set_edgecolor('black') plt.show() ``` ## DeepSNAP Graph The `deepsnap.graph.Graph` class is the core class of DeepSNAP. It not only represents a graph in tensor format but also includes a graph object from a graph manipulation package. Currently DeepSNAP supports [NetworkX](https://networkx.org/) and [Snap.py](https://snap.stanford.edu/snappy/doc/index.html) as back end graph manipulation packages. In this Colab, we will focus on using NetworkX as the back end graph manipulation package. ### NetworkX to DeepSNAP To begin, let us first work through converting a simple random NetworkX graph to a DeepSNAP graph.
```python if 'IS_GRADESCOPE_ENV' not in os.environ: num_nodes = 100 p = 0.05 seed = 100 # Generate a networkx random graph G = nx.gnp_random_graph(num_nodes, p, seed=seed) # Generate some random node features and labels node_feature = {node : torch.rand([5, ]) for node in G.nodes()} node_label = {node : torch.randint(0, 2, ()) for node in G.nodes()} # Set the random features and labels to G nx.set_node_attributes(G, node_feature, name='node_feature') nx.set_node_attributes(G, node_label, name='node_label') # Print one node example for node in G.nodes(data=True): print(node) break color_map = ['#c92506' if node[1]['node_label'].item() == 0 else '#fcec00' for node in G.nodes(data=True)] # Visualize the graph visualize(G, color_map=color_map) # Transform the networkx graph into the deepsnap graph graph = Graph(G) # Print out the general deepsnap graph information print(graph) # DeepSNAP will convert node attributes to tensors # Notice the type of tensors print("Node feature (node_feature) has shape {} and type {}".format(graph.node_feature.shape, graph.node_feature.dtype)) print("Node label (node_label) has shape {} and type {}".format(graph.node_label.shape, graph.node_label.dtype)) # DeepSNAP will also generate the edge_index tensor print("Edge index (edge_index) has shape {} and type {}".format(graph.edge_index.shape, graph.edge_index.dtype)) # Different from only storing tensors, deepsnap graph also references to the networkx graph # We will discuss why the reference will be helpful later print("The DeepSNAP graph has {} as the internal manupulation graph".format(type(graph.G))) ``` ### Tensor graph attributes Similar to the native PyG tensor based representation, DeepSNAP includes a graph tensor based representation with three levels of graph attributes. In this example, we primarily have **node level** attributes including `node_feature` and `node_label`. The other two levels of attributes are **edge** and **graph** attributes. Similar to node level attributes, these attributes are prefixed by their respective type. For example, the features become `edge_feature` or `graph_feature` and labels becomes `edge_label` or `graph_label` etc. ### Graph Object DeepSNAP additionally allows us to easily access graph information through the backend graph object and graph manipulation package. ```python if 'IS_GRADESCOPE_ENV' not in os.environ: # Number of nodes print("The random graph has {} nodes".format(graph.num_nodes)) # Number of edges print("The random graph has {} edges".format(graph.num_edges)) ``` The random graph has 100 nodes The random graph has 262 edges ### PyG to DeepSNAP Lastly, DeepSNAP provides functionality to automatically transform a PyG dataset into a list of DeepSNAP graphs. Here we transform the CORA dataset into a list with one DeepSNAP graph (i.e. the singular CORA graph). ```python if 'IS_GRADESCOPE_ENV' not in os.environ: root = './tmp/cora' name = 'Cora' # The Cora dataset pyg_dataset= Planetoid(root, name) # PyG dataset to a list of deepsnap graphs graphs = GraphDataset.pyg_to_graphs(pyg_dataset) # Get the first deepsnap graph (CORA only has one graph) graph = graphs[0] print(graph) ``` Graph(G=[], edge_index=[2, 10556], edge_label_index=[2, 10556], node_feature=[2708, 1433], node_label=[2708], node_label_index=[2708]) ## Question 2.1: How many classes are in the CORA graph? How many features does each node have? 
(5 points) ```python def get_num_node_classes(graph): # TODO: Implement a function that takes a deepsnap graph object # and return the number of node classes of that graph. num_node_classes = 0 ############# Your code here ############# ## (~1 line of code) ## Note ## 1. Colab autocomplete functionality might be useful ## 2. DeepSNAP documentation might be useful https://snap.stanford.edu/deepsnap/modules/graph.html num_node_classes = graph.num_node_labels ########################################## return num_node_classes def get_num_node_features(graph): # TODO: Implement a function that takes a deepsnap graph object # and return the number of node features of that graph. num_node_features = 0 ############# Your code here ############# ## (~1 line of code) ## Note ## 1. Colab autocomplete functionality might be useful ## 2. DeepSNAP documentation might be useful https://snap.stanford.edu/deepsnap/modules/graph.html num_node_features = graph.num_node_features ########################################## return num_node_features if 'IS_GRADESCOPE_ENV' not in os.environ: num_node_classes = get_num_node_classes(graph) num_node_features = get_num_node_features(graph) print("{} has {} classes".format(name, num_node_classes)) print("{} has {} features".format(name, num_node_features)) ``` Cora has 7 classes Cora has 1433 features ## DeepSNAP Dataset Now, we will learn how to create DeepSNAP datasets. A `deepsnap.dataset.GraphDataset` contains a list of `deepsnap.graph.Graph` objects. In addition to the list of graphs, we specify what task the dataset will be used on, such as node level task (`task=node`), edge level task (`task=link_pred`) and graph level task (`task=graph`). The GraphDataset class contains many other useful parameters that can be specified during initialization. If you are interested, you can take a look at the [documentation](https://snap.stanford.edu/deepsnap/modules/dataset.html#deepsnap-graphdataset). As an example, we will first look at the COX2 dataset, which contains 467 graphs. In initializng our dataset, we convert the PyG dataset into its corresponding DeepSNAP dataset and specify the task to `graph`. ```python if 'IS_GRADESCOPE_ENV' not in os.environ: root = './tmp/cox2' name = 'COX2' # Load the dataset through PyG pyg_dataset = TUDataset(root, name) # Convert to a list of deepsnap graphs graphs = GraphDataset.pyg_to_graphs(pyg_dataset) # Convert list of deepsnap graphs to deepsnap dataset with specified task=graph dataset = GraphDataset(graphs, task='graph') print(dataset) ``` GraphDataset(467) ## Question 2.2: What is the label of the graph with index 100? (5 points) ```python def get_graph_class(dataset, idx): # TODO: Implement a function that takes a deepsnap dataset object, # the index of a graph in the dataset, and returns the class/label # of the graph (in integer). label = -1 ############# Your code here ############ ## (~1 line of code) ## Notice ## 1. The graph label refers to a graph-level attribute label = dataset[idx].graph_label ######################################### return label if 'IS_GRADESCOPE_ENV' not in os.environ: graph_0 = dataset[0] print(graph_0) idx = 100 label = get_graph_class(dataset, idx) print('Graph with index {} has label {}'.format(idx, label)) ``` Graph(G=[], edge_index=[2, 82], edge_label_index=[2, 82], graph_label=[1], node_feature=[39, 35], node_label_index=[39], task=[]) Graph with index 100 has label tensor([0]) ## Question 2.3: How many edges are in the graph with index 200? 
(5 points) ```python def get_graph_num_edges(dataset, idx): # TODO: Implement a function that takes a deepsnap dataset object, # the index of a graph in dataset, and returns the number of # edges in the graph (in integer). num_edges = 0 ############# Your code here ############ ## (~1 lines of code) ## Note ## 1. You can use the class property directly num_edges = dataset[idx].num_edges ######################################### return num_edges if 'IS_GRADESCOPE_ENV' not in os.environ: idx = 200 num_edges = get_graph_num_edges(dataset, idx) print('Graph with index {} has {} edges'.format(idx, num_edges)) ``` Graph with index 200 has 49 edges # 3) DeepSNAP Advanced Now that we have learned the basics of DeepSNAP, let's move on to some more advanced functionalities. In this section, we will use DeepSNAP for graph feature computation and transductive/inductive dataset splitting. ## Setup ```python import torch import networkx as nx import matplotlib.pyplot as plt from deepsnap.graph import Graph from deepsnap.batch import Batch from deepsnap.dataset import GraphDataset from torch_geometric.datasets import Planetoid, TUDataset from torch.utils.data import DataLoader ``` ## Data Split in Graphs As discussed in (LECTURE REFERENCE), data splitting for graphs can be much harder than for CV or NLP. In general, data splitting is divided into two settings, **inductive** and **transductive**. ## Inductive Split In an inductive setting, we split a list of multiple graphs into disjoint training/validation and test sets. Here is an example of using DeepSNAP to inductively split a list of graphs for a graph level task (graph classification etc.):
```python if 'IS_GRADESCOPE_ENV' not in os.environ: root = './tmp/cora' name = 'Cora' pyg_dataset = Planetoid(root, name) graphs = GraphDataset.pyg_to_graphs(pyg_dataset) # Here we specify the task as node-level task such as node classification task = 'node' dataset = GraphDataset(graphs, task=task) # Specify we want the transductive splitting dataset_train, dataset_val, dataset_test = dataset.split(transductive=True, split_ratio=[0.8, 0.1, 0.1]) print("Cora train dataset: {}".format(dataset_train)) print("Cora validation dataset: {}".format(dataset_val)) print("Cora test dataset: {}".format(dataset_test)) print("Original Cora has {} nodes".format(dataset.num_nodes[0])) # The nodes in each set can be find in node_label_index print("After the split, Cora has {} training nodes".format(dataset_train[0].node_label_index.shape[0])) print("After the split, Cora has {} validation nodes".format(dataset_val[0].node_label_index.shape[0])) print("After the split, Cora has {} test nodes".format(dataset_test[0].node_label_index.shape[0])) ``` Cora train dataset: GraphDataset(1) Cora validation dataset: GraphDataset(1) Cora test dataset: GraphDataset(1) Original Cora has 2708 nodes After the split, Cora has 2166 training nodes After the split, Cora has 270 validation nodes After the split, Cora has 272 test nodes ## Edge Level Split Compared to node and graph level splitting, edge level splitting is a little bit tricky ;) For edge level splitting we need to consider several different tasks: 1. Splitting positive edges into train / val / test datasets. 2. Sampling / re-sampling negative edges (i.e. edges not present in the graph). 3. Splitting edges into message passing and supervision edges. With regard to point 3, for edge level data splitting we classify edges into two types. The first is `message passing` edges, edges that are used for message passing by our GNN. The second is `supervision`, edges that are used in the loss function for backpropagation. DeepSNAP allows for two different modes, where the `message passing` and `supervision` edges are either the same or disjoint. ### All Edge Splitting Mode First, we explore the `edge_train_mode="all"` mode for edge level splitting, where the `message passing` and `supervision` edges are shared during training. ```python if 'IS_GRADESCOPE_ENV' not in os.environ: root = './tmp/cora' name = 'Cora' pyg_dataset = Planetoid(root, name) graphs = GraphDataset.pyg_to_graphs(pyg_dataset) # Specify task as link_pred for edge-level task task = 'link_pred' # Specify the train mode, "all" mode is default for deepsnap dataset edge_train_mode = "all" dataset = GraphDataset(graphs, task=task, edge_train_mode=edge_train_mode) # Transductive link prediction split dataset_train, dataset_val, dataset_test = dataset.split(transductive=True, split_ratio=[0.8, 0.1, 0.1]) print("Cora train dataset: {}".format(dataset_train)) print("Cora validation dataset: {}".format(dataset_val)) print("Cora test dataset: {}".format(dataset_test)) ``` Cora train dataset: GraphDataset(1) Cora validation dataset: GraphDataset(1) Cora test dataset: GraphDataset(1) /usr/local/lib/python3.7/dist-packages/torch/_tensor.py:575: UserWarning: floor_divide is deprecated, and will be removed in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). 
(Triggered internally at /pytorch/aten/src/ATen/native/BinaryOps.cpp:467.) return torch.floor_divide(self, other) In DeepSNAP, the indices of supervision edges are stored in the `edge_label_index` tensor and the corresponding edge labels are stored in `edge_label` tensor. ```python if 'IS_GRADESCOPE_ENV' not in os.environ: print("Original Cora graph has {} edges".format(dataset[0].num_edges)) print() print("Train set has {} message passing edge".format(dataset_train[0].edge_index.shape[1] // 2)) print("Train set has {} supervision (positive) edges".format(dataset_train[0].edge_label_index.shape[1] // 4)) print() print("Validation set has {} message passing edge".format(dataset_val[0].edge_index.shape[1] // 2)) print("Validation set has {} supervision (positive) edges".format(dataset_val[0].edge_label_index.shape[1] // 4)) print() print("Test set has {} message passing edge".format(dataset_test[0].edge_index.shape[1] // 2)) print("Test set has {} supervision (positive) edges".format(dataset_test[0].edge_label_index.shape[1] // 4)) ``` Original Cora graph has 5278 edges Train set has 4222 message passing edge Train set has 4222 supervision (positive) edges Validation set has 4222 message passing edge Validation set has 527 supervision (positive) edges Test set has 4749 message passing edge Test set has 529 supervision (positive) edges **Specific things to note in `all` mode**: * At training time: the supervision edges are the same as the training message passing edges. * At validation time: the message passing edges are the training message passing edges and training supervision edges (still the training message passing edges in this case). However, we now include a set of unseen validation supervision edges that are disjoint from the training supervision edges. * At test time: the message passing edges are the union of training message passing edges, training supervision edges, and validation supervision edges. The test supervision edges then disjoint from the training supervision edges and validation supervision edges. * We exclude negative edges in this illustration. However, the attributes `edge_label` and `edge_label_index` naturally also include the negative supervision edges (by default the number of negative edges is the same as the number of positive edges, hence the divide by 4 above). Now, that we have seen the basics of the `all` method for edge splitting, we will implement a function that checks whether two edge index tensors are disjoint and explore more edge splitting properties by using that function. ## Question 3: Implement a function that checks whether two edge_index tensors are disjoint (i.e. do not share any common edges). Then answer the True/False questions below. (5 points) ```python def edge_indices_disjoint(edge_index_1, edge_index_2): # TODO: Implement this function that takes two edge index tensors, # and returns whether these two edge index tensors are disjoint. disjoint = None ############# Your code here ############ ## (~5 lines of code) ## Note ## 1. Here disjoint means that there is no single edge belongs to both edge index tensors ## 2. You do not need to consider the undirected case. For example, if edge_index_1 contains ## edge (a, b) and edge_index_2 contains edge (b, a). We will treat them as disjoint in this ## function. 
edge_index_1_np = edge_index_1.T.detach().cpu().numpy() edge_index_2_np = edge_index_2.T.detach().cpu().numpy() intercept = [x for x in set(tuple(x) for x in edge_index_1_np) & set(tuple(x) for x in edge_index_2_np)] disjoint = len(intercept) == 0 ######################################### return disjoint ``` ```python if 'IS_GRADESCOPE_ENV' not in os.environ: num_train_edges = dataset_train[0].edge_label_index.shape[1] // 2 train_pos_edge_index = dataset_train[0].edge_label_index[:, :num_train_edges] train_neg_edge_index = dataset_train[0].edge_label_index[:, num_train_edges:] print("3.1 Training (supervision) positve and negative edges are disjoint = {}"\ .format(edge_indices_disjoint(train_pos_edge_index, train_neg_edge_index))) num_val_edges = dataset_val[0].edge_label_index.shape[1] // 2 val_pos_edge_index = dataset_val[0].edge_label_index[:, :num_val_edges] val_neg_edge_index = dataset_val[0].edge_label_index[:, num_val_edges:] print("3.2 Validation (supervision) positve and negative edges are disjoint = {}"\ .format(edge_indices_disjoint(val_pos_edge_index, val_neg_edge_index))) num_test_edges = dataset_test[0].edge_label_index.shape[1] // 2 test_pos_edge_index = dataset_test[0].edge_label_index[:, :num_test_edges] test_neg_edge_index = dataset_test[0].edge_label_index[:, num_test_edges:] print("3.3 Test (supervision) positve and negative edges are disjoint = {}"\ .format(edge_indices_disjoint(test_pos_edge_index, test_neg_edge_index))) print("3.4 Test (supervision) positve and validation (supervision) positve edges are disjoint = {}"\ .format(edge_indices_disjoint(test_pos_edge_index, val_pos_edge_index))) print("3.5 Validation (supervision) positve and training (supervision) positve edges are disjoint = {}"\ .format(edge_indices_disjoint(val_pos_edge_index, train_pos_edge_index))) ``` 3.1 Training (supervision) positve and negative edges are disjoint = True 3.2 Validation (supervision) positve and negative edges are disjoint = True 3.3 Test (supervision) positve and negative edges are disjoint = True 3.4 Test (supervision) positve and validation (supervision) positve edges are disjoint = True 3.5 Validation (supervision) positve and training (supervision) positve edges are disjoint = True ### Disjoint Edge Splitting Mode Now we will look at a relatively more complex transductive edge split setting, the `edge_train_mode="disjoint"` mode in DeepSNAP. 
In this setting, the `message passing` and `supervision` edges are completely disjoint ```python if 'IS_GRADESCOPE_ENV' not in os.environ: edge_train_mode = "disjoint" dataset = GraphDataset(graphs, task='link_pred', edge_train_mode=edge_train_mode) orig_edge_index = dataset[0].edge_index dataset_train, dataset_val, dataset_test = dataset.split( transductive=True, split_ratio=[0.8, 0.1, 0.1]) train_message_edge_index = dataset_train[0].edge_index train_sup_edge_index = dataset_train[0].edge_label_index val_message_edge_index = dataset_val[0].edge_index val_sup_edge_index = dataset_val[0].edge_label_index test_message_edge_index = dataset_test[0].edge_index test_sup_edge_index = dataset_test[0].edge_label_index print("Original Cora graph has {} edges".format(dataset[0].num_edges)) print() print("Train set has {} message passing edge".format(train_message_edge_index.shape[1] // 2)) print("Train set has {} supervision (positive) edges".format(train_sup_edge_index.shape[1] // 4)) print() print("Validation set has {} message passing edge".format(val_message_edge_index.shape[1] // 2)) print("Validation set has {} supervision (positive) edges".format(val_sup_edge_index.shape[1] // 4)) print() print("Test set has {} message passing edge".format(test_message_edge_index.shape[1] // 2)) print("Test set has {} supervision (positive) edges".format(test_sup_edge_index.shape[1] // 4)) ``` Original Cora graph has 5278 edges Train set has 3377 message passing edge Train set has 845 supervision (positive) edges Validation set has 4222 message passing edge Validation set has 527 supervision (positive) edges Test set has 4749 message passing edge Test set has 529 supervision (positive) edges **Specific things to note in `disjoint` mode**: * At training time: the training supervision edges are disjoint from the training message passing edges. * At validation time: the message passing edges are the union of training message passing edges and training supervision edges. The validation supervision edges are disjoint from both the training message passing and supervision edges. * At test time: the message passing edges are the training message passing edges, training supervision edges, and validation supervision edges. The test supervision edges are disjoint from all the training and validation edges. ## Negative Edges For edge level tasks, sampling negative edges is critical. Moreover, during each training iteration, we want to resample the negative edges. Below we print the training and validation sets negative edges in two training iterations. What we demonstrate is that the negative edges are only resampled during training. 
```python if 'IS_GRADESCOPE_ENV' not in os.environ: dataset = GraphDataset(graphs, task='link_pred', edge_train_mode="disjoint") datasets = {} follow_batch = [] datasets['train'], datasets['val'], datasets['test'] = dataset.split( transductive=True, split_ratio=[0.8, 0.1, 0.1]) dataloaders = { split: DataLoader( ds, collate_fn=Batch.collate(follow_batch), batch_size=1, shuffle=(split=='train') ) for split, ds in datasets.items() } neg_edges_1 = None for batch in dataloaders['train']: num_edges = batch.edge_label_index.shape[1] // 2 neg_edges_1 = batch.edge_label_index[:, num_edges:] print("First iteration training negative edges:") print(neg_edges_1) break neg_edges_2 = None for batch in dataloaders['train']: num_edges = batch.edge_label_index.shape[1] // 2 neg_edges_2 = batch.edge_label_index[:, num_edges:] print("Second iteration training negative edges:") print(neg_edges_2) break neg_edges_1 = None for batch in dataloaders['val']: num_edges = batch.edge_label_index.shape[1] // 2 neg_edges_1 = batch.edge_label_index[:, num_edges:] print("First iteration validation negative edges:") print(neg_edges_1) break neg_edges_2 = None for batch in dataloaders['val']: num_edges = batch.edge_label_index.shape[1] // 2 neg_edges_2 = batch.edge_label_index[:, num_edges:] print("Second iteration validation negative edges:") print(neg_edges_2) break ``` First iteration training negative edges: tensor([[ 929, 69, 1042, ..., 572, 1133, 358], [1410, 2548, 2525, ..., 645, 2494, 2686]]) Second iteration training negative edges: tensor([[1825, 2407, 2433, ..., 599, 940, 868], [ 250, 1064, 514, ..., 1799, 2427, 52]]) First iteration validation negative edges: tensor([[ 2, 1232, 972, ..., 1000, 2505, 1749], [1156, 2353, 645, ..., 2365, 1618, 409]]) Second iteration validation negative edges: tensor([[ 2, 1232, 972, ..., 1000, 2505, 1749], [1156, 2353, 645, ..., 2365, 1618, 409]]) If you are interested in more graph splitting settings, please refer to the DeepSNAP dataset [documentation](https://snap.stanford.edu/deepsnap/modules/dataset.html). ## Graph Transformation and Feature Computation The other core functionality of DeepSNAP is graph transformation / feature computation. In DeepSNAP, we divide graph transformation / feature computation into two different types. The first includes transformations before training (e.g. transform the whole dataset before training directly), and the second includes transformations during training (transform batches of graphs). Below is an example that uses the NetworkX back end to calculate the PageRank value for each node and subsequently transforms the node features by concatenating each nodes PageRank score (transform the dataset before training). 
```python def pagerank_transform_fn(graph): # Get the referenced networkx graph G = graph.G # Calculate the pagerank by using networkx pr = nx.pagerank(G) # Transform the pagerank values to tensor pr_feature = torch.tensor([pr[node] for node in range(graph.num_nodes)], dtype=torch.float32) pr_feature = pr_feature.view(graph.num_nodes, 1) # Concat the pagerank values to the node feature graph.node_feature = torch.cat([graph.node_feature, pr_feature], dim=-1) if 'IS_GRADESCOPE_ENV' not in os.environ: root = './tmp/cox2' name = 'COX2' pyg_dataset = TUDataset(root, name) graphs = GraphDataset.pyg_to_graphs(pyg_dataset) dataset = GraphDataset(graphs, task='graph') print("Number of features before transformation: {}".format(dataset.num_node_features)) dataset.apply_transform(pagerank_transform_fn, update_tensor=False) print("Number of features after transformation: {}".format(dataset.num_node_features)) ``` Number of features before transformation: 35 Number of features after transformation: 36 ## Question 4: Implement a transformation that adds the clustering coefficient of each node to its feature vector and then report the clustering coefficient of the node with index 3 in the graph with index 406 (5 points). ```python def cluster_transform_fn(graph): # TODO: Implement a function that takes a deepsnap graph object and # transforms the graph by adding each node's clustering coefficient to its # graph.node_feature representation ############# Your code here ############ ## (~5 lines of code) ## Note ## 1. Compute the clustering coefficient value for each node and ## concat this value to the last dimension of graph.node_feature # Get networkx graph G = graph.G # Calculate the clustering coefficient of each node using networkx clustering = nx.algorithms.cluster.clustering(G) # Transform the clustering coefficient values to a tensor cluster_feature = torch.tensor([clustering[node] for node in range(graph.num_nodes)], dtype=torch.float32) cluster_feature = cluster_feature.view(graph.num_nodes, 1) # Concat the clustering coefficient values to the node features graph.node_feature = torch.cat([graph.node_feature, cluster_feature], dim=-1) ######################################### if 'IS_GRADESCOPE_ENV' not in os.environ: root = './cox2' name = 'COX2' pyg_dataset = TUDataset(root, name) graphs = GraphDataset.pyg_to_graphs(pyg_dataset) dataset = GraphDataset(graphs, task='graph') # Transform the dataset dataset.apply_transform(cluster_transform_fn, update_tensor=False) node_idx = 3 graph_idx = 406 node_feature = dataset[graph_idx].node_feature print("The node has clustering coefficient: {}".format(round(node_feature[node_idx][-1].item(), 2))) ``` The node has clustering coefficient: 0.17 ### Final Thoughts Apart from transforming the whole dataset before training, DeepSNAP can also transform the graph (usually sampled batches of graphs, `deepsnap.batch.Batch`) during each training iteration. Also, DeepSNAP supports the synchronization of the transformation between the referenced graph objects and tensor representations. For example, you can just update the NetworkX graph object in the transform function and, by specifying `update_tensor=True`, the internal tensor representations will be automatically updated! For more information, please refer to the DeepSNAP [documentation](https://snap.stanford.edu/deepsnap/). # 4) Edge Level Prediction From the last section, we learned how DeepSNAP transductively splits edges for edge level tasks. For the last part of the notebook, we will use DeepSNAP and PyG together to implement a simple edge level prediction (link prediction) model!
Specifically, we will use a 2 layer GraphSAGE embedding model to generate node embeddings, and then compute link predictions through a dot product link prediction head. Namely, given an edge (u, v) with GNN feature embeddings $f_u$ and $f_v$, our link prediction head generates its link prediction as $f_u \cdot f_v$. To give a brief intuition for this dot product link prediction model, we are learning a GNN that embeds nodes such that nodes that have an edge in the graph are closer within the embedding space than nodes that do not have an edge. The dot product provides a proxy for closeness in our embedding space, where a high positive dot product indicates that two vectors are closely aligned (the angle between the vectors is small), whereas a negative dot product indicates that the vectors are unaligned (the angle between the vectors is greater than 90 degrees). ```python import copy import torch import torch.nn as nn import numpy as np import networkx as nx import matplotlib.pyplot as plt from deepsnap.graph import Graph from deepsnap.batch import Batch from deepsnap.dataset import GraphDataset from torch_geometric.datasets import Planetoid, TUDataset from torch.utils.data import DataLoader import torch.nn.functional as F from torch_geometric.nn import SAGEConv class LinkPredModel(torch.nn.Module): def __init__(self, input_dim, hidden_dim, num_classes, dropout=0.2): super(LinkPredModel, self).__init__() self.conv1 = SAGEConv(input_dim, hidden_dim) self.conv2 = SAGEConv(hidden_dim, num_classes) self.loss_fn = None ############# Your code here ############# ## (~1 line of code) ## Note ## 1. Initialize the loss function to BCEWithLogitsLoss self.loss_fn = nn.BCEWithLogitsLoss() ########################################## self.dropout = dropout def reset_parameters(self): self.conv1.reset_parameters() self.conv2.reset_parameters() def forward(self, batch): node_feature, edge_index, edge_label_index = batch.node_feature, batch.edge_index, batch.edge_label_index ############# Your code here ############# ## (~6 lines of code) ## Note ## 1. Feed the node feature into the first conv layer ## 2. Add a ReLU after the first conv layer ## 3. Add dropout after the ReLU (with probability self.dropout) ## 4. Feed the output to the second conv layer ## 5. Select the embeddings of the source nodes and destination nodes ## by using the edge_label_index and compute the similarity of each pair ## by dot product x = self.conv1(node_feature, edge_index) x = F.relu(x) # Only apply dropout while training x = F.dropout(x, p=self.dropout, training=self.training) x = self.conv2(x, edge_index) x_src = x[edge_label_index[0]] x_dst = x[edge_label_index[1]] x_similarity = x_src * x_dst pred = torch.sum(x_similarity, dim=-1) ########################################## return pred def loss(self, pred, link_label): return self.loss_fn(pred, link_label) ``` ```python import pandas as pd from sklearn.metrics import * def train(model, dataloaders, optimizer, args): val_max = 0 best_model = model for epoch in range(1, args["epochs"]): for i, batch in enumerate(dataloaders['train']): batch.to(args["device"]) ############# Your code here ############# ## (~6 lines of code) ## Note ## 1. Zero grad the optimizer ## 2. Compute loss and backpropagate ## 3.
Update the model parameters optimizer.zero_grad() pred = model(batch) loss = model.loss(pred, batch.edge_label.type_as(pred)) loss.backward() optimizer.step() ########################################## log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}, Loss: {}' score_train = test(model, dataloaders['train'], args) score_val = test(model, dataloaders['val'], args) score_test = test(model, dataloaders['test'], args) print(log.format(epoch, score_train, score_val, score_test, loss.item())) if val_max < score_val: val_max = score_val best_model = copy.deepcopy(model) return best_model def test(model, dataloader, args, save_model_preds=False): model.eval() score = 0 preds = None labels = None ############# Your code here ############# ## (~7 lines of code) ## Note ## 1. Loop through batches in the dataloader (Note for us there is only one batch!) ## 2. Feed the batch to the model ## 3. Feed the model output to sigmoid ## 4. Compute the ROC-AUC score by using sklearn roc_auc_score function ## Note: Look into flattening and converting torch tensors into numpy arrays ## 5. Edge labels are stored in batch.edge_label ## 6. Make sure to save your **numpy** model predictions as 'preds' ## and the **numpy** edge labels as 'labels' for batch in dataloader: batch.to(args['device']) preds = model(batch) preds = torch.sigmoid(preds).cpu().detach().numpy() labels = batch.edge_label.cpu().detach().numpy() score += roc_auc_score(labels, preds) score /= len(dataloader) ########################################## if save_model_preds: print ("Saving Link Classification Model Predictions") print() data = {} data['pred'] = preds data['label'] = labels df = pd.DataFrame(data=data) # Save locally as csv df.to_csv('CORA-Link-Prediction.csv', sep=',', index=False) return score ``` ```python # Please don't change any parameters args = { "device" : 'cuda' if torch.cuda.is_available() else 'cpu', "hidden_dim" : 128, "epochs" : 200, } ``` ```python if 'IS_GRADESCOPE_ENV' not in os.environ: pyg_dataset = Planetoid('./tmp/cora', 'Cora') graphs = GraphDataset.pyg_to_graphs(pyg_dataset) dataset = GraphDataset( graphs, task='link_pred', edge_train_mode="disjoint" ) datasets = {} datasets['train'], datasets['val'], datasets['test'] = dataset.split( transductive=True, split_ratio=[0.85, 0.05, 0.1]) input_dim = datasets['train'].num_node_features num_classes = datasets['train'].num_edge_labels model = LinkPredModel(input_dim, args["hidden_dim"], num_classes).to(args["device"]) model.reset_parameters() optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) dataloaders = {split: DataLoader( ds, collate_fn=Batch.collate([]), batch_size=1, shuffle=(split=='train')) for split, ds in datasets.items()} best_model = train(model, dataloaders, optimizer, args) log = "Best Model Accuraies Train: {:.4f}, Val: {:.4f}, Test: {:.4f}" best_train_roc = test(best_model, dataloaders['train'], args) best_val_roc = test(best_model, dataloaders['val'], args) best_test_roc = test(best_model, dataloaders['test'], args, save_model_preds=True) print(log.format(best_train_roc, best_val_roc, best_test_roc)) ``` Epoch: 001, Train: 0.5019, Val: 0.4953, Test: 0.5062, Loss: 0.6932055354118347 Epoch: 002, Train: 0.5089, Val: 0.4964, Test: 0.4886, Loss: 0.6931920647621155 Epoch: 003, Train: 0.5206, Val: 0.4928, Test: 0.5118, Loss: 0.6932067275047302 Epoch: 004, Train: 0.5019, Val: 0.4990, Test: 0.5043, Loss: 0.6931806802749634 Epoch: 005, Train: 0.5082, Val: 0.5174, Test:
0.4950, Loss: 0.6932178139686584 Epoch: 006, Train: 0.5022, Val: 0.5271, Test: 0.5069, Loss: 0.6932305097579956 Epoch: 007, Train: 0.5222, Val: 0.4992, Test: 0.5077, Loss: 0.6932173371315002 Epoch: 008, Train: 0.4989, Val: 0.5117, Test: 0.5131, Loss: 0.6931739449501038 Epoch: 009, Train: 0.5138, Val: 0.5229, Test: 0.5192, Loss: 0.693060040473938 Epoch: 010, Train: 0.5168, Val: 0.5268, Test: 0.5048, Loss: 0.6931229829788208 Epoch: 011, Train: 0.5327, Val: 0.5091, Test: 0.5290, Loss: 0.6930479407310486 Epoch: 012, Train: 0.5337, Val: 0.5132, Test: 0.5371, Loss: 0.6930755972862244 Epoch: 013, Train: 0.5220, Val: 0.5220, Test: 0.5197, Loss: 0.693119466304779 Epoch: 014, Train: 0.5292, Val: 0.5178, Test: 0.5210, Loss: 0.6930415630340576 Epoch: 015, Train: 0.5166, Val: 0.5225, Test: 0.5263, Loss: 0.6930370330810547 Epoch: 016, Train: 0.5366, Val: 0.5217, Test: 0.5326, Loss: 0.693081796169281 Epoch: 017, Train: 0.5396, Val: 0.5155, Test: 0.5344, Loss: 0.6929906010627747 Epoch: 018, Train: 0.5304, Val: 0.5175, Test: 0.5372, Loss: 0.6929558515548706 Epoch: 019, Train: 0.5353, Val: 0.5420, Test: 0.5374, Loss: 0.6928719282150269 Epoch: 020, Train: 0.5443, Val: 0.5569, Test: 0.5219, Loss: 0.6928818225860596 Epoch: 021, Train: 0.5439, Val: 0.5291, Test: 0.5486, Loss: 0.6928641200065613 Epoch: 022, Train: 0.5366, Val: 0.5287, Test: 0.5459, Loss: 0.6927971243858337 Epoch: 023, Train: 0.5313, Val: 0.5400, Test: 0.5477, Loss: 0.6928063035011292 Epoch: 024, Train: 0.5432, Val: 0.5306, Test: 0.5574, Loss: 0.6927868127822876 Epoch: 025, Train: 0.5360, Val: 0.5359, Test: 0.5440, Loss: 0.6928010582923889 Epoch: 026, Train: 0.5425, Val: 0.5373, Test: 0.5201, Loss: 0.692813515663147 Epoch: 027, Train: 0.5476, Val: 0.5447, Test: 0.5412, Loss: 0.6927600502967834 Epoch: 028, Train: 0.5377, Val: 0.5493, Test: 0.5632, Loss: 0.6926769614219666 Epoch: 029, Train: 0.5447, Val: 0.5579, Test: 0.5498, Loss: 0.6925768852233887 Epoch: 030, Train: 0.5531, Val: 0.5458, Test: 0.5538, Loss: 0.6925328969955444 Epoch: 031, Train: 0.5502, Val: 0.5608, Test: 0.5495, Loss: 0.6924236416816711 Epoch: 032, Train: 0.5685, Val: 0.5580, Test: 0.5468, Loss: 0.6924571990966797 Epoch: 033, Train: 0.5701, Val: 0.5536, Test: 0.5451, Loss: 0.6924594640731812 Epoch: 034, Train: 0.5480, Val: 0.5416, Test: 0.5586, Loss: 0.6924310922622681 Epoch: 035, Train: 0.5610, Val: 0.5585, Test: 0.5397, Loss: 0.6923143267631531 Epoch: 036, Train: 0.5472, Val: 0.5463, Test: 0.5490, Loss: 0.6921097040176392 Epoch: 037, Train: 0.5622, Val: 0.5556, Test: 0.5273, Loss: 0.6921440958976746 Epoch: 038, Train: 0.5449, Val: 0.5430, Test: 0.5551, Loss: 0.6920964121818542 Epoch: 039, Train: 0.5575, Val: 0.5524, Test: 0.5576, Loss: 0.6919888257980347 Epoch: 040, Train: 0.5427, Val: 0.5541, Test: 0.5403, Loss: 0.6920359134674072 Epoch: 041, Train: 0.5478, Val: 0.5588, Test: 0.5605, Loss: 0.691764235496521 Epoch: 042, Train: 0.5579, Val: 0.5612, Test: 0.5501, Loss: 0.6918411254882812 Epoch: 043, Train: 0.5679, Val: 0.5459, Test: 0.5517, Loss: 0.6918407082557678 Epoch: 044, Train: 0.5434, Val: 0.5483, Test: 0.5473, Loss: 0.6916317939758301 Epoch: 045, Train: 0.5485, Val: 0.5527, Test: 0.5550, Loss: 0.6913734674453735 Epoch: 046, Train: 0.5520, Val: 0.5572, Test: 0.5527, Loss: 0.691251814365387 Epoch: 047, Train: 0.5697, Val: 0.5536, Test: 0.5556, Loss: 0.6910400390625 Epoch: 048, Train: 0.5674, Val: 0.5626, Test: 0.5664, Loss: 0.691244900226593 Epoch: 049, Train: 0.5519, Val: 0.5604, Test: 0.5554, Loss: 0.6907708644866943 Epoch: 050, Train: 0.5556, Val: 0.5720, Test: 0.5574, 
Loss: 0.6903576850891113 Epoch: 051, Train: 0.5573, Val: 0.5688, Test: 0.5653, Loss: 0.6909399628639221 Epoch: 052, Train: 0.5678, Val: 0.5613, Test: 0.5536, Loss: 0.6901348233222961 Epoch: 053, Train: 0.5552, Val: 0.5795, Test: 0.5688, Loss: 0.6899186372756958 Epoch: 054, Train: 0.5662, Val: 0.5630, Test: 0.5643, Loss: 0.6895727515220642 Epoch: 055, Train: 0.5702, Val: 0.5625, Test: 0.5529, Loss: 0.6899831295013428 Epoch: 056, Train: 0.5701, Val: 0.5714, Test: 0.5617, Loss: 0.688920259475708 Epoch: 057, Train: 0.5670, Val: 0.5650, Test: 0.5717, Loss: 0.688893735408783 Epoch: 058, Train: 0.5651, Val: 0.5668, Test: 0.5672, Loss: 0.6887857913970947 Epoch: 059, Train: 0.5730, Val: 0.5687, Test: 0.5598, Loss: 0.6882851719856262 Epoch: 060, Train: 0.5730, Val: 0.5745, Test: 0.5768, Loss: 0.688106119632721 Epoch: 061, Train: 0.5701, Val: 0.5684, Test: 0.5683, Loss: 0.6876559853553772 Epoch: 062, Train: 0.5765, Val: 0.5755, Test: 0.5710, Loss: 0.6875297427177429 Epoch: 063, Train: 0.5779, Val: 0.5758, Test: 0.5827, Loss: 0.6867380738258362 Epoch: 064, Train: 0.5782, Val: 0.5670, Test: 0.5797, Loss: 0.68626868724823 Epoch: 065, Train: 0.5750, Val: 0.5858, Test: 0.5769, Loss: 0.6862733364105225 Epoch: 066, Train: 0.5778, Val: 0.5756, Test: 0.5804, Loss: 0.6857287287712097 Epoch: 067, Train: 0.5749, Val: 0.5798, Test: 0.5715, Loss: 0.685171365737915 Epoch: 068, Train: 0.5849, Val: 0.5790, Test: 0.5741, Loss: 0.6841467618942261 Epoch: 069, Train: 0.5831, Val: 0.5791, Test: 0.5880, Loss: 0.6842892169952393 Epoch: 070, Train: 0.5765, Val: 0.5782, Test: 0.5797, Loss: 0.6838349103927612 Epoch: 071, Train: 0.5860, Val: 0.5885, Test: 0.5847, Loss: 0.6830412149429321 Epoch: 072, Train: 0.5827, Val: 0.5856, Test: 0.5859, Loss: 0.6808038949966431 Epoch: 073, Train: 0.5878, Val: 0.5872, Test: 0.5836, Loss: 0.6804813146591187 Epoch: 074, Train: 0.5849, Val: 0.5877, Test: 0.5882, Loss: 0.6788002252578735 Epoch: 075, Train: 0.5950, Val: 0.5847, Test: 0.5861, Loss: 0.6783311367034912 Epoch: 076, Train: 0.5907, Val: 0.5925, Test: 0.5909, Loss: 0.6774953603744507 Epoch: 077, Train: 0.5903, Val: 0.5897, Test: 0.5954, Loss: 0.6758286356925964 Epoch: 078, Train: 0.5999, Val: 0.5948, Test: 0.5963, Loss: 0.675263524055481 Epoch: 079, Train: 0.6012, Val: 0.6020, Test: 0.5992, Loss: 0.6710941195487976 Epoch: 080, Train: 0.6011, Val: 0.5963, Test: 0.6017, Loss: 0.6713895201683044 Epoch: 081, Train: 0.6026, Val: 0.5997, Test: 0.6035, Loss: 0.6704581379890442 Epoch: 082, Train: 0.6122, Val: 0.6075, Test: 0.6132, Loss: 0.6695705056190491 Epoch: 083, Train: 0.6164, Val: 0.6143, Test: 0.6156, Loss: 0.6671017408370972 Epoch: 084, Train: 0.6198, Val: 0.6248, Test: 0.6218, Loss: 0.6689134836196899 Epoch: 085, Train: 0.6330, Val: 0.6274, Test: 0.6404, Loss: 0.6630488038063049 Epoch: 086, Train: 0.6432, Val: 0.6436, Test: 0.6440, Loss: 0.6611646413803101 Epoch: 087, Train: 0.6535, Val: 0.6534, Test: 0.6497, Loss: 0.6594260931015015 Epoch: 088, Train: 0.6574, Val: 0.6575, Test: 0.6556, Loss: 0.6585035920143127 Epoch: 089, Train: 0.6550, Val: 0.6719, Test: 0.6709, Loss: 0.6572673916816711 Epoch: 090, Train: 0.6725, Val: 0.6675, Test: 0.6728, Loss: 0.651466429233551 Epoch: 091, Train: 0.6772, Val: 0.6765, Test: 0.6786, Loss: 0.6508798599243164 Epoch: 092, Train: 0.6733, Val: 0.6770, Test: 0.6773, Loss: 0.6480381488800049 Epoch: 093, Train: 0.6733, Val: 0.6829, Test: 0.6780, Loss: 0.6487643718719482 Epoch: 094, Train: 0.6846, Val: 0.6789, Test: 0.6798, Loss: 0.6418784856796265 Epoch: 095, Train: 0.6845, Val: 0.6838, Test: 0.6803, Loss: 
0.6384830474853516 Epoch: 096, Train: 0.6805, Val: 0.6882, Test: 0.6869, Loss: 0.6390049457550049 Epoch: 097, Train: 0.6846, Val: 0.6803, Test: 0.6791, Loss: 0.641418993473053 Epoch: 098, Train: 0.6776, Val: 0.6836, Test: 0.6817, Loss: 0.6370711922645569 Epoch: 099, Train: 0.6855, Val: 0.6783, Test: 0.6785, Loss: 0.6326021552085876 Epoch: 100, Train: 0.6744, Val: 0.6765, Test: 0.6750, Loss: 0.6325771808624268 Epoch: 101, Train: 0.6755, Val: 0.6802, Test: 0.6804, Loss: 0.6332057118415833 Epoch: 102, Train: 0.6774, Val: 0.6818, Test: 0.6858, Loss: 0.6326180696487427 Epoch: 103, Train: 0.6832, Val: 0.6878, Test: 0.6849, Loss: 0.6269603371620178 Epoch: 104, Train: 0.6846, Val: 0.6784, Test: 0.6855, Loss: 0.6200358867645264 Epoch: 105, Train: 0.6769, Val: 0.6822, Test: 0.6828, Loss: 0.6257349252700806 Epoch: 106, Train: 0.6778, Val: 0.6847, Test: 0.6811, Loss: 0.6317898631095886 Epoch: 107, Train: 0.6798, Val: 0.6795, Test: 0.6824, Loss: 0.6347403526306152 Epoch: 108, Train: 0.6757, Val: 0.6804, Test: 0.6797, Loss: 0.6143648028373718 Epoch: 109, Train: 0.6861, Val: 0.6798, Test: 0.6769, Loss: 0.6243528127670288 Epoch: 110, Train: 0.6838, Val: 0.6796, Test: 0.6907, Loss: 0.622592031955719 Epoch: 111, Train: 0.6899, Val: 0.6895, Test: 0.6906, Loss: 0.6148200631141663 Epoch: 112, Train: 0.6933, Val: 0.6927, Test: 0.6928, Loss: 0.6142141222953796 Epoch: 113, Train: 0.6904, Val: 0.6858, Test: 0.6943, Loss: 0.61095130443573 Epoch: 114, Train: 0.6854, Val: 0.6899, Test: 0.6904, Loss: 0.6084436774253845 Epoch: 115, Train: 0.6851, Val: 0.6866, Test: 0.6905, Loss: 0.6014653444290161 Epoch: 116, Train: 0.6855, Val: 0.6905, Test: 0.6917, Loss: 0.6062939763069153 Epoch: 117, Train: 0.6884, Val: 0.6863, Test: 0.6922, Loss: 0.6057735085487366 Epoch: 118, Train: 0.6912, Val: 0.6885, Test: 0.6918, Loss: 0.6048821210861206 Epoch: 119, Train: 0.6924, Val: 0.6887, Test: 0.6927, Loss: 0.5992241501808167 Epoch: 120, Train: 0.6963, Val: 0.6995, Test: 0.6926, Loss: 0.6004308462142944 Epoch: 121, Train: 0.6916, Val: 0.6929, Test: 0.6951, Loss: 0.6008142828941345 Epoch: 122, Train: 0.6885, Val: 0.6908, Test: 0.6898, Loss: 0.597372829914093 Epoch: 123, Train: 0.6924, Val: 0.6954, Test: 0.6960, Loss: 0.6026260852813721 Epoch: 124, Train: 0.6953, Val: 0.6937, Test: 0.6927, Loss: 0.5994287133216858 Epoch: 125, Train: 0.6991, Val: 0.6923, Test: 0.6964, Loss: 0.5926941633224487 Epoch: 126, Train: 0.6901, Val: 0.6915, Test: 0.6953, Loss: 0.5947172045707703 Epoch: 127, Train: 0.6929, Val: 0.6982, Test: 0.6940, Loss: 0.5826076865196228 Epoch: 128, Train: 0.6934, Val: 0.7004, Test: 0.6951, Loss: 0.5893241763114929 Epoch: 129, Train: 0.6926, Val: 0.6915, Test: 0.6912, Loss: 0.5897101163864136 Epoch: 130, Train: 0.6890, Val: 0.6911, Test: 0.6837, Loss: 0.5968517661094666 Epoch: 131, Train: 0.6877, Val: 0.6908, Test: 0.6850, Loss: 0.589328408241272 Epoch: 132, Train: 0.6943, Val: 0.6917, Test: 0.6922, Loss: 0.5831955671310425 Epoch: 133, Train: 0.6971, Val: 0.6953, Test: 0.7014, Loss: 0.5696879625320435 Epoch: 134, Train: 0.6930, Val: 0.6998, Test: 0.6988, Loss: 0.5866789817810059 Epoch: 135, Train: 0.6912, Val: 0.6847, Test: 0.6834, Loss: 0.5844400525093079 Epoch: 136, Train: 0.6843, Val: 0.6922, Test: 0.6905, Loss: 0.5795565843582153 Epoch: 137, Train: 0.6946, Val: 0.6935, Test: 0.6870, Loss: 0.5705973505973816 Epoch: 138, Train: 0.6994, Val: 0.6960, Test: 0.6919, Loss: 0.573519229888916 Epoch: 139, Train: 0.6969, Val: 0.6982, Test: 0.6976, Loss: 0.5754662156105042 Epoch: 140, Train: 0.7000, Val: 0.6936, Test: 0.6962, Loss: 
0.5710124373435974 Epoch: 141, Train: 0.6977, Val: 0.6974, Test: 0.6912, Loss: 0.5696238279342651 Epoch: 142, Train: 0.6941, Val: 0.6926, Test: 0.6963, Loss: 0.5677017569541931 Epoch: 143, Train: 0.6944, Val: 0.6818, Test: 0.6934, Loss: 0.5729352235794067 Epoch: 144, Train: 0.6935, Val: 0.6907, Test: 0.6890, Loss: 0.5693244338035583 Epoch: 145, Train: 0.6975, Val: 0.6938, Test: 0.7049, Loss: 0.5650897026062012 Epoch: 146, Train: 0.6992, Val: 0.7005, Test: 0.6973, Loss: 0.5605905652046204 Epoch: 147, Train: 0.6993, Val: 0.7074, Test: 0.6988, Loss: 0.5520856380462646 Epoch: 148, Train: 0.7046, Val: 0.6977, Test: 0.6991, Loss: 0.5596573948860168 Epoch: 149, Train: 0.7030, Val: 0.6995, Test: 0.7039, Loss: 0.5518948435783386 Epoch: 150, Train: 0.7009, Val: 0.7009, Test: 0.7007, Loss: 0.5539484620094299 Epoch: 151, Train: 0.7032, Val: 0.7044, Test: 0.7017, Loss: 0.5543316006660461 Epoch: 152, Train: 0.7123, Val: 0.7061, Test: 0.7035, Loss: 0.554809033870697 Epoch: 153, Train: 0.7123, Val: 0.7023, Test: 0.7083, Loss: 0.5473509430885315 Epoch: 154, Train: 0.7083, Val: 0.7049, Test: 0.7143, Loss: 0.5487227439880371 Epoch: 155, Train: 0.7078, Val: 0.7112, Test: 0.7117, Loss: 0.5427062511444092 Epoch: 156, Train: 0.7134, Val: 0.7131, Test: 0.7106, Loss: 0.5389374494552612 Epoch: 157, Train: 0.7166, Val: 0.7156, Test: 0.7157, Loss: 0.5452955365180969 Epoch: 158, Train: 0.7201, Val: 0.7202, Test: 0.7172, Loss: 0.5453580021858215 Epoch: 159, Train: 0.7228, Val: 0.7193, Test: 0.7295, Loss: 0.5366584658622742 Epoch: 160, Train: 0.7202, Val: 0.7210, Test: 0.7190, Loss: 0.5381338596343994 Epoch: 161, Train: 0.7270, Val: 0.7233, Test: 0.7250, Loss: 0.5340146422386169 Epoch: 162, Train: 0.7286, Val: 0.7189, Test: 0.7235, Loss: 0.5423163771629333 Epoch: 163, Train: 0.7274, Val: 0.7305, Test: 0.7249, Loss: 0.5440378785133362 Epoch: 164, Train: 0.7344, Val: 0.7310, Test: 0.7325, Loss: 0.5293395519256592 Epoch: 165, Train: 0.7375, Val: 0.7351, Test: 0.7351, Loss: 0.5210434198379517 Epoch: 166, Train: 0.7452, Val: 0.7383, Test: 0.7383, Loss: 0.533065140247345 Epoch: 167, Train: 0.7420, Val: 0.7399, Test: 0.7403, Loss: 0.5240846276283264 Epoch: 168, Train: 0.7326, Val: 0.7400, Test: 0.7425, Loss: 0.5234970450401306 Epoch: 169, Train: 0.7409, Val: 0.7451, Test: 0.7399, Loss: 0.5360404253005981 Epoch: 170, Train: 0.7472, Val: 0.7486, Test: 0.7388, Loss: 0.5209401845932007 Epoch: 171, Train: 0.7507, Val: 0.7517, Test: 0.7532, Loss: 0.5266425013542175 Epoch: 172, Train: 0.7503, Val: 0.7514, Test: 0.7527, Loss: 0.5186090469360352 Epoch: 173, Train: 0.7505, Val: 0.7531, Test: 0.7534, Loss: 0.5280333757400513 Epoch: 174, Train: 0.7552, Val: 0.7526, Test: 0.7503, Loss: 0.5169417262077332 Epoch: 175, Train: 0.7522, Val: 0.7548, Test: 0.7490, Loss: 0.5271726250648499 Epoch: 176, Train: 0.7538, Val: 0.7506, Test: 0.7491, Loss: 0.525481104850769 Epoch: 177, Train: 0.7562, Val: 0.7601, Test: 0.7537, Loss: 0.5083685517311096 Epoch: 178, Train: 0.7586, Val: 0.7582, Test: 0.7592, Loss: 0.5087206363677979 Epoch: 179, Train: 0.7539, Val: 0.7575, Test: 0.7601, Loss: 0.524002194404602 Epoch: 180, Train: 0.7592, Val: 0.7525, Test: 0.7563, Loss: 0.5085655450820923 Epoch: 181, Train: 0.7619, Val: 0.7643, Test: 0.7559, Loss: 0.5135030746459961 Epoch: 182, Train: 0.7499, Val: 0.7533, Test: 0.7544, Loss: 0.5254723429679871 Epoch: 183, Train: 0.7564, Val: 0.7625, Test: 0.7638, Loss: 0.5017015337944031 Epoch: 184, Train: 0.7559, Val: 0.7550, Test: 0.7587, Loss: 0.5086198449134827 Epoch: 185, Train: 0.7640, Val: 0.7551, Test: 0.7635, Loss: 
0.5079658627510071 Epoch: 186, Train: 0.7547, Val: 0.7546, Test: 0.7589, Loss: 0.5196709632873535 Epoch: 187, Train: 0.7573, Val: 0.7547, Test: 0.7511, Loss: 0.5095032453536987 Epoch: 188, Train: 0.7583, Val: 0.7568, Test: 0.7608, Loss: 0.5064508318901062 Epoch: 189, Train: 0.7639, Val: 0.7599, Test: 0.7582, Loss: 0.506013810634613 Epoch: 190, Train: 0.7581, Val: 0.7564, Test: 0.7571, Loss: 0.5119727849960327 Epoch: 191, Train: 0.7545, Val: 0.7540, Test: 0.7552, Loss: 0.4914553463459015 Epoch: 192, Train: 0.7569, Val: 0.7505, Test: 0.7552, Loss: 0.5008942484855652 Epoch: 193, Train: 0.7584, Val: 0.7565, Test: 0.7597, Loss: 0.4939987063407898 Epoch: 194, Train: 0.7549, Val: 0.7504, Test: 0.7539, Loss: 0.49491646885871887 Epoch: 195, Train: 0.7568, Val: 0.7509, Test: 0.7587, Loss: 0.4982417821884155 Epoch: 196, Train: 0.7530, Val: 0.7523, Test: 0.7503, Loss: 0.506991446018219 Epoch: 197, Train: 0.7558, Val: 0.7564, Test: 0.7518, Loss: 0.5017726421356201 Epoch: 198, Train: 0.7604, Val: 0.7544, Test: 0.7535, Loss: 0.498566210269928 Epoch: 199, Train: 0.7577, Val: 0.7596, Test: 0.7664, Loss: 0.4957054555416107 Saving Link Classification Model Predictions Best Model Accuraies Train: 0.7554, Val: 0.7567, Test: 0.7609 ## Question 4: What is the maximum ROC-AUC score you get for your best_model on test set? (13 points) After training your model, download and submit your best model prediction file: *CORA-Link-Prediction.csv*. As we have seen before you can view this file by clicking on the *Folder* icon on the left side pannel. # Submission You will need to submit four files on Gradescope to complete this notebook. 1. Your completed *XCS224W_Colab3.ipynb*. From the "File" menu select "Download .ipynb" to save a local copy of your completed Colab. 2. *CORA-Node-GraphSage.csv* 3. *CORA-Node-GAT.csv* 4. *CORA-Link-Prediction.csv* Download the csv files by selecting the *Folder* icon on the left panel. To submit your work, zip the files downloaded in steps 1-4 above and submit to gradescope. **NOTE:** DO NOT rename any of the downloaded files. ```python ```
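For Question 4, the ROC-AUC can be computed from the saved predictions with scikit-learn. A minimal sketch with made-up numbers; the actual column layout of *CORA-Link-Prediction.csv* is not shown here, so `test_labels` and `test_scores` below are hypothetical stand-ins for the true edge labels and the model's predicted link probabilities:

```python
import numpy as np
from sklearn.metrics import roc_auc_score

# Hypothetical arrays: 1 for true (positive) test edges, 0 for negative sampled edges,
# and the model's predicted link probabilities for the same edges, in the same order.
test_labels = np.array([1, 1, 1, 0, 0, 0])
test_scores = np.array([0.91, 0.75, 0.62, 0.40, 0.35, 0.08])

# ROC-AUC is threshold-free: it measures how well the scores rank positives above negatives.
print(f"Test ROC-AUC: {roc_auc_score(test_labels, test_scores):.4f}")
```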
4af3f88bf7ae95a7870f7e40323bdc078ed824b1
344,974
ipynb
Jupyter Notebook
notebooks/XCS224W_Colab3.ipynb
leehanchung/cs224w
4e7bba7a2769c5ed016c53e165535a2bd34f7b22
[ "MIT" ]
10
2021-09-15T06:52:47.000Z
2022-03-10T16:11:30.000Z
notebooks/XCS224W_Colab3.ipynb
leehanchung/cs224w
4e7bba7a2769c5ed016c53e165535a2bd34f7b22
[ "MIT" ]
null
null
null
notebooks/XCS224W_Colab3.ipynb
leehanchung/cs224w
4e7bba7a2769c5ed016c53e165535a2bd34f7b22
[ "MIT" ]
null
null
null
123.161014
162,342
0.795538
true
28,338
Qwen/Qwen-72B
1. YES 2. YES
0.709019
0.819893
0.58132
__label__eng_Latn
0.832028
0.188931
# Discrete Choice Discrete choice models are models that model a single (mutually exclusive) choice, in contrast to the standard models where a quantity is estimated. In this notebook we will try to get you familiarized with discrete choice, the difference between logit and probit, and how to implement them (and more advanced models), using the module [statsmodels](https://www.statsmodels.org/stable/index.html). ### Linear Regression As a lazy student we want to study as little as possible, but still pass the final test. Let's pretend we have a dataset of last year's students, with the hours they studied for the test, and whether or not they passed. From this dataset we can make an estimation how many hours we have to study ourselves to pass. If we would try a linear expression approach we would try to fit the function: \begin{equation} Y = \beta_0 + \beta_1 X_1 \end{equation} where $Y$ equals the chance of passing, $\beta_0$ the base chance of passing, $\beta_1$ the increase of chance of passing per hour we study, and $X_1$ the hours a student studied. First: We install statsmodels and test if we can import it. ```python from IPython.display import clear_output !pip install statsmodels import statsmodels clear_output() print("Everything A-Okay!") ``` Everything A-Okay! ```python %matplotlib inline import matplotlib.pyplot as plt import statsmodels.api as sm import numpy as np # the hours each student studied for the test, and whether they passed or failed students = {'hours': [0, 2, 3, 4, 4.5, 4.5, 5, 6, 6.5, 7, 8, 8, 9, 9.5, 10, 10.5, 12, 13.5], 'passed': [False, False, False, False, False, False, False, False, False, True, True, True, False, True, True, False, True, True]} # use ordinary least squares (OLS) to fit our function Y intercept, slope = sm.OLS(students['passed'], sm.add_constant(students['hours'])).fit().params # plot the results of the students plt.scatter(students['hours'], students['passed']) plt.xlabel('hours studied'); plt.ylabel('pass/fail') # plot the results of the fit x_vals = np.array(plt.gca().get_xlim()) y_vals = intercept + slope * x_vals plt.plot(x_vals, y_vals, '--') # set proper axes plt.xlim([-1, 14]); plt.ylim([-0.1, 1.1]) plt.show() ``` We fitted our $Y$ function with a simple linear square approach, by using the method [sm.OLS](https://www.statsmodels.org/dev/generated/statsmodels.regression.linear_model.OLS.html) from statsmodels. Its first argument is the $Y$ value we try to fit, and the second argument are the $\beta$ values we try to fit. Note that we have to add a constant value (`sm.add_constant`) if we want a $\beta_0$ value The obvious problem with the linear regression approach is that we try to model the chance of pass (or failure), but our model can give values outside of the range (0, 1). If a student did not study the model gives a chance lower than 0% of succes, and after more than 14 hours of study the chance of passing is higher than a 100%! To solve this problem we need discrete models. ### Binary Discrete Choice Discrete models are similar to our previous approach, except the $Y$ value is not modelled on a continuous scale, but is limited between discrete alternatives. To solve these models we need a **utility** function, which closely resembles the function we tried to fit using linear regression, but with some added noise $\epsilon$: \begin{equation} U = \beta_0 + \beta_1 X_1 + \epsilon \\ \begin{cases} pass & U > 0\\ fail & else\\ \end{cases} \end{equation} This utility function represents the preference for an outcome. 
In our case, if the utility is a number above zero the student passes, otherwise the student fails. To get a probability from our utility we need a function $F$ which maps the utility to a probability in the range (0, 1).

\begin{equation}
P_{n1} = F(U)
\end{equation}

Here we will discuss two of the most common $F$ functions, **Logit** & **Probit**.

### Logit (Logistic regression)

When using the Logit approach we assume that the log-odds of pass/failure can be expressed as a linear function of our input (the utility), and that the unobserved fraction of the utility ($\epsilon$) follows a logistic distribution:

\begin{equation}
\log \left(\frac{P_{n1}}{1 - P_{n1}}\right) = U \\
\frac{P_{n1}}{1 - P_{n1}} = e^U
\end{equation}

which we can rewrite to:

\begin{equation}
P_{n1} = \frac{e^U}{1 + e^U}
\end{equation}

In the Logit case our function $F$ is just the sigmoid/logistic function! So what did we gain from this approach? Our values are now limited to the range (0, 1), but more importantly, we can interpret our coefficients as odds! If, for instance, our fitted $\beta_1$ has a value of $1.1$, it means that each additional hour of study multiplies the odds of passing by $e^{1.1} \approx 3$!

### Probit

The probit model assumes that the unobserved fraction of the utility ($\epsilon$) follows a standard normal distribution:

\begin{equation}
P_{n1} = \Phi(\beta_0 + \beta_1 X_1)
\end{equation}

where $\Phi$ is the cumulative distribution function of the (standard) normal distribution.

### Difference

So what is the difference between a normal distribution and a logistic distribution? Let's plot them both:

```python
from scipy.stats import norm, logistic
import math

# standard normal distribution
mu = 0; std = 1
x = np.linspace(-4, 4, 100)

# plot the normal pdf & cdf
normal = norm.pdf(x, loc=mu, scale=std)
plt.plot(x, normal, label='normal distribution')
plt.plot(x, np.cumsum(normal) / sum(normal), label='cumulative normal distribution')

# plot the logistic pdf & cdf
logist = logistic.pdf(x, loc=mu, scale=std * math.sqrt(3) / math.pi)
plt.plot(x, logist, label='logistic distribution')
plt.plot(x, np.cumsum(logist) / sum(logist), label='cumulative logistic distribution')

plt.ylabel('probability')
plt.legend()
plt.show()
```

They are very similar! Note that the logistic distribution has fatter tails, so it will produce more extreme values than the normal distribution. 
Now let's see how they differ in performance of the fit: ```python # plot the results of the students plt.scatter(students['hours'], students['passed']) plt.xlabel('hours studied'); plt.ylabel('pass/fail') # set proper axes plt.xlim([-1, 14]); plt.ylim([-0.1, 1.1]) x_vals = sm.add_constant(np.linspace(-1, 14, 100)) # use probit to fit our function probit = sm.Probit(students['passed'], sm.add_constant(students['hours'])) pr_model = probit.fit(disp=0) # disp=0 to silence the verbose function pseudo_r_p = pr_model.prsquared # plot the results of probit y_vals = pr_model.predict(x_vals) plt.plot(x_vals[:, 1], y_vals, '--', label='probit') # use logit to fit our function logit = sm.Logit(students['passed'], sm.add_constant(students['hours'])) lo_model = logit.fit(disp=0) # disp=0 to silence the verbose function pseudo_r_l = lo_model.prsquared # plot the results of logit y_vals = lo_model.predict(x_vals) plt.plot(x_vals[:, 1], y_vals, '--', label='logit') plt.legend() plt.show() # show summary of both models print(pr_model.summary()) print(lo_model.summary()) ``` ```python # so what is the probability of passing the course if you study 9 hours for the test? # your answer has to be correct for at least two significant digits chance = None def calc_utility(model, hours): """ Determines the utility of a certain regression model and input of hours studied """ constant, slope = model.params return constant + slope * hours def prob_Logit(utility): """ Determines the probability of passing the exam given a Logistic regression and utility """ return 1 / (1 + math.exp(-utility)) def prob_Probit(utility): """ Determines the probability of passing the exam given a Probit regression and utility """ return norm.cdf(utility, loc=mu, scale=std) utility = calc_utility(lo_model, 9) chance = prob_Logit(utility) chance_lo = lo_model.predict([1, 9])[0] assert round(chance, 2) == round(chance_lo, 2), "Probability is not significant for at least two digits" utility = calc_utility(pr_model, 9) chance = prob_Probit(utility) chance_pr = pr_model.predict([1, 9])[0] assert round(chance, 2) == round(chance_pr, 2), "Probability is not significant for at least two digits" ``` ```python assert 0 <= chance <= 1 ``` Even though the fitted parameters of both models are quite different, the actual fits are extremely close, and differ little in their predictions, pseudo R squares, or looks. ### Multinomial logit When dealing with multiple discrete alternatives, we have to make use of multinomial discrete choice. We rewrite our original utility function into one utility function per choice, where the chance of choice $i$ is defined as such: \begin{equation} P_i = Prob(U_i > U_j \quad \forall j \neq i) \end{equation} We generated a dataset of 250 students, which contains the students' income, distance to university, how lazy they are, and what transport (either bike, car, bus 40 or bus 240) they use to get to university. Implement multinomial logit yourself, take a look at [MNLogit](https://www.statsmodels.org/dev/generated/statsmodels.discrete.discrete_model.MNLogit.html). Remember to add a constant (`sm.add_constant`) to our observed variables. Also note that you should use numeric labels, and not the text-label. 
```python
import pandas as pd

# load our dataset
df = pd.read_csv('transport.csv', sep=';', usecols=['income', 'distance', 'lazy', 'transport', 'transport_id'])

# print the 'head' of the dataframe to get a feel for the data
print(df.head())

# implement multinomial logit
df_exogeneous = df.drop(columns=["transport", "transport_id"])
df_endogeneous = df["transport_id"]
x_vals = sm.add_constant(df_exogeneous)
MNL = sm.MNLogit(df_endogeneous, x_vals)
model = MNL.fit(disp=0)

# let's see how it predicts on our own dataset (you should get at least 200 out of 250 predictions correct!)
# the predict function returns a dataframe of shape (250, 4), where each column is the chance of that choice.
# Assume that the option with the highest chance is chosen
MNL_predict = model.predict(x_vals)
MNL_predict["choice"] = MNL_predict.idxmax(axis=1)
correct_predictions = np.where(MNL_predict["choice"] == df["transport_id"], 1, 0)
correct_predictions = correct_predictions.sum()
assert correct_predictions > 200, "Number of correct predictions should at least be 200"
```

       income  distance  lazy transport  transport_id
    0     0.0      11.0   7.0   bus 240             3
    1   433.0       6.0   7.0    bus 40             2
    2   450.0       8.0   7.0   bus 240             3
    3   662.0       9.0   5.0       car             1
    4   168.0       5.0   1.0      bike             0

```python
assert type(model).__name__ == 'MultinomialResultsWrapper'
```

### Logit limitations

- **Taste variation**: every student shares the same $\beta$ values, which does not necessarily have to be true. Some students might have taken earlier courses that cover much of the subject matter of this course, giving them a higher $\beta_0$ value, and some students might simply learn more efficiently, resulting in a higher $\beta_1$ value. Logit does not allow different $\beta$ values for its choice makers.
- **Independence of Irrelevant Alternatives (IIA)**: if we make people choose between two options (e.g. bulbasaur and squirtle), adding a third option (charmander) should not change people's original ordering of the first two. For example: if someone prefers a squirtle over a bulbasaur, also offering them a charmander should not suddenly make them like bulbasaur more. Multinomial logit assumes IIA, so it cannot represent choice behaviour in which this assumption is violated.
- **Repeated choice**: logit assumes no correlation between repeated choices. If a person takes the bike to work one day, it might influence whether they take the bike the next day. Maybe they got lost, so they won't take the bike again. Or they get to know the road better, so biking the next day becomes faster.

### Nested logit

When we look closer at the data we see that bus 240 and bus 40 are similar choices, and after a quick questionnaire we realize that if bus 40 does not run, all students will use bus 240, and vice versa. This bus dependency violates the independence of irrelevant alternatives that multinomial logit assumes. If we implement nested logit instead, this dependency can be modelled:

Your task now is to finish the NestedLogit class, which incorporates this logic. It should fit the choices bike, car, and bus using `sm.MNLogit` and the two different buses by `sm.Logit`. 
```python
class NestedLogit():
    def __init__(self, labels, variables):
        self.labels = labels
        self.variables = variables

        # Makes a binary choice problem for the different busses
        self.bus = self.labels[self.labels.isin([2, 3])]
        self.bus.where(self.bus == 2, 0, inplace = True)
        self.bus.where(self.bus == 0, 1, inplace = True)
        self.variables_bus = self.variables[self.variables.index.isin(self.bus.index)]

    def fit(self):
        """ Method that fits the NestedLogit sub-models. """
        # use logit to fit our function for the different busses
        logit_bus = sm.Logit(self.bus, sm.add_constant(self.variables_bus))
        self.model_bus = logit_bus.fit(disp=0)  # disp=0 to silence the verbose function

        # use Multinomial Logit to fit our function to choose the transport
        labels_others = self.labels.where(self.labels != 3, 2)
        MNL_others = sm.MNLogit(labels_others, sm.add_constant(self.variables))
        self.model_others = MNL_others.fit(disp=0)

    def predict(self, variables):
        """ Method that returns the predictions of the NestedLogit, based on the fit, shape (N, 4) """
        predict_others = self.model_others.predict(variables)
        predict_busses = self.model_bus.predict(variables)
        prob_bus = predict_others[2]
        prob_bus40 = prob_bus.multiply(predict_busses)
        prob_bus240 = prob_bus.multiply(1 - predict_busses)
        predict_others[2] = prob_bus40
        predict_others[3] = prob_bus240
        return predict_others

# Calls to NestedLogit
nlogit = NestedLogit(df['transport_id'], sm.add_constant(df[['income', 'distance', 'lazy']]))
nlogit.fit()
y_vals = nlogit.predict(sm.add_constant(df[['income', 'distance', 'lazy']]))

# How does nested logit compare to multinomial logit? You should get at least 170 predictions correct!
# evaluate the nested-logit probabilities computed above
y_vals["choice"] = y_vals.idxmax(axis=1)
correct_predictions = np.where(y_vals["choice"] == df["transport_id"], 1, 0)
correct_predictions = correct_predictions.sum()
assert correct_predictions > 170, "Number of correct predictions should at least be 170"
```

```python
nlogit = NestedLogit(df['transport_id'], sm.add_constant(df[['income', 'distance', 'lazy']]))
nlogit.fit()
y_vals = nlogit.predict(sm.add_constant(df[['income', 'distance', 'lazy']]))
assert y_vals.shape == (250, 4)
```

The data is generated in the file generate_data.py. Can you design a dataset where NestedLogit outperforms MultiNomialLogit? Why does nested logit not outperform multinomial logit?

### Advanced models

For more complex logit models, such as mixed logit, which allows for taste variation, take a look at [PyLogit](https://github.com/timothyb0912/pylogit)!
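As an aside, the probabilities that `MNLogit` reports can be understood as a softmax over the linear utilities $U_i$. A minimal standalone sketch; the utility numbers below are made up and unrelated to the transport dataset:

```python
import numpy as np

def mnl_probabilities(V):
    """Multinomial logit choice probabilities: P_i = exp(V_i) / sum_j exp(V_j)."""
    V = V - V.max()          # shift utilities for numerical stability
    expV = np.exp(V)
    return expV / expV.sum()

# hypothetical deterministic utilities for bike, car, bus 40, bus 240
V = np.array([1.2, 0.4, 0.9, 0.8])
P = mnl_probabilities(V)
print(dict(zip(['bike', 'car', 'bus 40', 'bus 240'], P.round(3))))

# IIA in action: the odds ratio between two alternatives depends only on their
# utility difference, no matter which other alternatives are available.
print(P[0] / P[1], np.exp(V[0] - V[1]))   # these two numbers agree
```

The last line is exactly the IIA property discussed above: removing or adding an alternative rescales all probabilities but leaves the ratio between any two fixed.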
52f732cb896b09cc4122c5f28b957c1166610015
89,747
ipynb
Jupyter Notebook
Notebooks/Assignment 3/.ipynb_checkpoints/discretechoice-checkpoint.ipynb
JRMfer/ABM_individual
e1ec9dd5301d69a61e40ac0c1d9bee694433780f
[ "MIT" ]
1
2020-02-28T10:29:35.000Z
2020-02-28T10:29:35.000Z
Notebooks/Assignment 3/discretechoice.ipynb
JRMfer/ABM_individual
e1ec9dd5301d69a61e40ac0c1d9bee694433780f
[ "MIT" ]
null
null
null
Notebooks/Assignment 3/discretechoice.ipynb
JRMfer/ABM_individual
e1ec9dd5301d69a61e40ac0c1d9bee694433780f
[ "MIT" ]
null
null
null
128.027104
30,332
0.842702
true
3,973
Qwen/Qwen-72B
1. YES 2. YES
0.855851
0.749087
0.641107
__label__eng_Latn
0.981237
0.327837
# Neural Nets v2 `nn_v2` Should do [Working efficiently with jupyter lab](https://florianwilhelm.info/2018/11/working_efficiently_with_jupyter_lab/) When this was a notebook with integrated tests, we did: \ ` %load_ext autoreload %autoreload 2 %matplotlib widget #%matplotlib inline` ```python # import Importing_Notebooks import numpy as np ``` A network built of components which: 1. accept an ordered set of reals (we'll use `numpy.array`, and call them vectors) at the input port and produce another at the output port - this is forward propagation. ${\displaystyle f\colon \mathbf {R} ^{n}\to \mathbf {R} ^{m}}$ 1. accept an ordered set of reals at the output port, representing the gradient of the loss function at the output, and produce the gradient of the loss function at the input port - this is back propagation, aka backprop. ${\displaystyle b\colon \mathbf {R} ^{m}\to \mathbf {R} ^{n}}$ 1. from the gradient of the loss function at the output, calculate the partial of the loss function w.r.t the internal parameters ${\displaystyle \frac{\partial E}{\partial w} }$ 1. accept a scalar $\eta$ to control the adjustment of internal parameters. _Or is this effected by scaling the loss gradient before passing??_ 1. update internal parameters ${\displaystyle w \leftarrow w - \eta \frac{\partial E}{\partial w} }$ ```python class Layer: def __init__(self): pass def __call__(self, x): """Computes response to input""" raise NotImplementedError def backprop(self, output_delE): """Uses output error gradient to adjust internal parameters, and returns gradient of error at input""" raise NotImplementedError def state_vector(self): """Provide the layer's learnable state as a vector""" raise NotImplementedError ``` A network built of a cascade of layers: ```python class Network: def __init__(self): self.layers = [] self.eta = 0.1 #FIXME def extend(self, net): self.layers.append(net) def __call__(self, input): v = input for net in self.layers: v = net(v) return v def learn(self, facts): for (x, expected) in facts: y = self(x) e = y - expected loss = float(e.dot(e.T))/2.0 egrad = e * self.eta for net in reversed(self.layers): egrad = net.backprop(egrad) return loss def state_vector(self): """Provide the network's learnable state as a vector""" return np.concatenate([layer.state_vector() for layer in self.layers]) ``` ___ ## Useful Layers ### Identify ```python class IdentityLayer(Layer): def __call__(self, x): return x def backprop(self, output_delE): return output_delE def state_vector(self): return np.array([]) ``` ### Affine A layer that does an [affine transformation](https://mathworld.wolfram.com/AffineTransformation.html) aka affinity, which is the classic fully-connected layer with output offsets. 
$$ \mathbf{M} \mathbf{x} + \mathbf{b} = \mathbf{y} $$ where $$ \mathbf{x} = \sum_{j=1}^{n} x_j \mathbf{\hat{x}}_j \\ \mathbf{b} = \sum_{i=1}^{m} b_i \mathbf{\hat{y}}_i \\ \mathbf{y} = \sum_{i=1}^{m} y_i \mathbf{\hat{y}}_i $$ and $\mathbf{M}$ can be written $$ \begin{bmatrix} m_{1,1} & \dots & m_{1,n} \\ \vdots & \ddots & \vdots \\ m_{m,1} & \dots & m_{m,n} \end{bmatrix} \\ $$ #### Error gradient back-propagation $$ \begin{align} \frac{\partial loss}{\partial\mathbf{x}} &= \frac{\partial loss}{\partial\mathbf{y}} \frac{\partial\mathbf{y}}{\partial\mathbf{x}} \\ &= \mathbf{M}^\mathsf{T}\frac{\partial loss}{\partial\mathbf{y}} \end{align} $$ #### Parameter adjustment $$ \frac{\partial loss}{\partial\mathbf{M}} = \frac{\partial loss}{\partial\mathbf{y}} \frac{\partial\mathbf{y}}{\partial\mathbf{M}} = \frac{\partial loss}{\partial\mathbf{y}} \mathbf{x} \\ \frac{\partial loss}{\partial\mathbf{b}} = \frac{\partial loss}{\partial\mathbf{y}} \frac{\partial\mathbf{y}}{\partial\mathbf{b}} = \frac{\partial loss}{\partial\mathbf{y}} $$ ```python def column_vecify(m): #FIXME: find the numpy way return m.reshape((len(m),1)) class AffinityLayer(Layer): """An affine transformation, which is the classic fully-connected layer with offsets""" def __init__(self, n, m): self.M = np.empty((m, n)) self.b = np.empty((m, 1)) self.randomize() def randomize(self): self.M[:] = np.random.randn(*self.M.shape) self.b[:] = np.random.randn(*self.b.shape) def __call__(self, x): self.input = x self.output = self.M @ x + self.b return self.output def backprop(self, output_delE): input_delE = self.M.T @ output_delE self.M -= np.einsum('ik,jk', output_delE, self.input) \ if len(output_delE.shape) == 2 \ else np.outer(output_delE, self.input) self.b -= column_vecify(np.sum(output_delE,axis=1)) \ if len(output_delE.shape) == 2 \ else output_delE return input_delE def state_vector(self): return np.concatenate((self.M.ravel(), self.b.ravel())) ``` ### Map Maps a scalar function on the inputs, for e.g. activation layers. ```python class MapLayer(Layer): """Map a scalar function on the input taken element-wise""" def __init__(self, fun, dfundx): self.vfun = np.vectorize(fun) self.vdfundx = np.vectorize(dfundx) def __call__(self, x): self.input = x return self.vfun(x) def backprop(self, output_delE): input_delE = self.vdfundx(self.input) * output_delE return input_delE def state_vector(self): return np.array([]) ``` ___ To produce an importable `nn_v2.py`: 1. Save this notebook 1. Uncomment the `jupyter nbconvert` line below 1. Execute it. 1. Comment out the convert again 1. Save the notebook again in that form ```python ### !jupyter nbconvert --to script nn_v2.ipynb ``` [NbConvertApp] Converting notebook nn_v2.ipynb to script [NbConvertApp] Writing 6390 bytes to nn_v2.py ```python ```
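The layer and network classes above are defined but never exercised in this notebook. A minimal usage sketch, assuming the cells above have been run so that `Network`, `AffinityLayer`, and `MapLayer` are in scope; the 2-4-1 architecture, `tanh` activation, seed, and epoch count are arbitrary illustration choices, and how well it converges on XOR depends on the random initialization:

```python
import numpy as np

np.random.seed(0)

# build a tiny 2-4-1 network: affine -> tanh -> affine -> tanh
net = Network()
net.extend(AffinityLayer(2, 4))
net.extend(MapLayer(np.tanh, lambda v: 1.0 - np.tanh(v)**2))
net.extend(AffinityLayer(4, 1))
net.extend(MapLayer(np.tanh, lambda v: 1.0 - np.tanh(v)**2))

# XOR facts as (column-vector input, column-vector target) pairs,
# matching the shapes AffinityLayer and Network.learn expect
facts = [(np.array([[a], [b]]), np.array([[float(a ^ b)]]))
         for a in (0, 1) for b in (0, 1)]

for epoch in range(5000):
    loss = net.learn(facts)   # returns the loss of the last fact in the sweep

print("final loss:", loss)
for x, target in facts:
    print(x.ravel(), "->", float(net(x)), "expected", float(target))
```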
9663912238322971e6d9618b8dffd70a37cf3c40
10,184
ipynb
Jupyter Notebook
nbs/OLD/nn_v2.ipynb
pramasoul/aix
98333b875f6c6cda6dee86e6eab02c5ddc622543
[ "MIT" ]
null
null
null
nbs/OLD/nn_v2.ipynb
pramasoul/aix
98333b875f6c6cda6dee86e6eab02c5ddc622543
[ "MIT" ]
1
2021-11-29T03:44:00.000Z
2021-12-19T05:34:04.000Z
nbs/OLD/nn_v2.ipynb
pramasoul/aix
98333b875f6c6cda6dee86e6eab02c5ddc622543
[ "MIT" ]
null
null
null
29.433526
298
0.508739
true
1,721
Qwen/Qwen-72B
1. YES 2. YES
0.868827
0.754915
0.65589
__label__eng_Latn
0.849812
0.362184
# Tetracycline Resistance Model (c) 2021 Tom Röschinger. This work is licensed under a [Creative Commons Attribution License CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/). All code contained herein is licensed under an [MIT license](https://opensource.org/licenses/MIT). ```julia using SymPy, Polynomials, Jedi, ColorSchemes, Turing, Distributions, LinearAlgebra # Comment this out if Jedi.jl is not installed #Jedi.default_gr!() ``` In this notebook we go through everything bla bla ```julia @syms a λ λ_0 κ_t K_d Δr j V_0 a_ex K_M P_out P_in ``` (a, λ, λ_0, κ_t, K_d, Δr, j, V_0, a_ex, K_M, P_out, P_in) ```julia eq1 = (a * λ) / (κ_t * K_d) - Δr * (1 - λ/λ_0) ``` $\begin{equation*}- Δr \left(- \frac{λ}{λ_{0}} + 1\right) + \frac{a λ}{K_{d} κ_{t}}\end{equation*}$ ```julia eq2 = a^2 * (-λ - P_out) + a * (-λ * K_M + P_in * a_ex - P_out * K_M - V_0 * λ/λ_0 ) + P_in * a_ex * K_M ``` $\begin{equation*}K_{M} P_{in} a_{ex} + a^{2} \left(- P_{out} - λ\right) + a \left(- K_{M} P_{out} - K_{M} λ + P_{in} a_{ex} - \frac{V_{0} λ}{λ_{0}}\right)\end{equation*}$ ```julia expr1 = solve(eq1, a)[1] ``` $\begin{equation*}\frac{K_{d} Δr κ_{t} \left(- λ + λ_{0}\right)}{λ λ_{0}}\end{equation*}$ ```julia expr2 = subs(eq2, a=>expr1) ``` $\begin{equation*}K_{M} P_{in} a_{ex} + \frac{K_{d}^{2} Δr^{2} κ_{t}^{2} \left(- P_{out} - λ\right) \left(- λ + λ_{0}\right)^{2}}{λ^{2} λ_{0}^{2}} + \frac{K_{d} Δr κ_{t} \left(- λ + λ_{0}\right) \left(- K_{M} P_{out} - K_{M} λ + P_{in} a_{ex} - \frac{V_{0} λ}{λ_{0}}\right)}{λ λ_{0}}\end{equation*}$ ```julia expr3 = expr2 * λ^2 * λ_0^2 ``` $\begin{equation*}λ^{2} λ_{0}^{2} \left(K_{M} P_{in} a_{ex} + \frac{K_{d}^{2} Δr^{2} κ_{t}^{2} \left(- P_{out} - λ\right) \left(- λ + λ_{0}\right)^{2}}{λ^{2} λ_{0}^{2}} + \frac{K_{d} Δr κ_{t} \left(- λ + λ_{0}\right) \left(- K_{M} P_{out} - K_{M} λ + P_{in} a_{ex} - \frac{V_{0} λ}{λ_{0}}\right)}{λ λ_{0}}\right)\end{equation*}$ ```julia expr4 = factor(expr3) ``` $\begin{equation*}K_{M} K_{d} P_{out} Δr κ_{t} λ^{2} λ_{0} - K_{M} K_{d} P_{out} Δr κ_{t} λ λ_{0}^{2} + K_{M} K_{d} Δr κ_{t} λ^{3} λ_{0} - K_{M} K_{d} Δr κ_{t} λ^{2} λ_{0}^{2} + K_{M} P_{in} a_{ex} λ^{2} λ_{0}^{2} - K_{d}^{2} P_{out} Δr^{2} κ_{t}^{2} λ^{2} + 2 K_{d}^{2} P_{out} Δr^{2} κ_{t}^{2} λ λ_{0} - K_{d}^{2} P_{out} Δr^{2} κ_{t}^{2} λ_{0}^{2} - K_{d}^{2} Δr^{2} κ_{t}^{2} λ^{3} + 2 K_{d}^{2} Δr^{2} κ_{t}^{2} λ^{2} λ_{0} - K_{d}^{2} Δr^{2} κ_{t}^{2} λ λ_{0}^{2} - K_{d} P_{in} a_{ex} Δr κ_{t} λ^{2} λ_{0} + K_{d} P_{in} a_{ex} Δr κ_{t} λ λ_{0}^{2} + K_{d} V_{0} Δr κ_{t} λ^{3} - K_{d} V_{0} Δr κ_{t} λ^{2} λ_{0}\end{equation*}$ ```julia expr5 = collect(expr4, λ) ``` $\begin{equation*}- K_{d}^{2} P_{out} Δr^{2} κ_{t}^{2} λ_{0}^{2} + λ^{3} \left(K_{M} K_{d} Δr κ_{t} λ_{0} - K_{d}^{2} Δr^{2} κ_{t}^{2} + K_{d} V_{0} Δr κ_{t}\right) + λ^{2} \left(K_{M} K_{d} P_{out} Δr κ_{t} λ_{0} - K_{M} K_{d} Δr κ_{t} λ_{0}^{2} + K_{M} P_{in} a_{ex} λ_{0}^{2} - K_{d}^{2} P_{out} Δr^{2} κ_{t}^{2} + 2 K_{d}^{2} Δr^{2} κ_{t}^{2} λ_{0} - K_{d} P_{in} a_{ex} Δr κ_{t} λ_{0} - K_{d} V_{0} Δr κ_{t} λ_{0}\right) + λ \left(- K_{M} K_{d} P_{out} Δr κ_{t} λ_{0}^{2} + 2 K_{d}^{2} P_{out} Δr^{2} κ_{t}^{2} λ_{0} - K_{d}^{2} Δr^{2} κ_{t}^{2} λ_{0}^{2} + K_{d} P_{in} a_{ex} Δr κ_{t} λ_{0}^{2}\right)\end{equation*}$ ```julia println(expr5.coeff(λ, 4)) expr5.coeff(λ, 4) ``` 0 $\begin{equation*}0\end{equation*}$ ```julia println(expr5.coeff(λ, 3)) factor(expr5.coeff(λ, 3), K_d) ``` K_M*K_d*Δr*κ_t*λ_0 - K_d^2*Δr^2*κ_t^2 + K_d*V_0*Δr*κ_t $\begin{equation*}- K_{d} Δr κ_{t} \left(- K_{M} λ_{0} + K_{d} Δr κ_{t} - 
V_{0}\right)\end{equation*}$ ```julia println(expr5.coeff(λ, 2)) factor(expr5.coeff(λ, 2), K_d) ``` K_M*K_d*P_out*Δr*κ_t*λ_0 - K_M*K_d*Δr*κ_t*λ_0^2 + K_M*P_in*a_ex*λ_0^2 - K_d^2*P_out*Δr^2*κ_t^2 + 2*K_d^2*Δr^2*κ_t^2*λ_0 - K_d*P_in*a_ex*Δr*κ_t*λ_0 - K_d*V_0*Δr*κ_t*λ_0 $\begin{equation*}K_{M} P_{in} a_{ex} λ_{0}^{2} - K_{d}^{2} \left(P_{out} Δr^{2} κ_{t}^{2} - 2 Δr^{2} κ_{t}^{2} λ_{0}\right) - K_{d} \left(- K_{M} P_{out} Δr κ_{t} λ_{0} + K_{M} Δr κ_{t} λ_{0}^{2} + P_{in} a_{ex} Δr κ_{t} λ_{0} + V_{0} Δr κ_{t} λ_{0}\right)\end{equation*}$ ```julia println(expr5.coeff(λ, 1)) factor(expr5.coeff(λ, 1), K_d) ``` -K_M*K_d*P_out*Δr*κ_t*λ_0^2 + 2*K_d^2*P_out*Δr^2*κ_t^2*λ_0 - K_d^2*Δr^2*κ_t^2*λ_0^2 + K_d*P_in*a_ex*Δr*κ_t*λ_0^2 $\begin{equation*}K_{d} Δr κ_{t} λ_{0} \left(- K_{M} P_{out} λ_{0} + K_{d} \left(2 P_{out} Δr κ_{t} - Δr κ_{t} λ_{0}\right) + P_{in} a_{ex} λ_{0}\right)\end{equation*}$ ```julia println(expr5.coeff(λ, 0)) expr5.coeff(λ, 0) ``` -K_d^2*P_out*Δr^2*κ_t^2*λ_0^2 $\begin{equation*}- K_{d}^{2} P_{out} Δr^{2} κ_{t}^{2} λ_{0}^{2}\end{equation*}$ ```julia expr5 ``` $\begin{equation*}- K_{d}^{2} P_{out} Δr^{2} κ_{t}^{2} λ_{0}^{2} + λ^{3} \left(K_{M} K_{d} Δr κ_{t} λ_{0} - K_{d}^{2} Δr^{2} κ_{t}^{2} + K_{d} V_{0} Δr κ_{t}\right) + λ^{2} \left(K_{M} K_{d} P_{out} Δr κ_{t} λ_{0} - K_{M} K_{d} Δr κ_{t} λ_{0}^{2} + K_{M} P_{in} a_{ex} λ_{0}^{2} - K_{d}^{2} P_{out} Δr^{2} κ_{t}^{2} + 2 K_{d}^{2} Δr^{2} κ_{t}^{2} λ_{0} - K_{d} P_{in} a_{ex} Δr κ_{t} λ_{0} - K_{d} V_{0} Δr κ_{t} λ_{0}\right) + λ \left(- K_{M} K_{d} P_{out} Δr κ_{t} λ_{0}^{2} + 2 K_{d}^{2} P_{out} Δr^{2} κ_{t}^{2} λ_{0} - K_{d}^{2} Δr^{2} κ_{t}^{2} λ_{0}^{2} + K_{d} P_{in} a_{ex} Δr κ_{t} λ_{0}^{2}\right)\end{equation*}$ ```julia function solve_polynomial(aex, λ0, κt, Kd, KM, j, V0, Δr) c = zeros(4) c1 = κt * Kd * Δr c[4] = c1 * (KM * λ0 - c1 + V0) c[3] = KM * j * aex * λ0^2 - c1^2 * (j - 2λ0) - c1 * λ0 * (-KM * j + KM * λ0 + j * aex + V0) c[2] = c1 * λ0 * (-KM * j * λ0 + c1 * (2j - λ0) + j * aex * λ0) c[1] = -Kd^2*j*Δr^2*κt^2*λ0^2 pol = Polynomial(c) return Polynomials.roots(pol) end ``` solve_polynomial (generic function with 1 method) ```julia _aex = 0 # µM _λ0 = 0.68 # h**-1 _κt = 0.06 # µM**-1 h**-1 _Kd = .1 # µM _KM = 100 # µM _j = 10# h**-1 Pin = 10 Pout = 3 _V0 = 0 _Δr = 46.5 # µM a_ex_range = [0, 1] sol_list = zeros(ComplexF64, 3, length(a_ex_range)) for (i, x) in enumerate(a_ex_range) # Pack parameters together args = (x, _λ0, _κt, _Kd, _KM, _j, _V0, _Δr) # Find roots sol_list[:, i] = solve_polynomial(args...) end ``` ```julia sol_list[:, 2] ``` 3-element Vector{ComplexF64}: -33.89338895304312 + 0.0im -0.0028014943665923423 + 0.0im 0.20062909641230975 + 0.0im ```julia p = scatter( ylim=[0, 1], xscale=:log, legendtitle = "V0", ylabel="λ/λ0", xlabel="a_ex", title="Is this the right font?" ) V_0_list = [0, 1000, 5000, 10000] for (_V0, c) in zip(V_0_list, palette(:BuPu_6)[2:end]) y = [] x = Float64[] for i in 1:length(a_ex_range) args = (a_ex_range[i], _λ0, _κt, _Kd, _KM, _j, _V0, _Δr) solutions = solve_polynomial(args...) _y = [imag(x) == 0 ? real(x)/_λ0 : missing for x in solutions] push!(y, _y...) _x = ones(length(_y)) .* a_ex_range[i] push!(x, _x...) end scatter!(p, x, y, color=c, label="$_V0") end savefig(p, "res_model.pdf") p ``` ## Writing Generative Model Now we are going to write down the model in a Bayesian way and try out various priors. 
\begin{align} \lambda_0 &= 0.68 h^{-1},\\[.5em] \kappa_t &= 0.06 \mu M^{-1} h^{-1},\\[.5em] K_d &= 0.1 \mu M,\\[.5em] K_M &= 10 \mu M,\\[.5em] \Delta r &= 46.5 \mu M,\\[.5em] \log j &\sim \text{Norm}(2, 2),\\[.5em] j &= 10^{\log j} \times 1 s^{-1},\\[.5em] \log P_\mathrm{in} &\sim \text{Norm}(2, 2),\\[.5em] P_\mathrm{in} &= 10^{\log P_\mathrm{in}} \times 1 s^{-1},\\[.5em] \log P_\mathrm{out} &\sim \text{Norm}(2, 2),\\[.5em] P_\mathrm{out} &= 10^{\log P_\mathrm{out}} \times 1 s^{-1},\\[.5em] \log V_0 &\sim \text{Norm}(2, 2),\\[.5em] V_0 &= 10^{\log V_0 } \times 1 \mu M s^{-1} \end{align} ```julia @model function fit_growth(growth_rates, aex_arr) log_j ~ Normal(2, .2) log_V0 ~ Normal(2, .2) log_sigma ~ Normal(-3, 1) sigma = float(10^log_sigma) j = 10^log_j V0 = 10^log_V0 λ0 = 0.68 κt = 0.06 Kd = .1 KM = 10 Δr = 46.5 for (i, aex) in enumerate(aex_arr) λ = growth_rates[i] println(i) println(typeof(V0)) x = [aex, λ0, κt, Kd, KM, j, V0, Δr] solutions = solve_polynomial(x) #y = [imag(x) == 0 ? real(x)/λ0 : missing for x in solutions] #y = filter(x -> !ismissing(x) && (x > 0), y) #if length(y) == 3 #deleteat!(y, 2) #opt_y = y[argmin([(λ - root)^2 for root in y])] #else #opt_y = y[1] #end #println(opt_y) #println(typeof(opt_y)) #println() #growth_rates[i] ~ Normal(opt_y / λ0, sigma) end end ``` fit_growth (generic function with 3 methods) ```julia y = Float64[] for i in 1:length(a_ex_range) args = (a_ex_range[i], _λ0, _κt, _Kd, _KM, _j, 0, _Δr) solutions = solve_polynomial(args...) _y = [imag(x) == 0 ? real(x)/_λ0 : missing for x in solutions] push!(y, _y[1]) end ``` ```julia chn = sample(fit_growth(y, a_ex_range), NUTS(0.65), 100) ``` ```julia chn ``` ```julia ?ForwardDiff.Dual ``` ```julia ?eigvals! ``` search: eigvals! eigvals ``` eigvals!(A; permute::Bool=true, scale::Bool=true, sortby) -> values ``` Same as [`eigvals`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. The `permute`, `scale`, and `sortby` keywords are the same as for [`eigen`](@ref). !!! note The input matrix `A` will not contain its eigenvalues after `eigvals!` is called on it - `A` is used as a workspace. # Examples ```jldoctest julia> A = [1. 2.; 3. 4.] 2×2 Matrix{Float64}: 1.0 2.0 3.0 4.0 julia> eigvals!(A) 2-element Vector{Float64}: -0.3722813232690143 5.372281323269014 julia> A 2×2 Matrix{Float64}: -0.372281 -1.0 0.0 5.37228 ``` --- ``` eigvals!(A, B; sortby) -> values ``` Same as [`eigvals`](@ref), but saves space by overwriting the input `A` (and `B`), instead of creating copies. !!! note The input matrices `A` and `B` will not contain their eigenvalues after `eigvals!` is called. They are used as workspaces. # Examples ```jldoctest julia> A = [1. 0.; 0. -1.] 2×2 Matrix{Float64}: 1.0 0.0 0.0 -1.0 julia> B = [0. 1.; 1. 0.] 2×2 Matrix{Float64}: 0.0 1.0 1.0 0.0 julia> eigvals!(A, B) 2-element Vector{ComplexF64}: 0.0 - 1.0im 0.0 + 1.0im julia> A 2×2 Matrix{Float64}: -0.0 -1.0 1.0 -0.0 julia> B 2×2 Matrix{Float64}: 1.0 0.0 0.0 1.0 ``` --- ``` eigvals!(A::Union{SymTridiagonal, Hermitian, Symmetric}, irange::UnitRange) -> values ``` Same as [`eigvals`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. `irange` is a range of eigenvalue *indices* to search for - for instance, the 2nd to 8th eigenvalues. --- ``` eigvals!(A::Union{SymTridiagonal, Hermitian, Symmetric}, vl::Real, vu::Real) -> values ``` Same as [`eigvals`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. 
`vl` is the lower bound of the interval to search for eigenvalues, and `vu` is the upper bound. ```julia ```
b5b14cad0185ba7fd682ca7b6d5a9da312948fcd
84,394
ipynb
Jupyter Notebook
code/exploratory/old/Untitled1-Copy1.ipynb
RPGroup-PBoC/fit_seq
7cc7931fd2edf421591de293354348fb940a1a32
[ "MIT" ]
null
null
null
code/exploratory/old/Untitled1-Copy1.ipynb
RPGroup-PBoC/fit_seq
7cc7931fd2edf421591de293354348fb940a1a32
[ "MIT" ]
null
null
null
code/exploratory/old/Untitled1-Copy1.ipynb
RPGroup-PBoC/fit_seq
7cc7931fd2edf421591de293354348fb940a1a32
[ "MIT" ]
null
null
null
49.585194
5,184
0.569851
true
5,377
Qwen/Qwen-72B
1. YES 2. YES
0.891811
0.600188
0.535255
__label__eng_Latn
0.216076
0.081905
# 多元高斯分布 一个向量形式的随机变量$X=\left[X_1\cdots X_n\right]^T$,期望为$\mu\in\mathbb R^n$,协方差矩阵为$\varSigma\in\mathbb S_{++}^n$(在[线性代数](sn01.ipynb)笔记中$\mathbb S_{++}^n$为$n\times n$正定对称矩阵空间,具体定义为$\mathbb S_{++}^n=\left\{A\in\mathbb R^{n\times n}: A=A^T,\ \forall x\in\mathbb R^n\land x\neq0\to x^TAx\gt0\right\}$),如果随机变量的概率密度函数(这篇笔记中我们使用$p(\bullet)$表示概率密度函数,代替[概率论](sn02.ipynb)笔记中的$f_X(\bullet)$)能够定义为: $$ p\left(x;\mu,\varSigma\right)=\frac{1}{\left(2\pi\right)^{n/2}\left\lvert\varSigma\right\rvert^{1/2}}\exp\left(-\frac{1}{2}\left(x-\mu\right)^T\varSigma^{-1}\left(x-\mu\right)\right) $$ 我们就称该随机变量服从**多元正态(高斯)分布(multivariate normal (or Gaussian) distribution)**,写作$X\sim\mathcal N\left(\mu,\varSigma\right)$。在这篇笔记中,我们简要讨论一下多元高斯分布的基本属性。 ## 1. 与单变量高斯分布的关系 回忆**单变量正态分布(a univariate normal (or Gaussian) distribution)**的概率密度函数: $$ P\left(x;\mu,\sigma^2\right)=\frac{1}{\sqrt{2\pi}\sigma}\exp\left(-\frac{1}{2\sigma^2}\left(x-\mu\right)^2\right) $$ 式中指数函数的参数$-\frac{1}{2\sigma^2}\left(x-\mu\right)^2$是一个关于$x$的二次函数,其二次项系数为负,是一个开口向下的抛物线(parabola)。而整个式子的系数$\frac{1}{\sqrt{2\pi}\sigma}$是一个常量,不依靠变量$x$,因此我们可以把它简单的看做是一个保证$\displaystyle\frac{1}{\sqrt{2\pi}\sigma}\int_{-\infty}^{\infty}\exp\left(-\frac{1}{2\sigma^2}\left(x-\mu\right)^2\right)=1$的“标准化因子”。 左图为关于$X$的单变量高斯分布的概率密度,右图为关于$X_1,X_2$的多元高斯分布的概率密度。 在多元高斯分布中,指数函数的参数$-\frac{1}{2}\left(x-\mu\right)^T\varSigma^{-1}\left(x-\mu\right)$是关于$x$的二次型。因为$\varSigma$是正定的(正定矩阵的逆矩阵仍是正定矩阵),则对于任意非零向量$z$有$z^T\varSigma^{-1}z\gt0$。这意味着对于任意$x\neq\mu$有: $$ \begin{align} \left(x-\mu\right)^T\varSigma^{-1}\left(x-\mu\right)&\gt0\\ -\frac{1}{2}\left(x-\mu\right)^T\varSigma^{-1}\left(x-\mu\right)&\lt0 \end{align} $$ 类似单变量中的情形,我们可以把指数函数的参数想象成一个开口向下的抛物面。而整个式子的系数$\displaystyle\frac{1}{\left(2\pi\right)^{n/2}\left\lvert\varSigma\right\rvert^{1/2}}$比单变量情形下的系数更加复杂,不过它依然不依靠$x$,所以我们仍然可以将其看做一个用来保证$\displaystyle\frac{1}{\left(2\pi\right)^{n/2}\left\lvert\varSigma\right\rvert^{1/2}}\int_{-\infty}^{\infty}\int_{-\infty}^{\infty}\cdots\int_{-\infty}^{\infty}\exp\left(-\frac{1}{2}\left(x-\mu\right)^T\varSigma^{-1}\left(x-\mu\right)\right)\mathrm dx_1\mathrm dx_2\cdots\mathrm dx_n=1$的“标准化因子”。 ## 2. 
协方差矩阵 理解协方差矩阵的概念是掌握多元高斯分布的关键。回忆对于一对随机变量$X,Y$,其**协方差(covariance)**定义为: $$ \mathrm{Cov}[X,Y]=\mathrm E\left[(X-\mathrm E[X])(Y-\mathrm E[Y])\right]=\mathrm E[XY]-\mathrm E[X]\mathrm E[Y] $$ 在处理多个变量时,协方差矩阵提供了一种对所有“变量对”协方差的简明表达。具体的讲,协方差矩阵(通常记为$\varSigma$)是一个$n\times n$矩阵,它的第$(i,j)$个元素为$\mathrm{Cov}[X_i,X_j]$。 下列命题为我们提供了另一种关于随机变量$X$的协方差矩阵的描述(证明见附录A.1): **命题1:**对于任意一个期望为$\mu$协方差矩阵为$\varSigma$的随机变量$X$,有: $$ \varSigma=\mathrm E\left[(X-\mu)(X-\mu)^T\right]=\mathrm E\left[XX^T\right]-\mu\mu^T\tag{1} $$ 在多元高斯分布的定义中,我们要求协方差矩阵$\varSigma$是一个对称正定矩阵(即$\varSigma\in\mathbb S_{++}^n$)。为什么需要这样的约束条件?在下面的命题中可以看到,*任意*随机向量的协方差矩阵必须是对称半正定矩阵: **命题2:**假设$\varSigma$是某个随机向量$X$的协方差矩阵,则$\varSigma$一定是对称半正定矩阵。 **证明:**从$\varSigma$的定义中可以直接看到起对称性($\varSigma=\varSigma^T$),接下来我们证明它是半正定矩阵,对于任意$z\in\mathbb R^n$: $$ \begin{align} z^T\varSigma z&=\sum_{i=1}^n\sum_{j=1}^n\left(\varSigma_{ij}z_iz_j\right)\tag{2}\\ &=\sum_{i=1}^n\sum_{j=1}^n\left(\mathrm{Cov}\left[X_i,X_j\right]\cdot z_iz_j\right)\\ &=\sum_{i=1}^n\sum_{j=1}^n\left(\mathrm E\left[\left(X_i-\mathrm E\left[X_i\right]\right)\left(X_j-\mathrm E\left[X_j\right]\right)\right]\cdot z_iz_j\right)\\ &=\mathrm E\left[\sum_{i=1}^n\sum_{j=1}^n\left(X_i-\mathrm E\left[X_i\right]\right)\left(X_j-\mathrm E\left[X_j\right]\cdot z_iz_j\right)\right]\tag{3} \end{align} $$ $(2)$式就是二次型展开后的样子(见[线性代数](sn01.ipynb)笔记),$(3)$式利用了期望的线性性质(见[概率论](sn02.ipynb)笔记)。 为了完成证明,现在观察中括号内的项形为$\sum_i\sum_jx_ix_jz_iz_j=\left(x^Tz\right)^2\geq0$(参考[问题集1](cs229.stanford.edu/materials/ps1.pdf))。因此,期望括号内的这个量总是非负的,则期望本身总是非负的,即$z^T\varSigma z\geq0$。 上面的命题证明了一个合法的协方差矩阵$\varSigma$总是对称半正定的。而为了使$\varSigma^{-1}$存在(出现在多元高斯分布定义式中),则$\varSigma$必须是可逆的(即满秩)。由于任意满秩的对称半正定矩阵必定是对称正定矩阵,则有$\varSigma$一定是对称正定矩阵。 ## 3. 对角协方差矩阵的情形 为了对多元高斯分布有一个直观认识,我们来看一个简单的例子,$n=2$且协方差矩阵$\varSigma$为对角矩阵时: $$ x=\begin{bmatrix}x_1\\x_2\end{bmatrix}\qquad\mu=\begin{bmatrix}\mu_1\\\mu_2\end{bmatrix}\qquad\varSigma=\begin{bmatrix}\sigma_1^2&0\\0&\sigma_2^2\end{bmatrix} $$ 在这样的条件下,多元高斯分布的概率密度为: $$ \begin{align} p\left(x;\mu,\varSigma\right)&=\frac{1}{2\pi\begin{vmatrix}\sigma_1^2&0\\0&\sigma_2^2\end{vmatrix}^{1/2}}\exp\left(-\frac{1}{2}\begin{bmatrix}x_1-\mu_1\\x_2-\mu_2\end{bmatrix}^T\begin{bmatrix}\sigma_1^2&0\\0&\sigma_2^2\end{bmatrix}^{-1}\begin{bmatrix}x_1-\mu_1\\x_2-\mu_2\end{bmatrix}\right)\\ &=\frac{1}{2\pi\left(\sigma_1^2\cdot\sigma_2^2-0\cdot0\right)^{1/2}}\exp\left(-\frac{1}{2}\begin{bmatrix}x_1-\mu_1\\x_2-\mu_2\end{bmatrix}^T\begin{bmatrix}\frac{1}{\sigma_1^2}&0\\0&\frac{1}{\sigma_2^2}\end{bmatrix}^{-1}\begin{bmatrix}x_1-\mu_1\\x_2-\mu_2\end{bmatrix}\right) \end{align} $$ 这一步使用了$2\times 2$矩阵行列式的计算式$\begin{vmatrix}a&b\\c&d\end{vmatrix}=ad-bc$;而对角矩阵求逆就是将对角线上的每个元素求导。继续我们的演算: $$ \begin{align} p\left(x;\mu,\varSigma\right)&=\frac{1}{2\pi\sigma_1\sigma_2}\exp\left(-\frac{1}{2}\begin{bmatrix}x_1-\mu_1\\x_2-\mu_2\end{bmatrix}^T\begin{bmatrix}\frac{1}{\sigma_1^2}\left(x_1-\mu_1\right)\\\frac{1}{\sigma_2^2}\left(x_2-\mu_2\right)\end{bmatrix}\right)\\ &=\frac{1}{2\pi\sigma_1\sigma_2}\exp\left(-\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2-\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\right)\\ &=\frac{1}{2\pi\sigma_1}\exp\left(-\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2\right)\cdot\frac{1}{2\pi\sigma_2}\exp\left(-\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\right) \end{align} $$ 很容易看出最后一个式子是两个独立的高斯分布的乘积,其中一个期望为$\mu_1$方差为$\sigma_1^2$,另一个期望为$\mu_2$方差为$\sigma_2^2$。推广到一般情况,一个期望为$\mu\in\mathbb R^n$、协方差为对角矩阵$\mathrm{diag}\left(\sigma_1^2,\sigma_2^2,\cdots,\sigma_n^2\right)$的$n$维高斯分布,与一组由$n$个相互独立的以$\mu_i$为期望、$\sigma_i^2$为方差的高斯分布组成的分布是相同的。 ## 4. 
Isocontours 另一种了解多元高斯分布概念的方法是理解它的**isocontours**。对于函数$f:\mathbb R^2\to\mathbb R$,其等值线是一个集合$\left\{x\in\mathbb R^2: f(x)=c, c\in\mathbb R\right\}$(isocontours也称作level curves。一般的函数$f:\mathbb R^n\to\mathbb R$的**level set**是一个形为$\left\{x\in\mathbb R^2: f(x)=c, c\in\mathbb R\right\}$的集合)。 ### 4.1 Isocontours的形状 多元高斯分布的isocontours是什么样的?我们继续使用前面简单的例子,$n=1$且$\varSigma$为对角矩阵: $$ x=\begin{bmatrix}x_1\\x_2\end{bmatrix}\qquad\mu=\begin{bmatrix}\mu_1\\\mu_2\end{bmatrix}\qquad\varSigma=\begin{bmatrix}\sigma_1^2&0\\0&\sigma_2^2\end{bmatrix} $$ 上一小节的最后我们得到了: $$ \begin{align} p\left(x;\mu,\varSigma\right)&=\frac{1}{2\pi\sigma_1\sigma_2}\exp\left(-\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2-\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\right)\tag{4} \end{align} $$ 现在来考虑一下$p\left(x;\mu,\varSigma\right)=c$时由平面上所有点组成的level set,其中$c\in\mathbb R$为某些常数。计算所有$x_1,x_2\in\mathbb R$: $$ \begin{align} c&=\frac{1}{2\pi\sigma_1\sigma_2}\exp\left(-\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2-\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\right)\\ 2\pi c\sigma_1\sigma_2&=\exp\left(-\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2-\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\right)\\ \log\left(2\pi c\sigma_1\sigma_2\right)&=-\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2-\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\\ \log\left(\frac{1}{2\pi c\sigma_1\sigma_2}\right)&=\frac{1}{2\sigma_1^2}\left(x_1-\mu_1\right)^2+\frac{1}{2\sigma_2^2}\left(x_2-\mu_2\right)^2\\ 1&=\frac{\left(x_1-\mu_1\right)^2}{2\sigma_1^2\log\left(\frac{1}{2\pi c\sigma_1\sigma_2}\right)}+\frac{\left(x_2-\mu_2\right)^2}{2\sigma_2^2\log\left(\frac{1}{2\pi c\sigma_1\sigma_2}\right)} \end{align} $$ 定义$\displaystyle r_1=\sqrt{2\sigma_1^2\log\left(\frac{1}{2\pi c\sigma_1\sigma_2}\right)},\ r_2=\sqrt{2\sigma_2^2\log\left(\frac{1}{2\pi c\sigma_1\sigma_2}\right)}$,则有: $$ 1=\left(\frac{x_1-\mu_1}{r_1}\right)^2+\left(\frac{x_2-\mu_2}{r_2}\right)^2\tag{5} $$ $(5)$式就是高中解析几何讲过的长短轴与坐标轴平行的**椭圆(axis-aligned ellipse)**,其中心位于$\left(\mu_1,\mu_2\right)$,与$x_1$平行的轴长为$2r_1$,与$x_2$平行的轴长为$2r_2$。 ## 4.2 轴长 (左图为以$\mu=\begin{bmatrix}3\\2\end{bmatrix}$为期望、以对角矩阵$\varSigma=\begin{bmatrix}25&0\\0&9\end{bmatrix}$为协方差矩阵的概率密度函数的热力图。可以看到椭圆的中心位于$(3,2)$,椭圆的长短轴之比为$5:3$。右图为以$\mu=\begin{bmatrix}3\\2\end{bmatrix}$为期望、以对角矩阵$\varSigma=\begin{bmatrix}10&5\\5&5\end{bmatrix}$为协方差矩阵的概率密度函数的热力图,它的长短轴并不与坐标轴垂直。椭圆的中心仍在$(3,2)$,但是长短轴被某个线性变换旋转了一个角度。) 为了更好的理解level curves的形状是如何随着多元高斯分布的随机变量的改变而改变的,假设我们对$c$取高斯分布概率密度峰值$1/e$时的$r_1,r_2$感兴趣。 先观察$(4)$式的最大值,此时$x_1=\mu_1,x_2=\mu_2$,代回$(4)$式得到高斯分布概率密度的峰值为$\frac{1}{2\pi\sigma_1\sigma_2}$。 再令$c=\frac{1}{e}\left(\frac{1}{2\pi\sigma_1\sigma_2}\right)$,带入$r_1,r_2$求得: $$ \begin{eqnarray} r_1&=\sqrt{2\sigma_1^2\log\left(\frac{1}{2\pi\sigma_1\sigma_2\cdot\frac{1}{e}\left(\frac{1}{2\pi\sigma_1\sigma_2}\right)}\right)}&=\sigma_1\sqrt 2\\ r_2&=\sqrt{2\sigma_2^2\log\left(\frac{1}{2\pi\sigma_1\sigma_2\cdot\frac{1}{e}\left(\frac{1}{2\pi\sigma_1\sigma_2}\right)}\right)}&=\sigma_2\sqrt 2 \end{eqnarray} $$ 从这里可以看出,对于第$i$个维度(分量$x_i$),使其概率达到高斯分布概率密度峰值的$1/e$的相应轴长($r_i$)与该维度的相应标准差$\sigma_i$呈正比。从直觉上讲这也是对的:随机变量$x_i$的方差越小,则在该维度上高斯分布图像的峰值就越“紧凑”,于是反映在isocontours上椭圆在该维度上的轴$r_i$就越短。 ## 4.3 非对角协方差矩阵及高维情形 很明显,上面的推导依赖于$\varSigma$是对角矩阵这一假设,不过即使在非对角矩阵的情形下,推导的结论也有相似之处。推广到一般情况下,isocontours的图像不再是长短轴与坐标轴平行的椭圆了,现在的椭圆这是被**旋转**了一个角度而已。再推广到高维情形下,在$n$维环境中,只有level set的几何形态变成了$\mathbb R^n$中的椭球面而已。 ## 5. 
线性变换的解释 前几个小节我们主要关注对具有角型协方差矩阵的多元高斯分布是如何变化的。我们还发现具有对角型协方差矩阵的$n$维多元高斯分布其实可以被看做是以$\mu_i$为期望、$\sigma_i^2$为方差的$n$个相互独立的高斯分布的随机变量组成的分布。在这一小节,我们再从变量值的角度解释一下非对角协方差矩阵的情形。 这一小节的关键在于下面这个定理(证明见附录A.2中): **定理1:**令$X\sim\mathcal N\left(\mu,\varSigma\right),\ \mu\in\mathbb R^n,\ \sigma\in\mathbb S_{++}^n$,则存在矩阵$B\in\mathbb R^n$,若定义$Z=B^{-1}X(X-\mu)$,则$Z\sim\mathcal N(0,I)$。 注意到如果$Z\sim\mathcal N(0,I)$,利用第4节的知识,则$Z$可以被看做是由$n$个相互独立的标准正态分布($Z_i\sim\mathcal N(0,1)$)组成的。再进一步,如果$Z=B^{-1}(X-\mu)$,则用简单的代数就可以知道$X=BZ+\mu$。 因此,这个定理指出,任何服从多元高斯分布的随机变量$X$都能够通过一个线性变换($X=BZ+\mu$)分解为$n$个相互独立的标准正态分布。 ## 附录A.1 我们来证明$(1)$式的前一个等式 (后一个等式直接展开即可得到$\mathrm E\left[(X-\mu)(X-\mu)^T\right]=\mathrm E\left[XX^T-X\mu^T-\mu X^T+\mu\mu^T\right]=\mathrm E\left[XX^T\right]-\mathrm E[X]\mu^T-\mu\mathrm E\left[X^T\right]+\mu\mu^T=\mathrm E\left[XX^T\right]-\mu\mu^T$,注意$E[X]=\mu$,而常数的期望是常数。) $$ \begin{align} \varSigma&= \begin{bmatrix} \mathrm{Cov}[X_1,X_1]&\cdots&\mathrm{Cov}[X_1,X_n]\\ \vdots&\ddots&\vdots\\ \mathrm{Cov}[X_n,X_1]&\cdots&\mathrm{Cov}[X_n,X_n] \end{bmatrix}\\ &=\begin{bmatrix} \mathrm{E}\left[(X_1-\mu_1)^2\right]&\cdots&\mathrm{E}\left[(X_1-\mu_1)(X_n-\mu_n)\right]\\ \vdots&\ddots&\vdots\\ \mathrm{E}\left[(X_n-\mu_n)(X_1-\mu_1)\right]&\cdots&\mathrm{E}\left[(X_n-\mu_n)^2\right] \end{bmatrix}\\ &=\mathrm{E}\begin{bmatrix} (X_1-\mu_1)^2&\cdots&(X_1-\mu_1)(X_n-\mu_n)\\ \vdots&\ddots&\vdots\\ (X_n-\mu_n)(X_1-\mu_1)&\cdots&(X_n-\mu_n)^2 \end{bmatrix}\tag{6}\\ &=\mathrm E\begin{bmatrix}\begin{bmatrix}X_1-\mu_1\\\vdots\\X_n-\mu_n\end{bmatrix}\begin{bmatrix}X_1-\mu_1\cdots X_n-\mu_n\end{bmatrix}\end{bmatrix}\tag{7}\\ &=\mathrm E\left[(X-\mu)(X-\mu)^T\right] \end{align} $$ $(6)$式的根据是矩阵的期望就是对矩阵每一个元素取期望,而$(7)$式的根据是向量乘法: $$ zz^T=\begin{bmatrix}z_1\\z_2\\\vdots\\z_n\end{bmatrix}\begin{bmatrix}z_1&z_2&\cdots&z_n\end{bmatrix} =\begin{bmatrix}z_1z_1&z_1z_2&\cdots&z_1z_n\\z_2z_1&z_2z_2&\cdots&z_2z_n\\\vdots&\vdots&\ddots&\vdots\\z_nz_1&z_nz_2&\cdots&z_nz_n\end{bmatrix} $$ ## 附录A.2 **证明定理1:**令$X\sim\mathcal N\left(\mu,\varSigma\right),\ \mu\in\mathbb R^n,\ \sigma\in\mathbb S_{++}^n$,则存在矩阵$B\in\mathbb R^n$,若定义$Z=B^{-1}X(X-\mu)$,则$Z\sim\mathcal N(0,I)$。 证明分为两步:我们先要证明$\varSigma$可以被分解为$\varSigma=BB^T$的形式,其中$B$是某个可逆矩阵;之后再将随机变量$X$使用线性变换$Z=B^{-1}(X-\mu)$变为随机变量$Z$。 **第一步:分解协方差矩阵。**回忆[线性代数](sn01.ipynb)笔记中关于对称矩阵的两个性质(见“对称矩阵的特征值与特征向量”一节): 1. 任意实对称矩阵$A\in\mathbb R^{n\times n}$必定能被写成$A=U\varLambda U^T$的形式,其中$U$是一个满秩正交矩阵,每一列都来自$A$的特征向量;$\varLambda$是一个对角矩阵,对角线元素均来自$A$的特征值。 2. 
如果$A$是对称正定矩阵,则$A$的特征值均为正值。 由于协方差矩阵$\varSigma$是一个正定矩阵,则根据性质1就可以使用恰当的$U,\varLambda$将矩阵分解为$A=U\varLambda U^T$。再根据第二个性质,可以定义矩阵$\varLambda^{1/2}\in\mathbb R^{n\times n}$,该对角矩阵中对角线元素皆为原$\varLambda$对角线元素的平方根。所以有$\varLambda=\varLambda^{1/2}\left(\varLambda^{1/2}\right)^T$,那么可以将$\varSigma$进一步分解为: $$ \varSigma= U\varLambda U^T= U\varLambda^{1/2}\left(\varLambda^{1/2}\right)^TU^T= U\varLambda^{1/2}\left(U\varLambda^{1/2}\right)^T= BB^T $$ 其中$B=U\varLambda^{1/2}$。(关于$B$是可逆矩阵:很明显正交矩阵$U$是可逆的,而可逆矩阵$U$右乘满秩对角矩阵后,仅会对$U$每列的大小产生影响,并不会改变$U$的秩,得证。)于是可以得到$\varSigma^{-1}=B^{-T}B^{-1}$,将其代入多元高斯分布的概率密度函数: $$ p\left(x;\mu,\varSigma\right)=\frac{1}{\left(2\pi\right)^{n/2}\left\lvert\varSigma\right\rvert^{1/2}}\exp\left(-\frac{1}{2}\left(x-\mu\right)^TB^{-T}B^{-1}\left(x-\mu\right)\right)\tag{8} $$ **第二步:改变随机变量。**定义向量形式的随机变量$Z=B^{-1}(X-\mu)$。介绍一个概率论基本公式(并没有在概率论的笔记中出现),用来描述原随机变量与变更后随机变量间的关系: * 设随机变量$X=[X_1,\cdots,X_n]^T\in\mathbb R^n$是一个向量形随机变量,其联合概率密度函数为$f_X:\mathbb R^n\to \mathbb R$。若$Z=H(X)\in\mathbb R^n$,其中$H$是一个双射可微函数,则随机变量$Z$的联合概率密度函数为$f_Z:\mathbb R^n\to\mathbb R$,其中$f_z$定义为: $$ f_Z(z)=f_X(x)\cdot \left\lvert \det\left( \begin{bmatrix} \frac{\partial x_1}{\partial z_1}&\cdots&\frac{\partial x_1}{\partial z_n}\\ \vdots&\ddots&\vdots\\ \frac{\partial x_n}{\partial z_1}&\cdots&\frac{\partial x_n}{\partial z_n} \end{bmatrix} \right) \right\rvert $$ 使用改变随机变量的公式,(此处跳过线性代数计算)可以发现随机变量$Z$具有如下的联合概率密度: $$ p_Z(z)=\frac{1}{(2\pi)^{n/2}}\exp\left(-\frac{1}{2}z^Tz\right)\tag{9} $$ 得证。
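A quick numerical check of Theorem 1 in this section (a multivariate Gaussian is the linear transformation $X = BZ + \mu$ of i.i.d. standard normals, with $\Sigma = BB^T$). This is a small NumPy sketch; the particular $\mu$ and $\Sigma$ are arbitrary example values, and a Cholesky factor is used in place of the eigendecomposition $B = U\Lambda^{1/2}$ from the text:

```python
import numpy as np

rng = np.random.default_rng(0)

# arbitrary example parameters (Sigma must be symmetric positive definite)
mu = np.array([1.0, -2.0])
Sigma = np.array([[2.0, 0.6],
                  [0.6, 1.0]])

# factor Sigma = B B^T; Cholesky gives one valid choice of B
B = np.linalg.cholesky(Sigma)

# draw Z ~ N(0, I) and map it through the linear transformation X = B Z + mu
Z = rng.standard_normal((2, 100_000))
X = B @ Z + mu[:, None]

print("sample mean      :", X.mean(axis=1))       # should be close to mu
print("sample covariance:\n", np.cov(X))          # should be close to Sigma
```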
19e5906e34b3f3898fb7beb651c9b165c4a20913
17,195
ipynb
Jupyter Notebook
0-BasicConcept/note/sn07.ipynb
PeterChenYijie/MachineLearningZeroToALL
b14005c3e0b5a39a0ba82db5c9791f682b5effd5
[ "MIT" ]
8
2018-04-20T09:10:20.000Z
2019-02-16T07:50:32.000Z
0-BasicConcept/note/sn07.ipynb
DeepInDeeper/MachineLearningZeroToALL
b14005c3e0b5a39a0ba82db5c9791f682b5effd5
[ "MIT" ]
null
null
null
0-BasicConcept/note/sn07.ipynb
DeepInDeeper/MachineLearningZeroToALL
b14005c3e0b5a39a0ba82db5c9791f682b5effd5
[ "MIT" ]
4
2020-01-27T00:55:59.000Z
2021-03-25T00:07:56.000Z
57.316667
524
0.566967
true
8,316
Qwen/Qwen-72B
1. YES 2. YES
0.817574
0.63341
0.51786
__label__yue_Hant
0.115062
0.041492
```python from epipack import SymbolicEpiModel, DeterministicEpiModel, StochasticEpiModel import sympy as sy %matplotlib notebook ``` ```python %matplotlib notebook ``` ```python S, I, R, eta, rho, omega = sy.symbols("S I R eta rho omega") ``` ```python SIRS = SymbolicEpiModel([S,I,R]) SIRS.set_processes([ #### transmission process #### # S + I (eta)-> I + I (S, I, eta, I, I), #### transition processes #### # I (rho)-> R # R (omega)-> S (I, rho, R), (R, omega, S), ]) ``` <epipack.symbolic_epi_models.SymbolicEpiModel at 0x119a67ad0> ```python SIRS.ODEs() ``` [Eq(Derivative(S, t), -I*S*eta + R*omega), Eq(Derivative(I, t), I*(S*eta - rho)), Eq(Derivative(R, t), I*rho - R*omega)] ```python SIRS.find_fixed_points() ``` $\displaystyle \left\{\left( S, \ 0, \ 0\right), \left( \frac{\rho}{\eta}, \ \frac{R \omega}{\rho}, \ R\right)\right\}$ ```python SIRS.jacobian() ``` $\displaystyle \left[\begin{matrix}- I \eta & - S \eta & \omega\\I \eta & S \eta - \rho & 0\\0 & \rho & - \omega\end{matrix}\right]$ ```python SIRS.get_eigenvalues_at_disease_free_state() ``` {-omega: 1, eta - rho: 1, 0: 1} ```python S, I, R = list("SIR") R0 = 2.5 rho = recovery_rate = 1 # let's say 1/days eta = infection_rate = R0 * recovery_rate omega = 1/14 # in units of 1/days SIRS = DeterministicEpiModel([S,I,R]) SIRS.set_processes([ #### transmission process #### # S + I (eta)-> I + I (S, I, eta, I, I), #### transition processes #### # I (rho)-> R # R (omega)-> S (I, rho, R), (R, omega, S), ]) SIRS.set_initial_conditions({S:1-0.01, I:0.01}) ``` <epipack.deterministic_epi_models.DeterministicEpiModel at 0x119f62710> ```python t = np.linspace(0,40,1000) result = SIRS.integrate(t) ``` ```python import matplotlib.pyplot as pl pl.figure() for compartment, timeseries in result.items(): pl.plot(t, timeseries, label=compartment) pl.xlabel(r"time $t\times\rho$") pl.ylabel("incidence") pl.ylim([0,1]) pl.legend() pl.gcf().tight_layout() ``` <IPython.core.display.Javascript object> ```python import networkx as nx k0 = 50 eta = R0 * rho / k0 N = int(1e4) edges = [ (e[0], e[1], 1.0) for e in nx.fast_gnp_random_graph(N,k0/(N-1)).edges() ] SIRS = StochasticEpiModel([S,I,R],N,edge_weight_tuples=edges) SIRS.set_link_transmission_processes([ #### transmission process #### # I + S (eta)-> I + I (I, S, eta, I, I), ]) SIRS.set_node_transition_processes([ #### transition processes #### # I (rho)-> R # R (omega)-> S (I, rho, R), (R, omega, S), ]) SIRS.set_random_initial_conditions({S:N-int(1e-2*N), I:int(1e-2*N)}) ``` <epipack.stochastic_epi_models.StochasticEpiModel at 0x11c174e50> ```python t_s, result_s = SIRS.simulate(40) ``` ```python pl.figure() for compartment, timeseries in result.items(): #pl.plot(t, timeseries, label=compartment) pl.plot(t_s, result_s[compartment]/N, label=compartment+" (sim.)") pl.xlabel(r"time $t\times\rho$") pl.ylabel("incidence") pl.ylim([0,1]) pl.legend() pl.gcf().tight_layout() pl.show() ``` <IPython.core.display.Javascript object>
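As a cross-check of the fixed points reported by `find_fixed_points` above, the endemic equilibrium of the normalized SIRS model ($S+I+R=1$) can be written in closed form. A standalone sketch in plain NumPy-free Python, using the same $R_0=2.5$, $\rho=1$, $\omega=1/14$ as the deterministic run:

```python
# Endemic equilibrium of the normalized SIRS model, S + I + R = 1:
#   eta * S* = rho        =>  S* = rho / eta = 1 / R0
#   rho * I* = omega * R* =>  R* = rho * I* / omega
R0, rho, omega = 2.5, 1.0, 1 / 14

S_star = 1 / R0
I_star = (1 - 1 / R0) * omega / (omega + rho)
R_star = rho * I_star / omega

print(f"endemic equilibrium: S*={S_star:.3f}, I*={I_star:.3f}, R*={R_star:.3f}")
# For these parameters the deterministic integration should settle near these values at large t.
```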
fffb70aa17f69cf9b27d7d90af4427809d9caf39
248,315
ipynb
Jupyter Notebook
cookbook/notebooks/epipack_SIRS_example.ipynb
PaPeK/epipack
52fb26ba35672fbce1f2f598eac2ed71e6fcb90f
[ "MIT" ]
null
null
null
cookbook/notebooks/epipack_SIRS_example.ipynb
PaPeK/epipack
52fb26ba35672fbce1f2f598eac2ed71e6fcb90f
[ "MIT" ]
null
null
null
cookbook/notebooks/epipack_SIRS_example.ipynb
PaPeK/epipack
52fb26ba35672fbce1f2f598eac2ed71e6fcb90f
[ "MIT" ]
null
null
null
129.939822
87,055
0.812037
true
1,079
Qwen/Qwen-72B
1. YES 2. YES
0.91611
0.800692
0.733522
__label__eng_Latn
0.196617
0.542548
### Simplification - *Polynomial* ```python from sympy import * x,y,z = symbols('x y z') init_printing(use_unicode=False) ``` ```python simplify(sin(x)**2+cos(x)**2) simplify(x**3-y**3) simplify((x-y)*(x**2+x*y+y**2)) # Note: this func doesn't fit every expr, also maybe too slow. simplify(x**2+2*x+1) ``` ```python factor(x**2*y+x*y) factor(x**3-y**3) factor_list(x**3-y**3) ``` ```python expand((x+2)**2) expand((x-5)*(x-2)) factor(x**2+4*x+4) factor(x**2-7*x+10) ``` ```python factor(sin(x)**2+2*sin(x)*cos(x)+cos(x)**2) expand((cos(x)+sin(x))**2) ``` ```python expr = x*y**2*3+x-3+2*x**2-x**2*y+x**3 expr collect(expr,x) # the coefficient(系数) of x**n in factor collect(expr,x).coeff(3) collect(expr,x).coeff(x,2) collect(expr,x).coeff(x,3) ``` ```python expr = (x**2+2*x+5)/(x**3+x)+x expr # use the former one! cancel(expr) factor(expr) ``` ```python expr = (4*x**3+22*x**4+13)/(12*x+6*x**2) expr apart(expr) ``` ### Simplification - *Trigonometic* ```python cos(pi/4) acos(x) cos(acos(x)) ``` ```python # simplify is okay as well :) trigsimp( sin(x)**2 +cos(x)**2 ) trigsimp( sin(x)**4 -2*cos(x)**2*sin(x)**2 +cos(x)**4 ) ``` ```python trigsimp(expand_trig(sin(x+y))) print() expand_trig(sin(x+y)) expand_trig(cos(x+y)) print() expand_trig(sin(2*x)) expand_trig(cos(2*x)) ``` ### Simplification - *Powers* ```python # y got know: # x**a * y**a !always= (x*y)**a # (x**a)**b !always= x**(a*b) x,y = symbols('x y',positive=True) a,b = symbols('a b',real=True) z,t,c = symbols('z t c') ``` ```python sqrt(x) == x**Rational(1/2) ``` True ```python # x**(a+b) powsimp(x**a*x**b) expand_power_exp(x**(a+b)) # (x*y)**a powsimp(x**a*y**a) expand_power_base((x*y)**a) ``` ```python # "t,z,c isn't read/postive" # means that it (may) not valid for simplifying powsimp(t**c*z**c) powsimp(t**c*z**c,force=True) expand_power_base((z*t)**c) expand_power_base((z*t)**c,force=True) # or like this (there's a '2' inside) (z*t)**2 ``` ```python # valid -> expand (like "2**(5+3)" => "2**8") # not valid -> Nope! powdenest((x**a)**b) powdenest((z**a)**b) ``` ### Simplification - *Logarithms* ```python ln(x) == log(x) ``` True ```python x,y = symbols('x y',positive=True) n = symbols('n',real=True) ``` ```python # some rules: # valid (pos/real) -> do the thing you wanna do # not valid (uncer) -> not applied until u use 'force=True' ``` ```python expand_log(log(x*y)) expand_log(log(x/y)) expand_log(log(x**2)) expand_log(log(x**n)) # Hmm.. expand_log(log(x**z)) # not z*log(x) expand_log(log(x**z),force=True) # okay :> ``` ```python logcombine(log(x)+log(y)) logcombine(log(x)-log(y)) logcombine(n*log(x)) logcombine(n*log(z)) logcombine(n*log(z),force=True) ``` ### Special Functions ```python x,y,z = symbols('x y z') k,m,n = symbols('k m n') ``` ```python factorial(n) binomial(n,k) hyper([1,2],[3],z) ``` ```python sin(x).rewrite(cos) cos(x).rewrite(cos) tan(x).rewrite(sin) sin(2*x).rewrite(cos) ``` ```python expr = factorial(n)/factorial(n-3) expr combsimp(expr) ```
c3ed13f278dc5c8a9474729a853468e37b667fa4
108,084
ipynb
Jupyter Notebook
sympy-part02-simplify.ipynb
codingEzio/code_python_learn_math
bd7869d05e1b4ec250cc5fa13470a960b299654e
[ "Unlicense" ]
null
null
null
sympy-part02-simplify.ipynb
codingEzio/code_python_learn_math
bd7869d05e1b4ec250cc5fa13470a960b299654e
[ "Unlicense" ]
null
null
null
sympy-part02-simplify.ipynb
codingEzio/code_python_learn_math
bd7869d05e1b4ec250cc5fa13470a960b299654e
[ "Unlicense" ]
null
null
null
78.208394
3,276
0.817457
true
1,111
Qwen/Qwen-72B
1. YES 2. YES
0.960952
0.847968
0.814856
__label__eng_Latn
0.30154
0.731516
# Calculus

SymPy supports differentiation and integration, and it can also evaluate limits.

```python
from sympy import init_printing
init_printing(use_unicode=True)
```

```python
from sympy import symbols
x, y, z = symbols('x y z')
```

## `diff()` (differentiation)

```python
from sympy import diff
```

```python
diff(x**3+x**2+x+1)
```

`diff(exp,var,level)` can compute higher-order derivatives; the variable and the order must be specified.

```python
diff(x**3+x**2+x+1,x,2)
```

Similarly, partial derivatives can be computed.

```python
diff(x**3+x*y**2+x*y+1,x)
```

To create an unevaluated derivative, use the Derivative class. It has the same syntax as diff, but you must state explicitly with respect to which variable the derivative is taken.

```python
from sympy import Derivative
```

```python
exp = diff(x**3+x**2+x+1)
exp
```

```python
Derivative(exp,x)
```

To evaluate an instance of the Derivative class, use the expression's `doit()` method.

```python
Derivative(exp,x).doit()
```

## `integrate(exp,var)` (integration)

```python
from sympy import integrate
from sympy import cos
```

### Indefinite integrals

```python
exp = cos(x)
exp
```

```python
integrate(exp, x)
```

### Definite integrals

A definite integral generally has lower and upper limits, which can be passed by replacing var with the tuple (integration_variable, lower_limit, upper_limit). Note that in SymPy `oo` denotes infinity.

```python
from sympy import oo,exp
```

$ \int _0^\infty e^{-x} dx$

```python
integrate(exp(-x), (x, 0, oo))
```

### Multiple integrals

For example, suppose we want to evaluate the following double integral

$\int _{-\infty}^\infty \int _{-\infty}^\infty e^{-x^2-y^2} dx dy$

```python
integrate(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))
```

If an integral cannot be evaluated, it is returned unevaluated.

```python
expr = integrate(x**x, x)
expr
```

Just like differentiation, integration has a corresponding class, Integral, which is defined in a similar way.

```python
from sympy import Integral,log
```

```python
expr = Integral(log(x)**2, x)
expr
```

```python
expr.doit()
```

## Limits

SymPy can compute limits with the limit function.

```python
from sympy import limit,sin
```

```python
limit(sin(x)/x, x, 0)
```

```python
expr = x**2/exp(x)
expr.subs(x, oo)
```

$\displaystyle \text{NaN}$

```python
limit(expr, x, oo)
```

Limits also have a class, much like the derivative and integral classes above.

```python
from sympy import Limit
```

```python
expr = Limit((cos(x) - 1)/x, x, 0)
expr
```

```python
expr.doit()
```

## Taylor expansion

The Taylor formula describes the values of a function near a point using information about the function at that point. If the function is smooth enough and its derivatives of every order at a point are known, the Taylor formula uses those derivative values as coefficients to build a polynomial that approximates the function in a neighbourhood of the point. It also quantifies the deviation between this polynomial and the actual function values.

SymPy can expand a function as a Taylor series around a point. It uses the expression's `.series(x, x0, n)` method, just as we did earlier when verifying Euler's formula.

```python
expr = exp(sin(x))
expr
```

```python
expr.series(x, 0, 4)
```

```python
from sympy import symbols
x0 = symbols("x0")
```

```python
expr.series(x, x0, 4)
```

## Finite differences

So far we have worked with analytic derivatives and with the original function itself. But what if we want an expression that estimates the derivative of a curve when no closed-form representation is available, or when we do not yet know the function values? One approach is the finite difference method.

SymPy provides the `as_finite_difference()` interface, which can be called on any Derivative instance to generate an approximation of a derivative of arbitrary order:

```python
from sympy import Function,finite_diff_weights,apply_finite_diff
```

```python
f = Function('f')
dfdx = Derivative(f(x))#.diff(x)
dfdx
```

```python
dfdx.as_finite_difference()
```

Here the first derivative of the function with respect to x is approximated using a step size of 1 and the minimal number of equally spaced evaluation points. We can use arbitrary step sizes.

```python
f = Function('f')
d2fdx2 = Derivative(f(x),x, 2)
d2fdx2
```

```python
h = symbols('h')
d2fdx2.as_finite_difference([-3*h,-h,2*h])
```

If you want to inspect the weights, you can compute them manually.

```python
finite_diff_weights(2, [-3, -1, 2], 0)[-1][-1]
```

Note that we only need the last element of the last sublist returned by `finite_diff_weights`. The reason is that `finite_diff_weights` also generates weights for lower-order derivatives and for fewer evaluation points.

If using `finite_diff_weights` directly looks complicated, and the `as_finite_difference` method on Derivative instances is not flexible enough, you can use `apply_finite_diff`, which takes order, x_list, y_list and x0 as arguments.

```python
x_list = [-3, 1, 2]
y_list = symbols('a b c')
apply_finite_diff(1, x_list, y_list, 0)
```
1d044a2a5206fcae0ba2dda264bab9c77c7e9ab8
61,142
ipynb
Jupyter Notebook
src/数据分析篇/工具介绍/SymPy/符号计算/.ipynb_checkpoints/微积分-checkpoint.ipynb
hsz1273327/TutorialForDataScience
1d8e72c033a264297e80f43612cd44765365b09e
[ "MIT" ]
null
null
null
src/数据分析篇/工具介绍/SymPy/符号计算/.ipynb_checkpoints/微积分-checkpoint.ipynb
hsz1273327/TutorialForDataScience
1d8e72c033a264297e80f43612cd44765365b09e
[ "MIT" ]
3
2020-03-31T03:36:05.000Z
2020-03-31T03:36:21.000Z
src/数据分析篇/工具介绍/SymPy/符号计算/.ipynb_checkpoints/微积分-checkpoint.ipynb
hsz1273327/TutorialForDataScience
1d8e72c033a264297e80f43612cd44765365b09e
[ "MIT" ]
null
null
null
61.080919
8,060
0.787102
true
1,597
Qwen/Qwen-72B
1. YES 2. YES
0.9659
0.890294
0.859935
__label__yue_Hant
0.724225
0.83625
```python from neural_odes import * %load_ext autoreload %autoreload 2 ``` ## Solve the Discrete Lotka-Volterra Eqtn for general non-symmetric $A$ \begin{equation} \begin{aligned} p_i(t+1) &= p_i(t)\big[1+r_i\big(1-\frac{\sum_{j=1}^dA_{ij}p_j(t)}{k_i}\big)\big], i = 1, \dots d\\ &= p_i(t)\big[1+r_i\big(1-\frac{\mathbf{b}_i^T\big(\sum_{j=1}^d\mathbf{c}_jp_j(t)\big)}{k_i}\big)\big], i = 1, \dots d \\ &= p_i(t)\big[1+r_i\big(1-\frac{\mathbf{b}_i^TC\mathbf{p}}{k_i}\big)\big], i = 1, \dots d \\ \end{aligned} \end{equation}. We approximate $A_{ij} = \mathbf{b}_i^T\mathbf{c}_j$ using the low rank matrix approximation $A= B^TC$, where $B = [\mathbf{b}_1, \cdots, \mathbf{b}_d] \in \mathbb{R}^{k \times d}$ and $C = [\mathbf{c}_1, \cdots, \mathbf{c}_d] \in \mathbb{R}^{k \times d}$. Each $\mathbf{b}_i, \mathbf{c}_i \in \mathbb{R}^k$, where $k \ll d$ are the embeddings of time series $i$. In matrix-vector form, we have $A\mathbf{p}$, which has computational complexity $\mathcal{O}(d^2)$ for $A \in \mathbb{R}^{d \times d}, \mathbf{p} \in \mathbb{R}^d$. Using the low-rank form, we can write $A\mathbf{p} = B^T(C \mathbf{p})$. We do not want to explicitly form the matrix $B^TC$, since this would have higher complexity of $\mathcal{O}(kd^2)$. We instead break the computation into two matrix-vector products as indicated by the parathesis, each of complexity $\mathcal{O}(kd) \ll \mathcal{O}(d^2).$ We start with $d = 100$ for the number of time series and will learn the synthetic data from the equation for random initialized $A, \mathbf{r}, \mathbf{k}$ ### Step 0. Set parameters ```python num_time_series = 100 num_time_steps = 10 low_rank_param = 5 ``` To enable the symmetric form, where $C = B$, set `is_sym = True`. To explicitly form the low rank matrix matrix product $A = B^TC$, set `is_full_matrix = True`. ```python is_sym = False is_full_matrix = False ``` ### Step 1. Generate data for $A \in \mathbb{R}^{d \times d}, \mathbf{r}, \mathbf{k} \in \mathbb{R}^d$ and the initial condition $\mathbf{p}(0) \in \mathbb{R}^d$. ```python # Use gpu and higher precision for numerical computations ctx = mx.gpu() dtype = 'float64' ``` ```python _, _, p0, A = generate_data(num_time_series) r = nd.ones(num_time_series, ctx=ctx, dtype=dtype) k = 100*r r.shape, k.shape, p0.shape, A.shape ``` ((100,), (100,), (100,), (100, 100)) ### Step 3. Solve the LV eqn for $p_i(t+1), 0 \le t < N - 1$. We store $P$ as a matrix in $\mathbb{R}^{d \times N}$, whose first column is the initial condition $\mathbf{p}(0) \in \mathbb{R}^d$. Then $P = [\mathbf{p}(0), \dots, \mathbf{p}(N-1)].$ Use two embeddings to learn $B, C \in \mathbb{R}^{k \times d}$ ```python neural_lv = NeuralLV(num_time_series, num_time_steps, low_rank_param, is_full_matrix, p0, r, k, A, is_sym) ``` ```python # Set number of iterations to run num_epochs = 5000 ``` ```python p_approx, p, A_approx, model= neural_lv.run(num_epochs) ``` HBox(children=(IntProgress(value=0, max=5000), HTML(value=''))) ```python # Can re run until convergence extra 1000 at a time (Ran twice for total of 7000 iterations) p_approx, p, A_approx, model= neural_lv.run(model=model) ``` HBox(children=(IntProgress(value=0, max=1000), HTML(value=''))) ### Step 5. 
Error Evaluation and Plotting ```python print(f'l2 norm of the error = {nd.norm(p_approx-p)}') ``` l2 norm of the error = [0.08958898] <NDArray 1 @gpu(0)> ```python print(f'max norm of the error = {nd.max(nd.abs(p_approx-p))}') ``` max norm of the error = [0.01711842] <NDArray 1 @gpu(0)> ```python print(f'l2 matrix norm of the error of A and its low rank approx = {nd.norm(A_approx-A)}') ``` l2 matrix norm of the error of A and its low rank approx = [39.32629733] <NDArray 1 @gpu(0)> ```python # First ten time series lv_plot_ts(p, p_approx, fig_size_width=20) ```
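As an aside, the low-rank identity $A\mathbf{p} = B^T(C\mathbf{p})$ used throughout can be sanity-checked with plain NumPy, independently of the `neural_odes` helpers imported above; the sizes below are arbitrary choices:

```python
import numpy as np

# Check that forming A = B^T C explicitly and using the factored product agree.
d, k = 100, 5                       # number of series and low-rank dimension (arbitrary)
rng = np.random.default_rng(0)
B = rng.standard_normal((k, d))
C = rng.standard_normal((k, d))
p = rng.standard_normal(d)

full = (B.T @ C) @ p                # builds the d x d matrix first: O(k d^2) to form
factored = B.T @ (C @ p)            # two matrix-vector products: O(k d) work

print(np.allclose(full, factored))  # True
```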
ad08f3182de7189596aea4079507d2793a41617c
83,732
ipynb
Jupyter Notebook
src/gluonts/nursery/auto_ode/auto-ode-lv-discrete-time-nonsymmetric.ipynb
Xiaoxiong-Liu/gluon-ts
097c492769258dd70b7f223f826b17b0051ceee9
[ "Apache-2.0" ]
2,648
2019-06-03T17:18:27.000Z
2022-03-31T08:29:22.000Z
src/gluonts/nursery/auto_ode/auto-ode-lv-discrete-time-nonsymmetric.ipynb
Xiaoxiong-Liu/gluon-ts
097c492769258dd70b7f223f826b17b0051ceee9
[ "Apache-2.0" ]
1,220
2019-06-04T09:00:14.000Z
2022-03-31T10:45:43.000Z
src/gluonts/nursery/auto_ode/auto-ode-lv-discrete-time-nonsymmetric.ipynb
Xiaoxiong-Liu/gluon-ts
097c492769258dd70b7f223f826b17b0051ceee9
[ "Apache-2.0" ]
595
2019-06-04T01:04:31.000Z
2022-03-30T10:40:26.000Z
261.6625
75,572
0.922455
true
1,385
Qwen/Qwen-72B
1. YES 2. YES
0.853913
0.843895
0.720613
__label__eng_Latn
0.791511
0.512557
# Characterization of Systems in the Spectral Domain *This Jupyter notebook is part of a [collection of notebooks](../index.ipynb) in the bachelors module Signals and Systems, Communications Engineering, Universität Rostock. Please direct questions and suggestions to [Sascha.Spors@uni-rostock.de](mailto:Sascha.Spors@uni-rostock.de).* ## Phase and Group Delay The [phase and group delay](https://en.wikipedia.org/wiki/Group_delay_and_phase_delay) characterize the phase and delay properties of an LTI system. Both quantify the frequency dependent delay that is imprinted on a signal when passing through a system. In many applications the delay introduced by a system should be as small as possible or within reasonable limits. ### Phase Delay For an LTI system with transfer function $H(j \omega)$ the phase delay is defined as follows \begin{equation} t_p(\omega) = - \frac{\varphi(j \omega)}{\omega} \end{equation} where $\varphi(j \omega) = \arg \{ H(j \omega) \}$ denotes the phase of the transfer function. The phase delay quantifies the delay of a single harmonic exponential signal $e^{j \omega t}$ with frequency $\omega$ when passing through the system. The negative sign in the definition of the phase delay results in a positive phase delay $t_p(\omega) > 0$ when a signal is delayed by a system. Note that the phase delay may not be defined for $\omega = 0$. **Example** As example, the phase delay $t_p(\omega)$ is computed for the [2nd order low-pass filter introduced before](../laplace_transform/network_analysis.ipynb#Example:-Second-Order-Low-Pass-Filter). First the transfer function $H(j \omega)$ is defined in `SymPy` ```python %matplotlib inline import sympy as sym sym.init_printing() L, R, C = sym.symbols('L R C', positive=True) w = sym.symbols('omega', real=True) s = sym.I * w H = 1 / (C*L*s**2 + C*R*s + 1) H ``` Now the phase delay $t_p(\omega)$ is computed ```python phi = sym.arg(H) tp = - phi/w tp ``` and the result is visualized using the normalized values $R=1$, $L=0.5$ and $C=0.4$ for the elements of the low-pass filter ```python RLC = {R: 1, L: sym.Rational('.5'), C: sym.Rational('.4')} sym.plot(tp.subs(RLC), (w, -10, 10), xlabel='$\omega$', ylabel='$t_p(j \omega)$') ``` <sympy.plotting.plot.Plot at 0x106658208> ### Group Delay The group delay is defined as the derivative of the phase with respect to the frequency \begin{equation} t_g(\omega) = - \frac{d \varphi(j \omega)}{d \omega} \end{equation} The group delay quantifies the delay the amplitude envelope of a group of exponential signals observes when passing through a system. The negative sign in above definition results in a positive group delay for a system imposing a delay onto the input signal. Note that the [phase](https://en.wikipedia.org/wiki/Instantaneous_phase) $\varphi(j \omega)$ is in general only unique for $- \pi < \varphi(j \omega) \leq \pi$. If the phase exceeds this range it is wrapped back. For meaningful results it is required to unwrap the phase before computing the group delay. **Example** The group delay $t_g(\omega)$ of above 2nd order low-pass filter is computed and plotted for the normalized values ```python tg = - sym.diff(phi, w) sym.plot(tg.subs(RLC), (w, -10, 10), xlabel='$\omega$', ylabel='$t_g(j \omega)$') ``` <sympy.plotting.plot.Plot at 0x11686aeb8> **Copyright** This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. 
The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Continuous- and Discrete-Time Signals and Systems - Theory and Computational Examples*.
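As a numerical footnote to the group delay section above, the same check can be done without SymPy; the sketch below (the frequency grid and the use of `numpy.unwrap`/`numpy.gradient` are choices made here, not taken from the lecture material) evaluates $H(j\omega)$ for the normalized values $R=1$, $L=0.5$, $C=0.4$, unwraps the phase and differentiates. Near $\omega = 0$ the estimate should come out close to $R C = 0.4$.

```python
import numpy as np

# numerical group delay for H(jw) = 1 / (C L (jw)^2 + C R (jw) + 1)
R, L, C = 1.0, 0.5, 0.4
w = np.linspace(-10, 10, 2001)
H = 1.0 / (C * L * (1j * w)**2 + C * R * (1j * w) + 1.0)

phi = np.unwrap(np.angle(H))     # unwrap the phase before differentiating
tg = -np.gradient(phi, w)        # group delay t_g(omega) = -d phi / d omega

print(tg[np.argmin(np.abs(w))])  # estimate near omega = 0
```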
0947b373e37dd9e17c56527db7c74cd33a443a3f
132,209
ipynb
Jupyter Notebook
systems_spectral_domain/phase_group_delay.ipynb
spatialaudio/signals-and-systems-lecture
93e2f3488dc8f7ae111a34732bd4d13116763c5d
[ "MIT" ]
243
2016-04-01T14:21:00.000Z
2022-03-28T20:35:09.000Z
systems_spectral_domain/phase_group_delay.ipynb
iamzhd1977/signals-and-systems-lecture
b134608d336ceb94d83cdb66bc11c6d4d035f99c
[ "MIT" ]
6
2016-04-11T06:28:17.000Z
2021-11-10T10:59:35.000Z
systems_spectral_domain/phase_group_delay.ipynb
iamzhd1977/signals-and-systems-lecture
b134608d336ceb94d83cdb66bc11c6d4d035f99c
[ "MIT" ]
63
2017-04-20T00:46:03.000Z
2022-03-30T14:07:09.000Z
64.871933
15,978
0.633368
true
1,015
Qwen/Qwen-72B
1. YES 2. YES
0.782662
0.757794
0.593097
__label__eng_Latn
0.979769
0.216294
### Deep Neural Network for Bound-Virtual Classfication In this notebook we compile all the functions that are used to generate the training dataset for the bound-virtual enhancement classification. The dataset is prepared such that the input is the single-channel s-wave cross-section with enhancement at the threshold while the output is the corresponding label of the pole causing the enhancement. The output is described by <br> \begin{equation} \begin{pmatrix} \mbox{bound} \\ \mbox{virtual} \end{pmatrix} \end{equation} <br> and the input s-wave cross section is obtained by using the S-matrix parametrization <br> \begin{equation} S(p)=e^{2i\delta_{bg}(p)} \left(-\dfrac{p+i\gamma_{far}}{p-i\gamma_{far}}\right) \left(-\dfrac{p+i\gamma_{near}}{p-i\gamma_{near}}\right) \end{equation} <br> where $\delta_{bg}(p)$ is the background phase, $\gamma_{far}$ is the background virtual pole and $\gamma_{near}$ is the relevant near-threshold pole. The background phase shift is parametrized using the relation <br> \begin{equation} \delta_{bg}(p)=\eta \tan^{-1}\left(\dfrac{p}{\Lambda_{bg}}\right). \end{equation} <br> We start by importing the basic packages: ```python import math import numpy as np import random import pickle import matplotlib.pyplot as plt from matplotlib.pyplot import figure import sklearn from sklearn.utils import shuffle import chainer from chainer.dataset import convert ``` We now define the global parameters used in generating the dataset. This includes the hadron masses, the center-of-mass energy $E_{cm}$ where the cross-section is to be plotted and the non-relativistic relation: $p=\sqrt{2\mu E_{cm}}$. The parameters of the background phase, which consists of $\eta$ and $\Lambda_{bg}$, are also specified. ```python #################################### #Units in MeV Nucleon = 938.9186795 Pion = 138.0394 Eta = 547.862 mu_NN = 1/(1/Nucleon + 1/Nucleon) mu_piN = 1/(1/Pion + 1/Nucleon) mu_etaN = 1/(1/Eta + 1/Nucleon) #Center-of-mass energy axis (in MeV) Emax = 100.0 E_Ndata = 200 Einput = np.linspace(Emax/E_Ndata, Emax, num=E_Ndata).astype(np.float32) #choose system by specifying reduced mass (convert to fm^-1) mu = mu_NN/(197.3) #relative momentum (energy input is in MeV but p is in fm^-1) #to be used in S-matrix evaluation def p_rel(E): #E is in MeV #output p_rel is fm^-1 return np.sqrt(2*mu*E/197.3) #descriptive labels of network output labelz = ['bound', 'virtual', 'resonance'] Nexpn = 10 #information about data generation #expn = [-1, -2, -3, -4] expn = np.random.uniform(low=-4, high=-1, size=Nexpn) NLambda = 20 Ngamma = 2*500 Nvirt = 20 print('total number of data to be generated:', len(expn)*NLambda*Ngamma*Nvirt) #generate random values of background poles #and near-threshold pole in units of 1/fm #197.3 MeV*fm = hbar*c Lamlow = 500 #in MeV/c Lamhig = 700 #in MeV/c Lambda = np.random.uniform(low=Lamlow/197.3, high=Lamhig/197.3, size=NLambda) #to prevent a background to be closer to threshold than the bound state pole, #we will vary gamma according to the value of Lambda ``` total number of data to be generated: 4000000 #### Generating the Training Dataset The function <br> `amplitude2, labelout, ppole, vpole, BGprmtrs = gen_boundvirtual()` <br> generates the bound-virtual classification dataset which contains the following: - the input cross-section (s-wave amplituded) - output label describing the nature of pole causing the enhancement - the ner-threshold pole position in unit of fm${}^{-1}$ - background parameters and the position of far-threshold virtual pole <br> ```python 
def gen_boundvirtual(): #define this functions as part of the module labelout = [] ppole = [] vpole = [] amplitude2 = [] BGprmtrs = [] for xpindx in range(len(expn)): for lmindx in range(len(Lambda)): #Generate near-threshold pole #for shallow bound state poles gamlow = 0/197.3 #(in fm^-1) gives B.E. of 0 MeV gamhig = 200/197.3 #(in fm^-1) gives B.E. of 42.55 MeV gammapos = np.random.uniform(low=gamlow, high=gamhig, size=int(Ngamma/2)) #for virtual state poles gamlow = 0/197.3 #in fm^-1 gamhig = 0.9*Lambda[lmindx] #in fm^-1 gammaneg = np.random.uniform(low=-gamhig, high=-gamlow, size=int(Ngamma/2)) #combine bound and virtual gamma = np.concatenate((gammaneg, gammapos)) for gamindx in range(len(gamma)): #The far virtual pole partner can be bigger than the #cut-off but cannot be smaller than the near-threshold pole #We just take the positive values below #for virtual pole near the near-threshold pole virt_near = np.random.uniform(low=1.1*abs(gamma[gamindx]), high=10.1*abs(gamma[gamindx]), size=int(Nvirt/2)) #for virtual pole far from near-threshold pole virt_far = np.random.uniform(low=1.1*abs(Lambda[lmindx]), high=2.1*abs(Lambda[lmindx]), size=int(Nvirt/2)) #combine far virtual poles virtpole = np.concatenate((virt_near,virt_far)) for virindx in range(len(virtpole)): def PWAsqr(p): #background phaseshift sbg = np.exp(2*(1j)*expn[xpindx]*np.arctan(p/Lambda[lmindx])) #with background virtual pole sbg = -sbg*(p-(1j)*virtpole[virindx])/(p+(1j)*virtpole[virindx]) #The full S-matrix smatrix = -sbg*(p+(1j)*gamma[gamindx])/(p-(1j)*gamma[gamindx]) #The partial wave amplitude fmatrix = (smatrix-1)/(2*(1j)*p) return abs(fmatrix*np.conj(fmatrix)) #Classify cross-section according to the sign #of near-threshold pole if gamma[gamindx]>0: #label for near-threshold bound state labelout.append(0) elif gamma[gamindx]<0: #label for near-threshold virtual state labelout.append(1) #relative momentum axis prel = p_rel(Einput) #cross-section amplitude2.append(PWAsqr(prel)) #auxillary data for checking: #near-threshold pole value ppole.append(gamma[gamindx]) #in fm^-1 #far virtual pole value vpole.append(virtpole[virindx]) #in fm^-1 #background parameters BGprmtrs.append([expn[xpindx],Lambda[lmindx]]) #report the number of bound-virtual cross-sections labelz = [i for i, x in enumerate(range(len(labelout))) if labelout[x]==0] print('number of bound state: ',len(labelz)) labelz = [i for i, x in enumerate(range(len(labelout))) if labelout[x]==1] print('number of virtual state: ',len(labelz)) return amplitude2, labelout, ppole, vpole, BGprmtrs ``` The function <br> `seecrosssec()` <br> gives the cross-section profile of a randmoly chosen sample from the bound-virtual dataset. The parameters used to generate the data are also shown. ```python def seecrosssec(): #chckind is a random integer #to call one of the generated data sample chckind = np.random.randint(0,len(labelout)) font_set_size = 15 plt.plot(Einput, amplitude2[chckind]*10,'o') plt.title('input data', fontsize=font_set_size) plt.xlabel('$E_{cm}$ (MeV)', fontsize=font_set_size) plt.xticks(fontsize=font_set_size) plt.ylabel('$|f_{\ell}(E)|^2$ ($mb$)', fontsize=font_set_size) plt.yticks(fontsize=font_set_size) plt.tight_layout() print('label:', labelz[labelout[chckind]]) print('energy pole:',-ppole[chckind]**2/(2*mu)*197.3,'MeV') print('bg.v. 
pole:',-vpole[chckind]**2/(2*mu)*197.3,'MeV') print('eta:',BGprmtrs[chckind][0]) print('Lambda:', BGprmtrs[chckind][1]*197.3,'MeV') return plt.show() ``` If you are now happy with the data generated, you may now export the dataset for future use.<br> Enter the command <br> `exportdata(amplitude2, labelout, ppole, vpole, BGprmtrs)` <br> to the console to export data in local directory. ```python def exportdata(amplitude2, labelout, ppole, vpole, BGprmtrs): #EXPORT generated data #Set delete=0 if you want to delete the generated data to free some space #Set delete=1 if you don't want to delete the generated data pickle.dump (ppole, open('ppole.pkl','wb')) pickle.dump (vpole, open('vpole.pkl','wb')) pickle.dump (amplitude2, open('amplitude2.pkl','wb')) pickle.dump (labelout, open('labelout.pkl','wb')) pickle.dump (BGprmtrs, open('BGprmtrs.pkl','wb')) print('export done') return ``` ```python del amplitude2, labelout, ppole, vpole, BGprmtrs ``` ```python def get_testtrain(Nshuffle): #import prepared dataset inputtraining = pickle.load(open('amplitude2.pkl','rb')) outputtraining = pickle.load(open('labelout.pkl','rb')) print('size of training set:', len(inputtraining)) print('number of nodes in input layer:',len(inputtraining[0])) #shuffle the imported data for ndx in range(Nshuffle): inputtraining, outputtraining = shuffle(inputtraining, outputtraining) #create input-output tuples that can be read by chainer dataset00 = chainer.datasets.TupleDataset(inputtraining, outputtraining) #split training set with testing set train, test = chainer.datasets.split_dataset(dataset00, int(0.8*len(inputtraining)), order=None) pickle.dump (train, open('chainer_train.pkl','wb')) pickle.dump (test, open('chainer_test.pkl','wb')) return train, test ```
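To see the parametrization in isolation (outside the nested loops of `gen_boundvirtual`), the sketch below re-states $|f(p)|^2$ as a standalone function and evaluates it for one bound-like and one virtual-like configuration. It reuses `Einput` and `p_rel` from the parameter cell above; the particular pole and background values are illustrative guesses within the sampling ranges used for the dataset, not values taken from it:

```python
import numpy as np
import matplotlib.pyplot as plt

def pwa_squared(p, gamma_near, gamma_far, eta, Lambda_bg):
    """|f(p)|^2 from the S-matrix parametrization used in gen_boundvirtual."""
    s_bg = np.exp(2j * eta * np.arctan(p / Lambda_bg))
    s_bg = -s_bg * (p - 1j * gamma_far) / (p + 1j * gamma_far)
    s = -s_bg * (p + 1j * gamma_near) / (p - 1j * gamma_near)
    f = (s - 1.0) / (2j * p)
    return np.abs(f)**2

p = p_rel(Einput)  # momentum grid from the parameter cell (in fm^-1)
plt.plot(Einput, pwa_squared(p, +0.3, 3.0, -2.0, 3.0), label='bound ($\\gamma>0$)')
plt.plot(Einput, pwa_squared(p, -0.3, 3.0, -2.0, 3.0), label='virtual ($\\gamma<0$)')
plt.xlabel('$E_{cm}$ (MeV)')
plt.ylabel('$|f_{\\ell}(E)|^2$')
plt.legend()
plt.show()
```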
fe851bf9ba3780f84478d3ece5ec4e6343745142
14,239
ipynb
Jupyter Notebook
generate_dataset.ipynb
sombillo/DNN-for-bound-virtual-classification
cb7e42ac0c6111cd3f3796504bf09015938795fd
[ "MIT" ]
null
null
null
generate_dataset.ipynb
sombillo/DNN-for-bound-virtual-classification
cb7e42ac0c6111cd3f3796504bf09015938795fd
[ "MIT" ]
null
null
null
generate_dataset.ipynb
sombillo/DNN-for-bound-virtual-classification
cb7e42ac0c6111cd3f3796504bf09015938795fd
[ "MIT" ]
null
null
null
38.692935
380
0.520542
true
2,707
Qwen/Qwen-72B
1. YES 2. YES
0.907312
0.712232
0.646217
__label__eng_Latn
0.867641
0.339709
```python import torch import pandas as pd import numpy as np import torch.nn as nn ``` ```python a = np.eye(5, 3)[np.array([0, 1, 0, 2, 1])] a.reshape(-1, 3) ``` array([[1., 0., 0.], [0., 1., 0.], [1., 0., 0.], [0., 0., 1.], [0., 1., 0.]]) ```python a = np.zeros((5, 4, 3)) b = np.random.randn(5, 4, 3) a - b[:, :, 1].reshape(5, -1, 1) ``` array([[[ 0.27490142, 0.27490142, 0.27490142], [-0.28210933, -0.28210933, -0.28210933], [-0.54029727, -0.54029727, -0.54029727], [ 0.09021319, 0.09021319, 0.09021319]], [[ 1.53565429, 1.53565429, 1.53565429], [-1.03396388, -1.03396388, -1.03396388], [ 0.38334356, 0.38334356, 0.38334356], [ 0.12727384, 0.12727384, 0.12727384]], [[-0.83364892, -0.83364892, -0.83364892], [-0.31906883, -0.31906883, -0.31906883], [ 0.10114786, 0.10114786, 0.10114786], [-2.08064701, -2.08064701, -2.08064701]], [[-1.82743214, -1.82743214, -1.82743214], [-0.88780012, -0.88780012, -0.88780012], [-0.59953832, -0.59953832, -0.59953832], [-1.14967454, -1.14967454, -1.14967454]], [[ 1.45511417, 1.45511417, 1.45511417], [ 0.2408046 , 0.2408046 , 0.2408046 ], [-0.08786458, -0.08786458, -0.08786458], [ 1.01358446, 1.01358446, 1.01358446]]]) ```python a = np.eye(5, 4)[np.array([1, 2, 1, 0, 4])] target = np.array([1, 2, 1, 0, 4]).reshape(-1, 1) target1 = np.array([[1, 2], [0, 3], [2, 0], [2, 1]]) ``` ```python from ylearn.estimator_model.utils import cartesian ``` ```python label = cartesian([[1, 2, 3], [4, 5]]) t = np.array([[1, 4], [2, 5], [3, 5], [2, 4]]) v = np.zeros((4, 1)) label_dict = {tuple(k):i for i, k in enumerate(label)} for i, t_i in enumerate(t): v[i] = label_dict[tuple(t_i)] ``` ```python import ylearn.estimator_model.double_ml as dml from sklearn.linear_model import LinearRegression ``` ```python a = dml.DML4CATE( x_model=LinearRegression(), y_model=LinearRegression() ) ``` ```python len({1:2}) ``` 1 ```python t = np.array([['a', 'hello'], ['b', 'bad'], ['a', 'not']]) a.comp_transormer(t, categories='auto') a._is_fitted = True ``` ```python d = a.comp_transormer(np.array(['a', 'hello']).reshape(1, -1)) d ``` array([[0., 1., 0., 0., 0., 0.]]) ```python any([True, False, True]) ``` True ```python from ylearn.estimator_model.utils import convert4onehot, get_tr_ctrl # convert4onehot(d).astype(int)[0] get_tr_ctrl(['b', 'not'], a.comp_transormer, True) ``` 5 ```python np.eye(5,3)[np.array([2, 1, 1, 2, 2])] ``` array([[0., 0., 1.], [0., 1., 0.], [0., 1., 0.], [0., 0., 1.], [0., 0., 1.]]) ```python np.array([1, 0, 0]) ``` array([1, 0, 0]) ```python np.ones((5, 4))[:, np.array([2])] ``` array([[1.], [1.], [1.], [1.], [1.]]) ```python a.label_dict ``` {(0, 0): 0, (0, 1): 1, (0, 2): 2, (1, 0): 3, (1, 1): 4, (1, 2): 5} ```python a.label_dict ``` {(0, 0): 0, (0, 1): 1, (0, 2): 2, (1, 0): 3, (1, 1): 4, (1, 2): 5} ```python np.full((v.shape[0], 1), np.NaN) ``` array([[nan], [nan], [nan], [nan]]) ```python OrdinalEncoder(categories='auto') ``` ```python cartesian([[1, 2, 3], [4, 5]]) ``` array([[1, 4], [1, 5], [2, 4], [2, 5], [3, 4], [3, 5]]) ```python {(1, 2, 3):2} ``` {(1, 2, 3): 2} ```python np.array([0, 1, 0, 0]).nonzero()[0] ``` (array([1]),) ```python tuple(np.array([[1, 2, 3], [4, 5, 6]])) ``` (array([1, 2, 3]), array([4, 5, 6])) ```python from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder ordi = OrdinalEncoder() o.fit_transform(target1).toarray() ordi.fit_transform(np.array([['good', 2, 1], ['bad', 3, 2]])).astype(int) ``` array([[1, 0, 0], [0, 1, 1]]) ```python o = OneHotEncoder(categories=[np.array([1, 2, 3])]) o.fit_transform(np.array([1, 2, 3, 2, 1]).reshape(-1, 
1)).toarray() # o.n_features_in_ ``` array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [0., 1., 0.], [1., 0., 0.]]) ```python ordi.categories_ ``` [array(['bad', 'good'], dtype='<U21'), array(['2', '3'], dtype='<U21'), array(['1', '2'], dtype='<U21')] ```python np.arange(len(ordi.categories_[0])) ``` array([0, 1]) ```python np.array([[5]]) * np.ones((4, 3)) ``` array([[5., 5., 5.], [5., 5., 5.], [5., 5., 5.], [5., 5., 5.]]) ```python a = np.random.randint(0, 5, (4, 2)) np.repeat(a, 3, 0) np.tile(a, (2, 3)) ``` array([[2, 1, 2, 1, 2, 1], [0, 4, 0, 4, 0, 4], [4, 2, 4, 2, 4, 2], [0, 3, 0, 3, 0, 3], [2, 1, 2, 1, 2, 1], [0, 4, 0, 4, 0, 4], [4, 2, 4, 2, 4, 2], [0, 3, 0, 3, 0, 3]]) ```python a = np.all(np.array([1, 1]) == np.array([[1, 1], [1, 0], [1, 1], [0, 0]]), axis=1) b = np.all(np.array([0, 0]) == np.array([[1, 0], [0, 0], [1, 1], [1, 1]]), axis=1) np.any((a, b), axis=0) ``` array([ True, True, True, False]) \begin{equation} (\frac{I(x_i=xt)y_i}{ps_{x_i=xt}(w_i)} - yt_i\frac{I(x_i=xt)-ps_{x_i=xt}(w_i)}{ps_{x_i=xt}(w_i)}) (\frac{I(x_i=x0)y_i}{ps_{x_i=x0}(w_i)} - y0_i\frac{I(x_i=x0)-ps_{x_i=x0}(w_i)}{ps_{x_i=x0}(w_i)}) \end{equation} ```python from estimator_model.utils import get_wv, _get_wv a = np.ones((3, 2)) b = np.zeros((3, 5)) c = None get_wv(c, a) == _get_wv(c, a) ``` array([[ True, True], [ True, True], [ True, True]]) ```python np.zeros(np.zeros((3, 4)).shape + (3,)).shape ``` (3, 4, 3) ```python from sklearn.preprocessing import PolynomialFeatures ``` ```python x = np.random.randint(0, 6, size=(4, 3)) x ``` array([[4, 3, 1], [2, 2, 3], [3, 1, 0], [3, 0, 5]]) ```python poly = PolynomialFeatures(2) poly.fit(x) poly.transform(x) ``` array([[ 1., 4., 3., 1., 16., 12., 4., 9., 3., 1.], [ 1., 2., 2., 3., 4., 4., 6., 4., 6., 9.], [ 1., 3., 1., 0., 9., 3., 0., 1., 0., 0.], [ 1., 3., 0., 5., 9., 0., 15., 0., 0., 25.]]) ```python pol = PolynomialFeatures() ``` ```python e = np.array([['hello', 'bad'], ['good', 'bad'], ['hey', 'not'], ['ok', 'not' ]]) ``` ```python from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder c = OneHotEncoder() c.fit(e) c.transform(e).toarray() ``` array([[0., 1., 0., 0., 1., 0.], [1., 0., 0., 0., 1., 0.], [0., 0., 1., 0., 0., 1.], [0., 0., 0., 1., 0., 1.]]) ```python f = OrdinalEncoder() f.fit_transform(e) f.categories_ ``` [array(['good', 'hello', 'hey', 'ok'], dtype='<U5'), array(['bad', 'not'], dtype='<U5')] ```python from itertools import product list(product([np.array([0, 1, 2, 3]), np.array([0, 1]), np.array([0, 1])])) ``` [(array([0, 1, 2, 3]),), (array([0, 1]),), (array([0, 1]),)] ```python from sklearn.linear_model import LinearRegression l = LinearRegression() x = np.array([[1], [2], [3], [4]]) y = (np.random.normal() * (x)).ravel() l.fit(x, y) ``` LinearRegression() ```python np.all(np.array([[1, 2, 3], [4, 5, 6]]) == np.array([1, 2, 3]), axis=1) ``` array([ True, False]) ```python a = np.ones((3, 2, 4)) a[:, :, 1] = np.zeros((3, 2)) ``` ```python np.ones((3, 2)).squeeze() ``` array([[1., 1.], [1., 1.], [1., 1.]]) ```python def broadcast_unit_treatments(X, d_t): """ Generate `d_t` unit treatments for each row of `X`. 
Parameters ---------- d_t: int Number of treatments X : array Features Returns ------- X, T : (array, array) The updated `X` array (with each row repeated `d_t` times), and the generated `T` array """ d_x = X.shape[0] eye = np.eye(d_t) # tile T and repeat X along axis 0 (so that the duplicated rows of X remain consecutive) T = np.tile(eye, (d_x, 1)) Xs = np.repeat(X, d_t, axis=0) return Xs, T broadcast_unit_treatments(np.random.randint(0, 3, (3, 2)), 3) ``` (array([[0, 1], [0, 1], [0, 1], [2, 1], [2, 1], [2, 1], [2, 0], [2, 0], [2, 0]]), array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])) ```python a = [1, 2, 3] a.insert(0, 0) np.ones((5,1)).repeat(3, axis=1) ``` array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]) ```python a = np.zeros((1, 5)) a[:, 0] = 1 np.repeat(a, 2, axis=0) ``` array([[1., 0., 0., 0., 0.], [1., 0., 0., 0., 0.]]) ```python from sklearn.preprocessing import OneHotEncoder ``` ```python np.array([[1, 2, 3], [4, 5, 6]])[np.array([True, False])] ``` ```python len(np.array([2, 3, 4])) ``` 3 ```python product() ``` ```python np.array([[1, 2, 3], [4, 5,5]]).ravel() ``` array([1, 2, 3, 4, 5, 5]) ```python np.eye(5, 3)[np.array([1, 2, 2, 1, 1])] ``` array([[0., 1., 0.], [0., 0., 1.], [0., 0., 1.], [0., 1., 0.], [0., 1., 0.]]) ```python a = np.array([['good', 'hello'], ['bad', 'hello'], ['good', 'not']]) from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder o = OneHotEncoder() o.fit(np.array([1, 1, 0, 1]).reshape(-1, 1)) o.categories_ ``` [array([0, 1])] ```python np.array([np.ones(2,), np.ones(2, ), np.ones(2, )]) ``` array([[1., 1.], [1., 1.], [1., 1.]]) ```python list([1, 2, 3]) ``` [1, 2, 3] ```python np.repeat(np.array([[4, 5, 6]]), 3, axis=0) ``` array([[4, 5, 6], [4, 5, 6], [4, 5, 6]]) ```python from estimator_model.utils import get_group_ids target = np.repeat(np.array([[4, 5, 6]]), 3, axis=0) a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) b = np.array([[2, 3, 4], [3, 4, 5], [1, 1, 1]]) get_group_ids(target, a, a, b) ``` [array([[4, 5, 6]]), array([[3, 4, 5]])] ```python np.mean() ``` ```python np.concatenate() ``` ```python index = np.all(np.array([[1, 2, 3], [1, 0, 1]]) == np.array([[1, 0, 3], [1, 0, 1]]), axis=1) np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])[np.where(index)] ``` array([[4, 5, 6]]) ```python np.argmax(np.random.normal(size=((3, 2))), axis=1) ``` array([1, 1, 1]) ```python np.concatenate((np.ones((3, 1)), np.random.normal(size=((3, 2))), np.zeros((3, 1))), axis=1) ``` array([[ 1. , 0.19681735, -1.1459864 , 0. ], [ 1. , 1.92138143, 0.37209335, 0. ], [ 1. , -0.80434229, 0.55304074, 0. 
]]) ```python np.zeros) ``` ```python torch.argmax( nn.Softmax(dim=1)(torch.randn(5, 3)) , dim=1).reshape(-1, 1) ``` tensor([[2], [0], [1], [1], [1]]) ```python a = nn.Softmax(dim=1) b = nn.LogSoftmax(dim=1) c = torch.randn(5, 3) torch.log(a(c)) nn.CrossEntropyLoss ``` tensor([[-0.1901, -2.6149, -2.3034], [-1.4519, -0.8391, -1.0973], [-0.9513, -0.8132, -1.7701], [-3.6564, -2.8858, -0.0852], [-1.4684, -0.3985, -2.3190]]) ```python b(c) ``` tensor([[-0.1901, -2.6149, -2.3034], [-1.4519, -0.8391, -1.0973], [-0.9513, -0.8132, -1.7701], [-3.6564, -2.8858, -0.0852], [-1.4684, -0.3985, -2.3190]]) ```python a = torch.cat((torch.ones(5, 3), torch.zeros(5, 2)), dim=1) torch.cat((a[:, :5], a[:, 5:5]), dim=1) ``` tensor([[1., 1., 1., 0., 0.], [1., 1., 1., 0., 0.], [1., 1., 1., 0., 0.], [1., 1., 1., 0., 0.], [1., 1., 1., 0., 0.]]) ```python m = nn.Embedding(5, 4) m(torch.tensor([1, 2, 3, 4,2])) ``` tensor([[ 0.9123, -0.3915, 0.1124, 0.3993], [ 2.1194, 0.1742, -0.6385, 0.0089], [ 0.0088, -0.6977, -0.4991, 1.0913], [ 0.3151, -0.0975, -0.6318, -0.1264], [ 2.1194, 0.1742, -0.6385, 0.0089]], grad_fn=<EmbeddingBackward0>) ```python torch.argmax(torch.eye(5, 4), dim=0).reshape(-1, 1) ``` tensor([[0], [1], [2], [3]]) ```python m = nn.Softmax(dim=1) m(torch.randn(5, 3)) ``` tensor([[0.1918, 0.3555, 0.4527], [0.1439, 0.4630, 0.3932], [0.8279, 0.1352, 0.0369], [0.1293, 0.3380, 0.5327], [0.1388, 0.7279, 0.1333]]) ```python torch.where(torch.eye(5, 4).index_select(dim=0, index=(torch.ones(5,) * 2).int()) == 1) ``` tensor([2, 2, 2, 2, 2]) ```python torch.zeros(5,).mean(dim=0) ``` tensor(0.) ```python pd.DataFrame({'a':['hello', 'good', 2]}).values ``` array([['hello'], ['good'], [2]], dtype=object) ```python a = np.array([1, 2, 3, 3, 1]) torch.tensor(a).shape ``` torch.Size([5]) ```python a = np.random.randn(3, 4) a ``` array([[ 0.3866119 , 0.53135121, 1.57163397, -0.05024657], [-1.15621478, 0.51949391, 0.46480889, 0.3432628 ], [-1.13285389, -0.21865636, 0.2157348 , -0.33427615]]) ```python for i, j in enumerate(a.T): a = j ``` array([-0.05024657, 0.3432628 , -0.33427615]) ```python a = np.random.randn(3, 5) a.argmax(axis=1) ``` array([4, 4, 4]) ```python w = [ np.random.normal(size=(4, 1)) for i in range(3) ] w = np.concatenate(tuple(w), axis=1) w_coef = np.random.normal( 1, 0.2, size=(2, 3) ) x = w.dot(w_coef.transpose()) ``` ```python np.dot(np.random.randn(3,).transpose(), np.random.randn(3, 2)).transpose() ``` array([4.07799799, 0.88329315]) ```python np.eye(2)[x.argmax(axis=1)] ``` array([[1., 0.], [1., 0.], [0., 1.], [0., 1.]]) ```python x ``` array([[-1.85906821, -1.32622782, -1.19364618, -1.53628564], [-1.81711645, -1.8115858 , -1.55145149, -1.7550974 ], [-0.85039268, -0.28857254, -0.35973296, -0.64413723], [-1.45962002, -1.51957314, -1.2924692 , -1.43797645]]) ```python nn.Softmax(dim=1)(a).argmax(dim=1).view(3, -1) ``` tensor([[0], [0], [0]]) ```python a.transpose().shape ``` (4, 3) ```python a = torch.randn(3, 2) b = torch.randn(3, 2) torch.sum(a * b, dim=1).unsqueeze(1).shape ``` torch.Size([3, 1]) ```python pi = torch.randn(3, 2) pi_ = torch.exp(pi) pi_ ``` tensor([[0.8265, 0.9079], [0.5721, 5.1133], [2.0180, 0.2811]]) ```python pi_ = pi_ / torch.sum(pi_, dim=1).unsqueeze(1).expand_as(pi_) pi_ ``` tensor([[0.4765, 0.5235], [0.1006, 0.8994], [0.8777, 0.1223]]) ```python mu = torch.randn(3, 2, 3) sigma = torch.abs(torch.randn(3, 2, 3)) ``` ```python mix = torch.distributions.Categorical(pi_) comp = torch.distributions.Independent(torch.distributions.Normal(mu, sigma), 1) gmm = 
torch.distributions.MixtureSameFamily(mix, comp) ``` ```python d = gmm.sample() d ``` tensor([[ 0.1713, -1.5641, -0.1730], [-2.0129, 0.8305, -1.4073], [-0.1666, -0.4928, 0.3084]]) ```python d.view(-1, 3) ``` tensor([[ 0.1713, -1.5641, -0.1730], [-2.0129, 0.8305, -1.4073], [-0.1666, -0.4928, 0.3084]]) ```python a = torch.tensor([1, 2, 3, 4]) b = torch.zeros((4, 4)) torch.eye(4, 4)[1] ``` tensor([0., 1., 0., 0.]) ```python f = torch.rand(3, 1) ``` ```python g = f.clone().detach() g ``` tensor([[0.6147], [0.0253], [0.5778]]) ```python for i in range(5): f = torch.rand(3, 1) g = torch.concat((f, g), dim=0) g ``` tensor([[0.1551], [0.1150], [0.5389], [0.4203], [0.2498], [0.2340], [0.3530], [0.0219], [0.5971], [0.1908], [0.9878], [0.4121], [0.1878], [0.9655], [0.9310], [0.6147], [0.0253], [0.5778]]) ```python g.shape ``` torch.Size([18, 1]) ```python d = g.view(6, -1) ``` ```python g[0][0] = 1 d ``` tensor([[1.0000, 0.1150, 0.5389], [0.4203, 0.2498, 0.2340], [0.3530, 0.0219, 0.5971], [0.1908, 0.9878, 0.4121], [0.1878, 0.9655, 0.9310], [0.6147, 0.0253, 0.5778]]) ```python a = torch.randn(6, 2) b = torch.randn(6, 2) c = a * b d = torch.sum(c, dim=1) ``` ```python a = torch.randn(3, 2) b = a.unsqueeze(dim=1) b ``` tensor([[[-2.1470, 0.4693]], [[-0.7005, -2.2612]], [[-0.2056, -0.6673]]]) ```python a = torch.randn(3, 4) a ``` tensor([[ 2.0146, 1.1581, -0.6280, 1.2848], [ 0.0081, -1.2483, 0.0801, 0.6994], [ 1.7687, -0.8702, -0.4849, 0.2106]]) ```python x = torch.randn(3, 1) z = torch.zeros(3, 1) g = torch.cat((x, z), dim=1) g.requires_grad = True # x.requires_grad = True # z.requires_grad = True l = torch.mean(-torch.square(g)) l.backward() ``` ```python g[:,1:] ``` tensor([[0.], [0.], [0.]], grad_fn=<SliceBackward0>) ```python def t(x): return x ** 2 + 2 a = torch.ones(3, 2, requires_grad=True) b = torch.zeros(3, 1) c = torch.cat((a, b), dim=1) z = torch.sum(t(c) - torch.zeros_like(c)) z.backward() ``` ```python ``` tensor(24., grad_fn=<SumBackward0>) ```python c = torch.randn(3, 2, 2) c ``` tensor([[[-1.2056, -0.2942], [ 0.0869, -0.6678]], [[-0.8665, -0.3370], [-0.3098, -0.9897]], [[-1.2215, 1.1381], [ 0.5559, 2.0055]]]) ```python d = torch.randn(3, 2).unsqueeze(dim=2) e = d.expand_as(c) e[1] ``` tensor([[ 2.0886, 2.0886], [-1.7030, -1.7030]]) ```python g = torch.sum(c * e, dim=1) ``` tensor([[-1.1951, -0.0179], [-1.2823, 0.9815], [ 1.0219, -4.3168]]) ```python kk = torch.distributions.Normal(torch.tensor([1.0]), torch.tensor([1.0])) ``` ```python torch.normal(torch.randn(3, 5), torch.abs(torch.randn(3, 5))).shape ``` torch.Size([3, 5]) ```python a = torch.randn(1, 2, 4) a ``` tensor([[[-1.4792, -0.2155, 0.3315, -0.2551], [ 0.0538, -1.3072, 0.7378, 0.1132]]]) ```python a.squeeze(dim=0).shape ``` torch.Size([2, 4]) ```python torch.tensor([1, 2, 3]).unsqueeze(0).repeat(5, 1).shape ``` torch.Size([5, 3]) ```python torch.mean(torch.log(torch.abs(g)), dim=0) ``` tensor([ 0.1495, -0.8602]) ```python torch.cat((a, b), dim=0) ``` tensor([[ 0.5812, -0.7614], [ 0.9483, 0.3945], [-1.3171, 0.0244], [-0.7375, 0.4646], [ 0.9193, 0.7448], [-0.2335, 0.8908], [ 0.7415, 0.5046], [ 0.5779, 1.0458], [ 1.1320, -0.6056], [ 0.8067, -0.9804], [ 2.1514, -0.6210], [ 0.2589, -1.3588]]) ```python f = torch.exp(d.view(3, -1)) ``` ```python f / torch.sum(f, dim=1).unsqueeze(1) ``` tensor([[0.2862, 0.7138], [0.3881, 0.6119], [0.9419, 0.0581]]) ```python g / h ``` tensor([[0.2862, 0.7138], [0.3881, 0.6119], [0.9419, 0.0581]]) ```python for i in range(1, 5): print(i) ``` 1 2 3 4 ```python mu = torch.randn((3, 1)) mu = mu.repeat(2, 
1) mu ``` tensor([[0.5439], [0.0771], [0.2039], [0.5439], [0.0771], [0.2039]]) ```python mu.shape ``` torch.Size([6, 1]) ```python c = mu.view(3, -1) c.shape ``` torch.Size([3, 2]) ```python a = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}) ``` ```python (a['b'].value_counts() / a.shape[0]).values ``` array([0.33333333, 0.33333333, 0.33333333]) ```python a['a'] = 1 a ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>a</th> <th>b</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>1</td> <td>4</td> </tr> <tr> <th>1</th> <td>1</td> <td>5</td> </tr> <tr> <th>2</th> <td>1</td> <td>6</td> </tr> </tbody> </table> </div> ```python torch.tensor(a['a'].values) ``` tensor([1, 2, 3]) ```python c = torch.tensor([1, 1, 1]) d = torch.tensor([2, 2, 2]) e = torch.cat((c, d), dim=0) c[:] = 3 e c ``` tensor([3, 3, 3]) ```python c ``` 2 ```python x = torch.randn((3, 3)) ``` ```python x1 = x.unsqueeze(dim=1).expand_as(mu) x1.size() ``` torch.Size([3, 2, 3]) ```python ``` tensor([[[-0.4188, -0.3362, -0.3925], [-0.4188, -0.3362, -0.3925]], [[ 1.1694, 0.1003, 0.2921], [ 1.1694, 0.1003, 0.2921]], [[-1.4250, -0.2158, 1.0102], [-1.4250, -0.2158, 1.0102]]])
bbf3e757c51e00e0934fab77ac7fc8561f32835e
60,411
ipynb
Jupyter Notebook
trivial.ipynb
DataCanvasIO/YLearn
d65b5afb83deed154c710de9096317165d95014a
[ "Apache-2.0" ]
3
2022-03-28T07:41:28.000Z
2022-03-29T06:24:52.000Z
trivial.ipynb
DataCanvasIO/YLearn
d65b5afb83deed154c710de9096317165d95014a
[ "Apache-2.0" ]
null
null
null
trivial.ipynb
DataCanvasIO/YLearn
d65b5afb83deed154c710de9096317165d95014a
[ "Apache-2.0" ]
null
null
null
21.575357
1,057
0.427091
true
8,906
Qwen/Qwen-72B
1. YES 2. YES
0.894789
0.812867
0.727345
__label__krc_Cyrl
0.380402
0.528198
$$ \newcommand{\pd}[2]{ \frac{\partial #1}{\partial #2} } \newcommand{\od}[2]{\frac{d #1}{d #2}} \newcommand{\td}[2]{\frac{D #1}{D #2}} \newcommand{\ab}[1]{\langle #1 \rangle} \newcommand{\bss}[1]{\textsf{\textbf{#1}}} \newcommand{\ol}{\overline} \newcommand{\olx}[1]{\overline{#1}^x} $$ # Homework 4: Equation Derivations In this homework, we will simply re-derive equations that we have already derived in class. The point of doing it yourself is to make sure you understand each step of the derivation. The starting point for all derivations is the Boussinesq horizontal equations of motion at low Rossby number: $$ \begin{align} - f v &= -\pd{\phi}{x} + \nu \nabla^2 u \\ f u &= -\pd{\phi}{y} + \nu \nabla^2 v \end{align} $$ Plus the hydrostatic balance $$ \pd{\phi}{z} = b $$ and the Boussinesq continuity equation: $$ \pd{u}{x} + \pd{v}{y} + \pd{w}{z} = 0 \ . $$ Make sure your notation is precise enough to distinguish between - Ordinary vs. partial derivatives - Vectors vs. scalars - Lower case and capital letters For every problem, write _very neatly_ and _explain each step_. Each problem is worth 25%. ### 1) Derive the Thermal Wind Balance $$ \begin{align} - f \pd{v_g}{z} &= -\pd{b}{x}\\ f \pd{u_g}{z} &= -\pd{b}{y} \ . \end{align} $$ and explain what it means. ### 2) Derive the formula for Ekman pumping $$ w_{Ek} = \pd{}{x}\left( \frac{\tau_y}{\rho_0 f} \right) - \pd{}{y}\left( \frac{\tau_x}{\rho_0 f} \right) \ . $$ ### 3) Derive the formula for Sverdrup transport $$ \beta V_{Sv} = \frac{1}{\rho_0} \mbox{curl} ( \boldsymbol{\tau}_{surf}) \ . $$ Explain in words what the equation means. ### 4) Derive the formula for the Stommel-Arons deep flow \begin{equation*} V=\frac{f Q_s}{\beta A} \end{equation*} Explain what $Q_s$ and $A$ represent, and explain what the equation means in words.
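[Hint for problem 1: neglect the viscous terms, differentiate the geostrophic balance with respect to $z$, and substitute the hydrostatic balance; for the first component, $$ -f \pd{v_g}{z} = -\pd{}{z}\pd{\phi}{x} = -\pd{}{x}\pd{\phi}{z} = -\pd{b}{x} \ . $$ The second component follows in the same way.]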
da25fa4ed6cf3a8c916d7ef84de0183c5b7d4b92
3,285
ipynb
Jupyter Notebook
homework-2018/po-hw-4.ipynb
dgumustel/intro_to_physical_oceanography
45d21e1642038bfc93ebe645f16f55dba72901db
[ "MIT" ]
82
2015-09-18T02:01:53.000Z
2022-02-28T01:43:48.000Z
homework-2018/po-hw-4.ipynb
Sumanshekhar17/intro_to_physical_oceanography
e624bbbd6d67235b3fcf2764e256dd2ed024481e
[ "MIT" ]
5
2015-09-19T01:35:28.000Z
2022-02-28T17:23:53.000Z
homework-2018/po-hw-4.ipynb
Sumanshekhar17/intro_to_physical_oceanography
e624bbbd6d67235b3fcf2764e256dd2ed024481e
[ "MIT" ]
51
2015-09-12T00:30:33.000Z
2022-02-08T19:37:51.000Z
27.838983
191
0.505023
true
647
Qwen/Qwen-72B
1. YES 2. YES
0.907312
0.847968
0.769372
__label__eng_Latn
0.938557
0.62584
# Recurrent networks - Once we learn the general equation and properties of the line / hyperplane we can turn around and perform various learning tasks - like regression and classification - using it as a model. - By the same token now that we have a basic understanding of how to model general ordered data, we can use those models to perform various learning tasks. - In this Section we use the generic variable order dynamic system introduced previously to perform simple learning tasks on time series. This is often referred to as Recurrent Neural Networks (or RNNs for short). RNNs can also be used for more sophisticated time series problems - like machine translation - which we discuss in the next Section. Activate next cell to toggle code on and off ```python from IPython.display import display from IPython.display import HTML import IPython.core.display as di # Example: di.display_html('<h3>%s:</h3>' % str, raw=True) # This line will hide code by default when the notebook is exported as HTML di.display_html('', raw=True) # This line will add a button to toggle visibility of code blocks, for use with the HTML export version di.display_html('''<button onclick="jQuery('.input_area').toggle(); jQuery('.prompt').toggle();">Toggle code</button>''', raw=True) ``` <button onclick="jQuery('.input_area').toggle(); jQuery('.prompt').toggle();">Toggle code</button> ```python ## This code cell will not be shown in the HTML version of this notebook # imports from custom library import sys sys.path.append('../../') # import custom lib from mlrefined_libraries import recurrent_library as reclib # import dataset path datapath = '../../mlrefined_datasets/recurrent_datasets/' # import autograd functionality to build functions properly for optimizers import autograd.numpy as np # this is needed to compensate for %matplotlib notebook's tendency to blow up images when plotted inline %matplotlib notebook from matplotlib import rcParams rcParams['figure.autolayout'] = True %load_ext autoreload %autoreload 2 ``` # Using unlimited memory dynamic systems for time series prediction - Recurrent Neural Networks (RNNs) = the use of [unlimited memory dynamic systems](https://jermwatt.github.io/control-notes/posts/dynamic_systems_unlimited_memory/dynamic_systems_unlimited_memory.html) for learning on time series - General formula for a simple unlimited memory system \begin{equation} h_p = f\left(h_{p-1},x_p \right). \end{equation} - If $f$ here is linear we have \begin{equation} h_p = w_0 + w_1h_{p-1} + w_2x_p.
\end{equation} - To use such a model for any sort of prediction task we <ol> <li>choose a function $f$</li> <li> train your favorite standard ML scheme on input/output pairs</li> <li> to predict the next future value feed in *only the most recent value* of the series into the trained model</li> </ol> - Making a training set of input/output data for a time series is done by feeding in the entire series, and "windowing" of ever-increasing size will happen naturally - A common sort of model consisting of a simple unlimited memory dynamic system - often called a *Recurrent Neural Network* and the function $f$ usually chosen as a standard activation function - adds a final linear combination to the final output of the system \begin{equation} \text{model}\left(x_p,\Theta\right) = v_0 + h_pv_1 \end{equation} - Here the parameter set $\Theta$ contains $v_0, v_1$ the weights of the final linear combination, as well as any parameters internal to $h_p = f\left(h_{p-1},x_p \right)$ - For a regression problem this model generates our predicted output - Denote by $$ \,\,\,\,\, \,\,\, \,\,\, \,\,\, \,\,\, \,\,\, \,\,\, \,\, \hat{y}_p:= \text{model}\left(x_p,\Theta\right) \\ y_p:= x_p \\ \,\,\,\,\, \,\,\,\,\, \,\,\,\,\, \,\,\,\,\, \,\, \ell\left(a,b\right) := \left(a-b \right)^2 $$ - We have a standard Least Squares regression \begin{equation} \frac{1}{P-D}\sum_{p=D}^{P}\ell\left(\hat{y}_p,y_p\right) \end{equation} - When used for time series prediction we hope hidden states learn interesting and useful *summary statistics* and important *long-term dependencies* - of our entire dataset - [Deeper systems with additional hidden layers](https://jermwatt.github.io/control-notes/posts/dynamic_systems_unlimited_memory/dynamic_systems_unlimited_memory.html#deeper-systems) can learn higher order summary statistics - But they present a trade-off: the recursion from $x_p$ all the way back to the origin at $x_1$ can be very long, and it must be performed during each training update - This can slow down optimization, and can naturally cause 'exploding/vanishing' gradient problems when using gradient descent - Lots of tricks exist - from careful selection of the activation function, to carefully engineered optimization algorithms, to the construction of more complicated activations like the Long Short Term Memory function - However while *training* is tough, evaluation of trained models is fast (just plug in the most recent value of the series) #### <span style="color:#a50e3e;">Example 1.
</span> Time series prediction using an RNN - Below we load in a time series dataset we will perform prediction on - Note: we will first ["contrast normalize"](https://jermwatt.github.io/control-notes/posts/zca_sphereing/ZCA_Sphereing.html) this series - that is we mean center and re-scale by its standard deviation ```python ## This code cell will not be shown in the HTML version of this notebook # load in driver csvname = datapath + 'normalized_apple_prices.csv' x_series = np.loadtxt(csvname,delimiter = ',')[np.newaxis,:] # standard normalize input series normalizer,inverse_normalizer = reclib.time_series_lib.normalizers.standard(x_series) x_series = normalizer(x_series) # Plot the standard normalized series reclib.time_series_lib.fixed_order_plotters.plot_sequences(x_series) ``` <IPython.core.display.Javascript object> - We perform training / validate our model precisely as we did previously with the autoregressive process - Here however, we will be able to learn quite a good model using only a single input at-a-time from the true series - Plotted below is the training fit (in blue) and validation fit (in yellow) resulting from a run of [derivative free/zero order optimization](https://jermwatt.github.io/machine_learning_refined/notes/2_Zero_order_methods/2_6_Coordinate.html) employing the Least Squares cost function ```python ## This code cell will not be shown in the HTML version of this notebook def step(h_t_prev,x_t_prev,w): return w[0] + w[1]*h_t_prev + w[2]*x_t_prev # exponential average function def model(x,w): # set initial conditions of h to values of x h = [x[:,0]] # range over x and create h for t in range(0,np.size(x)-1): # get current point and prior hidden state h_t_prev = h[-1] x_t_prev = x[:,t] # make next element and store h_t = step(h_t_prev,x_t_prev,w) h.append(h_t) return np.array(h).T # an implementation of the least squares cost function for linear regression def least_squares(w,x,y): # compute cost over batch cost = np.sum((model(x,w) - y)**2) return cost/float(np.size(y)) ``` ```python # This code cell will not be shown in the HTML version of this notebook # initialize with input/output data mylib1 = reclib.time_series_lib.rnn_lib.super_setup.Setup(x_series,x_series) # perform preprocessing step(s) - especially input normalization mylib1.preprocessing_steps(normalizer = 'standard') # split into training and validation sets mylib1.make_train_val_split(train_portion = 0.6) # choose cost mylib1.choose_cost(name = 'least_squares',model = model,step = step) # fit an optimization w = 0.1*np.random.randn(3,1) mylib1.fit(max_its = 50,alpha_choice = 'diminishing',optimizer = 'zero_order',w_init = w,verbose = False) # show cost function history mylib1.show_histories() ``` <IPython.core.display.Javascript object> ```python ## This code cell will not be shown in the HTML version of this notebook # Plot the standard normalized series and its training fit reclib.time_series_lib.variable_order_plotters.plot_train_val_sequences(mylib1) ``` <IPython.core.display.Javascript object> - As with the autoregressive approach, here we too can use our trained model as a generator ```python ## This code cell will not be shown in the HTML version of this notebook # Plot the standard normalized series reclib.time_series_lib.variable_order_plotters.plot_train_gen_sequences(mylib1) ``` <IPython.core.display.Javascript object>
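The generator plot above is produced by the library's plotting routine; a minimal sketch of the recursion behind it (reusing `step` and `x_series` from the cells above, and assuming a trained weight vector `w` of shape (3, 1) has been extracted from the fitted model - the exact attribute name is not shown in these cells) would look like:

```python
import autograd.numpy as np

# Warm up the recurrence on the observed series, then keep stepping while feeding
# each prediction back in as the next input - the "generator" use described above.
def generate_future(x_series, w, num_steps):
    h = x_series[0, 0]                       # initial hidden state, as in model()
    for t in range(x_series.shape[1] - 1):   # run over the observed data
        h = step(h, x_series[0, t], w)
    x_prev, preds = x_series[0, -1], []
    for _ in range(num_steps):               # recurse on the model's own output
        h = step(h, x_prev, w)
        preds.append(np.ravel(h)[0])
        x_prev = h
    return np.array(preds)

# e.g. future = generate_future(x_series, w_trained, num_steps=50)
```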
cdd2259ec92ab12e040cf544ab8063d871ce3717
481,665
ipynb
Jupyter Notebook
presentations/recurrent_networks/recurrent_networks.ipynb
jermwatt/blog
3dd0d464d7a17c1c7a6508f714edc938dc3c03e9
[ "MIT" ]
14
2019-04-17T23:55:14.000Z
2021-08-08T02:18:49.000Z
presentations/recurrent_networks/recurrent_networks.ipynb
jermwatt/blog
3dd0d464d7a17c1c7a6508f714edc938dc3c03e9
[ "MIT" ]
null
null
null
presentations/recurrent_networks/recurrent_networks.ipynb
jermwatt/blog
3dd0d464d7a17c1c7a6508f714edc938dc3c03e9
[ "MIT" ]
3
2019-04-10T22:46:27.000Z
2020-11-06T09:16:30.000Z
132.252883
110,695
0.808236
true
2,166
Qwen/Qwen-72B
1. YES 2. YES
0.637031
0.787931
0.501936
__label__eng_Latn
0.972702
0.004496
# ACSE-3 (Numerical Methods) <a class="tocSkip"> # Coursework 2 <a class="tocSkip"> ## Coursework 2A - Advection-diffusion of a Gaussian This question involves the solution of unsteady advection-diffusion in one spatial dimension using central finite difference schemes in space and explicit and implicit schemes in time. - i. Write some code to implement the FTCS, BTCS, and Crank-Nicolson schemes (described in lecture 8) for advection-diffusion on a periodic domain. Verify the correctness of your implementations (you could use the following exact solution or any other way you see fit). In the lecture 8 homework question 2 "An analytical solution to advection-diffusion" I explained how an appropriate Gaussian function can be considered as an exact solution to the advection-diffusion problem as long as we initialise our solution with the Gaussian evaluated at some $t>0$ (since in the limit of $t=0$ the solution is a Dirac-delta function). We can then use the same function evaluated at the appropriate time as an exact solution to compare our numerical method against. - ii. Solve this problem using a Peclet number of 200, in a periodic domain of length 1, and advect long enough so that (given the periodic domain) the exact solution arrives back at the starting location. - iii. What theoretical orders of accuracy do you expect for your three solvers with respect to the mesh spacing and the time step (i.e. assuming the error can be decomposed in the following way: $\,\text{error} = \mathcal{O}(\Delta x^p) + \mathcal{O}(\Delta t^q)$, what do you expect $p$ and $q$ to be in each case?) - iv. Confirm these orders of accuracy through convergence analyses of your three solvers by appropriately varying the mesh spacing and/or the time step in a series of experiments. (Note that you will need to deal with the periodic domain in your definition of the exact solution.) ## Coursework 2B - Variable mesh resolution We said in lecture 7 that there were two approaches to achieve non-uniform or adaptive mesh resolution. In the lecture we went through the process of transforming and solving a problem in a computational domain. We applied our methodology to a BVP model problem of steady-state advection-diffusion with Dirichlet boundary conditions which led to a boundary layer at the right hand end of the domain (recall that this was a good test case as the problem has an analytical solution). In this question we will consider the same model problem with the same parameters, i.e. a Peclet number of 30 and left and right boundary condition values of 0 and 1. This coursework question is about the other approach where we prescribe a non-uniform mesh in physical space and update our finite difference solver to work with non-uniform resolution. Your tasks are to: - i. perform this generalisation (i.e. you need to update our discretisation codes so that our finite difference formulae make use of the local mesh spacing $\Delta x_i:=x_{i+1}-x_i$, rather than assume it is a constant) [Hint: don't forget to also update how you deal with the enforcement of BCs which for us here make use of ghost nodes. As the first node inside the domain changes its location, you either need to also update the ghost node location, or change the discretisation of the boundary condition so that it accounts for (e.g.) `x[0]` and `x[1]` being different distances from where our boundary is located.] - ii. 
verify your code against (a) the original uniform mesh version, and (b) with a convergence analysis against the known analytical solution - iii. generate a non-uniform mesh such that the arclength of the exact solution (i.e. in this part of the question you may assume knowledge of the exact solution) is approximately equal between mesh points, and compare the numerical solution obtained using this mesh and your new approach against uniform and optimal stretched meshes (i.e. using our function `BVP_AD_central_stretch_mesh`) in a convergence analysis. - iv. investigate an approach where you start from a uniform mesh, solve the problem, subdivide certain computational cells if local steepness of the most recently computed numerical solution across that cell is higher than a user-defined tolerance, and then iterate by solving the problem on this new mesh and again subdividing. Can you come up with an approach that is competitive (in terms of a convergence plot) with the above approaches which were all able to make use of a priori knowledge of the solution behaviour? [Hint: be careful to allow refinement close to the boundary, but not outside of the physical domain, i.e. you only want a single ghost node outside the physical domain at each boundary] ```python # Import libraries. import numpy as np import matplotlib.pyplot as plt import scipy.optimize as sop from scipy import interpolate ``` # Coursework 2A - Advection-diffusion of a Gaussian The goal is to implement three different solutions, FTCS, BTCS and Crank-Nicolson, for the time-dependent advection-diffusion equation in one-dimension: <br> $$\frac{\partial c}{\partial t} + U\frac{\partial c}{\partial x} = \kappa\frac{\partial^2 c}{\partial x^2}.$$ <br> To solve this PDE it is possible to adopt the method of lines. It consists in decoupling the space and time discretisation. The space discretisation is performed first, in this case using a finite difference method. This reduces the problem to a system of ODEs, which can be solved using a time stepping method like a linear multi-step method. The three schemes involved in the task employ the same space discretisation, central difference, and differ for what concern the time stepping. <br> <br> ## Space discretisation <br> The discretisation in space is achieved through a central difference. Considering a three point symmetric stencil, we can approximate a second derivative as: <br> \begin{align*} f''(x_i) & \approx g(\; f_{i-1}, \; f_{i}, \; f_{i+1}\; )\\[5pt] & = a_{-1}\;f_{i-1}+ a_i\; f_{i} + a_{i+1}\; f_{i+1}. \end{align*} <br> The coefficients $a$ are computed with the method of undetermined coefficients, which yields: <br> $$a_{-1}=\frac{1}{h^2},\;\;\;\;\;\; a_0 = -\frac{2}{h^2},\;\;\;\;\;\; a_1 = \frac{1}{h^2},$$ <br> Substituting into the previous equation, we have: <br> $$ f''_i \approx \frac{f_{i+1} - 2f_i + f_{i-1}}{h^2}. $$ <br> Such a discretisation achieves an accuracy of the second order and it is used to approximate the second derivative in the diffusion term: <br> $$ \left.\kappa\frac{\partial^2 c}{\partial x^2}\right|_{x_i} \approx \kappa\frac{c_{i+1} - 2c_i + c_{i-1}}{\Delta x^2}.$$ <br> In order to be coherent for what concern the accuracy, we use the same method for the approximation of the first derivative in the advection term: <br> $$ \left.U\frac{\partial c}{\partial x}\right|_{x_i} \approx U\frac{c_{i+1} - c_{i-1}}{2\Delta x}.$$ <br> ## Time discretisation ### FTCS Here we do time-stepping using the forward Euler method. 
The time derivative in the advection-diffusion equation is therefore approximated by: <br> $$ \left.\frac{\partial c}{\partial t}\right|_{t_i} \approx \frac{c^{t_i+\Delta t} - c^{t_i}}{\Delta t}.$$ <br> This time stepping method is accurate to the first order. ### BTCS The backward Euler method, also having a first order accuracy, is used in the discretisation: <br> $$ \left.\frac{\partial c}{\partial t}\right|_{t_i} \approx \frac{c^{t_i} - c^{t_i-\Delta t}}{\Delta t}.$$ <br> ### Crank-Nicolson The method uses the trapezoidal rule to discretise time. It is accurate to the second order. <br> $$ \frac{c^{t_i+\Delta t}-c^{t_i}}{\Delta t} \approx \frac{1}{2}\;\left.\frac{\partial c}{\partial t}\right|_{t_i} + \frac{1}{2}\;\left.\frac{\partial c}{\partial t}\right|_{t_i+\Delta t}\;,$$ <br> where the derivatives in $t_i$ and $t_i+\Delta t$ are approximated with the forward and backward Euler rules, respectively. ## Define functions we use for space discretisation ```python def adv_central_periodic(N): """ Space discretisation of the advection term. Assumes central difference on a symmetric stencil with periodic boundary conditions Parameters ---------- N : integer Number of nodes in the space mesh. Returns ------- Dx : numpy array NxN array, space discretisation of the advection term. """ Dx = 0.5*np.eye(N, k=1) - 0.5*np.eye(N, k=-1) Dx[0, -1] = -0.5 Dx[-1, 0] = 0.5 return Dx def diff_central_periodic(N): """ Space discretisation of the diffusion term. Assumes central difference on a symmetric stencil with periodic boundary conditions. Parameters ---------- N : integer Number of nodes in the space mesh. Returns ------- Dx : numpy array NxN array, space discretisation of the diffusion term. """ Dxx = np.eye(N, k=1) - 2*np.eye(N) + np.eye(N, k=-1) Dxx[0, -1] = 1 Dxx[-1, 0] = 1 return Dxx ``` ## Implement analytical solution ```python def adv_diff_analytical_solution(U, kappa, x0, x, t): """ Analtical solution of the advection-diffusion problem. Assumes a Dirac-delta function as initial condition(t=0). Parameters ---------- U : float Speed of advection. kappa : float Diffusivity. x0 : float Starting point (in space) of the solution. x : numpy vector Position of the space nodes. t : float Time at which the solution is evaluated. Returns ------- Function defining the analytical solution at time t and space given by the x vector. """ return np.exp( -((x-x0) - U*t)**2 / (4.*kappa*t)) / np.sqrt(4. * np.pi * kappa * t) ``` ## Implement the three solvers ```python def solver_FTCS(N, dt, t_start, t_end, U, kappa): """ Implement solution with: Space discretisation --> central difference Time discretisation --> forward Euler Parameters ---------- N : integer Number of nodes in space. dt : float Time step size. t_start : float Initial time of the simulation. t_end : float End time of the simulation. U : float Speed of advection. kappa : float Diffusivity. Returns ------- C_ft : numpy 1D array Values of the solution. x : numpy 1D array Space values where solution is evaluated. t : numpy 1D array Time values when solution is evaluated. """ # Spatial mesh. dx = L/N x = np.linspace(0, L, N) # Get the discretisation matrix. Dx = adv_central_periodic(N) Dxx = diff_central_periodic(N) A = -(U / dx) * Dx + (kappa / dx**2) * Dxx # Time vector. t = np.arange(t_start, t_end, dt) # Matrix to store the discretised solution. C_ft = np.empty((len(x), len(t))) # Define an initial condition. # # Gaussian shape. # C_ft[:, 0] = np.exp( - ((x-0.2)/0.05)**2 ) # Or get the analytical solution. 
C_ft[:,0] = adv_diff_analytical_solution(U, kappa, x0, x, t_start) # Get the solution at the values of time. I = np.eye(len(x)) for n in range(len(t)-1): C_ft[:, n+1] = np.matmul((I + A * dt), C_ft[:, n]) # # Save the FTCS solution. # np.save('C_ft',C_ft) return C_ft, x, t ``` ```python def solver_BTCS(N, dt, t_start, t_end, U, kappa): """ Implement solution with: Space discretisation --> central difference Time discretisation --> backward Euler Parameters ---------- N : integer Number of nodes in space. dt : float Time step size. t_start : float Initial time of the simulation. t_end : float End time of the simulation. U : float Speed of advection. kappa : float Diffusivity. Returns ------- C_bt : numpy 1D array Values of the solution. x : numpy 1D array Space values where solution is evaluated. t : numpy 1D array Time values when solution is evaluated. """ # Define the spatial mesh. dx = L / N x = np.linspace(0, L, N) # Get the discretisation matrix. Dx = adv_central_periodic(N) Dxx = diff_central_periodic(N) A = -(U / dx) * Dx + (kappa / dx**2) * Dxx # Get the time vector. t = np.arange(t_start, t_end, dt) # Matrix to store the discretised solution. C_bt = np.empty((len(x), len(t))) # Define an initial condition. # # Gaussian shape. # C_bt[:, 0] = np.exp( - ((x-0.2)/0.05)**2 ) # Or get the analytical solution. C_bt[:,0] = adv_diff_analytical_solution(U, kappa, x0, x, t_start) # Get the BTCS solution at the values of time. # Use optimize.root. Previous state as initial guess. for n in range(len(t)-1): def bf(x): """ Arrange the backward Euler formula as a root problem. """ bf = np.zeros_like(x) bf = C_n + (A*dt)@x - x return bf C_n = C_bt[:, n] sol = sop.root(bf, C_n) C_bt[:, n+1] = sol.x # Save the BTCS solution. # np.save('C_bt', C_bt) return C_bt, x, t ``` ```python def solver_CN(N, dt, t_start, t_end, U, kappa): """ Implement solution with: Space discretisation --> central difference Time discretisation --> trapezoidal rule Parameters ---------- N : integer Number of nodes in space. dt : float Time step size. t_start : float Initial time of the simulation. t_end : float End time of the simulation. U : float Speed of advection. kappa : float Diffusivity. Returns ------- C_cn : numpy 1D array Values of the solution. x : numpy 1D array Space values where solution is evaluated. t : numpy 1D array Time values when solution is evaluated. """ # Define the spatial mesh. dx = L / N x = np.linspace(0, L, N) # Get the discretisation matrix. Dx = adv_central_periodic(N) Dxx = diff_central_periodic(N) A = -(U / dx) * Dx + (kappa / dx**2) * Dxx # Get the time vector. t = np.arange(t_start, t_end, dt) # Matrix to store the discretised solution. C_cn = np.empty((len(x), len(t))) # Define an initial condition. # # Gaussian shape. # C_cn[:, 0] = np.exp( - ((x-0.2)/0.05)**2 ) # Or get the analytical solution. C_cn[:,0] = adv_diff_analytical_solution(U, kappa, x0, x, t_start) # # Get the Crank-Nicolson solution. # # Use optimize.root. Previous state as initial guess. for n in range(len(t)-1): def am1(x): """ Arrange the AM1 formula as a root problem. """ am1 = np.zeros_like(x) am1 = -x + C_n + ((.5*dt)*A)@x + ((.5*dt)*A)@C_n return am1 C_n = C_cn[:, n] sol = sop.root(am1, C_n) C_cn[:, n+1] = sol.x # # Save the Crank-Nicolson solution. # np.save('C_cn', C_cn) return C_cn, x, t ``` ## Run solvers and plot the solutions ```python # Set up figure. 
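# Note on the parameter choices used below: the FTCS run uses a finer mesh and
# time step (N = 500, dt = 0.001) than the implicit BTCS and Crank-Nicolson runs
# (N = 100, dt = 0.01). FTCS is only conditionally stable, so the diffusion
# number r = kappa*dt/dx**2 and the CFL number U*dt/dx (see the commented-out
# prints below) have to be kept small, whereas the implicit schemes do not have
# this restriction for the present linear problem.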
ax = np.array([0, 1, 2]) fig, ax = plt.subplots(1, 3, figsize=(19, 7)) for l in range(len(ax)): ax[l].set_xlabel('$x/L$', fontsize = 20) ax[l].set_ylabel('$C/C_E$', fontsize = 20) # ax[l].set_xlim(0, 1) # ax[l].set_ylim(-0.1, 1.1) U = .25 L = 1. Pe = 200. kappa = U*L/Pe N = 500 dx = L / N dt = 0.001 t_start = 0.25 t_end = t_start + 4 x0 = 0.2 # print('Pe_c: {0:.5f}'.format(U*dx/kappa)) # print('CFL: {0:.5f}'.format(U*dt/dx)) # print('r: {0:.5f}'.format(kappa*dt/(dx**2))) C_ft, x, t = solver_FTCS(N, dt, t_start, t_end, U, kappa) # FTCS. # C_ft = np.load('C_ft.npy') ax[0].set_title('Adv-Diff time-dependent FTCS solution', fontsize = 16) for i in np.arange(0, len(t), int(len(t)/5)): ax[0].plot(x, C_ft[:, i], '.-') U = .25 L = 1. Pe = 200. kappa = U*L/Pe N = 100 dx = L / N dt = 0.01 t_start = 0.25 t_end = t_start + 4 x0 = 0.2 C_bt, x, t = solver_BTCS(N, dt, t_start, t_end, U, kappa) # BTCS. # C_bt = np.load('C_bt.npy') ax[1].set_title('Adv-Diff time-dependent BTCS solution', fontsize = 16) for i in np.arange(0, len(t), int(len(t)/5)): ax[1].plot(x, C_bt[:, i], '.-') U = .25 L = 1. Pe = 200. kappa = U*L/Pe N = 100 dx = L / N dt = 0.01 t_start = 0.25 t_end = t_start + 4 x0 = 0.2 C_cn, x, t = solver_CN(N, dt, t_start, t_end, U, kappa) # Crank-Nicolson. # C_cn = np.load('C_cn.npy') ax[2].set_title('Adv-Diff time-dependent Crank-Nicolson solution', fontsize = 16) for i in np.arange(0, len(t), int(len(t)/5)): ax[2].plot(x, C_cn[:, i], '.-') ``` ## Implement a periodic analytical solution In order to model periodic boundaries for the analytical solution I use a simple approach in which, at each time step, the portion of solution that has left the domain on the right is copied on the left side of the domain. ```python # Get periodic analytical solution. def analytical_periodic(N, U, kappa, x0, x, t_start, t_end, dt): """ Implement the analytical solution taking into account the periodic boundaries. Parameters ---------- N : integer Number of nodes in space. U : float Speed of advection. kappa : float Diffusivity. x0 : float Initial point (in space) of the solution. t_start : float Initial time of the simulation. t_end : float End time of the simulation. dt : float Time step size. Returns ------- C_exa_1 : numpy 1D array Values of the analytical solution. """ t_vec = np.arange(t_start, t_end, dt) C_exa = np.empty((len(x), len(t_vec))) C_exa_1 = np.empty((len(x), len(t_vec))) C_exa[:, 0] = adv_diff_analytical_solution(U, kappa, x0, x, t_vec[0]) C_exa_1[:, 0] = adv_diff_analytical_solution(U, kappa, x0, x, t_vec[0]) for i in range(1, len(t_vec)): C_exa[:, i] = adv_diff_analytical_solution(U, kappa, x0, x, t_vec[i]) C_exa_1[i:, i] = C_exa[0:-i,i] C_exa_1[0:i, i] = C_exa[-i:,i] x0 = x0 - U*dt return C_exa_1 ``` ```python # parameters U = .25 L = 1. Pe = 200. kappa = U*L/Pe N = 100 dx = L / N dt = 0.01 t_start = 0.2 t_end = t_start + 4 t = np.arange(t_start, t_end, dt) x0 = 0.45 x_ini = np.linspace(0, L, N) # get periodic exact solution. 
dx_exa = U*dt N_exa = int(L/dx_exa) x_exa = np.linspace(0, L, N_exa) C_exa = analytical_periodic(N_exa, U, kappa, x0, x_exa, t_start, t_end, dt) fig, axs = plt.subplots(4, 1, figsize=(6, 10)) axs = axs.reshape(-1) fig.tight_layout(w_pad=4, h_pad=5) for i, n in enumerate(np.linspace(0, len(t)-1, 4, dtype = int)): axs[i].plot(x_exa, C_exa[:, n], 'k-', label='exact') axs[i].legend(loc = 'best') axs[i].set_xlabel('x') axs[i].set_ylabel('$C/C_E$') axs[i].set_title('Analytical solution at $t=${0:.3f}'.format(t[n]), fontsize = 16) ``` ## Comments on the figure The figure shows the analytical solution at different time steps considering periodic boundary conditions. ## Compare analytical and FTCS solution ```python # parameters U = .25 L = 1. Pe = 200. kappa = U*L/Pe N = 500 dx = L / N dt = 0.001 t_start = 0.05 t_end = t_start + 4 x0 = 0.5 # Plot parameters to check stability. print('Pe_c: {0:.5f}'.format(U*dx/kappa)) print('CFL: {0:.5f}'.format(U*dt/dx)) print('r: {0:.5f}'.format(kappa*dt/(dx**2))) # FTCS. C_ft, x, t = solver_FTCS(N, dt, t_start, t_end, U, kappa) # Get exact solution. dx_exa = U*dt N_exa = int(L/dx_exa) x_exa = np.linspace(0, L, N_exa) # C_exact = analytical_periodic(N, U, kappa, x0, x, t_start, t_end, dt) C_exact = analytical_periodic(N_exa, U, kappa, x0, x_exa, t_start, t_end, dt) fig, axs = plt.subplots(4, 1, figsize=(6, 10)) axs = axs.reshape(-1) fig.tight_layout(w_pad=4, h_pad=5) for i, n in enumerate(np.linspace(0, len(t)-1, 4, dtype=int)): # axs[i].plot(x, C_exact[:, n], 'k-', label='exact') axs[i].plot(x_exa, C_exact[:, n], 'k-', label='exact') axs[i].plot(x, C_ft[:, n], 'r-', label='FTCS') axs[i].legend(loc='best') axs[i].set_title('FTCS solution at $t=${0:.3f}'.format(t[n]), fontsize=16) ``` ## Compare analytical and BTCS solution ```python U = .25 L = 1. Pe = 200. kappa = U*L/Pe N = 200 dx = L / N dt = 0.001 t_start = 0.25 t_end = t_start + 4 x0 = 0.45 C, x, t = solver_BTCS(N, dt, t_start, t_end, U, kappa) # Get exact solution. dx_exa = U*dt N_exa = int(L/dx_exa) x_exa = np.linspace(0, L, N_exa) C_exact = analytical_periodic(N_exa, U, kappa, x0, x_exa, t_start, t_end, dt) fig, axs = plt.subplots(4, 1, figsize=(6, 10)) axs = axs.reshape(-1) fig.tight_layout(w_pad=4, h_pad=5) for i, n in enumerate(np.linspace(0, len(t)-1, 4, dtype=int)): axs[i].plot(x_exa, C_exact[:, n], 'k-', label='exact') axs[i].plot(x, C[:, n], 'r-', label='BTCS') axs[i].legend(loc='best') axs[i].set_title('BTCS solution at $t=${0:.3f}'.format(t[n]), fontsize=16) ``` ## Compare analytical and Crank-Nicolson solution ```python U = .25 L = 1. Pe = 200. kappa = U*L/Pe N = 200 dx = L / N dt = 0.01 t_start = 0.25 t_end = t_start + 4 x0 = 0.45 C, x, t = solver_CN(N, dt, t_start, t_end, U, kappa) # Get exact solution. dx_exa = U*dt N_exa = int(L/dx_exa) x_exa = np.linspace(0, L, N_exa) C_exact = analytical_periodic(N_exa, U, kappa, x0, x_exa, t_start, t_end, dt) fig, axs = plt.subplots(4, 1, figsize=(6, 10)) axs = axs.reshape(-1) fig.tight_layout(w_pad=4, h_pad=5) for i, n in enumerate(np.linspace(0, len(t)-1, 4, dtype=int)): axs[i].plot(x_exa, C_exact[:, n], 'k-', label='exact') axs[i].plot(x, C[:, n], 'r-', label='Crank-Nicolson') axs[i].legend(loc='best') axs[i].set_title('Crank-Nicolson solution at $t=${0:.3f}'.format(t[n]), fontsize=16) ``` ## Theoretical order of accuracy The three implemented schemes have the same space discretisation, a central difference on a symmetric stencil formed by three nodes. Such a method is accurate to the second order. 
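This can be verified by Taylor expanding the neighbouring values about $x_i$; for a uniform spacing $h$ one finds <br> $$ \frac{f_{i+1} - 2f_i + f_{i-1}}{h^2} = f''_i + \frac{h^2}{12} f^{(4)}_i + \mathcal{O}(h^4), \qquad \frac{f_{i+1} - f_{i-1}}{2h} = f'_i + \frac{h^2}{6} f^{(3)}_i + \mathcal{O}(h^4), $$ <br> so the leading truncation error of both the diffusion and the advection stencils scales as $h^2$.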
The methods FTCS and BTCS employ forward Euler and backward Euler, respectively, to perform the time discretisation. Both backward and forward Euler are first order accurate.<br> Assuming we can decouple the error in two terms, one proportional to the space step size, and the other one proportional to the time step size, the order of accuracy of FTCS and BTCS con be defined as: <br> $$\text{error} = \mathcal{O}(\Delta x^2) + \mathcal{O}(\Delta t).$$ <br> Considering that $\Delta x$ and $\Delta t$ are usually much lower than one, the error will be dominated by the time step size. FTCS and BTCS are therefore expected to be first order accurate.<br> In the Crank-Nicolson method, central difference is again used for the space discretisation, and the trapezoidal scheme is used in the time discretisation. This method is therefore expected to be second order accurate both in space and in time. Using the previous equation and substituting the proper orders, we get: <br> $$\text{error} = \mathcal{O}(\Delta x^2) + \mathcal{O}(\Delta t^2).$$ <br> ## Convergence analysis of the FTCS method ```python U = .25 L = 1. Pe = 200. kappa = U*L/Pe N = 100 dt = 0.004 t_start = 0.2 t_end = t_start + 4 x0 = 0.45 n_ite = 4 err_all = [] N_all = [] dt_all = [] for i in range(n_ite): C, x, t = solver_FTCS(N, dt, t_start, t_end, U, kappa) dx_exa = U*dt N_exa = int(L/dx_exa) x_exa = np.linspace(0, L, N_exa) C_exa = analytical_periodic(N_exa, U, kappa, x0, x_exa, t_start, t_end, dt) # interpolate back to initial mesh c_exa = np.empty((len(x_exa))) c_exa = C_exa[:, -1] f = interpolate.interp1d(x_exa, c_exa) c_exa = f(x) # Error: root mean square of the errors. err = np.sqrt(np.mean( (C[:, -1] - c_exa)**2 )) err_all.append(err) N_all.append(N) dt_all.append(dt) N = N*2 dt = dt/2 fig = plt.figure(figsize = (8, 8)) ax1 = plt.subplot(111) ax1.loglog(dt_all, err_all, 'bs', label = 'FTCS', markersize = 8) # Get a linear fit to the errors using numpy.polyfit. fit = np.polyfit(np.log(dt_all), np.log(err_all), 1) ax1.loglog(dt_all, np.exp(fit[1]) * dt_all**(fit[0]), 'b-', label = 'slope: {:.2f}'.format(fit[0])) ax1.legend(loc = 'best') ax1.set_xlabel('Delta t', fontsize = 16) ax1.set_ylabel('Error at final time', fontsize = 16) ax1.set_title('Convergence analysis for the FTCS method', fontsize = 16) plt.show() ``` ## Comments on the figure As expected from theoretical considerations, the error is dominated by its time component and the method is therefore first order accurate. ## Convergence analysis of the BTCS method ```python # Parameters U = .25 L = 1. Pe = 200. kappa = U*L/Pe N = 100 dt = 0.01 t_start = 0.2 t_end = t_start + 4 x0 = 0.25 n_ite = 4 err_all = [] N_all = [] dt_all = [] for i in range(n_ite): C, x, t = solver_BTCS(N, dt, t_start, t_end, U, kappa) dx_exa = U*dt N_exa = int(L/dx_exa) x_exa = np.linspace(0, L, N_exa) C_exa = analytical_periodic(N_exa, U, kappa, x0, x_exa, t_start, t_end, dt) # interpolate back to initial mesh c_exa = np.empty((len(x_exa))) c_exa = C_exa[:, -1] f = interpolate.interp1d(x_exa, c_exa) c_exa = f(x) # # Error: root mean square of the errors. err = np.sqrt(np.mean( (C[:, -1] - c_exa)**2 )) err_all.append(err) N_all.append(N) dt_all.append(dt) N = N*2 dt = dt/2 fig = plt.figure(figsize = (8, 8)) ax1 = plt.subplot(111) ax1.loglog(dt_all, err_all, 'bs', label = 'BTCS', markersize = 8) # Get a linear fit to the errors using numpy.polyfit. 
fit = np.polyfit(np.log(dt_all[:]), np.log(err_all[:]), 1) ax1.loglog(dt_all, np.exp(fit[1]) * dt_all**(fit[0]), 'b-', label = 'slope: {:.2f}'.format(fit[0])) ax1.legend(loc = 'best') ax1.set_xlabel('Delta t', fontsize = 16) ax1.set_ylabel('Error at final time', fontsize = 16) ax1.set_title('Convergence analysis for the BTCS method', fontsize = 16) plt.show() ``` ## Comments on the figure As for the FTCS method, even in this case the error is dominated by the time component and the method is first order accurate. ## Convergence analysis for the Crank-Nicolson method ```python U = .25 L = 1. Pe = 200. kappa = U*L/Pe N = 25 dt = 0.04 t_start = 0.25 t_end = t_start + 4 x0 = 0.45 n_ite = 4 err_all = [] N_all = [] dt_all = [] dx_all = [] for i in range(n_ite): C, x, t = solver_CN(N, dt, t_start, t_end, U, kappa) dx_exa = U*dt N_exa = int(L/dx_exa) x_exa = np.linspace(0, L, N_exa) C_exa = analytical_periodic(N_exa, U, kappa, x0, x_exa, t_start, t_end, dt) # interpolate back to initial mesh c_exa = np.empty((len(x_exa))) c_exa = C_exa[:, -1] f = interpolate.interp1d(x_exa, c_exa) c_exa = f(x) # Error: root mean square of the errors. err = np.sqrt(np.mean( (C[:, -1] - c_exa)**2 )) err_all.append(err) N_all.append(N) dt_all.append(dt) dx = L / N dx_all.append(dx) N = N*2 dt = dt/4 # Plot error as a function of dx. fig = plt.figure(figsize = (8, 8)) ax1 = plt.subplot(111) ax1.loglog(dx_all, err_all, 'bs', label = 'Crank-Nicolson', markersize = 8) # Get a linear fit to the errors using numpy.polyfit. fit = np.polyfit(np.log(dx_all), np.log(err_all), 1) ax1.loglog(dx_all, np.exp(fit[1]) * dx_all**(fit[0]), 'b-', label = 'slope: {:.2f}'.format(fit[0])) ax1.legend(loc = 'best') ax1.set_xlabel('Delta x', fontsize = 16) ax1.set_ylabel('Error at final time', fontsize = 16) ax1.set_title('Convergence analysis for the Crank-Nicolson method', fontsize = 16) plt.show() ``` ## Comments on the figure The slope of the linear fit is roughly equal to $2$, in accordance with the fact that the Crank-Nicolson method is second order accurate in both space and time. # Coursework 2B - Variable mesh resolution ## i. ## Assemble and solve the AD BVP problem on a general 1D mesh In lecture 7, we used the method of undetermined coefficients to perform a discretisation of the second derivative on a three-node symmetric stencil. Here I use the same method to obtain a discretisation on a general three-node stencil, without assuming it to be symmetric.<br> In the method of undetermined coefficients we approximate a certain derivative of a function through the values of the same function in neighboring locations (which will be the nodes of the stencil). The general formula is: <br> $$ f^{(n)}(x_i) \approx g(\ldots,\; f_{i-2}, \; f_{i-1}, \; f_{i}, \; f_{i+1}, \; f_{i+2}, \; \ldots). $$ <br> For a second order derivative and a three-node stencil, we have: <br> $$ f''(x_i) \approx g(\; f_{i-1}, \; f_{i}, \; f_{i+1}\; ). $$ <br> In a finite difference scheme, $g$ is a linear function, so we have: <br> $$ g(\; f_{i-1}, \; f_{i}, \; f_{i+1}\; )= a_{i-1}\;f_{i-1}+ a_i\; f_{i} + a_{i+1}\; f_{i+1}. $$ <br> We need to find the coefficients $a$ considering a general stencil where the distance between nodes $i-1$ and $i$, and the distance between nodes $i$ and $i+1$, might not be the same.
To find the coefficents we start by defining $f_{i-1}$ and $f_{i+1}$ using Taylor series: <br> \begin{align} f_{i-1} &= f_i - h_j f'_i + \frac{h_j^2}{2} f''_{i} + \mathcal{O}(h_j^3),\\[10pt] f_{i+1} &= f_i + h_{j+1} f'_i + \frac{h_{j+1}^2}{2} f''_{i} + \mathcal{O}(h_{j+1}^3), \end{align} <br> substituting into the formula given for $g$ and collecting terms we get: <br> \begin{align*} f''_i & = (a_{i-1} + a_i + a_{i+1})f_i + (a_{i+1}h_{j+1} - a_{i-1}h_j)f'_i + (a_{i+1}\frac{h_{j+1}^2}{2} + a_{i-1}\frac{h_{j}^2}{2})\;f''_i + \mathcal{O}(h^3). \end{align*} <br> The RHS of the above equation properly approximate the second order derivative if: <br> $$ a_{i-1} + a_i + a_{i+1}=0, \;\;\;\;\;\; a_{i+1}h_{j+1} - a_{i-1}h_j = 0, \;\;\;\;\;\; a_{i+1}\frac{h_{j+1}^2}{2} + a_{i-1}\frac{h_{j}^2}{2} = 1. $$ <br> Writing the three equations above in a matrix system (see below), we can obtain the $a$ coeffients using the numpy linear algebra library. <br> $$ \begin{pmatrix} 1 & 1 & 1 \\ h_j & 0 & h_{j+1} \\ \frac{h_{j}^2}{2} & 0 & \frac{h_{j+1}^2}{2} \end{pmatrix} \begin{pmatrix} a_{i-1}\\ a_i\\ a_{i+1} \end{pmatrix} = \begin{pmatrix} 0\\ 0\\ 1 \end{pmatrix} $$ <br> <br> ## Dealing with Dirichlet boundary conditions Since the stencil might not be symmetric, when setting up the boundary conditions we need to take into account the possiblity that the distance between the left ghost node and the boundary is not equal to the distance between boundary and first interior node. Also, on the other side of the domain, the distance between the last interior node and the boundary might not be equal to the distance between the boundary and the right ghost node.<br> In the Dirichlet form, the boundary conditions for our problem are: <br> $$C(0,t) = 0, \;\;\;\; C(L,t) = C_E, $$ <br> where $L$ indicate the lenght of the domain, which starts at $x=0$. The conditions can be approximated with a linear interpolation. For the left boundary condition we have: <br> $$ \frac{C_{bl}-C_0}{\Delta x_{bl}} = \frac{C_1-C_0}{\Delta x_0} \;\; \implies C_{bl} = (1-\frac{\Delta x_{bl}}{\Delta x_0})\;C_0 + \frac{\Delta x_{bl}}{\Delta x_0}\;C_1, $$ <br> where $C_{bl}$ indicates the value of the function at the left border (in this case $= 0$), $\Delta x_{bl}$ is the distance between left ghost node ($C_0$) and the left boundary, $\Delta x_0$ is the distance between $C_0$ and the first interior node ($C_1$). Adopting the same approach for the right boundary condition we have: <br> $$ \frac{C_{br}-C_N}{\Delta x_{br}} = \frac{C_{N+1}-C_N}{\Delta x_j} \;\; \implies C_{br} = (1-\frac{\Delta x_{br}}{\Delta x_j})\;C_N + \frac{\Delta x_{br}}{\Delta x_j}\;C_{N+1}, $$ <br> with $C_{br}$ indicating the value of the function at the right border (in this case $= C_E$), $\Delta x_{br}$ the distance between the right most interior node ($C_N$) and the right boundary, $\Delta x_j$ the distance between $C_N$ and the right ghost node ($C_{N+1}$). ```python def BVP_AD_central(x, L, U, kappa, CE, N): """ Assemble and solve the AD BVP problem using central differences for both advection and diffusion on a stencil of general shape. Parameters ---------- x : numpy 1D array Space mesh. L : float Length of the domain. U : float Speed of advection. kappa : float Diffusivity. CE : float Right boundary condition. N : integer Number of nodes in space. Returns ------- C : numpy 1D array Values of the solution. """ dx_ind = np.linspace(0, len(x)-2, len(x)-1) # Get the delta x vector. 
dx_vec = np.empty((len(dx_ind))) for i in range(1, len(x)): dx_vec[i-1] = x[i] - x[i-1] # get the distance between left ghost node and left boundary lbx = -x[0] # get the distance between the node N and the right boundary rbx = dx_vec[-1] - (x[-1] - L) A = np.zeros((N + 2, N + 2)) b = np.zeros((N + 2)) # interior points for i in range(1, N + 1): LHS = np.array([ [1, 1, 1], [0, dx_vec[i], -dx_vec[i-1]], [0, (dx_vec[i]**2)/2, (dx_vec[i-1]**2)/2]]) RHS = np.array([0, 0, 1]) a_coe = np.linalg.solve(LHS, RHS) A[i, i - 1] = a_coe[2] * kappa + (1/(dx_vec[i]+dx_vec[i-1])) * U A[i, i] = a_coe[0] * kappa A[i, i + 1] = a_coe[1] * kappa - (1/(dx_vec[i]+dx_vec[i-1])) * U b[i] = 0 # BC1 A[0, 0] = 1 - lbx/dx_vec[0] A[0, 1] = lbx/dx_vec[0] b[0] = 0 # BC2 A[N + 1, N] = 1 - rbx/dx_vec[-1] A[N + 1, N + 1] = rbx/dx_vec[-1] b[N + 1] = CE # BC2 # invert matrix C = np.linalg.solve(A, b) return C ``` ## Original function, from lecture 7, for uniform mesh ```python def BVP_AD_central_unimesh(L, U, kappa, CE, N): """ Assemble and solve the AD BVP problem using central differences for both advection and diffusion on a symmetric stencil. Parameters ---------- L : float Length of the domain. U : float Speed of advection. kappa : float Diffusivity. CE : float Right boundary condition. N : integer Number of nodes in space. Returns ------- x : numpy 1D array Space mesh (uniform). C : numpy 1D array Values of the solution. """ dx = L / N x = np.linspace(-dx / 2, dx / 2 + L, N + 2) A = np.zeros((N + 2, N + 2)) b = np.zeros((N + 2)) # interior points r_diff = kappa / dx**2 r_adv = 0.5 * U / dx for i in range(1, N + 1): A[i, i - 1] = r_diff + r_adv A[i, i] = -2 * r_diff A[i, i + 1] = r_diff - r_adv b[i] = 0 # BC1 A[0, 0] = 0.5 A[0, 1] = 0.5 b[0] = 0 # BC2 A[N + 1, N] = 0.5 A[N + 1, N + 1] = 0.5 b[N + 1] = CE # BC2 # invert matrix C = np.linalg.solve(A, b) return x, C ``` ## ii. ## Comparison between original and generalised functions ```python # parameters L = 1. U = 1. Pe = 30. kappa = 1./Pe CE = 1. N = 10 # call function considering uniform mesh to get numerical solution x_uni, C_uni = BVP_AD_central_unimesh(L, U, kappa, CE, N) # define a non uniform mesh a = 5. x_var = L * np.arcsinh(np.sinh(a)*x_uni)/a # call the generalised function to get numerical solution C_var = BVP_AD_central(x_var, L, U, kappa, CE, N) # get the exact solution x_exa = np.linspace(0, L, 1000) C_exa = CE * (np.exp(Pe * x_exa / L) - 1) / (np.exp(Pe) - 1) # plot figure fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (15,7)) ax1.plot(x_exa, C_exa, '-k', label = 'Exact') ax1.plot(x_uni, C_uni, 'ob', label = 'Numerical, uniform mesh') ax1.plot(x_var, C_var, 'or', label = 'Numerical, non-uniform mesh') ax1.set_ylim(-0.25, 1.5) ax1.set_xlabel('$x/L$', fontsize = 16) ax1.set_ylabel('$C/C_E$', fontsize = 16) ax1.set_title('Adv-diff BVP, uniform vs. non-uniform mesh, N=10', fontsize = 16) ax1.legend(loc='best', fontsize = 16) ax1.set_xlim(0, 1); # test another N N = N*2 # call function considering uniform mesh to get numerical solution x_uni, C_uni = BVP_AD_central_unimesh(L, U, kappa, CE, N) # define a non uniform mesh a = 5. 
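# Note: this arcsinh map leaves the ordering of the nodes unchanged but makes the
# spacing much finer near x = L, where the exact solution has its boundary layer,
# and coarser near x = 0; increasing the stretching parameter a strengthens the
# clustering.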
x_var = L * np.arcsinh(np.sinh(a)*x_uni)/a # call the generalised function to get numerical solution C_var = BVP_AD_central(x_var, L, U, kappa, CE, N) ax2.plot(x_exa, C_exa, '-k', label = 'Exact') ax2.plot(x_uni, C_uni, 'ob', label = 'Numerical, uniform mesh') ax2.plot(x_var, C_var, 'or', label = 'Numerical, non-uniform mesh') ax2.set_ylim(-0.25, 1.5) ax2.set_xlabel('$x/L$', fontsize = 16) ax2.set_ylabel('$C/C_E$', fontsize = 16) ax2.set_title('Adv-diff BVP, uniform vs. non-uniform mesh, N=20', fontsize = 16) ax2.legend(loc='best', fontsize = 16) ax2.set_xlim(0, 1); ``` ## Comments on the figure The figure shows a comparison between the numerical solutions obtained with unform and non-uniform meshes. On the left panel the space mesh is defined by 10 points. The uniform mesh solution (blue dots) shows undershooting and is relatively far from the exact solution in correspondence of the boundary layer. Doubling the number of space nodes (right panel) the undershooting is decreased but the match with the exact solution is still poor at the boundary layer.<br> The numerical solution obtained with a non-uniform mesh (red dots), with the space step size decreasing where the steepness of the exact solution increases, shows a good match even at 10 space nodes. This implies that using an appropriate non-uniform mesh can be useful in order to obtain an accurate solution without significantly increasing the cost (in terms of time) of the simulation. ## Convergence analysis ```python # parameters L = 1. U = 1. Pe = 30. kappa = 1./Pe CE = 1. N = 100 n_ite = 5 dx_uni_all = [] dx_var1_all = [] err_uni_all = [] err_var1_all = [] for n in range(n_ite): # define a uniform mesh dx = L / N dx_uni = dx x = np.linspace(-dx / 2, dx / 2 + L, N + 2) # get the delta x vector dx_ind = np.linspace(0, len(x)-2, len(x)-1) dx_vec = np.empty((len(dx_ind))) for i in range(1, len(x)): dx_vec[i-1] = x[i] - x[i-1] # call function considering uniform mesh to get numerical solution x_uni, C_uni = BVP_AD_central_unimesh(L, U, kappa, CE, N) # get the exact solution C_exa = CE * (np.exp(Pe * x_uni / L) - 1) / (np.exp(Pe) - 1) # get an optimally stretched mesh a = 10. # optimal value for Pe = 30 x_var1 = L * np.arcsinh(np.sinh(a)*x)/a # call the generalised function to get numerical solution C_var1 = BVP_AD_central(x_var1, L, U, kappa, CE, N) # get the errors C_exa = CE * (np.exp(Pe * x_uni / L) - 1) / (np.exp(Pe) - 1) err_uni_all.append( np.sqrt(np.mean( (C_uni - C_exa)**2 )) ) C_exa = CE * (np.exp(Pe * x_var1 / L) - 1) / (np.exp(Pe) - 1) err_var1_all.append( np.sqrt(np.mean( (C_var1 - C_exa)**2 )) ) dx_uni_all.append(dx_uni) N = N*2 # plot the figure of the convergence analysis fig = plt.figure(figsize = (8, 8)) ax1 = plt.subplot(111) ax1.loglog(dx_uni_all, err_uni_all, 'bs', label = 'uniform mesh', markersize = 8) ax1.loglog(dx_uni_all, err_var1_all, 'rs', label = 'non-uniform mesh', markersize = 8) # Get a linear fit to the errors using numpy.polyfit. 
fit = np.polyfit(np.log(dx_uni_all), np.log(err_uni_all), 1) ax1.loglog(dx_uni_all, np.exp(fit[1]) * dx_uni_all**(fit[0]), 'b-', label = 'slope: {:.2f}'.format(fit[0])) fit = np.polyfit(np.log(dx_uni_all), np.log(err_var1_all), 1) ax1.loglog(dx_uni_all, np.exp(fit[1]) * dx_uni_all**(fit[0]), 'r-', label = 'slope: {:.2f}'.format(fit[0])) ax1.legend(loc = 'best', fontsize = 14) ax1.set_xlabel('Delta x', fontsize = 16) ax1.set_ylabel('Error', fontsize = 16) ax1.set_title('Convergence analysis', fontsize = 16) plt.show() ``` ## Comments on the figure The figure shows the convergence analysis for the solutions obtained considering uniform (blue squares) and non-uniform (red squares) meshes. The $\Delta x$ refers to the space step size of the uniform mesh. The order of convergence is the same for both solutions. The non-uniform mesh results in a significantly lower error. ## iii. ## Generate non-uniform mesh considering arclength of exact solution The goal is to generate a non-uniform mesh in a way that the arclength of the exact solution is approximately equally distributed among the mesh points. In order to do this I first compute the approximate length of the exact solution: <br> $$ al = \sum_{i=1}^{N} [(C_i - C_{i-1})^2 + (x_i - x_{i-1})^2]^\frac{1}{2}, $$ <br> where $N$ is the number of mesh points and $C_i$ are the values of the function at nodes $x_i$. Then I divide the total arclength equally among the space steps: <br> $$ a = \frac{al}{N+1} $$ <br> I initialise the first node of the new non-uniform mesh at the same position as the first node of the uniform mesh. The second node of the non-uniform mesh is initialised at distance $a$ from the first node. This is because at this node the derivative of the exact solution is very low, so $\Delta x \approx a$. In order to find the position of the following node I consider the equation: <br> $$ a^2 = (f(x_2) - f(x_1))^2 + (x_2 - x_{1})^2, $$ <br> which I solve for $x_2$ by treating it as a root-finding problem and employing the Newton method. I then move one step to the right at a time until the new positions of all the nodes have been found. ```python # parameters L = 1. U = 1. Pe = 30. kappa = 1./Pe CE = 1.
N = 20 # define a uniform mesh dx = L / N x = np.linspace(-dx / 2, dx / 2 + L, N + 2) # get the delta x vector dx_ind = np.linspace(0, len(x)-2, len(x)-1) dx_vec = np.empty((len(dx_ind))) for i in range(1, len(x)): dx_vec[i-1] = x[i] - x[i-1] # call function considering uniform mesh to get numerical solution x_uni, C_uni = BVP_AD_central_unimesh(L, U, kappa, CE, N) # construct the exact solution C_exa = CE * (np.exp(Pe * x_uni / L) - 1) / (np.exp(Pe) - 1) # get a non-uniform dx in a way that each dx account for the same arclength # of the exact solution # get total arclength al = 0 for i in range (1, len(x_uni)): al += np.sqrt( (C_exa[i]-C_exa[i-1])**2 + (x_uni[i]-x_uni[i-1])**2 ) a = al / (N+1) def f_exa(x): return CE * (np.exp(Pe * x / L) - 1) / (np.exp(Pe) - 1) x_var = np.empty((len(x_uni))) # initialize first and second node x_var[0] = x_uni[0] x_var[1] = x_uni[0] + a for i in range(2, len(x_var)): def f(x): return ( f_exa(x) - f_exa(x_var[i-1]) )**2 + (x - x_var[i-1])**2 - a**2 x_var[i] = sop.newton(f, x_var[i-1] + (x_var[i-1]-x_var[i-2])) # Eliminate nodes beyond the right ghost node if x_var[-1] > L: indexes = np.where(x_var>1) x_var = x_var[0: indexes[0][0]+1] # call the generalised function to get numerical solution C_var = BVP_AD_central(x_var, L, U, kappa, CE, len(x_var)-2) # get the exact solution with a fine sampling xf = np.linspace(0, L, 1000) C_exa = CE * (np.exp(Pe * xf / L) - 1) / (np.exp(Pe) - 1) fig = plt.figure(figsize = (8, 8)) ax1 = plt.subplot(111) ax1.plot(xf, C_exa, 'k', label = 'exact solution', markersize = 2) ax1.plot(x_uni, C_uni, 'bs', label = 'uniform mesh', markersize = 5) ax1.plot(x_var, C_var, 'rs', label = 'non-uniform mesh', markersize = 5) ax1.set_xlabel('x', fontsize = 16) ax1.set_ylabel('$C/C_E$', fontsize = 16) ax1.set_xlim(0, 1) ax1.set_ylim(-0.1, 1.25) ax1.legend(loc = 'best', fontsize = 14) plt.grid() plt.show() ``` ## Convergence analysis ```python # parameters L = 1. U = 1. Pe = 30. kappa = 1./Pe CE = 1. N = 100 n_ite = 5 dx_uni_all = [] dx_var_all = [] dx_var1_all = [] err_uni_all = [] err_var_all = [] err_var1_all = [] for n in range(n_ite): # define a uniform mesh dx = L / N dx_uni = dx x = np.linspace(-dx / 2, dx / 2 + L, N + 2) # get the delta x vector dx_ind = np.linspace(0, len(x)-2, len(x)-1) dx_vec = np.empty((len(dx_ind))) for i in range(1, len(x)): dx_vec[i-1] = x[i] - x[i-1] # call function considering uniform mesh to get numerical solution x_uni, C_uni = BVP_AD_central_unimesh(L, U, kappa, CE, N) # get the exact solution C_exa = CE * (np.exp(Pe * x_uni / L) - 1) / (np.exp(Pe) - 1) # get a non-uniform dx in a way that each dx account for the same arclength # of the exact solution # get total arclength al = 0 for i in range (1, len(x_uni)): al += np.sqrt( (C_exa[i]-C_exa[i-1])**2 + (x_uni[i]-x_uni[i-1])**2 ) a = al / (N+1) def f_exa(x): return CE * (np.exp(Pe * x / L) - 1) / (np.exp(Pe) - 1) x_var = np.empty((len(x_uni))) # initialize first and second node x_var[0] = x_uni[0] x_var[1] = x_uni[0] + a for i in range(2, len(x_var)): def f(x): return ( f_exa(x) - f_exa(x_var[i-1]) )**2 + (x - x_var[i-1])**2 - a**2 x_var[i] = sop.newton(f, x_var[i-1] + (x_var[i-1]-x_var[i-2])) # eliminate nodes beyond the right ghost node if x_var[-1] > L: indexes = np.where(x_var>1) x_var = x_var[0: indexes[0][0]+1] # call the generalised function to get numerical solution C_var = BVP_AD_central(x_var, L, U, kappa, CE, len(x_var)-2) # get an optimally stretched mesh a = 10. 
# optimal value for Pe = 30 x_var1 = L * np.arcsinh(np.sinh(a)*x)/a # call the generalised function to get numerical solution C_var1 = BVP_AD_central(x_var1, L, U, kappa, CE, N) # get the errors C_exa = CE * (np.exp(Pe * x_uni / L) - 1) / (np.exp(Pe) - 1) err_uni_all.append( np.sqrt(np.mean( (C_uni - C_exa)**2 )) ) C_exa = CE * (np.exp(Pe * x_var / L) - 1) / (np.exp(Pe) - 1) err_var_all.append( np.sqrt(np.mean( (C_var - C_exa)**2 )) ) C_exa = CE * (np.exp(Pe * x_var1 / L) - 1) / (np.exp(Pe) - 1) err_var1_all.append( np.sqrt(np.mean( (C_var1 - C_exa)**2 )) ) dx_uni_all.append(dx_uni) N = N*2 # plot the figure of the convergence analysis fig = plt.figure(figsize = (8, 8)) ax1 = plt.subplot(111) ax1.loglog(dx_uni_all, err_uni_all, 'bs', label = 'uniform mesh', markersize = 8) ax1.loglog(dx_uni_all, err_var_all, 'rs', label = 'non-uniform mesh', markersize = 8) ax1.loglog(dx_uni_all, err_var1_all, 'gs', label = 'optimally stretched mesh', markersize = 8) # Get a linear fit to the errors using numpy.polyfit. fit = np.polyfit(np.log(dx_uni_all), np.log(err_uni_all), 1) ax1.loglog(dx_uni_all, np.exp(fit[1]) * dx_uni_all**(fit[0]), 'b-', label = 'slope: {:.2f}'.format(fit[0])) fit = np.polyfit(np.log(dx_uni_all), np.log(err_var_all), 1) ax1.loglog(dx_uni_all, np.exp(fit[1]) * dx_uni_all**(fit[0]), 'r-', label = 'slope: {:.2f}'.format(fit[0])) fit = np.polyfit(np.log(dx_uni_all), np.log(err_var1_all), 1) ax1.loglog(dx_uni_all, np.exp(fit[1]) * dx_uni_all**(fit[0]), 'g-', label = 'slope: {:.2f}'.format(fit[0])) ax1.legend(loc = 'best', fontsize = 14) ax1.set_xlabel('Delta x', fontsize = 16) ax1.set_ylabel('Error', fontsize = 16) ax1.set_title('Convergence analysis', fontsize = 16) plt.show() ``` ## Comments on the figure The figure shows convergence analysis for solutions obtained with uniform mesh, non-uniform mesh generated taking into account length of the exact solution, and optimally stretched mesh. The $\Delta x$ refers to the space step size of the uniform mesh. The solution obtained with the mesh that takes into account length of the exact solution has an error comparable to the one obtained with the optimally stretched mesh. ## iv. ## Generate non-uniform mesh according to local steepness of numerical solution Here I generate a new non-uniform mesh, taking into account the local steepness of the numerial solution obtained on a uniform mesh. To do this, I calculate the derivative of the numerical solution in each space step. Where the derivative is higher then a certain tolerance value, the space step is equally subdivided in three new steps. ```python def remesh(x_uni, C_uni, tol): """ Get a uniform mesh and modify it accordind to the local steepness of the numerical solution. Parameters ---------- x_uni : numpy 1D array Uniform space mesh. C_uni : numpy 1D array Numerical solution obtain with the uniform mesh. tol : float If the local derivative of the numerical solution is higher than this, the space step is equally divided in three smaller steps. Returns ------- x_var : numpy 1D array Modified mesh. N_var : integer Number of space steps in the new mesh. 
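Notes
-----
Cells where the measured steepness of the current numerical solution exceeds `tol` are split into three equal sub-cells; the problem is then re-solved on the refined mesh and the check is repeated, until no cell exceeds the tolerance (or no refinement is needed at all).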
""" # compute derivative in between each node dcdx_vec = np.empty( len(x_uni)-1 ) for i in range(1, len(x_uni)): dcdx_vec[i-1] = ( C_uni[i]-C_uni[i-1] ) - ( x_uni[i]-x_uni[i-1] ) n_ite = 0 while np.max(dcdx_vec) > tol: n_ite += 1 # get a new mesh splitting in three intervals where derivative is higher then tolerance dx_vec = [] for i in range(1, len(x_uni)): dx = x_uni[i]-x_uni[i-1] dcdx_vec[i-1] = ( C_uni[i]-C_uni[i-1] ) - ( x_uni[i]-x_uni[i-1] ) if dcdx_vec[i-1] < tol: dx_vec.append(dx) else: dx_vec.append(dx*(1/3)) dx_vec.append(dx*(1/3)) dx_vec.append(dx*(1/3)) # build the new mesh dx_vec = np.array(dx_vec) x_var = np.empty( len(dx_vec)+1 ) x_var[0] = x_uni[0] for i in range( 1, len(x_var) ): x_var[i] = x_var[i-1] + dx_vec[i-1] # cut the new mesh in case there are more then two ghost nodes # cut the left side indexes = np.where(x_var<0) indexes = np.array(indexes[0][:]) if len(indexes)>1: x_var = x_var[indexes[-1]:] # cut the right side: indexes = np.where(x_var>1) indexes = np.array(indexes[0][:]) if len (indexes)>1: x_var = x_var[0:indexes[0]+1] # get the new number of nodes N_var = len(x_var)-2 # Compute the new solution C_var = BVP_AD_central(x_var, L, U, kappa, CE, N_var) # update the derivative vec dcdx_vec = np.empty( len(x_var)-1 ) for i in range(1, len(x_var)): dcdx_vec[i-1] = ( C_var[i]-C_var[i-1] ) - ( x_var[i]-x_var[i-1] ) x_uni = x_var C_uni = C_var if n_ite==0: x_var = x_uni N_var = len(x_var)-2 print('\n No re-meshing needed considering a tolerance = %f \n' % tol) else: print('\nRe-meshing the uniform mesh:') print('number of iterations = %i\n' % n_ite) return x_var, N_var # parameters L = 1. U = 1. Pe = 30. kappa = 1./Pe CE = 1. N = 10 tol = 0.5 # get numerical solution considering a uniform mesh x_uni, C_uni = BVP_AD_central_unimesh(L, U, kappa, CE, N) # get numerical solution re-meshing the uniform mesh x_var, N_var = remesh(x_uni, C_uni, tol) C_var = BVP_AD_central(x_var, L, U, kappa, CE, N_var) # plot fig = plt.figure(figsize = (8, 8)) ax1 = plt.subplot(111) ax1.plot(x_uni, C_uni, 'bs', label = 'uniform mesh', markersize = 8) ax1.plot(x_var, C_var, 'rs', label = 'non-uniform mesh', markersize = 8) ax1.set_xlabel('x', fontsize = 16) ax1.set_ylabel('$C/C_E$', fontsize = 16) ax1.legend(loc = 'best', fontsize = 16) ax1.set_xlim(0, 1) ax1.set_ylim(-0.1, 1.1) plt.grid() plt.show() ``` ```python # parameters L = 1. U = 1. Pe = 30. kappa = 1./Pe CE = 1. 
N = 50 n_ite = 6 dx_uni_all = [] dx_var_all = [] dx_var1_all = [] err_uni_all = [] err_var_all = [] err_var1_all = [] err_var2_all = [] for n in range(n_ite): # define a uniform mesh dx = L / N dx_uni = dx x = np.linspace(-dx / 2, dx / 2 + L, N + 2) # get the delta x vector dx_ind = np.linspace(0, len(x)-2, len(x)-1) dx_vec = np.empty((len(dx_ind))) for i in range(1, len(x)): dx_vec[i-1] = x[i] - x[i-1] # call function considering uniform mesh to get numerical solution x_uni, C_uni = BVP_AD_central_unimesh(L, U, kappa, CE, N) # get the exact solution C_exa = CE * (np.exp(Pe * x_uni / L) - 1) / (np.exp(Pe) - 1) # get a non-uniform dx in a way that each dx account for the same arclength # of the exact solution # get total arclength al = 0 for i in range (1, len(x_uni)): al += np.sqrt( (C_exa[i]-C_exa[i-1])**2 + (x_uni[i]-x_uni[i-1])**2 ) a = al / (N+1) def f_exa(x): return CE * (np.exp(Pe * x / L) - 1) / (np.exp(Pe) - 1) x_var = np.empty((len(x_uni))) # initialize first and second node x_var[0] = x_uni[0] x_var[1] = x_uni[0] + a for i in range(2, len(x_var)): def f(x): return ( f_exa(x) - f_exa(x_var[i-1]) )**2 + (x - x_var[i-1])**2 - a**2 x_var[i] = sop.newton(f, x_var[i-1] + (x_var[i-1]-x_var[i-2])) # eliminate nodes beyond the right ghost node if x_var[-1] > L: indexes = np.where(x_var>1) x_var = x_var[0: indexes[0][0]+1] # call the generalised function to get numerical solution C_var = BVP_AD_central(x_var, L, U, kappa, CE, len(x_var)-2) # get an optimally stretched mesh a = 10. # optimal value for Pe = 30 x_var1 = L * np.arcsinh(np.sinh(a)*x)/a # call the generalised function to get numerical solution C_var1 = BVP_AD_central(x_var1, L, U, kappa, CE, N) # get numerical solution re-meshing the uniform mesh tol = 0.01 x_var2, N_var2 = remesh(x_uni, C_uni, tol) C_var2 = BVP_AD_central(x_var2, L, U, kappa, CE, N_var2) # get the errors C_exa = CE * (np.exp(Pe * x_uni / L) - 1) / (np.exp(Pe) - 1) err_uni_all.append( np.sqrt(np.mean( (C_uni - C_exa)**2 )) ) C_exa = CE * (np.exp(Pe * x_var / L) - 1) / (np.exp(Pe) - 1) err_var_all.append( np.sqrt(np.mean( (C_var - C_exa)**2 )) ) C_exa = CE * (np.exp(Pe * x_var1 / L) - 1) / (np.exp(Pe) - 1) err_var1_all.append( np.sqrt(np.mean( (C_var1 - C_exa)**2 )) ) C_exa = CE * (np.exp(Pe * x_var2 / L) - 1) / (np.exp(Pe) - 1) err_var2_all.append( np.sqrt(np.mean( (C_var2 - C_exa)**2 )) ) dx_uni_all.append(dx_uni) N = N*2 ``` Re-meshing the uniform mesh: number of iterations = 4 Re-meshing the uniform mesh: number of iterations = 4 Re-meshing the uniform mesh: number of iterations = 3 Re-meshing the uniform mesh: number of iterations = 2 Re-meshing the uniform mesh: number of iterations = 2 Re-meshing the uniform mesh: number of iterations = 1 ```python # plot the figure of the convergence analysis fig = plt.figure(figsize = (10, 10)) ax1 = plt.subplot(111) ax1.loglog(dx_uni_all, err_uni_all, 'bs', label = 'uniform mesh', markersize = 8) ax1.loglog(dx_uni_all, err_var_all, 'rs', label = 'non-uniform mesh sol. 1', markersize = 8) ax1.loglog(dx_uni_all, err_var1_all, 'gs', label = 'optimally stretched mesh', markersize = 8) ax1.loglog(dx_uni_all, err_var2_all, 'ms', label = 'non-uniform mesh sol. 2', markersize = 8) # Get a linear fit to the errors using numpy.polyfit. 
fit = np.polyfit(np.log(dx_uni_all), np.log(err_uni_all), 1) ax1.loglog(dx_uni_all, np.exp(fit[1]) * dx_uni_all**(fit[0]), 'b-', label = 'slope: {:.2f}'.format(fit[0])) fit = np.polyfit(np.log(dx_uni_all), np.log(err_var_all), 1) ax1.loglog(dx_uni_all, np.exp(fit[1]) * dx_uni_all**(fit[0]), 'r-', label = 'slope: {:.2f}'.format(fit[0])) fit = np.polyfit(np.log(dx_uni_all), np.log(err_var1_all), 1) ax1.loglog(dx_uni_all, np.exp(fit[1]) * dx_uni_all**(fit[0]), 'g-', label = 'slope: {:.2f}'.format(fit[0])) fit = np.polyfit(np.log(dx_uni_all), np.log(err_var2_all), 1) ax1.loglog(dx_uni_all, np.exp(fit[1]) * dx_uni_all**(fit[0]), 'm-', label = 'slope: {:.2f}'.format(fit[0])) ax1.legend(loc = 'best', fontsize = 14) ax1.set_xlabel('Delta x', fontsize = 16) ax1.set_ylabel('Error', fontsize = 16) ax1.set_title('Convergence analysis', fontsize = 16) plt.show() ``` ## Comments on the figure The figure shows the convergence analysis for the various meshes developed in Coursework-2B. Non-uniform mesh solution 1 (red squares) refers to the errors of the solution obtained with the mesh that takes into account the arclength of the analytical solution. The errors of the solution obtained with the optimally stretched mesh are represented by the green squares.<br> The magenta squares show the errors of the solution obtained with the last mesh, which assumes no knowledge of the exact solution. The accuracy of this solution is affected by the tolerance considered, with a lower tolerance giving a more accurate solution. The order of convergence in this case drops to one, but note that the $\Delta x$ refers to that of the uniform mesh and therefore represents only the maximum $\Delta x$ of this mesh. Although the order of accuracy decreases, for larger space step sizes it is possible to obtain solutions more accurate than those calculated with the optimally stretched mesh.
6246c47307ab77e1e559cd09052a176774f14c6b
700,148
ipynb
Jupyter Notebook
.ipynb_checkpoints/ACSE-3-Coursework-2-checkpoint.ipynb
mattiaguerri/NumericalMethods
c496ae74f92b7c2a0d01a4318e14b74c902cfc5c
[ "MIT" ]
1
2021-03-04T12:07:32.000Z
2021-03-04T12:07:32.000Z
ACSE-3-Coursework-2.ipynb
mattiaguerri/NumericalMethods
c496ae74f92b7c2a0d01a4318e14b74c902cfc5c
[ "MIT" ]
null
null
null
ACSE-3-Coursework-2.ipynb
mattiaguerri/NumericalMethods
c496ae74f92b7c2a0d01a4318e14b74c902cfc5c
[ "MIT" ]
null
null
null
292.459482
97,380
0.913545
true
18,109
Qwen/Qwen-72B
1. YES 2. YES
0.812867
0.914901
0.743693
__label__eng_Latn
0.913287
0.56618
```python from sympy.physics.units import * from sympy import * # Rounding: import decimal from decimal import Decimal as DX def iso_round(obj, pv, rounding=decimal.ROUND_HALF_EVEN): import sympy """ Rounding acc. to DIN EN ISO 80000-1:2013-08 place value = Rundestellenwert """ assert pv in set([ # place value # round to: 1, # 1 0.1, # 1st digit after decimal 0.01, # 2nd 0.001, # 3rd 0.0001, # 4th 0.00001, # 5th 0.000001, # 6th 0.0000001, # 7th 0.00000001, # 8th 0.000000001, # 9th 0.0000000001, # 10th ]) try: tmp = DX(str(float(obj))) obj = tmp.quantize(DX(str(pv)), rounding=rounding) except: for i in range(len(obj)): tmp = DX(str(float(obj[i]))) obj[i] = tmp.quantize(DX(str(pv)), rounding=rounding) return obj # LateX: kwargs = {} kwargs["mat_str"] = "bmatrix" kwargs["mat_delim"] = "" # kwargs["symbol_names"] = {FB: "F^{\mathsf B}", } # Units: (k, M, G ) = ( 10**3, 10**6, 10**9 ) (mm, cm, deg) = ( m/1000, m/100, pi/180) Newton = kg*m/s**2 Pa = Newton/m**2 MPa = M*Pa GPa = G*Pa kN = k*Newton half = S(1)/2 # --- EA, l, F1, F2 = var("EA, l, F1, F2") sub_list = [ ( EA, 2 *Pa*m**2 ), ( l, 1 *m ), ( F1, 1 *Newton /2 ), ( F2, 2 *Newton /2 ), ] def k(phi): """ element stiffness matrix """ # phi is angle between: # 1. vector along global x axis # 2. vector along 1-2-axis of truss # phi is counted positively about z. # pprint("phi / deg:") # pprint(N(deg(phi),3)) (c, s) = ( cos(phi), sin(phi) ) (cc, ss, sc) = ( c*c, s*s, s*c) return Matrix( [ [ cc, sc, -cc, -sc], [ sc, ss, -sc, -ss], [-cc, -sc, cc, sc], [-sc, -ss, sc, ss], ]) (p1, p2, p3) = (0 *pi/180, 30 *pi/180, 45 *pi/180) (k1, k2, k3) = (EA/sqrt(3)/l*k(p1), EA/2/l*k(p2), EA/sqrt(2)/l*k(p3)) pprint("\nk1 / (EA / l): ") pprint(k1 / (EA/l) ) pprint("\nk2 / (EA / l): ") pprint(k2 / (EA/l) ) pprint("\nk3 / (EA / l): ") pprint(k3 / (EA/l) ) # A = EA/l*Matrix([ # [ 1 , -S(1)/2 ], # [ -S(1)/2, 1 ] # ]) # # u2x, u3x = var("u2x, u3x") # u = Matrix([u2x , u3x ]) # f = Matrix([F1 , F2 ]) # # u2x, u3x = var("u2x, u3x") # # eq = Eq(A*u , f) # sol = solve(eq, [u2x, u3x]) # pprint("\nSolution:") # pprint(sol) # # u2x, u3x = sol[u2x], sol[u3x] # # pprint("\nu2x / m:") # tmp = u2x.subs(sub_list) # tmp /= m # pprint(tmp) # # pprint("\nu3x / m:") # tmp = u3x.subs(sub_list) # tmp /= m # pprint(tmp) # # pprint("\nF1x / N:") # tmp = - EA/l * u2x/2 # tmp = tmp.subs(sub_list) # tmp /= Newton # pprint(tmp) ```
6ff3c1677ed8765e605d5ee60931fdfb4aec38a3
5,155
ipynb
Jupyter Notebook
ipynb/WB-Klein/5/stallkamp_cc.ipynb
kassbohm/wb-snippets
f1ac5194e9f60a9260d096ba5ed1ce40b844a3fe
[ "MIT" ]
null
null
null
ipynb/WB-Klein/5/stallkamp_cc.ipynb
kassbohm/wb-snippets
f1ac5194e9f60a9260d096ba5ed1ce40b844a3fe
[ "MIT" ]
null
null
null
ipynb/WB-Klein/5/stallkamp_cc.ipynb
kassbohm/wb-snippets
f1ac5194e9f60a9260d096ba5ed1ce40b844a3fe
[ "MIT" ]
null
null
null
32.421384
82
0.342968
true
1,119
Qwen/Qwen-72B
1. YES 2. YES
0.859664
0.7773
0.668217
__label__eng_Latn
0.125927
0.390822
<!-- dom:TITLE: Learning from data: Bayesian Parameter Estimation --> # Learning from data: Bayesian Parameter Estimation <!-- dom:AUTHOR: Christian Forssén at Department of Physics, Chalmers University of Technology, Sweden --> <!-- Author: --> **Christian Forssén**, Department of Physics, Chalmers University of Technology, Sweden Date: **Sep 16, 2019** Copyright 2018-2019, Christian Forssén. Released under CC Attribution-NonCommercial 4.0 license <!-- dom:FIGURE:[fig/m1m2.png, width=400 frac=0.8] Joint pdf for the masses of two black holes merging obtained from the data analysis of a gravitational wave signal. This representation of a joint pdf is known as a corner plot. <div id="fig:gw"></div> --> <!-- begin figure --> <div id="fig:gw"></div> <p>Joint pdf for the masses of two black holes merging obtained from the data analysis of a gravitational wave signal. This representation of a joint pdf is known as a corner plot.</p> <!-- end figure --> # Inference With Parametric Models Inductive inference with parametric models is a very important tool in the natural sciences. * Consider $N$ different models $M_i$ ($i = 1, \ldots, N$), each with parameters $\boldsymbol{\theta}_i$. Each of them implies a sampling distribution for possible data $$ p(D|\boldsymbol{\theta}_i, M_i) $$ * The likelihood function is the pdf of the actual, observed data ($D_\mathrm{obs}$) given a set of parameters $\boldsymbol{\theta}_i$: $$ \mathcal{L}_i (\boldsymbol{\theta}_i) \equiv p(D_\mathrm{obs}|\boldsymbol{\theta}_i, M_i) $$ * We may be uncertain about $M_i$ (model uncertainty), * or uncertain about $\boldsymbol{\theta}_i$ (parameter uncertainty). Parameter Estimation: : Premise = We have chosen a model (say $M_1$) $\Rightarrow$ What can we say about its parameters $\boldsymbol{\theta}_1$? Model comparison: : Premise = We have a set of different models $\{M_i\}$ $\Rightarrow$ How do they compare with each other? Do we have evidence to say that, e.g. $M_1$, is better than the other models? Model adequacy: : Premise = We have a model $M_1$ $\Rightarrow$ Is $M_1$ adequate? Hybrid Uncertainty: : Models share some common params: $\boldsymbol{\theta}_i = \{ \boldsymbol{\varphi}, \boldsymbol{\eta}_i\}$ $\Rightarrow$ What can we say about $\boldsymbol{\varphi}$? (Systematic error is an example) ## Parameter estimation Overview comments: * In general terms, "parameter estimation" in physics means obtaining values for parameters (constants) that appear in a theoretical model which describes data (exceptions to this general definition exist of course). * Conventionally this process is known as "parameter fitting" and the goal is to find the "best fit". * We will make particular interpretations of these phrases from our Bayesian point of view. * We will also see how familiar ideas like "least-squares optimization" show up from a Bayesian perspective. ## Bayesian parameter estimation We will now consider the very important task of model parameter estimation using statistical inference. Let us first remind ourselves what can go wrong in a fit. We have encountered both **underfitting** (model is not complex enough to describe the variability in the data) and **overfitting** (model tunes to data fluctuations, or terms are underdetermined causing them playing off each other). Bayesian methods can prevent/identify both these situations. 
<!-- ===== Example: Measured flux from a star (single parameter) ===== --> # Example: Measured flux from a star (single parameter) Adapted from the blog [Pythonic Perambulations](http://jakevdp.github.io) by Jake VanderPlas. Imagine that we point our telescope to the sky, and observe the light coming from a single star. Our physics model will be that the star's true flux is constant with time, i.e. that it has a fixed value $F_\mathrm{true}$ (we'll also ignore effects like sky noise and other sources of systematic error). Thus, we have a single model parameter: $F_\mathrm{true}$. We'll assume that we perform a series of $N$ measurements with our telescope, where the i:th measurement reports an observed photon flux $F_i$ and is accompanied by an error model given by $e_i$[^errors]. The question is, given this set of measurements $D = \{F_i\}_{i=0}^{N-1}$, and the statistical model $F_i = F_\mathrm{true} + e_i$, what is our best estimate of the true flux $F_\mathrm{true}$? [^errors]: We'll make the reasonable assumption that errors are Gaussian. In a Frequentist perspective, $e_i$ is the standard deviation of the results of a single measurement event in the limit of repetitions of *that event*. In the Bayesian perspective, $e_i$ is the standard deviation of the (Gaussian) probability distribution describing our knowledge of that particular measurement given its observed value. Because the measurements are number counts, a Poisson distribution is a good approximation to the measurement process: ```python %matplotlib inline import numpy as np from scipy import stats import matplotlib.pyplot as plt import emcee ``` ```python np.random.seed(1) # for repeatability F_true = 1000 # true flux, say number of photons measured in 1 second N = 50 # number of measurements F = stats.poisson(F_true).rvs(N) # N measurements of the flux e = np.sqrt(F) # errors on Poisson counts estimated via square root ``` Now let's make a simple visualization of the "observed" data, see Fig. [fig:flux](#fig:flux). ```python fig, ax = plt.subplots() ax.errorbar(F, np.arange(N), xerr=e, fmt='ok', ecolor='gray', alpha=0.5) ax.vlines([F_true], 0, N, linewidth=5, alpha=0.2) ax.set_xlabel("Flux");ax.set_ylabel("measurement number"); ``` <!-- dom:FIGURE:[fig/singlephotoncount_fig_1.png, width=400 frac=0.8] Single photon counts (flux measurements). <div id="fig:flux"></div> --> <!-- begin figure --> <div id="fig:flux"></div> <p>Single photon counts (flux measurements).</p> <!-- end figure --> These measurements each have a different error $e_i$ which is estimated from Poisson statistics using the standard square-root rule. In this toy example we know the true flux that was used to generate the data, but the question is this: given our measurements and statistical model, what is our best estimate of $F_\mathrm{true}$? Let's take a look at the frequentist and Bayesian approaches to solving this. ### Simple Photon Counts: Frequentist Approach We'll start with the classical frequentist maximum likelihood approach. Given a single observation $D_i = F_i$, we can compute the probability distribution of the measurement given the true flux $F_\mathrm{true}$ given our assumption of Gaussian errors <!-- Equation labels as ordinary links --> <div id="_auto1"></div> $$ \begin{equation} p(D_i | F_\mathrm{true}, I) = \frac{1}{\sqrt{2\pi e_i^2}} \exp \left( \frac{-(F_i-F_\mathrm{true})^2}{2e_i^2} \right). \label{_auto1} \tag{1} \end{equation} $$ This should be read "the probability of $D_i$ given $F_\mathrm{true}$ equals ...". 
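(For the large counts generated here, $F_\mathrm{true} = 1000$ photons, the Poisson sampling distribution is very well approximated by a Gaussian with standard deviation $\sqrt{F}$, which is what motivates the square-root error model $e_i$ used above.)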
You should recognize this as a normal distribution with mean $F_\mathrm{true}$ and standard deviation $e_i$. We construct the *likelihood function* by computing the product of the probabilities for each data point <!-- Equation labels as ordinary links --> <div id="_auto2"></div> $$ \begin{equation} \mathcal{L}(F_\mathrm{true}) = \prod_{i=1}^N p(D_i | F_\mathrm{true}, I), \label{_auto2} \tag{2} \end{equation} $$ here $D = \{D_i\}$ represents the entire set of measurements. Because the value of the likelihood can become very small, it is often more convenient to instead compute the log-likelihood. **Notice.** In the following we will use $\log$ to denote the natural logarithm. We will write $\log_{10}$ if we specifically mean the logarithm with base 10. Combining the previous two equations and computing the log, we have <!-- Equation labels as ordinary links --> <div id="_auto3"></div> $$ \begin{equation} \log\mathcal{L} = -\frac{1}{2} \sum_{i=1}^N \left[ \log(2\pi e_i^2) + \frac{(F_i-F_\mathrm{true})^2}{e_i^2} \right]. \label{_auto3} \tag{3} \end{equation} $$ What we'd like to do is determine $F_\mathrm{true}$ such that the likelihood is maximized. At this pont we can note that that problem of maximizing the likelihood is equivalent to the minimization of the sum <!-- Equation labels as ordinary links --> <div id="_auto4"></div> $$ \begin{equation} \sum_{i=1}^N \frac{(F_i-F_\mathrm{true})^2}{e_i^2}, \label{_auto4} \tag{4} \end{equation} $$ which you should recognize as the chi-squared function encountered in the linear regression model. Therefore, it is not surprising that this particular maximization problem can be solved analytically (i.e. by setting $d\log\mathcal{L}/d F_\mathrm{true} = 0$). This results in the following observed estimate of $F_\mathrm{true}$ <!-- Equation labels as ordinary links --> <div id="_auto5"></div> $$ \begin{equation} F_\mathrm{est} = \frac{ \sum_{i=1}^N w_i F_i }{ \sum_{i=1}^N w_i}, \quad w_i = 1/e_i^2. \label{_auto5} \tag{5} \end{equation} $$ Notice that in the special case of all errors $e_i$ being equal, this reduces to <!-- Equation labels as ordinary links --> <div id="_auto6"></div> $$ \begin{equation} F_\mathrm{est} = \frac{1}{N} \sum_{i=1} F_i. \label{_auto6} \tag{6} \end{equation} $$ That is, in agreement with intuition, $F_\mathrm{est}$ is simply the mean of the observed data when errors are equal. We can go further and ask what the error of our estimate is. In the frequentist approach, this can be accomplished by fitting a Gaussian approximation to the likelihood curve at maximum; in this simple case this can also be solved analytically (the sum of Gaussians is also a Gaussian). It can be shown that the standard deviation of this Gaussian approximation is $\sigma_\mathrm{est}$, which is given by <!-- Equation labels as ordinary links --> <div id="_auto7"></div> $$ \begin{equation} \frac{ 1 } {\sigma_\mathrm{est}^2} = \sum_{i=1}^N w_i . \label{_auto7} \tag{7} \end{equation} $$ These results are fairly simple calculations; let's evaluate them for our toy dataset: ```python w=1./e**2 print(f""" F_true = {F_true} F_est = {(w * F).sum() / w.sum():.0f} +/- { w.sum() ** -0.5:.0f} (based on {N} measurements) """) ``` `F_true = 1000` `F_est = 998 +/- 4 (based on 50 measurements)` We find that for 50 measurements of the flux, our estimate has an error of about 0.4% and is consistent with the input value. ### Simple Photon Counts: Bayesian Approach The Bayesian approach, as you might expect, begins and ends with probabilities. 
Our hypothesis is that the star has a constant flux $F_\mathrm{true}$. It recognizes that what we fundamentally want to compute is our knowledge of the parameter in question given the data and other information (such as our knowledge of uncertainties for the observed values), i.e. in this case, $p(F_\mathrm{true} | D,I)$. Note that this formulation of the problem is fundamentally contrary to the frequentist philosophy, which says that probabilities have no meaning for model parameters like $F_\mathrm{true}$. Nevertheless, within the Bayesian philosophy this is perfectly acceptable. To compute this pdf, Bayesians next apply Bayes' Theorem. If we set the prior $p(F_\mathrm{true}|I) \propto 1$ (a flat prior), we find $p(F_\mathrm{true}|D,I) \propto p(D | F_\mathrm{true},I) \equiv \mathcal{L}(F_\mathrm{true})$ and the Bayesian probability is maximized at precisely the same value as the frequentist result! So despite the philosophical differences, we see that (for this simple problem at least) the Bayesian and frequentist point estimates are equivalent. ### A note about priors The prior allows inclusion of other information into the computation, which becomes very useful in cases where multiple measurement strategies are being combined to constrain a single model. The necessity to specify a prior, however, is one of the more controversial pieces of Bayesian analysis. A frequentist will point out that the prior is problematic when no true prior information is available. Though it might seem straightforward to use a noninformative prior like the flat prior mentioned above, there are some [surprisingly subtleties](http://normaldeviate.wordpress.com/2013/07/13/lost-causes-in-statistics-ii-noninformative- priors/comment-page-1/) involved. It turns out that in many situations, a truly noninformative prior does not exist! Frequentists point out that the subjective choice of a prior which necessarily biases your result has no place in statistical data analysis. A Bayesian would counter that frequentism doesn't solve this problem, but simply skirts the question. Frequentism can often be viewed as simply a special case of the Bayesian approach for some (implicit) choice of the prior: a Bayesian would say that it's better to make this implicit choice explicit, even if the choice might include some subjectivity. ### Simple Photon Counts: Bayesian approach in practice Leaving these philosophical debates aside for the time being, let's address how Bayesian results are generally computed in practice. For a one parameter problem like the one considered here, it's as simple as computing the posterior probability $p(F_\mathrm{true} | D,I)$ as a function of $F_\mathrm{true}$: this is the distribution reflecting our knowledge of the parameter $F_\mathrm{true}$. But as the dimension of the model grows, this direct approach becomes increasingly intractable. For this reason, Bayesian calculations often depend on sampling methods such as Markov Chain Monte Carlo (MCMC). For this practical example, let us apply an MCMC approach using Dan Foreman-Mackey's [emcee](http://dan.iel.fm/emcee/current/) package. Keep in mind here that the goal is to generate a set of points drawn from the posterior probability distribution, and to use those points to determine the answer we seek. To perform this MCMC, we start by defining Python functions for the prior $p(F_\mathrm{true} | I)$, the likelihood $p(D | F_\mathrm{true},I)$, and the posterior $p(F_\mathrm{true} | D,I)$, noting that none of these need be properly normalized. 
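The reason unnormalized densities suffice is that MCMC samplers such as emcee only ever compare points in parameter space through differences of their log-probabilities (equivalently, ratios of probabilities), so any constant normalization factor cancels out of the acceptance decision.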
Our model here is one-dimensional, but to handle multi-dimensional models we'll define the model in terms of an array of parameters $\boldsymbol{\theta}$, which in this case is $\boldsymbol{\theta} = [F_\mathrm{true}]$. ```python import emcee def log_prior(theta): if np.all(theta > 0) and np.all(theta < 10000): return 0 # flat prior else: return -np.inf def log_likelihood(theta, F, e): return -0.5 * np.sum(np.log(2 * np.pi * e ** 2) \ + (F - theta[0]) ** 2 / e ** 2) def log_posterior(theta, F, e): return log_prior(theta) + log_likelihood(theta, F, e) ``` Now we set up the problem, including generating some random starting guesses for the multiple chains of points. ```python ndim = 1 # number of parameters in the model nwalkers = 50 # number of MCMC walkers nwarm = 1000 # "warm-up" period to let chains stabilize nsteps = 2000 # number of MCMC steps to take # we'll start at random locations between 0 and 2000 starting_guesses = 2000 * np.random.rand(nwalkers, ndim) sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[F,e]) sampler.run_mcmc(starting_guesses, nsteps) # Shape of sampler.chain = (nwalkers, nsteps, ndim) # Flatten the sampler chain and discard warm-up points: samples = sampler.chain[:, nwarm:, :].reshape((-1, ndim)) ``` If this all worked correctly, the array `samples` should contain a series of 50,000 points drawn from the posterior. Let's plot them and check. See results in Fig. [fig:flux-bayesian](#fig:flux-bayesian). ```python fig, ax = plt.subplots() ax.hist(samples, bins=50, histtype="stepfilled", alpha=0.3, density=True) ax.set_xlabel(r'$F_\mathrm{est}$') ax.set_ylabel(r'$p(F_\mathrm{est}|D,I)$'); ``` <!-- dom:FIGURE:[fig/singlephotoncount_fig_2.png, width=400 frac=0.8] Bayesian posterior pdf (represented by a histogram of MCMC samples) from flux measurements. <div id="fig:flux-bayesian"></div> --> <!-- begin figure --> <div id="fig:flux-bayesian"></div> <p>Bayesian posterior pdf (represented by a histogram of MCMC samples) from flux measurements.</p> <!-- end figure --> ## Aside: Best estimates and confidence intervals The posterior distribution from our Bayesian data analysis is the key quantity that encodes our inference about the values of the model parameters, given the data and the relevant background information. Often, however, we wish to summarize this result with just a few numbers: the best estimate and a measure of its reliability. There are a few different options for this. The choice of the most appropriate one depends mainly on the shape of the posterior distribution: ### Symmetric posterior pdfs Since the probability (density) associated with any particular value of the parameter is a measure of how much we believe that it lies in the neighbourhood of that point, our best estimate is given by the maximum of the posterior pdf. If we denote the quantity of interest by $\theta$, with a posterior pdf $P =p(\theta|D,I)$, then the best estimate of its value $\theta_0$ is given by the condition $dP/d\theta|_{\theta=\theta_0}=0$. Strictly speaking, we should also check the sign of the second derivative to ensure that $\theta_0$ represents a maximum. To obtain a measure of the reliability of this best estimate, we need to look at the width or spread of the posterior pdf about $\theta_0$. When considering the behaviour of any function in the neighbourhood of a particular point, it is often helpful to carry out a Taylor series expansion; this is simply a standard tool for (locally) approximating a complicated function by a low-order polynomial. 
The linear term is zero at the maximum and the quadratic term is often the dominating one determining the width of the posterior pdf. Ignoring all the higher-order terms we arrive at the Gaussian approximation (see more details below) <!-- Equation labels as ordinary links --> <div id="_auto8"></div> $$ \begin{equation} p(\theta|D,I) \approx \frac{1}{\sigma\sqrt{2\pi}} \exp \left[ -\frac{(\theta-\mu)^2}{2\sigma^2} \right], \label{_auto8} \tag{8} \end{equation} $$ where the mean $\mu = \theta_0$ and the standard deviation $\sigma = \left( - \left. \frac{d^2L}{d\theta^2} \right|_{\theta_0} \right)^{-1/2}$, where $L$ is the logarithm of the posterior $P$. Our inference about the quantity of interest is conveyed very concisely, therefore, by the 67% Bayesian confidence interval $\theta = \theta_0 \pm \sigma$, and $$ p(\theta_0-\sigma < \theta < \theta_0+\sigma | D,I) = \int_{\theta_0-\sigma}^{\theta_0+\sigma} p(\theta|D,I) d\theta \approx 0.67. $$ ### Asymmetric posterior pdfs While the maximum of the posterior ($\theta_0$) can still be regarded as giving the best estimate, the true value is now more likely to be on one side of this rather than the other. Alternatively one can compute the mean value, $\langle \theta \rangle = \int \theta p(\theta|D,I) d\theta$, although this tends to overemphasise very long tails. The best option is probably a compromise that can be employed when having access to a large sample from the posterior (as provided by an MCMC), namely to give the median of this ensemble. Furthermore, the concept of an error-bar does not seem appropriate in this case, as it implicitly entails the idea of symmetry. A good way of expressing the reliability with which a parameter can be inferred, for an asymmetric posterior pdf, is rather through a *confidence interval*. Since the area under the posterior pdf between $\theta_1$ and $\theta_2$ is proportional to how much we believe that $\theta$ lies in that range, the shortest interval that encloses 67% of the area represents a sensible measure of the uncertainty of the estimate. Obviously we can choose to provide some other degree-of-belief that we think is relevant for the case at hand. Assuming that the posterior pdf has been normalized to have unit area, we need to find $\theta_1$ and $\theta_2$ such that: $$ p(\theta_1 < \theta < \theta_2 | D,I) = \int_{\theta_1}^{\theta_2} p(\theta|D,I) d\theta \approx 0.67, $$ where the difference $\theta_2 - \theta_1$ is as small as possible. The region $\theta_1 < \theta < \theta_2$ is then called the shortest 67% confidence interval. ### Multimodal posterior pdfs We can sometimes obtain posteriors which are multimodal; i.e. contain several disconnected regions with large probabilities. There is no difficulty when one of the maxima is very much larger than the others: we can simply ignore the subsidiary solutions, to a good approximation, and concentrate on the global maximum. The problem arises when there are several maxima of comparable magnitude. What do we now mean by a best estimate, and how should we quantify its reliability? The idea of a best estimate and an error-bar, or even a confidence interval, is merely an attempt to summarize the posterior with just two or three numbers; sometimes this just can’t be done, and so these concepts are not valid. For the bimodal case we might be able to characterize the posterior in terms of a few numbers: two best estimates and their associated error-bars, or disjoint confidence intervals. 
For a general multimodal pdf, the most honest thing we can do is just display the posterior itself. Two options for assigning credible intervals to asymmetric and multimodal pdfs: * Equal-tailed interval: the probability mass above and below the interval is the same. * Highest posterior density (HPD) interval: The posterior density for any point within the interval is larger than the posterior density for any point outside the interval. ### Different views on confidence intervals A Bayesian confidence interval, or credible interval, or degree-of-belief (DOB) interval is the following: Given this data and other information, there is a $d\%$ probability that this interval contains the true value of the parameter. E.g. a 95% DOB interval implies that the Bayesian data analyser would bet 20-to-1 that the true result is inside the interval. A frequentist 95% confidence interval should be understood as follows: "There is a 95% probability that when I compute a confidence interval from data of this sort, the true value of the parameter will fall within it" (where the probability refers to the hypothetical space of repeated observations). So the parameter is fixed (no pdf) and the confidence interval is based on random sampling of data. Let's try again to understand this: If we make a large number of repeated samples, then 95% of the intervals extracted in this way will include the true value of the parameter. ### Simple Photon Counts: Best estimates and confidence intervals To compute these numbers for our example, you would run: ```python sampper=np.percentile(samples, [2.5, 16.5, 50, 83.5, 97.5],axis=0).flatten() print(f""" F_true = {F_true} Based on {N} measurements the posterior point estimates are: ...F_est = { np.mean(samples):.0f} +/- { np.std(samples):.0f} or using credibility intervals: ...F_est = {sampper[2]:.0f} (posterior median) ...F_est in [{sampper[1]:.0f}, {sampper[3]:.0f}] (67% credibility interval) ...F_est in [{sampper[0]:.0f}, {sampper[4]:.0f}] (95% credibility interval) """) ``` `F_true = 1000` `Based on 50 measurements the posterior point estimates are:` `...F_est = 998 +/- 4` `or using credibility intervals:` `...F_est = 998 (posterior median)` `...F_est in [993, 1002] (67% credibility interval)` `...F_est in [989, 1006] (95% credibility interval)` In this particular example, the posterior pdf is actually a Gaussian (since it is constructed as a product of Gaussians), and the mean and variance from the quadratic approximation will agree exactly with the frequentist approach. From this final result you might come away with the impression that the Bayesian method is unnecessarily complicated, and in this case it certainly is. Using an MCMC sampler to characterize a one-dimensional normal distribution is a bit like using the Death Star to destroy a beach ball, but we did this here because it demonstrates an approach that can scale to complicated posteriors in many, many dimensions, and can provide nice results in more complicated situations where an analytic likelihood approach is not possible. Furthermore, as data and models grow in complexity, the two approaches can diverge greatly. # Example: Gaussian noise and averages The example in the demonstration notebook is from Sivia's book. How do we infer the mean and standard deviation of a Gaussian distribution from $M$ measurements $D \in \{ x_k \}_{k=0}^{M-1}$ that should be distributed according to a normal distribution $p( D | \mu,\sigma,I)$? 
Start from Bayes' theorem $$ p(\mu,\sigma | D, I) = \frac{p(D|\mu,\sigma,I) p(\mu,\sigma|I)}{p(D|I)} $$ * Remind yourself about the names of the different terms. * It should become intuitive what the different probabilities (pdfs) describe. * Bayes' theorem tells you how to flip between the (hard-to-compute) $p(\mu,\sigma | D, I)$ and the (easier-to-compute) $p(D|\mu,\sigma,I)$. Aside on the denominator, which is known as the "data probability" or "marginalized likelihood" or "evidence". * With $\theta$ denoting a general vector of parameters we must have $$ p(D|I) = \int d\theta p(D|\theta,I) p(\theta|I). $$ * This integration (or marginalization) over all parameters is often difficult to perform. * Fortunately, for **parameter estimation** we don't need $p(D|I)$ since it doesn't depend on $\theta$. We usually only need relative probabilities, or we can determine the normalization $N$ after we have computed the unnormalized posterior $$ p(\theta | D,I) = \frac{1}{N} p(D|\theta,I) p(\theta|I). $$ If we use a uniform prior $p(\theta | I ) \propto 1$ (in a finite volume), then the posterior is proportional to the **likelihood** $$ p(\theta | D,I) \propto p(D|\theta,I) = \mathcal{L}(\theta) $$ In this particular situation, the mode of the likelihood (which would correspond to the point estimate of maximum likelihood) is equivalent to the mode of the posterior pdf in the Bayesian analysis. The real use of the prior, however, is to include in the analysis any additional information that you might have. The prior statement makes such additional assumptions and information very explicit. But how do we actually compute the posterior in practice? Most often we won't be able to get an analytical expression, but we can sample the distribution using a method known as Markov Chain Monte Carlo (MCMC). # Example: Fitting a straight line The next example that we will study is the well-known fit of a straight line. * Here the theoretical model is $$ y_\mathrm{th}(x; \theta) = m x + b, $$ with parameters $\theta = [b,m]$. * The statistical model for the data is $$ y_{\mathrm{exp},i} = y_{\mathrm{th},i} + \delta y_{\mathrm{exp},i}, $$ where we often assume that the experimental errors are independent and normally distributed so that $$ y_i \sim \mathcal{N} \left( y_\mathrm{th}(x_i; \theta), e_i^2 \right). $$ * Are independent errors always a good approximation? * An even better statistical model for theoretical models with a quantified, finite resolution would be $$ y_\mathrm{exp} = y_\mathrm{th} + \delta y_\mathrm{exp} + \delta y_\mathrm{th}. $$ ### Why normal distributions? Let us give a quick motivation for why Gaussian distributions show up so often. Say that we have a pdf $p(\theta | D,I)$. Our best estimate from this pdf will be $\theta_0$ where $$ \left. \frac{ \partial p }{ \partial \theta } \right|_{\theta_0} = 0, \qquad \left. \frac{ \partial^2 p }{ \partial \theta^2 } \right|_{\theta_0} < 0. $$ The distribution usually varies very rapidly so we study $L(\theta) \equiv \log p$ instead. Near the peak, it behaves as $$ L(\theta) = L(\theta_0) + \frac{1}{2} \left. \frac{\partial^2 L}{\partial \theta^2} \right|_{\theta_0} \left( \theta - \theta_0 \right)^2 + \ldots, $$ where the first-order term is zero since we are expanding around a maximum and $\partial L / \partial\theta = 0$. If we neglect higher-order terms we find that $$ p(\theta|D,I) \approx A \exp \left[ \frac{1}{2} \left. 
\frac{\partial^2 L}{\partial \theta^2} \right|_{\theta_0} \left( \theta - \theta_0 \right)^2 \right], $$ which is a Gaussian $\mathcal{N}(\mu,\sigma^2)$ with $$ \mu = \theta_0, \qquad \sigma^2 = \left( - \left. \frac{\partial^2 L}{\partial \theta^2} \right|_{\theta_0} \right)^{-1}. $$ ## Correlations In the "fitting a straight-line" example you should find that the joint pdf for the slope and the intercept $[m, b]$ corresponds to a slanted ellipse. That result implies that the model parameters are **correlated**. * Try to understand the correlation that you find in this example. Let us explore correlations by studying the behavior of the pdf at the maximum. A Taylor expansion for a bivariate pdf $p(x,y)$ around the mode $(x_0,y_0)$ gives $$ p(x,y) \approx p(x_0,y_0) + \frac{1}{2} \begin{pmatrix} x-x_0 & y-y_0 \end{pmatrix} H \begin{pmatrix} x-x_0 \\ y-y_0 \end{pmatrix}, $$ where $H$ is the symmetric Hessian matrix $$ \begin{pmatrix} A & C \\ C & B \end{pmatrix}, $$ with elements $$ A = \left. \frac{\partial^2 p}{\partial x^2} \right|_{x_0,y_0}, \quad B = \left. \frac{\partial^2 p}{\partial y^2} \right|_{x_0,y_0}, \quad C = \left. \frac{\partial^2 p}{\partial x \partial y} \right|_{x_0,y_0}. $$ * So in this quadratic approximation the contour is an ellipse centered at $(x_0,y_0)$ with orientation and eccentricity determined by $A,B,C$. * The principal axes are found from the eigenvectors of $H$. * Depending on the tilt of the ellipse, the parameters are either (i) not correlated, (ii) correlated, or (iii) anti-correlated. * Take a minute to consider what that implies (a small numerical sketch follows below).
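To make the connection between the Hessian and the correlation of the parameters concrete, here is a minimal numerical sketch (an addition, not from the original notes). It assumes a Gaussian posterior for the slope and intercept with a made-up covariance matrix; for a Gaussian, the Hessian of $L = \log p$ at the mode is minus the inverse covariance, so the principal axes and the sign of the correlation can be read off directly.

```python
import numpy as np

# Illustrative (made-up) covariance for a bivariate Gaussian posterior in (m, b)
cov = np.array([[0.04, -0.03],
                [-0.03, 0.09]])

# For a Gaussian posterior, the Hessian of L = log p at the mode is -inv(cov)
H = -np.linalg.inv(cov)

# Principal axes of the error ellipse: eigenvectors of the (symmetric) Hessian
eigvals, eigvecs = np.linalg.eigh(H)
print("Hessian eigenvalues:", eigvals)
print("principal axes (columns):\n", eigvecs)

# Correlation coefficient between slope and intercept
rho = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
print("correlation coefficient:", rho)  # negative here -> anti-correlated, slanted ellipse
```

With these made-up numbers the correlation coefficient is $-0.5$, i.e. the ellipse is tilted and the parameters are anti-correlated; a diagonal covariance would give axis-aligned contours and uncorrelated parameters.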
6359eae48da6580c12ade0a4116cc05aeabdd707
40,461
ipynb
Jupyter Notebook
doc/pub/BayesianParameterEstimation/ipynb/BayesianParameterEstimation.ipynb
fraidowolf/tif285-project1
16724bf233ce20aba0de02655cb9072dd5c7098e
[ "CC0-1.0" ]
null
null
null
doc/pub/BayesianParameterEstimation/ipynb/BayesianParameterEstimation.ipynb
fraidowolf/tif285-project1
16724bf233ce20aba0de02655cb9072dd5c7098e
[ "CC0-1.0" ]
null
null
null
doc/pub/BayesianParameterEstimation/ipynb/BayesianParameterEstimation.ipynb
fraidowolf/tif285-project1
16724bf233ce20aba0de02655cb9072dd5c7098e
[ "CC0-1.0" ]
null
null
null
42.861229
996
0.619906
true
7,657
Qwen/Qwen-72B
1. YES 2. YES
0.771843
0.800692
0.618009
__label__eng_Latn
0.995409
0.274172
# Structured prediction In this example$\newcommand{\reals}{\mathbf{R}}$$\newcommand{\ones}{\mathbf{1}}$, we fit a regression model to structured data, using an LLCP. The training dataset $\mathcal D$ contains $N$ input-output pairs $(x, y)$, where $x \in \reals^{n}_{++}$ is an input and $y \in \reals^{m}_{++}$ is an output. The entries of each output $y$ are sorted in ascending order, meaning $y_1 \leq y_2 \leq \cdots \leq y_m$. Our regression model $\phi : \reals^{n}_{++} \to \reals^{m}_{++}$ takes as input a vector $x \in \reals^{n}_{++}$, and solves an LLCP to produce a prediction $\hat y \in \reals^{m}_{++}$. In particular, the solution of the LLCP is the model's prediction. The model is of the form $$ \begin{equation} \begin{array}{lll} \phi(x) = & \mbox{argmin} & \ones^T (z/y + y / z) \\ & \mbox{subject to} & y_i \leq y_{i+1}, \quad i=1, \ldots, m-1 \\ && z_i = c_i x_1^{A_{i1}}x_2^{A_{i2}}\cdots x_n^{A_{in}}, \quad i = 1, \ldots, m. \end{array}\label{e-model} \end{equation} $$ Here, the minimization is over $y \in \reals^{m}_{++}$ and an auxiliary variable $z \in \reals^{m}_{++}$, $\phi(x)$ is the optimal value of $y$, and the parameters are $c \in \reals^{m}_{++}$ and $A \in \reals^{m \times n}$. The ratios in the objective are meant elementwise, the constraints $y_i \leq y_{i+1}$ enforce that the prediction is sorted, and $\ones$ denotes the vector of all ones. Given a vector $x$, this model finds a sorted vector $\hat y$ whose entries are close to monomial functions of $x$ (which are the entries of $z$), as measured by the fractional error. The training loss $\mathcal{L}(\phi)$ of the model on the training set is the mean squared loss $$ \mathcal{L}(\phi) = \frac{1}{N}\sum_{(x, y) \in \mathcal D} \|y - \phi(x)\|_2^2. $$ We emphasize that $\mathcal{L}(\phi)$ depends on $c$ and $A$. In this example, we fit the parameters $c$ and $A$ in the LLCP to minimize the training loss $\mathcal{L}(\phi)$. **Fitting.** We fit the parameters by an iterative projected gradient descent method on $\mathcal L(\phi)$. In each iteration, we first compute predictions $\phi(x)$ for each input in the training set; this requires solving $N$ LLCPs. Next, we evaluate the training loss $\mathcal L(\phi)$. To update the parameters, we compute the gradient $\nabla \mathcal L(\phi)$ of the training loss with respect to the parameters $c$ and $A$. This requires differentiating through the solution map of the LLCP. We can compute this gradient efficiently, using the ``backward`` method in CVXPY (or CVXPY Layers). Finally, we subtract a small multiple of the gradient from the parameters. Care must be taken to ensure that $c$ is strictly positive; this can be done by clamping the entries of $c$ at some small threshold slightly above zero. We run this method for a fixed number of iterations. A schematic of a single update step is sketched below, before the full implementation. This example is described in the paper [Differentiating through Log-Log Convex Programs](http://web.stanford.edu/~boyd/papers/pdf/diff_llcvx.pdf). Shane Barratt formulated the idea of using an optimization layer to regress on sorted vectors. **Requirements.** This example requires PyTorch and CvxpyLayers >= v0.1.3. 
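Before the full implementation, here is a minimal sketch (not part of the original example) of what one projected-gradient update looks like. It assumes a hypothetical differentiable callable `predict(A, c, x)` that solves the LLCP above for a single input (for instance a `CvxpyLayer`), and leaf tensors `A` and `c` with `requires_grad=True`; names and the learning rate are illustrative only.

```python
import torch

def projected_gradient_step(A, c, train_x, train_y, predict, lr=1e-2, c_floor=1e-8):
    """One step of projected gradient descent on the mean squared training loss.

    `predict(A, c, x)` is assumed to solve the LLCP and return a differentiable
    prediction; A and c are leaf tensors with requires_grad=True.
    """
    preds = torch.stack([predict(A, c, x) for x in train_x])   # N predictions, each in R^m
    loss = (preds - train_y).pow(2).sum(dim=1).mean()          # training loss L(phi)

    loss.backward()                                            # gradients through the solution map
    with torch.no_grad():
        A -= lr * A.grad                                       # plain gradient step on A
        c -= lr * c.grad                                       # gradient step on c ...
        c.clamp_(min=c_floor)                                  # ... then project c back to > 0
        A.grad.zero_()
        c.grad.zero_()
    return loss.item()
```

In the actual notebook below, the same loop is written with `torch.optim.SGD`, a `CvxpyLayer` that handles the whole batch of inputs at once, and `torch.max(c_tch, torch.tensor(1e-8))` as the projection step.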
```python from cvxpylayers.torch import CvxpyLayer import cvxpy as cp import matplotlib.pyplot as plt import numpy as np import torch torch.set_default_tensor_type(torch.DoubleTensor) %matplotlib inline ``` ### Data generation ```python n = 20 m = 10 # Number of training input-output pairs N = 100 # Number of validation pairs N_val = 50 ``` ```python torch.random.manual_seed(243) np.random.seed(243) normal = torch.distributions.multivariate_normal.MultivariateNormal(torch.zeros(n), torch.eye(n)) lognormal = lambda batch: torch.exp(normal.sample(torch.tensor([batch]))) A_true = torch.randn((m, n)) / 10 c_true = np.abs(torch.randn(m)) ``` ```python def generate_data(num_points, seed): torch.random.manual_seed(seed) np.random.seed(seed) latent = lognormal(num_points) noise = lognormal(num_points) inputs = noise + latent input_cp = cp.Parameter(pos=True, shape=(n,)) prediction = cp.multiply(c_true.numpy(), cp.gmatmul(A_true.numpy(), input_cp)) y = cp.Variable(pos=True, shape=(m,)) objective_fn = cp.sum(prediction / y + y/prediction) constraints = [] for i in range(m-1): constraints += [y[i] <= y[i+1]] problem = cp.Problem(cp.Minimize(objective_fn), constraints) outputs = [] for i in range(num_points): input_cp.value = inputs[i, :].numpy() problem.solve(cp.SCS, gp=True) outputs.append(y.value) return inputs, torch.stack([torch.tensor(t) for t in outputs]) ``` ```python train_inputs, train_outputs = generate_data(N, 243) plt.plot(train_outputs[0, :].numpy()) ``` ```python val_inputs, val_outputs = generate_data(N_val, 0) plt.plot(val_outputs[0, :].numpy()) ``` ## Monomial fit to each component We will initialize the parameters in our LLCP model by fitting monomials to the training data, without enforcing the monotonicity constraint. ```python log_c = cp.Variable(shape=(m,1)) theta = cp.Variable(shape=(n, m)) inputs_np = train_inputs.numpy() log_outputs_np = np.log(train_outputs.numpy()).T log_inputs_np = np.log(inputs_np).T offsets = cp.hstack([log_c]*N) ``` ```python cp_preds = theta.T @ log_inputs_np + offsets objective_fn = (1/N) * cp.sum_squares(cp_preds - log_outputs_np) lstq_problem = cp.Problem(cp.Minimize(objective_fn)) ``` ```python lstq_problem.is_dcp() ``` True ```python lstq_problem.solve(verbose=True) ``` ----------------------------------------------------------------- OSQP v0.6.0 - Operator Splitting QP Solver (c) Bartolomeo Stellato, Goran Banjac University of Oxford - Stanford University 2019 ----------------------------------------------------------------- problem: variables n = 1210, constraints m = 1000 nnz(P) + nnz(A) = 23000 settings: linear system solver = qdldl, eps_abs = 1.0e-05, eps_rel = 1.0e-05, eps_prim_inf = 1.0e-04, eps_dual_inf = 1.0e-04, rho = 1.00e-01 (adaptive), sigma = 1.00e-06, alpha = 1.60, max_iter = 10000 check_termination: on (interval 25), scaling: on, scaled_termination: off warm start: on, polish: on, time_limit: off iter objective pri res dua res rho time 1 0.0000e+00 3.30e+00 1.22e+04 1.00e-01 3.06e-03s 50 1.0014e-02 1.72e-07 1.64e-07 1.75e-03 7.37e-03s plsh 1.0014e-02 1.56e-15 1.17e-14 -------- 9.68e-03s status: solved solution polish: successful number of iterations: 50 optimal objective: 0.0100 run time: 9.68e-03s optimal rho estimate: 8.77e-05 0.010014212812318733 ```python c = torch.exp(torch.tensor(log_c.value)).squeeze() lstsq_val_preds = [] for i in range(N_val): inp = val_inputs[i, :].numpy() pred = cp.multiply(c,cp.gmatmul(theta.T.value, inp)) lstsq_val_preds.append(pred.value) ``` ## Fitting ```python A_param = cp.Parameter(shape=(m, n)) 
c_param = cp.Parameter(pos=True, shape=(m,)) x_slack = cp.Variable(pos=True, shape=(n,)) x_param = cp.Parameter(pos=True, shape=(n,)) y = cp.Variable(pos=True, shape=(m,)) prediction = cp.multiply(c_param, cp.gmatmul(A_param, x_slack)) objective_fn = cp.sum(prediction / y + y / prediction) constraints = [x_slack == x_param] for i in range(m-1): constraints += [y[i] <= y[i+1]] problem = cp.Problem(cp.Minimize(objective_fn), constraints) problem.is_dgp(dpp=True) ``` True ```python A_param.value = np.random.randn(m, n) x_param.value = np.abs(np.random.randn(n)) c_param.value = np.abs(np.random.randn(m)) layer = CvxpyLayer(problem, parameters=[A_param, c_param, x_param], variables=[y], gp=True) ``` ```python torch.random.manual_seed(1) A_tch = torch.tensor(theta.T.value) A_tch.requires_grad_(True) c_tch = torch.tensor(np.squeeze(np.exp(log_c.value))) c_tch.requires_grad_(True) train_losses = [] val_losses = [] lam1 = torch.tensor(1e-1) lam2 = torch.tensor(1e-1) opt = torch.optim.SGD([A_tch, c_tch], lr=5e-2) for epoch in range(10): preds = layer(A_tch, c_tch, train_inputs, solver_args={'acceleration_lookback': 0})[0] loss = (preds - train_outputs).pow(2).sum(axis=1).mean(axis=0) with torch.no_grad(): val_preds = layer(A_tch, c_tch, val_inputs, solver_args={'acceleration_lookback': 0})[0] val_loss = (val_preds - val_outputs).pow(2).sum(axis=1).mean(axis=0) print('(epoch {0}) train / val ({1:.4f} / {2:.4f}) '.format(epoch, loss, val_loss)) train_losses.append(loss.item()) val_losses.append(val_loss.item()) opt.zero_grad() loss.backward() opt.step() with torch.no_grad(): c_tch = torch.max(c_tch, torch.tensor(1e-8)) ``` (epoch 0) train / val (0.0018 / 0.0014) (epoch 1) train / val (0.0017 / 0.0014) (epoch 2) train / val (0.0017 / 0.0014) (epoch 3) train / val (0.0017 / 0.0014) (epoch 4) train / val (0.0017 / 0.0014) (epoch 5) train / val (0.0017 / 0.0014) (epoch 6) train / val (0.0016 / 0.0014) (epoch 7) train / val (0.0016 / 0.0014) (epoch 8) train / val (0.0016 / 0.0014) (epoch 9) train / val (0.0016 / 0.0014) ```python with torch.no_grad(): train_preds_tch = layer(A_tch, c_tch, train_inputs)[0] train_preds = [t.detach().numpy() for t in train_preds_tch] ``` ```python with torch.no_grad(): val_preds_tch = layer(A_tch, c_tch, val_inputs)[0] val_preds = [t.detach().numpy() for t in val_preds_tch] ``` ```python fig = plt.figure() i = 0 plt.plot(val_preds[i], label='LLCP', color='teal') plt.plot(lstsq_val_preds[i], label='least squares', linestyle='--', color='gray') plt.plot(val_outputs[i], label='true', linestyle='-.', color='orange') w, h = 8, 3.5 plt.xlabel(r'$i$') plt.ylabel(r'$y_i$') plt.legend() plt.show() ```
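As a quick numerical follow-up (an addition, not part of the original example), one could also compare the two models on the validation set using the arrays already computed above; this is only a sketch of the comparison.

```python
# Mean squared validation error of the LLCP model vs. the unconstrained monomial
# (least-squares) fit. Uses val_outputs, val_preds, and lstsq_val_preds from above.
llcp_mse = np.mean([np.sum((np.asarray(p) - val_outputs[i].numpy())**2)
                    for i, p in enumerate(val_preds)])
lstsq_mse = np.mean([np.sum((np.asarray(p) - val_outputs[i].numpy())**2)
                     for i, p in enumerate(lstsq_val_preds)])
print("validation MSE, LLCP model:    ", llcp_mse)
print("validation MSE, least squares: ", lstsq_mse)
```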
4c8a5919cc7442ee17c947867014f40cfcb3a68a
48,722
ipynb
Jupyter Notebook
examples/notebooks/derivatives/structured_prediction.ipynb
jasondark/cvxpy
56aaa01b0e9d98ae5a91a923708129a7b37a6f18
[ "ECL-2.0", "Apache-2.0" ]
3,285
2015-01-03T04:02:29.000Z
2021-04-19T14:51:29.000Z
examples/notebooks/derivatives/structured_prediction.ipynb
h-vetinari/cvxpy
86307f271819bb78fcdf64a9c3a424773e8269fa
[ "ECL-2.0", "Apache-2.0" ]
1,138
2015-01-01T19:40:14.000Z
2021-04-18T23:37:31.000Z
examples/notebooks/derivatives/structured_prediction.ipynb
h-vetinari/cvxpy
86307f271819bb78fcdf64a9c3a424773e8269fa
[ "ECL-2.0", "Apache-2.0" ]
765
2015-01-02T19:29:39.000Z
2021-04-20T00:50:43.000Z
91.582707
15,184
0.821518
true
3,034
Qwen/Qwen-72B
1. YES 2. YES
0.909907
0.822189
0.748116
__label__eng_Latn
0.737427
0.576455
```python # https://colab.research.google.com/github/kassbohm/tm-snippets/blob/master/ipynb/TM_A/TM_2/lagrange.ipynb from sympy.physics.units import * from sympy import * a0, a1, a2 = var("a0, a1, a2") b0, b1, b2 = var("b0, b1, b2") c0, c1, c2 = var("c0, c1, c2") xi = var("xi") L0 = a0 + a1*xi + a2*xi*xi L1 = b0 + b1*xi + b2*xi*xi L2 = c0 + c1*xi + c2*xi*xi e1 = Eq(L0.subs(xi,S(0)/2), 1) e2 = Eq(L0.subs(xi,S(1)/2), 0) e3 = Eq(L0.subs(xi,S(2)/2), 0) e4 = Eq(L1.subs(xi,S(0)/2), 0) e5 = Eq(L1.subs(xi,S(1)/2), 1) e6 = Eq(L1.subs(xi,S(2)/2), 0) e7 = Eq(L2.subs(xi,S(0)/2), 0) e8 = Eq(L2.subs(xi,S(1)/2), 0) e9 = Eq(L2.subs(xi,S(2)/2), 1) eqns = [e1,e2,e3,e4,e5,e6,e7,e8,e9] unks = [a0,a1,a2,b0,b1,b2,c0,c1,c2] sol = solve(eqns,unks) # pprint(sol) # exit() l, EA, n = var("l, EA, n") (x0, x1, x2) = (0, l/2, l) x = var("x") u0, u1, u2 = var("u0, u1, u2") Ax = var("Ax") # Lagrange-polynomials: L0 = (x - x1)*(x - x2) L0 /= (x0 - x1)*(x0 - x2) L1 = (x - x0)*(x - x2) L1 /= (x1 - x0)*(x1 - x2) L2 = (x - x0)*(x - x1) L2 /= (x2 - x0)*(x2 - x1) pprint("\nShape-Functions wiht ξ = x/l and Integrals:") for Li in [L0, L1, L2]: pprint("\n") # Li = Li.simplify() # pprint(Li) Li = Li.expand() tmp = Li.subs(x/l, xi) # pprint(Li) pprint(tmp) # pprint(latex(Li)) I = integrate(Li, (x, 0, l)) pprint(I) # pprint("\nChecking x=0, x=l/2, x=l:") # pprint(Li.subs(x,0)) # pprint(Li.subs(x,L/2)) # pprint(Li.subs(x,L)) L0p = diff(L0,x) L1p = diff(L1,x) L2p = diff(L2,x) pprint("\n\nDerivatives of Shape Functions:") for Lip in [L0p, L1p, L2p]: Lip = Lip.simplify() pprint(Lip) pprint("\n\nStiffness Matrix k / EA:") k00, k01, k02 = L0p*L0p, L1p*L0p, L2p*L0p k11, k12 = L1p*L1p, L2p*L1p k22 = L2p*L2p k00 = integrate(k00, (x, 0, l)) k01 = integrate(k01, (x, 0, l)) k02 = integrate(k02, (x, 0, l)) k11 = integrate(k11, (x, 0, l)) k12 = integrate(k12, (x, 0, l)) k22 = integrate(k22, (x, 0, l)) k = Matrix([ [k00, k01, k02], [k01, k11, k12], [k02, k12, k22] ]) pprint(k) k*=EA f = Matrix([Ax + n*l/6, 2*n*l/3, n*l/6]) u = Matrix([0, u1, u2]) eq = Eq(k*u,f) pprint("\n\nSolution:") sol = solve(eq, [Ax, u1, u2], dict=True) sol = sol[0] Ax, u1, u2 = sol[Ax], sol[u1], sol[u2] pprint("\nAx:") pprint(Ax) pprint("\nu1:") pprint(u1) pprint("\nu2:") pprint(u2) u = Matrix([0, u1, u2]) Lp = Matrix([L0p, L1p, L2p]) eps = u.dot(Lp) pprint("\nε:") pprint("\nx=0:") tmp = eps.subs(x,0) pprint(tmp) pprint("\nx=l:") tmp = eps.subs(x,l) pprint(tmp) pprint("\nu(x) / (nl² / EA):") u0 = 0 ux = L0*u0 + L1*u1 + L2*u2 tmp = ux / ( n*l**2 / EA ) tmp = tmp.simplify() tmp = tmp.subs(x, xi*l) tmp = tmp.expand() pprint(tmp) # Shape-Functions and Integrals: # (l - 2⋅x)⋅(l - x) # ───────────────── # 2 # l # l # ─ # 6 # 4⋅x⋅(l - x) # ─────────── # 2 # l # 2⋅l # ─── # 3 # x⋅(-l + 2⋅x) # ──────────── # 2 # l # l # ─ # 6 # # # Derivatives of Shape Functions: # -3⋅l + 4⋅x # ────────── # 2 # l # 4⋅(l - 2⋅x) # ─────────── # 2 # l # -l + 4⋅x # ──────── # 2 # l # # # Stiffness Matrix k / EA: # ⎡ 7 -8 1 ⎤ # ⎢─── ─── ───⎥ # ⎢3⋅l 3⋅l 3⋅l⎥ # ⎢ ⎥ # ⎢-8 16 -8 ⎥ # ⎢─── ─── ───⎥ # ⎢3⋅l 3⋅l 3⋅l⎥ # ⎢ ⎥ # ⎢ 1 -8 7 ⎥ # ⎢─── ─── ───⎥ # ⎣3⋅l 3⋅l 3⋅l⎦ # # # Solution: # # Ax: # -l⋅n # # u1: # 2 # 3⋅l ⋅n # ────── # 8⋅EA # # u2: # 2 # l ⋅n # ──── # 2⋅EA ```
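As a quick consistency check (an addition, not part of the original snippet), the quadratic Lagrange shape functions should sum to one everywhere (partition of unity), and the element stiffness matrix should be singular, since a rigid-body translation produces no nodal forces. The sketch below is self-contained and redefines the shape functions exactly as above instead of relying on notebook state.

```python
# Consistency checks for the quadratic shape functions and the element stiffness matrix.
from sympy import symbols, simplify, integrate, Matrix, diff

x, l = symbols("x l", positive=True)
x0, x1, x2 = 0, l/2, l

N0 = (x - x1)*(x - x2) / ((x0 - x1)*(x0 - x2))
N1 = (x - x0)*(x - x2) / ((x1 - x0)*(x1 - x2))
N2 = (x - x0)*(x - x1) / ((x2 - x0)*(x2 - x1))

# Partition of unity: the shape functions must sum to 1 for every x.
print(simplify(N0 + N1 + N2))   # expected: 1

# Stiffness matrix (without the EA factor), as in the snippet above.
Np = [diff(N, x) for N in (N0, N1, N2)]
k = Matrix(3, 3, lambda i, j: integrate(Np[i]*Np[j], (x, 0, l)))

# Each row must sum to zero (rigid-body translation causes no nodal forces),
# so the matrix is singular.
print(simplify(sum(k[0, :])), simplify(sum(k[1, :])), simplify(sum(k[2, :])))
print(k.det())                  # expected: 0
```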
75c31fb01bc0cf9d15b3614cb0e9e4f4aa45078b
7,599
ipynb
Jupyter Notebook
ipynb/TM_A/TM_2/lagrange.ipynb
kassbohm/tm-snippets
5e0621ba2470116e54643b740d1b68b9f28bff12
[ "MIT" ]
null
null
null
ipynb/TM_A/TM_2/lagrange.ipynb
kassbohm/tm-snippets
5e0621ba2470116e54643b740d1b68b9f28bff12
[ "MIT" ]
null
null
null
ipynb/TM_A/TM_2/lagrange.ipynb
kassbohm/tm-snippets
5e0621ba2470116e54643b740d1b68b9f28bff12
[ "MIT" ]
null
null
null
32.063291
119
0.389788
true
1,815
Qwen/Qwen-72B
1. YES 2. YES
0.92523
0.7773
0.719181
__label__yue_Hant
0.078552
0.50923
# Chapter 4 `Original content created by Cam Davidson-Pilon` `Ported to Python 3 and PyMC3 by Max Margenot (@clean_utensils) and Thomas Wiecki (@twiecki) at Quantopian (@quantopian)` ______ ## The greatest theorem never told This chapter focuses on an idea that is always bouncing around our minds, but is rarely made explicit outside books devoted to statistics. In fact, we've been using this simple idea in every example thus far. ```python import json import matplotlib s = json.load(open("../styles/bmh_matplotlibrc.json")) matplotlib.rcParams.update(s) ``` /home/gjc216/.virtualenvs/edge/lib/python3.7/site-packages/matplotlib/__init__.py:855: MatplotlibDeprecationWarning: examples.directory is deprecated; in the future, examples will be found relative to the 'datapath' directory. "found relative to the 'datapath' directory.".format(key)) ### The Law of Large Numbers Let $Z_i$ be $N$ independent samples from some probability distribution. According to *the Law of Large numbers*, so long as the expected value $E[Z]$ is finite, the following holds, $$\frac{1}{N} \sum_{i=1}^N Z_i \rightarrow E[ Z ], \;\;\; N \rightarrow \infty.$$ In words: > The average of a sequence of random variables from the same distribution converges to the expected value of that distribution. This may seem like a boring result, but it will be the most useful tool you use. ### Intuition If the above Law is somewhat surprising, it can be made more clear by examining a simple example. Consider a random variable $Z$ that can take only two values, $c_1$ and $c_2$. Suppose we have a large number of samples of $Z$, denoting a specific sample $Z_i$. The Law says that we can approximate the expected value of $Z$ by averaging over all samples. Consider the average: $$ \frac{1}{N} \sum_{i=1}^N \;Z_i $$ By construction, $Z_i$ can only take on $c_1$ or $c_2$, hence we can partition the sum over these two values: \begin{align} \frac{1}{N} \sum_{i=1}^N \;Z_i & =\frac{1}{N} \big( \sum_{ Z_i = c_1}c_1 + \sum_{Z_i=c_2}c_2 \big) \\\\[5pt] & = c_1 \sum_{ Z_i = c_1}\frac{1}{N} + c_2 \sum_{ Z_i = c_2}\frac{1}{N} \\\\[5pt] & = c_1 \times \text{ (approximate frequency of $c_1$) } \\\\ & \;\;\;\;\;\;\;\;\; + c_2 \times \text{ (approximate frequency of $c_2$) } \\\\[5pt] & \approx c_1 \times P(Z = c_1) + c_2 \times P(Z = c_2 ) \\\\[5pt] & = E[Z] \end{align} Equality holds in the limit, but we can get closer and closer by using more and more samples in the average. This Law holds for almost *any distribution*, minus some important cases we will encounter later. ##### Example ____ Below is a diagram of the Law of Large numbers in action for three different sequences of Poisson random variables. We sample `sample_size = 100000` Poisson random variables with parameter $\lambda = 4.5$. (Recall the expected value of a Poisson random variable is equal to its parameter.) We calculate the average for the first $n$ samples, for $n=1$ to `sample_size`. ```python %matplotlib inline import numpy as np from IPython.core.pylabtools import figsize import matplotlib.pyplot as plt figsize( 12.5, 5 ) sample_size = 100000 expected_value = lambda_ = 4.5 poi = np.random.poisson N_samples = range(1,sample_size,100) for k in range(3): samples = poi( lambda_, sample_size ) partial_average = [ samples[:i].mean() for i in N_samples ] plt.plot( N_samples, partial_average, lw=1.5,label="average \ of $n$ samples; seq. 
%d"%k) plt.plot( N_samples, expected_value*np.ones_like( partial_average), ls = "--", label = "true expected value", c = "k" ) plt.ylim( 4.35, 4.65) plt.title( "Convergence of the average of \n random variables to its \ expected value" ) plt.ylabel( "average of $n$ samples" ) plt.xlabel( "# of samples, $n$") plt.legend(); ``` Looking at the above plot, it is clear that when the sample size is small, there is greater variation in the average (compare how *jagged and jumpy* the average is initially, then *smooths* out). All three paths *approach* the value 4.5, but just flirt with it as $N$ gets large. Mathematicians and statisticians have another name for *flirting*: convergence. Another very relevant question we can ask is *how quickly am I converging to the expected value?* Let's plot something new. For a specific $N$, let's do the above trials thousands of times and compute how far away we are from the true expected value, on average. But wait &mdash; *compute on average*? This is simply the law of large numbers again! For example, we are interested in, for a specific $N$, the quantity: $$D(N) = \sqrt{ \;E\left[\;\; \left( \frac{1}{N}\sum_{i=1}^NZ_i - 4.5 \;\right)^2 \;\;\right] \;\;}$$ The above formula is interpretable as a distance away from the true value (on average), for some $N$. (We take the square root so the dimensions of the above quantity and our random variables are the same). As the above is an expected value, it can be approximated using the law of large numbers: instead of averaging $Z_i$, we calculate the following multiple times and average them: $$ Y_k = \left( \;\frac{1}{N}\sum_{i=1}^NZ_i - 4.5 \; \right)^2 $$ By computing the above many, $N_Y$, times (remember, it is random), and averaging them: $$ \frac{1}{N_Y} \sum_{k=1}^{N_Y} Y_k \rightarrow E[ Y_k ] = E\;\left[\;\; \left( \frac{1}{N}\sum_{i=1}^NZ_i - 4.5 \;\right)^2 \right]$$ Finally, taking the square root: $$ \sqrt{\frac{1}{N_Y} \sum_{k=1}^{N_Y} Y_k} \approx D(N) $$ ```python figsize( 12.5, 4) N_Y = 250 #use this many to approximate D(N) N_array = np.arange( 1000, 50000, 2500 ) #use this many samples in the approx. to the variance. D_N_results = np.zeros( len( N_array ) ) lambda_ = 4.5 expected_value = lambda_ #for X ~ Poi(lambda) , E[ X ] = lambda def D_N( n ): """ This function approx. D_n, the average variance of using n samples. """ Z = poi( lambda_, (n, N_Y) ) average_Z = Z.mean(axis=0) return np.sqrt( ( (average_Z - expected_value)**2 ).mean() ) for i,n in enumerate(N_array): D_N_results[i] = D_N(n) plt.xlabel( "$N$" ) plt.ylabel( "expected squared-distance from true value" ) plt.plot(N_array, D_N_results, lw = 3, label="expected distance between\n\ expected value and \naverage of $N$ random variables.") plt.plot( N_array, np.sqrt(expected_value)/np.sqrt(N_array), lw = 2, ls = "--", label = r"$\frac{\sqrt{\lambda}}{\sqrt{N}}$" ) plt.legend() plt.title( "How 'fast' is the sample average converging? " ); ``` As expected, the expected distance between our sample average and the actual expected value shrinks as $N$ grows large. But also notice that the *rate* of convergence decreases, that is, we need only 10 000 additional samples to move from 0.020 to 0.015, a difference of 0.005, but *20 000* more samples to again decrease from 0.015 to 0.010, again only a 0.005 decrease. It turns out we can measure this rate of convergence. Above I have plotted a second line, the function $\sqrt{\lambda}/\sqrt{N}$. This was not chosen arbitrarily. 
In most cases, given a sequence of random variables distributed like $Z$, the rate of convergence to $E[Z]$ of the Law of Large Numbers is $$ \frac{ \sqrt{ \; Var(Z) \; } }{\sqrt{N} }$$ This is useful to know: for a given large $N$, we know (on average) how far away we are from the estimate. On the other hand, in a Bayesian setting, this can seem like a useless result: Bayesian analysis is OK with uncertainty so what's the *statistical* point of adding extra precise digits? Though drawing samples can be so computationally cheap that having a *larger* $N$ is fine too. ### How do we compute $Var(Z)$ though? The variance is simply another expected value that can be approximated! Consider the following: once we have the expected value (by using the Law of Large Numbers to estimate it, denote it $\mu$), we can estimate the variance: $$ \frac{1}{N}\sum_{i=1}^N \;(Z_i - \mu)^2 \rightarrow E[ \;( Z - \mu)^2 \;] = Var( Z )$$ ### Expected values and probabilities There is an even less explicit relationship between expected value and estimating probabilities. Define the *indicator function* $$\mathbb{1}_A(x) = \begin{cases} 1 & x \in A \\\\ 0 & else \end{cases} $$ Then, by the law of large numbers, if we have many samples $X_i$, we can estimate the probability of an event $A$, denoted $P(A)$, by: $$ \frac{1}{N} \sum_{i=1}^N \mathbb{1}_A(X_i) \rightarrow E[\mathbb{1}_A(X)] = P(A) $$ Again, this is fairly obvious after a moment's thought: the indicator function is only 1 if the event occurs, so we are summing only the times the event occurs and dividing by the total number of trials (consider how we usually approximate probabilities using frequencies). For example, suppose we wish to estimate the probability that a $Z \sim Exp(.5)$ is greater than 5, and we have many samples from an $Exp(.5)$ distribution. $$ P( Z > 5 ) \approx \frac{1}{N}\sum_{i=1}^N \mathbb{1}_{z > 5 }(Z_i) $$ ```python N = 100000 print( np.mean( [ np.random.exponential( 0.6 ) > 5 for i in range(N) ] ) ) ``` 0.00022 ### What does this all have to do with Bayesian statistics? *Point estimates*, to be introduced in the next chapter, are computed in Bayesian inference using expected values. In more analytical Bayesian inference, we would have been required to evaluate complicated expected values represented as multi-dimensional integrals. No longer. If we can sample from the posterior distribution directly, we simply need to evaluate averages. Much easier. If accuracy is a priority, plots like the ones above show how fast you are converging. And if further accuracy is desired, just take more samples from the posterior. When is enough enough? When can you stop drawing samples from the posterior? That is the practitioner's decision, and also dependent on the variance of the samples (recall from above that a high variance means the average will converge slower). We also should understand when the Law of Large Numbers fails. As the name implies, and comparing the graphs above for small $N$, the Law is only true for large sample sizes. Without this, the asymptotic result is not reliable. Knowing in what situations the Law fails can give us *confidence in how unconfident we should be*. The next section deals with this issue. ## The Disorder of Small Numbers The Law of Large Numbers is only valid as $N$ gets *infinitely* large: never truly attainable. While the law is a powerful tool, it is foolhardy to apply it liberally. Our next example illustrates this. ##### Example: Aggregated geographic data Often data comes in aggregated form. 
For instance, data may be grouped at the state, county, or city level. Of course, the population numbers vary per geographic area. If the data is an average of some characteristic of each of the geographic areas, we must be conscious of the Law of Large Numbers and how it can *fail* for areas with small populations. We will observe this on a toy dataset. Suppose there are five thousand counties in our dataset. Furthermore, the population numbers in each county are uniformly distributed between 100 and 1500. The way the population numbers are generated is irrelevant to the discussion, so we do not justify this. We are interested in measuring the average height of individuals per county. Unbeknownst to us, height does **not** vary across county, and each individual, regardless of the county he or she is currently living in, has the same distribution of what their height may be: $$ \text{height} \sim \text{Normal}(150, 15 ) $$ We aggregate the individuals at the county level, so we only have data for the *average in the county*. What might our dataset look like? ```python figsize( 12.5, 4) std_height = 15 mean_height = 150 n_counties = 5000 pop_generator = np.random.randint norm = np.random.normal #generate some artificial population numbers population = pop_generator(100, 1500, n_counties ) average_across_county = np.zeros( n_counties ) for i in range( n_counties ): #generate some individuals and take the mean; np.random.normal takes the standard deviation (scale) as its second argument average_across_county[i] = norm(mean_height, std_height, population[i] ).mean() #locate the counties with the apparently most extreme average heights. i_min = np.argmin( average_across_county ) i_max = np.argmax( average_across_county ) #plot population size vs. recorded average plt.scatter( population, average_across_county, alpha = 0.5, c="#7A68A6") plt.scatter( [ population[i_min], population[i_max] ], [average_across_county[i_min], average_across_county[i_max] ], s = 60, marker = "o", facecolors = "none", edgecolors = "#A60628", linewidths = 1.5, label="extreme heights") plt.xlim( 100, 1500 ) plt.title( "Average height vs. County Population") plt.xlabel("County Population") plt.ylabel("Average height in county") plt.plot( [100, 1500], [150, 150], color = "k", label = "true expected \ height", ls="--" ) plt.legend(scatterpoints = 1); ``` What do we observe? *Without accounting for population sizes* we run the risk of making an enormous inference error: if we ignored population size, we would say that the counties with the shortest and tallest individuals have been correctly circled. But this inference is wrong for the following reason. These two counties do *not* necessarily have the most extreme heights. The error results from the calculated average of smaller populations not being a good reflection of the true expected value of the population (which in truth should be $\mu =150$). The sample size/population size/$N$, whatever you wish to call it, is simply too small to invoke the Law of Large Numbers effectively. We provide more damning evidence against this inference. Recall the population numbers were uniformly distributed over 100 to 1500. Our intuition should tell us that the counties with the most extreme average heights should also be uniformly spread over 100 to 1500, and certainly independent of the county's population. Not so. Below are the population sizes of the counties with the most extreme heights. 
```python print("Population sizes of 10 'shortest' counties: ") print(population[ np.argsort( average_across_county )[:10] ], '\n') print("Population sizes of 10 'tallest' counties: ") print(population[ np.argsort( -average_across_county )[:10] ]) ``` Population sizes of 10 'shortest' counties: [104 127 117 123 253 107 142 281 396 185] Population sizes of 10 'tallest' counties: [130 108 176 273 345 127 138 174 134 154] Not at all uniform over 100 to 1500. This is an absolute failure of the Law of Large Numbers. ##### Example: Kaggle's *U.S. Census Return Rate Challenge* Below is data from the 2010 US census, which partitions populations beyond counties to the level of block groups (which are aggregates of city blocks or equivalents). The dataset is from a Kaggle machine learning competition some colleagues and I participated in. The objective was to predict the census letter mail-back rate of a block group, measured between 0 and 100, using census variables (median income, number of females in the block-group, number of trailer parks, average number of children etc.). Below we plot the census mail-back rate versus block group population: ```python figsize( 12.5, 6.5 ) data = np.genfromtxt( "./data/census_data.csv", skip_header=1, delimiter= ",") plt.scatter( data[:,1], data[:,0], alpha = 0.5, c="#7A68A6") plt.title("Census mail-back rate vs Population") plt.ylabel("Mail-back rate") plt.xlabel("population of block-group") plt.xlim(-100, 15e3 ) plt.ylim( -5, 105) i_min = np.argmin( data[:,0] ) i_max = np.argmax( data[:,0] ) plt.scatter( [ data[i_min,1], data[i_max, 1] ], [ data[i_min,0], data[i_max,0] ], s = 60, marker = "o", facecolors = "none", edgecolors = "#A60628", linewidths = 1.5, label="most extreme points") plt.legend(scatterpoints = 1); ``` The above is a classic phenomenon in statistics. I say *classic* referring to the "shape" of the scatter plot above. It follows a classic triangular form that tightens as we increase the sample size (as the Law of Large Numbers becomes more exact). I am perhaps overstressing the point and maybe I should have titled the book *"You don't have big data problems!"*, but here again is an example of the trouble with *small datasets*, not big ones. Simply, small datasets cannot be processed using the Law of Large Numbers. Compare with applying the Law without hassle to big datasets (ex. big data). I mentioned earlier that paradoxically big data prediction problems are solved by relatively simple algorithms. The paradox is partially resolved by understanding that the Law of Large Numbers creates solutions that are *stable*, i.e. adding or subtracting a few data points will not affect the solution much. On the other hand, adding or removing data points from a small dataset can create very different results. For further reading on the hidden dangers of the Law of Large Numbers, I would highly recommend the excellent manuscript [The Most Dangerous Equation](http://nsm.uh.edu/~dgraur/niv/TheMostDangerousEquation.pdf). ##### Example: How to order Reddit submissions You may have disagreed with the original statement that the Law of Large numbers is known to everyone, but only implicitly in our subconscious decision making. Consider ratings on online products: how often do you trust an average 5-star rating if there is only 1 reviewer? 2 reviewers? 3 reviewers? We implicitly understand that with so few reviewers, the average rating is **not** a good reflection of the true value of the product. 
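To see this numerically, here is a small simulation (an addition, not from the original text): reviewers of a product whose "true" quality corresponds to an average of about 3.5 stars give integer ratings from 1 to 5, and we ask how often a handful of reviews produces a perfect 5-star average compared to a large number of reviews. The rating probabilities below are made up purely for illustration.

```python
# Simulating star ratings: the hypothetical rating distribution below has mean 3.5.
np.random.seed(0)
stars = np.array([1, 2, 3, 4, 5])
probs = np.array([0.05, 0.10, 0.30, 0.40, 0.15])   # made-up rating probabilities
n_products = 100000

for n_reviews in [2, 100]:
    ratings = np.random.choice(stars, size=(n_products, n_reviews), p=probs)
    averages = ratings.mean(axis=1)
    print("%3d reviews: P(average == 5.0) = %.4f" % (n_reviews, (averages == 5.0).mean()))
```

With only two reviews, a perfect 5-star average appears for a couple of percent of such products purely by chance; with a hundred reviews it essentially never does.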
This has created flaws in how we sort items, and more generally, how we compare items. Many people have realized that sorting online search results by their rating, whether the objects be books, videos, or online comments, returns poor results. Often the seemingly top videos or comments have perfect ratings only from a few enthusiastic fans, and truly higher-quality videos or comments are hidden in later pages with *falsely-substandard* ratings of around 4.8. How can we correct this? Consider the popular site Reddit (I purposefully did not link to the website as you would never come back). The site hosts links to stories or images, called submissions, for people to comment on. Redditors can vote up or down on each submission (called upvotes and downvotes). Reddit, by default, will sort submissions to a given subreddit by Hot, that is, the submissions that have the most upvotes recently. How would you determine which submissions are the best? There are a number of ways to achieve this: 1. *Popularity*: A submission is considered good if it has many upvotes. A problem with this model is a submission with hundreds of upvotes but thousands of downvotes: while very *popular*, it is likely more controversial than best. 2. *Difference*: Using the *difference* of upvotes and downvotes. This solves the above problem, but fails when we consider the temporal nature of submissions. Depending on when a submission is posted, the website may be experiencing high or low traffic. The difference method will bias the *Top* submissions to be those made during high traffic periods, which have accumulated more upvotes than submissions that were not so graced, but are not necessarily the best. 3. *Time adjusted*: Consider using Difference divided by the age of the submission. This creates a *rate*, something like *difference per second*, or *per minute*. An immediate counter-example: if we use per second, a 1-second-old submission with 1 upvote would be better than a 100-second-old submission with 99 upvotes. One can avoid this by only considering submissions that are at least t seconds old. But what is a good t value? Does this mean no submission younger than t is good? We end up comparing unstable quantities with stable quantities (young vs. old submissions). 4. *Ratio*: Rank submissions by the ratio of upvotes to total number of votes (upvotes plus downvotes). This solves the temporal issue, such that new submissions that score well can be considered Top just as likely as older submissions, provided they have many upvotes to total votes. The problem here is that a submission with a single upvote (ratio = 1.0) will beat a submission with 999 upvotes and 1 downvote (ratio = 0.999), but clearly the latter submission is *more likely* to be better. I used the phrase *more likely* for good reason. It is possible that the former submission, with a single upvote, is in fact a better submission than the latter with 999 upvotes. The hesitation to agree with this is because we have not seen the other 999 potential votes the former submission might get. Perhaps it will achieve an additional 999 upvotes and 0 downvotes and be considered better than the latter, though not likely. What we really want is an estimate of the *true upvote ratio*. Note that the true upvote ratio is not the same as the observed upvote ratio: the true upvote ratio is hidden, and we only observe upvotes vs. 
downvotes (one can think of the true upvote ratio as "what is the underlying probability someone gives this submission an upvote, versus a downvote"). So the 999 upvote/1 downvote submission probably has a true upvote ratio close to 1, which we can assert with confidence thanks to the Law of Large Numbers, but on the other hand we are much less certain about the true upvote ratio of the submission with only a single upvote. Sounds like a Bayesian problem to me. One way to determine a prior on the upvote ratio is to look at the historical distribution of upvote ratios. This can be accomplished by scraping Reddit's submissions and determining a distribution. There are a few problems with this technique though: 1. Skewed data: The vast majority of submissions have very few votes, hence there will be many submissions with ratios near the extremes (see the "triangular plot" in the above Kaggle dataset), effectively skewing our distribution to the extremes. One could try to only use submissions with votes greater than some threshold. Again, problems are encountered. There is a tradeoff between number of submissions available to use and a higher threshold with associated ratio precision. 2. Biased data: Reddit is composed of different subpages, called subreddits. Two examples are *r/aww*, which posts pics of cute animals, and *r/politics*. It is very likely that the user behaviour towards submissions of these two subreddits is very different: visitors are likely friendly and affectionate in the former, and would therefore upvote submissions more, compared to the latter, where submissions are likely to be controversial and disagreed upon. Therefore not all submissions are the same. In light of these issues, I think it is better to use a `Uniform` prior. With our prior in place, we can find the posterior of the true upvote ratio. The Python script `top_showerthoughts_submissions.py` will scrape the best posts from the `showerthoughts` community on Reddit. This is a text-only community so the title of each post *is* the post. Below is the top post as well as some other sample posts: ```python #adding a number to the end of the %run call will get the ith top post. %run top_showerthoughts_submissions.py 2 print("Post contents: \n") print(top_post) ``` 6.2.0 Post contents: Those "choose your own flavor" Coke machines are probably just there to gather marketing data; They see what the most popular flavors are so they can release bottled/canned versions. ```python """ contents: an array of the text from the last 100 top submissions to a subreddit votes: a 2d numpy array of upvotes, downvotes for each submission. """ n_submissions = len(votes) submissions = np.random.randint( n_submissions, size=4) print("Some Submissions (out of %d total) \n-----------"%n_submissions) for i in submissions: print('"' + contents[i] + '"') print("upvotes/downvotes: ",votes[i,:], "\n") ``` Some Submissions (out of 98 total) ----------- "At some point in your life, someone will take a photo of you that will be on display at your funeral." upvotes/downvotes: [16 0] "Your dog probably sees things they are terrified of outside but decide to bark anyway to warn you/protect you." upvotes/downvotes: [208 9] "All money is pizza money it’s just sometimes you have to use it for other things" upvotes/downvotes: [431 59] "If you were born in 1869 and lived to be 100, you were born when the transcontinental railroad was finished and lived to see the moon landing. It's possible that same amount of change could happen in our lifetimes." 
upvotes/downvotes: [81 5] For a given true upvote ratio $p$ and $N$ votes, the number of upvotes will look like a Binomial random variable with parameters $p$ and $N$. (This is because of the equivalence between upvote ratio and probability of upvoting versus downvoting, out of $N$ possible votes/trials). We create a function that performs Bayesian inference on $p$, for a particular submission's upvote/downvote pair. ```python import pymc3 as pm def posterior_upvote_ratio( upvotes, downvotes, samples = 20000): """ This function accepts the number of upvotes and downvotes a particular submission received, and the number of posterior samples to return to the user. Assumes a uniform prior. """ N = upvotes + downvotes with pm.Model() as model: upvote_ratio = pm.Uniform("upvote_ratio", 0, 1) observations = pm.Binomial( "obs", N, upvote_ratio, observed=upvotes) trace = pm.sample(samples, step=pm.Metropolis()) burned_trace = trace[int(samples/4):] return burned_trace["upvote_ratio"] ``` Below are the resulting posterior distributions. ```python figsize( 11., 8) posteriors = [] colours = ["#348ABD", "#A60628", "#7A68A6", "#467821", "#CF4457"] for i in range(len(submissions)): j = submissions[i] posteriors.append( posterior_upvote_ratio( votes[j, 0], votes[j,1] ) ) plt.hist( posteriors[i], bins = 10, density = True, alpha = .9, histtype="step",color = colours[i%5], lw = 3, label = '(%d up:%d down)\n%s...'%(votes[j, 0], votes[j,1], contents[j][:50]) ) plt.hist( posteriors[i], bins = 10, density = True, alpha = .2, histtype="stepfilled",color = colours[i], lw = 3, ) plt.legend(loc="upper left") plt.xlim( 0, 1) plt.title("Posterior distributions of upvote ratios on different submissions"); ``` Some distributions are very tight, others have very long tails (relatively speaking), expressing our uncertainty about what the true upvote ratio might be. ### Sorting! We have been ignoring the goal of this exercise: how do we sort the submissions from *best to worst*? Of course, we cannot sort distributions, we must sort scalar numbers. There are many ways to distill a distribution down to a scalar: expressing the distribution through its expected value, or mean, is one way. Choosing the mean is a bad choice though. This is because the mean does not take into account the uncertainty of distributions. I suggest using the *95% least plausible value*, defined as the value such that there is only a 5% chance the true parameter is lower (think of the lower bound on the 95% credible region). 
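For a single array of posterior samples this is just the 5th percentile; a one-line sketch using the `posteriors` list built above:

```python
# 95% least plausible value of the first sampled submission's upvote ratio:
# the value below which only 5% of the posterior samples fall.
print(np.percentile(posteriors[0], 5))
```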
Below are the posterior distributions with the 95% least-plausible value plotted: ```python N = posteriors[0].shape[0] lower_limits = [] for i in range(len(submissions)): j = submissions[i] plt.hist( posteriors[i], bins = 20, density = True, alpha = .9, histtype="step",color = colours[i], lw = 3, label = '(%d up:%d down)\n%s...'%(votes[j, 0], votes[j,1], contents[j][:50]) ) plt.hist( posteriors[i], bins = 20, density = True, alpha = .2, histtype="stepfilled",color = colours[i], lw = 3, ) v = np.sort( posteriors[i] )[ int(0.05*N) ] #plt.vlines( v, 0, 15 , color = "k", alpha = 1, linewidths=3 ) plt.vlines( v, 0, 10 , color = colours[i], linestyles = "--", linewidths=3 ) lower_limits.append(v) plt.legend(loc="upper left") plt.title("Posterior distributions of upvote ratios on different submissions"); order = np.argsort( -np.array( lower_limits ) ) print(order, lower_limits) ``` The best submissions, according to our procedure, are the submissions that are *most-likely* to score a high percentage of upvotes. Visually those are the submissions with the 95% least plausible value close to 1. Why is sorting based on this quantity a good idea? By ordering by the 95% least plausible value, we are being the most conservative with what we think is best. When using the lower-bound of the 95% credible interval, we believe with high certainty that the 'true upvote ratio' is at the very least equal to this value (or greater), thereby ensuring that the best submissions are still on top. Under this ordering, we impose the following very natural properties: 1. given two submissions with the same observed upvote ratio, we will assign the submission with more votes as better (since we are more confident it has a higher ratio). 2. given two submissions with the same number of votes, we still assign the submission with more upvotes as *better*. ### But this is too slow for real-time! I agree, computing the posterior of every submission takes a long time, and by the time you have computed it, likely the data has changed. I delay the mathematics to the appendix, but I suggest using the following formula to compute the lower bound very fast. $$ \frac{a}{a + b} - 1.65\sqrt{ \frac{ab}{ (a+b)^2(a + b +1 ) } }$$ where \begin{align} & a = 1 + u \\\\ & b = 1 + d \\\\ \end{align} $u$ is the number of upvotes, and $d$ is the number of downvotes. The formula is a shortcut in Bayesian inference, which will be further explained in Chapter 6 when we discuss priors in more detail. ```python def intervals(u,d): a = 1. + u b = 1. + d mu = a/(a+b) std_err = 1.65*np.sqrt( (a*b)/( (a+b)**2*(a+b+1.) 
) ) return ( mu, std_err ) print("Approximate lower bounds:") posterior_mean, std_err = intervals(votes[:,0],votes[:,1]) lb = posterior_mean - std_err print(lb) print("\n") print("Top 40 Sorted according to approximate lower bounds:") print("\n") order = np.argsort( -lb ) ordered_contents = [] for i in order[:40]: ordered_contents.append( contents[i] ) print(votes[i,0], votes[i,1], contents[i]) print("-------------") ``` Approximate lower bounds: [0.96711925 0.94162649 0.83521658 0.89948033 0.93298123 0.92139647 0.95607591 0.92458598 0.95067603 0.91325068 0.95467916 0.91301601 0.86800628 0.93300071 0.91387422 0.95452891 0.94318204 0.93385266 0.93771511 0.90866277 0.8436714 0.84979447 0.96023852 0.91465559 0.75425942 0.96587192 0.90183539 0.92327676 0.89983924 0.9244614 0.97070774 0.85373162 0.7816908 0.96400616 0.94246974 0.88897414 0.88621472 0.93303496 0.95131565 0.91494935 0.9073345 0.93792895 0.94874614 0.86765877 0.86828537 0.89610301 0.8825551 0.94191707 0.90991491 0.88773339 0.83703075 0.95082337 0.79811635 0.95763257 0.83730175 0.82382813 0.8027221 0.90184807 0.94854702 0.94864269 0.87279132 0.76137738 0.9507226 0.85542404 0.89239065 0.73711754 0.92346577 0.92561395 0.81207876 0.76310347 0.88796035 0.93173568 0.81109538 0.85941097 0.80731596 0.83490455 0.80933194 0.73142963 0.94799705 0.81845702 0.74556767 0.93111579 0.82545948 0.77910901 0.71674317 0.76867393 0.81109538 0.80933194 0.66500492 0.95264061 0.93577171 0.81019924 0.78188878 0.72708753 0.86498277 0.91228157 0.85773646 0.66251028] Top 40 Sorted according to approximate lower bounds: 195 2 Sometimes it’s hard to tell if you hang out with someone because you like them, or because of habit. ------------- 9833 304 With Doris Day’s death (RIP), Queen Elizabeth II, Brigette Bardot, Chubby Checker, Bob Dylan, Ringo Starr, and Paul McCartney are the only 6 people alive who are referenced in Billy Joel’s “We Didn’t Start the Fire.” ------------- 433 9 People don't celebrate your birthday as much when you get older because the novelty of you existing has worn off. ------------- 71 0 We all have different voices in our head when people tExT LiKe ThIs ------------- 975 30 If robots ever gain sentience then will they make their own "drugs"? Like viruses that mess with their programming so they feel better ------------- 18858 786 Banjos sound like a guitar with a southern accent ------------- 6938 289 Telling a person to "breathe" is better than telling them to "calm down" ------------- 4045 169 One of the craziest moments growing up as a kid was realising that teachers exist and have a life outside of school. ------------- 452 14 Out of all the times I’ve seen a squirrel. I’ve never seen one pooping. ------------- 148 3 If we were to have a 3rd World War today, kids from opposing countries could be trash talking each other online. ------------- 373 12 Nobody has hated you enough to kill you yet ------------- 1435 60 Pornography is the only form of media that you want to know all the spoilers before experiencing it. ------------- 142 3 In maybe a couple of years, seeing a car with no driver will be an every day sight, and won't be creepy. ------------- 1302 54 A bowl cut is a 360° bang ------------- 284 9 Every time snakes move, they get a belly rub. ------------- 136 3 Artificial intelligence has a long way to go to become the threat to humanity that Natural stupidity already is. 
------------- 914 38 At some point, we decided to stop carrying around swords everywhere ------------- 209 6 Won’t power is probably stronger than will power ------------- 458 19 If you make a diamond hoe in minecraft, you're really good at the game or really bad at the game. ------------- 188 6 The next generation will never know the fun we had when dragging an error box around a computer to cover the entire screen ------------- 43 0 Some poor bastard died in WW1 thinking “At least this is the war that will end all wars” ------------- 1905 100 If girls only had sex with guys with big dicks, guys would eventually evolve to only have big dicks ------------- 66 1 With self driving cars, knowing how to drive will eventually become an ancient skill ------------- 325 14 Because of the internet a guy will see more naked women than all the men in his lineage and through tinder he will get rejected by more women than all the men in his lineage. ------------- 128 4 Those fun zones where kids play games and win tickets to exchange for prizes -- they're just practice casinos. ------------- 235 10 If cats could talk, they probably wouldn't. ------------- 516 27 We look back 100 years ago and feel sorry that people had to suffer through pain without any medication. But in 100 years time, people will have some amazing method of dealing with pain and feel sorry for us that we had to use medication. ------------- 548 29 The Target-to-Walmart ratio is a good indicator of how affluent a neighborhood is. ------------- 3165 202 We never appreciate breathing through our noses until we can’t anymore ------------- 120 4 Censored bad word seem much more serious than uncensored ------------- 208 9 Your dog probably sees things they are terrified of outside but decide to bark anyway to warn you/protect you. ------------- 33 0 Tug of war is the only time the fat kid gets picked first in gym class ------------- 679 43 Once you turn 18, your 6,570 day free trial has expired. ------------- 157 7 The show Veggie Tales’ main character is a fruit. ------------- 32 0 Your pet probably thinks you're cute, too. ------------- 89 3 Seeing a teacher outside of school is the lowest level of celebrity sighting. ------------- 2368 178 The heat from the sun just traveled 149.6 million killmeters just to melt our fucking chocolate bar. ------------- 166 9 Finding that Santa ate your cookies as a child when you're home alone is exciting and amazing. Finding Santa ate your cookies as an adult when you're home alone is terrifying and scary ------------- 822 62 There are infinite realities where the only difference are the position of a single atom. ------------- 294 19 This year for Halloween, there's going to be a bunch of fat, middle age men dressing up as Thor ------------- We can view the ordering visually by plotting the posterior mean and bounds, and sorting by the lower bound. In the plot below, notice that the left error-bar is sorted (as we suggested this is the best way to determine an ordering), so the means, indicated by dots, do not follow any strong pattern. ```python r_order = order[::-1][-40:] plt.errorbar( posterior_mean[r_order], np.arange( len(r_order) ), xerr=std_err[r_order], capsize=0, fmt="o", color = "#7A68A6") plt.xlim( 0.3, 1) plt.yticks( np.arange( len(r_order)-1,-1,-1 ), map( lambda x: x[:30].replace("\n",""), ordered_contents) ); ``` In the graphic above, you can see why sorting by mean would be sub-optimal. 
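As a quick numerical check of the two properties above, we can evaluate the approximate lower bound with the `intervals` function on a few hand-picked vote pairs; the numbers here are purely illustrative.

```python
# Sketch: the approximate lower bound respects both properties.
# Pairs 1 and 2: same observed ratio (50%), more votes -> higher lower bound.
# Pairs 3 and 4: same total votes (100), more upvotes -> higher lower bound.
for u, d in [(10, 10), (1000, 1000), (50, 50), (75, 25)]:
    mu, err = intervals(u, d)
    print("%4d up, %4d down -> approximate lower bound = %.3f" % (u, d, mu - err))
```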
### Extension to Starred rating systems

The above procedure works well for upvote-downvote schemes, but what about systems that use star ratings, e.g. 5-star rating systems? Similar problems apply if we simply take the average: an item with two perfect ratings would beat an item with thousands of perfect ratings and a single sub-perfect rating.

We can consider the upvote-downvote problem above as binary: 0 is a downvote, 1 is an upvote. A $N$-star rating system can be seen as a more continuous version of the above, and we can treat $n$ stars awarded as equivalent to a rating of $\frac{n}{N}$. For example, in a 5-star system, a 2-star rating corresponds to 0.4. A perfect rating is a 1. We can use the same formula as before, but with $a,b$ defined differently:

$$ \frac{a}{a + b} - 1.65\sqrt{ \frac{ab}{ (a+b)^2(a + b +1 ) } }$$

where

\begin{align}
& a = 1 + S \\\\
& b = 1 + N - S \\\\
\end{align}

where $N$ is the number of users who rated, and $S$ is the sum of all the ratings, under the equivalence scheme mentioned above.

##### Example: Counting Github stars

What is the average number of stars a Github repository has? How would you calculate this? There are over 6 million repositories, so there is more than enough data to invoke the Law of Large Numbers.

Let's start pulling some data. TODO

### Conclusion

While the Law of Large Numbers is cool, it is only true so much as its name implies: with large sample sizes only. We have seen how our inference can be affected by not considering *how the data is shaped*.

1. By (cheaply) drawing many samples from the posterior distributions, we can ensure that the Law of Large Numbers applies as we approximate expected values (which we will do in the next chapter).

2. Bayesian inference understands that with small sample sizes, we can observe wild randomness. Our posterior distribution will reflect this by being more spread out rather than tightly concentrated. Thus, our inference should be correctable.

3. There are major implications of not considering the sample size, and trying to sort objects that are unstable leads to pathological orderings. The method provided above solves this problem.

### Appendix

##### Derivation of sorting submissions formula

Basically what we are doing is using a Beta prior (with parameters $a=1, b=1$, which is a uniform distribution), and using a Binomial likelihood with observations $u, N = u+d$. This means our posterior is a Beta distribution with parameters $a' = 1 + u, b' = 1 + (N - u) = 1+d$. We then need to find the value, $x$, such that 0.05 probability is less than $x$. This is usually done by inverting the CDF ([Cumulative Distribution Function](http://en.wikipedia.org/wiki/Cumulative_Distribution_Function)); the CDF of the Beta for integer parameters is known, but it is a large sum [3].

We instead use a Normal approximation. The mean of the Beta is $\mu = a'/(a'+b')$ and the variance is

$$\sigma^2 = \frac{a'b'}{ (a' + b')^2(a'+b'+1) }$$

Hence we solve the following equation for $x$ and have an approximate lower bound.

$$ 0.05 = \Phi\left( \frac{(x - \mu)}{\sigma}\right) $$

$\Phi$ being the [cumulative distribution for the normal distribution](http://en.wikipedia.org/wiki/Normal_distribution#Cumulative_distribution).

##### Exercises

1\. How would you estimate the quantity $E\left[ \cos{X} \right]$, where $X \sim \text{Exp}(4)$? What about $E\left[ \cos{X} | X \lt 1\right]$, i.e. the expected value *given* we know $X$ is less than 1? Would you need more samples than the original sample size to be equally accurate?
```python ## Enter code here import scipy.stats as stats exp = stats.expon( scale=4 ) N = 1e5 X = exp.rvs( int(N) ) ## ... ``` 2\. The following table was located in the paper "Going for Three: Predicting the Likelihood of Field Goal Success with Logistic Regression" [2]. The table ranks football field-goal kickers by their percent of non-misses. What mistake have the researchers made? ----- #### Kicker Careers Ranked by Make Percentage <table><tbody><tr><th>Rank </th><th>Kicker </th><th>Make % </th><th>Number of Kicks</th></tr><tr><td>1 </td><td>Garrett Hartley </td><td>87.7 </td><td>57</td></tr><tr><td>2</td><td> Matt Stover </td><td>86.8 </td><td>335</td></tr><tr><td>3 </td><td>Robbie Gould </td><td>86.2 </td><td>224</td></tr><tr><td>4 </td><td>Rob Bironas </td><td>86.1 </td><td>223</td></tr><tr><td>5</td><td> Shayne Graham </td><td>85.4 </td><td>254</td></tr><tr><td>… </td><td>… </td><td>…</td><td> </td></tr><tr><td>51</td><td> Dave Rayner </td><td>72.2 </td><td>90</td></tr><tr><td>52</td><td> Nick Novak </td><td>71.9 </td><td>64</td></tr><tr><td>53 </td><td>Tim Seder </td><td>71.0 </td><td>62</td></tr><tr><td>54 </td><td>Jose Cortez </td><td>70.7</td><td> 75</td></tr><tr><td>55 </td><td>Wade Richey </td><td>66.1</td><td> 56</td></tr></tbody></table> In August 2013, [a popular post](http://bpodgursky.wordpress.com/2013/08/21/average-income-per-programming-language/) on the average income per programmer of different languages was trending. Here's the summary chart: (reproduced without permission, cause when you lie with stats, you gunna get the hammer). What do you notice about the extremes? ------ #### Average household income by programming language <table > <tr><td>Language</td><td>Average Household Income ($)</td><td>Data Points</td></tr> <tr><td>Puppet</td><td>87,589.29</td><td>112</td></tr> <tr><td>Haskell</td><td>89,973.82</td><td>191</td></tr> <tr><td>PHP</td><td>94,031.19</td><td>978</td></tr> <tr><td>CoffeeScript</td><td>94,890.80</td><td>435</td></tr> <tr><td>VimL</td><td>94,967.11</td><td>532</td></tr> <tr><td>Shell</td><td>96,930.54</td><td>979</td></tr> <tr><td>Lua</td><td>96,930.69</td><td>101</td></tr> <tr><td>Erlang</td><td>97,306.55</td><td>168</td></tr> <tr><td>Clojure</td><td>97,500.00</td><td>269</td></tr> <tr><td>Python</td><td>97,578.87</td><td>2314</td></tr> <tr><td>JavaScript</td><td>97,598.75</td><td>3443</td></tr> <tr><td>Emacs Lisp</td><td>97,774.65</td><td>355</td></tr> <tr><td>C#</td><td>97,823.31</td><td>665</td></tr> <tr><td>Ruby</td><td>98,238.74</td><td>3242</td></tr> <tr><td>C++</td><td>99,147.93</td><td>845</td></tr> <tr><td>CSS</td><td>99,881.40</td><td>527</td></tr> <tr><td>Perl</td><td>100,295.45</td><td>990</td></tr> <tr><td>C</td><td>100,766.51</td><td>2120</td></tr> <tr><td>Go</td><td>101,158.01</td><td>231</td></tr> <tr><td>Scala</td><td>101,460.91</td><td>243</td></tr> <tr><td>ColdFusion</td><td>101,536.70</td><td>109</td></tr> <tr><td>Objective-C</td><td>101,801.60</td><td>562</td></tr> <tr><td>Groovy</td><td>102,650.86</td><td>116</td></tr> <tr><td>Java</td><td>103,179.39</td><td>1402</td></tr> <tr><td>XSLT</td><td>106,199.19</td><td>123</td></tr> <tr><td>ActionScript</td><td>108,119.47</td><td>113</td></tr> </table> ### References 1. Wainer, Howard. *The Most Dangerous Equation*. American Scientist, Volume 95. 2. Clarck, Torin K., Aaron W. Johnson, and Alexander J. Stimpson. "Going for Three: Predicting the Likelihood of Field Goal Success with Logistic Regression." (2013): n. page. 
[Web](http://www.sloansportsconference.com/wp-content/uploads/2013/Going%20for%20Three%20Predicting%20the%20Likelihood%20of%20Field%20Goal%20Success%20with%20Logistic%20Regression.pdf). 20 Feb. 2013. 3. http://en.wikipedia.org/wiki/Beta_function#Incomplete_beta_function ```python from IPython.core.display import HTML def css_styling(): styles = open("../styles/custom.css", "r").read() return HTML(styles) css_styling() ``` <style> @font-face { font-family: "Computer Modern"; src: url('http://9dbb143991406a7c655e-aa5fcb0a5a4ec34cff238a2d56ca4144.r56.cf5.rackcdn.com/cmunss.otf'); } @font-face { font-family: "Computer Modern"; font-weight: bold; src: url('http://9dbb143991406a7c655e-aa5fcb0a5a4ec34cff238a2d56ca4144.r56.cf5.rackcdn.com/cmunsx.otf'); } @font-face { font-family: "Computer Modern"; font-style: oblique; src: url('http://9dbb143991406a7c655e-aa5fcb0a5a4ec34cff238a2d56ca4144.r56.cf5.rackcdn.com/cmunsi.otf'); } @font-face { font-family: "Computer Modern"; font-weight: bold; font-style: oblique; src: url('http://9dbb143991406a7c655e-aa5fcb0a5a4ec34cff238a2d56ca4144.r56.cf5.rackcdn.com/cmunso.otf'); } div.cell{ width:800px; margin-left:16% !important; margin-right:auto; } h1 { font-family: Helvetica, serif; } h4{ margin-top:12px; margin-bottom: 3px; } div.text_cell_render{ font-family: Computer Modern, "Helvetica Neue", Arial, Helvetica, Geneva, sans-serif; line-height: 145%; font-size: 130%; width:800px; margin-left:auto; margin-right:auto; } .CodeMirror{ font-family: "Source Code Pro", source-code-pro,Consolas, monospace; } .prompt{ display: None; } .text_cell_render h5 { font-weight: 300; font-size: 22pt; color: #4057A1; font-style: italic; margin-bottom: .5em; margin-top: 0.5em; display: block; } .warning{ color: rgb( 240, 20, 20 ) } </style> <style> img{ max-width:800px} </style>
7458cedeeb79754a223c1763770fd50e2b7e8474
475,551
ipynb
Jupyter Notebook
Chapter4_TheGreatestTheoremNeverTold/Ch4_LawOfLargeNumbers_PyMC3.ipynb
gjcooper/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
0082bb1183c114c5f99d88e743150a9612dc65de
[ "MIT" ]
null
null
null
Chapter4_TheGreatestTheoremNeverTold/Ch4_LawOfLargeNumbers_PyMC3.ipynb
gjcooper/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
0082bb1183c114c5f99d88e743150a9612dc65de
[ "MIT" ]
null
null
null
Chapter4_TheGreatestTheoremNeverTold/Ch4_LawOfLargeNumbers_PyMC3.ipynb
gjcooper/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
0082bb1183c114c5f99d88e743150a9612dc65de
[ "MIT" ]
null
null
null
373.567164
87,532
0.917147
true
12,717
Qwen/Qwen-72B
1. YES 2. YES
0.853913
0.914901
0.781246
__label__eng_Latn
0.994378
0.653428
```
# default_exp assignment_part_1_solution
```

# Assignment Part 1 Solution

--------------------

## Laplace Transforms

1. Calculate the Laplace Transform $X(s)$ of the signal $x(t)$:

$$
x(t) = (4t-3\cos(5t))e^{-2t}
$$

$$
x(t)=4te^{-2t} - 3\cos(5t)e^{-2t} \rightarrow X(s) = 4 \frac{1}{(s+2)^2} - 3 \frac{(s+2)}{(s+2)^2 + 25}
$$

Let's verify it with Sympy

```
import matplotlib.pyplot as plt # import matplotlib - it will be useful later.
import sympy # import sympy

t, s = sympy.symbols('t, s')
```

```
f1 = 4*t*sympy.exp(-2*t)
f2 = - 3*sympy.cos(5*t)*sympy.exp(-2*t)
print('{} {}'.format(f1, f2))
```

4*t*exp(-2*t) -3*exp(-2*t)*cos(5*t)

```
F1 = sympy.laplace_transform(f1, t, s, noconds=True)
F2 = sympy.laplace_transform(f2, t, s, noconds=True)
print('{} {}'.format(F1, F2))
```

4/(s + 2)**2 -(3*s + 6)/((s + 2)**2 + 25)

2. Calculate the Inverse Laplace Transform $g(t)$ of the transfer function $G(s)$:

$$
G(s)=2 + \frac{3}{2s^3+3s^2+s}
$$

$$
G(s)=2 + \frac{3}{2s^3+3s^2+s} = 2 + \frac{3}{s(2s^2+3s+1)} = 2 + \frac{3}{2s(s+1)(s+\frac{1}{2})} = 2 + \frac{3}{s} + \frac{3}{s+1} + \frac{-6}{s+\frac{1}{2}}
$$

$$
g(t) = 2\delta(t) + 3 + 3e^{-t} -6e^{-0.5t}
$$

```
F1 = 2
F2 = 3/(2*s**3+3*s**2+s)
```

```
f1 = sympy.inverse_laplace_transform(F1, s, t)
f2 = sympy.inverse_laplace_transform(F2, s, t)
print('{} + {}'.format(f1, f2))
```

2*DiracDelta(t) + 3*Heaviside(t) + 3*exp(-t)*Heaviside(t) - 6*exp(-t/2)*Heaviside(t)

```
f3 = sympy.inverse_laplace_transform(F1+F2, s, t)
print('{}'.format(f3))
```

2*DiracDelta(t) + 3*Heaviside(t) + 3*exp(-t)*Heaviside(t) - 6*exp(-t/2)*Heaviside(t)

## Block Diagrams

3. Calculate the equivalent transfer function $G_t(s)=\frac{Y(s)}{R_1(s)}$ of the following block diagram:

*(block diagram figure not included here)*

$$
\frac{G_1G_4(G_2+G_3)}{1+G_1G_4H_1 + H_2G_1G_4(G_2+G_3)}
$$

## System Response

4. Write the transfer function to a step input of a second order system characterised by:

- Static gain $G(0)=5$
- Damping ratio $\xi=0.5$
- Settling time $t_s=3 s$
- No zeros

We would like a system with form:

$$
\frac{K}{s^2+2\xi\omega_ns+\omega_n^2}
$$

we also know that:

$$
t_s \approx -\frac{1}{\xi\omega_n}\ln(0.05)\;\; s
$$

from which:

$$
\omega_n = -\frac{1}{3\cdot0.5}\ln(0.05) = 2 \;\; rad/s
$$

which means:

$$
\frac{K}{s^2+2s+4}
$$

since we want $G(0)=5 \rightarrow K=20$

$$
\frac{20}{s^2+2s+4}
$$

5.
Plot the qualitative behaviour of the step response of the system: $$ G(s)=\frac{20(3+0.1s)(s^2+10s+160)}{(2s+10)(0.1s+5)(s^2+2s+400)} $$ Let's re-write the transfer function slightly: $$ G(s)=\frac{20\cdot0.1(30+s)(s^2+10s+160)}{2(s+\frac{10}{2})0.1(s+\frac{5}{0.1})(s^2+2s+400)} $$ The poles are: $$ s = -5, -50, -1.+19.97498436j, -1.-19.97498436j $$ and the zeros are: $$ s = -30, -5.+11.61895004j, -5.-11.61895004j $$ The system is asymptotically stable, we can use the dominant poles approximation: $$ G(s)=\frac{20\cdot0.1\cdot30\cdot160}{2\cdot5\cdot0.1\cdot50(s^2+2s+400)} = \frac{192}{(s^2+2s+400)} $$ The response to a step input can be obtained as: $$ Y(s) = G(s)\frac{1}{s} = \frac{192}{(s^2+2s+400)}\frac{1}{s} $$ And, using partial fraction decomposition: $$ Y(s) = \frac{K}{s} + \frac{A}{s-p_1} + \frac{A^*}{s-p_1^*} $$ Where $$ K = \frac{192}{(s^2+2s+400)} \bigg |_{s=0} = \frac{192}{400} = 0.48 $$ And for the pole $p_1=−1.+19.97498436𝑗$ We know that when we have complex conjugate poles - see notebook `03_Transfer_function`: $$ f(t) = |A|e^{\sigma t} \big [ e^{j(\Phi_A+\omega t)} + e^{-j(\Phi_A+\omega t)} \big ] = 2|A|e^{\sigma t}\cos(\Phi_A+\omega t) $$ where: $$ A = (s-p_1)\frac{192}{s(s-p_1)(s-p_1^*)} \bigg |_{s=-1+20j} = \frac{192}{s(s-p_1^*)} \bigg |_{s=-1+20j} = \frac{192}{(-1+20j)( 40j)} = -0.24+0.012j $$ ``` abs(-0.24+0.012) ``` 0.22799999999999998 ``` abs(192/(40j*(-1+20j))) ``` 0.2397005613306827 ``` import numpy as np np.sqrt(399) ``` 19.974984355438178 and the response is $$ y(t) = K + Ae^{p_1}t + A^*e^{p_1^*}t = K + 2|A|e^{\sigma t}cos(\omega t + \angle{A}) $$ where $p_1=\sigma+\omega t$ and $A=|A|e^{j\angle{A}}$ $$ y(t) = 0.48 + 2*0.23*e^{-1t}cos(20t -\frac{\pi}{2}) $$ We can check that the inverse laplace of the system transfer function using Sympy: ``` def evaluate(f, times): res = [] for time in times: res.append(f.evalf(subs={t:time}).n(chop=1e-5)) return res def L(f): return sympy.laplace_transform(f, t, s, noconds=True) def invL(F): return sympy.inverse_laplace_transform(F, s, t) ``` ``` t, s = sympy.symbols('t, s') F = 192/(s**2+2*s+400) invL(F) ``` $\displaystyle \frac{64 \sqrt{399} e^{- t} \sin{\left(\sqrt{399} t \right)} \theta\left(t\right)}{133}$ Where: ``` import numpy as np import cmath np.sqrt(399) ``` 19.974984355438178 And now we can plot it to verify the step response: ``` time = np.linspace(0, 7, 1000) y = 0.48 + 2*0.23*np.exp(-time) plt.plot(time, y) plt.xlabel('time'); ``` Finally, let's compare the result we got with what Sympy calculates ``` fig, ax = plt.subplots(1,1,figsize=(8,5)) time = np.linspace(0,7,1000) ax.plot(time, evaluate(invL(F*1/s), time), linewidth=3) ax.plot(time, y, 'g') ax.set_title(f'y(end)={y[-1]}'); ``` Let's look at the response more closely: ``` plt.plot(time[:100], y[:100]) ``` We can also use the Python Control Library to verify our dominant pole approximation ``` import control import matplotlib.pyplot as plt ``` ``` s = control.TransferFunction.s G = (20*(3+0.1*s)*(s**2+10*s+160))/((2*s+10)*(0.1*s+5)*(s**2 + 2*s + 400)) G_approx = 192/(s**2 + 2*s + 400) ``` ``` T, yout = control.step_response(G, T=np.linspace(0, 7, 1000)); Tap, youtap = control.step_response(G_approx, T=np.linspace(0, 7, 1000)); plt.plot(T, yout, 'b', label='original') plt.plot(Tap, youtap, 'r', label='approx d.p.') plt.legend() plt.grid() ``` 6. 
For the system $G(s)$ defined above, calculate:

- Its steady state value $y_\infty$ to a step input (when $t \rightarrow \infty$)
- Its settling time $t_s$
- If the system oscillates, calculate the period $T_\omega$ of the oscillation

- $y(t\rightarrow\infty) = 0.48$
- $t_s \approx -\frac{1}{\xi\omega_n}\ln(0.05) \approx 3$ given that:
  - $\omega_n=\sqrt{399}\approx20$
  - $2\xi\omega_n=2 \rightarrow 20 \xi =1 \rightarrow \xi = 0.05$

And finally:

- $T_{P} = \frac{2\pi}{\omega_n\sqrt{1-\xi^2}} = 0.314s$

```
xi = 0.05
wn = 20
t_s = -1/(xi*wn)*np.log(0.05)
print('t_s = {}'.format(t_s))

Tp = 2*3.14/(wn*np.sqrt(1-xi**2))
print('T_p = {}'.format(Tp))
```

t_s = 2.995732273553991
T_p = 0.3143932374740646

## Routh Criterion

7. Determine the values of $K$ for which the following feedback system is asymptotically stable:

*(feedback system figure not included here)*

The characteristic equation of the feedback system is:

$$
1+K\frac{s^2+2s+64}{s^2(s+0.5)} = 0 \rightarrow s^2(s+0.5)+K(s^2+2s+64) = 0 \rightarrow s^3 + (0.5+K)s^2 + 2Ks+64K=0
$$

The Routh table is:

| | | |
|-|-|-|
|$s^3$| $1$ | $2K$ |
|$s^2$| $0.5+K$ | $64K$ |
|$s^1$| $(0.5+K)2K-64K$ | $0$ |
|$s^0$| $64K$ | |

from which it is possible to find the following constraints on $K$:

$$
0.5 + K > 0, \hspace{0.5cm} 2K^2 + K - 64K > 0, \hspace{0.5cm} K>0
$$

$$
K>-0.5, \hspace{0.5cm} K>31.5, \hspace{0.5cm} K>0
$$

And the system is asymptotically stable for $K>31.5$

## Bode Plots

8. Plot the Bode amplitude and phase plots for the system discussed in question 7.

$$
G_d(s) = \frac{s^2+2s+64}{s^2(s+0.5)}= \frac{64(\frac{s^2}{64}+\frac{2s}{64}+1)}{0.5s^2(\frac{s}{0.5}+1)}
$$

$K_{dB}=20\log(64/0.5) \approx 42 dB$

*(hand-drawn Bode plots not included here)*

## DC Motor Transfer Functions

The figures below represent a DC motor attached to an inertial load.

*(DC motor figures not included here)*

The input voltage can be applied to either the field or the armature terminals. The voltages applied to the field and armature sides of the motor are represented by $V_f$ and $V_a$. The resistances and inductances of the field and armature sides of the motor are represented by $R_f$, $L_f$, $R_a$, and $L_a$.

The torque generated by the motor can be assumed to be related linearly to $i_f$ and $i_a$, the currents in the field and armature sides of the motor, as follows:

$$
T_m = K i_f i_a \;\;\;\;(1)
$$

From the equation above, clearly to retain the linearity, one current must be kept constant, while the other becomes the input current. This makes it possible to have field-current or armature-current controlled motors. We will focus on the field-current motor for our analysis. This means that in a field-current controlled motor, the armature current $i_a$ is kept constant, while the field current $i_f$ is controlled through the field voltage $V_f$.

$$
T_m = K i_f i_a = K_m i_f \;\;\;\;(2)
$$

where $K_m$ is defined as the motor constant. The motor torque increases linearly with the field current.

**1. Write the Transfer Function from the input current to the resulting torque of the previous equation**

$$
\frac{T_m(s)}{I_f(s)} = K_m
$$

For the field side of the motor the voltage/current relationship is

$$
V_f = V_R + V_L = R_f i_f + L_f \frac{di_f}{dt} \;\;\;\;(3)
$$

**2. Write the Transfer Function from the input voltage to the resulting current**

Taking the Laplace transform of (3): $V_f = R_f I_f + L_f s I_f$, so $I_f = \frac{V_f/L_f}{s + R_f/L_f}$, i.e.

$$
\frac{I_f(s)}{V_f(s)} = \frac{\frac{1}{L_f}}{s+\frac{R_f}{L_f}} \;\;\;\;(4)
$$

**3. Write the Transfer Function from the input voltage to the resulting motor torque**
$$
\frac{T_m(s)}{V_f(s)} = \frac{\frac{K_m}{L_f}}{s+\frac{R_f}{L_f}} \;\;\;\;(5)
$$

**4. Discuss how the motor torque behaves with respect to different input signals (e.g., step input in field voltage, etc.)**

This is a first-order system (type 0), and a step input in field voltage $V_f$ results in an exponential rise in the motor torque.

------------------

We can now add load to the motor and verify how its behaviour changes. The motor torque $T_m(s)$ is equal to the torque delivered to the load, and this relation can be expressed as:

$$
T_m(s) = T_L(s) + T_d(s) \;\;\;\;(6)
$$

where $T_L(s)$ is the torque delivered to the load and $T_d(s)$ is the disturbance torque (e.g., external forces acting on the load).

We can calculate the rotational motion of the inertial load summing moments (see Figures above):

$$
\sum{M} = T_L - f\omega = J \dot{\omega} \;\;\;\;(7)
$$

where $f$ is due to friction, and $J$ is the load inertia.

**5. Write the Laplace transform of the load torque $T_L(s)$ using equation (7) above**

$$
T_L(s) = Js\Omega(s)+f\Omega(s)
$$

where $\Omega(s) = \mathcal{L}(\omega(t))$

**6. Given the relationship between position and angular velocity $\omega$, refine the equation you have just calculated to make the rotor position $\theta$ explicit (_hint:_ $\dot\theta = \omega$)**

$$
T_L(s) = Js^2\theta(s) + fs\theta(s)
$$

**7. Put together equations (2,4,6) and calculate the transfer function of the motor-load combination**

$$
\frac{\theta(s)}{V_f(s)} = \frac{K_m}{s(Js+f)(L_fs+R_f)} = \frac{\frac{K_m}{JL_f}}{s(s+\frac{f}{J})(s+\frac{R_f}{L_f})} \;\;\;\;(8)
$$

**8. Discuss the resulting system (order, dominant pole approximations, what happens when parameters change, select specific values for the parameters and draw its step response.)**

$$
\frac{\theta(s)}{V_f(s)} = \frac{\frac{K_m}{JL_f}}{s(s+\frac{f}{J})(s+\frac{R_f}{L_f})} = \frac{\frac{K_m}{fR_f}}{s(\tau_f s+1)(\tau_L s+1)}
$$

where $\tau_f=\frac{L_f}{R_f}$ and $\tau_L=\frac{J}{f}$. When $\tau_L > \tau_f$, the field time constant $\tau_f$ can be neglected.

Let's choose the following values:

- (J) moment of inertia of the rotor 0.01 kg.m^2
- (f) motor viscous friction constant 0.1 N.m.s
- (Ke) electromotive force constant 0.01 V/rad/sec
- (Kt) motor torque constant 0.01 N.m/Amp
- (R) electric resistance 1 Ohm
- (L) electric inductance 0.5 H

_Note that in SI units, $K_e = K_t = K$_

$$
\frac{\theta(s)}{V_f(s)} = \frac{\frac{0.01}{0.1\cdot1}}{s(0.5/1s+1)(0.01/0.1s+1)} = \frac{0.1}{s(0.5s+1)(0.1s+1)}
$$

```
s = control.TransferFunction.s
G_motor = 0.1/(s*(0.5*s+1)*(0.1*s+1))
```

```
T, yout = control.step_response(G_motor, T=np.linspace(0, 7, 1000));

plt.plot(T, yout, 'b', label='dc-motor-step-response')
plt.legend()
plt.xlabel('time')
plt.grid()
```

**9. Draw a block diagram of the field controlled DC motor from the field voltage to the position output using all the relevant equations calculated so far. Make sure you include the disturbance $T_d(s)$ as per equation (6).**

*(block diagram figure not included here)*

```

```
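As a numerical companion to question 9 (an added sketch, not part of the original assignment), we can wire the individual blocks together with the python-control library already used above and look at how a step disturbance torque $T_d$ moves the position output; the block names and the unit-step disturbance are assumptions made only for illustration.

```
# Sketch of the question-9 block diagram, using the parameter values chosen above.
# Vf -> 1/(Lf s + Rf) -> Km -> (minus Td) -> 1/(J s + f) -> 1/s -> theta
s = control.TransferFunction.s
Km, Rf, Lf, J, f_visc = 0.01, 1.0, 0.5, 0.01, 0.1

field_block = 1/(Lf*s + Rf)      # field voltage to field current, eq. (4)
load_block  = 1/(J*s + f_visc)   # net torque to angular velocity, eq. (7)
integrator  = 1/s                # angular velocity to position

G_vf_to_theta = Km * field_block * load_block * integrator  # matches eq. (8)
G_td_to_theta = -load_block * integrator                    # disturbance enters after T_m, eq. (6)

T, theta_d = control.step_response(G_td_to_theta, T=np.linspace(0, 7, 1000))
plt.plot(T, theta_d, label='position response to a unit step disturbance')
plt.xlabel('time')
plt.legend()
plt.grid()
```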
c9d41d01f9926622cf6b2159f258903e4a5748e8
148,858
ipynb
Jupyter Notebook
A_assignment_part_1_solution.ipynb
andreamunafo/classical_control_theory
5e1bef562e32fb9efcde83891cb19ce5825a6a7f
[ "Apache-2.0" ]
null
null
null
A_assignment_part_1_solution.ipynb
andreamunafo/classical_control_theory
5e1bef562e32fb9efcde83891cb19ce5825a6a7f
[ "Apache-2.0" ]
null
null
null
A_assignment_part_1_solution.ipynb
andreamunafo/classical_control_theory
5e1bef562e32fb9efcde83891cb19ce5825a6a7f
[ "Apache-2.0" ]
null
null
null
122.014754
39,920
0.87308
true
4,831
Qwen/Qwen-72B
1. YES 2. YES
0.91611
0.805632
0.738047
__label__eng_Latn
0.834858
0.553063
```python
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data_utils
import operator
import numpy as np
```

## RNN intuition

Let us assume that we have an input $x = [x_1, x_2, ..., x_N]$ and we need to learn the mapping for some output $y = [y_1, y_2, ..., y_N]$, where $N$ is variable for each instance. In this case we can't just use a simple feed-forward neural network which maps $x \rightarrow y$, as this will not work with variable length sequences. Furthermore, the number of parameters required for training such a network would be proportional to $size(x_i)*N$. This is a major memory cost. Additionally, if the sequence has some common mapping between $x_i$ and $y_i$, then we would be learning redundant weights for each pair in the sequence.

This is where an RNN is more useful. The basic idea is that each input $x_i$ is processed in a similar fashion using the same processing module and some additional context variable (which we will henceforth refer to as the **hidden state**). This hidden state should capture some information about the part of the sequence which has already been processed. Now at each step of the sequence we need to do the following:

* Generate the output based on the previous hidden state and current input
* Update the hidden state based on the previous hidden state and current input.

The order of the above steps is not fixed and forms the basis of many RNN spin-offs. What is important, at each step, is to have a new output and a new hidden state. Sometimes, the hidden state and the outputs are the same, to make the network smaller. But the core idea remains the same.

Below we would like to formalize the general intuition of an RNN module. Initialize the hidden state $h_{0}$ with some initial value. At timestep $i$:

$$
\begin{equation}
h^{'}_{i} = f(x_{i},h_{i})\\
y_{i} = g(x_{i},h^{'}_{i})\\
h_{i+1} = h^{'}_{i}\\
\end{equation}
$$

Here $y_{i}$ is the output and $h^{'}_{i}$ is the intermediate hidden state.
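Before building this with PyTorch modules below, a minimal NumPy sketch of the recurrence may help fix ideas; choosing $f$ and $g$ to be single affine layers with a $tanh$ activation (and omitting biases) is an assumption made only for this illustration.

```python
# Minimal NumPy sketch of the generic RNN recurrence (illustration only).
import numpy as np

def rnn_unroll(xs, h0, Wf, Wg):
    """Unroll the recurrence: h' = tanh(Wf [x, h]), y = tanh(Wg [x, h'])."""
    h, ys = h0, []
    for x in xs:                                          # one step per sequence element
        h = np.tanh(Wf @ np.concatenate([x, h]))          # update the hidden state
        ys.append(np.tanh(Wg @ np.concatenate([x, h])))   # emit an output for this step
    return ys, h

d = 3
xs = [np.random.randn(d) for _ in range(5)]               # a toy length-5 sequence
Wf, Wg = np.random.randn(d, 2*d), np.random.randn(d, 2*d)
ys, h = rnn_unroll(xs, np.zeros(d), Wf, Wg)
print(len(ys), ys[0].shape)                               # one output per input step
```

The same two weight matrices are reused at every step, which is why the parameter count does not grow with the sequence length.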
```python class Input2Hidden(nn.Module): def __init__(self, x_dim, concat_layers=False): """Input2Hidden module Args: x_dim: input vector dimension concat_layers: weather to concat input and hidden layers or sum them """ super(Input2Hidden, self).__init__() self.concat_layers = concat_layers input_dim = x_dim if self.concat_layers: input_dim = 2*x_dim self.linear_layer = nn.Linear(input_dim, x_dim) def forward(self, x, h): if self.concat_layers: cell_input = torch.cat([x,h], dim=1) else: cell_input = x + h assert isinstance(cell_input, Variable) logit = F.tanh(self.linear_layer(cell_input)) return logit class Hidden2Output(nn.Module): def __init__(self, x_dim, out_dim, concat_layers=False): """Hidden2Output module Args: x_dim: input vector dimension out_dim: output vector dimension concat_layers: weather to concat input and hidden layers or sum them """ super(Hidden2Output, self).__init__() input_dim = x_dim self.concat_layers = concat_layers if self.concat_layers: input_dim = 2*x_dim self.linear_layer = nn.Linear(input_dim, out_dim) def forward(self, x, h): if self.concat_layers: cell_input = torch.cat([x,h], dim=1) else: cell_input = x + h assert isinstance(cell_input, Variable) logit = F.tanh(self.linear_layer(cell_input)) return logit class CustomRNNCell(nn.Module): def __init__(self, i2h, h2o): super(CustomRNNCell, self).__init__() self.i2h = i2h self.h2o = h2o def forward(self, x, h): assert isinstance(x, Variable) assert isinstance(h, Variable) h_prime = self.i2h(x,h) assert isinstance(h_prime, Variable) output = self.h2o(x,h_prime) return output, h_prime class Model(nn.Module): def __init__(self, embedding, rnn_cell): super(Model, self).__init__() self.embedding = embedding self.rnn_cell = rnn_cell self.loss_function = nn.CrossEntropyLoss() def forward(self, word_ids, hidden=None): if hidden is None: hidden = Variable(torch.zeros( word_ids.data.shape[0],self.embedding.embedding_dim)) assert isinstance(hidden, Variable) embeddings = self.embedding(word_ids) max_seq_length = word_ids.data.shape[-1] outputs, hidden_states = [], [] for i in range(max_seq_length): x = embeddings[:, i, :] assert isinstance(x, Variable) #print("x={}\nhidden={}".format(x,hidden)) output, hidden = self.rnn_cell(x, hidden) assert isinstance(output, Variable) assert isinstance(hidden, Variable) #print("output: {}, hidden: {}".format(output.data.shape, hidden.data.shape)) outputs.append(output.unsqueeze(1)) hidden_states.append(hidden.unsqueeze(1)) outputs = torch.cat(outputs, 1) hidden_states = torch.cat(hidden_states, 1) assert isinstance(outputs, Variable) assert isinstance(hidden_states, Variable) return outputs, hidden_states def loss(self, word_ids, target_ids, hidden=None): outputs, hidden_states = self.forward(word_ids, hidden=hidden) outputs = outputs.view(-1, outputs.data.shape[-1]) target_ids = target_ids.view(-1) assert isinstance(outputs, Variable) assert isinstance(target_ids, Variable) #print("output={}\ttargets={}".format(outputs.data.shape,target_ids.data.shape)) loss = self.loss_function(outputs, target_ids) return loss def predict(self, word_ids, hidden=None): outputs, hidden_states = self.forward(word_ids, hidden=hidden) outputs = outputs.view(-1, outputs.data.shape[-1]) max_scores, predictions = outputs.max(1) predictions = predictions.view(*word_ids.data.shape) #print(word_ids.data.shape, predictions.data.shape) assert word_ids.data.shape == predictions.data.shape, "word_ids: {}, predictions: {}".format( word_ids.data.shape, predictions.data.shape ) return predictions def 
tensors2variables(*args, requires_grad=False): return tuple(map(lambda x: Variable(x, requires_grad=requires_grad), args)) def get_batch(tensor_types, *args, requires_grad=False): return tuple(map(lambda t,arg: Variable(t(arg), requires_grad=requires_grad), tensor_types, args)) ``` ## Learning to predict bit flip Let us take a simple example of using an RNN to predict the flip in bits of an $N$ bit unsigned integer. In python for an integer n represented using $N$ bits, the unsigned bitflip can be written as `(~n) & ((1<<N)-1)`. Four our RNN each bit from the left will be $x_i$ and each flipped bit will be $y_i$. This task doesn't require any temporal dependencies but will be a good exercise to test the accuracy of RNN implementation. Theoretically, the network should learn to do this job perfectly in a few iterations. Later we will move to an example which does require the network to learn some temporal dependencies between inputs. For our network we define $f(x,h)$ as a simple affine layer with $tanh$ activation, which takes the concatanated input $[x_i, h_{i-1}]$ and returns a new hidden state $h^{'}_{i}$. Similarly, we have $g(x_i, h^{'}_{n})$ also represented as an affine layer with $tanh$ activation, of the concatanation of its inputs $[x_i, h^{'}_{n}]$, resulting in a new output $y_{i}$. More formally, we have $$ \begin{equation} h^{'}_{i} = f(x_{i},h_{i}) = \sigma([x_i, h_{i-1}]W_{i2h})\\ y_{i} = g(x_{i},h^{'}_{i}) = \sigma([x_i, h^{'}_{n}]W_{h2o})\\ h_{i+1} = h^{'}_{i}\\ \end{equation} $$ ```python input_size=2 embedding_size=3 output_size=2 embedding = nn.Embedding(input_size, embedding_size) f = Input2Hidden(embedding_size, concat_layers=True) g = Hidden2Output(embedding_size, 2, concat_layers=True) rnn_cell = CustomRNNCell(f,g) model = Model(embedding, rnn_cell) ``` ```python word_ids = [[0, 1, 0, 1, 0, 1]] target_ids = [[1, 0, 1, 0, 1, 0]] tensor_types = (torch.LongTensor, torch.LongTensor) word_ids_tensor, target_ids_tensor = get_batch(tensor_types, word_ids, target_ids) print(word_ids_tensor, target_ids_tensor) ``` Variable containing: 0 1 0 1 0 1 [torch.LongTensor of size 1x6] Variable containing: 1 0 1 0 1 0 [torch.LongTensor of size 1x6] ```python model.forward(torch.cat([word_ids_tensor, word_ids_tensor, word_ids_tensor], 0))[0] ``` Variable containing: (0 ,.,.) = 0.1159 -0.0933 -0.4920 0.6325 0.1634 -0.0420 -0.5012 0.6430 0.1758 -0.0425 -0.5058 0.6449 (1 ,.,.) = 0.1159 -0.0933 -0.4920 0.6325 0.1634 -0.0420 -0.5012 0.6430 0.1758 -0.0425 -0.5058 0.6449 (2 ,.,.) 
= 0.1159 -0.0933 -0.4920 0.6325 0.1634 -0.0420 -0.5012 0.6430 0.1758 -0.0425 -0.5058 0.6449 [torch.FloatTensor of size 3x6x2] ```python model.predict(torch.cat([word_ids_tensor, word_ids_tensor, word_ids_tensor], 0)) ``` Variable containing: 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 [torch.LongTensor of size 3x6] ```python loss = model.loss(word_ids_tensor, target_ids_tensor) loss ``` Variable containing: 1.1108 [torch.FloatTensor of size 1] ```python loss.backward() ``` ```python model.predict(word_ids_tensor) ``` Variable containing: 0 1 0 1 0 1 [torch.LongTensor of size 1x6] ```python def create_dataset(max_len=5): """Create a dataset of max_len bits and their flipped values Args: max_len: Maximum number of bits in the number """ max_val = (1<<max_len) X, Y = [], [] for i in range(max_val): x = "{0:0{1}b}".format(i,max_len) y = "{0:0{1}b}".format((~i) & max_val-1,max_len) x = tuple(map(int, x)) y = tuple(map(int, y)) X.append(x) Y.append(y) return X, Y ``` ```python X, Y = create_dataset(max_len=10) X_tensors, Y_tensors = tuple(map(torch.LongTensor, [X, Y])) print(X_tensors.shape, Y_tensors.shape) assert X_tensors.shape == Y_tensors.shape, "X and Y should be of same shape" ``` torch.Size([1024, 10]) torch.Size([1024, 10]) ```python train = data_utils.TensorDataset(X_tensors, Y_tensors) train_loader = data_utils.DataLoader(train, batch_size=8, shuffle=True) ``` ```python %%time learning_rate = 1e-4 max_epochs = 20 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) for epoch in range(1, max_epochs+1): for X_batch, Y_batch in train_loader: X_batch, Y_batch = tensors2variables(X_batch, Y_batch) # Forward pass: compute predicted y by passing x to the model. loss = model.loss(X_batch, Y_batch) # Before the backward pass, use the optimizer object to zero all of the # gradients for the variables it will update (which are the learnable weights # of the model) optimizer.zero_grad() # Backward pass: compute gradient of the loss with respect to model # parameters loss.backward() # Calling the step function on an Optimizer makes an update to its # parameters optimizer.step() if epoch % 2 != 0: continue loss = model.loss(*tensors2variables(X_tensors, Y_tensors)) Y_predict = model.predict(Variable(X_tensors)).data accuracy = (Y_tensors == Y_predict).sum() /operator.mul(*Y_tensors.shape) * 100. print("Epoch[{:03d}]: loss={:5.3f}; accuracy={:.3f}%".format(epoch, loss.data[0], accuracy)) ``` Epoch[002]: loss=0.982; accuracy=45.000% Epoch[004]: loss=0.902; accuracy=50.000% Epoch[006]: loss=0.827; accuracy=50.000% Epoch[008]: loss=0.754; accuracy=50.000% Epoch[010]: loss=0.681; accuracy=50.000% Epoch[012]: loss=0.609; accuracy=50.000% Epoch[014]: loss=0.536; accuracy=77.500% Epoch[016]: loss=0.459; accuracy=100.000% Epoch[018]: loss=0.383; accuracy=100.000% Epoch[020]: loss=0.320; accuracy=100.000% CPU times: user 32.2 s, sys: 1min 42s, total: 2min 15s Wall time: 22.6 s ```python Y_predict = model.predict(X_batch) Y_predict[:10].data ``` 1 1 1 1 0 0 0 0 0 0 1 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 1 1 0 0 1 0 0 0 1 0 0 1 1 1 1 1 1 0 0 1 0 1 1 0 1 0 0 0 0 0 0 1 0 1 1 1 1 1 1 1 0 0 0 1 1 1 0 [torch.LongTensor of size 8x10] ## Learning to predict bit shift This example requires some learning of temporal dependencies. We want to learn our network the output when the input $x$'s bits are shifted right by $K$ positions. This can be done using `x >> K`. Similarly, a left shift can be done using `(a << K) & (1<<N) -1)`, where $N$ is the max length of the bit sequence. 
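To make the target construction concrete before defining the dataset, here is a tiny sketch of the right-shift mapping (and the masked left shift) on bit strings; the sample values are arbitrary.

```python
# Sketch: input/target bit strings for a right shift by K bits (N-bit numbers).
N, K = 10, 4
for n in [1, 37, 1023]:
    x = "{0:0{1}b}".format(n, N)                           # input bit string
    y = "{0:0{1}b}".format(n >> K, N)                      # right-shifted target
    z = "{0:0{1}b}".format((n << K) & ((1 << N) - 1), N)   # masked left shift
    print(x, "->", y, "(right)", z, "(left)")
```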
```python def create_dataset(max_len=5, K=4): """Create a dataset of max_len bits and their flipped values Args: max_len: Maximum number of bits in the number """ X, Y = [], [] max_val = 2**max_len for i in range(max_val): x = "{0:0{1}b}".format(i,max_len) y = "{0:0{1}b}".format(i>>K,max_len) x = tuple(map(int, x)) y = tuple(map(int, y)) X.append(x) Y.append(y) return X, Y ``` ```python input_size=2 embedding_size=3 output_size=2 embedding = nn.Embedding(input_size, embedding_size) f = Input2Hidden(embedding_size, concat_layers=True) g = Hidden2Output(embedding_size, 2, concat_layers=True) rnn_cell = CustomRNNCell(f,g) model = Model(embedding, rnn_cell) ``` ```python X, Y = create_dataset(max_len=10, K=4) X_tensors, Y_tensors = tuple(map(torch.LongTensor, [X, Y])) print(X_tensors.shape, Y_tensors.shape) assert X_tensors.shape == Y_tensors.shape, "X and Y should be of same shape" train = data_utils.TensorDataset(X_tensors, Y_tensors) train_loader = data_utils.DataLoader(train, batch_size=128, shuffle=True) ``` torch.Size([1024, 10]) torch.Size([1024, 10]) ```python %%time learning_rate = 1e-4 max_epochs = 10000 check_every=500 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) for epoch in range(1, max_epochs+1): for X_batch, Y_batch in train_loader: X_batch, Y_batch = tensors2variables(X_batch, Y_batch) # Forward pass: compute predicted y by passing x to the model. loss = model.loss(X_batch, Y_batch) # Before the backward pass, use the optimizer object to zero all of the # gradients for the variables it will update (which are the learnable weights # of the model) optimizer.zero_grad() # Backward pass: compute gradient of the loss with respect to model # parameters loss.backward() # Calling the step function on an Optimizer makes an update to its # parameters optimizer.step() if epoch % check_every != 0: continue loss = model.loss(*tensors2variables(X_tensors, Y_tensors)) Y_predict = model.predict(Variable(X_tensors)).data accuracy = (Y_tensors == Y_predict).sum() /operator.mul(*Y_tensors.shape) * 100. print("Epoch[{:03d}]: loss={:5.3f}; accuracy={:.3f}%".format(epoch, loss.data[0], accuracy)) ``` Epoch[500]: loss=0.552; accuracy=76.133% Epoch[1000]: loss=0.523; accuracy=78.555% Epoch[1500]: loss=0.512; accuracy=78.896% Epoch[2000]: loss=0.494; accuracy=79.600% Epoch[2500]: loss=0.464; accuracy=79.600% Epoch[3000]: loss=0.409; accuracy=83.193% Epoch[3500]: loss=0.368; accuracy=85.518% Epoch[4000]: loss=0.348; accuracy=87.441% Epoch[4500]: loss=0.341; accuracy=87.500% Epoch[5000]: loss=0.337; accuracy=87.500% Epoch[5500]: loss=0.334; accuracy=87.500% Epoch[6000]: loss=0.332; accuracy=87.500% Epoch[6500]: loss=0.331; accuracy=87.500% Epoch[7000]: loss=0.330; accuracy=87.500% Epoch[7500]: loss=0.329; accuracy=87.500% Epoch[8000]: loss=0.329; accuracy=87.500% Epoch[8500]: loss=0.329; accuracy=87.500% Epoch[9000]: loss=0.328; accuracy=87.500% Epoch[9500]: loss=0.328; accuracy=87.500% Epoch[10000]: loss=0.327; accuracy=87.500% CPU times: user 19min 9s, sys: 57min 56s, total: 1h 17min 6s Wall time: 12min 52s ```python Y_predict = model.predict(X_batch) Y_predict[:10].data ``` 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 1 0 1 1 1 0 0 0 0 1 0 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 1 0 1 0 1 0 0 0 0 0 1 0 1 0 1 0 0 0 0 1 0 1 0 1 0 [torch.LongTensor of size 10x10] ## Too slow to learn The network trained above takes more than 10,000 epochs to converge to only $90\%$ accuracy. This reflects a major shortcoming of general RNN's. 
The shortcoming comes from a problem known as vanishing gradients, where gradients based on more distant steps become numerically too small to update the current layer, leading to information loss and failure to learn long range dependencies. Researcher's have worked around this using what is known as gated or memory based RNN cell's, which allows the information to be stored for a longer duration in the network and the gradients from long range dependencies to flow more easily. Two of the most popular variants are Long Short Term Memory (LSTM) cells and Gated Recurrent Unit (GRU) cells. The core idea is to allow some memory of the current state to be stored for the long time either in a seperate memory cell or in the hidden state. This is usually done by selectively reading and editing from the memory based on the current step. In the following sections we will understand the GRU cells which are a very simple extension of RNN and solve the vanishing gradient problem. The LSTM cells are a bit more involved and will be discussed later. ## Gated Recurrent Unit (GRU) The idea behind GRU's is to update part of the hidden state and retain the rest. This is done using the following functions: * reset gate - Identifies what proportion of hidden state should be reset * update gate - Identifies what proportion of hidden state should be updated The implementation is as follow: $$ \begin{equation} reset = \sigma(W_{r}[x_i, h_{i-1}])\\ update = \sigma(W_{u}[x_i, h_{i-1}])\\ interim\_hidden = tanh(W_{i}[x_i, reset \circ h_{i-1}])\\ h^{'}_{i} = update \circ interim\_hidden + (1-update) \circ h_{i-1} \\ \end{equation} $$ ```python class GRUCell(nn.Module): def __init__(self, input_dim, output_dim): super(GRUCell, self).__init__() self.input_dim = input_dim self.output_dim = output_dim self.reset_linear = nn.Linear(2*self.input_dim, self.input_dim) self.update_linear = nn.Linear(2*self.input_dim, self.input_dim) self.interim_linear = nn.Linear(2*self.input_dim, self.input_dim) self.output_linear = nn.Linear(2*self.input_dim, output_dim) def forward(self, x, h): concat_tensors = torch.cat([x,h], dim=1) reset = F.sigmoid(self.reset_linear(concat_tensors)) update = F.sigmoid(self.update_linear(concat_tensors)) reset_hidden = reset * h concat_reset_hidden = torch.cat([x, reset_hidden], dim=1) interim_hidden = F.tanh(self.interim_linear(concat_reset_hidden)) h_prime = update * interim_hidden + (1-update) * h concat_out = torch.cat([x, h_prime], dim=1) output = F.tanh(self.output_linear(concat_out)) return output, h_prime ``` ```python input_size=2 embedding_size=3 output_size=2 embedding = nn.Embedding(input_size, embedding_size) rnn_cell = GRUCell(embedding_size, output_size) model = Model(embedding, rnn_cell) ``` ```python X, Y = create_dataset(max_len=10, K=4) X_tensors, Y_tensors = tuple(map(torch.LongTensor, [X, Y])) print(X_tensors.shape, Y_tensors.shape) assert X_tensors.shape == Y_tensors.shape, "X and Y should be of same shape" train = data_utils.TensorDataset(X_tensors, Y_tensors) train_loader = data_utils.DataLoader(train, batch_size=64, shuffle=True) ``` torch.Size([1024, 10]) torch.Size([1024, 10]) ```python %%time learning_rate = 1e-4 max_epochs = 3000 check_every=500 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) for epoch in range(1, max_epochs+1): for X_batch, Y_batch in train_loader: X_batch, Y_batch = tensors2variables(X_batch, Y_batch) # Forward pass: compute predicted y by passing x to the model. 
loss = model.loss(X_batch, Y_batch) # Before the backward pass, use the optimizer object to zero all of the # gradients for the variables it will update (which are the learnable weights # of the model) optimizer.zero_grad() # Backward pass: compute gradient of the loss with respect to model # parameters loss.backward() # Calling the step function on an Optimizer makes an update to its # parameters optimizer.step() if epoch % check_every != 0: continue loss = model.loss(*tensors2variables(X_tensors, Y_tensors)) Y_predict = model.predict(Variable(X_tensors)).data accuracy = (Y_tensors == Y_predict).sum() /operator.mul(*Y_tensors.shape) * 100. print("Epoch[{:03d}]: loss={:5.3f}; accuracy={:.3f}%".format(epoch, loss.data[0], accuracy)) ``` Epoch[500]: loss=0.412; accuracy=83.740% Epoch[1000]: loss=0.350; accuracy=86.836% Epoch[1500]: loss=0.302; accuracy=91.025% Epoch[2000]: loss=0.279; accuracy=92.422% Epoch[2500]: loss=0.270; accuracy=93.242% Epoch[3000]: loss=0.264; accuracy=93.506% CPU times: user 21min 12s, sys: 1h 6min 27s, total: 1h 27min 39s Wall time: 14min 38s ```python model ``` Model ( (embedding): Embedding(2, 3) (rnn_cell): GRUCell ( (reset_linear): Linear (6 -> 3) (update_linear): Linear (6 -> 3) (interim_linear): Linear (6 -> 3) (output_linear): Linear (6 -> 2) ) (loss_function): CrossEntropyLoss ( ) ) ## Running using GPU This makes things go faster. ```python input_size=2 embedding_size=3 output_size=2 embedding = nn.Embedding(input_size, embedding_size) rnn_cell = GRUCell(embedding_size, output_size) model = Model(embedding, rnn_cell).cuda() ``` ```python X, Y = create_dataset(max_len=10, K=4) X_tensors, Y_tensors = tuple(map(torch.LongTensor, [X, Y])) print(X_tensors.shape, Y_tensors.shape) assert X_tensors.shape == Y_tensors.shape, "X and Y should be of same shape" batch_size=64 train = data_utils.TensorDataset(X_tensors, Y_tensors) train_loader = data_utils.DataLoader(train, batch_size=batch_size, shuffle=True, pin_memory=True) ``` torch.Size([1024, 10]) torch.Size([1024, 10]) ```python overall_hidden = Variable(torch.zeros(1,model.embedding.embedding_dim)) overall_hidden = overall_hidden.cuda() overall_hidden ``` Variable containing: 0 0 0 [torch.cuda.FloatTensor of size 1x3 (GPU 0)] ```python %%time learning_rate = 1e-4 max_epochs = 3000 check_every=500 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) for epoch in range(1, max_epochs+1): for X_batch, Y_batch in train_loader: X_batch = X_batch.cuda(async=True) Y_batch = Y_batch.cuda(async=True) X_batch, Y_batch = tensors2variables(X_batch, Y_batch) hidden = overall_hidden.repeat(batch_size, 1) # Forward pass: compute predicted y by passing x to the model. loss = model.loss(X_batch, Y_batch, hidden=hidden) # Before the backward pass, use the optimizer object to zero all of the # gradients for the variables it will update (which are the learnable weights # of the model) optimizer.zero_grad() # Backward pass: compute gradient of the loss with respect to model # parameters loss.backward() # Calling the step function on an Optimizer makes an update to its # parameters optimizer.step() if epoch % check_every != 0: continue hidden = overall_hidden.repeat(X_tensors.shape[0], 1) loss = model.loss(*tensors2variables(X_tensors.cuda(), Y_tensors.cuda()), hidden=hidden) Y_predict = model.predict(Variable(X_tensors.cuda()), hidden=hidden).data accuracy = (Y_tensors.cuda() == Y_predict).sum() /operator.mul(*Y_tensors.shape) * 100. 
print("Epoch[{:03d}]: loss={:5.3f}; accuracy={:.3f}%".format(epoch, loss.data[0], accuracy)) ``` Epoch[500]: loss=0.406; accuracy=84.111% Epoch[1000]: loss=0.320; accuracy=88.711% Epoch[1500]: loss=0.285; accuracy=91.377% Epoch[2000]: loss=0.270; accuracy=92.842% Epoch[2500]: loss=0.262; accuracy=93.301% Epoch[3000]: loss=0.255; accuracy=93.975% CPU times: user 18min 6s, sys: 3.28 s, total: 18min 9s Wall time: 18min 11s It looks like the GPU version is actually a bit slower in this case. This might be due to the small size of our dataset, for which the cost of moving tensors to GPU is greater than the gain by speeding up network computations. ## Increasing the network capacity This can be done by increasing the hidden units in the network. Or in our case by increasing the embedding size as that is used to derive the number of hidden units. ```python input_size=2 embedding_size=10 output_size=2 embedding = nn.Embedding(input_size, embedding_size) rnn_cell = GRUCell(embedding_size, output_size) model = Model(embedding, rnn_cell).cuda() ``` ```python X, Y = create_dataset(max_len=10, K=4) X_tensors, Y_tensors = tuple(map(torch.LongTensor, [X, Y])) print(X_tensors.shape, Y_tensors.shape) assert X_tensors.shape == Y_tensors.shape, "X and Y should be of same shape" batch_size=64 train = data_utils.TensorDataset(X_tensors, Y_tensors) train_loader = data_utils.DataLoader(train, batch_size=batch_size, shuffle=True, pin_memory=True) ``` torch.Size([1024, 10]) torch.Size([1024, 10]) ```python overall_hidden = Variable(torch.zeros(1,model.embedding.embedding_dim)) overall_hidden = overall_hidden.cuda() overall_hidden ``` Variable containing: 0 0 0 0 0 0 0 0 0 0 [torch.cuda.FloatTensor of size 1x10 (GPU 0)] ```python %%time learning_rate = 1e-4 max_epochs = 1000 check_every=50 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) for epoch in range(1, max_epochs+1): for X_batch, Y_batch in train_loader: X_batch = X_batch.cuda(async=True) Y_batch = Y_batch.cuda(async=True) X_batch, Y_batch = tensors2variables(X_batch, Y_batch) hidden = overall_hidden.repeat(batch_size, 1) # Forward pass: compute predicted y by passing x to the model. loss = model.loss(X_batch, Y_batch, hidden=hidden) # Before the backward pass, use the optimizer object to zero all of the # gradients for the variables it will update (which are the learnable weights # of the model) optimizer.zero_grad() # Backward pass: compute gradient of the loss with respect to model # parameters loss.backward() # Calling the step function on an Optimizer makes an update to its # parameters optimizer.step() if epoch % check_every != 0: continue hidden = overall_hidden.repeat(X_tensors.shape[0], 1) loss = model.loss(*tensors2variables(X_tensors.cuda(), Y_tensors.cuda()), hidden=hidden) Y_predict = model.predict(Variable(X_tensors.cuda()), hidden=hidden).data accuracy = (Y_tensors.cuda() == Y_predict).sum() /operator.mul(*Y_tensors.shape) * 100. 
print("Epoch[{:03d}]: loss={:5.3f}; accuracy={:.3f}%".format(epoch, loss.data[0], accuracy)) ``` Epoch[050]: loss=0.642; accuracy=65.000% Epoch[100]: loss=0.605; accuracy=70.000% Epoch[150]: loss=0.521; accuracy=70.010% Epoch[200]: loss=0.426; accuracy=82.100% Epoch[250]: loss=0.376; accuracy=86.182% Epoch[300]: loss=0.344; accuracy=88.877% Epoch[350]: loss=0.293; accuracy=93.311% Epoch[400]: loss=0.229; accuracy=96.768% Epoch[450]: loss=0.188; accuracy=98.701% Epoch[500]: loss=0.157; accuracy=99.805% Epoch[550]: loss=0.143; accuracy=99.971% Epoch[600]: loss=0.136; accuracy=100.000% Epoch[650]: loss=0.131; accuracy=100.000% Epoch[700]: loss=0.130; accuracy=100.000% Epoch[750]: loss=0.129; accuracy=100.000% Epoch[800]: loss=0.128; accuracy=100.000% Epoch[850]: loss=0.128; accuracy=100.000% Epoch[900]: loss=0.127; accuracy=100.000% Epoch[950]: loss=0.127; accuracy=100.000% Epoch[1000]: loss=0.127; accuracy=100.000% CPU times: user 6min 5s, sys: 1.22 s, total: 6min 6s Wall time: 6min 7s As we can see, the network converges **10x** quicker than the one with lower capacity and also achieves $100\%$ accuracy in just 600 epochs. This is a very useful result, as it shows that in order to learn more complex functionalities we need networks with larger capacities as well as computationally efficient structures. Luckily for us many of the standard functionalities, are usually implemented efficiently in neural network libraries. Pytorch implements many of the standard neural network modules efficiently using it's C code, which can give us an order of magniture of improvement (especially for larger networks). These modules include GRU cells and a GRU module which can process the whole sequence. We will look at these in detail below. ## Using Pytorch's GRUCell Let us check our implementation using the Pytorch's inbuild GRU cell ```python class PytorchGRUCell(nn.Module): def __init__(self, input_dim, output_dim): super(PytorchGRUCell, self).__init__() self.input_dim = input_dim self.output_dim = output_dim self.gru = torch.nn.GRUCell(self.input_dim, self.input_dim) self.output_linear = nn.Linear(2*self.input_dim, self.output_dim) def forward(self, x, h): h_prime = self.gru(x,h) concat_out = torch.cat([x, h_prime], dim=1) output = F.tanh(self.output_linear(concat_out)) return output, h_prime ``` ```python input_size=2 embedding_size=10 output_size=2 embedding = nn.Embedding(input_size, embedding_size) rnn_cell = PytorchGRUCell(embedding_size, output_size) model = Model(embedding, rnn_cell).cuda() ``` ```python X, Y = create_dataset(max_len=10, K=4) X_tensors, Y_tensors = tuple(map(torch.LongTensor, [X, Y])) print(X_tensors.shape, Y_tensors.shape) assert X_tensors.shape == Y_tensors.shape, "X and Y should be of same shape" batch_size=64 train = data_utils.TensorDataset(X_tensors, Y_tensors) train_loader = data_utils.DataLoader(train, batch_size=batch_size, shuffle=True, pin_memory=True) ``` torch.Size([1024, 10]) torch.Size([1024, 10]) ```python overall_hidden = Variable(torch.zeros(1,model.embedding.embedding_dim)) overall_hidden = overall_hidden.cuda() overall_hidden ``` Variable containing: 0 0 0 0 0 0 0 0 0 0 [torch.cuda.FloatTensor of size 1x10 (GPU 0)] ```python %%time learning_rate = 1e-4 max_epochs = 1000 check_every=50 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) for epoch in range(1, max_epochs+1): for X_batch, Y_batch in train_loader: X_batch = X_batch.cuda(async=True) Y_batch = Y_batch.cuda(async=True) X_batch, Y_batch = tensors2variables(X_batch, Y_batch) hidden = 
overall_hidden.repeat(batch_size, 1) # Forward pass: compute predicted y by passing x to the model. loss = model.loss(X_batch, Y_batch, hidden=hidden) # Before the backward pass, use the optimizer object to zero all of the # gradients for the variables it will update (which are the learnable weights # of the model) optimizer.zero_grad() # Backward pass: compute gradient of the loss with respect to model # parameters loss.backward() # Calling the step function on an Optimizer makes an update to its # parameters optimizer.step() if epoch % check_every != 0: continue hidden = overall_hidden.repeat(X_tensors.shape[0], 1) loss = model.loss(*tensors2variables(X_tensors.cuda(), Y_tensors.cuda()), hidden=hidden) Y_predict = model.predict(Variable(X_tensors.cuda()), hidden=hidden).data accuracy = (Y_tensors.cuda() == Y_predict).sum() /operator.mul(*Y_tensors.shape) * 100. print("Epoch[{:03d}]: loss={:5.3f}; accuracy={:.3f}%".format(epoch, loss.data[0], accuracy)) ``` Epoch[050]: loss=0.564; accuracy=70.000% Epoch[100]: loss=0.450; accuracy=80.488% Epoch[150]: loss=0.380; accuracy=86.357% Epoch[200]: loss=0.334; accuracy=89.805% Epoch[250]: loss=0.292; accuracy=92.783% Epoch[300]: loss=0.259; accuracy=94.795% Epoch[350]: loss=0.237; accuracy=96.182% Epoch[400]: loss=0.221; accuracy=96.670% Epoch[450]: loss=0.190; accuracy=97.900% Epoch[500]: loss=0.148; accuracy=99.990% Epoch[550]: loss=0.135; accuracy=100.000% Epoch[600]: loss=0.131; accuracy=100.000% Epoch[650]: loss=0.130; accuracy=100.000% Epoch[700]: loss=0.129; accuracy=100.000% Epoch[750]: loss=0.128; accuracy=100.000% Epoch[800]: loss=0.128; accuracy=100.000% Epoch[850]: loss=0.127; accuracy=100.000% Epoch[900]: loss=0.127; accuracy=100.000% Epoch[950]: loss=0.127; accuracy=100.000% Epoch[1000]: loss=0.127; accuracy=100.000% CPU times: user 3min 10s, sys: 948 ms, total: 3min 11s Wall time: 3min 11s Great, this implementation is almost **2x** times faster than our implementation, probably because it is written using the C backend. ## Using Pytorch GRU module ```python torch.cat([torch.zeros(1,2,3), torch.ones(1,2,3)], 2) ``` (0 ,.,.) 
= 0 0 0 1 1 1 0 0 0 1 1 1 [torch.FloatTensor of size 1x2x6] ```python class PyTorchModel(nn.Module): def __init__(self, input_size, embedding_size, output_size): super(PyTorchModel, self).__init__() self.input_size = input_size self.embedding_size = embedding_size self.output_size = output_size self.embedding = nn.Embedding(input_size, embedding_size) self.rnn = nn.GRU(embedding_size, embedding_size) self.output_linear = nn.Linear(2*self.embedding_size, self.output_size) self.loss_function = nn.CrossEntropyLoss() def forward(self, word_ids, hidden=None): if hidden is None: hidden = Variable(torch.zeros( word_ids.data.shape[0],self.embedding.embedding_dim)) assert isinstance(hidden, Variable) embeddings = self.embedding(word_ids) max_seq_length = word_ids.data.shape[-1] ## RNN input and output shapes are (seq_len, batch_size, input_size) embeddings = embeddings.permute(1,0,2) hidden_states, hidden = self.rnn(embeddings) concat_tensors = torch.cat([embeddings, hidden_states], 2) concat_tensors = concat_tensors.permute(1,0,2).contiguous() concat_tensors = concat_tensors.view(-1, concat_tensors.data.shape[2]) outputs = self.output_linear(concat_tensors) hidden_states = hidden_states.permute(1,0,2) outputs = outputs.view(self.input_size, -1, self.output_size) assert isinstance(outputs, Variable) assert isinstance(hidden_states, Variable) return outputs, hidden_states def loss(self, word_ids, target_ids, hidden=None): outputs, hidden_states = self.forward(word_ids, hidden=hidden) outputs = outputs.view(-1, outputs.data.shape[-1]) target_ids = target_ids.view(-1) assert isinstance(outputs, Variable) assert isinstance(target_ids, Variable) #print("output={}\ttargets={}".format(outputs.data.shape,target_ids.data.shape)) loss = self.loss_function(outputs, target_ids) return loss def predict(self, word_ids, hidden=None): outputs, hidden_states = self.forward(word_ids, hidden=hidden) outputs = outputs.view(-1, outputs.data.shape[-1]) max_scores, predictions = outputs.max(1) predictions = predictions.view(*word_ids.data.shape) #print(word_ids.data.shape, predictions.data.shape) assert word_ids.data.shape == predictions.data.shape, "word_ids: {}, predictions: {}".format( word_ids.data.shape, predictions.data.shape ) return predictions ``` ```python input_size=2 embedding_size=10 output_size=2 model = PyTorchModel(input_size, embedding_size, output_size).cuda() ``` ```python X, Y = create_dataset(max_len=10, K=4) X_tensors, Y_tensors = tuple(map(torch.LongTensor, [X, Y])) print(X_tensors.shape, Y_tensors.shape) assert X_tensors.shape == Y_tensors.shape, "X and Y should be of same shape" batch_size=64 train = data_utils.TensorDataset(X_tensors, Y_tensors) train_loader = data_utils.DataLoader(train, batch_size=batch_size, shuffle=True, pin_memory=True) ``` torch.Size([1024, 10]) torch.Size([1024, 10]) ```python overall_hidden = Variable(torch.zeros(1,model.embedding.embedding_dim)) overall_hidden = overall_hidden.cuda() overall_hidden ``` Variable containing: 0 0 0 0 0 0 0 0 0 0 [torch.cuda.FloatTensor of size 1x10 (GPU 0)] ```python %%time learning_rate = 1e-4 max_epochs = 1000 check_every=50 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) for epoch in range(1, max_epochs+1): for X_batch, Y_batch in train_loader: X_batch = X_batch.cuda(async=True) Y_batch = Y_batch.cuda(async=True) X_batch, Y_batch = tensors2variables(X_batch, Y_batch) hidden = overall_hidden.repeat(batch_size, 1) # Forward pass: compute predicted y by passing x to the model. 
loss = model.loss(X_batch, Y_batch, hidden=hidden) # Before the backward pass, use the optimizer object to zero all of the # gradients for the variables it will update (which are the learnable weights # of the model) optimizer.zero_grad() # Backward pass: compute gradient of the loss with respect to model # parameters loss.backward() # Calling the step function on an Optimizer makes an update to its # parameters optimizer.step() if epoch % check_every != 0: continue hidden = overall_hidden.repeat(X_tensors.shape[0], 1) loss = model.loss(*tensors2variables(X_tensors.cuda(), Y_tensors.cuda()), hidden=hidden) Y_predict = model.predict(Variable(X_tensors.cuda()), hidden=hidden).data accuracy = (Y_tensors.cuda() == Y_predict).sum() /operator.mul(*Y_tensors.shape) * 100. print("Epoch[{:03d}]: loss={:5.3f}; accuracy={:.3f}%".format(epoch, loss.data[0], accuracy)) ``` Epoch[050]: loss=0.580; accuracy=70.000% Epoch[100]: loss=0.453; accuracy=77.949% Epoch[150]: loss=0.342; accuracy=84.902% Epoch[200]: loss=0.255; accuracy=88.398% Epoch[250]: loss=0.193; accuracy=94.902% Epoch[300]: loss=0.136; accuracy=96.455% Epoch[350]: loss=0.074; accuracy=99.297% Epoch[400]: loss=0.032; accuracy=99.990% Epoch[450]: loss=0.016; accuracy=100.000% Epoch[500]: loss=0.009; accuracy=100.000% Epoch[550]: loss=0.005; accuracy=100.000% Epoch[600]: loss=0.003; accuracy=100.000% Epoch[650]: loss=0.002; accuracy=100.000% Epoch[700]: loss=0.001; accuracy=100.000% Epoch[750]: loss=0.001; accuracy=100.000% Epoch[800]: loss=0.001; accuracy=100.000% Epoch[850]: loss=0.000; accuracy=100.000% Epoch[900]: loss=0.000; accuracy=100.000% Epoch[950]: loss=0.000; accuracy=100.000% Epoch[1000]: loss=0.000; accuracy=100.000% CPU times: user 57.6 s, sys: 952 ms, total: 58.5 s Wall time: 58.7 s This is super fast, **3x** faster than using the GRUCell and **6x** faster than our implementation. This concludes our introduction to sequence tagging using Pytorch. The example covered here were very small so as to demonstrate the code required to implement a neural network as well as to give an intuition about the kind of tasks the networks can handle. More complex models can be built on top of this demo, which can handle variable length sequences, complex inference process (e.g. Linear Chain Conditional Random Fields for predicting the best sequence of outputs), and complex handling of input like words, phrases, etc. ```python print("Pytorch Version: {}".format(torch.__version__)) ``` Pytorch Version: 0.2.0_4 ```python ```
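As a pointer for the variable-length sequences mentioned in the conclusion above, here is a minimal, hedged sketch of how padded batches can be fed to `nn.GRU` using packed sequences. The helper name and tensor shapes are illustrative assumptions (not part of the original demo); it presumes that `torch.nn.utils.rnn.pack_padded_sequence` and `pad_packed_sequence` are available in the installed PyTorch version and that the batch is sorted by decreasing true length.

```python
# Sketch only: running nn.GRU over a zero-padded batch of variable-length sequences.
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

def run_gru_on_padded(gru, embeddings, lengths):
    # embeddings: (max_seq_len, batch_size, embedding_size), zero-padded,
    # with sequences ordered by decreasing true length.
    # lengths: list of the true sequence lengths, one per batch element.
    packed = pack_padded_sequence(embeddings, lengths)
    packed_states, hidden = gru(packed)
    # Unpack back to a padded (max_seq_len, batch_size, hidden_size) tensor;
    # positions beyond each sequence's true length are filled with zeros.
    hidden_states, _ = pad_packed_sequence(packed_states)
    return hidden_states, hidden
```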
332d760a4a25b6e16996407d9f0203a223095ff8
59,525
ipynb
Jupyter Notebook
Pytorch RNN sequence tagging.ipynb
Anou9531/Pytorch-Implementation
6ce3d5123852a77ca565b4acb0efe12b68fd803c
[ "Apache-2.0" ]
182
2017-01-25T13:08:18.000Z
2022-03-02T13:27:27.000Z
Pytorch RNN sequence tagging.ipynb
Anou9531/Pytorch-Implementation
6ce3d5123852a77ca565b4acb0efe12b68fd803c
[ "Apache-2.0" ]
1
2018-07-11T07:45:46.000Z
2018-07-11T07:45:46.000Z
Pytorch RNN sequence tagging.ipynb
Anou9531/Pytorch-Implementation
6ce3d5123852a77ca565b4acb0efe12b68fd803c
[ "Apache-2.0" ]
48
2017-01-26T14:50:42.000Z
2022-03-12T02:40:04.000Z
35.880048
1,206
0.5474
true
11,595
Qwen/Qwen-72B
1. YES 2. YES
0.841826
0.815232
0.686284
__label__eng_Latn
0.819407
0.432798
# Linearized Dynamics Near Steady-State We start from the linearized state-equations of the passively mode-locked laser [ref1] [ref1] [ref1]: https://github.com/adrianschlatter/notebooks/blob/master/Dynamics%20of%20Passively%20Mode-Locked%20Lasers.ipynb ``` import sympy as sym sym.init_printing(use_latex='mathjax') Pst, gst, EsatL, dqPdEP = sym.symbols("P_{st} g_{st} E_{satL} q'_P") TR, tauL, etaP, Toc, PP0 = sym.symbols('T_R tau_L eta_P T_{oc} P_{P0}') alpha, beta, gamma, epsilon = sym.symbols('alpha beta gamma epsilon') w0 = sym.symbols('omega_0') Pst_ = sym.Eq(Pst, -EsatL / tauL + etaP * PP0 / gst) x = sym.MatrixSymbol('x', 2, 1) xdot = sym.MatrixSymbol('xdot', 2, 1) y = sym.MatrixSymbol('y', 1, 1) u = sym.MatrixSymbol('u', 1, 1) A = sym.Matrix([[-Pst * dqPdEP, Pst / TR], [-gst / EsatL, -1 / tauL - Pst / EsatL]]) B = sym.Matrix([[0], [etaP / EsatL]]) C = sym.Matrix([[Toc, 0]]) sym.Eq(xdot, A * x + B * u) ``` $$\dot{x} = \left[\begin{matrix}0\\\frac{\eta_{P}}{E_{{satL}}}\end{matrix}\right] u + \left[\begin{matrix}- P_{{st}} q'_{P} & \frac{P_{{st}}}{T_{R}}\\- \frac{g_{{st}}}{E_{{satL}}} & - \frac{1}{\tau_{L}} - \frac{P_{{st}}}{E_{{satL}}}\end{matrix}\right] x$$ ``` sym.Eq(y, C * x) ``` $$y = \left[\begin{matrix}T_{{oc}} & 0\end{matrix}\right] x$$ where $\vec{x} = \left[ \delta P, \delta g \right]^T$, $\vec u = \left[\delta P_P \right]$, and $\vec y = \left[ \delta P_{out} \right]$. Now, we rewrite this in new state variables $\vec x' = \left[ \delta \dot{P} / \omega_0, \delta P \right]$, with $\omega_0 = \sqrt{ \frac{P_{st} g_{st}}{T_R E_{satL}} + P_{st} q'_P \left( 1 / \tau_L + P_{st} / E_{satL} \right)}$. From the state equation above, we immediately see the transformation matrix $M$: ``` M = sym.Matrix([[-Pst * dqPdEP / w0, Pst / TR / w0], [1, 0]]) w0_ = sym.Eq(w0, sym.sqrt(-A[0, 1] * A[1, 0] + A[0, 0] * A[1, 1])) xp = sym.MatrixSymbol("x'", 2, 1) sym.simplify(sym.Eq(xp, M * x)) ``` $$x' = \left[\begin{matrix}- \frac{P_{{st}} q'_{P}}{\omega_{0}} & \frac{P_{{st}}}{T_{R} \omega_{0}}\\1 & 0\end{matrix}\right] x$$ The transformed state equation is: $ \begin{align*} \dot{x'} &= M A M^{-1} x' + M B u \\ y &= C \cdot x = C M^{-1} x' \end{align*}$ ``` M = M.subs(w0_.lhs, w0_.rhs) Bp = M * B Ap = M * A * M**-1 sym.Eq(sym.MatrixSymbol("A'", 2, 2), sym.simplify(Ap)) ``` $$A' = \left[\begin{matrix}- P_{{st}} q'_{P} - \frac{1}{\tau_{L}} - \frac{P_{{st}}}{E_{{satL}}} & - \sqrt{\frac{P_{{st}} q'_{P}}{\tau_{L}} + \frac{P_{{st}}^{2} q'_{P}}{E_{{satL}}} + \frac{P_{{st}} g_{{st}}}{E_{{satL}} T_{R}}}\\\sqrt{\frac{P_{{st}} q'_{P}}{\tau_{L}} + \frac{P_{{st}}^{2} q'_{P}}{E_{{satL}}} + \frac{P_{{st}} g_{{st}}}{E_{{satL}} T_{R}}} & 0\end{matrix}\right]$$ ``` sym.Eq(sym.MatrixSymbol("B'", 2, 1), sym.simplify(Bp)) ``` $$B' = \left[\begin{matrix}\frac{P_{{st}} \eta_{P}}{E_{{satL}} T_{R} \sqrt{\frac{P_{{st}}}{E_{{satL}} T_{R} \tau_{L}} \left(T_{R} q'_{P} \left(E_{{satL}} + P_{{st}} \tau_{L}\right) + g_{{st}} \tau_{L}\right)}}\\0\end{matrix}\right]$$ We recognize that the off-diagonal elements of $A$ are $\omega_0$ and $-\omega_0$. 
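As a quick consistency check of this observation (a sketch reusing the symbols already defined above; the squared entries are compared so that the check does not hinge on how `sympy` simplifies square roots):

```
# Both differences are expected to reduce to 0,
# i.e. A'[0,1]**2 == A'[1,0]**2 == omega_0**2.
(sym.simplify(sym.expand(Ap[0, 1]**2 - w0_.rhs**2)),
 sym.simplify(sym.expand(Ap[1, 0]**2 - w0_.rhs**2)))
```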
Furthermore, we define $ \begin{align*} \zeta &= \frac{1}{2 \omega_0} \left(1 / \tau_L + P_{st} \cdot \left(q'_P + 1 / E_{satL} \right) \right) \\ \rho &= \frac{P_{st} \eta_P}{E_{satL} T_R \omega_0^2} \end{align*} $ ``` zeta = sym.Symbol('zeta', real=True) zeta_ = sym.Eq(zeta, (1 / tauL + Pst * (dqPdEP + 1 / EsatL)) / 2 / w0) rho = sym.Symbol('rho', real=True) rho_ = sym.Eq(rho, Pst * etaP / (EsatL * TR * w0) / w0) Bpn = sym.Matrix(Bp) Apn = sym.Matrix(Ap) Bpn[0, 0] = Bp[0, 0] / (w0_.lhs / w0_.rhs) * rho_.lhs / rho_.rhs Apn[0, 0] = Ap[0, 0] * zeta_.lhs / zeta_.rhs Apn[0, 1] = Ap[0, 1] * w0_.lhs / w0_.rhs Apn[1, 0] = Ap[1, 0] * w0_.lhs / w0_.rhs sym.Eq(sym.MatrixSymbol("x'dot", 2, 1), sym.simplify(Apn * xp + Bpn * u)) ``` $$\dot{x'} = \left[\begin{matrix}\omega_{0} \rho\\0\end{matrix}\right] u + \left[\begin{matrix}- 2 \omega_{0} \zeta & - \omega_{0}\\\omega_{0} & 0\end{matrix}\right] x'$$ Looking at the eigenvalues (see below), we see that the system - is stable if $\zeta > 0$ - is unstable if $\zeta < 0$ (always) - has oscillatory modes for $-1 < \zeta < 1$ (radicand is negative) - is critically damped for $\zeta = 1$ - is over-critically damped for $\zeta > 1$ (radicand is positive) Of course, this analysis is only true if $\omega_0$ is real. ``` Apn.eigenvals() ``` $$\left \{ \omega_{0} \left(- \zeta - \sqrt{\left(\zeta - 1\right) \left(\zeta + 1\right)}\right) : 1, \quad \omega_{0} \left(- \zeta + \sqrt{\left(\zeta - 1\right) \left(\zeta + 1\right)}\right) : 1\right \}$$ Now, we apply a unit-step to the input of the system and determine where the output will stabilize. To do that, we have to solve the following equation: ``` eq = sym.Eq(sym.Matrix([[0], [0]]), Apn * xp + Bpn) sym.simplify(eq) ``` $$\left[\begin{matrix}0\\0\end{matrix}\right] = \left[\begin{matrix}\omega_{0} \rho\\0\end{matrix}\right] + \left[\begin{matrix}- 2 \omega_{0} \zeta & - \omega_{0}\\\omega_{0} & 0\end{matrix}\right] x'$$ From the resulting $x'$ we obtain for the steady-state $y$ response: ``` sym.simplify(C * M**-1 * -Apn**-1 * Bpn)[0] ``` $$T_{{oc}} \rho$$ The DC-response to small pump-power changes is the slope efficiency. ``` sym.simplify((Toc * rho_.rhs).subs([(w0_.lhs, w0_.rhs), (Pst_.lhs, Pst_.rhs)])) ``` $$\frac{T_{{oc}} \eta_{P} g_{{st}}}{P_{{P0}} T_{R} \eta_{P} q'_{P} + g_{{st}}^{2}}$$ For $q'_P = 0$ (no saturable absorber), this simplifies to $\eta_P \cdot \frac{T_{oc}}{g_{st}}$, i.e. the slope efficiency depends only on the pump-absorption efficiency and the ratio of output coupling to total losses.
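As a quick sanity check of that last remark (a sketch reusing the symbols defined above), substituting $q'_P = 0$ into the DC response should leave only $\eta_P \, T_{oc} / g_{st}$:

```
# Setting q'_P = 0 (no saturable absorber) in the DC response derived above;
# the result is expected to reduce to T_oc * eta_P / g_st.
slope = sym.simplify((Toc * rho_.rhs).subs([(w0_.lhs, w0_.rhs), (Pst_.lhs, Pst_.rhs)]))
sym.simplify(slope.subs(dqPdEP, 0))
```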
df0a3cf344d2c3d773803ceacf2c58c22783ab70
17,663
ipynb
Jupyter Notebook
Passively Mode-Locked Laser - Steady-State Dynamics.ipynb
adrianschlatter/notebooks
997ce44b68998959cf486d5ba77f2a9b5336c28f
[ "BSD-2-Clause" ]
null
null
null
Passively Mode-Locked Laser - Steady-State Dynamics.ipynb
adrianschlatter/notebooks
997ce44b68998959cf486d5ba77f2a9b5336c28f
[ "BSD-2-Clause" ]
2
2018-09-22T19:08:41.000Z
2018-09-22T19:11:34.000Z
Passively Mode-Locked Laser - Steady-State Dynamics.ipynb
adrianschlatter/notebooks
997ce44b68998959cf486d5ba77f2a9b5336c28f
[ "BSD-2-Clause" ]
null
null
null
40.326484
456
0.463455
true
2,354
Qwen/Qwen-72B
1. YES 2. YES
0.917303
0.757794
0.695127
__label__eng_Latn
0.245584
0.453344
$\newcommand{\xbf}{{\bf x}} \newcommand{\ybf}{{\bf y}} \newcommand{\wbf}{{\bf w}} \newcommand{\Ibf}{\mathbf{I}} \newcommand{\Xbf}{\mathbf{X}} \newcommand{\Rbb}{\mathbb{R}} \newcommand{\vec}[1]{\left[\begin{array}{c}#1\end{array}\right]} $ # Introduction aux réseaux de neurones Matériel de cours rédigé par Pascal Germain, 2018 ************ ```python import numpy as np import aidecours from matplotlib import pyplot as plt from sklearn.datasets import make_blobs from aidecours import code_button as CB aidecours.center_images(); CB() ``` <style> .output_png { display: table-cell; text-align: center; vertical-align: middle; } </style> <a href="javascript:code_toggle()">voir/cacher le code</a>. # Cacher ce neurone que je ne saurais voir Un *réseau de neurones artificiels* n'est qu'un simple *graphe de calcul*, qui exprime une fonction (possiblement) complexe comme une succession d'opérations simples. Commencons par examiner un réseau de neurones très simple, possédant deux neurones d'entrées et un neurone de sortie. Ce réseau représente une fonction $R_\wbf: \Rbb^2 \to \Rbb$, que nous illustrons ainsi: <p> <center></center> Le réseau $R_\wbf$ ci-dessus reçoit deux valeurs en entrée: $x_1$ et $x_2$. Ces valeurs sont respectivement multipliées par les poids $w_1$ et $w_2$. Le neurone de droite reçoit les valeurs $w_1 x_1$ et $w_2 x_2$, et les combine en appliquant la fonction $f$. La valeur obtenue correspond à la sortie du réseau. $$R_\wbf\left({\small\vec{x_1\\[-3mm]x_2}}\right) = f(w_1 x_1+w_2 x_2)\,.$$ ## Régression: Le neurone linéaire Par exemple, si $f$ est la fonction identitée $f(x)=x$, ce réseau représente l'opération: $$R_\wbf\left({\small\vec{x_1\\[-3mm]x_2}}\right) = f(w_1 x_1 + w_2 x_2) = w_1 x_1 + w_2 x_2\,.$$ Nous avons donc ici d'un réseau de neurones dans sa plus simple expression, représentant un produit scalaire entre un vecteur $\xbf = (x_1,x_2)$ et un vecteur $\wbf = (w_1,w_2)$ : $$R_\wbf({\xbf}) = \wbf\cdot \xbf\,.$$ En réalité, un réseau de neuronnes possédera généalement plus de 2 entrées. On notera $d$ la dimension de l'espace d'entré du réseau (le nombre de neurones d'entrée). On a donc $\xbf\in\Rbb^d$ et $\wbf\in\Rbb^d$. $$R_\wbf({\xbf}) = \sum_{i=1}^d w_i x_i = \wbf\cdot \xbf\,.$$ <center></center> ### Entraînement du réseau Typiquement, l'entraînement d'un réseau de neurones consistera a présenter au réseau un ensemble d'apprentissage afin qui «apprenne» les poids ${\bf w}$. Cet **ensemble d'apprentissage** sera noté $S$ et contiendra $n$ observations, $$S = \{ ({\bf x}_1, y_1), ({\bf x}_2, y_2), \ldots, ({\bf x}_n, y_n) \}\,,$$ où une *observation* est un couple entrée-sortie $({\bf x}, y)$. ### Fonction de perte Afin de guider le processus d'apprentissage, nous devons choisir une fonction de perte $L\big(y',y\big)$. L'apprentissage consistera alors à résoudre le problème suivant: $$\min_\wbf \left[\frac1n \sum_{i=1}^n L\Big(R_\wbf(\xbf_i), y_i\Big)\right].$$ ### Perte quadratique Prenons l'exemple de la perte quadratique: $$L_{\rm quad}\big(y', y\big) = (y'-y)^2\,.$$ Ici, on note $y'$ l'étiquette prédite par le réseau de neurones et $y$ la véritable étiquette d'une observation $\xbf$. 
```python def perte_quadratique(y_prime, y): return (y_prime-y)**2 for y in (-1, 2, 3): aidecours.show_1d_function(lambda a: perte_quadratique(a, y), -2, 4, label=f'$y={y}$'); plt.xlim(-2,4), plt.ylim(0,5) plt.title('Perte quadratique'); plt.xlabel('$y\'$'); plt.ylabel('$L_{quad}(y,y\')$'); CB() ``` Le problème d'apprentissage devient alors $$\min_\wbf \left[\frac1n \sum_{i=1}^n (\wbf\cdot\xbf_i- y_i)^2\right].$$ Imaginons un ensemble d'entraîment à 3 observations: $$ \begin{array}{rcl} S = \{ & ((1,1),& {-}1),\\ & ((0,-1),& 3),\\ &((2,\frac12),& 2)\quad\} \end{array}$$ Illustrons la fonction objectif correspondante: ```python x = np.array([(1,1),(0,-1),(2,.5)]) y = np.array([-1,3,2]) def calc_perte_quadratique(w): return np.mean((x @ w - y) ** 2) aidecours.show_2d_function(calc_perte_quadratique, -10, 10, .5) plt.title('Fonction objectif'); plt.xlabel('$w_1$'); plt.ylabel('$w_2$'); CB() ``` ### C'est la méthode des moindres carrés! On sait comment trouver la réponse exacte au problème d'optimisation. En représentant les observations $S$ sous forme matricielle, $$ \Xbf = \left[ \begin{array}{cc} 1& 1\\ 0& -1\\ 2& \frac12 \end{array} \right], \quad\ybf = \left[ \begin{array}{c} -1\\ 3\\ 2 \end{array} \right] $$ On sait que $\wbf^* = (\Xbf^T \Xbf)^{-1} \Xbf^T \ybf \approx (1.76, -2.90)$ voir: <https://fr.wikipedia.org/wiki/M%C3%A9thode_des_moindres_carr%C3%A9s> ```python w_opt = np.linalg.inv(x.T @ x) @ x.T @ y aidecours.show_2d_function(calc_perte_quadratique, -10, 10, .5) plt.scatter(*w_opt, s=200, marker='*', c='r') plt.title('Fonction objectif'); plt.xlabel('$w_1$'); plt.ylabel('$w_2$'); CB() ``` ### Régularisation Comme tout algorithme d'apprentissage, un réseau de neurone est susceptible de *surapprendre* les données d'apprentissage. Nous verrons dans ce cours quelques techniques pour se prémunir les réseaux de neurones contre le surapprentisage. ### Régularisation $\mathrm{L}^2$ La première technique est d'ajouter un régularisateur à la fonction objectif. Il est fréquent d'utiliser la norme Euclidienne des poids $\wbf$ au carré comme fonction de régularisation: $$\min_\wbf \left[\frac1n \sum_{i=1}^n L\big(R_\wbf(\xbf_i), y_i\big) + \frac\lambda2\|\wbf\|^2\right].$$ Ici, $\lambda>0$ est un *hyperparamètre* de la procédure d'entraîment du réseau de neurones. ```python lambdas = (0., 1., 10.) fig, axes = plt.subplots(1, len(lambdas), figsize=(16, 4)) for param_lambda, ax in zip(lambdas, axes): def calc_perte_quadratique_regularisee(w): return np.mean((x @ w - y) ** 2) + param_lambda * (w @ w) / 2 aidecours.show_2d_function(calc_perte_quadratique_regularisee, -10, 10, .5, ax=ax) w_opt = np.linalg.inv(x.T @ x + param_lambda * np.eye(2) * 2) @ x.T @ y ax.scatter(*w_opt, s=200, marker='*', c='r') plt.title(f'Fonction objectif ($\lambda = {param_lambda}$)'); plt.xlabel('$w_1$'); plt.ylabel('$w_2$'); CB() ``` ### C'est la régression de Ridge (c.-à-d. les moindres carrés régularisés) On sait comment trouver la réponse exacte au problème d'optimisation. 
En représentant les observations $S$ sous forme matricielle, $$ \Xbf = \left[ \begin{array}{cc} 1& 1\\ 0& -1\\ 2& \frac12 \end{array} \right], \quad\ybf = \left[ \begin{array}{c} -1\\ 3\\ 2 \end{array} \right], $$ on sait que $\wbf^* = (\Xbf^T \Xbf+\lambda \Ibf d)^{-1} \Xbf^T \ybf\,.$ $\lambda$ |$\longrightarrow$ | $w_1^*$ | $w_2^*$ ----------|-|---------|-------- 0 |$\longrightarrow$ | 1.76 | -2.90 1 |$\longrightarrow$ | 0.73 | -1.05 10 |$\longrightarrow$ | 0.13 | -0.15 voir: <https://fr.wikipedia.org/wiki/R%C3%A9gularisation_de_Tikhonov> ## Classification: Le neurone sigmoïdale La combinaison de le neurone de sortie linéaire et de la perte quadratique convient bien aux problèmes de régression (où les étiquettes sont des valeurs réelles ($y\in\Rbb$). Imaginons maintenant que nous voulons résoudre un problème de classification binaire: $$y\in\{0,1\}$$ Reprenons notre réseau de neurones simplifié. <p> <center> </center> Nous allons considérer que le neurone de sortie applique la **fonction sigmoidale**, habituellement notée $\sigma$: $$f(x) = \sigma(x) = \frac{1}{1+e^{-x}}\,.$$ ```python def sigmoid(x): return 1 / (1+np.exp(-x)) aidecours.show_1d_function(sigmoid, -6, 6, constante_x=.5); plt.title('Fonction sigmoidale'); plt.xlabel('$x$'); plt.ylabel('$\sigma(x)$'); CB() ``` La sortie du réseau de neurones sera donc comprise entre $0$ et $1$. On considère que l'étiquette prédite est $y=0$ lorsque $$R_\wbf(\xbf) = \sigma(\wbf\cdot\xbf) < 0.5\,,$$ et $y=1$ sinon. Une sortie près de $0.5$ est interprétée comme une grande incertitude envers le résultat. Inversement, plus la sortie est près de $0$ ou de $1$, plus on considère que le réseau est *confiant* envers sa décision. Une interprétation *bayésienne* de la sortie du neurone sigmoïdal est de la voir comme la probabilité, selon le réseau $R_\wbf$, que $y=1$ pour une certaine observation $\xbf$: $$P(y = 1\,|\,\xbf; \wbf) \ = \ \sigma(\wbf\cdot\xbf) \,.$$ Conséquemment: $$P(y = 0\,|\,\xbf; \wbf) \ = \ 1 - P(y = 1\,|\,\xbf; \wbf) \ =\ 1 - \sigma(\wbf\cdot\xbf) \,.$$ Lors de l'apprentissage, on désire pénaliser le réseau d'autant plus que la probabilité attribuée à l'étiquette d'une observation s'éloigne de la véritable étiquette. La fonction de perte utilisée dans ce contexte est nommé la perte du **négatif log vraisemblance**: $$\begin{align} L_{\rm nlv}\big(y', y\big) = &- y \log(y') - (1-y)\log(1-y') \\[2mm] = &\begin{cases} -\log(1-y') & \mbox{si $y=0$ ,} \\[1mm] -\log(y') & \mbox{si $y=1$ .} \end{cases}\end{align}$$ ```python def perte_nlv_y0(y_prime): return - np.log(1. - y_prime) def perte_nlv_y1(y_prime): return - np.log(y_prime) aidecours.show_1d_function(perte_nlv_y0, 0, 0.999, .01, label='$y=0$'); aidecours.show_1d_function(perte_nlv_y1, 0.001, 1, .01, label='$y=1$'); plt.title('Négatif Log Vraisemblance'); plt.xlabel('$y\'$'); plt.ylabel('$L_{nlv}(y,y\')$'); CB() ``` #### Récapitulons. 1. Le neurone de sortie applique la fonction sigmoïdale à la somme de ses entrées: $$\sigma(\wbf\cdot\xbf) = \frac{1}{1+e^{-\wbf\cdot\xbf}}\,.$$ 2. Si $\sigma(\wbf\cdot\xbf) > 0.5$, on déclare que $y=1$. 3. Lors de l'apprentissage, on pénalise selon $$ \begin{align} L_{\rm nlv}\Big(\sigma(\wbf\cdot\xbf), y\Big) &= - y \log(\sigma(\wbf\cdot\xbf)) - (1-y)\log(1-\sigma(\wbf\cdot\xbf))\\ &= \quad\vdots\\[1mm] &= - y \wbf\cdot\xbf + \log(1+e^{\wbf\cdot\xbf}) \end{align}$$ La fonction objectif est alors: $$ \min_\wbf \left[\frac1n \sum_{i=1}^n - y_i \wbf\cdot\xbf_i + \log(1+e^{\wbf\cdot\xbf_i})+ \frac\lambda2\|\wbf\|^2\right]. 
$$ ### Il s'agit de la régression logistique! **À démontrer en exercice** ```python aidecours.show_1d_function(lambda a: perte_nlv_y0(sigmoid(a)), label='$y=0$'); aidecours.show_1d_function(lambda a: perte_nlv_y1(sigmoid(a)), label='$y=1$'); plt.title('Perte logistique'); plt.xlabel('$x$'); plt.ylabel('$L_{nlv}(1,\sigma(x))$'); CB() ``` ### Exemple de classification Ci-dessous, nous générons au hasard un ensemble de 100 observations en deux dimensions à l'aide de la fonction `sklearn.datasets.make_blobs` (voir: http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html) ```python x, y = make_blobs(n_samples=100, centers=2, n_features=2, cluster_std=1, random_state=0) aidecours.show_2d_dataset(x, y); CB() ``` Illustrons la fonction à optimiser (avec $\lambda=0.01$): $$ \frac1n \sum_{i=1}^n - y_i \wbf\cdot\xbf_i + \log(1+e^{\wbf\cdot\xbf_i})+ \frac\lambda2\|\wbf\|^2\,. $$ ```python def calc_perte_logistique(w): pred = sigmoid(x @ w) pred[y==0] = 1-pred[y==0] return np.mean(-np.log(pred)) + .01*w @ w/2 aidecours.show_2d_function(calc_perte_logistique, -4, 4, .05) w_opt =[ 1.59526315, -1.09703947] plt.scatter(*w_opt, s=200, marker='*', c='r') plt.title('Fonction objectif'); plt.xlabel('$w_1$'); plt.ylabel('$w_2$'); CB() ``` L'optimum se trouve en $\wbf^* \approx \vec{1.60\\ -1.10}\,$. Ce qui correspond au prédicteur suivant. ```python from sklearn.linear_model import LogisticRegression algo = LogisticRegression(C=100./len(y), fit_intercept=False) algo.fit(x, y) coord=aidecours.show_2d_predictions(x, y, algo.predict) plt.plot([coord[0],0,0], [0,0,coord[2]], '--k'); #print('vecteur w :', algo.coef_[0]) #print('biais b :', algo.intercept_) CB() ``` ## Ajout d'un biais Pour éviter de restreindre la fonction de prédiction à passer par l'origine, on ajoute un **biais**: <center></center> Dans le cas de notre réseau simple, la fonction de prédiction devient alors: $$R_{\wbf,b}({\xbf}) = f\left(\sum_{i=1}^d w_i x_i + b\right) = f\big(\wbf\cdot \xbf+b\big).$$ Retournons à notre exemple de régression logistique (c'est-à-dire le réseau de neurone où le neurone de sortie $f$ est la **fonction sigmoïdale** $\sigma$ et a perte du **négatif log vraisemblance** $L_{\rm nlv}$. Le problème d'optimisation **avec biais** s'exprime ainsi: $$ \min_{\wbf,b} \left[\frac1n \sum_{i=1}^n - y_i (\wbf\cdot\xbf_i+b) + \log(1+e^{\wbf\cdot\xbf_i+b})+ \frac\lambda2\|\wbf\|^2\right]. $$ ```python algo = LogisticRegression(C=100/len(y), fit_intercept=True) algo.fit(x, y) coord=aidecours.show_2d_predictions(x, y, algo.predict) plt.plot([coord[0],0,0], [0,0,coord[2]], '--k'); CB() #print('vecteur w :', algo.coef_[0]) #print('biais b :', algo.intercept_[0]) CB() ``` L'optimum se trouve en $\wbf^* \approx \vec{0.97\\ -1.52}, b \approx 2.33\,$.
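Petite vérification numérique de l'identité $L_{\rm nlv}\big(\sigma(x), y\big) = -y\,x + \log(1+e^{x})$ utilisée plus haut (exemple illustratif seulement ; il ne remplace pas la démonstration demandée en exercice) :

```python
# Vérification numérique rapide de l'identité (les valeurs de x sont arbitraires).
x_vals = np.linspace(-5, 5, 11)
for y_val in (0., 1.):
    gauche = -y_val*np.log(sigmoid(x_vals)) - (1 - y_val)*np.log(1 - sigmoid(x_vals))
    droite = -y_val*x_vals + np.log(1 + np.exp(x_vals))
    print(np.allclose(gauche, droite))  # attendu : True
```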
e93312ba3171a9bd74a977a29895d5d6264e1e69
904,472
ipynb
Jupyter Notebook
notebooks/01 - Commencons simplement.ipynb
pgermain/cours2018-Intro_aux_r-seaux_de_neurones
bec6bb9e9ff40fbac0e29469fe038b3c10beebf9
[ "CC-BY-4.0" ]
null
null
null
notebooks/01 - Commencons simplement.ipynb
pgermain/cours2018-Intro_aux_r-seaux_de_neurones
bec6bb9e9ff40fbac0e29469fe038b3c10beebf9
[ "CC-BY-4.0" ]
null
null
null
notebooks/01 - Commencons simplement.ipynb
pgermain/cours2018-Intro_aux_r-seaux_de_neurones
bec6bb9e9ff40fbac0e29469fe038b3c10beebf9
[ "CC-BY-4.0" ]
2
2018-10-23T14:22:25.000Z
2020-11-19T23:36:33.000Z
711.062893
366,840
0.947666
true
4,647
Qwen/Qwen-72B
1. YES 2. YES
0.845942
0.808067
0.683578
__label__fra_Latn
0.799172
0.426513
<a href="https://colab.research.google.com/github/AI-odyssey/AI-odyssey.github.io/blob/master/%5BHW3%5D_Practice_1_answer.ipynb" target="_parent"></a> # **[HW3] Practice_1** 1. Install packages 2. Matrix operations 3. Linear system 4. Inverse matrix 5. Linear combination 선형대수 실습은, exercise 혹은 coding 문제풀이에 집중하기 보다는, 수업시간에 배웠던 개념들을 visualize 하면서 이를 폭넓게 이해하는 데에 초점을 맞추고 있습니다. 실습에서 사용한 예시 외에도, 다양한 matrix들을 visualize 하면서 개념을 익혀보시기 바랍니다. # 1. Install packages > 필요한 package를 설치하고 import합니다 ```python # visualization을 위한 helper code입니다. if 'google.colab' in str(get_ipython()): print('Downloading plot_helpers.py to util/ (only neded for colab') !mkdir util; wget https://raw.githubusercontent.com/minireference/noBSLAnotebooks/master/util/plot_helpers.py -P util ``` ```python import matplotlib.pyplot as plt import numpy as np from mpl_toolkits.mplot3d import Axes3D import scipy as sp import scipy.linalg import sympy as sy from util.plot_helpers import plot_vec, plot_vecs, autoscale_arrows sy.init_printing() np.set_printoptions(precision=3) np.set_printoptions(suppress=True) ``` ## Numpy, matplotlib > 선형대수 및 앞으로 공부할 과목들 (머신러닝, 딥러닝, 자연어처리)에서 끊임없이 사용하게 될 두 가지 package가 있는데, (1) Numpy, (2) Matplotlib 입니다. 본 실습은 선형대수 실습시간인 만큼, python package에 대해 자세히 설명하지는 않을 것입니다. 따라서 이에 대해 전혀 모르시는 경우, 다음 tutorial의 Numpy와 Matplotlib 부분을 먼저 따라 치면서 익히고 오시는 것을 추천합니다 (http://aikorea.org/cs231n/python-numpy-tutorial/). > (주의 1) 해당 tutorial은 python 2 기준으로 작성되었기 때문에, google colab에서 오류가 날 수 있습니다 (e.g., print 다음 괄호가 없기 때문에 오류가 남). 그럴 경우엔 다음 링크 (https://cs231n.github.io/python-numpy-tutorial/#numpy)를 보고 수정하시면 됩니다 (같은 파일인데, 영어로 되어있습니다. 영어도 괜찮으시다면, 이 링크를 바로 보시는게 편합니다). > (주의 2) Matplotlib의 맨 마지막 예시 (imshow 사용)는 건너뛰시면 됩니다. # 2. Matrix operations 강의에서 배운 matrix의 기본 성질에 대해 복습해봅시다. > Matrix addition $\cdot$ matrix multiplication은 Matrix $A,B,C$, 그리고 constant $c$에 대해 다음 식들을 만족합니다. > Addition 1. $A+ B= B+ A$ 2. $(A+B)+ C=A+(B+C)$ 3. $c(A+B)=cA+cB$ 4. $(c+d)A=cA+c{D}$ 5. $c(dA)=(cd)A$ 6. $A+{0}=A$, where ${0}$ is the zero matrix 7. For any $A$, there exists an $- A$, such that $ A+(- A)=0$. > Multiplication 1. $ A({BC})=({AB}) C$ 2. $c({AB})=(cA)B=A(cB)$ 3. $A(B+ C)={AB}+{AC}$ 4. $(B+C)A={BA}+{CA}$ > Multiplication의 경우 (1) elementwise multiplication과 (2) matrix multiplication이 있는데, 양쪽 다 많이 쓰이는 만큼 잘 구분해야 합니다. ```python A = np.array([[1, 2], [3, 4]]) B = np.array([[5, 6], [7, 8]]) ``` ```python A*B # elementwise product ``` array([[ 5, 12], [21, 32]]) ```python A@B # matrix product ``` array([[19, 22], [43, 50]]) > Matrix multiplication은 교환법칙이 성립하지 않습니다 ($AB \neq BA$). 아래 sympy는 $x_1$, $x_2$ 등의 기호(symbol)을 사용하게 해주는 라이브러리입니다. ```python import sympy as sy A = sy.Matrix([[3, 4], [7, 8]]) B = sy.Matrix([[5, 3], [2, 1]]) A@B ``` $\displaystyle \left[\begin{matrix}23 & 13\\51 & 29\end{matrix}\right]$ ```python B@A ``` $\displaystyle \left[\begin{matrix}36 & 44\\13 & 16\end{matrix}\right]$ > 하지만 vector multiplication은 교환법칙이 성립합니다 ($x^Ty = y^Tx$). ```python # 계산에 사용할 기호(symbol)들을 아래와 같이 선언합니다. x1, x2, y1, y2 = sy.symbols('x1, x2, y1, y2', real = True) # 위에서 선언한 기호를 활용하여, np.array와 유사한 방식으로 벡터를 선업합니다. x = sy.Matrix([x1, x2]) y = sy.Matrix([y1, y2]) x.T@y ``` $\displaystyle \left[\begin{matrix}x_{1} y_{1} + x_{2} y_{2}\end{matrix}\right]$ ```python y.T@x ``` $\displaystyle \left[\begin{matrix}x_{1} y_{1} + x_{2} y_{2}\end{matrix}\right]$ > Matrix transpose는 Matrix $A,B$, 그리고 constant $c$에 대해 다음 식들을 만족합니다. 1. $(A^T)^T$ 2. $(A+B)^T=A^T+B^T$ 3. $(cA)^T=cA^T$ 4. $(AB)^T=B^TA^T$ 4번이 성립하는지 실제로 알아봅시다. 
```python a, b, c, d, e, f, g, h, i, j, k, l = sy.symbols('a, b, c, d, e, f, g, h, i, j, k, l', real = True) A = sy.Matrix([[a, b], [c, d], [e, f]]) B = sy.Matrix([[g, h, i], [j, k, l]]) AB = A@B AB_tr = AB.transpose() AB_tr # 4번의 좌항 ``` $\displaystyle \left[\begin{matrix}a g + b j & c g + d j & e g + f j\\a h + b k & c h + d k & e h + f k\\a i + b l & c i + d l & e i + f l\end{matrix}\right]$ ```python B_tr_A_tr = B.transpose()@A.transpose() B_tr_A_tr # 4번의 우항 ``` $\displaystyle \left[\begin{matrix}a g + b j & c g + d j & e g + f j\\a h + b k & c h + d k & e h + f k\\a i + b l & c i + d l & e i + f l\end{matrix}\right]$ ```python AB_tr == B_tr_A_tr ``` True > 따라서 $(AB)^T$ 는 $B^TA^T$와 같음을 알 수 있습니다. # 3. Linear system > Linear system은 n(>=1)개의 linear equation(일차 방정식)들의 collection을 뜻합니다. 동일한 n개의 variable을 가진 m개의 일차 방정식들은, 다음과 같이 표현될 수 있습니다 $$ a_{11}x_{1}+a_{12}x_{2}+a_{13}x_{3}+\cdots +a_{1n}x_{n}=b_{1} \\ a_{21}x_{1}+a_{22}x_{2}+a_{13}x_{3}+\cdots +a_{2n}x_{n}=b_{2} \\ \cdots \\ a_{m1}x_{1}+a_{m2}x_{2}+a_{m3}x_{3}+\cdots +a_{mn}x_{n}=b_{m} $$ ### n=2일 때의 linear system > 다음과 같은 예시를 생각해 봅시다. $$ x_1 + x_2 = 6 \\ x_1 - x_2 = -4 $$. > 두 linear equation을 plot한다면 ```python x1 = np.linspace(-5, 5, 100) x2_1 = -x1 + 6 x2_2 = x1 + 4 fig, ax = plt.subplots(figsize = (12, 7)) ax.scatter(1, 5, s = 200, zorder=5, color = 'r', alpha = .8) ax.plot(x1, x2_1, lw =3, label = '$x_1+x_2=6$') ax.plot(x1, x2_2, lw =3, label = '$x_1-x_2=-4$') ax.plot([1, 1], [0, 5], ls = '--', color = 'b', alpha = .5) ax.plot([-5, 1], [5, 5], ls = '--', color = 'b', alpha = .5) ax.set_xlim([-5, 5]) ax.set_ylim([0, 12]) ax.legend() s = '$(1,5)$' ax.text(1, 5.5, s, fontsize = 20) ax.set_title('Solution of $x_1+x_2=6$, $x_1-x_2=-4$', size = 22) ax.grid() ``` > 이 나오게 됩니다. 따라서 두 직선이 만나는 교점, 즉 $x_1=1, x_2=5$가 주어진 두 linear equation을 만족하는 해가 됨을 알 수 있습니다. 이는 굳이 plot을 그리지 않고도, 연립방정식을 사용하여 쉽게 구할 수 있습니다. ### n=3일 때의 linear system > 다음과 같은 예시를 생각해 봅시다. $$ x_1 + 2x_2 + 3x_3 = 6 \\ 2x_1 + 5x_2 + 2x_3 = 4 \\ 6x_1 - 3x_2 + x_3 = 2 $$. > 세 linear equation을 plot한다면 ```python x1 = np.linspace(-10, 10, 20) x2 = np.linspace(-10, 10, 20) X1, X2 = np.meshgrid(x1, x2) fig = plt.figure(figsize = (9, 9)) ax = fig.add_subplot(111, projection = '3d') X3_1 = (6 - 2*X2 - X1) * (1/3) ax.plot_surface(X1, X2, X3_1, cmap ='viridis', alpha = 1) X3_2 = (4 - 5*X2 - 2*X1) * (1/2) ax.plot_surface(X1, X2, X3_2, cmap ='summer', alpha = 1) X3_3 = 2 + 3*X2 - 6*X1 ax.plot_surface(X1, X2, X3_3, cmap ='spring', alpha = 1) ax.set_xlabel('$x_1$-axis') ax.set_ylabel('$x_2$-axis') ax.set_zlabel('$x_3$-axis') plt.show() ``` > 이 나오게 됩니다. 따라서 n=2인 첫 번째 예시와 마찬가지로, 세 평면이 만나는 교점을 찾으면 주어진 세 linear equation을 만족하는 해가 됩니다. n=2일 경우보다 그림을 보고 직관적으로 해를 알기 어렵고, 연립방정식으로 해를 구하는 것도 상대적으로 복잡해졌음을 알 수 있습니다. > 주어진 예시들은 $$ Ax = b $$ $$ \begin{bmatrix} 1 & 2 & 3\\ 2 & 5 & 2\\ 6 & -3 & 1 \end{bmatrix} \begin{bmatrix} x_1\\ x_2\\ x_3 \end{bmatrix} = \begin{bmatrix} 6\\ 4\\ 2 \end{bmatrix} $$ > 의 꼴로 생각할 수 있는데, 위의 두 plot은 이를 "row" (linear equation)를 중심으로 이들의 교점을 구하는 방식으로 접근했습니다. 이 방식과는 다르게, "column"들을 중심으로 생각해봅시다. 각 column을 vector로 생각하면 위 linear system은 $$ x_1\begin{bmatrix} 1\\ 2\\ 6 \end{bmatrix} + x_2\begin{bmatrix} 2\\ 5\\ -3 \end{bmatrix} + x_3\begin{bmatrix} 3\\ 2\\ 1 \end{bmatrix} = \begin{bmatrix} 6\\ 4\\ 2 \end{bmatrix}$$ > 로 분해할 수 있습니다 (Matrix multiplications as column combinations). 즉 이 경우 $x_1, x_2, x_3$은 각 column vector의 계수가 됩니다. 
이를 plot한다면 ```python A = sy.Matrix([[1, 2, 3], [2, 5, 2], [6, -3, 1]]) x1, x2, x3 = sy.symbols('x1 x2 x3') x = sy.Matrix([x1,x2,x3]) print('column vectors:', A.col(0), A.col(1), A.col(2)) fig = plt.figure(figsize = (9,9)) ax = fig.add_subplot(111, projection = '3d') ax.scatter(6, 4, 2, s = 200, color = 'red') plot_vecs(A.col(0), A.col(1), A.col(2)) autoscale_arrows() ``` > 위와 같이 나오게 됩니다. 즉, 세 column vector ([1,2,6], [3,2,1],[2,5,-3])를 linear combination 해서 b ([6,4,2]) 를 만드는 계수 $x_1,x_2,x_3$을 찾는 문제로 바뀌게 됩니다. * Row picture: 각 row (linear equation; $a_{i1}x_{1}+a_{i2}x_{2}+a_{i3}x_{3}+\cdots +a_{in}x_{n}=b_{i}$) 들의 교점을 찾는다. * Column picture: 각 column (vector; $[a_{1i}, a_{2i}, \cdots, a_{mi}]^T$) 들의 linear combination이 b가 되도록 하는 계수 $x_1, x_2, x_3$을 찾는다. > 오늘 강의에서 $Ax=b$의 해를 찾는 방법을 자세히 배우지는 않았지만, 위 예시의 경우 우연히도 2*[3,2,1] = [6,4,2]이기 때문에, 해가 $[x_1, x_2, x_3] = [0,0,2]$임을 쉽게 알 수 있습니다. sympy를 사용해서 위 $x$가 해가 맞는지 확인해봅시다. ```python from sympy.solvers.solveset import linsolve # linsolve 함수로 해를 구할 것입니다 x1, x2, x3 = sy.symbols('x1 x2 x3') A = sy.Matrix(((1,2,3),(2,5,2),(6,-3,1))) b = sy.Matrix((6,4,2)) system = A,b # 이와 같이 system을 정의합니다. linsolve(system, x1, x2, x3) # system, 그리고 x의 원소를 차례로 넣습니다. ``` $\displaystyle \left\{\left( 0, \ 0, \ 2\right)\right\}$ >$x$의 해가 [0,0,2]가 맞음을 sympy로도 확인할 수 있습니다.이처럼, 앞으로 배우게 될 강의에서도 주로 column picture의 관점에서 문제들을 생각하게 될 것입니다. ### 해가 없는 경우의 linear system > 위의 두 예시는 해가 1개만 존재하는 linear system 이었습니다. 오늘 강의에서 배웠듯, $A$의 inverse가 존재하기 때문에, 해 $x$는 unique하게 한 개만 존재하는 경우입니다. 이젠 다음과 같은 예시를 생각해 봅시다. $$ x_1 + x_2 + x_3 = 1 \\ x_1 - x_2 - 2x_3 = 2 \\ 2x_1 - x_3 = 1 $$. > 위 system을 행렬로 표현해보면 $$ Ax = b $$ $$ \begin{bmatrix} 1 & 1 & 1\\ 1 & -1 & -2\\ 2 & 0 & -1 \end{bmatrix} \begin{bmatrix} x_1\\ x_2\\ x_3 \end{bmatrix} = \begin{bmatrix} 1\\ 2\\ 1 \end{bmatrix} $$ > 이 됩니다. 이를 column picture로 plot해 본다면 ```python A = sy.Matrix([[1, 1, 1], [1, -1, -2], [2, 0, -1]]) x1, x2, x3 = sy.symbols('x1 x2 x3') x = sy.Matrix([x1,x2,x3]) fig = plt.figure(figsize = (9,9)) ax = fig.add_subplot(111, projection = '3d') ax.scatter(1, 2, 1, s = 200, color = 'red') plot_vecs(A.col(0), A.col(1), A.col(2)) autoscale_arrows() ``` > 세 column vector의 linear combination으로 b (빨간색 점) 를 만들 수 있으면 해가 1개 이상 존재하고, 만드는 것이 불가능 하다면 해가 존재하지 않습니다. A의 해를 확인해 봅시다. ```python x1, x2, x3 = sy.symbols('x1 x2 x3') A = sy.Matrix(((1,1,1),(1,-1,-2),(2,0,-1))) b = sy.Matrix((1,2,1)) system = A,b linsolve(system, x1, x2, x3) ``` > 해가 없음을 알 수 있습니다. 만약 $A$가 invertible 하다면 (즉 A의 determinant가 0이라면), 해 $x$는 반드시 존재하며, 1개로 unique하게 얻어져야 합니다. 따라서, 이 경우엔 해가 없기 때문에, *det A* 가 0이 되는지 확인해봅시다. ```python A.det() ``` ### 해가 무수히 많은 경우의 linear system > 다음과 같은 예시를 생각해 봅시다. $$ x_2 - x_3 = 4 \\ 2x_1 + x_2 + 2x_3 = 4 \\ 2x_1 + 2x_2 + x_3 = 8 $$. > 위 system을 행렬로 표현해보면 $$ Ax = b $$ $$ \begin{bmatrix} 0 & 1 & -1\\ 2 & 1 & 2\\ 2 & 2 & 1 \end{bmatrix} \begin{bmatrix} x_1\\ x_2\\ x_3 \end{bmatrix} = \begin{bmatrix} 4\\ 4\\ 8 \end{bmatrix} $$ > 이 됩니다. 이를 column picture로 plot해 본다면 ```python A = sy.Matrix([[0, 1, -1], [2, 1, 2], [2, 2, 1]]) x1, x2, x3 = sy.symbols('x1 x2 x3') x = sy.Matrix([x1,x2,x3]) fig = plt.figure(figsize = (9,9)) ax = fig.add_subplot(111, projection = '3d') plot_vecs(A.col(0), A.col(1), A.col(2)) autoscale_arrows() ax.scatter(4,4,8, s = 200, color = 'red') ax.set(xlim = [0.,5.], ylim = [0.,5.,], zlim = [0., 10.]) plt.show() ``` > 이 됩니다. 세 column vector의 linear combination으로 b (빨간색 점) 를 만들 수 있으면 해가 1개 이상 존재하고, 만드는 것이 불가능 하다면 해가 존재하지 않습니다. A의 해를 확인해봅시다. 
```python A = sy.Matrix(((0,1,-1),(2,1,2),(2,2,1))) b = sy.Matrix((4,4,8)) system = A,b linsolve(system, x1, x2, x3) ``` > 위 결과가 의미하는 것은, 어떤 $x_3$의 값에 대해서도 위와 같은 형태의 vector는 해가 된다는 것입니다. 즉 ..., (-3/2, 5, 1) ($x_3$이 1일 경우), (-3, 6, 2) ($x_3$이 2일 경우), ..., (-3n/2, n+4, n), ... 모두가 해가 될 수 있다는 것이므로, 해가 무수히 많이 존재하는 경우입니다. 이때도 마찬가지로 해 $x$가 unique하게 결정되지 않는 case이므로, A의 determinant가 0인지 확인해봅시다. ```python A.det() ``` ### Exercise 다음 linear system들의 (i) 해가 무수히 많이 존재하는 경우, (ii) 해가 한 개 존재하는 경우, (iii) 해가 존재하지 않는 경우로 나누고 존재할 경우 해를 구하시오. 또한 각각에 대해 A의 inverse가 존재하는지 설명하시오. (Hint: 위 예시의 linsolve 함수를 사용) $ Ax = b $ \\ (a) $ \begin{bmatrix} 0 & 1 & 4\\ 1 & 3 & 5\\ 3 & 7 & 7 \end{bmatrix} \begin{bmatrix} x_1\\ x_2\\ x_3 \end{bmatrix} = \begin{bmatrix} -5\\ -2\\ 6 \end{bmatrix} $ \\ (b) $ \begin{bmatrix} 1 & 0 & -3\\ 2 & 2 & 9\\ 0 & 1 & 5 \end{bmatrix} \begin{bmatrix} x_1\\ x_2\\ x_3 \end{bmatrix} = \begin{bmatrix} 8\\ 7\\ -2 \end{bmatrix} $ \\ (c) $ \begin{bmatrix} 1 & 2 & 3\\ 4 & 5 & 9\\ 9 & 12 & 21 \end{bmatrix} \begin{bmatrix} x_1\\ x_2\\ x_3 \end{bmatrix} = \begin{bmatrix} 5\\ 2\\ 9 \end{bmatrix} $ Answer: ```python # answer: 해가 존재하지 않음 A = sy.Matrix([[0,1,4],[1,3,5],[3,7,7]]) b = sy.Matrix([-5,-2,6]) system = A,b linsolve(system, x1, x2, x3) ``` ```python # answer: 해가 unique하게 한개만 존재 A = sy.Matrix([[1,0,-3],[2,2,9],[0,1,5]]) b = sy.Matrix([8,7,-2]) system = A,b linsolve(system, x1, x2, x3) ``` $\displaystyle \left\{\left( 5, \ 3, \ -1\right)\right\}$ ```python # answer: 해가 무수히 많이 존재 A = sy.Matrix([[1,2,3],[4,5,9],[9,12,21]]) b = sy.Matrix([5,2,9]) system = A,b linsolve(system, x1, x2, x3) ``` # 4. Inverse matrix > Square matrix $A$의 역행렬 $A^{-1}$은 다음과 같이 정의됩니다. $$ A^{-1}A = AA^{-1} = I_n$$ > 역행렬이 존재한다는 의미는, input vector $x$와 output vector $Ax$ 사이에 일대일대응 관계가 성립한다는 의미입니다. <center> <figcaption> 출처: Ref. [1]</figcaption> </center> > 즉, $Ax$로부터 $x$를 복원할 수 있다는 말이고, 이를 복원하는 행렬이 $A^{-1}$이 됩니다. > 만약 $n \times n$ 행렬 $A$의 역행렬이 존재한다면, $Ax = b$는 $x = A^{-1}b$로 해를 구할 수 있습니다. ```python A = np.array([[1, 2, 3], [2, 5, 2], [6, -3, 1]]) b = np.array([6,4,2]) A_inv = np.linalg.inv(A) # np.linalg.inv를 활용하면 A의 inverse를 구할 수 있습니다 A_inv_b = A_inv @ b # A^{-1}b x = A_inv_b # x = A^{-1}b print(x) # 검산해봅시다 (구한 x를 대입) print(A@x) print(b) ``` [0. 0. 2.] [6. 4. 2.] [6 4 2] > Ax와 b가 같음을 알 수 있습니다. # 5. Linear combination 두 vector $ \begin{bmatrix} 4 \\ 2 \end{bmatrix} $, $ \begin{bmatrix} -2 \\ 2 \end{bmatrix} $의 linear combination은 다음과 같이 나타낼 수 있습니다. $$ c_1\begin{bmatrix} 4 \\ 2 \end{bmatrix} + c_2\begin{bmatrix} -2 \\ 2 \end{bmatrix} $$ 이를 plot해 봅시다. 
```python fig, ax = plt.subplots(figsize=(8, 8)) vec = np.array([[[0,0,4,2]], [[0,0,-2,2]], [[0,0,2,10]], [[0,0,8,4]], [[0,0,-6,6]]]) colors = ['b','b','r','b','b'] # tail이 origin, head가 (4,2), (-2,2), (2,10), (8,4), (-6,6)인 vector plot for i in range(vec.shape[0]): X,Y,U,V = zip(*vec[i,:,:]) ax.quiver(X, Y, U, V, angles='xy', scale_units='xy', color = colors[i], scale=1, alpha = .6) ax.text(x = vec[i,0,2], y = vec[i,0,3], s = '(%.0d, %.0d)' %(vec[i,0,2],vec[i,0,3]), fontsize = 16) # tail이 (8,4), head가 (2,10)인 vector plot points12 = np.array([[8,4],[2,10]]) ax.plot(points12[:,0], points12[:,1], c = 'b', lw = 3.5,alpha =0.5, ls = '--') # tail이 (-6,6), head가 (2,10)인 vector plot points34 = np.array([[-6, 6],[2,10]]) ax.plot(points34[:,0], points34[:,1], c = 'b', lw = 3.5,alpha =0.5, ls = '--') ax.set_xlim([-10, 10]) ax.set_ylim([0, 10.5]) ax.set_xlabel('x-axis', fontsize =16) ax.set_ylabel('y-axis', fontsize =16) ax.grid() ######################################Basis######################################## a = np.arange(-11, 20, 1) x = np.arange(-11, 20, 1) # 붉은색 격자 plot for i in a: y1 = i + 0.5*x # 0.5(기울기) = 2/4 ax.plot(x, y1, ls = '--', color = 'pink', lw = 2) y2 = i - x # -1(기울기) = 2/(-2) ax.plot(x, y2, ls = '--', color = 'pink', lw = 2) ax.set_title('Linear Combination of Two Vectors in $\mathbf{R}^2$', size = 22, x =0.5, y = 1.01) plt.show() ``` > 위의 그림은 두 벡터의 linear combination으로 $ \begin{bmatrix} 2 \\ 10 \end{bmatrix}$을 만드는 그립입니다. $$ \begin{bmatrix} 2 \\ 10 \end{bmatrix} = 2\begin{bmatrix} 4 \\ 2 \end{bmatrix} + 3\begin{bmatrix} -2 \\ 2 \end{bmatrix} $$ > $\begin{bmatrix} 4 \\ 2 \end{bmatrix}$에 3을 곱하고, $\begin{bmatrix} -2 \\ 2 \end{bmatrix}$에 2를 곱한 뒤 더하면 $\begin{bmatrix} 2 \\ 10 \end{bmatrix}$이 나옴을 알 수 있습니다. 붉은 격자는 두 벡터의 linear combination으로 도달할 수 있는 공간을 의미합니다. 그림에서 해당 붉은 격자가 $R^2$ 전체를 커버함을 확인할 수 있습니다. # Reference 1. https://en.wikipedia.org/wiki/Bijection,_injection_and_surjection 2. http://aikorea.org/cs231n/python-numpy-tutorial/#matplotlib 3. https://github.com/MacroAnalyst/Linear_Algebra_With_Python
086478b5cf57fba005e5f520d89e86fee970f863
681,987
ipynb
Jupyter Notebook
[HW3]_Practice_1_answer.ipynb
AI-odyssey/AI-odyssey.github.io
31aa670933b7768751250321a77c79726174f805
[ "MIT" ]
null
null
null
[HW3]_Practice_1_answer.ipynb
AI-odyssey/AI-odyssey.github.io
31aa670933b7768751250321a77c79726174f805
[ "MIT" ]
null
null
null
[HW3]_Practice_1_answer.ipynb
AI-odyssey/AI-odyssey.github.io
31aa670933b7768751250321a77c79726174f805
[ "MIT" ]
null
null
null
450.751487
174,350
0.931887
true
7,724
Qwen/Qwen-72B
1. YES 2. YES
0.808067
0.743168
0.60053
__label__kor_Hang
0.997295
0.233562
# Taylor integration example: $\dot x=x^2$

Here, we will integrate the initial-value problem (IVP) defined by

$$
\begin{align}
\dot x &= x^2 \\
x(0)&=x_0
\end{align}
$$

Given a real number $A>0$, the restriction of the function $f(x)=x^2$ over the interval $I_A=(-A,A)$ satisfies the Lipschitz condition $|f(x_1)-f(x_2)| \leq 2A|x_1-x_2|$ for every $x_1,x_2 \in I_A$. Therefore, the Picard-Lindelöf theorem for ordinary differential equations guarantees that there exists a $\delta>0$ such that a solution for this IVP exists and is unique for $t \in [0,\delta]$, for any $x_0 \in I_A$. Note that in this case it is necessary to restrict the function to a bounded interval in order to obtain a Lipschitz condition, which in turn is necessary to fulfill the conditions of the Picard-Lindelöf theorem.

Now, for any initial condition $x_0>0$, $t_0=0$, the analytical solution for this problem is:

$$
x(t)=\frac{x_0}{1-x_0\cdot t}
$$

In particular, the analytical solution exhibits a divergence at $t=1/x_0$; i.e., the solution is guaranteed to exist only for $t \in [0,1/x_0)$. How does a numerical integrator behave near this divergence?

We will try three methods to integrate this problem:

+ Adaptive time-step, 4th-order, Runge-Kutta (`ODE.jl`)
+ Taylor method (`TaylorIntegration.jl`)
+ Adaptive time-step, Runge-Kutta-Fehlberg 7/8 method (`ODE.jl`)

As an initial condition to integrate this IVP, we choose $x_0=3$, so that the blow-up time $t=1/x_0=1/3$ is not exactly representable in a binary floating-point format. Thus, any constant time-step numerical integrator should break down when integrating up to $t_\mathrm{max}=1/3$.

We start off by including the relevant packages:

```julia
using TaylorIntegration, ODE, PyPlot
```

The ODE:

```julia
diffeq(t, x) = x.^2
```

    diffeq (generic function with 1 method)

## 1. Adaptive time-step, 4th-order, Runge-Kutta method

We select $x_0=3$, $t_0=0$. Then, the singularity is at $t=1/3$.

```julia
@time tRK, xRK = ode45(diffeq, 3.0, [0.0, 0.34]); #warmup lap
@time tRK, xRK = ode45(diffeq, 3.0, [0.0, 0.34]);
```

    Warning: dt < minstep. Stopping.
    1.235191 seconds (1.71 M allocations: 77.339 MB, 1.86% gc time)
    Warning: dt < minstep. Stopping.
    0.000999 seconds (18.45 k allocations: 530.688 KB)

Plot $x$ vs $t$ (log-log):

```julia
title("x vs t (log-log)")
xlabel(L"\log_{10}(t)")
ylabel(L"\log_{10}(x(t))")
grid(true)
plot(log10(tRK[2:end]), log10(xRK[2:end]), ".-");
```

What is the final state of the system?

```julia
tRK[end], xRK[end]
```

    (0.33333423758781194,7.125446356124256e17)

Does the integrator get past the singularity?

```julia
tRK[end]>1/3
```

    true

The answer is yes! So the last value of the solution is meaningless:

```julia
xRK[end] #this value is meaningless
```

    7.125446356124256e17

How many steps did the RK integrator perform?

```julia
length(xRK)-1
```

    166

How does the numerical solution compare to the analytical solution?
The analytical solution is:

```julia
exactsol(t, x0) = x0./(1.0-x0.*t) #analytical solution
```

    exactsol (generic function with 1 method)

The relative difference between the numerical and analytical solution, $\delta x$, is:

```julia
δxRK = (xRK-exactsol(tRK, 3.0))./exactsol(tRK, 3.0) #error relative to analytical solution
;
```

The $\delta x$ vs $t$ plot (semilog):

```julia
title("Relative error (semi-log)")
xlabel(L"\log_{10}(t)")
ylabel(L"\log_{10}(\delta x(t))")
grid(true)
plot(tRK, log10(abs(δxRK)), "o-");
```

This plot means that the error of the numerical solution grows systematically; at the end of the integration, the error in the numerical solution is

```julia
(xRK[end-1]-exactsol(tRK[end-1], 3.0))
```

    5.433622064235371e17

## 2. Taylor method

Again, we select $x_0=3$, $t_0=0$. The order of the Taylor integration is $28$, and we set the absolute tolerance equal to $10^{-20}$; this value is used during each time-step in order to compute an adaptive step size. We set the maximum number of integration steps equal to the number of steps that the previous integrator did.

```julia
@time tT, xT = taylorinteg(diffeq, 3.0, 0.0, 0.34, 28, 1e-20, maxsteps=length(xRK)-1); #warmup lap
@time tT, xT = taylorinteg(diffeq, 3.0, 0.0, 0.34, 28, 1e-20, maxsteps=length(xRK)-1);
```

    0.172252 seconds (186.71 k allocations: 9.235 MB)
    0.002670 seconds (37.91 k allocations: 2.348 MB)
    WARNING: Maximum number of integration steps reached; exiting.
    WARNING: Maximum number of integration steps reached; exiting.

```julia
tT[end], xT[end]
```

    (0.3333333329479479,2.5948055925168757e9)

How many steps did the Taylor integrator perform?

```julia
length(xT)-1
```

    166

Below, we show the $x$ vs $t$ plot (log-log):

```julia
# axis([0, 0.35, -15, 10])
title("x vs t (log-log)")
xlabel(L"\log_{10}(t)")
ylabel(L"\log_{10}(x(t))")
grid(true)
plot(log10(tT[2:end]), log10(xT[2:end]), ".-");
```

Does the integrator get past the singularity?

```julia
tT[end] > 1/3
```

    false

The answer is no! Even if we increase the value of the `maxsteps` keyword in `taylorinteg`, it doesn't get past the singularity!

Now, the relative difference between the numerical and analytical solution, $\delta x$, is:

```julia
δxT = (xT.-exactsol(tT, 3.0))./exactsol(tT, 3.0);
```

The $\delta x$ vs $t$ plot (logscale):

```julia
title("Relative error (semi-log)")
xlabel(L"t")
ylabel(L"\log_{10}(\delta x(t))")
grid(true)
plot(tT, log10(abs(δxT)), "o-");
```

We observe that, while the execution time is ~10 times longer than that of the 4th-order RK method, the numerical solution obtained by the Taylor integrator stays within $10^{-12}$ of the analytical solution, for the same number of steps.

Now, what happens if we use a higher-order Runge-Kutta method to integrate this problem?

## 3. Runge-Kutta-Fehlberg 7/8 method

Here we use the Runge-Kutta-Fehlberg 7/8 method, included in `ODE.jl`, to integrate the same problem as before.

```julia
@time t78, x78 = ode78(diffeq, 3.0, [0.0, 0.34]); #warmup lap
@time t78, x78 = ode78(diffeq, 3.0, [0.0, 0.34]);
```

    Warning: dt < minstep. Stopping.
    0.164455 seconds (150.73 k allocations: 6.613 MB, 4.77% gc time)
    Warning: dt < minstep. Stopping.
    0.000907 seconds (17.76 k allocations: 516.594 KB)

Plot $x$ vs $t$ (log-log):

```julia
title("x vs t (log-log)")
xlabel(L"\log_{10}(t)")
ylabel(L"\log_{10}(x(t))")
grid(true)
plot(log10(t78[2:end]), log10(x78[2:end]), ".-");
```

What is the final state of the system?

```julia
t78[end], x78[end]
```

    (0.3333336190078445,1.6711546943686948e18)

Does the integrator get past the singularity?
```julia
t78[end]>1/3
```

    true

The answer is yes! So the last value of the solution is meaningless:

```julia
x78[end] #this value is meaningless
```

    1.6711546943686948e18

How many steps did the RK integrator perform?

```julia
length(x78)-1
```

    91

The relative difference between the numerical and analytical solution, $\delta x$, is:

```julia
δx78 = (x78-exactsol(t78, 3.0))./exactsol(t78, 3.0) #error relative to analytical solution
;
```

The $\delta x$ vs $t$ plot (semilog):

```julia
title("Relative error (semi-log)")
xlabel(L"t")
ylabel(L"\log_{10}(\delta x(t))")
grid(true)
plot(t78, log10(abs(δx78)), "o-");
```

This time, the RKF 7/8 integrator is "only" twice as fast as the Taylor integrator, but its error continues to be greater than the error from the latter by several orders of magnitude.

## 4. Adaptive 4th-order RK, more stringent tolerance

As a last example, we will integrate our problem once again using a 4th-order adaptive RK integrator, but imposing a more stringent tolerance:

```julia
#@time sol = solve(ODEProblem(diffeq, 3.0), [0.0,0.34], alg=:Feagin14) #warmup lap
#@time sol = solve(ODEProblem(diffeq, 3.0), [0.0,0.34], alg=:Feagin14)
```

```julia
#@time sol = solve(ODEProblem(diffeq, 3.0), [0.0,0.33333], alg=:DP5); #warmup lap
#@time sol = solve(ODEProblem(diffeq, 3.0), [0.0,0.34], alg=:DP5);
```

```julia
@time tRK_, xRK_ = ode45(diffeq, 3.0, [0.0, 0.34], abstol=1e-8, reltol=1e-8 ); #warmup lap
@time tRK_, xRK_ = ode45(diffeq, 3.0, [0.0, 0.34], abstol=1e-8, reltol=1e-8 ); #warmup lap
;
```

    Warning: dt < minstep. Stopping.
    0.027715 seconds (502.96 k allocations: 15.123 MB)
    Warning: dt < minstep. Stopping.
    0.031589 seconds (502.82 k allocations: 15.115 MB, 14.73% gc time)

Now, the integrator takes 10 times longer to complete the integration than the Taylor method. Does it get past the singularity?

```julia
tRK_[end] > 1/3
```

    true

Yes! So, once again, the last value reported by the integrator is completely meaningless. But has it attained a higher precision than the Taylor method? Well, let's calculate once again the numerical error relative to the analytical solution:

```julia
# error of the tighter-tolerance run relative to the analytical solution
δxo = (xRK_-exactsol(tRK_, 3.0))./exactsol(tRK_, 3.0);
```

And now, let's plot this relative error vs time:

```julia
title("Relative error (semi-log)")
xlabel(L"t")
ylabel(L"\log_{10}(\delta x(t))")
ylim(-20,20)
grid(true)
plot(tRK_, log10(abs(δxo)), "o-");
```

The numerical error has actually gotten worse! `TaylorIntegration.jl` is indeed a really competitive package to integrate ODEs.

```julia

```
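A closing remark on why the Taylor integrator cannot step across the singularity, which follows directly from the analytical solution stated at the beginning. Differentiating $x(t)=x_0/(1-x_0 t)$ repeatedly gives

$$
\frac{x^{(n)}(t)}{n!} = \frac{x_0^{\,n+1}}{(1-x_0\, t)^{\,n+1}} = x(t)^{\,n+1},
$$

so the Taylor expansion of the solution around any point $t^\ast<1/x_0$ is a geometric series whose radius of convergence is $1/x(t^\ast) = 1/x_0 - t^\ast$, i.e. exactly the distance to the singularity. As $t^\ast$ approaches $1/x_0$ this radius shrinks to zero, and any step-size control based on the size of the highest-order Taylor coefficients must pick steps comparable to or smaller than it. The adaptive step therefore collapses in front of the singularity instead of jumping over it, which is consistent with the behaviour observed in Section 2.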
2c3c3fde6b2146687aab2b79f1468fdf34e05065
290,785
ipynb
Jupyter Notebook
examples/x-dot-equals-x-squared.ipynb
JuliaPackageMirrors/TaylorIntegration.jl
0b095efa8dbe54913d01628ca24b83eae579908c
[ "MIT" ]
1
2022-01-22T13:06:53.000Z
2022-01-22T13:06:53.000Z
examples/x-dot-equals-x-squared.ipynb
JuliaPackageMirrors/TaylorIntegration.jl
0b095efa8dbe54913d01628ca24b83eae579908c
[ "MIT" ]
null
null
null
examples/x-dot-equals-x-squared.ipynb
JuliaPackageMirrors/TaylorIntegration.jl
0b095efa8dbe54913d01628ca24b83eae579908c
[ "MIT" ]
null
null
null
284.247312
47,290
0.928432
true
3,026
Qwen/Qwen-72B
1. YES 2. YES
0.909907
0.882428
0.802927
__label__eng_Latn
0.933805
0.703802
[Sebastian Raschka](http://sebastianraschka.com) - [Open in IPython nbviewer](http://nbviewer.ipython.org/github/rasbt/pattern_classification/blob/master/python_howtos/scikit_linear_classificationreate=1) - [Link to this IPython notebook on Github](http://nbviewer.ipython.org/github/rasbt/pattern_classification/blob/master/machine_learning/scikit-learn/scikit_linear_classification.ipynb) - [Link to the GitHub Repository pattern_classification](https://github.com/rasbt/pattern_classification) ```python %load_ext watermark %watermark -a 'Gopala KR' -u -d -v -p numpy,scikit-learn,matplotlib ``` The watermark extension is already loaded. To reload it, use: %reload_ext watermark Gopala KR last updated: 2018-01-28 CPython 3.6.3 IPython 6.2.1 numpy 1.13.1 scikit-learn 0.19.0 matplotlib 2.0.2 /srv/venv/lib/python3.6/site-packages/watermark/watermark.py:155: DeprecationWarning: Importing scikit-learn as `scikit-learn` has been depracated and will not be supported anymore in v1.7.0. Please use the package name `sklearn` instead. DeprecationWarning) <hr> I would be happy to hear your comments and suggestions. Please feel free to drop me a note via [twitter](https://twitter.com/rasbt), [email](mailto:bluewoodtree@gmail.com), or [google+](https://plus.google.com/+SebastianRaschka). <hr> # An Introduction to simple linear supervised classification using `scikit-learn` In this introduction I want to give a brief overview of how Python's `scikit-learn` machine learning library can be used for simple linear classification. <br> <br> # Sections - [About the dataset](#About-the-dataset) - [Reading in a dataset from a CSV file](#Reading-in-a-dataset-from-a-CSV-file) - [Visualizing the Wine dataset](#Visualizing-the-Wine-dataset) - [Splitting into training and test dataset](#Splitting-into-training-and-test-dataset) - [Feature Scaling](#Feature-Scaling) - [Introduction to Multiple Discriminant Analysis (MDA)](#MDA) - [Classification via LDA](#LDA) - [Stochastic Gradient Descent (SGD) as linear classifier](#SGD) <br> <br> <br> <br> ## About the dataset [[back to top]](#Sections) For the following tutorial, we will be working with the free "Wine" Dataset that is deposited on the UCI machine learning repository (http://archive.ics.uci.edu/ml/datasets/Wine). <br> <font size="1"> **Reference:** Forina, M. et al, PARVUS - An Extendible Package for Data Exploration, Classification and Correlation. Institute of Pharmaceutical and Food Analysis and Technologies, Via Brigata Salerno, 16147 Genoa, Italy.</font> <br> <br> The Wine dataset consists of 3 different classes where each row correspond to a particular wine sample. The class labels (1, 2, 3) are listed in the first column, and the columns 2-14 correspond to the following 13 attributes (features): 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline An excerpt from the wine_data.csv dataset: <pre>1,14.23,1.71,2.43,15.6,127,2.8,3.06,.28,2.29,5.64,1.04,3.92,1065 1,13.2,1.78,2.14,11.2,100,2.65,2.76,.26,1.28,4.38,1.05,3.4,1050 [...] 2,12.37,.94,1.36,10.6,88,1.98,.57,.28,.42,1.95,1.05,1.82,520 2,12.33,1.1,2.28,16,101,2.05,1.09,.63,.41,3.27,1.25,1.67,680 [...] 
3,12.86,1.35,2.32,18,122,1.51,1.25,.21,.94,4.1,.76,1.29,630 3,12.88,2.99,2.4,20,104,1.3,1.22,.24,.83,5.4,.74,1.42,530</pre> <br> <br> ## Reading in a dataset from a CSV file [[back to top]](#Sections) Since it is quite typical to have the input data stored locally, as mentioned above, we will use the [`numpy.loadtxt`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.loadtxt.html) function now to read in the data from the CSV file. (alternatively [`np.genfromtxt()`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html) could be used in similar way, it provides some additional options) ```python import numpy as np # reading in all data into a NumPy array all_data = np.loadtxt(open("../../data/wine_data.csv","r"), delimiter=",", skiprows=0, dtype=np.float64 ) # load class labels from column 1 y_wine = all_data[:,0] # conversion of the class labels to integer-type array y_wine = y_wine.astype(np.int64, copy=False) # load the 14 features X_wine = all_data[:,1:] # printing some general information about the data print('\ntotal number of samples (rows):', X_wine.shape[0]) print('total number of features (columns):', X_wine.shape[1]) # printing the 1st wine sample float_formatter = lambda x: '{:.2f}'.format(x) np.set_printoptions(formatter={'float_kind':float_formatter}) print('\n1st sample (i.e., 1st row):\nClass label: {:d}\n{:}\n' .format(int(y_wine[0]), X_wine[0])) # printing the rel.frequency of the class labels print('Class label frequencies') print('Class 1 samples: {:.2%}'.format(list(y_wine).count(1)/y_wine.shape[0])) print('Class 2 samples: {:.2%}'.format(list(y_wine).count(2)/y_wine.shape[0])) print('Class 3 samples: {:.2%}'.format(list(y_wine).count(3)/y_wine.shape[0])) ``` total number of samples (rows): 178 total number of features (columns): 13 1st sample (i.e., 1st row): Class label: 1 [14.23 1.71 2.43 15.60 127.00 2.80 3.06 0.28 2.29 5.64 1.04 3.92 1065.00] Class label frequencies Class 1 samples: 33.15% Class 2 samples: 39.89% Class 3 samples: 26.97% <br> <br> ## Visualizing the Wine dataset [[back to top]](#Sections) There are endless way to visualize datasets for get an initial idea of how the data looks like. The most common ones are probably histograms and scatter plots. Scatter plots are useful for visualizing features in more than just one dimension, for example to get a feeling for the correlation between particular features. Unfortunately, we can't plot all 13 features here at once, since the visual cortex of us humans is limited to a maximum of three dimensions. Below, we will create an example 2D-Scatter plot from the features "Alcohol content" and "Malic acid content". Additionally, we will use the [`scipy.stats.pearsonr`](http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html) function to calculate a Pearson correlation coefficient between these two features. ```python %matplotlib inline ``` ```python from matplotlib import pyplot as plt from scipy.stats import pearsonr plt.figure(figsize=(10,8)) for label,marker,color in zip( range(1,4),('x', 'o', '^'),('blue', 'red', 'green')): # Calculate Pearson correlation coefficient R = pearsonr(X_wine[:,0][y_wine == label], X_wine[:,1][y_wine == label]) plt.scatter(x=X_wine[:,0][y_wine == label], # x-axis: feat. from col. 1 y=X_wine[:,1][y_wine == label], # y-axis: feat. from col. 
2 marker=marker, # data point symbol for the scatter plot color=color, alpha=0.7, label='class {:}, R={:.2f}'.format(label, R[0]) # label for the legend ) plt.title('Wine Dataset') plt.xlabel('alcohol by volume in percent') plt.ylabel('malic acid in g/l') plt.legend(loc='upper right') plt.show() ``` <br> <br> If we want to pack 3 different features into one scatter plot at once, we can also do the same thing in 3D: ```python from mpl_toolkits.mplot3d import Axes3D fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111, projection='3d') for label,marker,color in zip( range(1,4),('x', 'o', '^'),('blue','red','green')): ax.scatter(X_wine[:,0][y_wine == label], X_wine[:,1][y_wine == label], X_wine[:,2][y_wine == label], marker=marker, color=color, s=40, alpha=0.7, label='class {}'.format(label)) ax.set_xlabel('alcohol by volume in percent') ax.set_ylabel('malic acid in g/l') ax.set_zlabel('ash content in g/l') plt.legend(loc='upper right') plt.title('Wine dataset') plt.show() ``` <br> <br> ## Splitting into training and test dataset [[back to top]](#Sections) It is a typical procedure for machine learning and pattern classification tasks to split one dataset into two: a training dataset and a test dataset. The training dataset is henceforth used to train our algorithms or classifier, and the test dataset is a way to validate the outcome quite objectively before we apply it to "new, real world data". Here, we will split the dataset randomly so that 70% of the total dataset will become our training dataset, and 30% will become our test dataset, respectively. ```python from sklearn.cross_validation import train_test_split from sklearn import preprocessing X_train, X_test, y_train, y_test = train_test_split(X_wine, y_wine, test_size=0.30, random_state=123) ``` /srv/venv/lib/python3.6/site-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20. "This module will be removed in 0.20.", DeprecationWarning) Note that since this a random assignment, the original relative frequencies for each class label are not maintained. 
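(Side note: the `DeprecationWarning` above appears because `sklearn.cross_validation` was replaced by `sklearn.model_selection`. On scikit-learn >= 0.18 the equivalent, warning-free import would be the following sketch — it is not executed in this notebook.)

```python
# modern replacement for the deprecated sklearn.cross_validation import
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X_wine, y_wine, test_size=0.30, random_state=123)
```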
```python print('Class label frequencies') print('\nTraining Dataset:') for l in range(1,4): print('Class {:} samples: {:.2%}'.format(l, list(y_train).count(l)/y_train.shape[0])) print('\nTest Dataset:') for l in range(1,4): print('Class {:} samples: {:.2%}'.format(l, list(y_test).count(l)/y_test.shape[0])) ``` Class label frequencies Training Dataset: Class 1 samples: 36.29% Class 2 samples: 42.74% Class 3 samples: 20.97% Test Dataset: Class 1 samples: 25.93% Class 2 samples: 33.33% Class 3 samples: 40.74% <br> <br> ## Feature Scaling [[back to top]](#Sections) Another popular procedure is to standardize the data prior to fitting the model and other analyses so that the features will have the properties of a standard normal distribution with $\mu = 0$ and $\sigma = 1$ where $\mu$ is the mean (average) and $\sigma$ is the standard deviation from the mean, so that the standard scores of the samples are calculated as follows: \begin{equation} z = \frac{x - \mu}{\sigma}\end{equation} ```python std_scale = preprocessing.StandardScaler().fit(X_train) X_train = std_scale.transform(X_train) X_test = std_scale.transform(X_test) ``` ```python f, ax = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(10,5)) for a,x_dat, y_lab in zip(ax, (X_train, X_test), (y_train, y_test)): for label,marker,color in zip( range(1,4),('x', 'o', '^'),('blue','red','green')): a.scatter(x=x_dat[:,0][y_lab == label], y=x_dat[:,1][y_lab == label], marker=marker, color=color, alpha=0.7, label='class {}'.format(label) ) a.legend(loc='upper right') ax[0].set_title('Training Dataset') ax[1].set_title('Test Dataset') f.text(0.5, 0.04, 'malic acid (standardized)', ha='center', va='center') f.text(0.08, 0.5, 'alcohol (standardized)', ha='center', va='center', rotation='vertical') plt.show() ``` <br> <br> <a id="PCA"></a> <br> <br> <a id='MDA'></a> ## Linear Transformation & Classification: Multiple Discriminant Analysis (MDA) [[back to top]](#Sections) The main purposes of a Multiple Discriminant Analysis is to analyze the data to identify patterns to project it onto a subspace that yields a better separation of the classes. Also, the dimensionality of the dataset shall be reduced with minimal loss of information. **The approach is very similar to a Principal Component Analysis (PCA), but in addition to finding the component axes that maximize the variance of our data, we are additionally interested in the axes that maximize the separation of our classes (e.g., in a supervised pattern classification problem)** Here, our desired outcome of the multiple discriminant analysis is to project a feature space (our dataset consisting of n d-dimensional samples) onto a smaller subspace that represents our data "well" and has a good class separation. A possible application would be a pattern classification task, where we want to reduce the computational costs and the error of parameter estimation by reducing the number of dimensions of our feature space by extracting a subspace that describes our data "best". #### Principal Component Analysis (PCA) Vs. Multiple Discriminant Analysis (MDA) Both Multiple Discriminant Analysis (MDA) and Principal Component Analysis (PCA) are linear transformation methods and closely related to each other. 
In PCA, we are interested in finding the directions (components) that maximize the variance in our dataset, whereas in MDA, we are additionally interested in finding the directions that maximize the separation (or discrimination) between different classes (for example, in pattern classification problems where our dataset consists of multiple classes), in contrast to PCA, which ignores the class labels. **In other words, via PCA, we are projecting the entire set of data (without class labels) onto a different subspace, and in MDA, we are trying to determine a suitable subspace to distinguish between patterns that belong to different classes. Or, roughly speaking: in PCA we are trying to find the axes with maximum variance where the data is most spread (within a class, since PCA treats the whole data set as one class), and in MDA we are additionally maximizing the spread between classes.** In typical pattern recognition problems, a PCA is often followed by an MDA. If you are interested, you can find more information about the MDA in my IPython notebook [Stepping through a Multiple Discriminant Analysis - using Python's NumPy and matplotlib](http://nbviewer.ipython.org/github/rasbt/pattern_classification/blob/master/dimensionality_reduction/projection/linear_discriminant_analysis.ipynb?create=1). We will now use a `scikit-learn` function, [`sklearn.lda.LDA`](http://scikit-learn.org/stable/modules/generated/sklearn.lda.LDA.html), to transform our training data onto a 2-dimensional subspace; MDA is basically the more generalized form of an LDA (Linear Discriminant Analysis): ```python from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA sklearn_lda = LDA(n_components=2) sklearn_transf = sklearn_lda.fit_transform(X_train, y_train) plt.figure(figsize=(10,8)) for label,marker,color in zip( range(1,4),('x', 'o', '^'),('blue', 'red', 'green')): plt.scatter(x=sklearn_transf[:,0][y_train == label], y=sklearn_transf[:,1][y_train == label], marker=marker, color=color, alpha=0.7, label='class {}'.format(label) ) plt.xlabel('vector 1') plt.ylabel('vector 2') plt.legend() plt.title('Most significant singular vectors after linear transformation via LDA') plt.show() ``` <br> <br> <br> <br> ## Classification via LDA [[back to top]](#Sections) The LDA that we've just used in the section above can also be used as a simple linear classifier. ```python # fit model lda_clf = LDA() lda_clf.fit(X_train, y_train) # prediction print('1st sample from test dataset classified as:', lda_clf.predict(X_test[0,:].reshape(1, -1))) print('actual class label:', y_test[0]) ``` 1st sample from test dataset classified as: [3] actual class label: 3 Another handy subpackage of sklearn is `metrics`.
The [`metrics.accuracy_score`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html), for example, is quite useful to evaluate how many samples can be classified correctly: ```python from sklearn import metrics pred_train = lda_clf.predict(X_train) print('Prediction accuracy for the training dataset') print('{:.2%}'.format(metrics.accuracy_score(y_train, pred_train))) ``` Prediction accuracy for the training dataset 100.00% To verify that our model was not overfitted to the training dataset, let us evaluate the classifier's accuracy on the test dataset: ```python pred_test = lda_clf.predict(X_test) print('Prediction accuracy for the test dataset') print('{:.2%}'.format(metrics.accuracy_score(y_test, pred_test))) ``` Prediction accuracy for the test dataset 98.15% <br> <br> **Confusion Matrix** As we can see above, the misclassification rate was very low when we applied the classifier to the test data set. A confusion matrix can tell us in more detail which particular classes could not be classified correctly:

| | predicted class 1 | predicted class 2 | predicted class 3 |
|---|---|---|---|
| **actual class 1** | True positives | | |
| **actual class 2** | | True positives | |
| **actual class 3** | | | True positives |

```python print('Confusion Matrix of the LDA-classifier') print(metrics.confusion_matrix(y_test, lda_clf.predict(X_test))) ``` Confusion Matrix of the LDA-classifier [[14 0 0] [ 1 17 0] [ 0 0 22]] As we can see, one sample from class 2 was incorrectly labeled as class 1. From the perspective of class 1 this is one "False Positive", and from the perspective of class 2 it is one "False Negative". <br> <a id='SGD'></a> ## Stochastic Gradient Descent (SGD) as linear classifier [[back to top]](#Sections) Let us now compare the classification accuracy of the LDA classifier with a simple classification via stochastic gradient descent, an algorithm that minimizes a linear objective function (we also use the probably not ideal default settings here). More information about the `sklearn.linear_model.SGDClassifier` can be found [here](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html).
```python from sklearn.linear_model import SGDClassifier sgd_clf = SGDClassifier() sgd_clf.fit(X_train, y_train) pred_train = sgd_clf.predict(X_train) pred_test = sgd_clf.predict(X_test) print('\nPrediction accuracy for the training dataset') print('{:.2%}\n'.format(metrics.accuracy_score(y_train, pred_train))) print('Prediction accuracy for the test dataset') print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test))) print('Confusion Matrix of the SGD-classifier') print(metrics.confusion_matrix(y_test, sgd_clf.predict(X_test))) ``` Prediction accuracy for the training dataset 99.19% Prediction accuracy for the test dataset 100.00% Confusion Matrix of the SGD-classifier [[14 0 0] [ 0 18 0] [ 0 0 22]] /srv/venv/lib/python3.6/site-packages/sklearn/linear_model/stochastic_gradient.py:84: FutureWarning: max_iter and tol parameters have been added in <class 'sklearn.linear_model.stochastic_gradient.SGDClassifier'> in 0.19. If both are left unset, they default to max_iter=5 and tol=None. If tol is not None, max_iter defaults to max_iter=1000. From 0.21, default max_iter will be 1000, and default tol will be 1e-3. "and default tol will be 1e-3." % type(self), FutureWarning)
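The `FutureWarning` above suggests setting `max_iter` and `tol` explicitly. A sketch of the same comparison with those parameters pinned (the values are the ones named in the warning message; the exact accuracies may differ slightly from the run above):

```python
# pin the iteration and tolerance defaults mentioned in the FutureWarning
sgd_clf = SGDClassifier(max_iter=1000, tol=1e-3)
sgd_clf.fit(X_train, y_train)

print('Prediction accuracy for the test dataset')
print('{:.2%}'.format(metrics.accuracy_score(y_test, sgd_clf.predict(X_test))))
```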
de428dd8e0faaaf997ea3d3dda85492a3f21e58d
238,530
ipynb
Jupyter Notebook
tests/others/scikit_linear_classification.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
1
2019-05-10T09:16:23.000Z
2019-05-10T09:16:23.000Z
tests/others/scikit_linear_classification.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
null
null
null
tests/others/scikit_linear_classification.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
1
2019-05-10T09:17:28.000Z
2019-05-10T09:17:28.000Z
217.240437
119,904
0.907337
true
6,834
Qwen/Qwen-72B
1. YES 2. YES
0.766294
0.70253
0.538344
__label__eng_Latn
0.87465
0.089084
# Problema: Solução da cadeia cinemática do robô antropomorphico mostrado abaixo. O mecanismo possui 2 elos e 3 juntas. ## Sistema de Coordenadas Primeiramente, temos que determinar a localização dos referenciais. O primeiro referencial, ```B0```, está fixo e será colocado na intercessão dos eixos das duas primeiras juntas. O segundo referencial, ```B1``` , está em rotação pura de $\theta_1$ em relação ao eixo $y$ do referencial ```B0```, e suas origens são coincidentes. A origem do referencial ```B2``` é a mesma de ```B1``` e ele segue o movimento da segunda junta, ou seja, ele está em rotação pura de $\theta_2$ em relação ao eixo $z$ de ```B1```. Por fim, a origem do referencial ```B3``` coincide com a intercessão do eixo $x$ de ```B2``` e o eixo de rotação da terceira junta, além de ser uma rotação pura de $\theta_3$ em relação ao eixo $z$ de ```B2```. Os elos são as partes rígidas que conectam as juntas; assim, nesse caso, temos três elos. Como o primeiro referencial é conincidente com os dois próximos, só precisaremos definir o comprimento de dois dos elos. Definindo o ponto $A$ na junta 2, o ponto $B$ na junta 3 e o ponto $C$ na ponta do último elo, podemos definir a distância entre os pontos A e B, e B e C como sendo $l_1$ e $l_2$, respectivamente. Assim, já podemos iniciar a implementação da solução do problema. ```python # Funções das Bibliotecas Utilizadas from sympy import symbols from sympy.physics.mechanics import dynamicsymbols from sympy.physics.vector import ReferenceFrame # Variáveis Simbólicas theta_1, theta_2, theta_3 = dynamicsymbols('theta_1 theta_2 theta_3') l_1, l_2 = symbols('l_1 l_2', positive = True) # Referenciais B0 = ReferenceFrame('B0') # Referencial Parado B1 = ReferenceFrame('B1') B1.orient(B0, 'Axis', [theta_1, B0.y]) # Referencial móvel: theta_1 em relação a B0.y B2 = ReferenceFrame('B2') B2.orient(B1, 'Axis', [theta_2, B1.z]) # Referencial móvel: theta_2 em relação a B1.z B3 = ReferenceFrame('B3') B3.orient(B2, 'Axis', [theta_3, B2.z]) # Referencial móvel: theta_3 em relação a B2.z ``` Esse código cria as variáveis simbolicas e os referenciais que serão usados pelo ```sympy```. $\theta_1, \theta_2$ e $\theta_3$ foram definidas usando ```dynammicsymbols``` para mostrar que as variáveis são funções do tempo, e o método ```symbols``` foi usado junto com o argumento ```positive=True``` para definir que os comprimentos dos elos assumem apenas valores positivos. Através na classe ```ReferenceFrame``` podemos facilmente criar referenciais inerciais. Todos os referenciais foram definidos a partir de uma rotação de outro referencial definido anteriormente, com exceção do referencial fixo ```B0```. ### Matrizes de Rotação Então com essas definições fica fácil representar as matrizes de rotação entre qualquer um dos referenciais definidos. Para isso, invocamos o método ```.dcm``` no referencial que queremos definir a partir do referencial do argumento. A seguir estão alguns exemplos, junto com o representação dos resultados em LaTeX. 
```python # Matrizes de Rotação T_B0B1 = B0.dcm(B1) # Matriz de rotação de B1 para B0 T_B1B2 = B1.dcm(B2) # Matriz de rotação de B2 para B1 T_B2B3 = B2.dcm(B3) # Matriz de rotação de B3 para B2 T_B0B3 = (B0.dcm(B3)).simplify() # Matriz de rotação de B0 para B3 T_B0B2 = (B0.dcm(B2)).simplify() # Resultados em LaTeX from IPython.display import Latex from sympy import latex Latex("\\begin{eqnarray}" + "T_{\\theta_1}&=" + "&" +str(latex(T_B0B1)) + " T_{\\theta_2}=" + str(latex(T_B1B2)) + "T_{\\theta_3}=" + str(latex(T_B2B3)) + "\\\\T_{\\theta_1, \\theta_3}&=" + "&" + str(latex(T_B0B3)) + "\\end{eqnarray}") ``` \begin{eqnarray}T_{\theta_1}&=&\left[\begin{matrix}\cos{\left (\theta_{1}{\left (t \right )} \right )} & 0 & \sin{\left (\theta_{1}{\left (t \right )} \right )}\\0 & 1 & 0\\- \sin{\left (\theta_{1}{\left (t \right )} \right )} & 0 & \cos{\left (\theta_{1}{\left (t \right )} \right )}\end{matrix}\right] T_{\theta_2}=\left[\begin{matrix}\cos{\left (\theta_{2}{\left (t \right )} \right )} & - \sin{\left (\theta_{2}{\left (t \right )} \right )} & 0\\\sin{\left (\theta_{2}{\left (t \right )} \right )} & \cos{\left (\theta_{2}{\left (t \right )} \right )} & 0\\0 & 0 & 1\end{matrix}\right]T_{\theta_3}=\left[\begin{matrix}\cos{\left (\theta_{3}{\left (t \right )} \right )} & - \sin{\left (\theta_{3}{\left (t \right )} \right )} & 0\\\sin{\left (\theta_{3}{\left (t \right )} \right )} & \cos{\left (\theta_{3}{\left (t \right )} \right )} & 0\\0 & 0 & 1\end{matrix}\right]\\T_{\theta_1, \theta_3}&=&\left[\begin{matrix}\cos{\left (\theta_{2}{\left (t \right )} + \theta_{3}{\left (t \right )} \right )} \cos{\left (\theta_{1}{\left (t \right )} \right )} & - \sin{\left (\theta_{2}{\left (t \right )} + \theta_{3}{\left (t \right )} \right )} \cos{\left (\theta_{1}{\left (t \right )} \right )} & \sin{\left (\theta_{1}{\left (t \right )} \right )}\\\sin{\left (\theta_{2}{\left (t \right )} + \theta_{3}{\left (t \right )} \right )} & \cos{\left (\theta_{2}{\left (t \right )} + \theta_{3}{\left (t \right )} \right )} & 0\\- \sin{\left (\theta_{1}{\left (t \right )} \right )} \cos{\left (\theta_{2}{\left (t \right )} + \theta_{3}{\left (t \right )} \right )} & \sin{\left (\theta_{2}{\left (t \right )} + \theta_{3}{\left (t \right )} \right )} \sin{\left (\theta_{1}{\left (t \right )} \right )} & \cos{\left (\theta_{1}{\left (t \right )} \right )}\end{matrix}\right]\end{eqnarray} ### Posições Relativas de $A$, $B$ e $C$ Por termos definido um referencial para cada grau de liberdade do mecanismo, podemos facilmente expressar a posição de cada um dos pontos $A$, $B$ e $C$ em relação a esses referênciais. O ponto $A$ coincide com a origem do referencial fixo, ou seja, $\vec{r_{OA}}=\vec{0}$. O ponto $B$ é facilmente expresso em relação ao referencial ```B2```, nesse caso temos que $\vec{r_{AB}}={l_1 \vec{{i}_{B2}}}$. Finalmente, o ponto $C$ é representado por $\vec{r_{BC}}={l_2 \vec{{i}_{B3}}}$. Sendo $\vec{{i}_{B2}}$ e $\vec{{i}_{B3}}$ os versores na direção $x$ dos referenciais ```B2``` e ```B3``` respectivamente. A seguir, nota-se que a definição desses vetores posição é simples de ser implementado em ```sympy```. A principal vantagem é que não precisamos nos preocupar em relação a qual referencial estamos definindo os vetores, pois as relações entre os referenciais já foram criadas nas suas definições. Ou seja, podemos reescrever um vetor em relação a diferentes referenciais de maneira imediata, como mostrado abaixo. 
```python from sympy.physics.vector import Vector # Vetores Posição entre os Pontos r_OA = Vector(0) # Vetor Nulo r_AB = l_1 * B2.x # Vetor que liga os pontos A e B expresso no referencial móvel B2 r_BC = l_2 * B3.x # Vetor que liga os pontos B e C expresso no referencial móvel B3 R_AB = r_AB.express(B0) # Vetor que liga os pontos A e B expresso no referencial fixo B0 R_BC = r_BC.express(B0) # Vetor que liga os pontos B e C expresso no referencial fixo B0 # Resultado em LaTeX na Forma Vetorial ou Vetor Coluna Latex("\\begin{eqnarray}" + "R_{AB}&=" + "&" +str(latex(R_AB)) + "\\\\" + "R_{AB}&=" + "&" +str(latex(R_AB.to_matrix(B0))) + "\\end{eqnarray}") ``` \begin{eqnarray}R_{AB}&=&l_{1} \operatorname{cos}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2}\right)\mathbf{\hat{b0}_x} + l_{1} \operatorname{sin}\left(\theta_{2}\right)\mathbf{\hat{b0}_y} - l_{1} \operatorname{sin}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2}\right)\mathbf{\hat{b0}_z}\\R_{AB}&=&\left[\begin{matrix}l_{1} \cos{\left (\theta_{1}{\left (t \right )} \right )} \cos{\left (\theta_{2}{\left (t \right )} \right )}\\l_{1} \sin{\left (\theta_{2}{\left (t \right )} \right )}\\- l_{1} \sin{\left (\theta_{1}{\left (t \right )} \right )} \cos{\left (\theta_{2}{\left (t \right )} \right )}\end{matrix}\right]\end{eqnarray} ## Resultados Assim, já podemos encontrar os resultados da cinemática do ponto $C$ em relação a ```B0```. Para tal, só precisamos calcular a os vetores posição dos pontos $A$, $B$ e $C$, e suas primeiras e segunda derivadas no tempo, já que as variáveis $\theta_1$, $\theta_2$ e $\theta_3$ foram definidas como funções do tempo. Para melhor eficiência, poderíamos calcular os vetores aproveitando as fórmulas de Cinemática. Adicionalmente, a função ```trigsimp``` foi utilizada em cada elementos dos vetores colunas resultantes para simplificá-los. Então, segue o resultado. 
```python from sympy.physics.vector import time_derivative, vlatex from sympy import trigsimp # Cinemática do ponto A em relação ao referencial B0 r_A = r_OA.express(B0) v_A = time_derivative(r_A, B0) a_A = time_derivative(v_A, B0) # Cinemática do ponto B em relação ao referencial B0 r_B = r_A.express(B0) + r_AB.express(B0) v_B = time_derivative(r_B, B0) a_B = time_derivative(v_B, B0) # Cinemática do ponto C em relação ao referencial B0 r_C = r_B.express(B0) + r_BC.express(B0) v_C = (time_derivative(r_C, B0)) a_C = (time_derivative(v_C, B0)) # Simplifcação dos Resultados r_A = (r_A.to_matrix(B0)).applyfunc(trigsimp) v_A = (v_A.to_matrix(B0)).applyfunc(trigsimp) a_A = (a_A.to_matrix(B0)).applyfunc(trigsimp) r_B = (r_B.to_matrix(B0)).applyfunc(trigsimp) v_B = (v_B.to_matrix(B0)).applyfunc(trigsimp) a_B = (a_B.to_matrix(B0)).applyfunc(trigsimp) r_C = (r_C.to_matrix(B0)).applyfunc(trigsimp) v_C = (v_C.to_matrix(B0)).applyfunc(trigsimp) a_C = (a_C.to_matrix(B0)).applyfunc(trigsimp) ``` ```python # Resultados em LaTeX na forma de Vetores Coluna Latex("\\begin{align}" "r_{C}=&" + str(vlatex(r_C)) + "\\\\" + "\\\\" "v_{C}=&" + str(vlatex(v_C)) + "\\\\" + "\\\\" "a_{C}=& " + str(vlatex(a_C)) + "\\end{align}") ``` \begin{align} _{C}=&\left[\begin{matrix}\left(l_{1} \operatorname{cos}\left(\theta_{2}\right) + l_{2} \operatorname{cos}\left(\theta_{2} + \theta_{3}\right)\right) \operatorname{cos}\left(\theta_{1}\right)\\l_{1} \operatorname{sin}\left(\theta_{2}\right) + l_{2} \operatorname{sin}\left(\theta_{2} + \theta_{3}\right)\\- \left(l_{1} \operatorname{cos}\left(\theta_{2}\right) + l_{2} \operatorname{cos}\left(\theta_{2} + \theta_{3}\right)\right) \operatorname{sin}\left(\theta_{1}\right)\end{matrix}\right]\\\\v_{C}=&\left[\begin{matrix}- l_{1} \operatorname{sin}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2}\right) \dot{\theta}_{1} - l_{1} \operatorname{sin}\left(\theta_{2}\right) \operatorname{cos}\left(\theta_{1}\right) \dot{\theta}_{2} - l_{2} \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \operatorname{cos}\left(\theta_{1}\right) \dot{\theta}_{2} - l_{2} \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \operatorname{cos}\left(\theta_{1}\right) \dot{\theta}_{3} - l_{2} \operatorname{sin}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \dot{\theta}_{1}\\l_{1} \operatorname{cos}\left(\theta_{2}\right) \dot{\theta}_{2} + l_{2} \left(\dot{\theta}_{2} + \dot{\theta}_{3}\right) \operatorname{cos}\left(\theta_{2} + \theta_{3}\right)\\l_{1} \operatorname{sin}\left(\theta_{1}\right) \operatorname{sin}\left(\theta_{2}\right) \dot{\theta}_{2} - l_{1} \operatorname{cos}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2}\right) \dot{\theta}_{1} + l_{2} \left(\operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \operatorname{sin}\left(\theta_{1}\right) \dot{\theta}_{2} + \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \operatorname{sin}\left(\theta_{1}\right) \dot{\theta}_{3} - \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \operatorname{cos}\left(\theta_{1}\right) \dot{\theta}_{1}\right)\end{matrix}\right]\\\\a_{C}=& \left[\begin{matrix}2 l_{1} \operatorname{sin}\left(\theta_{1}\right) \operatorname{sin}\left(\theta_{2}\right) \dot{\theta}_{1} \dot{\theta}_{2} - l_{1} \operatorname{sin}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2}\right) \ddot{\theta}_{1} - l_{1} \operatorname{sin}\left(\theta_{2}\right) \operatorname{cos}\left(\theta_{1}\right) \ddot{\theta}_{2} - l_{1} \operatorname{cos}\left(\theta_{1}\right) 
\operatorname{cos}\left(\theta_{2}\right) \dot{\theta}_{1}^{2} - l_{1} \operatorname{cos}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2}\right) \dot{\theta}_{2}^{2} + l_{2} \left(2 \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \operatorname{sin}\left(\theta_{1}\right) \dot{\theta}_{1} \dot{\theta}_{2} + 2 \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \operatorname{sin}\left(\theta_{1}\right) \dot{\theta}_{1} \dot{\theta}_{3} - \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \operatorname{cos}\left(\theta_{1}\right) \ddot{\theta}_{2} - \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \operatorname{cos}\left(\theta_{1}\right) \ddot{\theta}_{3} - \operatorname{sin}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \ddot{\theta}_{1} - \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \operatorname{cos}\left(\theta_{1}\right) \dot{\theta}_{1}^{2} - \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \operatorname{cos}\left(\theta_{1}\right) \dot{\theta}_{2}^{2} - 2 \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \operatorname{cos}\left(\theta_{1}\right) \dot{\theta}_{2} \dot{\theta}_{3} - \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \operatorname{cos}\left(\theta_{1}\right) \dot{\theta}_{3}^{2}\right)\\- l_{1} \operatorname{sin}\left(\theta_{2}\right) \dot{\theta}_{2}^{2} + l_{1} \operatorname{cos}\left(\theta_{2}\right) \ddot{\theta}_{2} + l_{2} \left(- \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \dot{\theta}_{2}^{2} - 2 \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \dot{\theta}_{2} \dot{\theta}_{3} - \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \dot{\theta}_{3}^{2} + \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \ddot{\theta}_{2} + \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \ddot{\theta}_{3}\right)\\l_{1} \operatorname{sin}\left(\theta_{1}\right) \operatorname{sin}\left(\theta_{2}\right) \ddot{\theta}_{2} + l_{1} \operatorname{sin}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2}\right) \dot{\theta}_{1}^{2} + l_{1} \operatorname{sin}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2}\right) \dot{\theta}_{2}^{2} + 2 l_{1} \operatorname{sin}\left(\theta_{2}\right) \operatorname{cos}\left(\theta_{1}\right) \dot{\theta}_{1} \dot{\theta}_{2} - l_{1} \operatorname{cos}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2}\right) \ddot{\theta}_{1} + l_{2} \left(\operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \operatorname{sin}\left(\theta_{1}\right) \ddot{\theta}_{2} + \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \operatorname{sin}\left(\theta_{1}\right) \ddot{\theta}_{3} + 2 \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \operatorname{cos}\left(\theta_{1}\right) \dot{\theta}_{1} \dot{\theta}_{2} + 2 \operatorname{sin}\left(\theta_{2} + \theta_{3}\right) \operatorname{cos}\left(\theta_{1}\right) \dot{\theta}_{1} \dot{\theta}_{3} + \operatorname{sin}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \dot{\theta}_{1}^{2} + \operatorname{sin}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \dot{\theta}_{2}^{2} + 2 \operatorname{sin}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \dot{\theta}_{2} \dot{\theta}_{3} + \operatorname{sin}\left(\theta_{1}\right) \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \dot{\theta}_{3}^{2} - \operatorname{cos}\left(\theta_{2} + \theta_{3}\right) \operatorname{cos}\left(\theta_{1}\right) 
\ddot{\theta}_{1}\right)\end{matrix}\right]\end{align}
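Como verificação adicional (um esboço, assumindo que os referenciais `B0`...`B3` definidos acima ainda estão disponíveis), podemos confirmar que a matriz de rotação composta é simplesmente o produto das rotações intermediárias, $T_{B0B3} = T_{B0B1}\,T_{B1B2}\,T_{B2B3}$:

```python
from sympy import trigsimp

# produto das matrizes de rotação intermediárias
produto = B0.dcm(B1) * B1.dcm(B2) * B2.dcm(B3)

# a diferença em relação à matriz composta deve ser a matriz nula 3x3
diferenca = (produto - B0.dcm(B3)).applyfunc(trigsimp)
print(diferenca)
```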
93f55717fb947fafee2599caf6f9891deaf222ad
20,634
ipynb
Jupyter Notebook
examples/antropomorphic_robot/Notebook_Kinematics_3DoF-Antropomorphic_pt.ipynb
abhikamath/pydy
0d11df897c40178bb0ffd9caa9e25bccd1d8392a
[ "BSD-3-Clause" ]
298
2015-01-31T11:43:22.000Z
2022-03-15T02:18:21.000Z
examples/antropomorphic_robot/Notebook_Kinematics_3DoF-Antropomorphic_pt.ipynb
abhikamath/pydy
0d11df897c40178bb0ffd9caa9e25bccd1d8392a
[ "BSD-3-Clause" ]
359
2015-01-17T16:56:42.000Z
2022-02-08T05:27:08.000Z
examples/antropomorphic_robot/Notebook_Kinematics_3DoF-Antropomorphic_pt.ipynb
pydy/pydy
4a2c46faae44d06017b64335e48992ee8c53e1b6
[ "BSD-3-Clause" ]
109
2015-02-03T13:02:45.000Z
2021-12-21T12:57:21.000Z
69.241611
6,497
0.591887
true
5,616
Qwen/Qwen-72B
1. YES 2. YES
0.894789
0.817574
0.731557
__label__por_Latn
0.668361
0.537984
# Solving ODEs with the Euler integrator **Ordinary differential equations** ([ODE](http://mathworld.wolfram.com/OrdinaryDifferentialEquation.html)s) describe many phenomena in physics. They describe the changes of a **dependent variable** $y(t)$ as a function of a **single independent variable** (e.g. $t$ or $x$). - An ODE of order $n$ contains $\frac{d^n y}{dt^n}$ as the highest derivative. For example, **Newton's equations of motion** $$ F = m \frac{d^2 x(t)}{dt^2} $$ are second order ODEs. - An ODE of order $n$ requires $n$ initial conditions to uniquely determine a solution $y(t)$. For Newton: we need initial position $x(t=0)$ and velocity $v(t=0)$. - Linear ODEs contain no higher powers than 1 of any of the derivatives (including the 0-th derivative $y$ term). - Non-linear ODEs can contain any powers in the dependent variable and its derivatives. ## Integrating ODEs with Euler's algorithm First order ODE: $$ \frac{dy}{dt} = f(t, y) $$ Basic idea: 1. Start with initial conditions, $y_0 \equiv y(t=0)$ 2. Use $\frac{dy}{dt} = f(t, y)$ (the RHS!) to advance solution a small step $h$ forward in time: $y(t=h) \equiv y_1$ 3. Repeat with $y_1$ to obtain $y_2 \equiv y(t=2h)$... and for all future values of $t$. ### Euler's algorithm Use the forward difference approximation for the derivative: $$ f(t, y) = \frac{dy(t)}{dt} \approx \frac{y(t_{n+1}) - y(t_n)}{h} $$ Solve for the position in the future $y(t_{n+1})$, based on present *and known* values $y(t_n)$ and $f\big(t_n, y(t_n)\big)$: $$ y_{n+1} \approx y_n + h f(t_n, y_n) \quad \text{with} \quad y_n := y(t_n) $$ ### Convert 2nd order ODE to 2 coupled 1st order ODEs The 2nd order ODE is $$ \frac{d^2 y}{dt^2} = f(t, y) $$ Introduce "dummy" dependent variables $y_i$ with $y_0 \equiv y$ and \begin{alignat}{1} \frac{dy}{dt} &= \frac{dy_0}{dt} &= y_1\\ \frac{d^2y}{dt^2} &= \frac{dy_1}{dt} &= {} f(t, y_0). \end{alignat} The first equation defines the velocity $y_1 = v$ and the second one is the original ODE. ## Bouncing ball Problem: Integrate the equations of a bouncing ball under gravity * Drop from height $y_0 = 2$ within initial velocity $v_0 = 0$. * The ball bounces elastically off the ground at $y=0$. We have to solve the *second order ODE* (Newton's equations of motion with constant acceleration) $$ \frac{d^2 y}{dt^2} = -g. $$ The Euler scheme for any *first order ODE* $$ \frac{dy}{dt} = f(y, t) $$ is $$ y(t + h) = y(t) + h f(y(t), t). $$ In order to solve the original 2nd order equation of motion we make use of the fact that one $n$-th order ODE can be written as $n$ coupled first order ODEs, namely \begin{align} \frac{dy}{dt} &= v\\ \frac{dv}{dt} &= -g. \end{align} Solve each of the first order ODEs with Euler: \begin{align} y(t + h) &= y(t) + h v(t)\\ v(t + h) &= v(t) - h g. \end{align} ### Free fall Start with free fall as an even simpler problem. ```python import numpy as np ``` ```python # parameters g = -9.81 # initial conditions y = 2.0 v = 0.0 t = 0 dt = 0.01 # record initial conditions data = [[t, y, v]] # start at first step t = dt while t < 10: y = y + v*dt v = v + g*dt data.append([t, y, v]) t += dt data = np.array(data) ``` ```python data.shape ``` (1001, 3) Look at the first few values: ```python data[:4] ``` array([[ 0. , 2. , 0. ], [ 0.01 , 2. 
, -0.0981 ], [ 0.02 , 1.999019, -0.1962 ], [ 0.03 , 1.997057, -0.2943 ]]) To make it more convenient to get `t = data[:, 0]`, `y = data[:, 1]`, and `v = data[:, 2]` we use transposition to change the array from time x coordinates to coordinates x time and then use tuple assignment: ```python data = data.transpose() data.shape # t, y, v ``` (3, 1001) ```python t, y, v = data ``` Plot the trajectory $y(t)$ with matplotlib. (Using `t = data[0]` for time and `y = data[1]` for position, after the transposition!) ```python import matplotlib.pyplot as plt %matplotlib inline ``` ```python plt.plot(data[0], data[1]) plt.xlabel("time (s)") plt.ylabel("position (m)"); ``` ### Bouncing Add a floor at $y = 0$. What happens at the floor? – The velocity changes (elastic collision). ```python # parameters g = -9.81 y_floor = 0 # initial conditions y = 2.0 v = 0.0 t = 0 dt = 0.01 # record initial conditions data = [[t, y, v]] # start at first step t = dt while t < 10: y += v*dt if y > y_floor: v += g*dt else: v = -v # bounce off floor data.append([t, y, v]) t += dt data = np.array(data).transpose() ``` ```python plt.plot(data[0], data[1]) plt.xlabel("time (s)") plt.ylabel("position (m)"); ``` ## Summary: Euler integrator 1. If the order of the ODE > 1 then write the ODE as a coupled system of n first order ODEs. For Newton's EOM ($F = m\frac{d^2 x}{dt^2}$): \begin{align} \frac{dx}{dt} &= v\\ \frac{dv}{dt} &= m^{-1}F \end{align} Note that $F$ typically depends on $x$, e.g., $F(x) = -\frac{\partial U}{\partial x}$ when the force can be derived from a potential energy function $U(x)$. 2. Solve all first order ODEs with the forward Euler algorithm for time step $\Delta t$: \begin{align} x_{t+1} &= x_t + v_t \Delta t\\ v_{t+1} &= v_t + m^{-1} F(x_t) \Delta t \end{align} The time step of the Euler algorithm has to be chosen small because the per-step error in $x(t)$ goes like $\Delta t^2$ (the accumulated error only like $\Delta t$) – Euler is really a terrible algorithm but for this introductory class it is good enough. Better algorithms exist and are not much more difficult (see, e.g., PHY432 Computational Methods).
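As a compact illustration of the two-step scheme summarized above, here is a minimal sketch of a reusable Euler step (the force function and parameter values below are placeholders, not part of the original notebook):

```python
import numpy as np

def euler_step(x, v, F, m, dt):
    """One forward-Euler step for Newton's equations of motion."""
    x_new = x + v * dt            # x_{t+1} = x_t + v_t*dt
    v_new = v + F(x) / m * dt     # v_{t+1} = v_t + F(x_t)/m*dt
    return x_new, v_new

# example: constant gravitational force, as in the bouncing-ball problem
m, g = 1.0, 9.81
x, v = 2.0, 0.0
for _ in range(1000):
    x, v = euler_step(x, v, lambda x: -m * g, m, dt=0.01)
print(x, v)
```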
ae052ff25ed7549a469f8eb6c88a29b1363b84f4
57,018
ipynb
Jupyter Notebook
Module_5/euler_integrator.ipynb
Py4Phy/PHY202
ec3a0b0285f2601accfdbf0c30416e1351430342
[ "MIT" ]
2
2019-10-26T00:39:14.000Z
2019-10-29T19:35:20.000Z
Module_5/euler_integrator.ipynb
Py4Phy/PHY202
ec3a0b0285f2601accfdbf0c30416e1351430342
[ "MIT" ]
null
null
null
Module_5/euler_integrator.ipynb
Py4Phy/PHY202
ec3a0b0285f2601accfdbf0c30416e1351430342
[ "MIT" ]
null
null
null
98.476684
30,352
0.857764
true
1,887
Qwen/Qwen-72B
1. YES 2. YES
0.936285
0.888759
0.832132
__label__eng_Latn
0.958242
0.771653
disclaimer: To ensure that the notebook can be run from (more or less) any point, I try to load the relevant functions or modules whenever I use them in a cell. This is generally not good practice as it adds unneccesary overhead # 0. Image representation as numerical arrays ### We start by importing numpy and creating and printing a simple 9x9 checkerboard array ```python %matplotlib inline %load_ext autoreload %autoreload import numpy as np #make a 9x9 checkerboard checkBoard = # print(checkBoard) ``` ### Then, we import [pyplot](https://matplotlib.org/api/pyplot_api.html) and [image](https://matplotlib.org/api/image_api.html) modules from the ploting library [matplotlib](https://matplotlib.org/3.1.1/api/index.html). Using it, we can display our checkerboard array in image form: ```python import matplotlib.pyplot as plt import matplotlib.image as mpimg plt.imshow(checkBoard, cmap='gray', interpolation='nearest') plt.show() ``` ### As another simple example, we will import the [data](https://scikit-image.org/docs/dev/api/skimage.data.html) module image processing library [scikit-image](https://scikit-image.org/) and load a small image of a bush. #### First, we want to print the pixel values: ```python from skimage import data image_of_a_bush = data.lfw_subset() image_of_a_bush = image_of_a_bush[0,:,:] #print the #of dimentions, the shape, and the pixel values of the image print("The number of dimensions of the image is: ", image_of_a_bush.ndim) print("The size of the image is: ", image_of_a_bush.shape) print(image_of_a_bush) ``` ### Can you see the bush? #### Next, show the image: ```python plt.figure(figsize=(1,1)) # display the image plt.# ``` # 1. Pixel-level operations ### Now that we have a sense of what a digital image is, let's start manipulating it. We'll begin with simple pixel-level operations ## 1.1 Basic pixel-level operations ### Let's look at a more interesting image. From scikit-image data we'll open a example IHC image, and plot it using pyplot. ```python from skimage import data import matplotlib.pyplot as plt import numpy as np image_hist = data.immunohistochemistry() #check the size of the image print("The number of dimensions of the image is: ", image_hist.ndim) print("The size of the image is: ", image_hist.shape) plt.imshow(image_hist, cmap=plt.cm.gray) ``` ### Seems like we have an RGB image. Let's look at every channel independently. ```python plt.figure(figsize=(15,5)) plt.subplot(131) plt.gca().set_title('Red channel') plt.imshow(, cmap='Reds', interpolation='nearest') plt.subplot(132) plt.gca().set_title('Green channel') plt.imshow(, cmap='Greens', interpolation='nearest') plt.subplot(133) plt.gca().set_title('Blue channel') plt.imshow(, cmap='Blues', interpolation='nearest') plt.show() ``` ```python #for the moment let's look at only the first color channel image_hist = image_hist[:,:,0] plt.gca().set_title('First channel') plt.imshow(image_hist, cmap=plt.cm.gray) ``` ### We can invert the image using the *invert* function from [scikit-images utilities module](https://scikit-image.org/docs/dev/api/skimage.util.html): ```python from skimage.util import invert inverted_image = # plt.figure(figsize=(15,5)) plt.subplot(121) plt.gca().set_title('original image') plt.imshow(image_hist, cmap=plt.cm.gray) plt.subplot(122) plt.gca().set_title('inverted image') plt.imshow(inverted_image, cmap=plt.cm.gray) ``` ### Let's try some other pixel-level operations. 
We'll use the [Exposure module](https://scikit-image.org/docs/dev/api/skimage.exposure.html#skimage.exposure.adjust_sigmoid) from scikit image. 1. A gamma correction applies the nonlinear transform $V_{out} = V_{in}^\gamma$. 2. A log transform applies $V_{out} = log(V_{in}+1)$. 3. A sigmoid transform applies $V_{out} = \frac{1}{1+e^{gain\cdot(\text{cutoff}-V_{in})}}$. 4. Equalization transforms the intensity histogram of an image to a uniform distribution. It often enhances the contrast of the image 5. Contrast Limited Adaptive Histogram Equalization (CLAHE) works similarly to equalization thats applied separately to different regions of the image. Try to apply these by calling the relevant function from skimage.exposure, or by direct calculation. Play with the different parameters and see how they change the output. ```python from skimage import exposure # apply gamma scaling with gamma=2 gamma= gamma_corrected = # apply logarithmic scaling logarithmic_corrected = # apply sigmoidal scaling with cutoff=0.4 cutoff = sigmoid_corrected = # equalize equalize_corrected = # apply Contrast Limited Adaptive Histogram Equalization (CLAHE) CLHA_corrected = plt.figure(figsize=(15,10)) plt.subplot(231) plt.gca().set_title('original') plt.imshow(image_hist, cmap=plt.cm.gray) plt.subplot(232) plt.gca().set_title('gamma corrected') plt.imshow(gamma_corrected, cmap=plt.cm.gray) plt.subplot(233) plt.gca().set_title('log corrected') plt.imshow(logarithmic_corrected, cmap=plt.cm.gray) plt.subplot(234) plt.gca().set_title('sigmoid') plt.imshow(sigmoid_corrected, cmap=plt.cm.gray) plt.subplot(235) plt.gca().set_title('equalized') plt.imshow(equalize_corrected, cmap=plt.cm.gray) plt.subplot(236) plt.gca().set_title('CLHA corrected') plt.imshow(CLHA_corrected, cmap=plt.cm.gray) ``` ## 1.2 Image filtering ### Spatial filtering is an image processing technique for changing the intensities of a pixel according to the intensities of some neighborhood of pixels. ### The *Kernel* of the filter defines the neighborhood and the weights asigned to each pixel in the neighborhood: This procedure is formally a convolution and is marked by an asterisk: $I_o = I_i\ast f$. *side note: since a convolution in the spatial domain is equivalent to multiplication in the frequency domain. Sometimes it is more computationally reasonable to calculate these in fourier space.* *side side note: filtering can also be performed in the frequency domain by directly removing a set of frequencies from an image.* ### The kernel can be of any shape/size, it is applied to each pixel in the image, and the output is a new, filtered, image. The output image is often called the *response* to the given filter. Example, local average: #### Filtering is an incredibly versatile tool with which you can emphasize certain features or remove other features. #### Image processing operations implemented with filtering include smoothing, sharpening, and edge enhancement. ### To implement different image filters, we will use the [filters module from scikit-image](https://scikit-image.org/docs/dev/api/skimage.filters.html) ### 1.2.1 Smoothing #### Smoothing, aka low-pass filtering, is used for removing high-frequency noise from images. Most commonly, a gaussian kernel is used, but others (e.g. local mean/median) work too. We'll see the effect of gaussian filtering. Try to change the value of sigma (width of the gaussian) to see how the output changes. 
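Before moving on to smoothing, here is one possible way to fill in the blanks of the exposure-adjustment cell above (a sketch; the parameter values are simply the ones suggested in the code comments):

```python
from skimage import exposure

# possible completions for the exercise cell above
gamma_corrected       = exposure.adjust_gamma(image_hist, gamma=2)
logarithmic_corrected = exposure.adjust_log(image_hist)
sigmoid_corrected     = exposure.adjust_sigmoid(image_hist, cutoff=0.4)
equalize_corrected    = exposure.equalize_hist(image_hist)
CLHA_corrected        = exposure.equalize_adapthist(image_hist)
```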
```python import matplotlib.pyplot as plt import numpy as np from skimage import filters image_hist = data.immunohistochemistry() sigma = 2 gauss_filtered_img = plt.figure(figsize=(15,8)) plt.subplot(121) plt.gca().set_title('original image') plt.imshow(image_hist, cmap=plt.cm.gray) plt.subplot(122) plt.gca().set_title('response, gaussian smoothing') plt.imshow(gauss_filtered_img, cmap=plt.cm.gray) ``` ### 1.2.2 Sharpening #### sharpening is sometimes used to enhance a blurry (i.e. crappy) image. 1. Start with input image 2. Apply gaussian filter with very narrow kernel 3. Subtract filtered image from input image to get only high frequency components 3. Amplify (alpha) and add high frequency components to original input image ```python filter_blurred_f = filters.gaussian(gauss_filtered_img, sigma=0.5, multichannel=False) alpha = 3 sharpened = gauss_filtered_img + alpha * (gauss_filtered_img - filter_blurred_f) plt.figure(figsize=(15,8)) plt.subplot(121) plt.gca().set_title('input - blury image') plt.imshow(gauss_filtered_img, cmap=plt.cm.gray) plt.subplot(122) plt.gca().set_title('sharpened') plt.imshow(sharpened, cmap=plt.cm.gray) ``` #### Direct application of edge detectors often results in somewhat noisy responses. A way to overcome this is by first smoothing the image with a gaussian filter and then applying the edge filter. The width of the gaussian kernel will determine the size of edges the filter detects. ### 1.2.3 Edge enhancement #### Edge detecting filters work by measuring the local spatial gradient of an image. Common types are the Sobel, Prewitt and Roberts. #### The filters are usually applied to each direction individually and then the total magnitude of the gradient is calculated. $|\nabla| = \sqrt{\nabla_x^2+\nabla_y^2}$ #### Sobel: <br><br><br><br> #### Prewitt: <br><br><br><br> #### Roberts: ```python from skimage import data image_hist = data.immunohistochemistry() image_hist = image_hist[:,:,2] # sobel magnitude filtered_img = # sobel horizontal filtered_img_h = # sobel vertical filtered_img_v = plt.figure(figsize=(15,16)) plt.subplot(221) plt.gca().set_title('input image') plt.imshow(image_hist[:,:,2], cmap=plt.cm.gray) plt.subplot(222) plt.gca().set_title('sobel filter response - magnitude') plt.imshow(filtered_img, cmap=plt.cm.gray) plt.subplot(223) plt.gca().set_title('sobel filter response - horizontal edges') plt.imshow(np.abs(filtered_img_h), cmap=plt.cm.gray) plt.subplot(224) plt.gca().set_title('sobel filter response - vertical edges') plt.imshow(np.abs(filtered_img_v), cmap=plt.cm.gray) ``` #### Direct application of edge detectors often results in somewhat noisy responses. A way to overcome this is by first smoothing the image with a gaussian filter and then applying the edge filter. The width of the gaussian kernel will determine the size of edges the filter detects. 
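The kernel figures referenced above (Sobel, Prewitt, Roberts) are not reproduced here. For reference, the standard kernels for one of the two gradient directions are shown below; these arrays are the textbook definitions, not taken from the notebook:

```python
import numpy as np

# standard edge-detection kernels, one gradient direction each
sobel_x   = np.array([[ 1, 0, -1],
                      [ 2, 0, -2],
                      [ 1, 0, -1]])
prewitt_x = np.array([[ 1, 0, -1],
                      [ 1, 0, -1],
                      [ 1, 0, -1]])
roberts_1 = np.array([[ 1,  0],
                      [ 0, -1]])
```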
```python from skimage import data from skimage import feature image_hist = data.immunohistochemistry() image_hist = image_hist[:,:,2] # sobel magnitude filtered_img = filters.sobel(image_hist) #apply a gaussian filter, followed by a sobel filter with sigma=3 sigma= DoG_img = plt.figure(figsize=(15,16)) plt.subplot(221) plt.gca().set_title('input image') plt.imshow(image_hist, cmap=plt.cm.gray) plt.subplot(223) plt.gca().set_title('sobel filter response - magnitude') plt.imshow(filtered_img, cmap=plt.cm.gray) plt.subplot(224) plt.gca().set_title('DoG filter response - magnitude') plt.imshow(DoG_img, cmap=plt.cm.gray) ``` ### 1.2.4 Gaussian derivatives Now let's remember that the application of filters is an associative operation (because convolution is linear!). It's equivalent to apply the gaussian and then the gradient filter to the image and to apply the gradient filter to the gaussian, then apply the result to the image, i.e. $\nabla*(G*I) = (\nabla*G)*I$ where $\nabla$ is the gradient (derivative) filter, $G$ is the gaussian filter, and $I$ is the image. We can generalize this idea and apply the derivative multiple times, to get a family of interesting filters: $\nabla^n*G$: <br><br> Notice that all of the even orders are edge detectors, while all the odd orders are ridge detectors! ### Let's use a second order derivative (aka a Laplacian) and make a ridge detector: #### We'll look at a retinal photo where vasculature is an interesting feature. We invert the image so that the vasculature appears as bright lines on a dark background ```python import matplotlib.pyplot as plt import numpy as np from skimage import img_as_float import matplotlib.image as mpimg from skimage.color import rgb2gray from skimage.util import invert #this is how we load an image from the hard drive image_retina = ((img_as_float(mpimg.imread("../Data/RetinalPhoto.png")))) image_ridges = invert(rgb2gray(image_retina)) plt.figure(figsize=(15,5)) plt.subplot(121) plt.gca().set_title('retinal photo') plt.imshow(image_retina, cmap=plt.cm.gray) plt.subplot(122) plt.gca().set_title('inverted grayscale image') plt.imshow(image_ridges, cmap=plt.cm.gray) ``` **Apply a gaussian filter, followed by a second order derivative (laplacian).** ```python sigma=3 LoG_ridges = plt.imshow(LoG_ridges,cmap='gray') plt.gca().set_title('retinal photo - response to LoG') ``` **This commonly used ridge detector is called a Laplacian of Gaussian (LoG)!** (We'll get back to this image in a bit) ## 1.3 Masking ### A mask is a binary image (0s and 1s) that typically separates a given input image into Foreground (interesting) and Background (boring) regions, or for picking a region-of-interest (ROI). A mask is *applied* to an image by element-wise multiplication. The size of a mask must be *identical* to the size of the image it's applied to. ### Let's begin by creating a simple circular mask. 
We'll create an array where the value at each point is its distance from the center of the image, and display it as an image: ```python import matplotlib.pyplot as plt import numpy as np #dimensions in x and y y = 512 x = 512 #position of center centY = np.ceil(y/2) centX = np.ceil(x/2) #create the grid yy,xx = np.indices((y,x)) #create radial distance map radialDist = #display plt.gca().set_title('Radial distance') plt.imshow(radialDist, cmap='gray', interpolation='nearest') plt.show() ``` ### Of these points, we'll pick a circle of radius 100 and display it as an image: ```python circ1 = plt.show() plt.gca().set_title('Circle with radius 100') plt.imshow(circ1, cmap='inferno', interpolation='nearest') ``` #### This object is a **mask**. If you multiply this matrix of 0s and 1s with an image of the same size, only the parts that are ==1 will remain ### Let's apply this mask to our histology image. ```python plt.gca().set_title('Masked first channel') plt.imshow(, cmap=plt.cm.gray) ``` ### What happens if we invert the mask? ```python inverted_mask = plt.figure(figsize=(15,5)) plt.subplot(121) plt.gca().set_title('inverted mask') plt.imshow(inverted_mask, cmap=plt.cm.gray) plt.subplot(122) plt.gca().set_title('inverted masked image') plt.imshow(image_hist[:,:,2]*, cmap=plt.cm.gray) ``` **Just for closure, let's see what happens when we look at the full RGB image and try to apply the mask** ```python image = data.immunohistochemistry() masked_image = image*circ1 plt.imshow(masked_image, cmap=plt.cm.gray) ``` **Whoops. Seems like something is wrong. Our problem is that numpy didn't know how to multiply a 512x512x3 with a 512x512 mask. Numpy makes solving this very easy by adding a singleton dimension (look up broadcasting in your spare time).** ```python image = data.immunohistochemistry() plt.gca().set_title('Masked image') masked_image = image*np.expand_dims(circ1,2) plt.imshow(masked_image, cmap=plt.cm.gray) ``` ## 1.4 Thresholding ## 1.4.1 Simple thresholding ### Thresholding an image is the process of setting an intensity (or intensities) for separating the different components of an image. #### In the simplest case, the foreground and background have very different intensities. In that case thresholding is just clustering pixels by their intensity levels. ```python #this function from skimage converts images of integer types into floats, which are easier to work with. import matplotlib.pyplot as plt import numpy as np from skimage import img_as_float from skimage import data # First, let's create a noisy image of blobs image_blobs = img_as_float(data.binary_blobs(length=512, seed=1)) sigma = 0.22 image_blobs += np.random.normal(loc=0, scale=sigma, size=image_blobs.shape) print("The number of dimensions of the image is: ", image_blobs.ndim) print("The size of the image is: ", image_blobs.shape) plt.imshow(image_blobs, cmap=plt.cm.gray) ``` ### To find the right threshold, let's examine a histogram of pixel intensity values ```python plt.hist(image_blobs.flatten(),bins=250) plt.show() ``` Pick an appropriate threshold, by eye, and see if you can remove the background. What happens when you increase or decrease the threshold?
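If you get stuck on the blank cell that follows, here is one possible completion (the threshold value of 0.5 is just a by-eye guess: the noise-free blobs have values 0 and 1, so the histogram above has peaks near those values):

```python
# one possible completion of the exercise cell below (sketch)
thresh = 0.5                        # picked by eye from the histogram
mask = image_blobs > thresh         # boolean foreground mask
masked_image = image_blobs * mask   # keep only the foreground pixels
```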
```python thresh = mask = masked_image = plt.figure(figsize=(15,5)) plt.subplot(131) plt.gca().set_title('original') plt.imshow(image_blobs, interpolation='nearest', cmap=plt.cm.gray) plt.subplot(132) plt.gca().set_title('mask') plt.imshow(mask, interpolation='nearest', cmap=plt.cm.gray) plt.subplot(133) plt.gca().set_title('masked image') plt.imshow(masked_image, interpolation='nearest', cmap=plt.cm.gray) ``` ### Our mask looks ok, but it has a lot of salt & pepper speckle noise. Why is that? We can try and use what we learned before about filtering to clean up our results. What filter should we use? ```python from skimage import filters thresh = mask = masked_image = plt.figure(figsize=(15,5)) plt.subplot(131) plt.gca().set_title('original') plt.imshow(image_blobs, interpolation='nearest', cmap=plt.cm.gray) plt.subplot(132) plt.gca().set_title('mask') plt.imshow(mask, interpolation='nearest', cmap=plt.cm.gray) plt.subplot(133) plt.gca().set_title('masked image') plt.imshow(masked_image, interpolation='nearest', cmap=plt.cm.gray) ``` It's usually a good idea before creating a mask to despeckle an image using a narrow gaussian filter! ## 1.4.2 Morphological operations Morphology is a broad set of image processing operations that process images based on shapes. In a morphological operation, each pixel in the image is adjusted based on the value of other pixels in its neighborhood. By choosing the size and shape of the neighborhood, you can construct a morphological operation that is sensitive to specific shapes in the input image. (explanation from Mathworks) Morphological operations are based around a *structuring element*, which is a small binary image, often of a disk or a square. The structuring element is positioned at all possible locations in the image and it is compared with the corresponding neighbourhood of pixels. Some operations test whether the element "fits" within the neighbourhood, while others test whether it "hits" or intersects the neighbourhood. 
Common operations for image processing:

Erosion - output image = 1 wherever the structuring element **fits** (erodes the mask)
Dilation - output image = 1 wherever the structuring element **hits** (expands the mask)
Opening - Erosion followed by dilation (opens gaps in spots where the mask is weakly connected)
Closing - Dilation followed by erosion (closes holes in the mask)

A very thorough explanation of morphological operations could be found [here](https://www.cs.auckland.ac.nz/courses/compsci773s1c/lectures/ImageProcessing-html/topic4.htm)

```python
from skimage.morphology import erosion, dilation, opening, closing
from skimage.morphology import disk

#define a "disk" structuring element with radius 10
selem = 

#apply erosion, dilation, opening, and closing
erosion_mask = 
dilation_mask = 
opening_mask = 
closing_mask = 

plt.figure(figsize=(15,10))
plt.subplot(231)
plt.gca().set_title('mask')
plt.imshow(mask, interpolation='nearest', cmap=plt.cm.gray)
plt.subplot(232)
plt.gca().set_title('erosion')
plt.imshow(erosion_mask, interpolation='nearest', cmap=plt.cm.gray)
plt.subplot(233)
plt.gca().set_title('dilation')
plt.imshow(dilation_mask, interpolation='nearest', cmap=plt.cm.gray)
plt.subplot(235)
plt.gca().set_title('opening')
plt.imshow(opening_mask, interpolation='nearest', cmap=plt.cm.gray)
plt.subplot(236)
plt.gca().set_title('closing')
plt.imshow(closing_mask, interpolation='nearest', cmap=plt.cm.gray)
```

## 1.4.3 Masking actual data

### We'll repeat the thresholding procedure using an actual microscopy image of fluorescent nuclei

```python
import matplotlib.pyplot as plt
import numpy as np
from skimage import img_as_float
import matplotlib.image as mpimg

#this is how we load an image from the hard drive
image_nuclei = img_as_float(mpimg.imread("../Data/xy040-1.png"))

fig = plt.figure(num=None, figsize=(7.1, 4.6), dpi=80, facecolor='w', edgecolor='k')
print("The number of dimensions of the image is: ", image_nuclei.ndim)
print("The size of the image is: ", image_nuclei.shape)
plt.imshow(image_nuclei, cmap=plt.cm.gray, vmin=0, vmax=0.01)
```

### Again, let's plot a histogram of intensity values

```python
plt.
plt.xlim((0, 0.02))
plt.show()
```

### And again we'll pick a value by eye:

```python
thresh = 
#remember to despeckle before creating a mask!
mask = 

plt.figure(figsize=(8,15))
plt.subplot(311)
plt.gca().set_title('original')
plt.imshow(image_nuclei, interpolation='nearest', cmap=plt.cm.gray, vmin=0, vmax=0.01)
plt.subplot(312)
plt.gca().set_title('mask')
plt.imshow(mask, interpolation='nearest', cmap=plt.cm.gray)
plt.subplot(313)
plt.gca().set_title('masked image')
plt.imshow(image_nuclei*mask, interpolation='nearest', cmap=plt.cm.gray, vmin=0, vmax=0.01)
```

### Not bad! But also not very scalable. If we have 100s of images we can't look at them one-by-one and find thresholds by eye. Next, we'll look at some methods for automatically finding the thresholds.

## 1.5 Automated threshold calculation

### There is a very large list of algorithms for threshold calculation out there that are optimized for different situations. We will briefly review a few of the most common ones.

### 1.5.1 Iterative mean thresholding

Algorithm:
1. Start with some threshold $T_i$
2. Compute the means $m_0$ and $m_1$ of the BG and FG
3. Update $T_{i+1} = \frac{m_0+m_1}{2}$
4. Repeat until it converges

### 1.5.2 Otsu thresholding

The algorithm exhaustively searches for the threshold that minimizes the intra-class variance, defined for a given threshold $T$ as a weighted sum of variances of the two classes:

$\sigma^2_w(T)=\omega_0(T)\sigma^2_0(T)+\omega_1(T)\sigma^2_1(T)$

For 2 classes, minimizing the intra-class variance is equivalent to maximizing the inter-class variance, which is much easier to calculate:

\begin{align}
\sigma^2_b(T) & =\sigma^2-\sigma^2_w(T)=\omega_0(\mu_0-\mu_T)^2+\omega_1(\mu_1-\mu_T)^2 \\
& =\omega_0(T) \omega_1(T) \left[\mu_0(T)-\mu_1(T)\right]^2
\end{align}

### 1.5.3 Triangle thresholding

Algorithm:
1. Draw a straight line between the histogram peak and the brightest value.
2. From every point on that line, draw the shortest connecting line to the histogram.
3. Find the longest of these connecting lines.
4. The threshold is set at the intersection of that line and the curve.

*note: Triangle thresholding is good for situations where the image is mostly background, and there is no clear "peak" of bright pixels.*

### [scikit-image's filters module](https://scikit-image.org/docs/dev/api/skimage.filters.html) implements a large variety of thresholding algorithms. Let's apply the ones we just learned about.

```python
from skimage import filters

#calculate iterative mean threshold
meanThresh = 
print(meanThresh)

#calculate otsu threshold
OtsuThresh = 
print(OtsuThresh)

#calculate triangle
TriThresh = 
print(TriThresh)
```

### Let's look at the resulting masks we get with each of these thresholds

```python
fig = plt.figure(num=None, figsize=(12, 8), dpi=80)

ax1 = fig.add_axes([0.1,0.6,0.4,0.4])
ax1.hist(image_nuclei.flatten(),bins=250)
ax1.axvline(meanThresh, color='g', linestyle='--')
ax1.axvline(OtsuThresh, color='r', linestyle='--')
ax1.axvline(TriThresh, color='k', linestyle='--')
ax1.legend(['mean', 'otsu', 'triangle'])
ax1.set_title('histogram')

ax2 = fig.add_axes([0.6,0.6,0.4,0.4])
#get iterative mean mask (remember to despeckle)
mask_mean = 
ax2.imshow(mask_mean)
ax2.set_title('Iterative mean')
ax2.set_axis_off()

ax2 = fig.add_axes([0.1,0.1,0.4,0.4])
#get otsu mask
mask_otsu = 
ax2.imshow(mask_otsu)
ax2.set_title('Otsu')
ax2.set_axis_off()

ax2 = fig.add_axes([0.6,0.1,0.4,0.4])
#get triangle mask
mask_tri = 
ax2.imshow(mask_tri)
ax2.set_title('Triangle')
ax2.set_axis_off()
```

## Let's briefly look back at the retinal images and try to mask only the vasculature:

**as before, load the image, invert, and apply LoG filter:**

```python
import matplotlib.pyplot as plt
import numpy as np
from skimage import img_as_float
import matplotlib.image as mpimg
from skimage.color import rgb2gray
from skimage.util import invert
from skimage import filters

#this is how we load an image from the hard drive
image_ridges = invert(rgb2gray(img_as_float(mpimg.imread("../Data/RetinalPhoto.png"))))
plt.imshow(image_ridges, cmap=plt.cm.gray)
plt.gca().set_title('retinal photo')

sigma = 3
LoG_ridges = filters.laplace(filters.gaussian(image_ridges, sigma=sigma))
```

**Now, let's do the same procedure of automatically finding thresholds:**

```python
from skimage import filters

meanThresh = filters.threshold_mean(LoG_ridges)
print(meanThresh)
OtsuThresh = filters.threshold_otsu(LoG_ridges)
print(OtsuThresh)
TriThresh = filters.threshold_triangle(LoG_ridges)
print(TriThresh)

fig = plt.figure(num=None, figsize=(12, 8), dpi=80)

ax1 = fig.add_axes([0.1,0.6,0.4,0.4])
#histogram of the LoG-filtered image (the thresholds above were computed from it)
ax1.hist(LoG_ridges.flatten(),bins=250)
ax1.axvline(meanThresh, color='g', linestyle='--')
ax1.axvline(OtsuThresh, color='r', linestyle='--')
ax1.axvline(TriThresh, color='k', linestyle='--') ax1.legend(['mean' ,'otsu', 'triangle']) ax1.set_title('histogram') ax1 = fig.add_axes([0.2,0.65,0.3,0.3]) plt.imshow(image_ridges, cmap=plt.cm.gray) ax1.set_axis_off() ax2 = fig.add_axes([0.6,0.6,0.4,0.4]) mask_mean = ax2.imshow(mask_mean) ax2.set_title('Iterative mean') ax2.set_axis_off() ax2 = fig.add_axes([0.1,0.1,0.4,0.4]) mask_otsu = ax2.imshow(mask_otsu) ax2.set_title('Otsu') ax2.set_axis_off() ax2 = fig.add_axes([0.6,0.1,0.4,0.4]) mask_tri = ax2.imshow(mask_tri) ax2.set_title('Triangle') ax2.set_axis_off() ``` **Play around with the width of the gaussian. Which thresholding algorithm works best in this case?** ### 1.5.4 Local thresholding #### All of the methods we saw so far are *global* in the sense that the same threshold is applied to the whole picture. Sometimes we can have an image with vastly different intensity distributions at different locations. Using local thresholding, we can overcome such cases. Let's compare the results from a global (Otsu) and a local threshold. ```python from skimage import data image = data.page() fig = plt.figure(num=None, figsize=(12, 8), dpi=80) #global thresholding threshGlobal = filters.threshold_otsu(image) ax1 = fig.add_axes([0.1,0.6,0.4,0.4]) ax1.set_title('mask - Otsu threshold') plt.imshow(image ,cmap='gray') ax2 = fig.add_axes([0.6,0.6,0.4,0.4]) ax2.set_title('mask - Otsu threshold') plt.imshow(image>threshGlobal,cmap='gray') #local thresholding #Try and change this number and see what happens block_size = 81 #calculate local threshold map threshLocal = ax1 = fig.add_axes([0.1,0.2,0.4,0.4]) ax1.imshow(threshLocal,cmap='gray') ax1.set_title('local threshold map') ax2 = fig.add_axes([0.6,0.2,0.4,0.4]) ax2.set_title('mask - Local threshold') plt.imshow(image>threshLocal,cmap='gray') ``` # 2. Image segmentation ### Image segmentation is the process of partitioning a digital image into multiple segments. The goal of segmentation is to simplify and/or change the representation of an image into something that is more meaningful and easier to analyze. ## 2.1 Connected components ### After we generate a mask, the simplest segmentation is achieved by taking regions in the mask that are connected and labeling each one as a separate object. #### We begin by generating a simple mask using the triangle threshold method: ```python import matplotlib.pyplot as plt import numpy as np from skimage import img_as_float import matplotlib.image as mpimg from skimage import filters image_nuclei = img_as_float(mpimg.imread("../Data/xy040-1.png")) TriThresh = filters.threshold_triangle(image_nuclei) #despeckle mask = filters.gaussian(image_nuclei, sigma=1)>TriThresh ``` ### [scikit-image's measure module](https://scikit-image.org/docs/dev/api/skimage.measure.html) implements a variety of useful methods for segmentation. the *label* function returns a *labeled image* of connected components (CCs). Each CC is uniquely numbered by an integer. 
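As a small added illustration of what `measure.label` returns (the labeled matrix shown just below is exactly this kind of output), consider a toy binary mask:

```python
import numpy as np
from skimage import measure

toy_mask = np.array([[1, 1, 0, 0, 1],
                     [1, 1, 0, 1, 1],
                     [0, 0, 0, 0, 0],
                     [0, 1, 0, 1, 1],
                     [0, 0, 0, 1, 1]])

# Each connected group of 1s receives its own integer label, background stays 0.
# (The exact numbering depends on the scan order and connectivity.)
print(measure.label(toy_mask))
```

With the default connectivity this reproduces the four labeled components shown in the matrix below.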
$\begin{bmatrix} 1 & 1 & 0 & 0 & 2\\ 1 & 1 & 0 & 2 & 2\\ 0 & 0 & 0 & 0 & 0\\ 0 & 3 & 0 & 4 & 4\\ 0 & 0 & 0 & 4 & 4\\ \end{bmatrix}$ ```python from skimage import measure #generate a labeled matrix of connected components labels = plt.figure(figsize=(12,5)) plt.subplot(121) plt.imshow(mask, cmap='gray') plt.subplot(122) plt.imshow(labels, cmap='nipy_spectral') ``` #### We can easily generate a *mask* for a **specific** CC using the binary operation *labels==i* ```python i=43 mask_of_CC_i = plt.figure(figsize=(10,5)) plt.imshow(mask_of_CC_i, cmap='gray') ``` ## Problem with simple CC segmentation : overlapping objects ### We often really care about having only a single object per label. Using CC, any overlapping objects will merge into one blob: ```python i=87 mask_of_CC_i = labels==i plt.imshow(mask_of_CC_i, cmap='gray') ``` ### These problems can be partially resolved using morphological operations, but there's no silver bullet ```python from skimage.morphology import erosion, dilation, opening, closing from skimage.morphology import disk #define a "disk" structuring element selem1 = disk(10) selem2 = disk(7) plt.figure(figsize=(15,10)) plt.subplot(121) plt.gca().set_title('original') plt.imshow(mask, cmap='nipy_spectral') plt.subplot(122) plt.gca().set_title('opening') plt.imshow(dilation(erosion(mask, selem1),selem2), interpolation='nearest', cmap='nipy_spectral') ``` ## 2.2 Watershed based segmentation ### 2.2.1 The watershed algorithm The watershed transformation treats the image it operates upon like a topographic map, with the brightness of each point representing its height, and finds the lines that run along the tops of ridges. More precisely, the algorithm goes as follows: 1. *Label* local minima (i.e. $S_1$, $S_2$) 2. Move to next higher intensity level 3. Assign to each point the label of it's closest label set. **<font color='red'>By passing the argument *watershed_line = 1* Points equidistant to multiple sets are labeled as boundaries and intensities set to 0</font>** 4. Repeat until all points are labeled Let's start with a very naive application. We will invert the image, and then simply apply the *watershed* function from the [scikit-image morphology module](https://scikit-image.org/docs/dev/api/skimage.morphology). The function returns a labeled image. We'll plot the edges of that image using a Sobel filter. ```python import matplotlib.pyplot as plt import numpy as np from skimage import img_as_float import matplotlib.image as mpimg from skimage import filters from skimage.util import invert from skimage.morphology import watershed image_nuclei = img_as_float(mpimg.imread("../Data/xy040-1.png")) #invert image image_to_watershed = #Calculate watershed transform, remember to pass the watershed_line = 1 argument labels_naive = plt.figure(figsize=(15,10)) #let's look at all the boundaries plt.figure(figsize=(12,20)) plt.subplot(211) plt.gca().set_title('image fed to watershed') plt.imshow(image_to_watershed, cmap='gray') plt.subplot(212) plt.gca().set_title('watershed result') plt.imshow(filters.sobel(labels_naive), cmap='nipy_spectral') ``` ### So this clearly didn't work. Why? How do we fix it? Noise generates a ton of local minima. Each gets its own basin. This leads to massive oversegmentation. #### Watershed segmentation is only a *part* of a segmentation pipeline. Preprocessing (denoising, smoothing, seeding minima) of the image is CRUCIAL for it to work well. The first thing we'll do is to apply the mask that we found before. 
This is simply done by adding a *mask* argument to the watershed function. ```python import matplotlib.pyplot as plt import numpy as np from skimage import img_as_float import matplotlib.image as mpimg from skimage import filters from skimage.util import invert from skimage.morphology import watershed image_nuclei = img_as_float(mpimg.imread("../Data/xy040-1.png")) #calculate mask mask = filters.gaussian(image_nuclei, sigma=1)>TriThresh #apply mask and invert image masked image = inverted_masked_image = image_to_watershed = inverted_masked_image #Calculate watershed transform #Now, also pass the mask to the watershed function so it avoids segmenting the BG labels_masked = #let's look at all the boundaries plt.figure(figsize=(12,20)) plt.subplot(211) plt.gca().set_title('image fed to watershed') plt.imshow(image_to_watershed, cmap='gray') plt.subplot(212) plt.gca().set_title('watershed result') plt.imshow(filters.sobel(labels_masked), cmap='nipy_spectral') ``` #### So we got rid of all the BG regions, but we are still oversegmenting. Why? Let's try to smoothen the image and get rid of the many local minima. How wide should the gaussian kernel be? ```python import matplotlib.pyplot as plt import numpy as np from skimage import img_as_float import matplotlib.image as mpimg from skimage import filters from skimage.util import invert from skimage.morphology import watershed image_nuclei = img_as_float(mpimg.imread("../Data/xy040-1.png")) #Calculate mask mask = filters.gaussian(image_nuclei, sigma=1)>TriThresh #mask, smooth, and invert the image masked_image = image_nuclei*mask sigma_for_smoothing = smoothed_masked_image = inverted_smoothed_masked_image = invert(smoothed_masked_image) image_to_watershed = inverted_smoothed_masked_image #Calculate watershed transform #pass the mask to the watershed function so it avoids segmenting the BG labels_masked_smooth = watershed(image_to_watershed, watershed_line = 1, mask=mask) #let's look at all the boundaries plt.figure(figsize=(12,20)) plt.subplot(211) plt.gca().set_title('image fed to watershed') plt.imshow(image_to_watershed, cmap='gray') plt.subplot(212) plt.gca().set_title('watershed result') plt.imshow(filters.sobel(labels_masked_smooth), cmap='nipy_spectral') ``` ### We're starting to get somewhere!! Can we do better? #### We can do more to help the algorithm by providing local markers (seeds) from which to start the process #### We will find seeds by calculating local maxima over areas that are larger than 30x30 pixels using the footprint argument for the function peak_local_max ```python import matplotlib.pyplot as plt import numpy as np from skimage import img_as_float import matplotlib.image as mpimg from skimage import filters from skimage import measure from skimage.util import invert from skimage.morphology import watershed from skimage.feature import peak_local_max image_nuclei = img_as_float(mpimg.imread("../Data/xy040-1.png")) mask = filters.gaussian(image_nuclei, sigma=1)>TriThresh #mask, smooth, and invert the image masked_image = image_nuclei*mask sigma_for_smoothing = 4 smoothed_masked_image = filters.gaussian(masked_image, sigma=sigma_for_smoothing) inverted_smoothed_masked_image = invert(smoothed_masked_image) image_to_watershed = inverted_smoothed_masked_image #find local peaks to use as seeds #focus on this function. Look at the effect of different arguments! #Specifically. 
look at the footprint argument MaskedImagePeaks = #This is for presentation of our markers #create disk structuring element of radius 5 selem = disk(5) #dilate local peaks so that close ones merge peakMask = dilation(MaskedImagePeaks,selem) # label local peak regions to find initial markers markers = measure.label(peakMask) #pass the *markers* argument to the watershed function labels_localmax_markers = watershed(image_to_watershed,markers, watershed_line = 1, mask=mask) #let's look at all the boundaries plt.figure(figsize=(12,20)) plt.subplot(211) plt.gca().set_title('image fed to watershed') plt.imshow(image_to_watershed-peakMask, cmap='gray') plt.clim((0.95, 1)) plt.subplot(212) plt.gca().set_title('watershed result') plt.imshow(filters.sobel(labels_localmax_markers), cmap='nipy_spectral') ``` #### This is pretty good! We're still getting a few errors here and there, but there's no big systematic over- or under- segmentation. This is a typical good result when dealing with real data. # 3. Feature extraction ### Feature extraction is a process of dimensionality reduction by which an initial raw image is reduced to a list of objects and attributes ## 3.1 Extracting region properties ### [scikit-image's measure module](https://scikit-image.org/docs/dev/api/skimage.measure.html) implements a method called *regionprops* that accepts a labeled mask of connected components, and, optionally, a corresponding image, and returns a list. Each object on the list contains useful data about the size, shape, position, and intensity ([see the full list here](https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops)) of a specific component. The length of the list is equal to the total number of objects detected. #### We'll start by extracting the number of CC we found and the area of each CC ```python from skimage import measure #We use regionprops to extract properties on all the CCs props = measure.regionprops(labels_localmax_markers,image_nuclei) #how many total connected components did we get? 
print(len(props)) props[1].perimeter ``` ```python #This is how we make a list of a specific property for each CC areas = [r.area for r in props] #Do the same for the "mean_intensity" property intensities = [r.mean_intensity for r in props] #let's look at all the boundaries plt.figure(figsize=(12,6)) plt.subplot(121) plt.gca().set_title('areas') plt.hist(areas) plt.subplot(122) plt.gca().set_title('intensities') plt.hist(intensities) ``` ## 3.2 Some options for data presentation **We can look at individual objects we found** ```python i=2 plt.imshow(props[i].intensity_image) plt.gca().set_title('Single cell') ``` **Let's use a scatter plot to compare our results to the image** ```python intensities = np.array([r.mean_intensity for r in props]) centroids = np.array([r.centroid for r in props]) fig = plt.figure(figsize=(12,15)) fig.add_axes([0.1,0.6,0.4,0.25]) plt.gca().set_title('original') plt.imshow(image_nuclei, interpolation='nearest', cmap=plt.cm.gray, vmin=0, vmax=0.02) fig.add_axes([0.6,0.6,0.4,0.25]) plt.scatter(centroids[:,1],centroids[:,0], c=intensities) plt.axis('equal') plt.gca().invert_yaxis() ``` **Or even nicer scatter plots!** ```python intensities = np.array([r.mean_intensity for r in props]) areas = np.array([r.area for r in props]) centroids = np.array([r.centroid for r in props]) fig = plt.figure(figsize=(12,15)) fig.add_axes([0.1,0.6,0.4,0.25]) plt.gca().set_title('original') plt.imshow(image_nuclei, interpolation='nearest', cmap=plt.cm.gray, vmin=0, vmax=0.02) fig.add_axes([0.6,0.6,0.4,0.25]) plt.scatter(centroids[:,1],centroids[:,0], c=intensities, s=areas/20) plt.axis('equal') plt.gca().invert_yaxis() plt.text(centroids[10,1],centroids[10,0],props[10].label) ``` **You can even draw your points directly on the image!** ```python intensities = np.array([r.mean_intensity for r in props]) areas = np.array([r.area for r in props]) centroids = np.array([r.centroid for r in props]) fig = plt.figure(figsize=(12,15)) fig.add_axes([0.1,0.6,0.8,0.5]) plt.gca().set_title('original') plt.imshow(image_nuclei, interpolation='nearest', cmap=plt.cm.gray, vmin=0, vmax=0.02) plt.gca().patch.set_alpha(0.5) plt.scatter(centroids[:,1],centroids[:,0], c=intensities, alpha=1) ``` ## 3.3 Converting regionprops to a table (Dataframe) #### Let's define some useful functions for converting a list of props into a pandas dataframe. 
These should become obsolete soon since the new version of scikit-image will have this functionality #### These are some useful functions for converting a list of props into a pandas dataframe ```python import pandas as pd def scalar_attributes_list(im_props): """ Makes list of all scalar, non-dunder, non-hidden attributes of skimage.measure.regionprops object """ attributes_list = [] for i, test_attribute in enumerate(dir(im_props[0])): #Attribute should not start with _ and cannot return an array #does not yet return tuples if test_attribute[:1] != '_' and not\ isinstance(getattr(im_props[0], test_attribute), np.ndarray): attributes_list += [test_attribute] return attributes_list def regionprops_to_df(im_props): """ Read content of all attributes for every item in a list output by skimage.measure.regionprops """ attributes_list = scalar_attributes_list(im_props) # Initialise list of lists for parsed data parsed_data = [] # Put data from im_props into list of lists for i, _ in enumerate(im_props): parsed_data += [[]] for j in range(len(attributes_list)): parsed_data[i] += [getattr(im_props[i], attributes_list[j])] # Return as a Pandas DataFrame return pd.DataFrame(parsed_data, columns=attributes_list) ``` ### Now, to get all the properties in table form we simply run: ```python props_df = regionprops_to_df(props) props_df ``` ### Finally, if we imaged our cells in multiple channels, we would want to use the same segmented nuclei and measure intensities of other channels. ```python from skimage import measure from skimage import img_as_float import matplotlib.image as mpimg image_2ndChannel = img_as_float(mpimg.imread("../Data/xy040-2.png")) # extract regionprops using labels_localmax_markers mask from image_2ndChannel props_other_channel = measure.regionprops(labels_localmax_markers,image_2ndChannel) plt.figure(figsize=(12,6)) plt.subplot(121) plt.gca().set_title('Nuclei') plt.imshow(image_nuclei, cmap='gray') plt.subplot(122) plt.gca().set_title('other channel') plt.imshow(image_2ndChannel, cmap='gray') ``` ### Extract only the intensity related features ```python mean_2nd_channel = [r.mean_intensity for r in props_other_channel] max_2nd_channel = [r.max_intensity for r in props_other_channel] min_2nd_channel = [r.min_intensity for r in props_other_channel] plt.gca().set_title('intensities of 2nd channel') plt.hist(mean_2nd_channel) ``` ### Add these new features to the pandas dataframe ```python props_df['mean_intensity_ch2'] = mean_2nd_channel props_df['max_intensity_ch2'] = max_2nd_channel props_df['min_intensity_ch2'] = min_2nd_channel props_df ``` **Sometimes it's easier to see a bimodal distribution in log scale** ```python fig = plt.figure(figsize=(12,6)) fig.add_axes([0.1,0.1,0.4,0.4]) plt.gca().set_title('Histogram of intensities') plt.hist(props_df.mean_intensity_ch2,20) fig.add_axes([0.6,0.1,0.4,0.4]) plt.gca().set_title('Histogram of log of intensities') plt.hist(np.log(props_df.mean_intensity_ch2),20) ``` **We can compare distributions of different channels** ```python plt.figure(figsize=(12,6)) plt.gca().set_title('scatter plot of intensities') plt.scatter(np.log(props_df['max_intensity_ch2']), np.log(props_df['max_intensity'])) plt.xlabel('Ch2') plt.ylabel('Ch1') ``` ### And so, we've successfully implemented a simple image segmentation pipeline for multicolor microscopy data. #### Fin.
```python import numpy as np import sympy as sp import matplotlib.pyplot as plt %matplotlib inline import pi_sequences as p3 import boundary_layer_func1 as p1 import sequence_limits as p2 import diffeq_midpoint as p4 import math ``` ## Classwork 3 Michael Seaman, Chinmai Raman, Austin Ayers, Taylor Patti Organized by Andrew Malfavon ## Excercise A.2: Computing $\pi$ via sequences Michael Seaman The following sequences all converge to pi, although at different rates. In order: $$a_n = 4\sum_{k=1}^{n}\frac{(-1)^{k+1}}{2k-1}$$ $$b_n = (6\sum_{k=1}^{n}k^{-2})^{1/2} $$ $$c_n = (90\sum_{k=1}^{n}k^{-4})^{1/4} $$ $$d_n = \frac{6}{\sqrt{3}}\sum_{k=0}^{n}\frac{(-1)^{k}}{3^k(2k+1)}$$ $$e_n = 16\sum_{k=0}^{n}\frac{(-1)^{k}}{5^{2k+1}(2k+1)} - 4\sum_{k=0}^{n}\frac{(-1)^{k}}{239^{2k+1}(2k+1)}$$ ```python n = 30 plt.plot([x for x in range(n)],p3.pi_sequence(n, p3.fa),'g.') plt.show() ``` ```python plt.plot([x for x in range(n)],p3.pi_sequence(n, p3.fb) ** .5 ,'b.') plt.show() ``` ```python plt.plot([x for x in range(n)],p3.pi_sequence(n, p3.fc) ** .25 ,'y.') plt.show() ``` ```python plt.plot([x for x in range(n)],p3.pi_sequence(n, p3.fd),'r.') plt.show() ``` ```python plt.plot([x for x in range(n)],p3.pi_sequence(n, p3.fd),'c.') plt.show() ``` ```python n = 10 plt.plot([x + 20 for x in range(n)],p3.pi_sequence(n + 20, p3.fa)[-n:],'g.') plt.plot([x + 20 for x in range(n)],(p3.pi_sequence(n + 20, p3.fb) ** .5)[-n:] ,'b.') plt.plot([x + 20 for x in range(n)],(p3.pi_sequence(n + 20, p3.fc) ** .25)[-n:] ,'y.') plt.plot([x + 20 for x in range(n)],p3.pi_sequence(n + 20, p3.fd)[-n:],'r.') plt.plot([x + 20 for x in range(n)],p3.pi_sequence(n + 20, p3.fd)[-n:],'c.') plt.plot((20, 30), (math.pi, math.pi), 'b') plt.show() ``` # Chinmai Raman ## Classwork 3 ### 5.49 Experience Overflow in a Function Calculates an exponential function and returns the numerator, denominator and the fraction as a 3-tuple ```python x = np.linspace(0,1,10000) y1 = p1.v(x, 1, np.exp)[2] y2 = p1.v(x, 0.1, np.exp)[2] y3 = p1.v(x, 0.01, np.exp)[2] fig = plt.figure(1) plt.plot(x, y1, 'b') plt.plot(x, y2, 'r') plt.plot(x, y3, 'g') plt.xlabel('x') plt.ylabel('v(x)') plt.legend(['(1 - exp(x / mu)) / (1 - exp(1 / mu))']) plt.axis([x[0], x[-1], min(y3), max(y3)]) plt.title('Math Function') plt.show(fig) ``` # Austin Ayers ## Classwork 3 ### A.1 Determine the limit of a sequence Computes and returns the following sequence for N = 100 $$a_n = \frac{7+1/(n+1)}{3-1/(n+1)^2}, \qquad n=0,1,2,\ldots,N$$ ```python p2.part_a() ``` 4.0 2.53846153846 2.43243243243 2.39726027397 2.38016528926 2.37016574586 2.36363636364 2.3590504451 2.35565819861 2.35304990758 2.35098335855 2.34930643127 2.34791889007 2.34675205855 2.34575733545 2.34489937543 2.34415186773 2.34349482852 2.34291281052 2.34239368698 2.34192780643 2.34150739272 2.34112611129 2.34077875038 2.34046098306 2.34016918739 2.33990030857 2.33965175226 2.33942130105 2.33920704846 2.33900734635 2.33882076264 2.33864604703 2.33848210307 2.33832796527 2.33818278006 2.33804579007 2.33791632097 2.33779377038 2.33767759855 2.33756732039 2.33746249879 2.33736273877 2.33726768255 2.3371770053 2.3370904114 2.33700763123 2.33692841841 2.3368525473 2.3367798109 The series converges to: 7/3 or 2.3333..., and a_N was: 2.33671001895 and the difference was: 0.00337668561968 ```python p2.part_b() ``` The series converges to: 7/3 or 2.3333..., and a_N was: 2.33671001895 and the difference was: 0.00337668561968 The limit exists (to this algorithm's knowledge) The limit is roughly: 2.3367798109 ```python p2.part_c() ``` None 
The limit is roughly: 1.0

```python
p2.part_d()
```

```python
p2.part_e()
```

```python
p2.part_f()
```

The computations for x = pi go wrong for large N because sin(pi) = 0 and 2 ** (-n) approaches 0 as well, so the numerator and denominator both go to 0, which usually leads to numerical problems.

# diffeq_midpoint

## Taylor Patti

Uses the midpoint integration rule along with numpy vectors to produce a continuous vector which gives integral data for an array of prespecified points. Here we use it to integrate sin from 0 to pi.

```python
function_call = p4.vector_midpoint(p4.np.sin, 0, p4.np.pi, 10000)
print(function_call[1][-1])
```

1.99999990953

Observe the close adherence to the actual value of this canonical integral. We can also evaluate the result at a different value of x. Let's look at the value of this integral from 0 to pi over 2. Again, the result adheres strikingly closely to the analytical value of this integral.

```python
print(function_call[1][5000])
```

1.00031411403

```python

```
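For reference, here is a minimal sketch of what a vectorized midpoint-rule integrator with the call signature used above could look like. The function name and the assumption that `vector_midpoint` returns a grid of x-values together with the running integral are illustrative guesses, not the course module's actual implementation:

```python
import numpy as np

def vector_midpoint_sketch(f, a, b, n):
    """Cumulative midpoint-rule integral of f on [a, b] with n subintervals.

    Returns (x, F) where F[k] approximates the integral of f from a to x[k].
    Illustrative sketch only -- diffeq_midpoint.vector_midpoint may differ.
    """
    x = np.linspace(a, b, n + 1)
    h = (b - a) / n
    midpoints = x[:-1] + h / 2
    # running integral: F[0] = 0, F[k] = sum of the first k midpoint rectangles
    F = np.concatenate(([0.0], np.cumsum(f(midpoints) * h)))
    return x, F

x, F = vector_midpoint_sketch(np.sin, 0, np.pi, 10000)
print(F[-1])    # ~2, the exact integral of sin over [0, pi]
print(F[5000])  # ~1, the integral from 0 to pi/2
```

With 10000 subintervals the final entry approximates the full integral (2) and the middle of the grid approximates the integral up to pi/2 (1), consistent with the values printed above.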
```python from sympy import * init_printing() ``` ```python def skew(l): l1, l2, l3 = l return Matrix([ [0, -l3, l2], [l3, 0, -l1], [-l2, l1, 0] ]) ``` ```python # define state variables x, y, z, eta0, eps1, eps2, eps3, u, v, w, p, q, r = symbols('x y z et0 eps1 eps2 eps3 u v w p q r', real=True) s = Matrix([x, y, z, eta0, eps1, eps2, eps3, u, v, w, p, q, r]) # position and orientation eta = Matrix([x, y, z, eta0, eps1, eps2, eps3]) nu = Matrix([u, v, w, p, q, r]) # centre of gravity xg, yg, zg = symbols('xg yg zg', real=True) rg = Matrix([xg, yg, zg]) # centre of bouyancy xb, yb, zb = symbols('xb yb zb', real=True) rb = Matrix([xb, yb, zb]) # center of pressure xcp, ycp, zcp = symbols('xcp ycp zcp', real=True) rcp = Matrix([xcp, ycp, zcp]) # mass matrix m = symbols('m', real=True, positive=True) Ixx, Iyy, Izz = symbols('Ixx Iyy Izz') I0 = diag(Ixx, Iyy, Izz) M = BlockMatrix([ [m*eye(3), -m*skew(rg)], [m*skew(rg), I0] ]) M = Matrix(M) # M = simplify(M) # Coriolis and centripetal matrix nu1 = Matrix([u, v, w]) nu2 = Matrix([p, q, r]) crb = BlockMatrix([ [zeros(3), -m*skew(nu1)-m*skew(nu2)*skew(rg)], [-m*skew(nu1)+m*skew(rg)*skew(nu2), -skew(I0*nu2)] ]) crb = Matrix(crb) # crb = simplify(crb) # damping matrix Xuu, Yvv, Zww, Kpp, Mqq, Nrr = symbols( 'Xuu Yvv Zww Kpp Mqq Nrr', real=True ) D = Matrix([ [Xuu*abs(u), 0, 0, 0, 0, 0], [0, Yvv*abs(v), 0, 0, 0, 0], [0, 0, Zww*abs(w), 0, 0, 0], [0, -zcp*Yvv*abs(v), ycp*Zww*abs(w), Kpp*abs(p), 0, 0], [zcp*Xuu*abs(u), 0, -xcp*Zww*abs(w), 0, Mqq*abs(q), 0], [-ycp*Xuu*abs(u), xcp*Yvv*abs(v), 0, 0, 0, Nrr*abs(r)] ]) # D = simplify(D) # rotational transform between body and NED quaternions Tq = Rational(1,2)*Matrix([ [-eps1, -eps2, -eps3], [eta0, -eps3, eps2], [eps3, eta0, -eps1], [-eps2, eps1, eta0] ]) # Tq = simplify(Tq) Rq = Matrix([ [1-2*(eps2**2+eps3**2), 2*(eps1*eps2-eps3*eta0), 2*(eps1*eps3+eps2*eta0)], [2*(eps1*eps2+eps3*eta0), 1-2*(eps1**2+eps3**2), 2*(eps2*eps3-eps1*eta0)], [2*(eps1*eps3-eps2*eta0), 2*(eps2*eps3+eps1*eta0), 1-2*(eps1**2+eps2**2)] ]) Jeta = BlockMatrix([ [Rq, zeros(3)], [zeros(4,3), Tq] ]) Jeta = Matrix(Jeta) # Jeta = simplify(Jeta) # bouyancy in quaternions W, B = symbols('W B', real=True) fg = Matrix([0, 0, W]) fb = Matrix([0, 0, -B]) Rqinv = Rq.inv() geta = Matrix([ Rqinv*(fg+fb), skew(rg)*Rqinv*fg + skew(rb)*Rqinv*fb ]) # geta = simplify(geta) ``` ```python # print(cse(Jeta)) ``` ```python # thrust model Kt0, Kt1 = symbols('Kt0 Kt1', real=True) Kt = Matrix([Kt0, Kt1]) Qt0, Qt1 = symbols('Qt0 Qt1', real=True) Qt = Matrix([Qt0, Qt1]) # control inputs rpm0, rpm1 = symbols('rpm0 rpm1', real=True) rpm = Matrix([rpm0, rpm1]) de, dr = symbols('de dr', real=True) control_vector = Matrix([rpm0, rpm1, de, dr]) # control force vector Ft = Kt.dot(rpm) Mt = Qt.dot(rpm) # coefficient for each element in cost function tauc = Matrix([ Ft*cos(de)*cos(dr), -Ft*sin(dr), Ft*sin(de)*cos(dr), Mt*cos(de)*cos(dr), -Mt*sin(dr), Mt*sin(de)*cos(dr) ]) ``` ```python etadot = Jeta*nu nudot = M.inv()*(tauc - (crb + D)*nu - geta) ``` ```python sdot = Matrix([ etadot, nudot ]) ``` ```python sdot ``` $\displaystyle \left[\begin{matrix}u \left(- 2 eps_{2}^{2} - 2 eps_{3}^{2} + 1\right) + v \left(2 eps_{1} eps_{2} - 2 eps_{3} et_{0}\right) + w \left(2 eps_{1} eps_{3} + 2 eps_{2} et_{0}\right)\\u \left(2 eps_{1} eps_{2} + 2 eps_{3} et_{0}\right) + v \left(- 2 eps_{1}^{2} - 2 eps_{3}^{2} + 1\right) + w \left(- 2 eps_{1} et_{0} + 2 eps_{2} eps_{3}\right)\\u \left(2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right) + v \left(2 eps_{1} et_{0} + 2 eps_{2} eps_{3}\right) + w 
\left(- 2 eps_{1}^{2} - 2 eps_{2}^{2} + 1\right)\\- \frac{eps_{1} p}{2} - \frac{eps_{2} q}{2} - \frac{eps_{3} r}{2}\\\frac{eps_{2} r}{2} - \frac{eps_{3} q}{2} + \frac{et_{0} p}{2}\\- \frac{eps_{1} r}{2} + \frac{eps_{3} p}{2} + \frac{et_{0} q}{2}\\\frac{eps_{1} q}{2} - \frac{eps_{2} p}{2} + \frac{et_{0} r}{2}\\\frac{\left(- Iyy m xg yg zg + Izz m xg yg zg\right) \left(B \left(\frac{yb \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{zb \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) + Iyy q r - Izz q r - Kpp p \left|{p}\right| - W \left(\frac{yg \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{zg \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(- m q yg - m r zg\right) - v \left(- Yvv zcp \left|{v}\right| + m p yg + m w\right) - w \left(Zww ycp \left|{w}\right| + m p zg - m v\right) + \left(Qt_{0} rpm_{0} + Qt_{1} rpm_{1}\right) \cos{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(Ixx Iyy yg - Ixx m xg^{2} yg - Iyy m yg^{3} - Iyy m yg zg^{2}\right) \left(B \left(\frac{xb \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{yb \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} 
eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) + Ixx p q - Iyy p q - Nrr r \left|{r}\right| - W \left(\frac{xg \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{yg \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(- Xuu ycp \left|{u}\right| + m r xg + m v\right) - v \left(Yvv xcp \left|{v}\right| + m r yg - m u\right) - w \left(- m p xg - m q yg\right) + \left(Qt_{0} rpm_{0} + Qt_{1} rpm_{1}\right) \sin{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Ixx Izz zg + Ixx m xg^{2} zg + Izz m yg^{2} zg + Izz m zg^{3}\right) \left(B \left(- \frac{xb \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \frac{zb \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - Ixx p r + Izz p r - Mqq q \left|{q}\right| - W \left(- \frac{xg \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \frac{zg \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 
eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(Xuu zcp \left|{u}\right| + m q xg - m w\right) - v \left(- m p xg - m r zg\right) - w \left(- Zww xcp \left|{w}\right| + m q zg + m u\right) + \left(- Qt_{0} rpm_{0} - Qt_{1} rpm_{1}\right) \sin{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Ixx Iyy xg yg + Ixx m xg^{3} yg + Iyy m xg yg^{3} + Izz m xg yg zg^{2}\right) \left(- Yvv v \left|{v}\right| - p \left(- m p yg - m w\right) - q \left(m p xg + m r zg\right) - r \left(- m r yg + m u\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(- Kt_{0} rpm_{0} - Kt_{1} rpm_{1}\right) \sin{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Ixx Izz xg zg + Ixx m xg^{3} zg + Iyy m xg yg^{2} zg + Izz m xg zg^{3}\right) \left(- Zww w \left|{w}\right| - p \left(- m p zg + m v\right) - q \left(- m q zg - m u\right) - r \left(m p xg + m q yg\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(Kt_{0} rpm_{0} + Kt_{1} rpm_{1}\right) \sin{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Xuu u \left|{u}\right| - p \left(m q yg + m r zg\right) - q \left(- m q xg + m w\right) - r \left(- m r xg - m v\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(Kt_{0} rpm_{0} + Kt_{1} rpm_{1}\right) \cos{\left(de 
\right)} \cos{\left(dr \right)}\right) \left(Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Izz m xg^{2} + Ixx m^{2} xg^{4} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Izz m^{2} xg^{2} zg^{2}\right)}{Ixx Iyy Izz m - Ixx Iyy m^{2} xg^{2} - Ixx Iyy m^{2} yg^{2} - Ixx Izz m^{2} xg^{2} - Ixx Izz m^{2} zg^{2} + Ixx m^{3} xg^{4} + Ixx m^{3} xg^{2} yg^{2} + Ixx m^{3} xg^{2} zg^{2} - Iyy Izz m^{2} yg^{2} - Iyy Izz m^{2} zg^{2} + Iyy m^{3} xg^{2} yg^{2} + Iyy m^{3} yg^{4} + Iyy m^{3} yg^{2} zg^{2} + Izz m^{3} xg^{2} zg^{2} + Izz m^{3} yg^{2} zg^{2} + Izz m^{3} zg^{4}}\\\frac{\left(Ixx m xg yg zg - Izz m xg yg zg\right) \left(B \left(- \frac{xb \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \frac{zb \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - Ixx p r + Izz p r - Mqq q \left|{q}\right| - W \left(- \frac{xg \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \frac{zg \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(Xuu zcp \left|{u}\right| + m q xg - m w\right) - v \left(- m p xg - m r zg\right) - w \left(- Zww xcp \left|{w}\right| + m q zg + m u\right) + \left(- Qt_{0} rpm_{0} - Qt_{1} rpm_{1}\right) \sin{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Ixx Iyy xg + Ixx m xg^{3} + Ixx m xg zg^{2} + Iyy m xg yg^{2}\right) \left(B \left(\frac{xb \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 
eps_{3}^{2} + 1} - \frac{yb \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) + Ixx p q - Iyy p q - Nrr r \left|{r}\right| - W \left(\frac{xg \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{yg \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(- Xuu ycp \left|{u}\right| + m r xg + m v\right) - v \left(Yvv xcp \left|{v}\right| + m r yg - m u\right) - w \left(- m p xg - m q yg\right) + \left(Qt_{0} rpm_{0} + Qt_{1} rpm_{1}\right) \sin{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(Iyy Izz zg - Iyy m yg^{2} zg - Izz m xg^{2} zg - Izz m zg^{3}\right) \left(B \left(\frac{yb \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{zb \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) + Iyy q r - Izz q r - Kpp p \left|{p}\right| - W \left(\frac{yg \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{zg \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 
eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(- m q yg - m r zg\right) - v \left(- Yvv zcp \left|{v}\right| + m p yg + m w\right) - w \left(Zww ycp \left|{w}\right| + m p zg - m v\right) + \left(Qt_{0} rpm_{0} + Qt_{1} rpm_{1}\right) \cos{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Ixx Iyy xg yg + Ixx m xg^{3} yg + Iyy m xg yg^{3} + Izz m xg yg zg^{2}\right) \left(- Xuu u \left|{u}\right| - p \left(m q yg + m r zg\right) - q \left(- m q xg + m w\right) - r \left(- m r xg - m v\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(Kt_{0} rpm_{0} + Kt_{1} rpm_{1}\right) \cos{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(Ixx m xg^{2} yg zg - Iyy Izz yg zg + Iyy m yg^{3} zg + Izz m yg zg^{3}\right) \left(- Zww w \left|{w}\right| - p \left(- m p zg + m v\right) - q \left(- m q zg - m u\right) - r \left(m p xg + m q yg\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(Kt_{0} rpm_{0} + Kt_{1} rpm_{1}\right) \sin{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Yvv v \left|{v}\right| - p \left(- m p yg - m w\right) - q \left(m p xg + m r zg\right) - r \left(- m r yg + m u\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 
eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(- Kt_{0} rpm_{0} - Kt_{1} rpm_{1}\right) \sin{\left(dr \right)}\right) \left(Ixx Iyy Izz - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{2} yg^{2} - Iyy Izz m yg^{2} + Iyy m^{2} yg^{4} + Izz m^{2} yg^{2} zg^{2}\right)}{Ixx Iyy Izz m - Ixx Iyy m^{2} xg^{2} - Ixx Iyy m^{2} yg^{2} - Ixx Izz m^{2} xg^{2} - Ixx Izz m^{2} zg^{2} + Ixx m^{3} xg^{4} + Ixx m^{3} xg^{2} yg^{2} + Ixx m^{3} xg^{2} zg^{2} - Iyy Izz m^{2} yg^{2} - Iyy Izz m^{2} zg^{2} + Iyy m^{3} xg^{2} yg^{2} + Iyy m^{3} yg^{4} + Iyy m^{3} yg^{2} zg^{2} + Izz m^{3} xg^{2} zg^{2} + Izz m^{3} yg^{2} zg^{2} + Izz m^{3} zg^{4}}\\\frac{\left(- Ixx m xg yg zg + Iyy m xg yg zg\right) \left(B \left(\frac{xb \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{yb \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) + Ixx p q - Iyy p q - Nrr r \left|{r}\right| - W \left(\frac{xg \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{yg \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(- Xuu ycp \left|{u}\right| + m r xg + m v\right) - v \left(Yvv xcp \left|{v}\right| + m r yg - m u\right) - w \left(- m p xg - m q yg\right) + \left(Qt_{0} rpm_{0} + Qt_{1} rpm_{1}\right) \sin{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(Ixx Izz xg - Ixx m xg^{3} - Ixx m xg yg^{2} - Izz m xg zg^{2}\right) \left(B \left(- \frac{xb \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 
4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \frac{zb \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - Ixx p r + Izz p r - Mqq q \left|{q}\right| - W \left(- \frac{xg \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \frac{zg \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(Xuu zcp \left|{u}\right| + m q xg - m w\right) - v \left(- m p xg - m r zg\right) - w \left(- Zww xcp \left|{w}\right| + m q zg + m u\right) + \left(- Qt_{0} rpm_{0} - Qt_{1} rpm_{1}\right) \sin{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Iyy Izz yg + Iyy m xg^{2} yg + Iyy m yg^{3} + Izz m yg zg^{2}\right) \left(B \left(\frac{yb \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{zb \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) + Iyy q r - Izz q r - Kpp p \left|{p}\right| - W \left(\frac{yg \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 
eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{zg \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(- m q yg - m r zg\right) - v \left(- Yvv zcp \left|{v}\right| + m p yg + m w\right) - w \left(Zww ycp \left|{w}\right| + m p zg - m v\right) + \left(Qt_{0} rpm_{0} + Qt_{1} rpm_{1}\right) \cos{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Ixx Izz xg zg + Ixx m xg^{3} zg + Iyy m xg yg^{2} zg + Izz m xg zg^{3}\right) \left(- Xuu u \left|{u}\right| - p \left(m q yg + m r zg\right) - q \left(- m q xg + m w\right) - r \left(- m r xg - m v\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(Kt_{0} rpm_{0} + Kt_{1} rpm_{1}\right) \cos{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(Ixx m xg^{2} yg zg - Iyy Izz yg zg + Iyy m yg^{3} zg + Izz m yg zg^{3}\right) \left(- Yvv v \left|{v}\right| - p \left(- m p yg - m w\right) - q \left(m p xg + m r zg\right) - r \left(- m r yg + m u\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(- Kt_{0} rpm_{0} - Kt_{1} rpm_{1}\right) \sin{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Zww w \left|{w}\right| - p \left(- m p zg + m v\right) - q \left(- m q zg - m u\right) - r \left(m p xg + m q yg\right) - 
\frac{\left(- B + W\right) \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(Kt_{0} rpm_{0} + Kt_{1} rpm_{1}\right) \sin{\left(de \right)} \cos{\left(dr \right)}\right) \left(Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}\right)}{Ixx Iyy Izz m - Ixx Iyy m^{2} xg^{2} - Ixx Iyy m^{2} yg^{2} - Ixx Izz m^{2} xg^{2} - Ixx Izz m^{2} zg^{2} + Ixx m^{3} xg^{4} + Ixx m^{3} xg^{2} yg^{2} + Ixx m^{3} xg^{2} zg^{2} - Iyy Izz m^{2} yg^{2} - Iyy Izz m^{2} zg^{2} + Iyy m^{3} xg^{2} yg^{2} + Iyy m^{3} yg^{4} + Iyy m^{3} yg^{2} zg^{2} + Izz m^{3} xg^{2} zg^{2} + Izz m^{3} yg^{2} zg^{2} + Izz m^{3} zg^{4}}\\\frac{\left(- Iyy m xg yg zg + Izz m xg yg zg\right) \left(- Xuu u \left|{u}\right| - p \left(m q yg + m r zg\right) - q \left(- m q xg + m w\right) - r \left(- m r xg - m v\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(Kt_{0} rpm_{0} + Kt_{1} rpm_{1}\right) \cos{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Iyy Izz yg + Iyy m xg^{2} yg + Iyy m yg^{3} + Izz m yg zg^{2}\right) \left(- Zww w \left|{w}\right| - p \left(- m p zg + m v\right) - q \left(- m q zg - m u\right) - r \left(m p xg + m q yg\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(Kt_{0} rpm_{0} + Kt_{1} rpm_{1}\right) \sin{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(Iyy Izz zg - Iyy m yg^{2} zg - Izz m xg^{2} zg - Izz m zg^{3}\right) \left(- Yvv v \left|{v}\right| - p \left(- m p yg - m w\right) - q \left(m p xg + m r zg\right) - r \left(- m r yg + m u\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{2} eps_{2} 
eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(- Kt_{0} rpm_{0} - Kt_{1} rpm_{1}\right) \sin{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Iyy m xg zg + m^{2} xg^{3} zg + m^{2} xg yg^{2} zg + m^{2} xg zg^{3}\right) \left(B \left(\frac{xb \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{yb \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) + Ixx p q - Iyy p q - Nrr r \left|{r}\right| - W \left(\frac{xg \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{yg \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(- Xuu ycp \left|{u}\right| + m r xg + m v\right) - v \left(Yvv xcp \left|{v}\right| + m r yg - m u\right) - w \left(- m p xg - m q yg\right) + \left(Qt_{0} rpm_{0} + Qt_{1} rpm_{1}\right) \sin{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Izz m xg yg + m^{2} xg^{3} yg + m^{2} xg yg^{3} + m^{2} xg yg zg^{2}\right) \left(B \left(- \frac{xb \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} 
+ 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \frac{zb \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - Ixx p r + Izz p r - Mqq q \left|{q}\right| - W \left(- \frac{xg \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \frac{zg \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(Xuu zcp \left|{u}\right| + m q xg - m w\right) - v \left(- m p xg - m r zg\right) - w \left(- Zww xcp \left|{w}\right| + m q zg + m u\right) + \left(- Qt_{0} rpm_{0} - Qt_{1} rpm_{1}\right) \sin{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(Iyy Izz - Iyy m xg^{2} - Iyy m yg^{2} - Izz m xg^{2} - Izz m zg^{2} + m^{2} xg^{4} + m^{2} xg^{2} yg^{2} + m^{2} xg^{2} zg^{2}\right) \left(B \left(\frac{yb \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{zb \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) + Iyy q r - Izz q r - Kpp p \left|{p}\right| - W \left(\frac{yg \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} 
+ 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{zg \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(- m q yg - m r zg\right) - v \left(- Yvv zcp \left|{v}\right| + m p yg + m w\right) - w \left(Zww ycp \left|{w}\right| + m p zg - m v\right) + \left(Qt_{0} rpm_{0} + Qt_{1} rpm_{1}\right) \cos{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}}\\\frac{\left(Ixx m xg yg zg - Izz m xg yg zg\right) \left(- Yvv v \left|{v}\right| - p \left(- m p yg - m w\right) - q \left(m p xg + m r zg\right) - r \left(- m r yg + m u\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(- Kt_{0} rpm_{0} - Kt_{1} rpm_{1}\right) \sin{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(Ixx Izz xg - Ixx m xg^{3} - Ixx m xg yg^{2} - Izz m xg zg^{2}\right) \left(- Zww w \left|{w}\right| - p \left(- m p zg + m v\right) - q \left(- m q zg - m u\right) - r \left(m p xg + m q yg\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(Kt_{0} rpm_{0} + Kt_{1} rpm_{1}\right) \sin{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Ixx Izz zg + Ixx m xg^{2} zg + Izz m yg^{2} zg + Izz m zg^{3}\right) \left(- Xuu u 
\left|{u}\right| - p \left(m q yg + m r zg\right) - q \left(- m q xg + m w\right) - r \left(- m r xg - m v\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(Kt_{0} rpm_{0} + Kt_{1} rpm_{1}\right) \cos{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Ixx m yg zg + m^{2} xg^{2} yg zg + m^{2} yg^{3} zg + m^{2} yg zg^{3}\right) \left(B \left(\frac{xb \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{yb \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) + Ixx p q - Iyy p q - Nrr r \left|{r}\right| - W \left(\frac{xg \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{yg \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(- Xuu ycp \left|{u}\right| + m r xg + m v\right) - v \left(Yvv xcp \left|{v}\right| + m r yg - m u\right) - w \left(- m p xg - m q yg\right) + \left(Qt_{0} rpm_{0} + Qt_{1} rpm_{1}\right) \sin{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Izz m xg yg + 
m^{2} xg^{3} yg + m^{2} xg yg^{3} + m^{2} xg yg zg^{2}\right) \left(B \left(\frac{yb \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{zb \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) + Iyy q r - Izz q r - Kpp p \left|{p}\right| - W \left(\frac{yg \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{zg \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(- m q yg - m r zg\right) - v \left(- Yvv zcp \left|{v}\right| + m p yg + m w\right) - w \left(Zww ycp \left|{w}\right| + m p zg - m v\right) + \left(Qt_{0} rpm_{0} + Qt_{1} rpm_{1}\right) \cos{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(Ixx Izz - Ixx m xg^{2} - Ixx m yg^{2} - Izz m yg^{2} - Izz m zg^{2} + m^{2} xg^{2} yg^{2} + m^{2} yg^{4} + m^{2} yg^{2} zg^{2}\right) \left(B \left(- \frac{xb \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \frac{zb \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - Ixx p r + Izz p r - Mqq q 
\left|{q}\right| - W \left(- \frac{xg \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \frac{zg \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(Xuu zcp \left|{u}\right| + m q xg - m w\right) - v \left(- m p xg - m r zg\right) - w \left(- Zww xcp \left|{w}\right| + m q zg + m u\right) + \left(- Qt_{0} rpm_{0} - Qt_{1} rpm_{1}\right) \sin{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}}\\\frac{\left(- Ixx m xg yg zg + Iyy m xg yg zg\right) \left(- Zww w \left|{w}\right| - p \left(- m p zg + m v\right) - q \left(- m q zg - m u\right) - r \left(m p xg + m q yg\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(Kt_{0} rpm_{0} + Kt_{1} rpm_{1}\right) \sin{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Ixx Iyy xg + Ixx m xg^{3} + Ixx m xg zg^{2} + Iyy m xg yg^{2}\right) \left(- Yvv v \left|{v}\right| - p \left(- m p yg - m w\right) - q \left(m p xg + m r zg\right) - r \left(- m r yg + m u\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(- Kt_{0} rpm_{0} - Kt_{1} rpm_{1}\right) \sin{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy 
m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(Ixx Iyy yg - Ixx m xg^{2} yg - Iyy m yg^{3} - Iyy m yg zg^{2}\right) \left(- Xuu u \left|{u}\right| - p \left(m q yg + m r zg\right) - q \left(- m q xg + m w\right) - r \left(- m r xg - m v\right) - \frac{\left(- B + W\right) \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \left(Kt_{0} rpm_{0} + Kt_{1} rpm_{1}\right) \cos{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Ixx m yg zg + m^{2} xg^{2} yg zg + m^{2} yg^{3} zg + m^{2} yg zg^{3}\right) \left(B \left(- \frac{xb \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \frac{zb \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - Ixx p r + Izz p r - Mqq q \left|{q}\right| - W \left(- \frac{xg \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} + \frac{zg \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(Xuu zcp \left|{u}\right| + m q xg - m w\right) - v \left(- m p xg - m r zg\right) - w \left(- Zww xcp \left|{w}\right| + m q zg + m u\right) + \left(- Qt_{0} rpm_{0} - Qt_{1} rpm_{1}\right) \sin{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + 
Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(- Iyy m xg zg + m^{2} xg^{3} zg + m^{2} xg yg^{2} zg + m^{2} xg zg^{3}\right) \left(B \left(\frac{yb \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{zb \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) + Iyy q r - Izz q r - Kpp p \left|{p}\right| - W \left(\frac{yg \left(4 eps_{1}^{2} eps_{3}^{2} - 2 eps_{1}^{2} + 4 eps_{2}^{2} eps_{3}^{2} - 2 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{zg \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(- m q yg - m r zg\right) - v \left(- Yvv zcp \left|{v}\right| + m p yg + m w\right) - w \left(Zww ycp \left|{w}\right| + m p zg - m v\right) + \left(Qt_{0} rpm_{0} + Qt_{1} rpm_{1}\right) \cos{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}} + \frac{\left(Ixx Iyy - Ixx m xg^{2} - Ixx m zg^{2} - Iyy m yg^{2} - Iyy m zg^{2} + m^{2} xg^{2} zg^{2} + m^{2} yg^{2} zg^{2} + m^{2} zg^{4}\right) \left(B \left(\frac{xb \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{yb \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 
8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) + Ixx p q - Iyy p q - Nrr r \left|{r}\right| - W \left(\frac{xg \left(4 eps_{1}^{2} eps_{2} eps_{3} + 2 eps_{1} et_{0} + 4 eps_{2}^{3} eps_{3} + 4 eps_{2} eps_{3}^{3} + 4 eps_{2} eps_{3} et_{0}^{2} - 2 eps_{2} eps_{3}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1} - \frac{yg \left(4 eps_{1}^{3} eps_{3} + 4 eps_{1} eps_{2}^{2} eps_{3} + 4 eps_{1} eps_{3}^{3} + 4 eps_{1} eps_{3} et_{0}^{2} - 2 eps_{1} eps_{3} - 2 eps_{2} et_{0}\right)}{4 eps_{1}^{4} + 8 eps_{1}^{2} eps_{2}^{2} + 8 eps_{1}^{2} eps_{3}^{2} + 4 eps_{1}^{2} et_{0}^{2} - 4 eps_{1}^{2} + 4 eps_{2}^{4} + 8 eps_{2}^{2} eps_{3}^{2} + 4 eps_{2}^{2} et_{0}^{2} - 4 eps_{2}^{2} + 4 eps_{3}^{4} + 4 eps_{3}^{2} et_{0}^{2} - 4 eps_{3}^{2} + 1}\right) - u \left(- Xuu ycp \left|{u}\right| + m r xg + m v\right) - v \left(Yvv xcp \left|{v}\right| + m r yg - m u\right) - w \left(- m p xg - m q yg\right) + \left(Qt_{0} rpm_{0} + Qt_{1} rpm_{1}\right) \sin{\left(de \right)} \cos{\left(dr \right)}\right)}{Ixx Iyy Izz - Ixx Iyy m xg^{2} - Ixx Iyy m yg^{2} - Ixx Izz m xg^{2} - Ixx Izz m zg^{2} + Ixx m^{2} xg^{4} + Ixx m^{2} xg^{2} yg^{2} + Ixx m^{2} xg^{2} zg^{2} - Iyy Izz m yg^{2} - Iyy Izz m zg^{2} + Iyy m^{2} xg^{2} yg^{2} + Iyy m^{2} yg^{4} + Iyy m^{2} yg^{2} zg^{2} + Izz m^{2} xg^{2} zg^{2} + Izz m^{2} yg^{2} zg^{2} + Izz m^{2} zg^{4}}\end{matrix}\right]$ ```python ```
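The matrix above is the raw SymPy expression for the state derivative. For simulation it would normally be compiled into a plain numerical function; the cell below is only a sketch of that step, in which `eom` (the symbolic matrix printed above) and `args` (an ordered tuple of the state, control and parameter symbols it contains) are assumed names rather than variables defined earlier in this notebook.

```python
# Sketch only: `eom` and `args` are hypothetical names for the symbolic state
# derivative printed above and the ordered tuple of symbols it depends on.
import sympy as sp

# Factor out repeated sub-expressions (e.g. the long denominator that appears
# many times in the matrix above) to see how much work can be shared.
replacements, reduced = sp.cse(eom)

# Compile a NumPy-callable right-hand side for use in an ODE integrator.
eom_num = sp.lambdify(args, eom, modules="numpy")
```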
db0ced147942087e6a276e63ad5afae51731e87c
96,999
ipynb
Jupyter Notebook
sam_dynamics/notebooks/dynamics.ipynb
cisprague/sam_common
f477c8bb4fee3d36fe7ff87847833db3b207cf95
[ "BSD-3-Clause" ]
null
null
null
sam_dynamics/notebooks/dynamics.ipynb
cisprague/sam_common
f477c8bb4fee3d36fe7ff87847833db3b207cf95
[ "BSD-3-Clause" ]
null
null
null
sam_dynamics/notebooks/dynamics.ipynb
cisprague/sam_common
f477c8bb4fee3d36fe7ff87847833db3b207cf95
[ "BSD-3-Clause" ]
1
2021-05-04T09:48:56.000Z
2021-05-04T09:48:56.000Z
177.979817
67,325
0.380478
true
38,702
Qwen/Qwen-72B
1. YES 2. YES
0.913677
0.785309
0.717518
__label__kor_Hang
0.109897
0.505366
Enable Equation Numbering in $\LaTeX$ ```javascript %%javascript MathJax.Hub.Config({ TeX: { equationNumbers: { autoNumber: "AMS" } } }); ``` <IPython.core.display.Javascript object> ## Math Fundamentals First we define this definition \begin{equation} (\vec{w} \cdot \vec{u} + b) \geq 0 \implies + \end{equation} We add $y_i$ for mathematical convenience \begin{equation} y_i ( \vec{x}_i \cdot \vec{w} + b) -1 = 0 \end{equation} Calculate the width of the street \begin{equation} width = (\vec{x}_+ - \vec{x}_-) \cdot \frac{\vec{w}}{||\vec{w}||} \end{equation} $$ width = \frac{\vec{x}_+ \cdot \vec{w} - \vec{x}_- \cdot \vec{w}}{||\vec{w}||}$$ $$ \vec{x}_i \cdot \vec{w} = \frac{1}{y_i} - b $$ $$ \vec{x}_+ \cdot \vec{w} = 1 - b , \vec{x}_- \cdot \vec{w} = -1 - b $$ $$ width = \frac{1 - b - (-1 - b)}{||\vec{w}||}$$ $$ width = \frac{1 - b + 1 + b}{||\vec{w}||}$$ And we end up with \begin{equation} width = \frac{2}{||\vec{w}||} \end{equation} Now we have to maximaze that width so: $$ Max(width) \rightarrow Max \big( \frac{2}{||\vec{w}||} \big) \rightarrow Max \big( \frac{1}{||\vec{w}||} \big) $$ $$ Max \big( \frac{1}{||\vec{w}||} \big) \rightarrow Min (||\vec{w}||) \rightarrow Min \big( \frac{1}{2} ||\vec{w}|| \big) \rightarrow Min \big( \frac{1}{2} {||\vec{w}||}^2 \big)$$ Applying Lagrange Multipliers \begin{equation} L = \frac{1}{2} {||\vec{w}||}^2 - \sum{ \alpha_i [y_i ( \vec{x}_i \cdot \vec{w} + b) -1]} \end{equation} $$ \frac{\partial L}{\partial \vec{w}} = \vec{w} - \sum{\alpha_i y_i x_i} = 0 \implies \vec{w} = \sum{\alpha_i y_i x_i} $$ $$ \frac{\partial L}{\partial b} = - \sum{\alpha_i y_i} = 0 \implies \sum{\alpha_i y_i} = 0 $$ \begin{equation} L = \frac{1}{2} \big( \sum{\alpha_i y_i \vec{x}_i}\big) \big(\sum{\alpha_i y_i \vec{x}_i}\big) - \sum{ \alpha_i [y_i ( \vec{x}_i \cdot \sum{\alpha_i y_i \vec{x}_i} + b) -1]} \end{equation} \begin{equation} L = \frac{1}{2} \big( \sum{\alpha_i y_i \vec{x}_i}\big) \big(\sum{\alpha_i y_i \vec{x}_i}\big) - \big( \sum{ \alpha_i y_i \vec{x}_i} \big) \cdot \big(\sum{ \alpha_i y_i \vec{x}_i} \big) - \sum{ \alpha_i y_i b} + \sum{ \alpha_i} \end{equation} \begin{equation} L = \sum{ \alpha_i} - \frac{1}{2} \sum_i \sum_j \alpha_i \alpha_j y_i y_j \vec{x}_i \cdot \vec{x}_j \end{equation} \begin{equation} \sum{\alpha_i y_i \vec{x}_i} \cdot \vec{u} + b \geq 0 \implies + \end{equation} So we conclude that the minimization is basically the dot product ## Initialization ```python import pandas as pd import matplotlib.pyplot as plt import bqplot.pyplot as bplt import numpy as np ``` ```python class Support_Vector_Machine(): def __init__(self, visualization=True): self.visualization = visualization self.colors = {1: 'r', -1: 'b'} if self.visualization: self.fig = plt.figure() self.ax = self.fig.add_subplot(1, 1, 1) def fit(self, data): self.data = data # { ||w|| : [w, b]} opt_dict = {} transforms = [ [1, 1], [-1, 1], [-1, -1], [1, -1], ] all_data = np.array(list(self.data.values())).flatten() self.max_feature_value = max(all_data) self.min_feature_value = min(all_data) all_data = None step_sizes = [self.max_feature_value * 0.1, self.max_feature_value * 0.01, self.max_feature_value * 0.001, ] b_range_multiple = 5 b_multiple = 5 latest_optimum = self.max_feature_value * 10 limit = self.max_feature_value * b_range_multiple for step in step_sizes: w = np.array([latest_optimum, latest_optimum]) while True: for b in np.arange(-limit, limit, step * b_multiple): for transformation in transforms: w_t = w * transformation found_option = True # weakest link # y_i(x_i.w +b) >= 1 for i 
in self.data: for xi in self.data[i]: yi = i if not yi * (np.dot(w_t, xi) + b) >= 1: found_option = False break else: continue break if found_option: opt_dict[np.linalg.norm(w_t)] = [w_t, b] if w[0] < 0: break w = w - step norms = sorted(n for n in opt_dict) self.w, self.b = opt_dict[norms[0]] latest_optimum = self.w[0] - step * 2 for i in self.data: for xi in self.data[i]: print(f'{xi} : {yi * (np.dot(self.w, xi) + self.b)}') print(f'b: {self.b} | w: {self.w}') print(f'||w|| : {np.linalg.norm(self.w)}') if np.dot(self.w, np.array([1,0])) == 0: print("angle:",90) else: print(f'angle: {np.degrees(np.arctan(self.w[1]/self.w[0]))}') def predict(self, features): classification = np.sign(np.dot(np.array(features), self.w) + self.b) if classification != 0 and self.visualization: self.ax.scatter(features[0], features[1], marker='*', s=200, c=self.colors[classification]) return classification def visualize(self): [[self.ax.scatter(x[0], x[1], s=100, color=self.colors[i]) for x in data_dict[i]] for i in data_dict] xs, ys = zip(*[j for i in data_dict.values() for j in i]) x_max, y_max = max(xs), max(ys) x_min, y_min = min(xs), min(ys) x_margin = x_max*0.2 y_margin = y_max*0.2 xlist = np.linspace(x_min-x_margin, x_max+x_margin, 100) ylist = np.linspace(y_min-y_margin, y_max+y_margin, 100) X,Y = np.meshgrid(xlist, ylist) F = self.w[0]*X + self.w[1]*Y + self.b # Hyperplane Equation plt.contour(X, Y, F, [1], colors = 'r', linestyles = 'solid') plt.contour(X, Y, F, [-1], colors = 'b', linestyles = 'solid') plt.contour(X, Y, F, [0], colors = 'k', linestyles = 'dashed') plt.show() ``` ```python all(np.arange(-100, 100, 1) + 5 > 50) ``` False ```python data_dict = { -1:[ [1,7], [2,8], [3,8], ], 1:[ [5, 1], [6, -1], [7, 3], ] } ``` ```python %timeit np.array([j for i in [[key] * len(values) for key, values in data_dict.items()] for j in i]) ``` 4.65 µs ± 156 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) ```python %timeit np.array([[key] * len(values) for key, values in data_dict.items()]).flatten() ``` 6.57 µs ± 70.8 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) ```python %%timeit yi = [] for key, lenght in zip(data_dict.keys(), map(len, data_dict.values())): yi.append([key] * lenght) np.array(yi).flatten() ``` 7.36 µs ± 176 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) ```python yi = np.array([j for i in [[key] * len(values) for key, values in data_dict.items()] for j in i]) yi = np.array([[key] * len(values) for key, values in data_dict.items()]).flatten() yi ``` array([-1, -1, 1, 1]) ```python %timeit np.array([j for i in [values for key, values in data_dict.items()] for j in i]) ``` 5.81 µs ± 222 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) ```python %timeit np.array([j for i in [values for values in data_dict.values()] for j in i]) ``` 5.35 µs ± 42.4 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) ```python %timeit [i for values in data_dict.values() for i in values] ``` 875 ns ± 35 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) ```python xi = np.array([j for i in [values for key, values in data_dict.items()] for j in i]) xi = [i for values in data_dict.values() for i in values] xi ``` [[0, 0], [1, 0], [0.8660254, 0.5], [2, 2]] ```python np.array([k for i in list(data_dict.values()) for j in i for k in j]) ``` array([ 0. , 0. , 1. , 1. 
, 0.8660254, 0.5 ]) ```python [3] * 3 ``` [3, 3, 3] ```python [j for i in [values for key, values in data_dict.items()] for j in i] ``` [array([0, 0]), array([1, 1]), array([ 0.8660254, 0.5 ])] ```python a = np.array([1,1]) b = np.array([ [1,1], [0,1], [0,0], [1,1], [1,1], ]) np.dot(b, a) > 1 ``` array([ True, False, False, True, True], dtype=bool) ```python svm = Support_Vector_Machine() svm.fit(data_dict) predict_us = [ [0, 4], [1, 3], [3, 4], [3, 5], [5, 5], [5, 6], [6, -2], [5, 8], ] for p in predict_us: svm.predict(p) svm.visualize() ``` ```python svm = Support_Vector_Machine() svm.fit(data_dict) svm.visualize() ``` ```python svm = Support_Vector_Machine() svm.fit(data_dict) svm.visualize() ``` ```python v = np.array([1, 0]) angle = 60 angle_r = np.radians(angle) m = np.array([ [np.cos(angle_r), -np.sin(angle_r)], [np.sin(angle_r), np.cos(angle_r)] ]) nv = np.matmul(m, v.T) nv[np.abs(nv) < 1e-3] = 0 nv ``` array([ 0.5 , 0.8660254]) ```python v = np.array([1, 1]) m = np.array([0.8660254, 0.5]) d = m * v d, np.degrees(np.arctan(d[1]/d[0])) ``` (array([ 0.8660254, 0.5 ]), 30.000000108416181) ```python import numpy as np def func(X): w = X[0] b = X[1] L = X[2] # this is the multiplier. lambda is a reserved keyword in python return 1/np.linalg.norm(w) + L * () ``` ```python def dfunc(X): dLambda = np.zeros(len(X)) h = 1e-3 # this is the step size used in the finite difference. for i in range(len(X)): dX = np.zeros(len(X)) dX[i] = h dLambda[i] = (func(X+dX)-func(X-dX))/(2*h); return dLambda ``` ```python from scipy.optimize import fsolve X1 = fsolve(dfunc, [1, 1, 0]) print(X1, func(X1)) # this is the min X2 = fsolve(dfunc, [-1, -1, 0]) print(X2, func(X2)) ``` [ 0.70710678 0.70710678 -0.70710678] 1.41421356237 [-0.70710678 -0.70710678 0.70710678] -1.41421356237
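Returning to the toy classifier above, the from-scratch result can be cross-checked against an off-the-shelf linear SVM. This is only a sketch: it reuses the `data_dict` defined earlier and uses a large `C` so that scikit-learn's soft-margin `SVC` approximates the hard-margin problem solved here.

```python
# Cross-check sketch: fit scikit-learn's linear SVC on the same toy data and
# compare its (w, b) with the from-scratch solution above.
from sklearn.svm import SVC

X = np.array([x for label in data_dict for x in data_dict[label]])
y = np.array([label for label in data_dict for _ in data_dict[label]])

clf = SVC(kernel='linear', C=1e6).fit(X, y)
print(clf.coef_[0], clf.intercept_[0])  # compare with svm.w and svm.b
```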
e9d1165a43c48e802bb223d3140fd06d0dededc4
69,273
ipynb
Jupyter Notebook
notebooks/Support Vector Machine.ipynb
ELC/ML-Tutorial
05d4d4e424976b245fa6bf05b60dfc90109e3782
[ "MIT" ]
null
null
null
notebooks/Support Vector Machine.ipynb
ELC/ML-Tutorial
05d4d4e424976b245fa6bf05b60dfc90109e3782
[ "MIT" ]
null
null
null
notebooks/Support Vector Machine.ipynb
ELC/ML-Tutorial
05d4d4e424976b245fa6bf05b60dfc90109e3782
[ "MIT" ]
null
null
null
76.040615
19,544
0.784793
true
3,453
Qwen/Qwen-72B
1. YES 2. YES
0.899121
0.79053
0.710783
__label__eng_Latn
0.395508
0.489718
```python from sympy import * init_printing(use_latex='mathjax') Re,r,G,rho,eta,v_x,tau_xx,L_x,lam,tau,k,x = symbols('Re r G rho eta v_x tau_xx L_x lambda tau k x', positive=True) v0,p0,tau_xx0 = symbols('v0 p0 tau_xx0') ``` ```python K = r*G # bulk modulus from modulus ratio r rho = solve(Re - rho*sqrt((K+2*G)/rho)*L_x/eta,rho)[0] # density from Reynolds number Re V_p = sqrt((K+2*G)/rho) # velocity scale - primary wave velocity ``` ```python fun = v0*exp(-lam*tau*V_p/L_x)*sin(pi*k*x/L_x) v = fun p = fun tau_xx = fun ``` ```python eq1 = 1/K*diff(p,tau) + diff(v,x) # mass balance eq2 = rho*diff(v,tau) + diff(p-tau_xx,x) # momentum balance eq3 = 1/G*diff(tau_xx,tau) + 1/eta*tau_xx - 2*diff(v,x) # Maxwell rheological model ``` ```python disp = diff(diff(eq2,tau)/G + diff(eq3,x) + eq2/eta - diff(eq1,x)*K/G,tau)*eta/rho - diff(eq1,x)*K/rho disp = expand(disp/fun) disp = expand(simplify(disp/disp.coeff(lam**3))) ``` ```python sol = solve(disp,lam) disc = discriminant(disp,lam) r_opt = solve(discriminant(disc,Re),r)[0] Re_opt = solve(disc.subs(r,r_opt),Re)[0].subs(k,1) r_opt,Re_opt ``` $\displaystyle \left( \frac{1}{4}, \ \frac{9 \sqrt{3} \pi}{4}\right)$ ```python from sympy import maximum, lambdify import numpy as np import warnings warnings.filterwarnings('ignore') lam = [re(s.subs(k,1)) for s in sol] lamf = lambdify([Re,r],lam,"numpy") Re1 = np.linspace(float(Re_opt)/2,float(Re_opt)*3/2,1000) r1 = np.linspace(0,2*float(r_opt),1000) [Re2,r2] = np.meshgrid(Re1,r1) lam2 = np.stack(lamf(Re2,r2),axis=0) lam2 = np.nanmin(lam2,axis=0) ``` ```python %matplotlib notebook from matplotlib import pyplot as plt from matplotlib import cm plt.rcParams.update({"text.usetex": True, "font.size": 14}) f = plt.figure(figsize=(7,5)) ax = plt.subplot() pos = ax.contourf(Re2,r2,lam2,17,cmap=cm.cool) ax.contour(Re2,r2,lam2,17,linewidths=1,colors='k') ax.axhline(y=r_opt.evalf(),ls='--',c='w') ax.axvline(x=Re_opt.evalf(),ls='--',c='w') ax.set_xlabel("$Re$"); ax.set_ylabel("$r$") ax.set_xticks([float(Re_opt)/2,float(Re_opt),float(Re_opt)*3/2]) ax.set_yticks([0,float(r_opt),2*float(r_opt)]) ax.set_xticklabels(["$9\sqrt{3}\pi/8$","$9\sqrt{3}\pi/4$","$9\sqrt{3}\pi/2$"]) ax.set_yticklabels(["$0$","$1/4$","$1/2$"]) ax.set_aspect(1.0/ax.get_data_ratio(), adjustable='box') cbar = f.colorbar(pos,ax=ax) cbar.set_label("$\mathrm{min}\{\Re(\lambda_k)\}$") ``` <IPython.core.display.Javascript object> ```python ```
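As a quick numerical sanity check (a sketch reusing `lamf`, `Re_opt`, `r_opt` and `lam2` from the cells above), the slowest decay rate evaluated at the analytic optimum should agree, up to grid resolution, with the best value found on the scanned `(Re, r)` grid.

```python
# Sanity-check sketch: min Re(lambda_k) at the analytic optimum versus the
# best (largest) value found on the scanned grid; the two should be close.
lam_at_opt = np.nanmin(lamf(float(Re_opt), float(r_opt)))
print(lam_at_opt, np.nanmax(lam2))
```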
0b1bf158e3bd3456f8b44f96bbe93785f93f262b
588,692
ipynb
Jupyter Notebook
dispersion_analysis/dispersion_analysis_stokes1D.ipynb
PTsolvers/PseudoTransientStokes.jl
894f32b5110bcb0c878782465fd6b2c9b0cbbcff
[ "MIT" ]
1
2021-12-06T19:24:50.000Z
2021-12-06T19:24:50.000Z
dispersion_analysis/dispersion_analysis_stokes1D.ipynb
PTsolvers/PseudoTransientStokes.jl
894f32b5110bcb0c878782465fd6b2c9b0cbbcff
[ "MIT" ]
null
null
null
dispersion_analysis/dispersion_analysis_stokes1D.ipynb
PTsolvers/PseudoTransientStokes.jl
894f32b5110bcb0c878782465fd6b2c9b0cbbcff
[ "MIT" ]
null
null
null
517.304042
539,051
0.927429
true
935
Qwen/Qwen-72B
1. YES 2. YES
0.899121
0.715424
0.643253
__label__eng_Latn
0.131718
0.332823
# Taylor integration of the Kepler problem Here, we try to reproduce __exactly__ the [Kepler problem integration example](http://nbviewer.jupyter.org/github/JuliaDiff/TaylorSeries.jl/blob/master/examples/1-KeplerProblem.ipynb) made by Luis Benet in [JuliaDiff/TaylorSeries.jl](https://github.com/JuliaDiff/TaylorSeries.jl). The Kepler problem is the basis of planetary motion; it describes the motion of a secondary body (e.g., a planet, asteroid, comet, etc.), around a primary body (e.g., the Sun). In cartesian coordinates over the orbital plane, the Hamiltonian for the Kepler problem reads: $$ \begin{align} H_{\mathrm{Kepler}} &= \frac{1}{2\mu}(p_x^2+p_y^2)-\frac{\mu}{\sqrt{x^2+y^2}} \end{align} $$ where $\mu=G(m_1+m_2)$, $G$ is the gravitational constant, $m_1$ is the mass of the primary body and $m_2$ is the mass of the secondary body. If we write $\vec r_1 = (x_1,y_1)$ and $\vec r_2 = (x_2,y_2)$, respectively, for the position of the primary and secondary body, then the vector $\vec r = \vec r_2-\vec r_1$, with coordinates $\vec r = (x, y)=(x_2-x_1,y_2-y_1)$, represents the position of the secondary body relative to the primary body; i.e., $\vec r$ represents the so-called relative coordinates. In terms of the vector $\vec{r}$, the position $\vec{r}=0$ corresponds to the position of the primary body. Using Hamilton equations, we can obtain the equations of motion for the Kepler problem: $$ \begin{align} \dot x &= u \\ \dot y &= v \\ \dot u &= -\frac{\mu x}{(x^2+y^2)^{3/2}}\\ \dot v &= -\frac{\mu y}{(x^2+y^2)^{3/2}} \end{align} $$ Note that the canonical momenta are $p_x$ and $p_y$, while $u$ and $v$ are, respectively, the $x$ and $y$ components of the velocity; i.e., $p_x = \mu u$ and $p_y = \mu v$. First of all, we shall include all relevant packages: ```julia using TaylorIntegration, Plots, LaTeXStrings pyplot() ``` Fontconfig warning: ignoring UTF-8: not a valid region tag Plots.PyPlotBackend() Some parameters necessary for the integration: + $\mu$: the gravitational parameter + `q0`: the initial condition (we will select an initial condition which corresponds to elliptical motion) + `order`: the order of the Taylor expansion + `t_max`: the final time of the integration + `abs_tol`: the absolute tolerance + `n_iter`: the number of time-steps ```julia const μ = 1.0 const q0 = [0.19999999999999996, 0.0, 0.0, 3.0] # a initial condition for elliptical motion const order = 28 const t0 = 0.0 const t_max = 10000*(2π) # we are just taking a wild guess about the period ;) const abs_tol = 1.0E-20 const steps = 500000 ``` 500000 As usual, we write down the equations of motion into a `function`, which here we will name `kepler!`. `q` represents the system state; i.e., the set of values of the dynamical variables at a given instant; `dq` represents the time derivatives of the components of `q`. ```julia #an auxiliary array which helps with optimization: const r_p3d2 = Array{TaylorSeries.Taylor1{Float64}}(1) #the equations of motion for the Kepler problem: function kepler!(t, q, dq) r_p3d2[1] = (q[1]^2+q[2]^2)^(3/2) dq[1] = q[3] dq[2] = q[4] dq[3] = -μ*q[1]/r_p3d2[1] dq[4] = -μ*q[2]/r_p3d2[1] nothing end ``` kepler! 
(generic function with 1 method) The Taylor integration: ```julia t, q = taylorinteg(kepler!, q0, t0, 0.01, order, abs_tol, maxsteps=2); #warm-up lap @time t, q = taylorinteg(kepler!, q0, t0, t_max, order, abs_tol, maxsteps=steps); ``` 43.060857 seconds (464.94 M allocations: 39.523 GB, 12.87% gc time) The final state: ```julia t[end]/(2pi), q[end,:] ``` (10000.0,[0.2,2.72854e-9,-2.27378e-8,3.0]) Let's extract the values of $x$, $y$, $u$ and $v$ for each time-step: ```julia x, y, u, v = view(q,:,1), view(q,:,2), view(q,:,3), view(q,:,4); ``` ## Orbital motion The initial conditions we selected correspond to a elliptical orbit with a relatively high eccentricity: $e=0.8$ for the initial condition `q0=[0.19999999999999996, 0.0, 0.0, 3.0]`. How does the motion of the planet/asteroid/comet looks like? Well, let's plot its orbit over the $x-y$ plane (the orbit is shown in blue; the position of the primary body is shown as a yellow dot): ```julia scatter( [0.0], [0.0], label = L"\mathrm{Center\; of\; attraction}", ms=10 ) scatter!( x[1:75:end], y[1:75:end], title = L"\mathrm{Orbital\;\;motion}", xaxis = (L"x", (-2.0, 0.5), -2.0:0.5:2.0), yaxis = (L"y", (-0.8, 0.8), -2.0:0.5:2.0), label = L"\mathrm{Keplerian\; ellipse}", ms=0.5 ) ``` ## Energy conservation Below, we write the energy function of the Kepler problem; i.e., the Hamiltonian $H_{\mathrm{Kepler}}$ in terms of $x$, $y$, $u$ and $v$: ```julia H(x_, y_, u_, v_) = 0.5*(u_^2+v_^2)-μ/sqrt(x_^2+y_^2) ``` H (generic function with 1 method) Now, using the function `H` defined above, we calculate the energy during each time-step: ```julia E = H.(x, y, u, v); ``` $E_0=H_{\mathrm{Kepler}}(x_0,y_0,u_0,v_0)$, the initial value of the energy, is: ```julia E0=E[1] ``` -0.5000000000000009 We define $\delta E$ as the relative error in the energy; i.e.: $$ \begin{align} \delta E(t) &= \frac{E(t)-E_0}{E_0} \end{align} $$ ```julia δE = (E-E0)/(E0) ``` 436153-element Array{Float64,1}: -0.0 1.77636e-15 1.77636e-15 8.88178e-16 8.88178e-16 2.66454e-15 3.55271e-15 2.66454e-15 3.55271e-15 2.66454e-15 3.9968e-15 2.66454e-15 3.10862e-15 ⋮ -7.12763e-14 -7.19425e-14 -7.14984e-14 -7.10543e-14 -7.19425e-14 -7.10543e-14 -7.10543e-14 -6.92779e-14 -7.01661e-14 -6.92779e-14 -6.92779e-14 -7.10543e-14 The analytical solution preserves the energy; i.e., $\delta E(t)=0$ for the analytical solution. Thus, we expect our solution to be close to zero. So, even if the $\delta E$ is not perfectly zero during the whole integration, it is comparable to Julia's `Float64` machine-epsilon. Below, we plot $\delta E$ in units of Julia's `Float64` machine-epsilon as a function of time, $t$. In Julia, the machine-epsilon for the `Float64` type has a value ```julia eps(Float64) ``` 2.220446049250313e-16 ```julia plot( t/(2π), δE/eps(Float64), title = L"\mathrm{Energy\;\; relative\;\; error\;\; vs\;\; time}", xaxis = (L"t\mathrm{\,(orbital\;\;periods)}"), yaxis = (L"\delta E \mathrm{\;\;(machine\;\;epsilons)}"), label = L"\mathrm{Energy\;\; relative\;\; error\;\; vs\;\; time}" ) ``` Now, how does the energy error distribute around zero? ```julia histogram( δE/eps(Float64), title = L"\mathrm{Distribution\;\;of\;\;energy\;\;relative\;\;error}", xaxis = (L"\delta E"), yaxis = (L"\mathrm{Number\;\;of\;\;}\delta E\mathrm{\;\;values\;\;within\;\;bin\;\;range}"), leg = false, nbins=20 ) ``` INFO: binning = 20  The energy error behaves roughly as a random walk, which means that the numerical error in the energy is dominated by rounding errors. 
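As a consistency check, the conserved energy already fixes the size of the orbit and hence its period. With $\mu=1$ and $E_0=-0.5$,

$$
\begin{align}
a &= -\frac{\mu}{2E_0} = 1, \qquad T = 2\pi\sqrt{\frac{a^3}{\mu}} = 2\pi,
\end{align}
$$

so `t_max = 10000*(2π)` corresponds to exactly $10^4$ orbital periods, consistent with the final value `t[end]/(2pi) = 10000.0` obtained above.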
## Angular momentum conservation

Analogously to the energy, we now focus on the angular momentum, which is preserved by the analytical solution too. The value of the angular momentum is given by
$$
\begin{align}
L &= xv-yu
\end{align}
$$
We write the angular momentum function as:

```julia
ang_mom(x_, y_, u_, v_) = x_.*v_-y_.*u_
```

    ang_mom (generic function with 1 method)

So the angular momentum during each time-step is:

```julia
L = ang_mom(x, y, u, v);
```

$L_0=x_0v_0-y_0u_0$, the initial value of the angular momentum, is:

```julia
L0 = L[1]
```

    0.5999999999999999

We define $\delta L$ as the relative error in the angular momentum; i.e.:
$$
\begin{align}
\delta L(t) &= \frac{L(t)-L_0}{L_0}
\end{align}
$$

```julia
δL = (L-L0)/L0;
```

Just as we did for the energy, we now plot $\delta L$ in units of `eps(Float64)` vs $t$:

```julia
plot(
    t/(2π), δL/eps(Float64),
    title = L"\mathrm{Angular\;\;momentum\;\; relative\;\; error\;\; vs\;\; time}",
    xaxis = (L"t\mathrm{\,(orbital\;\;periods)}"),
    yaxis = (L"\delta L \mathrm{\;\;(machine\;\;epsilons)}"),
    label = L"\mathrm{Angular\;\;momentum\;\; relative\;\; error\;\; vs\;\; time}",
    leg=false,
    color=:orange
)
```

Again, we see that the angular momentum relative error is comparable to `eps(Float64)`; the maximum variation is ~$150$ machine epsilons. What does the distribution of this error look like?

```julia
histogram(
    δL/eps(Float64),
    title = L"\mathrm{Distribution\;\;of\;\;angular\;\;momentum\;\;relative\;\;error}",
    xaxis = (L"\delta L"),
    yaxis = (L"\mathrm{Number\;\;of\;\;}\delta L\mathrm{\;\;values\;\;within\;\;bin\;\;range}"),
    leg = false,
    nbins=20,
    color=:orange
)
```

    INFO: binning = 20

The distribution of $\delta L$ shows a peak near zero and is roughly symmetrical around this value. This means that the error in the angular momentum is also dominated by rounding errors of floating-point arithmetic.

Lastly, we reproduce __exactly__ the last plot shown in the original version of this example, authored by Luis Benet and included in `JuliaDiff/TaylorSeries.jl`'s [Kepler problem integration example](http://nbviewer.jupyter.org/github/JuliaDiff/TaylorSeries.jl/blob/master/examples/1-KeplerProblem.ipynb) Jupyter notebook. A $\delta E$, $\delta L$ plot vs $t$:

```julia
plot(
    [t/(2π), t/(2π)], [δE/eps(Float64), δL/eps(Float64)],
    title = L"\delta E\mathrm{\;\;(blue),\;\;}\delta L\mathrm{\;\;(green)\;\;vs\;\;time}",
    xaxis = (L"t\mathrm{\,(orbital\;\;periods)}"),
    yaxis = (L"\delta E\mathrm{,\;\;}\delta L\;\;\mathrm{(machine\;\;epsilons)}"),
    leg=false,
)
```

```julia

```
dff4432ce359d1fd69fc00eecf9675eecd7c0378
295,947
ipynb
Jupyter Notebook
examples/Kepler-problem.ipynb
SebastianM-C/TaylorIntegration.jl
f3575ee1caba43e21312062d960613ec2ccba325
[ "MIT" ]
72
2016-09-22T22:32:12.000Z
2022-03-23T13:35:18.000Z
examples/Kepler-problem.ipynb
SebastianM-C/TaylorIntegration.jl
f3575ee1caba43e21312062d960613ec2ccba325
[ "MIT" ]
132
2016-09-21T05:43:08.000Z
2022-03-15T02:55:17.000Z
examples/Kepler-problem.ipynb
SebastianM-C/TaylorIntegration.jl
f3575ee1caba43e21312062d960613ec2ccba325
[ "MIT" ]
20
2016-09-24T04:37:11.000Z
2022-03-25T13:48:07.000Z
382.85511
68,967
0.936323
true
3,290
Qwen/Qwen-72B
1. YES 2. YES
0.92079
0.812867
0.74848
__label__eng_Latn
0.883585
0.577301
```python import numpy as np import matplotlib.pyplot as plt from scipy.stats import beta, gamma, norm, binom, uniform, t import numdifftools as nd from sklearn.linear_model import LinearRegression as linreg import sympy as sym from scipy.optimize import brentq, minimize from datetime import datetime as dt ``` * Please email me (ianzhang@connect.hku.hk) if you find any mistakes in the script. # Bayesian Inference $\pi(\theta|\mathbf{x})\propto L(\theta|\mathbf{x})\pi(\theta)=\prod_{i=1}^nf(x_i|\theta)\pi(\theta)$, what will happen if $n$ is very large? ## Normal example Assume model $f(x|\mu)=N(x|\mu, \tau)$ and prior $\pi(\mu)=N(\mu|\mu_0, \tau_0)$, where $\tau$ and $\tau_0$ are the precisions (reciprocal of the variance). With $n$ samples $\mathbf{x}=(x_1,\dots,x_n)$ from the model, we have the posterior: $$ \pi(\mu|\mathbf{x})=N(\mu|\frac{n\tau\overline{x}+\tau_0\mu_0}{n\tau+\tau_0}, n\tau+\tau_0) $$ A prior is just like the regularization in Machine Learning. It serves as bias–variance tradeoff method and always results in more bias but less variance. ## Beta prior and Binomial model (Conjugate prior example) Assume model $f(y|p)=Binomial(y|n,p)$ and prior $\pi(p)=Beta(p|\alpha, \beta)$. With $1$ sample $y$ from the Binomial model (or equivalently $n$ samples $\mathbf{x}=(x_1,\dots,x_n)$ ($y=\sum_{i=1}^nx_i)$ from a Bernoulli model $f(x|p)=Bernoulli(x|p)$), we have the posterior: $$ \pi(p|y)=\pi(p|\mathbf{x})=Beta(p|\alpha+y, \beta+n-y) $$ ```python np.random.seed(19971107) a, b = 0.5, 0.5 p = 0.7 fig, axs = plt.subplots(2, 2, figsize = (15, 10)) axs = axs.flatten() xx = np.linspace(0, 1, 1000) axs[0].plot(p, 0, 'x', label='true p') axs[0].plot(xx, beta.pdf(x=xx, a=a, b=b), label='prior') for i, n in enumerate([10, 50, 1000]): y = binom.rvs(n=n, p=p) posterior = beta(a=a+y, b=b+n-y) axs[0].plot(xx, posterior.pdf(xx), label='posterior (n={})'.format(n)) loc = posterior.mean() scale = posterior.std() xx_zoomin = np.linspace(loc - 4*scale, loc + 4*scale, 1000) axs[i+1].plot(xx_zoomin, posterior.pdf(xx_zoomin), label='posterior (n={})'.format(n)) axs[i+1].plot(xx_zoomin, norm.pdf(xx_zoomin, loc=loc, scale=scale), label='normal'.format(n)) for ax in axs: ax.legend() plt.show() ``` Under certain regularity conditions, the posterior distribution approaches a Gaussian as the number of samples grows (refer to this book http://www.stat.columbia.edu/~gelman/book/). This justifies the application of Laplace's method. **Think further:** Can you construct a simple example where this Gaussian phenomenon wouldn't appear? Is there any other Gaussian phenomenon appear in your example? ## Laplace approximation Assume the data set is big enough to make the posterior $\pi(\theta|\textbf{x})$ looks like a normal. Set $\theta^*$ to be the posterior mode, which means $\frac{\partial}{\partial\theta}\pi(\theta^*|\textbf{x})=0$. 
We know that there exists a $\sigma^2$ so that $$ \pi(\theta|\textbf{x})\approx\frac{\pi(\theta^*|\textbf{x})}{N(\theta^*|\theta^*, \sigma^2)}N(\theta|\theta^*, \sigma^2) $$ To derive the $\sigma^2$, we take a $\log$ on both sides and have the Taylar's expansion (https://en.wikipedia.org/wiki/Taylor_series) of the left side of the above equation at $\theta^*$: $$ \log\pi(\theta^*|\textbf{x})+\frac{\partial}{\partial\theta}\log\pi(\theta^*|\textbf{x})(\theta-\theta^*)+\frac{1}{2}\frac{\partial^2}{\partial\theta^2}\log\pi(\theta^*|\textbf{x})(\theta-\theta^*)^2\approx\log\pi(\theta|\textbf{x})\approx\log\pi(\theta^*|\textbf{x})-\frac{1}{2\sigma^2}(\theta-\theta^*)^2 $$ Note that $\frac{\partial}{\partial\theta}\log\pi(\theta^*|\textbf{x})=0$, so if we take $\sigma^2=(-\frac{\partial^2}{\partial\theta^2}\log\pi(\theta^*|\textbf{x}))^{-1}$, all kinds of inference based on $\pi(\theta|\textbf{x})$ can be approximated based on the normal distribution (multiplied by a constant). For example: $$ \mathbb{E}_{\pi(\theta|\mathbf{x})}[h(\Theta)]\approx\frac{\pi(\theta^*|\textbf{x})}{N(\theta^*|\theta^*, \sigma^2)}\mathbb{E}_{N(\theta|\theta^*, \sigma^2)}[h(\Theta)] $$ for a function $h(\theta)$. Another interesting thing is that, as, on the above reasoning, we didn't use the condition that the posterior was normalized. If $\pi(\theta|\textbf{x})$ is unnormalized, we can actually set $h(\theta)=1$ to approximate the normalizing constant $$ Z\approx\hat{Z}=\frac{\pi(\theta^*|\textbf{x})}{N(\theta^*|\theta^*, \sigma^2)} $$ * Try using the Laplace approximation to estimate the normalizing constant of the beta posterior $Beta(p|\alpha+y, \beta+n-y)$, which is actually already normalized. Draw the absolute error $|\hat{Z}-1|$ against $n$ based on the loglog plot (https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.loglog). What is the convergence rate? (hints: you can use the 'numdifftools' package (https://pypi.org/project/numdifftools/) and 'LinearRegression' from 'sklearn.linear_model' (https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression); if $err = cn^{-r}$, $\log(err)=\log(c)-r\log(n)$) ```python np.random.seed(19971107) ns = (10 ** np.linspace(1, 5, 50)).astype(np.int64) err = np.zeros(ns.size) for i, n in enumerate(ns): y = binom.rvs(n=n, p=p) posterior = beta(a=a+y, b=b+n-y).pdf p_star = (a+y-1) / (a+b+n-2) neglogposterior = lambda p: -np.log(posterior(p)) sigma2 = 1 / nd.Derivative(neglogposterior, n=2, step=1e-5)(p_star) normal = norm(loc = p_star, scale = np.sqrt(sigma2)).pdf Z_hat = posterior(p_star) / normal(p_star) err[i] = np.abs(Z_hat - 1) fit = linreg().fit(np.log(ns).reshape([-1, 1]), np.log(err)) c, r = np.exp(fit.intercept_), -fit.coef_[0] print('c = {:.4f}, r = {:.4f}'.format(c, r)) fig, ax = plt.subplots(figsize = (9, 5)) ax.loglog(ns, err) ax.loglog(ns, np.exp(fit.predict(np.log(ns).reshape([-1, 1])))) ax.set_xlabel('$\log(n)$') ax.set_ylabel('$\log(|\hat{Z}-1|)$') plt.show() ``` **Think further:** Is this convergence rate true for other estimations? Try some examples. 
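As a small, hedged follow-up to the exercise above (this cell is my addition, not part of the original notebook): the same Laplace machinery can be used to approximate a posterior expectation, here $h(\theta)=\theta$, and compared with the exact mean of the beta posterior. The variable names mirror the cell above.

```python
# Sketch only: Laplace approximation of a posterior mean, assuming the same
# beta-binomial setup (a, b, p) defined earlier in this notebook.
np.random.seed(19971107)
n = 1000
y = binom.rvs(n=n, p=p)
posterior = beta(a=a+y, b=b+n-y)                    # already normalized, used as reference
p_star = (a+y-1) / (a+b+n-2)                        # posterior mode
neglogpost = lambda q: -np.log(posterior.pdf(q))
sigma2 = 1 / nd.Derivative(neglogpost, n=2, step=1e-5)(p_star)
approx = norm(loc=p_star, scale=np.sqrt(sigma2))
Z_hat = posterior.pdf(p_star) / approx.pdf(p_star)  # ~1, since the posterior is normalized
mean_laplace = Z_hat * approx.mean()                # E[h] under the Gaussian, scaled by Z_hat
print('Laplace mean: {:.6f}, exact mean: {:.6f}'.format(mean_laplace, posterior.mean()))
```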
# Monte Carlo ## (Naive) Monte Carlo Integration ```python x = sym.Symbol('x') h = (x+sym.sin(20*x)+sym.cos(1000*x))**2 h_int = sym.integrate(h) print('h(x) =') display(h) print('Integral of h(x) =') display(h_int) h = sym.lambdify(x, h) h_int = sym.lambdify(x, h_int) mu = h_int(1) - h_int(0) print('Integrate h(x) from 0 to 1: {}'.format(mu)) xx = np.linspace(0, 1, 1000) fig, ax = plt.subplots(figsize = (9, 5)) ax.plot(xx, h(xx), label='$h(x)$') ax.legend() plt.show() ``` For the highly irregular integrand $h(x)$, Monte Carlo (MC) will be your only choice to do numerical integration. Based on the LLT (Law of Large Numbers): $$ \mu=\int_a^bh(x)\mathrm{d}x=(b-a)\mathbb{E}_{U(a,b)}[h(X)]\approx\hat{\mu}=\frac{(b-a)}{n}\sum_{i=1}^nh(x_i), x_i\sim U(a,b) $$ Based on the CLT (Central Limit Theorem): $$ \sqrt{n}\frac{(\hat{\mu}-\mu)}{\sigma}\to N(0, 1), \sigma^2=(b-a)^2\mathrm{Var}_{U(a,b)}[h(X)] $$ which gives the $95\%$ confidence interval (C.I.): $$ [\hat{\mu}-\frac{1.96\hat{\sigma}}{\sqrt{n}}, \hat{\mu}+\frac{1.96\hat{\sigma}}{\sqrt{n}}], \hat{\sigma}^2=(b-a)^2\hat{\mathrm{Var}}[h(x_i)] $$ where $\hat{\mathrm{Var}}[h(x_i)]$ means the sample variance. Always construct the C.I. when you do estimation as it contains more information. The popularity of the confidence interval also comes from a theorem that, under some conditions, it converges with a speed of $n^{-1}$, while the point estimate normally only converges at the rate of $n^{-0.5}$ (refer to this book https://statweb.stanford.edu/~owen/mc/). * Estimate the integration of $h(x)$ from 0 to 1. Draw the point estimates and the corresponding confidence intervals against differents sample sizes $n$ in a single plot, where you should use the true value $\mu$ as a reference line. Draw the errors of the point estimates against $n$ in a loglog plot and check its convergence rate. ```python np.random.seed(19971107) samples = uniform.rvs(size=10000000) hx = h(samples) ns = np.arange(2, hx.size+1) cummean_x = np.cumsum(hx)[1:] / ns cummean_x2 = np.cumsum(hx**2)[1:] / ns cumstd = np.sqrt((cummean_x2 - cummean_x**2) / (ns - 1)) upper = cummean_x + 1.96 * cumstd lower = cummean_x - 1.96 * cumstd fig, axs = plt.subplots(1, 2, figsize = (15, 5)) axs[0].plot([ns[0], ns[3000]], [mu, mu], label='true $\mu$') axs[0].plot(ns[:3000], cummean_x[:3000], label='$\hat{mu}$') axs[0].plot(ns[:3000], upper[:3000], label='C.I. upper bound') axs[0].plot(ns[:3000], lower[:3000], label='C.I. lower bound') axs[0].legend() err = np.abs(cummean_x - mu) fit = linreg().fit(np.log(ns).reshape([-1, 1]), np.log(err), sample_weight=1/ns) c, r = np.exp(fit.intercept_), -fit.coef_[0] print('c = {:.4f}, r = {:.4f}'.format(c, r)) axs[1].loglog(ns, err) axs[1].loglog(ns, np.exp(fit.predict(np.log(ns).reshape([-1, 1])))) axs[1].set_xlabel('$\log(n)$') axs[1].set_ylabel('$\log(|\hat{\mu}-\mu|)$') plt.show() ``` **Think further:** Here I assigned weights for the linear regression, do you know why? Try what will happen without the weights. How to do the linear regression to estimate the convergence rate properly without the weights? ## Sampling - the core of MC Sampling is process to transform an distribution (or an integrable nonnegative function) $\pi(x)$ to a series of points $\{x_1,\dots,x_n\}$, which make it easier to estimate all kinds of the global properties of the distribution, like the mean, the variance, the covariance, the probability in an area, the quantile, the number of modes (local maximum points), the rough positions of all modes. 
In a word, the samples can represent the distribution $\pi(x)$: $$ \hat{\Pi}(x)=\frac{1}{n}\sum_{i=1}^n\mathbf{1}(x_i\le x)\to\mathbb{E}_\pi[\mathbf{1}(X\le x)]=\Pi(x) $$ There are three amazing points about the MC: 1. It is universal. Besides the above global properties, MC can be used as an alternative for optimization in the Machine Learning with a simple but powerful techique called the Annealing, while optimization can't replace MC. 2. It is robust. MC estimation generally has a convergence rate of $n^{-0.5}$ which wouldn't be influenced by the roughness of related functions or the curse of dimensionality. On the other hand, if you develop an algorithm with convergence rate worse than $n^{-0.5}$, your algorithm is even worse than random! 3. It is modest. Although lack of accuracy, different the optimization methodologies, MC provides many tools to evaluate the reliability of an estimation, like the CLT and the Bootstrap. ### The Inversion Method A random variable $X$ with a reversible cdf $F(x)$ can be sampled by $x_i=F^{-1}(u_i), u_i\sim U(0, 1), i=1,\dots,n$. This is a fundamental important method as all the initial pseudo random numbers generated by the computer always follows $U(0, 1)$. * Try to generate standard normal random samples if you only have the cdf of the normal distribution. Compare the samples with the normal pdf. (hints: use the 'norm.cdf' and the 'brentq' function from 'scipy.optimize' (https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brentq.html))) ```python np.random.seed(19971107) unifs = uniform.rvs(size=3000) samples = [] for unif in unifs: fun = lambda x: norm.cdf(x) - unif samples.append(brentq(fun, -100, 100)) xx = np.linspace(-4, 4, 1000) fig, ax = plt.subplots(figsize = (9, 5)) ax.plot(xx, norm.pdf(xx), label='$N(0, 1)$') ax.hist(samples, bins=50, density=True, histtype='step', label='samples') ax.legend() plt.show() ``` ### Transformation Sampling based on the relationships between different variables. The Inversion Method can be viewed as a special case of the Transformation. For more Information, check this https://en.wikipedia.org/wiki/Relationships_among_probability_distributions. ### Sequential Simulation Sampling based on the dependency between variables. A typical example is the mixture model $p(x)=\sum_{j=1}^mw_jp_j(x), \sum_{j=1}^mw_j=1$. To draw $n$ samples from $p(x)$, we firstly simulate $(n_1,\dots,n_m)\sim Multinomial(n;w_1,\dots,w_m)$, and than, draw $n_j$ samples from $p_j(x)$. Combining the $m$ groups of samples together, we finally have the $n$ samples from the mixture model. ### Rejection Sampling Another fundamental important method to draw i.i.d. samples. It generally says that we can generate variable $Y$ from variable $X$ as long as the density of $X$ covers that of $Y$. Or, if we put it another way, variable $X$ can generate any variables whose densities can be covered by the $X$'s density. The spirit of Rejection Sampling actually coincide that of the axiom schema of specification in the set theory (https://en.wikipedia.org/wiki/Axiom_schema_of_specification). Essentially, it says that any definable subclass of a set is a set. People considered it the most important axiom of the set theory as it avoided the famous Russell's paradox (https://en.wikipedia.org/wiki/Russell%27s_paradox). **Think further:** Check the axioms in ZFC (a standard set theory system, https://encyclopediaofmath.org/index.php?title=ZFC) to see if there are any other related sampling schemes. 
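Before turning to the Rejection Sampling procedure, here is a minimal sketch (my addition, not part of the original notebook) of the Sequential Simulation scheme described above, applied to a simple two-component normal mixture; the weights and component parameters are chosen purely for illustration.

```python
# Sketch only: sequential simulation of a mixture model p(x) = sum_j w_j p_j(x).
np.random.seed(19971107)
n = 3000
w = np.array([0.3, 0.7])                              # assumed mixture weights
comps = [norm(loc=-2, scale=1.0), norm(loc=3, scale=0.5)]
counts = np.random.multinomial(n, w)                  # (n_1, ..., n_m) ~ Multinomial(n; w)
samples = np.concatenate([comp.rvs(size=k) for comp, k in zip(comps, counts)])

xx = np.linspace(-6, 6, 1000)
mix_pdf = sum(wj * comp.pdf(xx) for wj, comp in zip(w, comps))
fig, ax = plt.subplots(figsize=(9, 5))
ax.plot(xx, mix_pdf, label='mixture pdf')
ax.hist(samples, bins=50, density=True, histtype='step', label='samples')
ax.legend()
plt.show()
```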
The procedure of Rejection Sampling (given the target distribution $\pi(x)$ and an envelope distribution $q(x)$): 1. Draw initial samples $\{x_1,\dots,x_m\}$ from $q(x)$, and calculate the corresponding ratios or weights $\{w_1,...,w_m\},w_j=w(x_j)=\pi(x_j)/q(x_j)$; 2. Calculate $C=\sup w(x)$, and accept each sample $x_j$ with the probability $p_j=w_j/C$ to obtain the final samples $\{x_1^*,\dots,x_n^*\}$. There are two ways to do the sampling: 1. We can start the sampling with $m=1$, and increase $m$ until enough final samples are obtained (each sample only be accepted or rejected once). 2. Or because the acceptance rate is $1/C$, to obtain $n$ final samples, we can set $m=Cn$ and make a ‘refund for any overpayment or a supplemental payment for any deficiency’ (多退少补). * Draw samples from a standard norm distribution based on the t distribution with 1 degree of freedom. Which of the above two sampling methods is faster? (hints: use the 'minimize' function from 'scipy.optimize' (https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html); use the 'datetime' package for timing (https://stackoverflow.com/questions/766335/python-speed-testing-time-difference-milliseconds) ```python np.random.seed(19971107) weight = lambda x: norm.pdf(x) / t(df=1).pdf(x) C = weight(minimize(lambda x: -np.log(weight(x)), x0=10, tol=1e-10).x) n = 3000 start1 = dt.now() samples1 = [] while len(samples1) < n: new = t(df=1).rvs() if uniform.rvs() <= weight(new) / C: samples1.append(new) end1 = dt.now() time1 = (end1 - start1).seconds start2 = dt.now() m = np.ceil(C * n).astype(np.int64) samples2 = t(df=1).rvs(size=m) samples2 = samples2[uniform.rvs(size=m) <= weight(samples2) / C] while samples2.size < n: new = t(df=1).rvs() if uniform.rvs() <= weight(new) / C: samples2 = np.append(samples2, new) samples2 = samples2[:n] end2 = dt.now() time2 = (end2 - start2).seconds xx = np.linspace(-4, 4, 1000) fig, ax = plt.subplots(figsize = (9, 5)) ax.plot(xx, norm.pdf(xx), label='$N(0, 1)$') ax.hist(samples1, bins=50, density=True, histtype='step', label='samples 1') ax.hist(samples2, bins=50, density=True, histtype='step', label='samples 2') ax.legend() plt.show() print('time 1: {}s; time 2: {}s'.format(time1, time2)) ``` **Think further:** When $m$ is big, is it reasonable to replace $C$ in the step 2 by the $\max w_j$? What's the benefits if we do so? Will the resulting final samples still follow the target distribution? Justify your thoughts by mathematics or by simulations. ### Importance Sampling Remember that sampling is to generate samples to represent the target distribution $\pi(x)$. Different from all the above sampling schemes, the Importance Sampling (IS) generates weighted samples to represent $\pi(x)$. With the proposal distribution $q(x)$ where $x_i$ comes from, we have the non-ratio version weighted samples $\{(x_1,w_1),\dots,(x_n,w_n)\}, w_i=w(x_i)/n$ or the ratio version weighted samples $\{(x_1,w_1),\dots,(x_n,w_n)\}, w_i=w(x_i)/\sum_{i=1}^nw(x_i)$, where $w(x)=\pi(x)/q(x)$ is called the likelihood ratio function or the importance weight function. Here one pair $(x_i,w_i)$, instead of $x_i$, is one single sample. And the ratio version is useful when $\pi(x)$ or $q(x)$ is unnormalized. **Think further:** Note that the Rejection Sampling and the Importance Sampling both use the ratio function $w(x)$, so what's the relationship between them? 
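As a quick, hedged illustration (my addition, not part of the original notebook) of the two weight versions just defined, the sketch below estimates $\mathbb{E}[X]$ under a standard normal target using a $t$-distribution proposal; both estimators should be close to zero.

```python
# Sketch only: non-ratio and ratio (self-normalized) importance sampling estimates.
np.random.seed(19971107)
n = 5000
proposal = t(df=3)                              # proposal q(x), chosen for illustration
xs = proposal.rvs(size=n)
w_raw = norm.pdf(xs) / proposal.pdf(xs)         # likelihood ratio w(x) = pi(x)/q(x)
mu_nonratio = np.sum(w_raw * xs) / n            # weights w_i = w(x_i)/n
mu_ratio = np.sum(w_raw * xs) / np.sum(w_raw)   # weights w_i = w(x_i)/sum_j w(x_j)
print('non-ratio IS: {:.4f}, ratio IS: {:.4f}'.format(mu_nonratio, mu_ratio))
```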
Just like before, under some mild conditions, the weighted samples represent the target distribution in the sense that $$ \hat{\Pi}(x)=\sum_{i=1}^nw_i\mathbf{1}(x_i\le x)\to\mathbb{E}_q[\frac{\pi(X)}{q(X)}\mathbf{1}(X\le x)]=\mathbb{E}_\pi[\mathbf{1}(X\le x)]=\Pi(x) $$ And also for a function $h(x)$: $$ \mu=\mathbb{E}_\pi[h(X)]\approx\hat{\mu}=\sum_{i=1}^nw_ih(x_i) $$ holds for both the ratio and non-ratio weighted samples. For the estimator $\hat{\mu}$ with non-ratio weights, we have the asymptotic variance $$ n\mathrm{Var}_q[\hat{\mu}]\to\mathbb{E}_q[(w(X)h(X)-\mu)^2]=\int\frac{(\pi(x)h(x)-\mu q(x))^2}{q(x)}\mathrm{d}x=\int\frac{(\pi(x)h(x))^2}{q(x)}\mathrm{d}x-\mu^2 $$ Straightly following Jensen’s inequality (https://en.wikipedia.org/wiki/Jensen%27s_inequality), the last expression gives the variance minimizer $q(x)\propto\pi(x)|h(x)|$. As for the estimator with ratio weights, based on the Delta method (https://en.wikipedia.org/wiki/Delta_method), we can derive the asymptotic variance $$ n\mathrm{Var}_q[\hat{\mu}]\to\mathbb{E}_q[w(X)^2(h(X)-\mu)^2]=\int\frac{\pi(x)^2(h(x)-\mu)^2}{q(x)}\mathrm{d}x $$ indicating an optimal proposal $q(x)\propto\pi(x)|h(x)-\mu|$ different from the previous one. In the non-ratio version, IS can potentially give us the zero variance estimator if and only if $h(x)$ is always positive or negative on its support. In the ratio version, the asymptotic variance is zero if and only if $h(x)=c$ for a constant $c$, in which case, we don't even need to do the estimation. * Let's return to the Laplace approximation example. This time, try using the non-ratio IS to estimate the normalizing constant of the beta posterior $Beta(p|\alpha+y, \beta+n-y)$ ($n$ here is not the sample size of IS samples), which is actually already normalized. Use the Laplace approximation to build the IS proposal, and do the IS estimation based on 100 samples for different $n$. Still, draw the absolute error $|\hat{Z}-1|$ against $n$ based on the loglog plot. What is the convergence rate? ```python np.random.seed(19971107) ns = (10 ** np.linspace(1, 5, 50)).astype(np.int64) err = np.zeros(ns.size) samples0 = norm.rvs(size=100) for i, n in enumerate(ns): y = binom.rvs(n=n, p=p) posterior = beta(a=a+y, b=b+n-y).pdf p_star = (a+y-1) / (a+b+n-2) neglogposterior = lambda p: -np.log(posterior(p)) sigma2 = 1 / nd.Derivative(neglogposterior, n=2, step=1e-5)(p_star) proposal = norm(loc = p_star, scale = np.sqrt(sigma2)).pdf samples = np.sqrt(sigma2) * samples0 + p_star Z_hat = np.mean(posterior(samples) / proposal(samples)) err[i] = np.abs(Z_hat - 1) fit = linreg().fit(np.log(ns).reshape([-1, 1]), np.log(err)) c, r = np.exp(fit.intercept_), -fit.coef_[0] print('c = {:.4f}, r = {:.4f}'.format(c, r)) fig, ax = plt.subplots(figsize = (9, 5)) ax.loglog(ns, err) ax.loglog(ns, np.exp(fit.predict(np.log(ns).reshape([-1, 1])))) ax.set_xlabel('$\log(n)$') ax.set_ylabel('$\log(|\hat{Z}-1|)$') plt.show() ``` This time, I used a little technique called the Common Random Numbers (https://www0.gsb.columbia.edu/mygsb/faculty/research/pubfiles/4261/glasserman_yao_guidelines.pdf). I generated 'samples0' in advance and transformed it to 'samples' for every given IS normal proposal. This technique is especially useful when you want to compare a random value with respect to different parameters. Try what will happen if you don't use the Common Random Numbers. **Think further:** Compare the codes here with that of the Laplace approximation example. 
Does the Laplace approximation look like Importance Sampling with only one sample? Why is the performance of IS so different from that of the Laplace approximation?

```python

```

```python

```
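One way to probe the question above (this is my own sketch, not the author's intended answer): the Laplace estimate of the normalizing constant is exactly the importance weight $w(p)=\pi(p|\mathbf{x})/q(p)$ evaluated at the posterior mode, so it can be read as a one-sample IS estimate whose single "sample" is pinned to $p^*$ rather than drawn at random.

```python
# Sketch only: compare the Laplace Z_hat with the IS weight at the mode and at a random draw,
# reusing the beta-binomial setup (a, b, p) from earlier in this notebook.
np.random.seed(19971107)
n = 1000
y = binom.rvs(n=n, p=p)
posterior = beta(a=a+y, b=b+n-y).pdf
p_star = (a+y-1) / (a+b+n-2)
sigma2 = 1 / nd.Derivative(lambda q: -np.log(posterior(q)), n=2, step=1e-5)(p_star)
proposal = norm(loc=p_star, scale=np.sqrt(sigma2))
w = lambda q: posterior(q) / proposal.pdf(q)       # importance weight function
print('Laplace Z_hat       :', w(p_star))          # weight evaluated at the mode
print('one-sample IS Z_hat :', w(proposal.rvs()))  # weight at a single random draw
```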
9efb6f63abd57223cd781977ecfb1371ef6eb208
308,919
ipynb
Jupyter Notebook
STAT6011 Computational Statistics/Tut 2/Tut 2 (with answers).ipynb
IanFla/Teaching-Experience
ed4c414239d223f66bea4a01ffc5e5776743ec64
[ "MIT" ]
null
null
null
STAT6011 Computational Statistics/Tut 2/Tut 2 (with answers).ipynb
IanFla/Teaching-Experience
ed4c414239d223f66bea4a01ffc5e5776743ec64
[ "MIT" ]
null
null
null
STAT6011 Computational Statistics/Tut 2/Tut 2 (with answers).ipynb
IanFla/Teaching-Experience
ed4c414239d223f66bea4a01ffc5e5776743ec64
[ "MIT" ]
null
null
null
406.472368
88,460
0.927347
true
6,193
Qwen/Qwen-72B
1. YES 2. YES
0.859664
0.839734
0.721889
__label__eng_Latn
0.943621
0.515521
# Stiff ODEs and implicit methods In this notebook we look at examples where Runge-Kutta methods require very small steps to be accurate. For these **stiff** ODEs implicit methods are better. Let's define our explicit methods first. ```python import numpy as np import matplotlib.pyplot as plt ``` ```python # The below commands make the font and image size bigger plt.rcParams.update({'font.size': 22}) plt.rcParams["figure.figsize"] = (15,10) ``` ```python def EulerStep(f, dx, xi, yi): return yi + dx*f(xi, yi) ``` ```python def RK2Step(f, dx, xi, yi): k1 = dx*f(xi, yi) k2 = dx*f(xi + dx, yi + k1) return yi + 0.5*(k1 + k2) ``` ```python def RK4Step(f, dx, xi, yi): k1 = dx*f(xi,yi) k2 = dx*f(xi + 0.5*dx, yi + 0.5*k1) k3 = dx*f(xi + 0.5*dx, yi + 0.5*k2) k4 = dx*f(xi + dx, yi + k3) return yi + 1/6*(k1 + 2*k2 + 2*k3 + k4) ``` ```python def ODESolve(f, dx, x0, y0, imax, method='RK4', plotSteps=False): xi = x0 yi = y0 # Create arrays to store the steps in steps = np.zeros((imax+1,2)) steps[0,0] = x0 steps[0,1] = y0 i = 0 while i < imax: if(method == 'RK4'): yi = RK4Step(f, dx, xi, yi) elif(method == 'RK2'): yi = RK2Step(f, dx, xi, yi) elif(method == 'Euler'): yi = EulerStep(f, dx, xi, yi) xi += dx i += 1 # Store the steps for plotting steps[i, 0] = xi steps[i, 1] = yi if(plotSteps): plt.scatter(steps[:,0], steps[:,1], color='red', linewidth=10) return [xi, yi] ``` # Stiff ODEs Let's look at an example. Consider the *first-order* ODE: $$ y'(x) = \lambda[-y(x) + \sin(x)] $$ with *initial conditions* $y(0) = 0$. This has the general solution of $$ y(x) = C e^{-\lambda x} + \frac{\lambda^2 \sin(x) - \lambda \cos(x)}{1 + \lambda^2}$$ The initial condition gives $$ C = \frac{\lambda}{1 + \lambda^2}$$ Let's define these equations and see how our RK methods do at solving the ODE. Try using the 'Euler', 'RK2' and 'RK4' methods below. ```python lam = 30 def dydxStiff(x,y): global lam return lam*(-y + np.sin(x)) def yStiff(x): global lam C = lam/(1+lam**2) return C*np.exp(-lam*x) + (lam**2*np.sin(x) -lam*np.cos(x))/(1+lam**2) ``` ```python x = np.linspace(0, 2.0, 100) y = yStiff(x) plt.xlabel('x') plt.ylabel('y') plt.grid(True) plt.plot(x,y) ODESolve(dydxStiff, 0.1, 0, 0, 10, 'RK4', True) plt.legend(['Exact solution', 'Explicit RK4']); ``` We see that even the more accurate RK4 method does not do well. It can be made to perform better by decreasing the step size but we want to avoid this. Furthermore, if we increase $\lambda$ the step size required to accurately obtain the solution drops rapidly. To overcome this, let's look at implicit methods. One of the simplest is the Backwards Euler method. ## Implicit method: backwards Euler The backwards Euler method is given by: $$ \begin{align} x_{i+1} &= x_i + \Delta x \\ y_{i+1} &= y_i + \Delta x f(x_{i+1}, y_{i+1}) \\ \end{align} $$ Notice how, unlike the *explicit* methods we've looked at until now this does not immediately give a recipe for the next step in terms of the previous steps. We must solve the second equation for $y_{i+1}$. In general we won't be able to find an analytic solution, so we might use *numerical root finding*. Let's look at the stiff ODE example given above, where it turns out we can find an analytic solution. 
In this case we have: $$ \begin{align} x_{i+1} &= x_i + \Delta x \\ y_{i+1} &= y_i + \Delta x \lambda[-y_{i+1} + \sin(x_{i+1})] \end{align}$$ In this case we can solve the second equation explicitly for $y_{i+1}$: $$ y_{i+1} = \frac{y_i + \Delta x \lambda \sin(x_{i+1})}{1 + \Delta x \lambda}$$ Let's now implement this *specific* example in code and test it out. ```python def BackwardsEulerSpecific(dx, x0, y0, imax, plotSteps=False): global lam xi = x0 yi = y0 # Create arrays to store the steps in steps = np.zeros((imax+1,2)) steps[0,0] = x0 steps[0,1] = y0 i = 0 while i < imax: yi = (yi + dx * lam*np.sin(xi+dx))/(1+ dx*lam) xi += dx i += 1 # Store the steps for plotting steps[i, 0] = xi steps[i, 1] = yi if(plotSteps): plt.scatter(steps[:,0], steps[:,1], color='green', linewidth=10) return [xi, yi] ``` ```python x = np.linspace(0, 2.0, 100) y = yStiff(x) plt.xlabel('x') plt.ylabel('y') plt.grid(True) plt.plot(x,y) ODESolve(dydxStiff, 0.1, 0, 0, 10, 'RK4', True) BackwardsEulerSpecific(0.2, 0, 0, 10, True) plt.legend(['Exact solution', 'Explicit RK4', 'Implicit backwards Euler']); ``` We can thus see that the (implicit) backwards Euler method is much more successful than the (explicit) RK4 method, even when it takes larger steps. This is great because each step of the RK4 method is more expensive than a step of the Euler method. The downside to implicit methods can be seen from the code above: our implicit ODE solver code is not general, it only works for this specific example. In general, implicit methods are more complicated to implement as they often require numerical methods, e.g., root finding to find $y_{i+1}$. Note, unlike the forward Euler method, the backwards Euler method is stable across a much wider class of ODEs. ```python ```
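As a hedged side note (my addition, not part of the original notebook), the backwards Euler step can be written generically by handing the implicit equation to a numerical root finder; the explicit Euler step makes a reasonable initial guess. This is a sketch, not an optimized implementation.

```python
# Sketch only: a general backwards Euler step, solving y_{i+1} - y_i - dx*f(x_{i+1}, y_{i+1}) = 0.
from scipy.optimize import fsolve

def BackwardsEulerStep(f, dx, xi, yi):
    residual = lambda ynext: ynext - yi - dx*f(xi + dx, ynext)
    guess = yi + dx*f(xi, yi)          # explicit Euler step as initial guess
    return fsolve(residual, guess)[0]

# One step of the stiff example above, starting from (x0, y0) = (0, 0):
print(BackwardsEulerStep(dydxStiff, 0.2, 0.0, 0.0))
```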
14da561bdcad80286c9bb81a11c9c60c8c2c061f
108,518
ipynb
Jupyter Notebook
OrdinaryDifferentialEquations/StiffODEsAndImplicitMethods.ipynb
CianCoyle/ACM20030-Examples
fb81abf24d066717900657c1de4f2c6f87806413
[ "MIT" ]
13
2020-02-15T21:30:37.000Z
2021-09-21T12:03:13.000Z
OrdinaryDifferentialEquations/StiffODEsAndImplicitMethods.ipynb
CianCoyle/ACM20030-Examples
fb81abf24d066717900657c1de4f2c6f87806413
[ "MIT" ]
null
null
null
OrdinaryDifferentialEquations/StiffODEsAndImplicitMethods.ipynb
CianCoyle/ACM20030-Examples
fb81abf24d066717900657c1de4f2c6f87806413
[ "MIT" ]
24
2020-02-13T14:27:47.000Z
2022-02-05T14:17:10.000Z
332.877301
54,828
0.92263
true
1,712
Qwen/Qwen-72B
1. YES 2. YES
0.841826
0.894789
0.753257
__label__eng_Latn
0.935681
0.5884
# Notes on saturation Vapor Pressure # There are a large number of expressions for the saturation vapor pressure in the literature, and many of these, even recent ones, seem to reference previous studies in a haphazard way. So how much do these differ, is there a standard, and by what criteria should one judge them by. These are big questions, and I won't answer them comprehensively here, but perhaps a bit of insight can be shared. The first thing to note is that there is a community that concerns itself with this question. They call themselves the international association for the physical properties of water and steam, and mostly concern themselves with the behavior of water at high temperature. The approach of the IAPWS is to develop an empirical equation of state for water, in the form of a specification of its Helmholtz free energy, from which all other properties can be derived. The standard reference for the IAPWS equation of state is the publication by Wagner and Pru{\ss} (Thermodynamic Properties of Ordinary Water) published in 2002 and which describes the IAPWS-95 approved formulation. Minor corrections have since been made to this, which as best I can tell are relevant at high temperatures. By working with an equation of state, all properties of water, from the specific heats to the gas constants to the phase-change enthalpies can be derived consistently. The disadvantage of this approach is that the equation is derived by positing an analytic form that is then fit to a very wide and diverse abundance of existing data. The resultant equation is described in an ideal part, which involves a summation of nine terms and thirteen coefficients, and a residual part, with more than 50 terms and over 200 constants. For the case of the saturation vapor pressure over water Wagner and Pru{\ss} suggest a much simpler equation that is described in terms of only six coefficients. First, below I compare the relative error to the IAPWS standard as has been formlated and distributed in the iapws python package, version (1.4). There has been some discussion on the web of its implementation, but the similarity with the Wagner and Pru{\ss} formulation gives me confidence. ```python import numpy as np import matplotlib.pyplot as plt import seaborn as sns from scipy import interpolate, optimize plot_dir = '/Users/m219063/Research/Projects/Thermodynamics/plots/' !%matplotlib inline ``` /bin/sh: line 0: fg: no job control ```python gravity = 9.8076 cpd = 1006. Rd = 287.05 Rv = 461.53 # IAPWS97 at 273.15 cpv = 1865.01 # '' lv0 = 2500.93e3 # IAPWS97 at 273.15 lf0 = 333.42e3 #'' cl = 4179.57 # IAPWS97 at 305 and P=0.1 MPa (chosen to give a good fit for es over ice) ci = 1905.43 # IAPWS97 at 247.065 and P=0.1 MPa (chosen to give a good fit for es over ice) eps1 = Rd/Rv eps2 = Rv/Rd -1. P0 = 100000. # Standard Pressure T0 = 273.15 # Standard Temperature PvC = 22.064e6 # Critical pressure of water vapor TvC = 647.096 # Critical temperature of water vapor TvT = 273.16 # Triple point temperature of water PvT = 611.655 lvT = lv0 + (cpv-cl)*(TvT-T0) lfT = lf0 + (cpv-ci)*(TvT-T0) lsT = lvT + lfT es_default = 'sonntag' def thermo_input(x, xtype='none'): import numpy as np x = np.asarray(x).flatten() scalar_input = False if x.ndim == 0: x = x[None] # Makes x 1D scalar_input = True if (xtype == 'Kelvin' and x.max() < 100 ): x = x+273.15 if (xtype == 'Celcius'and x.max() > 100 ): x = x-273.15 if (xtype == 'Pascal' and x.max() < 1200): x = x*100. if (xtype == 'kg/kg' and x.max() > 1.0) : x = x/1000. 
if (xtype == 'meter' and x.max() < 10.0): print('Warning: input should be in meters, max value less than 10, not corrected') return x, scalar_input def eslf(T, formula=es_default): """ Returns the saturation vapour pressure [Pa] over liquid given the temperature. Temperatures can be in Celcius or Kelvin. Formulas supported are - Goff-Gratch (1994 Smithsonian Tables) - Sonntag (1994) - Flatau - Magnus Tetens (MT) - Romps (2017) - Mpurhpy-Koop - Bolton - Wagner and Pruss (WP, 2002) is the default >>> eslf(273.16) 611.657 """ import numpy as np x, scalar_input = thermo_input(T, 'Kelvin') if formula == "flatau": if (np.min(x) > 100): x = x-273.16 np.maximum(x,-80.) c_es= np.asarray([0.6105851e+03, 0.4440316e+02, 0.1430341e+01, 0.2641412e-01, 0.2995057e-03,0.2031998e-05,0.6936113e-08,0.2564861e-11,-0.3704404e-13]) es = np.polyval(c_es[::-1],x) elif formula == "bolton": if (np.min(x) > 100): x = x-273.15 es = 611.2*np.exp((17.67*x)/(243.5+x)) elif formula == "sonntag": xx = -6096.9385/x + 16.635794 - 2.711193e-2*x + 1.673952e-5*x*x + 2.433502 * np.log(x) es = 100.*np.exp(xx) elif formula =='goff-gratch': x1 = 273.16/x x2 = 373.16/x xl = np.log10(1013.246 ) - 7.90298*(x2 - 1) + 5.02808*np.log10(x2) - 1.3816e-7*(10**(11.344*(1.-1./x2)) - 1.0) + 8.1328e-3 * (10**(-3.49149*(x2-1)) - 1.0) es =10**(xl+2) # plus 2 converts from hPa to Pa elif formula == 'wagner-pruss': vt = 1.-x/TvC es = PvC * np.exp(TvC/x * (-7.85951783*vt + 1.84408259*vt**1.5 - 11.7866497*vt**3 + 22.6807411*vt**3.5 - 15.9618719*vt**4 + 1.80122502*vt**7.5)) elif formula == 'hardy98': y = -2.8365744e+3/(x*x) - 6.028076559e+3/x + 19.54263612 - 2.737830188e-2*x + 1.6261698e-5*x**2 + 7.0229056e-10*x**3 - 1.8680009e-13*x**4 + 2.7150305 * np.log(x) es = np.exp(y) elif formula == 'romps': Rr = 461. cvl_r = 4119 cvv_r = 1418 cpv_r = cvv_r + Rr es = 611.65 * (x/TvT) **((cpv_r-cvl_r)/Rr) * np.exp((2.37403e6 - (cvv_r-cvl_r)*TvT)*(1/TvT - 1/x)/Rr) elif formula == "murphy-koop": es = np.exp(54.842763 - 6763.22/x - 4.210*np.log(x) + 0.000367*x + np.tanh(0.0415*(x - 218.8)) * (53.878 - 1331.22/x - 9.44523 * np.log(x) + 0.014025*x)) elif formula == "standard-analytic": c1 = (cpv-cl)/Rv c2 = lvT/(Rv*TvT) - c1 es = PvT * np.exp(c2*(1.-TvT/x)) * (x/TvT)**c1 else: exit("formula not supported") es = np.maximum(es,0) if scalar_input: return np.squeeze(es) return es def esif(T, formula=es_default): """ Returns the saturation vapour pressure [Pa] over ice given the temperature. Temperatures can be in Celcius or Kelvin. uses the Goff-Gratch (1994 Smithsonian Tables) formula >>> esli(273.15) 6.112 m """ import numpy as np x, scalar_input = thermo_input(T, 'Kelvin') if formula == "sonntag": es = 100 * np.exp(24.7219 - 6024.5282/x + 0.010613868*x - 0.000013198825*x**2 - 0.49382577*np.log(x)) elif formula == "goff-gratch": x1 = 273.16/x xi = np.log10( 6.1071) - 9.09718*(x1 - 1) - 3.56654*np.log10(x1) + 0.876793*(1 - 1./x1) es = 10**(xi+2) elif formula == "wagner-pruss": #(actually wagner et al, 2011) a1 = -0.212144006e+2 a2 = 0.273203819e+2 a3 = -0.610598130e+1 b1 = 0.333333333e-2 b2 = 0.120666667e+1 b3 = 0.170333333e+1 theta = T/TvT es = PvT * np.exp((a1*theta**b1 + a2 * theta**b2 + a3 * theta**b3)/theta) elif formula == "murphy-koop": es = np.exp(9.550426 - 5723.265/x + 3.53068 * np.log(x) - 0.00728332*x) elif formula == "romps": Rr = 461. cvv_r = 1418. cvs_r = 1861. 
cpv_r = cvv_r + Rr es = 611.65 * (x/TvT) **((cpv_r-cvs_r)/Rr) * np.exp((2.37403e6 + 0.33373e6 - (cvv_r-cvs_r)*TvT)*(1/TvT - 1/x)/Rr) elif formula == "standard-analytic": c1 = (cpv-ci)/Rv c2 = lsT/(Rv*TvT) - c1 es = PvT * np.exp(c2*(1.-TvT/x)) * (x/TvT)**c1 else: exit("formula not supported") es = np.maximum(es,0) if scalar_input: return np.squeeze(es) return es def esilf(T,formula=es_default): import numpy as np return np.minimum(esif(T,formula),eslf(T,formula)) def es(T,formula=es_default,state='liq'): import numpy as np x, scalar_input = thermo_input(T, 'Kelvin') if (state == 'liq'): return eslf(x,formula) if (state == 'ice'): return esif(x,formula) if (state == 'mxd'): return esilf(x,formula) def des(T,formula=es_default,state='liq'): import numpy as np x, scalar_input = thermo_input(T, 'Kelvin') dx = 0.01; xp = x+dx/2; xm = x-dx/2 return (es(xp,formula,state)-es(xm,formula,state))/dx def dlnesdlnT(T,formula=es_default,state='liq'): import numpy as np x, scalar_input = thermo_input(T, 'Kelvin') dx = 0.01; xp = x+dx/2; xm = x-dx/2 return ((es(xp,formula,state)-es(xm,formula,state))/es(x,formula,state) * (x/dx)) def phase_change_enthalpy(Tx,fusion=False): """ Returns the enthlapy [J/g] of vaporization (default) of water vapor or (if fusion=True) the fusion anthalpy. Input temperature can be in degC or Kelvin >>> phase_change_enthalpy(273.15) 2500.8e3 """ import numpy as np TC, scalar_input = thermo_input(Tx, 'Celcius') TK, scalar_input = thermo_input(Tx, 'Kelvin') if (fusion): el = lfT + (cl-ci)*(TK-TvT) else: el = lvT + (cpv-cl)*(TK-TvT) if scalar_input: return np.squeeze(el) return el ``` ## Some baic properties of water from the IAWPS routines. ## ```python import iapws print ('Using IAPWS Version %s\n'%(iapws.__version__,)) T = np.arange(183.15,313.15) ci_iapws = np.full(len(T),np.nan) cl_iapws = np.full(len(T),np.nan) for i,Tx in enumerate(T): if (Tx < 283): ci_iapws[i] = iapws._iapws._Ice(Tx, 0.1)['cp']*1000 / ci if (Tx > 263): cl_iapws[i] = iapws._iapws._Liquid(Tx, 0.1)['cp']*1000 / cl fig = plt.figure(figsize=(4,3)) ax1 = plt.subplot(1,1,1) ax1.set_xlabel('$T$ / K') ax1.set_ylabel('$c_\mathrm{i}$ / %5.2f, $c_\mathrm{l}$ / %5.2f'%(ci,cl)) ax1.set_xticks([185,247.07,273.15,305.00]) plt.scatter([247.065],[1.]) plt.scatter([305.000],[1.]) plt.plot(T,ci_iapws) plt.plot(T,cl_iapws) sns.set_context("paper", font_scale=1.2) sns.despine(offset=10) plt.tight_layout() fig.savefig(plot_dir+'cp-Tdependance.pdf') TK = np.arange(273.15,315.15,0.01) es_iapws = np.zeros(len(TK)) for i, x in enumerate(TK): es_iapws[i] = iapws.iapws97._PSat_T(x) *1.e6 #Temperature, [K]; Returns:Pressure, [MPa] ``` ## Behavior of saturation vapor pressure above the triple point ## This comparison of relative error suggests that the Wagner-Pru{\ss}, Murphy and Koop, Hardy, and Sonntag formulations lie closest to the IAPWS-97 reference. Romps (2017) and Bolton (1980) are similarly accurate and may have advantages. 
Hardy is interesting as it appears in a technical document and is rarely mentioned in the subsequent literature, but it is used by Vaisala in the calibration of their sondes.

```python
state = 'liq'

fig = plt.figure(figsize=(10,5))
ax1 = plt.subplot(1,1,1)
ax1.set_xlabel('$T$ / K')
ax1.set_ylabel('$e_{\mathrm{s,x}}/e_{\mathrm{s,ref}} - 1$')
ax1.set_yscale('log')

es_ref = es_iapws
es_w = es(TK,formula="wagner-pruss",state=state)
es_r = es(TK,formula='romps',state=state)
es_g = es(TK,formula='goff-gratch',state=state)
es_m = es(TK,formula='murphy-koop',state=state)
es_s = es(TK,formula='sonntag',state=state)
es_b = es(TK,formula='bolton',state=state)
es_f = es(TK,formula='flatau',state=state)
es_h = es(TK,formula='hardy98',state=state)
es_a = es(TK,formula='standard-analytic',state=state)

plt.plot(TK,np.abs(es_h/es_ref-1),c='tab:blue',ls='solid',label='Hardy (1998)')
plt.plot(TK,np.abs(es_f/es_ref-1),c='tab:orange',label='Flatau (1992)')
plt.plot(TK,np.abs(es_g/es_ref-1),c='tab:green',label='Goff-Gratch (1957)')
plt.plot(TK,np.abs(es_b/es_ref-1),c='tab:red',ls='dotted',label='Bolton (1980)')
plt.plot(TK,np.abs(es_r/es_ref-1),c='tab:purple',label='Romps (2017)')
plt.plot(TK,np.abs(es_s/es_ref-1),c='tab:grey',label='Sonntag (1990)')
plt.plot(TK,np.abs(es_m/es_ref-1),c='tab:pink',label='Murphy-Koop (2005)')
plt.plot(TK,np.abs(es_w/es_ref-1),c='tab:brown',label='Wagner-Pruss (2002)')
plt.plot(TK,np.abs(es_a/es_ref-1),c='tab:purple',ls='dotted',label='Analytic')

plt.legend(loc="lower right",ncol=3)
sns.set_context("paper", font_scale=1.2)
sns.despine(offset=10)
fig.savefig(plot_dir+'es_l-error.pdf')
```

## Extension to temperatures below the triple point ##

To extend over the entire temperature range a different reference is required; for this, any of the Hardy, Sonntag, Murphy-Koop and Wagner-Pru{\ss} formulations could suffice. We choose Wagner-Pru{\ss} because Wagner's group is responsible for the standard, and has also developed the IAPWS standard for saturation vapor pressure over ice. Below the results are plotted with respect to this standard over a much larger temperature range.

It is not clear how accurate Wagner and Pru{\ss} is when extended well beyond the IAPWS range, based on which it might be that the grouping of errors of similar magnitude from the Bolton, Flatau and Goff-Gratch formulations is indicative of a low temperature bias in the Wagner-Pru{\ss} formulation. I doubt that this is the case, as the poor performance of all these formulations in the higher temperature range, and the simplicity of their functional forms, make it unlikely. The agreement of the Murphy-Koop formulation with these simpler formulations at low temperature may be indicative of Murphy and Koop's focus on saturation over ice rather than liquid.
```python
state = 'liq'

fig = plt.figure(figsize=(10,5))
ax1 = plt.subplot(1,1,1)
ax1.set_xlabel('$T$ / K')
ax1.set_ylabel('$e_{\mathrm{s,x}}/e_{\mathrm{s,ref}} - 1$')
ax1.set_yscale('log')

TK = np.arange(180,320,0.5)
es_w = es(TK,formula="wagner-pruss",state=state)
es_r = es(TK,formula='romps',state=state)
es_g = es(TK,formula='goff-gratch',state=state)
es_m = es(TK,formula='murphy-koop',state=state)
es_s = es(TK,formula='sonntag',state=state)
es_b = es(TK,formula='bolton',state=state)
es_f = es(TK,formula='flatau',state=state)
es_h = es(TK,formula='hardy98',state=state)
es_a = es(TK,formula='standard-analytic',state=state)

es_ref = es_w
plt.plot(TK,np.abs(es_h/es_ref-1),c='tab:blue',ls='solid',label='Hardy (1998)')
plt.plot(TK,np.abs(es_f/es_ref-1),c='tab:orange',label='Flatau (1992)')
plt.plot(TK,np.abs(es_g/es_ref-1),c='tab:green',label='Goff-Gratch (1957)')
plt.plot(TK,np.abs(es_b/es_ref-1),c='tab:red',ls='dotted',label='Bolton (1980)')
plt.plot(TK,np.abs(es_r/es_ref-1),c='tab:purple',label='Romps (2017)')
plt.plot(TK,np.abs(es_s/es_ref-1),c='tab:grey',label='Sonntag (1990)')
plt.plot(TK,np.abs(es_m/es_ref-1),c='tab:pink',label='Murphy-Koop (2005)')
plt.plot(TK,np.abs(es_a/es_ref-1),c='tab:purple',ls='dotted',label='Analytic')
#plt.plot(TK,np.abs(es_w/es_ref-1),c='tab:olive',label='Wagner-Pruss (2002)')

plt.legend(loc="lower left",ncol=2)
sns.set_context("paper", font_scale=1.2)
sns.despine(offset=10)
fig.savefig(plot_dir+'es_lsc-error.pdf')
```

## Saturation vapor pressure over ice ##

A subset of the formulations also postulates forms for the saturation vapor pressure over ice. For the reference in this quantity we use Wagner et al. (2011), as this has been adopted as the IAPWS standard. Here it seems that Murphy and Koop's (2005) formulation behaves very well in comparison to Wagner et al., but Sonntag is also quite adequate, particularly at lower ($T<273.15$ K) temperatures where it is likely to be applied.

```python
state = 'ice'

fig = plt.figure(figsize=(10,5))
ax1 = plt.subplot(1,1,1)
ax1.set_xlabel('$T$ / K')
ax1.set_ylabel('$e_{\mathrm{s,x}}/e_{\mathrm{s,ref}} - 1$')
ax1.set_yscale('log')

TK = np.arange(180,320,0.5)
es_w = es(TK,formula="wagner-pruss",state=state)
es_r = es(TK,formula='romps',state=state)
es_g = es(TK,formula='goff-gratch',state=state)
es_m = es(TK,formula='murphy-koop',state=state)
es_s = es(TK,formula='sonntag',state=state)
es_a = es(TK,formula='standard-analytic',state=state)

es_ref = es_w
plt.plot(TK,np.abs(es_g/es_ref-1),c='tab:green',label='Goff-Gratch (1957)')
plt.plot(TK,np.abs(es_r/es_ref-1),c='tab:purple',label='Romps (2017)')
plt.plot(TK,np.abs(es_s/es_ref-1),c='tab:grey',label='Sonntag (1990)')
plt.plot(TK,np.abs(es_m/es_ref-1),c='tab:pink',label='Murphy-Koop (2005)')
plt.plot(TK,np.abs(es_a/es_ref-1),c='tab:purple',ls='dotted',label='Analytic')
#plt.plot(TK,np.abs(es_w/es_ref-1),c='tab:olive',label='Wagner-Pruss (2002)')

plt.legend(loc="lower left",ncol=2)
sns.set_context("paper", font_scale=1.2)
sns.despine(offset=10)
fig.savefig(plot_dir+'es_i-error.pdf')
```

## Clausius Clapeyron ##

Often overlooked is that many conceptual models are built on the application of the Clausius-Clapeyron equation,
\begin{equation}
\frac{\mathrm{d} \ln e_\mathrm{s}}{\mathrm{d} \ln T} \left(\frac{\ell_\mathrm{v}}{R_\mathrm{v} T}\right)^{-1} = 1
\end{equation}
with the assumption that the vaporization enthalpy, $\ell_\mathrm{v}$, that appears in this equation is linear in temperature following Kirchhoff's relation.
This is similar to assuming that the specific heats are independent of temperature, an idealization which is, unfortunately, just that, an idealization. But because of this it is interesting to compare this expression, as given by the above formulations of the saturation vapor pressure (through their numerical derivative), with independent expressions of $\ell_\mathrm{v}$ based on the assumption of constant specific heats. This is shown below for ice and liquid saturation. The analytic expression, which has larger errors for $e_s$, is constructed to satisfy this relationship and is exact to the precision of the numerical calculations. The various formulations using more accurate expressions for $e_s$, which implicitly don't assume constancy in specific heats, are similarly accurate, with the exception of Goff-Gratch, and Romps for ice. Hardy is only shown for water. For ice, Sonntag does not behave well for $T> 290$ K, but it is not likely to be used at these temperatures. Note that Romps would be perfect had we adopted his modified specific heats.

Based on the above, my recommendation is to use the formulations by Wagner's group, unless one is interested in very low temperatures ($T<180$ K), in which case the formulation of Murphy and Koop may be desirable. For just liquid processes Hardy might be a good choice; it is less well known but used by Vaisala for its sondes. There may be advantages to using Sonntag if there is interest in both liquid and ice, as it might allow more efficient implementations, but for my tests all formulations were within 30% of one another. Another alternative would be to use the analytic approach, either using Romps' formulae if getting the saturation vapor pressure as close to measurements as possible is preferred, or using the analytic formula with the correct (at the standard temperature and pressure) specific heats and gas constants.
```python state = 'liq' fig = plt.figure(figsize=(10,10)) ax1 = plt.subplot(2,1,1) ax1.set_ylabel('$|\mathrm{CC}_\mathrm{liq} - 1|$') ax1.set_yscale('log') ax1.set_xticklabels([]) TK = np.arange(180,320,0.5) lv = phase_change_enthalpy(TK) if (state == 'ice'): lv += phase_change_enthalpy(TK,fusion=True) y = lv/(Rv * TK) cc_w = dlnesdlnT(TK,formula="wagner-pruss",state=state) / y cc_r = dlnesdlnT(TK,formula='romps',state=state) /y cc_g = dlnesdlnT(TK,formula='goff-gratch',state=state) /y cc_m = dlnesdlnT(TK,formula='murphy-koop',state=state) /y cc_s = dlnesdlnT(TK,formula='sonntag',state=state) /y cc_h = dlnesdlnT(TK,formula='hardy98',state=state) /y cc_a = dlnesdlnT(TK,formula='standard-analytic',state=state) /y plt.plot(TK,np.abs(cc_h/1 -1.),c='tab:blue',label='Hardy (1998)') plt.plot(TK,np.abs(cc_g/1 -1.),c='tab:green',label='Goff-Gratch (1957)') plt.plot(TK,np.abs(cc_r/1 -1.),c='tab:purple',label='Romps (2017)') plt.plot(TK,np.abs(cc_s/1 -1.),c='tab:grey',label='Sonntag (1990)') plt.plot(TK,np.abs(cc_m/1 -1.),c='tab:pink',label='Murphy-Koop (2005)') plt.plot(TK,np.abs(cc_w/1 -1.),c='tab:olive',label='Wagner-Pruss (2002)') plt.plot(TK,np.abs(cc_a/1 -1.),c='tab:purple',ls='dotted',label='Analytic') plt.legend(loc="lower left",ncol=1) state = 'ice' TK = np.arange(180,320,0.5) lv = phase_change_enthalpy(TK) if (state == 'ice'): lv = phase_change_enthalpy(TK,fusion=True) + phase_change_enthalpy(TK) y = lv/(Rv * TK) cc_w = dlnesdlnT(TK,formula="wagner-pruss",state=state) / y cc_r = dlnesdlnT(TK,formula='romps',state=state) /y cc_g = dlnesdlnT(TK,formula='goff-gratch',state=state) /y cc_m = dlnesdlnT(TK,formula='murphy-koop',state=state) /y cc_s = dlnesdlnT(TK,formula='sonntag',state=state) /y cc_a = dlnesdlnT(TK,formula='standard-analytic',state=state) /y ax2 = plt.subplot(2,1,2) ax2.set_xlabel('$T$ / K') ax2.set_ylabel('$|\mathrm{CC}_\mathrm{ice} - 1|$') ax2.set_yscale('log') plt.plot(TK,np.abs(cc_g/1 -1.),c='tab:green',label='Goff-Gratch (1957)') plt.plot(TK,np.abs(cc_r/1 -1.),c='tab:purple',label='Romps (2017)') plt.plot(TK,np.abs(cc_s/1 -1.),c='tab:grey',label='Sonntag (1990)') plt.plot(TK,np.abs(cc_m/1 -1.),c='tab:pink',label='Murphy-Koop (2005)') plt.plot(TK,np.abs(cc_w/1 -1.),c='tab:olive',label='Wagner-Pruss (2002)') plt.plot(TK,np.abs(cc_a/1. -1.),c='tab:purple',ls='dotted',label='Analytic') sns.set_context("paper", font_scale=1.2) sns.despine(offset=10) fig.savefig(plot_dir+'cc-error.pdf') ``` ## Optimizing analytic fits for saturation vapor pressure ## Romps suggests modifying the specific heats of liquid, ice and the gas constant of vapor to arrive at an optimal fit for the saturation vapor pressure using the analytic form. One can do almost as good by just modifying the specific heat of the condensate phases. Here we show how the maximum error in the fit depends on the specific heat of the condensate phases as compared to the reference, and how we arrive at our optimal fit by only manipulating the condensate phase specific heats to values that they anyway adopt within the range of temperatures spanned by the atmosphere. This justifys the default choice for saturation vapor pressure and the specific heats used in aes_thermo.py ```python fig = plt.figure(figsize=(10,5)) cl_1 = (iapws._iapws._Liquid(265, 0.1)['cp'])*1000. cl_2 = (iapws._iapws._Liquid(305, 0.1)['cp'])*1000 ci_1 = (iapws._iapws._Ice(193, 0.01)['cp'])*1000. 
ci_2 = (iapws._iapws._Ice(273, 0.10)['cp'])*1000

cls = np.arange(cl_2,cl_1)
err = np.zeros(len(cls))

ax1 = plt.subplot(1,2,1)
ax1.set_xlabel('$c_\mathrm{liq}$ / Jkg$^{-1}$K$^{-1}$')
ax1.set_ylabel('$(e_{\mathrm{s,x}}/e_{\mathrm{s,ref}} - 1)_\mathrm{max}$ / %')
ax1.set_yscale('log')

state = 'liq'
TK = np.arange(260,300,0.5)
es_ref = es(TK,formula="wagner-pruss",state=state)
for i,cx in enumerate(cls):
    c1 = (cpv-cx)/Rv
    c2 = lvT/(Rv*TvT) - c1
    es_a = PvT * np.exp(c2*(1.-TvT/TK)) * (TK/TvT)**c1
    err[i] = np.max(np.abs(es_a/es_ref -1.))*100.

ax1.plot(cls,err,c='tab:purple',ls='dotted',label='Analytic $c_\mathrm{liq}$ for $T\in$ (260K,305K)')
ax1.legend(loc="upper left",ncol=2)

cis = np.arange(ci_1,ci_2)
err = np.zeros(len(cis))

ax2 = plt.subplot(1,2,2)
ax2.set_xlabel('$c_\mathrm{ice}$ / Jkg$^{-1}$K$^{-1}$')
ax2.set_ylabel('$(e_{\mathrm{s,x}}/e_{\mathrm{s,ref}} - 1)_\mathrm{max}$ / %')
ax2.set_yscale('log')

state = 'ice'
TK = np.arange(180,273,0.5)
es_ref = es(TK,formula="wagner-pruss",state=state)
for i,cx in enumerate(cis):
    c1 = (cpv-cx)/Rv
    c2 = lsT/(Rv*TvT) - c1
    es_a = PvT * np.exp(c2*(1.-TvT/TK)) * (TK/TvT)**c1
    err[i] = np.max(np.abs(es_a/es_ref -1.))*100.

ax2.plot(cis,err,c='tab:purple',ls='dotted',label='Analytic $c_\mathrm{ice}$ for $T\in$ (193K,273K)')
ax2.legend(loc="upper right",ncol=2)

sns.set_context("paper", font_scale=1.2)
sns.despine(offset=10)
fig.savefig(plot_dir+'es-analytic-fits.pdf')

Tfit = 305
print ('Taking fit for $c_\mathrm{liq}=$ %3.2f J/(kg K) at $T=$ %3.2f K'%(iapws._iapws._Liquid(Tfit, 0.1)['cp']*1000.,Tfit))
Tfit = 247.065
print ('Taking fit for $c_\mathrm{ice}=$ %3.2f J/(kg K) at $T=$ %3.2f K'%(iapws._iapws._Ice(Tfit, 0.1)['cp']*1000.,Tfit))
```

## RCEMIP comparison ##

During RCEMIP (Wing et al.) different models output different RH, differing in the way it was calculated and also in whether it was calculated relative to liquid or ice. In this analysis we create a Python implementation of the initial RCEMIP sounding and then, for the given state, estimate the RH using different formulae and different assumptions regarding the reference condensate (liquid/ice). We also show the difference associated with 1 K of temperature.
```python def rcemip_on_z(z,SST): # function [T,q,p] = rcemip_on_z(z,SST) # # Inputs: # z: array of heights (low to high, m) # SST: sea surface temperature (K) # # Outputs: T = np.zeros(len(z)) # temperature (K) q = np.zeros(len(z)) # specific humidity (g/g) p = np.zeros(len(z)) # pressure (Pa) ## Constants g = 9.79764 #m/s^2 Rd = 287.04 #J/kgK ## Parameters p0 = 101480 #Pa surface pressure qt = 10**(-11) #g/g specific humidity at tropopause zq1 = 4000 #m zq2 = 7500 #m zt = 15000 #m tropopause height gamma = 0.0067 #K/m lapse rate ## Scratch Tv = np.zeros(len(z)) # temperature (K) if SST == 295: q0 = 0.01200; #g/g specific humidity at surface (adjusted from 300K value so RH near surface approx 80%) elif SST == 300: q0 = 0.01865; #g/g specific humidity at surface elif SST == 305: q0 = 0.02400 #g/g specific humidity at surface (adjusted from 300K value so RH near surface approx 80%) T0 = SST - 0 #surface air temperature adjusted to be 0K less than SST ## Virtual Temperature at surface and tropopause Tv0 = T0*(1 + 0.608*q0) #virtual temperature at surface Tvt = Tv0 - gamma*zt #virtual temperature at tropopause z=zt ## Pressure pt = p0*(Tvt/Tv0)**(g/(Rd*gamma)); #pressure at tropopause z=zt p = p0*((Tv0-gamma*z)/Tv0)**(g/(Rd*gamma)) #0 <= z <= zt p[z>zt] = pt*np.exp(-g*(z[z>zt]-zt)/(Rd*Tvt)) #z > zt ## Specific humidity q = q0*np.exp(-z/zq1)*np.exp(-(z/zq2)**2) q[z>zt] = qt #z > zt ## Temperature #Virtual Temperature Tv = Tv0 - gamma*z #0 <= z <= zt Tv[z>zt] = Tvt #z > zt #Absolute Temperature at all heights T = Tv/(1 + 0.608*q) return T, q, p z = np.arange(0,17000,100) T, q , p = rcemip_on_z(z,300) ``` ```python def get_rh (T,q,p,formula='wagner-pruss',state='liq'): es_w = es(T,formula=formula,state=state) x = es_w * eps1/(p-es_w) return 100.*q*(1+x)/x fig = plt.figure(figsize=(4,5)) ax1 = plt.subplot(1,1,1) ax1.set_ylabel('$z$ / km') ax1.set_xlabel('RH / %') ax1.set_ylim(0,14.5) ax1.set_yticks([0,4,8,12]) plt.plot(get_rh(T,q,p,state='mxd'),z/1000.,label = 'Wagner Pruss (ice/liq)') plt.plot(get_rh(T+1,q,p,state='mxd'),z/1000.,label = 'Wagner Pruss (ice/liq) + 1 K') plt.plot(get_rh(T,q,p,state='ice'),z/1000.,label = 'Wagner Pruss (ice)') plt.plot(get_rh(T,q,p,formula='romps',state='mxd'),z/1000.,label = 'Romps (ice/liq)') plt.plot(get_rh(T,q,p),z/1000.,label = 'Wagner Pruss (liq)') plt.plot(get_rh(T,q,p,formula='flatau'),z/1000.,label = 'Flatau (liq)') plt.legend(loc="lower left",ncol=1) sns.set_context("paper") sns.despine(offset=10) plt.tight_layout() fig.savefig(plot_dir+'RCEMIP-RHerror.pdf') ``` ## Credit ## Jiawei Bao, Geet George, and Hauke Schulz are thanked for comments on these notes, and the identification of some errors in earlier versions. ```python ```
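As a closing reference note (my addition, not part of the original notes): the 'standard-analytic' formula used in `eslf` and `esif` above follows from integrating the Clausius-Clapeyron equation with constant specific heats, i.e., with a phase-change enthalpy that is linear in temperature. For the liquid case,

$$
\begin{align}
\frac{\mathrm{d} \ln e_\mathrm{s}}{\mathrm{d} T} &= \frac{\ell_\mathrm{v}(T)}{R_\mathrm{v} T^2}, \qquad \ell_\mathrm{v}(T) = \ell_\mathrm{vT} + (c_\mathrm{pv}-c_\mathrm{l})(T-T_\mathrm{vT}), \\
\ln \frac{e_\mathrm{s}(T)}{P_\mathrm{vT}} &= \frac{\ell_\mathrm{vT}-(c_\mathrm{pv}-c_\mathrm{l})T_\mathrm{vT}}{R_\mathrm{v}}\left(\frac{1}{T_\mathrm{vT}}-\frac{1}{T}\right) + \frac{c_\mathrm{pv}-c_\mathrm{l}}{R_\mathrm{v}}\ln\frac{T}{T_\mathrm{vT}}, \\
e_\mathrm{s}(T) &= P_\mathrm{vT}\, \exp\left[c_2\left(1-\frac{T_\mathrm{vT}}{T}\right)\right]\left(\frac{T}{T_\mathrm{vT}}\right)^{c_1},
\qquad c_1 = \frac{c_\mathrm{pv}-c_\mathrm{l}}{R_\mathrm{v}},\quad c_2 = \frac{\ell_\mathrm{vT}}{R_\mathrm{v} T_\mathrm{vT}} - c_1,
\end{align}
$$

which is the expression coded in the `standard-analytic` branch; the ice case follows by replacing $c_\mathrm{l}$ with $c_\mathrm{i}$ and $\ell_\mathrm{vT}$ with $\ell_\mathrm{sT}$.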
663657903f55d3ffe6b4d326464f2a2b4be9698b
408,573
ipynb
Jupyter Notebook
saturation-water-vapor.ipynb
bjorn-stevens/Thermodynamics
f77a97f0908938b3f1f7bbb0e523b13582f436a1
[ "MIT" ]
1
2020-06-30T11:29:42.000Z
2020-06-30T11:29:42.000Z
saturation-water-vapor.ipynb
bjorn-stevens/Thermodynamics
f77a97f0908938b3f1f7bbb0e523b13582f436a1
[ "MIT" ]
null
null
null
saturation-water-vapor.ipynb
bjorn-stevens/Thermodynamics
f77a97f0908938b3f1f7bbb0e523b13582f436a1
[ "MIT" ]
null
null
null
459.070787
94,088
0.927403
true
9,277
Qwen/Qwen-72B
1. YES 2. YES
0.793106
0.675765
0.535953
__label__eng_Latn
0.778045
0.083528
```python
# General import
import numpy as np
import scipy.sparse as sparse
from scipy.integrate import ode
import time
import matplotlib.pyplot as plt
```

```python
# pyMPC and kalman import
from pyMPC.mpc import MPCController
from pyMPC.kalman import kalman_design_simple, LinearStateEstimator
```

## System dynamics ##
The system to be controlled is an inverted pendulum on a cart (see next Figure). The system is governed by the following differential equations:

\begin{equation}
\begin{aligned}
 (M+m)\ddot p + ml\ddot\phi \cos\phi - ml \dot \phi ^2 \sin \phi + b\dot p &= F \\
 l \ddot \phi + \ddot p \cos \phi - g \sin\phi &= -f_\phi\dot \phi
\end{aligned}
\end{equation}

Introducing the state vector $x=[p\; \dot p\; \phi\; \dot \phi]$ and the input $u=F$, the system dynamics are described in state-space by a set of nonlinear ordinary differential equations: $\dot x = f(x,u)$ with

\begin{equation}
\begin{split}
f(x,u) &=
\begin{bmatrix}
x_2\\
\frac{-mg \sin x_3\cos x_3 + mlx_4^2\sin x_3 + f_\phi m x_4 \cos x_3 - bx_2 + u }{M+(1-\cos^2 x_3)m}\\
x_4\\
\frac{(M+m)(g \sin x_3 - f_\phi x_4) - (lm x_4^2 \sin x_3 - bx_2 + u)\cos x_3}{l(M+(1-\cos^2 x_3)m)}
\end{bmatrix}\\
\end{split}
\end{equation}

For MPC control design, the system is linearized about the upright (unstable) equilibrium point, i.e., about the point $x_{eq} = [0, \; 0\;, 0,\; 0]^\top$. The linearized system has the form $\dot x = A_c x + B_c u$ with

\begin{equation}
A_c =
\begin{bmatrix}
0& 1& 0& 0\\
0& -\frac{b}{M}& -g\frac{m}{M}& f_\phi\frac{m}{M}\\
0&0&0&1\\
0&\frac{b}{Ml}& \frac{g(M+m)}{Ml}&-\frac{(M+m)f_\phi}{M l}
\end{bmatrix},\qquad
B_c=
\begin{bmatrix}
0\\
\frac{1}{M}\\
0\\
-\frac{1}{Ml}
\end{bmatrix}
\end{equation}

Next, the system is discretized with sampling time $T_s = 10\;\text{ms}$. Here we just use a Forward Euler discretization scheme for the sake of simplicity.

```python
# Constants #
M = 0.5
m = 0.2
b = 0.1
ftheta = 0.1
l = 0.3
g = 9.81
Ts = 10e-3
```

```python
# System dynamics: \dot x = f_ODE(t,x,u)
def f_ODE(t,x,u):
    F = u
    v = x[1]
    theta = x[2]
    omega = x[3]
    der = np.zeros(4)
    der[0] = v
    der[1] = (m * l * np.sin(theta) * omega ** 2 - m * g * np.sin(theta) * np.cos(theta) + m * ftheta * np.cos(theta) * omega + F - b * v) / (M + m * (1 - np.cos(theta) ** 2))
    der[2] = omega
    der[3] = ((M + m) * (g * np.sin(theta) - ftheta * omega) - m * l * omega ** 2 * np.sin(theta) * np.cos(theta) - (F - b * v) * np.cos(theta)) / (l * (M + m * (1 - np.cos(theta) ** 2)))
    return der
```

```python
# Linearized System Matrices
Ac = np.array([[0, 1, 0, 0],
               [0, -b / M, -(g * m) / M, (ftheta * m) / M],
               [0, 0, 0, 1],
               [0, b / (M * l), (M * g + g * m) / (M * l), -(M * ftheta + ftheta * m) / (M * l)]])

Bc = np.array([
    [0.0],
    [1.0 / M],
    [0.0],
    [-1 / (M * l)]
])

Cc = np.array([[1., 0., 0., 0.],
               [0., 0., 1., 0.]])

Dc = np.zeros((2, 1))

[nx, nu] = Bc.shape  # number of states and number of inputs
ny = np.shape(Cc)[0]
```

```python
# Simple forward Euler discretization
Ad = np.eye(nx) + Ac * Ts
Bd = Bc * Ts
Cd = Cc
Dd = Dc
```

```python
# Standard deviation of the measurement noise on position and angle
std_npos = 0.005
std_nphi = 0.005
```

```python
# Reference input and states
xref = np.array([0.3, 0.0, 0.0, 0.0])  # reference state
uref = np.array([0.0])     # reference input
uminus1 = np.array([0.0])  # input at time step negative one - used to penalize the first delta u at time instant 0. Could be the same as uref.
``` ```python # Constraints xmin = np.array([-10.0, -10.0, -100, -100]) xmax = np.array([10.0, 10.0, 100, 100]) umin = np.array([-20]) umax = np.array([20]) Dumin = np.array([-5]) Dumax = np.array([5]) ``` ```python # Objective function weights Qx = sparse.diags([1.0, 0, 5.0, 0]) # Quadratic cost for states x0, x1, ..., x_N-1 QxN = sparse.diags([1.0, 0, 5.0, 0]) # Quadratic cost for xN Qu = 0.0 * sparse.eye(1) # Quadratic cost for u0, u1, ...., u_N-1 QDu = 0.1 * sparse.eye(1) # Quadratic cost for Du0, Du1, ...., Du_N-1 ``` ```python # Initialize simulation system phi0 = 15*2*np.pi/360 x0 = np.array([0, 0, phi0, 0]) # initial state t0 = 0 system_dyn = ode(f_ODE).set_integrator('vode', method='bdf') system_dyn.set_initial_value(x0, t0) _ = system_dyn.set_f_params(0.0) ``` ```python # Prediction horizon Np = 150 Nc = 75 ``` ```python # Instantiate and initialize MPC controller K = MPCController(Ad, Bd, Np=Np, Nc=Nc, x0=x0, xref=xref, uminus1=uminus1, Qx=Qx, QxN=QxN, Qu=Qu, QDu=QDu, xmin=xmin, xmax=xmax, umin=umin, umax=umax, Dumin=Dumin, Dumax=Dumax) K.setup() ``` ```python # Basic Kalman filter design Q_kal = np.diag([0.1, 10, 0.1, 10]) R_kal = np.eye(ny) L,P,W = kalman_design_simple(Ad, Bd, Cd, Dd, Q_kal, R_kal, type='filter') x0_est = x0 KF = LinearStateEstimator(x0_est, Ad, Bd, Cd, Dd,L) ``` ```python # Simulate in closed loop [nx, nu] = Bd.shape # number of states and number or inputs len_sim = 10 # simulation length (s) nsim = int(len_sim / Ts) # simulation length(timesteps) x_vec = np.zeros((nsim, nx)) y_vec = np.zeros((nsim, ny)) y_meas_vec = np.zeros((nsim, ny)) y_est_vec = np.zeros((nsim, ny)) x_est_vec = np.zeros((nsim, nx)) x_ref_vec = np.zeros((nsim, nx)) u_vec = np.zeros((nsim, nu)) t_MPC_CPU = np.zeros((nsim,1)) t_vec = np.arange(0, nsim) * Ts time_start = time.time() x_step = x0 x_step_est = x0 t_step = t0 uMPC = uminus1 for i in range(nsim): # Output for step i # System y_step = Cd.dot(system_dyn.y) # y[i] from the system ymeas_step = y_step ymeas_step[0] += std_npos * np.random.randn() ymeas_step[1] += std_nphi * np.random.randn() # Estimator # MPC uMPC = K.output() # u[i] = k(\hat x[i]) possibly computed at time instant -1 # Save output for step i y_vec[i, :] = y_step # y[i] y_meas_vec[i, :] = ymeas_step # y_meas[i] x_vec[i, :] = system_dyn.y # x[i] y_est_vec[i, :] = KF.y # \hat y[i|i-1] x_est_vec[i, :] = KF.x # \hat x[i|i-1] x_ref_vec[i, :] = xref #xref_fun(t_step) u_vec[i, :] = uMPC # u[i] # Update to i+1 # System system_dyn.set_f_params(uMPC) # set current input value to uMPC system_dyn.integrate(system_dyn.t + Ts) # integrate system dynamics for a time step # Kalman filter: update and predict KF.update(ymeas_step) # \hat x[i|i] KF.predict(uMPC) # \hat x[i+1|i] # MPC update for step i+1 time_MPC_start = time.time() K.update(KF.x, uMPC) # update with measurement (and possibly pre-compute u[i+1]) t_MPC_CPU[i] = time.time() - time_MPC_start # Time update t_step += Ts time_sim = time.time() - time_start ``` ```python # Plot results fig, axes = plt.subplots(3, 1, figsize=(10, 10), sharex=True) axes[0].plot(t_vec, x_est_vec[:, 0], "b", label="p_est") axes[0].plot(t_vec, x_vec[:, 0], "k", label='p') axes[0].plot(t_vec, x_ref_vec[:,0], "r--", linewidth=4, label="p_ref") axes[0].set_ylabel("Position (m)") axes[1].plot(t_vec, x_est_vec[:, 2] * 360 / 2 / np.pi, "b", label="phi_est") axes[1].plot(t_vec, x_vec[:, 2] * 360 / 2 / np.pi, label="phi") axes[1].plot(t_vec, x_ref_vec[:,2] * 360 / 2 / np.pi, "r--", linewidth=4, label="phi_ref") axes[1].set_ylabel("Angle (deg)") 
axes[2].plot(t_vec, u_vec[:, 0], label="u") axes[2].plot(t_vec, uref * np.ones(np.shape(t_vec)), "r--", linewidth=4, label="u_ref") axes[2].set_ylabel("Force (N)") for ax in axes: ax.grid(True) ax.legend() ``` ```python # Histogram of the MPC CPU time fig,ax = plt.subplots(1,1, figsize=(5,5)) ax.hist(t_MPC_CPU*1000, bins=100) ax.grid(True) _ = ax.set_xlabel('MPC computation CPU time (ms)') ```
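The notebook linearizes about the upright equilibrium, which is open-loop unstable. As a quick added check (not part of the original example, but reusing the constants and the $A_c$ matrix defined above), the continuous-time model has a real eigenvalue in the right half-plane, and the forward-Euler model $A_d = I + A_c T_s$ therefore has an eigenvalue outside the unit circle, which is why feedback is needed to hold the pendulum up:

```python
import numpy as np

# Same pendulum-on-cart constants as in the notebook above
M, m, b, ftheta, l, g, Ts = 0.5, 0.2, 0.1, 0.1, 0.3, 9.81, 10e-3

Ac = np.array([[0, 1, 0, 0],
               [0, -b / M, -(g * m) / M, (ftheta * m) / M],
               [0, 0, 0, 1],
               [0, b / (M * l), (M * g + g * m) / (M * l), -(M * ftheta + ftheta * m) / (M * l)]])

# One eigenvalue is real and positive: the upright equilibrium is unstable in open loop
print("eig(Ac):", np.linalg.eigvals(Ac))

# Forward Euler maps each eigenvalue lambda to 1 + lambda*Ts, so the discretized
# model also has an eigenvalue with magnitude greater than one
Ad = np.eye(4) + Ac * Ts
print("|eig(Ad)|:", np.abs(np.linalg.eigvals(Ad)))
```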
aedf2a233ebf9530189d0c12a9dfe3b0fa522d7b
75,947
ipynb
Jupyter Notebook
examples/example_inverted_pendulum_kalman.ipynb
forgi86/pyMPC
291db149554767a035fcb01df3fed7a6b3fe60e4
[ "MIT" ]
84
2019-05-28T09:27:37.000Z
2022-03-31T08:38:23.000Z
examples/example_inverted_pendulum_kalman.ipynb
passion4energy/pyMPC
4b004ba707dab49cd36d96a3575b8593c870a904
[ "MIT" ]
2
2020-04-17T00:03:27.000Z
2021-01-30T11:35:58.000Z
examples/example_inverted_pendulum_kalman.ipynb
passion4energy/pyMPC
4b004ba707dab49cd36d96a3575b8593c870a904
[ "MIT" ]
20
2019-10-13T13:50:16.000Z
2022-03-31T08:38:25.000Z
172.606818
55,396
0.883669
true
2,925
Qwen/Qwen-72B
1. YES 2. YES
0.7773
0.743168
0.577664
__label__eng_Latn
0.386615
0.180438
# Quaternion Triple Products and Distance by Doug Sweetser, sweetser@alum.mit.edu - please feel free to email In this IPython notebook, efforts will be made to understand quaternion triple products and how they are related to distances in space and intervals in space-time as seen in special relativity. Rather than follow a historical story, I will try a more abstract approach. Initialize a few tools. ```python %%capture %matplotlib inline import numpy as np import sympy as sp import matplotlib.pyplot as plt # To get equations the look like, well, equations, use the following. from sympy.interactive import printing printing.init_printing(use_latex=True) from IPython.display import display # Tools for manipulating quaternions. import Q_tools as qt; ``` ## Spatial Rotations Define a triple product function modeled on what it takes to do a spatial rotation, $P R P^*$, where $R$ is a quaternion to be spatially rotated and $P$ is a quaternion parameter to do said rotation. ```python def triple_sandwich(r, p=qt.QH([1, 0, 0, 0])): """A function that takes 2 quaternions but does a triple product. The default value for P leaves R unchanged.""" return p.product(r.product(p.conj())) ``` ```python t, x, y, z = sp.symbols("t x y z") s, u, v, w = sp.symbols("s u v w") R = qt.QH([t, x, y, z]) P = qt.QH([s, u, v, w]) RP_sandwich = triple_sandwich(R, P) sp.simplify(RP_sandwich.t) ``` The first term is just the norm of the parameter $P$ times the scalar value of $R$, how simple! Rotating a value is complicated. ```python sp.simplify(RP_sandwich.x) ``` Show the interval of $R$ is unchanged up to the norm of the parameter $P$: ```python sp.simplify(sp.factor(RP_sandwich.square().t)) ``` The interval will be invariant so long as the norm of the parameter $P$ is equal to one. A common way to do this is to use sine and cosine functions due to the trig identity $\sin^2(\theta) + \cos^2(\theta) = 1$. ```python def triple_trig_z(r, a): """A rotation around the z axis only by the double angle of a.""" return triple_sandwich(r, qt.QH([sp.cos(a), 0, 0, sp.sin(a)])) def is_quadratic(r): """Tests if the the first term of the square of a quaternion is equal to t^2 - x^2 - y^2 - z^2.""" r2 = r.square() simple_r2 = sp.simplify(r2.t) it_is = ((simple_r2 == 1.0*t**2 - 1.0*x**2 - 1.0*y**2 - 1.0*z**2) or (simple_r2 == t**2 - x**2 - y**2 - z**2)) if it_is: display(t**2 - x**2 - y**2 - z**2) else: display(simple_r2) return it_is ``` ```python a = sp.Symbol('a') display(sp.simplify(triple_trig_z(R, a).t)) display(sp.simplify(triple_trig_z(R, a).x)) display(sp.simplify(triple_trig_z(R, a).y)) display(sp.simplify(triple_trig_z(R, a).z)) is_quadratic(triple_trig_z(R, a)) ``` An important thing to notice is that rotations work for arbitrarily small values of an angle. ```python display(sp.simplify(triple_trig_z(R, 0.01).t)) display(sp.simplify(triple_trig_z(R, 0.01).x)) display(sp.simplify(triple_trig_z(R, 0.01).y)) display(sp.simplify(triple_trig_z(R, 0.01).z)) is_quadratic(triple_trig_z(R, 0.01)) ``` This is relevant to the fact that the group $SO(3)$ is a compact group. It is easy to visualize the example above: it is a circle in the $xy$ plane with $t$ and $z$ unaltered. Circles are sets of points where the "next" point is an arbitrarily short distance away. Can we create a function that can take _any_ quaternion parameter $P$ yet still always generate another member of the group $SO(3)$? This can be done using the inverse of a quaternion which is the conjugate of a quaternion divided by the norm squared. 
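Spelled out without the Q_tools class, that inverse looks like this. This is a minimal added sketch: the helper names, the component ordering $(t, x, y, z)$, and the Hamilton product convention are assumptions chosen to match how quaternions are used in this notebook.

```python
def q_conj(q):
    """Conjugate of q = (t, x, y, z): flip the sign of the three imaginary parts."""
    t, x, y, z = q
    return (t, -x, -y, -z)

def q_mul(a, b):
    """Hamilton product of two quaternions."""
    t1, x1, y1, z1 = a
    t2, x2, y2, z2 = b
    return (t1*t2 - x1*x2 - y1*y2 - z1*z2,
            t1*x2 + x1*t2 + y1*z2 - z1*y2,
            t1*y2 - x1*z2 + y1*t2 + z1*x2,
            t1*z2 + x1*y2 - y1*x2 + z1*t2)

def q_inverse(q):
    """Inverse: the conjugate divided by the squared norm."""
    t, x, y, z = q
    norm_squared = t*t + x*x + y*y + z*z
    return tuple(component / norm_squared for component in q_conj(q))

p = (1.0, 2.0, -3.0, 0.5)
print(q_mul(p, q_inverse(p)))   # (1.0, 0.0, 0.0, 0.0) up to rounding
```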
Groups are about binary operations on a set. The binary operation can be a composite function, where the results of one rotation are fed into another. ```python def next_rotation(r, p=qt.QH([1, 0, 0, 0])): """Generates another member of the rotation group given a quaternion parameter P.""" return p.product(r.product(p.invert())) def composite_rotation(r, p1=qt.QH([1, 0, 0, 0]), p2=qt.QH([1, 0, 0, 0])): """A composite function of next_rotation.""" return next_rotation(next_rotation(r, p1), p2) ``` ```python display(sp.simplify(composite_rotation(R, qt.QH([s, u, v, w])).t)) display(sp.simplify(composite_rotation(R, qt.QH([s, u, v, w])).x)) is_quadratic(composite_rotation(R, qt.QH([s, u, v, w]))) ``` The next_rotation function can use any quaternion parameter $P$ as input and create another member of the group. This does not mean that rotations have four degrees of freedom. There is an equivalence relation involved since the product of a quaternion with its inverse has a norm of one. This algebraic constraint means the composite_rotation function has $4-1=3$ degrees of freedom. The composite_rotation function could be used to show that there is a real-valued quaternion representation of the compact Lie group $SO(3)$. Since it is well known that quaternions can do this, such an effort will be skipped. ## Other Triple Products Lead to More Than Just Rotations Other triple products are possible. For example, the two quaternions could be on the same side. A number of years ago, a search for a real-valued quaternion function that could do a Lorentz boost turned up this difference between two one-sided triples, $ \frac{1}{2}((P P R)^* - (P^* P^* R)^*)$: ```python def triple_2_on_1(r, p=qt.QH([1, 0, 0, 0])): """The two are on one side, minus a different two on one side.""" ppr = p.product(p.product(r)).conj() pcpcr = p.conj().product(p.conj().product(r)).conj() pd = ppr.dif(pcpcr) pd_ave = pd.product(qt.QH([1/2, 0, 0, 0])) return pd_ave ``` ```python rq_321 = triple_2_on_1(R, P) display(sp.simplify(rq_321.t)) display(sp.simplify(rq_321.x)) display(sp.simplify(rq_321.y)) display(sp.simplify(rq_321.z)) ``` If $s=0$, then triple_2_on_1 would contribute nothing. Explore the hyperbolic sine and cosine: ```python phx = qt.QH([sp.cosh(a), sp.sinh(a), 0, 0]) ppr = triple_2_on_1(R, phx) display(sp.simplify(ppr.t)) ``` This is promising for doing a Lorentz boost. There is a direct link between hyperbolic trig functions and the relativistic velocity $\beta$ and stretch factor $\gamma$ of special relativity. $$\gamma = \cosh(\alpha)$$ $$\beta \gamma = \sinh(\alpha)$$ The trig functions are based on a circle in the plane, while the hyperbolic trig functions start with hyperbolas. The definitions are remarkably similar: $$\sin(\alpha) = \frac{e^{i \alpha} - e^{-i \alpha}}{2 i}$$ $$\cos(\alpha) = \frac{e^{i \alpha} + e^{-i \alpha}}{2}$$ $$\sinh(\alpha) = \frac{e^{\alpha} - e^{-\alpha}}{2}$$ $$\cosh(\alpha) = \frac{e^{\alpha} + e^{-\alpha}}{2}$$ The hyperbolic trig functions oddly are "more real", never needing an imaginary factor. The hyperbola of the hyperbolic cosine does touch the unit circle at its minimum, suggesting a solitary link to the trig functions. 
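As a small added check, the exponential definitions above give the hyperbolic analogue of $\sin^2(\alpha) + \cos^2(\alpha) = 1$, which is exactly the constraint $\gamma^2 - (\beta \gamma)^2 = 1$ that makes the hyperbolic parameterization of a boost work:

```python
import sympy as sp

alpha = sp.Symbol('alpha', real=True)

# cosh and sinh built directly from the exponential definitions
cosh_a = (sp.exp(alpha) + sp.exp(-alpha)) / 2
sinh_a = (sp.exp(alpha) - sp.exp(-alpha)) / 2
print(sp.simplify(cosh_a**2 - sinh_a**2))       # 1

# With gamma = cosh(alpha) and beta*gamma = sinh(alpha):
gamma = sp.cosh(alpha)
beta_gamma = sp.sinh(alpha)
print(sp.simplify(gamma**2 - beta_gamma**2))    # 1, i.e. gamma = 1/sqrt(1 - beta^2)
```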
Combine the three triples and test if they do all the work of a Lorentz boost: $$\rm{triple-triple}(R, P) \equiv P R P^* + \frac{1}{2}((P P R)^* - (P^* P^* R)^*)$$ ```python def triple_triple(r, p=qt.QH([1, 0, 0, 0])): """Use three triple products for rotations and boosts.""" # Note: 'qtype' provides a record of what algrabric operations were done to create a quaternion. return triple_sandwich(r, p).add(triple_2_on_1(r, p), qtype="triple_triple") ``` Can this function do a rotation? If the first value of $P$ is equal to zero, then the two one-sided triple terms, $PPR$, will make no contribution, leaving the triple sandwich $PRP^*$. So long as the norm is equal to unity, then spatial rotations result. Do a rotation: ```python jk = qt.QH([0, 0, 3/5, 4/5]) display(sp.simplify(triple_triple(R, jk).t)) display(sp.simplify(triple_triple(R, jk).x)) display(sp.simplify(triple_triple(R, jk).y)) display(sp.simplify(triple_triple(R, jk).z)) is_quadratic(triple_triple(R, jk)) ``` Something important has changed going from the regular trig functions to these hyperbolic functions for rotations. The requirements that the first term must be zero while the other three terms are normalized to unity means that one cannot go an arbitrarily small distance away and find another transformation. If one wants a product of rotations, those rotations must be at right angles to each other. ```python Qi, Qj, Qk = qt.QH([0, 1, 0, 0]), qt.QH([0, 0, 1, 0]), qt.QH([0, 0, 0, 1]) print(triple_triple(triple_triple(R, Qi), Qj)) print(triple_triple(R, Qi.product(Qj))) ``` (t, -x, -y, z) triple_triple (t, -x, -y, z) triple_triple The fact that one cannot find a super close neighbor is a big technical change. What is so special about setting the first term equal to zero? Is there a more general form? Perhaps all that is needed is for the first term of the square to be equal to negative one. Test this out: ```python minus_1 = qt.QH([2, 2, 1, 0]) print(minus_1.square().t) display((triple_triple(R, minus_1).t, triple_triple(R, minus_1).x, triple_triple(R, minus_1).y, triple_triple(R, minus_1).z)) is_quadratic(triple_triple(R, minus_1)) ``` To be honest, this came as a surprise to me. Notice that the value for time changes, so a rotation is getting mixed in with a boost. This sort of mixing of rotations and boosts is known to happen when one does two boosts, one say along $x$, the other along $y$. Now we can say a similar thing is possible for rotations. If there scalar is zero then one gets a pure spatial rotation. When that is not the case, there is a mixture of rotations and boosts. Demonstrate that a boost along the $x$ axis works. ```python bx = qt.QH([sp.cosh(a), sp.sinh(a), 0, 0]) display(sp.simplify(bx.square().t)) display(sp.simplify(triple_triple(R, bx).t)) display(sp.simplify(triple_triple(R, bx).x)) display(sp.simplify(triple_triple(R, bx).y)) display(sp.simplify(triple_triple(R, bx).z)) is_quadratic(triple_triple(R, bx)) ``` Perfect. It was this result that began my investigation of triple_triple quaternion products. This is what the boost looks like using gammas and betas: $$(\gamma t - \gamma \beta x, \gamma x - \gamma \beta t, y, z)$$ The first term of the square of the hyperbolic parameter $P=bx$ is equal to positive one. So long as the triple_triple function is fed a quaternion parameter $P$ whose first term of the square has an absolute value of one, the interval is invariant. That is surprisingly simple. Note the double angle in the hyperbolic trig function that appeared earlier for rotations. 
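For completeness, here is a direct check, added here, that the $\gamma/\beta$ form quoted above preserves the interval without any quaternion machinery; the factor $\gamma^2(1 - \beta^2) = 1$ does all the work. Fresh symbol names are used so the notebook's own $t, x, y, z$ symbols are not redefined.

```python
import sympy as sp

tb, xb, yb, zb, beta = sp.symbols('t_b x_b y_b z_b beta', real=True)
gamma = 1 / sp.sqrt(1 - beta**2)

# Boosted coordinates in the gamma/beta form
tp = gamma*tb - gamma*beta*xb
xp = gamma*xb - gamma*beta*tb

interval_before = tb**2 - xb**2 - yb**2 - zb**2
interval_after = tp**2 - xp**2 - yb**2 - zb**2
print(sp.simplify(interval_after - interval_before))   # 0: the interval is invariant
```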
## Spatial Reflection and Time Reversal For a spatial reflection, just one spatial term flips signs. The first term of the square will not be altered. Yet the triple_triple function cannot flip only one sign. It can flip two terms. Thus, using just the triple_triple function one can go from all positive, to two positive-two negative, to all negative terms, but never one or three negative terms starting from an all positive quaternion $R$. The conjugate operator can do odd sign changes. Do a spatial reflection on $x$ only by rotating using $i$ and using the conjugate operator like so: ```python x_reflection = triple_triple(R, Qi).conj() print(x_reflection) is_quadratic(x_reflection) ``` Time reversal also cannot be done using triple_triple. The parameter $P$ is used twice, so its sign is of no consequence for the scalar in $R$. The entire quaternion $R$ must be multiplied by $-1$ then take a conjugate like so: ```python t_reversal = triple_triple(R).conj().product(qt.QH([-1, 0, 0, 0], qtype="sign_flip")) print(t_reversal) is_quadratic(t_reversal) ``` Rotations and boosts do not do the work of time reversal. Time reversal requires different algebraic tricks. ## Fixing the Limitations of the Triple_Triple Function The triple_triple function must be fed quaternions whose square is either exactly equal to plus or minus one. Create a function that can take in _any_ quaternion as a parameter and generate the next quadratic. The function must be scaled to the square root of the first term of the quaternion parameter $P$ squared. Expand the parameters so both spatial reflections and time reversals can be done. If the parameter $P$ is light-like, it cannot be used to do a boost. Feed the triple_triple function a light-like quaternion and it will always return zero. Light-like quaternions can do rotations. The next_rotation function is up to the task. ```python def next_quadratic(r, p=qt.QH([1, 0, 0, 0]), conj=False, sign_flip=False): """Generates another quadratic using a quaternion parameter p, if given any quaternion and whether a conjugate or sign flip is needed.""" pt_squared = p.square().t # Avoid using sp.Abs() so equations can be simplified. if isinstance(pt_squared, (int, float)): if pt_squared < 0: pt_squared *= -1 else: if pt_squared.is_negative: pt_squared *= -1 sqrt_pt_squared = sp.sqrt(pt_squared) # A light-like parameter P can rotate but not boost R. if sqrt_pt_squared == 0: rot_calc = next_rotation(r, p) else: p_normalized = p.product(qt.QH([1/sqrt_pt_squared, 0, 0, 0])) rot_calc = triple_triple(r, p_normalized) if conj: conj_calc = rot_calc.conj() else: conj_calc = rot_calc if sign_flip: sign_calc = conj_calc.product(qt.QH([-1, 0, 0, 0])) else: sign_calc = conj_calc calc_t = sp.simplify(sp.expand(sign_calc.t)) calc_x = sp.simplify(sp.expand(sign_calc.x)) calc_y = sp.simplify(sp.expand(sign_calc.y)) calc_z = sp.simplify(sp.expand(sign_calc.z)) return qt.QH([calc_t, calc_x, calc_y, calc_z], qtype="L") ``` ```python display(sp.simplify(next_quadratic(R, P, True, True).t)) display(sp.simplify(next_quadratic(R, P, True, True).x)) is_quadratic(next_quadratic(R, P, True, True)) ``` No matter what values are used for the parameter $P$, the next_quadratic function will preserve the interval of $R$. Even a light-like interval works: ```python print(next_quadratic(R, qt.QH([s, s, 0, 0]))) is_quadratic(next_quadratic(R, qt.QH([s, s, 0, 0]))) ``` Notice how the $y$ and $z$ terms flip positions, but the squaring process will put both into their proper spots in the first term of the square. 
## The Lorentz Group and Functional Composition with the next_quadratic Function The Lorentz group is all possible ways to transform an event in space-time yet preserve the quadratic form: $$(t, x, y, z) \rightarrow t^2 - x^2 - y^2 - z^2$$ The elements of the group are the tuples (t, x, y, z) but not the rotation angles, boost velocities, conjugation and sign flips. A group is defined as a binary operation on a set of elements that has 4 qualities: 1. Closure 1. An inverse exists 1. There is an identity element 1. Associative The next_quadratic function acts on one element of the group. The binary operation is a composite function built from two next_quadratic functions. Take the result of one action of the next_quadratic function, and have that result go into another round of the next_quadratic function. ```python def composite_quadratic(r, p1=qt.QH([1, 0, 0, 0]), p2=qt.QH([1, 0, 0, 0]), conj1=False, conj2=False, sign_flip1=False, sign_flip2=False): """A composite function for the next_quadratic function.""" return next_quadratic(next_quadratic(r, p1, conj1, sign_flip1), p2, conj2, sign_flip2) ``` ```python print(composite_quadratic(R)) is_quadratic(composite_quadratic(R)) print(composite_quadratic(R, Qi, Qj, True, True, True, False)) is_quadratic(composite_quadratic(R, Qi, Qj, True, True, True, False)) print(composite_quadratic(R, minus_1, Qj, False, True, False, True)) is_quadratic(composite_quadratic(R, minus_1, Qj, False, True, False, True)) print(composite_quadratic(R, bx, P, True, False, True, False)) is_quadratic(composite_quadratic(R, bx, P, True, False, True, False)) print(composite_quadratic(composite_quadratic(R, bx, bx))) is_quadratic(composite_quadratic(composite_quadratic(R, bx, bx))) ``` Each of these composite functions generates exactly the same quadratic as required to be part of the Lorentz group. These five examples argue for closure: every possible choice for what one puts in the composite_quadratic function will have the same quadratic. I don't have the math skills to prove closure (unless one thinks the earlier general case is enough). Quaternions are a division algebra. As such, it is reasonable to expect an inverse to exist. Look for one for the $Qi$, $Qk$ parameter case: ```python print(composite_quadratic(R, Qi, Qj, True, True, True, False)) print(composite_quadratic(composite_quadratic(R, Qi, Qj, True, True, True, False), Qk)) ``` (-t, x, y, -z) L (-t, -x, -y, -z) L Close, but not quite. Add a sign_flip. ```python print(composite_quadratic(composite_quadratic(R, Qi, Qj, True, True, True, False), Qk, sign_flip1=True)) ``` (t, x, y, z) L The is back where we started with the quaternion $R$. Again, this is just an example and not a proof. Some inverses are easier to find than others like pure rotations or pure boosts with a rotation or opposite velocity. The identity composition was shown to do its fine work in the first composite_quadratic(R) example. Composite functions are associative, at least according to wikipedia. ## The Difference Between composite_rotation and composite_quadratic Both of these composite functions call another function twice, next_rotation and next_quadratic respectively. Both functions do a normalization. The next_rotation normalizes to the norm squared which can be zero if the parameter $P$ is zero, otherwise it is positive. The next_rotation function always does one thing, $P R P^{-1}$. The next_quadratic normalizes to the first term of the square of parameter $P$. That value can be positive, negative, or zero. 
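A quick numeric aside (an addition to the notebook): the sign of that first term is just the time-like, space-like, or light-like classification of the parameter $P$. The small helper below is hypothetical; the Q_tools square() method used elsewhere computes the same quantity.

```python
def first_term_of_square(q):
    """Scalar part of the square of q = (t, x, y, z): t^2 - x^2 - y^2 - z^2."""
    t, x, y, z = q
    return t*t - x*x - y*y - z*z

print(first_term_of_square((2, 1, 0, 0)))   #  3 > 0: time-like
print(first_term_of_square((1, 2, 0, 0)))   # -3 < 0: space-like
print(first_term_of_square((1, 1, 0, 0)))   #  0    : light-like
```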
When the first term of the square is positive or negative, the next_quadratic function treats both cases identically. Three triple quaternion products are used, $P R P^* + \frac{1}{2}((P P R)^* - (P^* P^* R)^*)$. The first term is identical to a rotation so long as the norm is equal to one. Otherwise, it is off just by a scaling factor. The difference happens when it is zero which indicates the properties of light come into play. It is the lightcone that separates time-like events from space-like events. For a light-like value of the parameter $P$, the triple-triple returns zero which is not a member of the group. If one uses only the first triple, then no matter what the norm of the light-like parameter $P$ happens to be, the resulting $R \rightarrow R'$ remains in the group. The rotation group $SO(3)$ is compact, while the Lorentz group $O(1, 3)$ is not. The change in algebra needed for a light-like parameter $P$ may be another way to view this difference. ## Degrees of Freedom The typical representation of the Lorentz group $O(1, 3)$ says there are six independent variables needed to represent the Lorentz group: three for rotations and three for boosts. Yet when one does two boosts in different directions, the result is a mix of a boost and a rotation. This suggests there is no such thing as a completely separate notion of rotations and boosts, that they have a capacity to mix. If true, that decreases the degrees of freedom. Two spatial rotations will result in a spatial rotation: ```python print(composite_quadratic(R, qt.QH([0, 1,0,1]), qt.QH([0, 1,1,0]))) is_quadratic(composite_quadratic(R, qt.QH([0, 1,0,1]), qt.QH([0, 1,1,0]))) ``` Notice that the value of the first squared term is negative. That value gets normalized to negative one in the composite_quadratic function (via the next_quadratic function that gets called twice). What makes these rotations purely spatial is the zero in the first position of the parameter $P$. It is easy enough to look at situations where the first term of the square is negative, and the first term of the parameter is not equal to zero: ```python print(composite_quadratic(R, qt.QH([4, 5,0,0]))) is_quadratic(composite_quadratic(R, qt.QH([4, 5,0,0]))) ``` This is both a boost and a rotation. The boost effect can be seen in the first and second terms where there is a positive and negative term (the negative being the term that "doesn't belong", seeing the $x$ in the first term and $t$ in the second). The rotation appears in the sign flips for $y$ and $z$. If the 4 and 5 are switched, there is no rotation of these terms: ```python print(composite_quadratic(R, qt.QH([5, 4,0,0]))) ``` (4.55555555555556*t - 4.44444444444444*x, -4.44444444444444*t + 4.55555555555556*x, y, z) L The first two terms are exactly the same. Now the last two terms don't flip signs because there is no rotation. Both the (4, 5) and (5, 4) parameter composites will have the same first term for the square. This real-valued quaternion representation makes this mixing possible to see. At first blush, one looks into the next_quadratic function and sees six degrees of freedom: four for the quaternion parameter $P$, one for the conjugate operator and one for the sign_flip. These last two are needed to generate spatial reflection and time reversal. The quaternion parameter $P$ gets normalized to the first term of its own square. This means that once three of the values are chosen, then the value of the fourth one is set by this algebraic constraint. 
The same thing happens with the composite_rotation function defined earlier: a 4D quaternion may go in, but the way it gets normalized means it is equivalent to a quaternion with a norm of one, and thus only 3 degrees of freedom remain. Representing the Lorentz group with only five degrees of freedom in this real-valued quaternion representation would be an interesting result if it can be rigorously proved.
b61f65ba9c33948acf084b31a0b345fe4577c2a8
103,888
ipynb
Jupyter Notebook
Notebooks/triple_products_and_distance.ipynb
dougsweetser/AIG
ce23119bbde41671438fb805dfba4b04b42d84d6
[ "Apache-2.0" ]
null
null
null
Notebooks/triple_products_and_distance.ipynb
dougsweetser/AIG
ce23119bbde41671438fb805dfba4b04b42d84d6
[ "Apache-2.0" ]
null
null
null
Notebooks/triple_products_and_distance.ipynb
dougsweetser/AIG
ce23119bbde41671438fb805dfba4b04b42d84d6
[ "Apache-2.0" ]
null
null
null
57.683509
3,368
0.733136
true
5,872
Qwen/Qwen-72B
1. YES 2. YES
0.941654
0.880797
0.829406
__label__eng_Latn
0.994684
0.765322
### Algebraic definition A set G equipped with operation $\bullet$ > Closure: $\forall a,b \in G, a \bullet b \in G$ >Associativity: $(a \bullet b) \bullet c = a \bullet (b \bullet c)$ >Identity element: $\exists e \in G, such \ that \ \forall a \in G \ a \bullet e = a$ >Inverse element: $\forall a \in G, \exists a^{-1}, such \ that \ a \bullet a^{-1} = e$ Can be shown, that $e \bullet a = a$ and $a^{-1} \bullet a = e$ and inverse and e are unique **Abelian group**: $\forall a,b \in G, a \bullet b = b \bullet a$ If a group is abelian, usually group operation is denoted by + and the identity element by 0 As usual in algebra are difined Homomorphism, Isomorphism, Endomorphism, Automorphism. In Algebra isomorph groups are deemed the same. group homomorphism $f:G \to H$ such that $f(g_1 \bullet g_2) = f(g_1) * f(g_2)$ **kernel** of a homomorphism is defined as the set of elements that get mapped to the identity element in the image. Group operation may be viewed as bijection from G to G. **Subgroup**: if H subset of G, which is closed under group operation and inverse operation. $H \leq G$, proper subgroup: $H \lt G$ &nbsp; Finite groups can be described by **Cayley table**: $ \begin{bmatrix} & Elmnts & e & a & b & c \\ & e & e & a & b & c \\ & a & a & a \bullet a & a \bullet b & a \bullet c \\ & b & b & b \bullet a & b \bullet b & b \bullet c \\ & c & c & c \bullet a & c \bullet b & c \bullet c \\ \end{bmatrix} $ Group can be described by **Presentation of a group**: $\langle S \vert R \rangle$ where S is set of generators and R is a set of rules free group $F_S$ over a given set $S$ consists of all expressions (a.k.a. words, or terms) that can be built from members of S: $\langle S \vert \emptyset \rangle$ ### Normal Subgroup **Coset left** (right): if $H \lt G$, set defined by an element $gH = \{g \bullet h : h \in H\}$ ($Hg = \{h \bullet g : h \in H\}$) cosets form a partitioning of group, any element of a coset is a represantative of the coset. the number of distinct cosets is called index of subgroup. if $H \lt G$ then $ index= \frac {\vert G \vert} {\vert H \vert} $ . A finite groups of prime size can not have subgroups. if $\forall g \in G : gH = Hg$, then $H$ is said to be a normal subgroup. Subgroups of abelian group are normal. **Conjugacy class**: $Cl(a)=\{ b \in G : \exists g \in G \text{ such that } b = gag^-1\}$ Conjugacy class is an equivalence relation and thus partitions the group. For abelian group $Cl(a) = \{a\}$ group of **Inner automorphisms**; $Inn(G)$ >$\varphi_g: G \rightarrow G$ such that $\varphi_g(x)=gxg^-1$ > $Inn(AbelianGroup) \simeq \{e\}$ >Normal subgoup is preserved by inner automorphisms. >kernel of a homomorphism is equal to a normal subgroup **Quotient group (factor group)**: we can define a group operation on cosets as the following $g_1H*g_2H=(g_1 \bullet g_2)H$. If $H \vartriangleleft G$ then $G/H$ is a group. ( $G/\{e\}=G$ , $G/G=\{e\}$ ) &nbsp; **Direct product**: $G \times H$ is the ordered pairs $(g,h)$ where $g \in G$ and $h \in H$ such that $(g_1,h_1) \bullet (g_2,h_2) == (g_1 * g_2, h_1*'h_2)$ Let a group $P$ has 2 subgroups $G,H$. 
$ P=G \times H \Leftrightarrow \begin{cases} & G \cap H == \{e\} \\ & \text{ every element can be expressed in } g \bullet h \\ & G \text{ comutes with } H \text{( or stronger:} G \text{ and } H \text{ are normal subgroups} \\ \end{cases} $ ### Geometric definition **Transformation group (Group action)** : $\varphi$ is a function of G on set X $\varphi : G \times X \rightarrow X:(g,x) \mapsto \varphi(g,x) $ where $\varphi(e,x)=x$, and $\varphi$ is bijective map. **symmetric group**: defined over any set is the group whose elements are all the bijections from the set to itself. For finite set of size n, it is $S_n$: all permutations of finite set of size n. **Cayley's theorem**: every group can be realized as a subgroup of a symmetric group over some set **Orbit**: Suppose $G$ is a group acting on a set $S$. ${G_{.s}}$ the orbit of $s \in S$ is defined as $\{t \in S \vert \exists g \in G, g.t=s \}$ The set of orbits of (points x in) X under the action of G form a partition of X. A group action is termed **transitive** if it has exactly one orbit: $\forall x,y \in S, \exists g \in G , g.x = y$ **Stabilizer (isotropy group)**: denoted $Stab_G(s)$, is defined as: $\{ h \in G \mid \ h.s = s \} $. Actions of groups on vector spaces are called **representations** of the group Alternativly a **representation** of a group G on a vector space V is a group homomorphism $\varphi: G \rightarrow GL(V,F)$ **General linear group** : $GL(V,F)$ are all matrices over a field $F$, with 0 determinant (invertible). > $SL(n, F)$: determinant is 1. In $R^n$ volume and orientation preserving linear transformations > $O(n)$: orthogonal matrcies $O^TO=I$. Preserves the dot product of vectors. >$SU(n)$: n×n unitary matrices with determinant 1 on $\mathbb{C}$ A **faithful representation** is one in which the homomorphism G → GL(V) is injective; in other words, one whose kernel is the trivial subgroup {e} consisting only of the group's identity element. 
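A short sympy sketch (an addition to these notes, assuming the standard `orbit`, `stabilizer` and `is_transitive` methods of `PermutationGroup`) illustrating the orbit and stabilizer definitions with the dihedral group acting on the vertices of a square:

```python
from sympy.combinatorics.named_groups import DihedralGroup

D4 = DihedralGroup(4)          # symmetries of the square acting on vertices {0, 1, 2, 3}

print(D4.order())              # 8
print(D4.orbit(0))             # {0, 1, 2, 3}: one orbit, so the action is transitive
print(D4.is_transitive())      # True

stab = D4.stabilizer(0)        # the elements that fix vertex 0
print(stab.order())            # 2, consistent with |orbit| * |stabilizer| = |G|
```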
### examples small groups of order n: > n=1, the only group {e} == **trivial group** > n=2, only one group $Z_2 \simeq S_2$ >> $ \begin{bmatrix} & & e & a \\ & e & e & a \\ & a & a & e \\ \end{bmatrix} $ > n=3, only one group $Z_3$ >>$\begin{bmatrix} & & e & a & b \\ & e & e & a & b \\ & a & a & b & e\\ & b & b & e & a\\ \end{bmatrix}$ >> that is $b = a \bullet a == a^2$ > n=4, two non isomorphic groups >> $Z_4$ >>>$\begin{bmatrix} & & e & g & g^2 & g^3 \\ & e & e & g & g^2 & g^3 \\ & g & g & g^2 & g^3 & e\\ & g^2 & g^2 & g^3 & e & g\\ & g^3 & g^3 & e & g & g^2\\ \end{bmatrix}$ >> this can be viewed as a group of multiplications of $\{1, i, -1, -i\}$ >> one proper subgroup $Z_2 \lt Z_4 : \{ e, g^2 \}$ >> this subgroup partitions the group to the following cosets: $\{e, g^2\}$, $\{g, g^3\}$ >> Klein four-group $K_4$: >>>$ \begin{bmatrix} & & e & a & b & c \\ & e & e & a & b & c \\ & a & a & e & c & b \\ & b & b & c & e & a \\ & c & c & b & a & e \\ \end{bmatrix} $ >> 3 proper subgroups : $\{ e, a \},\{ e, b \},\{ e, c \}$ >> $K_4 = Z_2 \times Z_2 $ and can be presented as $ \langle a, b \vert a^2,b^2, ab=ba \rangle$ **symmetric group**: $S_3$ > order of the group is $3!=6$ > cycle examples: >>$(1,2) == \begin{pmatrix} 1 & 2 & 3\\ 2 & 1 & 3 \end{pmatrix}$ >>$(1,2,3) == \begin{pmatrix} 1 & 2 & 3\\ 2 & 3 & 1 \end{pmatrix}$ > elements (in cycles): { e, (1,2), (1,3), (2,3), (1,2,3), (1,3,2) } > first non abelian group: (1,2)(2,3) = (1,2,3) and (2,3)(1,2)=(1,3,2) >presentations: $\langle a, b, c \vert a^2,b^2, c^3, abc \rangle$ or $\langle s_1, s_2 \vert s_1^2,s_2^2, (s_1s_2)^3 \rangle$ or $\langle s, t \vert s^3,t^2, (st)=ts^2 \rangle$ > proper subgroups: >>$Z_2$: {e,(1,2)}, {e,(1,3)}, {e,(2,3)} >> $Z_3$: {e,(1,2,3), (1,3,2)} > conjugacy class partitioning >> {e} >>{ (1,2), (1,3), (2,3) } >>{ (1,2,3), (1,3,2) } this is the only normal subgroup >$Inn(S_3) \simeq S_3$ **Cyclic group** of order n: $Z_n$ : $\langle a \vert a^n \rangle$, that is one generator and $a^n = e$ > $Z_n$ is abelian group > it may be looked as >> modulo n arithmetics >> rotations by $2\pi \frac {m} {n}$ **Free group** of order 1: $F_1: \langle a \vert \emptyset \rangle$ > consists of all possible strings of $a$ and $a^{-1}$ reducted by the group identity $a \bullet a^{-1} = e$ > for example $"aaaaaa"$ or $"a^{-1}a^{-1}a^{-1}"$ > $F_1 \simeq Z$ where $Z$ with + is treated as Cyclic goup ## example Dihedral transformation group $D_n$ is the group of symmetries of a regular polygon (includes rotations and reflections). Orbit of a point in $R^2$ is the set of vertexes of a polygon. 
$D_n = \langle r,s \vert r^n, s^2, (sr)^2 \rangle$, order of the group = 2n, $D_3 \simeq S_3$ $(sr)^2=e \Leftrightarrow srs=r^{-1}$ $r_i==r^i; s_i==r_is \Rightarrow r_ir_j=r_{i+j}, r_is_j=s_{i+j},s_jr_i=s_{i-j},s_is_j=r_{i-j}$ natural represantation in $R^2$: $ r_i=\begin{pmatrix} cos \frac{2 \pi i}{n} & -sin \frac{2 \pi i}{n}\\ sin \frac{2 \pi i}{n} & cos \frac{2 \pi i}{n} \end{pmatrix} , s_i=\begin{pmatrix} cos \frac{2 \pi i}{n} & sin \frac{2 \pi i}{n}\\ sin \frac{2 \pi i}{n} & -cos \frac{2 \pi i}{n} \end{pmatrix} $ $D_4 \lt O(2)$, $D_4 \lt SO(3)$ $D_4$: group of symmetries of a square > conjucacy classes: >> $\{e\}$ >> $\{r^2\}$ rotation by $\pi$ >> $\{s,s_2\}$ reflections horisontal and vertical >> $\{s_1,s_3\}$ diagonal reflections >> $\{r,r^3\}$ rotations by $\pi/2$ and $3\pi/2$ > proper subgroups: >> $\{e,r^2\}$ Normal subgroup >> $\{e,s\}$,$\{e,s_1\}$,$\{e,s_2\}$,$\{e,s_3\}$ >> $\{e,s,r^2,r^2s\}$, $\{e,rs,r^2,r^3s\}$ >> $\{e,r,r^2,r^3\}$ Normal subgroup > $Inn(D_4) \simeq K_4$ ```python # some sympy Permutation examples # this is needed for google.colab #from IPython.display import HTML #display(HTML("")) from sympy import init_printing init_printing(use_latex='mathjax') import sympy.combinatorics.permutations as P p1 = P.Permutation(4) display(p1.array_form) display(p1) #p1.cyclic_form print('='*50) p1 = P.Permutation(3,1)(0,2,4) display(p1.array_form) display(p1) print('='*50) p2=p1*p1 display(p2.array_form) display(p2) print('='*50) ``` $$\left [ 0, \quad 1, \quad 2, \quad 3, \quad 4\right ]$$ $$\left( 4\right)$$ ================================================== $$\left [ 2, \quad 3, \quad 4, \quad 1, \quad 0\right ]$$ $$\left( 0\; 2\; 4\right)\left( 1\; 3\right)$$ ================================================== $$\left [ 4, \quad 1, \quad 0, \quad 3, \quad 2\right ]$$ $$\left( 0\; 4\; 2\right)$$ ================================================== ```python p1('abcde') ``` ['c', 'd', 'e', 'b', 'a'] ```python import sympy.combinatorics.generators as G d = G.dihedral(4) list(d) ``` $$\left [ \left( 3\right), \quad \left( 0\; 3\right)\left( 1\; 2\right), \quad \left( 0\; 1\; 2\; 3\right), \quad \left( 1\; 3\right), \quad \left( 0\; 2\right)\left( 1\; 3\right), \quad \left( 0\; 1\right)\left( 2\; 3\right), \quad \left( 0\; 3\; 2\; 1\right), \quad \left( 0\; 2\right)\left( 3\right)\right ]$$ ```python #S3 from sympy.combinatorics.free_groups import free_group, FreeGroup import sympy.combinatorics.fp_groups F, a, b = free_group("a, b") G = FpGroup(F, [a**2, b**3, (a*b)**4]) print("Order is:",G.order()) #l = sympy.combinatorics.fp_groups.low_index_subgroups(G,2) #for t in l: # print(t.table) ``` Order is: 24 ## Links [The Group Properties Wiki](https://groupprops.subwiki.org/wiki/Main_Page) [Magma Computational Algebra System](http://magma.maths.usyd.edu.au/magma/) [GAP - Groups, Algorithms, Programming -a System for Computational Discrete Algebra](https://www.gap-system.org/) http://www.sagemath.org/ [Handbook of Computational Group Theory. Derek F. Holt, Bettina Eick, Eamonn A. O'Brien](https://books.google.am/books?id=rnTLBQAAQBAJ&source=gbs_book_other_versions) https://docs.sympy.org/latest/modules/combinatorics/perm_groups.html https://people.maths.bris.ac.uk/~matyd/GroupNames/index.html ```python ```
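One more sympy sketch (added, assuming the standard `is_subgroup`, `is_normal` and `center` methods of `PermutationGroup`) that checks two of the $D_4$ facts listed above: the rotation subgroup $\{e, r, r^2, r^3\}$ is normal, and the center is $\{e, r^2\}$.

```python
from sympy.combinatorics import Permutation, PermutationGroup
from sympy.combinatorics.named_groups import DihedralGroup

D4 = DihedralGroup(4)                 # order 2n = 8
r = Permutation(0, 1, 2, 3)           # rotation by pi/2 on the vertices
rotations = PermutationGroup([r])     # {e, r, r^2, r^3}

print(rotations.is_subgroup(D4))      # True
print(rotations.is_normal(D4))        # True: index 2, hence a normal subgroup
print(D4.center().order())            # 2: the center is {e, r^2}
```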
14163f558b14909503e6c3d2a3149d33b04abbdb
20,153
ipynb
Jupyter Notebook
Math/IntroToGroups1.ipynb
gate42qc/seminars
35ff77b902d9c2ede619fd6e2d9c3e80d20d78de
[ "MIT" ]
6
2018-12-07T10:02:06.000Z
2019-11-24T19:30:03.000Z
Math/IntroToGroups1.ipynb
gate42qc/seminars
35ff77b902d9c2ede619fd6e2d9c3e80d20d78de
[ "MIT" ]
null
null
null
Math/IntroToGroups1.ipynb
gate42qc/seminars
35ff77b902d9c2ede619fd6e2d9c3e80d20d78de
[ "MIT" ]
1
2019-08-22T12:07:40.000Z
2019-08-22T12:07:40.000Z
32.039746
368
0.487421
true
4,107
Qwen/Qwen-72B
1. YES 2. YES
0.959762
0.928409
0.891052
__label__eng_Latn
0.931009
0.908545
# Relações entre fasores para elementos de circuitos Jupyter Notebook desenvolvido por [Gustavo S.S.](https://github.com/GSimas) Se a corrente através de um resistor R for i = Im cos(wt + ϕ), a tensão nele será dada pela lei de Ohm, como segue: \begin{align} {\Large v(t) = iR = R I_m cos(\omega t + \phi)} \\{\Large V = RI_m \angle \phi} \\{\Large V = RI} \end{align} Para o indutor L, suponha que a corrente através dele seja i = Im cos(wt + ϕ). A tensão no indutor é: \begin{align} {\Large v(t) = L \frac{di}{dt} = - \omega L I_m sen(\omega t + \phi)} \\{\Large v(t) = \omega L I_m cos(\omega t + \phi + 90º)} \\{\Large V = \omega L I_m e^{j(\phi + 90º)} = \omega L I_m \angle \phi + 90º} \end{align} A partir da equação da corrente no indutor, podemos escrever: \begin{align} {\Large V = j \omega L I} \end{align} Para o capacitor C, suponha que a tensão nele seja v = Vm cos(wt + ϕ). A corrente através do capacitor é: \begin{align} {\Large i(t) = C \frac{dv}{dt}} \\{\Large I = j\omega C V} \\{\Large V = \frac{I}{j \omega C}} \end{align} **Exemplo 9.8** A tensão v = 12 cos(60t + 45°) é aplicada a um indutor de 0,1 H. Determine a corrente em regime estacionário através do indutor. ```python print("Exemplo 9.8") omega = 60 L = 0.1 V = 12 #v = 12[45º] #I = V/jwL[45 - 90] I = V/(omega*L) phi = 45 - 90 print("Corrente fasorial: {}[{}]".format(I,phi)) print("Corrente temporal: {}cos({}t + {})".format(I,omega,phi)) ``` Exemplo 9.8 Corrente fasorial: 2.0[-45] Corrente temporal: 2.0cos(60t + -45) **Problema Prático 9.8** Se a tensão v = 10 cos(100t + 30°) for aplicada a um capacitor de 50 uF, calcule a corrente através do capacitor. ```python print("Problema Prático 9.8") V = 10 u = 10**(-6) C = 50*u omega = 100 #I = jwCV[30 + 90] I = omega*C*V phi = 30 + 90 print("Corrente fasorial: {}[{}]".format(I,phi)) print("Corrente temporal: {}cos({}t + {})".format(I,omega,phi)) ``` Problema Prático 9.8 Corrente fasorial: 0.04999999999999999[120] Corrente temporal: 0.04999999999999999cos(100t + 120) ## Impedância e Admitância Das expressões de tensão e corrente fasorial que apresentamos, obtemos a lei de Ohm na forma fasorial para qualquer tipo de elemento: \begin{align} {\Large Z = \frac{V}{I}} \end{align} onde Z é um valor dependente da frequência conhecido como impedância e medido em ohms. **A impedância Z de um circuito é a razão entre a tensão fasorial V e a corrente fasorial I, medida em ohms (Ω).** A impedância representa a oposição que um circuito oferece ao fluxo de corrente senoidal. Embora seja a razão entre dois fasores, ela não é um fasor, pois não corresponde a uma quantidade que varia como uma senoide. Sendo um valor complexo, a impedância pode ser expressa na forma retangular como segue: \begin{align} {\Large Z = R + jX} \end{align} onde R = Re(Z) é a resistência e X = Im(Z) é a reatância. A reatância X pode ser positiva ou negativa. Dizemos que a impedância é indutiva quando X é positiva, ou capacitiva quando X é negativa. A impedância também pode ser expressa na forma polar como: \begin{align} {\Large Z = |Z| \angle \theta} \\{\Large |Z| = \sqrt{R^2 + X^2}} \\{\Large \theta = arctg(\frac{X}{R})} \\{\Large R = |Z|cos(\theta)} \\{\Large X = |Z|sen(\theta)} \end{align} **A admitância Y é o inverso da impedância, medida em siemens (S).** \begin{align} {\Large Y = \frac{1}{Z} = \frac{I}{V}} \\{\Large Y = \frac{1}{|Z|} \angle -\theta} \\{\Large Y = G + jB} \end{align} onde G = Re Y é chamada condutância e B = Im Y é denominada susceptância. 
\begin{align} {\Large G + jB = \frac{1}{R + jX} = \frac{R - jX}{R^2 + X^2}} \\{\Large G = \frac{R}{R^2 + X^2}} \\{\Large B = \frac{-X}{R^2 + X^2}} \end{align} **Exemplo 9.9** Determine v(t) e i(t) no circuito apresentado na Figura 9.16. ```python print("Exemplo 9.9") import numpy as np V = 10 C = 0.1 R = 5 omega = 4 Zc = 1/(omega*C) print("Impedância Z = {} - j{}".format(R,Zc)) Z = np.sqrt(R**2 + Zc**2) theta = np.arctan(Zc/R)*180/np.pi I = V/Z phi = 0 - theta print("I = {} [{}º]".format(I,phi)) V = I*Zc print("V = {} [{}º]".format(V,phi - 90)) ``` Exemplo 9.9 Impedância Z = 5 - j2.5 I = 1.7888543819998317 [-26.56505117707799º] V = 4.47213595499958 [-116.56505117707799º] **Problema Prático 9.9** Consulte a Figura 9.17. Determine v(t) e i(t). ```python print("Problema Prático 9.9") V = 20 omega = 10 phi = 30 R = 4 L = 0.2 Zl = omega*L print("Z = {} + j{}".format(R,Zl)) Z = np.sqrt(R**2 + Zl**2) theta = np.arctan(Zl/R)*180/np.pi I = V/Z alpha = phi - theta print("I = {}[{}º]".format(I,alpha)) print("i(t) = {}sen({}t + {}º)".format(I,omega,alpha)) Vl = Zl*I print("V = {}[{}º]".format(Vl,alpha + 90)) print("v(t) = {}sen({}t + {}º)".format(Vl,omega, alpha + 90)) ``` Problema Prático 9.9 Z = 4 + j2.0 I = 4.47213595499958[3.43494882292201º] i(t) = 4.47213595499958sen(10t + 3.43494882292201º) V = 8.94427190999916[93.43494882292201º] v(t) = 8.94427190999916sen(10t + 93.43494882292201º) ## Leis de Kirchhoff no Domínio da Frequência As Leis de Kirchhoff (dos nós e das malhas) também se aplicam para a análise de circuitos no domínio da frequência (fasorial). A associação de impedâncias segue o mesmo cálculo para a associação de resistências e a associação de admitâncias segue a de condutâncias: **Em série** \begin{align} {\Large Z_{eq} = Z_1 + Z_2 + ... + Z_N = \sum_{i = 1}^{N} Z_i } \end{align} **Em Paralelo** \begin{align} {\Large \frac{1}{Z_{eq}} = \frac{1}{Z_1} + \frac{1}{Z_2} + ... + \frac{1}{Z_N}} \\{\Large Z_{eq} = (\sum_{i=1}^{N} Z_i ^{-1})^{-1}} \end{align}
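Como complemento (este exemplo não faz parte do texto original e os valores numéricos são apenas hipotéticos), podemos usar números complexos em Python para calcular uma associação de impedâncias: um resistor em série com um indutor e, em paralelo com esse conjunto, um capacitor.

```python
import numpy as np

print("Exemplo adicional: associação de impedâncias")

# Valores hipotéticos, apenas para ilustrar o cálculo
omega = 100.0     # rad/s
R = 4.0           # ohms
L = 0.2           # H
u = 10**(-6)
C = 50*u          # F

Zr = R                        # impedância do resistor
Zl = 1j*omega*L               # impedância do indutor: jwL
Zc = 1/(1j*omega*C)           # impedância do capacitor: 1/(jwC)

Zserie = Zr + Zl              # associação em série: soma das impedâncias
Zeq = 1/(1/Zserie + 1/Zc)     # associação em paralelo: inverso da soma dos inversos

print("Zeq = {} + j{}".format(Zeq.real, Zeq.imag))
print("Zeq = {}[{}º]".format(np.abs(Zeq), np.angle(Zeq, deg=True)))
```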
277817290d3a19c6a196d1a0b1228ffe6589baf8
9,651
ipynb
Jupyter Notebook
Aula 19 - Fasores e Elementos de Circuitos.ipynb
ofgod2/Circuitos-electricos-Boylestad-12ed-Portugues
60e815f6904858f3cda8b5c7ead8ea77aa09c7fd
[ "MIT" ]
7
2019-08-13T13:33:15.000Z
2021-11-16T16:46:06.000Z
Aula 19 - Fasores e Elementos de Circuitos.ipynb
ofgod2/Circuitos-electricos-Boylestad-12ed-Portugues
60e815f6904858f3cda8b5c7ead8ea77aa09c7fd
[ "MIT" ]
1
2017-08-24T17:36:15.000Z
2017-08-24T17:36:15.000Z
Aula 19 - Fasores e Elementos de Circuitos.ipynb
ofgod2/Circuitos-electricos-Boylestad-12ed-Portugues
60e815f6904858f3cda8b5c7ead8ea77aa09c7fd
[ "MIT" ]
8
2019-03-29T14:31:49.000Z
2021-12-30T17:59:23.000Z
27.574286
274
0.488758
true
2,136
Qwen/Qwen-72B
1. YES 2. YES
0.805632
0.718594
0.578923
__label__por_Latn
0.966104
0.183362
# CONTROLLABILITY OF BIOLOGICAL SYSTEMS This notebook explores the construction and interpretation of transfer functions for more complex networks. # Preliminaries ```python !pip -q install controlSBML import controlSBML as ctl import control from controlSBML.util import makeSimulationTimes import pandas as pd import matplotlib.pyplot as plt import numpy as np import tellurium as te import sympy print("controlSBML version: " + ctl.__version__) ``` controlSBML version: 0.2.10 # Transfer Functions in the ``control`` Package Suppose our transfer function is $G(s) = \frac{1}{s} \frac{k_1}{k_2 + s}$. The ``control`` packages provides a way to construct transfer functions that are a ratio of polynomials in $s$. ## Representing Polynomials A polynomial in $s$ is represented as a ``python`` list. For example, $s + k_2$ is the polynomial $(1)s^1 + (k_2) s^0$. It is represented by the list ``[1, k2]``, where ``k2`` is the floating point value of $k_2$. The interpretation is that the last element of the list is the coefficient of $s^0$; elements that preceed the last element represent successive powers of $s$. **Question: What is the list that represents the polynomial $3 S^3 + 2s$?** ## Creating Transfer Functions To create a transfer function, you provide the numerator and denominator polynomial. Consider the construction of the transfer function for $G(s) = \frac{1}{s} \frac{k_1}{k_2 + s} = \frac{k_1}{s^2 + k_2 s}$. **Question: What are the poles of $G(s)$?** ```python k1 = 1 k2 = 2 tf = control.TransferFunction([k1], [1, k2, 0]) tf ``` $$\frac{1}{s^2 + 2 s}$$ ## Using ``TransferFunction`` objects There are several ways to use transfer function objects. You can find the poles, calculate DC gain, and simulate impulse and step responses. ### Poles of a TransferFunction ```python # The poles should be 0, -k2 = -2 tf.pole() ``` array([-2., 0.]) ### DC Gain of a TransferFunction ```python tf.dcgain() ``` inf **Question: Why is DC Gain = $\infty$?** ### Impulse Response ```python # You can use controlSBML to make a sequence of simulation times. By default it's in increments # of 0.1 from 0 to 5. TIMES = ctl.makeSimulationTimes(end_time=10, points_per_time=5) TIMES ``` array([ 0. , 0.2, 0.4, 0.6, 0.8, 1. , 1.2, 1.4, 1.6, 1.8, 2. , 2.2, 2.4, 2.6, 2.8, 3. , 3.2, 3.4, 3.6, 3.8, 4. , 4.2, 4.4, 4.6, 4.8, 5. , 5.2, 5.4, 5.6, 5.8, 6. , 6.2, 6.4, 6.6, 6.8, 7. , 7.2, 7.4, 7.6, 7.8, 8. , 8.2, 8.4, 8.6, 8.8, 9. , 9.2, 9.4, 9.6, 9.8, 10. ]) ```python result = control.impulse_response(tf, T=TIMES) ``` ``result`` is complicated objects with many properties. * ``result.outputs`` - ``result.outputs[n]`` is a time series for the output ``n`` * ``result.t`` - times simulated (which should be the same as ``TIMES``) ```python # Since we're doing single input single output (SISO) systems, # you can simplify matters by just flattening result.t, result.outputs plt.plot(result.t.flatten(), result.outputs.flatten()) ``` **Question: The above impulse response converges to 0.5. How is this predicted by $G(s)$?** Hint: Recall that for an impulse input, $y(\infty) = lim_{s \rightarrow 0} s G(s)$. ### Step Response **Question: Does $G(s)$ converge for a step response? If so, to what value?** Hint: Recall that for a step input, $y(\infty) = lim_{s \rightarrow 0} G(s)$. Consider $H(s) = \frac{k_1}{s + k_2}$. **Question: Construct ``control.TransferFunction`` object for $H(s)$. 
if $k_2 = 3$.** **REMOVE** ```python k2 = 3 tf = control.TransferFunction([k1], [1, k2]) tf ``` $$\frac{1}{s + 3}$$ ```python result = control.step_response(tf, T=TIMES) plt.plot(result.t.flatten(), result.outputs.flatten()) ``` **Question: The above step response converges to 0.33. How is this predicted by $H(s)$?** # State Space Representation in the ``control`` Package Consider the reaction network S1 -> S2 + S3; k1*S1 S2 -> S3; k2*S2 S3 -> ; k3*S3 k1 = 0.5 k2 = 1.5 k3 = 0.75 **Question: What are the system equations for this network?** **REMOVE** \begin{eqnarray} \dot{S}_1 & = & - k_1 S_1 \\ \dot{S}_2 & = & k_1 S_1 - k_2 S_2 \\ \dot{S}_3 & = & k_1 S_1 + k_2 S_2 - k_3 S_3 \\ \end{eqnarray} A state space representation of a SISO system is: \begin{eqnarray} \dot{\bf x} & = & {\bf A} {\bf x} + {\bf b} u \\ y & = & {\bf c} {\bf x} \end{eqnarray} where * ${\bf x}$ is a vector of state * $u$ is the scalar input * $y$ is the scalar output * ${\bf A}$, a matrix of constants, describes the relationships between state variables * ${\bf b}$ is a column vector that indicates how the input affects each state * ${\bf c}$ is a row vector that specifies how each state contributes to the output ### Creating State Space Objects for SISO Systems The ${\bf A}$ matrix is an alternative representation of the state equations. The rows are the equations. In our example, row 2 is the equation for $\dot{S}_2$. (Of course, the python index for this row is 1.) The columns, represent the state variable. So, column 3 is for $S_3$. That is $\dot{\bf x} = {\bf A} = \begin{bmatrix} -k_1 & 0 & 0\\ k_1 & -k_2 & 0 \\ k_1 & k_2 & -k_3 \end{bmatrix} {\bf x} + {\bf b}u$. **Question: If the input is $S_1$, what is ${\bf b}$?** **REMOVE** ${\bf b} = \begin{bmatrix} 1 \\ 0 \\ 0 \\ \end{bmatrix}. $ ### Creating ``control.StateSpace`` objects Consider a system with $S_1$ as input and $S_3$ as output. ```python k1 = 0.5 k2 = 1.5 k3 = 0.75 A = np.array( [ [-k1, 0, 0], [k1, -k2, 0], [k1, k2, -k3]]) B = [1, 0, 0] C = [0, 0, 1] state_space = control.StateSpace(A, B, C, 0) state_space ``` \[ \left(\begin{array}{rllrllrll|rll} -0.&\hspace{-1em}5&\hspace{-1em}\phantom{\cdot}&0\phantom{.}&\hspace{-1em}&\hspace{-1em}\phantom{\cdot}&0\phantom{.}&\hspace{-1em}&\hspace{-1em}\phantom{\cdot}&1\phantom{.}&\hspace{-1em}&\hspace{-1em}\phantom{\cdot}\\ 0.&\hspace{-1em}5&\hspace{-1em}\phantom{\cdot}&-1.&\hspace{-1em}5&\hspace{-1em}\phantom{\cdot}&0\phantom{.}&\hspace{-1em}&\hspace{-1em}\phantom{\cdot}&0\phantom{.}&\hspace{-1em}&\hspace{-1em}\phantom{\cdot}\\ 0.&\hspace{-1em}5&\hspace{-1em}\phantom{\cdot}&1.&\hspace{-1em}5&\hspace{-1em}\phantom{\cdot}&-0.&\hspace{-1em}75&\hspace{-1em}\phantom{\cdot}&0\phantom{.}&\hspace{-1em}&\hspace{-1em}\phantom{\cdot}\\ \hline 0\phantom{.}&\hspace{-1em}&\hspace{-1em}\phantom{\cdot}&0\phantom{.}&\hspace{-1em}&\hspace{-1em}\phantom{\cdot}&1\phantom{.}&\hspace{-1em}&\hspace{-1em}\phantom{\cdot}&0\phantom{.}&\hspace{-1em}&\hspace{-1em}\phantom{\cdot}\\ \end{array}\right) \] ### Using State Space Objects * simulation: ``control.input_output_response`` * convert to transfer function (since this is easier to deriving the transfer function: ``control.ss2tf`` ```python tf = control.ss2tf(state_space) tf ``` $$\frac{0.5 s + 1.5}{s^3 + 2.75 s^2 + 2.25 s + 0.5625}$$ ```python tf.dcgain() ``` 2.6666666666666683 # mTOR Signaling For your homework, you create a state space object for an SBML model. 1. Load the model into Tellurium 1. Simulate to the time for the operating point. 1. 
Construct ${\bf A}$ from the Jacobian (``getFullJacobian``). 1. Construct ${\bf b}$ based on the index of the chemical species being controlled. 1. Construct ${\bf c}$ based on the index of the chemical species for the output. 1. ``state_space = control.StateSpace(A, b, c, 0)``. ## Constructing State Space Objects from Tellurium ```python rr = te.loadSBMLModel("https://www.ebi.ac.uk/biomodels/model/download/BIOMD0000000823.2?filename=Varusai2018.xml") #print(rr.getAntimony()) ``` There's some work to get the indices right to construct ${\bf b}$, ${\bf c}$, and to get the species names. ## Constructing ``control`` objects using ``controlSBML`` ```python ctlsb = ctl.ControlSBML("https://www.ebi.ac.uk/biomodels/model/download/BIOMD0000000823.2?filename=Varusai2018.xml", input_names=["pAkt"], output_names=["mTORC1_DEPTOR"]) mtor_nl = ctlsb.makeNonlinearIOSystem("mtor") mtor_ss = ctlsb.makeStateSpace() MTOR_TF = ctlsb.makeTransferFunction(atol=1e-5, time=1) ``` ## Controllability Analysis Suppose that we want to control the complex ``mTORC1_DEPTOR``. What inputs should we control? We start by controlling the amount of insulin. ```python MTOR_TF ``` $$\frac{-0.03472 s^3 - 0.1617 s^2 - 0.09749 s - 0.001885}{s^5 + 11.05 s^4 + 34.96 s^3 + 29.05 s^2 + 7.379 s + 0.1362}$$ ```python MTOR_TF.dcgain() ``` -0.013840450064546183 ```python MTOR_TF.pole() ``` array([-5.99984811, -3.96034236, -0.57142857, -0.50165764, -0.02 ])
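As a follow-up sketch (an addition, going back to the three-species network defined earlier in this notebook), the simulation route mentioned above can be used to confirm the DC gain: the step response of the state-space model settles at the value returned by ``dcgain()``. This assumes ``control.step_response`` and ``StateSpace.dcgain()``, used the same way as for the transfer functions above.

```python
import numpy as np
import control

# Same three-species network as above: S1 is the input, S3 is the output
k1, k2, k3 = 0.5, 1.5, 0.75
A = np.array([[-k1, 0, 0],
              [k1, -k2, 0],
              [k1, k2, -k3]])
state_space = control.StateSpace(A, [1, 0, 0], [0, 0, 1], 0)

# Unit step on S1; the slowest pole is -0.5, so 25 time units is ample to settle
times = np.linspace(0, 25, 251)
result = control.step_response(state_space, T=times)

print(result.outputs.flatten()[-1])   # ~2.667
print(state_space.dcgain())           # 2.666..., matching the transfer function above
```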
63a70a1547035e93cc4e11ea8339c8fcaf221227
40,226
ipynb
Jupyter Notebook
Lecture_12-Controllability-Of-Biological-Systems/Controllability-Of-Biological-Systems.ipynb
joseph-hellerstein/advanced-controls-lectures
dc43f6c3517616da3b0ea7c93192d911414ee202
[ "MIT" ]
null
null
null
Lecture_12-Controllability-Of-Biological-Systems/Controllability-Of-Biological-Systems.ipynb
joseph-hellerstein/advanced-controls-lectures
dc43f6c3517616da3b0ea7c93192d911414ee202
[ "MIT" ]
null
null
null
Lecture_12-Controllability-Of-Biological-Systems/Controllability-Of-Biological-Systems.ipynb
joseph-hellerstein/advanced-controls-lectures
dc43f6c3517616da3b0ea7c93192d911414ee202
[ "MIT" ]
null
null
null
44.745273
9,036
0.715234
true
3,085
Qwen/Qwen-72B
1. YES 2. YES
0.893309
0.847968
0.757498
__label__eng_Latn
0.871094
0.598253
```python import sympy x, y, z = sympy.symbols("x y z") c11, c22, c33, c44, c55, c66 = sympy.symbols("c11 c22 c33 c44 c55 c66") c12, c13, c16, c22, c23, c26, c36, c45 = sympy.symbols("c12 c13 c16 c22 c23 c26 c36 c45") s1, s2, s3, p = sympy.symbols("s1 s2 s3 p") y11, y12, y13, y22, y23, y33, y31, y32, y21 = sympy.symbols("y11 y12 y13 y22 y23 y33, y31, y32, y21") y11 = c11*s1**2 + c66*s2**2 + c55*s3**2 + 2*c16*s1*s2 y22 = c66*s1**2 + c22*s2**2 + c44*s3**2 + 2*c26*s1*s2 y33 = c55*s1**2 + c44*s2**2 + c33*s3**2 + 2*c45*s1*s2 y12 = c16*s1**2 + c26*s2**2 + c45*s3**2 + (c12+c66)*s1*s2 y21 = y12 y13 = (c13+c55)*s1*s3 + (c36+c45)*s2*s3 y31 = y13 y23 = (c36+c45)*s1*s3 + (c23+c44)*s2*s3 y32 = y23 A = sympy.Matrix([[y11, y12, y13], [y21, y22, y23], [y31, y32, y33]]) I = sympy.eye(3) B = A - p*I S = sympy.collect(sympy.simplify(B.det()), s3) print(S) print('Done Solve') ``` c11*c22*c44*s1**2*s2**4 + 2*c11*c22*c45*s1**3*s2**3 + c11*c22*c55*s1**4*s2**2 - c11*c22*p*s1**2*s2**2 + 2*c11*c26*c44*s1**3*s2**3 + 4*c11*c26*c45*s1**4*s2**2 + 2*c11*c26*c55*s1**5*s2 - 2*c11*c26*p*s1**3*s2 + c11*c44*c66*s1**4*s2**2 - c11*c44*p*s1**2*s2**2 + 2*c11*c45*c66*s1**5*s2 - 2*c11*c45*p*s1**3*s2 + c11*c55*c66*s1**6 - c11*c55*p*s1**4 - c11*c66*p*s1**4 + c11*p**2*s1**2 - c12**2*c44*s1**2*s2**4 - 2*c12**2*c45*s1**3*s2**3 - c12**2*c55*s1**4*s2**2 + c12**2*p*s1**2*s2**2 - 2*c12*c16*c44*s1**3*s2**3 - 4*c12*c16*c45*s1**4*s2**2 - 2*c12*c16*c55*s1**5*s2 + 2*c12*c16*p*s1**3*s2 - 2*c12*c26*c44*s1*s2**5 - 4*c12*c26*c45*s1**2*s2**4 - 2*c12*c26*c55*s1**3*s2**3 + 2*c12*c26*p*s1*s2**3 - 2*c12*c44*c66*s1**2*s2**4 - 4*c12*c45*c66*s1**3*s2**3 - 2*c12*c55*c66*s1**4*s2**2 + 2*c12*c66*p*s1**2*s2**2 - c16**2*c44*s1**4*s2**2 - 2*c16**2*c45*s1**5*s2 - c16**2*c55*s1**6 + c16**2*p*s1**4 + 2*c16*c22*c44*s1*s2**5 + 4*c16*c22*c45*s1**2*s2**4 + 2*c16*c22*c55*s1**3*s2**3 - 2*c16*c22*p*s1*s2**3 + 2*c16*c26*c44*s1**2*s2**4 + 4*c16*c26*c45*s1**3*s2**3 + 2*c16*c26*c55*s1**4*s2**2 - 2*c16*c26*p*s1**2*s2**2 - 2*c16*c44*p*s1*s2**3 - 4*c16*c45*p*s1**2*s2**2 - 2*c16*c55*p*s1**3*s2 + 2*c16*p**2*s1*s2 + c22*c44*c66*s2**6 - c22*c44*p*s2**4 + 2*c22*c45*c66*s1*s2**5 - 2*c22*c45*p*s1*s2**3 + c22*c55*c66*s1**2*s2**4 - c22*c55*p*s1**2*s2**2 - c22*c66*p*s2**4 + c22*p**2*s2**2 - c26**2*c44*s2**6 - 2*c26**2*c45*s1*s2**5 - c26**2*c55*s1**2*s2**4 + c26**2*p*s2**4 - 2*c26*c44*p*s1*s2**3 - 4*c26*c45*p*s1**2*s2**2 - 2*c26*c55*p*s1**3*s2 + 2*c26*p**2*s1*s2 - c44*c66*p*s1**2*s2**2 - c44*c66*p*s2**4 + c44*p**2*s2**2 - 2*c45*c66*p*s1**3*s2 - 2*c45*c66*p*s1*s2**3 + 2*c45*p**2*s1*s2 - c55*c66*p*s1**4 - c55*c66*p*s1**2*s2**2 + c55*p**2*s1**2 + c66*p**2*s1**2 + c66*p**2*s2**2 - p**3 + s3**6*(c33*c44*c55 - c33*c45**2) + s3**4*(c11*c33*c44*s1**2 - 2*c12*c33*c45*s1*s2 - c13**2*c44*s1**2 + 2*c13*c23*c45*s1*s2 - 2*c13*c36*c44*s1*s2 + 2*c13*c36*c45*s1**2 - 2*c13*c44*c55*s1**2 + 2*c13*c45**2*s1**2 + 2*c16*c33*c44*s1*s2 - 2*c16*c33*c45*s1**2 + c22*c33*c55*s2**2 - c23**2*c55*s2**2 + 2*c23*c36*c45*s2**2 - 2*c23*c36*c55*s1*s2 - 2*c23*c44*c55*s2**2 + 2*c23*c45**2*s2**2 - 2*c26*c33*c45*s2**2 + 2*c26*c33*c55*s1*s2 + c33*c44*c66*s2**2 - c33*c44*p - 2*c33*c45*c66*s1*s2 + c33*c55*c66*s1**2 - c33*c55*p - c36**2*c44*s2**2 + 2*c36**2*c45*s1*s2 - c36**2*c55*s1**2 - 4*c36*c44*c55*s1*s2 + 4*c36*c45**2*s1*s2 - c44*c55*p + c45**2*p) + s3**2*(c11*c22*c33*s1**2*s2**2 - c11*c23**2*s1**2*s2**2 - 2*c11*c23*c36*s1**3*s2 - 2*c11*c23*c44*s1**2*s2**2 - 2*c11*c23*c45*s1**3*s2 + 2*c11*c26*c33*s1**3*s2 + c11*c33*c66*s1**4 - c11*c33*p*s1**2 - c11*c36**2*s1**4 - 2*c11*c36*c44*s1**3*s2 - 2*c11*c36*c45*s1**4 + c11*c44*c55*s1**4 - 
c11*c44*p*s1**2 - c11*c45**2*s1**4 - c12**2*c33*s1**2*s2**2 + 2*c12*c13*c23*s1**2*s2**2 + 2*c12*c13*c36*s1**3*s2 + 2*c12*c13*c44*s1**2*s2**2 + 2*c12*c13*c45*s1**3*s2 - 2*c12*c16*c33*s1**3*s2 + 2*c12*c23*c36*s1*s2**3 + 2*c12*c23*c45*s1*s2**3 + 2*c12*c23*c55*s1**2*s2**2 - 2*c12*c26*c33*s1*s2**3 - 2*c12*c33*c66*s1**2*s2**2 + 2*c12*c36**2*s1**2*s2**2 + 2*c12*c36*c44*s1*s2**3 + 4*c12*c36*c45*s1**2*s2**2 + 2*c12*c36*c55*s1**3*s2 + 2*c12*c44*c55*s1**2*s2**2 - 2*c12*c45**2*s1**2*s2**2 + 2*c12*c45*p*s1*s2 - c13**2*c22*s1**2*s2**2 - 2*c13**2*c26*s1**3*s2 - c13**2*c66*s1**4 + c13**2*p*s1**2 + 2*c13*c16*c23*s1**3*s2 + 2*c13*c16*c36*s1**4 + 2*c13*c16*c44*s1**3*s2 + 2*c13*c16*c45*s1**4 - 2*c13*c22*c36*s1*s2**3 - 2*c13*c22*c45*s1*s2**3 - 2*c13*c22*c55*s1**2*s2**2 + 2*c13*c23*c26*s1*s2**3 + 2*c13*c23*c66*s1**2*s2**2 - 2*c13*c26*c36*s1**2*s2**2 + 2*c13*c26*c44*s1*s2**3 - 2*c13*c26*c45*s1**2*s2**2 - 4*c13*c26*c55*s1**3*s2 + 2*c13*c36*p*s1*s2 + 2*c13*c44*c66*s1**2*s2**2 + 2*c13*c45*p*s1*s2 - 2*c13*c55*c66*s1**4 + 2*c13*c55*p*s1**2 - c16**2*c33*s1**4 + 2*c16*c22*c33*s1*s2**3 - 2*c16*c23**2*s1*s2**3 - 2*c16*c23*c36*s1**2*s2**2 - 4*c16*c23*c44*s1*s2**3 - 2*c16*c23*c45*s1**2*s2**2 + 2*c16*c23*c55*s1**3*s2 + 2*c16*c26*c33*s1**2*s2**2 - 2*c16*c33*p*s1*s2 - 2*c16*c36*c44*s1**2*s2**2 + 2*c16*c36*c55*s1**4 + 4*c16*c44*c55*s1**3*s2 - 2*c16*c44*p*s1*s2 - 4*c16*c45**2*s1**3*s2 + 2*c16*c45*p*s1**2 + c22*c33*c66*s2**4 - c22*c33*p*s2**2 - c22*c36**2*s2**4 - 2*c22*c36*c45*s2**4 - 2*c22*c36*c55*s1*s2**3 + c22*c44*c55*s2**4 - c22*c45**2*s2**4 - c22*c55*p*s2**2 - c23**2*c66*s2**4 + c23**2*p*s2**2 + 2*c23*c26*c36*s2**4 + 2*c23*c26*c45*s2**4 + 2*c23*c26*c55*s1*s2**3 + 2*c23*c36*p*s1*s2 - 2*c23*c44*c66*s2**4 + 2*c23*c44*p*s2**2 + 2*c23*c45*p*s1*s2 + 2*c23*c55*c66*s1**2*s2**2 - c26**2*c33*s2**4 - 2*c26*c33*p*s1*s2 + 2*c26*c36*c44*s2**4 - 2*c26*c36*c55*s1**2*s2**2 + 4*c26*c44*c55*s1*s2**3 - 4*c26*c45**2*s1*s2**3 + 2*c26*c45*p*s2**2 - 2*c26*c55*p*s1*s2 - c33*c66*p*s1**2 - c33*c66*p*s2**2 + c33*p**2 + c36**2*p*s1**2 + c36**2*p*s2**2 + 2*c36*c44*p*s1*s2 + 2*c36*c45*p*s1**2 + 2*c36*c45*p*s2**2 + 2*c36*c55*p*s1*s2 + 4*c44*c55*c66*s1**2*s2**2 - c44*c55*p*s1**2 - c44*c55*p*s2**2 - c44*c66*p*s2**2 + c44*p**2 - 4*c45**2*c66*s1**2*s2**2 + c45**2*p*s1**2 + c45**2*p*s2**2 + 2*c45*c66*p*s1*s2 - c55*c66*p*s1**2 + c55*p**2) Done Solve ```python c11*c22*c44*s1**2*s2**4 + 2*c11*c22*c45*s1**3*s2**3 + c11*c22*c55*s1**4*s2**2 - c11*c22*p*s1**2*s2**2 + 2*c11*c26*c44*s1**3*s2**3 + 4*c11*c26*c45*s1**4*s2**2 + 2*c11*c26*c55*s1**5*s2 - 2*c11*c26*p*s1**3*s2 + c11*c44*c66*s1**4*s2**2 - c11*c44*p*s1**2*s2**2 + 2*c11*c45*c66*s1**5*s2 - 2*c11*c45*p*s1**3*s2 + c11*c55*c66*s1**6 - c11*c55*p*s1**4 - c11*c66*p*s1**4 + c11*p**2*s1**2 - c12**2*c44*s1**2*s2**4 - 2*c12**2*c45*s1**3*s2**3 - c12**2*c55*s1**4*s2**2 + c12**2*p*s1**2*s2**2 - 2*c12*c16*c44*s1**3*s2**3 - 4*c12*c16*c45*s1**4*s2**2 - 2*c12*c16*c55*s1**5*s2 + 2*c12*c16*p*s1**3*s2 - 2*c12*c26*c44*s1*s2**5 - 4*c12*c26*c45*s1**2*s2**4 - 2*c12*c26*c55*s1**3*s2**3 + 2*c12*c26*p*s1*s2**3 - 2*c12*c44*c66*s1**2*s2**4 - 4*c12*c45*c66*s1**3*s2**3 - 2*c12*c55*c66*s1**4*s2**2 + 2*c12*c66*p*s1**2*s2**2 - c16**2*c44*s1**4*s2**2 - 2*c16**2*c45*s1**5*s2 - c16**2*c55*s1**6 + c16**2*p*s1**4 + 2*c16*c22*c44*s1*s2**5 + 4*c16*c22*c45*s1**2*s2**4 + 2*c16*c22*c55*s1**3*s2**3 - 2*c16*c22*p*s1*s2**3 + 2*c16*c26*c44*s1**2*s2**4 + 4*c16*c26*c45*s1**3*s2**3 + 2*c16*c26*c55*s1**4*s2**2 - 2*c16*c26*p*s1**2*s2**2 - 2*c16*c44*p*s1*s2**3 - 4*c16*c45*p*s1**2*s2**2 - 2*c16*c55*p*s1**3*s2 + 2*c16*p**2*s1*s2 + c22*c44*c66*s2**6 - c22*c44*p*s2**4 + 
2*c22*c45*c66*s1*s2**5 - 2*c22*c45*p*s1*s2**3 + c22*c55*c66*s1**2*s2**4 - c22*c55*p*s1**2*s2**2 - c22*c66*p*s2**4 + c22*p**2*s2**2 - c26**2*c44*s2**6 - 2*c26**2*c45*s1*s2**5 - c26**2*c55*s1**2*s2**4 + c26**2*p*s2**4 - 2*c26*c44*p*s1*s2**3 - 4*c26*c45*p*s1**2*s2**2 - 2*c26*c55*p*s1**3*s2 + 2*c26*p**2*s1*s2 - c44*c66*p*s1**2*s2**2 - c44*c66*p*s2**4 + c44*p**2*s2**2 - 2*c45*c66*p*s1**3*s2 - 2*c45*c66*p*s1*s2**3 + 2*c45*p**2*s1*s2 - c55*c66*p*s1**4 - c55*c66*p*s1**2*s2**2 + c55*p**2*s1**2 + c66*p**2*s1**2 + c66*p**2*s2**2 - p**3 + s3**6*(c33*c44*c55 - c33*c45**2) + s3**4*( c11*c33*c44*s1**2 - 2*c12*c33*c45*s1*s2 - c13**2*c44*s1**2 + 2*c13*c23*c45*s1*s2 - 2*c13*c36*c44*s1*s2 + 2*c13*c36*c45*s1**2 - 2*c13*c44*c55*s1**2 + 2*c13*c45**2*s1**2 + 2*c16*c33*c44*s1*s2 - 2*c16*c33*c45*s1**2 + c22*c33*c55*s2**2 - c23**2*c55*s2**2 + 2*c23*c36*c45*s2**2 - 2*c23*c36*c55*s1*s2 - 2*c23*c44*c55*s2**2 + 2*c23*c45**2*s2**2 - 2*c26*c33*c45*s2**2 + 2*c26*c33*c55*s1*s2 + c33*c44*c66*s2**2 - c33*c44*p - 2*c33*c45*c66*s1*s2 + c33*c55*c66*s1**2 - c33*c55*p - c36**2*c44*s2**2 + 2*c36**2*c45*s1*s2 - c36**2*c55*s1**2 - 4*c36*c44*c55*s1*s2 + 4*c36*c45**2*s1*s2 - c44*c55*p + c45**2*p) + s3**2*(c11*c22*c33*s1**2*s2**2 - c11*c23**2*s1**2*s2**2 - 2*c11*c23*c36*s1**3*s2 - 2*c11*c23*c44*s1**2*s2**2 - 2*c11*c23*c45*s1**3*s2 + 2*c11*c26*c33*s1**3*s2 + c11*c33*c66*s1**4 - c11*c33*p*s1**2 - c11*c36**2*s1**4 - 2*c11*c36*c44*s1**3*s2 - 2*c11*c36*c45*s1**4 + c11*c44*c55*s1**4 - c11*c44*p*s1**2 - c11*c45**2*s1**4 - c12**2*c33*s1**2*s2**2 + 2*c12*c13*c23*s1**2*s2**2 + 2*c12*c13*c36*s1**3*s2 + 2*c12*c13*c44*s1**2*s2**2 + 2*c12*c13*c45*s1**3*s2 - 2*c12*c16*c33*s1**3*s2 + 2*c12*c23*c36*s1*s2**3 + 2*c12*c23*c45*s1*s2**3 + 2*c12*c23*c55*s1**2*s2**2 - 2*c12*c26*c33*s1*s2**3 - 2*c12*c33*c66*s1**2*s2**2 + 2*c12*c36**2*s1**2*s2**2 + 2*c12*c36*c44*s1*s2**3 + 4*c12*c36*c45*s1**2*s2**2 + 2*c12*c36*c55*s1**3*s2 + 2*c12*c44*c55*s1**2*s2**2 - 2*c12*c45**2*s1**2*s2**2 + 2*c12*c45*p*s1*s2 - c13**2*c22*s1**2*s2**2 - 2*c13**2*c26*s1**3*s2 - c13**2*c66*s1**4 + c13**2*p*s1**2 + 2*c13*c16*c23*s1**3*s2 + 2*c13*c16*c36*s1**4 + 2*c13*c16*c44*s1**3*s2 + 2*c13*c16*c45*s1**4 - 2*c13*c22*c36*s1*s2**3 - 2*c13*c22*c45*s1*s2**3 - 2*c13*c22*c55*s1**2*s2**2 + 2*c13*c23*c26*s1*s2**3 + 2*c13*c23*c66*s1**2*s2**2 - 2*c13*c26*c36*s1**2*s2**2 + 2*c13*c26*c44*s1*s2**3 - 2*c13*c26*c45*s1**2*s2**2 - 4*c13*c26*c55*s1**3*s2 + 2*c13*c36*p*s1*s2 + 2*c13*c44*c66*s1**2*s2**2 + 2*c13*c45*p*s1*s2 - 2*c13*c55*c66*s1**4 + 2*c13*c55*p*s1**2 - c16**2*c33*s1**4 + 2*c16*c22*c33*s1*s2**3 - 2*c16*c23**2*s1*s2**3 - 2*c16*c23*c36*s1**2*s2**2 - 4*c16*c23*c44*s1*s2**3 - 2*c16*c23*c45*s1**2*s2**2 + 2*c16*c23*c55*s1**3*s2 + 2*c16*c26*c33*s1**2*s2**2 - 2*c16*c33*p*s1*s2 - 2*c16*c36*c44*s1**2*s2**2 + 2*c16*c36*c55*s1**4 + 4*c16*c44*c55*s1**3*s2 - 2*c16*c44*p*s1*s2 - 4*c16*c45**2*s1**3*s2 + 2*c16*c45*p*s1**2 + c22*c33*c66*s2**4 - c22*c33*p*s2**2 - c22*c36**2*s2**4 - 2*c22*c36*c45*s2**4 - 2*c22*c36*c55*s1*s2**3 + c22*c44*c55*s2**4 - c22*c45**2*s2**4 - c22*c55*p*s2**2 - c23**2*c66*s2**4 + c23**2*p*s2**2 + 2*c23*c26*c36*s2**4 + 2*c23*c26*c45*s2**4 + 2*c23*c26*c55*s1*s2**3 + 2*c23*c36*p*s1*s2 - 2*c23*c44*c66*s2**4 + 2*c23*c44*p*s2**2 + 2*c23*c45*p*s1*s2 + 2*c23*c55*c66*s1**2*s2**2 - c26**2*c33*s2**4 - 2*c26*c33*p*s1*s2 + 2*c26*c36*c44*s2**4 - 2*c26*c36*c55*s1**2*s2**2 + 4*c26*c44*c55*s1*s2**3 - 4*c26*c45**2*s1*s2**3 + 2*c26*c45*p*s2**2 - 2*c26*c55*p*s1*s2 - c33*c66*p*s1**2 - c33*c66*p*s2**2 + c33*p**2 + c36**2*p*s1**2 + c36**2*p*s2**2 + 2*c36*c44*p*s1*s2 + 2*c36*c45*p*s1**2 + 2*c36*c45*p*s2**2 + 
2*c36*c55*p*s1*s2 + 4*c44*c55*c66*s1**2*s2**2 - c44*c55*p*s1**2 - c44*c55*p*s2**2 - c44*c66*p*s2**2 + c44*p**2 - 4*c45**2*c66*s1**2*s2**2 + c45**2*p*s1**2 + c45**2*p*s2**2 + 2*c45*c66*p*s1*s2 - c55*c66*p*s1**2 + c55*p**2) ```
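A quick way to sanity-check the collected determinant is to inspect it as a polynomial. The sketch below is not part of the original notebook; it is meant to be run after the cell above and reuses its `S`, `p` and `s3` symbols.

```python
import sympy

# Viewed as a polynomial in p, the determinant of the 3x3 system is cubic,
poly_p = sympy.Poly(S, p)
print(poly_p.degree())      # expected: 3

# and collected in s3 only even powers appear (s3**0, s3**2, s3**4, s3**6).
poly_s3 = sympy.Poly(S, s3)
print(poly_s3.degree())     # expected: 6
print(poly_s3.monoms())     # exponents of s3 present in S
```

In principle `sympy.solve(S, p)` would return the three roots $p(s_1, s_2, s_3)$, although for fully anisotropic stiffnesses the closed-form expressions are extremely large.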
6ad838e91351d49872d24670739bfa3f559967f2
12,989
ipynb
Jupyter Notebook
notebooks/Test Sympy.ipynb
kwinkunks/rppy
91251d51797af79aaec0db16912c069f0fb1f13d
[ "BSD-2-Clause" ]
24
2015-10-08T17:51:54.000Z
2021-11-04T00:02:02.000Z
notebooks/Test Sympy.ipynb
shear/RPpy
5f08ca5212686670c3e15565c34a9fd913d15e87
[ "BSD-2-Clause" ]
36
2015-03-20T23:48:09.000Z
2015-07-24T04:58:03.000Z
notebooks/Test Sympy.ipynb
shear/RPpy
5f08ca5212686670c3e15565c34a9fd913d15e87
[ "BSD-2-Clause" ]
15
2015-10-08T17:51:45.000Z
2022-01-20T08:02:07.000Z
81.18125
4,999
0.513357
true
6,487
Qwen/Qwen-72B
1. YES 2. YES
0.953966
0.626124
0.597301
__label__yue_Hant
0.177845
0.226061
```python
import numpy as np
from skspatial.objects import plane   # note: not used below; sympy's Plane is used instead
from sympy import Plane

def perpendicular(a):
    b = np.empty_like(a)
    b[0] = -a[1]
    b[1] = a[0]
    return b

def normalize(a):
    a = np.array(a)
    return a/np.linalg.norm(a)

def get2DProjection(origin, target_point):
    points = [origin, target_point, [target_point[0], target_point[1], target_point[2] + 1]]  # add 1 in order not to be collinear
    plane = Plane(points[0], points[1], points[2])
    plane_normal = np.array(plane.normal_vector)
    target_point, origin = np.array(target_point), np.array(origin)
    # in-plane axes: horizontal direction towards the target plus the vertical z axis
    x_axis = normalize(np.array([target_point[0], target_point[1], 0]))
    y_axis = normalize(np.array([0, 0, 1]))
    # s is the out-of-plane component; it is zero by construction because the plane
    # passes through both points
    s = np.dot(plane_normal, target_point-origin)
    x_coord = np.dot(x_axis, target_point-origin)
    y_coord = np.dot(y_axis, target_point-origin)
    return s, x_coord, y_coord

if __name__ == "__main__":
    target_point = [2, 2, 1]
    origin = [0, 0, 0]
    # Note: the arguments are passed as (target_point, origin), the reverse of the
    # function signature. Inside the function "target_point" then becomes [0, 0, 0],
    # so x_axis = normalize([0, 0, 0]) divides by zero -- hence the nan and the
    # RuntimeWarning in the output below. Swapping the arguments gives finite coordinates.
    s, t_1, t_2 = get2DProjection(target_point, origin)
    print("s:", s)
    print("t_1:", t_1)
    print("t_2:", t_2)
```

s: 0
t_1: nan
t_2: -1.0

/var/folders/sd/1vc_q83x5rn9jjrd0x47_cc00000gn/T/ipykernel_92659/4172601782.py:14: RuntimeWarning: invalid value encountered in true_divide
  return a/np.linalg.norm(a)

```python

```
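The `nan` above comes from the swapped call arguments noted in the comments. If the goal is to express the target point in a 2-D coordinate system lying in the vertical plane through both points, a corrected sketch is below. It is not part of the original notebook; the function name and the choice of in-plane axes are illustrative and follow the same convention as above (horizontal in-plane axis plus the z axis).

```python
import numpy as np

def project_to_plane_basis(origin, target_point):
    """Return the (horizontal, vertical) in-plane coordinates of target_point
    relative to origin, in the vertical plane containing both points."""
    origin = np.asarray(origin, dtype=float)
    target_point = np.asarray(target_point, dtype=float)
    d = target_point - origin
    x_axis = np.array([d[0], d[1], 0.0])        # horizontal in-plane direction
    n = np.linalg.norm(x_axis)
    if n == 0:                                  # target directly above/below origin
        x_axis = np.array([1.0, 0.0, 0.0])
    else:
        x_axis = x_axis / n
    y_axis = np.array([0.0, 0.0, 1.0])          # vertical axis
    return np.dot(x_axis, d), np.dot(y_axis, d)

print(project_to_plane_basis([0, 0, 0], [2, 2, 1]))   # approximately (2.828, 1.0)
```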
f229c8bdc4ddbc1736dec3d9774de79ae7b91f50
2,673
ipynb
Jupyter Notebook
Mathematics/Linear Algebra/Python Visualization Notebooks/scikit-spatial/Projecting-3D-points-to-2D-plane.ipynb
okara83/Becoming-a-Data-Scientist
f09a15f7f239b96b77a2f080c403b2f3e95c9650
[ "MIT" ]
null
null
null
Mathematics/Linear Algebra/Python Visualization Notebooks/scikit-spatial/Projecting-3D-points-to-2D-plane.ipynb
okara83/Becoming-a-Data-Scientist
f09a15f7f239b96b77a2f080c403b2f3e95c9650
[ "MIT" ]
null
null
null
Mathematics/Linear Algebra/Python Visualization Notebooks/scikit-spatial/Projecting-3D-points-to-2D-plane.ipynb
okara83/Becoming-a-Data-Scientist
f09a15f7f239b96b77a2f080c403b2f3e95c9650
[ "MIT" ]
2
2022-02-09T15:41:33.000Z
2022-02-11T07:47:40.000Z
25.457143
150
0.505425
true
432
Qwen/Qwen-72B
1. YES 2. YES
0.914901
0.800692
0.732554
__label__eng_Latn
0.315742
0.5403
```python
%matplotlib widget
```

```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import matplotlib.cm as cm
from IPython.display import display, Math, clear_output
import sympy
from sympy import *
from sympy.physics.vector import ReferenceFrame, CoordinateSym
from sympy.vector import CoordSys3D, divergence, curl
import ipyvolume as ipv
import time
from ipywidgets import Output, interact
import ipywidgets as widgets

np.seterr(divide='ignore', invalid='ignore')
init_printing()
```

## Coaxial Cable Challenge:

* We will try to develop a coaxial cable that is perfect for our application. In the process we may discover that "perfect" depends on the context.

### Coaxial Cable Design

A coaxial cable consists of

* Two conductors:
 1. A solid central core (the inner cylinder), which "carries the signal".
 2. A metallic shield: a hollow outer cylinder which is held at ground potential.
* A dielectric insulator between the two conductors.

By Tkgd2007 - Own work, CC BY 3.0, https://commons.wikimedia.org/w/index.php?curid=4138900

***

### There are **three critical parameters** of the cable that we will use our electromagnetism skills to study.

1. The **capacitance**: $ C = \dfrac{Q}{V}$, which relates the amount of charge needed on the centre core to charge it to a certain voltage.
2. The **inductance**: $ L = \dfrac{V_{drop}}{(\dfrac{dI}{dt})}$
3. And the **resistance**.

* The voltage $V$ in the capacitance expression is the voltage of the centre core with respect to ground.
* The voltage $V_{drop}$ is the voltage difference between two segments of the centre core that is induced by a time change of current.

Referring to Griffiths problem 2.43, find the capacitance per unit length of two coaxial cylindrical tubes:

## Solve for the Capacitance

#### Step 1: **Find the E-field** (use cylindrical coordinates):

$\int \mathbf{E}\cdot{\mathbf{dA}} = \dfrac{Q_{enc}}{\epsilon}$

By symmetry the E-field is parallel to the normal vector of the cylindrical surface (assume charge $Q$ over a length $L$). So:

$E(2\pi s L) = \dfrac{Q}{\epsilon} \Rightarrow$ $\mathbf{E} = \dfrac{Q}{2\pi s L \epsilon} \hat{\mathbf{s}} $

#### Step 2: Find the potential.

Capacitance depends on the voltage, so let's find the potential between the inner and outer cylinders.

$-\int_a^b \mathbf{E}\cdot \mathbf{dl} = V(b) - V(a) = -\dfrac{Q}{2\pi L \epsilon} \int_a^b \dfrac{ds}{s} \Rightarrow$

$ V(a) - V(b) = \dfrac{Q}{2\pi L \epsilon} \ln{\left(\dfrac{b}{a} \right)} $

Capacitance ($C$) per unit length ($l$):

$ \dfrac{C}{l} = \dfrac{Q}{V} \dfrac{1}{l} \Rightarrow \dfrac{C}{l} = \dfrac{2 \pi \epsilon}{\ln{\left(\dfrac{b}{a} \right)}} $

## Solve for the Inductance

#### Step 1: **Find the B-field within the cable** (use cylindrical coordinates and an Amperian loop):

$\oint \mathbf{B}\cdot{\mathbf{dl}} = \mu I$

By symmetry the B-field is parallel to the circular Amperian loop of radius $r$.

$B(2\pi r) = \mu I$

So: $\mathbf{B} = \dfrac{\mu I}{2 \pi r} \hat{\mathbf{\phi}} $

#### Step 2: Find the magnetic flux through a loop between the two conductors.

$\Phi = \int \mathbf{B}\cdot \mathbf{dA} = \int_{r=a}^{r=b} \dfrac{\mu I}{2 \pi r} l \, dr = \dfrac{\mu I}{2 \pi} l \ln{\left(\dfrac{b}{a} \right)}$

Inductance ($L$) per unit length ($l$):

$ V_{drop} = \dfrac{d\Phi}{dt} = L \dfrac{dI}{dt} $

$ \dfrac{d\Phi}{dt} = \dfrac{\mu}{2 \pi} l \ln{\left(\dfrac{b}{a} \right)} \dfrac{dI}{dt}$

$ \dfrac{L}{l} = \dfrac{\mu}{2 \pi} \ln{\left(\dfrac{b}{a} \right)}$

## Plot the Capacitance and Inductance Versus the inner diameter.
Use: 1. a slider to adjust the outer diameter and 2. a drop down box to adjust the dielectric. ```python plt.close('all') a = np.linspace(0.05, 2, 1000) # inner radius [in mm] fig = plt.figure(figsize = (11,8)) # constants and equation for capacitance e0 = 8.85e-12 b = 3 cap = 2*np.pi*e0/np.log(b/a)*1e12 dielectrics = {'air': 1, 'polyethylene': 2.25, 'teflon_PTFE': 2.1} # plot capacitance ax1 = fig.add_subplot(1, 2, 1) line, = ax1.plot(a, cap) ax1.set_ylim([0, np.max(cap[np.isfinite(cap)])*1.1]) # label plot ax1.set_title('Capacitance') plt.ylabel('C [pF/m]') plt.xlabel('a:inner diameter [mm]') ax1.grid(True) # constants and equation for inducatance u0 = 4*np.pi*1e-7 ind = u0/(2*np.pi)*np.log(b/a)*1e9 ax2 = fig.add_subplot(1, 2, 2) line2, = ax2.plot(a, ind) ax2.set_ylim([0, np.max(ind[np.isfinite(ind)])*1.1]) # label plot ax2.set_title('Inductance') plt.ylabel('L [nH/m]') plt.xlabel('a:inner diameter [mm]') ax2.grid(True) def update(b=widgets.FloatSlider(min=0.0,max=100.0,step=0.1,value=5), dielectric = ['air', 'polyethylene', 'teflon_PTFE']): # capacitance er = dielectrics[dielectric] cap = 2*np.pi*e0*er/np.log(b/a)*1e12 # find negative values idx = cap<0 line.set_ydata(cap) line.set_marker('*') fig.canvas.draw() ax1.set_ylim([0, np.max([np.max(cap[np.isfinite(cap)])*1.1, 0.5])]) # inductance ind = u0/(2*np.pi)*np.log(b/a)*1e9 # remove any negative values idx = ind<0 line2.set_ydata(ind) line2.set_marker('*') fig.canvas.draw() ax2.set_ylim([0, np.max([np.max(ind[np.isfinite(ind)])*1.1, 0.5])]) interact(update); ``` A Jupyter Widget A Jupyter Widget ## Plot the Resistance Versus the inner diameter (no slider needed in this case). ```python a = np.linspace(0, 2, 1000) # inner radius [in mm] fig = plt.figure(figsize = (11,8)) ax1 = fig.add_subplot(1, 1, 1) ax1.set_title('Resistance -- no slider') plt.ylabel('R [milli-Ohm]') plt.xlabel('a:inner diameter [mm]') # resistance a = np.linspace(0.05, 2, 1000) # inner radius [in mm] rho = 1.68e-8 # Ohm * m data = rho/(np.pi * (a*1e-3)**2) line, = ax1.plot(a, data) line.set_marker('*') plt.grid(True) ``` ```python plt.close('all') ``` ```python ```
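As a quick numeric check of the two results $C/l = 2\pi\epsilon/\ln(b/a)$ and $L/l = \mu\ln(b/a)/(2\pi)$ (not part of the original notebook; the radii and dielectric below are just example values in the same range as the sliders above):

```python
import numpy as np

e0 = 8.85e-12            # F/m
u0 = 4 * np.pi * 1e-7    # H/m
er = 2.25                # polyethylene, from the dielectrics dictionary above
a, b = 0.5e-3, 3.0e-3    # inner and outer radii in metres

C_per_m = 2 * np.pi * e0 * er / np.log(b / a)   # F/m
L_per_m = u0 / (2 * np.pi) * np.log(b / a)      # H/m

print(C_per_m * 1e12, 'pF/m')                   # roughly 70 pF/m
print(L_per_m * 1e9, 'nH/m')                    # roughly 360 nH/m
print(np.sqrt(L_per_m / C_per_m), 'ohm')        # characteristic impedance, ~72 ohm
```

The square root of $L/C$ is the characteristic impedance, which for this example geometry lands near the familiar 50-75 $\Omega$ range of commercial cables.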
a46ff9ad30bfc99873e3ca66e67e3fc54f11ae52
33,366
ipynb
Jupyter Notebook
coaxial_cable_challenge.ipynb
lucask07/teaching-notebooks
732638b3bac528f85e0dc649c4671c005f58b22b
[ "MIT" ]
null
null
null
coaxial_cable_challenge.ipynb
lucask07/teaching-notebooks
732638b3bac528f85e0dc649c4671c005f58b22b
[ "MIT" ]
null
null
null
coaxial_cable_challenge.ipynb
lucask07/teaching-notebooks
732638b3bac528f85e0dc649c4671c005f58b22b
[ "MIT" ]
null
null
null
105.92381
23,476
0.838848
true
1,952
Qwen/Qwen-72B
1. YES 2. YES
0.872347
0.896251
0.781843
__label__eng_Latn
0.822766
0.654815
# Fourier methods
> Fourier methods using Python

- toc: true
- badges: true
- comments: true
- categories: [jupyter]

The Fourier transform (FT) for a well-behaved function $f$ is defined as:

$$f(k) = \int e^{-ikx} f(x) ~dx$$

The inverse FT is then

$$f(x) = \frac{1}{2\pi} \int e^{ikx} f(k) ~dk$$

## Discrete Fourier transforms (DFTs)

If the function is periodic in real space, $f(x+L) = f(x)$, then the Fourier space is discrete with spacing $\frac{2\pi}{L}$. Moreover, if the real space is periodic as well as discrete with spacing $h$, then the Fourier space is discrete as well as bounded.

$$f(x) = \sum_k e^{ikx} f(k) \qquad \text{where } k \in \Big[ -\frac{\pi}{h}, \frac{\pi}{h}\Big], \text{ with interval } \frac{2\pi}{L} $$

This is very much in line with crystallography, with $ [ -\frac{\pi}{h}, \frac{\pi}{h} ]$ being the first Brillouin zone. So we see that there is a concept of the maximum wavenumber $ k_{max}=\frac{\pi}{h} $; we will get back to this later in the notes.

Usually in computations we need to find the FT of a discrete function rather than of a well-defined analytic function. Since the real space is discrete and periodic, the Fourier space is also discrete and periodic (i.e. bounded). Also, the Fourier space is continuous if the real space is unbounded. If the function is defined at $N$ points in real space and one wants to calculate the function at $N$ points in Fourier space, then the **DFT** is defined as

$$f_k = \sum_{n=0}^{N-1} f_n ~ e^{-i\frac{2\pi~n~k}{N}}$$

while the inverse transform of this is

$$f_n = \frac1N \sum_{k=0}^{N-1} f_k ~ e^{~i\frac{2\pi~n~k}{N}}$$

To calculate each $f_k$ one needs $N$ computations and it has to be done $N$ times, i.e., the algorithm is simply $\mathcal{O}(N^2)$. This can be implemented numerically as a matrix multiplication, $f_k = M\cdot f_n$, where $M$ is an $N\times N$ matrix.

## Fast Fourier transforms (FFTs)

The discussion here is based on the Cooley-Tukey algorithm. FFTs improve on DFTs by exploiting their symmetries.

$$ \begin{align}
f_k &= \sum_{n=0}^{N-1} f_n e^{-i~\frac{2\pi~k~n}{N}} \\
&= \sum_{n=0}^{N/2-1} f_{2n} e^{-i~\frac{2\pi~k~(2n)}{N}} + \sum_{n=0}^{N/2-1} f_{2n + 1} e^{-i~\frac{2\pi~k~(2n+1)}{N}}\\
&= \sum_{n=0}^{N/2 - 1} f_{2n} e^{-i~\frac{2\pi~k~n}{N/2}} + e^{-i\frac{2\pi k}{N}} \sum_{n=0}^{N/2 - 1} f_{2n + 1} e^{-i~\frac{2\pi~k~n}{N/2}}\\
&\;\;\vdots
\end{align}$$

We can use the symmetry property, from the definition, $f_{N+k} = f_k$. Notice that, because of the tree structure, there are $\log_2 N$ stages of the calculation. By applying the method of splitting the computation in two halves recursively, the complexity of the problem becomes $\mathcal{O}(N \log N)$, while the naive algorithm is $\mathcal{O}(N^2)$. Below we will look at a simple implementation of the FFT.

```python
import numpy as np
import matplotlib.pyplot as plt
```

```python
def testFFT(x):
    ''' FFT in 1d '''
    N = x.shape[0]
    if N % 2 > 0:
        raise Exception('x must have even size')
    elif N <= 16:
        ''' this is the naive implementation using matrix multiplication'''
        n = np.arange(N)
        k = n.reshape((N, 1))
        M = np.exp(-2j * np.pi * k * n / N)
        return np.dot(M, x)
    else:
        M1 = testFFT(x[::2])
        M2 = testFFT(x[1::2])
        fac = np.exp(-2j*np.pi*np.arange(N)/N)
        # integer division so the slice endpoints are integers (N/2 fails in Python 3)
        return np.concatenate([M1 + fac[:N//2]*M2, M1 + fac[N//2:]*M2])
```

```python
x = np.random.random(1024)
#np.allclose(np.fft.fftn(x), testFFT(x))
```

```python
# TRANSLATION by e^{ikr}!
L, N = 64, 128
ll = np.linspace(0, L, N)
x, y = np.meshgrid(ll, ll)

# Fourier grid.
kx = 2 * np.pi / L * np.concatenate((np.arange(0, N/2+1,1),np.arange(-N/2+1, 0, 1))) # k = (2\pi)/L ky = 2 * np.pi / L * np.concatenate((np.arange(0, N/2+1,1),np.arange(-N/2+1, 0, 1))) kx, ky = np.meshgrid(kx, ky) def plotFirst(x, y, sig, n_): sp = f.add_subplot(1, 3, n_ ) plt.pcolormesh(x, y, np.real(sig), cmap=plt.cm.gist_heat_r) plt.axis('off'); f = plt.figure(figsize=(20, 5), dpi=80); rr = np.sqrt( (x - L/2)*(x - L/2) + (y - L/2)*(y - L/2) ) sig = np.fft.fftn(np.exp(-0.1*rr)) xx = ([-L/4, L/2, -L/2,]) yy = ([-L/8, -L/9, L/2]) for i in range(3): kdotr = kx*xx[i] + ky *yy[i] sig = sig*np.exp(-1j*kdotr) plotFirst(x, y, np.fft.ifftn(sig), i+1) ``` ## More examples ### Sampling: Aliasing error We saw that because of the smallest length scale, $h$, in the real space there is a corresponding largest wave-vector, $k_{max}$ in the Fourier space. The error is because of this $k_{max}$ and a signal which has $k>k_{max}$ can not be distinguished on this grid. In the given example, below, we see that if the real space has 10 points that one can not distinguish between $sin(2\pi x/L)$ and $sin(34 \pi x/L)$. In general, $sin(k_1 x)$ and $sin(k_2 x)$ can not be distinguished if $k_1 -k_2$ is a multiple of $\frac{2\pi}{h}$. This is a manifestation of the gem called the sampling theorem which is defined, as on wikipedia: If a function x(t) contains no frequencies higher than B hertz, it is completely determined by giving its ordinates at a series of points spaced 1/(2B) seconds apart. ```python L, N = 1, 16 x = np.arange(0, L, L/512) xx = np.arange(0, L, L/N) def ff(k, x): return sin(k*x) ``` ```python f = plt.figure(figsize=(17, 6), dpi=80); pi=np.pi; sin=np.sin; cos=np.cos plt.plot(x, ff(x, 2*pi), color="#A60628", linewidth=2); plt.plot(x, ff(x, 34*pi), color="#348ABD", linewidth=2); plt.plot(xx, ff(xx, 2*pi), 'o', color="#020e3e", markersize=8) plt.xlabel('x', fontsize=15); plt.ylabel('y(x)', fontsize=15); plt.title('Aliasing in sampling of $sin(2\pi x/L)$ and $sin(34 \pi x/L)$', fontsize=15); ``` ### Differentiation In this section we will use the in-built FFT modules of numpy and then perform differentiation. Differentiation in Fourier space is trivial, $\mathbf{\nabla}_{\alpha}$ gets replaced by $ik_{\alpha}$. Steps involved are * FFT the function to be differentiated. * multiply by suitable numbers of $ik$ * IFFT on the resulting thing to get the differentiated function in real space. ```python def f1(kk, x): return cos(kk*x) ``` ```python f = plt.figure(figsize=(10, 5), dpi=80); L, N = 1, 32 kk = 2*pi/L x = np.arange(0, N)*(L/N) k = np.concatenate(( np.arange(0, N/2+1,1), np.arange(-N/2+1, 0, 1) ))*(2*pi/L) fk = np.fft.fft(f1(kk, x)) f1_kk = -k*k*fk f1_xx = np.fft.ifft(f1_kk) plt.plot(x, -f1(kk, x)*kk*kk, color="#348ABD", label = 'analytical', linewidth=2) plt.plot(x, f1_xx, 'o', color="#A60628", label = 'numerical', markersize=6) plt.legend(loc = 'best') plt.xlabel('x', fontsize=15) plt.title('derivatives using FFT', fontsize=15) plt.xlim([0, max(x)]); ``` There are additional symmetry properties for a real function. Wavenumbers corresponding to $j$ and $N-j$ are same and hence we only need to consider wavenumber till N/2! ```python # since we know that function is real, then we could save some time and memory by using real FFTs. f = plt.figure(figsize=(10, 5), dpi=80); L, N = 1, 64 x = np.arange(0, L, L/N) kk = 4*pi/L k = np.arange(0, N/2 + 1)*(2*pi/L) # k[2] = 0 # all the pattern is dead at a particular k mode which coooresponds to the signal!! 
fk = np.fft.rfft(f1(kk, x)) fk = -k*k*fk fx = np.fft.irfft(fk) plt.plot(x, -kk*kk*f1(kk, x), color="#348ABD", label = 'analytical', linewidth=2) plt.plot(x, fx, 'o', color="#A60628", label = 'numerical') plt.legend(loc = 'best') plt.xlabel('x', fontsize=15) plt.title('derivatives using FFT', fontsize=15) plt.xlim([0, max(x)]); ```
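A short consistency check (not in the original notes) ties the pieces together: the recursive `testFFT` defined above should agree with NumPy's FFT (assuming the integer-division `N//2` slices), and the spectral derivative should reproduce the analytic one.

```python
import numpy as np

sig = np.random.random(256)
print(np.allclose(testFFT(sig), np.fft.fft(sig)))     # expected: True

# Spectral derivative of sin(3x) on a periodic grid, compared with 3*cos(3x).
Lx, Nx = 2*np.pi, 64
xg = np.arange(Nx) * (Lx / Nx)
k = 2*np.pi * np.fft.fftfreq(Nx, d=Lx/Nx)             # wavenumbers in FFT ordering
dfx = np.real(np.fft.ifft(1j * k * np.fft.fft(np.sin(3*xg))))
print(np.allclose(dfx, 3*np.cos(3*xg)))               # expected: True
```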
c2f5c79f6c957b71e96f8fdb0fa29ac85e16cc72
240,699
ipynb
Jupyter Notebook
notebooks/2014/FourierSeries.ipynb
rajeshrinet/compPhy
cc0ce84ac07efc4b9372c01eba99ebccbc08bb41
[ "MIT" ]
47
2015-06-05T14:37:39.000Z
2022-01-06T06:35:30.000Z
notebooks/2014/FourierSeries.ipynb
rajeshrinet/compPhy
cc0ce84ac07efc4b9372c01eba99ebccbc08bb41
[ "MIT" ]
3
2017-10-23T06:44:38.000Z
2021-09-23T05:16:31.000Z
notebooks/2014/FourierSeries.ipynb
rajeshrinet/compPhy
cc0ce84ac07efc4b9372c01eba99ebccbc08bb41
[ "MIT" ]
46
2015-12-09T00:21:53.000Z
2022-02-03T20:44:38.000Z
676.120787
117,900
0.944271
true
2,615
Qwen/Qwen-72B
1. YES 2. YES
0.887205
0.805632
0.714761
__label__eng_Latn
0.964473
0.49896
# Lab 3 Exercises for COMP 432 Machine Learning In this lab you'll cluster and fit mixture models to data using the popular _scikit-learn_ package. Lab3 requires a good understanding of Numpy and Matplotlib. Please complete Lab1 before attempting Lab3. **Run the code cell below** to import the required packages. ```python import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import sklearn import sklearn.cluster # For KMeans class import sklearn.mixture # For GaussianMixture class import sklearn.preprocessing # For scale function import mpl_toolkits.mplot3d # For enabling projection='3d' feature in Matplotlib ``` <div style="border-bottom: 3px solid black; margin-bottom:5px"></div> <div style="border-bottom: 3px solid black"></div> # Introduction This "introduction" group of code cells has no exercises. The goal is to provide you with examples. Run the code cells and learn from them. Load the image *ladybug.png* as a Numpy array. The image should already have been unzipped into the same directory as this notebook. ```python image = plt.imread('ladybug.png') print(image.shape) ``` (100, 100, 3) Notice the shape of the array. Entry _image[y,x]_ in the array is a length-3 vector representing the RGB value of pixel at location $(x, y)$ in the image. Plot the image using Matplotlib's _imshow_ function. ```python plt.imshow(image); ``` Reshape the image into a 2-dimensional matrix suitable for clustering the pixels by their RGB colour value. The data should be in array format $$ \begin{bmatrix} r_1 & g_1 & b_1\\ r_2 & g_2 & b_2\\ \vdots & \vdots & \vdots\\ r_N & g_N & b_N \end{bmatrix} $$ where $N=10000$ and each $\begin{bmatrix} r_i & g_i & b_i \end{bmatrix}$ is the colour of pixel with index $i$. ```python RGB = image.reshape(-1, 3) print(RGB.shape) print(RGB) ``` (10000, 3) [[0.30588236 0.5568628 0.2 ] [0.27058825 0.53333336 0.16470589] [0.27058825 0.53333336 0.16470589] ... [0.4509804 0.6745098 0.34901962] [0.4392157 0.6784314 0.34509805] [0.46666667 0.6901961 0.37254903]] The top-left pixel (index $i=0$) has colour $\begin{bmatrix} 0.31 & 0.56 & 0.20 \end{bmatrix}$, which is green-ish. Makes sense! Plot the pixel RGB values in 3-dimensional colour space. ```python def plot_colour_space(RGB, title=None, hold=False, **kwargs): """ Plots Nx3 matrix RGB in 3 dimensions. The keyword arguments are passed to Matplotlib's scatter() function. If hold=True, the points will be added to the previous plot. Otherwise a new plot is generated. """ if hold: ax = plt.gca() else: fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111, projection='3d') kwargs.setdefault('alpha', 1.0) ax.scatter(*RGB.T, **kwargs) ax.set_xlabel('R', color='r', fontweight='bold', fontsize=15) ax.set_ylabel('G', color='g', fontweight='bold', fontsize=15) ax.set_zlabel('B', color='b', fontweight='bold', fontsize=15) if title is not None: ax.set_title(title) plot_colour_space(RGB, c=RGB, s=0.5, marker='s', title="Pixels plotted in colour space") ``` Run the *K*-means clustering algorithm with *K*=5, using each RGB pixel colour vector as a data point. ```python def run_kmeans(data, k): """ Runs K-means on an NxD array using k clusters. Returns a KxD matrix of centroids and a length-N vector of labels (cluster assignments). 
""" kmeans = sklearn.cluster.KMeans(n_clusters=k, random_state=0).fit(data) return kmeans.cluster_centers_, kmeans.labels_ centroids, labels = run_kmeans(RGB, k=5) print(centroids) print(labels, len(labels)) ``` [[0.918301 0.11951366 0.04882497] [0.21993323 0.46929815 0.09348458] [0.8889072 0.67274106 0.681143 ] [0.07332361 0.23711264 0.04663859] [0.39092913 0.5740279 0.2899987 ]] [4 1 1 ... 4 4 4] 10000 The centroids array has shape (5,3) because there are *K*=5 centroids, a 3-dimensional vector. The labels are given as integer indices, as in the "alternate formulation for *K*-means" from Lecture 2. Plot the centroids in RGB colour space. ```python plot_colour_space(centroids, facecolors=centroids, edgecolor='black', s=1000, title="Centroids only") ``` Plot the pixels in RGB space but with colour matching that of the centroid the pixel was assigned to. Include the centroids themselves in the same plot. ```python # Create a new (10000,3) ndarray where values in row i are copied from centroids[labels[i],:] RGB_recoloured = centroids[labels] # Plot the re-coloured pixels, along with the corresponding centroids plot_colour_space(RGB, c=RGB_recoloured, marker='s', s=0.5, title="Centroids and recoloured pixels") plot_colour_space(centroids, facecolors=centroids, edgecolor='black', s=1000, hold=True) ``` ```python plt.imshow(RGB_recoloured.reshape(image.shape)); ``` Compare the above to Figure 9.3 (p.429) of the Bishop textbook. <div style="border-bottom: 3px solid black; margin-bottom:5px"></div> <div style="border-bottom: 3px solid black"></div> # 1. Spatial clustering with K-means Exercises 1.1&ndash;1.4 ask you to apply scikit-learn's **[sklearn.cluster.KMeans](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html)** object to the pixels from the introduction, but on slightly different data. It requires that you have already **run the code cells from the introduction**. ```python assert 'image' in globals(), "Did you run the code cells from introduction?" assert 'RGB' in globals(), "Did you run the code cells from introduction?" ``` <div style="border-bottom: 3px solid black;"></div> ### Exercise 1.1 &mdash; Add pixel location features The introduction showed you how to cluster pixels by colour, like Figure 9.3 (p.429) of the Bishop book. The data that we clustered was an array of 3-dimensional features $\begin{bmatrix} r_i & g_i & b_i \end{bmatrix}$, stored in the array object referenced by variable *RGB*. In exercises 1.2&ndash;1.4, you'll be asked to cluster the pixels by _colour_ (rgb) and _location_ (xy). To do this, you must first add pixel location features to the data. **Write a few lines of code** to create a new array called *RGBXY* where each row is a 5-dimensional feature vector $\begin{bmatrix} r_i & g_i & b_i & x_i & y_i\end{bmatrix}$, with $(x_i, y_i)$ being the location in the image where pixel $i$ came from in the original image. Use the *image* variable to get the height and width of the original ladybug image. Then use the [**np.arange**](https://numpy.org/doc/stable/reference/generated/numpy.arange.html) and [**np.meshgrid**](https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html) function to generate two arrays, each containing the individual $x_i$ and $y_i$ values respectively. Finally, use a matrix stacking function like [**np.column_stack**](https://numpy.org/doc/stable/reference/generated/numpy.column_stack.html) to create the new array object *RGBXY* from the original *RGB* data and your new pixel location arrays. 
If you have done everything correctly, your data with extra "pixel location features" should look like this: ```python >>> RGBXY [[ 0.30588236 0.55686277 0.2 0. 0. ] [ 0.27058825 0.53333336 0.16470589 1. 0. ] [ 0.27058825 0.53333336 0.16470589 2. 0. ] ... [ 0.4509804 0.67450982 0.34901962 97. 99. ] [ 0.43921569 0.67843139 0.34509805 98. 99. ] [ 0.46666667 0.6901961 0.37254903 99. 99. ]] ``` ```python # Your code here. Aim for 2-6 lines. y,x,d = image.shape mesh = np.array(np.meshgrid(np.arange(y), np.arange(y))).T.reshape(-1, 2).copy() mesh[:, [1, 0]] = mesh[:, [0, 1]] RGBXY = np.column_stack((image.reshape(x*y, 3), mesh)).astype(RGB.dtype) ``` **Check your answer** by running the code cell below. ```python assert 'RGBXY' in globals(), "You didn't create a variable called RGBXY" assert isinstance(RGBXY, np.ndarray), "Expected RGBXY to be ndarray" assert RGBXY.dtype == RGB.dtype, "RGBXY has wrong dtype" assert RGBXY.shape == (100*100,5), "RGBXY has wrong shape" assert np.array_equal(RGBXY[99:101], np.array([[0.38039216, 0.57647060, 0.15686275, 99., 0.], [0.25490198, 0.52156866, 0.14509805, 0., 1.]], dtype=np.float32)), "RGBXY wrong data" print("Correct!") ``` <div style="border-bottom: 3px solid black;"></div> ### Exercise 1.2 &mdash; Cluster pixels spatially **Write a few lines of code** to do the following: 1. Cluster the $\begin{bmatrix} r_i & g_i & b_i & x_i & y_i\end{bmatrix}$ values that are currently stored in the _RGBXY_ variable. Use *K*=6 clusters. You can use *sklearn.cluster.KMeans* directly or you can use the _run_kmeans_ function from the introduction. 2. Plot the re-coloured image, where each pixel's colour has been replaced by its corresponding centroid colour, as we did in the introduction. Your plot should look like this: _Hint:_ Your _RGBXY_ array has shape (10000,5), but the _imshow_ function expects an array of shape (*height*, *width*, 3). So you will need to slice out colour data and reshape it into a form that _imshow_ expects. ```python clusters, lab = run_kmeans(RGBXY, 6) colorData = clusters[lab][:, :3].reshape(x, y, 3) plt.imshow(colorData) ``` <div style="border-bottom: 3px solid black;"></div> ### Exercise 1.3 &mdash; More clusters! **Repeat Exercise 1.2** but this time use *K*=50 clusters. It may take a few seconds. If you do not understand the result that you see, ask for insight from a TA or from someone else. ```python clusters50, lab2 = run_kmeans(RGBXY, 50) colorData2 = clusters50[lab2][:, :3].reshape(x, y, 3) plt.imshow(colorData2) ``` <div style="border-bottom: 3px solid black;"></div> ### Exercise 1.4 &mdash; Feature scaling **Repeat Exercise 1.3** but this time scale the $x$ and $y$ features by a factor of 0.02 before clustering. Your answer should use Numpy's "broadcasting" mechanism to achieve the scaling in a single step. Be sure to store the transformed values in a new array called _RGBXY_scaled_ rather than modifying the original _RGBXY_ array. Otherwise you may end up having to re-run the code cell that generated the _RGBXY_ variable. ```python RGBXY_scaled = RGBXY * [1, 1, 1, 0.02, 0.02] # RGBXY_scaled[:, [3,4]] = RGBXY_scaled[:, [3,4]] * 0.02 scaledCluseter, lab3 = run_kmeans(RGBXY_scaled, 50) colorData3 = scaledCluseter[lab3][:, :3].reshape(x, y, 3) plt.imshow(colorData3) ``` The purpose of this exercise is to see that the clustering method is very sensitive to the scale of different features. If you were to scale the _XY_ components all the way to zero, you would be back to clustering _only by colour_, as we did in the introduction. 
Before continuing, try scaling $(x,y)$ by different values, such as $(0.05, 0.02)$ and see how that effects the pixel clusters. <div style="border-bottom: 3px solid black; margin-bottom:5px"></div> <div style="border-bottom: 3px solid black"></div> # 2. Selecting *K* in *K*-means Exercises 2.1&ndash;2.2 ask you to generate synthetic data and to apply the "elbow" heuristic for selecting optimal *K* in *K*-means. **Run the code cell below** to define a function for sampling data from a mixture of Gaussians. ```python def sample_gmm(means, covs=None, weights=None, N=1): """ Samples a K-component D-dimensional Gaussian mixture. The means are KxD. The covariances are KxDxD. The default covariance is the DxD identity matrix. The weights are length K and must sum to 1. The default is uniform weights. Returns (X, c) where X is an NxD array of samples and c is a length-N vector of component indices, i.e. X[i] was sampled from mixture component c[i]. """ K, D = means.shape # Valudate inputs and set default values if needed if covs is None: covs = np.tile(np.eye(D), (K,1,1)) # Stack of K D-dimensional identity matricies if weights is None: weights = np.full(K, 1/K) assert covs.shape == (K,D,D) # Sample a vector of component choices in proportion to weights, one for each sample c = np.random.choice(K, N, p=weights) # choice() checks that weights has shape (K,1) and sums to 1 # Fill an array of N samples, one component at a time X = np.empty((N,D)) for k in range(K): X[k==c] = np.random.multivariate_normal(means[k], covs[k], np.count_nonzero(k==c)) return X, c ``` <div style="border-bottom: 3px solid black;"></div> ### Exercise 2.1 &mdash; Generate synthetic GMM data **Write a few lines of code** to generate 1000 samples from a 9-component Gaussian mixture, where the means are arranged in a 3x3 grid as shown below. Use a single call to the *sample_gmm* function to generate your data. That means you must build a _means_ matrix with shape (9,2) containing the 9 means. You should know how to do this procedurally, either with for-loops or with a call to the _np.meshgrid_ and *np.column_stack* functions. (Do not write out all 9 of the mean vectors by hand.) Create a variable called _X_ that refers to your samples, and create a variable called _c_ that refers to the mixture component indices (this value is directly returned by *sample_gmm*, so just store the result). The `np.random.seed(0)` ensures you always generate the same data each time you run the code cell. ```python np.random.seed(0) seedx, seedy = np.meshgrid(np.linspace(-5, 5, 3), np.linspace(-5, 5, 3)) X, c = sample_gmm(np.column_stack((seedx.flatten(), seedy.flatten())), N=1000) ``` (9, 4) (1000, 2) **Plot your data** by running the code cell below. ```python assert 'X' in globals(), "You must create a variable called 'X' that refers to your samples!" assert 'c' in globals(), "You must create a variable called 'c' that refers to your component indices!" assert X.shape == (1000,2), "X should have shape (1000, 2)!" assert c.shape == (1000,), "c should have shape (1000,)!" plt.scatter(*X.T, c=c, s=5) # Plot each point (X[i,0], X[i,1]) using colour index c[i] and point size 5 plt.title("Synthetic GMM data") plt.xticks([-5, 0, 5]) plt.yticks([-5, 0, 5]) plt.gca().set_aspect('equal') # It's important to see this data with equal scales on each axis ``` <div style="border-bottom: 3px solid black;"></div> ### Exercise 2.2 &mdash; Choosing *K* with *K*-means When applying *K*-means, it may be difficult to choose the best *K* for a particular data set. 
Recall the "elbow curve" method for selecting *K* as reviewed in Lecture 2. You are asked to apply this method to select a "good" *K* for the data set generated in Exercise 2.1. **Write a few lines of code** to generate a plot of *K* ($x$-axis) versus the corresponding *K*-means objective value ($y$-axis) for each choice of $K \in \{2, \ldots 15\}$. Use the **[sklearn.cluster.KMeans](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html)** object in your answer. Once you have called the _fit_ method on a _KMeans_ object you can recover the final objective value (at convergence) from the object's *intertia_* attribute (as in `kmeans_object.inertia_`). If you have done this correctly, you should see a 'kink' at *K*=9, which makes sense given that the data was indeed generated from 9 clusters. ```python # Your code here. Aim for 2-4 lines of code plus a few lines for plotting. for i in range(2,16): plt.scatter(i, sklearn.cluster.KMeans(i).fit(X).inertia_) plt.plot() ``` <div style="border-bottom: 3px solid black; margin-bottom:5px"></div> <div style="border-bottom: 3px solid black"></div> # 3. Fitting a Gaussian Mixture Model (GMM) Exercises 3.1&ndash;3.4 ask you fit 2D data with scikit-learn's **[sklearn.mixture.GaussianMixture](https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html)** object. **Run the code cell below** to define a function that will help to visualize the placement and shape of GMM components. ```python def plot_gmm(gmm): """ Plots the placement of components in a Gaussian Mixture. The gmm object should be of type sklean.mixture.GaussianMixture """ ax = plt.gca() for weight, mean, cov in zip(gmm.weights_, gmm.means_, gmm.covariances_): v, w = np.linalg.eigh(cov) v = 2*np.sqrt(2*v) u = w[0] / np.linalg.norm(w[0]) angle = 180 * (1 + np.arctan(u[1]/u[0]) / np.pi) # Ellipse() function needs degrees ell = mpl.patches.Ellipse(mean, v[0], v[1], angle, edgecolor='k', facecolor='none', linestyle='--', linewidth=2, alpha=0.8) ax.add_artist(ell) ``` <div style="border-bottom: 3px solid black;"></div> ### Exercise 3.1 &mdash; Fit a GMM to non-overlapping, isotropic data **Write fitting code** to fit a **[sklearn.mixture.GaussianMixture](https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html)** object to the synthetic data from Exercise 2.1. Use 9 mixture components in your answer. You should use the argument *random_state=0* to ensure reproducibility. Create a variable called _gmm_ that refers to your new *GaussianMixture* object. ```python gmm = sklearn.mixture.GaussianMixture(9, random_state=0) print(gmm) ``` GaussianMixture(n_components=9, random_state=0) **Write plotting code** using the *plot_gmm* function provided. Once you have the ellipses plotting correctly, use the **[predict](https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html#sklearn.mixture.GaussianMixture.predict)** method of the *GaussianMixture* object and use them to colour the points in the scatter plot. Your plot should look similar to: If you find that the GMM components do not fit perfectly to the data, try adding *n_init=5* to the arguments of your _GaussianMixture_ object. This will run the EM algorithm multiple times from different randomized initializations, and may give a higher chance of getting the 'right' clustering. ```python # Modify the code below. Aim for 2 additional lines. 
plot_gmm(gmm.fit(X)) plt.scatter(*X.T, s=5, c=gmm.predict(X)) plt.title('GMM fitted to anisotropic data') plt.gca().set_aspect('equal') ``` <div style="border-bottom: 3px solid black;"></div> ### Exercise 3.2 &mdash; Generate overlapping, anisotropic synthetic GMM data **Write some code** to generate 500 samples from 2-dimensional 3-component Gaussian mixture having the following parameters: $$ \begin{align} \pi_1 = 0.5, \quad &\boldsymbol{\mu}_1 = \begin{bmatrix} -2.0 & 0.0 \end{bmatrix}, &\boldsymbol{\Sigma}_1 = \begin{bmatrix}\phantom{-}2.0 & \phantom{-}1.0 \\ \phantom{-}1.0 & \phantom{-}2.0\end{bmatrix}\\ \pi_2 = 0.3, \quad &\boldsymbol{\mu}_2 = \begin{bmatrix} \phantom{-}2.0 & 0.0 \end{bmatrix}, &\boldsymbol{\Sigma}_2 = \begin{bmatrix}1.0 & -0.9 \\ -0.9 & 1.0\end{bmatrix}\\ \pi_3 = 0.2, \quad &\boldsymbol{\mu}_3 = \begin{bmatrix} \phantom{-}0.0 & 0.0 \end{bmatrix}, &\boldsymbol{\Sigma}_3 = \begin{bmatrix}10.0 & \phantom{-}0.0 \\ \phantom{-}0.0 & 10.0\end{bmatrix}\\ \end{align} $$ Use the *sample_gmm* function from part 2. Create a variable called _X_ that refers to your samples, and create a variable called _c_ that refers to the mixture component indices (this value is directly returned by *sample_gmm*, so just store the result). ```python np.random.seed(0) means = np.array([ [-2.0, 0.0], [2.0, 0.0], [0.0, 0.0] ]) covariance = np.array([ [[2.0, 1.0], [1.0, 2.0]], [[1.0, -0.9], [-0.9, 1.0]], [[10.0, 0.0], [0.0, 10.0]] ]) weights = np.array([0.5, 0.3, 0.2]) X, c = sample_gmm(means=means, covs=covariance, weights=weights, N=500) ``` **Plot your data** by running the code cell below. The plot should look like this: ```python assert 'X' in globals(), "You must create a variable called 'X' that refers to your samples!" assert 'c' in globals(), "You must create a variable called 'c' that refers to your component indices!" assert X.shape == (500,2), "X should have shape (500, 2)!" assert c.shape == (500,), "c should have shape (500,)!" plt.scatter(*X.T, c=c, s=5) # Plot each point (X[i,0], X[i,1]) using colour index c[i] and point size 5 plt.title("Synthetic GMM data") plt.xticks([-5, 0, 5]) plt.yticks([-5, 0, 5]) plt.gca().set_aspect('equal') # It's important to see this data with equal scales on each axis ``` <div style="border-bottom: 3px solid black;"></div> ### Exercise 3.3 &mdash; Fit a GMM to the anisotropic, overlapping data **Write fitting code** to fit a **[sklearn.mixture.GaussianMixture](https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html)** object to the synthetic data you generated in Exercise 3.2. Use 3 mixture components in your answer. You should use the argument *random_state=0* to ensure reproducibility. Create a variable called _gmm_ that refers to your new *GaussianMixture* object. ```python gmm = sklearn.mixture.GaussianMixture(3, random_state=0) ``` **Write plotting code** just as you did for Exercise 3.1 using the **[predict](https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html#sklearn.mixture.GaussianMixture.predict)** method. Your plot should look like this: ```python plot_gmm(gmm.fit(X)) plt.scatter(*X.T, s=5, c=c) plt.title('GMM fitted to anisotropic data') plt.gca().set_aspect('equal') ``` **Write plotting code** to visualize the probability $p(z_{ik} \mid \mathbf{x}_i)$ that component $k$ generated data point $\mathbf{x}_i$. 
Use the **[predict_proba](https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html#sklearn.mixture.GaussianMixture.predict_proba)** method of the _GaussianMixture_ object to get the probabilities. Because there are $K=3$ components, you can visualize these probabilities by using $\begin{bmatrix} z_{i1} & z_{i2} & z_{i3} \end{bmatrix}$ as the values colour $\begin{bmatrix} r_i & g_i & b_i \end{bmatrix}$ for data point $i$. Your plot should look like this: ```python plot_gmm(gmm.fit(X)) plt.scatter(*X.T, s=20, c=gmm.predict_proba(X)) plt.title('GMM fitted to anisotropic data') plt.gca().set_aspect('equal') ``` <div style="border-bottom: 3px solid black;"></div> ### Exercise 3.4 &mdash; Sample from spatially clustered pixels using a GMM **Write fitting code** to fit a **[sklearn.mixture.GaussianMixture](https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html)** object to the _RGBXY_ data (the ladybug) that you generated in Exercise 3.2. Use **15** mixture components in your answer. You should use the argument *random_state=0* to ensure reproducibility. Create a variable called _gmm_ that refers to your new *GaussianMixture* object. ```python gmm = sklearn.mixture.GaussianMixture(15, random_state=0).fit(RGBXY) ``` **Generate 5000 samples** from your fitted mixture model by using the **[sample](https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html#sklearn.mixture.GaussianMixture.sample)** method of the _GaussianMixture_ object. The resulting sample matrix should have shape (5000,5) where each row is a vector $\begin{bmatrix}r & g & b & x & y\end{bmatrix}$. Create a variable _X_ to refer to your new samples. Note that the _sample_ function also returns a component membership vector, but you can ignore it. ```python X, c = gmm.sample(5000) print(X) ``` [[ 3.46851326e-01 -9.02360738e-02 -2.43623256e-02 7.95827847e+01 5.22453003e+01] [ 1.38434547e+00 1.96541881e-01 4.96918224e-02 7.19965214e+01 4.97476077e+01] [ 4.79247157e-01 1.89137918e-01 1.10851799e-01 7.20397440e+01 7.23885958e+01] ... [ 2.85009137e-01 2.93851123e-01 4.14639804e-01 3.44345202e+01 4.05519970e+01] [ 8.55497336e-01 8.40602537e-01 9.04088935e-01 4.14135879e+01 5.18982229e+01] [ 5.11923476e-01 5.04867657e-01 5.92077136e-01 2.38742913e+01 5.89054347e+01]] **Plot the samples** as if they were pixels at location $(x,y)$ with colour $(r,g,b)$. Use a single call to Matplotlib's _scatter_ function. Your ladybug should look something like this: *Hint:* The _scatter_ function will fail if you use any colour values outside range $[0.0,1.0]$, yet there's no guarantee that a Gaussian will produce values within this range. Use the **[np.clip](https://numpy.org/devdocs/reference/generated/numpy.clip.html)** function to clamp the $(r,g,b)$ values to this range. ```python plt.scatter(*X[:, 3:5].T, c=np.clip(X[:, 0:3], 0, 1), s=10) # Keep these lines at the end. plt.gca().invert_yaxis() # This line makes sure the ladybug appears upright! plt.gca().set_aspect('equal') # This line makes sure the ladybug isn't squished! ```
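A possible extension beyond the lab (not part of the exercises): rather than the elbow heuristic of Exercise 2.2, mixture models allow likelihood-based model selection. The sketch below scores `GaussianMixture` fits with BIC over a range of component counts; `X` is assumed to hold one of the 2-D synthetic data sets generated above.

```python
import numpy as np
import sklearn.mixture

ks = range(2, 16)
bics = [sklearn.mixture.GaussianMixture(n_components=k, random_state=0).fit(X).bic(X)
        for k in ks]
best_k = list(ks)[int(np.argmin(bics))]
print("BIC-preferred number of components:", best_k)
```

For the grid data of Exercise 2.1 one would expect the minimum to sit at or near 9 components, mirroring the kink in the K-means elbow curve.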
92bbabaa8c8a705ef1d112993423e420ece0fcec
823,246
ipynb
Jupyter Notebook
Lab 3/lab3-exercises.ipynb
m-triassi/ml-exercises
92089577c99ed348d9034de7739d089f6e26d257
[ "MIT" ]
null
null
null
Lab 3/lab3-exercises.ipynb
m-triassi/ml-exercises
92089577c99ed348d9034de7739d089f6e26d257
[ "MIT" ]
null
null
null
Lab 3/lab3-exercises.ipynb
m-triassi/ml-exercises
92089577c99ed348d9034de7739d089f6e26d257
[ "MIT" ]
null
null
null
717.11324
112,372
0.947303
true
7,336
Qwen/Qwen-72B
1. YES 2. YES
0.800692
0.699254
0.559887
__label__eng_Latn
0.953278
0.139136
```python import numpy as np import matplotlib.pyplot as plt from sympy.solvers import solve from sympy import Symbol ``` ```python x = Symbol('x') sols = solve(8.99 * x - 6.56 * x - 1312.13, x) sols ``` [539.971193415638] ```python xs = np.linspace(0, 1000, 1000) plt.plot(xs, 2.43 * xs - 1312.13) plt.axhline(0, c='k') plt.show() ``` ```python sols1 = solve(400 * x - 3936.13, x) sols1 ``` [9.84032500000000] ```python xs = np.linspace(0, 10, 1000) plt.plot(xs, 400 * xs - 3936.13) plt.axhline(0, c='k') plt.show() ``` ```python ```
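Reading the first cell as revenue $8.99x$, variable cost $6.56x$ and fixed cost $1312.13$, the same break-even point can be read off directly from the closed form $x = \dfrac{\text{fixed cost}}{\text{price} - \text{unit cost}}$. A small check (not in the original notebook):

```python
price, unit_cost, fixed_cost = 8.99, 6.56, 1312.13   # values reused from the sympy cell above
print(fixed_cost / (price - unit_cost))              # ~539.97, matching solve()
```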
ce7e2d635fec44cc3c4f6529f627a456f6bf71a5
28,575
ipynb
Jupyter Notebook
Exercise03/Introduction_to_Break_Even_Analysis.ipynb
Develop-Packt/Using-Functions-and-Algebra-with-Python
5f5b4c37e40216cb5751687f5bb9d6378652ab14
[ "MIT" ]
null
null
null
Exercise03/Introduction_to_Break_Even_Analysis.ipynb
Develop-Packt/Using-Functions-and-Algebra-with-Python
5f5b4c37e40216cb5751687f5bb9d6378652ab14
[ "MIT" ]
null
null
null
Exercise03/Introduction_to_Break_Even_Analysis.ipynb
Develop-Packt/Using-Functions-and-Algebra-with-Python
5f5b4c37e40216cb5751687f5bb9d6378652ab14
[ "MIT" ]
1
2021-02-25T16:24:53.000Z
2021-02-25T16:24:53.000Z
201.232394
14,012
0.924759
true
222
Qwen/Qwen-72B
1. YES 2. YES
0.944995
0.828939
0.783343
__label__eng_Latn
0.292514
0.6583
# Introduction

The next step is to provide some information about the mass and inertia of the bodies involved. Each of the three rigid bodies has both a mass, which resists linear acceleration, and an inertia, which resists rotational acceleration. In this notebook we will specify the mass of the three bodies and the inertia tensor/dyadic, and also create three `RigidBody` objects that hold all of the necessary information for each rigid body.

# Setup

First, we will import the results from the previous notebook. Even if you didn't get everything working correctly, the following import statement will bring in the correct solution so you can move forward. We will do this in all of the subsequent notebooks.

```python
from __future__ import print_function, division
from solution.kinematics import *
```

We will also need the function for easily generating inertial quantities and the `RigidBody` class so we can create some rigid bodies.

```python
from sympy.physics.mechanics import inertia, RigidBody
```

We will need to specify some constants for the mass and inertia values.

```python
from sympy import symbols
```

Once again, initialize SymPy printing so that we get nicely rendered symbols.

```python
from sympy.physics.vector import init_vprinting
init_vprinting(use_latex='mathjax', pretty_print=False)
```

# Mass

The masses of each rigid body can be represented by constant values, so we create a symbol for each body.

```python
lower_leg_mass, upper_leg_mass, torso_mass = symbols('m_L, m_U, m_T')
```

```python
lower_leg_mass
```

```python
upper_leg_mass
```

```python
torso_mass
```

# Inertia

Since we are studying a 2D planar problem, we are only concerned with the rotational inertia about the $\hat{i}_z$ axis. We will assume that the rigid bodies are symmetric about the $XZ$ and $YZ$ planes, so we only need a single variable for each rigid body to specify the rotational inertia.

```python
lower_leg_inertia, upper_leg_inertia, torso_inertia = symbols('I_Lz, I_Uz, I_Tz')
```

The `inertia()` function is a convenience function for creating inertia dyadics (i.e. basis-dependent tensors). You specify a reference frame to define the inertia with respect to and, at a minimum for symmetric bodies, provide the diagonal entries of the inertia tensor. In our case the rotational inertia about the $x$ and $y$ axes is not needed, so those entries are set to zero and the $z$ inertia entry is set to the defined variable.

```python
lower_leg_inertia_dyadic = inertia(lower_leg_frame, 0, 0, lower_leg_inertia)
```

```python
lower_leg_inertia_dyadic
```

In general, we store the inertia as dyadics, i.e. basis-dependent tensors. If you want to see what the inertia is expressed in a particular frame, use the `to_matrix()` method.

```python
lower_leg_inertia_dyadic.to_matrix(lower_leg_frame)
```

We will also eventually need to know what point the inertia is defined with respect to. In our case, we will simply define all inertias about the mass center. We can store the total information needed by PyDy in a tuple of an inertia `Dyadic` and a `Point`.

```python
lower_leg_central_inertia = (lower_leg_inertia_dyadic, lower_leg_mass_center)
```

The upper leg and torso inertias are found in the same fashion.

```python
upper_leg_inertia_dyadic = inertia(upper_leg_frame, 0, 0, upper_leg_inertia)

upper_leg_inertia_dyadic.to_matrix(upper_leg_frame)
```

```python
upper_leg_central_inertia = (upper_leg_inertia_dyadic, upper_leg_mass_center)
```

## Exercise

Create a tuple of an inertia `Dyadic` and `Point` for the torso.
```python torso_inertia_dyadic = ``` ```python torso_central_inertia = ``` ```python %load exercise_solutions/n04_inertia_inertia-dyadic.py ``` # Rigid Bodies To completely define a rigid body, the mass center point, the reference frame, the mass, and the inertia defined about a point must be specified. ```python lower_leg = RigidBody('Lower Leg', lower_leg_mass_center, lower_leg_frame, lower_leg_mass, lower_leg_central_inertia) ``` ## Exercise Create RigidBody objects for the upper leg and torso ```python upper_leg = ``` ```python torso = ``` ```python %load exercise_solutions/n04_inertia_define-rigid-body.py ```
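For reference, a hedged sketch of what the loaded solutions presumably look like, following the exact pattern used for the lower and upper leg above. It assumes `torso_frame` and `torso_mass_center` are available from the `solution.kinematics` import, mirroring the other two bodies; the official answers live in the `%load`ed exercise_solutions files.

```python
# Sketch only -- not the official solution files.
torso_inertia_dyadic = inertia(torso_frame, 0, 0, torso_inertia)
torso_central_inertia = (torso_inertia_dyadic, torso_mass_center)

upper_leg = RigidBody('Upper Leg', upper_leg_mass_center, upper_leg_frame,
                      upper_leg_mass, upper_leg_central_inertia)

torso = RigidBody('Torso', torso_mass_center, torso_frame,
                  torso_mass, torso_central_inertia)
```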
e14dcf4b37f2aa7a72f9f461d70c54b627b481e6
9,368
ipynb
Jupyter Notebook
notebooks/n04_inertia.ipynb
pydy/pydy-tutorial-human-standing
72b1d8513e339e9b10e501bd3490caa3fa997bc4
[ "CC-BY-4.0" ]
134
2015-05-19T15:24:18.000Z
2022-03-12T09:39:03.000Z
notebooks/n04_inertia.ipynb
pydy/pydy-tutorial-human-standing
72b1d8513e339e9b10e501bd3490caa3fa997bc4
[ "CC-BY-4.0" ]
46
2015-05-05T18:08:20.000Z
2022-01-28T11:12:42.000Z
notebooks/n04_inertia.ipynb
pydy/pydy-tutorial-pycon-2014
72b1d8513e339e9b10e501bd3490caa3fa997bc4
[ "CC-BY-4.0" ]
62
2015-06-16T01:50:51.000Z
2022-02-26T07:39:41.000Z
22.357995
432
0.58497
true
1,010
Qwen/Qwen-72B
1. YES 2. YES
0.870597
0.839734
0.73107
__label__eng_Latn
0.993937
0.536853
# Linear Discriminant Analysis (LDA) tutorial

Notes from: [link 1](https://machinelearningmastery.com/linear-discriminant-analysis-for-machine-learning/), [link 2](https://www.python-course.eu/linear_discriminant_analysis.php)

- Logistic regression is a classification algorithm traditionally limited to two-class problems.
- LDA classification can be used for multi-class problems.
- LDA searches for a projection of a dataset which maximizes the 'between class scatter to within class scatter' $\left(\frac{S_B}{S_W}\right)$ ratio of this projected dataset.
- The goal is to transform a dataset $A$ using a transformation matrix $w$ into $Y=w^T\cdot A$ such that the between-class-scatter to within-class-scatter ratio of the projected data is maximized.
- Formally, find the linear combination $Z=a^T\cdot X$ such that the between class variance is maximized relative to the within class variance.
- LDA makes the assumption that the distribution of the data is Gaussian.
- LDA consists of statistical properties of your data, calculated for each class.
- For a single-variable class, the statistical properties that are calculated are the mean and variance.
- For multiple variables, the statistical properties are the means and covariance matrix of the multivariate Gaussian.
- An LDA model uses these properties to make predictions.

**Assumptions**
1. That the data is Gaussian.
2. That each attribute has the same variance.

## Making predictions with LDA

Given an instance $x$, determine which class or category $c$ it belongs to:

\begin{equation}
P(Y=c\mid X=x)=\frac{P(X=x\mid Y=c)P(Y=c)}{\sum_{classes~c}P(X=x\mid Y=c)P(Y=c)}
\end{equation}

## Maths of LDA

```python
#%%time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from matplotlib import style
```

```python
%matplotlib inline
style.use('seaborn')
np.random.seed(42)
```

```python
# contrived data
rectangles = np.array([[1,1.5,1.7,1.45,1.1,1.6,1.8],[1.8,1.55,1.45,1.6,1.65,1.7,1.75]])
triangles = np.array([[0.1,0.5,0.25,0.4,0.3,0.6,0.35,0.15,0.4,0.5,0.48],[1.1,1.5,1.3,1.2,1.15,1.0,1.4,1.2,1.3,1.5,1.0]])
circles = np.array([[1.5,1.55,1.52,1.4,1.3,1.6,1.35,1.45,1.4,1.5,1.48,1.51,1.52,1.49,1.41,1.39,1.6,1.35,1.55,1.47,1.57,1.48,
                     1.55,1.555,1.525,1.45,1.35,1.65,1.355,1.455,1.45,1.55,1.485,1.515,1.525,1.495,1.415,1.395,1.65,1.355,1.555,1.475,1.575,1.485]
                    ,[1.3,1.35,1.33,1.32,1.315,1.30,1.34,1.32,1.33,1.35,1.30,1.31,1.35,1.33,1.32,1.315,1.38,1.34,1.28,1.23,1.25,1.29,
                      1.35,1.355,1.335,1.325,1.3155,1.305,1.345,1.325,1.335,1.355,1.305,1.315,1.355,1.335,1.325,1.3155,1.385,1.345,1.285,1.235,1.255,1.295]])
```

```python
print(rectangles.shape, triangles.shape, circles.shape)
```

(2, 7) (2, 11) (2, 44)

### Scatter Within ($S_W$)

$S_W = \sum_{classes~c}\sum_{j\in c}\left(x_j-\mu_c\right)\left(x_j-\mu_c\right)^T$

```python
# plot the data
fig = plt.figure(figsize=(10,10))
ax0 = fig.add_subplot(111)
ax0.scatter(rectangles[0], rectangles[1], marker='s', c='grey', edgecolor='black')
ax0.scatter(triangles[0], triangles[1], marker='^', c='yellow', edgecolor='black')
ax0.scatter(circles[0], circles[1], marker='o', c='blue', edgecolor='black')

# Calculate the mean vectors per class
mean_rectangles = np.mean(rectangles, axis=1).reshape(2,1)
mean_triangles = np.mean(triangles, axis=1).reshape(2,1)
mean_circles = np.mean(circles, axis=1).reshape(2,1)

# Calculate the scatter matrices for the SW (Scatter Within) and sum the elements up
scatter_rectangles = np.dot((rectangles-mean_rectangles),(rectangles-mean_rectangles).T)
scatter_triangles = np.dot((triangles-mean_triangles),(triangles-mean_triangles).T)
scatter_circles = np.dot((circles-mean_circles),(circles-mean_circles).T)

# Calculate the SW by adding the scatters within classes
SW = scatter_triangles + scatter_circles + scatter_rectangles
print(SW)
plt.show()
```

### Scatter Between ($S_B$)

$S_B=\sum_{classes~c}N_c\left(\mu_c-\mu\right)\left(\mu_c-\mu\right)^T$

Total scatter matrix, $S_T=S_W+\sum_{classes~c}n_c(\mu_c-\mu)(\mu_c-\mu)^T$, where $n_c$ is the number of rows in $c$.

If we denote the transformed dataset as $Y$, we find $Y$ with $Y=w^T\cdot X$. Plugging the transformation matrix $w$ into the $S_W$ and $S_B$ equations we have:

\begin{equation}
S_W = \sum_{classes~c}\sum_{j\in c}\left(w^T\left(x_j-\mu_c\right)\right)\left(w^T\left(x_j-\mu_c\right)\right)^T = w^TS_Ww
\end{equation}

\begin{equation}
S_B = \sum_{classes~c}N_c\left(w^T\left(\mu_c-\mu\right)\right)\left(w^T\left(\mu_c-\mu\right)\right)^T = w^TS_Bw
\end{equation}

Therefore, the scatter ratio becomes:

\begin{equation}
\frac{w^TS_Bw}{w^TS_Ww}
\end{equation}

So the **problem** now is: *how to find the $w$ that maximizes this equation.* It turns out that the maximizing $w$ is built from the leading eigenvectors of $S_W^{-1}S_B$.

### Main steps in LDA

1. Standardize the dataset (zero mean, unit variance)
2. Compute the total mean vector $\mu$ as well as the mean vectors per class $\mu_c$
3. Compute the scatter within and scatter between matrices $S_W$ and $S_B$
4. Compute the eigenvalues and eigenvectors of $S_W^{-1}S_B$ to find the $w$ which maximizes $\frac{w^TS_Bw}{w^TS_Ww}$
5. Select the eigenvectors of the corresponding k largest eigenvalues to create a $d\times k$ dimensional transformation matrix $w$ where the eigenvectors are the columns of this matrix.
6. Use $w$ to transform the original $n\times d$ dimensional dataset $x$ into a lower, $n\times k$ dimensional dataset $y$

Dataset: [wine dataset](https://archive.ics.uci.edu/ml/datasets/wine)

**Step 0: Load in the data and split the descriptive and the target feature**

```python
df = pd.read_csv('data/wine.data', sep=',', names=['target','Alcohol','Malic_acid','Ash','Akcakinity','Magnesium','Total_pheonols','Flavanoids','Nonflavanoids','Proanthocyanins','Color_intensity','Hue','OD280','Proline'])
X = df.iloc[:, 1:].copy()
target = df['target'].copy()
X_train, X_test, y_train, y_test = train_test_split(X, target, test_size=0.3, random_state=0)
```

**Step 1: Standardize the data**

```python
for col in X_train.columns:
    X_train[col] = StandardScaler().fit_transform(X_train[col].values.reshape(-1,1))
```

/home/bbrighttaer/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy /home/bbrighttaer/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy /home/bbrighttaer/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py:595: DataConversionWarning: Data with input dtype int64 was converted to float64 by StandardScaler. warnings.warn(msg, DataConversionWarning) /home/bbrighttaer/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py:595: DataConversionWarning: Data with input dtype int64 was converted to float64 by StandardScaler. warnings.warn(msg, DataConversionWarning) /home/bbrighttaer/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy ```python X.head() ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>Alcohol</th> <th>Malic_acid</th> <th>Ash</th> <th>Akcakinity</th> <th>Magnesium</th> <th>Total_pheonols</th> <th>Flavanoids</th> <th>Nonflavanoids</th> <th>Proanthocyanins</th> <th>Color_intensity</th> <th>Hue</th> <th>OD280</th> <th>Proline</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>14.23</td> <td>1.71</td> <td>2.43</td> <td>15.6</td> <td>127</td> <td>2.80</td> <td>3.06</td> <td>0.28</td> <td>2.29</td> <td>5.64</td> <td>1.04</td> <td>3.92</td> <td>1065</td> </tr> <tr> <th>1</th> <td>13.20</td> <td>1.78</td> <td>2.14</td> <td>11.2</td> <td>100</td> <td>2.65</td> <td>2.76</td> <td>0.26</td> <td>1.28</td> <td>4.38</td> <td>1.05</td> <td>3.40</td> <td>1050</td> </tr> <tr> <th>2</th> <td>13.16</td> <td>2.36</td> <td>2.67</td> <td>18.6</td> <td>101</td> <td>2.80</td> <td>3.24</td> <td>0.30</td> <td>2.81</td> <td>5.68</td> <td>1.03</td> <td>3.17</td> <td>1185</td> </tr> <tr> <th>3</th> <td>14.37</td> <td>1.95</td> <td>2.50</td> <td>16.8</td> <td>113</td> <td>3.85</td> <td>3.49</td> <td>0.24</td> <td>2.18</td> <td>7.80</td> <td>0.86</td> <td>3.45</td> <td>1480</td> </tr> <tr> <th>4</th> <td>13.24</td> <td>2.59</td> <td>2.87</td> <td>21.0</td> <td>118</td> <td>2.80</td> <td>2.69</td> <td>0.39</td> <td>1.82</td> <td>4.32</td> <td>1.04</td> <td>2.93</td> <td>735</td> </tr> </tbody> </table> </div> **Step 2: Compute the mean vector mu and the mean vector per class mu_k** ```python mu = np.mean(X_train, axis=0).values.reshape(13,1) mu_k = [] for i,orchid in enumerate(np.unique(df['target'])): mu_k.append(np.mean(X_train.where(df['target']==orchid),axis=0)) mu_k = np.array(mu_k).T ``` **Step 3: Compute the scatter within and Scatter between matrices** ```python data_SW = [] Nc = [] for i,orchid in enumerate(np.unique(df['target'])): a = np.array(X_train.where(df['target']==orchid).dropna().values-mu_k[:,i].reshape(1,13)) data_SW.append(np.dot(a.T,a)) Nc.append(np.sum(df['target']==orchid)) SW = 
np.sum(data_SW,axis=0) SB = np.dot(Nc*np.array(mu_k-mu),np.array(mu_k-mu).T) ``` **Step 4: Compute the eigenvalues and eigenvectors of $S_W$ and $S_B$** ```python eigval, eigvec = np.linalg.eig(np.dot(np.linalg.inv(SW),SB)) ``` **Step 5: Select the two largest eigenvalues** ```python eigen_pairs = [[np.abs(eigval[i]),eigvec[:,i]] for i in range(len(eigval))] eigen_pairs = sorted(eigen_pairs,key=lambda k: k[0],reverse=True) w = np.hstack((eigen_pairs[0][1][:,np.newaxis].real,eigen_pairs[1][1][:,np.newaxis].real)) # Select two largest ``` ```python w ``` array([[-0.08017035, 0.35966611], [ 0.03561443, 0.24949595], [-0.02768389, 0.36678459], [ 0.1989954 , -0.28816223], [-0.00688612, -0.02360798], [ 0.24246663, -0.00180873], [-0.76111744, -0.23908916], [-0.0810736 , -0.07461353], [ 0.09596011, -0.1569996 ], [ 0.21025035, 0.35956763], [-0.05311794, -0.20260029], [-0.3615518 , -0.05716745], [-0.34680517, 0.5738288 ]]) **Step 6: Transform the data with $Y=X*w$** ```python Y = X_train.dot(w) ``` ```python X_train.shape ``` (124, 13) ```python Y.shape ``` (124, 2) **Visualization** ```python fig = plt.figure(figsize=(10,10)) ax0 = fig.add_subplot(111) ax0.set_xlim(-3,3) ax0.set_ylim(-4,3) for l,c,m in zip(np.unique(y_train),['r','g','b'],['s','x','o']): ax0.scatter(Y[0][y_train==l], Y[1][y_train==l], c=c, marker=m, label=l,edgecolors='black') ax0.legend(loc='upper right') # Plot the voronoi spaces means = [] for m,target in zip(['s','x','o'],np.unique(y_train)): means.append(np.mean(Y[y_train==target],axis=0)) ax0.scatter(np.mean(Y[y_train==target],axis=0)[0],np.mean(Y[y_train==target],axis=0)[1],marker=m,c='black',s=100) mesh_x, mesh_y = np.meshgrid(np.linspace(-3,3),np.linspace(-4,3)) mesh = [] for i in range(len(mesh_x)): for j in range(len(mesh_x[0])): date = [mesh_x[i][j],mesh_y[i][j]] mesh.append((mesh_x[i][j],mesh_y[i][j])) NN = KNeighborsClassifier(n_neighbors=1) NN.fit(means,['r','g','b']) predictions = NN.predict(np.array(mesh)) ax0.scatter(np.array(mesh)[:,0],np.array(mesh)[:,1],color=predictions,alpha=0.3) plt.show() ``` ```python ```
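The manual pipeline above can be sanity-checked against scikit-learn's built-in LDA. The snippet below is only a cross-check sketch (it assumes the `X_train` and `y_train` objects from the cells above); the library performs the same within/between scatter eigen-decomposition internally, so the two projections should agree up to the sign and scaling of each discriminant axis.

```python
# Cross-check sketch: project the standardized training data with scikit-learn's LDA
# (assumes X_train and y_train defined in the cells above).
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

lda = LinearDiscriminantAnalysis(n_components=2)
Y_sklearn = lda.fit_transform(X_train, y_train)

print(Y_sklearn.shape)  # (124, 2), the same shape as the manual projection Y
```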
9fdec66d323aa7552569a20e1257add5432c51f4
386,558
ipynb
Jupyter Notebook
LDA.ipynb
bbrighttaer/data_science_nbs
21c1b088e758b0cf801bc9c8da87dfd916561163
[ "MIT" ]
null
null
null
LDA.ipynb
bbrighttaer/data_science_nbs
21c1b088e758b0cf801bc9c8da87dfd916561163
[ "MIT" ]
null
null
null
LDA.ipynb
bbrighttaer/data_science_nbs
21c1b088e758b0cf801bc9c8da87dfd916561163
[ "MIT" ]
null
null
null
512.676393
341,664
0.935096
true
5,544
Qwen/Qwen-72B
1. YES 2. YES
0.861538
0.805632
0.694083
__label__eng_Latn
0.549115
0.450919
# POL280 Bayesian Modelling Memo & Codes

## Lecture 1: Introduction (04/13/2017)

### Monte Carlo Simulation

```R
## Monte Carlo Simulation ##
#install.packages("plotrix")
library(plotrix)
library(grid)

## Plot Rectangle and Circle
plot(c(-1, 1), c(-1, 1), type = "n", asp = 1)
rect(-1, -1, 1, 1)
draw.circle(0, 0, 1)

## Conduct Simulation
nsamp <- 10000 # The size of sample
inside <- NA # Storage for those values inside the circle
for(i in 1:nsamp){
  x <- runif(2, -1, 1) ## Draw two values from uniform distribution (-1, 1)
  if (sqrt(x[1] * x[1] + x[2] * x[2]) < 1) { inside[i] <- 1; points(x[1], x[2], col = "orange") }
  if (sqrt(x[1] * x[1] + x[2] * x[2]) > 1) { inside[i] <- 0; points(x[1], x[2], col = "black") }
}
table(inside)
4 * (7848 / 10000) # approximate pi from the share of points that fell inside the circle
```

## Lecture 2: Simple Bayesian Models and Priors (04/20/2017)

### The Monty Hall Problem

Suppose you're on a game show and you're given the choice of three doors: Behind one door is a car; behind the others, goats. You pick a door, say No.1, and the host, who knows what's behind the doors, opens another door, say No.3, which has a goat. He then says to you "Do you want to pick door No.2?" Is it to your advantage to switch your choice?

Let's pick door 1, and Monty then opens door 2 to show us a goat. Let A represent the outcome that the car is behind door 1. Let B represent the event that we're shown a goat behind door 2:

$$P(A|B) = \frac{P(B|A) P(A)}{P(B)}$$

Let's simulate it in R!

```R
## Monty Hall Problem Simulation ##
doors <- c("1", "2", "3") # Three Doors
sim_montyhall <- function(door){
  car <- sample(doors, 1) # define the door with car
  pick <- doors[door] # your pick of the door
  open <- sample(doors[which(doors!=car & doors!=pick)], 1) # define the door opened
  return(pick==car) # return if the car is behind your picked door.
}
sims <- replicate(10000, sim_montyhall(1) ) #repeat the process many times
mean(sims) # The probability
```

0.3372

### Bayesian Inference

#### Do Statistical Inference

In the statistical inference context, A is replaced with $\theta$ (unknown parameters), and B is replaced with $y$ (data). Therefore, the equation becomes:

\begin{align}
P(\theta|y) &= \frac{P(\theta) P(y|\theta)}{P(y)} \\
&\propto P(\theta) L(\theta|y)
\end{align}

Intuitively stated: Posterior Probability (of parameter level) $\propto$ Prior Probability $\times$ Likelihood Function

It's all about **distribution**.

#### Setting Prior

Given the relation above, the **prior matters** most when the **data is sparse** (small sample size). Data dominates the prior when there is enough data.

The variance of the prior matters. As the **variance of the prior distribution increases**, the **impact of data on the posterior distribution increases**.

### Bayesian Mechanics

#### Three Steps of Bayesian Modeling

1. Specify 1) the probability model of **how $y$ was generated**, & 2) the **prior belief** of $\theta$ (probability distribution)
2. **Update $\theta$** by conditioning the probability model on the **data**.
3. Evaluate model fit and sensitivity.

#### First Step (1): Probability Models

An assumption about the probability distribution $p(y | \theta)$ that generated $y$. Defined as a **PDF** (probability density function) **if $y$ is a continuous** random variable, and as a **PMF** (probability mass function) **if $y$ is a discrete** random variable.

$p(y | \theta)$ is also called the **likelihood function**.
It is calculated as:

$$L(\theta | y) = \prod_{i=1}^n p(y_i | \theta)$$

It is easier to work with the natural log of the likelihood:

$$\mathit{l}(\theta | y) = \mbox{log}L(\theta | y)$$

Because the original likelihood often becomes very small, we prefer to deal with more manageable numbers by taking its log.

* The frequentist use of the MLE ($\hat{\theta}$) cannot answer questions such as: $p (\theta > 0)$; $p(\theta \in (a, b))$; or the relative likelihood of competing models.

#### First Step (2): Specifying Priors

**Beta prior with Binomial likelihood** = Beta posterior

\begin{align}
\mbox{Prior } p(\theta) &= \frac{\Gamma(\alpha + \beta)}{\Gamma (\alpha) \Gamma (\beta)} \theta^{\alpha-1} (1-\theta)^{\beta-1} \\
\mbox{Likelihood } &= \begin{pmatrix} n \\ y \end{pmatrix} \theta^y (1-\theta)^{n-y}
\end{align}

Then:

\begin{align}
\mbox{Posterior } &\propto \mbox{Prior} \times \mbox{Likelihood} \\
\pi(\theta | y) &\propto \left\{ \frac{\Gamma(\alpha + \beta)}{\Gamma (\alpha) \Gamma (\beta)} \theta^{\alpha-1} (1-\theta)^{\beta-1} \right\} \times \left\{ \begin{pmatrix} n \\ y \end{pmatrix} \theta^y (1-\theta)^{n-y} \right\} \\
&\propto \theta^{\alpha - 1 + y} (1 - \theta)^{\beta - 1 + n - y}
\end{align}

The solution is:

\begin{align}
\pi (\theta | y) &= \mbox{Beta}( \alpha + y, \beta + n - y) \\
E(\theta | y) &= \frac{\alpha + y}{\alpha + \beta + n}
\end{align}

Try it in R! (Data is not provided)

```R
library(dplyr)
setwd("c:/")
load("Dropbox/Files/baseball.Rda")
#
career_filtered <- career %>% filter(AB >= 500)
m <- MASS::fitdistr(career_filtered$average, dbeta, start = list(shape1 = 1, shape2 = 10))
alpha0 <- m$estimate[1]
beta0 <- m$estimate[2]
#
career_eb <- career %>% mutate(eb_estimate = (H + alpha0) / (AB + alpha0 + beta0))
```

**Gamma prior with Poisson likelihood** = Gamma posterior

\begin{align}
\mbox{Gamma prior } p(\theta) &= \frac{\beta^{\alpha}}{\Gamma (\alpha)} \theta^{\alpha-1} e^{- \beta \theta} \\
\mbox{Poisson PMF } p(y_i | \theta) &= \frac{e^{-\theta} \theta^{y_i}}{y_i !} \\
\mbox{Poisson Likelihood } \mathit{L}(\theta | y) &= \prod_{i=1}^n \frac{e^{-\theta} \theta^{y_i}}{y_i !} \\
&= \frac{e^{-\theta n} \theta^{\sum_{i=1}^{n} y_i} }{y_1 ! y_2 ! \dots y_n !} \\
\pi(\theta | y) &\propto \frac{\beta^{\alpha}}{\Gamma (\alpha)} \theta^{\alpha-1} e^{- \beta \theta} \times \frac{e^{-\theta n} \theta^{\sum_{i=1}^{n} y_i} }{y_1 ! y_2 ! \dots y_n !} \\
&\propto \theta^{\alpha - 1 + \Sigma y} e^{- \theta (\beta + n)} \\
&\propto \mbox{Gamma }(\alpha + \Sigma y, \beta + n)
\end{align}
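As a quick numerical illustration of the Beta-Binomial update above (the numbers are made up for illustration, not course data): with a $\mbox{Beta}(2, 2)$ prior and $y = 7$ successes in $n = 10$ trials,

$$\pi(\theta \mid y) = \mbox{Beta}(2 + 7,\ 2 + 10 - 7) = \mbox{Beta}(9, 5), \qquad E(\theta \mid y) = \frac{9}{9 + 5} \approx 0.64$$

The posterior mean lies between the prior mean ($0.5$) and the MLE ($y/n = 0.7$), and moves toward the data as $n$ grows relative to the prior's pseudo-counts $\alpha + \beta$.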
3c53db73705d73e05d5164eca3e9831d001b8bf4
100,814
ipynb
Jupyter Notebook
notebooks/.ipynb_checkpoints/POL280_Bayes_Memos_Codes-checkpoint.ipynb
gentok/Method_Notes
a7b60e50132fdda764efcfb1e163d1b31b2f99f7
[ "MIT" ]
null
null
null
notebooks/.ipynb_checkpoints/POL280_Bayes_Memos_Codes-checkpoint.ipynb
gentok/Method_Notes
a7b60e50132fdda764efcfb1e163d1b31b2f99f7
[ "MIT" ]
null
null
null
notebooks/.ipynb_checkpoints/POL280_Bayes_Memos_Codes-checkpoint.ipynb
gentok/Method_Notes
a7b60e50132fdda764efcfb1e163d1b31b2f99f7
[ "MIT" ]
null
null
null
286.403409
90,202
0.912324
true
1,956
Qwen/Qwen-72B
1. YES 2. YES
0.879147
0.79053
0.694992
__label__eng_Latn
0.807779
0.453031
# Intro to Deep Learning We're going to continue working with CAP imagery for the second half of this week. Recall the two main guiding questions for this week: - _What_ is in an image (e.g. debris, buildings, etc.)? - _Where_ are these things located _in 3D space_ ? ## Motivation We've already seen how structure from motion can begin to answer the second of those questions, at least in a coarse manner. The first question is more difficult than the second for one crucial reason: it (so far) can not be answered *solely* using image data. Recall that structure from motion was able to leverage image metadata, features and geometric constraints without having to insert any "outside" data. When detecting the components of an image, there is a certain amount of subjectivity that so far requires human input. For example, if you want to detect flooding in an image, there need to be some set of rules that the software can determine "if (rules are satisfied), then (flooding = True)". How can you start to tackle this problem? We're going to make the problem a bit simpler by turning it into a classification problem rather than a localization problem. That is, instead of finding where flooding is in an image, we'll find whether there is flooding at all. With that in mind, let's first try a naive approach and see if we can simply enumerate the rules. Let's look at some flooding images: So what do we see? It looks like flooding is this murky, brown color that covers most of the image. So let's make a rule: if some percentage of the image is this brownish color (you can think of detecting this by creating some sort of index, like you did with the satellite imagery), there is flooding in the image. This might work for the two images we saw, but the real test is seeing if it works with other images. So let's look at another one: This image has a large percentage of it covered by brownish looking water. However, it's clearly not flooding, but rather just a lake. No problem, let's just make another rule: if some percentage of the image is this brownish color *and* there are also lots of buildings (let's also make an index for that), there is flooding in the image. Let's see if this works: Again, this is just some lake (the same lake as before, in fact). At this point, we might be thinking of shifting our strategy a bit. Clearly having someone sit down and enumerate all of the rules is impractical in this case\*. There is simply too much variability in the images to come up with a set of rules that is remotely generalizable. This is one of the major motivations of machine learning. The idea is that, rather than enumerate the rules that determine flooding, we're going to just enumerate the outcomes (e.g. whether there is flooding or not) and develop algorithms for the computer to *learn* what the rules are. There is a rich literature (and a host of undergraduate and graduate courses) on how these algorithms are designed, but on these next couple of days we will instead focus on how to interpret and implement them. \* This isn't to say that enumerating the rules isn't practical in *every* case. It really depends on the characteristics of the data. ## A simple case study We're going to start with a simple dataset. We will use the Breast Cancer Wisconsin (Diagnostic) Data Set (https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)), a dataset that shows whether a tumor is benign or malign and various other features. 
```python
# If the cell below returns an error related to "as_frame" not working,
# you need to update scikit-learn. To do this, uncomment the next
# line and restart the kernel
# !pip install scikit-learn==0.23.1
```

```python
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer

# read the dataframe
df = load_breast_cancer(return_X_y=False, as_frame=True)["data"]
df["target"] = load_breast_cancer(return_X_y=False, as_frame=True)["target"] # benign is 1, malignant is 0
df
```

```python
# for now we're just going to focus on mean radius and mean texture.
# we'll plot one against the other, and color code depending on the diagnosis (benign vs. malignant)
df_b = df[df["target"] == 1]
df_m = df[df["target"] == 0]

# plotting
plt.plot(df_b["mean radius"], df_b["mean texture"], "ro")
plt.plot(df_m["mean radius"], df_m["mean texture"], "bo")
plt.xlabel("mean radius")
plt.ylabel("mean texture")
plt.show()
```

So what do we see? Clearly there are factors beyond the ones plotted that explain whether a tumor is benign. However, it's not difficult to see a pattern here: the further up and to the right a point lies, the more likely the tumor is to be malignant. So as a first attempt, we will have the software learn a *linear classifier*. This classifier will be a line that classifies a tumor as benign if it falls below the line, and malignant otherwise.

```python
from sklearn import svm
from sklearn.model_selection import train_test_split
try:
    from mlxtend.plotting import plot_decision_regions
except:
    !pip install mlxtend
    from mlxtend.plotting import plot_decision_regions

# setting the features and the targets
X = df[["mean radius", "mean texture"]]
y = df["target"]

# splitting training and testing set 80/20
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Initialize SVM classifier
classifier = svm.SVC(kernel='linear')

# Fit data
classifier = classifier.fit(X_train, y_train)

# Printing parameters (the coefficients and the intercept of the decision boundary)
coef = classifier.coef_
print(coef)
print(classifier.intercept_)

# Plot decision boundary
plot_decision_regions(X_train.values, y_train.values.astype(int), clf=classifier, legend=2)
plt.xlabel("mean radius")
plt.ylabel("mean texture")
plt.show()
```

There is a lot going on here, so let's try to break it down. Recall that we are trying to determine whether a tumor is benign using just the mean radius and mean texture. These are called the explanatory variables or *features*, and we refer to them as $X$. The variable we are trying to predict is the prediction variable or *target*, and we refer to it as $y$.

The underlying notion behind machine learning is that patterns that hold in a subset of data will hold in the population as a whole. So in a sense, we care about the performance of the classifier on data that the algorithm has *not* seen, by learning patterns in the data that we have seen. To this end, one of the major paradigms in machine learning is to separate our data into a *training set* and a *testing set*. The training set is a subset of the given data that we will actually provide to the algorithm to learn, while the testing set will be used solely to report the performance on data that the algorithm has not seen. In our case, we set 80% of the data as training data and 20% as testing data.

There are numerous algorithms that can train on data. One of the more popular ones (especially before neural networks were introduced) is the *support vector machine* (SVM).
We will not discuss how it actually does the learning, but suffice to say it takes in the training data and finds the line that minimizes the classification error. This line is called the *decision boundary* and it is defined by the following equation:

\begin{equation}
C_1(mean\ radius) + C_2(mean\ texture) + C_0 = 0
\end{equation}

Where $C_1$ and $C_2$ are the coefficients of the linear classifier (`classifier.coef_`) and $C_0$ is the intercept (`classifier.intercept_`). So really, the function of the SVM is to find these coefficients. The algorithm then does classification as follows:

\begin{equation}
y_{pred} =
\begin{cases}
1 & \text{if } C_1(mean\ radius) + C_2(mean\ texture) + C_0 \geq 0\\
0, & \text{otherwise}
\end{cases}
\end{equation}

### Exercise

- Look at the first 5 samples in the testing data set. What is the predicted target of each of the 5 samples?
- Look at the corresponding targets. Are the predictions accurate? What proportion of the predicted values are correctly predicted?
- Which of the two coefficients is most important? Why?

```python

```

How good is our classifier? We can look at the performance of our classifier on the testing set to find out. Of course, an important metric to look out for is the accuracy:

\begin{equation}
\text{Accuracy} = \frac{\#\ (y_{true} == y_{pred})}{\# \ \text{samples}}
\end{equation}

However, for most applications it is also worth understanding what your *false positive* and *false negative* rate is. A false positive is a sample that is classified as a 1 but is in reality a 0, and a false negative is the opposite\*. The false positive rate is defined as $\frac{\# \text{False positives}}{\# \ \text{False positives + True negatives}}$, while the false negative rate is defined as $\frac{\# \text{False negatives}}{\# \ \text{False negatives + True positives}}$.

**Why would this be a metric we care about, especially in this case?**

<details>
  <summary>ANSWER</summary>
  Imagine your tumor is predicted as malignant but is in fact benign. There is certainly now a cost you incur in further tests or surgeries, but ultimately it is a manageable cost. Now imagine if you predicted a tumor as benign even though it is malignant. Here, the mistake of the classifier could certainly cost you your life.
</details>

It turns out there is a useful tool called a *confusion matrix* that handily lays it out for you.

\* It's worth putting some effort into what we call positives and negatives. Here, we use the term positive to mean 1 (benign) and negative to mean 0 (malignant). However, anyone that has had a test of any sort knows that positive usually means that you have the disease (in our case, a malignant tumor). A lot of data science is communicating results, so something worth thinking about.

```python
from sklearn.metrics import plot_confusion_matrix

predictions = classifier.predict(X_test)
accuracy = np.sum(predictions == y_test)/np.size(predictions)
print(accuracy)

matrix = plot_confusion_matrix(classifier, X_test, y_test, cmap=plt.cm.Blues, normalize='true')
plt.title('Confusion matrix for our classifier')
plt.show(matrix)
plt.show()
```

As a final note, we used two variables (radius and texture) because it made it possible to visualize. However, there is nothing preventing us from using the rest of the data set columns as part of our features. While visualization in more than 2D is much more difficult, the underlying principles are exactly the same.
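As a small illustration of that point, here is a sketch (reusing `df`, `svm` and `train_test_split` from the cells above) that retrains the same linear SVM on all of the dataset's feature columns and reports its test accuracy; the exact number will vary with the random split.

```python
# Sketch: same linear SVM, but trained on every feature column instead of two
# (assumes df, svm and train_test_split from the cells above).
X_all = df.drop(columns="target")
y_all = df["target"]
Xa_train, Xa_test, ya_train, ya_test = train_test_split(X_all, y_all, test_size=0.2)

clf_all = svm.SVC(kernel="linear").fit(Xa_train, ya_train)
print("Test accuracy with all features:", clf_all.score(Xa_test, ya_test))
```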
### Exercise Explore this link which contains a number of classification datasets: https://archive.ics.uci.edu/ml/datasets.php?format=&task=cla&att=&area=&numAtt=&numIns=&type=&sort=nameUp&view=table. Using an SVM, attempt to predict the class of each sample. Consider the following: - While multiclass classification is possible, limit yourself thus far to datasets that have one class. Make sure that y is set up with two distinct numbers for the classes (e.g. 0 and 1) - Make sure that your dataset does not have missing values. If it does, handle them appropriately (e.g. by substituting it with 0 or with the mean of the column). How does your classifier perform? Report the accuracy and confusion matrix. What could be done to improve the performance? ```python ``` ## Deep learning So far, we've worked exclusively with linear decision boundaries. However, usually data is much more complicated (as you should have seen from the exercise). Therefore, we might want to look for something that has higher predictive power. One of the most popular tools in machine learning has been the *neural network*. A neural network is (loosely) inspired by the way neurons are connected and operate. Let's first look at the simplest possible neural network and work our way up: Neural networks are made up of *layers*. In the image above, the neural network has two layers. The first layer is called the *input layer*, and the last layer is called the *output layer*. Each layer is composed of *neurons*. In the input layer, each neuron is one of the features from the training data. For example, the first neuron might be the mean radius, and the second neuron might be the mean texture. We're going to refer to each neuron by $x_{l, n}$, where $l$ is the number of the layer and $n$ is the number of the neuron within the layer. Using this notation, mean radius = $x_{1, 1}$. Each neuron in the input layer is connected to *every* neuron in the following layer by a *weight*. We will denote each weight by $w_{l, i, o}$, where $l$ is the layer at the tail of the arrow, $i$ is the number of the neuron at the tail of the arrow and $o$ is the number of the neuron at the head of the arrow. In the figure above, the color of the arrow corresponds to the value of the weight (blue means negative, red means positive). Every neuron in subsequent layers is equal to the sum of the products of the neurons and the weight that connects them. In this case, the neuron in the output layer is defined as: \begin{equation} x_{2, 1} = x_{1, 1}w_{1, 1, 1} + x_{1, 2}w_{1, 2, 1} \end{equation} Once you have the value of the output layer, you can obtain the predicted classification by applying some function to $x_{2, 1}$. An example of such a function would be similar to what we did previously: \begin{equation} y_{pred} = \begin{cases} 1 & \text{if } x_{2, 1}\geq 0\\ 0, & \text{otherwise} \end{cases} \end{equation} So training a neural network is simply finding the weights $w$ that minimize the error. If you are keen-eyed though, you might have realized that the example above is identical to what we were doing before. Therefore, we still have not arrived at a more powerful classifier. Let's try a more complicated neural network. This network is similar to the previous one, but now we have an additional layer in between the input and output layers. All layers that are between input and output are called *hidden layers*, though functionally they behave in exactly the same way. As a quick concept question, **what is the value of $x_{2, 3}$? 
What about $x_{3, 1}$?** <details> <summary>ANSWER</summary> $x_{2, 3} = x_{1, 1}w_{1, 1, 3} + x_{1, 2}w_{1, 2, 3}$ , $x_{3, 1} = x_{2, 1}w_{2, 1, 1} + x_{2, 2}w_{2, 2, 1} + x_{2, 3}w_{2, 3, 1} + x_{2, 4}w_{2, 4, 1} + x_{2, 5}w_{2, 5, 1}$ </details> Why are we introducing a hidden layer? Notice that each of the neurons in the hidden layer are essentially acting as linear classifiers. By having a weighted sum of these neurons in the output layer, we are arriving at a function that, by virtue of being a sum of lines is not itself linear. One of the biggest results in machine learning is that, as you increase the number of neurons in the hidden layer, you can approximate *any* function, meaning that you can always make a more powerful classifier by just adding more neurons. However, usually this is not computationally efficient. Rather, most of the time it makes more sense to introduce more hidden layers as opposed to more neurons, as shown in the image below: The added "depth" as a result of more layers is where the "deep" in "deep learning" comes. As computational resources have increased, scientists have increasingly resorted to adding more and more layers to their neural networks. ### Exercise Let's start thinking about the LADI dataset. We want to use deep learning to detect whether there is flooding in the image. - Using the paradigm above, what would be the features of the dataset? What are the targets? - Propose a way that you would input the features into a neural network? - What problems might you run into if you directly put an image into the neural network above? ```python ``` ## Convolution Convolution is a fairly old technique in image processing. Convolution works by multiplying a matrix (called a *kernel*) with portions of the image and adding up the result. An animation of how this works is shown below. The result of applying different kernels is also shown: As you can see, different kernels can pick up different features depending on the specific coefficients of the kernel. In the late 1980s, scientists first thought to use neural networks to learn the kernel values themselves, which created the foundation for the *convolutional* neural network (CNN). These complement the hidden layers from the previous architecture with new convolutional layers, which perform convolution on the previous layer. In so doing, these networks conserve the relationship between nearby pixels and perform very well in image classification. ## The LADI dataset As we already saw, the LADI dataset contains CAP images as well as crowdsourced labels for a number of different categories. Today we're going to start the multi-day exercise of applying deep learning to the LADI dataset in order to do classification of flooding. This section borrows heavily from the following tutorial: https://github.com/LADI-Dataset/ladi-tutorial ### Data reading and cleaning We first have to go through some work to make sure that we only have the components of the dataset that we want. 
```python # read the tsv file with the labels file = pd.read_csv("http://ladi.s3-us-west-2.amazonaws.com/Labels/ladi_aggregated_responses_url.tsv",delimiter='\t',header='infer') file ``` ```python # Strip off bracket and comma from the Answer catagory file["Answer"] = file["Answer"].str.strip('[|]') file["Answer"] = file["Answer"].str.split(",",expand = True) # Extract labels with damage and infrastructure categories label_damage_infra = file[file['Answer'].str.contains('damage|infrastructure',na=False,case=False)] #Filter out infrastructure label with label 'none' label_clean = label_damage_infra[~label_damage_infra['Answer'].str.contains('none',na=False,case=False)] # Extract data with label does contain 'flood' label_flood = label_clean[label_clean['Answer'].str.contains('flood',na=False,case=False)] # Extract url data with the label does contain 'flood' im_flood_lst = label_flood['url'].unique().tolist() # Extract url data with the label does not contain 'flood' label_notflood = label_damage_infra[~label_damage_infra['url'].isin(im_flood_lst)] im_not_flood_lst = label_notflood['url'].unique().tolist() ``` Now that we have the images we're interested in, we're going to generate the true/false labels ```python # Load ladi_images_metadata.csv metadata = pd.read_csv('http://ladi.s3-us-west-2.amazonaws.com/Labels/ladi_images_metadata.csv') # Generate flood and non-flood metadata flood_metadata = metadata[metadata['url'].isin(im_flood_lst)] not_flood_metadata = metadata[metadata['url'].isin(im_not_flood_lst)] # Generate url and s3_path features into list flood_meta_lst = flood_metadata['url'].tolist() flood_meta_s3_lst = flood_metadata['s3_path'].tolist() not_flood_meta_lst = not_flood_metadata['url'].tolist() not_flood_meta_s3_lst = not_flood_metadata['s3_path'].tolist() # Check how many images do not have metadata but have human labels human_label_only = list(set(im_flood_lst) - set(flood_meta_lst)) print(len(human_label_only)) human_label_non_flood = list(set(im_not_flood_lst) - set(not_flood_meta_lst)) print(len(human_label_non_flood)) ``` For the purposes of this exercise, we're going to take just a sample of the overall imagery. We're going to take 100 images that are labeled as flood and 100 that are not: ```python from random import sample # sampling flood_tiny_lst = sample(flood_meta_s3_lst, 100) not_flood_tiny_lst = sample(not_flood_meta_s3_lst, 100) flood_tiny_metadata = metadata[metadata['s3_path'].isin(flood_tiny_lst+not_flood_tiny_lst)] # creating the new datasets flood_data = [] for path in flood_tiny_lst: data_lst = [] data_lst.append(path) data_lst.append(True) flood_data.append(data_lst) not_flood_data = [] for path in not_flood_tiny_lst: data_lst = [] data_lst.append(path) data_lst.append(False) not_flood_data.append(data_lst) label_data = flood_data+not_flood_data label_df = pd.DataFrame(label_data, columns = ['s3_path', 'label']) flood_tiny_metadata.to_csv('flood_tiny_metadata.csv') label_df.to_csv('flood_tiny_label.csv') ``` ### Exercise Take a look at the LADI csv files. Find some classification task that you find meaningful (e.g. detecting a bridge in an image). Create a csv file like the ones above for the metadata and the label. You might have to do some additional steps in data cleaning, depending on your choice ```python ```
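As a starting point for this exercise, here is a hedged sketch for one possible choice of task, detecting bridges. It mirrors the flood pipeline above and assumes that the `label_clean`, `metadata` and `sample` objects from the previous cells are available, and that 'bridge' actually occurs among the crowdsourced answers.

```python
# Hypothetical sketch: build a bridge / no-bridge label file, mirroring the flood pipeline above.
label_bridge = label_clean[label_clean['Answer'].str.contains('bridge', na=False, case=False)]
bridge_urls = label_bridge['url'].unique().tolist()

bridge_meta = metadata[metadata['url'].isin(bridge_urls)]
not_bridge_meta = metadata[~metadata['url'].isin(bridge_urls)]

# take a small balanced sample of s3 paths for each class
bridge_data = [[p, True] for p in sample(bridge_meta['s3_path'].tolist(), 100)]
not_bridge_data = [[p, False] for p in sample(not_bridge_meta['s3_path'].tolist(), 100)]

bridge_label_df = pd.DataFrame(bridge_data + not_bridge_data, columns=['s3_path', 'label'])
bridge_label_df.to_csv('bridge_tiny_label.csv')
```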
9ab61acac97c97701ee41271a37d611215571081
26,422
ipynb
Jupyter Notebook
10-Intro_to_deep_learning.ipynb
bwsi-hadr/10-Intro_to_deep_learning
571042b59823a57aa08d7cfa808acf53384f86c8
[ "MIT" ]
null
null
null
10-Intro_to_deep_learning.ipynb
bwsi-hadr/10-Intro_to_deep_learning
571042b59823a57aa08d7cfa808acf53384f86c8
[ "MIT" ]
null
null
null
10-Intro_to_deep_learning.ipynb
bwsi-hadr/10-Intro_to_deep_learning
571042b59823a57aa08d7cfa808acf53384f86c8
[ "MIT" ]
1
2021-06-23T14:13:13.000Z
2021-06-23T14:13:13.000Z
55.508403
730
0.665998
true
4,864
Qwen/Qwen-72B
1. YES 2. YES
0.70253
0.7773
0.546077
__label__eng_Latn
0.999087
0.107048
# Music Machine Learning - Bayesian inference ### Author: Philippe Esling (esling@ircam.fr) In this course we will cover 1. An introduction to [Bayesian inference](#bayesian) 2. A formal introduction to [Variational Auto-Encoders](#vae) (VAEs) 3. An explanation of the [implementation](#implem) of VAEs 4. Some [modifications and tips to improve the reconstruction](#improve) of VAEs **(exercise)** <a id="recap"> </a> ## Introduction on Bayesian inference Here, we discuss *Bayesian inference* and how to use the **Bayes theorem** to perform classification. First, we will see how to derive *estimators* for the different properties of the distributions, and verify that these are *unbiased*. Then, we will implement the **Maximum Likelihood** Estimators (MLE) in order to perform classification of a dataset. First, we will assess the case where parameters are known to implement the discriminant function and decision rule. Then, we will perform the MLE to obtain the means and covariance matrix for each class. To understand these concepts graphically, we will rely on both the `scikit-learn` and `Pytorch` libraries (with the `probability` package). ```python import torch import torch.distributions as distribution import torch.distributions.transforms as transform import matplotlib.pyplot as plt import numpy as np import seaborn as sns from helper_plot import hdr_plot_style, plot_gaussian_ellipsoid hdr_plot_style() ``` ### Bayesian framework Two alternative interpretations of probability can be considered * **Frequentist** (*classical* approach), assumes that probability is the *long-term frequency of events*. This becomes harder to interpret when events have no long-term frequency (eg. probability in an election, which happens only once). In that case, frequentists consider the *frequency of occurrences across alternative realities*, which defines the probability. * **Bayesian** interprets probability as measure of *believability in an event*. Therefore, a probability is measure of *belief*, or confidence, of an event occurring. This definition leaves room for conflicting beliefs based on the different *information* about the world. Hence, bayesian inference is mostly based on *updating your beliefs* after considering new *evidence*. To align with probability notation, we denote a belief about event $a$ as $p\left(a\right)$, called the *prior probability* of an event to occur. We denote the updated belief as $ p\left( a \mid e \right) $, interpreted as the probability of $a$ *given* the new evidence $e$, called the *posterior probability*. The prior belief is not completely removed, but we *re-weight this prior* to incorporate new evidence $e$ (i.e. we put more weight, or confidence, on some beliefs versus others). By introducing prior uncertainty about events, we admit that any guess we make can be wrong. As we gather an *infinite* amount of evidence $N \rightarrow \infty$, the Bayesian results (often) align with frequentist results. Hence for small $N$, inference is *unstable*, where frequentist estimates have more variance and larger confidence intervals. However, by introducing a prior, and returning probabilities, we *preserve the uncertainty* that reflects the instability on a small $N$ dataset. 
Updating the *prior belief* to obtain our *posterior belief* is done via Bayes' Theorem

$$
\begin{equation}
p\left( a \mid e \right) = \frac{ p\left(e \mid a\right) p\left(a\right) } {p\left(e\right) }
\end{equation}
$$

We see that our posterior belief of event $a$ given the new evidence $e$ is proportional to ($\propto$) the *likelihood* of observing this particular evidence $e$ given the event $a$ ($p\left(e \mid a\right)$) multiplied by our prior belief in that particular event $a$ ($p\left(a\right)$).

### Using Bayesian classification (in `scikit-learn`)

The Bayesian classification methods rely on Bayes's theorem, where we are interested in finding the probability of a label $y$ given some observed features, which we can write as $p(y~|~{\rm features})$. Bayes's theorem tells us how to express this in terms of quantities that we can compute more directly

$$ p(y~|~{\rm features}) = \frac{p({\rm features}~|~y)p(y)}{p({\rm features})} $$

If we are trying to decide between two labels ($y_1$ and $y_2$), then one way to make this decision is to compute the ratio of the posterior probabilities for each label

$$ \frac{p(y_1~|~{\rm features})}{p(y_2~|~{\rm features})} = \frac{p({\rm features}~|~y_1)}{p({\rm features}~|~y_2)}\frac{p(y_1)}{p(y_2)} $$

All we need now is some model by which we can compute $p({\rm features}~|~y_i)$ for each label. Such a model is called a *generative model* because it specifies the hypothetical random process that generates the data. Specifying this generative model for each label is the main piece of the training of such a Bayesian classifier. The general version of such a training step is a very difficult task, but we can make it simpler through the use of some simplifying assumptions about the form of this model.

If we make *very naive assumptions* (called *Naive Bayes*) about the generative model for each label, we can find a rough approximation of the generative model for each class, and then proceed with the Bayesian classification.

#### Gaussian Naive Bayes

Perhaps the easiest naive Bayes classifier to understand is Gaussian naive Bayes. In this classifier, the assumption is that *data from each label is drawn from a simple Gaussian distribution*. Imagine that we have the following data

```python
hdr_plot_style()
from sklearn.datasets import make_blobs
X, y = make_blobs(200, 2, centers=2, random_state=2, cluster_std=2.2)
plt.figure(figsize=(10, 8));
plt.scatter(X[:, 0], X[:, 1], c=y, s=80, cmap='RdBu', edgecolor='w');
plt.grid(True)
```

One extremely fast way to create a simple model is to assume that the data is described by a Gaussian distribution with no covariance between dimensions. This model can be fit by simply finding the mean and standard deviation of the points within each label, which is all you need to define such a distribution. With this generative model for each class, we have a simple recipe to compute the likelihood $P({\rm features}~|~L_1)$ for any data point, and thus we can quickly compute the posterior ratio and determine which label is the most probable for a given point.
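Before reaching for the built-in estimator, here is a minimal hand-rolled sketch of that recipe (assuming the `X` and `y` arrays from the `make_blobs` cell above): fit one axis-aligned Gaussian per class, then score a point by its prior times the product of per-dimension likelihoods.

```python
# Hand-rolled Gaussian naive Bayes sketch (assumes X, y from make_blobs above).
from scipy.stats import norm

classes = np.unique(y)
params = {c: (X[y == c].mean(axis=0), X[y == c].std(axis=0)) for c in classes}
priors = {c: np.mean(y == c) for c in classes}

def nb_predict(x):
    # unnormalized posterior: prior * product of per-dimension Gaussian densities
    scores = {c: priors[c] * np.prod(norm.pdf(x, loc=mu, scale=sd))
              for c, (mu, sd) in params.items()}
    return max(scores, key=scores.get)

print(nb_predict(X[0]), y[0])  # predicted label vs. true label for the first point
```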
This procedure is implemented in Scikit-Learn's ``sklearn.naive_bayes.GaussianNB`` estimator

```python
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X, y);
```

Now let's generate some new data and predict the label:

```python
rng = np.random.RandomState(0)
Xnew = [-6, -14] + [14, 18] * rng.rand(2000, 2)
ynew = model.predict(Xnew)
```

Now we can plot the data and see where the decision boundary is

```python
fig = plt.figure(figsize=(10,8));
ax = fig.add_subplot(111)

# predict the classification probabilities on a grid
xlim = X[:, 0].min() - .5, X[:, 0].max() + .5
ylim = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 71), np.linspace(ylim[0], ylim[1], 81))
Z = model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)

ax.contourf(xx, yy, Z, cmap=plt.cm.RdBu, alpha=.8)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu', edgecolor='w');
```

We see a slightly curved boundary in the classifications (generally, the boundary in Gaussian naive Bayes is quadratic). A nice piece of this Bayesian formalism is that it naturally allows for probabilistic classification, which we can compute using the ``predict_proba`` method:

```python
yprob = model.predict_proba(Xnew)
yprob[-8:].round(2)
```

The columns give the posterior probabilities of the first and second label, respectively. If you are looking for estimates of uncertainty in your classification, Bayesian approaches like this can be useful. Of course, the final classification will only be as good as the model assumptions that lead to it, which is why Gaussian naive Bayes often does not produce very good results.

## Bayesian inference

Suppose we have a coin and want to estimate the probability of heads ($p$) for it. The coin is Bernoulli distributed:

$$
\begin{equation}
\phi(x)= p^x (1-p)^{(1-x)}
\end{equation}
$$

where $x$ is the outcome, *1* for heads and *0* for tails. Based on $n$ *independent* flips, we have the likelihood:

$$
\begin{equation}
\mathcal{L}(p|\mathbf{x})= \prod_{i=1}^n p^{ x_i }(1-p)^{1-x_i}
\end{equation}
$$

(the independent-trials assumption allows us to just substitute everything into $ \phi(x)$). The idea of *maximum likelihood* is to maximize this as a function of $p$, given all of the $x_i$ data. This means that our estimator, $\hat{p}$, is a function of the observed $x_i$ data, and as such, is a random variable with its own distribution.

**Defining the estimator**

The only way to know for sure that our estimator is correctly defined is to check if the estimator is unbiased, namely, if

$$
\mathbb{E}(\hat{p}) = p
$$

### Understanding this behavior with `pytorch`

Without solving the previous derivation by hand, we can explore this behavior numerically. Let's say that we have 100 samples from a Bernoulli distribution.

```python
bernoulli = distribution.Bernoulli(0.2)
sample = bernoulli.sample((100, ))
sns.distplot(sample)
plt.title("Samples from a Bernoulli (p = .2)")
torch.mean(sample)
```

Now we can use our estimator and define our problem more formally, by defining our input samples as a dataset of observations $x$; we are trying to model this set as a `Bernoulli` distribution, for which the $p$ parameter is unknown (here defined as a `Variable`).
```python
from torch.autograd import Variable

x = Variable(sample).type(torch.FloatTensor)
p = Variable(torch.rand(1), requires_grad=True)
```

Now we can use our estimator to gradually compute the Maximum Likelihood, in order to uncover the estimated probability of the underlying distribution, solely based on observing the samples.

```python
learning_rate = 0.00002
for t in range(1000):
    NLL = -torch.sum(x*torch.log(p) + (1-x)*torch.log(1-p))
    NLL.backward()
    if t % 200 == 0:
        print("loglik = %.4f - p = %.4f - dL/dp = %.4f"%(NLL.data.numpy(), p.data.numpy(), p.grad.data.numpy()))
    p.data -= learning_rate * p.grad.data
    p.grad.data.zero_()
print('Final probability p =', p.data[0])
```

***

**Exercise**

1. Compute the *log-likelihood* $J=\log(\mathcal{L}(p \mid \mathbf{x}))$ of our given problem
2. Based on this, compute its derivative $ \frac{dJ}{dp} $
3. Solve it to find the estimator $\hat{p}$
4. Verify that this estimator is unbiased $ \mathbb{E}(\hat{p}) = p $
5. Compute the variance of the estimator $ \mathbb{E}\left(\hat{p}^2\right) $

***

```python
x = Variable(sample).type(torch.FloatTensor)
p = torch.rand(1)
sample = bernoulli.sample((100, ))

######################
# YOUR CODE GOES HERE
######################

# Questions:
# - Why does the formula above still work anyway?
# - What do we do if we do not know the law followed by the distribution?
# - _Model boosting_: a method where one chooses among several models
#   according to their relevance to the problem a posteriori.

ll = lambda p, x: np.sum(x*np.log(p) + (1-x)*np.log(1-p))
dll_dp = lambda p, x: np.sum(x/p - (1-x)/(1-p))

learning_rate = 0.00002
for t in range(1000):
    nll = -ll(p.data.numpy(),x.data.numpy())
    grad = -dll_dp(p.data.numpy(), x.data.numpy())
    if t % 200 == 0:
        print("loglik = %.4f - p = %.4f - dL/dp = %.4f"%(nll, p, grad))
    p.data -= learning_rate * grad
print('Final probability p =', p.data[0])

bias = torch.abs(p - torch.mean(sample))
diff = torch.pow(p-x, 2)
variance = torch.sum(diff)
print(f"Variance: {variance}")
print(f"Bias: {bias}")
```

### Full estimator density

In general, computing the mean and variance of the estimator is insufficient to characterize the underlying probability density of $\hat{p}$, except if we knew that $\hat{p}$ were normally distributed. This is where the [*central limit theorem*](http://mathworld.wolfram.com/CentralLimitTheorem.html) comes in. Indeed, the form of the estimator implies that $\hat{p}$ is normally distributed, but only *asymptotically*, which doesn't quantify how many samples $n$ we need. Unfortunately, in the real world, each sample may be precious. Hence, to write out the full density for $\hat{p}$, we first have to ask what is the probability that the estimator will equal a specific value such as

$$
\begin{equation}
\hat{p} = \frac{1}{n}\sum_{i=1}^n x_i = 0
\end{equation}
$$

This can only happen when $x_i=0$, $\forall i$. The corresponding probability can be computed from the density

$$
\begin{equation}
f(\mathbf{x},p)= \prod_{i=1}^n \left(p^{x_i} (1-p)^{1-x_i} \right)
\end{equation}
$$

$$
\begin{equation}
f\left(\sum_{i=1}^n x_i = 0,p\right)= \left(1-p\right)^n
\end{equation}
$$

Likewise, if $\lbrace x_i \rbrace$ has one $i^{th}$ value equal to one, then

$$
\begin{equation}
f\left(\sum_{i=1}^n x_i = 1,p\right)= n p \prod_{i=1}^{n-1} \left(1-p\right)
\end{equation}
$$

where the $n$ comes from the $n$ ways to pick one value equal to one from the $n$ elements $x_i$.
Continuing this way, we can construct the entire density as $$ \begin{equation} f\left(\sum_{i=1}^n x_i = k,p\right)= \binom{n}{k} p^k (1-p)^{n-k} \end{equation} $$ where the term on the left is the binomial coefficient of $n$ things taken $k$ at a time. This is the binomial distribution and it's not the density for $\hat{p}$, but rather for $n\hat{p}$. We'll leave this as-is because it's easier to work with below. We just have to remember to keep track of the $n$ factor. ## Maximum Likelihood implementation Maximum Likelihood Estimate (MLE) allows to perform typical statistical pattern classification tasks. In the cases where **probabilistic models and parameters are known**, the design of a Bayes' classifier is rather easy. However, in real applications, we are rarely given this information and this is where the MLE comes into play. MLE still **requires partial knowledge** about the problem. We have to assume that the **model of the class conditional densities is known** (usually Gaussian distributions). Hence, Using MLE, we want to estimate the values of the parameters of a given distribution for the class-conditional densities, for example, the *mean* and *variance* assuming that the class-conditional densities are *normal* distributed (Gaussian) with $$ \begin{equation} p(\pmb x \mid y_i) \sim N(\mu, \sigma^2) \end{equation} $$ ### Parameters known Imagine that we want to classify data consisting of two-dimensional patterns, $\pmb{x} = [x_1, x_2] \in \mathbb{R}^{2}$ that could belong to 1 out of 3 classes $y_1,y_2,y_3$. Let's assume the following information about the model where we use continuous univariate normal (Gaussian) model for the class-conditional densities $$ \begin{equation} p(\pmb x \mid y_j) \sim N(\pmb \mu \mid \Sigma) = \frac{1}{(2\pi)^{d/2} \mid \Sigma|^{1/2}} exp \bigg[ -\frac{1}{2}(\pmb x - \pmb \mu)^t \Sigma^{-1}(\pmb x - \pmb \mu) \bigg] \end{equation} $$ Furthermore, we consider for this first problem that we know the distributions of the classes, ie. their mean and covariances. $$ \begin{equation} p([x_1, x_2]^t \mid y_1) ∼ N([0,0],3I), \\ p([x_1, x_2]^t \mid y_2) ∼ N([9,0],3I), \\ p([x_1, x_2]^t \mid y_3) ∼ N([6,6],4I), \end{equation} $$ Therefore, the means of the sample distributions for 2-dimensional features are defined as $$ \begin{equation} \pmb{\mu}_{\,1} = \bigg[ 0, 0 \bigg], \pmb{\mu}_{\,2} = \bigg[ 9, 0 \bigg], \pmb{\mu}_{\,3} = \bigg[ 6, 6 \bigg] \end{equation} $$ The **covariance matrices** for the statistically independent and identically distributed ('i.i.d') features $$ \begin{array}{ccc} \Sigma_1 = \bigg[ \begin{array}{cc} 3 & 0\\ 0 & 3 \\ \end{array} \bigg], \Sigma_2 = \bigg[ \begin{array}{cc} 3 & 0\\ 0 & 3 \\ \end{array} \bigg], \Sigma_3 = \bigg[ \begin{array}{cc} 4 & 0\\ 0 & 4 \\ \end{array} \bigg] \end{array}$$ Finally, we consider that all classes have an **equal prior probability** $$p(y_1) = p(y_2) = p(y_3) = \frac{1}{3}$$ *** **Exercise** 1. Generate some data (samples from the multivariate Gaussians) following classes distributions 2. 
Plot the class-dependent data *** ```python # 7.0 - Generate data with known parameters nb_patterns = 100 # Generate random patterns for class 1 mu1 = np.array([0,0]) cov1 = np.array([[1,0],[0,1]]) # Generate random patterns for class 2 mu2 = np.array([9,0]) cov2 = np.array([[3,0],[0,3]]) # Generate random patterns for class 3 mu3 = np.array([6,6]) cov3 = np.array([[7,0],[0,7]]) ###################### # YOUR CODE GOES HERE ###################### # Prepare concatenated versions of class properties mu_vals = {}; mu_vals[0] = np.array([mu1]).transpose() mu_vals[1] = np.array([mu2]).transpose() mu_vals[2] = np.array([mu3]).transpose() cov_vals = {} cov_vals[0] = cov1 cov_vals[1] = cov2 cov_vals[2] = cov3 # Plot the corresponding data plt.figure(figsize=(12, 8)) plt.scatter(x1samples[:, 0], x1samples[:, 1], s=60, marker='o', c=[0, 0.8, 0], edgecolors='w') plt.scatter(x2samples[:, 0], x2samples[:, 1], s=60, marker='o', c=[0, 0, 0.8], edgecolors='w') plt.scatter(x3samples[:, 0], x3samples[:, 1], s=60, marker='o', c=[0.8, 0, 0], edgecolors='w') h = plot_gaussian_ellipsoid(mu1, cov1, 2, color=[0, 0.8, 0]); h = plot_gaussian_ellipsoid(mu2, cov2, 2, color=[0, 0, 0.8]); h = plot_gaussian_ellipsoid(mu3, cov3, 2, color=[0.8, 0, 0]); plt.title('Training Dataset') plt.ylabel('x2') plt.xlabel('x1') ``` Here, our **objective function** is to maximize the discriminant function $g_i(\pmb x)$, which we define as the posterior probability to perform a **minimum-error classification** (Bayes classifier). $$ \begin{equation} g_1(\pmb x) = p(y_1 \mid \pmb{x}), \quad g_2(\pmb{x}) = p(y_2 \mid \pmb{x}), \quad g_3(\pmb{x}) = p(y_3 \mid \pmb{x}) \end{equation} $$ So that our decision rule is to choose the class $\omega_i$ for which $g_i(\pmb x)$ is max., where $$ \begin{equation} \quad g_i(\pmb{x}) = \pmb{x}^{\,t} \bigg( - \frac{1}{2} \Sigma_i^{-1} \bigg) \pmb{x} + \bigg( \Sigma_i^{-1} \pmb{\mu}_{\,i}\bigg)^t \pmb x + \bigg( -\frac{1}{2} \pmb{\mu}_{\,i}^{\,t} \Sigma_{i}^{-1} \pmb{\mu}_{\,i} -\frac{1}{2} ln(\left|\Sigma_i\right|)\bigg) \end{equation} $$ *** **Exercise** 1. Implement the discriminant function 2. Implement the decision rule (classifier) 3. Classify the data generated in the previous exercise 4. Plot the confusion matrix 5. Calculate the empirical error *** ```python import operator def discriminant_function(x_vec, mu_vec, cov_mat): # Calculates the value of the discriminant function for a dx1 dimensional # sample given the covariance matrix and mean vector. # # x_vec: A dx1 dimensional numpy array representing the sample. # cov_mat: numpy array of the covariance matrix. # mu_vec: dx1 dimensional numpy array of the sample mean. # # Returns a float value g as result of the discriminant function. ###################### # YOUR CODE GOES HERE ###################### return float(g) def classify_data(x_vec, g, mu_vecs, cov_mats): # Classifies an input sample into 1 out of 3 classes determined by # maximizing the discriminant function g_i(). # Keyword arguments: # x_vec: A dx1 dimensional numpy array representing the sample. # g: The discriminant function. # mu_vecs: A list of mean vectors as input for g. # cov_mats: A list of covariance matrices as input for g. # # Returns the max probability and class id. 
    ######################
    # YOUR CODE GOES HERE
    ######################
    return maxVal, maxId

x1classes = np.zeros((nb_patterns, 1))
conf_matrix = np.zeros((3, 3))
for i in range(nb_patterns):
    x, g = classify_data(x1samples[i, :], discriminant_function, mu_vals, cov_vals);
    x1classes[i] = g;
    conf_matrix[0, g] = conf_matrix[0, g] + 1;

x2classes = np.zeros((nb_patterns, 1))
for i in range(nb_patterns):
    x, g = classify_data(x2samples[i, :], discriminant_function, mu_vals, cov_vals);
    x2classes[i] = g;
    conf_matrix[1, g] = conf_matrix[1, g] + 1;

x3classes = np.zeros((nb_patterns, 1))
for i in range(nb_patterns):
    x, g = classify_data(x3samples[i, :], discriminant_function, mu_vals, cov_vals);
    x3classes[i] = g;
    conf_matrix[2, g] = conf_matrix[2, g] + 1;

print('%16s \t %s \t %s \t %s\n' % (' ', 'class 1', 'class 2', 'class 3'));
print('%16s \t %f \t %f \t %f\n' % ('class 1', conf_matrix[0, 0], conf_matrix[0, 1], conf_matrix[0, 2]));
print('%16s \t %f \t %f \t %f\n' % ('class 2', conf_matrix[1, 0], conf_matrix[1, 1], conf_matrix[1, 2]));
print('%16s \t %f \t %f \t %f\n' % ('class 3', conf_matrix[2, 0], conf_matrix[2, 1], conf_matrix[2, 2]));
```

### Unknown parameters

In contrast to the previous case, let us assume that we only know the form of the class-conditional densities $p (\pmb x \mid y_i)$ and the number of their parameters, and we want to use Maximum Likelihood Estimation (MLE) to estimate the values of these parameters from the training data.

Given the information about our model (the data is normally distributed), the two parameters to be estimated for each class are $\pmb \mu_i$ and $\pmb \Sigma_i$, which are summarized by the parameter vector

$$
\begin{equation}
\pmb \theta_i = \bigg[ \begin{array}{c} \ \theta_{i1} \\ \ \theta_{i2} \\ \end{array} \bigg]= \bigg[ \begin{array}{c} \pmb \mu_i \\ \pmb \Sigma_i \\ \end{array} \bigg]
\end{equation}
$$

For the Maximum Likelihood Estimate (MLE), we assume that we have a set of samples $D = \left\{ \pmb x_1, \pmb x_2,..., \pmb x_n \right\}$ that are *i.i.d.* (independent and identically distributed, drawn with probability $p(\pmb x \mid y_i, \pmb \theta_i)$). Thus, we can **work with each class separately** and omit the class labels, so that we write the probability density as $p(\pmb x \mid \pmb \theta)$.

**Likelihood of $\pmb \theta$**

Thus, the probability of observing $D = \left\{ \pmb x_1, \pmb x_2,..., \pmb x_n \right\}$ is

$$
\begin{equation}
p(D \mid \pmb \theta ) = p(\pmb x_1 \mid \pmb \theta ) \cdot p(\pmb x_2 \mid \pmb \theta ) \cdot ... \cdot p(\pmb x_n \mid \pmb \theta ) = \prod_{k=1}^{n} p(\pmb x_k \mid \pmb \theta )
\end{equation}
$$

where $p(D \mid \pmb \theta )$ is also called the ***likelihood of $\pmb \theta$***.

We know that $p([x_1,x_2]^t) \sim N(\pmb \mu,\pmb \Sigma)$ (remember that we dropped the class labels, since we are working with each class separately).
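Before deriving the estimators analytically, it can help to see the likelihood of $\pmb \theta$ as a concrete number. The snippet below is an added illustration (it is not part of the original exercise code and assumes SciPy is available): it evaluates the log-likelihood of a small i.i.d. Gaussian sample for two candidate parameter vectors, showing that parameters close to the generating ones receive a higher likelihood.

```python
# Added illustration: evaluating log p(D | theta) for candidate parameters
# of an i.i.d. Gaussian sample (assumes scipy is installed).
import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
D = rng.multivariate_normal(mean=[6, 6], cov=4 * np.eye(2), size=50)

def log_likelihood(D, mu, cov):
    # log p(D | theta) = sum_k log p(x_k | theta) for i.i.d. samples
    return np.sum(multivariate_normal.logpdf(D, mean=mu, cov=cov))

print(log_likelihood(D, mu=[6, 6], cov=4 * np.eye(2)))  # parameters close to the truth
print(log_likelihood(D, mu=[0, 0], cov=3 * np.eye(2)))  # poor candidate: much lower value
```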
And the mutlivariate normal density is given as $$ \begin{equation} \quad \quad p(\pmb x) = \frac{1}{(2\pi)^{d/2} |\Sigma|^{1/2}} exp \bigg[ -\frac{1}{2}(\pmb x - \pmb \mu)^t \Sigma^{-1}(\pmb x - \pmb \mu) \bigg] \end{equation} $$ Therefore, we obtain $$ \begin{equation} p(D \mid \pmb \theta ) = \prod_{k=1}^{n} p(\pmb x_k \pmb \mid \pmb \theta ) = \prod_{k=1}^{n} \frac{1}{(2\pi)^{d/2} |\Sigma|^{1/2}} exp \bigg[ -\frac{1}{2}(\pmb x - \pmb \mu)^t \Sigma^{-1}(\pmb x - \pmb \mu) \bigg] \end{equation} $$ and the log of the multivariate density $$ \begin{equation} l(\pmb\theta) = \sum\limits_{k=1}^{n} - \frac{1}{2}(\pmb x - \pmb \mu)^t \pmb \Sigma^{-1} (\pmb x - \pmb \mu) - \frac{d}{2} ln 2\pi - \frac{1}{2} ln |\pmb\Sigma| \end{equation} $$ In order to obtain the MLE $\boldsymbol{\hat{\theta}}$, we maximize $l (\pmb \theta)$, which can be done via differentiation $$ \begin{equation} \nabla_{\pmb \theta} \equiv \begin{bmatrix} \frac{\partial }{\partial \theta_1} \\ \frac{\partial }{\partial \theta_2} \end{bmatrix} = \begin{bmatrix} \frac{\partial }{\partial \pmb \mu} \\ \frac{\partial }{\partial \pmb \sigma} \end{bmatrix} \end{equation} $$ $$ \begin{equation} \nabla_{\pmb \theta} l = \sum\limits_{k=1}^n \nabla_{\pmb \theta} ln p(\pmb x| \pmb \theta) = 0 \end{equation} $$ *** **Exercise** <div markdown = "1"> 1. Perform the differentiation for $\frac{\partial \mathcal{L}}{\partial \pmb \mu}$ to obtain the estimator of the mean $\hat{\mu}$ 2. Perform the differentiation for $\frac{\partial \mathcal{L}}{\partial \pmb \Sigma}$ to obtain the estimator of the covariance matrix $\hat{\Sigma}$ 3. Implement the two estimators as functions based on a set of data 4. Apply these estimators (MLE) in order to obtain estimated parameters 5. Re-compute the classification errors on the previous dataset *** ```python ######################################################## # YOUR CODE GOES HERE (Perform mu estimates) ######################################################## # mu_est_1 = ? # mu_est_2 = ? # mu_est_3 = ? print('%16s \t %s \t %s \t %s \t %s \t %s \t %s' % ('', 'mu1_1 ', 'mu1_2 ', 'mu2_1 ', 'mu2_2 ', 'mu3_1 ', 'mu3_2 ')); print('%16s \t %f \t %f \t %f \t %f \t %f \t %f' % ('MLE', mu_est_1[0], mu_est_1[1], mu_est_2[0], mu_est_2[1], mu_est_3[0], mu_est_3[1])); print('%16s \t %f \t %f \t %f \t %f \t %f \t %f' % ('Truth', mu1[0], mu1[1], mu2[0], mu2[1], mu3[0], mu3[1])); def mle_covariance(x_samples, mu_est): # Calculates the Maximum Likelihood Estimate for the covariance matrix. # # Keyword Arguments: # x_samples: np.array of the samples for 1 class, n x d dimensional # mu_est: np.array of the mean MLE, d x 1 dimensional # # Returns the MLE for the covariance matrix as d x d numpy array. 
###################### # YOUR CODE GOES HERE ###################### return cov_est_ cov_est_1 = mle_covariance(x1samples, np.array([mu_est_1]).transpose()); cov_est_2 = mle_covariance(x2samples, np.array([mu_est_2]).transpose()); cov_est_3 = mle_covariance(x3samples, np.array([mu_est_3]).transpose()); print('%16s \t %s \t %s \t %s \t %s \t %s \t %s' % ('', 'cov1_1 ', 'cov1_2 ', 'cov2_1 ', 'cov2_2 ', 'cov3_1 ', 'cov3_2 ')); print('%16s \t %f \t %f \t %f \t %f \t %f \t %f' % ('MLE', cov_est_1[0, 0], cov_est_1[1, 0], cov_est_2[0, 0], cov_est_2[1, 0], cov_est_3[0, 0], cov_est_3[1, 0])); print('%16s \t %f \t %f \t %f \t %f \t %f \t %f' % ('Truth', cov1[0, 0], cov1[1, 0], cov2[0, 0], cov2[1, 0], cov3[0, 0], cov3[1, 0])); mu_estimates = {}; mu_estimates[0] = np.array([mu_est_1]).transpose() mu_estimates[1] = np.array([mu_est_2]).transpose() mu_estimates[2] = np.array([mu_est_3]).transpose() cov_estimates = {}; cov_estimates[0] = cov_est_1; cov_estimates[1] = cov_est_2; cov_estimates[2] = cov_est_3; # Plot the corresponding data plt.figure(figsize=(12, 8)) plt.scatter(x1samples[:,0], x1samples[:,1], s=40, marker='o', c=[0, 0.8, 0], edgecolors='w'); plt.scatter(x2samples[:,0], x2samples[:,1], s=40, marker='s', c=[0, 0, 0.8], edgecolors='w'); plt.scatter(x3samples[:,0], x3samples[:,1], s=40, marker='^', c=[0.8, 0, 0], edgecolors='w'); h = plot_gaussian_ellipsoid(mu1, cov1, 2, color=[0.2, 0.6, 0.2]); h = plot_gaussian_ellipsoid(mu2, cov2, 2, color=[0.2, 0.2, 0.6]); h = plot_gaussian_ellipsoid(mu3, cov3, 2, color=[0.6, 0.2, 0.2]); h = plot_gaussian_ellipsoid(mu_est_1, cov_est_1, 2, color=[0.1, 0.99, 0.1]); h = plot_gaussian_ellipsoid(mu_est_2, cov_est_2, 2, color=[0.1, 0.1, 0.99]); h = plot_gaussian_ellipsoid(mu_est_3, cov_est_3, 2, color=[0.99, 0.1, 0.1]); plt.title('Comparing estimated MLE Gaussians'); ``` ## Audio source separation The maximum likelihood estimator (MLE) is widely used in practical signal modeling and we can show that the MLE is equivalent to the least squares estimator for a wide class of problems, including well resolved sinusoids in white noise. We are going to consider a model consisting of a complex sinusoid in additive white (complex) noise: $$ \displaystyle x(n) = {\cal A}e^{j\omega_{0} n} + v(n) $$ Here, $ {\cal A}= A e^{j\phi} $ is the complex amplitude of the sinusoid, and $ v(n) $ is white noise that we assume to be Gaussian distributed with zero mean. Hence, we assume that its probability density function is given by $$ \displaystyle p_{v}(\nu) = \frac{1}{\pi \sigma_{v}^2} e^{-\frac{\vert\nu\vert^2}{\sigma_{v}^2}}. $$ We express the zero-mean Gaussian assumption by writing $$ \displaystyle v(n) \sim {\cal N}(0,\sigma_{v}^2) $$ The parameter $ \sigma_{v}^2 $ is the *variance* of the random process $ v(n) $ , and $ \sigma_{v} $ is its standard deviation. It turns out that when Gaussian random variables $ v(n) $ are uncorrelated (i.e., when $ v(n) $ is white noise), they are also independent. This means that the probability of observing particular values of $ v(n) $ and $ v(m) $ is given by the product of their respective probabilities. We will now use this fact to compute an explicit probability for observing any data sequence $ x(n) $ Since the sinusoidal part of our signal model, $ {\cal A}e^{j\omega_{0}n}$ , is deterministic; i.e., it does not including any random components; it may be treated as the time-varying mean of a Gaussian random process $ x(n) $ . 
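As a quick numerical illustration of this signal model (added here; the parameter values below are arbitrary choices, not values from the original text), we can simulate $x(n) = {\cal A}e^{j\omega_0 n} + v(n)$ with complex white Gaussian noise and plot its real and imaginary parts.

```python
# Simulate x(n) = A*exp(j*(w0*n + phi)) + v(n), with v(n) complex white Gaussian noise.
import numpy as np
import matplotlib.pyplot as plt

N = 256
n = np.arange(N)
A, phi, w0 = 1.0, 0.3, 2 * np.pi * 0.1      # example parameters (arbitrary)
sigma_v = 0.5

# Complex Gaussian noise with variance sigma_v**2
v = (sigma_v / np.sqrt(2)) * (np.random.randn(N) + 1j * np.random.randn(N))
x = A * np.exp(1j * (w0 * n + phi)) + v

plt.plot(n, x.real, label='Re x(n)')
plt.plot(n, x.imag, label='Im x(n)')
plt.legend()
```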
That is, our signal model can be rewritten as $$ \displaystyle x(n) \sim {\cal N}({\cal A}e^{j\omega_{0} n},\sigma_{v}^2) $$ and the probability density function for the whole set of observations $ x(n) $ , $ n=0,1,2,\ldots,N-1 $ is given by $$ \displaystyle p(x) = p[x(0)] p[x(1)]\cdots p[x(N-1)] = \left(\frac{1}{\pi \sigma_v^2}\right)^N e^{-\frac{1}{\sigma_v^2}\sum_{n=0}^{N-1} \left\vert x(n) - {\cal A}e^{j\omega_0 n}\right\vert^2} $$ Thus, given the noise variance $ \sigma_v^2 $ and the three sinusoidal parameters $ A,\phi,\omega_0 $ (remember that $ {\cal A}= A e^{j\phi} $ ), we can compute the relative probability of any observed data samples $ x(n) $ . We can generalize this approach in order to perform a complete blind audio source separation algorithm, such as detailed in the following paper Févotte, C., & Cardoso, J. F. "Maximum likelihood approach for blind audio source separation using time-frequency Gaussian source models". *IEEE Workshop on Applications of Signal Processing to Audio and Acoustics*, 2005. (pp. 78-81). IEEE. You can try to implement this by relying on the [full paper](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.640.6981&rep=rep1&type=pdf) that details this method. *** **Exercise** <div markdown = "1"> 1. Implement the single sinusoid extraction 2. Apply this approach to multiple sinusoids 3. Follow the paper to implement blind source separation *** ```python ```
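As a starting point for the first exercise, one possible approach (a sketch, not necessarily the intended solution) uses the fact that, for a single complex sinusoid in white Gaussian noise, maximising the likelihood amounts to locating the peak of the periodogram and then projecting the data onto the estimated sinusoid.

```python
# Sketch: single-sinusoid extraction by periodogram peak + projection.
import numpy as np

def extract_sinusoid(x, n_fft=4096):
    N = len(x)
    n = np.arange(N)
    X = np.fft.fft(x, n_fft)
    w_grid = 2 * np.pi * np.arange(n_fft) / n_fft
    w_hat = w_grid[np.argmax(np.abs(X))]              # frequency estimate
    A_hat = np.sum(x * np.exp(-1j * w_hat * n)) / N   # complex amplitude estimate
    residual = x - A_hat * np.exp(1j * w_hat * n)     # what is left after extraction
    return w_hat, A_hat, residual
```

Applying the function repeatedly to the residual gives a crude way of handling multiple sinusoids (exercise 2).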
51f8687327653df6a7dc5485797d276f1a766524
41,233
ipynb
Jupyter Notebook
05a_bayesian_inference.ipynb
piptouque/atiam_ml
9da637eae179237d30a15dd9ce3e95a2a956c385
[ "MIT" ]
null
null
null
05a_bayesian_inference.ipynb
piptouque/atiam_ml
9da637eae179237d30a15dd9ce3e95a2a956c385
[ "MIT" ]
null
null
null
05a_bayesian_inference.ipynb
piptouque/atiam_ml
9da637eae179237d30a15dd9ce3e95a2a956c385
[ "MIT" ]
null
null
null
43.725345
1,004
0.571654
true
9,444
Qwen/Qwen-72B
1. YES 2. YES
0.874077
0.885631
0.77411
__label__eng_Latn
0.970605
0.63685
# On the fundamental differences between quantum states - an overview.

### University of Basel. Department of Physics
#### Quantum Information. Frühjahrssemester 2020

**Professor**: James Wootton.

Bowei Wu - bowei.wu@stud.unibas.ch (Responsible for the first part)

José A. Hernández - ja.hernandezsanchez@stud.unibas.ch (Responsible for the second part)

**NOTE:** Both authors contributed with their respective parts. A consensus was reached in order to paint a concise picture and thus the presented work is a collaborative effort.

## First part - an introduction:

## What even is a "quantum state"?

Historically, the study of various microscopic phenomena has shown that our classical (and thus continuous) approach to the description and understanding of our reality is, at the microscopic level, inaccurate and missing "the bigger picture" at best and catastrophically off at worst. A concise example is the so-called "photoelectric effect", which reveals that nature is not continuous. The quantum of energy in a beam of light with frequency $v$ is $\epsilon=hv$. This is anomalous in classical physics. But as the scale becomes smaller and smaller, physical phenomena become quantized.

For a quantum system, the energy and the state are not arbitrary but discrete. For example, a particle in a 1D box has eigenstates
$$\psi_n(x)=\sqrt{\frac{2}{L}}\sin\left(\frac{n\pi x}{L}\right)$$
The state of the particle can be
$$|\psi\rangle=\sum c_n|\psi_n\rangle$$
Namely, the particle can be in different eigenstates at the same time. This is called superposition, which is a unique property given by quantum mechanics. If we do a measurement on the particle, we will find the particle in one of the eigenstates. The particle is found in state $|\psi_n\rangle$ with probability $|c_n|^2$. So the coefficients should satisfy
$$\sum |c_n|^2=1$$

## The Qubit and Two-state quantum systems

The qubit is the basic unit in quantum computing. The name 'qubit' comes from 'bit', which is the basic unit in classical computing. In a classical computer, a bit can be either 0 or 1. Here 0 and 1 can correspond to the off and on states of a switch, or to low and high voltages in real physics. We can then use multiple bits (a bit string) to encode information.

In a quantum computer, we also use 0 and 1, corresponding to the classical binary bit. However, the qubit can be both 0 and 1 at the same time. If we write the qubit down, it can be expressed as
$$ |\psi\rangle=\alpha|0\rangle+\beta|1\rangle $$
Here $|0\rangle$ and $|1\rangle$ are 2 eigenstates, which are orthogonal to each other. For example, we can define a vertically polarized photon as $|0\rangle$ and a horizontally polarized photon as $|1\rangle$. Or, a spin-up electron as $|0\rangle$ and a spin-down electron as $|1\rangle$. The values of $\alpha$ and $\beta$ are complex numbers, and they need to satisfy $|\alpha|^2+|\beta|^2=1$, because $|\alpha|^2$ and $|\beta|^2$ are the probabilities of the result being $|0\rangle$ and $|1\rangle$. After the measurement the state will collapse to either $|0\rangle$ or $|1\rangle$, which means that the uncertainty disappears and the state no longer changes.

If we have multiple qubits, we can use tensor products to express the multi-qubit state. For example, a 2-qubit state which contains qubits A and B can be expressed as
$$|\psi\rangle_{AB}=|\psi\rangle_A\otimes|\psi\rangle_B=c_{00}|00\rangle+c_{01}|01\rangle+c_{10}|10\rangle+c_{11}|11\rangle$$

What we have written so far are just abstract expressions.
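These expressions are easy to check numerically. The snippet below is an added illustration using plain NumPy (not a quantum-computing library): it builds a single-qubit superposition, verifies its normalisation, and forms a two-qubit state with the Kronecker (tensor) product.

```python
import numpy as np

ket0 = np.array([1, 0], dtype=complex)
ket1 = np.array([0, 1], dtype=complex)

# |psi> = alpha|0> + beta|1>
alpha, beta = 1 / np.sqrt(2), 1j / np.sqrt(2)
psi = alpha * ket0 + beta * ket1
print(np.abs(alpha)**2 + np.abs(beta)**2)   # measurement probabilities sum to 1

# Two-qubit state |psi>_A (x) |0>_B via the tensor product
psi_AB = np.kron(psi, ket0)
print(psi_AB)                                # coefficients c_00, c_01, c_10, c_11
```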
To put it into practice, we need to prepare some real qubits. Since the qubit has 2 eigenstates, it is just a two-state quantum system.

### Experimental approaches towards two-state quantum systems

The basic requirement of a two-state quantum system is that there are two sufficiently stable states in the energy levels where the physical process of interest happens. That is to say, the half-life of the two states should be far longer than their running time. The system can even have more than two states, as long as the other states have almost no influence on the two states during the physical process we are interested in.

A possible method to build a two-state system is Nuclear Magnetic Resonance (NMR). The qubits are nuclei with spin $1\over2$ (like $^1H$, $^{13}C$, $^{19}F$, $^{15}N$, etc.). Define the spin-up nucleus as $|0\rangle$ and the spin-down nucleus as $|1\rangle$. The qubits can be controlled by an alternating magnetic field, since the spin will precess under an external magnetic field.

To build two-state systems, there are also some other methods like quantum dots (define the spin-down dot as $|0\rangle$ and the spin-up dot as $|1\rangle$), optical methods (mentioned above), superconducting Josephson junctions, etc.

## Quantum Gates

Mathematically, if we call the operation on the qubits $U$, and the state of the qubits is written down as a vector, $U$ must be a unitary matrix. That is to say, $UU^+=I$.

Let us use the two-state NMR system as an example to see how a mathematical unitary transform, i.e. a quantum logic gate, is realized on a real physical system. The gate we want to realize is called $U(\theta,\phi)$.

$$U(\theta,\phi)=
\begin{pmatrix}
\cos\theta & -i\sin\theta e^{i\phi} \\
-i\sin\theta e^{-i\phi} & \cos\theta
\end{pmatrix}
$$

To implement this, we need to create a magnetic field in the X-Y plane for a time interval $\tau$, which will let the spin precess about the direction of the magnetic field. If the magnetic field is $\textbf{B}=|\textbf{B}|(\cos\phi, \sin\phi, 0)$, the frequency of the Larmor precession will be $\omega_L={2\over\hbar}|\mu||\textbf{B}|$. So the rotated angle is $\omega_L \tau$ and $\theta={\omega_L \tau\over2}$. With this we have built the unitary transform $U(\theta,\phi)$. We can see that if we apply this to the state $|0\rangle$, we get $\cos\theta|0\rangle -ie^{-i\phi}\sin\theta|1\rangle$. On different real quantum systems we will have different approaches to realize different quantum gates.

## Entanglement

Entanglement is a non-local property between sub-systems in a quantum system. If a state of several qubits cannot be expressed as the tensor product of the individual qubits, such a state is called an entangled state. For example, the state ${1\over\sqrt2}(|00\rangle+|10\rangle)$ can be expressed as ${1\over\sqrt2}(|0\rangle+|1\rangle)\otimes|0\rangle$. In contrast, the state ${1\over\sqrt2}(|00\rangle+|11\rangle)$ cannot be expressed as the tensor product of two qubits, which means this is an entangled state.

Since entanglement is non-local, the state of one qubit will have an influence on the state of the other qubits which are entangled with it. The property of entanglement gives quantum computing its ability of parallel processing. For instance, given the decimal numbers 10 and 5, we express them as $|1010\rangle$ and $|0101\rangle$. Then we create an entangled state of them, $|1010\rangle+|0101\rangle$. For this state, if we apply some quantum algorithm, the numbers 10 and 5 can be processed at the same time.
This is one of the reasons why quantum computer sometimes is faster than classical computer. However, the property of entanglement means that the quantum system can be entangled with not only the target but also the environment. Once it is entangled with the environment, the quantum coherence will decay with time, which is called quantum decoherence. ## Second part - on the "most" and "least" quantum states: ## The many-body quantum system All of these aforementioned characteristics and descriptions of quantum systems lay a solid foundation for even deeper questions to be posed; mainly concerning how we can exploit the seemingly erratic and unknowable features of the quantum world for our benefit. A question one might ask is how we could in principle use qubits to convey information to others just as some scribbles on paper do in our daily lives; and so, the quest for communication using many of these qubits begins. Consider then a system composed of $N$ identically prepared subsystems such that the Hilbert space where the complete system "lives" may be written in terms of the subsystem's Hilbert subspaces; namely: $$\mathcal{H}_{C}=\mathcal{H}_{S}^{\otimes N}$$ Additionally, the Hamiltonian with which this complete system will evolve in time can be written in terms of the Hamiltonians describing the evolution of the subsystems, so: $$H=\sum_{i=1}^{N}\mathbb{1}^{1}\otimes\ldots\otimes h^{i} \otimes\ldots\otimes\mathbb{1}^{N}$$ And the ith subsystem's Hamiltonian $h^{i}$ in turn may be written in some appropriate basis as: $$h^{i}=\begin{pmatrix}\lambda_{min} & & \\ & \ddots & \\ & &\lambda_{max}=\lambda_{min}+\Delta \end{pmatrix}$$ Assuming of course that there exists a finite spectral range and that the eigenvalues of said "sub Hamiltonian" are bounded and sorted from a minimum to a maximum. Now, from this, and by virtue of $h^{i}=h^{i\dagger}$. We can propose the decomposition of $h^{i}=g^{i}+\frac{\lambda_{min}+\lambda_{max}}{2}\mathbb{1}$, where: $$g^{i}=\begin{pmatrix}-\Delta/2 & & \\ & \ddots & \\ & &\Delta/2\end{pmatrix}$$ Now, if we consider the case where the encoding of such qubits is done in a unitary (and thus reversible) fashion, then we can write the unitary operator that describes the evolution of the whole system in terms of the unitaries which describe the evolution of the individual subsystems: $$U_{C}(t)=e^{-itH}=\bigotimes_{i=1}^{N}U_{S}^{i}(t)\longleftrightarrow U_{S}^{i}(t)=e^{-ith^{i}}$$ From this consideration and from the fact that the identity matrix $\mathbb{1}$ and $g^{i}$ commute, then the individual subsystem unitary may be rewritten as: $$U_{S}(t)=e^{-it\frac{\lambda_{min}+\lambda_{max}}{2}}e^{-itg^{i}}$$ Where in the end, the scalar phase factor the subsystems accrue $e^{-it\frac{\lambda_{min}+\lambda_{max}}{2}}$ has no physical relevance. From this, we can assume then that $h^{i}=g^{i}, \forall i$. The subsystem Hamiltonian's eigenvalues may be assumed to be within the interval $[-\Delta/2,\Delta/2]$ without loss of any generality. Moreover, the eigenstates corresponding to the extremal eigenvalues can be denoted to be: $$ \begin{align} h^{i}\left|s_{min}\right\rangle=&-\frac{\Delta}{2}\left|s_{min}\right\rangle\\ h^{i}\left|s_{max}\right\rangle=&\frac{\Delta}{2}\left|s_{max}\right\rangle \end{align} $$ ## Product states, GHZ states and the Quantum Fisher information measure To begin our discussion of quantum states, we begin with the "least" quantum state; the product state. 
Basically we consider the total state to be a direct product of the individual sub states. So for a fixed interaction time $t=1$ a product state can be written in a general way as follows: $$\left|\Psi(t)\right\rangle=e^{-iH}\left|\Psi(0)\right\rangle=e^{-iH}\bigotimes_{i=1}^{N}\left|\psi^{(i)}(0)\right\rangle$$ In the case for qubits, the sub states may be written in density matrix form as: $$\left|\psi^{(i)}\right\rangle\left\langle\psi^{(i)}\right|=\left|\vec{n}\right\rangle\left\langle\vec{n}\right|=\frac{1}{2}\left(\mathbb{1}+\vec{n}\cdot\vec{\sigma}\right)$$ Where $||\vec{n}||=1$ and $\vec{\sigma}=(\sigma_{X},\sigma_{Y},\sigma_{Z})$. Resulting in a product state for qubits: $$\left|\Psi(t)\right\rangle=e^{-iH}\frac{1}{\sqrt{N}}\left(\left|0\right\rangle+e^{i\varphi}\left|1\right\rangle\right)^{\otimes N}$$ On the other hand, the "most" quantum state we can prepare can be thought of as "the maximally entangled" state; such a state is usually called a GHZ state (as it was first studied for the case of at least three qubits by Greenberger, Horne and Zellinger in 1989). For qubits, this state may be written as follows: $$\left|GHZ\right\rangle=\frac{1}{\sqrt{2}}\left(\left|0\right\rangle^{\otimes N}+e^{i\varphi}\left|1\right\rangle^{\otimes N}\right)$$ All this talk about states is good and all, but when it comes to actual devices and experiments, is there a way to distinguish between these two states? Meaning, which quantity can we extract from the outcome of an experiment in order to show that the state we're working with is of which kind? The answer comes to us from an information-theoretical quantity called the **Fisher information** and its quantum counterpart the quantum Fisher information (QFI). Many developments have studied this quantity throughout the years, and as such it has many different interpretations and representations; in this work however we focus on one particular definition of it, mathematically it may be written as: $$\mathcal{F}(|\Psi(t)\rangle)=4\mathrm{Var}_{|\Psi(0)\rangle}(H)$$ I.e. four times the variance of a given Hamiltonian under an initial state quantifies the QFI of a given (strictly pure) quantum state. In short: the QFI is a measure for "how entangled" a quantum state is. To prove this let us calculate explicitly the QFI for the two types of states stated above, the product state and the GHZ state all with the Hamiltonian also previously stated. First, for the product state: $$\begin{align*} \mathcal{F}(|\Psi(t)\rangle)=&4\left(\left\langle\Psi(0)\right|H^{2}\left|\Psi(0)\right\rangle-(\left\langle\Psi(0)\right|H\left|\Psi(0)\right\rangle)^{2}\right) \end{align*}$$ Where the first moment about the mean is: $$\begin{align*} \left\langle\Psi(0)\right|H\left|\Psi(0)\right\rangle=&\sum_{i=1}^{N}\left\langle\Psi(0)\right|h^{(i)}\left|\Psi(0)\right\rangle\\ =&\sum_{i=1}^{N}\left\langle\psi^{(i)}(0)\right|h^{(i)}\left|\psi^{(i)}(0)\right\rangle\\ =&\sum_{i=1}^{N}\left\langle h^{(i)}\right\rangle \end{align*}$$ And the second moment. $$\begin{align*} \left\langle\Psi(0)\right|H^{2}\left|\Psi(0)\right\rangle=&\sum_{i,j=1}^{N}\left\langle\Psi(0)\right|h^{(i)}h^{(j)}\left|\Psi(0)\right\rangle\\ =&\sum_{i=j=1}^{N}\left\langle \left(h^{(i)}\right)^{2}\right\rangle+\sum_{i\neq j}^{N}\left\langle h^{(i)}\right\rangle\left\langle h^{(j)}\right\rangle \end{align*}$$ All in all, the QFI for a product state may be shown to be simply equivalent to the sum of the QFI of the individual sub states w.r.t. their corresponding sub Hamiltonians. I.e. 
$$\begin{align*} \mathcal{F}(\left|\Psi(t)\right\rangle)=&4\sum_{i=1}^{N}\left(\left\langle \left(h^{(i)}\right)^{2}\right\rangle-\left\langle h^{(i)}\right\rangle^{2}\right)=4\mathrm{Var}_{|\psi^{(i)}(0)}\left(h^{(i)}\right) \end{align*}$$ The best QFI attainable from this is then proportional up to a factor of $4$ to the maximum variance of a Hamiltonian about a particular state. For a product state considering the above expressions for the Hamiltonian and sub systems then: $$\begin{align*} \mathcal{F}_{\text{Product}}=&N\Delta^{2} \end{align*}$$ In contrast to this result, following analogously but now for a GHZ state, it is straightforward to show that: $$\begin{align*} \mathcal{F}_{\text{GHZ}}=&N^{2}\Delta^{2} \end{align*}$$ Which is indeed $N$ times larger than the QFI for that of a product state! Implying of course that our knowledge of the entanglement of the state scales linearly as the number of subsystems increases. So, we know that the QFI for different states is different, effectively quantifying "how entangled" a state is. The next obvious question to ask after investigating the "most" and "least" quantum states is, does an "intermediately" entangled state exist? And if so, how can we characterize _any_ state by virtue of its "entanglement depth"? We address this next. ## Entanglement depth and a Qiskit proposal Let us now consider an intermediate case between the completely product state and a GHZ "maximally" entangled state. Consider then that from the $N$ subsystems we form $K$ groups $G_{1}, G_{2},\ldots,G_{K}$ with at most $M$ subsystems in each group. By allowing a total state with this assumption to not be completely product but rather product among these different groups, then the total state may be written as: $$\begin{align*} \left|\Psi_{M}\right\rangle=&\bigotimes_{k=1}^{K}\left|\psi^{(G_{k})}\right\rangle \end{align*}$$ Computing the QFI corresponding to this state with our usual Hamiltonian, and recalling that for product states (even among groups!) the QFI is additive across subsystems, then: $$\begin{align*} \mathcal{F}_{\left|\Psi_{M}\right\rangle}(H)=&\sum_{k=1}^{K}\mathcal{F}_{\left|\psi^{(G_{k})}\right\rangle}\left(H\right)\leq\sum_{k=1}^{K}\left|G_{k}\right|^{2}\Delta^{2} \end{align*}$$ Where of course $\left|G_{k}\right|$ is the size of the $k$-th subgroup. One can then make the assertion that this QFI is then maximized for the case where $\left|G_{k}\right|=|M|,\forall k$ by construction of these groups. In this case then, the QFI may be rewritten as: $$\begin{align*} \mathcal{F}_{\left|\Psi_{M}\right\rangle}(H)\leq&\Delta^{2}\left(\frac{N}{M}\right)M^{2}=\Delta^{2}NM \end{align*}$$ Since $M=N/k$. This yields then the QFI of a system where $M$ particles are entangled (at the most!) from an $N$ particle total system. 
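These two scalings are easy to verify numerically. The following check is an addition to the text (it is not from the original project): it takes local terms $h^{(i)} = \sigma_z/2$, so that $\Delta = 1$, and computes $\mathcal{F} = 4\,\mathrm{Var}(H)$ for a product state and for a GHZ state.

```python
# Numerical check of F_product ~ N*Delta^2 vs F_GHZ ~ N^2*Delta^2 (here Delta = 1).
import numpy as np
from functools import reduce

def many_body_H(N):
    sz = np.diag([0.5, -0.5])        # local term h_i = sigma_z / 2
    I = np.eye(2)
    H = np.zeros((2**N, 2**N))
    for i in range(N):
        ops = [I] * N
        ops[i] = sz
        H += reduce(np.kron, ops)
    return H

def qfi(state, H):
    mean = state @ H @ state
    mean_sq = state @ H @ H @ state
    return 4 * (mean_sq - mean**2)

N = 6
H = many_body_H(N)

plus = np.ones(2) / np.sqrt(2)                  # (|0> + |1>)/sqrt(2) on each qubit
product = reduce(np.kron, [plus] * N)

ghz = np.zeros(2**N)
ghz[0] = ghz[-1] = 1 / np.sqrt(2)               # (|0...0> + |1...1>)/sqrt(2)

print(qfi(product, H))   # ~ N   = 6
print(qfi(ghz, H))       # ~ N^2 = 36
```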
Pure states are experimentally difficult to prepare, lifting the QFI restriction for pure states can be done by writing the density state for a particular mixed state with "$M$-bound" entanglement in the following way: $$\begin{align*} \varrho_{M}=&\sum_{i}^{N}p_{(i)}\left|\Psi_{M}^{(i)}\right\rangle\left\langle\Psi_{M}^{(i)}\right| \end{align*}$$ The QFI may then be written for this more general state as: $$\begin{align*} \mathcal{F}_{\varrho_{M}}(H)\leq&\sum_{i}^{N}p_{(i)}\mathcal{F}_{\left|\Psi_{M}^{(i)}\right\rangle}(H)\leq\Delta^{2}NM \end{align*}$$ From this we can define the entanglement depth ($\mathcal{ED}$) of a given general state $\varrho$ by acknowledging that a witness or a measure for it is indeed the QFI of said state $\varrho$ for a given Hamiltonian describing the evolution of the system. Thus, $$\begin{equation*} \boxed{ \mathcal{ED}(\varrho)\geq\frac{\mathcal{F}_{\varrho}(H)}{\Delta^{2}N} } \end{equation*}$$ Where in particular, if $M=N$, i.e. $N$ subsystems (all of them) are entangled, then we recover the GHZ state previously discussed. Conversely, if $M=1$, meaning that no subsystems are entangled, a product state is recovered. In theory the characterization of any state by means of the entanglement of the particles conforming said state is rather straightforward: 1. **FIRST:** Define the Hamiltonian under which the system will evolve (possibility for error induction and characterization goes here) 2. **SECOND:** With knowledge of the state with which we are working with, define the spectral range $\Delta$ of the Hamiltonian by looking at the extremal cases. 3. **THIRD:** Compute the QFI for this state under said Hamiltonian. 4. **FOURTH:** Comparte the QFI with the spectral range and the number of subsystems in total. After all of this one can say with certainty that: $$\begin{align*} \text{If$\ldots$}& &\text{$\ldots$ then.}\\ \mathcal{ED(\varrho)}&>1 &\text{$\varrho$ is entangled!}\\ \mathcal{ED(\varrho)}&<1 &\text{$\varrho$ is not entangled!} \end{align*}$$ In Qiskit, this can be put to the test for the extreme cases (product state and the GHZ state). ```python # Initialization from qiskit import * from qiskit.aqua.operators import * import numpy as np ``` Setting the number of Qubits: ```python # Number of qubits and thus, quantum registers N = 8 if (N % 2) != 0: raise ValueError('The number of qubits is uneven!') qr = QuantumRegister(N) #cr = ClassicalRegister(N) ``` A GHZ state may of $N$ qubits may be prepared as follows: ```python # GHZ state preparation for N qubits GHZcircuit = QuantumCircuit(qr) GHZcircuit.h(0) for i in range(0,N-1): GHZcircuit.cx(i,i+1) GHZcircuit.h((qr[N-2],qr[N-1])) GHZcircuit.cx(N-1,N-2) GHZcircuit.h((qr[N-2],qr[N-1])) GHZcircuit.draw(output="mpl") ``` A randomly prepared but nonetheless product state can be prepared by using: ```python # Random, N qubit product state preparation Prodcircuit = QuantumCircuit(qr) Prodcircuit.h(qr) for i in range(0,N-1): if np.random.uniform(0.0,1.0) > 0.5: Prodcircuit.x(i) Prodcircuit.draw(output="mpl") ``` For a particular Hamiltonian (which can be user defined) a simple and rather naive algorithm for our eigenvalue sorting scheme can be: ```python # Eigenvalue sorting in the Hamiltonian's diagonal basis def EV_sorting(arr): for i in range(0,N-1): for j in range(0,N-1): if arr[i][j] != 0 and i < N/2: arr[i][j] = -1*arr[i][j] return arr ``` ```python # Arbitrary local Hamiltonian for i in range(0,N-1): hl = (1/2)*np.eye(N,N) h = EV_sorting(hl) h ``` array([[-0.5, 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [ 0. 
, -0.5, 0. , 0. , 0. , 0. , 0. , 0. ], [ 0. , 0. , -0.5, 0. , 0. , 0. , 0. , 0. ], [ 0. , 0. , 0. , -0.5, 0. , 0. , 0. , 0. ], [ 0. , 0. , 0. , 0. , 0.5, 0. , 0. , 0. ], [ 0. , 0. , 0. , 0. , 0. , 0.5, 0. , 0. ], [ 0. , 0. , 0. , 0. , 0. , 0. , 0.5, 0. ], [ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.5]]) ```python # Arbitrary global Hamiltonian for i in range(0,N-1): Hg = N*(1/2)*np.eye(N,N) H = EV_sorting(Hg) H ``` array([[-4., 0., 0., 0., 0., 0., 0., 0.], [ 0., -4., 0., 0., 0., 0., 0., 0.], [ 0., 0., -4., 0., 0., 0., 0., 0.], [ 0., 0., 0., -4., 0., 0., 0., 0.], [ 0., 0., 0., 0., 4., 0., 0., 0.], [ 0., 0., 0., 0., 0., 4., 0., 0.], [ 0., 0., 0., 0., 0., 0., 4., 0.], [ 0., 0., 0., 0., 0., 0., 0., 4.]]) From this, it would be enough to compute the Variance of these Hamiltonians using the different state functions from the quantum circuits described above. Qiskit's own "StateFn" function does the trick with the only caveat being that one must take into account normalization of the states. ```python # Quantum Fisher information estimation as per above equation GHZ = StateFn(GHZcircuit,1/np.sqrt(2),True) Prod = StateFn(Prodcircuit,1/np.sqrt(N),True) ``` Results on real IBM devices can be shown to yield results similar to: ```python from IPython.display import Image ``` Where the plot in a) shows that as time progresses $Jt$ the state evolves and becomes entangled by virtue of $\mathcal{ED}>1$. Conversely the state analyzed in b) shows no sign of entanglement as $\mathcal{ED}<1$ for a particular time range after which it becomes entangled. ## Final remarks and sources Evolution/interaction times are also a factor to consider in future work as it will play a significant role in further quantum state characterization since as seen above, after a particular time, an initially non-entangled state may evolve to be entangled. Additionally, further research and application of Qiskit's own tools for QFI estimation is in order to be able to test these theoretical proceedings on real quantum devices. Additionally, this might serve as a way to calibrate and/or characterize errors on real quantum devices since the extremal cases were thoroughly studied. [1] Ozaeta, Asier, and Peter L McMahon. “Decoherence of up to 8-Qubit Entangled States in a 16-Qubit Superconducting Quantum Processor.” Quantum Science and Technology 4.2 (2019): 025015. Crossref. Web. [2] Suzuki, Yohichi et al. “Amplitude Estimation Without Phase Estimation.” Quantum Information Processing 19.2 (2020): n. pag. Crossref. Web. [3] Smith, Adam et al. “Simulating Quantum Many-Body Dynamics on a Current Digital Quantum Computer.” npj Quantum Information 5.1 (2019): n. pag. Crossref. Web. [4] Sekatski, Pavel et al. “Quantum Information: Applications to Metrology and Communications.” University of Basel lecture (FS 2020) [5] Wootton, James. “Quantum Information.” University of Basel lecture (FS 2020) ```python import qiskit qiskit.__qiskit_version__ ``` {'qiskit-terra': '0.14.1', 'qiskit-aer': '0.5.1', 'qiskit-ignis': '0.3.0', 'qiskit-ibmq-provider': '0.7.1', 'qiskit-aqua': '0.7.1', 'qiskit': '0.19.2'} ```python ```
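A compact way to tie the pieces together (an added sketch, not code from the original project) is to extract the statevector of a circuit with Qiskit's `quantum_info` module and evaluate the witness $\mathcal{ED} \geq \mathcal{F}/(\Delta^2 N)$ directly. Note that the Hamiltonian used here has to act on the full $2^N$-dimensional state space (for example, built as a sum of local terms as in the numerical check above), and `Statevector.from_instruction` is assumed to be available in the installed Qiskit version.

```python
import numpy as np
from qiskit.quantum_info import Statevector

def entanglement_depth_witness(circuit, H, Delta, N):
    # H must be a 2**N x 2**N matrix acting on the full state space.
    psi = Statevector.from_instruction(circuit).data
    mean = np.vdot(psi, H @ psi)
    var = np.vdot(psi, H @ (H @ psi)) - mean**2
    F = 4 * var.real
    return F / (Delta**2 * N)

# e.g. entanglement_depth_witness(GHZcircuit, H_full, Delta=1, N=N)
```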
71b3eaec15f8ce95c2f21843534d7f57b076960e
65,975
ipynb
Jupyter Notebook
Final_Projects/Hernandex_Wu/FinalProject_Hernandez_Wu.ipynb
ManuelRosenthaler/Quantum-information-course-Basel
860af318d296d8ac7c86e56f71d8493461399744
[ "Apache-2.0" ]
null
null
null
Final_Projects/Hernandex_Wu/FinalProject_Hernandez_Wu.ipynb
ManuelRosenthaler/Quantum-information-course-Basel
860af318d296d8ac7c86e56f71d8493461399744
[ "Apache-2.0" ]
null
null
null
Final_Projects/Hernandex_Wu/FinalProject_Hernandez_Wu.ipynb
ManuelRosenthaler/Quantum-information-course-Basel
860af318d296d8ac7c86e56f71d8493461399744
[ "Apache-2.0" ]
null
null
null
112.011885
21,108
0.799454
true
7,153
Qwen/Qwen-72B
1. YES 2. YES
0.822189
0.651355
0.535537
__label__eng_Latn
0.994027
0.082561
```python import numpy as np import matplotlib.pyplot as plt %matplotlib inline import control ``` # Efectos del lazo cerrado ## Objetivos - Determinar la estabilidad de sistemas de lazo abierto y lazo cerrado. - Verificar el efecto de cerrar un lazo de control sobre sistemas de tiempo continuo. ## Lazo cerrado Tomando como base el bucle típico de control de la siguiente figura. Puede deducirse que la función de transferencia desde la **referencia** $R(s)$ hasta la **controlada** $C(s)$. Observe que el error se define como: \begin{equation} E(s) = R(s) - H(s) C(s) \end{equation} La señal **controlada** corresponde a la transformación que realiza el **controlador**, el **actuador** y la **planta** sobre el **error**. \begin{align} \color{blue}{C(s)} &= \left (G_c(s)G_a(s)G_p(s) \right ) E(s) \\ \color{blue}{C(s)} &= \left (G_c(s)G_a(s)G_p(s) \right ) \left ( \color{blue}{\color{blue}{R(s)}} - H(s) \color{blue}{C(s)} \right ) \\ \color{blue}{C(s)} &= \left (G_c(s)G_a(s)G_p(s) \right ) \color{blue}{\color{blue}{R(s)}} - \left (G_c(s)G_a(s)G_p(s)H(s) \right )\color{blue}{C(s)} \\ \end{align} \begin{equation} \color{blue}{C(s)} + \left (G_c(s)G_a(s)G_p(s)H(s) \right )\color{blue}{C(s)} = \left (G_c(s)G_a(s)G_p(s) \right ) \color{blue}{R(s)} \end{equation} \begin{equation} \color{blue}{C(s)} \left ( 1 + G_c(s)G_a(s)G_p(s)H(s)\right ) = \left (G_c(s)G_a(s)G_p(s) \right ) \color{blue}{R(s)} \end{equation} Así, la función de transferencia de lazo cerrado es: \begin{equation} \frac{C(s)}{R(s)} = \frac{G_c(s)G_a(s)G_p(s)}{1 + G_c(s)G_a(s)G_p(s)H(s)} \end{equation} Para efectos prácticos se reune en un solo modelo a los sistemas **Actuador** y **Planta**, pues estos dos sistemas conforman el **Proceso** que se debe controlar. Así, la función de transferencia de lazo cerrado será: \begin{equation} \frac{C(s)}{R(s)} = \frac{G_c(s)G_p(s)}{1 + G_c(s)G_p(s)H(s)} \end{equation} Tenga en cuenta que $G_p(s)$ incorpora las dinámicas de **Actuador** y **Planta**. El rol del **Sensor** requiere respuestas "rápidas" y "precisas". El sistema **Controlador** debe ser diseñado para lograr comportamientos deseados en el sistema en lazo cerrado, es decir, se moldea la forma de $C(s)$ a partir de $R(s)$ ajustando $G_c(s)$ de forma apropiada. ## Lazo abierto vs Lazo cerrado Las características más relevantes de la respuesta transitoria de un sistema está relacionada con la ubicación de sus polos. De las ecuaciones anteriores, puede observarse que los polos se reubican en lazo cerrado de acuerdo con el controlador. Para este analisis considere los modelos lineales definidos como divisiones de polinomios. \begin{equation} \frac{C(s)}{R(s)} = \frac{G_c(s)G_p(s)}{1 + G_c(s)G_p(s)H(s)} \end{equation} \begin{equation} \frac{C(s)}{R(s)} = \frac{\frac{N_c(s)}{D_c(s)}\frac{N_p(s)}{D_p(s)}}{1 + \frac{N_c(s)}{D_c(s)}\frac{N_p(s)}{D_p(s)}H(s)} \end{equation} Considere que el sensor es perfecto, es decir, $H(s)=1$. La función de transferencia de lazo cerrado es: \begin{equation} \frac{C(s)}{R(s)} = \frac{\frac{N_c(s)N_p(s)}{D_c(s)D_p(s)}}{\frac{D_c(s)D_p(s) + N_c(s)N_p(s)}{D_c(s)D_p(s)}} = \frac{N_c(s)N_p(s)}{D_c(s)D_p(s) + N_c(s)N_p(s)} \end{equation} Mientras la función de transferencia de lazo abierto es: \begin{equation} \frac{C(s)}{R(s)} = \frac{N_c(s)N_p(s)}{D_c(s)D_p(s)} \end{equation} Observe que los numeradores se mantienen, esto indica que los ceros del sistema en lazo cerrado son los mismos que en lazo abierto. 
Observe que los denominadores cambian, esto indica que los polos del sistema en lazo cerrado cambian respecto al lazo abierto. **Ejemplo** Suponga un proceso modelado por: $$G_p(s) = \frac{2}{4s - 3}$$ $$G_p(s) = \frac{-2/3}{\frac{-4}{3}s + \frac{3}{3}}$$ $$G_p(s) = \frac{-2/3}{\frac{-4}{3}s + 1}$$ y una estrategia de contro definida por: $$G_c(s) = k_c$$ - ¿El sistema $G_p$ es estable? - ¿Qué efecto tiene realimentar el sistema con el controlador definido? En análisis se realizará a partir de las raíces del sistema teniendo en cuenta que la función de transferencia de lazo cerrado es: \begin{equation} \frac{C(s)}{R(s)} = \frac{N_c(s)N_p(s)}{D_c(s)D_p(s) + N_c(s)N_p(s)} \end{equation} $$G_{LC}(s) = \frac{2k_c}{4s - 3 + 2k_c}$$ Polo en $$4s-3+2k_c=0$$ $$s=\frac{3-2k_c}{4}$$ ```python # Se define la función de transferencia del proceso Gp = control.tf(2, [4,-3]) Gp ``` $$\frac{2}{4 s - 3}$$ ```python # Se hallan los polos del proceso polos = Gp.pole() polos ``` array([0.75]) ```python # Se hallan los ceros del proceso ceros = Gp.zero() ceros ``` array([], dtype=float64) ```python # Se grafica el mapa de polos y ceros control.pzmap(Gp) plt.grid(True) ``` - El sistema no tiene ceros. - El sistema tiene un polo en $s = 0.75$. - La respuesta dinámica del sistema está dominada por $e^{0.75t}$. - Este sistema es inestable. ```python # Se grafica la respuesta al escalón ts = np.linspace(0, 20, 1000) _, y = control.step_response(Gp, ts) plt.plot(ts, y) plt.grid(True) ``` Se tomarán distintos escenarios para $G_c(s) = k_c$. ```python Gc1 = 0.01 Gc2 = 0.1 Gc3 = 1.5 Gc4 = 3 Gc5 = 5 Gc6 = 1000 Gc7 = 1.501 ``` ```python # Caso 1 G_LC1 = control.feedback(Gc1*Gp,1) _, y1 = control.step_response(G_LC1, ts) G_LC1 ``` $$\frac{0.02}{4 s - 2.98}$$ ```python # Caso 2 G_LC2 = control.feedback(Gc2*Gp,1) _, y2 = control.step_response(G_LC2, ts) G_LC2 ``` $$\frac{0.2}{4 s - 2.8}$$ ```python # Caso 3 G_LC3 = control.feedback(Gc3*Gp,1) _, y3 = control.step_response(G_LC3, ts) G_LC3 ``` $$\frac{3}{4 s}$$ ```python # Caso 4 G_LC4 = control.feedback(Gc4*Gp,1) _, y4 = control.step_response(G_LC4, ts) G_LC4 ``` $$\frac{6}{4 s + 3}$$ ```python # Caso 5 G_LC5 = control.feedback(Gc5*Gp,1) _, y5 = control.step_response(G_LC5, ts) G_LC5 ``` $$\frac{10}{4 s + 7}$$ ```python # Caso 6 G_LC6 = control.feedback(Gc6*Gp,1) _, y6 = control.step_response(G_LC6, ts) G_LC6 ``` $$\frac{2000}{4 s + 1997}$$ ```python # Caso 7 G_LC7 = control.feedback(Gc7*Gp,1) _, y7 = control.step_response(G_LC7, ts) G_LC7 ``` $$\frac{3.002}{4 s + 0.002}$$ ```python # Se grafica el mapa de polos y ceros para todos los escenarios control.pzmap(Gp) control.pzmap(G_LC1) control.pzmap(G_LC2) control.pzmap(G_LC3) control.pzmap(G_LC4) control.pzmap(G_LC5) control.pzmap(G_LC6) control.pzmap(G_LC7) plt.grid(True) ``` ```python # Se grafica la respuesta al escalón para todos los escenarios plt.plot(ts, y,ts, y1,ts, y2,ts, y3,ts, y4,ts, y5,ts, y6) plt.legend(['Proceso', 'k=' + str(Gc1), 'k=' + str(Gc2), 'k=' + str(Gc3), 'k=' + str(Gc4), 'k=' + str(Gc5), 'k=' + str(Gc6), 'k=' + str(Gc7) ]) plt.grid(True) ``` ```python # Se grafica la respuesta al escalón para los escenarios inestables plt.plot(ts, y,ts, y1,ts, y2,ts, y3) plt.legend(['Proceso', 'k=' + str(Gc1), 'k=' + str(Gc2), 'k=' + str(Gc3)]) plt.grid(True) ``` ```python # La respuesta al escalón para los escenarios estables y el integrador plt.plot(ts, y3,ts, y4,ts, y5,ts,y6,ts,y7) plt.legend(['k=' + str(Gc3), 'k=' + str(Gc4), 'k=' + str(Gc5), 'k=' + str(Gc6), 'k=' + str(Gc7)]) plt.grid(True) ``` ```python # La 
respuesta al escalón para los escenarios estables y el integrador plt.plot(ts, y4,ts, y5,ts,y6) plt.legend(['k=' + str(Gc4), 'k=' + str(Gc5), 'k=' + str(Gc6)]) plt.grid(True) ``` **Observaciones** - El proceso es de primer orden inestable. - El controlador de modelo $G_c(s) = k_c$ fue capaz de estabilizar al sistema a partir de un valor determinado de $k_c$ - Una vez estable, el efecto de aumentar $k_c$ se aprecia en la reducción del tiempo de estabilización. **Preguntas** - ¿Puede identificar qué efecto tiene aumentar $k_c$ sobre el valor final de la señal controlada? - ¿Puede identificar qué efecto tiene aumentar $k_c$ sobre el valor final de la señal de error?
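To complement the closing questions, the short sketch below is an addition to the notebook (it reuses the `control` package already imported above): it computes the DC gain of the closed loop for several values of $k_c$, which is the steady-state value of the step response predicted by the final value theorem, $2k_c/(2k_c-3)$.

```python
# Added sketch: steady-state value of the closed loop as k_c grows.
import control

Gp = control.tf(2, [4, -3])
for kc in [3, 5, 10, 100, 1000]:
    G_LC = control.feedback(kc * Gp, 1)
    print(kc, control.dcgain(G_LC))   # tends to 1, so the steady-state error tends to 0
```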
e08721cfbf501ba68ce3a901e005ab17129faf25
111,734
ipynb
Jupyter Notebook
EfectosLazoCerrado.ipynb
pierrediazp/Control
2a185eff5b5dc84045115009e62296174d072220
[ "MIT" ]
null
null
null
EfectosLazoCerrado.ipynb
pierrediazp/Control
2a185eff5b5dc84045115009e62296174d072220
[ "MIT" ]
null
null
null
EfectosLazoCerrado.ipynb
pierrediazp/Control
2a185eff5b5dc84045115009e62296174d072220
[ "MIT" ]
1
2021-11-18T13:08:36.000Z
2021-11-18T13:08:36.000Z
154.970874
17,632
0.891465
true
2,964
Qwen/Qwen-72B
1. YES 2. YES
0.749087
0.874077
0.65476
__label__spa_Latn
0.853854
0.359558
# Introduction

## Karatsuba Multiplication

A fast multiplication algorithm that reduces the number of single-digit multiplications needed to multiply two $n$-digit numbers to at most $3n^{\log_2 3}\approx 3n^{1.585}$.

### Basic steps

Using recursion, the two large numbers $x$ and $y$, which have many digits, are split into numbers with fewer digits, each with half as many digits as the original $x$ and $y$. After this, the problem is reduced to three multiplications, together with a small number of additions and shift operations.

$$
\begin{align}
x \cdot y \, & = (10^{\frac n2} \cdot a + b) \times (10^{\frac n2} \cdot c + d) \\[2ex]
& = 10^n \cdot ac + 10^{\frac n2} \cdot (ad + bc) + bd \tag {*} \\[2ex]
\end{align}
$$

> ${\it {e.g.}} \quad$ For $x = 1234, \; y = 5678$, we have $a = 12, \; b = 34, \; c = 56, \; d = 78$

Equation `*` can be simplified further:

$$
\begin{align}
\because \qquad \; \; (a + b) \cdot (c + d) &= ac + ad + bc + bd \\[2ex]
\therefore \qquad \qquad \qquad ad + bc &= (a + b) \cdot (c + d)- ac - bd \\[2ex]
\end{align}
$$

So we only need to recursively compute $ac$, $bd$ and $(a + b) \cdot (c + d)$ in order to obtain the value of $x \cdot y$.

```python
def karatsuba(x: int, y: int):
    if x < 10 or y < 10:
        return x * y  # base case of the recursion

    x_str, y_str = str(x), str(y)

    # Handle negative numbers
    if x_str[0] == '-':
        return -karatsuba(-x, y)
    if y_str[0] == '-':
        return -karatsuba(x, -y)

    # Pad with zeros so both numbers have the same length
    max_length = max(len(x_str), len(y_str))
    x_str = f'{x_str:0>{max_length}}'
    y_str = f'{y_str:0>{max_length}}'

    split_position = max_length // 2

    a = int(x_str[:-split_position])  # high digits of x
    b = int(x_str[-split_position:])  # low digits of x
    c = int(y_str[:-split_position])  # high digits of y
    d = int(y_str[-split_position:])  # low digits of y

    z0 = karatsuba(a + b, c + d)  # compute (a+b)(c+d)
    z1 = karatsuba(a, c)          # compute ac
    z2 = karatsuba(b, d)          # compute bd

    return z1 * 10 ** (2 * split_position) + \
           (z0 - z1 - z2) * 10 ** (split_position) + \
           z2

karatsuba(1234, 5678)
```

    7006652

___

# Divide and Conquer

## Merge Sort

### Motivation and Example

$$
\def\arraystretch{1.5}
\begin{array}{c}
  \begin{array}{c}
    \begin{array}{|c|c|c|c|c|c|c|c|}
      \hline
      5 & 4 & 1 & 8 & 7 & 2 & 6 & 3 \\ \hline
    \end{array}
  \end{array}
  \\
  \begin{array}{cccc}
    \dArr & \quad & \quad & \dArr
  \end{array}
  \\
  \begin{array}{cc}
    \begin{array}{|c|c|c|c|}
      \hline
      5 & 4 & 1 & 8 \\ \hline
    \end{array}
    &
    \begin{array}{|c|c|c|c|}
      \hline
      7 & 2 & 6 & 3 \\ \hline
    \end{array}
  \end{array}
\end{array}
$$
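Continuing the divide-and-conquer theme, a minimal recursive merge sort might look like the following (an illustrative sketch added here; the lecture's own pseudocode may differ).

```python
def merge_sort(arr):
    """Recursively split the list in half, sort each half, then merge them."""
    if len(arr) <= 1:
        return arr

    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])

    # Merge the two sorted halves
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged

merge_sort([5, 4, 1, 8, 7, 2, 6, 3])   # -> [1, 2, 3, 4, 5, 6, 7, 8]
```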
9f652cd4e237ce2db9d7293d47f3fe9cda620b8a
4,305
ipynb
Jupyter Notebook
Divide and Conquer, Sorting and Searching, and Randomized Algorithms/Week 1.ipynb
Alice0621/Notes-for-Stanford-Algorithms
11a46c8e03cbac8e02f4350042c27bfddbc3b43e
[ "MIT" ]
null
null
null
Divide and Conquer, Sorting and Searching, and Randomized Algorithms/Week 1.ipynb
Alice0621/Notes-for-Stanford-Algorithms
11a46c8e03cbac8e02f4350042c27bfddbc3b43e
[ "MIT" ]
null
null
null
Divide and Conquer, Sorting and Searching, and Randomized Algorithms/Week 1.ipynb
Alice0621/Notes-for-Stanford-Algorithms
11a46c8e03cbac8e02f4350042c27bfddbc3b43e
[ "MIT" ]
null
null
null
27.774194
107
0.416725
true
1,144
Qwen/Qwen-72B
1. YES 2. YES
0.861538
0.808067
0.696181
__label__yue_Hant
0.260044
0.455793
### BEFORE YOU DO ANYTHING... In the terminal: 1. Navigate to __inside__ your ILAS_Python repository. 2. __COMMIT__ any un-commited work on your personal computer. 3. __PULL__ any changes *you* have made using another computer. 4. __PULL__ textbook updates (including homework answers). 1. __Open Jupyter notebook:__ Start >> Programs (すべてのプログラム) >> Programming >> Anaconda3 >> JupyterNotebook 1. __Navigate to the ILAS_Python folder__. 1. __Open today's seminar__ by clicking on 7_Numerical_computation_with_Numpy. <h1>Numerical Computation with Numpy</h1> <h1>Lesson Goal</h1> To make solutions to mathematical problems involving large data sets, multiple repeated computations and simultaneous solutions more efficient using programming. To acheive this we will cover an overview of some widely used tools from the Python Numpy package and related packages such as Matplotlib (plotting), Scipy (scientific computation) and Sympy (symbolic). ## Lesson Structure - The `array` data structure for numerical computation. - Subpackages - Array manipulation (independent study) - Introduction to plotting. - Mathematics with arrays - Mathematics with vectors - Mathematics with matrices - Useful matrix operations (independent study) - Simultaneous equations - Rates of change - Vectorised functions - Broadcasting - Reshaping and resizing ## Why are we studying this? Numerical computation is central to almost all scientific and engineering problems. There are programming languages specifically designed for numerical computation: - Fortran - MATLAB There are libraries dedicated to efficient numerical computations: - Numpy - Scipy - Sympy ... NumPy (http://www.numpy.org/) - The most widely used Python library for numerical computations. - Large, extensive library of data structures and functions for numerical computation. - Useful for perfoming operation you will learn on mathematics-based courses. Scipy (https://www.scipy.org/) - Builds on Numpy, additional functionality - More specialised data structures and functions over NumPy. Matplotlib (https://matplotlib.org/) - A library for generating plots. - Complementary to numerical computation libraries. If you are familiar with MATLAB, NumPy and SciPy provide similar functionality. Last week we covered an introduction to some basic functions of Numpy. NumPy is a very extensive library. This seminar will: - Introduce some useful functions - Briefly discuss how to search for additional functions you may need. Use online resources to search for functions you need e.g. http://stackoverflow.com/. ## Importing the NumPy module To make NumPy functions and variables available to use in our program in our programs, we need to __import__ it using. `import numpy` We typically import all modules at the start of a program or notebook. ```python import numpy as np ``` The shortened name `np` is often used for numpy. All Numpy functions can be called using `np.function()`. ## Data Structure: The Numpy `array` ### Why do we need another data structure? Python lists hold 'arrays' of data. Lists are very flexible. e.g. holding mixed data type. There is a trade off between flexibility and performance e.g. speed. Science engineering and mathematics problems often involve large amounts of data and numerous operations. We therefore use specialised functions and data structures for numerical computation. ```python ``` ## Numpy array A numpy array is a grid of values, *all of the same type*. To create an array we use the Numpy `np.array()` function. 
We can create an array in a number of ways. Let's start with something that is already familiar to you... We can give a data structure (list, tuple) as an *argument* to convert it to a numpy array: ```python a = (4.0,) b = np.array(a) print(type(a)) print(type(b)) print(b.dtype) ``` <class 'tuple'> <class 'numpy.ndarray'> float64 The method `dtype` tells us the type of the data contained in the array. __Note:__The data type can be optionally set by the user when creating the array. This can be useful when you need more control over how your data is stored in memory and on disk. Especially in cases where you’re working with large data, it’s good that you know to control the storage type. ```python c = [4.0, 5, 6.0] d = np.array(c) print(type(c)) print(type(d)) print(d.dtype) ``` <class 'list'> <class 'numpy.ndarray'> float64 ## Multi-dimensional arrays. Unlike the data types we have studied so far, arrays can have multiple dimensions. __`shape`:__ a *tuple* of *integers* giving the *size* of the array along each *dimension*. We define the dimensions of an array using square brackets ```python # 1-dimensional array a = np.array([1, 2, 3]) # 2-dimensional array b = np.array([[1, 2, 3], [4, 5, 6]]) b = np.array([[1, 2, 3], [4, 5, 6]]) print(a.shape) print(b.shape) ``` (3,) (2, 3) ```python # 2-dimensional array c = np.array([[1, 2, 3]]) # 2-dimensional array d = np.array([[1], [4]]) print(c.shape) print(d.shape) ``` (1, 3) (2, 1) ```python # 3-dimensional array c = np.array( [[[1, 1], [1, 1]], [[1, 1], [1, 1]]]) print(c.shape) c = np.array( [[[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]]]) print(c.shape) ``` (2, 2, 2) (3, 2, 2) ```python # 3-dimensional array import numpy as np c = np.array( [[[1, 1], [1, 1]], [[1, 1], [1, 1]]]) # 4-dimensional array d = np.array( [[[[1, 1], [1, 1]], [[1, 1], [1, 1]]], [[[1, 1], [1, 1]], [[1, 1], [1, 1]]]]) print(c) print(d.shape) ``` [[[1 1] [1 1]] [[1 1] [1 1]]] (2, 2, 2, 2) <a name="CreatingArray"></a> ## Creating a numpy array. We don't always have to manually create the individual elements of an array. There are several other ways to do this. For example, if you don’t know what data you want to put in your array you can initialise it with placeholders and load the data you want to use later. ```python # Create an empty matrix # The empty() function argument is the shape. # Shape: tuple of integers giving the size along each dimension. import numpy as np x = np.empty((4)) print(x) print() x = np.empty((4,4)) print(x) ``` [ 0. 0. 0. 0.] [[ 6.23042070e-307 4.67296746e-307 1.69121096e-306 7.56595733e-307] [ 1.89146896e-307 1.37961302e-306 1.05699242e-307 1.69119330e-306] [ 1.11261027e-306 1.11261502e-306 1.42410839e-306 7.56597770e-307] [ 6.23059726e-307 1.42419530e-306 7.56599128e-307 0.00000000e+000]] ```python # Create an array of elements with the same value # The full() function arguments are # 1) Shape: tuple of integers giving the size along each dimension. # 2) The constant value import numpy as np y = np.full((1,1), 3) print(y) print(y.shape) print() y = np.full((2,2), 4) print(y) print(y.shape) ``` [[3]] (1, 1) [[4 4] [4 4]] (2, 2) ```python # Create a 1D array of evenly spaced values # The arange() function arguments are the same as the range() function. # Shape: tuple of integers giving the size along each dimension. 
z = np.arange(5,10) print(z) print() z = np.arange(5, 10, 2) print(z) ``` [5 6 7 8 9] [5 7 9] ```python # Create a 1D array of evenly spaced values # The linspace() function arguments are # The lower limit of the range of values # The upper limit of the range of values (inclusive) # The desired number of equally spaced values z = np.linspace(-4, 4, 10) print(z) ``` [-4. -3.11111111 -2.22222222 -1.33333333 -0.44444444 0.44444444 1.33333333 2.22222222 3.11111111 4. ] ```python # Create an array of all zeros # The zeros() function argument is the shape. # Shape: tuple of integers giving the size along each dimension. a = np.zeros(5) print(a) print() a = np.zeros((2,2)) print(a) ``` [ 0. 0. 0. 0. 0.] [[ 0. 0.] [ 0. 0.]] ```python # Create an array of all ones b = np.ones(5) print(b) print() b = np.ones((1, 4)) print(b) ``` [ 1. 1. 1. 1. 1.] [[ 1. 1. 1. 1.]] ```python # Create a constant array # The second function argument is the constant value c = np.full(6, 8) print(c) print() c = np.full((2,2,2), 7) print(c) ``` [8 8 8 8 8 8] [[[7 7] [7 7]] [[7 7] [7 7]]] <a name="Subpackages"></a> ## Subpackages Packages can also have subpackages. The `numpy` package has a subpackage called `random`. It contains functions to deal with random variables. If the `numpy` package is imported with `import numpy as np`, functions in the `random` subpackage can be called using `np.random.function()`. ```python # Create an array filled with random values in the range 0.0 to 1.0 e = np.random.rand(1) print(e) print() e = np.random.rand(3,2,1) print(e) print() e = np.random.random((2,2)) print(e) ``` [ 0.92807895] [[[ 0.96055227] [ 0.07622129]] [[ 0.05794609] [ 0.36078051]] [[ 0.53688902] [ 0.30828508]]] [[ 0.65979051 0.06838517] [ 0.86629646 0.87640925]] ```python # Create an array filled with random integer values ``` ```python # integer values between 4 and 16 e = np.random.randint(4, 16, size=(4,4)) print(e) print() # integer values between 1 and 8 e = np.random.randint(1, 8, size=(2, 2, 2)) print(e) ``` [[14 9 8 10] [ 8 8 15 10] [12 13 11 14] [ 8 15 13 8]] [[[1 6] [7 2]] [[6 2] [4 7]]] <a id='Indexing'></a> ## Indexing into multi-dimensional arrays. We can index into an array exactly the same way as the other data structures we have studied. ```python import numpy as np x = np.array([1, 2, 3, 4, 5]) # Select a single element print(x[4]) # Select elements from 2 to the end print(x[:4]) ``` 5 [1 2 3 4] For an n-dimensional (nD) matrix we need n index values to address an element or range of elements. Example: The index of a 2D array is specified with two values: - first the row index - then the column index. Note the order in which dimensions are addressed. ```python # 2 dimensional array y = np.array([[1, 2, 3], [4, 5, 6]]) # Select a single element print(y[1,2]) # Select elements that are both in rows 1 to the end AND columns 0 to 2 print(y[1:, 0:2]) ``` 6 [[4 5]] We can address elements by selecting a range with a step: For example the index: `z[0, 0:]` selects every element of row 0 in array, `z` The index: `z[0, 0::2]` selects every *other* element of row 0 in array, `z` ```python # 2 dimensional array z = np.zeros((4,8)) # Change every element of row 0 z[0, 0:] = 10 # Change every other element of row 1 z[1, 0::2] = 10 print(z) ``` [[ 10. 10. 10. 10. 10. 10. 10. 10.] [ 10. 0. 10. 0. 10. 0. 10. 0.] [ 0. 0. 0. 0. 0. 0. 0. 0.] [ 0. 0. 0. 0. 0. 0. 0. 
0.]]

```python
z = np.zeros((4,8))

# Change the last 4 elements of row 2, in negative direction
# You MUST include a step to count in the negative direction
z[2, -1:-5:-1] = 10

# Change every other element of the last 6 elements of row 3
# in negative direction
z[3, -2:-7:-2] = 10

print(z)
```

    [[  0.   0.   0.   0.   0.   0.   0.   0.]
     [  0.   0.   0.   0.   0.   0.   0.   0.]
     [  0.   0.   0.   0.  10.  10.  10.  10.]
     [  0.   0.  10.   0.  10.   0.  10.   0.]]

```python
# 3-dimensional array
c = np.array(
    [[[2, 1, 4],
      [2, 6, 8]],
     [[0, 1, 5],
      [7, 8, 9]]])

print(c[0, 1, 2])
```

    8

Where we want to select all elements in one dimension we can use `:`.

__Exception__: If it is the last dimension, we can omit it.

```python
print(c[0, 1])
print(c[0, :, 1])
```

    [2 6 8]
    [1 6]

<a name="BooleanIndexing"></a>
### Boolean array indexing

Recall that we can use *conditional operators* to check the value of a single variable against a condition. The value returned is a Boolean True or False value.

```python
a = 4
print('a < 2:', a < 2)
print('a > 2:', a > 2)
```

    a < 2: False
    a > 2: True

If we instead use *conditional operators* to check the values of an array against a condition, the value returned is an *array* of Boolean True or False values.

```python
a = np.array([[1,2], [3, 4], [5, 6]])
idx = a > 2
print(idx)
```

    [[False False]
     [ True  True]
     [ True  True]]

Particular elements of an array can be specified by using a Boolean array as an index. Only the values of the array where the Boolean array is `True` are selected.

The variable `idx` can therefore now be used as the index to select all elements greater than 2.

```python
print(a[idx])
```

    [3 4 5 6]

To do the whole process in a single step:

```python
print(a[a > 2])
```

    [3 4 5 6]

To apply multiple conditions, use () parentheses to separate the different conditions.

Use `&` for elementwise `and`.

Use `|` for elementwise `or`.

```python
x = np.array([[4, 2, 3, 1],
              [2, 4, 2, 8],
              [2, 3, 3, 27],
              [4, 1, 4, 64]])

# elements of x that are greater than 2 AND less than 10
print(x[(2 < x) & (x < 10)])

# elements of x that are less than 2 OR greater than 10
print(x[(x < 2) | (x > 10)])
```

Multiple conditions can also be applied to a subsection of an array.
<br>For example to select elements $>2$ and $<4$ in the first row of `x` only (`x[0]`):

```python
x = np.array([[4, 2, 3, 1],
              [2, 4, 2, 8],
              [2, 3, 3, 27],
              [4, 1, 4, 64]])

print(x[0][(2 < x[0]) & (x[0] < 4)])
```

## Iterating over multi-dimensional arrays.

We can iterate over a 1D array in the same way as the data structures we have previously studied.

```python
A = np.array([1, 2, 3, 4, 5])
```

```python
for a in A:
    print(a)
```

To loop through individual elements of a multi-dimensional array, we use a nested loop for each dimension of the array.

```python
B = np.array([[1, 2, 3], [4, 5, 6]])

for row in B:
    print("-----")
    for col in row:
        print(col)
```

## Manipulating arrays

We can use many of the same operations to manipulate arrays as we use for lists.

However, it is important to note a few subtle differences in how array manipulations behave.

```python
# Length of an array
a = np.array([1, 3, 4, 17, 3, 21, 2, 12])
b = np.array([[1, 3, 4, 17], [3, 21, 2, 12]])

print(len(a))
print(len(b))
```

Note that the length is the size of the first dimension (for a 2D array, the number of rows).
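To make the point explicit, `len()` only reports the size of the first dimension; the `shape` and `size` attributes give the full picture (a short added example):

```python
b = np.array([[1, 3, 4, 17], [3, 21, 2, 12]])

print(len(b))    # 2 -> number of rows (first dimension)
print(b.shape)   # (2, 4)
print(b.size)    # 8 -> total number of elements
```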
```python
# Sort an array
a = np.array([1, 3, 4, 17, 3, 21, 2, 12])
b = np.array([[1, 3, 4, 17],
              [3, 12, 2, 12]])

# The function sorted applies to 1D data structures only
print(sorted(a))
print(sorted(b[1]))

# The method sort() applies to arrays of any size
a.sort()
b.sort()
print(a)
print(b)
```

Arrays have a __fixed size__. Technically you cannot add items to, or delete items from, an existing array. However, you can make a *new* array (which may have the same name as the original array), with the values amended as required:

#### Appending Arrays

Appending connects an array-like value (integer, list, ...) to the *end* of the original array.

By default, the arrays are flattened and appended as if joining lists. The new array is a 1D array

```python
# 2D array
a = np.array([[0], [1], [2]])
print(a)
print()

# 2D array
b = np.array([[3], [4]])
print(b)
print()

# 1D array
c = np.array([3, 4])
print(c)
print()

# integer
d = 1

print(f"original 2D array shapes: a = {a.shape}, b = {b.shape}")
print()

X = np.append(a, b)
print(X)
print(f"new array shape: {X.shape}")
print()

X = np.append(b, d)
print(X)
print(f"new array shape: {X.shape}")
print()

X = np.append(c, d)
print(X)
print(f"new array shape: {X.shape}")
print()
```

The axis on which to append an array can be optionally specified.

e.g. 2D array:
- 0: rows
- 1: columns

The arrays must have the same shape, except in the dimension corresponding to the specified axis

```python
# 2D array
a = np.array([[0], [1], [2]])
print(a)
print()

# 2D array
b = np.array([[3], [4]])
print(b)
print()

new2d = np.append(a, b, axis=0)
print(new2d)
print(f"new array shape: {new2d.shape}")
```

For example, in the cell above, if you change `axis=0` to `axis=1`, <br>you are trying to connect the side of `a` with length=3 to the side of `b` with length=2.

There are dedicated functions to simplify joining or merging arrays.
<br>If you are interested to experiment further with joining arrays you can try out the following functions:

- `np.concatenate()` : Joins a sequence of arrays along an existing axis.
- `np.vstack()` or `np.r_[]` : Stacks arrays row-wise
- `np.hstack()` : Stacks arrays horizontally
- `np.column_stack()` or `np.c_[]` : Stacks arrays column-wise

Refer to last week's seminar for how to interpret the function documentation.

It can also be useful to remove individual (single or multiple) elements.

For example, the following functions expand the locations within the array that you can change, beyond just the location at the *end* of the array.

#### Adding elements to an array

```python
# Add items to an array
# The insert() function arguments are
# 1) The array to insert to
# 2) The index of the inserted element
# 3) The value of the inserted element

a = ([1, 2, 3])
a = np.insert(a, 1, 4)
print(a)
```

Notice that, again, the output is a 1D array by default

```python
# Add items to an array
b = np.array([[1, 1], [2, 2], [3, 3]])
print(f"original array shape: {b.shape}")

b = np.insert(b, 1, [4, 4])
print(b)
print(f"new array shape: {b.shape}")
```

To preserve the multi-dimensional structure of an array, we can specify the axis on which to insert an element or range of elements.
<br> In the example below, a column is inserted at element 1 of axis 1.

```python
# Add items to an array
b = np.array([[1, 1], [2, 2], [3, 3]])
b = np.insert(b, 1, [3, 2, 1], axis=1)
print(b)
```

Notice what happens when we insert a *single* value on a specified axis

```python
b = np.insert(b, 1, 4, axis=1)
print(b)
```

This behaviour is due to a very useful property called *broadcasting*.
<br>We will study the rules governing broadcasting later in this seminar. #### Deleting items from an array ```python # Items are deleted from their position in a 1D array by default z = np.array([1, 3, 4, 5, 6, 7, 8, 9]) z = np.delete(z, 3) print(z) z = np.delete(z, [0, 1, 2]) print(z) ``` ```python # Again, axes to delete can be optionally specified: z = np.array([[1, 3, 4, 5], [6, 7, 8, 9]]) print(z) print() z = np.delete(z, 3, axis=1) print(z) print() z = np.delete(z, [0, 1, 2], axis=1) print(z) print() ``` <a name="Changing"></a> #### Changing items in an array ```python c = np.array([1, 2, 3]) c[1] = 4 print(c) ``` <a id='Plotting'></a> ## Introduction to Plotting It is often useful to represent data visually. Matplotlib is a library for creating beautiful graphics. First we will import the plotting part of `matplotlib`, renaming it `plt`. Second we will use a command which tells Python to show any graphs inside the Notebook(not in a separate window). ```python import matplotlib.pyplot as plt %matplotlib inline ``` <a id='LinePlot'></a> ### A simple line plot It is very easy to generate simlpe plots from arrays or other data structures. A single parameter is automatically plotted against the index of each element. ```python x = np.array([1,7,2,3,3,4,5]) plt.plot(x) ``` To plot one parameter against another parameter: ```python x = [1, 2, 5, 7, 12] y = [3, 2, 0.7, 0.6, 0.1] plt.plot(x, y) ``` <a id='ScatterPlot'></a> ### A scatter plot To plot a scatter plot of points instead of a connected line add a `formatstring`: ```python x = [1, 2, 5, 7, 12] y = [3, 2, 0.7, 0.6, 0.1] plt.plot(x, y, 'o') ``` ## Mathematics with arrays. Unlike lists, NumPy arrays support common arithmetic operations, such as addition of two arrays. ```python # To add the elements of two lists we need the Numpy function: add a = [1, 2, 3] b = [4, 5, 6] c = a + b print(c) c = np.add(a, b) print(c) ``` To add the elements of two arrays we can just use regular arithmetic operators. The two arrays should have the same dimensions. ```python a = np.array([1, 2, 3]) b = np.ones((1,3)) c = a + b print(c) ``` Algebraic operations are appled *elementwise* to an array. This means the function is applied individually to each element in the list. ```python a = np.array([1.0, 0.2, 1.2]) b = np.array([2.0, 0.1, 2.1]) print(a - b) print(np.subtract(a, b)) ``` ```python a = np.array([[1.0, 0.2, 1], [1.0, 0.2, 1]]) b = np.array([[2.0, 0.1, 2], [1.0, 0.2, 1]]) # Elementwise multiplication of a and b print(a * b) print() print(np.multiply(a, b)) print(),print() # Elementwise division of a and b print(a / b) print() print(np.divide(a, b)) ``` Apply a mathematical function to a range of values. Example: Evaluate $y = x^4 - 16$ between $x=-10$ and $x=10$ ```python x = np.linspace(-10, 10, 100) y = x**4 - 16 plt.plot(x, y) ``` ```python x = np.linspace(-2*np.pi, 2*np.pi, 100) y = np.cos(x) plt.plot(x, y) ``` ## Mathematics with Vectors (1D arrays) 1D arrays are a useful ways to represent vectors. A vector is a quantity with a direction and a magnitude. Let's look at a previous example for computing the dot product of two vectors. The dot product of two $n$-length-vectors: <br> $ \mathbf{A} = [A_1, A_2, ... A_n]$ <br> $ \mathbf{B} = [B_1, B_2, ... B_n]$ \begin{align} \mathbf{A} \cdot \mathbf{B} = \sum_{i=1}^n A_i B_i. \end{align} We learnt to solve this very easily using a Python `for` loop. With each iteration of the loop we increase the value of `dot_product` (initial value = 0.0) by the product of `a` and `b`. 
```python A = [1.0, 3.0, -5.0] B = [4.0, -2.0, -1.0] # Create a variable called dot_product with value, 0. dot_product = 0.0 for a, b in zip(A, B): dot_product += a * b print(dot_product) ``` Numpy makes solving the dot product even easier. We can use the Numpy function `dot()`. <br>`dot()` can also takes lists as inputs. We can also use `@` <br>`@` does not work on lists or in Python 2 Example: Computing the dot product of two vectors. ```python import numpy as np A = [9, 2, 7] B = [4, 8, 10] print(np.dot(A,B)) A = np.array([9, 2, 7]) B = np.array([4, 8, 10]) print(np.dot(A,B)) print(A @ B) ``` __Try it yourself__ In the cell below write a user-defined function that takes two lists and returns the dot product. *Hint: use the code from Seminar 4: Data Structures (shown above).* Use the magic function `%timeit` to compare the speed of the for loop with the Numpy `dot()` function for solving the dot product. ```python # Write a function for the dot product of two vectors expressed as lists # Compare the speed of your function to the Numpy function A = [1.0, 3.0, -5.0] B = [4.0, -2.0, -1.0] # Create a variable called dot_product with value, 0. dot_product = 0.0 for a, b in zip(A, B): dot_product += a * b %timeit dot_product import numpy as np %timeit np.dot(A,B) ``` ## Mathematics with Matrices (2D arrays) If you have previously studied matrices, the operations in this section will be familiar. If you have not yet studied matrices, you may want to refer back to this section once matrices have been covered in your mathematics classes. Matrix operations will not be included in the exam. 2D arrays are a convenient way to represents matrices. For example, the 2x3 matrix $$ A = \begin{bmatrix} 3 & 5 & 7\\ 2 & 4 & 6 \end{bmatrix} $$ can be represented as a 2D array. ```python A = np.array([[3, 5, 7], [2, 4, 6]]) print(A) ``` We can use shape to return the matrix dimensions. ```python print(A.shape) print(f"Number of rows is {A.shape[0]}, number of columns is {A.shape[1]}") print(f"A is an {A.shape[0]} by {A.shape[1]} matrix") ``` #### Matrix multiplication rules. If the number of __columns in A__ <br>is the same as number of __rows in B__, <br>we can find the matrix product of $\mathbf{A}$ and $\mathbf{B}$. <br> $\mathbf{C} = \mathbf{A} \cdot \mathbf{B}$ For example: <br>$\mathbf{A}$ has 3 rows and __3 columns__ <br>$\mathbf{B}$ has __3 rows__ and 1 column <br>($\mathbf{B}$ is a vector represented as a matrix) \begin{equation*} \underbrace{ \begin{bmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 9 \\ \end{bmatrix} }_{\mathbf{A} \text{ 3 rows} \text{ 3 columns}} \cdot \underbrace{ \begin{bmatrix} 10 \\ 20 \\ 30 \\ \end{bmatrix} }_{\mathbf{B} \text{ 3 rows} \text{ 1 column}} \end{equation*} So we can multiply them... 
In matrix $\mathbf{C}$, the element in __row $i$__, __column $j$__ is equal to the dot product of the $i$th __row__ of $\mathbf{A}$, $j$th __column__ of $\mathbf{B}$.m \begin{equation*} \underbrace{ \begin{bmatrix} \color{red}1 & \color{red}2 & \color{red}3 \\ 4 & 5 & 6 \\ 7 & 8 & 9 \\ \end{bmatrix} }_{\mathbf{A} \text{ 3 rows} \text{ 3 columns}} \cdot \underbrace{ \begin{bmatrix} \color{red}{10} \\ \color{red}{20} \\ \color{red}{30} \\ \end{bmatrix} }_{\mathbf{B} \text{ 3 rows} \text{ 1 column}} =\underbrace{ \begin{bmatrix} \color{red}{1 \cdot 10 \quad + \quad 2 \cdot 20 \quad + \quad 3 \cdot 30} \\ 4 \cdot 10 \quad + \quad 5 \cdot 20 \quad + \quad 6 \cdot 30 \\ 7 \cdot 10 \quad + \quad 8 \cdot 20 \quad + \quad 9 \cdot 30 \\ \end{bmatrix} }_{\mathbf{C} \text{ 3 rows} \text{ 1 column}} =\underbrace{ \begin{bmatrix} \color{red}{140} \\ 320 \\ 500 \\ \end{bmatrix} }_{\mathbf{C} \text{ 3 rows} \text{ 1 column1}} \end{equation*} \begin{equation*} \underbrace{ \begin{bmatrix} 1 & 2 & 3 \\ \color{red}4 & \color{red}5 & \color{red}6 \\ 7 & 8 & 9 \\ \end{bmatrix} }_{\mathbf{A} \text{ 3 rows} \text{ 3 columns}} \cdot \underbrace{ \begin{bmatrix} \color{red}{10} \\ \color{red}{20} \\ \color{red}{30} \\ \end{bmatrix} }_{\mathbf{B} \text{ 3 rows} \text{ 1 column}} =\underbrace{ \begin{bmatrix} 1 \cdot 10 \quad + \quad 2 \cdot 20 \quad + \quad 3 \cdot 30 \\ \color{red}{4 \cdot 10 \quad + \quad 5 \cdot 20 \quad + \quad 6 \cdot 30} \\ 7 \cdot 10 \quad + \quad 8 \cdot 20 \quad + \quad 9 \cdot 30 \\ \end{bmatrix} }_{\mathbf{C} \text{ 3 rows} \text{ 1 column}} =\underbrace{ \begin{bmatrix} 140 \\ \color{red}{320} \\ 500 \\ \end{bmatrix} }_{\mathbf{C} \text{ 3 rows} \text{ 1 column1}} \end{equation*} \begin{equation*} \underbrace{ \begin{bmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \color{red}7 & \color{red}8 & \color{red}9 \\ \end{bmatrix} }_{\mathbf{A} \text{ 3 rows} \text{ 3 columns}} \cdot \underbrace{ \begin{bmatrix} \color{red}{10} \\ \color{red}{20} \\ \color{red}{30} \\ \end{bmatrix} }_{\mathbf{B} \text{ 3 rows} \text{ 1 column}} =\underbrace{ \begin{bmatrix} 1 \cdot 10 \quad + \quad 2 \cdot 20 \quad + \quad 3 \cdot 30 \\ 4 \cdot 10 \quad + \quad 5 \cdot 20 \quad + \quad 6 \cdot 30 \\ \color{red}{7 \cdot 10 \quad + \quad 8 \cdot 20 \quad + \quad 9 \cdot 30} \\ \end{bmatrix} }_{\mathbf{C} \text{ 3 rows} \text{ 1 column}} =\underbrace{ \begin{bmatrix} 140 \\ 320 \\ \color{red}{500} \\ \end{bmatrix} }_{\mathbf{C} \text{ 3 rows} \text{ 1 column1}} \end{equation*} \begin{equation*} \underbrace{ \begin{bmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 9 \\ \end{bmatrix} }_{\mathbf{A} \text{ 3 rows} \text{ 3 columns}} \cdot \underbrace{ \begin{bmatrix} 10 \\ 20 \\ 30 \\ \end{bmatrix} }_{\mathbf{B} \text{ 3 rows} \text{ 1 column}} =\underbrace{ \begin{bmatrix} 1 \cdot 10 \quad + \quad 2 \cdot 20 \quad + \quad 3 \cdot 30 \\ 4 \cdot 10 \quad + \quad 5 \cdot 20 \quad + \quad 6 \cdot 30 \\ 7 \cdot 10 \quad + \quad 8 \cdot 20 \quad + \quad 9 \cdot 30 \\ \end{bmatrix} }_{\mathbf{C} \text{ 3 rows} \text{ 1 column}} =\underbrace{ \begin{bmatrix} 140 \\ 320 \\ 500 \\ \end{bmatrix} }_{\mathbf{C} \text{ 3 rows} \text{ 1 column1}} \end{equation*} Matrix $\mathbf{C}$ therefore has: - the same number of __rows__ as $\mathbf{A}$, - the same number of __columns__ as $\mathbf{B}$. 
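Before multiplying, this shape rule can be checked directly from the `shape` attribute. The small helper below is only an added illustration (the name `can_multiply` is not part of the seminar material):

```python
import numpy as np

def can_multiply(A, B):
    """Return True if the matrix product A (dot) B is defined."""
    # the number of columns in A must equal the number of rows in B
    return A.shape[1] == B.shape[0]

A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])   # 3 x 3
B = np.array([[10], [20], [30]])                  # 3 x 1 (column vector)

print(can_multiply(A, B))   # True  -> A.dot(B) is a 3 x 1 matrix
print(can_multiply(B, A))   # False -> B has 1 column but A has 3 rows
```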
```python
# In the equation above, vector B must be represented as a column vector
A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

# In Python, 1D arrays are ALWAYS represented horizontally
# This does not define the array as a row vector
B = np.array([10, 20, 30])

# For example, C is represented horizontally
C = np.dot(A,B)
print(C)
```

As an example, if $\mathbf{B}$ were a row vector:

\begin{equation*}
\underbrace{
\begin{bmatrix}
1 & 2 & 3 \\
4 & 5 & 6 \\
7 & 8 & 9 \\
\end{bmatrix}
}_{\mathbf{A} \text{ 3 rows} \text{ 3 columns}}
\cdot
\underbrace{
\begin{bmatrix}
10 & 20 & 30 \\
\end{bmatrix}
}_{\mathbf{B} \text{ 1 row} \text{ 3 columns}}
\end{equation*}

We *cannot* find the dot product $\mathbf{A}\cdot\mathbf{B}$.
<br>The number of columns in $\mathbf{A}$ __is not__ the same as the number of rows in $\mathbf{B}$.

We can swap the order of $\mathbf{A}$ and $\mathbf{B}$.
<br>The multiplication is now possible.
<br>However, the outcome is different.

\begin{equation*}
\underbrace{
\begin{bmatrix}
10 & 20 & 30 \\
\end{bmatrix}
}_{\mathbf{B} \text{ 1 row} \text{ 3 columns}}
\cdot
\underbrace{
\begin{bmatrix}
\color{red}1 & \color{blue}2 & \color{green}3 \\
\color{red}4 & \color{blue}5 & \color{green}6 \\
\color{red}7 & \color{blue}8 & \color{green}9 \\
\end{bmatrix}
}_{\mathbf{A} \text{ 3 rows} \text{ 3 columns}}
=\underbrace{
\begin{bmatrix}
\color{red}{10 \cdot 1 + 20 \cdot 4 + 30 \cdot 7} &
\color{blue}{10 \cdot 2 + 20 \cdot 5 + 30 \cdot 8} &
\color{green}{10 \cdot 3 + 20 \cdot 6 + 30 \cdot 9} \\
\end{bmatrix}
}_{\mathbf{C} \text{ 1 row} \text{ 3 columns}}
=\underbrace{
\begin{bmatrix}
\color{red}{300} & \color{blue}{360} & \color{green}{420} \\
\end{bmatrix}
}_{\mathbf{C} \text{ 1 row} \text{ 3 columns}}
\end{equation*}

In Python, normal matrix multiplication rules apply to 2D arrays.
<br>This holds even if the length of one of the dimensions of the 2D array is equal to 1.

```python
A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

# 2D array
X = np.array([[10, 20, 30]])

# 2D array
Y = np.array([[10], [20], [30]])

print(np.dot(X, A)) #, print(np.dot(A, X))
print(np.dot(A, Y)) #, print(np.dot(Y, A))
```

However, the orientation with which 1D arrays are shown (always horizontal) does not impact their allowable placement in an expression. Python will automatically treat the 1D array as a row or a column, whichever is appropriate.

```python
A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

# 1D array
Z = np.array([10, 20, 30])

print(np.dot(Z, A))
print(np.dot(A, Z))
```

### Useful Matrix Operations

#### Inverse of a square matrix

```python
A = np.array([[1,2], [3, 4]])
Ainv = np.linalg.inv(A)

print(f"A = \n {A}")
print(f"Inverse of A = \n {Ainv}")
```

#### Determinant of a square matrix

```python
A = np.array([[1,2], [3, 4]])
Adet = np.linalg.det(A)

print(f"A = \n {A}")
print(f"Determinant of A = {round(Adet, 2)}")
```

#### Transpose of a matrix

- The columns of the transpose matrix are the rows of the original matrix.
- The rows of the transpose matrix are the columns of the original matrix.

```python
a = np.zeros((2,4))
print(a)
print()
print(a.T)
print()
#or
print(np.transpose(a))
```

#### Generate Identity Matrix

```python
I = np.eye(2)
print(I)
print()

I = np.eye(4)
print(I)
```

<a id='SimultaneousEqtns'></a>
### Useful Matrix Applications: Solving Simultaneous Equations

A mathematical problem that arises in a wide range of engineering applications is the simultaneous solution of a set of n linear equations with n unknown values.

Matrices are a widely used and popular technique for solving this type of problem.
Solving problems of this type __computationally__ is particularly useful where:

- we need to solve a large number of equations simultaneously e.g.
    - the force distribution in a truss structure
    - the electrical current flowing in and out of each node in a circuit.
- we need to solve the equations over multiple steps<br>e.g. observing the progression of a system over time.

### Systems of Equations Example: An Electrical Circuit

In electrical systems, network theory is the study of how to solve circuit problems.
<br>The voltage and current in a closed circuit obey two simple rules:

- Kirchhoff's current law (first law)
- Kirchhoff's voltage law (second law)

#### Kirchhoff's Current Law
The algebraic sum of ALL the currents entering and leaving a junction must be equal to zero.

$\sum i_{in} - \sum i_{out} = 0 $

#### Kirchhoff's Voltage Law
For a closed loop series path the algebraic sum of all the *voltages* and *voltage drops* around any closed loop in a circuit is equal to zero.

$\sum E - \sum V = 0 $

#### Electrical Elements Obey Ohm's Law
The current through a conductor (I, units amps) is the voltage measured across the conductor (V, units volts) divided by the resistance (R, units Ohms).

$$V = IR$$

A Wheatstone bridge circuit is a very widely used circuit in scientific equipment. It is found in strain gauges, heart rate monitors, wind tunnels....etc. Fundamentally, it is used to measure resistance.

A Wheatstone bridge circuit has multiple loops and nodes.

It has one voltage source, $E$.

It has six resistors $R_1, R_2, R_3, R_4, R_5, R_6$.
<br> *Let's say we want to find the current in each branch $i_{1-6}$...*

The circuit has:
- 4 loops (A-D)
- 4 nodes (a-d)

We can use Kirchhoff's laws and Ohm's law with $E$ and $R_{1-6}$.

$\sum i_{in} - \sum i_{out} = 0 $

$\sum E - \sum V = 0 $

$V = IR$

6 unknown values ($i_{1-6}$), 6 simultaneous equations.

__Loop A:__ &nbsp; $R_1 i_1 + R_2 i_2 + R_5 i_5 = E$<br>
__Loop B:__ &nbsp; $R_4 i_4 - R_5 i_5 + R_6 i_6 = 0$<br>
__Loop C:__ &nbsp; $R_2 i_2 - R_3 i_3 + R_4 i_4 = 0$

__Node a:__ &nbsp; $i_2 - i_4 - i_5 = 0$<br>
__Node b:__ &nbsp; $i_1 - i_2 - i_3 = 0$<br>
__Node c:__ &nbsp; $i_5 + i_6 - i_1 = 0$<br>

We can express these equations as matrices:

\begin{equation*}
\underbrace{
\begin{bmatrix}
R_1 & R_2 & 0 & 0 & R_5 & 0 \\
0 & 0 & 0 & R_4 & -R_5 & R_6 \\
0 & R_2 & -R_3 & R_4 & 0 & 0 \\
0 & 1 & 0 & -1 & -1 & 0 \\
1 & -1 & -1 & 0 & 0 & 0 \\
-1 & 0 & 0 & 0 & 1 & 1 \\
\end{bmatrix}
}_{\mathbf{R}}
\cdot
\underbrace{
\begin{bmatrix}
i_1 \\
i_2 \\
i_3 \\
i_4 \\
i_5 \\
i_6 \\
\end{bmatrix}
}_{\mathbf{I}}
=\underbrace{
\begin{bmatrix}
E \\
0 \\
0 \\
0 \\
0 \\
0 \\
\end{bmatrix}
}_{\mathbf{V}}
\end{equation*}

```python
# Let's assign some values to our known variables:
E = 5
R1, R2, R3, R4, R5, R6 = 300, 500, 1000, 500, 1000, 500

R = np.array([[R1, R2, 0, 0, R5, 0 ],
              [0, 0, 0, R4, -R5, R6],
              [0, R2, -R3, R4, 0, 0 ],
              [0, 1, 0, -1, -1, 0 ],
              [1, -1, -1, 0, 0, 0 ],
              [-1, 0, 0, 0, 1, 1]])

V = np.array([E, 0, 0, 0, 0, 0])
```

We can find $\mathbf{I}$ by matrix multiplication:

\begin{align*}
\mathbf{R}\cdot\mathbf{I}&=\mathbf{V}\\
\end{align*}

but we have to rearrange the equation. There is no matrix division, so we multiply both sides by the inverse of $\mathbf{R}$:

\begin{align*}
\mathbf{I} &=\mathbf{R^{-1}}\cdot\mathbf{V}\\
\end{align*}

```python
R_inv = np.linalg.inv(R)
I = np.dot(R_inv, V)
print(I)
```

Numpy conveniently solves linear matrix equations of the form `ax = b` so we don't need to rearrange.
```python I = np.linalg.solve(R, V) print(I) ``` You can check your answer by confirming that the dot product, $\mathbf{R}\cdot\mathbf{I}$ is equal to $\mathbf{V}$. ```python print(R @ I) print(np.around(R @ I, 2)) ``` This process is a fast and simple way to solve simultaneous linear equations, for example when compared to eilimination methods. <a id='DiffEqtns'></a> ### Systems of Equations Example: Rates of Change A well known problem concerns an imaginary country with three cities, A, B and C. <br>At the end of each year, a fraction, $n$ of the people must leave each city. <br>Half of the people leaving a city move to one of the two options, and half to the other. This gives us a system of simultaneous equations \begin{align*} \Delta A = \frac{Bn}{2} + \frac{Cn}{2} - An \\ \Delta B = \frac{An}{2} + \frac{Cn}{2} - Bn\\ \Delta C = \frac{An}{2} + \frac{Bn}{2} -Cn \\ \end{align*} Matrices are a convenient way to represent this problem. \begin{equation*} \begin{bmatrix} \Delta A \\ \Delta B \\ \Delta C \\ \end{bmatrix} = \underbrace{ \begin{bmatrix} -n & \frac{n}{2} & \frac{n}{2} \\ \frac{n}{2} & -n & \frac{n}{2} \\ \frac{n}{2} & \frac{n}{2} & -n \\ \end{bmatrix} }_{\mathbf{migration}} \cdot \underbrace{ \begin{bmatrix} A \\ B \\ C \\ \end{bmatrix} }_{\mathbf{population}} \end{equation*} Assigning values to $\mathbf{M}$ and $\mathbf{P}$. \begin{equation*} \begin{bmatrix} \Delta a \\ \Delta b \\ \Delta c \\ \end{bmatrix} = \underbrace{ \begin{bmatrix} -0.2 & 0.1 & 0.1 \\ 0.1 & -0.2 & 0.1 \\ 0.1 & 0.1 & -0.2 \\ \end{bmatrix} }_{\mathbf{M}} \cdot \underbrace{ \begin{bmatrix} A \\ B \\ C \\ \end{bmatrix} }_{\mathbf{P}} \end{equation*} By solving the problem computationally, we can quickly solve multiple iterations. In the example below there are two functions. The first, `pop_change` computes the rate of change of the population. The second, `pop_plot`: - calls `pop_change` inside of it - generates a line plot showing the population change over time ```python # Initial population P0 = np.array([190.0, 500.0, 30.0]) # Matrix migration or M migration = np.full((3, 3), 0.01) np.fill_diagonal(migration, -0.02) def pop_change(P, M=migration): "Computes the change in population of three cities" return np.dot(P, M) ``` ```python def pop_plot(iterations, population): "Plots the population over the number of iterations specified" # make 2D array to append values to a column for each city record_pop = population.reshape((1,3)) for i in range(iterations): # Increment population size population = population + pop_change(population) # Save the population data record_pop = (np.append(record_pop, population.reshape((1,3)), # make 2D array axis = 0)) plt.plot(record_pop[:,0] , label="A") plt.plot(record_pop[:,1] , label="B") plt.plot(record_pop[:,2] , label="C") plt.xlabel("Years") plt.ylabel("Population") plt.legend(); #plt.plot(record_pop) #print(record_pop) ``` ```python # Plot the change in the populations of the cities over time pop_plot(iterations=120, population=P0) print(f"total population = {np.sum(P0)}") ``` In the example above the array `migration` or `M` just describes the change in a quantity; the population of each city. When a change in quantity is measured against a change in __time__ it describes the rate of change or flow of that quantity. Rates are expressed as differential equations: the change in one parameter in relation to a change in another parameter. e.g. 
- velocity = change in position / change in time - acceleration = change in velocity / change in time The Scipy library has easy to use tools for estimating the solutions to systems of (or single) differential equations like the example we just looked at. The function `odeint` solves first order differential equations. ```python from scipy.integrate import odeint ``` The function `odeint` takes a user-defined function as an argument. This input function should simply define the derivative (e.g. rate of change) you want to solve. For example, a function to calculate the derivative: $\frac{dx}{dt} = t - x$ with initial value $x(0) = 1$ we can use: ```python def dx_dt(x,t): return t - x ``` Two additional arguments are needed: - the value(s) of t at which to evaluate x, starting with the initial value - the initial value - (if `dx_dt` takes any other argumemts they are entered as a tuple as the third argument) ```python ts = np.linspace(0,5,100) # the value(s) of t at which to evaluate x x0 = 1.0 # the initial value # odeint returns x at each value of t xs = odeint(dx_dt, x0, ts) ``` The function `odeint` returns an nx1 array (2D column). <br>To plot the output we have to "flatten" it to a 1D array. ```python # The function #print(xs) xs = np.array(xs).flatten() #print(xs) # plot the function plt.xlabel("x") plt.ylabel("t") plt.plot(ts, xs); ``` We can use `odeint` to solve mulitple ordinary differential equations simultaneously (systems of ODEs). In this case the value of each variable at the initial position should be input as a single data structure. The function should output the rate of change of each variable as a single list. For example we can use `odeint` to solve the population problem from earlier. The function dP_dt is exactly the same as the function `change_pop` that we used earlier. The only difference is that `dP_dt` returns the individual rates of chnage of each population as a list. ```python migration = np.full((3, 3), 0.01) np.fill_diagonal(migration, -0.02) # Initial population P0 = np.array([190.0, 500.0, 30.0]) # Time steps to evaluate ts = np.arange(0, 150) def dP_dt(P, t, M): dP_dt = np.dot(P, M) return [dP_dt[0], dP_dt[1], dP_dt[2]] Ps = odeint(dP_dt, P0, ts, args=(migration,)) plt.plot(ts, Ps[:,0], label="A") plt.plot(ts, Ps[:,1], label="B") plt.plot(ts, Ps[:,2], label="C") plt.xlabel("Time") plt.ylabel("Population") plt.legend(); ``` For comparison, here is the original solution, generated using the user-defined `pop_plot`function. ```python pop_init = np.array([190.0, 500.0, 30.0]) pop_plot(iterations=150, population=pop_init) ``` ## Curve Fitting When using functions to produce mathematical models of practical problems we often need to 'fit' the function to experimental data. Python has several tools to find the curve which best describes the data set. An example of this might be the data read from a sensor during an experiment. In the experiment shown below, the bend in the arm is used to control the LED light. The sensor used to detect the bend in the arm is made from conductive paint. The resistance of the paint changes with the bend in the arm. This change in resistance is detected as a voltage. A microcontroller adjusts the light in repsonse to the voltage. The circuit on which the sensor is based is very similar to the Wheatsone bridge circuit. When calibrating a sensor like this, we need to find a relationship between the bend in the arm and the change in resisitance. After that the voltage alone can be used to 'read' the bend in the arm. 
This section will cover some useful Numpy features for curve fitting. ### Root mean square error (RMSE) One way to quantify the fit between data and a model is to compute the RMSE. __Error/residual:__ the difference between the observed value $y_i$ at $x$ and the modeled value $a(x_i)$ $$ \varepsilon_i = a(x_i) - y_i $$ For $N$ data points, the *sum* of the squared errors is $$S = \sum_{i=1}^{N}\varepsilon_i^2$$ The RMSE $E$ is computed as $$E=\sqrt{\frac{1}{N}S}=\sqrt{\frac{1}{N}\sum{\varepsilon_i^2}}$$ The RMSE can be computed for any model and any data set as an indicator of the "goodness of fit". We will use it to assess the fit of the curves that we generate using Numpy. The optimisation approach that involves minimising the sum of the squared errors (minimising the RMSE) is often referred to as a *least squares* approach. ```python # Example data set x = [0.000000000000000000e+00, 1.052631578947368363e+00, 2.105263157894736725e+00, 3.157894736842105310e+00,4.210526315789473450e+00, 5.263157894736841591e+00, 6.315789473684210620e+00,7.368421052631578760e+00,8.421052631578946901e+00,9.473684210526315042e+00,1.052631578947368318e+01,1.157894736842105132e+01,1.263157894736842124e+01,1.368421052631578938e+01,1.473684210526315752e+01, 1.578947368421052566e+01,1.684210526315789380e+01,1.789473684210526372e+01,1.894736842105263008e+01,2.000000000000000000e+01] y = [7.445192947240600745e+01, 4.834835792411828947e+01, 6.873305436340778840e+01, 5.979576407972768948e+01,6.404530772390434379e+01,6.090548420541189500e+01, 7.157546008677115879e+01, 8.620253336570679892e+01, 1.138154622045899913e+02, 8.493639813028174501e+01, 9.783457330550828601e+01, 1.082064229481453594e+02, 1.063876210674365979e+02, 1.001971993955305038e+02, 1.061496321788094832e+02, 1.279575585921491836e+02, 1.556956405962417875e+02, 1.584164804859289859e+02, 1.753888794716459358e+02, 1.980941276403034124e+02] x = np.array(x) y = np.array(y) ``` <a id='FittingLinear'></a> ### Fitting a Linear Function A straight line can be fitted through a data set using the `linregress` function from `scipy.stats`. ```python from scipy.stats import linregress ``` Example: `linregress(x, y)` returns the components of an equation of the form: $$ y = mx + c $$ It returns five values. <br>The first two are the gradient $m$ and y-intercept $c$ of the line. <br>The last three are statistical parameters that are outside of the scope of this class. ```python m, c, r_value, p_value, std_err = linregress(x, y) yfit = float(m) * x + c # plot a scatter plot by setting 'o' as the marker plt.plot(x, y, 'o', label='experiment data') # plot the fitted linear function plt.plot(x, yfit, label='fit'); ``` <a id='RMSE'></a> The "goodness of fit" than then be assessed by calculating the RMSE. Error, $\varepsilon_i = a(x_i) - y_i$ RMSE, $E=\sqrt{\frac{1}{N}\sum{\varepsilon_i^2}}$ ```python # error e = (yfit - y) # RMSE rmse = np.sqrt(np.sum(e**2)/ len(y)) ``` <a id='Title'></a> #### Adding a title We can then a title. ```python plt.plot(x, y, 'o', label='experiment data') plt.plot(x, yfit, label='fit') # print the RMSE as the title plt.title('RMSE: '+str(rmse)) ``` <a id='FittingPolynomial'></a> ### Fitting a polynomial function A polynomial curve can be fitted through a data set using the `polyfit` function from `numpy`. The function arguments are: - the two variables to analyse - the order of the polynomial The function returns: <br>the coefficients of each term of the polynomial. <br> e.g. 
if we request a polynomial of order 2, we will have terms for, - x^2, x and x^0 and we should expect 3 return arguments. ```python c, d, e = np.polyfit(x, y, 2) f, g, h, i = np.polyfit(x, y, 3) j, k, l, m, n = np.polyfit(x, y, 4) ``` The function `poly1D` can then be used to find the solution to y for all x, using the polynomial generated. ```python yfit2 = np.poly1d([c, d, e])(x) yfit3 = np.poly1d([f, g, h, i])(x) yfit4 = np.poly1d([j, k, l, m, n])(x) # Which produces the same output as yfit2 = c*x**2 + d*x + e yfit3 = f*x**3 + g*x**2 + h*x + i yfit4 = j*x**4 + k*x**3 + l*x**2 + m*x + n ``` The RMSE can be found as before. <br>__NOTE__ It is neater to write a function to do this. ```python # error rmse2 = round(np.sqrt(np.sum((yfit2 - y)**2)/ len(y)), 2) rmse3 = round(np.sqrt(np.sum((yfit3 - y)**2)/ len(y)), 2) rmse4 = round(np.sqrt(np.sum((yfit4 - y)**2)/ len(y)), 2) ``` <a id='Legend'></a> #### Adding a legend We can add a legend to label each line on the graph. <br>Add a `label` when creating each plot. <br>Finally add the `legend` to the plot. <br>The argument `loc='best'` chooses the 'best' location for the legend to avoid obscuring your plotted data. ```python # plot a scatter plot by setting 'o' as the marker plt.plot(x, y, 'o', label='experiment data') # plot the fitted 2nd order function plt.plot(x, yfit2, label= f'2nd order RMSE{rmse2}') # plot the fitted 3rd order function plt.plot(x, yfit3, label= f'3rd order RMSE{rmse3}') # plot the fitted 4th order function plt.plot(x, yfit4, label= f'4th order RMSE{rmse4}') # add a legend plt.legend(loc='best') ``` <a id='FittingArbitrary'></a> ### Fitting an arbitrary function There are many Python functions available for curve fitting. The function `curve_fit` can be used to fit a user-defined fuction. We first define a function that we want to fit. __Example 1__ ```Python def linear(x, a, b): return a * x + b ``` >`curve_fit(func, x, y)` will find values of `a` and `b` that fits the curve $y = ax + b$ to our data. __Example 2__ ```Python def exponential(x, a, b): y = a * np.exp(b*x) return y ``` >`curve_fit(func, x, y)` will find values of `a` and `b` that fits the curve $y = a e^{bx}$ to our data. `curve_fit` then returns two arguments: - an array with the optimised parameters - the covariance of the parameters (a statistical measure of accuracy) ```python from scipy.optimize import curve_fit def exponential(x, a, b): y = a * np.exp(b*x) return y opt, cov = curve_fit(exponential, x, y) # value of x for all x investigated yfit = exponential(x, *opt) plt.plot(x, y, 'o', label='experiment data') plt.plot(x, yfit, 'r', label='fit') plt.legend(loc='best') rmse = np.sqrt(np.sum((yfit - y) ** 2) / len(y)) plt.title('RMSE: '+str(rmse)); print(f"y = {round(opt[0],2)} * e**({round(opt[1],2)}*x)") ``` <a name="Vectorising"></a> ## Vectorising Functions Numpy functions applied to a single array, will be performed on each element in the array. The function takes an array of values as an input argument. ```python print(np.sqrt(a)) print(a ** (1/2)) ``` For example, we can apply trigonometric functions, elementwise, to arrays, lists and tuples. ```python x = np.array([0.0, np.pi/2, np.pi, 3*np.pi/2]) y = [0.0, np.pi/2, np.pi, 3*np.pi/2] z = (0.0, np.pi/2, np.pi, 3*np.pi/2) print(np.sin(x)) print(np.cos(y)) print(np.tan(z)) ``` An array of values does not work as an input for all functions. 
```python def func(x): if x < 0: f = 2 * x else: f = 3 * x return f x = np.array([2, -2]) # y = func(x) # Run this line after removing the # to see the error generated ``` This doesn't work because Python doesn't know what to do with the line `if x < 0` when `x` contains many values. For some values of `x` the `if` statement may be `True`, for others it may be `False`. A simple way around this problem is to vectorise the function. We create a new function that is a *vectorized* form of the original function. The new function and can be called with an array as an argument. ```python funcvec = np.vectorize(func) print(funcvec(x)) ``` ### Functions as function arguments Functions that receive vectorised arguments are automatically applied to all elements of array inputs. This is better exlpained with an example. Recall the function `is_positive` from Seminar 5: Functions ```python def is_positive(f, a): "Checks if the function value f(x) is positive" return f(a) > 0 # Apply is_positive to a non-vectorised function print(is_positive(func, -3)) print() # Apply is_positive to a vectorised function print(x) is_positive(funcvec, x) ``` <a name="Broadcasting"></a> ## Broadcasting Another source of incompatibility that you are likely to encounter is in trying to use arrays with different shapes for arithmetic operations. For example, you have one array that larger and another array that is smaller. <br>You may want to use the smaller array multiple times to perform an operation (such as a sum, multiplication, etc.) on the larger array. This is achieved using the broadcasting mechanism. The arrays can be broadcast together if all dimensions of the arrays are *compatible* ##### Dimensions are compatible when they are equal. Consider the example below. `x` and `y` are the same shape, so we can addd them. ```python x = np.ones((3,4)) print(x.shape) y = np.full((3,4), 4) print(y.shape) # Add `x` and `y` x + y ``` ##### Dimensions are compatible when the length of at least one of them is equal to 1. ```python # 1 x 3 array a = np.arange(1,4) # integer b = 2 # 1 x 3 array result = a * b print(a) print() print(b) print() print(result) ``` In the dimension where `b` has size 1 and `a` has a size greater than 1 (i.e. 3), `b` behaves as if it were copied along that dimension. ```python # 4 x 1 array x = np.array([[0], [10], [20], [30]]) # 1 x 3 array y = np.ones(3) # 4 x 3 array a = x * y print(x) print() print(y) print() print(a) ``` ```python # a: 4 x 3 array (see cell above) # 1 x 3 array b = np.arange(3) # 4 x 3 array result = a + b print(a) print() print(b) print() print(result) ``` The size of the output array is the maximum size along each dimension of the input arrays. The 4x3 and 1x4 arrays shown in the cell below cannot be broadcast together. <br>The dimensions 3 and 4 are incompatible. Note that if the array dimensions are incompatible, it will generate a ValueError. Recall, the function `np.insert` that we used earlier. An integer (length=1) can be broadcast into an array of any size. ```python # Add items to an array b = np.array([[1, 1], [2, 2], [3, 3]]) b = np.insert(b, 1, 4, axis=1) print(b) ``` Here are some examples of practical applications of broadcasting. ### Broadcasting Example: Calorie Calculator Let's say we have a large data set; each datum is a list of parameters. Example datum: a type of food and the the amount of fat, protein and carbohydrate in a serving of that food. 
Our data set of food nutrients might look something like the table below:

|Food (100g) |Fat(g)|Protein (g)|Carbohydrate (g)|
|------------|------|-----------|----------------|
|Almonds     |    49|         21|              22|
|Peas        |     0|          5|              14|
|Avocado     |    15|          2|               9|
|Kale        |     1|          3|              10|

By applying the following scaling factors, we can calculate the number of calories in a food type due to fat, protein and carbohydrate:
- fat: 9 cal/g
- protein: 4 cal/g
- carbohydrate: 4 cal/g

Using what we have studied so far, we could convert the table to calories using a loop:

```python
nutrients = np.array([[49, 21, 22],
                      [0, 5, 14],
                      [15, 2, 9],
                      [ 1, 3, 10]])

cal_convert = np.array([9, 4, 4])

calories = np.empty((4,3))

for index, value in enumerate(nutrients):
    calories[index] = value * cal_convert
```

However, it is faster and more concise to broadcast the two arrays together:

```python
nutrients = np.array([[49, 21, 22],
                      [0, 5, 14],
                      [15, 2, 9],
                      [ 1, 3, 10]])

cal_convert = np.array([9, 4, 4])

calories = nutrients * cal_convert
print(calories)
```

### Broadcasting Example: Vector Quantisation Algorithm

This is a simple algorithm used for categorisation.
<br>It determines which category a data point should belong to from its closest proximity to a set of values representing the possible categories.
<br>Each value represents the mean of the corresponding category.
<br>For example, colour quantisation, used in image processing, reduces the number of distinct colours used in an image, while maintaining visual similarity to the original image.

(Image: colour quantisation example. CC BY-SA 3.0, https://commons.wikimedia.org/w/index.php?curid=1477223)

<br>In the plot below, each of the circles represents the mean height and weight of athletes grouped by type.
<br>The square represents the height and weight of an athlete to be classified.

To find the closest point:
1. Use broadcasting to find the difference between the position of the __square__ and the position of each __circle__ in the x and y directions.
<br>
1. Find the distance, $d$ from the square, $s$ to each circle, $c$ using:
<br>$d = \sqrt{(x_{c}-x_{s})^2 + (y_{c}-y_{s})^2}$
<br>
1. Choose the group corresponding to the minimum distance, $d_{min}$

```python
athlete = np.array([111.0,188.0])

categories = np.array([[102.0, 203.0],
                       [132.0, 193.0],
                       [45.0, 155.0],
                       [57.0, 173.0]])

# 1. broadcast
diff = categories - athlete

# 2. distance to each point (magnitude of values along axis 1 for each datum)
# dist = np.linalg.norm(diff,axis=1)
dist = np.sqrt(np.sum(diff**2,axis=1))

# 3. which group?
nearest = np.argmin(dist)
print(nearest)
```

The nearest group is index 0 of the array `categories`.
<br>Based on mean height and weight, the athlete is most likely to be a basketball player.

## Resizing and Reshaping

We can change the size of an array in each dimension.

For example, you may want to edit the length of a dimension of an array to make it compatible with another array for broadcasting.

### Resizing

We can resize an array.
<br>If the new array size is smaller, the original array will be trimmed to the new size.

```python
a=np.array([[0,1],
            [2,3]])

b = np.resize(a,(2,1))
print(b)
print()

a.resize(2,1)
print(a)
```

If the new array size is larger, the extra space can either be filled with repeating copies of the original array.

```python
a=np.array([[0,1],
            [2,3]])

b = np.resize(a,(4,4))
print(b)
```

or with zeros.

```python
a=np.array([[0,1],
            [2,3]])

a.resize(4,4)
print(a)
```

### Reshaping

You can re-shape the array.
The new array must have the __same number of elements__ as the original array.

__Example:__ Using range to create a column vector:

```python
x = np.arange(0, 31, 10)
y = x.reshape((4,1))

# which can be written in one line as:
z = np.arange(0, 31, 10).reshape((4,1))

print(x)
print()
print(y)
print()
print(z)
```

## Review Exercises

The following exercises are provided to practise what you have learnt in today's seminar.

The extension exercises cover topics that will not be included in the exam but may be useful to you if you are familiar with using matrices and want to practise matrix manipulation using Python. If you have not yet studied matrices, you can come back to this section when the mathematics used is more familiar to you.

### Review Exercise: 1D Arrays - Indexing and changing values.

In the cell below:
1. Create an array of zeros with length 25. <br> <a href="#Zeros">Jump to Creating a Numpy Array of Zeros.</a>
2. Change the first 10 values to 5. <br> <a href="#Changing">Jump to Changing items in an array.</a>
3. Change the next 10 values to a sequence starting at 12 and increasing in steps of 2 up to 30 - do this with one command.
4. Change the final 5 values to 30.

```python
# 1D array
x = np.zeros(25)
x[: 10] = 5
x[10:20] = np.arange(12, 31, 2)
x[20: ] = 30
print(x)
```

```python
# Example Solution
# 1
a = np.zeros(25)
#2
a[: 10] = 5
#3
a[10: 20] = range(12, 31, 2)
#4
a[20:] = 30
print(a)
```

### Review Exercise: 1D Arrays - Correct the error

The code below is supposed to:
- change the last 5 values of the array x to the values [50, 52, 54, 56, 58]
- print the result

There are some errors in the code. Remove the comment markers and run the code to see the error message. Then fix the code and run it again.

```python
x = np.ones(10)
x[5:] = np.arange(50, 59, 2)
print(x)
print("There are some errors in the code")
```

```python
# Example Solution
import numpy as np
x = np.ones(10)
x[-5:] = range(50, 59, 2)
print(x)
```

### Review Exercise: Creating 2D Arrays

Print:

__(A)__ An array with 3 rows and 2 columns (3 by 2), where all elements are 1.0. <a href="#CreatingArray">Jump to Creating a Numpy Array.</a>

__(B)__ An array of randomly generated integers, in the range 1 to 10, with 4 rows and one column (4 by 1). <a href="#Subpackages">Jump to Subpackages.</a>

__(C)__ An array with 2 rows and 2 columns (2 by 2), where all elements are 4.

```python
# 2D arrays
a = np.full((3,2), 1.0)
print(a)
b = np.random.randint(1, 10, size=(4,1))
print(b)
c = np.full((2,2), 4)
print(c)
```

```python
### Example Solution
# A
print(np.ones((3,2)))

# B
print(np.random.randint(1, 10, size=(4,1)))

# C
print(np.full((2,2), 4))
```

### Review Exercise: 2D Arrays - Indexing and Basic Plotting

__(A)__ In the cell below, for the array `x`, write code to print:
* the first row of `x`
* the first column of `x`
* the third row of `x`
* the four values in the upper right hand corner of `x`
* the four values at the center of `x`
* the last two columns (third and fourth column) of `x`

<a href="#Indexing">Jump to Indexing into multi-dimensional arrays.</a>

__(B)__ You have just *printed* the last two columns of `x`.
<br>Now plot the third column of `x` against the fourth column of `x` as a line graph.
<a href="#Plotting">Jump to Introduction to Plotting.</a> ```python # 2D array x = np.array([[4, 2, 1, 1], [2, 4, 2, 8], [2, 4, 3, 27], [4, 1, 4, 64]]) print(x[0]) print(x[0:,0]) print(x[2]) print(x[0:2, 2:4]) print(x[1:3, 1:3]) print(x[0:4, 2:4]) plt.plot(x[0:4, 2:4]) ``` ```python # Example Solution x = np.array([[4, 2, 1, 1], [2, 4, 2, 8], [2, 4, 3, 27], [4, 1, 4, 64]]) print(x[0]) print(x[:, 0]) print(x[2]) print(x[0:2, 2:4]) print(x[1:3, 1:3]) print(x[:, 2:4]) plt.plot(x[:, 2], x[:, 3]) ``` ### Review Exercise: Boolean array indexing Print the values of array `x` in the cell below that are... __(A)__ ...greater than 3. __(B)__ ...greater than 2 and less than 8. __(C)__ ...less than 2 or greater than 8. __(D)__ ...a multiple of `y`. <br> __(E)__ Change all odd numbers in x to 0 <a href="#BooleanIndexing">Jump to Boolean Array Indexing.</a> ```python x = np.arange(10) y = 2 idx= x >3 for n in x: if n < 2 or n > 8: print(n) for d in x: if d > 2 and d < 8: print(d) for a in x: if a % 2 == 0: print(a) else: print(0) ``` ### Review Exercise: Curve Fitting Using the example data set `a`,`b`: __(A)__ Plot a scatter graph of `a` against `b`, with `a` on the horizontal axis and `b` on the vertical axis. <br><a href="#ScatterPlot">Jump to Scatter plot.</a> __(B)__ Fit __three different__ curves to the data. You can choose from: - a linear relationship. <a href="#FittingLinear">Jump to Fitting a linear relationship.</a> - a polynomial curve. <a href="#FittingPolynomial">Jump to Fitting a polynomial curve.</a> - an arbitrary function <a href="#FittingArbitrary">Jump to Fitting an arbitrary curve.</a> __(C)__ Plot each curve as a line of on the graph. <br><a href="#LinePlot">Jump to Line plot.</a> __(D)__ Find the route mean square error (RMSE) of each curve relative to the data set. <br><a href="#RMSE">Jump to Route mean square error.</a> __(E)__ Display the RMSE of each curve as a figure legend. <br><a href="#Legend">Jump to Adding a legend.</a> __(F)__ Print the equation of the *optimal* curve i.e. the curve with the *smallest* RMSE. 
```python a = np.array([88438,45505,75127,115571,89911,87432,100083,85589,73104,86890,78580,70785,41050,57610,107537,59262,73038,87891,75368,111638,74911,71599,96774,79667,90725,93816,75859,64969,205688,71500,53098,71250,89615,94747,50400,63673,78257,72785,83015,150000,84699,67191,86298,117705,88935,89643,106678,97894,132164,59387,60684,96151,68794,74559,29430,88362,111792,57205,83651,87518,80129,86801,110761,63274,66143,110694,52590,59994,80460,103589,68298,59056,40294,161848,103100,86354,37428,43307,80792,77368,109159,71538,84783,86250,82900,74728,48597,75549,106942,102167,62708,60630,70273,84918,88693,74141,46627,119112,88260,97262,86095,110472,82734,84761,91715,103292,86339,147993,77560,100625,68094,78250,75426,86138,112344,115000,98846,90499,80029,61959,76779,68833,81026,66361,92737,76692,64974,103869,51951,108854,61038,75938,75346,40639,73156,80067,82322,52353, 62832,207262,160106,77740,72011,167094,58458,41639,79528,66583,83993,138082,77366]) b = np.array([1.7,-0.4,0.5,2.6,1.4,1.5,1.5,1.7,-0.5,1.6,0.9,1.1,-1.7,0.3,1.8,0.5,1,1.9,0.1,2,1.7,1,1.2,1.5,1,1.1,1.2,0,2.6,1.4,-0.8,1.6,1.1,1.2,-1.4,-0.5,1.9,0,1.5,2.4,1.5,0.7,1.8,2,2.4,1.6,2,2.3,2,0.1,0.3,2.3,0,0,-1.7,1.9,2,0,0.9,1.3,0.4,1.6,2.3,-0.1,1.7,2.1,-0.9,0.1,1,1.9,0.4,-0.3,-2.4,2.7,1.3,2,-1.3,-1.5,0.7,1.1,2.3,1.1,0.7,0.9,1.1,0.1,-0.9,1.4,2.1,1.2,0.1,0.8,0.3,1.4,1.5,1,-0.5,2.4,0.9,1.5,1.6,1.2,1.3,1.8,0.8,1.8,1.9,2.6,1.5,1.8,1.8,0.6,0.7,1.2,1.5,2.5,1.1,1.6,1.6,1,0,0,1,0.5,1.7,0.6,0.1,1.7,0.2,2.1,0.1,0.9,0.8,-1.3,1.3,0.5,1.5,-0.6,1.2,2.4,2.6,1.1,0.8,2.5,-0.2,-2,0.1,0.1,1.6,2.6,1.2]) plt.plot(a, b, 'o') c, d, e = np.polyfit(a, b, 2) f, g, h, i = np.polyfit(a, b, 3) j, k, l, m, z = np.polyfit(a, b, 4) yfit2 = np.poly1d([c, d, e])(a) yfit3 = np.poly1d([f, g, h, i])(a) yfit4 = np.poly1d([j, k, l, m, z])(a) plt.plot(a, yfit2, 'o') plt.plot(a, yfit3, 'o') plt.plot(a, yfit4, 'o') def RMSE(ideal, real): return round( np.sqrt (np.sum ((ideal - real)**2)/ len(real)) , 4) rmse2 = RMSE(yfit2,b) rmse3 = RMSE(yfit3,b) rmse4 = RMSE(yfit4,b) plt.legend(loc='best') print(np.min([rmse2, rmse3, rmse4])) ``` ```python ### Example solution a = np.array([88438,45505,75127,115571,89911,87432,100083,85589,73104,86890,78580,70785,41050,57610,107537,59262,73038,87891,75368,111638,74911,71599,96774,79667,90725,93816,75859,64969,205688,71500,53098,71250,89615,94747,50400,63673,78257,72785,83015,150000,84699,67191,86298,117705,88935,89643,106678,97894,132164,59387,60684,96151,68794,74559,29430,88362,111792,57205,83651,87518,80129,86801,110761,63274,66143,110694,52590,59994,80460,103589,68298,59056,40294,161848,103100,86354,37428,43307,80792,77368,109159,71538,84783,86250,82900,74728,48597,75549,106942,102167,62708,60630,70273,84918,88693,74141,46627,119112,88260,97262,86095,110472,82734,84761,91715,103292,86339,147993,77560,100625,68094,78250,75426,86138,112344,115000,98846,90499,80029,61959,76779,68833,81026,66361,92737,76692,64974,103869,51951,108854,61038,75938,75346,40639,73156,80067,82322,52353, 62832,207262,160106,77740,72011,167094,58458,41639,79528,66583,83993,138082,77366]) b = 
np.array([1.7,-0.4,0.5,2.6,1.4,1.5,1.5,1.7,-0.5,1.6,0.9,1.1,-1.7,0.3,1.8,0.5,1,1.9,0.1,2,1.7,1,1.2,1.5,1,1.1,1.2,0,2.6,1.4,-0.8,1.6,1.1,1.2,-1.4,-0.5,1.9,0,1.5,2.4,1.5,0.7,1.8,2,2.4,1.6,2,2.3,2,0.1,0.3,2.3,0,0,-1.7,1.9,2,0,0.9,1.3,0.4,1.6,2.3,-0.1,1.7,2.1,-0.9,0.1,1,1.9,0.4,-0.3,-2.4,2.7,1.3,2,-1.3,-1.5,0.7,1.1,2.3,1.1,0.7,0.9,1.1,0.1,-0.9,1.4,2.1,1.2,0.1,0.8,0.3,1.4,1.5,1,-0.5,2.4,0.9,1.5,1.6,1.2,1.3,1.8,0.8,1.8,1.9,2.6,1.5,1.8,1.8,0.6,0.7,1.2,1.5,2.5,1.1,1.6,1.6,1,0,0,1,0.5,1.7,0.6,0.1,1.7,0.2,2.1,0.1,0.9,0.8,-1.3,1.3,0.5,1.5,-0.6,1.2,2.4,2.6,1.1,0.8,2.5,-0.2,-2,0.1,0.1,1.6,2.6,1.2]) # A plt.plot(a, b,'o') # B c, d, e = np.polyfit(a, b, 2) f, g, h, i = np.polyfit(a, b, 3) j, k, l, m, n = np.polyfit(a, b, 4) # C yfit2 = np.poly1d([c, d, e])(a) yfit3 = np.poly1d([f, g, h, i])(a) yfit4 = np.poly1d([j, k, l, m, n])(a) # yfit2 = c*a**2 + d*a + e # yfit3 = f*a**3 + g*a**2 + h*a + i # yfit4 = j*a**4 + k*a**3 + l*a**2 + m*a + n plt.plot(a, yfit2,'o') plt.plot(a, yfit3,'o') plt.plot(a, yfit4,'o') # D # Define a function def RMSE(ideal, real): return round( np.sqrt (np.sum ((ideal - real)**2)/ len(real)) , 4) #ask where ideal and real come from # error rmse2 = RMSE(yfit2, b) rmse3 = RMSE(yfit3, b) rmse4 = RMSE(yfit4, b) # E # You don't need to replot the data as shown here, you can just add the labels to your answer to part C plt.plot(a, yfit2,'o', label = f"RMSE={rmse2}") plt.plot(a, yfit3,'o', label = f"RMSE={rmse3}") plt.plot(a, yfit4,'o', label = f"RMSE={rmse4}") plt.legend(loc='best') # F print(np.min([rmse2, rmse3, rmse4])) ``` ### Review Exercise: Vectorising a function Recall the function `is_positive` that we looked at in Seminar 5: Functions. A similar function is shown in the cell below. It takes an argument, `x` and returns: - `True` if `x` is positive. - `False` if `x` is negative. Write a vectorised version of the function that will accept a Numpy array as an argument. <br><a href="#Vectorising">Jump to Vectorising a function.</a> Demonstrate your function works by calling it. ```python X = np.array(0,10) def is_positive(x): "Checks if the function value f(x) is positive" for x in X: return x > 0 posreturn = np.vectorize(is_positive) print(posreturn(6)) ``` ```python # Example Solution def is_positive(x): "Checks if the function value f(x) is positive" return x > 0 is_positive_vec = np.vectorize(is_positive) z = [1, 2, 3] is_positive_vec(z) ``` ### Review Exercise: Broadcasting Use a single broadcasting operation to: - add 2 to every element in the first column of `a` - add 3 to every element in the second column of `a` - subtract 1 from every element in the third column of `a` ```python a = np.array([ [49, 21, 22], [0, 5, 14], [15, 2, 9], [ 1, 3, 10]]) print(a) b=np.array([[2, 3, -1], [2, 3, -1], [2, 3, -1], [2, 3, -1]]) print(a+b) ``` ```python # Example Solution print(a + np.array([2, 3, -1])) ``` ### Extension Exercise: Broadcasting and Mathematics with matrices - Simultaneous Equations, Polynomial Coeffcients The equation of a parabola can be fully defined using three points that lie on the curve (provided the curve is not a straight line). The equation for a parabola is $y=ax^2+bx+c$, where $a$, $b$, and $c$ are constants. __(A)__Given three points $(x_1,y_1)$, $(x_2,y_2)$, $(x_3,y_3)$, <br>three simultaneous equation may be written: $$ \begin{split} x_1^2a+x_1b+c&=y_1 \\ x_2^2a+x_2b+c&=y_2 \\ x_3^2a+x_3b+c&=y_3 \\ \end{split} $$ Given that: <br>$(x_1,y_1)=(-2,2)$ <br>$(x_2,y_2)=(1,-1)$ <br>$(x_3,y_3)=(4,4)$ <br>use matrix multiplication to find $a$,$b$ and $c$. 
<a href="#SimultaneousEqtns">Jump to Simultaneous Equations.</a>
<br><a href="#Broadcasting">Jump to Broadcasting.</a>

Hint:
<br>Build a 1D array, `y`, containing values $y_1$, $y_2$ and $y_3$.
<br>Build a 2D array, `x`, with which to multiply `y` to find a 1D array containing $a$, $b$ and $c$.

__(B)__ Show that your solution is correct by:
- plotting a graph of $y=ax^2+bx+c$ against $x$ for a range of x.
- plotting the three points:
<br>$(x_1,y_1)=(-2,2)$
<br>$(x_2,y_2)=(1,-1)$
<br>$(x_3,y_3)=(4,4)$
<br>as points to check that they lie on the curve.

```python
# Solve the simultaneous equations to find a, b and c
```

```python
# Example solution

# 1D array, y
y = np.array([2, -1, 4])

# 2D array, x
# note the parentheses: (-2)**2, not -2**2 (which Python evaluates as -(2**2))
x = np.array([[((-2)**2), -2, 1],
              [((1)**2), 1, 1],
              [((4)**2), 4, 1]])

abc = np.linalg.solve(x, y)
print(abc)

# Another way to create matrix x without writing it out explicitly is to use:
# - broadcasting
# - the numpy function, power (raises first argument to power of second)
xs = np.array([-2, 1, 4])
xs = xs.reshape((3,1))
power = np.array([2, 1, 0])
x = np.power(xs, power)
```

### Extension Exercise: Solving systems of first order differential equations - Predator & prey

The predator-prey equations represent a simplified model of two species interacting; for example:
- $x$: rabbits (prey)
- $y$: foxes (predators)

*The more foxes there are*, the more rabbits get eaten by foxes and the population of rabbits gets smaller...
<br>...so there is less food for the foxes so the population of foxes gets smaller...
<br>...so there is less danger to the rabbits, so the population of rabbits gets bigger...
<br>...so there is more food for the foxes, so the population of foxes gets bigger...
<br>...*The more foxes there are*....

We can expect to see the two populations rise and fall cyclically with time.

The two related populations can be described by a pair of first-order non-linear ordinary differential equations.

\begin{align*}
\frac{dx}{dt}&=x(a-by)\\
\frac{dy}{dt}&=-y(c-dx)\\
\end{align*}

Where:
<br>$a$ represents rabbit births
<br>$b$ represents rabbit deaths due to foxes
<br>$c$ represents fox deaths in the absence of rabbits
<br>$d$ represents foxes kept alive by feeding on rabbits
<br>($a,b,c,d$ are assumed to be positive)

__(A)__ Use `odeint` to estimate the solution to the coupled differential equations:
- between time = 0 and time = 20 units (suggested time step = 0.1 unit)
- where $a=b=c=d=1$
- for initial conditions:<br>$x(0)=150$ <br>$y(0)=100$

<a href="#DiffEqtns">Jump to Systems of Equations for Rates of Change.</a>

__(B)__ Plot the two populations using a graph.
<br>Include a legend to label the two populations.

__(C)__ Try changing the values of $a,b,c,d$ and the initial number of rabbits and foxes and observe the output in your graph.

```python
# Predator-prey equations
```

```python
# Example solution

# A
# Between time = 0 and time = 100
ts = np.linspace(0, 100, 100)

a,b,c,d = 1,1,1,1

# Define function that returns differential
def dP_dt(P, t):
    return [P[0]*(a - b*P[1]), -P[1]*(c - d*P[0])]

# Initial conditions
P0 = [150, 100]

# Solve using odeint
Ps = odeint(dP_dt, P0, ts)

# B
# plot the two populations using a graph
# P[0] is x (rabbits), P[1] is y (foxes)
rabbits = Ps[:,0]
foxes = Ps[:,1]

plt.plot(ts, rabbits, label="rabbits")
plt.plot(ts, foxes,label="foxes")

t = ts[0:50:4]
r = rabbits[0:50:4]
f = foxes[0:50:4]

plt.xlabel("Time")
plt.ylabel("Population")

# Include a legend
plt.legend();

print(t)
print(r)
print(f)
```

# Summary

- Numpy arrays are a type of data structure optimised for numerical computations.
- Unlike data structures such as lists and tuples, an array can store data in multiple dimensions.
- Numpy arrays are a convenient way to represent vectors and matrices.
- Numpy functions generally behave in an "elementwise" way.
- We can "vectorise" other functions so that they accept data structures as arguments.
- Broadcasting is a useful tool for applying information in one numpy array to another without having to repeat or reshape the arrays to match one another.
- The matplotlib library can be used to quickly produce simple plots to visualise and check your solutions to mathematical problems.
- This includes fitting a curve or a relationship to a dataset.

###### Homework

1. __PULL__ the changes you made in-class today to your personal computer.
1. __COMPLETE__ any unfinished Review Exercises.
1. __PUSH__ the changes you make at home to your online repository.
1. Install __ffmpeg__...

#### Installing FFmpeg

FFmpeg is software for handling multimedia data.
<br>You must have it installed to view the animated figures we will produce in next week's seminar.

###### Installing FFmpeg on mac

Open a terminal.

Copy and paste the following command into the terminal to install homebrew (a Linux-like package manager for macOS):

>`ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"`

Copy and paste the following command into the terminal to install FFmpeg:

>`brew install ffmpeg`

To check ffmpeg has installed, copy and paste the following command into the terminal:

>`ffmpeg -version`

If FFmpeg has installed, a few lines of output will appear, starting with the version number which will be something like:

>`ffmpeg version 3.4 Copyright (c) 2000-2017 `

###### Installing FFmpeg on linux

Open a terminal.

Copy and paste the following commands into the terminal (one-by-one, pressing enter after each one) to install ffmpeg:

>`sudo add-apt-repository ppa:kirillshkrogalev/ffmpeg-next
sudo apt-get update
sudo apt-get install ffmpeg`

To check ffmpeg has installed, copy and paste the following command into the terminal:

>`ffmpeg -version`

If FFmpeg has installed, a few lines of output will appear, starting with the version number which will be something like:

>`ffmpeg version 3.4 Copyright (c) 2000-2017 `

###### Installing FFmpeg on windows

Open a terminal.

To find out what version of windows you have, copy and paste the following command into the terminal to see if your computer has a 32 or 64 bit CPU:

>`wmic os get osarchitecture`

Go to ffmpeg.zeranoe.com/builds/

- Click the appropriate __Architecture__ for your computer.
- Click Static __Linking__
- Leave __Version__ as the default.
- Click __Download Build__

Go to your computer's Downloads folder.

- Right click on the newly downloaded .zip folder (its name will start with `ffmpeg`).
- Choose __Extract All__ from the drop-down menu.
- If given the option to choose a location to extract the files to, choose your Program Files folder.<br>Otherwise, a non-.zip folder of the same name as the one you downloaded will appear in the Downloads folder.
<br>Copy the folder to your Program Files folder.
- Rename the folder you just copied into Program Files to:
<br>`ffmpeg`

Go back to the terminal and copy and paste the following command into the terminal to add ffmpeg to the windows path:
>`PATH=C:\Program Files\ffmpeg\bin;%PATH%`

To check ffmpeg has installed, copy and paste the following command into the terminal:
>`ffmpeg -version`

If FFmpeg has installed, a few lines of output will appear, starting with the version number, which will be something like:
>`ffmpeg version 3.4 Copyright (c) 2000-2017 `

Please do this before next week's class.

```python

```
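If you would like to confirm from within Python that the install worked, here is a minimal, optional check using only the standard library (it simply asks whether an `ffmpeg` executable is visible on your PATH and prints the first line of its version output):

```python
# Optional sanity check that ffmpeg is visible to Python (standard library only)
import shutil
import subprocess

ffmpeg_path = shutil.which("ffmpeg")
if ffmpeg_path is None:
    print("ffmpeg was not found on the PATH - revisit the installation steps above")
else:
    print("ffmpeg found at:", ffmpeg_path)
    # Print the first line of the version information
    version = subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True)
    print(version.stdout.splitlines()[0])
```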
bdca087b6605a4724effc1ee9931938c7d651f81
147,919
ipynb
Jupyter Notebook
7_Numerical_computation_with_Numpy.ipynb
michaelnicht/python-class
a614132229f4a70a1ca4eb2c979586177db329f1
[ "MIT" ]
null
null
null
7_Numerical_computation_with_Numpy.ipynb
michaelnicht/python-class
a614132229f4a70a1ca4eb2c979586177db329f1
[ "MIT" ]
null
null
null
7_Numerical_computation_with_Numpy.ipynb
michaelnicht/python-class
a614132229f4a70a1ca4eb2c979586177db329f1
[ "MIT" ]
null
null
null
24.897997
878
0.503309
true
25,459
Qwen/Qwen-72B
1. YES 2. YES
0.672332
0.874077
0.58767
__label__eng_Latn
0.970129
0.203684
# Hauptkomponentenanalyse # (Principal Component Analysis, PCA) # vs. # Denoising Variational Autoencoders ### _an Hand von Beispielen_ jupyter nbconvert PCAvsDVAEde.ipynb --to slides --post serve # Eine intuitive Perspektive ... #### "... realistische, hochdimensionale Daten konzentrieren sich in der Nähe einer nichtlinearen, niedrigdimensionalen Mannigfaltigkeit ..." [Lei et al., 2018] # Eine intuitive Perspektive ... #### Aber wie lernt man die Mannigfaltigkeit und die Wahrscheinlichkeitsverteilung darauf? # PCA vs. DVAE an Hand von Beispielen # PCA vs. DVAE an Hand von Beispielen Der __MNIST (Modified National Institute of Standards and Technology) Datensatz__ von handgeschriebenen Zahlen besteht aus __60,000 Trainings- und 10,000 Test-Beispielen__. Die Zahlen wurden hinsichtlich Ihrer Größe __normalisiert und in einem Bild fester Größe zentriert__. # Vorstellung der Wettbewerber # PCA # PCA * __Unüberwachtes__ Lernen * __Lineare Transformation__ * __"Transformiere"__ eine Menge von Beobachtungen in ein __anderes Koordinatensystem__, in dem die Werte der ersten Koordinate (Komponente) die __größtmögliche Varianz__ aufweisen [Friedman et al., 2017] * Die __resultierenden Koordinaten (Komponenten)__ sind __nicht__ mit den ursprünglichen Koordinaten __korreliert__ # PCA * Wird zur __Dimensions-Reduzierung__ genutzt (Komprimierung) * Die __Rekonstruktion der Beobachtungen__("decoding") aus den führenden __Hauptkomponenten__ hat den __niedrigsten quadratischen Fehler__ # Autoencoders # Autoencoders * unüberwachtes __neuronales Netz__ * __minimiert__ den Fehler zwischen Rekonstruktionen und Beobachtungen [Goodfellow et al., 2016] * lernt die __Identitätsfunktion__ * wird mit Hilfe von __Fehlerrückführung (Backpropagation) trainiert__ * aufgetrennt um __Kodierung und Dekodierung__ # Autoencoders # PCA vs. Autoencoders ## Implementierung ```python # import all necessary libs from __future__ import print_function import numpy as np import matplotlib.pyplot as plt from scipy.stats import norm import tensorflow as tf # we use Keras to implement, layer-by-layer the DVAE and PCA from keras.layers import Input, Dense, Lambda from keras.models import Model from keras import backend as K from keras import metrics from keras.datasets import mnist from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.layers import Conv2DTranspose,Reshape from sklearn.decomposition import PCA import os %matplotlib inline os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/' ``` ```python # we use Keras to implement, layer-by-layer the DVAE and PCA from keras.layers import Input, Dense, Lambda from keras.models import Model from keras import backend as K from keras import metrics from keras.datasets import mnist from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.layers import Conv2DTranspose,Reshape from sklearn.decomposition import PCA ``` ## Experiment Parametrierung ```python # training params for PCA vs. DVAE num_train = 50000 n_images = 6 batch_size = 205 original_dim = 784 latent_dim = 8 epochs = 1000 epsilon_std = 1.0 noise_factor = 0.5 ``` ```python # get the MNIST digits (x_train, y_train), (x_test, y_test) = mnist.load_data() # prepare data for DVAE x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. 
x_train = x_train.reshape((len(x_train), 28,28,1)) x_test = x_test.reshape((len(x_test), 28,28,1)) noise_train = x_train + noise_factor * np.random.randn(*x_train.shape) noise_test = x_test + noise_factor * np.random.randn(*x_test.shape) # clip the images to be between 0 and 1 noise_train = np.clip(noise_train, 0., 1.) noise_test = np.clip(noise_test, 0., 1.) # prepare data for PCA # training x_test_pca = x_test shape_x_test = x_test_pca.shape pcaInputTest = np.reshape(x_test,[shape_x_test[0],shape_x_test[1]*shape_x_test[2]]).astype('float32')/255 ``` ```python # display the images (28x28 px) digit_size = 28 figure = np.zeros((digit_size * 2, digit_size * n_images)) showidx=np.random.randint(0,num_train,n_images) # display input, noisy input for i,idx in enumerate (showidx): figure[0: 28,i *28: (i + 1) * 28] = np.reshape(x_train[idx], [28, 28]) figure[28: 56,i *28: (i + 1) * 28] = np.reshape(noise_train[idx], [28, 28]) plt.figure(figsize=(28*2, 28*n_images)) plt.imshow(figure, cmap='Greys_r') plt.show() ``` ## Grundlegende Mathematik der PCA ### Lineare Transformation Es sei $\{y_i\}^N_{i=1}$ eine Menge von $N$ Beobachtungs-Vektoren der Dimension $n$ mit $n\leq N$. Eine __lineare Transformation__ eines __endlich-dimensionalen__ Vektors kann als __Matrix Multiplikation__ ausgedrückt werden: $$ \begin{align} x_i = W y_i \end{align} $$ mit $y_i \in R^{n}, x_i \in R^{m}$ und $W \in R^{nxm}$. ### Lineare Transformation * Das $j-te$ Element in $x_i$ ist das __Innere Produkt__ von $y_i$ und der $j-ten$ Spalte der Matrix $W$, welche wir durch $w_j$ bezeichen. Es sei $Y \in R^{nxN}$ die Matrix, welche wir durch horizontale Aneinanderreihung der Vektoren $\{y_i\}^N_{i=1}$ erhalten, $$ Y = \begin{bmatrix} | ... | \\ y_1 ... y_N \\ | ... | \end{bmatrix} $$ * Aus der __linearen Transformation__ folgt: $$ X = W^TY, X_0 = W^TY_0, $$ wobei $Y_0$ die __Matrix der zentrierten Elemente__ (d.h. wir subtrahieren den Mittelwert von jeder Beobachtung) bezeichnet, und __Kovarianzmatrix__ $Y_0Y_0^T$. ### Dimensionsreduzierung, Komprimierung PCA wird zur __Dimensions-Reduktion__ verwendet, da sie durch die durch eine lineare Transformation die __Anzahl der Variablen reduziert__. Da nur die ersten __$m$ Hauptkomponenten erhalten__ werden, __verliert__ PCA __information__ (d.h. __verlustreiche Komprimierung__). Der __Verlust__ ( _Summe des quadratischen Rekonstruktions-Fehlers_ ) wird jedoch durch die __Maximierung der Komponenten-Varianzen minimiert__ $$ \min_{W \in R^{nxm}} \| Y_0 - WW^TY_0 \|_F^2, W^TW = I_{mxm}$$ wobei $F$ die Frobenius-Norm bezeichnet. ### Minimaler quadratischer Gesamtrekonstruktionsfehler Die Transformations-Matrix $P_m$ kann ebenfalls durch Lösung der folgenden Gleichung berechnet werden: $$ \min_{W \in R^{nxm}} \| Y_0 - WW^TY_0 \|_F^2, W^TW = I_{mxm}$$ wobei $F$ die Frobenius-Norm bezeichnet. Daraus folgt, dass $P_m$ __jeden zentrierten Vektor__ der Länge $n$ in einen Vektor der Länge $m$ mit ($ m < n$) derart __komprimiert__, dass die __Summe des quadratischen Rekonstruktions-Fehlers minimiert wird__. ### Skalierung Zur Berechung der PCA können viele verschiedene __iterative Algorithmen__ eingesetzt werden * QR Algorithmen * Jacobi Algorithmus * Power methode * Singulärwert-Zerlegung (Singular Value Decomposition, SVD) Für __sehr große Datenmengen__ eignen sich diese Algorithmen __nicht__! 
```python # analytical PCA of the training set def analytical_pca(y): # variance to explain pca = PCA() # apply PCA pca.fit(y) # extract the components loadings = pca.components_ # apply the transformation components = pca.transform(y) # reconstruct from components for visualization filtered = pca.inverse_transform(components) return filtered ``` ## Grundlegende Mathematik der Autoencoder Für jeden Eingangsvektor $x$ der Dimension $d$ des kompletten Datensaztes der Länge $n$ generiert das neuronale Netz eine Rekonstruktion $x'$ durch: * __Kodierung der Eingangsdaten__ (d.h. verwende die lineare / nicht-lineare Transformation $g_\phi(.)$) * dies liefert eine __komprimierte Kodierung__ in der dünnsten Netzwerk-Ebene, $z$ * __Dekodierung der komprimierten Eingangsdaten__ durch Anwendung der linearen / nicht-linearen Transformation $f_\theta(.)$ Die __Parameter $(\theta, \phi)$ werden im Verlauf des Training derart optimiert__, dass ein den Eingangsdaten möglichst ähnliches Ergebnis , $x \approx f_\theta(g_\phi(x))$, produziert wird. In anderen Worten: __die Indentitäts-Funktion wird erlernt__ mit __Cross-Entropy (bei sigmoid Aktivierungsfuntionen)__, __mittlere quadratische Fehler (MSE)__ etc.: # Denoising Variational Autoencoders (DVAE) Das Funktionsprinzip __unterscheidet sich__ vom grundlegenden Autoencoder dahingehend, dass ein gewisses Maß an __Störrauschen__ (einer __gewissen Wahrscheinlichkeitsverteilung__ folgend) den __Eingangsdaten hinzugefügt wird__ und dass die __verborgenen Ebenen__ dieses Rauschen __ausgleichen muss__ um die Eingangsdaten zu __rekonstruieren__ [Im, Bengio et al., 2017, Kingma et al., 2017]. Für jeden gestörten Eingangsvektor $\tilde x$ eines originalen Vektors $x$ der Dimension $d$, generiert das neuronale Netz eine Rekonstruktion $x'$ durch: * __Kodierung der Eingangsdaten__, welche die Abbildung als Wahrscheinlichkeit der Schätzung von $z$ unter Verwendung der Eingangsdaten darstellt * dies liefert eine __komprimierte Kodierung in der dünnsten Netzwerk-Ebene__ $z$, welche der Verteilung $q_\phi(z|x)$ folgt * __Dekodierung der komprimierten Eingangsdaten__ an der Ausgangsebene unter Einhaltung des __Beobachtungs-Modells__ $p_\theta(x|z)$ ```python # Implement the DVAE # encoder part x_noise = Input(shape=(28,28,1)) conv_1 = Conv2D(64,(3, 3), padding='valid',activation='relu')(x_noise) conv_2 = Conv2D(64,(3, 3), padding='valid',activation='relu')(conv_1) pool_1 = MaxPooling2D((2, 2))(conv_2) conv_3 = Conv2D(32,(3, 3), padding='valid',activation='relu')(pool_1) pool_2 = MaxPooling2D((2, 2))(conv_3) h=Flatten()(pool_2) z_mean = Dense(latent_dim)(h) z_log_var = Dense(latent_dim)(h) ``` ```python # Implement the DVAE # reparameterization trick def sampling(args): z_mean, z_log_var = args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0., stddev=epsilon_std) return z_mean + K.exp(z_log_var / 2) * epsilon z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var]) ``` ```python # Implement the DVAE # decoder part # we instantiate these layers separately so as to reuse them later z=Reshape([1,1,latent_dim])(z) conv_0T = Conv2DTranspose(128,(1, 1), padding='valid',activation='relu')(z)#1*1 conv_1T = Conv2DTranspose(64,(3, 3), padding='valid',activation='relu')(conv_0T)#3*3 conv_2T = Conv2DTranspose(64,(3, 3), padding='valid',activation='relu')(conv_1T)#5*5 conv_3T = Conv2DTranspose(48,(3, 3), strides=(2, 2),padding='same',activation='relu')(conv_2T)#10*10 conv_4T = Conv2DTranspose(48,(3, 3), 
padding='valid',activation='relu')(conv_3T)#12*12 conv_5T = Conv2DTranspose(32,(3, 3), strides=(2, 2),padding='same',activation='relu')(conv_4T)#24*24 conv_6T = Conv2DTranspose(16,(3, 3), padding='valid',activation='relu')(conv_5T)#26*26 x_out = Conv2DTranspose(1,(3, 3), padding='valid',activation='sigmoid')(conv_6T)#28*28 ``` ## DVAE * DVAE __Verlustfunktion__ beinhaltet die Erstellung von Beispielen aus $z \backsim q_\phi(z|x)$. Dies ist ein __stochastischer Prozess__ und eignet sich daher __nicht zur Fehlerrückführung__. * Die __geschätzte Posteriori-Verteilung $q_\phi(z|x)$__ approximiert die tatsächliche Verteilung $p_\theta(z|x)$. * Wir können die __Kullback-Leibler Abweichung__, $D_{KL}$ benutzen um die __Differenz der beiden Verteilungen__ zu quantifizieren. ```python # Implement the DVAE # reparameterization trick def sampling(args): z_mean, z_log_var = args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0., stddev=epsilon_std) return z_mean + K.exp(z_log_var / 2) * epsilon z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var]) ``` ## DVAE Durch __Minimierung des Verlusts__, __maximieren__ wir daher die __untere Schranke der Wahrscheinlichkeit__ (__evidence lower bound (ELBO)__) zur Generierung echter Daten-Beispiele. ```python # Implement the DVAE # instantiate model dvae = Model(x_noise, x_out) dvae.summary() # Compute loss def DVAE_loss(x_origin,x_out): x_origin=K.flatten(x_origin) x_out=K.flatten(x_out) xent_loss = original_dim * metrics.binary_crossentropy(x_origin, x_out) kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) dvae_loss = K.mean(xent_loss + kl_loss) return dvae_loss # compile the model dvae.compile(optimizer='adam', loss=DVAE_loss) ``` ```python # Train the DVAE dvae.fit(noise_train,x_train, shuffle=True, epochs=epochs, batch_size=batch_size, validation_data=(noise_test, x_test)) # Comparison PCA vs. DVAE # testing the DVAE num_test=10000 showidx=np.random.randint(0,num_test,n_images) x_out=dvae.predict(x_test[showidx]) # prepare data for testing PCA pcaInputTest = np.reshape(x_test,[shape_x_test[0],shape_x_test[1]*shape_x_test[2]]).astype('float32')/255 pcaOutput = analytical_pca(pcaInputTest) ``` ```python # Display results digit_size = 28 figure = np.zeros((digit_size * 4, digit_size * n_images)) for i,idx in enumerate (showidx): figure[0: 28,i *28: (i + 1) * 28] = np.reshape(x_test[idx], [28, 28]) # input data figure[28: 28 * 2,i *28: (i + 1) * 28] = np.reshape(noise_test[idx], [28, 28]) # noisy input data figure[28 * 2: 28 * 3,i *28: (i + 1) * 28] = np.reshape(x_out[i], [28, 28]) # DVAE output figure[28 * 3: 28 * 4,i *28: (i + 1) * 28] = np.reshape(pcaOutput[idx], [28, 28]) # PCA output plt.figure(figsize=(28 * 4, 28*n_images)) plt.imshow(figure, cmap='Greys_r') # plt.savefig('inference_output.png') plt.show() ``` ```python %%HTML <style> td { font-size: 15px } </style> ``` # Vergleich von PCA und DVAE ### Inferenz # Vergleich von PCA und DVAE ### Kostenfunktion # Vergleich zwischen PCA und DVAE # Vergleich zwischen PCA und DVAE # PCA vs. Autoencoders ## "Zwei identische Fremde" # PCA vs. 
Autoencoders * Ein __Autoencoder__ mit einer einzelnen __voll verbundenen (fully-connected) versteckten Ebene__, einer __linearen Aktivierungsfunktion__ und dem __quadratischen Fehler als Kostenfunktion__ ist __eng mit der PCA verwandt__ - seine __Gewichten__ spannen den __Untervektorraum der Hauptkomponenten__ auf [Plaut, 2018] * Bei __Autoencodern__ sorgt die __diagonale Approximation beim Kodiervorgang__ zusammen mit der __inhärenten Stochastizität__ für lokale __Orthogonalität beim Dekodieren__ [Rolinek et al, 2019] # Lieraturverzeichnis [Goodfellow et al., 2016] Ian Goodfellow, Yoshua Bengio and Aaron Courville, Deep Learning, MIT Press, 2016. [Friedman et al., 2017] Jerome H. Friedman, Robert Tibshirani, and Trevor Hastie, The Elements of Statistical Learning: Data Mining, Inference, and Prediction, Springer, 2017. [Plaut, 2018] Plaut, E., 2018. From principal subspaces to principal components with linear autoencoders. arXiv preprint arXiv:1804.10253. [Im, Bengio et al., 2017] Im, D.I.J., Ahn, S., Memisevic, R. and Bengio, Y., 2017, February. Denoising criterion for variational auto-encoding framework. In Thirty-First AAAI Conference on Artificial Intelligence. [Rolinek et al, 2019] Rolinek, M., Zietlow, D. and Martius, G., 2019. Variational Autoencoders Pursue PCA Directions (by Accident). In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (pp. 12406-12415). [Lei et al., 2018] Lei, N., Luo, Z., Yau, S.T. and Gu, D.X., 2018. Geometric understanding of deep learning. arXiv preprint arXiv:1805.10451. [Kingma et al., 2013] Kingma, D.P. and Welling, M., 2013. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114. ### Maximale Varianzkomponenten, Kovarianz und Dekorrelation * Der erste Ladungs-Vektor ist derjenige Einheitsvektor mit dem das innere Produkt der Beobachtungs-Vektoren die __größte Varianz__ aufweisen: $$ \max w_1^T Y_0Y_0^Tw_1, w_1^Tw_1 = 1$$ * Die Lösung der vorherigen leichung ist der erste Eigenvektor der __Kovarianz-Matrix__ $Y_0Y_0^T$, welcher zum größten Eigenwert gehört. * Die Matrix $P$ kann durch __Diagonalisierung der Kovarianz-Matrix__ berechnet werden: $$ Y_0Y_0^T = P \Lambda P^{-1} = P \Lambda P^T $$ $\Lambda = Y_0Y_0^T $ ist eine Diagonal-Matrix, deren Diagonal-Elemente $\{\lambda_i\}^N_{i=1}$ der Größe nach absteigend sortiert sind. $ Y = PX $ liefert die inverse Tranformation. Da die Kovarianz-Matrix von $X$ diagonal ist, ist die PCA eine __dekorrelierende Transformation__. ### Singulärwert-Zerlegung ### (Singular Value Decomposition, SVD) Ein Vektor $v$ der Dimension $N$ ist ein __Eigenvektor__ einer quadratischen N × N Matrix $A$, wenn diese die folgende __lineare Gleichung__ erfüllt $$Av =\lambda v$$ wobei $λ$ ein skalarer Wert ist, welcher als der __zum Eigenvektor v gehörende Eigenwert__ bezeichnet wird. ### Singulärwert-Zerlegung ### (Singular Value Decomposition, SVD) Die Matrix $Y_0 \in R^{nxN}$ kann __faktorisert__ werden als $Y_0 = U \Sigma V^T$, wobei $U \in R^{nxn}$ und $V \in R^{NxN}$ __orthogonale Matrizen__ sind und $\Sigma \in R^{nxN}$ abgesehen von der Diagonalwerten (den sogenannten __Singulär-Werten__) nur aus Nullen besteht. Die Singulärwertzerlegung von $Y_0$ ist äquivalent zur __Eigenwertzerlegung__ von $Y_0T_0^T$. 
<style> td { font-size: 15px } </style> # Vergleich von PCA und DVAE ### Lernen der Mannigfaltigkeit |__PCA__|__DVAE__| |:-----|:---| | Kodierung/Dekodierung, keine Robustheit gegen Rauschen | nicht-linear, probabilistische Kodierung/Dekodierung mit Robustheit gegen Rauschen und nicht-linearen Aktivierungsfunktionen| | unkorrelierte Koordinaten | korrelierte Ausgansdaten an der dünnsten Netzwerkebene | | Koordinaten sind in absteigener Reihenfolge der Varianz geordnet | Koordinaten sind ungeordnet | | die Spalten der Transformations-Matrix sind orthonormal | die Spalten der Transformations-Matrix sind nicht notwendigerweise orthonormal | | Robustheit gegenüber moderatem Rauschen mit bekannten Verteilungen | Robustheit gegen eine Vielzahl verschiedener Arten und Größenordnungen an injeziertem Rauschen (masking noise, Gaussian noise, salt-and-pepper noise), da das Entrauschen entscheidung für die Generalisierung ist | | einfacher Algorithmus (ohne Regularisierung), geringe Robustheit | die Punkte in niedrig-dimensionalen Mannifaltigkeiten sind robust gegen Rauschen im hoch-dimensionalen Beobachtungs-Raum | <style> td { font-size: 15px } </style> # Vergleich zwischen PCA und DVAE ### Training |__PCA__|__DVAE__| |:-----|:---| | Abbildung der Eingangsdaten auf einen festen Vektor | Abbildung der Eingangsdaten auf eine Wahrscheinlichkeitsverteilung | | iterative Methoden: QR Zerlegung, Jacobi Algorithmus, Singulärwertzerlegung | Fehlerrückführung (Backpropagation) | | aufgrund der Kovarianz-Berechnung ineffizient bei großen Datenmengen | effizient bei großen Datenmengen aufgrund der starken Fähigkeit des Erlernens der Mannigfaltigkeit | | basiert auf der Korrelations-/Kovarianz-Matrix, welche - zumindest in der Theorie - sehr empfindlich gegenüber Ausreißern sein kann | kann Beispiele direkt aus dem Eingangsraum generieren und daher die Eigenschfaten des Eingangsrauschens beschreiben ("reparametrization trick") |
8d03cafef04b067113c85aa246442a653b9287f6
33,033
ipynb
Jupyter Notebook
PCAvsDVAEde.ipynb
caxenie/pca-vs-dvae
2d9f8529c5b482bb5356f2ae1aa7e7a4af826f87
[ "MIT" ]
null
null
null
PCAvsDVAEde.ipynb
caxenie/pca-vs-dvae
2d9f8529c5b482bb5356f2ae1aa7e7a4af826f87
[ "MIT" ]
null
null
null
PCAvsDVAEde.ipynb
caxenie/pca-vs-dvae
2d9f8529c5b482bb5356f2ae1aa7e7a4af826f87
[ "MIT" ]
null
null
null
29.921196
400
0.593134
true
5,969
Qwen/Qwen-72B
1. YES 2. YES
0.695958
0.774583
0.539078
__label__deu_Latn
0.812926
0.090788
# 実践データ科学入門 2020年度木曜4限 # 第3回 その1 重線形回帰 ```python %matplotlib inline #%matplotlib notebook # if necessary to rotate figures in 3D plot import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as patches from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits.mplot3d import art3d from ipywidgets import interact ``` ## 重線形回帰とは 重回帰は目的変数に対して説明変数が複数ある場合に用いる手法で, $$ y = a_0 + a_1 x_1 + a_2 x_2 + \ldots + a_M x_M = a_0 + \sum_{j=1}^M a_j x_j + \xi $$ というモデルを当てはめる方法である.ただし, - $y$ は目的変数(データとして与えられるもの) - $x_j \ (j=1,2,\ldots,M)$ は説明変数(データとして与えられるもの) - $a_j \ (j=0,1,2,\ldots,M)$ は回帰係数(データから求めるもの) - $\xi$ はノイズ(モデルでは当てはめられないランダムな要因) 以下,特に注意しない限り__データは実数値__とする. $M$ 個の説明変数と $M$ 個の回帰係数を $$ \begin{align} & \mathbf X = (x_1, x_2, \ldots, x_M) \in \mathbb R^M,\\ & \mathbf A = (a_1, a_2, \ldots, a_M) \in \mathbb R^M \end{align} $$ とそれぞれ $M$ 次元ベクトルで表すことで $$ y = a_0 + \mathbf X \cdot \mathbf A + \xi $$ と内積の形で表示することができる.さらに定数の部分もまとめて $$ \begin{align} & \tilde{\mathbf X} = (1, x_1, x_2, \ldots, x_M) \in \mathbb R^M,\\ & \tilde{\mathbf A} = (a_0, a_1, a_2, \ldots, a_M) \in \mathbb R^M \end{align} $$ とおくことで $$ y = \tilde{\mathbf X} \cdot \tilde{\mathbf A} + \xi $$ と表すこともできる. ノイズはしばしば平均 $0$ の正規分布 $\mathrm N(0, \sigma^2)$ に従う確率変数とみなして用いる. ### 単回帰のとき(復習) 回帰変数1つの単回帰では,データ空間 $(x, y)$ の2次元平面に回帰直線を引き, データから直線までの高さの二乗和が最小になるようにパラメータを選んだ. ```python # 真のパラメータ A0 = 1.2 A1 = 2.6 # dataset X = np.arange(0, 3, 0.3) Y = A0 + A1*X + np.random.randn(X.size) ``` ```python # 回帰直線のプロット def plot_XY_and_regressionline(a0=0.0, a1=1.0): fig = plt.figure() ax = plt.axes() ax.set_xlabel("X", size=20) ax.set_ylabel("Y", size=20) ax.set_xticks ax.set_ylim(-1, 9) ax.scatter(X, Y) ax.plot([np.min(X), np.max(X)], [a0+a1*np.min(X), a0+a1*np.max(X)], linewidth=3, color='tab:red') ax.set_title('MSE = %f'%(np.sum((Y-a0-a1*X)**2)/Y.size), size=20) ax.tick_params(labelsize=12) for x, y in zip(X, Y): ypred = a0 + a1*x sq = patches.Rectangle(xy=(x, ypred), width=np.abs(y-ypred), height=np.abs(y-ypred), ec='k', fc='lime', alpha=0.8) ax.add_patch(sq) ``` ```python interact(plot_XY_and_regressionline, a0=(-5.0, 5.0, 0.1), a1=(-5.0, 10.0, 0.1)) # 青点がデータ点 # 赤が回帰直線 # 緑の長方形が二乗誤差:長方形の総面積が誤差二乗和 # MSE = Mean Squared Error = 平均二乗誤差 # 必ずしも真のパラメータのときに平均二乗誤差最小になるわけではないことに注意しよう. ``` ### 重回帰では 回帰変数2つの重回帰では,データ空間 $(x_1, x_2, y)$ の3次元空間に回帰平面を引き,データから平面までの高さの二乗和が最小になるように選ぶ. ```python # 真のパラメータ B0 = 1.2 B1 = 1.8 B2 = 0.2 # dataset N = 1000 X1 = np.random.rand(N) * 3 X2 = np.random.rand(N) * 3 Y = B0 + B1*X1 + B2*X2 + np.random.randn(X1.size) ``` ```python # 回帰平面のプロット def plot_X1X2Y_and_regressionplane(b0=0.0, b1=0.0, b2=0.0, azimuth=30, elevation=30): fig = plt.figure() ax = Axes3D(fig) ax.view_init(azim=azimuth, elev=elevation) ax.set_xlabel("X1") ax.set_ylabel("X2") ax.set_zlabel("Y") ax.set_zlim(-1, 9) ax.scatter(X1, X2, Y, s=200) X2mesh = X1mesh = np.arange(0, 3, 0.3) X1mesh, X2mesh = np.meshgrid(X1mesh, X2mesh) Ymesh = b0 + b1*X1mesh + b2*X2mesh ax.plot_wireframe(X1mesh, X2mesh, Ymesh, linewidth=3, color='tab:red') ax.set_title('MSE = %f'%(np.sum((Y-b0-b1*X1-b2*X2)**2)/Y.size), size=20) ``` ```python interact(plot_X1X2Y_and_regressionplane, b0=(-5.0, 5.0, 0.1), b1=(-5.0, 10.0, 0.1), b2=(-5.0, 10.0, 0.1), azimuth=(0, 360, 1), elevation=(0, 90, 1)) # 青点がデータ点 # 赤が回帰平面 # MSE = Mean Squared Error = 平均二乗誤差 # 必ずしも真のパラメータのときに平均二乗誤差最小になるわけではないことに注意しよう. ``` ## 学習方法 上では手動で回帰係数を動かして回帰モデルを作った.最適なパラメータを計算してみよう. 
--- ### モデル推定値 $N$ 組みのデータセット $$ \left\{ (x_1^{(i)}, x_2^{(i)}, \ldots, x_M^{(i)}, y^{(i)}) \right\}_{i=1}^N \quad \left( = \left\{ (\mathbf x^{(i)}, y^{(i)}) \right\}_{i=1}^N \right) $$ があるとしよう.パラメータ $\mathbf A$ で作った重回帰モデルの推定値は $$ a_0 + \sum_{j=1}^M a_j x_j^{(i)} + \xi^{(i)} $$ である.ここで $\xi^{(i)}$ は $i$ 番目のデータに対するランダムさ $\xi$ の実現値である. $\xi$ は平均 $0$ だったので,推定値の期待値は $$ a_0 + \sum_{j=1}^M a_j x_j^{(i)} $$ である. --- ### 回帰残差 したがって,パラメータ $\mathbf A$ で作った重回帰モデルによって得られる $i$ 番目のデータに対する推定値の誤差(回帰残差)の期待値は $$ e^{(i)} := y^{(i)} - a_0 - \sum_{j=1}^M a_j x_j^{(i)} $$ である. --- ### 平均二乗誤差 誤差平方(二乗誤差)は $$ (e^{(i)})^2 = \left( y^{(i)} - a_0 - \sum_{j=1}^M a_j x_j^{(i)} \right)^2 $$ なので,平均二乗誤差は,$N$ 個のデータセットに対する誤差平方の平均値であるから, $$ E(a_0, a_1, \ldots, a_N) := \frac1N \sum_{i=1}^N (e^{(i)})^2 = \frac1N \sum_{i=1}^N \left( y^{(i)} - a_0 - \sum_{j=1}^M a_j x_j^{(i)} \right)^2 $$ となる. 平均二乗誤差は,モデルのパラメータ $\tilde{\mathbf A} = (a_0, a_1, \ldots, a_M)$ が変わるごとに変化するので,$a_0, a_1, \ldots, a_M$ の関数 $E(a_0, a_1, \ldots, a_M)$ として置くことにする. --- ## 最小二乗法 重回帰モデルのパラメータを決定するために,平均二乗誤差を最小にするパラメータを求めることにする.これを最小二乗法と呼ぶ.(平均二乗誤差最小以外のパラメータフィッティングの方法もある.) つまり,平均二乗誤差の関数 $E(a_0, a_1, \ldots, a_M)$ が最小の値となるパラメータ $a_0, a_1, \ldots, a_M$ を求める.計算方法は単純に多変数関数の極値問題を解けば良い. 極値の候補を求めるには,$a_0, a_1, \ldots, a_M$ それぞれの変数に関する偏導関数を計算して,全ての偏導関数の値が $0$ になる点(勾配が零ベクトルとなる点)を求めれば良い. --- ### 回帰残差の勾配 先に回帰残差について偏導関数を計算しておこう. $$ \begin{align} & \frac{\partial e^{(i)}}{\partial a_0} = \frac{\partial}{\partial a_0} \left( y^{(i)} - a_0 - \sum_{j=1}^M a_j x_j^{(i)} \right) = -1, \\ (k=1, 2, \ldots, M) \quad & \frac{\partial e^{(i)}}{\partial a_k} = \frac{\partial}{\partial a_k} \left( y^{(i)} - a_0 - \sum_{j=1}^M a_j x_j^{(i)} \right) = -\sum_{j=1}^M \frac{\partial}{\partial a_k} a_j x_j^{(i)} = -\sum_{j=1}^M \delta_{jk} x_j^{(i)} = -x_k^{(i)}. \end{align} $$ --- ### 平均二乗誤差の勾配 これにより平均二乗誤差の偏導関数は $$ \begin{align} & \frac{\partial E}{\partial a_0} = \frac{\partial}{\partial a_0} \frac1N \sum_{i=1}^N (e^{(i)})^2 = \frac2N \sum_{i=1}^N e^{(i)} \frac{\partial e^{(i)}}{\partial a_0} = -\frac2N \sum_{i=1}^N e^{(i)} \\ & \phantom{\frac{\partial E}{\partial a_0}} = -\frac2N \sum_{i=1}^N \left( y^{(i)} - a_0 - \sum_{j=1}^M a_j x_j^{(i)} \right) = -2 \left\{ \frac1N \sum_{i=1}^N y^{(i)} - a_0 - \sum_{j=1}^M a_j \left( \frac1N \sum_{i=1}^N x_j^{(i)} \right) \right\} \\ & \phantom{\frac{\partial E}{\partial a_0}} := -2 \left( \langle y \rangle - a_0 - \sum_{j=1}^M a_j \langle x_j \rangle \right), \\ (k=1, 2, \ldots, M) \quad & \frac{\partial E}{\partial a_k} = \frac{\partial}{\partial a_k} \frac1N \sum_{i=1}^N (e^{(i)})^2 = \frac2N \sum_{i=1}^N e^{(i)} \frac{\partial e^{(i)}}{\partial a_k} = -\frac2N \sum_{i=1}^N e^{(i)} x_k^{(i)} = \frac2N \sum_{i=1}^N e^{(i)} \frac{\partial e^{(i)}}{\partial a_k} = -\frac2N \sum_{i=1}^N e^{(i)} x_k^{(i)} \\ & \phantom{\frac{\partial E}{\partial a_k}} = -\frac2N \sum_{i=1}^N \left( y^{(i)} - a_0 - \sum_{j=1}^M a_j x_j^{(i)} \right) x_k^{(i)} = -2 \left\{ \frac1N \sum_{i=1}^N y^{(i)} x_k^{(i)} - a_0 \frac1N \sum_{i=1}^N x_k^{(i)} - \sum_{j=1}^M a_j \left( \frac1N \sum_{i=1}^N x_j^{(i)} x_k^{(i)} \right) \right\} \\ & \phantom{\frac{\partial E}{\partial a_0}} := -2 \left( \langle y x_k \rangle - a_0 \langle x_k \rangle - \sum_{j=1}^M a_j \langle x_k x_j \rangle \right), \end{align} $$ となる. 
--- ### 平均二乗誤差の極値 $\frac{\partial E}{\partial a_k}=0$ が全ての $k=0, 1, 2, \ldots, M$ で成り立つとすると $$ \begin{align} & \begin{cases} \displaystyle \frac{\partial E}{\partial a_0} = 0 \\[5pt] \displaystyle \frac{\partial E}{\partial a_k} = 0 & (k = 1, 2, \ldots, M) \end{cases} \quad \Longleftrightarrow \quad \begin{cases} \displaystyle \langle y \rangle - a_0 - \sum_{j=1}^M a_j \langle x_j \rangle = 0 \\ \displaystyle \langle y x_k \rangle - a_0 \langle x_k \rangle - \sum_{j=1}^M a_j \langle x_k x_j \rangle = 0 & (k = 1, 2, \ldots, M) \end{cases} \\ & \Longleftrightarrow \quad \begin{cases} \displaystyle a_0 + \sum_{j=1}^M a_j \langle x_j \rangle = \langle y \rangle \\ \displaystyle a_0 \langle x_k \rangle + \sum_{j=1}^M a_j \langle x_k x_j \rangle = \langle y x_k \rangle & (k = 1, 2, \ldots, M) \end{cases} \\ & \Longleftrightarrow \quad \underbrace{\begin{pmatrix} 1 & \langle x_1 \rangle & \langle x_2 \rangle & \ldots & \langle x_M \rangle \\ \langle x_1 \rangle & \langle (x_1)^2 \rangle & \langle x_1 x_2 \rangle & \ldots & \langle x_1 x_M \rangle \\ \langle x_2 \rangle & \langle x_1 x_2 \rangle & \langle (x_2)^2 \rangle & \ldots & \langle x_2 x_M \rangle \\ \vdots & \vdots & & \ddots & \vdots \\ \langle x_M \rangle & \langle x_M x_2 \rangle & \langle x_M x_2 \rangle & \ldots & \langle (x_M)^2 \rangle \end{pmatrix} }_{=: \mathbb M \\ \scriptsize \mbox{given by data}} \underbrace{\begin{pmatrix} a_0 \\ a_1 \\ a_2 \\ \vdots \\ a_M \end{pmatrix} }_{= \tilde{\mathbf A}^{\mathrm T}\\ \scriptsize \mbox{parameters} \\ \scriptsize \mbox{to be fitted}} = \underbrace{\begin{pmatrix} \langle y \rangle \\ \langle y x_1 \rangle \\ \langle y x_2 \rangle \\ \vdots \\ \langle y x_M \rangle \end{pmatrix} }_{=: \mathbf B \\ \scriptsize\mbox{given by data}} \\ & \Longleftrightarrow \quad \mathbb M \tilde{\mathbf A}^{\mathrm T} = \mathbf B \end{align} $$ という連立一次方程式の解となることがわかる. ただし, - $\displaystyle \langle y \rangle = \frac1N \sum_{i=1}^M y^{(i)}$ は目的変数 $y$ の標本平均 - $\displaystyle \langle x_k \rangle = \frac1N \sum_{i=1}^M x_k^{(i)}$ $(k=1, 2, \ldots, M)$ は各回帰変数 $x_k$ の標本平均 - $\displaystyle \langle x_k x_j \rangle = \frac1N \sum_{i=1}^M x_j^{(i)} x_k^{(i)}$ $(j, k=1, 2, \ldots, M)$ は回帰変数 $x_k$ と $x_j$ の積の標本平均 - $\displaystyle \langle y x_k \rangle = \frac1N \sum_{i=1}^M y x_k^{(i)}$ $(j, k=1, 2, \ldots, M)$ は目的変数 $y$ と回帰変数 $x_k$ の積の標本平均 この連立一次方程式の係数行列 $\mathbb M$ は非負定値対称行列(固有値は全て非負)である. --- ### グラム行列 $N \times (M+1)$ 行列を以下のように $$ \mathbb X = \begin{pmatrix} \tilde{\mathbf X}^{(1)} \\ \tilde{\mathbf X}^{(2)} \\ \vdots \\ \tilde{\mathbf X}^{(N)} \end{pmatrix} = \begin{pmatrix} 1 & x_1^{(1)} & x_2^{(1)} & \ldots & x_M^{(1)} \\ 1 & x_1^{(2)} & x_2^{(2)} & \ldots & x_M^{(2)} \\ \vdots \\ 1 & x_1^{(N)} & x_2^{(N)} & \ldots & x_M^{(N)} \\ \end{pmatrix} $$ 置くと,上の連立一次方程式の係数行列は $$ \mathbb M = \frac1N \mathbb X^{\mathrm T} \mathbb X $$ と表すことができる.ただし ${}^{\mathrm T}$ は転置を表す.$\mathbb X^{\mathrm T} \mathbb X$ の形の行列を $\mathbb X$ の__グラム行列__という. $\mathbb M$ は,$\mathbb X$ のグラム行列ではなく,正確には $\frac1{\sqrt N}\mathbb X$ のグラム行列であるが,以下では $\mathbb M$ もグラム行列とよぶことにする. $N\times(M+1)$-実行列 $\mathbb X$ のグラム行列に対して一般に成り立つ重要な性質としては - $\mathbb X^{\mathrm T} \mathbb X$ は非負定値対称行列 - $\mathbb X^{\mathrm T} \mathbb X$ の階数は $\mathbb X$ の階数に等しい.つまり $\mathrm{rank}(\mathbb X^{\mathrm T} \mathbb X) = \mathrm{rank}(\mathbb X)$ - $\mathbb X^{\mathrm T} \mathbb X$ が正定値となるための必要十分条件は $\mathbb X$ の階数 (rank) が $M+1$ となること(つまり $\mathbb X$ は full rank) グラム行列が正定値になるには,データ数が回帰変数の数 $M$ を上回る必要がある.(もちろん上回るだけでは必ずしも正定値になるとは限らない.線型独立なインスタンスが $M+1$ 個必要ということ.) 
--- ### グラム行列の非負定値性(参考) $\mathbb X$ を $$ \mathbb X = (\mathbf x_0, \mathbf x_1, \mathbf x_2, \ldots, \mathbf x_M), \quad \mathbf x_0 = \begin{pmatrix} 1 \\ 1 \\ \vdots \\ 1 \end{pmatrix}, \ \mathbf x_j = \begin{pmatrix} x_j^{(1)} \\ x_j^{(2)} \\ \vdots \\ x_j^{(N)} \end{pmatrix} \ (j=1, 2, \ldots, M) $$ と $N$ 次の列ベクトル $M+1$ 個に分割しておくと,グラム行列 $\mathbb M$ の $(i, j)$ 成分 $M_{ij}$ $(i, j = 1, 2, \ldots, M+1)$ は $$ M_{ij} = \frac1N \mathbf x_{i+1}^{\mathrm T} \mathbf x_{j+1} \quad \left( = \langle x_{i+1} x_{j+1} \rangle \right) $$ というように,$\mathbb X$ の列ベクトルの内積で表される. データを実数値に限っておくと,$\mathbb X^{\mathrm T} \mathbb X$ は実対称行列なので適当な直交行列 $\mathbb U$ を用いて $$ \mathbb U^{\mathrm T} \mathbb M \mathbb U = \mathbb U^{\mathrm T} (\mathbb X^{\mathrm T} \mathbb X) \mathbb U = \begin{pmatrix} \lambda_0 & & & O \\ & \lambda_1 \\ & & \ddots \\ O & & & \lambda_M \end{pmatrix} $$ と対角行列に変形できる. $\mathbb X \mathbb U =: \mathbb V$ と置き,$\mathbb V$ を $$ \mathbb V = (\mathbf v_0, \mathbf v_1, \ldots, \mathbf v_M) $$ のように列ベクトル分割をしておくと,上の左辺は $$ \mathbb U^{\mathrm T} (\mathbb X^{\mathrm T} \mathbb X) \mathbb U = (\mathbb X \mathbb U)^{\mathrm T} (\mathbb X \mathbb U) = \mathbb V^{\mathrm T} \mathbb V = \begin{pmatrix} \mathbf v_0^{\mathrm T} \mathbf v_0 & \mathbf v_0^{\mathrm T} \mathbf v_1 & \ldots & \mathbf v_0^{\mathrm T} \mathbf v_M \\ \mathbf v_1^{\mathrm T} \mathbf v_0 & \mathbf v_1^{\mathrm T} \mathbf v_1 & \ldots & \mathbf v_1^{\mathrm T} \mathbf v_M \\ \vdots & \vdots & \ddots & \vdots \\ \mathbf v_M^{\mathrm T} \mathbf v_0 & \mathbf v_M^{\mathrm T} \mathbf v_1 & \ldots & \mathbf v_M^{\mathrm T} \mathbf v_M \end{pmatrix} $$ となる.両辺比較すると $$ \mathbf v_i^{\mathrm T} \mathbf v_j = \delta_{ij} \lambda_i, $$ 特に $$ \lambda_i = \| \mathbf v_i \|^2 \ge 0 $$ とわかる.ただし $\| \cdot \|$ はユークリッドノルム(2-ノルム)である. したがって係数行列 $\mathbb X^{\mathrm T} \mathbb X$ の固有値は全て非負とわかった. --- ### グラム行列が正定値になるとき $\mathbb X^{\mathrm T}\mathbb X$ は $(M+1)$ 次正方行列であり, $$ \mathrm{rank}{\mathbb X^{\mathrm T}\mathbb X} = \mathrm{rank}(\mathbb X) $$ であることから,$\mathrm{rank}(\mathbb X) = M+1$ であることが,$\mathbb X^{\mathrm T}\mathbb X$ の正定値性の必要十分条件であることがわかる. --- ### $\mathbb X$ が full rank のときの最適パラメータ $\mathbb X$ が full rank であるとしよう.このとき,連立一次方程式の係数行列 $\mathbb M$ は正則なので,連立一次方程式 $\mathbb M \tilde{\mathbf A}^{\mathrm T} = \mathbf B$ の解はただ一つである.つまり,平均二乗誤差を与える関数 $E(a_0, a_1, \ldots, a_N)$ の極値の候補は一つだけということがわかる.実際にこの点で極小点を与え,さらに最小であることもわかる.そのパラメータ点は $$ \mathbb M \tilde{\mathbf A}^{\mathrm T} = \mathbf B \ \Longleftrightarrow \ \tilde{\mathbf A}^{\mathrm T} = \mathbb M^{-1} \mathbf B $$ と表される.この $\tilde{\mathbf A}^{\mathrm T} = \mathbb M^{-1} \mathbf B$ で定められるパラメータ $\tilde{\mathbf A} = (a_0, a_1, a_2, \ldots, a_M)$ が,重回帰モデルにおいて平均二乗誤差最小を与えるパラメータである. ここで $$ \tilde{\mathbf A}^{\mathrm T}_* = \mathbb M^{-1} \mathbf B $$ と区別しておこう. --- ### $\tilde{\mathbf A}^{\mathrm T}_* = \mathbb M^{-1} \mathbf B$ において極小を取ること(参考) $\tilde{\mathbf A}^{\mathrm T}_* = \mathbb M^{-1} \mathbf B$ が極小値となることを示すには,$E(\tilde{\mathbf A})$ のヘッセ行列が $\tilde{\mathbf A}^{\mathrm T}_* = \mathbb M^{-1} \mathbf B$ において正定値であることを示せばよい. 
ヘッセ行列 $\mathbb H = (H_{kl})$ の $(k,l)$ 成分は $\frac{\partial^2 E}{\partial a_k \partial a_l}$ で与えられる.すると $$ \begin{align} & H_{11} = \frac{\partial^2 E}{\partial (a_0)^2} = -2 \frac{\partial}{\partial a_0} \left( \langle y \rangle - a_0 - \sum_{j=1}^M a_j \langle x_j \rangle \right) = 2 = 2 M_{11}, \\ (k = 1, 2, \ldots, M) \quad & H_{1,k+1} = \frac{\partial^2 E}{\partial a_0 \partial a_k} = -2 \frac{\partial}{\partial a_k} \left( \langle y \rangle - a_0 - \sum_{j=1}^M a_j \langle x_j \rangle \right) = 2 \langle x_k \rangle = 2M_{1, k+1}, \\ (k = 1, 2, \ldots, M) \quad & H_{k+1, 1} = H_{1, k+1} = 2 \langle x_k \rangle = 2M_{k+1, 1}, \\ (k, l = 1, 2, \ldots, M) \quad & \frac{\partial^2 E}{\partial a_k \partial a_l} = -2 \frac{\partial}{\partial a_l} \left( \langle y x_k \rangle - a_0 \langle x_k \rangle - \sum_{j=1}^M a_j \langle x_k x_j \rangle \right) = 2 \langle x_k x_l \rangle = 2 M_{kl} \end{align} $$ となることから, $$ \mathbb H = 2 \mathbb M $$ とわかる.$\mathbb H$ は $a_0, a_1, \ldots, a_M$ には依存しない定行列である. $\mathbb M$ が正定値であれば $\mathbb H$ も正定値となるので,$\tilde{\mathbf A}^{\mathrm T}_* = \mathbb M^{-1} \mathbf B$ は $E(a_0, a_1, \ldots, a_M)$ の極小を与える. --- ### 最小値となること $\mathbb X$ が full rank のとき,$E(a_0, a_1, \ldots, a_M)$ のヘッセ行列 $\mathbb H$ は全ての点で正定値なので,$E(a_0, a_1, \ldots, a_M)$ は狭義凸関数である. 狭義凸関数の極小点は最小点となるため,上で得られた $\tilde{\mathbf A}_*$ は $E(a_0, a_1, \ldots, a_M)$ の最小を与えるパラメータであることがわかる. --- ### 凸関数の補足(参考) ここで $n$ 変数関数 $f$ が(広義)凸関数とは, $$ f((1-c)\mathbf x+c\mathbf y) \le (1-c)f(\mathbf x) + c f(\mathbf y) $$ が全ての $\mathbf x, \mathbf y \in \mathbb R^n$ と $0<c<1$ なる全ての $c$ に対して成り立つときをいう. この不等号において等号が成り立つのが $\mathbf x = \mathbf y$ のときだけであるとき,特に狭義凸関数とよぶ. $f$ が狭義凸となる十分条件として,$f$ のヘッセ行列が全ての点で正定値となることが知られている.したがって,$E(a_0, a_1, \ldots, a_M)$ は狭義凸関数である. 凸関数のもう一つの重要な性質は $$ f(\mathbf a) + \nabla f(\mathbf a) (\mathbf x - \mathbf a) \le f(\mathbf x) $$ が全ての $\mathbf a, \mathbf x \in \mathbb R^N$ において成り立つということ. これを $E(a_0, a_1, \ldots, a_M)$ に適用すると, $$ E(\tilde{\mathbf A}_*) + \nabla E(\tilde{\mathbf A}_*) (\tilde{\mathbf A} - \tilde{\mathbf A}_*) \le E(\tilde{\mathbf A}) $$ が全ての $\tilde{\mathbf A} \in \mathbb R^{M+1}$ に対して成り立つ. $\tilde{\mathbf A}_*$ は $E$ の極値なので $\nabla E(\tilde{\mathbf A}_*)= \mathbf 0$ が成り立つ.したがって $$ E(\tilde{\mathbf A}_*) \le E(\tilde{\mathbf A}) $$ となるため,$E(\tilde{\mathbf A}_*)$ が最小値であることがわかる. --- ## では計算してみよう ```python # 真のパラメータ B0 = 1.2 B1 = 1.8 B2 = 0.2 # dataset N = 10 X1 = np.random.rand(N) * 3 X2 = np.random.rand(N) * 3 Y = B0 + B1*X1 + B2*X2 + np.random.randn(X1.size) # この場合では N = 10, M = 2 である M = 2 ``` $$ \mathbb X = \begin{pmatrix} \tilde{\mathbf X}^{(1)} \\ \tilde{\mathbf X}^{(2)} \\ \vdots \\ \tilde{\mathbf X}^{(N)} \end{pmatrix} = \begin{pmatrix} 1 & x_1^{(1)} & x_2^{(1)} \\ 1 & x_1^{(2)} & x_2^{(2)} \\ \vdots \\ 1 & x_1^{(N)} & x_2^{(N)} \\ \end{pmatrix} $$ ```python # データ行列 XX = np.ones((N, M+1)) # XX の第0列は全て1,第1列は X1, 第2列は X2 XX[:, 1] = X1 XX[:, 2] = X2 # M = (X^T)X / N MM = np.dot(XX.T, XX) / N # 行列の積には np.dot を用いる.XX.T で XX の転置が取れる BB = np.zeros(M+1) BB[0] = np.mean(Y) BB[1] = np.mean(Y*X1) BB[2] = np.mean(Y*X2) # 連立一次方程式 MA = B を解く AA = np.linalg.solve(MM, BB) print('最小二乗法で得られた重回帰モデル: %9.6f + %9.6f * x1 + %9.6f * x2'%(AA[0], AA[1], AA[2])) print('真の重回帰モデル: %9.6f + %9.6f * x1 + %9.6f * x2'%(B0, B1, B2)) ``` 学習で得られたパラメータは必ずしも真のパラメータに近いとは言えない.(ただし,与えられたデータセットに対して平均二乗誤差が最小であることは間違いない.) 
--- scikit-learn の結果と一致することを確かめてみよう ```python from sklearn import linear_model reg = linear_model.LinearRegression() X_train = XX[:, 1:3] Y_train = Y reg.fit(X_train, Y_train) print('scikit-learn の LinearRegression で得られた重回帰モデル: %9.6f + %9.6f * x1 + %9.6f * x2' %(reg.intercept_, reg.coef_[0], reg.coef_[1])) ``` データ数 $N$ を変えてフィッテイングの精度を調べてみよう. ```python # 真のパラメータ B0 = 1.2 B1 = 1.8 B2 = 0.2 # 回帰変数の数 M = 2 # データ数の常用対数の最大 Lmax = 7 B0_pred = np.zeros(Lmax) B1_pred = np.zeros(Lmax) B2_pred = np.zeros(Lmax) reg = linear_model.LinearRegression() for L in range(1, Lmax): # データ数を変える N = 10**L # dataset X1 = np.random.rand(N) * 3 X2 = np.random.rand(N) * 3 Y = B0 + B1*X1 + B2*X2 + np.random.randn(X1.size) X_train = np.zeros((N, M)) X_train[:, 0] = X1 X_train[:, 1] = X2 # sklearn の LinearRegression で fitting reg.fit(X_train, Y) B0_pred[L] = reg.intercept_ B1_pred[L] = reg.coef_[0] B2_pred[L] = reg.coef_[1] print('scikit-learn の LinearRegression で得られた重回帰モデル: %9.6f + %9.6f * x1 + %9.6f * x2' %(reg.intercept_, reg.coef_[0], reg.coef_[1])) fig = plt.figure() ax = plt.axes() ax.set_xlabel("L", size=20) ax.plot( np.arange(1, Lmax+1, 1), np.log10(np.abs(B0-B0_pred)), 'o-', label='$\log|B0 - B0_{pred}|$') ax.plot( np.arange(1, Lmax+1, 1), np.log10(np.abs(B1-B1_pred)), 'o-', label='$\log|B1 - B1_{pred}|$') ax.plot( np.arange(1, Lmax+1, 1), np.log10(np.abs(B2-B2_pred)), 'o-', label='$\log|B2 - B2_{pred}|$') ax.tick_params(labelsize=12) ax.legend() ``` # 演習問題 3-1 第2回その3で Iris データセットを用いて線形回帰モデルを適用した.そこでは目的変数として `Petal Length` を取り,単回帰では説明変数に `Sepal Length` を,重回帰では説明変数に `Sepal Length` と `Petal Width` を用いてモデルを立てた.さらには `Species` でデータを分けて種別に回帰することでよりフィッティングが合うようにした. では,目的変数と説明変数を全ての組み合わせを取り替えて線形回帰モデルを試し,回帰変数の数とフィッティング精度に傾向があるか確かめよ.(植物学的に意味があるかはさておき)一番フィッティングの合う線形回帰モデルは何か.その理由も考えよ. <h3><div style="text-align: right;">その2につづく</div></h3> ```python ``` ```python ```
1ec3832b827ef503d3a3cdebfaa697dd977be83d
28,678
ipynb
Jupyter Notebook
Intro2PracDS_2020_03-1_MultipleRegression.ipynb
NTNKN/Intro2PracDS
a82631c4d9e31318a85bc41131e9c32d1cd4a2a5
[ "BSD-3-Clause" ]
1
2020-10-01T07:04:28.000Z
2020-10-01T07:04:28.000Z
Intro2PracDS_2020_03-1_MultipleRegression.ipynb
NTNKN/Intro2PracDS
a82631c4d9e31318a85bc41131e9c32d1cd4a2a5
[ "BSD-3-Clause" ]
null
null
null
Intro2PracDS_2020_03-1_MultipleRegression.ipynb
NTNKN/Intro2PracDS
a82631c4d9e31318a85bc41131e9c32d1cd4a2a5
[ "BSD-3-Clause" ]
null
null
null
32.887615
344
0.481066
true
10,692
Qwen/Qwen-72B
1. YES 2. YES
0.893309
0.66888
0.597517
__label__yue_Hant
0.553659
0.226562
# Model Project

We start by importing necessary packages:

```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
import time
from scipy import linalg
from scipy import optimize
import sympy as sm
```

# Solow model with climate change

Consider the standard Solow model extended with climate change, where:

$K_t$ is capital

$L_t$ is labor (growing with a constant rate of $n$)

$A_t$ is technology (growing with a constant rate of $g$)

$D_t$ is the damage function (determined by temperature, see below)

$Y_t = F(K_t,A_t,L_t,D_t)$ is GDP

The damage function depends on temperature $T$:

$$ D_t = 1-\frac{1}{1+\theta_1T_t^{\theta_2}} \leq 1 $$

$T_t$ is the global mean temperature change at time t, and $\theta_1$ and $\theta_2$ are damage parameters.

Cobb-Douglas production function

$$ F(K_{t},A_{t}L_{t},D_{t})= (1-D_{t})K_{t}^{\alpha}(A_{t}L_{t})^{1-\alpha}$$

It is convenient to express all variables of interest per effective worker, as this can be used as a measure of the welfare of society. We use small letters to denote per-effective-worker quantities, so $y=Y/AL$ and $k=K/AL$, which gives:

$$ f({k}_{t})=(1-D){k}_{t}^{\alpha} $$

Saving is a constant fraction of GDP

$$ S_t = sY_t,\,s\in(0,1) $$

**Capital accumulates** according to

\\[ K_{t+1}=S_{t}+(1-\delta)K_{t}=sF(K_{t},A_{t}L_{t})+(1-\delta)K_{t}, \quad \delta \in (0,1) \\]

The economy is growing over time (due to exogenous technological progress and population growth), which makes it useful to focus on the behavior of the capital stock per unit of effective labor. The transition in the model can be described by

\\[ \tilde{k}_{t+1}= \frac{1}{(1+n)(1+g)}[sf(\tilde{k}_{t})+(1-\delta)\tilde{k}_{t}] \\]

# Level of output for changes in K at different levels of Damage

The output level depends on the damage. Therefore, different levels of damage will lead to different levels of output.

Define parameters and arrays

```python
K_size = 100  # Model domain
alpha = (1/3)
D = 0.1
k = np.arange(K_size)
```

Output values

```python
def output(k, D):
    y = (1-D) * (k)**(alpha)
    return y

y = output(k, D)
y2 = output(k, D + 0.1)
y3 = output(k, D + 0.2)
y4 = output(k, D + 0.3)
y5 = output(k, D + 0.4)
y6 = output(k, D + 0.5)
```

Plot the production function for different values of damage

```python
fig, ax = plt.subplots(figsize=(12, 12))
ax.set(title="Output", xlabel="Capital", ylabel="Output")
ax.grid()

ax.plot(k, y, "b-", alpha=1.00, label="D= 0.1")
ax.plot(k, y2, "b-", alpha=0.85, label="D= 0.2")
ax.plot(k, y3, "b-", alpha=0.70, label="D= 0.3")
ax.plot(k, y4, "b-", alpha=0.55, label="D= 0.4")
ax.plot(k, y5, "b-", alpha=0.40, label="D= 0.5")
ax.plot(k, y6, "b-", alpha=0.25, label="D= 0.6")

ax.legend()
ax.set_ylim(0, 6)
ax.set_xlim(0, 100)
plt.show()
```

With an increase in damage the output decreases. The level of damage has a substantial effect on the output level.

# Graph: The Steady-State (Model with Climate Change)

In the following cells we are going to code the graph for our Solow model with climate change. The graph shows the output, investments (= savings) and depreciation in our model.
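Before defining the parameters for that graph, it may help to see what the damage function above actually looks like. The sketch below is purely illustrative: the values of $\theta_1$ and $\theta_2$ are assumptions chosen for plotting only and are not used elsewhere in the project.

```python
# Illustrative sketch of the damage function D = 1 - 1/(1 + theta1 * T**theta2)
# theta1 and theta2 are assumed values, chosen only to show the shape of the curve
theta1 = 0.01
theta2 = 2

T = np.linspace(0, 6, 100)                # global mean temperature change
D_of_T = 1 - 1 / (1 + theta1 * T**theta2)

fig, ax = plt.subplots()
ax.plot(T, D_of_T)
ax.set(xlabel="Temperature change T", ylabel="Damage D",
       title="Damage function (illustrative parameters)")
plt.show()
```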
Define parameters and arrays

```python
K_size = 101
A = 1
L = 5
alpha = (1/3)
s = 0.2
delta = 0.10
g = 0.02
n = 0.01
D_ = 0.1

K = np.arange(K_size)  # Array of capital values 0, 1, ..., K_size-1
```

Define Functions

```python
def output(K):
    # Cobb-Douglas Production Function
    Y = (1-D_) * A * (K)**(alpha) * (L)**(1-alpha)
    return Y
```

Populate arrays

```python
Y = output(K)
D = (delta)*K
S = s*Y
```

Steady State Values

In a closed economy with no government, consumption per capita is the difference between income and investment:

$$c = y - i$$

Capital depreciates at rate $\delta$:

$$ d_t = \delta k_t $$

```python
# Steady-state capital per effective worker: ((delta + n + g + n*g)/(s*(1-D)))**(1/(alpha-1))
Kstar = ((delta + n + g + n*g)/(s*(1-D_)))**(1/(alpha-1))
Ystar = (1-D_)*Kstar**alpha
Sstar = s*Ystar
Cstar = Ystar - Sstar
Dstar = (delta)*Kstar
```

Plot the graph

```python
fig, ax = plt.subplots(figsize=(10, 8))

ax.plot(K, Y, "k", ls = '-', label="Output")
ax.plot(K, S, "b", ls = '-', label="Investment")
ax.plot(K, D, "r", ls = '-', label="Depreciation")

ax.set(title="Solow Model", xlabel="Capital Stock")
plt.text(77, 12, r'$Y = A \cdot K^{\alpha} L^{1-\alpha}$')
plt.text(90, 10, r'$D = {\delta}K$')
plt.text(90, 3, r'$S = sY$')
plt.legend(loc=2)
ax.set_xlim(0, 100)
ax.set_ylim(0, 15)
plt.show()
```

# Behavior of capital stock per unit of effective labor over time: Comparing Model with and without Climate Change

## Model including climate change

```python
k = sm.symbols('k')
alpha = sm.symbols('alpha')
delta = sm.symbols('delta')
s = sm.symbols('s')
A = sm.symbols('A')
D = sm.symbols('D')
n = sm.symbols('n')
g = sm.symbols('g')

sm.init_printing(use_unicode=True)
```

Equation for the capital stock per unit of effective labour

```python
ss1 = sm.Eq(k,(s*((1-D)*((k)**alpha))+(1-delta)*k)/((1+n)*(1+g)))
ss1
```

Create a vector for time, t

```python
x_vec = np.zeros((100,1))
```

```python
x_vec[0] = 1
```

Then we select some parameter values. Let $s=0.2$, $g=0.02$, $n=0.01$, $\delta=0.1$, $\alpha=1/3$ and $D=0.1$.

```python
s = 0.2
g = 0.02
n = 0.01
delta = 0.1
alpha = (1/3)
D = 0.1
```

Now, plug vector x_vec into the capital transition equation:

```python
for i in range(1, 100):
    x_vec[i] = (s*(1-D)*x_vec[i-1]**alpha+(1-delta)*x_vec[i-1])/((1+g)*(1+n))
```

Now we can check whether x_vec (the capital stock per unit of effective labour) converges to a steady state.

```python
x_vec
```

array([[1.
], [1.04834013], [1.09334207], [1.13516079], [1.17396056], [1.20991008], [1.24317873], [1.27393383], [1.30233866], [1.328551 ], [1.35272218], [1.3749964 ], [1.39551038], [1.4143932 ], [1.43176626], [1.44774341], [1.46243112], [1.47592874], [1.48832876], [1.49971719], [1.51017383], [1.51977267], [1.5285822 ], [1.53666575], [1.54408184], [1.5508845 ], [1.55712355], [1.56284493], [1.56809096], [1.57290059], [1.5773097 ], [1.58135125], [1.58505559], [1.58845058], [1.59156185], [1.59441292], [1.5970254 ], [1.59941913], [1.60161232], [1.60362169], [1.60546255], [1.60714899], [1.60869391], [1.61010912], [1.6114055 ], [1.61259299], [1.6136807 ], [1.61467699], [1.61558955], [1.61642538], [1.61719092], [1.61789208], [1.61853426], [1.61912241], [1.61966108], [1.62015441], [1.62060623], [1.62102003], [1.62139899], [1.62174605], [1.62206389], [1.62235497], [1.62262154], [1.62286567], [1.62308924], [1.62329398], [1.62348148], [1.62365319], [1.62381044], [1.62395444], [1.62408632], [1.62420708], [1.62431768], [1.62441896], [1.6245117 ], [1.62459664], [1.62467442], [1.62474564], [1.62481087], [1.6248706 ], [1.6249253 ], [1.6249754 ], [1.62502127], [1.62506328], [1.62510174], [1.62513697], [1.62516923], [1.62519877], [1.62522583], [1.6252506 ], [1.62527329], [1.62529406], [1.62531309], [1.62533051], [1.62534646], [1.62536107], [1.62537445], [1.6253867 ], [1.62539792], [1.6254082 ]]) ## Model ignoring climate change ```python ss2 = sm.Eq(k,(s*(((k)**alpha))+(1-delta)*k)/((1+n)*(1+g))) ss2 ``` Create a vector to plug in for time, t ```python y_vec = np.zeros((100,1)) ``` ```python y_vec[0] = 1 ``` plug vector y_vec in the equation for behavior of capital stock per unit of effective labour. ```python for i in range(1, 100): y_vec[i] = (s*y_vec[i-1]**alpha+(1-delta)*y_vec[i-1])/((1+g)*(1+n)) ``` ### Plot both functions in the same graph ```python fid, ax = plt.subplots() ax.plot(x_vec, label="model including climate change") ax.plot(y_vec, label="model ignoring climate change") ax.set(xlabel = 'time, t', ylabel ='k(t)') plt.title('capital stock per unit of effective labour over time') plt.legend() plt.show() ``` Both equations converge to a steady state over time. The model including climate change has a lower steady state because it implements climate change as a external cost. # Steady state calculations We now want to find an **analytic an analytical** expression for the steady state, that is: where $ k_{t+1} = k_{t} =k_{ss}$ ```python k = sm.symbols('k') alpha = sm.symbols('alpha') delta = sm.symbols('delta') s = sm.symbols('s') A = sm.symbols('A') D = sm.symbols('D') n = sm.symbols('n') g = sm.symbols('g') ``` ```python sm.init_printing(use_unicode=True) f = (1-D)*((k)**alpha) ss = sm.Eq(k,(s*f+(1-delta)*k)/((1+n)*(1+g))) kss = sm.solve(ss,k)[0] kss ``` In the standard Solow model, **without the damage-function**, the steady state capital is given by: ```python f1= (k)**alpha ss1 = sm.Eq(k,(s*f1+(1-delta)*k)/((1+n)*(1+g))) kss1 = sm.solve(ss1,k)[0] kss1 ``` From these equations it becomes clear that, with D≤1, the steady state capital growth is reduced by including the damage-function, compared to the the model without We can also find the **specific numerical value** for steady state capital. 
Using the same parameter values as before ($s=0.2$, $g=0.02$, $n=0.01$, $\delta=0.1$, $\alpha=1/3$, $D=0.1$):

```python
ss_func = sm.lambdify((s,g,n,delta,alpha,D),kss)
```

Steady state capital, including the damage function, is then given by:

```python
ss_func(0.2,0.02,0.01,0.1,1/3, 0.1)
```

By comparison, excluding the damage function, the steady state is given by:

```python
ss_func(0.2,0.02,0.01,0.1,1/3, 0)
```

Notice how these values correspond to the steady state levels in the figure. It becomes clear that the steady state capital excluding the damage function (representing climate change) is higher than the steady state capital in the case of climate damage.

Climate change is a negative externality for society. For example, higher temperatures cause sea levels to rise and droughts to become more frequent, which in turn can increase human conflict. Today's generation benefits from the use of fossil fuels, which releases greenhouse gases into the atmosphere and in turn causes climate change. But the benefit from using fossil fuels does not internalize these external costs of climate change.

# Impact of damage on steady state capital

In order to visualize the impact of damage due to climate change (ceteris paribus), we plot the steady state as a function of D.

First, create a list that contains all values we want D to take

```python
Ds = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
```

Define the function that returns the steady state dependent only on D.

```python
def calc_ss(D):
    return ss_func(0.2,0.02,0.01,0.1,1/3, D)
```

Use a while loop to replace each entry of Ds with the steady state capital for the respective damage level.

```python
index = 0
while index <= len(Ds)-1:
    Ds[index] = calc_ss(Ds[index])
    index = index +1

print(Ds)
```

[1.903831539231319, 1.6255198652134988, 1.3622709566173723, 1.1150018222907796, 0.8848209414512797, 0.6731060958136442, 0.4816355156188146, 0.31283144391803264, 0.17028386957717148, 0.06020443945235181]

```python
Damage = [0,0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
```

## Plot:

```python
fid, ax = plt.subplots()
ax.plot(Damage, Ds)
ax.set(xlabel = 'Damage', ylabel ='Steady state')
plt.show()
```

From the plot we can see that as damage increases, the steady state capital decreases. This could be due to higher expenses needed to compensate for, e.g., agricultural damage.

```python

```

```python

```
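As a quick cross-check on these numbers (this is an added sketch, not part of the original analysis), note that the closed-form steady state implies $k^*\propto(1-D)^{\frac{1}{1-\alpha}}$, so the ratio of any two steady states should depend only on the ratio of their $(1-D)$ terms:

```python
# Cross-check: k* is proportional to (1-D)**(1/(1-alpha)), so with alpha = 1/3
# the ratio of the steady states at D = 0.1 and D = 0.5 should equal (0.9/0.5)**1.5
k_low_damage = ss_func(0.2, 0.02, 0.01, 0.1, 1/3, 0.1)   # 1-D = 0.9
k_high_damage = ss_func(0.2, 0.02, 0.01, 0.1, 1/3, 0.5)  # 1-D = 0.5

print(k_low_damage / k_high_damage)
print((0.9 / 0.5)**(1 / (1 - 1/3)))
```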
d4e2c821fcb8279c6e902b26709f36d825a50324
170,112
ipynb
Jupyter Notebook
modelproject/modelproject.ipynb
NumEconCopenhagen/projects-2019-tba
eea677b973b0205f293272027623ca3c13a3c23e
[ "MIT" ]
null
null
null
modelproject/modelproject.ipynb
NumEconCopenhagen/projects-2019-tba
eea677b973b0205f293272027623ca3c13a3c23e
[ "MIT" ]
13
2019-04-08T17:01:11.000Z
2019-05-14T18:47:37.000Z
modelproject/modelproject.ipynb
NumEconCopenhagen/projects-2019-tba
eea677b973b0205f293272027623ca3c13a3c23e
[ "MIT" ]
2
2019-03-22T14:44:02.000Z
2019-03-22T14:44:26.000Z
155.49543
57,960
0.888926
true
4,028
Qwen/Qwen-72B
1. YES 2. YES
0.917303
0.841826
0.772209
__label__eng_Latn
0.883305
0.632432
```python #Import necessary packages import numpy as np import matplotlib.pyplot as plt import scipy as sp import math from scipy.stats import linregress from scipy.optimize import curve_fit #from sympy import Symbol, Derivative #from scipy.signal import savgol_filter as sf import pandas as pd ``` ```python #List of Bus Parameters: #For 40 foot bus mass = 12927 #Mass of bus in kg width = 2.6 # in m height = 3.3 #in m A = width * height fr = 0.01 #friction coefficient estimate from paper, come back to this cd = 0.34 #drag coefficient estimate from paper, come back to this rw = 0.28575 #radius of wheel in m ``` ```python #Other parameters g = 9.81 rhoa = 1.225 # air density in kg/m3; consant for now, eventaully input from weather API Vwind = float(5) #wind speed in km per hour; figure out component, and also will come from weather API ``` ```python #Read csv data file data = pd.read_csv('./route45csv.csv',index_col='Index') data.head() ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>Dist. Route (m)</th> <th>Elev. Lidar (m)</th> <th>Velocity (m/s)</th> <th>Accel (m/s^2)</th> <th>Unnamed: 5</th> </tr> <tr> <th>Index</th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0.000000</td> <td>19.812000</td> <td>0.0</td> <td>0.0</td> <td>NaN</td> </tr> <tr> <th>1</th> <td>9.997548</td> <td>19.893331</td> <td>5.0</td> <td>1.0</td> <td>NaN</td> </tr> <tr> <th>2</th> <td>19.995546</td> <td>20.116800</td> <td>6.0</td> <td>0.7</td> <td>NaN</td> </tr> <tr> <th>3</th> <td>29.993996</td> <td>23.332807</td> <td>7.0</td> <td>0.7</td> <td>NaN</td> </tr> <tr> <th>4</th> <td>39.991544</td> <td>20.116800</td> <td>8.0</td> <td>0.7</td> <td>NaN</td> </tr> </tbody> </table> </div> ```python #Velcity and Acceleration Data #vel = np.resize([0, 2.5, 5, 7.5, 10, 7.5, 5, 2.5], len(data)) #accel = np.resize([0.5, 0, -0.5, 0], len(data)) #vel2 = [] #for i in range(0,len(data)): #x = (vel[i]-Vwind)**2 #vel2.append(x) ``` ```python #GPS data collected from barometric altimeter gps = pd.read_csv('./rt45_11_13.csv',index_col='FID') ``` ```python #Insert outlier scrubbing/pinging code here ``` ```python #Polynomial fitting and derivative calculation window=50 #Number of points per iteration n = window // 2 #Half of Window r2=0.95 #R2 threshold rmse_threshold = 0.25 #RMSE threshold (2nd order) line = [] #Empty list for y-values of fitted points sin = [] #Empty list for derivative grade = [] #The grade of the fitted point #Define functions for regression #Define function def third_order (x,param3,param2,param1,param0): return (param3*(x**3))+(param2*(x**2))+(param1*x) + param0 def rmse(predictions, targets): differences = predictions - targets differences_squared = differences ** 2 mean_of_differences_squared = differences_squared.mean() rmse_val=np.sqrt(mean_of_differences_squared) return rmse_val #First, fit a line to the points that will not be in the first window for i in range (0,n): tempdata = data.iloc[0:i+n] xl = tempdata['Dist. Route (m)'].values #x values of data yl= tempdata['Elev. Lidar (m)'].values #y values of data mid_point = data["Dist. 
Route (m)"].iloc[i] param3, var3 = sp.optimize.curve_fit(third_order,xl,yl) y_value = third_order(mid_point,*param3) deriv = (3*param3[0]*(mid_point**2))+(2*param3[1]*mid_point)+param3[2] line.append(y_value) sin.append(deriv) grade.append(deriv*100) #Now, fit a line around point i, where i is the mid-point of the window for i in range(n,int(len(data))): tempdata = data.iloc[(i-n):(i+n)] xl = tempdata['Dist. Route (m)'].values #x values of data yl= tempdata['Elev. Lidar (m)'].values #y values of data mid_point = data["Dist. Route (m)"].iloc[i] param3, var3 = sp.optimize.curve_fit(third_order,xl,yl) y_value = third_order(mid_point,*param3) deriv = (3*param3[0]*(mid_point**2))+(2*param3[1]*mid_point)+param3[2] line.append(y_value) sin.append(deriv) grade.append(deriv*100) ``` ```python #Calculate the road angle angle = [] for i in range(len(sin)): x = math.asin(sin[i]) angle.append(x) cos = [] for i in range(len(angle)): x = math.cos(angle[i]) cos.append(x) ``` ```python fig = plt.figure(figsize=(13.69,3.13)) #Connect all the fitted lines ax1 = fig.add_subplot(111) ax1.plot(data['Dist. Route (m)'].values/1000,data['Elev. Lidar (m)'],'o',label='original',ms = 8) ax1.plot((data["Dist. Route (m)"].values)/1000,line,'o',label='fitted', lw = 3.5) #ax1.plot((gps['Total Distance'].values/1000),gps['Elevation'],'o',label='altimeter',lw = 3) #ax2 = ax1.twinx() #ax2.plot(gps['Total Distance'].values/1000,gps['Speed'],'o',label='speed',ms= 5) ax1.set_title('Route 45',fontsize = 22) #ax1.set_xticks(np.arange(0,(data['Dist. Route (m)'][len(data)-1])/1000,2)) ax1.set_xlabel("Distance (km)",fontsize = 22) ax1.set_ylabel("Elevation(m)",fontsize = 22) ax1.legend(fontsize = 16) plt.tight_layout() ``` ```python delta = data['Elev. Lidar (m)']-line avg = np.median(delta) avg ``` -0.3400690415062968 ```python fig = plt.figure(figsize=(10,8), dpi =100) ax1 = fig.add_subplot(211) ax1.hist(delta,bins = len(line)) ax1.set_title('Data - Fit Line',fontsize = 22) ax1.tick_params(axis='both', which='major', labelsize=18) ax1.set_xlabel("Difference (m)",fontsize = 22) ax1.set_ylabel("Frequency",fontsize = 22) plt.tight_layout() ``` ```python working=[0] for i in range(1,len(data)): oldslope=((data['Elev. Lidar (m)'][i]-data['Elev. Lidar (m)'][i-1])/(data['Dist. Route (m)'][i]-data['Dist. Route (m)'][i-1]))*100 working.append(oldslope) data['slope'] = working ``` ```python fig = plt.figure(figsize=(13.5,3)) ax1 = fig.add_subplot(111) ax1.set_title('Route 50',fontsize = 22) ax1.plot((data["Dist. Route (m)"].values)/1000,data['slope'],'o',label='original',color = 'k',ms = 8) ax1.plot((data["Dist. Route (m)"].values)/1000,grade,'o',label='filtered',color = 'g',ms = 8) #ax1.plot(grade.index,grade,'o-',label='filtered') ax1.set_xticks(np.arange(0,(data['Dist. Route (m)'][len(data)-1])/1000,2)) ax1.tick_params(axis='both', which='major', labelsize=18) ax1.set_xlabel("Distance (km)",fontsize = 22) ax1.set_ylabel("Grade (%)",fontsize = 22) ax1.legend() ax1.set_ylim(-10,10) #ax1.set_xlim(0,50) #ax1.legend() #ax1.title("Route 5") plt.tight_layout() ``` ```python #Plot for comparing filtered LiDAR elevation data to collected data #fig = plt.figure(figsize=(13.69,3.13)) #Connect all the fitted lines #plotdata = data.iloc[400:450] #plotline = line[400:450] #plotgps = gps.iloc[140:155] #ax1 = fig.add_subplot(111) #ax1.plot(plotdata['Dist. Route (m)'].values/1000,plotdata['Elev. Lidar (m)'],'o',label='original (LiDAR)',ms = 8) #ax1.plot((plotdata["Dist. 
Route (m)"].values)/1000,plotline,'-',label='fitted', lw = 3.5) #ax1.plot((plotgps['Total Distance'].values/1000),plotgps['Elevation'],'s',label='altimeter',ms = 10) #ax1.plot(data['Dist. Route (m)'].values/1000,data['Literature'],'o',label='literature',ms= 5) #ax1.set_title('Route 45',fontsize = 22) #ax1.set_xticks(np.arange(0,(data['Dist. Route (m)'][len(data)-1])/1000,2)) #ax1.tick_params(axis='both', which='major', labelsize=18) #ax1.set_xlabel("Distance (km)",fontsize = 22) #ax1.set_ylabel("Elevation(m)",fontsize = 22) #ax1.legend(fontsize = 16) #plt.tight_layout() ``` ```python #Now, calculating the forces on the bus forces = pd.DataFrame() #Calculate the gravitational force forces['Fg'] = mass * g * np.asarray(sin) #Calculate the rolling friction forces['Fr'] = fr * mass * g * np.asarray(cos) #Calculate the inertial force forces['Fi'] = mass * (data['Accel (m/s^2)']) #Calculate the aerodynamic drag forces['Fa'] = cd * A * (rhoa/2)*(data['Velocity (m/s)']-5) forces.head() ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>Fg</th> <th>Fr</th> <th>Fi</th> <th>Fa</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>6949.154566</td> <td>1266.233267</td> <td>0.0</td> <td>-8.933925</td> </tr> <tr> <th>1</th> <td>5017.559348</td> <td>1267.145679</td> <td>12927.0</td> <td>0.000000</td> </tr> <tr> <th>2</th> <td>4091.442962</td> <td>1267.478509</td> <td>9048.9</td> <td>1.786785</td> </tr> <tr> <th>3</th> <td>2884.455127</td> <td>1267.810615</td> <td>9048.9</td> <td>3.573570</td> </tr> <tr> <th>4</th> <td>2178.018205</td> <td>1267.951650</td> <td>9048.9</td> <td>5.360355</td> </tr> </tbody> </table> </div> ```python #Calculate torque/load #forces['torque (N*m)'] = (forces['Fg']+forces['Fr']+forces['Fi']+forces['Fa'])*rw forces['Power (w)'] = (forces['Fg']+forces['Fr']+forces['Fi']+forces['Fa'])*data['Velocity (m/s)'] forces.head(11) ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>Fg</th> <th>Fr</th> <th>Fi</th> <th>Fa</th> <th>torque (N*m)</th> <th>Power (w)</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>6949.154566</td> <td>1266.233267</td> <td>0.0</td> <td>-8.933925</td> <td>2344.994204</td> <td>0.000000</td> </tr> <tr> <th>1</th> <td>5017.559348</td> <td>1267.145679</td> <td>12927.0</td> <td>0.000000</td> <td>5489.744711</td> <td>96058.525135</td> </tr> <tr> <th>2</th> <td>4091.442962</td> <td>1267.478509</td> <td>9048.9</td> <td>1.786785</td> <td>4117.545559</td> <td>86457.649537</td> </tr> <tr> <th>3</th> <td>2884.455127</td> <td>1267.810615</td> <td>9048.9</td> <td>3.573570</td> <td>3773.254258</td> <td>92433.175179</td> </tr> <tr> <th>4</th> <td>2178.018205</td> <td>1267.951650</td> <td>9048.9</td> <td>5.360355</td> <td>3571.940783</td> <td>100001.841681</td> </tr> <tr> <th>5</th> <td>1718.793861</td> <td>1268.022215</td> <td>9048.9</td> <td>7.147140</td> <td>3441.248164</td> <td>108385.768938</td> </tr> <tr> <th>6</th> <td>1409.690738</td> <td>1268.060345</td> <td>9048.9</td> <td>8.933925</td> <td>3353.443416</td> <td>117355.850087</td> </tr> <tr> <th>7</th> <td>1216.675776</td> <td>1268.080334</td> <td>0.0</td> <td>10.720710</td> 
<td>713.082501</td> <td>27450.245014</td> </tr> <tr> <th>8</th> <td>1083.878463</td> <td>1268.092380</td> <td>0.0</td> <td>10.720710</td> <td>675.139111</td> <td>25989.607079</td> </tr> <tr> <th>9</th> <td>975.408874</td> <td>1268.101187</td> <td>0.0</td> <td>10.720710</td> <td>644.146443</td> <td>24796.538481</td> </tr> <tr> <th>10</th> <td>867.318265</td> <td>1268.109040</td> <td>0.0</td> <td>3.573570</td> <td>611.219500</td> <td>14973.006129</td> </tr> </tbody> </table> </div> ```python ``` ```python fig = plt.figure(figsize=(13.69,3.13)) #Connect all the fitted lines ax = fig.add_subplot(111) #ax1.plot(data['Dist. Route (m)'].values/1000,line,'o',label='Elevation (m)',ms = 8) #ax2 = ax1.twinx() ax.plot((data["Dist. Route (m)"].values)/1000,forces['Power (w)']/1000,'-o', lw = 2,color = 'r') #ax1.plot((gps['Total Distance'].values/1000),gps['Elevation'],'o',label='altimeter',lw = 3) #ax1.plot(data['Dist. Route (m)'].values/1000,data['Literature'],'o',label='literature',ms= 5) #ax1.set_title('Route 45',fontsize = 22) ax.set_xticks(np.arange(0, 1.1, step=0.1)) ax.tick_params(axis='both', which='major', labelsize=18) #ax.tick_params(axis='both', labelsize=18) ax.set_xlabel("Distance (km)",fontsize = 22) ax.set_xlim(0,1) #ax1.set_ylabel("Grade(%)",fontsize = 18) #ax1.set_ylim(15,25) ax.set_ylabel("Load (kW)",fontsize=18) #fig.legend(fontsize = 16) plt.tight_layout() plt.savefig('1_24.png') ``` ```python fig = plt.figure(figsize=(13.69,3.13)) #Connect all the fitted lines plotgps = gps.iloc[0:1200] ax1 = fig.add_subplot(111) ax1.plot(plotgps['Total Distance'].values/1000,plotgps['Speed'],'o',label='speed',ms = 8) ax2 = ax1.twinx() ax2.plot(plotgps['Total Distance'].values/1000,plotgps['Accel.'],'v',label='acceleration',color = 'r',ms=8) #ax1.plot((gps['Total Distance'].values/1000),gps['Elevation'],'o',label='altimeter',lw = 3) #ax1.plot(data['Dist. Route (m)'].values/1000,data['Literature'],'o',label='literature',ms= 5) ax1.set_title('Route 45',fontsize = 22) #ax1.set_xticks(np.arange(0,(data['Dist. Route (m)'][len(data)-1])/1000,2)) ax1.tick_params(axis='both', which='major', labelsize=18) ax2.tick_params(axis='both', labelsize=18) ax1.set_xlabel("Distance (km)",fontsize = 16) ax1.set_ylabel("Speed (m/s)",fontsize = 16) ax2.set_ylabel("Accel. (m/s^2)",fontsize = 16) fig.legend(fontsize = 14,loc = 2) plt.tight_layout() ``` ```python forces['torque (N*m)'].max() ``` 4897.527622122828 ```python ```
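The drag term above scales linearly with the relative speed, while the commented-out velocity cell earlier computed `(vel[i]-Vwind)**2`, which matches the usual quadratic drag law $F_a = \tfrac{1}{2}\rho_a c_d A (v - v_{wind})^2$. The cell below is a minimal sketch (not part of the original calculation) that recomputes the drag with the squared relative speed and integrates power over time to get a rough traction-energy figure for the route. It assumes the `data` and `forces` frames and the constants defined above, and it reuses `Vwind` directly as a headwind in m/s, matching how the original drag cell treats it even though its comment says km per hour.

```python
# Hedged sketch: quadratic drag and a rough traction-energy estimate for the route.
# Assumes the data/forces DataFrames and the bus constants defined above.
v = data['Velocity (m/s)'].values
dx = np.gradient(data['Dist. Route (m)'].values)   # segment lengths in m

# Quadratic drag, F = 0.5*rho*cd*A*(v - v_wind)^2, signed so it opposes motion
rel_v = v - Vwind
Fa_sq = 0.5 * rhoa * cd * A * rel_v**2 * np.sign(rel_v)

# Total tractive power with the quadratic drag term
P_sq = (forces['Fg'] + forces['Fr'] + forces['Fi'] + Fa_sq) * v

# Time spent on each segment (dt = dx/v); skip stopped points to avoid division by zero
dt = np.where(v > 0, dx / np.maximum(v, 1e-6), 0.0)
energy_kwh = np.sum(P_sq.values * dt) / 3.6e6   # J -> kWh
print('Approximate traction energy for the route: {:.2f} kWh'.format(energy_kwh))
```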
ac275ff4cfbb8e77f92b735dd68f7ce9ba76e44b
216,897
ipynb
Jupyter Notebook
development_notebooks/ericas_OG_calc.ipynb
SacPec/Route_Dynamics_S-dev
97214724dd520d3e618304e7516de79e7731bed5
[ "MIT" ]
4
2019-06-14T20:54:55.000Z
2021-02-26T03:15:20.000Z
development_notebooks/ericas_OG_calc.ipynb
SacPec/Route_Dynamics_S-dev
97214724dd520d3e618304e7516de79e7731bed5
[ "MIT" ]
9
2019-05-13T14:49:42.000Z
2020-12-17T04:48:33.000Z
development_notebooks/ericas_OG_calc.ipynb
SacPec/Route_Dynamics_S-dev
97214724dd520d3e618304e7516de79e7731bed5
[ "MIT" ]
7
2020-02-04T20:12:42.000Z
2021-11-03T19:27:01.000Z
248.165904
58,012
0.901188
true
5,153
Qwen/Qwen-72B
1. YES 2. YES
0.845942
0.779993
0.659829
__label__eng_Latn
0.235439
0.371335
# 3M1 Introduction to optimization Luca Magri (lm547@cam.ac.uk), office ISO-44, Hopkinson Lab. (With many thanks to Professor Gábor Csányi.) [Booklist](https://www.vle.cam.ac.uk/mod/book/view.php?id=364091&chapterid=49051): - Antoniou, A. & Lu, W.-S. Practical Optimization: Algorithms and Engineering Applications, Springer, 2007. Engineering Library: ER.227 and Part IIA Tripos shelves (3M) - Gill, P.E., Murray, W. & Wright, M.H. Practical Optimization, Academic Press, 1981. Engineering Library: ER.115 - Luenberger, D.G. & Ye, Y. Linear and Non-Linear Programming, Springer, 4th edition 2016. Engineering Library: ER.239.4 How to get these jupyter books: - Click on this link https://notebooks.azure.com/lm547/projects/3M1OptLecNotes-LM will take you to the Microsoft Azure cloud system. - The jupyter books will be maintained on this link, where you will find the most updated version of the book. - After you have clicked on "clone", you will be asked to log in and use your Cambridge CrsID. - You will get your own copy of the jupyter books on your account ## Topics for the seven optimization lectures: - Introduction to optimization - Unconstrained optimization - Line search - Gradient methods - Constrained Optimization - Linear programming: Simplex Algorithm - Lagrange and Karush-Kuhn-Tucker (KKT) multipliers - Note that the Karush-Kuhn-Tucker (KKT) multipliers are also known as Kuhn-Tucker (KT) multipliers - Barrier and penalty methods - Global optimisation: Simulated annealing - Principal component analysis ## Lecture 1: List of contents 1. Introduction to optimisation 1. Definitions 1. A simple example of a can ## Nomenclature - $f(x): \mathbb{R}^N\rightarrow\mathbb{R}$ is a nonlinear function, which we want to minimize - $x\in\mathbb{R}^N$ is the vector containing the variables $x_1, x_2, \ldots, x_N$ - $\nabla f = \begin{pmatrix} \frac{\partial f}{\partial x_1}, \frac{\partial f}{\partial x_2}, \ldots, \frac{\partial f}{\partial x_N} \end{pmatrix}^T = \frac{\partial f}{\partial x_i}$, $i=1,2,\ldots, N$ is the gradient - $H=\nabla(\nabla f(x))$ is the Hessian $\left(H_{i,j}=\frac{\partial^2 f}{\partial x_i\partial x_j}\right)$ ## Aims - Optimisation is the mathematical theory and computational practice of making a choice to achieve the best outcome. - In order to optimise, we must <!---1. Formalize the problem mathematically---> 1. Model the problem 1. Identify parameters that can be changed 1. Formulate a mathematical criterion for what is "best" 1. Identify potential constraints 1. Select an appropriate algorithm 1. Correctly interpret the result ## Goal of optimisation - Find the parameters (or independent variables) that minimize/maximize a given quantity... - ... 
possibly subject to some restrictions on the allowed range of parameters ## Definitions - The quantity to be minimized/maximized is called the __objective function__, or __cost function__, or __utility function__, or __loss function__ - This will be usually denoted $f(x)$ in these lectures, unless otherwise specified - The parameters that can be changed are called __control__ or __decision variables__ - The restrictions on the allowed parameter values are called __constraints__ - Mathematically, the optimization problem is minimize $$ \quad f(x), \quad x = (x_1,x_2,x_3,\ldots x_N)^T $$ subject to $$ \quad c_i(x) = 0, \quad i=1,\ldots ,m'\quad(\textrm{equality}) $$ and $$ \quad\qquad c_i(x) \ge 0, \quad i=m'+1,\ldots, m\quad(\textrm{inequality}) $$ - A minimum $x$ of the function $-f(x)$ is a maximum $x$ of the function $f(x)$ - Therefore, maximization problems can be cast as minimization problems - $f(x)$ is the __objective function__ - $x$ is the column vector of $N$ __control variables__ - $\{c_i(x)\}$ is the set of __constraint functions__ - Inequality constraints that are restrictions on the allowed values of a single control variable are called __bounds__, e.g. $x_{i\textrm{min}} \le x_i \le x_{i\textrm{max}}$ ## Types of functions - __Linear__ $$ f(x) = b^T x + c $$ - __Quadratic__ $$ f(x) = x^T A x + b^T x + c $$ - __Nonlinear__ typically means neither linear nor quadratic, for example $$ f(x) = \exp(x_1) + x^T A x + b^T x + c $$ - Nonlinear optimisation problems are typically more difficult to solve ## Types of constraints - _Equality_ constraints can sometimes be eliminated by substitution - _Inequality_ constraints can sometimes be left out and candidate results checked - We will learn to treat them formally with the KKT multipliers - In general, constrained optimization is more difficult to solve than unconstrained optimization ## Optimisation methods First, we define the __optimality criteria__. Then, - Solve analytically. Equations derived from criteria and solved for variables - Solve numerically. Search methods: 1. Initial trial point selected 1. A move is proposed. If the objective function is reduced, the new point is retained 1. Repeat until criteria satisfied (minimum is reached) or we run out of resources Search methods are needed when - The number of variables is large - The equations cannot be solved analytically These are typical situations in engineering problems. Different algorithms correspond to different ways of updating the variables. ## Example: What are the optimal dimensions of a 330 cc can that minimise the amount of material? - Assume: - Cylindrical shape - 4.5% of "air space" - Observe: amount of material is proportional to surface area. 
- We neglect the thickness of the can - Two independent variables - base radius, $r$ - height, $h$ - The radius should be greater than or equal to $25$ $mm$ and smaller than or equal to $50$ $mm$ - The objective function to minimize is the surface area $$A = 2\pi r^2 + 2\pi r h = 2\pi (r^2 + rh)$$ - The equality constraint to impose is the can volume $$V = \pi r^2 h = 330\cdot 104.5\%\approx345 cm^3$$ - The inequality constraints to impose are \begin{align} 25 \textrm{ mm} & \le r \le 50 \textrm{ mm}\\ h &> 0 \end{align} - Ignore inequality constraints for now - Eliminate $h$ using the equality constraint $$ A = f(r) = 2\pi\left(r^2 + \frac{V}{\pi r}\right) $$ ## Definitions - When minimizing $f(x)$ subject to constraints \begin{align} S & \;\;\;\;\textbf{is the feasible region}\\ \text{any}\;x\;\;\; \in S & \;\;\;\text{is a }\textbf{feasible solution}\\ \end{align} for an unconstrained problem, $S$ is infinitely large. - The __gradient__ is $$ g(x) = \nabla f(x) = \left[\frac{\partial f}{\partial x_1},\frac{\partial f}{\partial x_2},\ldots, \frac{\partial f}{\partial x_N}\right]^T $$ - The __Hessian__ is $$ H(x) = \nabla(\nabla f(x)) = \begin{bmatrix} \frac{\partial^2 f}{\partial x_1^2} & \ldots & \frac{\partial^2 f}{\partial x_1 \partial x_N}\\ \vdots & \ddots & \vdots\\ \frac{\partial^2 f}{\partial x_N \partial x_1} & \ldots & \frac{\partial^2 f}{\partial x_N^2} \end{bmatrix} $$ The Hessian is a symmetric matrix by definition. ## Feasible directions At a feasible point $x$, a direction $d$ is a __feasible direction__ if an arbitrary small move from $x$ in direction $d$ remains feasible ```python from pylab import * import numpy as np ``` /home/nbuser/anaconda2_501/lib/python2.7/site-packages/matplotlib/font_manager.py:281: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment. 'Matplotlib is building the font cache using fc-list. ' ```python fig=figure(figsize=(12,8)) x = np.linspace(0,1, 50) plot(x, np.sqrt(1-x**2), 'b') text(0.2,0.6, 'infeasible space') a = 1/np.sqrt(2); arrow(a, a, 0.2, 0, width=0.01) arrow(a, a, 0.16, 0.16, width=0.01) arrow(a, a, 0, 0.2, width=0.01) arrow(a, a, 0.16, -0.16, width=0.01) arrow(a, a, -0.16, 0.16, width=0.01) text(0.8, 1.0, 'feasible directions') text(0.0, 1.05, 'constraint boundary', color='b') axis('equal') axis((0.0, 1.3, 0.0, 1.2)) show() ``` ## Stationary point If $f(x)$ is smooth so that $\nabla f(x)$ exists, then $x^*$ is a __stationary point__ of $f$ if $$ \nabla f(x^*) = 0 $$ - Minima, maxima and saddle points are stationary points ## Types of minima $$ $$ \begin{align} \textbf{Global minimum }\quad & f(x^*) \le f(y) \qquad \forall\, y \in S \\ \\ \textbf{Strong global minimum }\quad & f(x^*) \lt f(y) \qquad \forall\, y \in S, y \neq x^* \\ \\ \textbf{Weak local minimum }\quad & f(x^*) \le f(y) \qquad \forall\, y = x^*+\varepsilon d \in S, y \neq x^* \\ \\ \textbf{Strong local minimum }\quad & f(x^*) < f(y) \qquad \forall\, y = x^*+\varepsilon d \in S, y \neq x^* \end{align} - Local maxima and minima are local extrema - If we say "local minimum / maximum" we will refer to an _interior_ "local minimum / maximum", unless otherwise specified ```python x = np.linspace(-1.5,1.5,100) figure(figsize=(12,8)) plot(x, 1.45*x**4 + sin(6*x), 'b') # Note that the point labelled "weak local minimum" is not mathematically a weak local minimum for this function (show it!). This function is used only for visualization purposes. 
axis((-1.6, 1.6,-2.5, 8)) annotate(s="global minimum", xy=(-0.25,-1.1), xytext=(-1.2, -2), arrowprops=dict(arrowstyle='->')) annotate(s="weak local minimum", xy=(-1.1,1.8), xytext=(-1.2, 4), arrowprops=dict(arrowstyle='->')) annotate(s="strong local minima", xy=(0.7,-0.4), xytext=(0.1, 3), arrowprops=dict(arrowstyle='->')) annotate(s="", xy=(-0.25,-0.9), xytext=(0.25, 2.9), arrowprops=dict(arrowstyle='->')) show() ``` ## With constraints, a global minimum might not be a stationary point ```python import matplotlib.patches as patches x = np.linspace(-1.5,1.5,100) figure(figsize=(12,8)) plot(x, 1.45*x**4 + sin(6*x), 'b') axis((-1.6, 1.6,-2.5, 8)) annotate(s="global minimum (not stationary)", xy=(-0.09,-0.7), xytext=(0.2, -2), arrowprops=dict(arrowstyle='->')) annotate(s="strong local minimum", xy=(0.7,-0.4), xytext=(0.1, 3), arrowprops=dict(arrowstyle='->')) gca().add_patch(patches.Rectangle((-1.6, -2.5), 1.5, 10.5, hatch='/',fill=False)) text(0.0, 7, 'feasible region') text(-0.8, 7, 'infeasible region') annotate(s="constraint", xy=(-0.09, 4.5), xytext=(0.2, 6), arrowprops=dict(arrowstyle='->')) show() ``` ## Unimodality - A function is __unimodal__ if it has a single extremum - It is __strongly unimodal__ if along a straight line from every point to the extremum the gradient is negative (for a minimum) or positive (for a maximum) - Example of a unimodal function: The Rosenbrock's function $$ f(x_1, x_2) = 100(x_2-x_1^2)^2 + (1-x_1)^2$$ ```python x,y = np.meshgrid(np.linspace(-1.7,1.7,100), np.linspace(-0.8,3,100)) R = (1-x)**2 + 10*(y-x**2)**2 ``` ```python from mpl_toolkits.mplot3d import Axes3D fig = figure(figsize=(16,8)) fig.suptitle('Rosenbrock function') ax = fig.add_subplot(1,2,1, projection='3d') ax.plot_surface(x, y, R, rstride=5, cstride=5, cmap=cm.jet, linewidth=0) ax.set_xlabel('x1') ax.set_ylabel('x2') ax.view_init(elev=90, azim=-90) ax = fig.add_subplot(1,2,2) ax.contour(x, y, R, np.logspace(-1,2, 8)) show() ``` - Example of a strongly unimodal function $$ f(x_1, x_2)=x_1^2 + x_2^2 - 0.2x_1x_2$$ ```python x, y = np.meshgrid(np.linspace(-1,1,50), np.linspace(-1,1,50)) f = x**2+y**2 - 0.2*x*y fig = figure(figsize=(16,8)) fig.suptitle('Quadratic function') ax = fig.add_subplot(1,2,1, projection='3d') ax.plot_surface(x, y, f, rstride=1, cstride=1, cmap=cm.jet, linewidth=0) ax.set_xlabel('x1') ax.set_ylabel('x2') ax.view_init(elev=0, azim=-120) ax = fig.add_subplot(1,2,2) contour(x, y, f, linspace(0, 0.8, 10)) plot([0], [0], 'bx', markersize=20, markeredgewidth=3) annotate(s="global minimum", xy=(0,0), xytext=(0,0.5), arrowprops=dict(arrowstyle='->')) show() ``` ## Convex functions - A function is convex if its graph at any point $y$ is never below the tangent at any other point $x$ - Mathematically $$ f(y) \ge f(x) + \nabla f(x)^T (y-x)$$ - It is strictly convex if instead of $\ge$ we use $>$ - Convex functions have a _global_ minimum - Convex functions are unimodal, but not all unimodal functions are convex - Example: The negative Gaussian distribution - Non-convex functions have often multiple local minima - Finding the global minimum is hard - Many, but not all, engineering problems are non-convex ```python from mpl_toolkits.mplot3d import Axes3D fig = figure(figsize=(16,16)) fig.suptitle('The negative Gaussian is unimodal but not convex') x, y = np.meshgrid(np.linspace(-7,7,50), np.linspace(-7,7,50)) ax = fig.add_subplot(1,2,1, projection='3d') ax.plot_surface(x, y, -exp(0.1*(-x**2-y**2)), rstride=1, cstride=1, cmap=cm.jet, linewidth=0) ax.set_xlabel('x') ax.set_ylabel('y') 
ax.view_init(elev=0, azim=-120) show() ``` ## Necessary condition for a local minimum A __necessary__ condition for $x^*$ to be a _local minimum_ of $f(x)$ in $S$ is $$ \nabla f(x^*) \cdot d \ge 0 $$ for all feasible directions $d$. If $x^*$ is an __interior point__, i.e., it is not at the boundary of the feasible region: - because all directions are feasible, $x^*$ must be a stationary point when $$\nabla f(x^*) = 0$$ - The condition $\nabla f(x^*) = 0$ is __necessary but not sufficient__ for $x^*$ to be a minimum - This is because the same condition holds for maxima and saddle points ## Sufficient condition for a local minimum (univariate) For the _univariate_ case (i.e., $x\in\mathbb{R})$, the __Taylor expansion__ around $x^*$ is \begin{align} f(x) &= f(x^*) + (x-x^*) f'(x^*) + \frac12 (x-x^*)^2 f''(x^*) + h.o.t.\\ f(x) - f(x^*) &= (x-x^*) f'(x^*) + \frac12 (x-x^*)^2 f''(x^*) + h.o.t.\\ \end{align} If $x^*$ is an interior stationary point, $f'(x^*) = 0$, so we have a strong local minimum if $$ f(x) - f(x^*) \approx \frac12 (x-x^*)^2 f''(x^*)\gt 0 $$ $$\boxed{\large f''(x^*) \gt 0}\strut$$ - At a stationary point $x^*$, $f''(x^*)>0$ is a sufficient condition of strong local minimum - At a stationary point, $f''(x^*)\ge0$ is a necessary condition of strong local minimum (it is necessary that $f''(x^*)$ be non-negative. If it is negative, $x^*$ cannot be a minimum.) - If $f''(x^*)=0$, we need to analyse the higher order terms ## Sufficient condition for a local minimum (multivariate) For the _multivariate case_ (i.e., $x\in\mathbb{R}^N$ with $N>1$), the Taylor series is $$ f(x) = f(x^*) + (x-x^*)^T \nabla f(x^*) + \frac12 (x-x^*)^T H(x^*) (x-x^*) + h.o.t. $$ If $x^*$ is an interior stationary point, $\nabla f(x^*) = 0$. Let $d = x-x^*$. Then $x^*$ is a strong local minimum if $$ \boxed{\large d^T H(x^*) d > 0 \qquad \forall d\strut} $$ - At a stationary point $x^*$, $\large d^T H(x^*) d > 0$ (the Hessian is __positive definite__) is a sufficient condition of strong local minimum. - At a stationary point $x^*$, $\large d^T H(x^*) d \ge 0$ (the Hessian is __positive semidefinite__) is a necessary condition of strong local minimum. - Test for a $\bf{2\times2}$ matrix: It is positive definite if $$H_{11}>0 \;\;\;\textrm{and}\;\;\; \det(H)>0$$. - If $H(x)$ is positive definite everywhere (e.g. for a quadratic function), $f$ is a __convex function__, and therefore the minimum is unique and a _global minimum_. - A matrix is positive definite if and only if all its __eigenvalues are positive__. - If $H(x^*) = 0$, higher order terms determine whether $x^*$ is a minimum or not. ## Back to the can optimization Objective function: $$f(r) = r^2 + \frac{V}{\pi r}$$ Necessary condition for local minimum: $$0 = \frac{df}{dr} = 2r-\frac{V}{\pi r^2}$$ Candidate solution: $$ r^* = \left(\frac{V}{2\pi}\right)^{1/3}$$ - For $V=345 cm^3$, $r^* = 38\text{ mm}$ - This satisfies the constraints $25\text{ mm} \le r^* \le 50\text{ mm}$, hence, it is feasible. - We need to check sufficient condition $$ \frac{d^2 f}{d r^2} = 2+\frac{2V}{\pi r^3} \ge 0$$ - Hence, $r^*$ is a minimum, and $h^* = V/(\pi r^2) = 76\text{ mm}$. <!--- - Real cans have $r=33\text{ mm}$.---> - The upcoming optimization problems will be less straightforward!
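As a quick numerical cross-check of the can example (a minimal sketch, not part of the original notes), the cell below minimises $f(r) = 2\pi\left(r^2 + \frac{V}{\pi r}\right)$ over the bound $25\text{ mm} \le r \le 50\text{ mm}$ with `scipy.optimize.minimize_scalar` and compares the result against the analytic stationary point $r^* = \left(\frac{V}{2\pi}\right)^{1/3}$ and the corresponding height $h^* = V/(\pi r^{*2})$.

```python
# Numerical check of the can example (assumes numpy imported as np above)
from scipy.optimize import minimize_scalar

V = 345e-6                                      # can volume in m^3 (345 cm^3)
area = lambda r: 2*np.pi*(r**2 + V/(np.pi*r))   # surface area in m^2

res = minimize_scalar(area, bounds=(25e-3, 50e-3), method='bounded')
r_analytic = (V/(2*np.pi))**(1.0/3.0)

print('numerical r* = %.1f mm' % (1e3*res.x))
print('analytic  r* = %.1f mm' % (1e3*r_analytic))
print('height    h* = %.1f mm' % (1e3*V/(np.pi*res.x**2)))
```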
df6e34f9a6ec6d63350e36e92ebfefeda1ec5bf1
623,003
ipynb
Jupyter Notebook
Lecture_1_Introduction_to_optimization_3M1_LM.ipynb
LukeMagher/3M1
d3b6f06d8ecde209c405b412dcdcf1af3c9cfb98
[ "BSD-2-Clause" ]
2
2020-09-23T08:16:18.000Z
2021-12-28T12:35:26.000Z
Lecture_1_Introduction_to_optimization_3M1_LM.ipynb
LukeMagher/3M1
d3b6f06d8ecde209c405b412dcdcf1af3c9cfb98
[ "BSD-2-Clause" ]
null
null
null
Lecture_1_Introduction_to_optimization_3M1_LM.ipynb
LukeMagher/3M1
d3b6f06d8ecde209c405b412dcdcf1af3c9cfb98
[ "BSD-2-Clause" ]
null
null
null
1,263.697769
223,935
0.947904
true
5,190
Qwen/Qwen-72B
1. YES 2. YES
0.831143
0.901921
0.749625
__label__eng_Latn
0.968653
0.579962
# Simon's Algorithm **Abstract:** We study a quantum algorithm known as Simon's algorithm, which provided the first example of an exponential speedup over the best known classical algorithm by using a quantum computer to solve a particular problem. Originally published in 1994, Simon's algorithm was a precursor to Shor's well-known factoring algorithm, and it served as inspiration for many of the seminal works in quantum computation that followed. This notebook is aimed at users with some basic knowledge of quantum computing and quantum circuits. ### Table of Contents * [Simon's Problem Statement](#statement) * [Example for n=3](#example) * [Classical Complexity](#classicalcomplexity) * [Quantum Algorithm for Simon's Problem](#quantumalgorithm) * [Quantum Circuit](#circuit) * [Running Simon's Algorithm](#runningintro) * [Classical Post-Processing](#postprocessintro) * [Quantum Complexity](#quantumcomplexity) * [Implementing Simon's Algorithm in Amazon Braket](#implementation) * [References](#references) * [Appendix](#appendix) * [Implementing an Oracle Function](#oracleimplementation) * [Implementing the Classical Post-Processing](#appendixpostprocessing) ## Simon's Problem Statement: <a name="statement"></a> Suppose we’re given a function $f:\{0,1\}^n \rightarrow \{0,1\}^n$ that maps bit strings to bit strings along with the promise that $$\forall x,y \in \{0,1\}^n, \quad f(x) = f(y) \iff x=y\oplus s,$$ for some unknown $n$-bit string $s \in \{0,1\}^n$, and where $\oplus$ means bitwise addition modulo 2. Said another way, there exists an unknown string $s$ such that, $\forall x, \; f(x)=f(x\oplus s)$. When $s$ is non-zero, the function is two-to-one as it maps *exactly* two inputs to every unique output. The goal of Simon's problem is to determine if $f$ is one-to-one, or two-to-one, or equivalently to find the secret string $s$. Since we're given the promise that $f(x)=f(y)\implies x=y\oplus s$, this means that $s=x\oplus y$ whenever $f(x)=f(y)$. Thus, one way to solve this problem is to find two inputs to the function $f$ that produce the *same* output; $s$ is then the XOR of those two input strings. See [[1]](#References) for more details. <div class="alert alert-block alert-info"><a name="example"></a> <h3>Example for n=3:</h3> Consider the function $f:\{0,1\}^3\to\{0,1\}^3$ defined by the truth table below. <table> <thead> <tr> <th><center>$$x$$</center></th> <th><center>$$f(x)$$</center></th> </tr> </thead> <tr> <td><center>$$000$$</center></td> <td><center>$$000$$</center></td> </tr> <tr> <td><center>$$001$$</center></td> <td><center>$$001$$</center></td> </tr> <tr> <td><center>$$010$$</center></td> <td><center>$$001$$</center></td> </tr> <tr> <td><center>$$011$$</center></td> <td><center>$$000$$</center></td> </tr> <tr> <td><center>$$100$$</center></td> <td><center>$$100$$</center></td> </tr> <tr> <td><center>$$101$$</center></td> <td><center>$$101$$</center></td> </tr> <tr> <td><center>$$110$$</center></td> <td><center>$$101$$</center></td> </tr> <tr> <td><center>$$111$$</center></td> <td><center>$$100$$</center></td> </tr> </table> By inspection, we can see that $f$ satisfies the properties described in the statement of Simon's problem. In particular, note that each output $f(x)$ appears twice for two distinct inputs. We are given the promise that, for each of these two inputs $x$ and $y$ with the same output $f(x)=f(y)$, we have $x \oplus s = y$ for a yet to be determined $s$, and therefore $x\oplus y = s$. 
For concreteness, notice that the input strings $001$ and $010$ are both mapped by $f$ to the same output string $001$. Taking the bitwise XOR of $001$ and $010$ we obtain the secret string $s$: $$s=001 \oplus 010 = 011$$ Therefore, in this example, the secret string is $s = 011$. In this specific example, we also see that the string $000$ is mapped to itself. Since $x\oplus y=s$ for two inputs $x$ and $y$ with the same output, we must have that $s$ is also mapped to $000$, since $s=000\oplus s$. Indeed, we see that $011$ maps to $000$, as expected. </div> ## Classical Complexity<a name="classicalcomplexity"></a> To solve Simon's problem classically, one needs to find two different inputs $x$ and $y$ for which $f(x)=f(y)$. As we saw above, one can then determine $s=x\oplus y$. How hard is it to find two distinct inputs that map to the same output, given the function $f$ as a black box? For $n$-bit strings, there are $2^n$ possible inputs. Thus, in the worst case, one would need to check at most $2^n$ different inputs to find a pair that maps to the same output; this provides an upper bound on the required query complexity. It turns out that a *lower* bound on the classical query complexity of Simon's algorithm can also be found: $\Omega ({\sqrt {2^{n}}})$. Proving this lower bound requires a little more work and is outside the scope of this notebook, so instead we will just provide some intuition. As mentioned above, the goal is to find a pair of input strings $x$ and $y$ that map to the *same* output string $f(x)=f(y)$ -- a collision. Finding a collision in a set is an instance of the well-known (generalized) birthday problem [[2]](#References): within a group of people, what is the probability that two of them share the same birthday? One can turn this problem around and ask "how many people do we need in a room to ensure that the probability that at least two of them share a birthday is greater than some fixed number?" This latter question gets to the heart of solving Simon's problem classically: how many queries to the function $f$ do we need to make to guarantee that we find a collision, with high probability? In the case of the birthday problem, we would need enough people in the room so that when we generate all possible pairings of people, we would have about 365 possible pairs. That way, we'd have a good chance that at least one of those pairs of people share a birthday. Using this intuition, we need to query $f$ enough times to generate a set of *pairs* with roughly the same size as the number of possible inputs ($2^n$). If we make $k$ queries to the function $f$, we can generate ${k \choose 2}=\frac{k(k-1)}{2}\sim k^2$ pairs. Thus, we need to make $k$ queries such that ${k\choose 2}\gtrsim 2^n$ to have a high probability of generating a collision, and therefore, $k>\Omega(\sqrt{2^n})$. ## Quantum Algorithm for Simon's Problem<a name="quantumalgorithm"></a> Simon's algorithm is a scheme for solving the problem above using exponentially fewer queries to the function $f$. In order for Simon's algorithm to work, one needs to be able to implement the unknown function $f$ using quantum logic. That is, given an input *quantum state* $|x\rangle$, one needs a *unitary* $U_f$ satisfying $$U_f|x\rangle |0\rangle = |x\rangle |f(x)\rangle.$$ This unitary is an *oracle* for $f$, and the goal is to query it as few times as possible to learn the secret string $s$. ### Quantum Circuit<a name="circuit"></a> Simon's algorithm involves both quantum and classical components. 
The quantum part of Simon's algorithm is used to query the oracle efficiently, while the classical component is used to process measurement results and determine the hidden string $s$. A circuit for the quantum component of Simon's algorithm is shown below. <div align="center"> </div> For a function $f$ acting on $n$-bit strings, the circuit above acts on $2n$ qubits, as needed for the definition of $U_f$. Only the first $n$ qubits are measured; the remaining qubits are unused after the application of $U_f$. ### Running Simon's Algorithm<a name="runningintro"></a> To solve Simon's problem, one needs to run the quantum circuit above several times. After each run of the circuit, the measurements of the first $n$ qubits produce an output bit string, which we denote by $z$. An analysis of the circuit above shows that each output bit string $z$ satisfies the following condition: $$ z\cdot s = 0 \; \mod{2}.$$ Let us now analyze the above circuit step-by-step: 1. Initialize all qubits in the $|0\rangle$ state. That is, we start in the state $|0\rangle^{\otimes n} \otimes |0\rangle^{\otimes n}$. We will use the shorthand $|0\rangle^{\otimes n}\equiv |0^n\rangle$ 2. Apply Hadamard gates to each of the first $n$ qubits, placing them in the equal superposition state: $$\frac{1}{\sqrt{2^n}}\sum_{x \in \{0, 1\}^n} |x\rangle |0^n\rangle$$. 3. Apply the oracle $U_f$, which computes the function $f$ into the last $n$ qubits, giving the state $$\frac{1}{\sqrt{2^n}}\sum_{x \in \{0, 1\}^n} |x\rangle |f(x)\rangle$$ 4. Measure the last $n$ qubits, giving a random result $f(x)$. If $f$ is one-to-one, this output of $f$ corresponds to an input of $x$. If $f$ is two-to-one, the output of $f$ corresponds to an input of either $x$ or $y = x \oplus s$, where $x$ and $y$ are the two different inputs to $f$ that gave the *same* output $f(x)=f(y)$. Hence we are left with the first $n$ qubits in the state: <center>A. $|x\rangle$, &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&ensp; if $f$ is one-to-one;</center> <center>B. $\frac{1}{\sqrt{2}} (|x\rangle + |y\rangle)$, where $x \oplus y = s$,&emsp; if $f$ is two-to-one.</center> Note that this step is not strictly necessary, since we do not need the measurement result, but we include it as it makes the analysis easier. 5. Apply Hadamard gates to each of the first $n$ qubits. If $f$ is one-to-one, the state $|x\rangle$ is mapped to $$H^{\otimes n} |x\rangle = \frac{1}{\sqrt{2^n}} \sum_{z \in \{0, 1\}^n} (-1)^{x \cdot z}|z\rangle,$$ where $x\cdot z$ is the dot product between the two strings represented as vectors (modulo 2). Similarly, if $f$ is two-to-one, the state $\frac{1}{\sqrt{2}}(|x\rangle + |y\rangle)$ is mapped to $$\frac{1}{\sqrt{2^{n+1}}} \sum_{z \in \{0, 1\}^n} [(-1)^{x \cdot z} + (-1)^{y \cdot z}]|z\rangle$$. 6. Measure the first $n$ qubits. 1. If $f$ is one-to-one, measurements return a random bit string $z$ uniformly chosen from $\{0,1\}^n$. 2. If $f$ is two-to-one, measurements return a random bit string $z$ such that $x \cdot z = y \cdot z \,\mathrm{mod}\, 2$, since otherwise the amplitude $(-1)^{x \cdot z} + (-1)^{y \cdot z}$ cancels out. Using the criterion for Simon's problem (i.e., $f(x)=f(y) \implies x=y\oplus s$), we find that \begin{align*} x\cdot z &= y\cdot z & \mod{2}\\ x\cdot z &= (x\oplus s)\cdot z & \mod{2}\\ x\cdot z &= x\cdot z\oplus s\cdot z & \mod{2}\\ 0 &= s\cdot z & \mod{2} \end{align*} Thus, in both cases we obtain a random bit string $z$ such that $s \cdot z = 0$. 
Therefore, each time we run the quantum circuit above, we find a bit string $z$ that is orthogonal to the secret string $s$. ### Classical Post-Processing<a name="postprocessintro"></a> From the measurement results $\{z_1, \dots, z_k\}$, we can form a system of equations: $$ \begin{aligned}z_{1}\cdot s&=0\mod{2}\\z_{2}\cdot s&=0\mod{2}\\&\,\,\vdots \\z_{k}\cdot s&=0\mod{2}\end{aligned}$$ There are $k$ equations and $n$ unknowns (the elements of $s$). If we run the quantum part enough times so that we find $n$ **independent** equations, then we can solve these equations (using, e.g., Gaussian elimination) to recover the secret string $s$. This is precisely the classical post-processing required: solve the system of equations found above to recover the string $s$. We refer the interested reader to the [Appendix](#Classical-post-processing) for details. ## Quantum Complexity<a name="quantumcomplexity"></a> How many queries do we need to make to $U_f$ in the quantum case? Above we saw that we need to run the quantum part of the algorithm $k$ times to generate a system of equations. We need to find $n$ linearly independent equations for the system to be determined. Thus, we need at least $n$ queries to $U_f$ to find such a system. It is possible, however, that we will get the same measurement outcome on different runs of the quantum algorithm, so we would need to re-do those runs that do not produce distinct measurement outcomes. Fortunately, these repeated outcomes are unlikely, so we only need $O(n)$ queries to the oracle $U_f$. Comparing the quantum and classical algorithms, we saw that the classical algorithm requires at least $\Omega(2^{0.5 n})$ queries to $f$, whereas the quantum algorithm requires only $O(n)$. Thus, we have established an *exponential* speedup by using the quantum algorithm above. # Implementing Simon's Algorithm in Amazon Braket<a name="implementation"></a> ```python # Imports and Setup from braket.circuits import Circuit, circuit from braket.devices import LocalSimulator import numpy as np import matplotlib.pyplot as plt %matplotlib inline # Sets the device to run the circuit on device = LocalSimulator() ``` We also import a method called `simons_oracle`, which generates a circuit implementing an example oracle function. The code for `simons_oracle` is defined in the `simons_utils.py` module, and it is shown in the [Appendix](#Implementing-an-oracle-function) for completeness. ```python #Import local utils from simons_utils import simons_oracle ``` We now define the secret string, $s$: ```python s = '101011' # Other examples to try: # s = '011' # s = '00000' # s = '1' # Generate a random string of random length from 1 to 10: # s="".join(str(np.random.randint(2)) for _ in range(np.random.randint(1,10))) print("The secret string is: "+ s) ``` The secret string is: 101011 ### Circuit Definition We now define the quantum circuit for Simon's algorithm: 1. Apply Hadamard gates to the first $n$-qubits. 2. Query the oracle (i.e., the $U_f$ gate). In this example, the oracle is defined dynamically, based on our chosen value of $s$. You can try experimenting with different values of $s$ (with differing lengths). 3. Apply Hadamard gates to the first $n$-qubits. 
```python n = len(s) circ = Circuit() # Apply Hadamard gates to first n qubits circ.h(range(n)) # Now apply the Oracle for f circ.simons_oracle(s) # Apply Hadamard gates to the first n qubits circ.h(range(n)) print(circ) ``` T : |0| 1 | 2 |3|4|5|6| q0 : -H-C-----------C---C-C-C-H- | | | | | q1 : -H-|-C---------|-H-|-|-|--- | | | | | | q2 : -H-|-|-C-------|-H-|-|-|--- | | | | | | | q3 : -H-|-|-|-C-----|-H-|-|-|--- | | | | | | | | q4 : -H-|-|-|-|-C---|-H-|-|-|--- | | | | | | | | | q5 : -H-|-|-|-|-|-C-|-H-|-|-|--- | | | | | | | | | | q6 : ---X-|-|-|-|-|-X---|-|-|--- | | | | | | | | q7 : -----X-|-|-|-|-----|-|-|--- | | | | | | | q8 : -------X-|-|-|-----X-|-|--- | | | | | q9 : ---------X-|-|-------|-|--- | | | | q10 : -----------X-|-------X-|--- | | q11 : -------------X---------X--- T : |0| 1 | 2 |3|4|5|6| ### Now run the circuit We need enough shots to obtain $n$ linearly independent bit strings in the output measurements. We have chosen `4n` shots in the example below, just to be on the safe side. ```python task = device.run(circ, shots=4*n) ``` ### Analyze the results We can retrieve the measurement results on all $2n$ qubits as follows: ```python result = task.result() counts = result.measurement_counts plt.bar(counts.keys(), counts.values()); plt.xlabel('bit strings'); plt.ylabel('counts'); plt.xticks(rotation=90) plt.show() ``` #### Aggregate the results The measurements are performed on all $2n$ qubits, but we are only interested in the first $n$ qubits. Thus, we need to aggregate the results by ignoring the measurement outcomes on the last $n$ qubits: ```python new_results = {} for bitstring, count in result.measurement_counts.items(): # Only keep the outcomes on first n qubits trunc_bitstring = bitstring[:n] # Add the count to that of the of truncated bit string new_results[trunc_bitstring] = new_results.get(trunc_bitstring, 0) + count plt.bar(new_results.keys(), new_results.values()) plt.xlabel('bit strings'); plt.ylabel('counts'); plt.xticks(rotation=70) plt.show() ``` In practice, we only need the measurement results (i.e., the bit strings, not the counts) from the first $n$ qubits. These measurement outcomes correspond to bit strings that satisfy the equations: $$ \begin{aligned}z_{1}\cdot s&=0\mod{2}\\z_{2}\cdot s&=0\mod{2}\\&\,\,\vdots \\z_n\cdot s&=0\mod{2}\end{aligned}$$ With these $n$ linear equations in hand, we can use classical post-processing to solve for the unknown string $s$. Note that we may have too many bit strings in the above, since we ran the task with $2n$ shots just to be safe. In this case, not all of the bit strings will be linearly independent. Moreover, the all-zeros string $0\dots0$ may also be an outcome, but this bit string satisfies the equations above trivially, so we exclude it if needed. At this stage, the quantum portion of Simon's algorithm is complete. Any remaining steps are just classical postprocessing, which we cover in the Appendix. 
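As a quick sanity check on the claim that every measured bitstring is orthogonal to $s$, the short cell below (a minimal sketch that assumes the `new_results` dictionary and the secret string `s` defined above) verifies $z \cdot s = 0 \pmod 2$ for each sampled $z$.

```python
# Sanity check: every sampled bitstring z should satisfy z . s = 0 (mod 2).
# Assumes `new_results` and `s` from the cells above.
for z in new_results:
    dot = sum(int(zi) * int(si) for zi, si in zip(z, s)) % 2
    assert dot == 0, 'bitstring {} is not orthogonal to s'.format(z)
print('All {} sampled bitstrings are orthogonal to s.'.format(len(new_results)))
```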
### References<a name="references"></a> [1] Wikipedia: [Simon's Problem](https://en.wikipedia.org/wiki/Simon%27s_problem) [2] Wikipedia: [Birthday Problem](https://en.wikipedia.org/wiki/Birthday_problem) [3] Wikipedia: [Computing a kernel by Gaussian elimination](https://en.wikipedia.org/wiki/Kernel_(linear_algebra)#Computation_by_Gaussian_elimination) [4] StackExchange: [Sympy: Solving Matrices in a finite field](https://stackoverflow.com/questions/31190182/sympy-solving-matrices-in-a-finite-field) --- ## Appendix<a name="appendix"></a> ### Implementing an Oracle Function<a name="oracleimplementation"></a> In order to run the algorithm, we will need a unitary function that we can use as an oracle to query the function $f$. There are many possible ways of implementing a function with the desired property that $f(x)=f(y) \implies x=y\oplus s$. We will pick one implementation that is commonly used in example code, and we will try to give some intuition for why this oracle works. #### Classical Intuition Behind the Function $f$ Generating a function that is one-to-one is conceptually straightforward, as any such function of the bit strings $\{0,1\}^n$ will just be a permutation of the inputs. Generating a two-to-one function is a little trickier, though there are many ways to do it. The goal is to define a function that splits the inputs into two groups, such that one element from each group maps to the same output (i.e., $x$ must be in one group, while $x\oplus s$ must be in the other group.) We will implement one simple choice for $f$, in which we define the split based on the value of one of the bits in the string. In this way, exactly half of the inputs will have that bit with value $0$, while the other half will have that bit with value $1$. Our approach will be to choose a flag bit in the input bit strings that we will use to split the inputs. We then $\mathrm{XOR}$ the input string with $s$ whenever the flag bit is $1$. With this definition, half of the input strings will be untouched, while half of the strings will be $\mathrm{XOR}$'ed with $s$. Clearly, this function does nothing to the all-zeros string $0\dots 0$, since any choice of the flag bit will always be $0$. Thus, we need to ensure that our definition also maps the string $s$ to the all-zeros string $0\dots 0$. In other words, we need to ensure that our flag bit is $1$ when the input string is $s$. One way to ensure the function acts correctly on the input $s$ is to just define the flag bit to be the first bit in the string $s$ that is equal to $1$. For example, if $s=011$, we can choose the flag bit to be the second bit. Concretely: $$f(x) = \left\{\begin{array}{lr} x, & \text{if } x_j=0\\ x\oplus s, & \text{if } x_j=1\\ \end{array}\right\},$$ where $x_j$ is the $j^\text{th}$ bit of $x$, and $j$ is the flag bit in $s$. <div class="alert alert-block alert-info"> <h4>Example for n=3:</h4> We now revisit the example in the introduction. Suppose the secret string $s=011$. Since the first appearance of $1$ in $s$ occurs at the second bit, we will use the second bit in the input strings as our flag bit. We take the $\mathrm{XOR}$ of the input with $s$ whenever the flag bit in the input is 1. 
This definition results in the following truth table: <table> <thead> <tr> <th><center>$$x$$</center></th> <th><center>$$f(x)$$</center></th> </tr> </thead> <tr> <td><center>$$000$$</center></td> <td><center>$$000$$</center></td> </tr> <tr> <td><center>$$001$$</center></td> <td><center>$$001$$</center></td> </tr> <tr> <td><center>$$010$$</center></td> <td><center>$$001$$</center></td> </tr> <tr> <td><center>$$011$$</center></td> <td><center>$$000$$</center></td> </tr> <tr> <td><center>$$100$$</center></td> <td><center>$$100$$</center></td> </tr> <tr> <td><center>$$101$$</center></td> <td><center>$$101$$</center></td> </tr> <tr> <td><center>$$110$$</center></td> <td><center>$$101$$</center></td> </tr> <tr> <td><center>$$111$$</center></td> <td><center>$$100$$</center></td> </tr> </table> </div> We leave it as an exercise to the reader to verify that this definition works for any input string size (i.e., $n$ for inputs $\{0,1\}^n$), and that it is in fact two-to-one, rather than many-to-one. Note that the function defined in this way is not a general two-to-one function, but it is simply a choice that is easy to implement both classically and as a quantum circuit. #### Quantum Implementation of $U_f$ We now define the unitary using the `@circuit.subroutine` functionality of the Amazon Braket SDK. The following code was imported from the `simons_utils.py` module, and is shown below for reference. In the quantum setting, we first copy the input register into some ancillary qubits: $$ |x\rangle|0\rangle\mapsto |x\rangle|x\rangle.$$ We then perform the quantum analog of $\mathrm{XOR}$, which means we apply an $X$ gate to the $k^\text{th}$ qubit whenever the $k^\text{th}$ bit of $s$ is $1$. However, we only apply this $X$ gate when the flag qubit is also $|1\rangle$. Thus, our $X$ gate becomes a $\mathrm{CNOT}$ gate between the flag qubit on the input register, and the $k^\text{th}$ qubit on the output. ```python from braket.circuits import Circuit, circuit @circuit.subroutine(register=True) def simons_oracle(secret_s: str): """ Quantum circuit implementing a particular oracle for Simon's problem. Details of this implementation are explained in the Simons Algorithm demo notebook. Args: secret_s (str): secret string we wish to find """ # Find the index of the first 1 in s, to be used as the flag bit flag_bit=secret_s.find('1') n=len(secret_s) circ = Circuit() # First copy the first n qubits, so that |x>|0> -> |x>|x> for i in range(n): circ.cnot(i, i+n) # If flag_bit=-1, s is the all-zeros string, and we do nothing else. if flag_bit != -1: # Now apply the XOR with s whenever the flag bit is 1. for index,bit_value in enumerate(secret_s): if bit_value not in ['0','1']: raise Exception ('Incorrect char \'' + bit_value + '\' in secret string s:' + secret_s) # XOR with s whenever the flag bit is 1. # In terms of gates, XOR means we apply an X gate only whenever the corresponding bit in s is 1. # Applying this X only when the flag qubit is 1 means this is a CNOT gate. if(bit_value == '1'): circ.cnot(flag_bit,index+n) return circ ``` ### Implementing the Classical Post-Processing<a name="appendixpostprocessing"></a> We will now solve the system of linear equations above using Gaussian elimination. We first convert the results into matrix form, then we use `sympy`'s `Matrix.rref()` method to transform the matrix into reduced row echelon form. ```python !pip3 install sympy --quiet from sympy import Matrix ``` Generate a matrix from the bit string outputs. 
We first check that we have sufficiently many output strings to be able to solve the system of equations. If not, output an error and re-run the algorithm. ```python if len(new_results.keys()) < len(s): raise Exception ('System will be underdetermined. Minimum ' + str(n) + ' bitstrings needed, but only ' + str(len(new_results.keys())) +' returned. Please rerun Simon\'s algorithm.') string_list = [] for key in new_results.keys(): # if key!= "0"*n: string_list.append( [ int(c) for c in key ] ) print('The result in matrix form is :') for a in string_list: print (a) ``` The result in matrix form is : [1, 0, 0, 0, 0, 1] [0, 1, 0, 0, 0, 0] [0, 0, 1, 0, 0, 1] [1, 1, 1, 1, 1, 1] [0, 1, 1, 0, 1, 0] [0, 1, 1, 1, 1, 0] [1, 0, 1, 0, 1, 1] [0, 1, 0, 0, 1, 1] [1, 1, 0, 1, 0, 1] [1, 1, 0, 0, 0, 1] [0, 0, 1, 1, 0, 1] [0, 1, 0, 1, 0, 0] [0, 1, 1, 0, 0, 1] [0, 0, 1, 0, 1, 0] [1, 0, 1, 1, 1, 1] [1, 0, 1, 0, 0, 0] [0, 1, 1, 1, 0, 1] [1, 0, 0, 1, 0, 1] [1, 0, 0, 1, 1, 0] [0, 0, 0, 0, 0, 0] Now solve the system $Ms=0$ by finding the kernel of $M$. We do this using Gaussian elimination on the augmented matrix $\left[M|I\right]$ to bring it to row echelon form. Converting the solution to numbers $\mathrm{mod }\,2$, we can then read off the solution from the last row of the reduced matrix. See [[3]](#References) and [[4]](#References) for more details. ```python M=Matrix(string_list).T # Construct the augmented matrix M_I = Matrix(np.hstack([M,np.eye(M.shape[0],dtype=int)])) # Perform row reduction, working modulo 2. We use the iszerofunc property of rref # to perform the Gaussian elimination over the finite field. M_I_rref = M_I.rref(iszerofunc=lambda x: x % 2==0) # In row reduced echelon form, we can end up with a solution outside of the finite field {0,1}. # Thus, we need to revert the matrix back to this field by treating fractions as a modular inverse. # Since the denominator will always be odd (i.e. 1 mod 2), it can be ignored. # Helper function to treat fractions as modular inverse: def mod2(x): return x.as_numer_denom()[0] % 2 # Apply our helper function to the matrix M_I_final = M_I_rref[0].applyfunc(mod2) # Extract the kernel of M from the remaining columns of the last row, when s is nonzero. if all(value == 0 for value in M_I_final[-1,:M.shape[1]]): result_s="".join(str(c) for c in M_I_final[-1,M.shape[1]:]) # Otherwise, the sub-matrix will be full rank, so just set s=0...0 else: result_s='0'*M.shape[0] # Check whether result_s is equal to initial s: print ('Secret string: ' + s) print ('Result string: ' + result_s) if (result_s == s): print ('We found the correct answer.') else: print ('Error. The answer is wrong!') ``` Secret string: 101011 Result string: 101011 We found the correct answer.
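For comparison with the query counts discussed earlier, the cell below is a minimal classical brute-force solver for the same problem (a sketch that mirrors the flag-bit construction of `simons_oracle` in plain Python and assumes the secret string `s` defined above): it evaluates $f$ on successive inputs until it finds a collision, so in the worst case it needs on the order of $2^n$ evaluations rather than the $O(n)$ quantum queries.

```python
# Classical brute-force collision search for the same kind of oracle, for comparison.
# classical_f mirrors the flag-bit construction used by simons_oracle above.
def classical_f(x, secret_s):
    flag_bit = secret_s.find('1')
    if flag_bit == -1 or x[flag_bit] == '0':
        return x
    # XOR the input with s whenever the flag bit is 1
    return ''.join(str(int(a) ^ int(b)) for a, b in zip(x, secret_s))

def classical_simon(secret_s):
    n = len(secret_s)
    seen = {}
    for i in range(2**n):
        x = format(i, '0{}b'.format(n))
        fx = classical_f(x, secret_s)
        if fx in seen and seen[fx] != x:
            # Collision found: s is the XOR of the two colliding inputs
            return ''.join(str(int(a) ^ int(b)) for a, b in zip(x, seen[fx]))
        seen[fx] = x
    # No collision: f is one-to-one, so s is the all-zeros string
    return '0' * n

print('Classically recovered s: ' + classical_simon(s))
```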
66deaf946b8e0ab0403f43857f0207ae6d096145
154,774
ipynb
Jupyter Notebook
examples/advanced_circuits_algorithms/Simons_Algorithm/Simons_Algorithm.ipynb
virajvchaudhari/amazon-braket-examples
4d48555f4aa5cbf86ece8a472b9913f14b22b768
[ "Apache-2.0" ]
null
null
null
examples/advanced_circuits_algorithms/Simons_Algorithm/Simons_Algorithm.ipynb
virajvchaudhari/amazon-braket-examples
4d48555f4aa5cbf86ece8a472b9913f14b22b768
[ "Apache-2.0" ]
null
null
null
examples/advanced_circuits_algorithms/Simons_Algorithm/Simons_Algorithm.ipynb
virajvchaudhari/amazon-braket-examples
4d48555f4aa5cbf86ece8a472b9913f14b22b768
[ "Apache-2.0" ]
null
null
null
192.504975
84,964
0.875683
true
7,936
Qwen/Qwen-72B
1. YES 2. YES
0.749087
0.757794
0.567654
__label__eng_Latn
0.99414
0.15718
```python from preamble import * %matplotlib inline ``` ## Model Evaluation and Improvement ```python from sklearn.datasets import make_blobs from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split # create a synthetic dataset X, y = make_blobs(random_state=0) # split data and labels into a training and a test set X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # instantiate a model and fit it to the training set logreg = LogisticRegression().fit(X_train, y_train) # evaluate the model on the test set print("Test set score: {:.2f}".format(logreg.score(X_test, y_test))) ``` Test set score: 0.88 ### Cross-Validation ```python mglearn.plots.plot_cross_validation() ``` #### Cross-Validation in scikit-learn ```python from sklearn.model_selection import cross_val_score from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression iris = load_iris() logreg = LogisticRegression(max_iter=1000) scores = cross_val_score(logreg, iris.data, iris.target) print("Cross-validation scores: {}".format(scores)) ``` Cross-validation scores: [0.967 1. 0.933 0.967 1. ] ```python scores = cross_val_score(logreg, iris.data, iris.target, cv=5) print("Cross-validation scores: {}".format(scores)) ``` Cross-validation scores: [0.967 1. 0.933 0.967 1. ] ```python print("Average cross-validation score: {:.2f}".format(scores.mean())) ``` Average cross-validation score: 0.97 ```python from sklearn.model_selection import cross_validate res = cross_validate(logreg, iris.data, iris.target, cv=5, return_train_score=True) display(res) ``` {'fit_time': array([0.013, 0.017, 0.012, 0.013, 0.012]), 'score_time': array([0. , 0. , 0.001, 0. , 0.001]), 'test_score': array([0.967, 1. , 0.933, 0.967, 1. 
]), 'train_score': array([0.967, 0.967, 0.983, 0.983, 0.975])} ```python res_df = pd.DataFrame(res) display(res_df) print("Mean times and scores:\n", res_df.mean()) ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>fit_time</th> <th>score_time</th> <th>test_score</th> <th>train_score</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0.01</td> <td>0.00e+00</td> <td>0.97</td> <td>0.97</td> </tr> <tr> <th>1</th> <td>0.02</td> <td>0.00e+00</td> <td>1.00</td> <td>0.97</td> </tr> <tr> <th>2</th> <td>0.01</td> <td>9.94e-04</td> <td>0.93</td> <td>0.98</td> </tr> <tr> <th>3</th> <td>0.01</td> <td>0.00e+00</td> <td>0.97</td> <td>0.98</td> </tr> <tr> <th>4</th> <td>0.01</td> <td>9.98e-04</td> <td>1.00</td> <td>0.97</td> </tr> </tbody> </table> </div> Mean times and scores: fit_time 1.34e-02 score_time 3.98e-04 test_score 9.73e-01 train_score 9.75e-01 dtype: float64 #### Benefits of Cross-Validation ### Stratified K-Fold cross-validation and other strategies ```python from sklearn.datasets import load_iris iris = load_iris() print("Iris labels:\n{}".format(iris.target)) ``` Iris labels: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2] ```python mglearn.plots.plot_stratified_cross_validation() ``` #### More control over cross-validation ```python from sklearn.model_selection import KFold kfold = KFold(n_splits=5) ``` ```python print("Cross-validation scores:\n{}".format( cross_val_score(logreg, iris.data, iris.target, cv=kfold))) ``` Cross-validation scores: [1. 1. 0.867 0.933 0.833] ```python kfold = KFold(n_splits=3) print("Cross-validation scores:\n{}".format( cross_val_score(logreg, iris.data, iris.target, cv=kfold))) ``` Cross-validation scores: [0. 0. 0.] ```python kfold = KFold(n_splits=3, shuffle=True, random_state=0) print("Cross-validation scores:\n{}".format( cross_val_score(logreg, iris.data, iris.target, cv=kfold))) ``` Cross-validation scores: [0.98 0.96 0.96] #### Leave-one-out cross-validation ```python from sklearn.model_selection import LeaveOneOut loo = LeaveOneOut() scores = cross_val_score(logreg, iris.data, iris.target, cv=loo) print("Number of cv iterations: ", len(scores)) print("Mean accuracy: {:.2f}".format(scores.mean())) ``` Number of cv iterations: 150 Mean accuracy: 0.97 #### Shuffle-split cross-validation ```python mglearn.plots.plot_shuffle_split() ``` ```python from sklearn.model_selection import ShuffleSplit shuffle_split = ShuffleSplit(test_size=.5, train_size=.5, n_splits=10) scores = cross_val_score(logreg, iris.data, iris.target, cv=shuffle_split) print("Cross-validation scores:\n{}".format(scores)) ``` Cross-validation scores: [0.947 0.987 0.973 0.96 0.933 0.96 0.987 0.947 0.947 0.933] ##### Cross-validation with groups ```python mglearn.plots.plot_group_kfold() ``` ```python from sklearn.model_selection import GroupKFold # create synthetic dataset X, y = make_blobs(n_samples=12, random_state=0) # assume the first three samples belong to the same group, # then the next four, etc. 
groups = [0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 3] scores = cross_val_score(logreg, X, y, groups=groups, cv=GroupKFold(n_splits=3)) print("Cross-validation scores:\n{}".format(scores)) ``` Cross-validation scores: [0.75 0.6 0.667] ### Grid Search #### Simple Grid Search ```python # naive grid search implementation from sklearn.svm import SVC X_train, X_test, y_train, y_test = train_test_split( iris.data, iris.target, random_state=0) print("Size of training set: {} size of test set: {}".format( X_train.shape[0], X_test.shape[0])) best_score = 0 for gamma in [0.001, 0.01, 0.1, 1, 10, 100]: for C in [0.001, 0.01, 0.1, 1, 10, 100]: # for each combination of parameters, train an SVC svm = SVC(gamma=gamma, C=C) svm.fit(X_train, y_train) # evaluate the SVC on the test set score = svm.score(X_test, y_test) # if we got a better score, store the score and parameters if score > best_score: best_score = score best_parameters = {'C': C, 'gamma': gamma} print("Best score: {:.2f}".format(best_score)) print("Best parameters: {}".format(best_parameters)) ``` Size of training set: 112 size of test set: 38 Best score: 0.97 Best parameters: {'C': 100, 'gamma': 0.001} #### The danger of overfitting the parameters and the validation set ```python mglearn.plots.plot_threefold_split() ``` ```python from sklearn.svm import SVC # split data into train+validation set and test set X_trainval, X_test, y_trainval, y_test = train_test_split( iris.data, iris.target, random_state=0) # split train+validation set into training and validation sets X_train, X_valid, y_train, y_valid = train_test_split( X_trainval, y_trainval, random_state=1) print("Size of training set: {} size of validation set: {} size of test set:" " {}\n".format(X_train.shape[0], X_valid.shape[0], X_test.shape[0])) best_score = 0 for gamma in [0.001, 0.01, 0.1, 1, 10, 100]: for C in [0.001, 0.01, 0.1, 1, 10, 100]: # for each combination of parameters, train an SVC svm = SVC(gamma=gamma, C=C) svm.fit(X_train, y_train) # evaluate the SVC on the validation set score = svm.score(X_valid, y_valid) # if we got a better score, store the score and parameters if score > best_score: best_score = score best_parameters = {'C': C, 'gamma': gamma} # rebuild a model on the combined training and validation set, # and evaluate it on the test set svm = SVC(**best_parameters) svm.fit(X_trainval, y_trainval) test_score = svm.score(X_test, y_test) print("Best score on validation set: {:.2f}".format(best_score)) print("Best parameters: ", best_parameters) print("Test set score with best parameters: {:.2f}".format(test_score)) ``` Size of training set: 84 size of validation set: 28 size of test set: 38 Best score on validation set: 0.96 Best parameters: {'C': 10, 'gamma': 0.001} Test set score with best parameters: 0.92 #### Grid Search with Cross-Validation ```python for gamma in [0.001, 0.01, 0.1, 1, 10, 100]: for C in [0.001, 0.01, 0.1, 1, 10, 100]: # for each combination of parameters, # train an SVC svm = SVC(gamma=gamma, C=C) # perform cross-validation scores = cross_val_score(svm, X_trainval, y_trainval, cv=5) # compute mean cross-validation accuracy score = np.mean(scores) # if we got a better score, store the score and parameters if score > best_score: best_score = score best_parameters = {'C': C, 'gamma': gamma} # rebuild a model on the combined training and validation set svm = SVC(**best_parameters) svm.fit(X_trainval, y_trainval) ``` SVC(C=10, gamma=0.1) ```python mglearn.plots.plot_cross_val_selection() ``` ```python mglearn.plots.plot_grid_search_overview() ``` 
```python param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100], 'gamma': [0.001, 0.01, 0.1, 1, 10, 100]} print("Parameter grid:\n{}".format(param_grid)) ``` Parameter grid: {'C': [0.001, 0.01, 0.1, 1, 10, 100], 'gamma': [0.001, 0.01, 0.1, 1, 10, 100]} ```python from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC grid_search = GridSearchCV(SVC(), param_grid, cv=5, return_train_score=True) ``` ```python X_train, X_test, y_train, y_test = train_test_split( iris.data, iris.target, random_state=0) ``` ```python grid_search.fit(X_train, y_train) ``` GridSearchCV(cv=5, estimator=SVC(), param_grid={'C': [0.001, 0.01, 0.1, 1, 10, 100], 'gamma': [0.001, 0.01, 0.1, 1, 10, 100]}, return_train_score=True) ```python print("Test set score: {:.2f}".format(grid_search.score(X_test, y_test))) ``` Test set score: 0.97 ```python print("Best parameters: {}".format(grid_search.best_params_)) print("Best cross-validation score: {:.2f}".format(grid_search.best_score_)) ``` Best parameters: {'C': 10, 'gamma': 0.1} Best cross-validation score: 0.97 ```python print("Best estimator:\n{}".format(grid_search.best_estimator_)) ``` Best estimator: SVC(C=10, gamma=0.1) ##### Analyzing the result of cross-validation ```python import pandas as pd # convert to Dataframe results = pd.DataFrame(grid_search.cv_results_) # show the first 5 rows display(results.head()) ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>mean_fit_time</th> <th>std_fit_time</th> <th>mean_score_time</th> <th>std_score_time</th> <th>...</th> <th>split3_train_score</th> <th>split4_train_score</th> <th>mean_train_score</th> <th>std_train_score</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>5.99e-04</td> <td>4.89e-04</td> <td>1.99e-04</td> <td>3.99e-04</td> <td>...</td> <td>0.37</td> <td>0.36</td> <td>0.37</td> <td>5.58e-03</td> </tr> <tr> <th>1</th> <td>5.98e-04</td> <td>4.89e-04</td> <td>0.00e+00</td> <td>0.00e+00</td> <td>...</td> <td>0.37</td> <td>0.36</td> <td>0.37</td> <td>5.58e-03</td> </tr> <tr> <th>2</th> <td>3.99e-04</td> <td>4.89e-04</td> <td>3.99e-04</td> <td>4.89e-04</td> <td>...</td> <td>0.37</td> <td>0.36</td> <td>0.37</td> <td>5.58e-03</td> </tr> <tr> <th>3</th> <td>7.98e-04</td> <td>3.99e-04</td> <td>0.00e+00</td> <td>0.00e+00</td> <td>...</td> <td>0.37</td> <td>0.36</td> <td>0.37</td> <td>5.58e-03</td> </tr> <tr> <th>4</th> <td>3.99e-04</td> <td>4.89e-04</td> <td>2.00e-04</td> <td>3.99e-04</td> <td>...</td> <td>0.37</td> <td>0.36</td> <td>0.37</td> <td>5.58e-03</td> </tr> </tbody> </table> <p>5 rows × 22 columns</p> </div> ```python scores = np.array(results.mean_test_score).reshape(6, 6) # plot the mean cross-validation scores mglearn.tools.heatmap(scores, xlabel='gamma', xticklabels=param_grid['gamma'], ylabel='C', yticklabels=param_grid['C'], cmap="viridis") ``` ```python fig, axes = plt.subplots(1, 3, figsize=(13, 5)) param_grid_linear = {'C': np.linspace(1, 2, 6), 'gamma': np.linspace(1, 2, 6)} param_grid_one_log = {'C': np.linspace(1, 2, 6), 'gamma': np.logspace(-3, 2, 6)} param_grid_range = {'C': np.logspace(-3, 2, 6), 'gamma': np.logspace(-7, -2, 6)} for param_grid, ax in zip([param_grid_linear, param_grid_one_log, param_grid_range], axes): grid_search = GridSearchCV(SVC(), param_grid, cv=5) grid_search.fit(X_train, y_train) scores = 
grid_search.cv_results_['mean_test_score'].reshape(6, 6) # plot the mean cross-validation scores scores_image = mglearn.tools.heatmap( scores, xlabel='gamma', ylabel='C', xticklabels=param_grid['gamma'], yticklabels=param_grid['C'], cmap="viridis", ax=ax) plt.colorbar(scores_image, ax=axes.tolist()) ``` ```python param_grid = [{'kernel': ['rbf'], 'C': [0.001, 0.01, 0.1, 1, 10, 100], 'gamma': [0.001, 0.01, 0.1, 1, 10, 100]}, {'kernel': ['linear'], 'C': [0.001, 0.01, 0.1, 1, 10, 100]}] print("List of grids:\n{}".format(param_grid)) ``` List of grids: [{'kernel': ['rbf'], 'C': [0.001, 0.01, 0.1, 1, 10, 100], 'gamma': [0.001, 0.01, 0.1, 1, 10, 100]}, {'kernel': ['linear'], 'C': [0.001, 0.01, 0.1, 1, 10, 100]}] ```python grid_search = GridSearchCV(SVC(), param_grid, cv=5, return_train_score=True) grid_search.fit(X_train, y_train) print("Best parameters: {}".format(grid_search.best_params_)) print("Best cross-validation score: {:.2f}".format(grid_search.best_score_)) ``` Best parameters: {'C': 10, 'gamma': 0.1, 'kernel': 'rbf'} Best cross-validation score: 0.97 ```python results = pd.DataFrame(grid_search.cv_results_) # we display the transposed table so that it better fits on the page: display(results.T) ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>0</th> <th>1</th> <th>2</th> <th>3</th> <th>...</th> <th>38</th> <th>39</th> <th>40</th> <th>41</th> </tr> </thead> <tbody> <tr> <th>mean_fit_time</th> <td>0.0008</td> <td>0.0006</td> <td>0.0008</td> <td>0.0008</td> <td>...</td> <td>0</td> <td>0</td> <td>0.0002</td> <td>0.001</td> </tr> <tr> <th>std_fit_time</th> <td>0.0004</td> <td>0.00049</td> <td>0.0004</td> <td>0.0004</td> <td>...</td> <td>0</td> <td>0</td> <td>0.00041</td> <td>1.6e-05</td> </tr> <tr> <th>mean_score_time</th> <td>0.0002</td> <td>0.0004</td> <td>0.0002</td> <td>0.0002</td> <td>...</td> <td>0.0008</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>std_score_time</th> <td>0.0004</td> <td>0.00049</td> <td>0.0004</td> <td>0.0004</td> <td>...</td> <td>0.0004</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>param_C</th> <td>0.001</td> <td>0.001</td> <td>0.001</td> <td>0.001</td> <td>...</td> <td>0.1</td> <td>1</td> <td>10</td> <td>100</td> </tr> <tr> <th>param_gamma</th> <td>0.001</td> <td>0.01</td> <td>0.1</td> <td>1</td> <td>...</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> </tr> <tr> <th>param_kernel</th> <td>rbf</td> <td>rbf</td> <td>rbf</td> <td>rbf</td> <td>...</td> <td>linear</td> <td>linear</td> <td>linear</td> <td>linear</td> </tr> <tr> <th>params</th> <td>{'C': 0.001, 'gamma': 0.001, 'kernel': 'rbf'}</td> <td>{'C': 0.001, 'gamma': 0.01, 'kernel': 'rbf'}</td> <td>{'C': 0.001, 'gamma': 0.1, 'kernel': 'rbf'}</td> <td>{'C': 0.001, 'gamma': 1, 'kernel': 'rbf'}</td> <td>...</td> <td>{'C': 0.1, 'kernel': 'linear'}</td> <td>{'C': 1, 'kernel': 'linear'}</td> <td>{'C': 10, 'kernel': 'linear'}</td> <td>{'C': 100, 'kernel': 'linear'}</td> </tr> <tr> <th>split0_test_score</th> <td>0.35</td> <td>0.35</td> <td>0.35</td> <td>0.35</td> <td>...</td> <td>1</td> <td>1</td> <td>1</td> <td>0.96</td> </tr> <tr> <th>split1_test_score</th> <td>0.35</td> <td>0.35</td> <td>0.35</td> <td>0.35</td> <td>...</td> <td>0.91</td> <td>0.96</td> <td>1</td> <td>0.96</td> </tr> <tr> <th>split2_test_score</th> <td>0.36</td> <td>0.36</td> <td>0.36</td> <td>0.36</td> 
<td>...</td> <td>1</td> <td>1</td> <td>1</td> <td>1</td> </tr> <tr> <th>split3_test_score</th> <td>0.36</td> <td>0.36</td> <td>0.36</td> <td>0.36</td> <td>...</td> <td>0.91</td> <td>0.95</td> <td>0.91</td> <td>0.91</td> </tr> <tr> <th>split4_test_score</th> <td>0.41</td> <td>0.41</td> <td>0.41</td> <td>0.41</td> <td>...</td> <td>0.95</td> <td>0.95</td> <td>0.95</td> <td>0.95</td> </tr> <tr> <th>mean_test_score</th> <td>0.37</td> <td>0.37</td> <td>0.37</td> <td>0.37</td> <td>...</td> <td>0.96</td> <td>0.97</td> <td>0.97</td> <td>0.96</td> </tr> <tr> <th>std_test_score</th> <td>0.022</td> <td>0.022</td> <td>0.022</td> <td>0.022</td> <td>...</td> <td>0.04</td> <td>0.022</td> <td>0.036</td> <td>0.029</td> </tr> <tr> <th>rank_test_score</th> <td>27</td> <td>27</td> <td>27</td> <td>27</td> <td>...</td> <td>8</td> <td>1</td> <td>3</td> <td>8</td> </tr> <tr> <th>split0_train_score</th> <td>0.37</td> <td>0.37</td> <td>0.37</td> <td>0.37</td> <td>...</td> <td>0.97</td> <td>0.99</td> <td>0.99</td> <td>0.98</td> </tr> <tr> <th>split1_train_score</th> <td>0.37</td> <td>0.37</td> <td>0.37</td> <td>0.37</td> <td>...</td> <td>0.97</td> <td>0.98</td> <td>0.99</td> <td>0.99</td> </tr> <tr> <th>split2_train_score</th> <td>0.37</td> <td>0.37</td> <td>0.37</td> <td>0.37</td> <td>...</td> <td>0.94</td> <td>0.98</td> <td>0.98</td> <td>0.99</td> </tr> <tr> <th>split3_train_score</th> <td>0.37</td> <td>0.37</td> <td>0.37</td> <td>0.37</td> <td>...</td> <td>0.97</td> <td>0.99</td> <td>0.99</td> <td>1</td> </tr> <tr> <th>split4_train_score</th> <td>0.36</td> <td>0.36</td> <td>0.36</td> <td>0.36</td> <td>...</td> <td>0.97</td> <td>0.99</td> <td>1</td> <td>1</td> </tr> <tr> <th>mean_train_score</th> <td>0.37</td> <td>0.37</td> <td>0.37</td> <td>0.37</td> <td>...</td> <td>0.96</td> <td>0.98</td> <td>0.99</td> <td>0.99</td> </tr> <tr> <th>std_train_score</th> <td>0.0056</td> <td>0.0056</td> <td>0.0056</td> <td>0.0056</td> <td>...</td> <td>0.0088</td> <td>0.0055</td> <td>0.007</td> <td>0.0084</td> </tr> </tbody> </table> <p>23 rows × 42 columns</p> </div> #### Using different cross-validation strategies with grid search #### Nested cross-validation ```python param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100], 'gamma': [0.001, 0.01, 0.1, 1, 10, 100]} scores = cross_val_score(GridSearchCV(SVC(), param_grid, cv=5), iris.data, iris.target, cv=5) print("Cross-validation scores: ", scores) print("Mean cross-validation score: ", scores.mean()) ``` Cross-validation scores: [0.967 1. 0.967 0.967 1. 
] Mean cross-validation score: 0.9800000000000001 ```python def nested_cv(X, y, inner_cv, outer_cv, Classifier, parameter_grid): outer_scores = [] # for each split of the data in the outer cross-validation # (split method returns indices of training and test parts) for training_samples, test_samples in outer_cv.split(X, y): # find best parameter using inner cross-validation best_parms = {} best_score = -np.inf # iterate over parameters for parameters in parameter_grid: # accumulate score over inner splits cv_scores = [] # iterate over inner cross-validation for inner_train, inner_test in inner_cv.split( X[training_samples], y[training_samples]): # build classifier given parameters and training data clf = Classifier(**parameters) clf.fit(X[inner_train], y[inner_train]) # evaluate on inner test set score = clf.score(X[inner_test], y[inner_test]) cv_scores.append(score) # compute mean score over inner folds mean_score = np.mean(cv_scores) if mean_score > best_score: # if better than so far, remember parameters best_score = mean_score best_params = parameters # build classifier on best parameters using outer training set clf = Classifier(**best_params) clf.fit(X[training_samples], y[training_samples]) # evaluate outer_scores.append(clf.score(X[test_samples], y[test_samples])) return np.array(outer_scores) ``` ```python from sklearn.model_selection import ParameterGrid, StratifiedKFold scores = nested_cv(iris.data, iris.target, StratifiedKFold(5), StratifiedKFold(5), SVC, ParameterGrid(param_grid)) print("Cross-validation scores: {}".format(scores)) ``` Cross-validation scores: [0.967 1. 0.967 0.967 1. ] ##### Parallelizing cross-validation and grid search ### Evaluation Metrics and Scoring #### Keep the End Goal in Mind #### Metrics for Binary Classification ##### Kinds of errors ##### Imbalanced datasets ```python from sklearn.datasets import load_digits digits = load_digits() y = digits.target == 9 X_train, X_test, y_train, y_test = train_test_split( digits.data, y, random_state=0) ``` ```python from sklearn.dummy import DummyClassifier dummy_majority = DummyClassifier(strategy='most_frequent').fit(X_train, y_train) pred_most_frequent = dummy_majority.predict(X_test) print("Unique predicted labels: {}".format(np.unique(pred_most_frequent))) print("Test score: {:.2f}".format(dummy_majority.score(X_test, y_test))) ``` Unique predicted labels: [False] Test score: 0.90 ```python from sklearn.tree import DecisionTreeClassifier tree = DecisionTreeClassifier(max_depth=2).fit(X_train, y_train) pred_tree = tree.predict(X_test) print("Test score: {:.2f}".format(tree.score(X_test, y_test))) ``` Test score: 0.92 ```python from sklearn.linear_model import LogisticRegression dummy = DummyClassifier().fit(X_train, y_train) pred_dummy = dummy.predict(X_test) print("dummy score: {:.2f}".format(dummy.score(X_test, y_test))) logreg = LogisticRegression(max_iter=1000, C=0.1).fit(X_train, y_train) pred_logreg = logreg.predict(X_test) print("logreg score: {:.2f}".format(logreg.score(X_test, y_test))) ``` dummy score: 0.84 logreg score: 0.98 c:\users\t3kci\checkout\scikit-learn\sklearn\dummy.py:132: FutureWarning: The default value of strategy will change from stratified to prior in 0.24. 
"stratified to prior in 0.24.", FutureWarning) ##### Confusion matrices ```python from sklearn.metrics import confusion_matrix confusion = confusion_matrix(y_test, pred_logreg) print("Confusion matrix:\n{}".format(confusion)) ``` Confusion matrix: [[402 1] [ 6 41]] ```python mglearn.plots.plot_confusion_matrix_illustration() ``` ```python mglearn.plots.plot_binary_confusion_matrix() ``` ```python print("Most frequent class:") print(confusion_matrix(y_test, pred_most_frequent)) print("\nDummy model:") print(confusion_matrix(y_test, pred_dummy)) print("\nDecision tree:") print(confusion_matrix(y_test, pred_tree)) print("\nLogistic Regression") print(confusion_matrix(y_test, pred_logreg)) ``` Most frequent class: [[403 0] [ 47 0]] Dummy model: [[362 41] [ 45 2]] Decision tree: [[390 13] [ 24 23]] Logistic Regression [[402 1] [ 6 41]] ###### Relation to accuracy \begin{equation} \text{Accuracy} = \frac{\text{TP} + \text{TN}}{\text{TP} + \text{TN} + \text{FP} + \text{FN}} \end{equation} ##### Precision, recall and f-score \begin{equation} \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}} \end{equation} \begin{equation} \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}} \end{equation} \begin{equation} \text{F} = 2 \cdot \frac{\text{precision} \cdot \text{recall}}{\text{precision} + \text{recall}} \end{equation} ```python from sklearn.metrics import f1_score print("f1 score most frequent: {:.2f}".format( f1_score(y_test, pred_most_frequent))) print("f1 score dummy: {:.2f}".format(f1_score(y_test, pred_dummy))) print("f1 score tree: {:.2f}".format(f1_score(y_test, pred_tree))) print("f1 score logistic regression: {:.2f}".format( f1_score(y_test, pred_logreg))) ``` f1 score most frequent: 0.00 f1 score dummy: 0.04 f1 score tree: 0.55 f1 score logistic regression: 0.92 ```python from sklearn.metrics import classification_report print(classification_report(y_test, pred_most_frequent, target_names=["not nine", "nine"])) ``` precision recall f1-score support not nine 0.90 1.00 0.94 403 nine 0.00 0.00 0.00 47 accuracy 0.90 450 macro avg 0.45 0.50 0.47 450 weighted avg 0.80 0.90 0.85 450 c:\users\t3kci\checkout\scikit-learn\sklearn\metrics\_classification.py:1221: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. 
_warn_prf(average, modifier, msg_start, len(result)) ```python print(classification_report(y_test, pred_dummy, target_names=["not nine", "nine"])) ``` precision recall f1-score support not nine 0.89 0.90 0.89 403 nine 0.05 0.04 0.04 47 accuracy 0.81 450 macro avg 0.47 0.47 0.47 450 weighted avg 0.80 0.81 0.81 450 ```python print(classification_report(y_test, pred_logreg, target_names=["not nine", "nine"])) ``` precision recall f1-score support not nine 0.99 1.00 0.99 403 nine 0.98 0.87 0.92 47 accuracy 0.98 450 macro avg 0.98 0.93 0.96 450 weighted avg 0.98 0.98 0.98 450 ##### Taking uncertainty into account ```python X, y = make_blobs(n_samples=(400, 50), cluster_std=[7.0, 2], random_state=22) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) svc = SVC(gamma=.05).fit(X_train, y_train) ``` ```python mglearn.plots.plot_decision_threshold() ``` ```python print(classification_report(y_test, svc.predict(X_test))) ``` precision recall f1-score support 0 0.97 0.89 0.93 104 1 0.35 0.67 0.46 9 accuracy 0.88 113 macro avg 0.66 0.78 0.70 113 weighted avg 0.92 0.88 0.89 113 ```python y_pred_lower_threshold = svc.decision_function(X_test) > -.8 ``` ```python print(classification_report(y_test, y_pred_lower_threshold)) ``` precision recall f1-score support 0 1.00 0.82 0.90 104 1 0.32 1.00 0.49 9 accuracy 0.83 113 macro avg 0.66 0.91 0.69 113 weighted avg 0.95 0.83 0.87 113 ##### Precision-Recall curves and ROC curves ```python from sklearn.metrics import precision_recall_curve precision, recall, thresholds = precision_recall_curve( y_test, svc.decision_function(X_test)) ``` ```python # Use more data points for a smoother curve X, y = make_blobs(n_samples=(4000, 500), cluster_std=[7.0, 2], random_state=22) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) svc = SVC(gamma=.05).fit(X_train, y_train) precision, recall, thresholds = precision_recall_curve( y_test, svc.decision_function(X_test)) # find threshold closest to zero close_zero = np.argmin(np.abs(thresholds)) plt.plot(precision[close_zero], recall[close_zero], 'o', markersize=10, label="threshold zero", fillstyle="none", c='k', mew=2) plt.plot(precision, recall, label="precision recall curve") plt.xlabel("Precision") plt.ylabel("Recall") plt.legend(loc="best") ``` ```python from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=100, random_state=0, max_features=2) rf.fit(X_train, y_train) # RandomForestClassifier has predict_proba, but not decision_function precision_rf, recall_rf, thresholds_rf = precision_recall_curve( y_test, rf.predict_proba(X_test)[:, 1]) plt.plot(precision, recall, label="svc") plt.plot(precision[close_zero], recall[close_zero], 'o', markersize=10, label="threshold zero svc", fillstyle="none", c='k', mew=2) plt.plot(precision_rf, recall_rf, label="rf") close_default_rf = np.argmin(np.abs(thresholds_rf - 0.5)) plt.plot(precision_rf[close_default_rf], recall_rf[close_default_rf], '^', c='k', markersize=10, label="threshold 0.5 rf", fillstyle="none", mew=2) plt.xlabel("Precision") plt.ylabel("Recall") plt.legend(loc="best") ``` ```python print("f1_score of random forest: {:.3f}".format( f1_score(y_test, rf.predict(X_test)))) print("f1_score of svc: {:.3f}".format(f1_score(y_test, svc.predict(X_test)))) ``` f1_score of random forest: 0.610 f1_score of svc: 0.656 ```python from sklearn.metrics import average_precision_score ap_rf = average_precision_score(y_test, rf.predict_proba(X_test)[:, 1]) ap_svc = average_precision_score(y_test, 
svc.decision_function(X_test)) print("Average precision of random forest: {:.3f}".format(ap_rf)) print("Average precision of svc: {:.3f}".format(ap_svc)) ``` Average precision of random forest: 0.660 Average precision of svc: 0.666 ##### Receiver Operating Characteristics (ROC) and AUC \begin{equation} \text{FPR} = \frac{\text{FP}}{\text{FP} + \text{TN}} \end{equation} ```python from sklearn.metrics import roc_curve fpr, tpr, thresholds = roc_curve(y_test, svc.decision_function(X_test)) plt.plot(fpr, tpr, label="ROC Curve") plt.xlabel("FPR") plt.ylabel("TPR (recall)") # find threshold closest to zero close_zero = np.argmin(np.abs(thresholds)) plt.plot(fpr[close_zero], tpr[close_zero], 'o', markersize=10, label="threshold zero", fillstyle="none", c='k', mew=2) plt.legend(loc=4) ``` ```python fpr_rf, tpr_rf, thresholds_rf = roc_curve(y_test, rf.predict_proba(X_test)[:, 1]) plt.plot(fpr, tpr, label="ROC Curve SVC") plt.plot(fpr_rf, tpr_rf, label="ROC Curve RF") plt.xlabel("FPR") plt.ylabel("TPR (recall)") plt.plot(fpr[close_zero], tpr[close_zero], 'o', markersize=10, label="threshold zero SVC", fillstyle="none", c='k', mew=2) close_default_rf = np.argmin(np.abs(thresholds_rf - 0.5)) plt.plot(fpr_rf[close_default_rf], tpr[close_default_rf], '^', markersize=10, label="threshold 0.5 RF", fillstyle="none", c='k', mew=2) plt.legend(loc=4) ``` ```python from sklearn.metrics import roc_auc_score rf_auc = roc_auc_score(y_test, rf.predict_proba(X_test)[:, 1]) svc_auc = roc_auc_score(y_test, svc.decision_function(X_test)) print("AUC for Random Forest: {:.3f}".format(rf_auc)) print("AUC for SVC: {:.3f}".format(svc_auc)) ``` AUC for Random Forest: 0.937 AUC for SVC: 0.916 ```python y = digits.target == 9 X_train, X_test, y_train, y_test = train_test_split( digits.data, y, random_state=0) plt.figure() for gamma in [1, 0.05, 0.01]: svc = SVC(gamma=gamma).fit(X_train, y_train) accuracy = svc.score(X_test, y_test) auc = roc_auc_score(y_test, svc.decision_function(X_test)) fpr, tpr, _ = roc_curve(y_test , svc.decision_function(X_test)) print("gamma = {:.2f} accuracy = {:.2f} AUC = {:.2f}".format( gamma, accuracy, auc)) plt.plot(fpr, tpr, label="gamma={:.3f}".format(gamma)) plt.xlabel("FPR") plt.ylabel("TPR") plt.xlim(-0.01, 1) plt.ylim(0, 1.02) plt.legend(loc="best") ``` #### Metrics for Multiclass Classification ```python from sklearn.metrics import accuracy_score X_train, X_test, y_train, y_test = train_test_split( digits.data, digits.target, random_state=0) lr = LogisticRegression().fit(X_train, y_train) pred = lr.predict(X_test) print("Accuracy: {:.3f}".format(accuracy_score(y_test, pred))) print("Confusion matrix:\n{}".format(confusion_matrix(y_test, pred))) ``` Accuracy: 0.951 Confusion matrix: [[37 0 0 0 0 0 0 0 0 0] [ 0 40 0 0 0 0 0 0 2 1] [ 0 1 40 3 0 0 0 0 0 0] [ 0 0 0 43 0 0 0 0 1 1] [ 0 0 0 0 37 0 0 1 0 0] [ 0 0 0 0 0 46 0 0 0 2] [ 0 1 0 0 0 0 51 0 0 0] [ 0 0 0 1 1 0 0 46 0 0] [ 0 3 1 0 0 0 0 0 43 1] [ 0 0 0 0 0 1 0 0 1 45]] c:\users\t3kci\checkout\scikit-learn\sklearn\linear_model\_logistic.py:762: ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT. 
Increase the number of iterations (max_iter) or scale the data as shown in: https://scikit-learn.org/stable/modules/preprocessing.html Please also refer to the documentation for alternative solver options: https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG) ```python scores_image = mglearn.tools.heatmap( confusion_matrix(y_test, pred), xlabel='Predicted label', ylabel='True label', xticklabels=digits.target_names, yticklabels=digits.target_names, cmap=plt.cm.gray_r, fmt="%d") plt.title("Confusion matrix") plt.gca().invert_yaxis() ``` ```python print(classification_report(y_test, pred)) ``` precision recall f1-score support 0 1.00 1.00 1.00 37 1 0.89 0.93 0.91 43 2 0.98 0.91 0.94 44 3 0.91 0.96 0.93 45 4 0.97 0.97 0.97 38 5 0.98 0.96 0.97 48 6 1.00 0.98 0.99 52 7 0.98 0.96 0.97 48 8 0.91 0.90 0.91 48 9 0.90 0.96 0.93 47 accuracy 0.95 450 macro avg 0.95 0.95 0.95 450 weighted avg 0.95 0.95 0.95 450 ```python print("Micro average f1 score: {:.3f}".format( f1_score(y_test, pred, average="micro"))) print("Macro average f1 score: {:.3f}".format( f1_score(y_test, pred, average="macro"))) ``` Micro average f1 score: 0.951 Macro average f1 score: 0.952 #### Regression metrics ### Using evaluation metrics in model selection ```python # default scoring for classification is accuracy print("Default scoring: {}".format( cross_val_score(SVC(), digits.data, digits.target == 9, cv=5))) # providing scoring="accuracy" doesn't change the results explicit_accuracy = cross_val_score(SVC(), digits.data, digits.target == 9, scoring="accuracy", cv=5) print("Explicit accuracy scoring: {}".format(explicit_accuracy)) roc_auc = cross_val_score(SVC(), digits.data, digits.target == 9, scoring="roc_auc", cv=5) print("AUC scoring: {}".format(roc_auc)) ``` Default scoring: [0.975 0.992 1. 0.994 0.981] Explicit accuracy scoring: [0.975 0.992 1. 0.994 0.981] AUC scoring: [0.997 0.999 1. 1. 
0.984] ```python res = cross_validate(SVC(), digits.data, digits.target == 9, scoring=["accuracy", "roc_auc", "recall_macro"], return_train_score=True, cv=5) display(pd.DataFrame(res)) ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>fit_time</th> <th>score_time</th> <th>test_accuracy</th> <th>train_accuracy</th> <th>test_roc_auc</th> <th>train_roc_auc</th> <th>test_recall_macro</th> <th>train_recall_macro</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0.02</td> <td>8.98e-03</td> <td>0.97</td> <td>0.99</td> <td>1.00</td> <td>1.0</td> <td>0.89</td> <td>0.97</td> </tr> <tr> <th>1</th> <td>0.02</td> <td>7.98e-03</td> <td>0.99</td> <td>1.00</td> <td>1.00</td> <td>1.0</td> <td>0.96</td> <td>0.98</td> </tr> <tr> <th>2</th> <td>0.02</td> <td>7.98e-03</td> <td>1.00</td> <td>1.00</td> <td>1.00</td> <td>1.0</td> <td>1.00</td> <td>0.98</td> </tr> <tr> <th>3</th> <td>0.02</td> <td>8.98e-03</td> <td>0.99</td> <td>1.00</td> <td>1.00</td> <td>1.0</td> <td>0.97</td> <td>0.98</td> </tr> <tr> <th>4</th> <td>0.02</td> <td>7.98e-03</td> <td>0.98</td> <td>1.00</td> <td>0.98</td> <td>1.0</td> <td>0.90</td> <td>0.99</td> </tr> </tbody> </table> </div> ```python X_train, X_test, y_train, y_test = train_test_split( digits.data, digits.target == 9, random_state=0) # we provide a somewhat bad grid to illustrate the point: param_grid = {'gamma': [0.0001, 0.01, 0.1, 1, 10]} # using the default scoring of accuracy: grid = GridSearchCV(SVC(), param_grid=param_grid) grid.fit(X_train, y_train) print("Grid-Search with accuracy") print("Best parameters:", grid.best_params_) print("Best cross-validation score (accuracy)): {:.3f}".format(grid.best_score_)) print("Test set AUC: {:.3f}".format( roc_auc_score(y_test, grid.decision_function(X_test)))) print("Test set accuracy: {:.3f}".format(grid.score(X_test, y_test))) ``` Grid-Search with accuracy Best parameters: {'gamma': 0.0001} Best cross-validation score (accuracy)): 0.976 Test set AUC: 0.992 Test set accuracy: 0.973 ```python # using AUC scoring instead: grid = GridSearchCV(SVC(), param_grid=param_grid, scoring="roc_auc") grid.fit(X_train, y_train) print("\nGrid-Search with AUC") print("Best parameters:", grid.best_params_) print("Best cross-validation score (AUC): {:.3f}".format(grid.best_score_)) print("Test set AUC: {:.3f}".format( roc_auc_score(y_test, grid.decision_function(X_test)))) print("Test set accuracy: {:.3f}".format(grid.score(X_test, y_test))) ``` Grid-Search with AUC Best parameters: {'gamma': 0.01} Best cross-validation score (AUC): 0.998 Test set AUC: 1.000 Test set accuracy: 1.000 ```python from sklearn.metrics import SCORERS print("Available scorers:") print(sorted(SCORERS.keys())) ``` Available scorers: ['accuracy', 'adjusted_mutual_info_score', 'adjusted_rand_score', 'average_precision', 'balanced_accuracy', 'completeness_score', 'explained_variance', 'f1', 'f1_macro', 'f1_micro', 'f1_samples', 'f1_weighted', 'fowlkes_mallows_score', 'homogeneity_score', 'jaccard', 'jaccard_macro', 'jaccard_micro', 'jaccard_samples', 'jaccard_weighted', 'max_error', 'mutual_info_score', 'neg_brier_score', 'neg_log_loss', 'neg_mean_absolute_error', 'neg_mean_gamma_deviance', 'neg_mean_poisson_deviance', 'neg_mean_squared_error', 'neg_mean_squared_log_error', 'neg_median_absolute_error', 'neg_root_mean_squared_error', 
'normalized_mutual_info_score', 'precision', 'precision_macro', 'precision_micro', 'precision_samples', 'precision_weighted', 'r2', 'recall', 'recall_macro', 'recall_micro', 'recall_samples', 'recall_weighted', 'roc_auc', 'roc_auc_ovo', 'roc_auc_ovo_weighted', 'roc_auc_ovr', 'roc_auc_ovr_weighted', 'v_measure_score'] ### Summary and Outlook ```python ```
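One closing note on scoring: if a metric is not among the built-in names listed above, it can usually be wrapped with `make_scorer` and passed wherever a `scoring` string is accepted. A minimal sketch (the F-beta metric and the `ftwo_scorer` name are only an example choice, not something used elsewhere in this notebook):

```python
from sklearn.metrics import fbeta_score, make_scorer
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

# turn an arbitrary metric function into a scorer object
ftwo_scorer = make_scorer(fbeta_score, beta=2)

# it can then be used like any built-in scoring name, e.g.:
# cross_val_score(SVC(), digits.data, digits.target == 9, scoring=ftwo_scorer, cv=5)
```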
3e17cd2cd6d008b25bedab40db580b803092ac5c
925,009
ipynb
Jupyter Notebook
05-model-evaluation-and-improvement.ipynb
mbooali/introduction-to-machine
3f75f9897f1f63f07bb6eace312fa35e16786623
[ "MIT" ]
null
null
null
05-model-evaluation-and-improvement.ipynb
mbooali/introduction-to-machine
3f75f9897f1f63f07bb6eace312fa35e16786623
[ "MIT" ]
null
null
null
05-model-evaluation-and-improvement.ipynb
mbooali/introduction-to-machine
3f75f9897f1f63f07bb6eace312fa35e16786623
[ "MIT" ]
null
null
null
302.488228
109,348
0.916648
true
14,274
Qwen/Qwen-72B
1. YES 2. YES
0.718594
0.782662
0.562417
__label__eng_Latn
0.238593
0.145013
# SciPy / Havana In case I get to go to [SciPy / Havana](http://conf.scipyla.org/), I'm boning up on SymPy, an important component within the SciPy Ecosystem. Here's the kind of thing one might do with SymPy, a computer algebra system: ```python import sympy as sym ``` ```python x = sym.symbols('x') sym.init_printing() ``` ```python sym.Integral(sym.sin(x), (x, 0, sym.pi)) ``` ```python sym.Integral(sym.sin(x), (x, 0, sym.pi)).doit() ``` In addition, we have numpy, with its multi-dimensional array object. Numpy comes with a host of numeric recipes, already built in... ```python import numpy as np ``` [What are the ethics](http://ucsmp.uchicago.edu/resources/conferences/2012-03-01/) of using CAS in a poorly developing country, such as the United States, known for high infant mortality and poverty rates? Not everyone can easily afford a TI N-spire, and schools tend to not provide adequate computer infrastructure, even for accessing free and open source tools. Fortunately, even without a TI, we have CAS in the form of the SciPy ecosystem. Adapting a solution from a CAS blog post, [Quadratics and CAS](https://casmusings.wordpress.com/2017/06/30/quadratics-and-cas/), I was able to derive the same solution for the coefficients a, b and c. ```python np.polyfit(x=[10,5,-2],y=[210, 40, -30], deg=2) ``` array([ 2., 4., -30.]) But don't we want algebra students to be able to derive the solution manually? Yes, we do. However, once this step becomes a means to an end, rather than an end in itself, a CAS will save time and facilitate deeper explorations. For further reading: * [Learning Math with Python: Youtube Lectures by Juan Klopper](https://youtu.be/P8DIM41B-mM?list=PLsu0TcgLDUiIqN76ZuRkhDUCd-c_EjIkN) * [Tutorial de SymPy: introducción y comandos básicos ... ](https://youtu.be/OGQRcYVys1Q)
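Circling back to the quadratic-fit example above: the same three coefficients can also be recovered symbolically, which is closer to what algebra students derive by hand. A small sketch (the variable names are arbitrary; the three points are the ones passed to `np.polyfit` earlier):

```python
import sympy as sym

a, b, c = sym.symbols('a b c')
points = [(10, 210), (5, 40), (-2, -30)]
# one equation a*x**2 + b*x + c = y for each data point
equations = [a*x0**2 + b*x0 + c - y0 for x0, y0 in points]
sym.solve(equations, [a, b, c])   # expected: {a: 2, b: 4, c: -30}
```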
958a48933b124b0a9a645b2b91d6fb469228b161
6,763
ipynb
Jupyter Notebook
Using SymPy.ipynb
4dsolutions/Python5
8d80753e823441a571b827d24d21577446409b52
[ "MIT" ]
11
2016-08-17T00:15:26.000Z
2020-07-17T21:31:10.000Z
Using SymPy.ipynb
4dsolutions/Python5
8d80753e823441a571b827d24d21577446409b52
[ "MIT" ]
null
null
null
Using SymPy.ipynb
4dsolutions/Python5
8d80753e823441a571b827d24d21577446409b52
[ "MIT" ]
5
2017-02-22T05:15:52.000Z
2019-11-08T06:17:34.000Z
37.994382
2,120
0.692148
true
523
Qwen/Qwen-72B
1. YES 2. YES
0.888759
0.849971
0.755419
__label__eng_Latn
0.966117
0.593424
# Maximum Mean Discrepancy drift detector on CIFAR-10 ### Method The [Maximum Mean Discrepancy (MMD)](http://jmlr.csail.mit.edu/papers/v13/gretton12a.html) detector is a kernel-based method for multivariate 2 sample testing. The MMD is a distance-based measure between 2 distributions *p* and *q* based on the mean embeddings $\mu_{p}$ and $\mu_{q}$ in a reproducing kernel Hilbert space $F$: \begin{align} MMD(F, p, q) & = || \mu_{p} - \mu_{q} ||^2_{F} \\ \end{align} We can compute unbiased estimates of $MMD^2$ from the samples of the 2 distributions after applying the kernel trick. We use by default a [radial basis function kernel](https://en.wikipedia.org/wiki/Radial_basis_function_kernel), but users are free to pass their own kernel of preference to the detector. We obtain a $p$-value via a [permutation test](https://en.wikipedia.org/wiki/Resampling_(statistics)) on the values of $MMD^2$. This method is also described in [Failing Loudly: An Empirical Study of Methods for Detecting Dataset Shift](https://arxiv.org/abs/1810.11953). ### Dataset [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) consists of 60,000 32 by 32 RGB images equally distributed over 10 classes. We evaluate the drift detector on the CIFAR-10-C dataset ([Hendrycks & Dietterich, 2019](https://arxiv.org/abs/1903.12261)). The instances in CIFAR-10-C have been corrupted and perturbed by various types of noise, blur, brightness etc. at different levels of severity, leading to a gradual decline in the classification model performance. We also check for drift against the original test set with class imbalances. ```python import matplotlib.pyplot as plt import os import numpy as np import tensorflow as tf from tensorflow.keras.layers import Conv2D, Dense, Flatten, InputLayer, Reshape from alibi_detect.cd import MMDDrift from alibi_detect.cd.preprocess import UAE, HiddenOutput from alibi_detect.models.resnet import scale_by_instance from alibi_detect.utils.fetching import fetch_tf_model from alibi_detect.utils.kernels import gaussian_kernel from alibi_detect.utils.prediction import predict_batch from alibi_detect.utils.saving import save_detector, load_detector from alibi_detect.datasets import fetch_cifar10c, corruption_types_cifar10c ``` ### Load data Original CIFAR-10 data: ```python (X_train, y_train), (X_test, y_test) = tf.keras.datasets.cifar10.load_data() X_train = X_train.astype('float32') / 255 X_test = X_test.astype('float32') / 255 y_train = y_train.astype('int64').reshape(-1,) y_test = y_test.astype('int64').reshape(-1,) ``` For CIFAR-10-C, we can select from the following corruption types at 5 severity levels: ```python corruptions = corruption_types_cifar10c() print(corruptions) ``` ['brightness', 'contrast', 'defocus_blur', 'elastic_transform', 'fog', 'frost', 'gaussian_blur', 'gaussian_noise', 'glass_blur', 'impulse_noise', 'jpeg_compression', 'motion_blur', 'pixelate', 'saturate', 'shot_noise', 'snow', 'spatter', 'speckle_noise', 'zoom_blur'] Let's pick a subset of the corruptions at corruption level 5. Each corruption type consists of perturbations on all of the original test set images. ```python corruption = ['gaussian_noise', 'motion_blur', 'brightness', 'pixelate'] X_corr, y_corr = fetch_cifar10c(corruption=corruption, severity=5, return_X_y=True) X_corr = X_corr.astype('float32') / 255 ``` We split the original test set in a reference dataset and a dataset which should not be rejected under the *H<sub>0</sub>* of the MMD test. 
We also split the corrupted data by corruption type: ```python np.random.seed(0) n_test = X_test.shape[0] idx = np.random.choice(n_test, size=n_test // 2, replace=False) idx_h0 = np.delete(np.arange(n_test), idx, axis=0) X_ref,y_ref = X_test[idx], y_test[idx] X_h0, y_h0 = X_test[idx_h0], y_test[idx_h0] print(X_ref.shape, X_h0.shape) ``` (5000, 32, 32, 3) (5000, 32, 32, 3) ```python # check that the classes are more or less balanced classes, counts_ref = np.unique(y_ref, return_counts=True) counts_h0 = np.unique(y_h0, return_counts=True)[1] print('Class Ref H0') for cl, cref, ch0 in zip(classes, counts_ref, counts_h0): assert cref + ch0 == n_test // 10 print('{} {} {}'.format(cl, cref, ch0)) ``` Class Ref H0 0 472 528 1 510 490 2 498 502 3 492 508 4 501 499 5 495 505 6 493 507 7 501 499 8 516 484 9 522 478 ```python X_c = [] n_corr = len(corruption) for i in range(n_corr): X_c.append(X_corr[i * n_test:(i + 1) * n_test]) ``` We can visualise the same instance for each corruption type: ```python i = 4 n_test = X_test.shape[0] plt.title('Original') plt.axis('off') plt.imshow(X_test[i]) plt.show() for _ in range(len(corruption)): plt.title(corruption[_]) plt.axis('off') plt.imshow(X_corr[n_test * _+ i]) plt.show() ``` We can also verify that the performance of a classification model on CIFAR-10 drops significantly on this perturbed dataset: ```python dataset = 'cifar10' model = 'resnet32' clf = fetch_tf_model(dataset, model) acc = clf.evaluate(scale_by_instance(X_test), y_test, batch_size=128, verbose=0)[1] print('Test set accuracy:') print('Original {:.4f}'.format(acc)) clf_accuracy = {'original': acc} for _ in range(len(corruption)): acc = clf.evaluate(scale_by_instance(X_c[_]), y_test, batch_size=128, verbose=0)[1] clf_accuracy[corruption[_]] = acc print('{} {:.4f}'.format(corruption[_], acc)) ``` Test set accuracy: Original 0.9278 gaussian_noise 0.2208 motion_blur 0.6339 brightness 0.8913 pixelate 0.3666 Given the drop in performance, it is important that we detect the harmful data drift! ### Detect drift We are trying to detect data drift on high-dimensional (*32x32x3*) data using a multivariate MMD permutation test. It therefore makes sense to apply dimensionality reduction first. Some dimensionality reduction methods also used in [Failing Loudly: An Empirical Study of Methods for Detecting Dataset Shift](https://arxiv.org/pdf/1810.11953.pdf) are readily available: **UAE** (Untrained AutoEncoder), **BBSDs** (black-box shift detection using the classifier's softmax outputs) and **PCA**. 
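PCA is not demonstrated below, but as a rough idea of what that reduction step could look like with scikit-learn (only the projection itself is sketched; how such a function is hooked into `MMDDrift` depends on the alibi-detect version, and the function name and the choice of 32 components are arbitrary):

```python
import numpy as np
from sklearn.decomposition import PCA

# fit the projection on the reference data only, then reuse it for any batch to be tested
pca = PCA(n_components=32).fit(X_ref.reshape(X_ref.shape[0], -1))

def pca_preprocess(x: np.ndarray) -> np.ndarray:
    return pca.transform(x.reshape(x.shape[0], -1)).astype(np.float32)
```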
#### Untrained AutoEncoder First we try UAE: ```python tf.random.set_seed(0) # define encoder encoding_dim = 32 encoder_net = tf.keras.Sequential( [ InputLayer(input_shape=(32, 32, 3)), Conv2D(64, 4, strides=2, padding='same', activation=tf.nn.relu), Conv2D(128, 4, strides=2, padding='same', activation=tf.nn.relu), Conv2D(512, 4, strides=2, padding='same', activation=tf.nn.relu), Flatten(), Dense(encoding_dim,) ] ) uae = UAE(encoder_net=encoder_net) preprocess_kwargs = {'model': uae, 'batch_size': 128} # initialise drift detector cd = MMDDrift( p_val=.05, # p-value for permutation test X_ref=X_ref, # reference data to test against preprocess_kwargs=preprocess_kwargs, kernel=gaussian_kernel, # use the default Gaussian kernel in MMD kernel_kwargs={'sigma': np.array([1.])}, chunk_size=1000, n_permutations=5 # nb of permutations in the test, set to 5 for runtime ) # purposes; should be much higher for a real test # we can also save/load an initialised detector filepath = 'my_path' # change to directory where detector is saved save_detector(cd, filepath) cd = load_detector(filepath, **{'preprocess_kwargs': preprocess_kwargs}) ``` WARNING:alibi_detect.utils.saving:Directory my_path does not exist and is now created. The optional `chunk_size` variable will be used to compute the maximum mean discrepancy distance between the 2 samples in chunks using [dask](https://dask.org/) to avoid potential out-of-memory errors. In terms of speed, the optimal `chunk_size` is application and hardware dependent, so it is often worth to test a few different values, including *None*. *None* means that the computation is done in-memory in NumPy. Let's check whether the detector thinks drift occurred within the original test set: ```python preds_h0 = cd.predict(X_h0, return_p_val=True) labels = ['No!', 'Yes!'] print('Drift? {}'.format(labels[preds_h0['data']['is_drift']])) ``` Drift? No! As expected, no drift occurred. The p-value of the permutation test is above the $0.05$ threshold: ```python print(preds_h0['data']['p_val']) ``` 0.8 Let's now check the predictions on the perturbed data: ```python for x, c in zip(X_c, corruption): preds = cd.predict(x, return_p_val=True) print(f'Corruption type: {c}') print('Drift? {}'.format(labels[preds['data']['is_drift']])) print('Feature-wise p-values:') print(preds['data']['p_val']) print('') ``` Corruption type: gaussian_noise Drift? Yes! Feature-wise p-values: 0.0 Corruption type: motion_blur Drift? Yes! Feature-wise p-values: 0.0 Corruption type: brightness Drift? Yes! Feature-wise p-values: 0.0 Corruption type: pixelate Drift? Yes! Feature-wise p-values: 0.0 #### BBSDs For **BBSDs**, we use the classifier's softmax outputs for black-box shift detection. This method is based on [Detecting and Correcting for Label Shift with Black Box Predictors](https://arxiv.org/abs/1802.03916). The ResNet classifier is trained on data standardised by instance so we need to rescale the data. ```python X_train = scale_by_instance(X_train) X_test = scale_by_instance(X_test) for i in range(n_corr): X_c[i] = scale_by_instance(X_c[i]) X_ref = scale_by_instance(X_ref) X_h0 = scale_by_instance(X_h0) ``` Initialisation of the drift detector. 
Here we use the output of the softmax layer to detect the drift, but other hidden layers can be extracted as well by setting *'layer'* to the index of the desired hidden layer in the model: ```python # use output softmax layer preprocess_kwargs = {'model': HiddenOutput(model=clf, layer=-1), 'batch_size': 128} cd = MMDDrift( p_val=.05, X_ref=X_ref, preprocess_kwargs=preprocess_kwargs, kernel_kwargs={'sigma': np.array([1.])}, chunk_size=1000, n_permutations=5 ) ``` There is no drift on the original held out test set: ```python preds_h0 = cd.predict(X_h0) print('Drift? {}'.format(labels[preds_h0['data']['is_drift']])) print(preds_h0['data']['p_val']) ``` Drift? No! 0.4 We compare this with the perturbed data: ```python for x, c in zip(X_c, corruption): preds = cd.predict(x) print(f'Corruption type: {c}') print('Drift? {}'.format(labels[preds['data']['is_drift']])) print('Feature-wise p-values:') print(preds['data']['p_val']) print('') ``` Corruption type: gaussian_noise Drift? Yes! Feature-wise p-values: 0.0 Corruption type: motion_blur Drift? Yes! Feature-wise p-values: 0.0 Corruption type: brightness Drift? Yes! Feature-wise p-values: 0.0 Corruption type: pixelate Drift? Yes! Feature-wise p-values: 0.0 ### Kernel bandwidth So far we have defined a specific bandwidth `sigma` for the Gaussian kernel. We can however also sum over a number of different kernel bandwidths or infer `sigma` from *X_ref* and *X* using the following heuristic: compute the pairwise distances between each of the instances in *X_ref* and *X*, and set `sigma` to the median distance. Let's first try a range of bandwidths: ```python cd = MMDDrift( p_val=.05, X_ref=X_ref, preprocess_kwargs=preprocess_kwargs, kernel_kwargs={'sigma': np.array([.5, 1., 5.])}, chunk_size=1000, n_permutations=5 ) ``` ```python preds_h0 = cd.predict(X_h0) print('Original test set sample') print('Drift? {}'.format(labels[preds_h0['data']['is_drift']])) print(preds_h0['data']['p_val']) print('') for x, c in zip(X_c, corruption): preds = cd.predict(x) print(f'Corruption type: {c}') print('Drift? {}'.format(labels[preds['data']['is_drift']])) print('Feature-wise p-values:') print(preds['data']['p_val']) print('') ``` Original test set sample Drift? No! 0.2 Corruption type: gaussian_noise Drift? Yes! Feature-wise p-values: 0.0 Corruption type: motion_blur Drift? Yes! Feature-wise p-values: 0.0 Corruption type: brightness Drift? Yes! Feature-wise p-values: 0.0 Corruption type: pixelate Drift? Yes! Feature-wise p-values: 0.0 A bandwidth can also be inferred from *X_ref* and *X* using the heuristic: ```python cd = MMDDrift( p_val=.05, X_ref=X_ref, preprocess_kwargs=preprocess_kwargs, chunk_size=1000, n_permutations=5 ) ``` ```python preds_h0 = cd.predict(X_h0) print('Drift? {}'.format(labels[preds_h0['data']['is_drift']])) print(preds_h0['data']['p_val']) ``` Drift? No! 0.2 ```python print('Inferred bandwidth: {:.4f}'.format(cd.permutation_test.keywords['sigma'].item())) ``` Inferred bandwidth: 1.4132 ### Label drift We can also check what happens when we introduce class imbalances between the reference data *X_ref* and the tested data *X_imb*. The reference data will use $75$% of the instances of the first 5 classes and only $25$% of the last 5. The data used for drift testing then uses respectively $25$% and $75$% of the test instances for the first and last 5 classes. 
```python np.random.seed(0) # get index for each class in the test set num_classes = len(np.unique(y_test)) idx_by_class = [np.where(y_test == c)[0] for c in range(num_classes)] # sample imbalanced data for different classes for X_ref and X_imb perc_ref = .75 perc_ref_by_class = [perc_ref if c < 5 else 1 - perc_ref for c in range(num_classes)] n_by_class = n_test // num_classes X_ref = [] X_imb, y_imb = [], [] for _ in range(num_classes): idx_class_ref = np.random.choice(n_by_class, size=int(perc_ref_by_class[_] * n_by_class), replace=False) idx_ref = idx_by_class[_][idx_class_ref] idx_class_imb = np.delete(np.arange(n_by_class), idx_class_ref, axis=0) idx_imb = idx_by_class[_][idx_class_imb] assert len(np.intersect1d(idx_ref, idx_imb)) == 0  # reference and imbalanced index sets must not overlap X_ref.append(X_test[idx_ref]) X_imb.append(X_test[idx_imb]) y_imb.append(y_test[idx_imb]) X_ref = np.concatenate(X_ref) X_imb = np.concatenate(X_imb) y_imb = np.concatenate(y_imb) print(X_ref.shape, X_imb.shape, y_imb.shape) ``` (5000, 32, 32, 3) (5000, 32, 32, 3) (5000,) Update reference dataset for the detector and make predictions: ```python cd.X_ref = cd.preprocess_fn(X_ref) ``` ```python preds_imb = cd.predict(X_imb) print('Drift? {}'.format(labels[preds_imb['data']['is_drift']])) print(preds_imb['data']['p_val']) ``` Drift? Yes! 0.0
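As a final aside on the kernel bandwidth section above: the median heuristic (set `sigma` to the median pairwise distance between reference and test instances) is straightforward to reproduce by hand. A sketch in plain NumPy, operating on flattened instances and a random subsample to keep the pairwise-distance matrix small; the detector applies the heuristic to the preprocessed representations, so the exact value it reports will differ, and the function name and `n_sub` are just illustrative choices:

```python
import numpy as np

def median_heuristic_sigma(x_ref, x, n_sub=500, seed=0):
    """Median pairwise Euclidean distance between (subsampled) reference and test instances."""
    rng = np.random.RandomState(seed)
    n_sub = min(n_sub, x_ref.shape[0], x.shape[0])
    a = x_ref.reshape(x_ref.shape[0], -1)[rng.choice(x_ref.shape[0], n_sub, replace=False)]
    b = x.reshape(x.shape[0], -1)[rng.choice(x.shape[0], n_sub, replace=False)]
    # squared distances via the ||a||^2 + ||b||^2 - 2 a.b identity
    d2 = (a ** 2).sum(1)[:, None] + (b ** 2).sum(1)[None, :] - 2. * a @ b.T
    return np.sqrt(np.median(np.maximum(d2, 0.)))
```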
5877afd98f0716120a42a49d436dbf6e8a33a6ef
65,207
ipynb
Jupyter Notebook
examples/cd_mmd_cifar10.ipynb
jklaise/alibi-detect
fd5f21cb071462f6701761dc13003824a0749ef7
[ "ECL-2.0", "Apache-2.0", "CC0-1.0" ]
null
null
null
examples/cd_mmd_cifar10.ipynb
jklaise/alibi-detect
fd5f21cb071462f6701761dc13003824a0749ef7
[ "ECL-2.0", "Apache-2.0", "CC0-1.0" ]
null
null
null
examples/cd_mmd_cifar10.ipynb
jklaise/alibi-detect
fd5f21cb071462f6701761dc13003824a0749ef7
[ "ECL-2.0", "Apache-2.0", "CC0-1.0" ]
null
null
null
75.471065
10,016
0.811876
true
4,089
Qwen/Qwen-72B
1. YES 2. YES
0.76908
0.675765
0.519717
__label__eng_Latn
0.863145
0.045806
# Symbolic Mathematics in Python There are times when you need to solve a difficult problem symbolically or analytically. If you have ever used Wolfram Alpha, then you have already done this. Sympy is a python library that allows you to do symbolic mathematics in python. ```python import sympy as sym ``` ## 1. Introduction ### Example 1.1 If you try to write the following in python by itself, you will get an error telling you x is undefined: $$x-x$$ ```python x-x ``` (The error above is on purpose). Variables in python need to be defined before you can say something specific about them. ```python x=102 x-x ``` If you are trying to show that $x-x=0$ is true for any $x$, the above answer would not be valid. Instead you can use a symbolic expression to show that it is true. **First we define the variable as a symbolic expression** ```python x = sym.symbols('x') ``` **Now we can use the variable in a symbolic expression** ```python x-x ``` ### Example 1.2 Sympy can be used to perform algebraic operations (among other things). Consider the following expression: $$(3a-4b)^3$$ We can use Sympy to expand the expression algebraically. **First we need to define the variables as symbolic expressions** ```python a,b = sym.symbols('a,b') ``` **Side note** Notice that the left hand side of the expression has two variables being defined. Python can define more than one variable at a time: ```python x1,y1 =10,20 print(x1) print(y1) ``` 10 20 **Back to the expression** We can define an expression using the variables $a$ and $b$. ```python expr = (3*a-4*b)**3 print(expr) ``` (3*a - 4*b)**3 We can also make it look nicer in our notebook. This doesn't affect the math, but it makes our notebook more readable. ```python sym.init_printing() ``` ```python expr ``` **Now we expand the function algebraically** ```python expr.expand() ``` Sympy can also factor the expression ```python sym.factor(26*a**3-108*a**2*b+144*a*b**2-64*b**3) ``` If you want to copy and paste a result, you print the result. ```python print(sym.factor(26*a**3-108*a**2*b+144*a*b**2-64*b**3)) ``` 2*(a - 2*b)*(13*a**2 - 28*a*b + 16*b**2) You can also chain together functions ```python expr.expand().factor() ``` ### Exercise 1.1 Show that the following two expressions are true. $$(2w-3z)(2w+3z)=4w^2-9z^2$$ $$(2w-3z)^2\ne4w^2-9z^2$$ ```python w,z = sym.symbols('w,z') expr = (2*w-3*z)*(2*w+3*z) expr.expand() expr2= (2*w-3*z)**2 expr2.expand() print(expr.expand()) print(expr2.expand()) print("This proves the second equation is not true") ``` 4*w**2 - 9*z**2 4*w**2 - 12*w*z + 9*z**2 This proves the second equation is not true ```python ## 2. Solving Equations #Sympy can be used to symbolically solve equations. 
As before, you need to define which variables are symbols ``` ### Example 2.1 Use sympy to solve the following equation $$ax^3+bx^2+cx+d=0$$ ```python # Define the variables a,b,c,d,x = sym.symbols('a,b,c,d,x') ``` ```python # Define the expression expr=a*x**3+b*x**2+c*x+d expr ``` We can use the `solvset` function to solve this equation ```python solutions=sym.solveset(expr,x) ``` ```python print(solutions) ``` {-(-3*c/a + b**2/a**2)/(3*(sqrt(-4*(-3*c/a + b**2/a**2)**3 + (27*d/a - 9*b*c/a**2 + 2*b**3/a**3)**2)/2 + 27*d/(2*a) - 9*b*c/(2*a**2) + b**3/a**3)**(1/3)) - (sqrt(-4*(-3*c/a + b**2/a**2)**3 + (27*d/a - 9*b*c/a**2 + 2*b**3/a**3)**2)/2 + 27*d/(2*a) - 9*b*c/(2*a**2) + b**3/a**3)**(1/3)/3 - b/(3*a), -(-3*c/a + b**2/a**2)/(3*(-1/2 - sqrt(3)*I/2)*(sqrt(-4*(-3*c/a + b**2/a**2)**3 + (27*d/a - 9*b*c/a**2 + 2*b**3/a**3)**2)/2 + 27*d/(2*a) - 9*b*c/(2*a**2) + b**3/a**3)**(1/3)) - (-1/2 - sqrt(3)*I/2)*(sqrt(-4*(-3*c/a + b**2/a**2)**3 + (27*d/a - 9*b*c/a**2 + 2*b**3/a**3)**2)/2 + 27*d/(2*a) - 9*b*c/(2*a**2) + b**3/a**3)**(1/3)/3 - b/(3*a), -(-3*c/a + b**2/a**2)/(3*(-1/2 + sqrt(3)*I/2)*(sqrt(-4*(-3*c/a + b**2/a**2)**3 + (27*d/a - 9*b*c/a**2 + 2*b**3/a**3)**2)/2 + 27*d/(2*a) - 9*b*c/(2*a**2) + b**3/a**3)**(1/3)) - (-1/2 + sqrt(3)*I/2)*(sqrt(-4*(-3*c/a + b**2/a**2)**3 + (27*d/a - 9*b*c/a**2 + 2*b**3/a**3)**2)/2 + 27*d/(2*a) - 9*b*c/(2*a**2) + b**3/a**3)**(1/3)/3 - b/(3*a)} ```python solutions ``` What if I need help. You can do this with any python function. `function?` ```python # Run this command to see a help box sym.solveset? ``` ### Exercise 2.1 Use the `solveset` function to solve the following chemical problem. Phosgene gas, $\text{COCl}_2$, dissociates at high temperatures according to the following equilibrium: $$ \text{COCl}_2 \rightleftharpoons \text{CO} + \text{Cl}_2 $$ At $\text{400 C}$, the equilibrium constant $K_c=8.05$.  If you start with a $\text{0.250 M}$ phosgene sample at $\text{400 C}$, determine the concentrations of all species at equilibrium. ```python # Your code here #x^2/(.250-x) - 8.05 #sym.solveset(f, symbol=x) x = sym.symbols('x') sym.solveset(x**2/(.250-x)-8.05,x) ``` ```python #Why did you pick your answer? print("The answer should be 0.242683798033777 because the concentration should be positive") ``` The answer should be 0.242683798033777 because the concentration should be positive ## 3. Calculus We can use also Sympy to differentiate and integrate. Let us experiment with differentiating the following expression: $$x ^ 2 - \cos(x)$$ ```python sym.diff(x ** 2 - sym.cos(x), x) ``` Similarly we can integrate: ```python sym.integrate(x ** 2 - sym.cos(x), x) ``` We can also carry out definite integrals: ```python sym.integrate(x ** 2 - sym.cos(x), (x, 0, 5)) ``` ### Exercise 3.1 Use Sympy to calculate the following: 1. $\frac{d(x ^2 + xy - \ln(y))}{dy}$ 1. 
$\int_0^5 e^{2x}\;dx$ ```python x,y = sym.symbols('x,y') sym.diff(x ** 2 + x*y-sym.ln(y), x) ``` ```python #This took me forevverrrr import sympy as sym import numpy as num import math from sympy import exp x = sym.symbols('x') integ = exp(2*x) sym.integrate(integ,(x,0,5)) ``` -1/2 + exp(10)/2 ### Exercise 3.2 Solve the following definite integral $$\int\limits_{ - \infty }^\infty {\frac{1}{{\sigma \sqrt {2\pi } }}{e^{ - \frac{1}{2}{{\left( {\frac{{x - \mu }}{\sigma }} \right)}^2}}}}$$ Hint: the sympy symbol for infinity is `oo` ```python # Your code here import math import sympy as sym import numpy as num from sympy import exp from sympy import sqrt sym.init_printing() z,b,c = sym.symbols('z,b,c') g1= exp((-1/2)*((z - b)/c)**2) g2=(1/c*sqrt(2*num.pi)) gaussian= g1*g2 sym.integrate(gaussian,(x,'-oo','oo')) ``` Look up Gaussian functions: https://en.wikipedia.org/wiki/Gaussian_function Does your answer make sense? ## 4. Plotting with Sympy Finally, Sympy can be used to plot functions. Note that this makes use of [matplotlib](http://matplotlib.org/). Let us plot $x^2$: ```python expr = x **2 p=sym.plot(expr) ``` ### Exercise 4.1 Plot the following functions: 1. $y=x + cos(x)$ 1. ${\frac{1}{{ \sqrt {2\pi } }}{e^{ - \frac{x^2}{2}}}}$ ```python x = sym.symbols('x') e1 = x+sym.cos(x) p1 = sym.plot(e1) ``` ```python x = sym.symbols('x') e1 = (1/sqrt(2*num.pi)) e2 = exp((-x**2)/2) gaussian= e1*e2 p1 = sym.plot(gaussian) ``` # Lecture ## L1. Hydrogen Atom Sympy has built-in modules for the eigenfunctions of the hydrogen atom. ```python ``` ```python import sympy.physics.hydrogen import numpy as np ``` You can calculate the eigenvalues ($E$) in Hartrees with `sym.physics.hydrogen.E_nl(n,Z)` ```python sym.physics.hydrogen.E_nl(1,1) ``` We can use a loop to print out many energies ```python for n in range(1,5): print(sym.physics.hydrogen.E_nl(n,1)) ``` -1/2 -1/8 -1/18 -1/32 We can plot the hydrogen radial wavefunction (1s orbital) ```python r = sympy.symbols('r') sympy.physics.hydrogen.R_nl(1, 0, r, 1) #Principal quantum, angular momentum, variable, and Z ``` ```python sym.plot(sympy.physics.hydrogen.R_nl(1, 0, r, 1),(r,0,10.50)) ``` And the probability distribution function ```python sympy.symbols('r') prob_1s=sympy.physics.hydrogen.R_nl(1, 0, r, 1)*sympy.physics.hydrogen.R_nl(1, 0, r, 1) prob_1s #Principal quantum, angular momentum, variable, and Z ``` ```python sym.plot(prob_1s,(r,0,10)) ``` Plot a 2s orbital ```python sympy.symbols('r') prob_2s=sympy.physics.hydrogen.R_nl(2, 0, r, 1)*sympy.physics.hydrogen.R_nl(2, 0, r, 1) prob_2s #Principal quantum, angular momentum, variable, and Z ``` ```python sym.plot(prob_2s,(r,0,10)) ``` We can change the range to see the node better. ```python sym.plot(prob_2s,(r,1,8)) ``` Notice the node! ### Exercise L1.1 Plot the radial distribution function for a 2p, 3s, 4s, and 3d orbital. 
```python # radial probability densities for the 2p, 3s, 4s, and 3d orbitals print("2p") sympy.symbols('r') prob_2p=sympy.physics.hydrogen.R_nl(2, 1, r, 1)*sympy.physics.hydrogen.R_nl(2, 1, r, 1) prob_2p #Principal quantum, angular momentum, variable, and Z ``` ```python print("2p") sym.plot(prob_2p,(r,-1.5,12)) ``` ```python print("3s") sympy.symbols('r') prob_3s=sympy.physics.hydrogen.R_nl(3, 0, r, 1)*sympy.physics.hydrogen.R_nl(3, 0, r, 1) prob_3s ``` ```python print("3s") sym.plot(prob_3s,(r,-1.5,12)) ``` ```python print("4s") sympy.symbols('r') prob_4s=sympy.physics.hydrogen.R_nl(4, 0, r, 1)*sympy.physics.hydrogen.R_nl(4, 0, r, 1) prob_4s ``` ```python print("4s") sym.plot(prob_4s,(r,-1.5,12)) ``` ```python print("3d") sympy.symbols('r') prob_3d=sympy.physics.hydrogen.R_nl(3, 2, r, 1)*sympy.physics.hydrogen.R_nl(3, 2, r, 1) prob_3d ``` ```python print("3d") sym.plot(prob_3d,(r,-1.5,12)) ``` ```python ```
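A quick sanity check on these radial functions: each $R_{nl}$ is normalised so that $\int_0^\infty R_{nl}^2\, r^2\, dr = 1$, which is why $R_{nl}^2$ can be read as a probability density in $r$. A short sketch of that check (the variable name `R_2s` is arbitrary; declaring $r$ positive helps SymPy evaluate the integral):

```python
import sympy
import sympy.physics.hydrogen

r = sympy.symbols('r', positive=True)
R_2s = sympy.physics.hydrogen.R_nl(2, 0, r, 1)
sympy.integrate(R_2s**2 * r**2, (r, 0, sympy.oo))   # should evaluate to 1
```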
8133f703170455921bdb38533e5e7036ebb7ae53
271,365
ipynb
Jupyter Notebook
symbolic_math.ipynb
sju-chem264-2019/9-26-2019-symbolic-math-NatalieWilliams16
f081f9c72193138a8f63a52b7d7126daca705b5c
[ "MIT" ]
null
null
null
symbolic_math.ipynb
sju-chem264-2019/9-26-2019-symbolic-math-NatalieWilliams16
f081f9c72193138a8f63a52b7d7126daca705b5c
[ "MIT" ]
null
null
null
symbolic_math.ipynb
sju-chem264-2019/9-26-2019-symbolic-math-NatalieWilliams16
f081f9c72193138a8f63a52b7d7126daca705b5c
[ "MIT" ]
null
null
null
143.731462
25,740
0.84379
true
3,454
Qwen/Qwen-72B
1. YES 2. YES
0.885631
0.835484
0.739931
__label__eng_Latn
0.862993
0.557438
# From Oliver Durr ## Variational Autoencoder (VAE) A tutorial with code for a VAE as described in [Kingma and Welling, 2013](http://arxiv.org/abs/1312.6114). A talk with more details was given at the [DataLab Brown Bag Seminar](https://home.zhaw.ch/~dueo/bbs/files/vae.pdf). Much of the code was taken from https://jmetzen.github.io/2015-11-27/vae.html. However, I tried to focus more on the mathematical understanding, not so much on the design of the algorithm. ### Some theoretical considerations #### Outline Situation: $x$ is from a high-dimensional space and $z$ is from a low-dimensional (latent) space, from which we would like to reconstruct $p(x)$. We consider a parameterized model $p_\theta(x|z)$ (with parameters $\theta$) to construct $x$ for a given value of $z$. We build this model: * $p_\theta(x | z)$ with a neural network determining the parameters $\mu, \Sigma$ of a Gaussian (or, as done here, of a Bernoulli density). #### Inverting $p_\theta(x | z)$ The inversion is not possible, so we approximate $p(z|x)$ by $q_\phi (z|x)$, again a combination of a neural network determining the parameters of a Gaussian: * $q_\phi(z | x)$ with a neural network + Gaussian #### Training We train the network by treating it as an autoencoder. #### Lower bound of the Log-Likelihood The likelihood cannot be determined analytically. Therefore, as a first step we derive a lower (variational) bound $L^{v}$ of the log likelihood for a given image. Technically we assume a discrete latent space; for a continuous case simply replace the sum by the appropriate integral over the respective densities. We replace the inaccessible conditional probability $p(z|x)$ with an approximation $q(z|x)$, for which we later use a neural network topped by a Gaussian. \begin{align} L & = \log\left(p(x)\right) &\\ & = \sum_z q(z|x) \; \log\left(p(x)\right) &\text{multiplied by 1 }\\ & = \sum_z q(z|x) \; \log\left(\frac{p(z,x)}{p(z|x)}\right) &\\ & = \sum_z q(z|x) \; \log\left(\frac{p(z,x)}{q(z|x)} \frac{q(z|x)}{p(z|x)}\right) &\\ & = \sum_z q(z|x) \; \log\left(\frac{p(z,x)}{q(z|x)}\right) + \sum_z q(z|x) \; \log\left(\frac{q(z|x)}{p(z|x)}\right) &\\ & = L^{\tt{v}} + D_{\tt{KL}} \left( q(z|x) || p(z|x) \right) &\\ & \ge L^{\tt{v}} \\ \end{align} The KL-divergence $D_{\tt{KL}}$ is always non-negative, and the smaller it is, the better $q(z|x)$ approximates $p(z|x)$. ### Rewriting $L^\tt{v}$ We split $L^\tt{v}$ into two parts. 
\begin{align} L^{\tt{v}} & = \sum_z q(z|x) \; \log\left(\frac{p(z,x)}{q(z|x)}\right) & \text{with} \;\;p(z,x) = p(x|z) \,p(z)\\ & = \sum_z q(z|x) \; \log\left(\frac{p(x|z) p(z)}{q(z|x)}\right) &\\ & = \sum_z q(z|x) \; \log\left(\frac{p(z)}{q(z|x)}\right) + \sum_z q(z|x) \; \log\left(p(x|z)\right) &\\ & = -D_{\tt{KL}} \left( q(z|x) || p(z) \right) + \mathbb{E}_{q(z|x)}\left( \log\left(p(x|z)\right)\right) &\text{putting in } x^{(i)} \text{ for } x\\ & = -D_{\tt{KL}} \left( q(z|x^{(i)}) || p(z) \right) + \mathbb{E}_{q(z|x^{(i)})}\left( \log\left(p(x^{(i)}|z)\right)\right) &\\ \end{align} Approximating $\mathbb{E}_{q(z|x^{(i)})}$ by sampling from the distribution $q(z|x^{(i)})$ #### Sampling With $z^{(i,l)}$, $l = 1,2,\ldots L$, sampled from $z^{(i,l)} \thicksim q(z|x^{(i)})$ \begin{align} L^{\tt{v}} & = -D_{\tt{KL}} \left( q(z|x^{(i)}) || p(z) \right) + \mathbb{E}_{q(z|x^{(i)})}\left( \log\left(p(x^{(i)}|z)\right)\right) &\\ L^{\tt{v}} & \approx -D_{\tt{KL}} \left( q(z|x^{(i)}) || p(z) \right) + \frac{1}{L} \sum_{l=1}^L \log\left(p(x^{(i)}|z^{(i,l)})\right) &\\ \end{align} #### Calculation of $D_{\tt{KL}} \left( q(z|x^{(i)}) || p(z) \right)$ TODO (a standard closed form under the usual Gaussian assumptions is sketched below) ```python ```
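Although the derivation stops here in the original, a commonly used completion is the following: assuming the encoder outputs a diagonal Gaussian $q(z|x^{(i)}) = \mathcal{N}\left(\mu^{(i)}, \mathrm{diag}\left((\sigma^{(i)})^2\right)\right)$ and the prior is $p(z) = \mathcal{N}(0, I)$ with latent dimension $J$, the KL term has the closed form \begin{align} D_{\tt{KL}} \left( q(z|x^{(i)}) || p(z) \right) & = \frac{1}{2} \sum_{j=1}^{J} \left( \left(\mu_j^{(i)}\right)^2 + \left(\sigma_j^{(i)}\right)^2 - \log \left(\sigma_j^{(i)}\right)^2 - 1 \right) &\\ \end{align} so this part of $L^{\tt{v}}$ needs no sampling.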
1c8d29fa54dff5c59ec5079935503c71bac46f5e
5,154
ipynb
Jupyter Notebook
autoencoder_keras/vae_theory_mardown_only.ipynb
OliverColeman/neuralnets
cf77fe28beda3705f21fd64d072139128d1f3aa6
[ "MIT" ]
180
2017-01-18T12:29:29.000Z
2022-03-17T23:36:27.000Z
autoencoder_keras/vae_theory_mardown_only.ipynb
OliverColeman/neuralnets
cf77fe28beda3705f21fd64d072139128d1f3aa6
[ "MIT" ]
12
2017-03-12T21:09:08.000Z
2019-04-01T12:14:38.000Z
autoencoder_keras/vae_theory_mardown_only.ipynb
mzaradzki/neuralnets
84921f770a0413ac1bc829764cbc51065f289e0b
[ "MIT" ]
117
2017-03-19T08:15:09.000Z
2020-07-14T08:06:19.000Z
44.431034
477
0.523671
true
1,341
Qwen/Qwen-72B
1. YES 2. YES
0.83762
0.795658
0.666459
__label__eng_Latn
0.864142
0.386739
<i>Copyright (c) Microsoft Corporation. All rights reserved.</i> <i>Licensed under the MIT License.</i> # Bayesian Personalized Ranking (BPR) This notebook serves as an introduction to Bayesian Personalized Ranking (BPR) model for implicit feedback. In this tutorial, we focus on learning the BPR model using matrix factorization approach, hence, the model is sometimes also named BPRMF. The implementation of the model is from [Cornac](https://github.com/PreferredAI/cornac), which is a framework for recommender systems with a focus on models leveraging auxiliary data (e.g., item descriptive text and image, social network, etc). ## 0 Global Settings and Imports ```python import sys sys.path.append("../../") import os import cornac import papermill as pm import pandas as pd from reco_utils.dataset import movielens from reco_utils.dataset.python_splitters import python_random_split from reco_utils.evaluation.python_evaluation import map_at_k, ndcg_at_k, precision_at_k, recall_at_k from reco_utils.recommender.cornac.cornac_utils import predict_ranking from reco_utils.common.timer import Timer from reco_utils.common.constants import SEED print("System version: {}".format(sys.version)) print("Cornac version: {}".format(cornac.__version__)) ``` System version: 3.6.8 |Anaconda, Inc.| (default, Feb 21 2019, 18:30:04) [MSC v.1916 64 bit (AMD64)] Cornac version: 1.1.2 ```python # Select MovieLens data size: 100k, 1m, 10m, or 20m MOVIELENS_DATA_SIZE = '100k' # top k items to recommend TOP_K = 10 # Model parameters NUM_FACTORS = 200 NUM_EPOCHS = 100 ``` ## 1 BPR Algorithm ### 1.1 Personalized Ranking from Implicit Feedback The task of personalized ranking aims at providing each user a ranked list of items (recommendations). This is very common in scenarios where recommender systems are based on implicit user behavior (e.g. purchases, clicks). The available observations are only positive feedback where the non-observed ones are a mixture of real negative feedback and missing values. One usual approach for item recommendation is directly predicting a preference score $\hat{x}_{u,i}$ given to item $i$ by user $u$. BPR uses a different approach by using item pairs $(i, j)$ and optimizing for the correct ranking given preference of user $u$, thus, there are notions of *positive* and *negative* items. The training data $D_S : U \times I \times I$ is defined as: $$D_S = \{(u, i, j) \mid i \in I^{+}_{u} \wedge j \in I \setminus I^{+}_{u}\}$$ where user $u$ is assumed to prefer $i$ over $j$ (i.e. $i$ is a *positive item* and $j$ is a *negative item*). ### 1.2 Objective Function From the Bayesian perspective, BPR maximizes the posterior probability over the model parameters $\Theta$ by optimizing the likelihood function $p(i >_{u} j | \Theta)$ and the prior probability $p(\Theta)$. $$p(\Theta \mid >_{u}) \propto p(i >_{u} j \mid \Theta) \times p(\Theta)$$ The joint probability of the likelihood over all users $u \in U$ can be simplified to: $$ \prod_{u \in U} p(>_{u} \mid \Theta) = \prod_{(u, i, j) \in D_S} p(i >_{u} j \mid \Theta) $$ The individual probability that a user $u$ prefers item $i$ to item $j$ can be defined as: $$ p(i >_{u} j \mid \Theta) = \sigma (\hat{x}_{uij}(\Theta)) $$ where $\sigma$ is the logistic sigmoid: $$ \sigma(x) = \frac{1}{1 + e^{-x}} $$ The preference scoring function $\hat{x}_{uij}(\Theta)$ could be an arbitrary real-valued function of the model parameter $\Theta$. 
Thus, it makes BPR a general framework for modeling the relationship between triplets $(u, i, j)$ where different model classes like matrix factorization could be used for estimating $\hat{x}_{uij}(\Theta)$. For the prior, one of the common practices is to choose $p(\Theta)$ following a normal distribution, which results in a nice form of L2 regularization in the final log-form of the objective function. $$ p(\Theta) \sim N(0, \Sigma_{\Theta}) $$ To reduce the complexity of the model, all parameters $\Theta$ are assumed to be independent and to have the same variance, which gives a simpler form of the co-variance matrix $\Sigma_{\Theta} = \lambda_{\Theta}I$. Thus, there are fewer hyperparameters to determine. The final objective of the maximum posterior estimator: $$ J = \sum_{(u, i, j) \in D_S} \text{ln } \sigma(\hat{x}_{uij}) - \lambda_{\Theta} ||\Theta||^2 $$ where $\lambda_\Theta$ are the model-specific regularization parameters. ### 1.3 Learning with Matrix Factorization #### Stochastic Gradient Descent As the defined objective function is differentiable, a gradient descent based method is naturally adopted for optimization. The gradient of the objective $J$ with respect to the model parameters: $$ \begin{align} \frac{\partial J}{\partial \Theta} & = \sum_{(u, i, j) \in D_S} \frac{\partial}{\partial \Theta} \text{ln} \ \sigma(\hat{x}_{uij}) - \lambda_{\Theta} \frac{\partial}{\partial \Theta} ||\Theta||^2 \\ & \propto \sum_{(u, i, j) \in D_S} \frac{-e^{-\hat{x}_{uij}}}{1 + e^{-\hat{x}_{uij}}} \cdot \frac{\partial}{\partial \Theta} \hat{x}_{uij} - \lambda_{\Theta} \Theta \end{align} $$ Due to the slow convergence of full gradient descent, we prefer using stochastic gradient descent to optimize the BPR model. For each triplet $(u, i, j) \in D_S$, the update rule for the parameters (a small NumPy sketch of this update is shown at the end of this section): $$ \Theta \leftarrow \Theta + \alpha \Big( \frac{e^{-\hat{x}_{uij}}}{1 + e^{-\hat{x}_{uij}}} \cdot \frac{\partial}{\partial \Theta} \hat{x}_{uij} + \lambda_\Theta \Theta \Big) $$ #### Matrix Factorization for Preference Approximation As mentioned earlier, the preference scoring function $\hat{x}_{uij}(\Theta)$ could be approximated by any real-valued function. First, the estimator $\hat{x}_{uij}$ is decomposed into: $$ \hat{x}_{uij} = \hat{x}_{ui} - \hat{x}_{uj} $$ The problem of estimating $\hat{x}_{ui}$ is a standard collaborative filtering formulation, where the matrix factorization approach has been shown to be very effective. The prediction formula can be written as the dot product between the user feature vector $w_u$ and the item feature vector $h_i$: $$ \hat{x}_{ui} = \langle w_u , h_i \rangle = \sum_{f=1}^{k} w_{uf} \cdot h_{if} $$ The derivatives of matrix factorization with respect to the model parameters are: $$ \frac{\partial}{\partial \theta} \hat{x}_{uij} = \begin{cases} (h_{if} - h_{jf}) & \text{if } \theta = w_{uf} \\ w_{uf} & \text{if } \theta = h_{if} \\ -w_{uf} & \text{if } \theta = h_{jf} \\ 0 & \text{else} \end{cases} $$ In theory, any kernel can be used to estimate $\hat{x}_{ui}$ besides the dot product $ \langle \cdot , \cdot \rangle $. For example, k-Nearest-Neighbor (kNN) has also been shown to achieve good performance. #### Analogies to AUC optimization By optimizing the objective function of the BPR model, we are effectively maximizing the [AUC](https://towardsdatascience.com/understanding-auc-roc-curve-68b2303cc9c5) measure. To keep the notebook focused, please refer to the [paper](https://arxiv.org/ftp/arxiv/papers/1205/1205.2618.pdf) for details of the analysis (Section 4.1.1). 
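To make the matrix factorization update rule concrete, the following is a minimal NumPy sketch; it is not part of Cornac, and the names `W`, `H`, `bpr_sgd_step`, `lr` and `reg` are illustrative assumptions. The regularization term is applied here with a minus sign, i.e. as an L2 penalty subtracted from the objective, which is the usual implementation choice.

```python
import numpy as np

def bpr_sgd_step(W, H, u, i, j, lr=0.01, reg=0.001):
    """One stochastic gradient step on ln sigma(x_uij) minus an L2 penalty,
    for a single (user u, positive item i, negative item j) triplet. Sketch only."""
    w_u, h_i, h_j = W[u].copy(), H[i].copy(), H[j].copy()  # copies: simultaneous update
    x_uij = w_u @ (h_i - h_j)              # x_ui - x_uj with dot-product scoring
    sig = 1.0 / (1.0 + np.exp(x_uij))      # e^{-x} / (1 + e^{-x}) = sigma(-x)
    W[u] += lr * (sig * (h_i - h_j) - reg * w_u)
    H[i] += lr * (sig * w_u - reg * h_i)
    H[j] += lr * (-sig * w_u - reg * h_j)

# Toy usage: 5 users, 7 items, 4 latent factors, one sampled triplet
rng = np.random.default_rng(0)
W, H = rng.normal(size=(5, 4)), rng.normal(size=(7, 4))
bpr_sgd_step(W, H, u=0, i=2, j=5)
```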
## 2 Cornac implementation of BPR BPR is implemented in the [Cornac](https://cornac.readthedocs.io/en/latest/index.html) framework as part of the model collections. * Detailed documentation of the BPR model in Cornac can be found [here](https://cornac.readthedocs.io/en/latest/models.html#bayesian-personalized-ranking-bpr). * The source code of the BPR implementation is available on the Cornac GitHub repository and can be found [here](https://github.com/PreferredAI/cornac/blob/master/cornac/models/bpr/recom_bpr.pyx). ## 3 Cornac BPR movie recommender ### 3.1 Load and split data To evaluate the performance of item recommendation, we adopt the provided `python_random_split` tool for consistency. Data is randomly split into training and test sets with a ratio of 75/25. Note that Cornac also covers different [built-in schemes](https://cornac.readthedocs.io/en/latest/eval_methods.html) for model evaluation. ```python data = movielens.load_pandas_df( size=MOVIELENS_DATA_SIZE, header=["userID", "itemID", "rating"] ) data.head() ``` 100%|███████████████████████████████████████████████████████████████████████████████████████| 4.81k/4.81k [00:08<00:00, 590KB/s] <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>userID</th> <th>itemID</th> <th>rating</th> </tr> </thead> <tbody> <tr> <td>0</td> <td>196</td> <td>242</td> <td>3.0</td> </tr> <tr> <td>1</td> <td>186</td> <td>302</td> <td>3.0</td> </tr> <tr> <td>2</td> <td>22</td> <td>377</td> <td>1.0</td> </tr> <tr> <td>3</td> <td>244</td> <td>51</td> <td>2.0</td> </tr> <tr> <td>4</td> <td>166</td> <td>346</td> <td>1.0</td> </tr> </tbody> </table> </div> ```python train, test = python_random_split(data, 0.75) ``` ### 3.2 Cornac Dataset To work with models implemented in Cornac, we need to construct an object from the [Dataset](https://cornac.readthedocs.io/en/latest/data.html#module-cornac.data.dataset) class. The Dataset class in Cornac serves as the main object that the models will interact with. In addition to data transformations, Dataset provides a bunch of useful iterators for looping through the data, as well as supporting different negative sampling techniques. ```python train_set = cornac.data.Dataset.from_uir(train.itertuples(index=False), seed=SEED) print('Number of users: {}'.format(train_set.num_users)) print('Number of items: {}'.format(train_set.num_items)) ``` Number of users: 943 Number of items: 1642 ### 3.3 Train the BPR model BPR has a few important parameters that we need to consider: - `k`: controls the dimension of the latent space (i.e. the size of the vectors $w_u$ and $h_i$ ). - `max_iter`: defines the number of iterations of the SGD procedure. - `learning_rate`: controls the step size $\alpha$ in the gradient update rules. - `lambda_reg`: controls the L2-Regularization $\lambda$ in the objective function. Note that different values of `k` and `max_iter` will affect the training time. We will here set `k` to 200, `max_iter` to 100, `learning_rate` to 0.01, and `lambda_reg` to 0.001. To train the model, we simply need to call the `fit()` method. 
```python bpr = cornac.models.BPR( k=NUM_FACTORS, max_iter=NUM_EPOCHS, learning_rate=0.01, lambda_reg=0.001, verbose=True, seed=SEED ) ``` ```python with Timer() as t: bpr.fit(train_set) print("Took {} seconds for training.".format(t)) ``` 100%|██████████████████████████████████████████████████████████| 100/100 [00:07<00:00, 13.27it/s, correct=92.19%, skipped=9.38%] Optimization finished! Took 7.6953 seconds for training. ### 3.4 Prediction and Evaluation Now that our model is trained, we can produce the ranked lists for recommendation. Every recommender model in Cornac provides `rate()` and `rank()` methods for predicting an item's rating value as well as an item ranked list for a given user. To make use of the current evaluation schemes, we will use the `predict_rating()` and `predict_ranking()` functions inside `cornac_utils` to produce the predictions. Note that the BPR model is effectively designed for item ranking. Hence, we only measure the performance using ranking metrics. ```python with Timer() as t: all_predictions = predict_ranking(bpr, train, usercol='userID', itemcol='itemID', remove_seen=True) print("Took {} seconds for prediction.".format(t)) ``` Took 1.7393803596496582 seconds for prediction. ```python all_predictions.head() ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>userID</th> <th>itemID</th> <th>prediction</th> </tr> </thead> <tbody> <tr> <td>75000</td> <td>811</td> <td>755</td> <td>0.117239</td> </tr> <tr> <td>75001</td> <td>811</td> <td>287</td> <td>2.579992</td> </tr> <tr> <td>75002</td> <td>811</td> <td>181</td> <td>3.743980</td> </tr> <tr> <td>75003</td> <td>811</td> <td>96</td> <td>1.959841</td> </tr> <tr> <td>75004</td> <td>811</td> <td>83</td> <td>1.122369</td> </tr> </tbody> </table> </div> ```python k = 10 eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=k) eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=k) eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=k) eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=k) print("MAP:\t%f" % eval_map, "NDCG:\t%f" % eval_ndcg, "Precision@K:\t%f" % eval_precision, "Recall@K:\t%f" % eval_recall, sep='\n') ``` MAP: 0.109077 NDCG: 0.403395 Precision@K: 0.354989 Recall@K: 0.180183 ```python # Record results with papermill for tests pm.record("map", eval_map) pm.record("ndcg", eval_ndcg) pm.record("precision", eval_precision) pm.record("recall", eval_recall) ``` ## References 1. Rendle, S., Freudenthaler, C., Gantner, Z., & Schmidt-Thieme, L. (2009, June). BPR: Bayesian personalized ranking from implicit feedback. https://arxiv.org/ftp/arxiv/papers/1205/1205.2618.pdf 2. Pan, R., Zhou, Y., Cao, B., Liu, N. N., Lukose, R., Scholz, M., & Yang, Q. (2008, December). One-class collaborative filtering. https://cseweb.ucsd.edu/classes/fa17/cse291-b/reading/04781145.pdf 3. **Cornac** - A Comparative Framework for Multimodal Recommender Systems. https://cornac.preferred.ai/
f8fe1ddcff1c1f3f811b209153648f0a823f7ca3
21,567
ipynb
Jupyter Notebook
notebooks/02_model/cornac_bpr_deep_dive.ipynb
elogicaadith/recommenders
7c0a6a3e23dee047b9afbf8564bd236a8300454e
[ "MIT" ]
3
2019-12-13T22:35:55.000Z
2020-01-05T22:19:56.000Z
notebooks/02_model/cornac_bpr_deep_dive.ipynb
awesomemachinelearning/recommenders
de3782cce370a446e14e6b47e87686867fb7e069
[ "MIT" ]
1
2019-06-05T00:24:27.000Z
2019-06-05T00:24:27.000Z
notebooks/02_model/cornac_bpr_deep_dive.ipynb
awesomemachinelearning/recommenders
de3782cce370a446e14e6b47e87686867fb7e069
[ "MIT" ]
4
2019-06-05T00:04:11.000Z
2019-06-08T02:20:35.000Z
36.186242
410
0.536839
true
4,116
Qwen/Qwen-72B
1. YES 2. YES
0.798187
0.721743
0.576086
__label__eng_Latn
0.860986
0.17677
# Derivation of the kinematic equation (XYZ Euler angles) Katsutoshi Yoshida (Utsunomiya University) ## References - [Algebraic computation with SymPy - Qiita](https://qiita.com/zawawahoge/items/1be137a8147902a5e6cb) - [Matrices (linear algebra) &#8212; SymPy 1.6.2 documentation](https://docs.sympy.org/latest/modules/matrices/matrices.html) ```python import sympy as sym # symbolic math library sym.init_printing() # pretty-print expressions ``` ## Sympy variables and functions ```python om1, om2, om3 = sym.symbols('ω_1 ω_2 ω_3') th1, th2, th3 = sym.symbols('θ_1 θ_2 θ_3', Real=True, positive=True) dth1, dth2, dth3 = sym.symbols('\dot{θ}_1 \dot{θ}_2 \dot{θ}_3', Real=True, positive=True) ``` ```python th = sym.Matrix([th1,th2,th3]) # Euler angles dth = sym.Matrix([dth1,dth2,dth3]) # their time derivatives display(th) display(dth) ``` #### Test ```python th.dot(dth) ``` ## Rotation matrix from the XYZ Euler angles ```python def RotX(th): # rotation matrix about the X axis return sym.Matrix([ [1, 0, 0], [0, sym.cos(th), -sym.sin(th)], [0, sym.sin(th), sym.cos(th)], ]) def RotY(th): # rotation matrix about the Y axis return sym.Matrix([ [sym.cos(th), 0, sym.sin(th)], [0, 1, 0], [-sym.sin(th), 0, sym.cos(th)], ]) def RotZ(th): # rotation matrix about the Z axis return sym.Matrix([ [sym.cos(th), -sym.sin(th), 0], [sym.sin(th), sym.cos(th), 0], [0, 0, 1], ]) Rot = RotZ(th3)*RotY(th2)*RotX(th1) Rot ``` ### LaTeX source output ```python print(sym.latex(Rot)) ``` ### Time derivative of the rotation matrix ```python def sympy_dt_param(expr, param_vec, dparam_vec): """ Differentiate an expression with respect to time, treating its parameters as functions of time. param_vec: parameter vector, sympy.Matrix([sympy symbols]) dparam_vec: time derivative of the parameter vector, sympy.Matrix([sympy symbols]) """ dim = len(param_vec) result = expr # temporary time variable t = sym.symbols('t') # replace the parameters by functions of time func_vec = [] for i in range(dim): lab = 'param' + str(i) # temporary display name fun = sym.symbols(lab, cls=sym.Function, Real=True) func_vec.append( fun(t) ) func_vec = sym.Matrix(func_vec) # convert to a sympy Matrix for i in range(dim): result = result.replace(param_vec[i],func_vec[i]) # carry out the time differentiation result = result.diff() dfunc_vec = func_vec.diff() # substitute the original parameters back for the time functions for i in range(dim): result = result.replace(dfunc_vec[i], dparam_vec[i]) result = result.replace(func_vec[i], param_vec[i]) return result ``` Actually differentiate the rotation matrix with respect to time ```python dRot = sympy_dt_param(Rot, th, dth) dRot ``` ### Inverse of the rotation matrix ```python invRot = Rot.transpose() # for a rotation matrix, the inverse equals the transpose invRot ``` ### Cross-product matrix of the angular velocity vector $[\boldsymbol{\omega}\times]:=R^{-1}\dot{R}$ ```python OMcross = sym.simplify(invRot*dRot) ``` ### Extracting the angular velocity vector $\boldsymbol{\omega}$ from the cross-product matrix $[\boldsymbol{\omega}\times]:=R^{-1}\dot{R}$ ```python OMvec = sym.Matrix([OMcross[2,1],OMcross[0,2],OMcross[1,0]]) OMvec ``` ### Representation matrix $\Omega$ of the matrix form $\boldsymbol{\omega}=\Omega \dot{\boldsymbol{\theta}}$ ```python tmp = OMvec OMmat = tmp.jacobian(dth) OMmat ``` ### LaTeX source output ```python print(sym.latex(OMmat)) ``` ### Solving $\Omega(\boldsymbol{\theta},\dot{\boldsymbol{\theta}})$ for $\dot{\boldsymbol{\theta}}$ to derive the kinematic equation $\dot{\boldsymbol{\theta}}=K(\boldsymbol{\theta},\boldsymbol{\omega})$ #### Solve for $\dot{\boldsymbol{\theta}}$ ```python var = dth oms = sym.Matrix([om1, om2, om3]) equations = OMmat*var - oms result = sym.solve(equations, var) result ``` #### Build $K(\boldsymbol{\theta},\boldsymbol{\omega})$ from the solution ```python Kfunc = sym.Matrix([result[dth[i]] for i in range(3)]) Kfunc = sym.simplify(Kfunc) Kfunc ``` ## Matrix forms of the kinematic equation $\dot{\boldsymbol{\theta}}=K(\boldsymbol{\theta},\boldsymbol{\omega})$ - Since the Jacobian matrix of the linear map $K(\boldsymbol{q},\boldsymbol{\omega})$ equals its representation matrix, the Jacobian function [jacobian(X)](https://docs.sympy.org/latest/modules/matrices/matrices.html#sympy.matrices.matrices.MatrixCalculus.jacobian) is reused here. ### Representation matrix of the form $\dot{\boldsymbol{\theta}} = K_{\omega}\boldsymbol{\theta}$ Because the Euler angles $\theta_1$, $\theta_2$, $\theta_3$ enter nonlinearly (inside the trigonometric functions), a representation matrix of this form cannot be obtained. 
### Representation matrix of the form $\dot{\boldsymbol{\theta}} = K_{\boldsymbol{\theta}}\boldsymbol{\omega}$ ```python Kth = Kfunc.jacobian(oms) Kth ``` ### LaTeX source output ```python print(sym.latex(Kth)) ```
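As a quick sanity check (not in the original notebook), $K_{\boldsymbol{\theta}}$ should be the inverse of $\Omega$, so their product should simplify to the identity matrix wherever $\cos\theta_2 \neq 0$:

```python
# Sanity check: OMmat * Kth should simplify to the 3x3 identity matrix
# (valid away from the singularity cos(θ_2) = 0)
sym.simplify(OMmat * Kth)
```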
3e8366359cb78e7158d9bb9e4fa78c2f0b72294a
8,109
ipynb
Jupyter Notebook
m3d/Colab/Python_9.2.ipynb
ktysd/_colab_test
0ffc4a63dce926e21647f4497269ac90f4eaa941
[ "MIT" ]
null
null
null
m3d/Colab/Python_9.2.ipynb
ktysd/_colab_test
0ffc4a63dce926e21647f4497269ac90f4eaa941
[ "MIT" ]
null
null
null
m3d/Colab/Python_9.2.ipynb
ktysd/_colab_test
0ffc4a63dce926e21647f4497269ac90f4eaa941
[ "MIT" ]
null
null
null
8,109
8,109
0.630657
true
1,738
Qwen/Qwen-72B
1. YES 2. YES
0.913677
0.831143
0.759396
__label__yue_Hant
0.387264
0.602663
# Python _for fun and profit_ ###### Juan Luis Cano Rodríguez ###### Madrid, 2016-05-13 @ ETS Asset Management Factory ## Outline * Introduction * Python for Data Science * Python for IT * General advice * Conclusions ## Outline * Introduction * Python for Data Science * Interactive computation with Jupyter * Numerical analysis with NumPy, SciPy * Visualization with matplotlib and others * Data manipulation with pandas * Machine Learning with scikit-learn * Python for IT * Data gathering with Requests and Scrapy * Information extraction with lxml, BeautifulSoup and others * User interfaces with PyQt, xlwings and others * Other: memcached, SOA * General advice * Python packaging * The future of Python * Conclusions ## `>>> print(self)` * _Almost_ **Aerospace Engineer** * Quant Developer for BBVA at Indizen * Writer and furious tweeter at **Pybonacci** * Chair ~~and BDFL~~ of **Python España** non-profit * Co-creator and charismatic leader of **AeroPython** (\*not the Lorena Barba course) * _When time permits (rare) [writes some open source Python code](https://github.com/Juanlu001/)_ # Python for Data Science * **Python is a dynamic, interpreted\* language that is easy to learn** * Very popular in science, research * Rich ecosystem of packages that interoperate * Multiple languages are used (FORTRAN, C/C++) and wrapped from Python for a convenient interface ## Jupyter * **Interactive computation environment in a browser** * Traces its roots to IPython, created in 2001 * Nowadays it's language-agnostic (**40 languages**) Jupyter * Notebook * Exporting * Interactive Widgets * Slides * Extensions https://github.com/ipython-contrib/IPython-notebook-extensions It's a notebook! * Code is computed in cells * These can contain text, code, images, videos... * All resulting plots can be integrated in the interface * We can export it to different formats using `nbconvert` or from the UI ```python ``` ```python ``` ```python ``` It's interactive! ```python from ipywidgets import interact, fixed ``` ```python from sympy import init_printing, Symbol, Eq, factor init_printing(use_latex=True) x = Symbol('x') def factorit(n): return Eq(x**n-1, factor(x**n-1)) ``` ```python interact(factorit, n=(2,40)) ``` ```python # Import matplotlib (plotting), skimage (image processing) and interact (user interfaces) # This enables their use in the Notebook. %matplotlib inline from matplotlib import pyplot as plt from skimage import data from skimage.feature import blob_doh from skimage.color import rgb2gray # Extract the first 500px square of the Hubble Deep Field. image = data.hubble_deep_field()[0:500, 0:500] image_gray = rgb2gray(image) def plot_blobs(max_sigma=30, threshold=0.1, gray=False): """ Plot the image and the blobs that have been found. """ blobs = blob_doh(image_gray, max_sigma=max_sigma, threshold=threshold) fig, ax = plt.subplots(figsize=(8,8)) ax.set_title('Galaxies in the Hubble Deep Field') if gray: ax.imshow(image_gray, interpolation='nearest', cmap='gray_r') circle_color = 'red' else: ax.imshow(image, interpolation='nearest') circle_color = 'yellow' for blob in blobs: y, x, r = blob c = plt.Circle((x, y), r, color=circle_color, linewidth=2, fill=False) ax.add_patch(c) ``` ```python interact(plot_blobs, max_sigma=(10, 40, 2), threshold=(0.005, 0.02, 0.001)) ``` It's highly extensible! * Some extensions https://github.com/ipython-contrib/IPython-notebook-extensions * A thorough guide http://mindtrove.info/4-ways-to-extend-jupyter-notebook/ ## NumPy * N-dimensional data structure. 
* Homogeneously typed. * Efficient! A universal function (or ufunc for short) is a function that operates on ndarrays. It is a “vectorized function". ```python import numpy as np ``` ```python my_list = list(range(0,100000)) res1 = %timeit -o sum(my_list) ``` 1000 loops, best of 3: 1.14 ms per loop ```python array = np.arange(0, 100000) res2 = %timeit -o np.sum(array) ``` 10000 loops, best of 3: 61.1 µs per loop ```python res1.best / res2.best ``` 18.68618617922427 NumPy is much more: * Advanced manipulation tricks: broadcasting, fancy indexing * Functions: generalized linear algebra, Fast Fourier transforms * **Use case**: - In-memory, fits-in-my-computer, homogeneous data - Easily vectorized operations ## SciPy General purpose scientific computing library * `scipy.linalg`: ATLAS LAPACK and BLAS libraries * `scipy.stats`: distributions, statistical functions... * `scipy.integrate`: integration of functions and ODEs * `scipy.optimization`: local and global optimization, fitting, root finding... * `scipy.interpolate`: interpolation, splines... * `scipy.fftpack`: Fourier trasnforms * `scipy.signal`: Signal processing * `scipy.special`: Special functions * `scipy.io`: Reading/Writing scientific formats ## matplotlib * The father of all Python visualization packages * Modeled after MATLAB API * Powerful and versatile, but often complex and not so well documented * Undergoing a deep default style change ```python # This line integrates matplotlib with the notebook %matplotlib inline import matplotlib.pyplot as plt ``` ```python import numpy as np x = np.linspace(-2, 10) plt.plot(x, np.sin(x) / x) ``` ```python def g(x, y): return np.cos(x) + np.sin(y) ** 2 x = np.linspace(-2, 3, 1000) y = np.linspace(-2, 3, 1000) xx, yy = np.meshgrid(x, y) zz = g(xx, yy) fig = plt.figure(figsize=(6, 6)) cs = plt.contourf(xx, yy, zz, np.linspace(-1, 2, 13), cmap=plt.cm.viridis) plt.colorbar() cs = plt.contour(xx, yy, zz, np.linspace(-1, 2, 13), colors='k') plt.clabel(cs) plt.xlabel("x") plt.ylabel("y") plt.title(r"Function $g(x, y) = \cos{x} + \sin^2{y}$") plt.close() ``` ```python fig ``` There are **many** alternatives to matplotlib, each one with its use cases, design decisions, and tradeoffs. Here are some of them: * `seaborn`: High level layer on top of matplotlib, easier API and beautiful defaults for common visualizations * `ggplot`: For those who prefer R-like plotting (API and appearance) * `plotly`: 2D and 3D interactive plots in the browser as a web service * `Bokeh`: targets modern web browsers and big data * `pyqtgraph`: Qt embedding, realtime plots Others: `pygal`, `mpld3`, `bqplot`... Use the best tool for the job! And in case of doubt, just get matplotlib :) ## pandas * **High-performance, easy-to-use data structures and data analysis** * Inspired by R DataFrames * _Not just NumPy on steroids_ * Input/Output functions for a variety of formats * SQL and query-like operations ```python import numpy as np import pandas as pd dates = pd.date_range('20130101', periods=6) df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD')) df ``` ## scikit-learn and others * `scikit-learn`: **A high quality machine learning package** * `Theano`: primitives for building neural networks * `TensorFlow`: Google's take on machine learning and deep learning * `keras`: deep learning built on top of Theano and TensorFlow Many possibilities! 
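As a minimal illustration of the fit/predict/score pattern that scikit-learn estimators share, here is a hedged sketch; the toy dataset and the choice of estimator are illustrative assumptions, not part of the original talk.

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# Load a toy dataset, split it, fit an estimator and score it on held-out data
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(max_iter=200)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
```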
# Infrastructure ## Information retrieval and extraction * Web scraping: Requests, Scrapy * Information extraction: lxml, json, BeautifulSoup, pyparsing - Many options depending on the specific format * Cache systems: memcached, redis-py - Python wrappers for existing, mature systems ## User interfaces and applications * GUI toolkits - PyQt is the most powerful, but watch out for its license terms - Other: Tkinter, PyGTK... - Are desktop apps dead anyway? Perhaps move to the browser? * Service-oriented architectures (SOA) - Flask: a small web framework focused on little services - Django + Django REST framework: More complex, difficult to master, very powerful ## Python + Excel * Most interesting option: xlwings - Possibility to create User Defined Functions (UDFs) in Python to be used from Excel - Also, call VBA Subs and Functions from Python! - Creation of Excel Addins - Support for pandas DataFrames, charts, CSE formulas and more - Debugging helpers, easier than you might think! * Challenges: _deployment_ * Other options: openpyxl, xlwt, XlsxWriter # General advice ## Python packaging Remember this picture? It's _so_ 2012! And by the way, a bit too optimistic: * `setuptools` is back * `easy_install` is not gone ## The future of Python * Origins: 1995-2005 - Python is first used in science - Thin wrappers around LAPACK and other Netlib libraries - No notion of good practices, GitHub or the like - Several array packages - In 2001 IPython and matplotlib appear - Year 2002 marks the beginning of the digital age - In 2005 NumPy is born to rule them all * Growth: 2005-2015 - The community starts gaining traction - GitHub brings a new era of online collaboration, and the development pace accelerates - In 2008 pandas is born - In 2012 IPython receives $1.15M from the Sloan Foundation and Continuum Analytics is born - Big Data starts getting more and more attention > "Prediction is very difficult, especially about the future." * Future: 2015-2025? - In 2016 Continuum brings Python to HDFS - _Highly opinionated_: Oracle v Google poses a threat to Java - The Jupyter project diversifies, different notebook interfaces and use cases appear - Replacements for NumPy and pandas are developed (DyND, Blaze) - Julia and other competitors mature - Python 2 will reach EOL in 2020 # Muchas gracias 😊 * This talk: https://github.com/Juanlu001/python-fun-and-profit/ * My LinkedIn https://es.linkedin.com/in/juanluiscanor/en * 📫 hello@juanlu.space _Per Python ad astra!_
be60b18c903c897f42af4824fe7e88ed52053399
769,437
ipynb
Jupyter Notebook
Python for fun and profit.ipynb
Juanlu001/python-fun-and-profit
ac9be81e9a151024be7d3123901f031665d9e766
[ "CC0-1.0" ]
1
2019-02-04T08:59:51.000Z
2019-02-04T08:59:51.000Z
Python for fun and profit.ipynb
Juanlu001/python-fun-and-profit
ac9be81e9a151024be7d3123901f031665d9e766
[ "CC0-1.0" ]
null
null
null
Python for fun and profit.ipynb
Juanlu001/python-fun-and-profit
ac9be81e9a151024be7d3123901f031665d9e766
[ "CC0-1.0" ]
null
null
null
817.680128
622,632
0.942076
true
2,556
Qwen/Qwen-72B
1. YES 2. YES
0.712232
0.800692
0.570279
__label__eng_Latn
0.926111
0.163278
```python from sympy import * init_printing(use_unicode=True) ``` # Matrix, with respect to the standard basis, of the orthogonal projection onto a subspace $$ \left[P_{gen\{G^{-1}w\}}\right]^{E}_{E} $$ ```python def matriz_de_proyeccion_ortogonal_en_base_canonica(S, dimension, G=None): ''' S: the vector w that generates the subspace whose orthogonal-projection matrix with respect to the standard basis we want, if the inner product were the canonical one Example: if S = {x in R^3 : x1 + x2 + x3 = 0} => w = [1, 1, 1] dimension: the dimension of the Euclidean space in question G: the Gram matrix of the Euclidean space in question ''' if S.rank() > 2: raise NotImplementedError("This function only works for subspaces of dimension <= 2") if G is None: G = eye(dimension) if S.rank() == 2: S = S.col(0).cross(S.col(1)) G_inv = G.inv() return (1 / (S.T * G_inv * S)[0]) * G_inv * S * S.T ``` # Example In $\mathbb{R}^{3}$ with the inner product $\langle\cdot, \cdot\rangle$ defined by $$ \langle x, y \rangle = y^{T} \begin{bmatrix}2 & -2 & 0\\ -2 & 5 & 4\\ 0 & 4 & 6\end{bmatrix} x $$ consider the subspaces $$ \mathbb{S}_{1} = \{x \in \mathbb{R}^{3} : x_{1} + x_{2} + x_{3} = 0\} \text{ and } \mathbb{S}_{2} = \{x \in \mathbb{R}^{3} : x_{1} - x_{3} = 0\} $$ 3.7 (a) Find the matrices, with respect to the standard basis, of the orthogonal projections of $\mathbb{R}^{3}$ onto $\mathbb{S}_{1}^{\perp}$ and onto $\mathbb{S}_{2}^{\perp}$ ```python S1_ort = Matrix([1, 1, 1]) S2_ort = Matrix([1, 0, -1]) G = Matrix([[2, -2, 0], [-2, 5, 4], [0, 4, 6]]) dim = 3 P_S1_ort = matriz_de_proyeccion_ortogonal_en_base_canonica(S1_ort, dim, G) P_S2_ort = matriz_de_proyeccion_ortogonal_en_base_canonica(S2_ort, dim, G) display(P_S1_ort) display(P_S2_ort) ``` # Distance from a vector to a subspace $$ \text{This function computes }||P_{\mathbb{S}^{\perp}}(x)|| \text{ from a subspace } \mathbb{S} \text{ with } dim(\mathbb{S}) \leq 2, \text{ or from } \mathbb{S}^{\perp} \text{ with } dim(\mathbb{S}) \leq 2. $$ ```python def distancia_de_un_vector_a_un_subespacio(x: Matrix, S, dimension: int, G: Matrix=None): ''' x: the vector that we want to project onto the one-dimensional orthogonal subspace S: the subspace whose distance from x we want to know dimension: the dimension of the Euclidean space in question G: the Gram matrix of the Euclidean space in question This function computes the distance from the vector x to the subspace S, where S has dimension 1 or 2 ''' if S.rank() > 2: raise NotImplementedError("This function only works for subspaces of dimension <= 2") if G is None: G = eye(dimension) if S.rank() == 2: S = S.col(0).cross(S.col(1)) return S.T * x / sqrt((S.T * G.inv() * S)[0]) ``` ## Example Let $(\mathbb{V}, \langle\cdot, \cdot\rangle)$ be a Euclidean $\mathbb{R}$-space of dimension 3, and let $\{v_{1}, v_{2}, v_{3}\}$ be a basis of $\mathbb{V}$ whose Gram matrix is $$ \begin{bmatrix} \frac{1}{5} & 0 & \frac{1}{3}\\ 0 & \frac{1}{3} & 0\\ \frac{1}{3} & 0 & 1\\ \end{bmatrix} $$ The distance from the vector $2v_{1} + 5v_{2} + 3v_{3}$ to the subspace gen$\{v_{1}, v_{2}\}$ is ```python r = Rational # Same function, shorter name - keeps fractions from being displayed as decimals G_B = Matrix([[r('1/5'), 0, r('1/3')], [0, r('1/3'), 0], [r('1/3'), 0, 1]]) q_B = Matrix([2, 5, 3]) w = Matrix([0, 0, 1]) distancia_de_un_vector_a_un_subespacio(x=q_B, S=w, dimension=3, G=G_B) ```
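As a quick extra check (not part of the original exercise), any orthogonal projection matrix must be idempotent, $P^2 = P$; with the matrices `P_S1_ort` and `P_S2_ort` computed above this can be verified symbolically:

```python
# Sanity check: orthogonal projection matrices are idempotent (P*P == P),
# so both differences below should simplify to the 3x3 zero matrix.
display(simplify(P_S1_ort * P_S1_ort - P_S1_ort))
display(simplify(P_S2_ort * P_S2_ort - P_S2_ort))
```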
c993f7cd3531f0fca671fdfa772c17b3dd8b2abb
13,844
ipynb
Jupyter Notebook
notebooks_varios/Proyecciones Ortogonales y Distancia de un Vector a un Subespacio.ipynb
ilitteri/manim-algebra-notebooks
d57461de498ca574e7d866dae1d091a2c5a6880a
[ "MIT" ]
2
2021-11-12T21:23:35.000Z
2022-02-27T14:43:48.000Z
notebooks_varios/Proyecciones Ortogonales y Distancia de un Vector a un Subespacio.ipynb
ilitteri/manim-algebra-notebooks
d57461de498ca574e7d866dae1d091a2c5a6880a
[ "MIT" ]
null
null
null
notebooks_varios/Proyecciones Ortogonales y Distancia de un Vector a un Subespacio.ipynb
ilitteri/manim-algebra-notebooks
d57461de498ca574e7d866dae1d091a2c5a6880a
[ "MIT" ]
1
2021-11-12T18:24:56.000Z
2021-11-12T18:24:56.000Z
53.451737
3,124
0.692791
true
1,362
Qwen/Qwen-72B
1. YES 2. YES
0.894789
0.855851
0.765807
__label__spa_Latn
0.762062
0.617557
```python from sympy import * ``` ```python s=10 ``` ```python x = Symbol("x") ``` ```python y = x #y = (2*x-1) ``` ```python legpols = [] ``` ```python for k in range(0,s+1): legpol = 0 for j in range(0, k//2+1): legpol += (-1)**j * factorial(2*k - 2*j) / factorial(k-j) / factorial(k-2*j) / factorial(j) / 2**k * y**(k-2*j) legpols.append(expand(legpol)) ``` ```python legpols ``` [1, x, 3*x**2/2 - 1/2, 5*x**3/2 - 3*x/2, 35*x**4/8 - 15*x**2/4 + 3/8, 63*x**5/8 - 35*x**3/4 + 15*x/8, 231*x**6/16 - 315*x**4/16 + 105*x**2/16 - 5/16, 429*x**7/16 - 693*x**5/16 + 315*x**3/16 - 35*x/16, 6435*x**8/128 - 3003*x**6/32 + 3465*x**4/64 - 315*x**2/32 + 35/128, 12155*x**9/128 - 6435*x**7/32 + 9009*x**5/64 - 1155*x**3/32 + 315*x/128, 46189*x**10/256 - 109395*x**8/256 + 45045*x**6/128 - 15015*x**4/128 + 3465*x**2/256 - 63/256] ```python ```
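As a hedged sanity check (not in the original notebook), with the default choice `y = x` the polynomials built from this explicit sum should coincide with sympy's built-in Legendre polynomials:

```python
# Compare against sympy's built-in legendre(k, x); every entry should simplify to 0
[simplify(legpols[k] - legendre(k, x)) for k in range(s + 1)]
```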
217e20cc4bdf2d2aba015ba5b3dfdd1e8e6d7c23
2,665
ipynb
Jupyter Notebook
prototyping/Legendre_Polynomials_Standard.ipynb
krystophny/GeometricIntegrators.jl
7855e977b014c8ba119f6bb73c6ed9bf96f04b1d
[ "MIT" ]
6
2020-12-29T10:41:35.000Z
2022-03-21T11:48:39.000Z
prototyping/Legendre_Polynomials_Standard.ipynb
krystophny/GeometricIntegrators.jl
7855e977b014c8ba119f6bb73c6ed9bf96f04b1d
[ "MIT" ]
15
2020-11-16T16:45:50.000Z
2022-03-09T17:51:11.000Z
prototyping/Legendre_Polynomials_Standard.ipynb
krystophny/GeometricIntegrators.jl
7855e977b014c8ba119f6bb73c6ed9bf96f04b1d
[ "MIT" ]
2
2021-05-05T12:54:38.000Z
2021-12-17T18:19:13.000Z
19.595588
128
0.451032
true
444
Qwen/Qwen-72B
1. YES 2. YES
0.872347
0.727975
0.635047
__label__yue_Hant
0.243585
0.313759
# Developing Quaternions for iPython In this notebook, tools for working with quaternions for physics issues are developed. The class QH treat quaternions as Hamilton would have done: as a 4-vector over the real numbers. ```python import math import numpy as np import pdb import random import sympy as sp import unittest from copy import deepcopy from IPython.display import display ``` Define the stretch factor $\gamma$ and the $\gamma \beta$ used in special relativity. ```python def sr_gamma(beta_x=0, beta_y=0, beta_z=0): """The gamma used in special relativity using 3 velocites, some may be zero.""" return 1 / (1 - beta_x ** 2 - beta_y ** 2 - beta_z ** 2) ** (1/2) def sr_gamma_betas(beta_x=0, beta_y=0, beta_z=0): """gamma and the three gamma * betas used in special relativity.""" g = sr_gamma(beta_x, beta_y, beta_z) return [g, g * beta_x, g * beta_y, g * beta_z] ``` ## Quaternions for Hamilton Define a class QH to manipulate quaternions as Hamilton would have done it so many years ago. The "qtype" is a little bit of text to leave a trail of breadcrumbs about how a particular quaternion was generated. ```python class QH(object): """Quaternions as Hamilton would have defined them, on the manifold R^4.""" def __init__(self, values=None, qtype="Q", representation=""): if values is None: self.t, self.x, self.y, self.z = 0, 0, 0, 0 elif len(values) == 4: self.t, self.x, self.y, self.z = values[0], values[1], values[2], values[3] elif len(values) == 8: self.t, self.x = values[0] - values[1], values[2] - values[3] self.y, self.z = values[4] - values[5], values[6] - values[7] self.representation = representation if representation != "": self.t, self.x, self.y, self.z = self.representation_2_txyz(representation) self.qtype = qtype def __str__(self, quiet=False): """Customize the output.""" qtype = self.qtype if quiet: qtype = "" if self.representation == "": string = "({t}, {x}, {y}, {z}) {qt}".format( t=self.t, x=self.x, y=self.y, z=self.z, qt=qtype) elif self.representation == "polar": rep = self.txyz_2_representation("polar") string = "({A} A, {thetaX} 𝜈x, {thetaY} 𝜈y, {thetaZ} 𝜈z) {qt}".format( A=rep[0], thetaX=rep[1], thetaY=rep[2], thetaZ=rep[3], qt=qtype) elif self.representation == "spherical": rep = self.txyz_2_representation("spherical") string = "({t} t, {R} R, {theta} θ, {phi} φ) {qt}".format( t=rep[0], R=rep[1], theta=rep[2], phi=rep[3], qt=qtype) return string def print_state(self, label, spacer=True, quiet=True): """Utility for printing a quaternion.""" print(label) print(self.__str__(quiet)) if spacer: print("") def is_symbolic(self): """Figures out if an expression has symbolic terms.""" symbolic = False if hasattr(self.t, "free_symbols") or hasattr(self.x, "free_symbols") or \ hasattr(self.y, "free_symbols") or hasattr(self.z, "free_symbols"): symbolic = True return symbolic def txyz_2_representation(self, representation): """Converts Cartesian txyz into an array of 4 values in a different representation.""" symbolic = self.is_symbolic() if representation == "": rep = [self.t, self.x, self.y, self.z] elif representation == "polar": amplitude = (self.t ** 2 + self.x ** 2 + self.y **2 + self.z **2) ** (1/2) abs_v = self.abs_of_vector().t if symbolic: theta = sp.atan2(abs_v, self.t) else: theta = math.atan2(abs_v, self.t) if abs_v == 0: thetaX, thetaY, thetaZ = 0, 0, 0 else: thetaX = theta * self.x / abs_v thetaY = theta * self.y / abs_v thetaZ = theta * self.z / abs_v rep = [amplitude, thetaX, thetaY, thetaZ] elif representation == "spherical": t = self.t R = (self.x ** 2 + self.y 
**2 + self.z **2) ** (1/2) if R == 0: theta = 0 else: if symbolic: theta = sp.acos(self.z / R) else: theta = math.acos(self.z / R) if symbolic: phi = sp.atan2(self.y, self.x) else: phi = math.atan2(self.y, self.x) rep = [t, R, theta, phi] else: print("Oops, don't know representation: ", representation) return rep def representation_2_txyz(self, representation): """Convert from a representation to Cartesian txyz.""" symbolic = False if hasattr(self.t, "free_symbols") or hasattr(self.x, "free_symbols") or \ hasattr(self.y, "free_symbols") or hasattr(self.z, "free_symbols"): symbolic = True if representation == "": t, x, y, z = self.t, self.x, self.y, self.z elif representation == "polar": amplitude, thetaX, thetaY, thetaZ = self.t, self.x, self.y, self.z theta = (thetaX ** 2 + thetaY ** 2 + thetaZ ** 2) ** (1/2) if theta == 0: t = self.t x, y, z = 0, 0, 0 else: if symbolic: t = amplitude * sp.cos(theta) x = self.x / theta * amplitude * sp.sin(theta) y = self.y / theta * amplitude * sp.sin(theta) z = self.z / theta * amplitude * sp.sin(theta) else: t = amplitude * math.cos(theta) x = self.x / theta * amplitude * math.sin(theta) y = self.y / theta * amplitude * math.sin(theta) z = self.z / theta * amplitude * math.sin(theta) elif representation == "spherical": t, R, theta, phi = self.t, self.x, self.y, self.z if symbolic: x = R * sp.sin(theta) * sp.cos(phi) y = R * sp.sin(theta) * sp.sin(phi) z = R * sp.cos(theta) else: x = R * math.sin(theta) * math.cos(phi) y = R * math.sin(theta) * math.sin(phi) z = R * math.cos(theta) else: print("Oops, don't know representation: ", representation) txyz = [t, x, y, z] return txyz def check_representations(self, q1): """If they are the same, report true. If not, kick out an exception. Don't add apples to oranges.""" if self.representation == q1.representation: return True else: raise Exception("Oops, 2 quaternions have different representations: {}, {}".format(self.representation, q1.representation)) return False def display_q(self, label = ""): """Display each terms in a pretty way.""" if label: print(label) display(self.t) display(self.x) display(self.y) display(self.z) return def simple_q(self, label=""): """Simplify each term.""" if label: print(label) self.t = sp.simplify(self.t) self.x = sp.simplify(self.x) self.y = sp.simplify(self.y) self.z = sp.simplify(self.z) return self def expand_q(self): """Expand each term.""" self.t = sp.expand(self.t) self.x = sp.expand(self.x) self.y = sp.expand(self.y) self.z = sp.expand(self.z) return self def subs(self, symbol_value_dict): """Evaluates a quaternion using sympy values and a dictionary {t:1, x:2, etc}.""" t1 = self.t.subs(symbol_value_dict) x1 = self.x.subs(symbol_value_dict) y1 = self.y.subs(symbol_value_dict) z1 = self.z.subs(symbol_value_dict) q_txyz = QH([t1, x1, y1, z1], qtype=self.qtype, representation=self.representation) return q_txyz def scalar(self, qtype="scalar"): """Returns the scalar part of a quaternion.""" end_qtype = "scalar({})".format(self.qtype) s = QH([self.t, 0, 0, 0], qtype=end_qtype, representation=self.representation) return s def vector(self, qtype="v"): """Returns the vector part of a quaternion.""" end_qtype = "vector({})".format(self.qtype) v = QH([0, self.x, self.y, self.z], qtype=end_qtype, representation=self.representation) return v def xyz(self): """Returns the vector as an np.array.""" return np.array([self.x, self.y, self.z]) def q_0(self, qtype="0"): """Return a zero quaternion.""" q0 = QH([0, 0, 0, 0], qtype=qtype, representation=self.representation) return q0 def 
q_1(self, n=1, qtype="1"): """Return a multiplicative identity quaternion.""" q1 = QH([n, 0, 0, 0], qtype=qtype, representation=self.representation) return q1 def q_i(self, n=1, qtype="i"): """Return i.""" qi = QH([0, n, 0, 0], qtype=qtype, representation=self.representation) return qi def q_j(self, n=1, qtype="j"): """Return j.""" qj = QH([0, 0, n, 0], qtype=qtype, representation=self.representation) return qj def q_k(self, n=1, qtype="k"): """Return k.""" qk = QH([0, 0, 0, n], qtype=qtype, representation=self.representation) return qk def q_random(self, qtype="?"): """Return a random-valued quaternion.""" qr = QH([random.random(), random.random(), random.random(), random.random()], qtype=qtype) return qr def dupe(self, qtype=""): """Return a duplicate copy, good for testing since qtypes persist""" du = QH([self.t, self.x, self.y, self.z], qtype=self.qtype, representation=self.representation) return du def equals(self, q1): """Tests if two quaternions are equal.""" self.check_representations(q1) self_t, self_x, self_y, self_z = sp.expand(self.t), sp.expand(self.x), sp.expand(self.y), sp.expand(self.z) q1_t, q1_x, q1_y, q1_z = sp.expand(q1.t), sp.expand(q1.x), sp.expand(q1.y), sp.expand(q1.z) if math.isclose(self_t, q1_t) and math.isclose(self_x, q1_x) and math.isclose(self_y, q1_y) and math.isclose(self_z, q1_z): return True else: return False def conj(self, conj_type=0, qtype="*"): """Three types of conjugates.""" t, x, y, z = self.t, self.x, self.y, self.z conj_q = QH() if conj_type == 0: conj_q.t = t if x != 0: conj_q.x = -1 * x if y != 0: conj_q.y = -1 * y if z != 0: conj_q.z = -1 * z elif conj_type == 1: if t != 0: conj_q.t = -1 * t conj_q.x = x if y != 0: conj_q.y = -1 * y if z != 0: conj_q.z = -1 * z qtype += "1" elif conj_type == 2: if t != 0: conj_q.t = -1 * t if x != 0: conj_q.x = -1 * x conj_q.y = y if z != 0: conj_q.z = -1 * z qtype += "2" conj_q.qtype = self.qtype + qtype conj_q.representation = self.representation return conj_q def conj_q(self, q1): """Given a quaternion with 0's or 1's, will do the standard conjugate, first conjugate second conjugate, sign flip, or all combinations of the above.""" _conj = deepcopy(self) if q1.t: _conj = _conj.conj(conj_type=0) if q1.x: _conj = _conj.conj(conj_type=1) if q1.y: _conj = _conj.conj(conj_type=2) if q1.z: _conj = _conj.flip_signs() return _conj def flip_signs(self, qtype="-"): """Flip the signs of all terms.""" end_qtype = "-{}".format(self.qtype) t, x, y, z = self.t, self.x, self.y, self.z flip_q = QH(qtype=end_qtype, representation=self.representation) if t != 0: flip_q.t = -1 * t if x != 0: flip_q.x = -1 * x if y != 0: flip_q.y = -1 * y if z != 0: flip_q.z = -1 * z return flip_q def vahlen_conj(self, conj_type="-", qtype="vc"): """Three types of conjugates -'* done by Vahlen in 1901.""" t, x, y, z = self.t, self.x, self.y, self.z conj_q = QH() if conj_type == '-': conj_q.t = t if x != 0: conj_q.x = -1 * x if y != 0: conj_q.y = -1 * y if z != 0: conj_q.z = -1 * z qtype += "*-" if conj_type == "'": conj_q.t = t if x != 0: conj_q.x = -1 * x if y != 0: conj_q.y = -1 * y conj_q.z = z qtype += "*'" if conj_type == '*': conj_q.t = t conj_q.x = x conj_q.y = y if z != 0: conj_q.z = -1 * z qtype += "*" conj_q.qtype = self.qtype + qtype conj_q.representation = self.representation return conj_q def _commuting_products(self, q1): """Returns a dictionary with the commuting products.""" s_t, s_x, s_y, s_z = self.t, self.x, self.y, self.z q1_t, q1_x, q1_y, q1_z = q1.t, q1.x, q1.y, q1.z products = {'tt': s_t * q1_t, 'xx+yy+zz': s_x * q1_x + 
s_y * q1_y + s_z * q1_z, 'tx+xt': s_t * q1_x + s_x * q1_t, 'ty+yt': s_t * q1_y + s_y * q1_t, 'tz+zt': s_t * q1_z + s_z * q1_t} return products def _anti_commuting_products(self, q1): """Returns a dictionary with the three anti-commuting products.""" s_x, s_y, s_z = self.x, self.y, self.z q1_x, q1_y, q1_z = q1.x, q1.y, q1.z products = {'yz-zy': s_y * q1_z - s_z * q1_y, 'zx-xz': s_z * q1_x - s_x * q1_z, 'xy-yx': s_x * q1_y - s_y * q1_x, 'zy-yz': - s_y * q1_z + s_z * q1_y, 'xz-zx': - s_z * q1_x + s_x * q1_z, 'yx-xy': - s_x * q1_y + s_y * q1_x } return products def _all_products(self, q1): """Returns a dictionary with all possible products.""" products = self._commuting_products(q1) products.update(self._anti_commuting_products(q1)) return products def square(self, qtype="^2"): """Square a quaternion.""" end_qtype = "{}{}".format(self.qtype, qtype) qxq = self._commuting_products(self) sq_q = QH(qtype=end_qtype, representation=self.representation) sq_q.t = qxq['tt'] - qxq['xx+yy+zz'] sq_q.x = qxq['tx+xt'] sq_q.y = qxq['ty+yt'] sq_q.z = qxq['tz+zt'] return sq_q def norm_squared(self, qtype="|| ||^2"): """The norm_squared of a quaternion.""" end_qtype = "||{}||^2".format(self.qtype, qtype) qxq = self._commuting_products(self) n_q = QH(qtype=end_qtype, representation=self.representation) n_q.t = qxq['tt'] + qxq['xx+yy+zz'] return n_q def norm_squared_of_vector(self, qtype="|V( )|^2"): """The norm_squared of the vector of a quaternion.""" end_qtype = "|V({})|^2".format(self.qtype) qxq = self._commuting_products(self) nv_q = QH(qtype=end_qtype, representation=self.representation) nv_q.t = qxq['xx+yy+zz'] return nv_q def abs_of_q(self, qtype="||"): """The absolute value, the square root of the norm_squared.""" end_qtype = "|{}|".format(self.qtype) a = self.norm_squared() sqrt_t = a.t ** (1/2) a.t = sqrt_t a.qtype = end_qtype a.representation = self.representation return a def normalize(self, n=1, qtype="U"): """Normalize a quaternion""" end_qtype = "{}{}".format(self.qtype, qtype) abs_q_inv = self.abs_of_q().inverse() n_q = self.product(abs_q_inv).product(QH([n, 0, 0, 0])) n_q.qtype = end_qtype n_q.representation = self.representation return n_q def abs_of_vector(self, qtype="|V( )|"): """The absolute value of the vector, the square root of the norm_squared of the vector.""" end_qtype = "|V({})|".format(self.qtype) av = self.norm_squared_of_vector(qtype=end_qtype) sqrt_t = av.t ** (1/2) av.t = sqrt_t av.representation = self.representation return av def add(self, qh_1, qtype=""): """Form a add given 2 quaternions.""" self.check_representations(qh_1) end_qtype = "{f}+{s}".format(f=self.qtype, s=qh_1.qtype) t_1, x_1, y_1, z_1 = self.t, self.x, self.y, self.z t_2, x_2, y_2, z_2 = qh_1.t, qh_1.x, qh_1.y, qh_1.z add_q = QH(qtype=end_qtype, representation=self.representation) add_q.t = t_1 + t_2 add_q.x = x_1 + x_2 add_q.y = y_1 + y_2 add_q.z = z_1 + z_2 return add_q def dif(self, qh_1, qtype=""): """Form a add given 2 quaternions.""" self.check_representations(qh_1) end_qtype = "{f}-{s}".format(f=self.qtype, s=qh_1.qtype) t_1, x_1, y_1, z_1 = self.t, self.x, self.y, self.z t_2, x_2, y_2, z_2 = qh_1.t, qh_1.x, qh_1.y, qh_1.z dif_q = QH(qtype=end_qtype, representation=self.representation) dif_q.t = t_1 - t_2 dif_q.x = x_1 - x_2 dif_q.y = y_1 - y_2 dif_q.z = z_1 - z_2 return dif_q def product(self, q1, kind="", reverse=False, qtype=""): """Form a product given 2 quaternions. Kind can be '' aka standard, even, odd, or even_minus_odd. 
Setting reverse=True is like changing the order.""" self.check_representations(q1) commuting = self._commuting_products(q1) q_even = QH() q_even.t = commuting['tt'] - commuting['xx+yy+zz'] q_even.x = commuting['tx+xt'] q_even.y = commuting['ty+yt'] q_even.z = commuting['tz+zt'] anti_commuting = self._anti_commuting_products(q1) q_odd = QH() if reverse: q_odd.x = anti_commuting['zy-yz'] q_odd.y = anti_commuting['xz-zx'] q_odd.z = anti_commuting['yx-xy'] else: q_odd.x = anti_commuting['yz-zy'] q_odd.y = anti_commuting['zx-xz'] q_odd.z = anti_commuting['xy-yx'] if kind == "": result = q_even.add(q_odd) times_symbol = "x" elif kind.lower() == "even": result = q_even times_symbol = "xE" elif kind.lower() == "odd": result = q_odd times_symbol = "xO" elif kind.lower() == "even_minus_odd": result = q_even.dif(q_odd) times_symbol = "xE-O" else: raise Exception("Four 'kind' values are known: '', 'even', 'odd', and 'even_minus_odd'.") if reverse: times_symbol = times_symbol.replace('x', 'xR') if qtype: result.qtype = qtype else: result.qtype = "{f}{ts}{s}".format(f=self.qtype, ts=times_symbol, s=q1.qtype) result.representation = self.representation return result def Euclidean_product(self, q1, kind="", reverse=False, qtype=""): """Form a product p* q given 2 quaternions, not associative.""" self.check_representations(q1) pq = QH(qtype, representation=self.representation) pq = self.conj().product(q1, kind, reverse) return pq def inverse(self, qtype="^-1", additive=False): """The additive or multiplicative inverse of a quaternion.""" if additive: end_qtype = "-{}".format(self.qtype, qtype) q_inv = self.flip_signs() q_inv.qtype = end_qtype else: end_qtype = "{}{}".format(self.qtype, qtype) q_conj = self.conj() q_norm_squared = self.norm_squared() if (not self.is_symbolic()) and (q_norm_squared.t == 0): return self.q_0() q_norm_squared_inv = QH([1.0 / q_norm_squared.t, 0, 0, 0]) q_inv = q_conj.product(q_norm_squared_inv) q_inv.qtype = end_qtype q_inv.representation = self.representation return q_inv def divide_by(self, q1, qtype=""): """Divide one quaternion by another. The order matters unless one is using a norm_squared (real number).""" self.check_representations(q1) end_qtype = "{f}/{s}".format(f=self.qtype, s=q1.qtype) q1_inv = q1.inverse() q_div = self.product(q1.inverse()) q_div.qtype = end_qtype q_div.representation = self.representation return q_div def triple_product(self, q1, q2): """Form a triple product given 3 quaternions.""" self.check_representations(q1) self.check_representations(q2) triple = self.product(q1).product(q2) triple.representation = self.representation return triple # Quaternion rotation involves a triple product: u R 1/u def rotate(self, u, qtype="rot"): """Do a rotation using a triple product: u R 1/u.""" end_qtype = "{}{}".format(self.qtype, qtype) u_abs = u.abs_of_q() u_norm_squaredalized = u.divide_by(u_abs) q_rot = u_norm_squaredalized.triple_product(self, u_norm_squaredalized.conj()) q_rot.qtype = end_qtype q_rot.representation = self.representation return q_rot # A boost also uses triple products like a rotation, but more of them. # This is not a well-known result, but does work. 
# b -> b' = h b h* + 1/2 ((hhb)* -(h*h*b)*) # where h is of the form (cosh(a), sinh(a)) OR (0, a, b, c) def boost(self, h, qtype="boost"): """A boost or rotation or both.""" end_qtype = "{}{}".format(self.qtype, qtype) boost = h b_conj = boost.conj() triple_1 = boost.triple_product(self, b_conj) triple_2 = boost.triple_product(boost, self).conj() triple_3 = b_conj.triple_product(b_conj, self).conj() triple_23 = triple_2.dif(triple_3) half_23 = triple_23.product(QH([0.5, 0, 0, 0])) triple_123 = triple_1.add(half_23, qtype=end_qtype) triple_123.qtype = end_qtype triple_123.representation = self.representation return triple_123 # g_shift is a function based on the space-times-time invariance proposal for gravity, # which proposes that if one changes the distance from a gravitational source, then # squares a measurement, the observers at two different hieghts agree to their # space-times-time values, but not the intervals. # g_form is the form of the function, either minimal or exponential # Minimal is what is needed to pass all weak field tests of gravity def g_shift(self, dimensionless_g, g_form="exp", qtype="g_shift"): """Shift an observation based on a dimensionless GM/c^2 dR.""" end_qtype = "{}{}".format(self.qtype, qtype) if g_form == "exp": g_factor = sp.exp(dimensionless_g) elif g_form == "minimal": g_factor = 1 + 2 * dimensionless_g + 2 * dimensionless_g ** 2 else: print("g_form not defined, should be 'exp' or 'minimal': {}".format(g_form)) return self g_q = QH(qtype=end_qtype) g_q.t = self.t / g_factor g_q.x = self.x * g_factor g_q.y = self.y * g_factor g_q.z = self.z * g_factor g_q.qtype = end_qtype g_q.representation = self.representation return g_q def sin(self, qtype="sin"): """Take the sine of a quaternion, (sin(t) cosh(|R|), cos(t) sinh(|R|) R/|R|)""" end_qtype = "sin({sq})".format(sq=self.qtype) abs_v = self.abs_of_vector() if abs_v.t == 0: return QH([math.sin(self.t), 0, 0, 0], qtype=end_qtype) sint = math.sin(self.t) cost = math.cos(self.t) sinhR = math.sinh(abs_v.t) coshR = math.cosh(abs_v.t) k = cost * sinhR / abs_v.t q_out = QH() q_out.t = sint * coshR q_out.x = k * self.x q_out.y = k * self.y q_out.z = k * self.z q_out.qtype = end_qtype q_out.representation = self.representation return q_out def cos(self, qtype="sin"): """Take the cosine of a quaternion, (cos(t) cosh(|R|), sin(t) sinh(|R|) R/|R|)""" end_qtype = "cos({sq})".format(sq=self.qtype) abs_v = self.abs_of_vector() if abs_v.t == 0: return QH([math.cos(self.t), 0, 0, 0], qtype=end_qtype) sint = math.sin(self.t) cost = math.cos(self.t) sinhR = math.sinh(abs_v.t) coshR = math.cosh(abs_v.t) k = -1 * sint * sinhR / abs_v.t q_out = QH() q_out.t = cost * coshR q_out.x = k * self.x q_out.y = k * self.y q_out.z = k * self.z q_out.qtype = end_qtype q_out.representation = self.representation return q_out def tan(self, qtype="sin"): """Take the tan of a quaternion, sin/cos""" end_qtype = "tan({sq})".format(sq=self.qtype) abs_v = self.abs_of_vector() if abs_v.t == 0: return QH([math.tan(self.t), 0, 0, 0], qtype=end_qtype) sinq = self.sin() cosq = self.cos() q_out = sinq.divide_by(cosq) q_out.qtype = end_qtype q_out.representation = self.representation return q_out def sinh(self, qtype="sinh"): """Take the sinh of a quaternion, (sinh(t) cos(|R|), cosh(t) sin(|R|) R/|R|)""" end_qtype = "sinh({sq})".format(sq=self.qtype) abs_v = self.abs_of_vector() if abs_v.t == 0: return QH([math.sinh(self.t), 0, 0, 0], qtype=end_qtype) sinht = math.sinh(self.t) cosht = math.cosh(self.t) sinR = math.sin(abs_v.t) cosR = math.cos(abs_v.t) k = 
cosht * sinR / abs_v.t q_out = QH(qtype=end_qtype, representation=self.representation) q_out.t = sinht * cosR q_out.x = k * self.x q_out.y = k * self.y q_out.z = k * self.z return q_out def cosh(self, qtype="sin"): """Take the cosh of a quaternion, (cosh(t) cos(|R|), sinh(t) sin(|R|) R/|R|)""" end_qtype = "cosh({sq})".format(sq=self.qtype) abs_v = self.abs_of_vector() if abs_v.t == 0: return QH([math.cosh(self.t), 0, 0, 0], qtype=end_qtype) sinht = math.sinh(self.t) cosht = math.cosh(self.t) sinR = math.sin(abs_v.t) cosR = math.cos(abs_v.t) k = sinht * sinR / abs_v.t q_out = QH(qtype=end_qtype, representation=self.representation) q_out.t = cosht * cosR q_out.x = k * self.x q_out.y = k * self.y q_out.z = k * self.z return q_out def tanh(self, qtype="tanh"): """Take the tanh of a quaternion, sin/cos""" end_qtype = "tanh({sq})".format(sq=self.qtype) abs_v = self.abs_of_vector() if abs_v.t == 0: return QH([math.tanh(self.t), 0, 0, 0], qtype=end_qtype) sinhq = self.sinh() coshq = self.cosh() q_out = sinhq.divide_by(coshq) q_out.qtype = end_qtype q_out.representation = self.representation return q_out def exp(self, qtype="exp"): """Take the exponential of a quaternion.""" # exp(q) = (exp(t) cos(|R|, exp(t) sin(|R|) R/|R|) end_qtype = "exp({st})".format(st=self.qtype) abs_v = self.abs_of_vector() et = math.exp(self.t) if (abs_v.t == 0): return QH([et, 0, 0, 0], qtype=end_qtype) cosR = math.cos(abs_v.t) sinR = math.sin(abs_v.t) k = et * sinR / abs_v.t expq = QH([et * cosR, k * self.x, k * self.y, k * self.z], qtype=end_qtype, representation=self.representation) return expq def ln(self, qtype="ln"): """Take the natural log of a quaternion.""" # ln(q) = (0.5 ln t^2 + R.R, atan2(|R|, t) R/|R|) end_qtype = "ln({st})".format(st=self.qtype) abs_v = self.abs_of_vector() if (abs_v.t == 0): if self.t > 0: return(QH([math.log(self.t), 0, 0, 0], qtype=end_qtype)) else: # I don't understant this, but mathematica does the same thing. return(QH([math.log(-self.t), math.pi, 0, 0], qtype=end_type)) return QH([lt, 0, 0, 0]) t_value = 0.5 * math.log(self.t * self.t + abs_v.t * abs_v.t) k = math.atan2(abs_v.t, self.t) / abs_v.t expq = QH([t_value, k * self.x, k * self.y, k * self.z], qtype=end_qtype, representation=self.representation) return expq def q_2_q(self, q1, qtype="P"): """Take the natural log of a quaternion.""" # q^p = exp(ln(q) * p) self.check_representations(q1) end_qtype = "{st}^P".format(st=self.qtype) q2q = self.ln().product(q1).exp() q2q.qtype = end_qtype q2q.representation = self.representation return q2q def trunc(self): """Truncates values.""" self.t = math.trunc(self.t) self.x = math.trunc(self.x) self.y = math.trunc(self.y) self.z = math.trunc(self.z) return self ``` Write tests the QH class. 
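Before the formal unit tests, here is a minimal usage sketch of the `QH` class. The numeric values are purely illustrative, and it assumes the `QH` class defined above (including its printing support) is already in scope in the notebook.

```python
# Minimal usage sketch for QH (illustrative values only).
q_a = QH([1, -2, -3, -4], qtype="a")
q_b = QH([0, 4, -3, 0], qtype="b")

# The Hamilton product is not commutative: a x b != b x a in general.
print(q_a.product(q_b))
print(q_b.product(q_a))

# The conjugate flips the signs of the 3-vector part.
print(q_a.conj())

# norm_squared() returns a real-valued quaternion (t, 0, 0, 0).
print(q_a.norm_squared())
```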
```python if __name__ == "__main__": class TestQH(unittest.TestCase): """Class to make sure all the functions work as expected.""" Q = QH([1, -2, -3, -4], qtype="Q") P = QH([0, 4, -3, 0], qtype="P") R = QH([3, 0, 0, 0], qtype="R") C = QH([2, 4, 0, 0], qtype="C") t, x, y, z = sp.symbols("t x y z") q_sym = QH([t, x, y, x * y * z]) def test_qt(self): self.assertTrue(self.Q.t == 1) def test_subs(self): q_z = self.q_sym.subs({self.t:1, self.x:2, self.y:3, self.z:4}) print("t x y xyz sub 1 2 3 4: ", q_z) self.assertTrue(q_z.equals(QH([1, 2, 3, 24]))) def test_scalar(self): q_z = self.Q.scalar() print("scalar(q): ", q_z) self.assertTrue(q_z.t == 1) self.assertTrue(q_z.x == 0) self.assertTrue(q_z.y == 0) self.assertTrue(q_z.z == 0) def test_vector(self): q_z = self.Q.vector() print("vector(q): ", q_z) self.assertTrue(q_z.t == 0) self.assertTrue(q_z.x == -2) self.assertTrue(q_z.y == -3) self.assertTrue(q_z.z == -4) def test_xyz(self): q_z = self.Q.xyz() print("q.xyz()): ", q_z) self.assertTrue(q_z[0] == -2) self.assertTrue(q_z[1] == -3) self.assertTrue(q_z[2] == -4) def test_q_0(self): q_z = self.Q.q_0() print("q_0: ", q_z) self.assertTrue(q_z.t == 0) self.assertTrue(q_z.x == 0) self.assertTrue(q_z.y == 0) self.assertTrue(q_z.z == 0) def test_q_1(self): q_z = self.Q.q_1() print("q_1: ", q_z) self.assertTrue(q_z.t == 1) self.assertTrue(q_z.x == 0) self.assertTrue(q_z.y == 0) self.assertTrue(q_z.z == 0) def test_q_i(self): q_z = self.Q.q_i() print("q_i: ", q_z) self.assertTrue(q_z.t == 0) self.assertTrue(q_z.x == 1) self.assertTrue(q_z.y == 0) self.assertTrue(q_z.z == 0) def test_q_j(self): q_z = self.Q.q_j() print("q_j: ", q_z) self.assertTrue(q_z.t == 0) self.assertTrue(q_z.x == 0) self.assertTrue(q_z.y == 1) self.assertTrue(q_z.z == 0) def test_q_k(self): q_z = self.Q.q_k() print("q_k: ", q_z) self.assertTrue(q_z.t == 0) self.assertTrue(q_z.x == 0) self.assertTrue(q_z.y == 0) self.assertTrue(q_z.z == 1) def test_q_random(self): q_z = QH().q_random() print("q_random():", q_z) self.assertTrue(q_z.t >= 0 and q_z.t <= 1) self.assertTrue(q_z.x >= 0 and q_z.x <= 1) self.assertTrue(q_z.y >= 0 and q_z.y <= 1) self.assertTrue(q_z.z >= 0 and q_z.z <= 1) def test_equals(self): self.assertTrue(self.Q.equals(self.Q)) self.assertFalse(self.Q.equals(self.P)) def test_conj_0(self): q_z = self.Q.conj() print("q_conj 0: ", q_z) self.assertTrue(q_z.t == 1) self.assertTrue(q_z.x == 2) self.assertTrue(q_z.y == 3) self.assertTrue(q_z.z == 4) def test_conj_1(self): q_z = self.Q.conj(1) print("q_conj 1: ", q_z) self.assertTrue(q_z.t == -1) self.assertTrue(q_z.x == -2) self.assertTrue(q_z.y == 3) self.assertTrue(q_z.z == 4) def test_conj_2(self): q_z = self.Q.conj(2) print("q_conj 2: ", q_z) self.assertTrue(q_z.t == -1) self.assertTrue(q_z.x == 2) self.assertTrue(q_z.y == -3) self.assertTrue(q_z.z == 4) def test_conj_q(self): q_z = self.Q.conj_q(self.Q) print("conj_q(conj_q): ", q_z) self.assertTrue(q_z.t == -1) self.assertTrue(q_z.x == 2) self.assertTrue(q_z.y == 3) self.assertTrue(q_z.z == -4) def sign_flips(self): q_z = self.Q.sign_flips() print("sign_flips: ", q_z) self.assertTrue(q_z.t == -1) self.assertTrue(q_z.x == 2) self.assertTrue(q_z.y == 3) self.assertTrue(q_z.z == 4) def test_vahlen_conj_minus(self): q_z = self.Q.vahlen_conj() print("q_vahlen_conj -: ", q_z) self.assertTrue(q_z.t == 1) self.assertTrue(q_z.x == 2) self.assertTrue(q_z.y == 3) self.assertTrue(q_z.z == 4) def test_vahlen_conj_star(self): q_z = self.Q.vahlen_conj('*') print("q_vahlen_conj *: ", q_z) self.assertTrue(q_z.t == 1) 
self.assertTrue(q_z.x == -2) self.assertTrue(q_z.y == -3) self.assertTrue(q_z.z == 4) def test_vahlen_conj_prime(self): q_z = self.Q.vahlen_conj("'") print("q_vahlen_conj ': ", q_z) self.assertTrue(q_z.t == 1) self.assertTrue(q_z.x == 2) self.assertTrue(q_z.y == 3) self.assertTrue(q_z.z == -4) def test_square(self): q_z = self.Q.square() print("square: ", q_z) self.assertTrue(q_z.t == -28) self.assertTrue(q_z.x == -4) self.assertTrue(q_z.y == -6) self.assertTrue(q_z.z == -8) def test_norm_squared(self): q_z = self.Q.norm_squared() print("norm_squared: ", q_z) self.assertTrue(q_z.t == 30) self.assertTrue(q_z.x == 0) self.assertTrue(q_z.y == 0) self.assertTrue(q_z.z == 0) def test_norm_squared_of_vector(self): q_z = self.Q.norm_squared_of_vector() print("norm_squared_of_vector: ", q_z) self.assertTrue(q_z.t == 29) self.assertTrue(q_z.x == 0) self.assertTrue(q_z.y == 0) self.assertTrue(q_z.z == 0) def test_abs_of_q(self): q_z = self.P.abs_of_q() print("abs_of_q: ", q_z) self.assertTrue(q_z.t == 5) self.assertTrue(q_z.x == 0) self.assertTrue(q_z.y == 0) self.assertTrue(q_z.z == 0) def test_normalize(self): q_z = self.P.normalize() print("q_normalized: ", q_z) self.assertTrue(q_z.t == 0) self.assertTrue(q_z.x == 0.8) self.assertAlmostEqual(q_z.y, -0.6) self.assertTrue(q_z.z == 0) def test_abs_of_vector(self): q_z = self.P.abs_of_vector() print("abs_of_vector: ", q_z) self.assertTrue(q_z.t == 5) self.assertTrue(q_z.x == 0) self.assertTrue(q_z.y == 0) self.assertTrue(q_z.z == 0) def test_add(self): q_z = self.Q.add(self.P) print("add: ", q_z) self.assertTrue(q_z.t == 1) self.assertTrue(q_z.x == 2) self.assertTrue(q_z.y == -6) self.assertTrue(q_z.z == -4) def test_dif(self): q_z = self.Q.dif(self.P) print("dif: ", q_z) self.assertTrue(q_z.t == 1) self.assertTrue(q_z.x == -6) self.assertTrue(q_z.y == 0) self.assertTrue(q_z.z == -4) def test_product(self): q_z = self.Q.product(self.P) print("product: ", q_z) self.assertTrue(q_z.t == -1) self.assertTrue(q_z.x == -8) self.assertTrue(q_z.y == -19) self.assertTrue(q_z.z == 18) def test_product_even(self): q_z = self.Q.product(self.P, kind="even") print("product, kind even: ", q_z) self.assertTrue(q_z.t == -1) self.assertTrue(q_z.x == 4) self.assertTrue(q_z.y == -3) self.assertTrue(q_z.z == 0) def test_product_odd(self): q_z = self.Q.product(self.P, kind="odd") print("product, kind odd: ", q_z) self.assertTrue(q_z.t == 0) self.assertTrue(q_z.x == -12) self.assertTrue(q_z.y == -16) self.assertTrue(q_z.z == 18) def test_product_even_minus_odd(self): q_z = self.Q.product(self.P, kind="even_minus_odd") print("product, kind even_minus_odd: ", q_z) self.assertTrue(q_z.t == -1) self.assertTrue(q_z.x == 16) self.assertTrue(q_z.y == 13) self.assertTrue(q_z.z == -18) def test_product_reverse(self): q1q2_rev = self.Q.product(self.P, reverse=True) q2q1 = self.P.product(self.Q) self.assertTrue(q1q2_rev.equals(q2q1)) def test_Euclidean_product(self): q_z = self.Q.Euclidean_product(self.P) print("Euclidean product: ", q_z) self.assertTrue(q_z.t == 1) self.assertTrue(q_z.x == 16) self.assertTrue(q_z.y == 13) self.assertTrue(q_z.z == -18) def test_inverse(self): q_z = self.P.inverse() print("inverse: ", q_z) self.assertTrue(q_z.t == 0) self.assertTrue(q_z.x == -0.16) self.assertTrue(q_z.y == 0.12) self.assertTrue(q_z.z == 0) def test_divide_by(self): q_z = self.Q.divide_by(self.Q) print("divide_by: ", q_z) self.assertTrue(q_z.t == 1) self.assertTrue(q_z.x == 0) self.assertTrue(q_z.y == 0) self.assertTrue(q_z.z == 0) def test_triple_product(self): q_z = 
self.Q.triple_product(self.P, self.Q) print("triple product: ", q_z) self.assertTrue(q_z.t == -2) self.assertTrue(q_z.x == 124) self.assertTrue(q_z.y == -84) self.assertTrue(q_z.z == 8) def test_rotate(self): q_z = self.Q.rotate(QH([0, 1, 0, 0])) print("rotate: ", q_z) self.assertTrue(q_z.t == 1) self.assertTrue(q_z.x == -2) self.assertTrue(q_z.y == 3) self.assertTrue(q_z.z == 4) def test_boost(self): q1_sq = self.Q.square() h = QH(sr_gamma_betas(0.003)) q_z = self.Q.boost(h) q_z2 = q_z.square() print("q1_sq: ", q1_sq) print("boosted: ", q_z) print("boosted squared: ", q_z2) self.assertTrue(round(q_z2.t, 5) == round(q1_sq.t, 5)) def test_g_shift(self): q1_sq = self.Q.square() q_z = self.Q.g_shift(0.003) q_z2 = q_z.square() q_z_minimal = self.Q.g_shift(0.003, g_form="minimal") q_z2_minimal = q_z_minimal.square() print("q1_sq: ", q1_sq) print("g_shift: ", q_z) print("g squared: ", q_z2) self.assertTrue(q_z2.t != q1_sq.t) self.assertTrue(q_z2.x == q1_sq.x) self.assertTrue(q_z2.y == q1_sq.y) self.assertTrue(q_z2.z == q1_sq.z) self.assertTrue(q_z2_minimal.t != q1_sq.t) self.assertTrue(q_z2_minimal.x == q1_sq.x) self.assertTrue(q_z2_minimal.y == q1_sq.y) self.assertTrue(q_z2_minimal.z == q1_sq.z) def test_sin(self): self.assertTrue(QH([0, 0, 0, 0]).sin().equals(QH().q_0())) self.assertTrue(self.Q.sin().equals(QH([91.7837157840346691, -21.8864868530291758, -32.8297302795437673, -43.7729737060583517]))) self.assertTrue(self.P.sin().equals(QH([0, 59.3625684622310033, -44.5219263466732542, 0]))) self.assertTrue(self.R.sin().equals(QH([0.1411200080598672, 0, 0, 0]))) self.assertTrue(self.C.sin().equals(QH([24.8313058489463785, -11.3566127112181743, 0, 0]))) def test_cos(self): self.assertTrue(QH([0, 0, 0, 0]).cos().equals(QH().q_1())) self.assertTrue(self.Q.cos().equals(QH([58.9336461679439481, 34.0861836904655959, 51.1292755356983974, 68.1723673809311919]))) self.assertTrue(self.P.cos().equals(QH([74.2099485247878476, 0, 0, 0]))) self.assertTrue(self.R.cos().equals(QH([-0.9899924966004454, 0, 0, 0]))) self.assertTrue(self.C.cos().equals(QH([-11.3642347064010600, -24.8146514856341867, 0, 0]))) def test_tan(self): self.assertTrue(QH([0, 0, 0, 0]).tan().equals(QH().q_0())) self.assertTrue(self.Q.tan().equals(QH([0.0000382163172501, -0.3713971716439372, -0.5570957574659058, -0.7427943432878743]))) self.assertTrue(self.P.tan().equals(QH([0, 0.7999273634100760, -0.5999455225575570, 0]))) self.assertTrue(self.R.tan().equals(QH([-0.1425465430742778, 0, 0, 0]))) self.assertTrue(self.C.tan().equals(QH([-0.0005079806234700, 1.0004385132020521, 0, 0]))) def test_sinh(self): self.assertTrue(QH([0, 0, 0, 0]).sinh().equals(QH().q_0())) self.assertTrue(self.Q.sinh().equals(QH([0.7323376060463428, 0.4482074499805421, 0.6723111749708131, 0.8964148999610841]))) self.assertTrue(self.P.sinh().equals(QH([0, -0.7671394197305108, 0.5753545647978831, 0]))) self.assertTrue(self.R.sinh().equals(QH([10.0178749274099026, 0, 0, 0]))) self.assertTrue(self.C.sinh().equals(QH([-2.3706741693520015, -2.8472390868488278, 0, 0]))) def test_cosh(self): self.assertTrue(QH([0, 0, 0, 0]).cosh().equals(QH().q_1())) self.assertTrue(self.Q.cosh().equals(QH([0.9615851176369565, 0.3413521745610167, 0.5120282618415251, 0.6827043491220334]))) self.assertTrue(self.P.cosh().equals(QH([0.2836621854632263, 0, 0, 0]))) self.assertTrue(self.R.cosh().equals(QH([10.0676619957777653, 0, 0, 0]))) self.assertTrue(self.C.cosh().equals(QH([-2.4591352139173837, -2.7448170067921538, 0, 0]))) def test_tanh(self): self.assertTrue(QH([0, 0, 0, 
0]).tanh().equals(QH().q_0())) self.assertTrue(self.Q.tanh().equals(QH([1.0248695360556623, 0.1022956817887642, 0.1534435226831462, 0.2045913635775283]))) self.assertTrue(self.P.tanh().equals(QH([0, -2.7044120049972684, 2.0283090037479505, 0]))) self.assertTrue(self.R.tanh().equals(QH([0.9950547536867305, 0, 0, 0]))) self.assertTrue(self.C.tanh().equals(QH([1.0046823121902353, 0.0364233692474038, 0, 0]))) def test_exp(self): self.assertTrue(QH([0, 0, 0, 0]).exp().equals(QH().q_1())) self.assertTrue(self.Q.exp().equals(QH([1.6939227236832994, 0.7895596245415588, 1.1843394368123383, 1.5791192490831176]))) self.assertTrue(self.P.exp().equals(QH([0.2836621854632263, -0.7671394197305108, 0.5753545647978831, 0]))) self.assertTrue(self.R.exp().equals(QH([20.0855369231876679, 0, 0, 0]))) self.assertTrue(self.C.exp().equals(QH([-4.8298093832693851, -5.5920560936409816, 0, 0]))) def test_ln(self): self.assertTrue(self.Q.ln().exp().equals(self.Q)) self.assertTrue(self.Q.ln().equals(QH([1.7005986908310777, -0.5151902926640850, -0.7727854389961275, -1.0303805853281700]))) self.assertTrue(self.P.ln().equals(QH([1.6094379124341003, 1.2566370614359172, -0.9424777960769379, 0]))) self.assertTrue(self.R.ln().equals(QH([1.0986122886681098, 0, 0, 0]))) self.assertTrue(self.C.ln().equals(QH([1.4978661367769954, 1.1071487177940904, 0, 0]))) def test_q_2_q(self): self.assertTrue(self.Q.q_2_q(self.P).equals(QH([-0.0197219653530713, -0.2613955437374326, 0.6496281248064009, -0.3265786562423951]))) suite = unittest.TestLoader().loadTestsFromModule(TestQH()) _results = unittest.TextTestRunner().run(suite); ``` ```python if __name__ == "__main__": class TestQHRep(unittest.TestCase): Q12 = QH([1, 2, 0, 0]) Q1123 = QH([1, 1, 2, 3]) Q11p = QH([1, 1, 0, 0], representation="polar") Q12p = QH([1, 2, 0, 0], representation="polar") Q12np = QH([1, -2, 0, 0], representation="polar") Q21p = QH([2, 1, 0, 0], representation="polar") Q23p = QH([2, 3, 0, 0], representation="polar") Q13p = QH([1, 3, 0, 0], representation="polar") Q5p = QH([5, 0, 0, 0], representation="polar") def test_txyz_2_representation(self): qr = QH(self.Q12.txyz_2_representation("")) self.assertTrue(qr.equals(self.Q12)) qr = QH(self.Q12.txyz_2_representation("polar")) self.assertTrue(qr.equals(QH([2.23606797749979, 1.10714871779409, 0, 0]))) qr = QH(self.Q1123.txyz_2_representation("spherical")) self.assertTrue(qr.equals(QH([1.0, 3.7416573867739413, 0.640522312679424, 1.10714871779409]))) def test_representation_2_txyz(self): qr = QH(self.Q12.representation_2_txyz("")) self.assertTrue(qr.equals(self.Q12)) qr = QH(self.Q12.representation_2_txyz("polar")) self.assertTrue(qr.equals(QH([-0.4161468365471424, 0.9092974268256817, 0, 0]))) qr = QH(self.Q1123.representation_2_txyz("spherical")) self.assertTrue(qr.equals(QH([1.0, -0.9001976297355174, 0.12832006020245673, -0.4161468365471424]))) def test_polar_products(self): qr = self.Q11p.product(self.Q12p) print("polar 1 1 0 0 * 1 2 0 0: ", qr) self.assertTrue(qr.equals(self.Q13p)) qr = self.Q12p.product(self.Q21p) print("polar 1 2 0 0 * 2 1 0 0: ", qr) self.assertTrue(qr.equals(self.Q23p)) def test_polar_conj(self): qr = self.Q12p.conj() print("polar conj of 1 2 0 0: ", qr) self.assertTrue(qr.equals(self.Q12np)) suite = unittest.TestLoader().loadTestsFromModule(TestQHRep()) _results = unittest.TextTestRunner().run(suite); ``` ## QHStates - n quaternions that are a semi-group with inverses Any quaternion can be viewed as the sum of n other quaternions. 
This is common to see in quantum mechanics, whose needs are driving the development of this class and its methods. ```python class QHStates(QH): """A class made up of many quaternions.""" QS_TYPES = ["scalar", "bra", "ket", "op", "operator"] def __init__(self, qs=None, qs_type="ket", rows=0, columns=0): self.qs = qs self.qs_type = qs_type self.rows = rows self.columns = columns self.qtype = "" if qs_type not in self.QS_TYPES: print("Oops, only know of these quaternion series types: {}".format(self.QS_TYPES)) return None if qs is None: self.d, self.dim, self.dimensions = 0, 0, 0 else: self.d, self.dim, self.dimensions = int(len(qs)), int(len(qs)), int(len(qs)) self.set_qs_type(qs_type, rows, columns, copy=False) def set_qs_type(self, qs_type="", rows=0, columns=0, copy=True): """Set the qs_type to something sensible.""" # Checks. if (rows) and (columns) and rows * columns != self.dim: print("Oops, check those values again for rows:{} columns:{} dim:{}".format( rows, columns, self.dim)) self.qs, self.rows, self.columns = None, 0, 0 return None new_q = self if copy: new_q = deepcopy(self) # Assign values if need be. if new_q.qs_type != qs_type: new_q.rows = 0 if qs_type == "ket" and not new_q.rows: new_q.rows = new_q.dim new_q.columns = 1 elif qs_type == "bra" and not new_q.rows: new_q.rows = 1 new_q.columns = new_q.dim elif qs_type in ["op", "operator"] and not new_q.rows: # Square series root_dim = math.sqrt(new_q.dim) if root_dim.is_integer(): new_q.rows = int(root_dim) new_q.columns = int(root_dim) qs_type = "op" elif rows * columns == new_q.dim and not new_q.qs_type: if new_q.dim == 1: qs_type = "scalar" elif new_q.rows == 1: qs_type = "bra" elif new_q.columns == 1: qs_type = "ket" else: qs_type = "op" if not qs_type: print("Oops, please set rows and columns for this quaternion series operator. Thanks.") return None if new_q.dim == 1: qs_type = "scalar" new_q.qs_type = qs_type return new_q def bra(self): """Quickly set the qs_type to bra by calling set_qs_type().""" if self.qs_type == "bra": return self bra = deepcopy(self).conj() bra.rows = 1 bra.columns = self.dim if self.dim > 1: bra.qs_type = "bra" return bra def ket(self): """Quickly set the qs_type to ket by calling set_qs_type().""" if self.qs_type == "ket": return self ket = deepcopy(self).conj() ket.rows = self.dim ket.columns = 1 if self.dim > 1: ket.qs_type = "ket" return ket def op(self, rows, columns): """Quickly set the qs_type to op by calling set_qs_type().""" if rows * columns != self.dim: print("Oops, rows * columns != dim: {} * {}, {}".formaat(rows, columns, self.dim)) return None op_q = deepcopy(self) op_q.rows = rows op_q.columns = columns if self.dim > 1: op_q.qs_type = "op" return op_q def __str__(self, quiet=False): """Print out all the states.""" states = '' for n, q in enumerate(self.qs, start=1): states = states + "n={}: {}\n".format(n, q.__str__(quiet)) return states.rstrip() def print_state(self, label, spacer=True, quiet=True, sum=False): """Utility for printing states as a quaternion series.""" print(label) # Warn if empty. 
if self.qs is None or len(self.qs) == 0: print("Oops, no quaternions in the series.") return for n, q in enumerate(self.qs): print("n={}: {}".format(n + 1, q.__str__(quiet))) if sum: print("sum= {ss}".format(ss=self.summation())) print("{t}: {r}/{c}".format(t=self.qs_type, r=self.rows, c=self.columns)) if spacer: print("") def equals(self, q1): """Test if two states are equal.""" if self.dim != q1.dim: return False result = True for selfq, q1q in zip(self.qs, q1.qs): if not selfq.equals(q1q): result = False return result def conj(self, conj_type=0): """Take the conjgates of states, default is zero, but also can do 1 or 2.""" new_states = [] for ket in self.qs: new_states.append(ket.conj(conj_type)) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def conj_q(self, q1): """Does multicate conjugate operators.""" new_states = [] for ket in self.qs: new_states.append(ket.conj_q(q1)) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def display_q(self, label): """Try to display algebra in a pretty way.""" if label: print(label) for i, ket in enumerate(self.qs, start=1): print(f"n={i}") ket.display_q() print("") def simple_q(self): """Simplify the states.""" new_states = [] for ket in self.qs: new_states.append(ket.simple_q()) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def subs(self, symbol_value_dict, qtype="scalar"): """Substitutes values into .""" new_states = [] for ket in self.qs: new_states.append(ket.subs(symbol_value_dict)) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def scalar(self, qtype="scalar"): """Returns the scalar part of a quaternion.""" new_states = [] for ket in self.qs: new_states.append(ket.scalar()) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def vector(self, qtype="v"): """Returns the vector part of a quaternion.""" new_states = [] for ket in self.qs: new_states.append(ket.vector()) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def xyz(self): """Returns the vector as an np.array.""" new_states = [] for ket in self.qs: new_states.append(ket.xyz()) return new_states def flip_signs(self): """Flip signs of all states.""" new_states = [] for ket in self.qs: new_states.append(ket.flip_signs()) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def inverse(self, additive=False): """Inverseing bras and kets calls inverse() once for each. 
Inverseing operators is more tricky as one needs a diagonal identity matrix.""" if self.qs_type in ["op", "operator"]: if additive: q_flip = self.inverse(additive=True) q_inv = q_flip.diagonal(self.dim) else: if self.dim == 1: q_inv =QHStates(self.qs[0].inverse()) elif self.qs_type in ["bra", "ket"]: new_qs = [] for q in self.qs: new_qs.append(q.inverse()) q_inv = QHStates(new_qs, qs_type=self.qs_type, rows=self.rows, columns=self.columns) elif self.dim == 4: det = self.determinant() detinv = det.inverse() q0 = self.qs[3].product(detinv) q1 = self.qs[1].flip_signs().product(detinv) q2 = self.qs[2].flip_signs().product(detinv) q3 = self.qs[0].product(detinv) q_inv =QHStates([q0, q1, q2, q3], qs_type=self.qs_type, rows=self.rows, columns=self.columns) elif self.dim == 9: det = self.determinant() detinv = det.inverse() q0 = self.qs[4].product(self.qs[8]).dif(self.qs[5].product(self.qs[7])).product(detinv) q1 = self.qs[7].product(self.qs[2]).dif(self.qs[8].product(self.qs[1])).product(detinv) q2 = self.qs[1].product(self.qs[5]).dif(self.qs[2].product(self.qs[4])).product(detinv) q3 = self.qs[6].product(self.qs[5]).dif(self.qs[8].product(self.qs[3])).product(detinv) q4 = self.qs[0].product(self.qs[8]).dif(self.qs[2].product(self.qs[6])).product(detinv) q5 = self.qs[3].product(self.qs[2]).dif(self.qs[5].product(self.qs[0])).product(detinv) q6 = self.qs[3].product(self.qs[7]).dif(self.qs[4].product(self.qs[6])).product(detinv) q7 = self.qs[6].product(self.qs[1]).dif(self.qs[7].product(self.qs[0])).product(detinv) q8 = self.qs[0].product(self.qs[4]).dif(self.qs[1].product(self.qs[3])).product(detinv) q_inv =QHStates([q0, q1, q2, q3, q4, q5, q6, q7, q8], qs_type=self.qs_type, rows=self.rows, columns=self.columns) else: print("Oops, don't know how to inverse.") q_inv =QHStates([QH().q_0()]) else: new_states = [] for bra in self.qs: new_states.append(bra.inverse(additive=additive)) q_inv =QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) return q_inv def norm(self): """Norm of states.""" new_states = [] for bra in self.qs: new_states.append(bra.norm()) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def normalize(self, n=1, states=None): """Normalize all states.""" new_states = [] zero_norm_count = 0 for bra in self.qs: if bra.norm_squared().t == 0: zero_norm_count += 1 new_states.append(QH().q_0()) else: new_states.append(bra.normalize(n)) new_states_normalized = [] non_zero_states = self.dim - zero_norm_count for new_state in new_states: new_states_normalized.append(new_state.product(QH([math.sqrt(1/non_zero_states), 0, 0, 0]))) return QHStates(new_states_normalized, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def orthonormalize(self): """Given a quaternion series, resturn a normalized orthoganl basis.""" last_q = self.qs.pop(0).normalize(math.sqrt(1/self.dim)) orthonormal_qs = [last_q] for q in self.qs: qp = q.Euclidean_product(last_q) orthonormal_q = q.dif(qp).normalize(math.sqrt(1/self.dim)) orthonormal_qs.append(orthonormal_q) last_q = orthonormal_q return QHStates(orthonormal_qs, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def determinant(self): """Calculate the determinant of a 'square' quaternion series.""" if self.dim == 1: q_det = self.qs[0] elif self.dim == 4: ad =self.qs[0].product(self.qs[3]) bc = self.qs[1].product(self.qs[2]) q_det = ad.dif(bc) elif self.dim == 9: aei = self.qs[0].product(self.qs[4].product(self.qs[8])) bfg = self.qs[3].product(self.qs[7].product(self.qs[2])) cdh = 
self.qs[6].product(self.qs[1].product(self.qs[5])) ceg = self.qs[6].product(self.qs[4].product(self.qs[2])) bdi = self.qs[3].product(self.qs[1].product(self.qs[8])) afh = self.qs[0].product(self.qs[7].product(self.qs[5])) sum_pos = aei.add(bfg.add(cdh)) sum_neg = ceg.add(bdi.add(afh)) q_det = sum_pos.dif(sum_neg) else: print("Oops, don't know how to calculate the determinant of this one.") return None return q_det def add(self, ket): """Add two states.""" if ((self.rows != ket.rows) or (self.columns != ket.columns)): print("Oops, can only add if rows and columns are the same.") print("rows are: {}/{}, columns are: {}/{}".format(self.rows, ket.rows, self.columns, ket.columns)) return None new_states = [] for bra, ket in zip(self.qs, ket.qs): new_states.append(bra.add(ket)) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def summation(self): """Add them all up, return one quaternion.""" result = None for q in self.qs: if result == None: result = q else: result = result.add(q) return result def dif(self, ket): """Take the difference of two states.""" new_states = [] for bra, ket in zip(self.qs, ket.qs): new_states.append(bra.dif(ket)) return(QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns)) def diagonal(self, dim): """Make a state dim*dim with q or qs along the 'diagonal'. Always returns an operator.""" diagonal = [] if len(self.qs) == 1: q_values = [self.qs[0]] * dim elif len(self.qs) == dim: q_values = self.qs elif self.qs is None: print("Oops, the qs here is None.") return None else: print("Oops, need the length to be equal to the dimensions.") return None for i in range(dim): for j in range(dim): if i == j: diagonal.append(q_values.pop(0)) else: diagonal.append(QH().q_0()) return QHStates(diagonal, qs_type="op", rows=dim, columns=dim) def trace(self): """Return the trace as a scalar quaternion series.""" if self.rows != self.columns: print("Oops, not a square quaternion series.") return None else: trace = self.qs[0] for i in range(1, self.rows): trace = trace.add(self.qs[i * (self.rows + 1)]) return QHStates([trace]) @staticmethod def identity(dim, operator=False, additive=False, non_zeroes=None, qs_type="ket"): """Identity operator for states or operators which are diagonal.""" if additive: id_q = [QH().q_0() for i in range(dim)] elif non_zeroes is not None: id_q = [] if len(non_zeroes) != dim: print("Oops, len(non_zeroes)={nz}, should be: {d}".format(nz=len(non_zeroes), d=dim)) return QHStates([QH().q_0()]) else: for non_zero in non_zeroes: if non_zero: id_q.append(QH().q_1()) else: id_q.append(QH().q_0()) else: id_q = [QH().q_1() for i in range(dim)] if operator: q_1 = QHStates(id_q) ident = QHStates.diagonal(q_1, dim) else: ident = QHStates(id_q, qs_type=qs_type) return ident def product(self, q1, kind="", reverse=False): """Forms the quaternion product for each state.""" self_copy = deepcopy(self) q1_copy = deepcopy(q1) # Diagonalize if need be. if ((self.rows == q1.rows) and (self.columns == q1.columns)) or \ ("scalar" in [self.qs_type, q1.qs_type]): if self.columns == 1: qs_right = q1_copy qs_left = self_copy.diagonal(qs_right.rows) elif q1.rows == 1: qs_left = self_copy qs_right = q1_copy.diagonal(qs_left.columns) else: qs_left = self_copy qs_right = q1_copy # Typical matrix multiplication criteria. 
elif self.columns == q1.rows: qs_left = self_copy qs_right = q1_copy else: print("Oops, cannot multiply series with row/column dimensions of {}/{} to {}/{}".format( self.rows, self.columns, q1.rows, q1.columns)) return None # Operator products need to be transposed. operator_flag = False if qs_left in ['op', 'operator'] and qs_right in ['op', 'operator']: operator_flag = True outer_row_max = qs_left.rows outer_column_max = qs_right.columns shared_inner_max = qs_left.columns projector_flag = (shared_inner_max == 1) and (outer_row_max > 1) and (outer_column_max > 1) result = [[QH().q_0(qtype='') for i in range(outer_column_max)] for j in range(outer_row_max)] for outer_row in range(outer_row_max): for outer_column in range(outer_column_max): for shared_inner in range(shared_inner_max): # For projection operators. left_index = outer_row right_index = outer_column if outer_row_max >= 1 and shared_inner_max > 1: left_index = outer_row + shared_inner * outer_row_max if outer_column_max >= 1 and shared_inner_max > 1: right_index = shared_inner + outer_column * shared_inner_max result[outer_row][outer_column] = result[outer_row][outer_column].add( qs_left.qs[left_index].product( qs_right.qs[right_index], kind=kind, reverse=reverse)) # Flatten the list. new_qs = [item for sublist in result for item in sublist] new_states = QHStates(new_qs, rows=outer_row_max, columns=outer_column_max) if projector_flag or operator_flag: return new_states.transpose() else: return new_states def Euclidean_product(self, q1, kind="", reverse=False): """Forms the Euclidean product, what is used in QM all the time.""" return self.conj().product(q1, kind, reverse) @staticmethod def bracket(bra, op, ket): """Forms <bra|op|ket>. Note: if fed 2 kets, will take a conjugate.""" flip = 0 if bra.qs_type == 'ket': bra = bra.bra() flip += 1 if ket.qs_type == 'bra': ket = ket.ket() flip += 1 if flip == 1: print("fed 2 bras or kets, took a conjugate. Double check.") b = bra.product(op).product(ket) return b @staticmethod def braket(bra, ket): """Forms <bra|ket>, no operator. Note: if fed 2 kets, will take a conjugate.""" flip = 0 if bra.qs_type == 'ket': bra = bra.bra() flip += 1 if ket.qs_type == 'bra': ket = ket.ket() flip += 1 if flip == 1: print("fed 2 bras or kets, took a conjugate. Double check.") else: print("Assumes your <bra| already has been conjugated. Double check.") b = bra.product(ket) return b def op_n(self, n, first=True, kind="", reverse=False): """Mulitply an operator times a number, in that order. Set first=false for n * Op""" new_states = [] for op in self.qs: if first: new_states.append(op.product(n, kind, reverse)) else: new_states.append(n.product(op, kind, reverse)) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def square(self): """The square of each state.""" new_states = [] for bra in self.qs: new_states.append(bra.square()) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def norm_squared(self): """Take the inner product, returning a scalar series.""" return self.set_qs_type("bra").conj().product(self.set_qs_type("ket")) def norm_squared_of_vector(self): """Take the inner product of the vector, returning a scalar series.""" return self.set_qs_type("bra").vector().conj().product(self.set_qs_type("ket").vector()) def transpose(self, m=None, n=None): """Transposes a series.""" if m is None: # test if it is square. 
if math.sqrt(self.dim).is_integer(): m = int(sp.sqrt(self.dim)) n = m if n is None: n = int(self.dim / m) if m * n != self.dim: return None matrix = [[0 for x in range(m)] for y in range(n)] qs_t = [] for mi in range(m): for ni in range(n): matrix[ni][mi] = self.qs[mi * n + ni] qs_t = [] for t in matrix: for q in t: qs_t.append(q) # Switch rows and columns. return QHStates(qs_t, rows=self.columns, columns=self.rows) def Hermitian_conj(self, m=None, n=None, conj_type=0): """Returns the Hermitian conjugate.""" return self.transpose(m, n).conj(conj_type) def dagger(self, m=None, n=None, conj_type=0): """Just calls Hermitian_conj()""" return self.Hermitian_conj(m, n, conj_type) def is_square(self): """Tests if a quaternion series is square, meaning the dimenion is n^2.""" return math.sqrt(self.dim).is_integer() def is_Hermitian(self): """Tests if a series is Hermitian.""" hc = self.Hermitian_conj() return self.equals(hc) @staticmethod def sigma(kind, theta=None, phi=None): """Returns a sigma when given a type like, x, y, z, xy, xz, yz, xyz, with optional angles theta and phi.""" q0, q1, qi =QH().q_0(),QH().q_1(),QH().q_i() # Should work if given angles or not. if theta is None: sin_theta = 1 cos_theta = 1 else: sin_theta = math.sin(theta) cos_theta = math.cos(theta) if phi is None: sin_phi = 1 cos_phi = 1 else: sin_phi = math.sin(phi) cos_phi = math.cos(phi) x_factor = q1.product(QH([sin_theta * cos_phi, 0, 0, 0])) y_factor = qi.product(QH([sin_theta * sin_phi, 0, 0, 0])) z_factor = q1.product(QH([cos_theta, 0, 0, 0])) sigma = {} sigma['x'] =QHStates([q0, x_factor, x_factor, q0], "op") sigma['y'] =QHStates([q0, y_factor, y_factor.flip_signs(), q0], "op") sigma['z'] =QHStates([z_factor, q0, q0, z_factor.flip_signs()], "op") sigma['xy'] = sigma['x'].add(sigma['y']) sigma['xz'] = sigma['x'].add(sigma['z']) sigma['yz'] = sigma['y'].add(sigma['z']) sigma['xyz'] = sigma['x'].add(sigma['y']).add(sigma['z']) if kind not in sigma: print("Oops, I only know about x, y, z, and their combinations.") return None return sigma[kind].normalize() def sin(self): """sine of states.""" new_states = [] for ket in self.qs: new_states.append(ket.sin(qtype="")) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def cos(self): """cosine of states.""" new_states = [] for ket in self.qs: new_states.append(ket.cos(qtype="")) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def tan(self): """tan of states.""" new_states = [] for ket in self.qs: new_states.append(ket.tan(qtype="")) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def sinh(self): """sinh of states.""" new_states = [] for ket in self.qs: new_states.append(ket.sinh(qtype="")) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def cosh(self): """cosh of states.""" new_states = [] for ket in self.qs: new_states.append(ket.cosh(qtype="")) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def tanh(self): """tanh of states.""" new_states = [] for ket in self.qs: new_states.append(ket.tanh(qtype="")) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) def exp(self): """exponential of states.""" new_states = [] for ket in self.qs: new_states.append(ket.exp(qtype="")) return QHStates(new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns) ``` ```python if __name__ == "__main__": class TestQHStates(unittest.TestCase): """Test 
states.""" q_0 = QH().q_0() q_1 = QH().q_1() q_i = QH().q_i() q_n1 = QH([-1,0,0,0]) q_2 = QH([2,0,0,0]) q_n2 = QH([-2,0,0,0]) q_3 = QH([3,0,0,0]) q_n3 = QH([-3,0,0,0]) q_4 = QH([4,0,0,0]) q_5 = QH([5,0,0,0]) q_6 = QH([6,0,0,0]) q_10 = QH([10,0,0,0]) q_n5 = QH([-5,0,0,0]) q_7 = QH([7,0,0,0]) q_8 = QH([8,0,0,0]) q_9 = QH([9,0,0,0]) q_n11 = QH([-11,0,0,0]) q_21 = QH([21,0,0,0]) q_n34 = QH([-34,0,0,0]) v3 = QHStates([q_3]) v1123 = QHStates([q_1, q_1, q_2, q_3]) v3n1n21 = QHStates([q_3,q_n1,q_n2,q_1]) v9 = QHStates([q_1, q_1, q_2, q_3, q_1, q_1, q_2, q_3, q_2]) v9i = QHStates([QH([0,1,0,0]), QH([0,2,0,0]), QH([0,3,0,0]), QH([0,4,0,0]), QH([0,5,0,0]), QH([0,6,0,0]), QH([0,7,0,0]), QH([0,8,0,0]), QH([0,9,0,0])]) vv9 = v9.add(v9i) q_1d0 = QH([1.0, 0, 0, 0]) q12 = QHStates([q_1d0, q_1d0]) q14 = QHStates([q_1d0, q_1d0, q_1d0, q_1d0]) q19 = QHStates([q_1d0, q_0, q_1d0, q_1d0, q_1d0, q_1d0, q_1d0, q_1d0, q_1d0]) qn627 = QH([-6,27,0,0]) v33 = QHStates([q_7, q_0, q_n3, q_2, q_3, q_4, q_1, q_n1, q_n2]) v33inv = QHStates([q_n2, q_3, q_9, q_8, q_n11, q_n34, q_n5, q_7, q_21]) q_i3 = QHStates([q_1, q_1, q_1]) q_i2d = QHStates([q_1, q_0, q_0, q_1]) q_i3_bra = QHStates([q_1, q_1, q_1], "bra") q_6_op = QHStates([q_1, q_0, q_0, q_1, q_i, q_i], "op") q_6_op_32 = QHStates([q_1, q_0, q_0, q_1, q_i, q_i], "op", rows=3, columns=2) q_i2d_op = QHStates([q_1, q_0, q_0, q_1], "op") q_i4 = QH([0,4,0,0]) q_0_q_1 = QHStates([q_0, q_1]) q_1_q_0 = QHStates([q_1, q_0]) q_1_q_i = QHStates([q_1, q_i]) q_1_q_0 = QHStates([q_1, q_0]) q_0_q_i = QHStates([q_0, q_i]) A = QHStates([QH([4,0,0,0]), QH([0,1,0,0])], "bra") B = QHStates([QH([0,0,1,0]), QH([0,0,0,2]), QH([0,3,0,0])]) Op = QHStates([QH([3,0,0,0]), QH([0,1,0,0]), QH([0,0,2,0]), QH([0,0,0,3]), QH([2,0,0,0]), QH([0,4,0,0])], "op", rows=2, columns=3) Op4i = QHStates([q_i4, q_0, q_0, q_i4, q_2, q_3], "op", rows=2, columns=3) Op_scalar = QHStates([q_i4], "scalar") q_1234 = QHStates([QH([1, 1, 0, 0]), QH([2, 1, 0, 0]), QH([3, 1, 0, 0]), QH([4, 1, 0, 0])]) sigma_y = QHStates([QH([1, 0, 0, 0]), QH([0, -1, 0, 0]), QH([0, 1, 0, 0]), QH([-1, 0, 0, 0])]) qn = QHStates([QH([3,0,0,4])]) q_bad = QHStates([q_1], rows=2, columns=3) b = QHStates([q_1, q_2, q_3], qs_type="bra") k = QHStates([q_4, q_5, q_6], qs_type="ket") o = QHStates([q_10], qs_type="op") def test_1000_init(self): self.assertTrue(self.q_0_q_1.dim == 2) def test_1010_set_qs_type(self): bk = self.b.set_qs_type("ket") self.assertTrue(bk.rows == 3) self.assertTrue(bk.columns == 1) self.assertTrue(bk.qs_type == "ket") self.assertTrue(self.q_bad.qs is None) def test_1020_set_rows_and_columns(self): self.assertTrue(self.q_i3.rows == 3) self.assertTrue(self.q_i3.columns == 1) self.assertTrue(self.q_i3_bra.rows == 1) self.assertTrue(self.q_i3_bra.columns == 3) self.assertTrue(self.q_i2d_op.rows == 2) self.assertTrue(self.q_i2d_op.columns == 2) self.assertTrue(self.q_6_op_32.rows == 3) self.assertTrue(self.q_6_op_32.columns == 2) def test_1030_equals(self): self.assertTrue(self.A.equals(self.A)) self.assertFalse(self.A.equals(self.B)) def test_1031_subs(self): t, x, y, z = sp.symbols("t x y z") q_sym = QHStates([QH([t, x, y, x * y * z])]) q_z = q_sym.subs({t:1, x:2, y:3, z:4}) print("t x y xyz sub 1 2 3 4: ", q_z) self.assertTrue(q_z.equals(QHStates([QH([1, 2, 3, 24])]))) def test_1032_scalar(self): qs = self.q_1_q_i.scalar() print("scalar(q_1_q_i)", qs) self.assertTrue(qs.equals(self.q_1_q_0)) def test_1033_vector(self): qv = self.q_1_q_i.vector() print("vector(q_1_q_i)", qv) self.assertTrue(qv.equals(self.q_0_q_i)) def 
test_1034_xyz(self): qxyz = self.q_1_q_i.xyz() print("q_1_q_i.xyz()", qxyz) self.assertTrue(qxyz[0][0] == 0) self.assertTrue(qxyz[1][0] == 1) def test_1040_conj(self): qc = self.q_1_q_i.conj() qc1 = self.q_1_q_i.conj(1) print("q_1_q_i*: ", qc) print("q_1_qc*1: ", qc1) self.assertTrue(qc.qs[1].x == -1) self.assertTrue(qc1.qs[1].x == 1) def test_1042_conj_q(self): qc = self.q_1_q_i.conj_q(self.q_1) qc1 = self.q_1_q_i.conj_q(self.q_1) print("q_1_q_i conj_q: ", qc) print("q_1_qc*1 conj_q: ", qc1) self.assertTrue(qc.qs[1].x == -1) self.assertTrue(qc1.qs[1].x == -1) def test_1050_flip_signs(self): qf = self.q_1_q_i.flip_signs() print("-q_1_q_i: ", qf) self.assertTrue(qf.qs[1].x == -1) def test_1060_inverse(self): inv_v1123 = self.v1123.inverse() print("inv_v1123 operator", inv_v1123) vvinv = inv_v1123.product(self.v1123) vvinv.print_state("vinvD x v") self.assertTrue(vvinv.equals(self.q14)) inv_v33 = self.v33.inverse() print("inv_v33 operator", inv_v33) vv33 = inv_v33.product(self.v33) vv33.print_state("inv_v33D x v33") self.assertTrue(vv33.equals(self.q19)) Ainv = self.A.inverse() print("A ket inverse, ", Ainv) AAinv = self.A.product(Ainv) AAinv.print_state("A x AinvD") self.assertTrue(AAinv.equals(self.q12)) def test_1070_normalize(self): qn = self.qn.normalize() print("Op normalized: ", qn) self.assertAlmostEqual(qn.qs[0].t, 0.6) self.assertTrue(qn.qs[0].z == 0.8) def test_1080_determinant(self): det_v3 = self.v3.determinant() print("det v3:", det_v3) self.assertTrue(det_v3.equals(self.q_3)) det_v1123 = self.v1123.determinant() print("det v1123", det_v1123) self.assertTrue(det_v1123.equals(self.q_1)) det_v9 = self.v9.determinant() print("det_v9", det_v9) self.assertTrue(det_v9.equals(self.q_9)) det_vv9 = self.vv9.determinant() print("det_vv9", det_vv9) self.assertTrue(det_vv9.equals(self.qn627)) def test_1090_summation(self): q_01_sum = self.q_0_q_1.summation() print("sum: ", q_01_sum) self.assertTrue(type(q_01_sum) is QH) self.assertTrue(q_01_sum.t == 1) def test_1100_add(self): q_0110_add = self.q_0_q_1.add(self.q_1_q_0) print("add 01 10: ", q_0110_add) self.assertTrue(q_0110_add.qs[0].t == 1) self.assertTrue(q_0110_add.qs[1].t == 1) def test_1110_dif(self): q_0110_dif = self.q_0_q_1.dif(self.q_1_q_0) print("dif 01 10: ", q_0110_dif) self.assertTrue(q_0110_dif.qs[0].t == -1) self.assertTrue(q_0110_dif.qs[1].t == 1) def test_1120_diagonal(self): Op4iDiag2 = self.Op_scalar.diagonal(2) print("Op4i on a diagonal 2x2", Op4iDiag2) self.assertTrue(Op4iDiag2.qs[0].equals(self.q_i4)) self.assertTrue(Op4iDiag2.qs[1].equals(QH().q_0())) def test_1125_trace(self): trace = self.v1123.op(2, 2).trace() print("trace: ", trace) self.assertTrue(trace.equals(QHStates([self.q_4]))) def test_1130_identity(self): I2 = QHStates().identity(2, operator=True) print("Operator Idenity, diagonal 2x2", I2) self.assertTrue(I2.qs[0].equals(QH().q_1())) self.assertTrue(I2.qs[1].equals(QH().q_0())) I2 = QHStates().identity(2) print("Idenity on 2 state ket", I2) self.assertTrue(I2.qs[0].equals(QH().q_1())) self.assertTrue(I2.qs[1].equals(QH().q_1())) def test_1140_product(self): self.assertTrue(self.b.product(self.o).equals(QHStates([QH([10,0,0,0]),QH([20,0,0,0]),QH([30,0,0,0])]))) self.assertTrue(self.b.product(self.k).equals(QHStates([QH([32,0,0,0])]))) self.assertTrue(self.b.product(self.o).product(self.k).equals(QHStates([QH([320,0,0,0])]))) self.assertTrue(self.b.product(self.b).equals(QHStates([QH([1,0,0,0]),QH([4,0,0,0]),QH([9,0,0,0])]))) 
self.assertTrue(self.o.product(self.k).equals(QHStates([QH([40,0,0,0]),QH([50,0,0,0]),QH([60,0,0,0])]))) self.assertTrue(self.o.product(self.o).equals(QHStates([QH([100,0,0,0])]))) self.assertTrue(self.k.product(self.k).equals(QHStates([QH([16,0,0,0]),QH([25,0,0,0]),QH([36,0,0,0])]))) self.assertTrue(self.k.product(self.b).equals(QHStates([QH([4,0,0,0]),QH([5,0,0,0]),QH([6,0,0,0]), QH([8,0,0,0]),QH([10,0,0,0]),QH([12,0,0,0]), QH([12,0,0,0]),QH([15,0,0,0]),QH([18,0,0,0])]))) def test_1150_product_AA(self): AA = self.A.product(self.A.set_qs_type("ket")) print("AA: ", AA) self.assertTrue(AA.equals(QHStates([QH([15, 0, 0, 0])]))) def test_1160_Euclidean_product_AA(self): AA = self.A.Euclidean_product(self.A.set_qs_type("ket")) print("A* A", AA) self.assertTrue(AA.equals(QHStates([QH([17, 0, 0, 0])]))) def test_1170_product_AOp(self): AOp = self.A.product(self.Op) print("A Op: ", AOp) self.assertTrue(AOp.qs[0].equals(QH([11, 0, 0, 0]))) self.assertTrue(AOp.qs[1].equals(QH([0, 0, 5, 0]))) self.assertTrue(AOp.qs[2].equals(QH([4, 0, 0, 0]))) def test_1180_Euclidean_product_AOp(self): AOp = self.A.Euclidean_product(self.Op) print("A* Op: ", AOp) self.assertTrue(AOp.qs[0].equals(QH([13, 0, 0, 0]))) self.assertTrue(AOp.qs[1].equals(QH([0, 0, 11, 0]))) self.assertTrue(AOp.qs[2].equals(QH([12, 0, 0, 0]))) def test_1190_product_AOp4i(self): AOp4i = self.A.product(self.Op4i) print("A Op4i: ", AOp4i) self.assertTrue(AOp4i.qs[0].equals(QH([0, 16, 0, 0]))) self.assertTrue(AOp4i.qs[1].equals(QH([-4, 0, 0, 0]))) def test_1200_Euclidean_product_AOp4i(self): AOp4i = self.A.Euclidean_product(self.Op4i) print("A* Op4i: ", AOp4i) self.assertTrue(AOp4i.qs[0].equals(QH([0, 16, 0, 0]))) self.assertTrue(AOp4i.qs[1].equals(QH([4, 0, 0, 0]))) def test_1210_product_OpB(self): OpB = self.Op.product(self.B) print("Op B: ", OpB) self.assertTrue(OpB.qs[0].equals(QH([0, 10, 3, 0]))) self.assertTrue(OpB.qs[1].equals(QH([-18, 0, 0, 1]))) def test_1220_Euclidean_product_OpB(self): OpB = self.Op.Euclidean_product(self.B) print("Op B: ", OpB) self.assertTrue(OpB.qs[0].equals(QH([0, 2, 3, 0]))) self.assertTrue(OpB.qs[1].equals(QH([18, 0, 0, -1]))) def test_1230_product_AOpB(self): AOpB = self.A.product(self.Op).product(self.B) print("A Op B: ", AOpB) self.assertTrue(AOpB.equals(QHStates([QH([0, 22, 11, 0])]))) def test_1240_Euclidean_product_AOpB(self): AOpB = self.A.Euclidean_product(self.Op).product(self.B) print("A* Op B: ", AOpB) self.assertTrue(AOpB.equals(QHStates([QH([0, 58, 13, 0])]))) def test_1250_product_AOp4i(self): AOp4i = self.A.product(self.Op4i) print("A Op4i: ", AOp4i) self.assertTrue(AOp4i.qs[0].equals(QH([0, 16, 0, 0]))) self.assertTrue(AOp4i.qs[1].equals(QH([-4, 0, 0, 0]))) def test_1260_Euclidean_product_AOp4i(self): AOp4i = self.A.Euclidean_product(self.Op4i) print("A* Op4i: ", AOp4i) self.assertTrue(AOp4i.qs[0].equals(QH([0, 16, 0, 0]))) self.assertTrue(AOp4i.qs[1].equals(QH([4, 0, 0, 0]))) def test_1270_product_Op4iB(self): Op4iB = self.Op4i.product(self.B) print("Op4i B: ", Op4iB) self.assertTrue(Op4iB.qs[0].equals(QH([0, 6, 0, 4]))) self.assertTrue(Op4iB.qs[1].equals(QH([0, 9, -8, 0]))) def test_1280_Euclidean_product_Op4iB(self): Op4iB = self.Op4i.Euclidean_product(self.B) print("Op4i B: ", Op4iB) self.assertTrue(Op4iB.qs[0].equals(QH([0, 6, 0, -4]))) self.assertTrue(Op4iB.qs[1].equals(QH([0, 9, 8, 0]))) def test_1290_product_AOp4iB(self): AOp4iB = self.A.product(self.Op4i).product(self.B) print("A* Op4i B: ", AOp4iB) self.assertTrue(AOp4iB.equals(QHStates([QH([-9, 24, 0, 8])]))) def 
test_1300_Euclidean_product_AOp4iB(self): AOp4iB = self.A.Euclidean_product(self.Op4i).product(self.B) print("A* Op4i B: ", AOp4iB) self.assertTrue(AOp4iB.equals(QHStates([QH([9, 24, 0, 24])]))) def test_1305_bracket(self): bracket1234 = QHStates().bracket(self.q_1234, QHStates().identity(4, operator=True), self.q_1234) print("bracket <1234|I|1234>: ", bracket1234) self.assertTrue(bracket1234.equals(QHStates([QH([34, 0, 0, 0])]))) def test_1310_op_n(self): opn = self.Op.op_n(n=self.q_i) print("op_n: ", opn) self.assertTrue(opn.qs[0].x == 3) def test_1312_square(self): ns = self.q_1_q_i.square() ns.print_state("q_1_q_i square") self.assertTrue(ns.equals(QHStates([self.q_1, self.q_n1]))) def test_1315_norm_squared(self): ns = self.q_1_q_i.norm_squared() ns.print_state("q_1_q_i norm squared") self.assertTrue(ns.equals(QHStates([QH([2,0,0,0])]))) def test_1318_norm_squared_of_vector(self): ns = self.q_1_q_i.norm_squared_of_vector() ns.print_state("q_1_q_i norm squared of vector") self.assertTrue(ns.equals(QHStates([self.q_1]))) def test_1320_transpose(self): opt = self.q_1234.transpose() print("op1234 transposed: ", opt) self.assertTrue(opt.qs[0].t == 1) self.assertTrue(opt.qs[1].t == 3) self.assertTrue(opt.qs[2].t == 2) self.assertTrue(opt.qs[3].t == 4) optt = self.q_1234.transpose().transpose() self.assertTrue(optt.equals(self.q_1234)) def test_1330_Hermitian_conj(self): q_hc = self.q_1234.Hermitian_conj() print("op1234 Hermtian_conj: ", q_hc) self.assertTrue(q_hc.qs[0].t == 1) self.assertTrue(q_hc.qs[1].t == 3) self.assertTrue(q_hc.qs[2].t == 2) self.assertTrue(q_hc.qs[3].t == 4) self.assertTrue(q_hc.qs[0].x == -1) self.assertTrue(q_hc.qs[1].x == -1) self.assertTrue(q_hc.qs[2].x == -1) self.assertTrue(q_hc.qs[3].x == -1) def test_1340_is_Hermitian(self): self.assertTrue(self.sigma_y.is_Hermitian()) self.assertFalse(self.q_1234.is_Hermitian()) def test_1350_is_square(self): self.assertFalse(self.Op.is_square()) self.assertTrue(self.Op_scalar.is_square()) suite = unittest.TestLoader().loadTestsFromModule(TestQHStates()) _results = unittest.TextTestRunner().run(suite); ``` ```python if __name__ == "__main__": !jupyter nbconvert --to script QH.ipynb !black QH.py !In_remover.sh QH.py ``` ```python ```
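To close, a short sketch of a simple bra-ket calculation with the quaternion series class. The values are illustrative only and it assumes the `QH` and `QHStates` classes defined above are in scope.

```python
# A 2-state ket with illustrative values.
A = QHStates([QH([4, 0, 0, 0]), QH([0, 1, 0, 0])])

# Euclidean inner product <A|A>: conjugated bra times the ket.
AA = A.bra().product(A)
AA.print_state("<A|A>")

# <A|I|A> using a 2x2 identity operator and the bracket() helper.
I2 = QHStates().identity(2, operator=True)
QHStates().bracket(A, I2, A).print_state("<A|I|A>")
```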
d626b00c24f2cf342d0f583d0d0f1a5de0b4d20c
121,187
ipynb
Jupyter Notebook
Notebooks/QH.ipynb
dougsweetser/AIG
ce23119bbde41671438fb805dfba4b04b42d84d6
[ "Apache-2.0" ]
null
null
null
Notebooks/QH.ipynb
dougsweetser/AIG
ce23119bbde41671438fb805dfba4b04b42d84d6
[ "Apache-2.0" ]
null
null
null
Notebooks/QH.ipynb
dougsweetser/AIG
ce23119bbde41671438fb805dfba4b04b42d84d6
[ "Apache-2.0" ]
null
null
null
43.781431
218
0.430657
true
26,281
Qwen/Qwen-72B
1. YES 2. YES
0.899121
0.779993
0.701308
__label__eng_Latn
0.415636
0.467706
```python
from sympy.physics.mechanics import ReferenceFrame,Point,dynamicsymbols
from sympy.physics.mechanics import Point
from sympy import latex,pprint,symbols,init_printing
from sympy.algebras.quaternion import Quaternion
import numpy as np

init_printing() # To display symbols
```

## 3. Point kinematics

## 3.1 Velocity and acceleration of points

```python
# For the amusement park ride example
a=ReferenceFrame('A')

# Define the point O
o=Point('O')

# Model parameters
l,r=symbols('L,R')

# Motion variables
q1,q2=dynamicsymbols('q1,q2')

# Intermediate reference frame E
e=a.orientnew('E','Axis',(q1,a.y))

# Intermediate reference frame B
b=e.orientnew('B','Axis',(q2,-e.z))

p=o.locatenew('P',l*b.z)
q=p.locatenew('Q',r*b.y)

# Compute the vector from the center of the pendulum's rotation axis to the rider
r_qo=q.pos_from(o)

# The rider's velocity is:
r_qo.dt(a)

# The rider's acceleration is:
#r_qo.dt(a).dt(a).subs(q2.diff().diff(),0).simplify()
```

```python
q2.diff().diff()
```

## 3.2 Velocity and acceleration of points fixed on a body

```python
# For the amusement park ride example
a=ReferenceFrame('A')

# Define the point O
o=Point('O')

# Model parameters
l,r=symbols('L,R')

# Motion variables
q1,q2=dynamicsymbols('q1,q2')

# Intermediate reference frame E
e=a.orientnew('E','Axis',(q1,a.y))

# Intermediate reference frame B
b=e.orientnew('B','Axis',(q2,-e.z))

p=o.locatenew('P',l*b.z)
q=p.locatenew('Q',r*b.y)
```

```python
# Use the v2pt_theory method to compute the velocity of a point from the velocity
# of another point fixed in the same frame.

# First define the velocity of O in frame A, and of P in B, as 0, since they are
# fixed points in those frames.
o.set_vel(a,0)
p.set_vel(b,0)

# Then use v2pt_theory to define the velocity of P in a with respect to point O
p.v2pt_theory(o,a,b)

# Now use v2pt_theory to define the velocity of Q in a with respect to point P
q.v2pt_theory(p,a,b)

# Note that the result is the same as taking the total derivative of r_qo
# Now try using the a2pt_theory method to compute the acceleration
```

## 3.3 Velocity and acceleration of points moving on a body

```python
# We modify the amusement park ride example to include the variable
# q3.
a=ReferenceFrame('A')

# Define the point O
o=Point('O')

# Model parameters
l,r=symbols('L,R')

# Motion variables
q1,q2,q3=dynamicsymbols('q1,q2,q3')

# Intermediate reference frame E
e=a.orientnew('E','Axis',(q1,a.y))

# Reference frame B
b=e.orientnew('B','Axis',(q2,-e.z))

p=o.locatenew('P',l*b.z)
q=p.locatenew('Q',q3*b.x)
# We expect q3 < R, for the rider's sake :)

# First define the velocity of O in frame A, and of P in B, as 0, since they are
# fixed points in those frames.
o.set_vel(a,0)
p.set_vel(b,0)

# Then use v2pt_theory to define the velocity of P in a with respect to point O
p.v2pt_theory(o,a,b)

# We should no longer use v2pt_theory to define the velocity of Q, since
# it is not a fixed point in B.
# Since q was defined in coordinates of B, its velocity in this frame is already defined.
# Check it
pprint(q.vel(b))

# Now we use v1pt_theory to find the velocity of q
q.v1pt_theory(p,a,b)
```
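The last cell above suggests computing the acceleration as well. A short sketch of how that could look with the sympy mechanics API (`a2pt_theory` for a point fixed in a body, `a1pt_theory` for a point moving on it), reusing the frames and points defined in the previous cell:

```python
# Q moves along b.x, so its velocity in B is the time derivative of q3.
q.set_vel(b, q3.diff()*b.x)

# Acceleration of P (fixed in B) relative to O, expressed in frame A,
# using the two-point theorem.
p.a2pt_theory(o, a, b)

# Acceleration of Q (moving on B) using the one-point theorem.
q.a1pt_theory(p, a, b)
```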
db90e522c630118e4322361fc0b1baf734ecd2e4
29,562
ipynb
Jupyter Notebook
Capitulo_3/Capitulo3.ipynb
JonathanCamargo/Dinamica_Mecanica_Material_Interactivo
58fd21f1efccd1eda88f03e39cf69d122d280fdd
[ "MIT" ]
null
null
null
Capitulo_3/Capitulo3.ipynb
JonathanCamargo/Dinamica_Mecanica_Material_Interactivo
58fd21f1efccd1eda88f03e39cf69d122d280fdd
[ "MIT" ]
null
null
null
Capitulo_3/Capitulo3.ipynb
JonathanCamargo/Dinamica_Mecanica_Material_Interactivo
58fd21f1efccd1eda88f03e39cf69d122d280fdd
[ "MIT" ]
null
null
null
103.003484
6,356
0.815134
true
1,073
Qwen/Qwen-72B
1. YES 2. YES
0.934395
0.808067
0.755054
__label__spa_Latn
0.955436
0.592576
# Check CFHT Zero Point Value

## Theoretical Background

The Zero Point value (in ADU/sec) for the `r` filter in the [CFHT specifications page](https://www.cfht.hawaii.edu/Instruments/Imaging/Megacam/generalinformation.html) is 26.22 and it seems to be overestimated. In the [LSST GitHub code](https://github.com/LSSTDESC/WeakLensingDeblending/blob/9f851f79f6f820f815528d11acabf64083b6e111/descwl/survey.py#L288) it is recomputed and its new value is 10.72.

In the following we are going to recompute the Zero Point value using the formulas in the [LSST GitHub issue #1](https://github.com/LSSTDESC/WeakLensingDeblending/issues/1). We have:

\begin{equation}
z_p = m_0+2.5 \log_{10}(s_0[R,m_0]) \quad,
\end{equation}

where $z_p$ is the Zero Point AB magnitude, $m_0$ is a fiducial source of AB magnitude of value 24 and $s_0[R,m_0]$ the corresponding flux, such that:

\begin{equation}
s_0[R,m_0]=s[f_{AB},R]\cdot 10^{-0.4m_0} \quad,
\end{equation}

where $s[f_{AB},R]$ is defined as:

\begin{equation}
s[f_{AB},R]=A \int_0^{+\infty}f(\lambda)\cdot\frac{\lambda}{hc}\cdot R(\lambda) d\lambda \quad ,
\end{equation}

here $A=8.022$ is the telescope effective area in $m^2$, $f(\lambda)=f_{AB}(\lambda)=1.08855 \cdot 10^{-5}/\lambda^2 \; (J/s/m^2/nm)$ with $\lambda$ in nm, $h$ is the Planck constant, $c$ is the speed of light, and the $\lambda$ (in nm) and $R(\lambda)$ values are given in the R.MP9602 third generation [filter response table](http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html).

## Numerical Application

First, extract and convert $\lambda$ and $R(\lambda)$ values, load other constants and compute $s[f_{AB},R]$.

> In the table, $\lambda$ values are in Angstroms and should be converted to nm.

```python
import numpy as np
from scipy.constants import h,c

file_name = 'r_lambda.txt'
lamb, r = np.loadtxt(file_name, unpack=True)
lamb /= 10 #convert from Angstrom to nm

m0 = 24
f = 1.08855/(lamb**2) * 10 ** (-5) # J/s/m^2/nm
A = 8.022 #m^2

# use the trapezoidal rule to perform a numerical integration
sAB = A * np.trapz(f*lamb*r/(h*c), lamb)

print(r'The value of s[f_AB,R] is {}'.format(np.format_float_scientific(sAB)))
```

The value of s[f_AB,R] is 5.155817453958427e+19

Deduce the values of $s_0[R,m_0]$ then $z_p$.

```python
s0 = sAB * 10 ** (-0.4*m0)
zp = m0 + 2.5*np.log10(s0)

print(r'The value of s_0[R, m_0] is {}'.format(np.format_float_scientific(s0)))
print(r'The magnitude of the zero point is {}'.format(np.format_float_scientific(zp)))
```

The value of s_0[R, m_0] is 1.2950827905938398e+10
The magnitude of the zero point is 4.928074383098289e+01
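For reference, a zero point quoted in ADU/sec can be read as the AB magnitude of a source that produces one count per second, so it converts directly between magnitudes and expected count rates. A small sketch of that conversion, using only the magnitude and zero-point values quoted in the introduction as an illustration:

```python
def counts_per_second(mag, zero_point):
    """Expected count rate (ADU/sec) for a source of AB magnitude `mag`."""
    return 10 ** (-0.4 * (mag - zero_point))

# Count rate of an m=24 source under the two zero points quoted above.
for zp_value in (26.22, 10.72):
    print(zp_value, counts_per_second(24, zp_value))
```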
d34c3cf17c63917bf41fd3996cbb2095132a08cf
4,052
ipynb
Jupyter Notebook
data/CFHT/ZP_check.ipynb
CosmoStat/ShapeDeconv
3869cb6b9870ff1060498eedcb99e8f95908f01a
[ "MIT" ]
4
2020-12-17T14:58:28.000Z
2022-01-22T06:03:55.000Z
data/CFHT/ZP_check.ipynb
CosmoStat/ShapeDeconv
3869cb6b9870ff1060498eedcb99e8f95908f01a
[ "MIT" ]
9
2021-01-13T10:38:28.000Z
2021-07-06T23:37:08.000Z
data/CFHT/ZP_check.ipynb
CosmoStat/ShapeDeconv
3869cb6b9870ff1060498eedcb99e8f95908f01a
[ "MIT" ]
null
null
null
33.213115
410
0.578727
true
895
Qwen/Qwen-72B
1. YES 2. YES
0.956634
0.828939
0.792991
__label__eng_Latn
0.869241
0.680717
# [1] Scientific computation There are several packages that provide multidimensional data manipulation, optimization, regression, interpolation and visualization, among other possibilities. ## 0. Some arithmetic insights ### [Integers](https://docs.python.org/3/c-api/long.html) In python, integers have arbitrary precision and therefore we can represent an arbitrarily large range of integers (only limited by the available memory). ```python x = 7**273 print(x) print(type(x)) ``` 514949835672429280118340184962210764526557059561644551308225135656646772567852802341125202657808555553768425192667497268234055382589043229395747814284702360571674673819941501856557513477604134701802987908639670470095067188938102407 <class 'int'> ### [Floats](https://docs.python.org/3/tutorial/floatingpoint.html) Python uses (hardware) [754 double precision representation](https://en.wikipedia.org/wiki/Double-precision_floating-point_format#IEEE_754_double-precision_binary_floating-point_format:_binary64) for floats. This means that some floats can be only represented approximately. * Using [string format](https://docs.python.org/3.4/library/string.html#string-formatting) to see the precision limitation of **doubles** in Python. For example, it is impossible to represent exactly the number `0.1`: ```python format(0.1, '.80f') ``` '0.10000000000000000555111512312578270211815834045410156250000000000000000000000000' * This can give us *surprises*: ```python .1 + .1 + .1 == .3 ``` False ```python .1 + .1 == .2 ``` True * For "infinite" precision float arithmetic you can use [decimal](https://docs.python.org/3/library/decimal.html#module-decimal) or [mpmath](http://mpmath.org): ```python from decimal import Decimal, getcontext ``` * Getting 30 digits of 1/7: ```python getcontext().prec=80 format(Decimal(1)/Decimal(7), '.80f') ``` '0.14285714285714285714285714285714285714285714285714285714285714285714285714285714' * We can see how many digits are true of 1/7 using doubles: ```python format(1/7, '.80f') ``` '0.14285714285714284921269268124888185411691665649414062500000000000000000000000000' ```python #12345678901234567 (17 digits) ``` * Decimal arithmetic produces decimal objects: ```python Decimal(1)/Decimal(7) ``` Decimal('0.14285714285714285714285714285714285714285714285714285714285714285714285714285714') * Decimal objects can be printed with `format`: ```python print('{:.50f}'.format(Decimal(1)/Decimal(7))) ``` 0.14285714285714285714285714285714285714285714285714 * A more complex example: lets compute 1000 digits of the $\pi$ number using the [Bailey–Borwein–Plouffe formula](https://en.wikipedia.org/wiki/Bailey%E2%80%93Borwein%E2%80%93Plouffe_formula): $$ \pi = \sum_{k = 0}^{\infty}\Bigg[ \frac{1}{16^k} \left( \frac{4}{8k + 1} - \frac{2}{8k + 4} - \frac{1}{8k + 5} - \frac{1}{8k + 6} \right) \Bigg] $$ ```python # https://stackoverflow.com/questions/28284996/python-pi-calculation from decimal import Decimal, getcontext getcontext().prec=1000 my_pi= sum(1/Decimal(16)**k * (Decimal(4)/(8*k+1) - Decimal(2)/(8*k+4) - Decimal(1)/(8*k+5) - Decimal(1)/(8*k+6)) for k in range(1000)) '{:.1000f}'.format(my_pi) ``` 
'3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679821480865132823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461284756482337867831652712019091456485669234603486104543266482133936072602491412737245870066063155881748815209209628292540917153643678925903600113305305488204665213841469519415116094330572703657595919530921861173819326117931051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798609437027705392171762931767523846748184676694051320005681271452635608277857713427577896091736371787214684409012249534301465495853710507922796892589235420199561121290219608640344181598136297747713099605187072113499999983729780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083814206171776691473035982534904287554687311595628638823537875937519577818577805321712268066130019278766111959092164201930' You can visit [100,000 Digits of Pi](http://www.geom.uiuc.edu/~huberty/math5337/groupe/digits.html) or [One Million Digits of Pi](http://www.piday.org/million/) to check the correctness this code. ## 1. SciPy.org's [Numpy](http://www.numpy.org/) Numpy provides a high-performance multidimensional array object. ### 1.1. Installation ``` pip install numpy ``` ### 1.2. Why numpy? Good running times. ```python import numpy as np ``` * Lets define a list and compute the sum of its elements, timing it: ```python l = list(range(0,100000)); print(type(l), l[:10]) %timeit sum(l) ``` <class 'list'> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 858 µs ± 56.1 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) * An now, lets create a numpy's array and time the sum of its elements: ```python a = np.arange(0, 100000); print(type(a), a[:10]) %timeit np.sum(a) ``` <class 'numpy.ndarray'> [0 1 2 3 4 5 6 7 8 9] 69.1 µs ± 3.57 µs per loop (mean ± std. dev. 
of 7 runs, 10000 loops each) * And what about a *pure* C implementation of an equivalent computation: ```python !cat sum_array.c !gcc -O3 sum_array.c -o sum_array %timeit !./sum_array ``` #include <stdio.h> #include <time.h> #include "sum_array_lib.c" #define N 100000 int main() { double a[N]; int i; clock_t start, end; double cpu_time; for(i=0; i<N; i++) { a[i] = i; } start = clock(); double sum = sum_array(a,N); end = clock(); printf("%f ", sum); cpu_time = ((double) (end - start)) / CLOCKS_PER_SEC; cpu_time *= 1000000; printf("%f usegs\n", cpu_time); } 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 15625.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 15625.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 15625.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 15625.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 15625.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 4999950000.000000 0.000000 usegs 240 ms ± 34.4 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) * Looking for informayion of numpy's *something*: ```python np.lookfor('invert') ``` Search results for 'invert' --------------------------- numpy.bitwise_not Compute bit-wise inversion, or bit-wise NOT, element-wise. 
numpy.matrix.getI Returns the (multiplicative) inverse of invertible `self`. numpy.in1d Test whether each element of a 1-D array is also present in a second array. numpy.isin Calculates `element in test_elements`, broadcasting over `element` only. numpy.transpose Permute the dimensions of an array. numpy.linalg.inv Compute the (multiplicative) inverse of a matrix. numpy.linalg.pinv Compute the (Moore-Penrose) pseudo-inverse of a matrix. numpy.linalg.tensorinv Compute the 'inverse' of an N-dimensional array. numpy.lib.tests.test_arraysetops.TestSetOps.test_in1d_invert Test in1d's invert parameter numpy.core.tests.test_ufunc.TestUfunc.test_all_ufunc Try to check presence and results of all ufuncs. * Remember that it is possible to use the Tab key to complete a command, or to use a wildcard in IPython to list NumPy's contents: ```python np.*? ``` ### 1.3. Creating (simple) [arrays](https://docs.scipy.org/doc/numpy-dev/user/quickstart.html) in Numpy A simple array is a grid of values, all of the same type, indexed by a tuple of nonnegative integers. ### 1.3.1. 1D arrays * Creating an array using a list: ```python a = np.array([1, 2, 3]) print(type([1, 2, 3])) print(type(a)) ``` <class 'list'> <class 'numpy.ndarray'> * Getting the number of dimensions of an array: ```python print(a.ndim) ``` 1 * Printing an array: ```python print(a) ``` [1 2 3] * Printing the *shape* (which is always a tuple) of an array: ```python print(a.shape) ``` (3,) * Native Python's [`len()`](https://docs.python.org/3.6/library/functions.html#len) also works: ```python print(len(a)) ``` 3 * A more exotic definition using [`linspace()`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linspace.html): ```python np.linspace(1., 4., 6) ``` array([1. , 1.6, 2.2, 2.8, 3.4, 4. ]) * And reading the data from a file: ```python np.genfromtxt('data.txt') ``` array([[ 1., 200.], [ 2., 150.], [ 3., 250.]]) ```python !cat data.txt ``` 1 200 2 150 3 250 * Arrays can be created from different types of containers (which store complex numbers in this case): ```python c = [[1,1.0],(1+1j,.3)] print(type(c), type(c[0]), type(c[1])) x = np.array(c) x ``` <class 'list'> <class 'list'> <class 'tuple'> array([[1. +0.j, 1. +0.j], [1. +1.j, 0.3+0.j]]) * Accessing an element: ```python print(a, a[0], a[1]) ``` [1 2 3] 1 2 ```python a[0] = 0 print(a) ``` [0 2 3] ### 1.3.2. 2D arrays * Creating a 2D array with two 1D arrays: ```python b = np.array([[1,2,3],[4,5,6]]) print(b) print(b.shape) print(b[1, 1]) ``` [[1 2 3] [4 5 6]] (2, 3) 5 * With zeroes: ```python a = np.zeros((5,5)) print(a) ``` [[0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.]] * The default dtype is `float64`: ```python print(type(a[0][0])) ``` <class 'numpy.float64'> * With ones: ```python a = np.ones((5,5)) print(a) ``` [[1. 1. 1. 1. 1.] [1. 1. 1. 1. 1.] [1. 1. 1. 1. 1.] [1. 1. 1. 1. 1.] [1. 1. 1. 1. 1.]] * With an arbitrary scalar: ```python a = np.full((5,5), 2) print(a) ``` [[2 2 2 2 2] [2 2 2 2 2] [2 2 2 2 2] [2 2 2 2 2] [2 2 2 2 2]] * The identity matrix: ```python a = np.eye(5) print(a) ``` [[1. 0. 0. 0. 0.] [0. 1. 0. 0. 0.] [0. 0. 1. 0. 0.] [0. 0. 0. 1. 0.] [0. 0. 0. 0. 
1.]] * With random data: ```python a = np.random.random((5,5)) print(a) ``` [[0.50755507 0.0211933 0.43352176 0.44631306 0.23881999] [0.83024573 0.74476418 0.586479 0.49286785 0.48735588] [0.2667407 0.6050111 0.75354372 0.27058423 0.52230328] [0.09832853 0.71363667 0.88404059 0.56705442 0.99448158] [0.17873977 0.01220009 0.45699848 0.93175194 0.84602469]] ```python a = np.random.random((5,5)) print(a) ``` [[0.47332988 0.90255503 0.22599553 0.30415374 0.71499388] [0.72409148 0.01867644 0.2858131 0.58048634 0.93078663] [0.3389969 0.12008312 0.51627271 0.69920706 0.29864068] [0.86160962 0.9058072 0.76858325 0.26123164 0.9384556 ] [0.93864246 0.74504455 0.91073504 0.23722471 0.49496735]] * Filled with [arbitrary](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.empty_like.html) data and with a previously defined shape: ```python b = np.empty_like(a) print(b) ``` [[0.47332988 0.90255503 0.22599553 0.30415374 0.71499388] [0.72409148 0.01867644 0.2858131 0.58048634 0.93078663] [0.3389969 0.12008312 0.51627271 0.69920706 0.29864068] [0.86160962 0.9058072 0.76858325 0.26123164 0.9384556 ] [0.93864246 0.74504455 0.91073504 0.23722471 0.49496735]] * With a 1D list comprehension: ```python a = np.array([i for i in range(5)]) print(a, a[1], a.shape) ``` [0 1 2 3 4] 1 (5,) * With a 2D list comprehension: ```python a = np.array([[j+i*5 for j in range(10)] for i in range(5)]) print(a, a.shape) ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] (5, 10) * Accessing to a row of a matrix: ```python a[1] # Get row 2 ``` array([ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]) * Accessing to an element of a matrix: ```python a[1][2] # Get column 3 of row 2 ``` 7 ```python a[1,2] # Get element of coordinates (1,2) ``` 7 * Getting elements of a matrix using "integer array indexing": ```python print(a) print(a[[0, 1, 2], [3, 2, 1]]) ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] [ 3 7 11] * The same integer array indexing using comprehension lists: ```python print(a[np.array([i for i in range(3)]), np.array([i for i in range(3,0,-1)])]) ``` [ 3 7 11] * The same using [`np.arange()`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.arange.html): ```python print(np.arange(3)) print(np.arange(3,0,-1)) print(a[np.arange(3), np.arange(3,0,-1)]) ``` [0 1 2] [3 2 1] [ 3 7 11] ### 1.4. 
Slicing ```python a = np.array([[j+i*5 for j in range(10)] for i in range(5)]) a ``` array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]]) * Get all rows of a matrix (the whole matrix): ```python a[:] ``` array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]]) ```python print(a) a[::] # Notation: [starting index : stoping index : step] # By default, start = 0, stop = maximum, step = 1 ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]]) ```python print(a) a[0:] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]]) ```python print(a) a[0::] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]]) ```python a[:a.shape[1]] ``` array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]]) ```python a[:a.shape[1]:] ``` array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]]) * Get all rows of a matrix, except the first one: ```python print(a) a[1:] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([[ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]]) ```python print(a) a[1::] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([[ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]]) * Get the first two rows of a matrix: ```python print(a) a[0:2] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]]) * Get the even rows of a matrix: ```python print(a) a[0::2] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([[ 0, 
1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]]) * Get the odd columns of a matrix: ```python print(a) a[:,1::2] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([[ 1, 3, 5, 7, 9], [ 6, 8, 10, 12, 14], [11, 13, 15, 17, 19], [16, 18, 20, 22, 24], [21, 23, 25, 27, 29]]) * Get the odd rows of a matrix: ```python print(a) a[:][1::2] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([[ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [15, 16, 17, 18, 19, 20, 21, 22, 23, 24]]) ```python a[1::2] ``` array([[ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [15, 16, 17, 18, 19, 20, 21, 22, 23, 24]]) * Getting the second row: ```python print(a) a[1,:] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]) * Getting the third column: ```python print(a) a[:,2] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([ 2, 7, 12, 17, 22]) * Getting a top-left $2\times 2$ submatrix: ```python print(a) a[:2,:2] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([[0, 1], [5, 6]]) * Getting a bottom-right $2\times 2$ submatrix: ```python print(a) a[a.shape[0]-2:,a.shape[1]-2:] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([[23, 24], [28, 29]]) ```python a[a.shape[0]-2::,a.shape[1]-2::] ``` array([[23, 24], [28, 29]]) * Sampling in horizontal every 2 elements, starting at row 2: ```python print(a) a[:,1::2] ``` [[ 0 1 2 3 4 5 6 7 8 9] [ 5 6 7 8 9 10 11 12 13 14] [10 11 12 13 14 15 16 17 18 19] [15 16 17 18 19 20 21 22 23 24] [20 21 22 23 24 25 26 27 28 29]] array([[ 1, 3, 5, 7, 9], [ 6, 8, 10, 12, 14], [11, 13, 15, 17, 19], [16, 18, 20, 22, 24], [21, 23, 25, 27, 29]]) ### 1.5. Boolean array indexing * Finding the elements bigger than ... ```python bool_idx = (a>12) print(bool_idx) ``` [[False False False False False False False False False False] [False False False False False False False False True True] [False False False True True True True True True True] [ True True True True True True True True True True] [ True True True True True True True True True True]] * Printing the elements bigger than ... ```python print(a[bool_idx]) ``` [13 14 13 14 15 16 17 18 19 15 16 17 18 19 20 21 22 23 24 20 21 22 23 24 25 26 27 28 29] ### 1.6. 
Elementwise (vectorial-vectorial and vectorial-scalar) math * Create an zero-ed matrix: ```python a = np.zeros((5,5), np.int32) print(a) ``` [[0 0 0 0 0] [0 0 0 0 0] [0 0 0 0 0] [0 0 0 0 0] [0 0 0 0 0]] * Change to 1 from coordinate (1,1) to coordinate (4,4): ```python a[1:4,1:4] = 1 print(a) ``` [[0 0 0 0 0] [0 1 1 1 0] [0 1 1 1 0] [0 1 1 1 0] [0 0 0 0 0]] * Vectorial-scalar addition: ```python a[1:4, 1:4] += 1 print(a) ``` [[0 0 0 0 0] [0 2 2 2 0] [0 2 2 2 0] [0 2 2 2 0] [0 0 0 0 0]] * A new matrix: ```python b = np.ones((5,5), np.int32) print(b) ``` [[1 1 1 1 1] [1 1 1 1 1] [1 1 1 1 1] [1 1 1 1 1] [1 1 1 1 1]] * Vectorial addition: ```python c = a + b print(c) ``` [[1 1 1 1 1] [1 3 3 3 1] [1 3 3 3 1] [1 3 3 3 1] [1 1 1 1 1]] * Vectorial substraction: ```python d = c - b print(d) ``` [[0 0 0 0 0] [0 2 2 2 0] [0 2 2 2 0] [0 2 2 2 0] [0 0 0 0 0]] * Vectorial multiplication (not matrix multiplication!): ```python c = c * d print(c) ``` [[0 0 0 0 0] [0 6 6 6 0] [0 6 6 6 0] [0 6 6 6 0] [0 0 0 0 0]] * Floating-point vectorial division: ```python c = c / b print(c) ``` [[0. 0. 0. 0. 0.] [0. 6. 6. 6. 0.] [0. 6. 6. 6. 0.] [0. 6. 6. 6. 0.] [0. 0. 0. 0. 0.]] * Fixed-point (integer) vectorial division: ```python c = d // b print(c) ``` [[0 0 0 0 0] [0 2 2 2 0] [0 2 2 2 0] [0 2 2 2 0] [0 0 0 0 0]] ### 1.7. Broadcasting In vectorized operations, NumPy "extends" scalars and arrays with one of its dimensions equal to 1 to the size of the other(s) array(s). ```python a = np.ones((5,3)) a ``` array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]) ```python b = np.arange(1) b ``` array([0]) ```python b += 1 b ``` array([1]) * Broadcasting of a $1\times 1$ matrix: ```python a+b # 'a' is 5x3 and 'b' is 1x1 ``` array([[2., 2., 2.], [2., 2., 2.], [2., 2., 2.], [2., 2., 2.], [2., 2., 2.]]) * Broadcasting of a $1\times 3$ matrix: ```python b = np.arange(3) b ``` array([0, 1, 2]) ```python a+b # 'a' is 5x3 and 'b' is '1x3' ``` array([[1., 2., 3.], [1., 2., 3.], [1., 2., 3.], [1., 2., 3.], [1., 2., 3.]]) * Broadcasting of a $5\times 1$ matrix: ```python b = np.arange(5) b ``` array([0, 1, 2, 3, 4]) ```python b = b.reshape((5,1)) # (Rows, Columns) b ``` array([[0], [1], [2], [3], [4]]) ```python a+b ``` array([[1., 1., 1.], [2., 2., 2.], [3., 3., 3.], [4., 4., 4.], [5., 5., 5.]]) * If the arrays have different shapes and s can not be "broadcasted", `ValueError: frames are not aligned` is thrown. ```python b = np.arange(4)[:, None] b ``` array([[0], [1], [2], [3]]) ```python a.shape ``` (5, 3) ```python b.shape ``` (4, 1) ```python a+b ``` ### 1.8. Matricial math Provides basic matrix computation. * Let's define a "chessboard" matrix: ```python a = np.array([[(i+j)%2 for j in range(10)] for i in range(10)]) print(a, a.shape) ``` [[0 1 0 1 0 1 0 1 0 1] [1 0 1 0 1 0 1 0 1 0] [0 1 0 1 0 1 0 1 0 1] [1 0 1 0 1 0 1 0 1 0] [0 1 0 1 0 1 0 1 0 1] [1 0 1 0 1 0 1 0 1 0] [0 1 0 1 0 1 0 1 0 1] [1 0 1 0 1 0 1 0 1 0] [0 1 0 1 0 1 0 1 0 1] [1 0 1 0 1 0 1 0 1 0]] (10, 10) ... and a 1-column matrix: ```python b = np.array([[1] for i in range(10)]) print(b, b.shape) ``` [[1] [1] [1] [1] [1] [1] [1] [1] [1] [1]] (10, 1) * Product matrix-matrix: ```python c = np.dot(a,b) print(c) ``` [[5] [5] [5] [5] [5] [5] [5] [5] [5] [5]] * Sum of all elements of a matrix: ```python print(np.sum(c)) ``` 50 ```python print(np.sum(a)) ``` 50 * Compute the maximum of a matrix: ```python print(np.max(c)) ``` 5 * Matrix transpose: ```python print(c.T, c.T.shape, c.shape) ``` [[5 5 5 5 5 5 5 5 5 5]] (1, 10) (10, 1) ### 1.9. 
How fast is Numpy's array math? ```python a = np.array([[(i*10+j) for j in range(10)] for i in range(10)]) print(a, a.shape) ``` [[ 0 1 2 3 4 5 6 7 8 9] [10 11 12 13 14 15 16 17 18 19] [20 21 22 23 24 25 26 27 28 29] [30 31 32 33 34 35 36 37 38 39] [40 41 42 43 44 45 46 47 48 49] [50 51 52 53 54 55 56 57 58 59] [60 61 62 63 64 65 66 67 68 69] [70 71 72 73 74 75 76 77 78 79] [80 81 82 83 84 85 86 87 88 89] [90 91 92 93 94 95 96 97 98 99]] (10, 10) ```python a[:1] # First row (a matrix) ``` array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]) ```python a[:1].shape ``` (1, 10) ```python a[:1][0] # First element of a matrix of one elment (a vector) ``` array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) ```python a[:1][0].shape ``` (10,) ```python b = a[:1][0] b ``` array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) * Add `b[]` to all the rows of `a[][]` using scalar arithmetic: ```python c = np.empty_like(a) def add(): for i in range(a.shape[1]): for j in range(a.shape[0]): c[i, j] = a[i, j] + b[j] %timeit add() print(c) ``` 53.5 µs ± 6.86 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each) [[ 0 2 4 6 8 10 12 14 16 18] [ 10 12 14 16 18 20 22 24 26 28] [ 20 22 24 26 28 30 32 34 36 38] [ 30 32 34 36 38 40 42 44 46 48] [ 40 42 44 46 48 50 52 54 56 58] [ 50 52 54 56 58 60 62 64 66 68] [ 60 62 64 66 68 70 72 74 76 78] [ 70 72 74 76 78 80 82 84 86 88] [ 80 82 84 86 88 90 92 94 96 98] [ 90 92 94 96 98 100 102 104 106 108]] * Add `b[]` to all the rows of `a[][]` using vectorial arithmetic: ```python c = np.empty_like(a) def add(): for i in range(a.shape[1]): c[i, :] = a[i, :] + b %timeit add() print(c) ``` 21.3 µs ± 4.98 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each) [[ 0 2 4 6 8 10 12 14 16 18] [ 10 12 14 16 18 20 22 24 26 28] [ 20 22 24 26 28 30 32 34 36 38] [ 30 32 34 36 38 40 42 44 46 48] [ 40 42 44 46 48 50 52 54 56 58] [ 50 52 54 56 58 60 62 64 66 68] [ 60 62 64 66 68 70 72 74 76 78] [ 70 72 74 76 78 80 82 84 86 88] [ 80 82 84 86 88 90 92 94 96 98] [ 90 92 94 96 98 100 102 104 106 108]] * Add `b[]` to all the rows of `a[][]` using fully vectorial arithmetic: ```python %timeit c = a + b # <- broadcasting is faster print(c) ``` ### 1.10. Structured arrays * Create a 1D array of (two) records, where each record has the structure (int, float, char[10]). ```python x = np.array([(1,2.,'Hello'), (3,4.,"World")], dtype=[('first', 'i4'),('second', 'f4'), ('third', 'S10')]) x ``` * Get the first element of every record: ```python x['first'] ``` * Get the first record: ```python x[0] ``` * Get the second element of every record: ```python x['second'] ``` * Third element of every record: ```python x['third'] ``` ## 2. [Matplotlib](http://matplotlib.org) A Python 2D plotting library. ### 2.1. Installation ``` pip install matplotlib ``` ### 2.2. Configure Matplotlib in-line of IPython (~Jupyter) notebook ```python %matplotlib inline ``` ### 2.3. Importing it ```python import matplotlib.pyplot as plt ``` ### 2.4. Drawing data structures (matrices): ```python chess_board = np.zeros([8, 8], dtype=int) chess_board[0::2, 1::2] = 1 chess_board[1::2, 0::2] = 1 plt.matshow(chess_board, cmap=plt.cm.gray) ``` ### 2.5. Drawing 2D curves ```python resolution = 100 x = np.arange(0, 3*np.pi, np.pi/resolution) si = np.sin(x) co = np.cos(x) plt.plot(x, si, c = 'r') plt.plot(x, co, c = 'g') plt.legend(['$\sin(x)$', '$\cos(x)$']) plt.xlabel('radians') plt.title('sine($x$) vs. cosine($x$)') plt.xticks(x*resolution, ['0', '$\pi$', '$2\pi$'], rotation='horizontal') plt.xlim(0,3*np.pi) plt.show() ``` ### 2.6. 
Drawing 3D curves ```python x = np.array([[(x+y)/25 for x in range(256)] for y in range(256)]) si = np.sin(x) plt.imshow(si, cmap='hot', interpolation='nearest') plt.show() ``` ```python # https://github.com/AeroPython/Taller-Aeropython-PyConEs16 def funcion(x,y): return np.cos(x) + np.sin(y) x_1d = np.linspace(0, 5, 100) y_1d = np.linspace(-2, 4, 100) X, Y = np.meshgrid(x_1d, y_1d) Z = funcion(X,Y) plt.contourf(X, Y, Z, np.linspace(-2, 2, 100),cmap=plt.cm.Spectral) plt.colorbar() cs = plt.contour(X, Y, Z, np.linspace(-2, 2, 9), colors='k') plt.clabel(cs) ``` ## 3. [SciPy](https://docs.scipy.org/doc/scipy/reference/) [SciPy](http://cs231n.github.io/python-numpy-tutorial/#numpy-array-indexing) provides a large number of functions that operate on numpy arrays and are useful for different types of scientific and engineering applications such as: 1. [Clustering](https://docs.scipy.org/doc/scipy/reference/cluster.html). 2. [Discrete Fourier Analysis](https://docs.scipy.org/doc/scipy/reference/fftpack.html). 3. [Interpolation](https://docs.scipy.org/doc/scipy/reference/interpolate.html). 4. [Linear algebra](https://docs.scipy.org/doc/scipy/reference/linalg.html). 5. [Signal](https://docs.scipy.org/doc/scipy/reference/signal.html) and [Image processing](https://docs.scipy.org/doc/scipy/reference/ndimage.html). 6. [Optimization](https://docs.scipy.org/doc/scipy/reference/optimize.html). 7. [Sparse matrix](https://docs.scipy.org/doc/scipy/reference/sparse.html) and [sparse linear algebra](https://docs.scipy.org/doc/scipy/reference/sparse.linalg.html). ### 3.1. Installation ``` pip install scipy ``` ### 3.1.1. Optimization example ```python # http://www.scipy-lectures.org/advanced/mathematical_optimization/ from scipy import optimize ``` ```python def f(x): return -np.exp(-(x - .7)**2) ``` ```python sol = optimize.brent(f) print('x =', sol, '\nmin =', f(sol)) ``` ```python x = np.arange(-10, 10, 0.1) plt.plot(x, f(x)) plt.plot([sol],[f(sol)], 'ro') plt.show() ``` ## 4. [Pandas](http://pandas.pydata.org/) High-performance data structures and data analysis tools for the Python programming language (similar to [R](https://en.wikipedia.org/wiki/R_(programming_language))). Some tools are: 1. [Statistical functions (covariance, correlation)](http://pandas.pydata.org/pandas-docs/stable/computation.html#statistical-functions). 2. [Window functions](http://pandas.pydata.org/pandas-docs/stable/computation.html#window-functions). 3. [Time series](http://pandas.pydata.org/pandas-docs/stable/timeseries.html). 4. [Analysis of sparse data](http://pandas.pydata.org/pandas-docs/stable/sparse.html). ### 4.1. Installation ``` pip3 install pandas ``` ### 4.2. Example Create a table with data: ```python import numpy as np import pandas as pd df = pd.DataFrame({'int_col' : [1, 2, 6, 8, -1], 'float_col' : [0.1, 0.2, 0.2, 10.1, None], 'str_col' : ['a', 'b', None, 'c', 'a']}) print(df) df ``` Arithmetic average of a column: ```python df2 = df.copy() mean = df2['float_col'].mean() mean ``` Replace undefined elements: ```python df3 = df['float_col'].fillna(mean) df3 ``` Create a table by concatenating columns: ```python df4 = pd.concat([df3, df['int_col'], df['str_col']], axis=1) df4 ``` ## 5. [SymPy](http://www.sympy.org/en/index.html) A Python library for symbolic mathematics. Among other things, it provides: 1. [Symbolic simplification](http://docs.sympy.org/latest/tutorial/simplification.html). 2. [Calculus (derivatives, integrals, limits, and series expansions)](http://docs.sympy.org/latest/tutorial/calculus.html). 3. 
[Algebraic solver](http://docs.sympy.org/latest/tutorial/solvers.html). 4. [Matrix operations](http://docs.sympy.org/latest/tutorial/matrices.html). 5. [Combinatorics](http://docs.sympy.org/latest/modules/combinatorics/index.html). 6. [Cryptography](http://docs.sympy.org/latest/modules/crypto.html). ### 5.1. Installation ``` pip install sympy ``` ### 5.2. Example ```python from sympy import init_session init_session(use_latex='matplotlib') ``` ```python # https://github.com/AeroPython/Taller-Aeropython-PyConEs16 expr = cos(x)**2 + sin(x)**2 expr ``` ```python simplify(expr) ``` ```python expr.subs(x, y**2) ``` ```python expr = (x + y) ** 2 expr ``` ```python expr = expr.expand() expr ``` ```python expr = expr.factor() expr ``` ```python expr = expr.integrate(x) expr ``` ```python expr = expr.diff(x) expr ```
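* As a quick illustration of the algebraic solver listed above (a minimal sketch, not part of the original notebook; the quadratic chosen here is just an assumed example, and the cell re-imports what it needs so it does not rely on `init_session`):

```python
# Minimal sketch: solve x**2 - 2*x - 3 = 0 symbolically (assumed example).
from sympy import Eq, solve, symbols

x = symbols('x')
roots = solve(Eq(x**2 - 2*x - 3, 0), x)
print(roots)  # the roots are -1 and 3, since x**2 - 2*x - 3 == (x - 3)*(x + 1)
```

`solve()` also accepts the bare expression `x**2 - 2*x - 3` (an implicit "= 0"), which is the more common idiom.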
74ec891bc6bd2b9e6b3528b78f37d9604bed0346
80,275
ipynb
Jupyter Notebook
20-scientific_computation.ipynb
leowindwave/YAPT
ee5ec568ed746f90a18dc514836624d435a7ccdb
[ "CC0-1.0" ]
null
null
null
20-scientific_computation.ipynb
leowindwave/YAPT
ee5ec568ed746f90a18dc514836624d435a7ccdb
[ "CC0-1.0" ]
null
null
null
20-scientific_computation.ipynb
leowindwave/YAPT
ee5ec568ed746f90a18dc514836624d435a7ccdb
[ "CC0-1.0" ]
null
null
null
21.509914
1,013
0.465712
true
15,604
Qwen/Qwen-72B
1. YES 2. YES
0.91611
0.835484
0.765395
__label__eng_Latn
0.243223
0.6166
\title{myHDL Combinational Logic Elements: Demultiplexers (DEMUXs))} \author{Steven K Armour} \maketitle <h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Refrances" data-toc-modified-id="Refrances-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Refrances</a></span></li><li><span><a href="#Libraries-and-Helper-functions" data-toc-modified-id="Libraries-and-Helper-functions-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Libraries and Helper functions</a></span></li><li><span><a href="#Demultiplexers" data-toc-modified-id="Demultiplexers-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Demultiplexers</a></span></li><li><span><a href="#1-Channel-Input:-2-Channel-Output-demultiplexer-in-Gate-Level-Logic" data-toc-modified-id="1-Channel-Input:-2-Channel-Output-demultiplexer-in-Gate-Level-Logic-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>1 Channel Input: 2 Channel Output demultiplexer in Gate Level Logic</a></span><ul class="toc-item"><li><span><a href="#Sympy-Expression" data-toc-modified-id="Sympy-Expression-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>Sympy Expression</a></span></li><li><span><a href="#myHDL-Module" data-toc-modified-id="myHDL-Module-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>myHDL Module</a></span></li><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-4.3"><span class="toc-item-num">4.3&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Conversion" data-toc-modified-id="Verilog-Conversion-4.4"><span class="toc-item-num">4.4&nbsp;&nbsp;</span>Verilog Conversion</a></span></li><li><span><a href="#myHDL-to-Verilog-Testbech" data-toc-modified-id="myHDL-to-Verilog-Testbech-4.5"><span class="toc-item-num">4.5&nbsp;&nbsp;</span>myHDL to Verilog Testbech</a></span></li><li><span><a href="#PYNQ-Z1-Deployment" data-toc-modified-id="PYNQ-Z1-Deployment-4.6"><span class="toc-item-num">4.6&nbsp;&nbsp;</span>PYNQ-Z1 Deployment</a></span><ul class="toc-item"><li><span><a href="#Board-Circuit" data-toc-modified-id="Board-Circuit-4.6.1"><span class="toc-item-num">4.6.1&nbsp;&nbsp;</span>Board Circuit</a></span></li><li><span><a href="#Board-Constraints" data-toc-modified-id="Board-Constraints-4.6.2"><span class="toc-item-num">4.6.2&nbsp;&nbsp;</span>Board Constraints</a></span></li><li><span><a href="#Video-of-Deployment" data-toc-modified-id="Video-of-Deployment-4.6.3"><span class="toc-item-num">4.6.3&nbsp;&nbsp;</span>Video of Deployment</a></span></li></ul></li></ul></li><li><span><a href="#1-Channel-Input:4-Channel-Output-demultiplexer-in-Gate-Level-Logic" data-toc-modified-id="1-Channel-Input:4-Channel-Output-demultiplexer-in-Gate-Level-Logic-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>1 Channel Input:4 Channel Output demultiplexer in Gate Level Logic</a></span><ul class="toc-item"><li><span><a href="#Sympy-Expression" data-toc-modified-id="Sympy-Expression-5.1"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>Sympy Expression</a></span></li><li><span><a href="#myHDL-Module" data-toc-modified-id="myHDL-Module-5.2"><span class="toc-item-num">5.2&nbsp;&nbsp;</span>myHDL Module</a></span></li><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-5.3"><span class="toc-item-num">5.3&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Conversion" data-toc-modified-id="Verilog-Conversion-5.4"><span class="toc-item-num">5.4&nbsp;&nbsp;</span>Verilog Conversion</a></span></li><li><span><a 
href="#myHDL-to-Verilog-Testbench" data-toc-modified-id="myHDL-to-Verilog-Testbench-5.5"><span class="toc-item-num">5.5&nbsp;&nbsp;</span>myHDL to Verilog Testbench</a></span></li><li><span><a href="#PYNQ-Z1-Deployment" data-toc-modified-id="PYNQ-Z1-Deployment-5.6"><span class="toc-item-num">5.6&nbsp;&nbsp;</span>PYNQ-Z1 Deployment</a></span><ul class="toc-item"><li><span><a href="#Board-Circuit" data-toc-modified-id="Board-Circuit-5.6.1"><span class="toc-item-num">5.6.1&nbsp;&nbsp;</span>Board Circuit</a></span></li><li><span><a href="#Board-Constraints" data-toc-modified-id="Board-Constraints-5.6.2"><span class="toc-item-num">5.6.2&nbsp;&nbsp;</span>Board Constraints</a></span></li><li><span><a href="#Video-of-Deployment" data-toc-modified-id="Video-of-Deployment-5.6.3"><span class="toc-item-num">5.6.3&nbsp;&nbsp;</span>Video of Deployment</a></span></li></ul></li></ul></li><li><span><a href="#1-Channel-Input:4-Channel-Output-demultiplexer-via-DEMUX-Stacking" data-toc-modified-id="1-Channel-Input:4-Channel-Output-demultiplexer-via-DEMUX-Stacking-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>1 Channel Input:4 Channel Output demultiplexer via DEMUX Stacking</a></span><ul class="toc-item"><li><span><a href="#myHDL-Module" data-toc-modified-id="myHDL-Module-6.1"><span class="toc-item-num">6.1&nbsp;&nbsp;</span>myHDL Module</a></span></li><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-6.2"><span class="toc-item-num">6.2&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Conversion" data-toc-modified-id="Verilog-Conversion-6.3"><span class="toc-item-num">6.3&nbsp;&nbsp;</span>Verilog Conversion</a></span></li><li><span><a href="#myHDL-to-Verilog-Testbench" data-toc-modified-id="myHDL-to-Verilog-Testbench-6.4"><span class="toc-item-num">6.4&nbsp;&nbsp;</span>myHDL to Verilog Testbench</a></span></li><li><span><a href="#PYNQ-Z1-Deployment" data-toc-modified-id="PYNQ-Z1-Deployment-6.5"><span class="toc-item-num">6.5&nbsp;&nbsp;</span>PYNQ-Z1 Deployment</a></span><ul class="toc-item"><li><span><a href="#Board-Circuit" data-toc-modified-id="Board-Circuit-6.5.1"><span class="toc-item-num">6.5.1&nbsp;&nbsp;</span>Board Circuit</a></span></li><li><span><a href="#Board-Constraint" data-toc-modified-id="Board-Constraint-6.5.2"><span class="toc-item-num">6.5.2&nbsp;&nbsp;</span>Board Constraint</a></span></li><li><span><a href="#Video-of-Deployment" data-toc-modified-id="Video-of-Deployment-6.5.3"><span class="toc-item-num">6.5.3&nbsp;&nbsp;</span>Video of Deployment</a></span></li></ul></li></ul></li><li><span><a href="#1:2-DEMUX-via-Behavioral-IF" data-toc-modified-id="1:2-DEMUX-via-Behavioral-IF-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>1:2 DEMUX via Behavioral IF</a></span><ul class="toc-item"><li><span><a href="#myHDL-Module" data-toc-modified-id="myHDL-Module-7.1"><span class="toc-item-num">7.1&nbsp;&nbsp;</span>myHDL Module</a></span></li><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-7.2"><span class="toc-item-num">7.2&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Conversion" data-toc-modified-id="Verilog-Conversion-7.3"><span class="toc-item-num">7.3&nbsp;&nbsp;</span>Verilog Conversion</a></span></li><li><span><a href="#myHDL-to-Verilog-Testbench" data-toc-modified-id="myHDL-to-Verilog-Testbench-7.4"><span class="toc-item-num">7.4&nbsp;&nbsp;</span>myHDL to Verilog Testbench</a></span></li><li><span><a href="#PYNQ-Z1-Deployment" data-toc-modified-id="PYNQ-Z1-Deployment-7.5"><span 
class="toc-item-num">7.5&nbsp;&nbsp;</span>PYNQ-Z1 Deployment</a></span><ul class="toc-item"><li><span><a href="#Board-Circuit" data-toc-modified-id="Board-Circuit-7.5.1"><span class="toc-item-num">7.5.1&nbsp;&nbsp;</span>Board Circuit</a></span></li><li><span><a href="#Board-Constraint" data-toc-modified-id="Board-Constraint-7.5.2"><span class="toc-item-num">7.5.2&nbsp;&nbsp;</span>Board Constraint</a></span></li><li><span><a href="#Video-of-Deployment" data-toc-modified-id="Video-of-Deployment-7.5.3"><span class="toc-item-num">7.5.3&nbsp;&nbsp;</span>Video of Deployment</a></span></li></ul></li></ul></li><li><span><a href="#1:4-DEMUX-via-Behavioral-if-elif-else" data-toc-modified-id="1:4-DEMUX-via-Behavioral-if-elif-else-8"><span class="toc-item-num">8&nbsp;&nbsp;</span>1:4 DEMUX via Behavioral if-elif-else</a></span><ul class="toc-item"><li><span><a href="#myHDL-Module" data-toc-modified-id="myHDL-Module-8.1"><span class="toc-item-num">8.1&nbsp;&nbsp;</span>myHDL Module</a></span></li><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-8.2"><span class="toc-item-num">8.2&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Conversion" data-toc-modified-id="Verilog-Conversion-8.3"><span class="toc-item-num">8.3&nbsp;&nbsp;</span>Verilog Conversion</a></span></li><li><span><a href="#myHDL-to-Verilog-Testbench" data-toc-modified-id="myHDL-to-Verilog-Testbench-8.4"><span class="toc-item-num">8.4&nbsp;&nbsp;</span>myHDL to Verilog Testbench</a></span></li><li><span><a href="#PYNQ-Z1-Deployment" data-toc-modified-id="PYNQ-Z1-Deployment-8.5"><span class="toc-item-num">8.5&nbsp;&nbsp;</span>PYNQ-Z1 Deployment</a></span><ul class="toc-item"><li><span><a href="#Board-Circuit" data-toc-modified-id="Board-Circuit-8.5.1"><span class="toc-item-num">8.5.1&nbsp;&nbsp;</span>Board Circuit</a></span></li><li><span><a href="#Board-Constraint" data-toc-modified-id="Board-Constraint-8.5.2"><span class="toc-item-num">8.5.2&nbsp;&nbsp;</span>Board Constraint</a></span></li><li><span><a href="#Video-of-Deployment" data-toc-modified-id="Video-of-Deployment-8.5.3"><span class="toc-item-num">8.5.3&nbsp;&nbsp;</span>Video of Deployment</a></span></li></ul></li></ul></li><li><span><a href="#Demultiplexer-1:4-Behavioral-via-Bitvectors" data-toc-modified-id="Demultiplexer-1:4-Behavioral-via-Bitvectors-9"><span class="toc-item-num">9&nbsp;&nbsp;</span>Demultiplexer 1:4 Behavioral via Bitvectors</a></span><ul class="toc-item"><li><span><a href="#myHDL-Module" data-toc-modified-id="myHDL-Module-9.1"><span class="toc-item-num">9.1&nbsp;&nbsp;</span>myHDL Module</a></span></li><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-9.2"><span class="toc-item-num">9.2&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Conversion" data-toc-modified-id="Verilog-Conversion-9.3"><span class="toc-item-num">9.3&nbsp;&nbsp;</span>Verilog Conversion</a></span></li><li><span><a href="#myHDL-to-Verilog-Testbench" data-toc-modified-id="myHDL-to-Verilog-Testbench-9.4"><span class="toc-item-num">9.4&nbsp;&nbsp;</span>myHDL to Verilog Testbench</a></span></li><li><span><a href="#PYNQ-Z1-Board-Deployment" data-toc-modified-id="PYNQ-Z1-Board-Deployment-9.5"><span class="toc-item-num">9.5&nbsp;&nbsp;</span>PYNQ-Z1 Board Deployment</a></span><ul class="toc-item"><li><span><a href="#Board-Circuit" data-toc-modified-id="Board-Circuit-9.5.1"><span class="toc-item-num">9.5.1&nbsp;&nbsp;</span>Board Circuit</a></span></li><li><span><a href="#Board-Constraints" 
data-toc-modified-id="Board-Constraints-9.5.2"><span class="toc-item-num">9.5.2&nbsp;&nbsp;</span>Board Constraints</a></span></li><li><span><a href="#Video-of-Deployment" data-toc-modified-id="Video-of-Deployment-9.5.3"><span class="toc-item-num">9.5.3&nbsp;&nbsp;</span>Video of Deployment</a></span></li></ul></li></ul></li></ul></div> # Refrances # Libraries and Helper functions ```python #This notebook also uses the `(some) LaTeX environments for Jupyter` #https://github.com/ProfFan/latex_envs wich is part of the #jupyter_contrib_nbextensions package from myhdl import * from myhdlpeek import Peeker import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline from sympy import * init_printing() import itertools #EE drawing tools in python from https://cdelker.bitbucket.io/SchemDraw/ import SchemDraw as schem import SchemDraw.elements as e import SchemDraw.logic as l #https://github.com/jrjohansson/version_information %load_ext version_information %version_information myhdl, myhdlpeek, numpy, pandas, matplotlib, sympy, itertools, SchemDraw ``` <table><tr><th>Software</th><th>Version</th></tr><tr><td>Python</td><td>3.6.2 64bit [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)]</td></tr><tr><td>IPython</td><td>6.2.1</td></tr><tr><td>OS</td><td>Linux 4.15.0 30 generic x86_64 with debian stretch sid</td></tr><tr><td>myhdl</td><td>0.10</td></tr><tr><td>myhdlpeek</td><td>0.0.6</td></tr><tr><td>numpy</td><td>1.13.3</td></tr><tr><td>pandas</td><td>0.23.4</td></tr><tr><td>matplotlib</td><td>2.1.0</td></tr><tr><td>sympy</td><td>1.3</td></tr><tr><td>itertools</td><td>The 'itertools' distribution was not found and is required by the application</td></tr><tr><td>SchemDraw</td><td>0.3.0</td></tr><tr><td colspan='2'>Sun Sep 23 18:24:14 2018 MDT</td></tr></table> ```python #helper functions to read in the .v and .vhd generated files into python def VerilogTextReader(loc, printresult=True): with open(f'{loc}.v', 'r') as vText: VerilogText=vText.read() if printresult: print(f'***Verilog modual from {loc}.v***\n\n', VerilogText) return VerilogText def VHDLTextReader(loc, printresult=True): with open(f'{loc}.vhd', 'r') as vText: VerilogText=vText.read() if printresult: print(f'***VHDL modual from {loc}.vhd***\n\n', VerilogText) return VerilogText def ConstraintXDCTextReader(loc, printresult=True): with open(f'{loc}.xdc', 'r') as xdcText: ConstraintText=xdcText.read() if printresult: print(f'***Constraint file from {loc}.xdc***\n\n', ConstraintText) return ConstraintText ``` ```python def TruthTabelGenrator(BoolSymFunc): """ Function to generate a truth table from a sympy boolian expression BoolSymFunc: sympy boolian expression return TT: a Truth table stored in a pandas dataframe """ colsL=sorted([i for i in list(BoolSymFunc.rhs.atoms())], key=lambda x:x.sort_key()) colsR=sorted([i for i in list(BoolSymFunc.lhs.atoms())], key=lambda x:x.sort_key()) bitwidth=len(colsL) cols=colsL+colsR; cols TT=pd.DataFrame(columns=cols, index=range(2**bitwidth)) for i in range(2**bitwidth): inputs=[int(j) for j in list(np.binary_repr(i, bitwidth))] outputs=BoolSymFunc.rhs.subs({j:v for j, v in zip(colsL, inputs)}) inputs.append(int(bool(outputs))) TT.iloc[i]=inputs return TT ``` # Demultiplexers \begin{definition}\label{def:MUX} A Demultiplexer, typically referred to as a DEMUX, is a Digital(or analog) switching unit that takes one input channel to be streamed to a single output channel from many via a control input. 
For single input DEMUXs with $2^n$ outputs, there are then $n$ input selection signals that make up the control word to select the output channel for the input. Thus a DEMUX is the conjugate digital element to the MUX such that a MUX is an $N:1$ mapping device and a DEMUX is a $1:N$ mapping device. From a behavioral standpoint DEMUXs are implemented with the same `if-elif-else (case)` control statements as a MUX but for each case, all outputs must be specified. Furthermore, DEMUXs are often implemented via stacked MUXs since there governing equation is the Product SET (Cartesian product) all internal products of a MUXs SOP equation \end{definition} # 1 Channel Input: 2 Channel Output demultiplexer in Gate Level Logic \begin{figure} \centerline{\includegraphics{DEMUX12Gate.png}} \caption{\label{fig:D12G} 1:2 DEMUX Symbol and Gate internals} \end{figure} ## Sympy Expression ```python x, s, y0, y1=symbols('x, s, y_0, y_1') y12_0Eq=Eq(y0, ~s&x) y12_1Eq=Eq(y1, s&x) y12_0Eq, y12_1Eq ``` ```python T0=TruthTabelGenrator(y12_0Eq) T1=TruthTabelGenrator(y12_1Eq) T10=pd.merge(T1, T0, how='left') T10 ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>s</th> <th>x</th> <th>y_1</th> <th>y_0</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>1</th> <td>0</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>2</th> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> </tr> </tbody> </table> </div> ```python y12_0EqN=lambdify([s, x], y12_0Eq.rhs, dummify=False) y12_1EqN=lambdify([s, x], y12_1Eq.rhs, dummify=False) SystmaticVals=np.array(list(itertools.product([0,1], repeat=2))) print(SystmaticVals) print(y12_0EqN(SystmaticVals[:, 0], SystmaticVals[:, 1]).astype(int)) print(y12_1EqN(SystmaticVals[:, 0], SystmaticVals[:, 1]).astype(int)) ``` [[0 0] [0 1] [1 0] [1 1]] [0 1 0 0] [0 0 0 1] ## myHDL Module ```python @block def DEMUX1_2_Combo(x, s, y0, y1): """ 1:2 DEMUX written in full combo Inputs: x(bool): input feed s(bool): channel select Outputs: y0(bool): ouput channel 0 y1(bool): ouput channel 1 """ @always_comb def logic(): y0.next= not s and x y1.next= s and x return instances() ``` ## myHDL Testing ```python TestLen=10 SystmaticVals=list(itertools.product([0,1], repeat=2)) xTVs=np.array([i[1] for i in SystmaticVals]).astype(int) np.random.seed(15) xTVs=np.append(xTVs, np.random.randint(0,2, TestLen)).astype(int) sTVs=np.array([i[0] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(16) sTVs=np.append(sTVs, np.random.randint(0,2, TestLen)).astype(int) TestLen=len(xTVs) SystmaticVals, sTVs, xTVs ``` ([(0, 0), (0, 1), (1, 0), (1, 1)], array([0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0]), array([0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1])) ```python Peeker.clear() x=Signal(bool(0)); Peeker(x, 'x') s=Signal(bool(0)); Peeker(s, 's') y0=Signal(bool(0)); Peeker(y0, 'y0') y1=Signal(bool(0)); Peeker(y1, 'y1') DUT=DEMUX1_2_Combo(x, s, y0, y1) def DEMUX1_2_Combo_TB(): """ myHDL only testbench for module `DEMUX1_2_Combo` """ @instance def stimules(): for i in range(TestLen): x.next=int(xTVs[i]) s.next=int(sTVs[i]) yield delay(1) raise StopSimulation() return 
instances() sim=Simulation(DUT, DEMUX1_2_Combo_TB(), *Peeker.instances()).run() ``` ```python Peeker.to_wavedrom('x', 's', 'y0','y1') ``` <div></div> ```python DEMUX1_2_ComboData=Peeker.to_dataframe() DEMUX1_2_ComboData=DEMUX1_2_ComboData[['x', 's', 'y0','y1']] DEMUX1_2_ComboData ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>x</th> <th>s</th> <th>y0</th> <th>y1</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>1</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>2</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>1</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>4</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>5</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>6</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>7</th> <td>1</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>9</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>10</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>11</th> <td>1</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>13</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> </tr> </tbody> </table> </div> ```python DEMUX1_2_ComboData['y0Ref']=DEMUX1_2_ComboData.apply(lambda row:y12_0EqN(row['s'], row['x']), axis=1).astype(int) DEMUX1_2_ComboData['y1Ref']=DEMUX1_2_ComboData.apply(lambda row:y12_1EqN(row['s'], row['x']), axis=1).astype(int) DEMUX1_2_ComboData ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>x</th> <th>s</th> <th>y0</th> <th>y1</th> <th>y0Ref</th> <th>y1Ref</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>1</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>2</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>1</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>4</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>5</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>6</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>7</th> <td>1</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>9</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>10</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>11</th> <td>1</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>13</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> </tr> </tbody> </table> </div> ```python Test0=(DEMUX1_2_ComboData['y0']==DEMUX1_2_ComboData['y0Ref']).all() Test1=(DEMUX1_2_ComboData['y1']==DEMUX1_2_ComboData['y1Ref']).all() Test=Test0&Test1 print(f'Module `DEMUX1_2_Combo` works as exspected: {Test}') ``` Module `DEMUX1_2_Combo` works as exspected: True ## Verilog Conversion ```python DUT.convert() VerilogTextReader('DEMUX1_2_Combo'); ``` ***Verilog modual from 
DEMUX1_2_Combo.v*** // File: DEMUX1_2_Combo.v // Generated by MyHDL 0.10 // Date: Sun Sep 23 18:24:15 2018 `timescale 1ns/10ps module DEMUX1_2_Combo ( x, s, y0, y1 ); // 1:2 DEMUX written in full combo // Inputs: // x(bool): input feed // s(bool): channel select // Outputs: // y0(bool): ouput channel 0 // y1(bool): ouput channel 1 input x; input s; output y0; wire y0; output y1; wire y1; assign y0 = ((!s) && x); assign y1 = (s && x); endmodule \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_2_Combo_RTL.png}} \caption{\label{fig:D12CRTL} DEMUX1_2_Combo RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_2_Combo_SYN.png}} \caption{\label{fig:D12CSYN} DEMUX1_2_Combo Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_2_Combo_IMP.png}} \caption{\label{fig:D12CIMP} DEMUX1_2_Combo Implementated Schematic; Xilinx Vivado 2017.4} \end{figure} ## myHDL to Verilog Testbech ```python #create BitVectors xTVs=intbv(int(''.join(xTVs.astype(str)), 2))[TestLen:] sTVs=intbv(int(''.join(sTVs.astype(str)), 2))[TestLen:] xTVs, bin(xTVs), sTVs, bin(sTVs) ``` (intbv(5479), '1010101100111', intbv(3830), '111011110110') ```python @block def DEMUX1_2_Combo_TBV(): """ myHDL -> testbench for module `DEMUX1_2_Combo` """ x=Signal(bool(0)) s=Signal(bool(0)) y0=Signal(bool(0)) y1=Signal(bool(0)) @always_comb def print_data(): print(x, s, y0, y1) #Test Signal Bit Vectors xTV=Signal(xTVs) sTV=Signal(sTVs) DUT=DEMUX1_2_Combo(x, s, y0, y1) @instance def stimules(): for i in range(TestLen): x.next=int(xTV[i]) s.next=int(sTV[i]) yield delay(1) raise StopSimulation() return instances() TB=DEMUX1_2_Combo_TBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('DEMUX1_2_Combo_TBV'); ``` <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> ***Verilog modual from DEMUX1_2_Combo_TBV.v*** // File: DEMUX1_2_Combo_TBV.v // Generated by MyHDL 0.10 // Date: Sun Sep 23 18:24:16 2018 `timescale 1ns/10ps module DEMUX1_2_Combo_TBV ( ); // myHDL -> testbench for module `DEMUX1_2_Combo` reg x = 0; reg s = 0; wire y0; wire y1; wire [13:0] xTV; wire [13:0] sTV; assign xTV = 14'd5479; assign sTV = 14'd3830; always @(x, y0, s, y1) begin: DEMUX1_2_COMBO_TBV_PRINT_DATA $write("%h", x); $write(" "); $write("%h", s); $write(" "); $write("%h", y0); $write(" "); $write("%h", y1); $write("\n"); end assign y0 = ((!s) && x); assign y1 = (s && x); initial begin: DEMUX1_2_COMBO_TBV_STIMULES integer i; for (i=0; i<14; i=i+1) begin x <= xTV[i]; s <= sTV[i]; # 1; end $finish; end endmodule /home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: xTV category=ToVerilogWarning /home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: sTV category=ToVerilogWarning ## PYNQ-Z1 Deployment ### Board Circuit \begin{figure} \centerline{\includegraphics[width=5cm]{DEMUX12PYNQZ1Circ.png}} \caption{\label{fig:D12Circ} 1:2 DEMUX PYNQ-Z1 (Non SoC) conceptualized circuit} \end{figure} ### Board Constraints ```python ConstraintXDCTextReader('DEMUX1_2'); ``` ***Constraint file from DEMUX1_2.xdc*** ## Switches set_property -dict {PACKAGE_PIN M20 IOSTANDARD LVCMOS33} [get_ports {s}]; ##SW0 ## Buttons set_property -dict {PACKAGE_PIN 
L19 IOSTANDARD LVCMOS33} [get_ports {x}]; ##BT3 ## RGBLEDs set_property -dict { PACKAGE_PIN L15 IOSTANDARD LVCMOS33 } [get_ports {y0}]; ##LD4_Blue set_property -dict { PACKAGE_PIN G17 IOSTANDARD LVCMOS33 } [get_ports {y1}]; ##LD4_Green ### Video of Deployment DEMUX1_2_Combo on PYNQ-Z1 ([YouTube](https://www.youtube.com/watch?v=LJG4Z2kxEKE)) # 1 Channel Input:4 Channel Output demultiplexer in Gate Level Logic ## Sympy Expression ```python x, s0, s1, y0, y1, y2, y3=symbols('x, s0, s1, y0, y1, y2, y3') y14_0Eq=Eq(y0, ~s0&~s1&x) y14_1Eq=Eq(y1, s0&~s1&x) y14_2Eq=Eq(y2, ~s0&s1&x) y14_3Eq=Eq(y3, s0&s1&x) y14_0Eq, y14_1Eq, y14_2Eq, y14_3Eq ``` ```python T0=TruthTabelGenrator(y14_0Eq) T1=TruthTabelGenrator(y14_1Eq) T2=TruthTabelGenrator(y14_2Eq) T3=TruthTabelGenrator(y14_3Eq) T10=pd.merge(T1, T0, how='left') T20=pd.merge(T2, T10, how='left') T30=pd.merge(T3, T20, how='left') T30 ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>s0</th> <th>s1</th> <th>x</th> <th>y3</th> <th>y2</th> <th>y1</th> <th>y0</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>1</th> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>2</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>4</th> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>5</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>6</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>7</th> <td>1</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> </tbody> </table> </div> ```python y14_0EqN=lambdify([x, s0, s1], y14_0Eq.rhs, dummify=False) y14_1EqN=lambdify([x, s0, s1], y14_1Eq.rhs, dummify=False) y14_2EqN=lambdify([x, s0, s1], y14_2Eq.rhs, dummify=False) y14_3EqN=lambdify([x, s0, s1], y14_3Eq.rhs, dummify=False) SystmaticVals=np.array(list(itertools.product([0,1], repeat=3))) print(SystmaticVals) print(y14_0EqN(SystmaticVals[:, 2], SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)) print(y14_1EqN(SystmaticVals[:, 2], SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)) print(y14_2EqN(SystmaticVals[:, 2], SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)) print(y14_3EqN(SystmaticVals[:, 2], SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)) ``` [[0 0 0] [0 0 1] [0 1 0] [0 1 1] [1 0 0] [1 0 1] [1 1 0] [1 1 1]] [0 1 0 0 0 0 0 0] [0 0 0 1 0 0 0 0] [0 0 0 0 0 1 0 0] [0 0 0 0 0 0 0 1] ## myHDL Module ```python @block def DEMUX1_4_Combo(x, s0, s1, y0, y1, y2, y3): """ 1:4 DEMUX written in full combo Inputs: x(bool): input feed s0(bool): channel select 0 s1(bool): channel select 1 Outputs: y0(bool): ouput channel 0 y1(bool): ouput channel 1 y2(bool): ouput channel 2 y3(bool): ouput channel 3 """ @always_comb def logic(): y0.next= (not s0) and (not s1) and x y1.next= s0 and (not s1) and x y2.next= (not s0) and s1 and x y3.next= s0 and s1 and x return instances() ``` ## myHDL Testing ```python TestLen=10 SystmaticVals=list(itertools.product([0,1], repeat=3)) xTVs=np.array([i[2] for i in 
SystmaticVals]).astype(int) np.random.seed(15) xTVs=np.append(xTVs, np.random.randint(0,2, TestLen)).astype(int) s0TVs=np.array([i[1] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(16) s0TVs=np.append(s0TVs, np.random.randint(0,2, TestLen)).astype(int) s1TVs=np.array([i[0] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(17) s1TVs=np.append(s1TVs, np.random.randint(0,2, TestLen)).astype(int) TestLen=len(xTVs) SystmaticVals, xTVs, s0TVs, s1TVs ``` ([(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)], array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1]), array([0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0]), array([0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1])) ```python Peeker.clear() x=Signal(bool(0)); Peeker(x, 'x') s0=Signal(bool(0)); Peeker(s0, 's0') s1=Signal(bool(0)); Peeker(s1, 's1') y0=Signal(bool(0)); Peeker(y0, 'y0') y1=Signal(bool(0)); Peeker(y1, 'y1') y2=Signal(bool(0)); Peeker(y2, 'y2') y3=Signal(bool(0)); Peeker(y3, 'y3') DUT=DEMUX1_4_Combo(x, s0, s1, y0, y1, y2, y3) def DEMUX1_4_Combo_TB(): """ myHDL only testbench for module `DEMUX1_4_Combo` """ @instance def stimules(): for i in range(TestLen): x.next=int(xTVs[i]) s0.next=int(s0TVs[i]) s1.next=int(s1TVs[i]) yield delay(1) raise StopSimulation() return instances() sim=Simulation(DUT, DEMUX1_4_Combo_TB(), *Peeker.instances()).run() ``` ```python Peeker.to_wavedrom('x', 's1', 's0', 'y0', 'y1', 'y2', 'y3') ``` <div></div> ```python DEMUX1_4_ComboData=Peeker.to_dataframe() DEMUX1_4_ComboData=DEMUX1_4_ComboData[['x', 's1', 's0', 'y0', 'y1', 'y2', 'y3']] DEMUX1_4_ComboData ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>x</th> <th>s1</th> <th>s0</th> <th>y0</th> <th>y1</th> <th>y2</th> <th>y3</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>1</th> <td>1</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>2</th> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>4</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>5</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>6</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>7</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>8</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>9</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>10</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>11</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>13</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> 
</tr> <tr> <th>14</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>15</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>16</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>17</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> </tbody> </table> </div> ```python DEMUX1_4_ComboData['y0Ref']=DEMUX1_4_ComboData.apply(lambda row:y14_0EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_ComboData['y1Ref']=DEMUX1_4_ComboData.apply(lambda row:y14_1EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_ComboData['y2Ref']=DEMUX1_4_ComboData.apply(lambda row:y14_2EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_ComboData['y3Ref']=DEMUX1_4_ComboData.apply(lambda row:y14_3EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_ComboData ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>x</th> <th>s1</th> <th>s0</th> <th>y0</th> <th>y1</th> <th>y2</th> <th>y3</th> <th>y0Ref</th> <th>y1Ref</th> <th>y2Ref</th> <th>y3Ref</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>1</th> <td>1</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>2</th> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>4</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>5</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>6</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>7</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>8</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>9</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>10</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>11</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>13</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>14</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>15</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> 
</tr> <tr> <th>16</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>17</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> </tbody> </table> </div> ```python Test0=(DEMUX1_4_ComboData['y0']==DEMUX1_4_ComboData['y0Ref']).all() Test1=(DEMUX1_4_ComboData['y1']==DEMUX1_4_ComboData['y1Ref']).all() Test2=(DEMUX1_4_ComboData['y2']==DEMUX1_4_ComboData['y2Ref']).all() Test3=(DEMUX1_4_ComboData['y3']==DEMUX1_4_ComboData['y3Ref']).all() Test=Test0&Test1&Test2&Test3 print(f'Module `DEMUX1_4_Combo` works as exspected: {Test}') ``` Module `DEMUX1_4_Combo` works as exspected: True ## Verilog Conversion ```python DUT.convert() VerilogTextReader('DEMUX1_4_Combo'); ``` ***Verilog modual from DEMUX1_4_Combo.v*** // File: DEMUX1_4_Combo.v // Generated by MyHDL 0.10 // Date: Sun Sep 23 18:24:17 2018 `timescale 1ns/10ps module DEMUX1_4_Combo ( x, s0, s1, y0, y1, y2, y3 ); // 1:4 DEMUX written in full combo // // Inputs: // x(bool): input feed // s0(bool): channel select 0 // s1(bool): channel select 1 // // Outputs: // y0(bool): ouput channel 0 // y1(bool): ouput channel 1 // y2(bool): ouput channel 2 // y3(bool): ouput channel 3 input x; input s0; input s1; output y0; wire y0; output y1; wire y1; output y2; wire y2; output y3; wire y3; assign y0 = ((!s0) && (!s1) && x); assign y1 = (s0 && (!s1) && x); assign y2 = ((!s0) && s1 && x); assign y3 = (s0 && s1 && x); endmodule \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_Combo_RTL.png}} \caption{\label{fig:D14CRTL} DEMUX1_4_Combo RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_Combo_SYN.png}} \caption{\label{fig:D14CSYN} DEMUX1_4_Combo Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_Combo_IMP.png}} \caption{\label{fig:D14CIMP} DEMUX1_4_Combo Implementated Schematic; Xilinx Vivado 2017.4} \end{figure} ## myHDL to Verilog Testbench ```python #create BitVectors xTVs=intbv(int(''.join(xTVs.astype(str)), 2))[TestLen:] s0TVs=intbv(int(''.join(s0TVs.astype(str)), 2))[TestLen:] s1TVs=intbv(int(''.join(s1TVs.astype(str)), 2))[TestLen:] xTVs, bin(xTVs), s0TVs, bin(s0TVs), s1TVs, bin(s1TVs) ``` (intbv(87399), '10101010101100111', intbv(52982), '1100111011110110', intbv(16277), '11111110010101') ```python @block def DEMUX1_4_Combo_TBV(): """ myHDL -> testbench for module `DEMUX1_4_Combo` """ x=Signal(bool(0)) s0=Signal(bool(0)) s1=Signal(bool(0)) y0=Signal(bool(0)) y1=Signal(bool(0)) y2=Signal(bool(0)) y3=Signal(bool(0)) @always_comb def print_data(): print(x, s0, s1, y0, y1, y2, y3) #Test Signal Bit Vectors xTV=Signal(xTVs) s0TV=Signal(s0TVs) s1TV=Signal(s1TVs) DUT=DEMUX1_4_Combo(x, s0, s1, y0, y1, y2, y3) @instance def stimules(): for i in range(TestLen): x.next=int(xTV[i]) s0.next=int(s0TV[i]) s1.next=int(s1TV[i]) yield delay(1) raise StopSimulation() return instances() TB=DEMUX1_4_Combo_TBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('DEMUX1_4_Combo_TBV'); ``` <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> 
***Verilog modual from DEMUX1_4_Combo_TBV.v*** // File: DEMUX1_4_Combo_TBV.v // Generated by MyHDL 0.10 // Date: Sun Sep 23 18:24:18 2018 `timescale 1ns/10ps module DEMUX1_4_Combo_TBV ( ); // myHDL -> testbench for module `DEMUX1_4_Combo` reg x = 0; wire y0; wire y1; reg s0 = 0; reg s1 = 0; wire y2; wire y3; wire [17:0] xTV; wire [17:0] s0TV; wire [17:0] s1TV; assign xTV = 18'd87399; assign s0TV = 18'd52982; assign s1TV = 18'd16277; always @(x, y0, s1, s0, y3, y2, y1) begin: DEMUX1_4_COMBO_TBV_PRINT_DATA $write("%h", x); $write(" "); $write("%h", s0); $write(" "); $write("%h", s1); $write(" "); $write("%h", y0); $write(" "); $write("%h", y1); $write(" "); $write("%h", y2); $write(" "); $write("%h", y3); $write("\n"); end assign y0 = ((!s0) && (!s1) && x); assign y1 = (s0 && (!s1) && x); assign y2 = ((!s0) && s1 && x); assign y3 = (s0 && s1 && x); initial begin: DEMUX1_4_COMBO_TBV_STIMULES integer i; for (i=0; i<18; i=i+1) begin x <= xTV[i]; s0 <= s0TV[i]; s1 <= s1TV[i]; # 1; end $finish; end endmodule /home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: xTV category=ToVerilogWarning /home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: s0TV category=ToVerilogWarning /home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: s1TV category=ToVerilogWarning ## PYNQ-Z1 Deployment ### Board Circuit \begin{figure} \centerline{\includegraphics[width=5cm]{DEMUX14PYNQZ1Circ.png}} \caption{\label{fig:D14Circ} 1:4 DEMUX PYNQ-Z1 (Non SoC) conceptualized circuit} \end{figure} ### Board Constraints ```python ConstraintXDCTextReader('DEMUX1_4'); ``` ***Constraint file from DEMUX1_4.xdc*** ## Switches set_property -dict {PACKAGE_PIN M20 IOSTANDARD LVCMOS33} [get_ports {s0}]; ##SW0 set_property -dict {PACKAGE_PIN M19 IOSTANDARD LVCMOS33} [get_ports {s1}]; ##SW1 ## Buttons set_property -dict {PACKAGE_PIN L19 IOSTANDARD LVCMOS33} [get_ports {x}]; ##BT3 ## RGBLEDs set_property -dict { PACKAGE_PIN L15 IOSTANDARD LVCMOS33 } [get_ports {y0}]; ##LD4_Blue set_property -dict { PACKAGE_PIN G17 IOSTANDARD LVCMOS33 } [get_ports {y1}]; ##LD4_Green set_property -dict { PACKAGE_PIN L14 IOSTANDARD LVCMOS33 } [get_ports {y2}]; ##LD5_Green set_property -dict { PACKAGE_PIN M15 IOSTANDARD LVCMOS33 } [get_ports {y3}]; ##LD5_Red ### Video of Deployment DEMUX1_4_Combo on PYNQ-Z1 ([YouTube](https://www.youtube.com/watch?v=VT5Iyy8dMdg)) # 1 Channel Input:4 Channel Output demultiplexer via DEMUX Stacking ## myHDL Module ```python @block def DEMUX1_4_DMS(x, s0, s1, y0, y1, y2, y3): """ 1:4 DEMUX via DEMUX Stacking Inputs: x(bool): input feed s0(bool): channel select 0 s1(bool): channel select 1 Outputs: y0(bool): ouput channel 0 y1(bool): ouput channel 1 y2(bool): ouput channel 2 y3(bool): ouput channel 3 """ s0_y0y1_WIRE=Signal(bool(0)) s0_y2y3_WIRE=Signal(bool(0)) x_s1_DEMUX=DEMUX1_2_Combo(x, s1, s0_y0y1_WIRE, s0_y2y3_WIRE) s1_y0y1_DEMUX=DEMUX1_2_Combo(s0_y0y1_WIRE, s0, y0, y1) s1_y2y3_DEMUX=DEMUX1_2_Combo(s0_y2y3_WIRE, s0, y2, y3) return instances() ``` ## myHDL Testing ```python TestLen=10 SystmaticVals=list(itertools.product([0,1], repeat=3)) xTVs=np.array([i[2] for i in SystmaticVals]).astype(int) np.random.seed(15) xTVs=np.append(xTVs, np.random.randint(0,2, TestLen)).astype(int) s0TVs=np.array([i[1] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call 
in order to produce differint values for each call np.random.seed(16) s0TVs=np.append(s0TVs, np.random.randint(0,2, TestLen)).astype(int) s1TVs=np.array([i[0] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(17) s1TVs=np.append(s1TVs, np.random.randint(0,2, TestLen)).astype(int) TestLen=len(xTVs) SystmaticVals, xTVs, s0TVs, s1TVs ``` ([(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)], array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1]), array([0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0]), array([0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1])) ```python Peeker.clear() x=Signal(bool(0)); Peeker(x, 'x') s0=Signal(bool(0)); Peeker(s0, 's0') s1=Signal(bool(0)); Peeker(s1, 's1') y0=Signal(bool(0)); Peeker(y0, 'y0') y1=Signal(bool(0)); Peeker(y1, 'y1') y2=Signal(bool(0)); Peeker(y2, 'y2') y3=Signal(bool(0)); Peeker(y3, 'y3') DUT=DEMUX1_4_DMS(x, s0, s1, y0, y1, y2, y3) def DEMUX1_4_DMS_TB(): """ myHDL only testbench for module `DEMUX1_4_DMS` """ @instance def stimules(): for i in range(TestLen): x.next=int(xTVs[i]) s0.next=int(s0TVs[i]) s1.next=int(s1TVs[i]) yield delay(1) raise StopSimulation() return instances() sim=Simulation(DUT, DEMUX1_4_DMS_TB(), *Peeker.instances()).run() ``` ```python Peeker.to_wavedrom('x', 's1', 's0', 'y0', 'y1', 'y2', 'y3') ``` <div></div> ```python DEMUX1_4_DMSData=Peeker.to_dataframe() DEMUX1_4_DMSData=DEMUX1_4_DMSData[['x', 's1', 's0', 'y0', 'y1', 'y2', 'y3']] DEMUX1_4_DMSData ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>x</th> <th>s1</th> <th>s0</th> <th>y0</th> <th>y1</th> <th>y2</th> <th>y3</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>1</th> <td>1</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>2</th> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>4</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>5</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>6</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>7</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>8</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>9</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>10</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>11</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>13</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>14</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>15</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>16</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> 
<td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>17</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> </tbody> </table> </div> ```python Test=DEMUX1_4_DMSData==DEMUX1_4_ComboData[['x', 's1', 's0', 'y0', 'y1', 'y2', 'y3']] Test=Test.all().all() print(f'DEMUX1_4_DMS equivlinet to DEMUX1_4_Combo: {Test}') ``` DEMUX1_4_DMS equivlinet to DEMUX1_4_Combo: True ## Verilog Conversion ```python DUT.convert() VerilogTextReader('DEMUX1_4_DMS'); ``` ***Verilog modual from DEMUX1_4_DMS.v*** // File: DEMUX1_4_DMS.v // Generated by MyHDL 0.10 // Date: Sun Sep 23 18:24:20 2018 `timescale 1ns/10ps module DEMUX1_4_DMS ( x, s0, s1, y0, y1, y2, y3 ); // 1:4 DEMUX via DEMUX Stacking // Inputs: // x(bool): input feed // s0(bool): channel select 0 // s1(bool): channel select 1 // // Outputs: // y0(bool): ouput channel 0 // y1(bool): ouput channel 1 // y2(bool): ouput channel 2 // y3(bool): ouput channel 3 input x; input s0; input s1; output y0; wire y0; output y1; wire y1; output y2; wire y2; output y3; wire y3; wire s0_y2y3_WIRE; wire s0_y0y1_WIRE; assign s0_y0y1_WIRE = ((!s1) && x); assign s0_y2y3_WIRE = (s1 && x); assign y0 = ((!s0) && s0_y0y1_WIRE); assign y1 = (s0 && s0_y0y1_WIRE); assign y2 = ((!s0) && s0_y2y3_WIRE); assign y3 = (s0 && s0_y2y3_WIRE); endmodule \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_DMS_RTL.png}} \caption{\label{fig:D14DMSRTL} DEMUX1_4_DMS RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_DMS_SYN.png}} \caption{\label{fig:D14DMSSYN} DEMUX1_4_DMS Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_DMS_IMP.png}} \caption{\label{fig:D14DMSIMP} DEMUX1_4_DMS Implementated Schematic; Xilinx Vivado 2017.4} \end{figure} ## myHDL to Verilog Testbench ```python #create BitVectors xTVs=intbv(int(''.join(xTVs.astype(str)), 2))[TestLen:] s0TVs=intbv(int(''.join(s0TVs.astype(str)), 2))[TestLen:] s1TVs=intbv(int(''.join(s1TVs.astype(str)), 2))[TestLen:] xTVs, bin(xTVs), s0TVs, bin(s0TVs), s1TVs, bin(s1TVs) ``` (intbv(87399), '10101010101100111', intbv(52982), '1100111011110110', intbv(16277), '11111110010101') ```python @block def DEMUX1_4_DMS_TBV(): """ myHDL -> testbench for module `DEMUX1_4_DMS` """ x=Signal(bool(0)) s0=Signal(bool(0)) s1=Signal(bool(0)) y0=Signal(bool(0)) y1=Signal(bool(0)) y2=Signal(bool(0)) y3=Signal(bool(0)) @always_comb def print_data(): print(x, s0, s1, y0, y1, y2, y3) #Test Signal Bit Vectors xTV=Signal(xTVs) s0TV=Signal(s0TVs) s1TV=Signal(s1TVs) DUT=DEMUX1_4_DMS(x, s0, s1, y0, y1, y2, y3) @instance def stimules(): for i in range(TestLen): x.next=int(xTV[i]) s0.next=int(s0TV[i]) s1.next=int(s1TV[i]) yield delay(1) raise StopSimulation() return instances() TB=DEMUX1_4_DMS_TBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('DEMUX1_4_DMS_TBV'); ``` <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> ***Verilog modual from DEMUX1_4_DMS_TBV.v*** // File: DEMUX1_4_DMS_TBV.v // Generated by MyHDL 0.10 // Date: Sun Sep 23 18:24:21 2018 `timescale 1ns/10ps module DEMUX1_4_DMS_TBV ( ); // myHDL -> testbench for module `DEMUX1_4_DMS` reg x = 0; wire y0; wire y1; reg s0 
= 0; reg s1 = 0; wire y2; wire y3; wire [17:0] xTV; wire [17:0] s0TV; wire [17:0] s1TV; wire DEMUX1_4_DMS0_0_s0_y2y3_WIRE; wire DEMUX1_4_DMS0_0_s0_y0y1_WIRE; assign xTV = 18'd87399; assign s0TV = 18'd52982; assign s1TV = 18'd16277; always @(x, y0, s1, s0, y3, y2, y1) begin: DEMUX1_4_DMS_TBV_PRINT_DATA $write("%h", x); $write(" "); $write("%h", s0); $write(" "); $write("%h", s1); $write(" "); $write("%h", y0); $write(" "); $write("%h", y1); $write(" "); $write("%h", y2); $write(" "); $write("%h", y3); $write("\n"); end assign DEMUX1_4_DMS0_0_s0_y0y1_WIRE = ((!s1) && x); assign DEMUX1_4_DMS0_0_s0_y2y3_WIRE = (s1 && x); assign y0 = ((!s0) && DEMUX1_4_DMS0_0_s0_y0y1_WIRE); assign y1 = (s0 && DEMUX1_4_DMS0_0_s0_y0y1_WIRE); assign y2 = ((!s0) && DEMUX1_4_DMS0_0_s0_y2y3_WIRE); assign y3 = (s0 && DEMUX1_4_DMS0_0_s0_y2y3_WIRE); initial begin: DEMUX1_4_DMS_TBV_STIMULES integer i; for (i=0; i<18; i=i+1) begin x <= xTV[i]; s0 <= s0TV[i]; s1 <= s1TV[i]; # 1; end $finish; end endmodule /home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: xTV category=ToVerilogWarning /home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: s0TV category=ToVerilogWarning /home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: s1TV category=ToVerilogWarning ## PYNQ-Z1 Deployment ### Board Circuit See Board Circuit for "1 Channel Input:4 Channel Output demultiplexer in Gate Level Logic" ### Board Constraint uses same 'DEMUX1_4.xdc' as "# 1 Channel Input:4 Channel Output demultiplexer in Gate Level Logic" ### Video of Deployment DEMUX1_4_DMS on PYNQ-Z1 ([YouTube](https://www.youtube.com/watch?v=8A9iDf4nh74)) # 1:2 DEMUX via Behavioral IF ## myHDL Module ```python @block def DEMUX1_2_B(x, s, y0, y1): """ 1:2 DMUX in behavioral Inputs: x(bool): input feed s(bool): channel select Outputs: y0(bool): ouput channel 0 y1(bool): ouput channel 1 """ @always_comb def logic(): if s==0: #take note that since we have #two ouputs there next state values #must both be set, else the last #value will presist till it changes y0.next=x y1.next=0 else: y0.next=0 y1.next=x return instances() ``` ## myHDL Testing ```python TestLen=10 SystmaticVals=list(itertools.product([0,1], repeat=2)) xTVs=np.array([i[1] for i in SystmaticVals]).astype(int) np.random.seed(15) xTVs=np.append(xTVs, np.random.randint(0,2, TestLen)).astype(int) sTVs=np.array([i[0] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(16) sTVs=np.append(sTVs, np.random.randint(0,2, TestLen)).astype(int) TestLen=len(xTVs) SystmaticVals, sTVs, xTVs ``` ([(0, 0), (0, 1), (1, 0), (1, 1)], array([0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0]), array([0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1])) ```python Peeker.clear() x=Signal(bool(0)); Peeker(x, 'x') s=Signal(bool(0)); Peeker(s, 's') y0=Signal(bool(0)); Peeker(y0, 'y0') y1=Signal(bool(0)); Peeker(y1, 'y1') DUT=DEMUX1_2_B(x, s, y0, y1) def DEMUX1_2_B_TB(): """ myHDL only testbench for module `DEMUX1_2_B` """ @instance def stimules(): for i in range(TestLen): x.next=int(xTVs[i]) s.next=int(sTVs[i]) yield delay(1) raise StopSimulation() return instances() sim=Simulation(DUT, DEMUX1_2_B_TB(), *Peeker.instances()).run() ``` ```python Peeker.to_wavedrom('x', 's', 'y0','y1') ``` <div></div> ```python 
DEMUX1_2_BData=Peeker.to_dataframe() DEMUX1_2_BData=DEMUX1_2_BData[['x', 's', 'y0','y1']] DEMUX1_2_BData ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>x</th> <th>s</th> <th>y0</th> <th>y1</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>1</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>2</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>1</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>4</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>5</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>6</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>7</th> <td>1</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>9</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>10</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>11</th> <td>1</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>13</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> </tr> </tbody> </table> </div> ```python Test=DEMUX1_2_BData==DEMUX1_2_ComboData[['x', 's', 'y0','y1']] Test=Test.all().all() print(f'DEMUX1_2_BD is equivlent to DEMUX1_2_Combo: {Test}') ``` DEMUX1_2_BD is equivlent to DEMUX1_2_Combo: True ## Verilog Conversion ```python DUT.convert() VerilogTextReader('DEMUX1_2_B'); ``` ***Verilog modual from DEMUX1_2_B.v*** // File: DEMUX1_2_B.v // Generated by MyHDL 0.10 // Date: Sun Sep 23 18:24:23 2018 `timescale 1ns/10ps module DEMUX1_2_B ( x, s, y0, y1 ); // 1:2 DMUX in behavioral // Inputs: // x(bool): input feed // s(bool): channel select // Outputs: // y0(bool): ouput channel 0 // y1(bool): ouput channel 1 input x; input s; output y0; reg y0; output y1; reg y1; always @(s, x) begin: DEMUX1_2_B_LOGIC if ((s == 0)) begin y0 = x; y1 = 0; end else begin y0 = 0; y1 = x; end end endmodule \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_2_B_RTL.png}} \caption{\label{fig:D12BRTL} DEMUX1_2_B RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_2_B_SYN.png}} \caption{\label{fig:D12BSYN} DEMUX1_2_B Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_2_B_IMP.png}} \caption{\label{fig:D12BIMP} DEMUX1_2_B Implementated Schematic; Xilinx Vivado 2017.4} \end{figure} ## myHDL to Verilog Testbench ```python #create BitVectors xTVs=intbv(int(''.join(xTVs.astype(str)), 2))[TestLen:] sTVs=intbv(int(''.join(sTVs.astype(str)), 2))[TestLen:] xTVs, bin(xTVs), sTVs, bin(sTVs) ``` (intbv(5479), '1010101100111', intbv(3830), '111011110110') ```python @block def DEMUX1_2_B_TBV(): """ myHDL -> testbench for module `DEMUX1_2_B` """ x=Signal(bool(0)) s=Signal(bool(0)) y0=Signal(bool(0)) y1=Signal(bool(0)) @always_comb def print_data(): print(x, s, y0, y1) #Test Signal Bit Vectors xTV=Signal(xTVs) sTV=Signal(sTVs) DUT=DEMUX1_2_B(x, s, y0, y1) @instance def stimules(): for i in range(TestLen): x.next=int(xTV[i]) s.next=int(sTV[i]) yield delay(1) raise StopSimulation() return instances() TB=DEMUX1_2_B_TBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('DEMUX1_2_B_TBV'); ``` <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 
'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> ***Verilog modual from DEMUX1_2_B_TBV.v*** // File: DEMUX1_2_B_TBV.v // Generated by MyHDL 0.10 // Date: Sun Sep 23 18:24:23 2018 `timescale 1ns/10ps module DEMUX1_2_B_TBV ( ); // myHDL -> testbench for module `DEMUX1_2_B` reg x = 0; reg s = 0; reg y0 = 0; reg y1 = 0; wire [13:0] xTV; wire [13:0] sTV; assign xTV = 14'd5479; assign sTV = 14'd3830; always @(x, y0, s, y1) begin: DEMUX1_2_B_TBV_PRINT_DATA $write("%h", x); $write(" "); $write("%h", s); $write(" "); $write("%h", y0); $write(" "); $write("%h", y1); $write("\n"); end always @(s, x) begin: DEMUX1_2_B_TBV_DEMUX1_2_B0_0_LOGIC if ((s == 0)) begin y0 = x; y1 = 0; end else begin y0 = 0; y1 = x; end end initial begin: DEMUX1_2_B_TBV_STIMULES integer i; for (i=0; i<14; i=i+1) begin x <= xTV[i]; s <= sTV[i]; # 1; end $finish; end endmodule /home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: xTV category=ToVerilogWarning /home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: sTV category=ToVerilogWarning ## PYNQ-Z1 Deployment ### Board Circuit See Board Circuit for "1 Channel Input: 2 Channel Output demultiplexer in Gate Level Logic" ### Board Constraint uses same 'DEMUX1_2.xdc' as "1 Channel Input: 2 Channel Output demultiplexer in Gate Level Logic" ### Video of Deployment DEMUX1_2_B on PYNQ-Z1 ([YouTube](https://www.youtube.com/watch?v=Ux0olpi2ppI)) # 1:4 DEMUX via Behavioral if-elif-else ## myHDL Module ```python @block def DEMUX1_4_B(x, s0, s1, y0, y1, y2, y3): """ 1:4 DEMUX written via behaviorial Inputs: x(bool): input feed s0(bool): channel select 0 s1(bool): channel select 1 Outputs: y0(bool): ouput channel 0 y1(bool): ouput channel 1 y2(bool): ouput channel 2 y3(bool): ouput channel 3 """ @always_comb def logic(): if s0==0 and s1==0: y0.next=x; y1.next=0 y2.next=0; y3.next=0 elif s0==1 and s1==0: y0.next=0; y1.next=x y2.next=0; y3.next=0 elif s0==0 and s1==1: y0.next=0; y1.next=0 y2.next=x; y3.next=0 else: y0.next=0; y1.next=0 y2.next=0; y3.next=x return instances() ``` ## myHDL Testing ```python TestLen=10 SystmaticVals=list(itertools.product([0,1], repeat=3)) xTVs=np.array([i[2] for i in SystmaticVals]).astype(int) np.random.seed(15) xTVs=np.append(xTVs, np.random.randint(0,2, TestLen)).astype(int) s0TVs=np.array([i[1] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(16) s0TVs=np.append(s0TVs, np.random.randint(0,2, TestLen)).astype(int) s1TVs=np.array([i[0] for i in SystmaticVals]).astype(int) #the random genrator must have a differint seed beween each generation #call in order to produce differint values for each call np.random.seed(17) s1TVs=np.append(s1TVs, np.random.randint(0,2, TestLen)).astype(int) TestLen=len(xTVs) SystmaticVals, xTVs, s0TVs, s1TVs ``` ([(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)], array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1]), array([0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0]), array([0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1])) ```python Peeker.clear() x=Signal(bool(0)); Peeker(x, 'x') s0=Signal(bool(0)); Peeker(s0, 's0') s1=Signal(bool(0)); Peeker(s1, 's1') y0=Signal(bool(0)); Peeker(y0, 'y0') y1=Signal(bool(0)); Peeker(y1, 'y1') y2=Signal(bool(0)); 
Peeker(y2, 'y2') y3=Signal(bool(0)); Peeker(y3, 'y3') DUT=DEMUX1_4_B(x, s0, s1, y0, y1, y2, y3) def DEMUX1_4_B_TB(): """ myHDL only testbench for module `DEMUX1_4_Combo` """ @instance def stimules(): for i in range(TestLen): x.next=int(xTVs[i]) s0.next=int(s0TVs[i]) s1.next=int(s1TVs[i]) yield delay(1) raise StopSimulation() return instances() sim=Simulation(DUT, DEMUX1_4_B_TB(), *Peeker.instances()).run() ``` ```python Peeker.to_wavedrom('x', 's1', 's0', 'y0', 'y1', 'y2', 'y3') ``` <div></div> ```python DEMUX1_4_BData=Peeker.to_dataframe() DEMUX1_4_BData=DEMUX1_4_BData[['x', 's1', 's0', 'y0', 'y1', 'y2', 'y3']] DEMUX1_4_BData ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>x</th> <th>s1</th> <th>s0</th> <th>y0</th> <th>y1</th> <th>y2</th> <th>y3</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>1</th> <td>1</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>2</th> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>4</th> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>5</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>6</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>7</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>8</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>9</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>10</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>11</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>13</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>14</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>15</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>16</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>17</th> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> </tbody> </table> </div> ```python Test=DEMUX1_4_BData==DEMUX1_4_ComboData[['x', 's1', 's0', 'y0', 'y1', 'y2', 'y3']] Test=Test.all().all() print(f'DEMUX1_4_B equivlinet to DEMUX1_4_Combo: {Test}') ``` DEMUX1_4_B equivlinet to DEMUX1_4_Combo: True ## Verilog Conversion ```python DUT.convert() VerilogTextReader('DEMUX1_4_B'); ``` ***Verilog modual from DEMUX1_4_B.v*** // File: DEMUX1_4_B.v // Generated by MyHDL 0.10 // Date: Sun Sep 23 18:24:25 2018 `timescale 1ns/10ps module DEMUX1_4_B ( x, s0, s1, y0, y1, y2, y3 ); // 1:4 DEMUX written via behaviorial // // Inputs: // x(bool): input feed // s0(bool): channel select 0 // s1(bool): channel select 1 // // Outputs: // y0(bool): ouput channel 0 // y1(bool): ouput channel 1 // y2(bool): ouput channel 2 // y3(bool): ouput channel 3 input x; input s0; input s1; output y0; reg y0; output y1; 
reg y1; output y2; reg y2; output y3; reg y3; always @(x, s1, s0) begin: DEMUX1_4_B_LOGIC if (((s0 == 0) && (s1 == 0))) begin y0 = x; y1 = 0; y2 = 0; y3 = 0; end else if (((s0 == 1) && (s1 == 0))) begin y0 = 0; y1 = x; y2 = 0; y3 = 0; end else if (((s0 == 0) && (s1 == 1))) begin y0 = 0; y1 = 0; y2 = x; y3 = 0; end else begin y0 = 0; y1 = 0; y2 = 0; y3 = x; end end endmodule \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_B_RTL.png}} \caption{\label{fig:D14BRTL} DEMUX1_4_B RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_B_SYN.png}} \caption{\label{fig:D14BSYN} DEMUX1_4_B Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_B_IMP.png}} \caption{\label{fig:D14BIMP} DEMUX1_4_B Implementated Schematic; Xilinx Vivado 2017.4} \end{figure} ## myHDL to Verilog Testbench ```python #create BitVectors xTVs=intbv(int(''.join(xTVs.astype(str)), 2))[TestLen:] s0TVs=intbv(int(''.join(s0TVs.astype(str)), 2))[TestLen:] s1TVs=intbv(int(''.join(s1TVs.astype(str)), 2))[TestLen:] xTVs, bin(xTVs), s0TVs, bin(s0TVs), s1TVs, bin(s1TVs) ``` (intbv(87399), '10101010101100111', intbv(52982), '1100111011110110', intbv(16277), '11111110010101') ```python @block def DEMUX1_4_B_TBV(): """ myHDL -> testbench for module `DEMUX1_4_B` """ x=Signal(bool(0)) s0=Signal(bool(0)) s1=Signal(bool(0)) y0=Signal(bool(0)) y1=Signal(bool(0)) y2=Signal(bool(0)) y3=Signal(bool(0)) @always_comb def print_data(): print(x, s0, s1, y0, y1, y2, y3) #Test Signal Bit Vectors xTV=Signal(xTVs) s0TV=Signal(s0TVs) s1TV=Signal(s1TVs) DUT=DEMUX1_4_B(x, s0, s1, y0, y1, y2, y3) @instance def stimules(): for i in range(TestLen): x.next=int(xTV[i]) s0.next=int(s0TV[i]) s1.next=int(s1TV[i]) yield delay(1) raise StopSimulation() return instances() TB=DEMUX1_4_B_TBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('DEMUX1_4_B_TBV'); ``` <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> <class 'myhdl._Signal._Signal'> <class '_ast.Name'> ***Verilog modual from DEMUX1_4_B_TBV.v*** // File: DEMUX1_4_B_TBV.v // Generated by MyHDL 0.10 // Date: Sun Sep 23 18:24:26 2018 `timescale 1ns/10ps module DEMUX1_4_B_TBV ( ); // myHDL -> testbench for module `DEMUX1_4_B` reg x = 0; reg y0 = 0; reg y1 = 0; reg s0 = 0; reg s1 = 0; reg y2 = 0; reg y3 = 0; wire [17:0] xTV; wire [17:0] s0TV; wire [17:0] s1TV; assign xTV = 18'd87399; assign s0TV = 18'd52982; assign s1TV = 18'd16277; always @(x, y0, s1, s0, y3, y2, y1) begin: DEMUX1_4_B_TBV_PRINT_DATA $write("%h", x); $write(" "); $write("%h", s0); $write(" "); $write("%h", s1); $write(" "); $write("%h", y0); $write(" "); $write("%h", y1); $write(" "); $write("%h", y2); $write(" "); $write("%h", y3); $write("\n"); end always @(x, s1, s0) begin: DEMUX1_4_B_TBV_DEMUX1_4_B0_0_LOGIC if (((s0 == 0) && (s1 == 0))) begin y0 = x; y1 = 0; y2 = 0; y3 = 0; end else if (((s0 == 1) && (s1 == 0))) begin y0 = 0; y1 = x; y2 = 0; y3 = 0; end else if (((s0 == 0) && (s1 == 1))) begin y0 = 0; y1 = 0; y2 = x; y3 = 0; end else begin y0 = 0; y1 = 0; y2 = 0; y3 = x; end end initial begin: DEMUX1_4_B_TBV_STIMULES integer i; for (i=0; i<18; i=i+1) begin x <= xTV[i]; s0 <= s0TV[i]; s1 <= s1TV[i]; # 1; end $finish; end endmodule 
/home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: xTV category=ToVerilogWarning /home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: s0TV category=ToVerilogWarning /home/iridium/anaconda3/lib/python3.6/site-packages/myhdl/conversion/_toVerilog.py:349: ToVerilogWarning: Signal is not driven: s1TV category=ToVerilogWarning ## PYNQ-Z1 Deployment ### Board Circuit See Board Circuit for "1 Channel Input:4 Channel Output demultiplexer in Gate Level Logic" ### Board Constraint uses same 'DEMUX1_4.xdc' as "# 1 Channel Input:4 Channel Output demultiplexer in Gate Level Logic" ### Video of Deployment DEMUX1_4_B on PYNQ-Z1 ([YouTube](https://www.youtube.com/watch?v=MezWijKoDuk)) # Demultiplexer 1:4 Behavioral via Bitvectors ## myHDL Module ```python @block def DEMUX1_4_BV(x, S, Y): """ 1:4 DEMUX written via behaviorial with bit vectors Inputs: x(bool): input feed S(2bit vector): channel select bitvector; min=0, max=3 Outputs: Y(4bit vector): ouput channel bitvector; values min=0, max=15; allowed is: 0,1,2,4,8 in this application """ @always_comb def logic(): #here concat is used to build up the word #from the x input if S==0: Y.next=concat(intbv(0)[3:], x); '0001' elif S==1: Y.next=concat(intbv(0)[2:], x, intbv(0)[1:]); '0010' elif S==2: Y.next=concat(intbv(0)[1:], x, intbv(0)[2:]); '0100' else: Y.next=concat(x, intbv(0)[3:]); '1000' return instances() ``` ## myHDL Testing ```python xTVs=np.array([0,1]) xTVs=np.append(xTVs, np.random.randint(0,2,6)).astype(int) TestLen=len(xTVs) np.random.seed(12) STVs=np.arange(0,4) STVs=np.append(STVs, np.random.randint(0,4, 5)) TestLen, xTVs, STVs ``` (8, array([0, 1, 0, 1, 0, 0, 1, 1]), array([0, 1, 2, 3, 3, 3, 2, 1, 1])) ```python Peeker.clear() x=Signal(bool(0)); Peeker(x, 'x') S=Signal(intbv(0)[2:]); Peeker(S, 'S') Y=Signal(intbv(0)[4:]); Peeker(Y, 'Y') DUT=DEMUX1_4_BV(x, S, Y) def DEMUX1_4_BV_TB(): @instance def stimules(): for i in STVs: for j in xTVs: S.next=int(i) x.next=int(j) yield delay(1) raise StopSimulation() return instances() sim=Simulation(DUT, DEMUX1_4_BV_TB(), *Peeker.instances()).run() ``` ```python Peeker.to_wavedrom('x', 'S', 'Y', start_time=0, stop_time=2*TestLen+2) ``` <div></div> ```python DEMUX1_4_BVData=Peeker.to_dataframe() DEMUX1_4_BVData=DEMUX1_4_BVData[['x', 'S', 'Y']] DEMUX1_4_BVData ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>x</th> <th>S</th> <th>Y</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>1</th> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>2</th> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>4</th> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>6</th> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>8</th> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>9</th> <td>1</td> <td>1</td> <td>2</td> </tr> <tr> <th>10</th> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>11</th> <td>1</td> <td>1</td> <td>2</td> </tr> <tr> <th>12</th> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>14</th> <td>1</td> <td>1</td> <td>2</td> </tr> <tr> <th>16</th> <td>0</td> <td>2</td> <td>0</td> </tr> <tr> <th>17</th> <td>1</td> <td>2</td> <td>4</td> </tr> <tr> 
<th>18</th> <td>0</td> <td>2</td> <td>0</td> </tr> <tr> <th>19</th> <td>1</td> <td>2</td> <td>4</td> </tr> <tr> <th>20</th> <td>0</td> <td>2</td> <td>0</td> </tr> <tr> <th>22</th> <td>1</td> <td>2</td> <td>4</td> </tr> <tr> <th>24</th> <td>0</td> <td>3</td> <td>0</td> </tr> <tr> <th>25</th> <td>1</td> <td>3</td> <td>8</td> </tr> <tr> <th>26</th> <td>0</td> <td>3</td> <td>0</td> </tr> <tr> <th>27</th> <td>1</td> <td>3</td> <td>8</td> </tr> <tr> <th>28</th> <td>0</td> <td>3</td> <td>0</td> </tr> <tr> <th>30</th> <td>1</td> <td>3</td> <td>8</td> </tr> <tr> <th>32</th> <td>0</td> <td>3</td> <td>0</td> </tr> <tr> <th>33</th> <td>1</td> <td>3</td> <td>8</td> </tr> <tr> <th>34</th> <td>0</td> <td>3</td> <td>0</td> </tr> <tr> <th>35</th> <td>1</td> <td>3</td> <td>8</td> </tr> <tr> <th>36</th> <td>0</td> <td>3</td> <td>0</td> </tr> <tr> <th>38</th> <td>1</td> <td>3</td> <td>8</td> </tr> <tr> <th>40</th> <td>0</td> <td>3</td> <td>0</td> </tr> <tr> <th>41</th> <td>1</td> <td>3</td> <td>8</td> </tr> <tr> <th>42</th> <td>0</td> <td>3</td> <td>0</td> </tr> <tr> <th>43</th> <td>1</td> <td>3</td> <td>8</td> </tr> <tr> <th>44</th> <td>0</td> <td>3</td> <td>0</td> </tr> <tr> <th>46</th> <td>1</td> <td>3</td> <td>8</td> </tr> <tr> <th>48</th> <td>0</td> <td>2</td> <td>0</td> </tr> <tr> <th>49</th> <td>1</td> <td>2</td> <td>4</td> </tr> <tr> <th>50</th> <td>0</td> <td>2</td> <td>0</td> </tr> <tr> <th>51</th> <td>1</td> <td>2</td> <td>4</td> </tr> <tr> <th>52</th> <td>0</td> <td>2</td> <td>0</td> </tr> <tr> <th>54</th> <td>1</td> <td>2</td> <td>4</td> </tr> <tr> <th>56</th> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>57</th> <td>1</td> <td>1</td> <td>2</td> </tr> <tr> <th>58</th> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>59</th> <td>1</td> <td>1</td> <td>2</td> </tr> <tr> <th>60</th> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>62</th> <td>1</td> <td>1</td> <td>2</td> </tr> <tr> <th>64</th> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>65</th> <td>1</td> <td>1</td> <td>2</td> </tr> <tr> <th>66</th> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>67</th> <td>1</td> <td>1</td> <td>2</td> </tr> <tr> <th>68</th> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>70</th> <td>1</td> <td>1</td> <td>2</td> </tr> </tbody> </table> </div> ```python DEMUX1_4_BVData['y0']=None; DEMUX1_4_BVData['y1']=None; DEMUX1_4_BVData['y2']=None; DEMUX1_4_BVData['y3']=None DEMUX1_4_BVData[['y3', 'y2', 'y1', 'y0']]=DEMUX1_4_BVData[['Y']].apply(lambda bv: [int(i) for i in bin(bv, 4)], axis=1, result_type='expand') DEMUX1_4_BVData['s0']=None; DEMUX1_4_BVData['s1']=None DEMUX1_4_BVData[['s1', 's0']]=DEMUX1_4_BVData[['S']].apply(lambda bv: [int(i) for i in bin(bv, 2)], axis=1, result_type='expand') DEMUX1_4_BVData=DEMUX1_4_BVData[['x', 'S', 's0', 's1', 'Y', 'y3', 'y2', 'y1', 'y0']] DEMUX1_4_BVData ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>x</th> <th>S</th> <th>s0</th> <th>s1</th> <th>Y</th> <th>y3</th> <th>y2</th> <th>y1</th> <th>y0</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>1</th> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>2</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> 
<td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>4</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>6</th> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>8</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>9</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>2</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>10</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>11</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>2</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>12</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>14</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>2</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>16</th> <td>0</td> <td>2</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>17</th> <td>1</td> <td>2</td> <td>0</td> <td>1</td> <td>4</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>18</th> <td>0</td> <td>2</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>19</th> <td>1</td> <td>2</td> <td>0</td> <td>1</td> <td>4</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>20</th> <td>0</td> <td>2</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>22</th> <td>1</td> <td>2</td> <td>0</td> <td>1</td> <td>4</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>24</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>25</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>26</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>27</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>28</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>30</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>32</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>33</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>34</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>35</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>36</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>38</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>40</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>41</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> 
</tr> <tr> <th>42</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>43</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>44</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>46</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>48</th> <td>0</td> <td>2</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>49</th> <td>1</td> <td>2</td> <td>0</td> <td>1</td> <td>4</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>50</th> <td>0</td> <td>2</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>51</th> <td>1</td> <td>2</td> <td>0</td> <td>1</td> <td>4</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>52</th> <td>0</td> <td>2</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>54</th> <td>1</td> <td>2</td> <td>0</td> <td>1</td> <td>4</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>56</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>57</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>2</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>58</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>59</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>2</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>60</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>62</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>2</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>64</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>65</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>2</td> <td>0</td> <td>0</td> <td>1</td> <td>0 <b>limit_output extension: Maximum message size of 10000 exceeded with 10802 characters</b> ```python DEMUX1_4_BVData['y0Ref']=DEMUX1_4_BVData.apply(lambda row:y14_0EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_BVData['y1Ref']=DEMUX1_4_BVData.apply(lambda row:y14_1EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_BVData['y2Ref']=DEMUX1_4_BVData.apply(lambda row:y14_2EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_BVData['y3Ref']=DEMUX1_4_BVData.apply(lambda row:y14_3EqN(row['x'], row['s0'], row['s1']), axis=1).astype(int) DEMUX1_4_BVData ``` /home/iridium/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy """Entry point for launching an IPython kernel. /home/iridium/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. 
Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy /home/iridium/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy This is separate from the ipykernel package so we can avoid doing imports until /home/iridium/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy after removing the cwd from sys.path. <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>x</th> <th>S</th> <th>s0</th> <th>s1</th> <th>Y</th> <th>y3</th> <th>y2</th> <th>y1</th> <th>y0</th> <th>y0Ref</th> <th>y1Ref</th> <th>y2Ref</th> <th>y3Ref</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>1</th> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>2</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>3</th> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>4</th> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>6</th> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>8</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>9</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>2</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>10</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>11</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>2</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>12</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>14</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>2</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>16</th> <td>0</td> <td>2</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> 
<td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>17</th> <td>1</td> <td>2</td> <td>0</td> <td>1</td> <td>4</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>18</th> <td>0</td> <td>2</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>19</th> <td>1</td> <td>2</td> <td>0</td> <td>1</td> <td>4</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>20</th> <td>0</td> <td>2</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>22</th> <td>1</td> <td>2</td> <td>0</td> <td>1</td> <td>4</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>0</td> </tr> <tr> <th>24</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>25</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>26</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>27</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>28</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>30</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>32</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>33</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>34</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>35</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>36</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <th>38</th> <td>1</td> <td>3</td> <td>1</td> <td>1</td> <td>8</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> </tr> <tr> <th>40</th> <td>0</td> <td>3</td> <td>1</td> <td>1</td <b>limit_output extension: Maximum message size of 10000 exceeded with 16125 characters</b> ```python Test=DEMUX1_4_BVData[['y0', 'y1', 'y2', 'y3']].sort_index(inplace=True)==DEMUX1_4_BVData[['y0Ref', 'y1Ref', 'y2Ref', 'y3Ref']].sort_index(inplace=True) print(f'Module `DEMUX1_4_BVData` works as exspected: {Test}') ``` Module `DEMUX1_4_BVData` works as exspected: True ## Verilog Conversion ```python DUT.convert() VerilogTextReader('DEMUX1_4_BV'); ``` ***Verilog modual from DEMUX1_4_BV.v*** // File: DEMUX1_4_BV.v // Generated by MyHDL 0.10 // Date: Sun Sep 23 18:48:43 2018 `timescale 1ns/10ps module DEMUX1_4_BV ( x, 
S, Y ); // 1:4 DEMUX written via behavioral with // bit vectors // // Inputs: // x(bool): input feed // S(2bit vector): channel select bitvector; // min=0, max=3 // // Outputs: // Y(4bit vector): output channel bitvector; // values min=0, max=15; allowed is: 0,1,2,4,8 // in this application // input x; input [1:0] S; output [3:0] Y; reg [3:0] Y; always @(x, S) begin: DEMUX1_4_BV_LOGIC case (S) 'h0: begin Y = {3'h0, x}; // 0001 end 'h1: begin Y = {2'h0, x, 1'h0}; // 0010 end 'h2: begin Y = {1'h0, x, 2'h0}; // 0100 end default: begin Y = {x, 3'h0}; // 1000 end endcase end endmodule \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_BV_RTL.png}} \caption{\label{fig:D14BVRTL} DEMUX1_4_BV RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_BV_SYN.png}} \caption{\label{fig:D14BVSYN} DEMUX1_4_BV Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{DEMUX1_4_BV_IMP.png}} \caption{\label{fig:D14BVIMP} DEMUX1_4_BV Implemented Schematic; Xilinx Vivado 2017.4} \end{figure} ## myHDL to Verilog Testbench (To Do!) ## PYNQ-Z1 Board Deployment ### Board Circuit ### Board Constraints ```python ConstraintXDCTextReader('DEMUX1_4_BV'); ``` ***Constraint file from DEMUX1_4_BV.xdc*** ## Switches set_property -dict {PACKAGE_PIN M20 IOSTANDARD LVCMOS33} [get_ports {S[0]}]; ##SW0 set_property -dict {PACKAGE_PIN M19 IOSTANDARD LVCMOS33} [get_ports {S[1]}]; ##SW1 ## Buttons set_property -dict {PACKAGE_PIN L19 IOSTANDARD LVCMOS33} [get_ports {x}]; ##BT3 ## RGBLEDs set_property -dict { PACKAGE_PIN L15 IOSTANDARD LVCMOS33 } [get_ports {Y[0]}]; ##LD4_Blue set_property -dict { PACKAGE_PIN G17 IOSTANDARD LVCMOS33 } [get_ports {Y[1]}]; ##LD4_Green set_property -dict { PACKAGE_PIN L14 IOSTANDARD LVCMOS33 } [get_ports {Y[2]}]; ##LD5_Green set_property -dict { PACKAGE_PIN M15 IOSTANDARD LVCMOS33 } [get_ports {Y[3]}]; ##LD5_Red ### Video of Deployment DEMUX1_4_BV on PYNQ-Z1 ([YouTube](https://www.youtube.com/watch?v=mVyTlkbJpKg))
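The myHDL-to-Verilog testbench above is still marked To Do. As a starting point, here is a minimal, framework-free sketch (illustrative names only, not the myHDL API) of a behavioral reference model for the 1:4 DEMUX case mapping shown in the generated Verilog; a future testbench could compare the DUT's `Y` against it.

```python
# Illustrative reference model only (not the myHDL testbench itself):
# the generated Verilog routes x to output line Y[S], so the 4-bit output
# word is simply x shifted left by the channel-select value S.

def demux1_4_ref(x, S):
    """Reference 1:4 DEMUX: return the 4-bit output word as an int (Y = x << S)."""
    assert x in (0, 1) and 0 <= S <= 3
    return x << S

# exhaustive check of the case mapping: 0001, 0010, 0100, 1000 when x is high
for S in range(4):
    for x in (0, 1):
        Y = demux1_4_ref(x, S)
        print(f"x={x} S={S} -> Y={Y:04b}")
```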
0eba95224c0d4fcd85e1b599895e001437e51dc9
210,860
ipynb
Jupyter Notebook
myHDL_DigLogicFundamentals/myHDL_Combinational/Demultiplexers(DEMUX).ipynb
PyLCARS/PythonUberHDL
f7ae2293d6efaca7986d62540798cdf061383d06
[ "BSD-3-Clause" ]
31
2017-10-09T12:15:14.000Z
2022-02-28T09:05:21.000Z
myHDL_DigLogicFundamentals/myHDL_Combinational/Demultiplexers(DEMUX).ipynb
cfelton/PythonUberHDL
f7ae2293d6efaca7986d62540798cdf061383d06
[ "BSD-3-Clause" ]
null
null
null
myHDL_DigLogicFundamentals/myHDL_Combinational/Demultiplexers(DEMUX).ipynb
cfelton/PythonUberHDL
f7ae2293d6efaca7986d62540798cdf061383d06
[ "BSD-3-Clause" ]
12
2018-02-09T15:36:20.000Z
2021-04-20T21:39:12.000Z
30.440306
11,221
0.380115
true
44,691
Qwen/Qwen-72B
1. YES 2. YES
0.685949
0.815232
0.559208
__label__yue_Hant
0.237068
0.137558
<a href="https://colab.research.google.com/github/kojiyam/information-theory/blob/main/it3.ipynb" target="_parent"></a> # 巡回符号・ガロア体上の多項式環における剰余 参考 https://stackoverflow.com/questions/14173007/polynomial-with-modular-coefficients-library-in-python https://numpy.org/doc/stable/reference/generated/numpy.poly1d.html#numpy.poly1d https://stackoverflow.com/questions/28646336/pretty-printing-polynomials-in-ipython-notebook (表記) ``` # import numpy.polynomial.polynomial import numpy as np ``` ``` # 表示だけ.計算には不要 import sympy from sympy.abc import x sympy.init_printing() def printp(poly): return sympy.Poly(poly.coef,x).as_expr() ``` ## 実数体上の多項式 $\mathbb{R}[x]$ とその演算を Python + NumPy で行う方法.表示にはSymPy この計算は,(教科書で出てくる)整数環上の多項式 $\mathbb{Z}[x]$ の演算ではない ``` A = np.poly1d([1,2,3,4]) print(A) ``` 3 2 1 x + 2 x + 3 x + 4 ``` printp(A) ``` ``` # 加算,減算,乗算 A = np.poly1d([1,2]) B = np.poly1d([3,1]) printp( A ), printp( B ), printp( A + B ), printp( A - B ), printp( A * B ) ``` ``` # 除算 A = np.poly1d([1,2,3]) B = np.poly1d([1,1]) printp( A ), printp( B ) ``` ``` A / B ``` (poly1d([1., 1.]), poly1d([2.])) ``` # 商 printp( (A / B)[0] ) ``` ``` # 剰余 printp( (A / B)[1] ) ``` A/Bは商と剰余をリストで返す.(A/B)[1]とすると剰余だけ ## 有限体上の多項式 $\mathbb{F}_2[x]$ の演算 係数を(0,1)に限って$\mathbb{R}[x]$ の演算を行った後で mod 2すればよい(例外的なものがあるかもしれない) ``` # 情報ビット X = np.poly1d([1,1,1,0]) # 生成多項式 G = np.poly1d([1,0,1,1]) printp(X), printp(G) ``` ``` xm = np.poly1d( [1] + [0] * G.order ) printp(xm) ``` $ x^m = x^{\mathrm{deg}(G(x))}$ $ C(x) = X(x) \, x^{\mathrm{deg}(G(x))} \bmod G(x) $ $ \mathbb{R}[x]$ 上の剰余 ``` printp( (X*xm/G)[1] ) ``` $ \mathbb{F}_2[x] $上の剰余 ``` np.remainder( (X*xm/G)[1], 2 ) ``` array([1., 0., 0.]) ``` printp( np.poly1d( np.remainder( (X*xm/G)[1], 2 ) ) ) ``` np.remainderは正の値,np.fmodはdividendと同じ符号 ``` np.remainder(-5,3), np.fmod(-5,3) ``` (1, -2) ``` ## 4週目.巡回ハミング符号.単一誤りパターンに対するシンドロームの計算 ``` ``` for i in range(7): E = np.poly1d( [1] + [0] * i ) S = np.poly1d( np.remainder( (E/G)[1], 2 ) ) print("i = %d" % i), print("%s = E(x)" % E), print("%s = S(x)" % S), print('\n===\n') ``` i = 0 1 = E(x) 1 = S(x) === i = 1 1 x = E(x) 1 x = S(x) === i = 2 2 1 x = E(x) 2 1 x = S(x) === i = 3 3 1 x = E(x) 1 x + 1 = S(x) === i = 4 4 1 x = E(x) 2 1 x + 1 x = S(x) === i = 5 5 1 x = E(x) 2 1 x + 1 x + 1 = S(x) === i = 6 6 1 x = E(x) 2 1 x + 1 = S(x) === forループの中ではprintpは動かないみたい ### 参考 SymPyに,$ \mathbb{F}_2[x]$ をちゃんと計算する方法はある ただし,上記の方が筆算の途中経過等が分かりやすいと思われる https://docs.sympy.org/latest/modules/polys/internals.html https://stackoverflow.com/questions/48065360/interpolate-polynomial-over-a-finite-field?rq=1 ``` from sympy.polys.domains import ZZ from sympy.polys.galoistools import gf_div, gf_add_mul print(gf_div(ZZ.map([1,1,1,0,0,0,0]), ZZ.map([1,0,1,1]), 2, ZZ)[1]) ``` [1, 0, 0] ``` P = ZZ.map([1,1,1,0,0,0,0]) sympy.Poly(P,x).as_expr() ``` ``` ```
a68784cc2b4f183bca35ec54a9b6e230ac2bb482
30,889
ipynb
Jupyter Notebook
it3.ipynb
kojiyam/information-theory
601cf119b8c7b5d24bd61594e2a4d7b9a8c6f223
[ "MIT" ]
null
null
null
it3.ipynb
kojiyam/information-theory
601cf119b8c7b5d24bd61594e2a4d7b9a8c6f223
[ "MIT" ]
null
null
null
it3.ipynb
kojiyam/information-theory
601cf119b8c7b5d24bd61594e2a4d7b9a8c6f223
[ "MIT" ]
null
null
null
41.075798
2,774
0.618473
true
1,549
Qwen/Qwen-72B
1. YES 2. YES
0.787931
0.737158
0.58083
__label__yue_Hant
0.323146
0.187792
# Description: * calculations for modeling fragments in a CsCl gradient under non-equilibrium conditions # Notes * Good chapter on determining G+C content from CsCl gradient analysis http://www.academia.edu/428160/Using_Analytical_Ultracentrifugation_of_DNA_in_CsCl_Gradients_to_Explore_Large-Scale_Properties_of_Genomes http://www.analyticalultracentrifugation.com/dynamic_density_gradients.htm Meselson et al. - 1957 - Equilibrium Sedimentation of Macromolecules in Den Vinograd et al. - 1963 - Band-Centrifugation of Macromolecules and Viruses http://onlinelibrary.wiley.com.proxy.library.cornell.edu/doi/10.1002/bip.360101011/pdf ## Ultracentrigation book http://books.google.com/books?hl=en&lr=&id=vxcSBQAAQBAJ&oi=fnd&pg=PA143&dq=Measurement+of+Density+Heterogeneity+by+Sedimentation+in&ots=l8ObYN-zVv&sig=Vcldf9_aqrJ-u7nQ1lBRKbknHps#v=onepage&q&f=false ## Forum info * http://stackoverflow.com/questions/18624005/how-do-i-perform-a-convolution-in-python-with-a-variable-width-gaussian * http://timstaley.co.uk/posts/convolving-pdfs-in-python/ ## Possible workflows: ### KDE convolution * KDE of fragment GC values * bandwidth cross validation: https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/ * convolution of KDE with diffusion function: * gaussian w/ mean of 0 and scale param = 44.5 (kb) / (mean fragment length) * http://www.academia.edu/428160/Using_Analytical_Ultracentrifugation_of_DNA_in_CsCl_Gradients_to_Explore_Large-Scale_Properties_of_Genomes * http://nbviewer.ipython.org/github/timstaley/ipython-notebooks/blob/compiled/probabilistic_programming/convolving_distributions_illustration.ipynb ##variable KDE * variable KDE of fragment GC values where kernel sigma is determined by mean fragment length * gaussian w/ scale param = 44.5 (kb) / fragment length # Standard deviation of homogeneous DNA fragments Vinograd et al., 1963; (band-centrifugation): \begin{align} \sigma^2 = \frac{r_0}{r_0^0} \left\{ \frac{r_0}{r_0^0} + 2D \left( t - t^0 \right) \right\} \end{align} ## Standard deviation of Gaussian band (assuming equilibrium), Meselson et al., 1957: \begin{align} \sigma^2 = -\sqrt{w} \\ w = \textrm{molecular weight} \end{align} ## Standard deviation of Gaussian band at a given time, Meselson et al., 1957: \begin{equation} t^* = \frac{\sigma^2}{D} \left(ln \frac{L}{\sigma} + 1.26 \right), \quad L\gg\sigma \\ \sigma^2 = \textrm{stdev at equilibrium} \\ L = \textrm{length of column} \end{equation} * Gaussian within 1% of equillibrium value from center. * ! 
assumes density gradient established at t = 0 ### Alternative form: \begin{align} t = \frac{\beta^{\circ}(p_p - p_m)}{w^4 r_p^2 s} * \left(1.26 + ln \frac{r_b - r_t}{\sigma}\right) \end{align} \begin{equation} t = \textrm{time in seconds} \\ \beta^{\circ} = \beta^{\circ} \textrm{ of salt forming the density gradient (CsCl = ?)} \\ p_p = \textrm{buoyant density of the the particle in the salt} \\ p_m = \textrm{density of the medium (at the given radius?)} \\ w = \textrm{angular velocity} \\ r_p = \textrm{distance (cm) of particle from from the axis of rotation} \\ s = \textrm{sedimentation rate } (S_{20,w} * 10^{-13}) \\ r_b = \textrm{distance to top of gradient} \\ r_t = \textrm{distance to bottom of gradient} \\ r_b - r_t = \textrm{length of gradient (L)} \end{equation} ### Solving for sigma: \begin{align} \sigma = \frac{L}{e^{\left(\frac{w^4 r_p^2 s t}{\beta^{\circ}(p_p - p_m)} - 1.26\right)}} \end{align} # Variables specific to the Buckley lab setup \begin{equation} \omega = (2\pi \times \textrm{RPM}) /60, \quad \textrm{RPM} = 55000 \\ \beta^{\circ} = 1.14 \times 10^9 \\ r_b = 4.85 \\ r_t = 2.6 \\ L = r_b - r_t \\ s = S_{20,w} * 10^{-13} = 2.8 + 0.00834 * (l*666)^{0.479}, \quad \textrm{where l = length of fragment} \\ p_m = 1.7 \\ p_p = \textrm{buoyant density of the particle in CsCl} \\ r_p = ? \\ t = \textrm{independent variable} \end{equation} __isoconcentration point__ \begin{equation} r_c = \sqrt{(r_t^2 + r_t * r_b + r_b^2)/3} \end{equation} __r<sub>p</sub> in relation to the particle's buoyant density (assuming equilibrium?)__ \begin{equation} r_p = \sqrt{ ((p_p-p_m)*2*\frac{\beta^{\circ}}{w}) + r_c^2 } \\ p_p = \textrm{buoyant density} \end{equation} Maybe this should be drawn from a uniform distribution (particules distributed evenly across the gradient)??? __buoyant density of a DNA fragment in CsCl__ \begin{equation} p_p = 0.098F + 1.66, \quad \textrm{where F = G+C molar fraction} \end{equation} __calculating gradient density at specific radius (to calculate p_m)__ ?? 
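As a quick numeric sanity check of the expressions above, the sketch below plugs the stated formulas for $p_p$, $s$, $r_c$, $r_p$ and $\sigma$ into plain Python, treating $p_m$ as the fixed medium density 1.7 since the radius-dependent gradient density is still an open question here. Variable names are illustrative, and no claim is made about the physical plausibility of the resulting magnitudes (they depend strongly on $r_p$, which is also flagged as questionable below).

```
# Illustrative sketch: evaluate sigma(t) from the expressions stated above,
# using the Buckley-lab constants. Not a validated calculation.
import numpy as np

RPM = 55000
omega = 2 * np.pi * RPM / 60                     # angular velocity
beta_o = 1.14e9                                  # beta-degree of CsCl (value used here)
r_t, r_b = 2.6, 4.85                             # top / bottom of gradient (cm)
col_len = r_b - r_t                              # L, length of gradient
rho_m = 1.7                                      # density of the medium

def buoyant_density(gc):
    return 0.098 * gc + 1.66                     # p_p from G+C molar fraction

def sedimentation_rate(frag_len):
    return (2.8 + 0.00834 * (frag_len * 666) ** 0.479) * 1e-13   # S_{20,w} * 1e-13

def sigma_at_time(gc, frag_len, t):
    rho_p = buoyant_density(gc)
    r_c = np.sqrt((r_t**2 + r_t * r_b + r_b**2) / 3)             # isoconcentration point
    r_p = np.sqrt((rho_p - rho_m) * 2 * beta_o / omega + r_c**2)
    s = sedimentation_rate(frag_len)
    exponent = omega**4 * r_p**2 * s * t / (beta_o * (rho_p - rho_m))
    return col_len * np.exp(1.26 - exponent)     # same as L / exp(exponent - 1.26)

print(sigma_at_time(gc=0.5, frag_len=10000, t=60))
```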
__info needed on a DNA fragment to determine it's sigma of the Guassian distribution__ * fragment length * fragment G+C # Graphing the equations above ``` %pylab inline ``` Populating the interactive namespace from numpy and matplotlib ``` import scipy as sp import numpy as np import pandas as pd import matplotlib.pyplot as plt import mixture #import sklearn.mixture as mixture ``` ## Generating fragments ``` n_frags = 10000 frag_GC = np.random.normal(0.5,0.1,n_frags) frag_GC[frag_GC < 0] = 0 frag_GC[frag_GC > 1] = 1 frag_len = np.random.normal(10000,1000,n_frags) ``` ``` ret = plt.hist2d(frag_GC, frag_len, bins=100) ``` ## Setting variables ``` RPM = 55000 omega = (2 * np.pi * RPM) / 60 beta_o = 1.14 * 10**9 radius_bottom = 4.85 radius_top = 2.6 col_len = radius_bottom - radius_top density_medium = 1.7 ``` ## Calculation functions ``` # BD from GC frag_BD = 0.098 * frag_GC + 1.66 ret = plt.hist(frag_BD, bins=100) ``` ``` sedimentation = (frag_len*666)**0.479 * 0.00834 + 2.8 # l = length of fragment ret = plt.hist(sedimentation, bins=100) ``` ``` # sedimentation as a function of fragment length len_range = np.arange(1,10000, 100) ret = plt.scatter(len_range, 2.8 + 0.00834 * (len_range*666)**0.479 ) ``` ``` # isoconcentration point iso_point = sqrt((radius_top**2 + radius_top * radius_bottom + radius_bottom**2)/3) iso_point ``` 3.7812035121109258 ``` # radius of particle #radius_particle = np.sqrt( (frag_BD - density_medium)*2*(beta_o/omega) + iso_point**2 ) #ret = plt.hist(radius_particle) ``` array([ 62.83472857, 65.24947889, 71.07927139, 39.2627203 , 83.56106181, 60.12239077, 73.64807884, nan, 91.27392451]) ``` ``` # Testing out speed of mixture models ``` n_dists = 10 n_samp = 10000 ``` ``` def make_mm(n_dists): dist_loc = np.random.uniform(0,1,n_dists) dist_scale = np.random.uniform(0,0.1, n_dists) dists = [mixture.NormalDistribution(x,y) for x,y in zip(dist_loc, dist_scale)] eq_weights = np.array([1.0 / n_dists] * n_dists) eq_weights[0] += 1.0 - np.sum(eq_weights) return mixture.MixtureModel(n_dists, eq_weights, dists) ``` ``` mm = make_mm(n_dists) ``` ``` %%timeit smp = mm.sampleDataSet(n_samp).getInternalFeature(0).flatten() ``` 10 loops, best of 3: 98.4 ms per loop ``` %%timeit smp = np.array([mm.sample() for i in arange(n_samp)]) ``` 10 loops, best of 3: 65.4 ms per loop ``` n_dists = 1000 mm = make_mm(n_dists) ``` ``` %%timeit smp = mm.sampleDataSet(n_samp).getInternalFeature(0).flatten() ``` 1 loops, best of 3: 1.7 s per loop ``` %%timeit smp = np.array([mm.sample() for i in arange(n_samp)]) ``` 1 loops, best of 3: 1.64 s per loop ``` n_dists = 10000 mm = make_mm(n_dists) ``` ``` %%timeit smp = mm.sampleDataSet(n_samp).getInternalFeature(0).flatten() ``` 1 loops, best of 3: 17 s per loop ``` %%timeit smp = np.array([mm.sample() for i in arange(n_samp)]) ``` 1 loops, best of 3: 16.7 s per loop ``` n_samp = 100000 ``` ``` %%timeit smp = mm.sampleDataSet(n_samp).getInternalFeature(0).flatten() ``` 1 loops, best of 3: 2min 51s per loop ``` %%timeit smp = np.array([mm.sample() for i in arange(n_samp)]) ``` __Notes:__ * a mixture model with many distributions (>1000) is very slow for sampling ``` x = np.random.normal(3, 1, 100) y = np.random.normal(1, 1, 100) H, xedges, yedges = np.histogram2d(y, x, bins=100) ``` ``` H ``` *** *** # Workflow for modeling DNA fragment locations in a gradient For each genome in mock community, simulate N fragments and calculate their Guassian distributions in the gradient. 
Create a mixture model of those Gaussian distributions to sample A<sub>a</sub> fragments, where A<sub>a</sub> = the absolute abundance of the taxon in the mock community. One mixture model per genome. ## User defined: * Rotor specs * cfg parameters (RPM, time) ## Generate fragment density distributions * For each genome in the mock community: * Simulate fragments * Calculate sigma of Gaussian density distribution * Create mixture model from all Gaussians of the fragments ## Simulate fraction communities * For each genome in mock community: * sample fragments from mixture model based on total abundance of taxon in mock community * bin fragments into gradient fractions (a rough sketch of these last two steps follows below) ``` ```
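A rough, numpy-only sketch of those last two steps (sampling fragment densities from a per-genome mixture of Gaussians, then binning into fractions). Given the timing notes above, a vectorized sampler like this avoids the slow per-sample loop of the `mixture` package; all parameter values and fraction boundaries below are placeholders.

```
# Placeholder sketch of "sample fragments from mixture model" + "bin into fractions".
import numpy as np

rng = np.random.RandomState(0)

# per-fragment Gaussian parameters: centers = buoyant densities, sigmas from the formulas above
centers = rng.normal(1.71, 0.01, size=1000)
sigmas = np.full_like(centers, 0.002)

def sample_mixture(n, centers, sigmas, rng):
    """Equal-weight Gaussian mixture: pick a component per draw, then draw from it."""
    idx = rng.randint(0, len(centers), size=n)
    return rng.normal(centers[idx], sigmas[idx])

abund = 5000                                   # absolute abundance of this taxon
densities = sample_mixture(abund, centers, sigmas, rng)

# bin the sampled buoyant densities into gradient fractions
fraction_edges = np.arange(1.66, 1.78, 0.004)
counts, _ = np.histogram(densities, bins=fraction_edges)
print(counts)
```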
e5c5f731a819b1a46e17efdfcc7b86f6751f9d40
60,930
ipynb
Jupyter Notebook
ipynb/.ipynb_checkpoints/non-equilibrium_calcs-checkpoint.ipynb
arischwartz/test
87a8306a294f59b0eef992529ce900cea876c605
[ "MIT" ]
2
2019-03-15T09:46:48.000Z
2019-06-05T18:16:39.000Z
ipynb/.ipynb_checkpoints/non-equilibrium_calcs-checkpoint.ipynb
arischwartz/test
87a8306a294f59b0eef992529ce900cea876c605
[ "MIT" ]
1
2020-11-01T23:18:10.000Z
2020-11-01T23:18:10.000Z
ipynb/.ipynb_checkpoints/non-equilibrium_calcs-checkpoint.ipynb
arischwartz/test
87a8306a294f59b0eef992529ce900cea876c605
[ "MIT" ]
null
null
null
81.348465
17,851
0.80389
true
2,911
Qwen/Qwen-72B
1. YES 2. YES
0.782662
0.665411
0.520792
__label__eng_Latn
0.545708
0.048303
```python import numpy as np import sympy sympy.init_printing(use_unicode=True) from sympy import symbols,simplify,diff,latex,Piecewise from sympy.solvers import solve from IPython.display import display from typing import Callable from sympy.utilities.lambdify import lambdify, implemented_function %matplotlib inline import matplotlib.pyplot as plt def simplified(exp, title=None): simp = simplify(exp) if simplified.LOG: if title: display(title,simp) else: display(simp) return simp simplified.LOG = True def firstOrderCondition(exp, var): diffExp = simplified(diff(exp, var)) solutions = solve(diffExp, var) if firstOrderCondition.LOG: display(solutions) return solutions firstOrderCondition.LOG = True class Result(object): # a class for holding results of calculations def __repr__(self): return self.__dict__.__repr__() def display(self): for k,v in sorted(self.__dict__.items()): display(k,v) def subs(self, params): ans = Result() for k,v in sorted(self.__dict__.items()): if hasattr(v,"subs"): ans.__dict__[k] = v.subs(params) else: ans.__dict__[k] = v return ans ``` # Symbolic calculations ```python a,p,r,b,vmax,bmin,bmax,beta = symbols('a p r b v_{\max} b_{\min} b_{\max} \\beta', positive=True) w,T,D,L,n,Supply = symbols('w T \\Delta L n \\tau', positive=True) D,Supply ``` ```python def exactCostPerDay(T): return (a*p + w*b*( (1+r)**T - 1 )) / T def approxCostPerDay(T): return a*p/T + w*b*r def symmetricLifetime(w): return w**2/4/L def asymmetricLifetime(w): return w / D uniformPDF = Piecewise( (1 / bmax , b<bmax), (0, True) ) powerlawPDF = Piecewise( (0 , b<bmin), (bmin / b**2, True) ) display(sympy.integrate(uniformPDF, (b, 0, np.inf))) # should be 1 display(sympy.integrate(powerlawPDF, (b, 0, np.inf))) # should be 1 ``` ```python params = { L: 10, # total transfers per day D: 6, # delta transfers per day beta: 0.01, # value / transfer-size r: 4/100/365, # interest rate per day a: 1.1, # records per reset tx Supply: 288000, # records per day bmin: 0.001, # min transfer size (for power law distribution) bmax: 1, # max transfer size (for uniform distribution) } ``` ```python def calculateMarketEquilibrium(costPerDay:Callable, channelLifetime:Callable, wSolutionIndex:int): T = simplified(channelLifetime(w), "T") CPD = simplified(costPerDay(T), "CPD") optimal = Result() optimal.w = simplified(firstOrderCondition(CPD,w)[wSolutionIndex], "Optimal channel funding (w)") optimal.T = simplified(T.subs(w,optimal.w), "optimal channel lifetime (T)") optimal.CPD = simplified(CPD.subs(w,optimal.w), "Cost-per-day") optimal.RPD = simplified(a / optimal.T, "Potential records per day") optimal.C = simplified(optimal.CPD*optimal.T, "Cost between resets") optimal.V = simplified(optimal.T*L*beta*b, "Value between resets") optimal.VCR1 = 1 optimal.VCR2 = simplified(optimal.V / optimal.C, "Value/Cost Ratio of lightning") optimal.VCR3 = simplified(beta*b / p, "Value/Cost Ratio of blockchain") optimal.b12 = simplified(solve(optimal.VCR1-optimal.VCR2,b)[0],"b below which an agent prefers nop to lightning") optimal.b13 = simplified(solve(optimal.VCR1-optimal.VCR3,b)[0],"b below which an agent prefers nop to blockchain") optimal.b23 = simplified(solve(optimal.VCR2-optimal.VCR3,b)[0],"b below which an agent prefers lightning to blockchain") # Calculate threshold prices. This part is relevant only for uniform valuations. 
optimal.p12 = simplified(solve(optimal.b12-bmax,p)[0],"price above which all agents prefer nop to lightning") optimal.p13 = simplified(solve(optimal.b13-bmax,p)[0],"price above which all agents prefer nop to blockchain") optimal.p23 = simplified(solve(optimal.b23-bmax,p)[0],"price above which all agents prefer lightning to blockchain") # substitute the numeric params: numeric = optimal.subs(params) numeric.b23 = numeric.b23.evalf() numeric.p23 = numeric.p23.evalf() return (optimal,numeric) ``` ```python simplified.LOG = False firstOrderCondition.LOG = False (asymmetricSymbolic,asymmetricNumeric) = calculateMarketEquilibrium(approxCostPerDay,asymmetricLifetime,wSolutionIndex=0) ``` ```python #asymmetricSymbolic.display() asymmetricNumeric.display() ``` ```python simplified.LOG = False firstOrderCondition.LOG = False (symmetricSymbolic,symmetricNumeric) = calculateMarketEquilibrium(approxCostPerDay,symmetricLifetime,wSolutionIndex=0) ``` ```python symmetricNumeric.display() ``` # Demand curves ```python ### Generic function for calculating demand - does not give plottable expressions: def calculateDemands(optimal, valuePDF): optimal.demandWithoutLightning = simplified( sympy.integrate(L * valuePDF, (b, optimal.b13,np.inf)), "demand without lightning" ) optimal.demandWithLightning = simplified( sympy.integrate(a / optimal.T * valuePDF, (b, optimal.b12,optimal.b23)) +\ sympy.integrate(L * valuePDF, (b, optimal.b23,np.inf)), "demand with lightning" ) return optimal simplified.LOG = True calculateDemands(asymmetricSymbolic, uniformPDF) asymmetricNumeric = asymmetricSymbolic.subs(params) display(asymmetricNumeric.demandWithoutLightning) display(asymmetricNumeric.demandWithLightning) ``` # Plots ```python def plotSymbolic(xRange, yExpression, xVariable, style, label): plt.plot(xRange, [yExpression.subs(xVariable,xValue) for xValue in xRange], style, label=label) def plotDemandCurves(priceRange, demandWithoutLightning, demandAsymmetric, demandSymmetric): plotSymbolic(priceRange, demandWithoutLightning, p, "r-",label="no lightning") plotSymbolic(priceRange, demandAsymmetric, p, "b.",label="asymmetric") plotSymbolic(priceRange, demandSymmetric, p, "g--",label="symmetric") plt.gca().set_ylim(-1,11) plt.xlabel("blockchain fee $p$ [coins]") plt.ylabel("Demand of a single pair [records/day]") plt.legend() def plotPriceCurves(nRange, priceWithoutLightning, priceAsymmetric, priceSymmetric): plotSymbolic(nRange, priceWithoutLightning, n, "r-",label="no lightning") plotSymbolic(nRange, priceAsymmetric, n, "b.",label="asymmetric") if priceSymmetric: plotSymbolic(nRange, priceSymmetric, n, "g--",label="symmetric") #plt.gca().set_ylim(-1,11) plt.xlabel("Number of pairs") plt.ylabel("Market-equilibrium price [coins/record]") plt.legend() ``` ## Uniform distribution ```python def calculateDemandsUniformDistribution(optimal): optimal.demandB13 = sympy.integrate(L / bmax, (b, optimal.b13, bmax)) optimal.demandWithoutLightning = simplified(Piecewise( (optimal.demandB13, p < optimal.p13), # b13 < bmax (0, True)), "demand without lightning" ) optimal.demandL1 = sympy.integrate(a / optimal.T / bmax, (b, optimal.b12, optimal.b23)) # b12<b23<bmax optimal.demandL2 = sympy.integrate(a / optimal.T / bmax, (b, optimal.b12, bmax)) # b12<bmax<b23 optimal.demandB23 = sympy.integrate(L / bmax, (b, optimal.b23, bmax)) # b23<bmax optimal.demandWithLightning = simplified(Piecewise( (optimal.demandL1+optimal.demandB23 , p < optimal.p23), # b23 < bmax (optimal.demandL2 , p < optimal.p12), # b12 < bmax (0, True)), "demand with 
lightning" ) simplified.LOG = True calculateDemandsUniformDistribution(asymmetricSymbolic) asymmetricNumeric = asymmetricSymbolic.subs(params) display(asymmetricNumeric.demandWithoutLightning) display(asymmetricNumeric.demandWithLightning) calculateDemandsUniformDistribution(symmetricSymbolic) symmetricNumeric = symmetricSymbolic.subs(params) display(symmetricNumeric.demandWithoutLightning) display(symmetricNumeric.demandWithLightning) #plot: priceRange = np.linspace(0,1e-4,100) plotDemandCurves(priceRange, asymmetricNumeric.demandWithoutLightning, asymmetricNumeric.demandWithLightning, symmetricNumeric.demandWithLightning) plt.savefig('../graphs/demand-curves-small-price.pdf', format='pdf', dpi=1000) plt.show() priceRange = np.linspace(0,0.015,100) plotDemandCurves(priceRange, asymmetricNumeric.demandWithoutLightning, asymmetricNumeric.demandWithLightning, symmetricNumeric.demandWithLightning) plt.gca().set_ylim(-0.1,1) plt.savefig('../graphs/demand-curves-large-price.pdf', format='pdf', dpi=1000) plt.show() ``` ```python simplified.LOG = True ### Demand curves #asymmetric case: (b12,b13,b23) = (asymmetricNumeric.b12,asymmetricNumeric.b13,asymmetricNumeric.b23) (p12,p13,p23) = (asymmetricNumeric.p12,asymmetricNumeric.p13,asymmetricNumeric.p23) demand1 = (2/3 * sympy.sqrt(D*a*r/p/bmax**2)*(b23**(3/2) - b12**(3/2)) + L*(bmax-b23)).subs(params) demand2 = (2/3 * sympy.sqrt(D*a*r/p/bmax**2)*(bmax**(3/2) - b12**(3/2))).subs(params) asymmetricNumeric.demandWithLightning = Piecewise( (demand1 , p < p23), (demand2 , p < p12), (0, True)) #symmetric case: (b12s,b13s,b23s) = (symmetricNumeric.b12,symmetricNumeric.b13,symmetricNumeric.b23) (p12s,p13s,p23s) = (symmetricNumeric.p12,symmetricNumeric.p13,symmetricNumeric.p23) demand1s = (3/5 * (L*a*r**2/p**2/bmax**3)**(1/3)*(b23s**(5/3) - b12s**(5/3)) + L*(bmax-b23s)).subs(params) demand2s = (3/5 * (L*a*r**2/p**2/bmax**3)**(1/3)*(bmax**(5/3) - b12s**(5/3))).subs(params) symmetricNumeric.demandWithLightning = Piecewise( (demand1s , p < p23s), (demand2s , p < p12s), (0, True)) #plot: priceRange = np.linspace(0,1e-4,100) plotDemandCurves(priceRange, asymmetricNumeric.demandWithoutLightning, asymmetricNumeric.demandWithLightning, symmetricNumeric.demandWithLightning) plt.savefig('../graphs/demand-curves-small-price.pdf', format='pdf', dpi=1000) plt.show() priceRange = np.linspace(0,0.015,100) plotDemandCurves(priceRange, asymmetricNumeric.demandWithoutLightning, asymmetricNumeric.demandWithLightning, symmetricNumeric.demandWithLightning) plt.gca().set_ylim(-0.1,1) plt.savefig('../graphs/demand-curves-large-price.pdf', format='pdf', dpi=1000) plt.show() ``` ```python ### Price curves simplified.LOG = True max_demand = params[L] min_demand = asymmetricNumeric.demandWithLightning.subs(p,p13).evalf(); priceWithoutLightning = simplified(Piecewise( (beta*bmax*(1-Supply/n/L) , n*L>Supply), (0,True)).subs(params)) price1 = simplified(solve(n*demand1-Supply, p)[0]) price2 = simplified(solve(n*demand2-Supply, p)[0]) max_demand1 = demand1.subs(p,0).evalf(); min_demand1 = demand1.subs(p,p23).evalf(); max_demand2 = demand2.subs(p,p23).evalf(); min_demand2 = demand2.subs(p,p12).evalf(); asymmetricNumeric.priceWithLightning = simplified(Piecewise( (0, Supply > n*max_demand1), (price1 , Supply > n*min_demand1), (price2 , Supply > n*min_demand2), (0, True)).subs(params)) #price1s = simplified(solve(n*demand1s-Supply, p)[0]) # price2s = simplified(solve(n*demand2s-Supply, p)[0]) # unsolvable max_demand1s = demand1s.subs(p,0).evalf(); min_demand1s = 
demand1s.subs(p,p23s).evalf(); max_demand2s = demand2s.subs(p,p23s).evalf(); min_demand2s = demand2s.subs(p,p12s).evalf(); symmetricNumeric.priceWithLightning = None #symmetricNumeric.priceWithLightning = simplified(Piecewise( # (0, Supply > n*max_demand1s), # (price1s , Supply > n*min_demand1s), # price2s, Supply > n*min_demand2s), # u # (0, True)).subs(params)) ``` ```python nRange = np.linspace(0,3000000,100) plotPriceCurves(nRange, priceWithoutLightning, asymmetricNumeric.priceWithLightning, symmetricNumeric.priceWithLightning) plt.savefig('../graphs/price-curves.pdf', format='pdf', dpi=1000) ```
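As a numeric sanity check of the symbolic result (illustrative only): with `approxCostPerDay` and the asymmetric lifetime $T = w/\Delta$, the cost per day reduces to $a p \Delta / w + w b r$, whose first-order condition gives $w^* = \sqrt{a p \Delta / (b r)}$. The sketch below compares that closed form against a direct numeric minimization; $p$ and $b$ are arbitrary values chosen for the check, the other numbers follow the `params` dict above.

```python
# Illustrative check of the asymmetric-case first-order condition.
import numpy as np
from scipy.optimize import minimize_scalar

a_, p_, Delta_, r_, b_ = 1.1, 1e-4, 6, 4 / 100 / 365, 0.5   # p_ and b_ are arbitrary

def cost_per_day(w):
    # a*p/T + w*b*r with T = w/Delta
    return a_ * p_ * Delta_ / w + w * b_ * r_

w_closed_form = np.sqrt(a_ * p_ * Delta_ / (b_ * r_))
w_numeric = minimize_scalar(cost_per_day, bounds=(1e-9, 10.0), method='bounded').x
print(w_closed_form, w_numeric)   # the two should agree to several decimals
```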
fb3e8c8f123f88051678b507a976073068600f10
317,354
ipynb
Jupyter Notebook
old/market-equilibrium-old-demand-curves.ipynb
erelsgl/bitcoin-simulations
79bfa0930ab9ad17be59b9cad1ec6e7c3530aa3b
[ "MIT" ]
1
2018-11-26T02:44:38.000Z
2018-11-26T02:44:38.000Z
old/market-equilibrium-old-demand-curves.ipynb
erelsgl/bitcoin-simulations
79bfa0930ab9ad17be59b9cad1ec6e7c3530aa3b
[ "MIT" ]
null
null
null
old/market-equilibrium-old-demand-curves.ipynb
erelsgl/bitcoin-simulations
79bfa0930ab9ad17be59b9cad1ec6e7c3530aa3b
[ "MIT" ]
3
2018-09-06T00:11:26.000Z
2021-08-29T17:14:59.000Z
181.448828
22,926
0.852175
true
3,368
Qwen/Qwen-72B
1. YES 2. YES
0.857768
0.853913
0.732459
__label__eng_Latn
0.257587
0.54008
## Exercise 4, Practice Set 1 For each of the following systems, find all the equilibrium points and determine the type of each isolated equilibrium point. * c) $$\left\{ \begin{array}{lcc} \dot{x}_{1}=(1-x_{1})x_{1}-\frac{2x_{1}x_{2}}{1+x_{1}}\\ \\ \dot{x}_{2}=(2-\frac{x_{2}}{1+x_{1}})x_{2} \end{array} \right.$$ ```python import sympy as sym ``` ```python # With this, the outputs are rendered in LaTeX sym.init_printing(use_latex=True) ``` ```python x_1, x_2 = sym.symbols('x_1 x_2') ``` ```python X = sym.Matrix([x_1, x_2]) X ``` ```python f_1 = (1 - x_1) * x_1 - (2 * x_1 * x_2) / (1 + x_1) f_1 ``` ```python f_2 = (2 - (x_2)/(1 + x_1)) * x_2 f_2 ``` ```python F = sym.Matrix([f_1,f_2]) F ``` ```python # equilibrium points of the system pes = sym.solve([f_1,f_2]) pes ``` ```python A = F.jacobian(X) A ``` ```python A_1 = A.subs(pes[0]) A_1 ``` ```python A_1.eigenvals() ``` ```python ```
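A possible follow-up (not part of the original exercise code): iterate over all the equilibrium points returned by `sym.solve` and classify each one from the real parts of the eigenvalues of the Jacobian evaluated there. This sketch reuses `A`, `pes` and `sym` from the cells above.

```python
# Classify each isolated equilibrium point from the Jacobian eigenvalues (sketch).
for pe in pes:
    eigs = list(A.subs(pe).eigenvals().keys())
    reals = [complex(sym.N(ev)).real for ev in eigs]
    if all(r < 0 for r in reals):
        kind = "asymptotically stable (node or focus)"
    elif all(r > 0 for r in reals):
        kind = "unstable (node or focus)"
    elif any(r < 0 for r in reals) and any(r > 0 for r in reals):
        kind = "saddle point"
    else:
        kind = "non-hyperbolic: the linearization is inconclusive"
    print(pe, eigs, kind)
```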
bbdcced9c2e3ea6b03eb25048b684892bb166986
21,009
ipynb
Jupyter Notebook
practica1_eje4_c.ipynb
elsuizo/Nonlinear_systems
9636d4a450339b8c735934923810c9539ac76042
[ "MIT" ]
null
null
null
practica1_eje4_c.ipynb
elsuizo/Nonlinear_systems
9636d4a450339b8c735934923810c9539ac76042
[ "MIT" ]
null
null
null
practica1_eje4_c.ipynb
elsuizo/Nonlinear_systems
9636d4a450339b8c735934923810c9539ac76042
[ "MIT" ]
null
null
null
64.051829
2,852
0.751154
true
389
Qwen/Qwen-72B
1. YES 2. YES
0.924142
0.865224
0.79959
__label__spa_Latn
0.281417
0.696048
```python import numpy as np import sympy as sp import matplotlib.pyplot as plt from pyodesys.tests._robertson import get_ode_exprs from pyodesys.symbolic import ScaledSys, PartiallySolvedSystem sp.init_printing() %matplotlib inline ``` ```python linf, linj = get_ode_exprs() logf, logj = get_ode_exprs(True, True) linsys = ScaledSys.from_callback(linf, 3, 3, dep_scaling=1e8, linear_invariants=[[1, 1, 1]], names='ABC') ``` ```python psysA = PartiallySolvedSystem.from_linear_invariants(linsys, preferred=[0], description='A ') psysC = PartiallySolvedSystem.from_linear_invariants(linsys, preferred=[2], description='C ') psysA.exprs, psysC.exprs ``` ```python tend, iv, pars = 1e18, [1, 0, 0], [0.04, 1e4, 3e7] integrate_kw = dict(integrator='cvode', record_rhs_xvals=True, record_jac_xvals=True, nsteps=3000, atol=1e-8, rtol=1e-8, return_on_error=True) results = [] fig, axes = plt.subplots(1, 3, figsize=(16, 4)) def plot_res(res, ax): res.plot(xscale='log', yscale='log', ax=ax) #, info_vlines_kw=True) ax.set_title((res.odesys.description or '') + ('%d steps, ' % res.info['n_steps']) + ('success' if res.info['success'] else 'failed')) ax.set_xlim([1e-12, tend]) ax.set_ylim([1e-30, 1e9]) ax.legend(loc='best') for odesys, ax in zip([linsys, psysA, psysC], axes): results.append(odesys.integrate(tend, iv, pars, **integrate_kw)) plot_res(results[-1], ax) ``` ```python linsys.autonomous_exprs, psysA.autonomous_exprs, psysC.autonomous_exprs ``` ```python linsys.autonomous_interface, psysA.autonomous_interface, psysC.autonomous_interface ``` ```python psysAr = PartiallySolvedSystem.from_linear_invariants(linsys, preferred=[0], roots=[1000*linsys.dep[0] - linsys.dep[2]]) psysAr.autonomous_exprs, psysAr.autonomous_interface ``` ```python resAroot = psysAr.integrate(tend, iv, pars, return_on_root=True, **integrate_kw) resAswitch = resAroot.copy() resAswitch.extend_by_integration(tend, odesys=psysC, **integrate_kw) fig, axes = plt.subplots(1, 2, figsize=(16, 4)) axes[1].axvline(resAswitch.xout[resAswitch.info['root_indices'][0]], ls='--', alpha=.5) for res, ax in zip([resAroot, resAswitch], axes): plot_res(res, ax) ```
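For reference, the imported test system is assumed to be the classic Robertson kinetics (the rate constants 0.04, 1e4 and 3e7 and the conservation $A+B+C=1$ match it). A plain SciPy integration of that system, sketched below, can serve as an independent cross-check of the trajectories plotted above.

```python
# Assumed Robertson kinetics, integrated with SciPy as an independent cross-check.
import numpy as np
from scipy.integrate import solve_ivp

k1, k2, k3 = 0.04, 1e4, 3e7

def robertson(t, y):
    A, B, C = y
    dA = -k1 * A + k2 * B * C
    dB = k1 * A - k2 * B * C - k3 * B**2
    dC = k3 * B**2
    return [dA, dB, dC]

sol = solve_ivp(robertson, (0, 1e10), [1.0, 0.0, 0.0], method='BDF',
                rtol=1e-8, atol=1e-10)
print(sol.y[:, -1], sol.y[:, -1].sum())   # A + B + C should stay ~1
```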
6ed7264819a68b85920479a2c7f5bea9387e3731
4,077
ipynb
Jupyter Notebook
examples/_extend_by_integration.ipynb
slayoo/pyodesys
8e1afb195dadf6c6f8e765873bc9dd0fae067c39
[ "BSD-2-Clause" ]
82
2015-09-29T16:51:03.000Z
2022-02-02T13:26:50.000Z
examples/_extend_by_integration.ipynb
slayoo/pyodesys
8e1afb195dadf6c6f8e765873bc9dd0fae067c39
[ "BSD-2-Clause" ]
28
2015-09-29T14:40:45.000Z
2021-09-18T19:29:50.000Z
examples/_extend_by_integration.ipynb
slayoo/pyodesys
8e1afb195dadf6c6f8e765873bc9dd0fae067c39
[ "BSD-2-Clause" ]
13
2016-03-18T14:00:39.000Z
2021-09-17T13:54:29.000Z
27.362416
129
0.574197
true
729
Qwen/Qwen-72B
1. YES 2. YES
0.774583
0.746139
0.577947
__label__eng_Latn
0.201839
0.181094
# Mixed Integer Linear Programming (MILP) ## Introduction * Some variables are restricted to be integers * NP-complete * Applications * Production planning * Scheduling * Many more... ## The standard form \begin{align} \text{maximize}\ & \mathbf{c}^T\mathbf{x} + \mathbf{k}^T\mathbf{y} \\ \text{subject to } & \\ & A\mathbf{x} &&\leq \mathbf{b} \\ & D\mathbf{y} &&\leq \mathbf{e} \\ & \mathbf{x},\mathbf{y} &&\geq 0 \\ & \mathbf{x} \in \mathbb{Z}^n \end{align} where $A, D \in \mathbb{R}^{m\times n}$ are matrices, $\mathbf{b}, \mathbf{e}\in\mathbb{R}^{m}$ are constants, $\mathbf{c}, \mathbf{k} \in \mathbb{R}^{n}$ objective function coefficients, and $\mathbf{x}, \mathbf{y} \in\mathbb{R}^{n}$ are the decision variables. ## CPLEX basics: Mixed Integer Programming Model ## Mathematical Model \begin{align} \text{maximize}\ & 2x + y + 3z \\ \text{subject to } & \\ & x+2y+z &&\leq 4 \\ & 2z + y &&\leq 5 \\ & x + y &&\geq 1 \\ & x &&\in \{0,1\} \\ & y, z \geq 0 \\ & z \in \mathbb{Z} \end{align} # Code in Python using docplex ## Step 1: Importing Model from docplex package ```python from docplex.mp.model import Model ``` ## Step 2: Create an optimization model ```python milp_model = Model(name = "MILP") ``` ## Step 3: Add decision variables ```python x = milp_model.binary_var(name = 'x') y = milp_model.continuous_var(name = 'y', lb = 0) z = milp_model.integer_var(name="z", lb=0) ``` ## Step 4: Add the constraints ```python # Add constraint: x + 2 y + z <= 4 c1 = milp_model.add_constraint(x + 2 * y + z <= 4, ctname = "c1") # Add constraint: 2 z + y <= 5 \\ c2 = milp_model.add_constraint(2 * z + y <= 5, ctname = "c2") # Add constraint x + y >= 1 c3 = milp_model.add_constraint(x + y >= 1, ctname = "c3") ``` ## Step 5: Define the objective function ```python obj_fn = 2 * x + y + 3 * z milp_model.set_objective('max', obj_fn) milp_model.print_information() ``` ## Step 6: Solve the model ```python milp_model.solve() ``` ## Step 7: Output the result ```python milp_model.print_solution() ``` ```python ```
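Because `x` is binary and `z` is a small bounded integer in this example, the CPLEX answer can be cross-checked by brute force: enumerate `(x, z)` and solve the remaining one-variable LP in `y` with SciPy. This is only an illustrative check, not part of the docplex workflow.

```python
# Brute-force cross-check of the small MILP above (illustrative only).
from scipy.optimize import linprog

best = None
for x_val in (0, 1):
    for z_val in range(0, 6):                    # 2z <= 5 bounds z anyway
        # with x and z fixed, maximizing 2x + y + 3z means maximizing y
        res = linprog(c=[-1.0],                  # minimize -y
                      A_ub=[[2.0], [1.0], [-1.0]],
                      b_ub=[4 - x_val - z_val,   # x + 2y + z <= 4
                            5 - 2 * z_val,       # 2z + y <= 5
                            x_val - 1],          # x + y >= 1  ->  -y <= x - 1
                      bounds=[(0, None)])
        if res.success:
            obj = 2 * x_val + res.x[0] + 3 * z_val
            if best is None or obj > best[0]:
                best = (obj, x_val, res.x[0], z_val)

print(best)   # expected to match milp_model.print_solution()
```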
a2573da47dda73fcef7f83bbf17f1e829b159d78
4,538
ipynb
Jupyter Notebook
mathematicalProgramming/Video08/Video08.ipynb
codingperspective/videoMaterials
8c9665466d8912c6f0c701c25ad9eb4802fb73a3
[ "CC0-1.0" ]
null
null
null
mathematicalProgramming/Video08/Video08.ipynb
codingperspective/videoMaterials
8c9665466d8912c6f0c701c25ad9eb4802fb73a3
[ "CC0-1.0" ]
null
null
null
mathematicalProgramming/Video08/Video08.ipynb
codingperspective/videoMaterials
8c9665466d8912c6f0c701c25ad9eb4802fb73a3
[ "CC0-1.0" ]
6
2021-11-21T05:02:50.000Z
2022-02-17T04:44:57.000Z
22.577114
286
0.485236
true
736
Qwen/Qwen-72B
1. YES 2. YES
0.969785
0.877477
0.850964
__label__eng_Latn
0.43201
0.815408
<a href="https://colab.research.google.com/github/deanhadzi/DS-Unit-2-Regression-Classification/blob/master/module2/DSPT2_lesson_regression_classification_2.ipynb" target="_parent"></a> Lambda School Data Science *Unit 2, Sprint 1, Module 2* --- # Regression & Classification, Module 2 - Go from simple regression (1 feature) to multiple regression (2+ features) - Get and plot coefficients - Use regression metrics: MAE (Mean Absolute Error) and $R^2$ Score - Understand how ordinary least squares regression minimizes the sum of squared errors - Explain why overfitting is a problem. Do train/test split ### Setup You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab (run the code cell below). ```python import os, sys in_colab = 'google.colab' in sys.modules # If you're in Colab... if in_colab: # Pull files from Github repo os.chdir('/content') !git init . !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Regression-Classification.git !git pull origin master # Install required python packages !pip install -r requirements.txt # Change into directory for module os.chdir('module2') ``` ```python # Ignore this Numpy warning when using Plotly Express: # FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead. import warnings warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy') ``` # Go from simple regression (1 feature) to multiple regression (2+ features) ## Overview: Predict Elections! 🗳️ ```python import pandas as pd import plotly.express as px df = pd.read_csv('../data/bread_peace_voting.csv') px.scatter( df, x='Average Recent Growth in Personal Incomes', y='Incumbent Party Vote Share', text='Year', title='US Presidential Elections, 1952-2016', trendline='ols', # Ordinary Least Squares ) ``` <html> <head><meta charset="utf-8" /></head> <body> <div> <div id="e9eaff3e-e43e-48c5-863a-6eb0ad71cd4b" class="plotly-graph-div" style="height:600px; width:100%;"></div> </div> </body> </html> #### Douglas Hibbs, [Background Information on the ‘Bread and Peace’ Model of Voting in Postwar US Presidential Elections](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/) > Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants: > (1) Positively by weighted-average growth of per capita real disposable personal income over the term. > (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars. 
#### Data sources - 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40 - 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections) - 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html) - 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occured at the same rate as 2008-12 > Fatalities denotes the cumulative number of American military fatalities per millions of US population the in Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33 ## Follow Along Look at the data ```python df ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>Year</th> <th>Incumbent Party Candidate</th> <th>Other Candidate</th> <th>Average Recent Growth in Personal Incomes</th> <th>US Military Fatalities per Million</th> <th>Incumbent Party Vote Share</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>1952</td> <td>Stevenson</td> <td>Eisenhower</td> <td>2.40</td> <td>190</td> <td>44.60</td> </tr> <tr> <th>1</th> <td>1956</td> <td>Eisenhower</td> <td>Stevenson</td> <td>2.89</td> <td>0</td> <td>57.76</td> </tr> <tr> <th>2</th> <td>1960</td> <td>Nixon</td> <td>Kennedy</td> <td>0.85</td> <td>0</td> <td>49.91</td> </tr> <tr> <th>3</th> <td>1964</td> <td>Johnson</td> <td>Goldwater</td> <td>4.21</td> <td>1</td> <td>61.34</td> </tr> <tr> <th>4</th> <td>1968</td> <td>Humphrey</td> <td>Nixon</td> <td>3.02</td> <td>146</td> <td>49.60</td> </tr> <tr> <th>5</th> <td>1972</td> <td>Nixon</td> <td>McGovern</td> <td>3.62</td> <td>0</td> <td>61.79</td> </tr> <tr> <th>6</th> <td>1976</td> <td>Ford</td> <td>Carter</td> <td>1.08</td> <td>2</td> <td>48.95</td> </tr> <tr> <th>7</th> <td>1980</td> <td>Carter</td> <td>Reagan</td> <td>-0.39</td> <td>0</td> <td>44.70</td> </tr> <tr> <th>8</th> <td>1984</td> <td>Reagan</td> <td>Mondale</td> <td>3.86</td> <td>0</td> <td>59.17</td> </tr> <tr> <th>9</th> <td>1988</td> <td>Bush, Sr.</td> <td>Dukakis</td> <td>2.27</td> <td>0</td> <td>53.94</td> </tr> <tr> <th>10</th> <td>1992</td> <td>Bush, Sr.</td> <td>Clinton</td> <td>0.38</td> <td>0</td> <td>46.55</td> </tr> <tr> <th>11</th> <td>1996</td> <td>Clinton</td> <td>Dole</td> <td>1.04</td> <td>0</td> <td>54.74</td> </tr> <tr> <th>12</th> <td>2000</td> <td>Gore</td> <td>Bush, Jr.</td> <td>2.36</td> <td>0</td> <td>50.27</td> </tr> <tr> <th>13</th> <td>2004</td> <td>Bush, Jr.</td> <td>Kerry</td> <td>1.72</td> <td>4</td> <td>51.24</td> </tr> <tr> <th>14</th> <td>2008</td> <td>McCain</td> <td>Obama</td> <td>0.10</td> <td>14</td> <td>46.32</td> </tr> <tr> <th>15</th> <td>2012</td> <td>Obama</td> <td>Romney</td> <td>0.95</td> <td>5</td> <td>52.00</td> </tr> <tr> <th>16</th> <td>2016</td> <td>Clinton</td> <td>Trump</td> <td>0.10</td> <td>5</td> <td>48.20</td> </tr> </tbody> </table> </div> What's the average Incumbent Party Vote Share? 
```python df['Incumbent Party Vote Share'].mean() ``` 51.82823529411765 Add another feature to the scatterplot ```python px.scatter_3d( df, x = 'Average Recent Growth in Personal Incomes', y = 'US Military Fatalities per Million', z = 'Incumbent Party Vote Share', text = 'Year', title = 'US Presidential Elections' ) ``` <html> <head><meta charset="utf-8" /></head> <body> <div> <div id="6382e1de-f3da-48e0-b11a-cf9116e51a5b" class="plotly-graph-div" style="height:600px; width:100%;"></div> </div> </body> </html> Plot the hyperplane of best fit ```python import itertools import numpy as np import plotly.express as px import plotly.graph_objs as go from sklearn.linear_model import LinearRegression def regression_3d(df, x, y, z, num=100, **kwargs): """ Visualize linear regression in 3D: 2 features + 1 target df : Pandas DataFrame x : string, feature 1 column in df y : string, feature 2 column in df z : string, target column in df num : integer, number of quantiles for each feature """ # Plot data fig = px.scatter_3d(df, x, y, z, **kwargs) # Fit Linear Regression features = [x, y] target = z model = LinearRegression() model.fit(df[features], df[target]) # Define grid of coordinates in the feature space xmin, xmax = df[x].min(), df[x].max() ymin, ymax = df[y].min(), df[y].max() xcoords = np.linspace(xmin, xmax, num) ycoords = np.linspace(ymin, ymax, num) coords = list(itertools.product(xcoords, ycoords)) # Make predictions for the grid predictions = model.predict(coords) Z = predictions.reshape(num, num).T # Plot predictions as a 3D surface (plane) fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z)) return fig ``` ```python regression_3d( df, x = 'Average Recent Growth in Personal Incomes', y = 'US Military Fatalities per Million', z = 'Incumbent Party Vote Share', text = 'Year', title = 'US Presidential Elections' ) ``` <html> <head><meta charset="utf-8" /></head> <body> <div> <div id="9221647a-d587-4700-bb32-6634589aa12b" class="plotly-graph-div" style="height:600px; width:100%;"></div> </div> </body> </html> Fit Linear Regression with 2 features - Jake VanderPlas, [_Python Data Science Handbook,_ Chapter 5.2, Introducing Scikit-Learn — Basics of the API](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.html#Basics-of-the-API) - Scikit-Learn documentation, [sklearn.linear_model.LinearRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) ```python # 1. Import the appropriate estimator class from Scikit-Learn from sklearn.linear_model import LinearRegression # 2. Instantiate this class model = LinearRegression() # 3. Arrange X features matrix & y target vector features = ['Average Recent Growth in Personal Incomes', 'US Military Fatalities per Million'] target = 'Incumbent Party Vote Share' X = df[features] y = df[target] # 4. Fit the model model.fit(X, y) ``` LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False) ## Challenge # Get and plot coefficients ```python model.intercept_ ``` 46.88551694148949 ```python model.coef_ ``` array([ 3.40621407, -0.05375223]) y = coef1 * x1 + coef2 * x2 + b ## Overview ## Follow Along What's the equation for the hyperplane? Can you relate the intercept and coefficients to what you see in the plot? ```python ``` What if ... 
Income growth = 0%, fatalities = 0 ```python model.predict([[0, 0]]) ``` array([46.88551694]) Income growth = 1% (fatalities = 0) ```python model.predict([[1, 0]]) ``` array([50.29173101]) The difference between these predictions = ? ```python model.predict([[1, 0]]) - model.predict([[0, 0]]) ``` array([3.40621407]) What if... income growth = 2% (fatalities = 0) ```python ``` The difference between these predictions = ? ```python model.predict([[2, 0]]) - model.predict([[1, 0]]) ``` array([3.40621407]) What if... (income growth=2%) fatalities = 100 ```python ``` The difference between these predictions = ? ```python ``` What if income growth = 3% (fatalities = 100) ```python ``` The difference between these predictions = ? ```python ``` What if (income growth = 3%) fatalities = 200 ```python ``` The difference between these predictions = ? ```python ``` Plot coefficients ```python ``` ## Challenge # Use regression metrics: MAE (Mean Absolute Error) and $R^2$ Score ## Overview ## Follow Along How's the error? ```python y_pred = model.predict(X) df["Predicted"] = y_pred df['Error'] = y_pred - y df ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>Year</th> <th>Incumbent Party Candidate</th> <th>Other Candidate</th> <th>Average Recent Growth in Personal Incomes</th> <th>US Military Fatalities per Million</th> <th>Incumbent Party Vote Share</th> <th>Predicted</th> <th>Error</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>1952</td> <td>Stevenson</td> <td>Eisenhower</td> <td>2.40</td> <td>190</td> <td>44.60</td> <td>44.847507</td> <td>0.247507</td> </tr> <tr> <th>1</th> <td>1956</td> <td>Eisenhower</td> <td>Stevenson</td> <td>2.89</td> <td>0</td> <td>57.76</td> <td>56.729476</td> <td>-1.030524</td> </tr> <tr> <th>2</th> <td>1960</td> <td>Nixon</td> <td>Kennedy</td> <td>0.85</td> <td>0</td> <td>49.91</td> <td>49.780799</td> <td>-0.129201</td> </tr> <tr> <th>3</th> <td>1964</td> <td>Johnson</td> <td>Goldwater</td> <td>4.21</td> <td>1</td> <td>61.34</td> <td>61.171926</td> <td>-0.168074</td> </tr> <tr> <th>4</th> <td>1968</td> <td>Humphrey</td> <td>Nixon</td> <td>3.02</td> <td>146</td> <td>49.60</td> <td>49.324458</td> <td>-0.275542</td> </tr> <tr> <th>5</th> <td>1972</td> <td>Nixon</td> <td>McGovern</td> <td>3.62</td> <td>0</td> <td>61.79</td> <td>59.216012</td> <td>-2.573988</td> </tr> <tr> <th>6</th> <td>1976</td> <td>Ford</td> <td>Carter</td> <td>1.08</td> <td>2</td> <td>48.95</td> <td>50.456724</td> <td>1.506724</td> </tr> <tr> <th>7</th> <td>1980</td> <td>Carter</td> <td>Reagan</td> <td>-0.39</td> <td>0</td> <td>44.70</td> <td>45.557093</td> <td>0.857093</td> </tr> <tr> <th>8</th> <td>1984</td> <td>Reagan</td> <td>Mondale</td> <td>3.86</td> <td>0</td> <td>59.17</td> <td>60.033503</td> <td>0.863503</td> </tr> <tr> <th>9</th> <td>1988</td> <td>Bush, Sr.</td> <td>Dukakis</td> <td>2.27</td> <td>0</td> <td>53.94</td> <td>54.617623</td> <td>0.677623</td> </tr> <tr> <th>10</th> <td>1992</td> <td>Bush, Sr.</td> <td>Clinton</td> <td>0.38</td> <td>0</td> <td>46.55</td> <td>48.179878</td> <td>1.629878</td> </tr> <tr> <th>11</th> <td>1996</td> <td>Clinton</td> <td>Dole</td> <td>1.04</td> <td>0</td> <td>54.74</td> <td>50.427980</td> <td>-4.312020</td> </tr> <tr> <th>12</th> <td>2000</td> <td>Gore</td> <td>Bush, Jr.</td> <td>2.36</td> <td>0</td> <td>50.27</td> <td>54.924182</td> 
<td>4.654182</td> </tr> <tr> <th>13</th> <td>2004</td> <td>Bush, Jr.</td> <td>Kerry</td> <td>1.72</td> <td>4</td> <td>51.24</td> <td>52.529196</td> <td>1.289196</td> </tr> <tr> <th>14</th> <td>2008</td> <td>McCain</td> <td>Obama</td> <td>0.10</td> <td>14</td> <td>46.32</td> <td>46.473607</td> <td>0.153607</td> </tr> <tr> <th>15</th> <td>2012</td> <td>Obama</td> <td>Romney</td> <td>0.95</td> <td>5</td> <td>52.00</td> <td>49.852659</td> <td>-2.147341</td> </tr> <tr> <th>16</th> <td>2016</td> <td>Clinton</td> <td>Trump</td> <td>0.10</td> <td>5</td> <td>48.20</td> <td>46.957377</td> <td>-1.242623</td> </tr> </tbody> </table> </div> How does this compare to guessing? ```python df['Error'].mean() ``` -3.761696836377001e-15 ```python df['Error'].abs().mean() ``` 1.3975663494016117 ```python # Mean Absolute Error (MAE) from sklearn.metrics import mean_absolute_error mean_absolute_error(y, y_pred) ``` 1.3975663494016117 How does this compare to guessing? ```python # What was the average vote share? y.mean() ``` 51.82823529411765 ```python # What if we guessed this number for every election? guesses = [y.mean()] * len(y) ``` ```python # How far off would this be on average? mean_absolute_error(y, guesses) ``` 4.463806228373702 Plot the "residuals" (errors) ```python import matplotlib.pyplot as plt from sklearn.metrics import mean_absolute_error, r2_score def regression_residuals(df, feature, target, m, b): """ Visualize linear regression, with residual errors, in 2D: 1 feature + 1 target. Use the m & b parameters to "fit the model" manually. df : Pandas DataFrame feature : string, feature column in df target : string, target column in df m : numeric, slope for linear equation b : numeric, intercept for linear requation """ # Plot data df.plot.scatter(feature, target) # Make predictions x = df[feature] y = df[target] y_pred = m*x + b # Plot predictions plt.plot(x, y_pred) # Plot residual errors for x, y1, y2 in zip(x, y, y_pred): plt.plot((x, x), (y1, y2), color='grey') # Print regression metrics mae = mean_absolute_error(y, y_pred) r2 = r2_score(y, y_pred) print('Mean Absolute Error:', mae) print('R^2:', r2) ``` ```python feature = 'Average Recent Growth in Personal Incomes' regression_residuals(df, feature, target, m=0, b=y.mean()) ``` ## Challenge # Understand how ordinary least squares regression minimizes the sum of squared errors ## Overview But Ordinary Least Squares Regression *doesn't* directly minimize MAE or R^2... 1. Guess & Check 2. Linear Algebra ## Follow Along ### Guess & Check ```python from matplotlib.patches import Rectangle import matplotlib.pyplot as plt import numpy as np from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score def regression_squared_errors(df, feature, target, m, b): """ Visualize linear regression, with squared errors, in 2D: 1 feature + 1 target. Use the m & b parameters to "fit the model" manually. 
df : Pandas DataFrame feature : string, feature column in df target : string, target column in df m : numeric, slope for linear equation b : numeric, intercept for linear requation """ # Plot data fig = plt.figure(figsize=(7,7)) ax = plt.axes() df.plot.scatter(feature, target, ax=ax) # Make predictions x = df[feature] y = df[target] y_pred = m*x + b # Plot predictions ax.plot(x, y_pred) # Plot squared errors xmin, xmax = ax.get_xlim() ymin, ymax = ax.get_ylim() scale = (xmax-xmin)/(ymax-ymin) for x, y1, y2 in zip(x, y, y_pred): bottom_left = (x, min(y1, y2)) height = abs(y1 - y2) width = height * scale ax.add_patch(Rectangle(xy=bottom_left, width=width, height=height, alpha=0.1)) # Print regression metrics mse = mean_squared_error(y, y_pred) rmse = np.sqrt(mse) mae = mean_absolute_error(y, y_pred) r2 = r2_score(y, y_pred) print('Mean Squared Error:', mse) print('Root Mean Squared Error:', rmse) print('Mean Absolute Error:', mae) print('R^2:', r2) ``` ```python regression_squared_errors(df, feature, target, m=3, b=46) ``` ```python from ipywidgets import interact, fixed interact( regression_squared_errors, df=fixed(df), feature=fixed(feature), target=fixed(target), m=(-5, 5, 0.5), b=(40, 60, 0.6) ); ``` interactive(children=(FloatSlider(value=0.0, description='m', max=5.0, min=-5.0, step=0.5), FloatSlider(value=… ### Linear Algebra The same result that is found by minimizing the sum of the squared errors can be also found through a linear algebra process known as the "Least Squares Solution:" \begin{align} \hat{\beta} = (X^{T}X)^{-1}X^{T}y \end{align} Before we can work with this equation in its linear algebra form we have to understand how to set up the matrices that are involved in this equation. ### The $\beta$ vector The $\beta$ vector represents all the parameters that we are trying to estimate, our $y$ vector and $X$ matrix values are full of data from our dataset. The $\beta$ vector holds the variables that we are solving for: $\beta_0$ and $\beta_1$ Now that we have all of the necessary parts we can set them up in the following equation: \begin{align} y = X \beta + \epsilon \end{align} Since our $\epsilon$ value represents **random** error we can assume that it will equal zero on average. \begin{align} y = X \beta \end{align} The objective now is to isolate the $\beta$ matrix. We can do this by pre-multiplying both sides by "X transpose" $X^{T}$. \begin{align} X^{T}y = X^{T}X \beta \end{align} Since anything times its transpose will result in a square matrix, if that matrix is then an invertible matrix, then we should be able to multiply both sides by its inverse to remove it from the right hand side. (We'll talk tomorrow about situations that could lead to $X^{T}X$ not being invertible.) \begin{align} (X^{T}X)^{-1}X^{T}y = (X^{T}X)^{-1}X^{T}X \beta \end{align} Since any matrix multiplied by its inverse results in the identity matrix, and anything multiplied by the identity matrix is itself, we are left with only $\beta$ on the right hand side: \begin{align} (X^{T}X)^{-1}X^{T}y = \hat{\beta} \end{align} We will now call it "beta hat" $\hat{\beta}$ because it now represents our estimated values for $\beta_0$ and $\beta_1$ ### Lets calculate our $\beta$ coefficients with numpy! ```python # This is NOT an objective you'll be tested on. It's just a demo. # X is a matrix. Add constant for the intercept. 
from statsmodels.api import add_constant X = add_constant(df[feature].values) print('X') print(X) # y is a column vector y = df[target].values[:, np.newaxis] print('y') print(y) # Least squares solution in code X_transpose = X.T X_transpose_X = X_transpose @ X X_transpose_X_inverse = np.linalg.inv(X_transpose_X) X_transpose_y = X_transpose @ y beta_hat = X_transpose_X_inverse @ X_transpose_y print('Beta Hat') print(beta_hat) ``` X [[ 1. 2.4 ] [ 1. 2.89] [ 1. 0.85] [ 1. 4.21] [ 1. 3.02] [ 1. 3.62] [ 1. 1.08] [ 1. -0.39] [ 1. 3.86] [ 1. 2.27] [ 1. 0.38] [ 1. 1.04] [ 1. 2.36] [ 1. 1.72] [ 1. 0.1 ] [ 1. 0.95] [ 1. 0.1 ]] y [[44.6 ] [57.76] [49.91] [61.34] [49.6 ] [61.79] [48.95] [44.7 ] [59.17] [53.94] [46.55] [54.74] [50.27] [51.24] [46.32] [52. ] [48.2 ]] Beta Hat [[46.49920976] [ 2.97417709]] ```python # Scikit-learn gives the exact same results from sklearn.linear_model import LinearRegression model = LinearRegression() features = ['Average Recent Growth in Personal Incomes'] target = 'Incumbent Party Vote Share' X = df[features] y = df[target] model.fit(X, y) model.intercept_, model.coef_ ``` (46.499209757741625, array([2.97417709])) ## Challenge # Explain why overfitting is a problem. Do train/test split ## Overview #### Jake VanderPlas, [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#The-Bias-variance-trade-off) Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset: The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_. The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_. From the scores associated with these two models, we can make an observation that holds more generally: - For high-bias models, the performance of the model on the validation set is similar to the performance on the training set. - For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set. If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure: The diagram shown here is often called a validation curve, and we see the following essential features: - The training score is everywhere higher than the validation score. 
## Challenge

# Explain why overfitting is a problem. Do train/test split

## Overview

#### Jake VanderPlas, [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#The-Bias-variance-trade-off)

Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between bias and variance. Consider the following figure, which presents two regression fits to the same dataset:

The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to _underfit_ the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high _bias_.

The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to _overfit_ the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high _variance_.

From the scores associated with these two models, we can make an observation that holds more generally:

- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.
- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.

If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:

The diagram shown here is often called a validation curve, and we see the following essential features:

- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.
- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.
- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.
- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.

The means of tuning the model complexity varies from model to model.

## Follow Along

Wrangle New York City property sales data

```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
```

```python
# Read New York City property sales data, from first 4 months of 2019.
# Dataset has 23040 rows, 21 columns.
df = pd.read_csv('../data/NYC_Citywide_Rolling_Calendar_Sales.csv')
assert df.shape == (23040, 21)

# Change column names. Replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]

# Remove symbols from SALE_PRICE string, convert to integer
df['SALE_PRICE'] = (
    df['SALE_PRICE']
    .str.replace('$','')
    .str.replace('-','')
    .str.replace(',','')
    .astype(int)
)

# Keep subset of rows:
# Tribeca neighborhood, Condos - Elevator Apartments,
# 1 unit, sale price more than $1, less than $35 million
mask = (
    (df['NEIGHBORHOOD'].str.contains('TRIBECA')) &
    (df['BUILDING_CLASS_CATEGORY'] == '13 CONDOS - ELEVATOR APARTMENTS') &
    (df['TOTAL_UNITS'] == 1) &
    (df['SALE_PRICE'] > 0) &
    (df['SALE_PRICE'] < 35000000)
)
df = df[mask]

# Data now has 90 rows, 21 columns
assert df.shape == (90, 21)

# Convert SALE_DATE to datetime
df['SALE_DATE'] = pd.to_datetime(df['SALE_DATE'], infer_datetime_format=True)

from ipywidgets import interact
import pandas as pd
from sklearn.linear_model import LinearRegression

# Read New York City property sales data, from first 4 months of 2019.
# Dataset has 23040 rows, 21 columns.
df = pd.read_csv('../data/NYC_Citywide_Rolling_Calendar_Sales.csv')
assert df.shape == (23040, 21)
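# Note: in older pandas versions str.replace() treats its pattern as a regular
# expression by default, so the '$' below is read as an end-of-string anchor
# rather than a literal dollar sign; pass regex=False if the symbols are not
# being stripped on your pandas version.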
# Change column names. Replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]

# Remove symbols from SALE_PRICE string, convert to integer
df['SALE_PRICE'] = (
    df['SALE_PRICE']
    .str.replace('$','')
    .str.replace('-','')
    .str.replace(',','')
    .astype(int)
)

# Keep subset of rows:
# Tribeca neighborhood, Condos - Elevator Apartments,
# 1 unit, sale price more than $1, less than $35 million
mask = (
    (df['NEIGHBORHOOD'].str.contains('TRIBECA')) &
    (df['BUILDING_CLASS_CATEGORY'] == '13 CONDOS - ELEVATOR APARTMENTS') &
    (df['TOTAL_UNITS'] == 1) &
    (df['SALE_PRICE'] > 0) &
    (df['SALE_PRICE'] < 35000000)
)
df = df[mask]

# Data now has 90 rows, 21 columns
assert df.shape == (90, 21)

# Convert SALE_DATE to datetime
df['SALE_DATE'] = pd.to_datetime(df['SALE_DATE'], infer_datetime_format=True)

# Arrange X features matrix & y target vector
features = ['GROSS_SQUARE_FEET']
target = 'SALE_PRICE'
X = df[features]
y = df[target]
```

Do random train/test split

```python
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=11)
```

Repeatedly fit increasingly complex models, and keep track of the scores

```python
from IPython.display import display, HTML
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

# Credit: Jake VanderPlas, Python Data Science Handbook, Chapter 5.3
# https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn
def PolynomialRegression(degree=2, **kwargs):
    return make_pipeline(PolynomialFeatures(degree), LinearRegression(**kwargs))

polynomial_degrees = range(1, 10, 2)

train_r2s = []
test_r2s = []
for degree in polynomial_degrees:
    model = PolynomialRegression(degree)
    display(HTML(f'Polynomial degree={degree}'))

    model.fit(X_train, y_train)
    train_r2 = model.score(X_train, y_train)
    test_r2 = model.score(X_test, y_test)
    display(HTML(f'<b style="color: blue">Train R2 {train_r2:.2f}</b>'))
    display(HTML(f'<b style="color: red">Test R2 {test_r2:.2f}</b>'))

    plt.scatter(X_train, y_train, color='blue', alpha=0.5)
    plt.scatter(X_test, y_test, color='red', alpha=0.5)
    plt.xlabel(features)
    plt.ylabel(target)

    x_domain = np.linspace(X.min(), X.max())
    curve = model.predict(x_domain)
    plt.plot(x_domain, curve, color='blue')
    plt.show()

    display(HTML('<hr/>'))
    train_r2s.append(train_r2)
    test_r2s.append(test_r2)

display(HTML('Validation Curve'))
plt.plot(polynomial_degrees, train_r2s, color='blue', label='Train')
plt.plot(polynomial_degrees, test_r2s, color='red', label='Test')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
```
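Scikit-learn can also automate this kind of loop. This is a minimal sketch, assuming the `PolynomialRegression` helper and the `X_train`, `y_train` split defined above; `validation_curve` refits the pipeline for each degree using cross-validation within the training data, rather than the single train/test split used here:

```python
from sklearn.model_selection import validation_curve

degrees = range(1, 10, 2)
train_scores, val_scores = validation_curve(
    PolynomialRegression(), X_train, y_train,
    param_name='polynomialfeatures__degree',
    param_range=degrees,
    cv=3, scoring='r2'
)

# Average over the cross-validation folds and plot the same kind of validation curve
plt.plot(degrees, train_scores.mean(axis=1), color='blue', label='Train')
plt.plot(degrees, val_scores.mean(axis=1), color='red', label='Cross-validation')
plt.xlabel('Model Complexity (Polynomial Degree)')
plt.ylabel('R^2 Score')
plt.legend()
plt.show()
```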
## Challenge

# Review

# Sources

Jake VanderPlas, [_Python Data Science Handbook,_ Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#The-Bias-variance-trade-off)

#### Douglas Hibbs, [Background Information on the ‘Bread and Peace’ Model of Voting in Postwar US Presidential Elections](https://douglas-hibbs.com/background-information-on-bread-and-peace-voting-in-us-presidential-elections/)

> Aggregate two-party vote shares going to candidates of the party holding the presidency during the postwar era are well explained by just two fundamental determinants:
>
> (1) Positively by weighted-average growth of per capita real disposable personal income over the term.
>
> (2) Negatively by cumulative US military fatalities (scaled to population) owing to unprovoked, hostile deployments of American armed forces in foreign wars.

#### Data sources

- 1952-2012: Douglas Hibbs, [2014 lecture at Deakin University Melbourne](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 40
- 2016, Vote Share: [The American Presidency Project](https://www.presidency.ucsb.edu/statistics/elections)
- 2016, Recent Growth in Personal Incomes: [The 2016 election economy: the "Bread and Peace" model final forecast](https://angrybearblog.com/2016/11/the-2016-election-economy-the-bread-and-peace-model-final-forecast.html)
- 2016, US Military Fatalities: Assumption that Afghanistan War fatalities in 2012-16 occurred at the same rate as 2008-12

> Fatalities denotes the cumulative number of American military fatalities per millions of US population in the Korea, Vietnam, Iraq and Afghanistan wars during the presidential terms preceding the 1952, 1964, 1968, 1976 and 2004, 2008 and 2012 elections. —[Hibbs](http://www.douglas-hibbs.com/HibbsArticles/HIBBS-PRESVOTE-SLIDES-MELBOURNE-Part1-2014-02-26.pdf), Slide 33
80273432b08c293b0d2d1bf2ec43da163154c513
643,409
ipynb
Jupyter Notebook
module2/DSPT2_lesson_regression_classification_2.ipynb
deanhadzi/DS-Unit-2-Regression-Classification
a8392f380d76c4fbf7deb75ffbf266c08ae6c537
[ "MIT" ]
null
null
null
module2/DSPT2_lesson_regression_classification_2.ipynb
deanhadzi/DS-Unit-2-Regression-Classification
a8392f380d76c4fbf7deb75ffbf266c08ae6c537
[ "MIT" ]
null
null
null
module2/DSPT2_lesson_regression_classification_2.ipynb
deanhadzi/DS-Unit-2-Regression-Classification
a8392f380d76c4fbf7deb75ffbf266c08ae6c537
[ "MIT" ]
null
null
null
210.677472
197,090
0.831724
true
10,149
Qwen/Qwen-72B
1. YES 2. YES
0.754915
0.672332
0.507553
__label__eng_Latn
0.783499
0.017545
# Lecture 3

In this lecture we move onto non-homogeneous second-order ordinary differential equations. We want to solve equations of the form

$$
a \frac{d^{2}y}{dx^{2}} + b \frac{dy}{dx} + c y = f(x)
$$

where $a$, $b$ and $c$ are real constants.

We've seen previously that for the case $b^{2} \ne 4ac$ and $f(x) = 0$, the general solution to the above equation is

$$
y = A_{1} e^{\lambda_{1} x} + A_{2}e^{\lambda_{2} x}
$$

where $\lambda_{1}$ and $\lambda_{2}$ are the roots of the characteristic equation $a\lambda^{2} + b \lambda + c = 0$. For the case that $b^{2} = 4ac$ (repeated roots), the general solution is of the form

$$
y = (A_{1} + A_{2} x) e^{\lambda x}
$$

Recall that the constants $A_{1}$ and $A_{2}$ are determined from the boundary conditions.

### Initialising SymPy

To solve equations using SymPy, we again need to import SymPy, and we'll call `init_printing()` to get nicely typeset equations:

```
from sympy import *

# This initialises pretty printing
init_printing()

from IPython.display import display

# This command makes plots appear inside the browser window
%matplotlib inline
```

## Mass-spring-damper system

The differential equation that governs a single degree-of-freedom mass-spring-damper system, with a forcing term $f(t)$, is

$$
m \frac{d^{2}y}{dt^{2}} + \lambda \frac{dy}{dt} + ky = f(t)
$$

To solve this problem using SymPy, we first define the symbols $t$ (time), $m$ (mass), $\lambda$ (damper coefficient) and $k$ (spring stiffness), and the function $y$ (displacement):

```
t, m, lmbda, k = symbols("t m lambda k")
y = Function("y")
f = Function("f")
```

Next, we define the differential equation, and print it to the screen:

```
eqn = Eq(m*Derivative(y(t), t, t) + lmbda*Derivative(y(t), t) + k*y(t), f(t))
display(eqn)
```

## Classifying the ODE

Checking the order of the ODE:

```
print("The order of the ODE is: {}".format(ode_order(eqn, y(t))))
```

The order of the ODE is: 2

As expected, it is second order. Now classifying the ODE:

```
print("Properties of the ODE are: {}".format(classify_ode(eqn)))
```

Properties of the ODE are: ('nth_linear_constant_coeff_variation_of_parameters', 'nth_linear_constant_coeff_variation_of_parameters_Integral')

## Solving the ODE

We can now try to solve the ODE symbolically, but without defining $f(t)$ the result will be unmanageable: SymPy will return a solution, but it will be too complicated to interpret. We will therefore specify some of the parameters in the equation:

```
m0, lmbda0, k0 = 4, 1, 16
eqn1 = eqn.subs(m, m0).subs(k, k0).subs(lmbda, lmbda0)
dsolve(eqn1)
```

The solution is looking more manageable, but we still haven't specified $f(t)$. Setting $f(t) = 2$ and then solving,

```
eqn1 = eqn1.subs(f(t), 2)
print("ODE with f(t) = 2")
display(eqn1)

ys = dsolve(eqn1, y(t))
print("Solution to ODE:")
display(ys)
```

The solution is now looking tractable. If we set $C_{1} = C_{2} = 1$, we can plot the solution.

```
ys = ys.subs('C1', 1).subs('C2', 1)
plot(ys.args[1], (t, 0.0, 20.0), xlabel="time", ylabel="displacement")
```

We can see that the solution is a decaying sinusoidal function.

### Exercise:

Try changing the values for $m$, $\lambda$ and $k$ to investigate their influence on the response. You can try negative values, but only positive values are physically possible.

### Exercise:

Try changing $f(t)$ to explore the response for different forcing functions.
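For example, keeping $m = 4$ and $k = 16$ but increasing the damping coefficient to $\lambda = 20$ makes the system over-damped, so the oscillation disappears. This is a minimal sketch for the first exercise, reusing `eqn` from above (the value $\lambda = 20$ is just an illustration):

```
# Re-use the symbolic equation from above with heavier damping
eqn_heavy = eqn.subs(m, 4).subs(k, 16).subs(lmbda, 20).subs(f(t), 2)
ys_heavy = dsolve(eqn_heavy, y(t))
display(ys_heavy)

# Substitute values for the integration constants and plot: the response
# now decays without oscillating
ys_heavy = ys_heavy.subs('C1', 1).subs('C2', 1)
plot(ys_heavy.args[1], (t, 0.0, 20.0), xlabel="time", ylabel="displacement")
```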
### Example: resonance

An undamped system ($\lambda = 0$) changes character when the forcing term is sinusoidal at the natural frequency of the system. This corresponds to the case of the forcing term having the same form as the solution to the homogeneous problem. The natural frequency is $\omega_{n} = \sqrt{k/m}$.

Setting $f(t) = \sin(\omega t)$:

```
# Define natural frequency
omega = Symbol("omega")
omega = sqrt(k/m)

# Set f(t) = sin(\omega t)
eqn1 = eqn.subs(f(t), sin(omega*t))
display(eqn1)
```

We now set values for $m$, $\lambda$ and $k$ (we'll set $\lambda = 0$, which corresponds to an undamped system):

```
# Set numerical values for m, lambda and k
m0, lmbda0, k0 = 4, 0, 1
eqn1 = eqn1.subs(m, m0).subs(k, k0).subs(lmbda, lmbda0)

# Solve ODE
display(eqn1)
y = dsolve(eqn1)
display(y)
```

Note the presence of $t$ in front of the cosine. This implies that the solution will grow linearly in time. We can see this by plotting the solution:

```
y = y.subs('C1', 1).subs('C2', 1)
plot(y.args[1], (t, 0.0, 100.0), xlabel="time", ylabel="displacement")
```

We observe that the solution grows in time - this is a feature of resonance.

### Example: Interactive control for the natural frequency of the mass-spring-damper system

#### NOTE: This will not run on the online IPython Notebook Viewer. You would need IPython 2.3.1 and to run this cell locally for the interactive widget to work.

The equation for the mass-spring-damper system above can be rewritten in terms of the damping ratio $\zeta$ and the natural frequency $\omega_{n}$ as below:

$$ \frac{d^{2}y}{dt^{2}} + 2\zeta\omega_{n}\frac{dy}{dt} + \omega_{n}^{2}y = 0 $$

which gives rise to three different solutions, depending on whether the system is under-damped, critically damped or over-damped. The values of $\omega_{n}$ and $\zeta$ are defined as below, from the original mass-spring-damper system:

$$ \omega_{n} = \sqrt{\frac{k}{m}} $$

$$ \zeta = \frac{\lambda}{2 \sqrt{mk}} $$

The interactive widget below visualises the effect of changing $m$, $k$ and $\lambda$ on the response.

```
from sympy import *

# Import the interactive function from IPython widgets
# (on recent Jupyter installations these imports live in the ipywidgets package instead)
from IPython.html.widgets import interact
from IPython.html import widgets

# This initialises pretty printing
init_printing()

from IPython.display import display
%matplotlib inline

# Import numpy and matplotlib
from numpy import *
from matplotlib.pyplot import *

# Define the time points: 500 samples between t = 0 and t = 10 seconds
t = linspace(0, 10, 500)

# Define the initial conditions: position = 1, velocity = 0
y0 = [1.0, 0.0]

# Set up the function that plots the response based on slider changes
# The values of m, k and lambda here are in SI units: m in kg, k in N/m and lambda in Ns/m
def plot_response(m = 5, k = 4, lmbda = 0.0):
    # Calculate and print out values of omega_n and zeta
    omega_n = sqrt(k/m)
    zeta = lmbda/(2*sqrt(m*k))
    print("Natural frequency is {}".format(omega_n))
    print("Damping ratio is {}".format(zeta))

    # Define the damped frequency
    omega_d = omega_n*sqrt(1 - zeta**2)

    # Define y(t) based on different values of zeta
    if zeta < 1:
        # Under-damped
        A = y0[0]
        B = (1/omega_d)*(zeta*omega_n*y0[0] + y0[1])
        y = exp(-zeta*omega_n*t)*(A*cos(omega_d*t) + B*sin(omega_d*t))
    elif zeta == 1:
        # Critically damped
        A = y0[0]
        B = y0[1] + omega_n*y0[0]
        y = (A + B*t)*exp(-omega_n*t)
    elif zeta > 1:
        # Over-damped: root_p and root_n are the roots of the characteristic
        # equation obtained with the + and - signs in the quadratic formula
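        # For zeta > 1 the characteristic equation lambda^2 + 2*zeta*omega_n*lambda + omega_n^2 = 0
        # has two distinct real (negative) roots, so the response is a sum of two decaying
        # exponentials, y(t) = A*exp(root_p*t) + B*exp(root_n*t), with A and B fixed by the
        # initial position and velocity stored in y0.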
        delta = (2*zeta*omega_n)**2 - 4*(omega_n**2)
        root_p = (0.5)*(-2*zeta*omega_n + sqrt(delta))
        root_n = (0.5)*(-2*zeta*omega_n - sqrt(delta))
        A = y0[0] + (root_p*y0[0] - y0[1])/(root_n - root_p)
        B = (-1)*(root_p*y0[0] - y0[1])/(root_n - root_p)
        y = A*exp(root_p*t) + B*exp(root_n*t)

    # Draw and label the figure window
    ax = gca()
    ax.grid(True, linestyle=':', color='0.5')
    xlabel('Time (s)', fontsize=20, labelpad=5)
    ylabel('Position (m)', fontsize=20, labelpad=10)
    ylim(-1.2, 1.2)
    xlim(0, 10)

    # Normalise y by its initial value
    y = y/y[0]

    # Plot the function on the figure
    plot(t, y, color="red", linewidth=1.5)

# Call the slider interaction
interact(plot_response, m=(0, 20, 0.1), k=(0, 10, 0.1), lmbda=(0, 10, 0.1))
```

```

```
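If you are running this notebook on a recent Jupyter installation rather than IPython 2.3.1, the widget imports have moved. This is a minimal sketch of the equivalent call, assuming the `plot_response` function defined above; the sliders start at 0.1 here simply to avoid dividing by zero at $m = 0$ or $k = 0$:

```
# On current Jupyter/ipywidgets installations the interact function lives here:
from ipywidgets import interact

interact(plot_response, m=(0.1, 20, 0.1), k=(0.1, 10, 0.1), lmbda=(0, 10, 0.1))
```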
2e7f38c68684f432a5e82f3d7f795d8c30dad26d
102,810
ipynb
Jupyter Notebook
notebooks/Lecture3.ipynb
quang-ha/IA-maths-Ipython
8ff8533d64a3d8db8e4813a7b6dfee39339fd846
[ "BSD-3-Clause" ]
null
null
null
notebooks/Lecture3.ipynb
quang-ha/IA-maths-Ipython
8ff8533d64a3d8db8e4813a7b6dfee39339fd846
[ "BSD-3-Clause" ]
null
null
null
notebooks/Lecture3.ipynb
quang-ha/IA-maths-Ipython
8ff8533d64a3d8db8e4813a7b6dfee39339fd846
[ "BSD-3-Clause" ]
null
null
null
154.137931
18,565
0.837039
true
2,409
Qwen/Qwen-72B
1. YES 2. YES
0.894789
0.890294
0.796626
__label__eng_Latn
0.974506
0.689161