```python
import numpy as np
import matplotlib.pyplot as plt
import random
import math
from sympy import exp, sqrt, pi, Integral, Symbol, S
coin = ['1','0'] # '1' = heads, '0' = tails
coin10 = [0,0,0,0,0,0,0,0,0]
#tcnt = 0 # number of times two heads appeared
for i in range(100000):
    cnt = 0 # number of heads out of the 8 tosses
    for j in range(8):
        cnt += int(random.choice(coin))
    coin10[cnt] += 1/100000
print(coin10)
y1_value = coin10
x_name=('0','1', '2', '3', '4', '5', '6', '7', '8')
n_groups = len(x_name)
index = np.arange(n_groups)
#print(index)
plt.bar(index, y1_value, tick_label=x_name,color='red', align='center')
plt.xlabel('number of heads')
plt.ylabel('probability')
plt.title('Coin probability')
plt.xlim( -1, n_groups)
plt.ylim( 0, 0.3)
plt.show()
```
```python
from sympy import exp, sqrt, pi, Integral, Symbol, S
import math
x = Symbol('x')
# normal approximation to Binomial(10, 0.5): mean 5, variance 10*0.5*0.5 = 2.5
f = exp(-(x-10*0.5)**2/(2*(10*0.5*0.5)))/sqrt(2*pi*(10*0.5*0.5))
result = Integral(f,(x,2,8)).doit().evalf()
print(result)
```
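As a quick cross-check (not part of the original notebook), the same probability can be computed exactly with `scipy.stats.binom`, assuming the integral above is meant to approximate $P(2 \le X \le 8)$ for $X \sim \text{Binomial}(10, 0.5)$.
```python
# Exact binomial cross-check of the normal approximation above.
# Assumption: the integral approximates P(2 <= X <= 8) for X ~ Binomial(10, 0.5).
from scipy.stats import binom
exact = binom.cdf(8, 10, 0.5) - binom.cdf(1, 10, 0.5)
print(exact)  # roughly 0.978
```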
```python
def coin_hypo(num):
    import math
    if 8*0.5 + math.sqrt(8*0.5*0.5) > num > 8*0.5 - math.sqrt(8*0.5*0.5):
        return 'When a coin is tossed 8 times, getting ' + str(num) + ' tails lies inside the 68% confidence interval.'
    else:
        return 'When a coin is tossed 8 times, getting ' + str(num) + ' tails lies outside the 68% confidence interval.'
```
```python
for i in range(9):
    print(coin_hypo(i))
```
```python
def coin_hypo(n):
    import numpy as np
    from scipy.stats import norm
    import matplotlib.pyplot as plt
    import random
    coin = ['0','1']
    coin10 = [0,0,0,0,0,0,0,0,0,0,0]
    for i in range(100000):
        cnt = 0
        for j in range(10):
            cnt += int(random.choice(coin))
        coin10[cnt] += 1
    result = [i/100000 for i in coin10]
    y_value = result
    x_name = ('0','1', '2', '3', '4', '5', '6', '7', '8', '9', '10')
    n_groups = len(x_name)
    index = np.arange(n_groups)
    plt.bar(index, y_value, tick_label=x_name, align='center', color='red')
    plt.xlabel('coin count')
    plt.ylabel('Probability')
    plt.title('Coin Tail Probability chart')
    plt.xlim(-1, n_groups)
    plt.ylim(0, 0.3)
    plt.show()
    x = np.arange(0,10,0.001)
    y = norm.pdf(x,5,1.58) # mean 5, standard deviation 1.58
    plt.plot(x,y,color='red')
    plt.fill_between(x, y, interpolate=True, color='green', alpha=0.5)
    x1 = np.arange(5+1.96*1.58,10,0.001)
    y1 = norm.pdf(x1,5,1.58)
    plt.fill_between(x1, y1, color='orange', alpha=0.5)
    plt.scatter(n,0,c='r',alpha=0.8)
    if abs(5-n) < 1.96*1.58:
        return 'The value lies inside the 95% confidence interval.'
    else:
        return 'The value lies outside the 95% confidence interval.'
coin_hypo(6)
coin_hypo(9)
```
```python
txt_file = open('c:\\data\\winter.txt')
lines = txt_file.readlines()
su = 0  # number of digit characters
for i in lines:
    #print(i)
    for k in i:
        su += int(k.isdigit())
print(su)
```
```python
txt_file = open('c:\\data\\winter.txt')
lines = txt_file.readlines()
su = 0  # number of whitespace characters
for i in lines:
    #print(i)
    for k in i:
        su += int(k.isspace())
print(su)
```
```python
txt_file = open('c:\\data\\winter.txt')
lines = txt_file.readlines()
su = 0  # number of alphabetic characters
for i in lines:
    #print(i)
    for k in i:
        su += int(k.isalpha())
print(su)
```
```python
txt_file = open('c:\\data\\winter.txt')
lines = txt_file.readlines()
su = 0  # total number of characters
for i in lines:
    #print(i)
    for k in i:
        su += 1
print(su)
```
```python
txt_file = open('c:\\data\\winter.txt')
lines = txt_file.readlines()
total = 0  # total number of characters, summed per line
for i in lines:
    cnt = len(i)
    total = total + cnt
print(total)
```
```python
txt_file = open('c:\\data\\winter.txt')
lines = txt_file.readlines()
su = 0  # characters that are not letters, digits, or whitespace
for i in lines:
    #print(i)
    for k in i:
        if not (k.isalpha() or k.isdigit() or k.isspace()):
            su += 1
print(su)
```
```python
txt_file = open('c:\\data\\winter.txt')
lines = txt_file.readlines()
cnt = 0  # characters that are not letters, digits, or whitespace
for i in lines:
    cnt += sum(1 for k in i if not (k.isalpha() or k.isdigit() or k.isspace()))
print(cnt)
```
```python
txt = 'A lot of things occur each day'
result1 = txt.upper()
result2 = txt.lower()
print(result1)
print(result2)
```
```python
txt_file = open('c:\\data\\winter.txt')
#lines = txt_file.readlines()
lines = txt_file.read().upper()
#for i in lines:
#print(i.upper())
print(lines)
```
```python
txt7 = '   This string has whitespace on both sides.   '
print(txt7.lstrip(),len(txt7.lstrip()))
print(txt7.rstrip(),len(txt7.rstrip()))
print(txt7.strip(),len(txt7.strip()))
```
```python
txt = 'A lot of things occur each day'
word_count1 = txt.find('o')    # index of the first 'o' (find returns a position, not a count)
word_count2 = txt.find('day')  # index where 'day' starts
print(word_count1)
print(word_count2)
```
```python
txt_file = open('c:\\data\\철수.txt')
#lines = txt_file.readlines()
lines = txt_file.read()
print(lines.count('국민'))  # count occurrences of the word '국민' (citizen) in the file
print(lines)
```
```python
import csv
file = open('c:\\data\\emp112.csv','r')
emp11 = csv.reader(file)
for row in emp11:
    email = row[7]
    # extract the text between '@' and the first '.' that follows it (the mail domain)
    print(email.split('@')[1].split('.')[0])
```
```python
url = 'http://www.naver.com/news/today=20191204'
result = url.split('/')
print(result)
```
```python
log = 'name:홍길동 age:17 major:경영학 nation:한국'
result = log.split(' ')
print(result)
```
```python
log = 'name:홍길동 age:17 major:경영학 nation:한국'
result = log.split(' ')
f = []
for i in result:
    f.append(i.split(':')[1])
print(f)
```
```python
loglist= ['2020/05/22','200','ok','이준호']
bond = ':'
log = bond.join(loglist)
print(log)
```
```python
import csv
file = open('c:\\data\\emp11.csv','r')
emp11 = csv.reader(file)
a = []
for row in emp11:
    a.append(row[1])
#print(a)
a.sort()
bond = ','
b = bond.join(a)
print(sorted(a))
print(b)
```
['강혜리', '권민준', '김명환', '김서준', '김소애', '김유진', '김은주', '김지석', '김태환', '김현수', '박민호', '박상희', '박지성', '서동혁', '안태형', '양선종', '위주희', '이서지', '이승혁', '이예라', '이준호', '이태환', '전지연', '정주희', '조성원', '조원기', '최민혁', '최유리', '한태현', '홍승희']
강혜리,권민준,김명환,김서준,김소애,김유진,김은주,김지석,김태환,김현수,박민호,박상희,박지성,서동혁,안태형,양선종,위주희,이서지,이승혁,이예라,이준호,이태환,전지연,정주희,조성원,조원기,최민혁,최유리,한태현,홍승희
```python
txt = 'My password is 1234'
result1 = txt.replace('1','0')
result1
```
```python
log = ['name:홍길동 age:17 major:경영학 nation:한국']
result = log[0].split(' ')
f = []
g = []
for i in result:
    g.append(i.split(':')[0])
    f.append(i.split(':')[1])
for j in range(len(f)):
    print(g[j]+'--->'+f[j])
```
```python
log = ['name:홍길동', 'age:17', 'major:경영학', 'nation:한국']
#result = log[0].split(' ')
f = []
g = []
for i in log:
    print(i.split(':')[0]+'--->'+i.split(':')[1])
#for j in range(len(f)):
#    print(g[j]+' ---> '+f[j])
```
```python
for i in range(65,128):
    print(i,'--->',chr(i))
```
```python
u_txt = 'A'
b_txt = u_txt.encode()
print(u_txt)
print(b_txt) # b'A' b는 binary의 약자
c_txt = b_txt.decode()
print(c_txt)
```
```python
a = [i for i in range(2,20,2)]
print(a)
```
```python
a =[1,2,'a','b','c',[4,5,6]]
print(a[5][0]) # 4
```
```python
b = [2,3,4,[5,6,[7,8],9],10]
print(b[3][2][0])
```
```python
import random
poc = ['y','y','y','b','b','y','b','y','b','y']  # bag of 10 balls: 6 labelled 'y' and 4 labelled 'b'
case = 0
r = int(input('number of draws: '))
for j in range(r):
    if random.sample(poc,3).count('b') == 2:
        case += 1
print(case/r)
```
```python
import random
a = [1,2,3,4,5,6,7]
b = random.sample(a,3)
print(b)
print(sum(b)/3)
```
```python
import numpy as np
a = [1,2,3,4,5,6,7]
np.random.choice(a,3).mean()
```
```python
import numpy as np
avg = 148.5
std = 7.8
N =100000
height = np.random.randn(N)*std+ avg
np.random.choice(height,100).mean()
```
```python
# H0: m <= 148.5  -- the mean height has not changed.
# H1: m >  148.5  -- the mean height has increased.
# alpha = 5%
import numpy as np
import math
avg = 148.5
std = 7.8
N =100000
height = np.random.randn(N)*std+ avg
#x_bar = np.random.choice(height,100).mean()
x_bar = 150
print(x_bar)
T = abs((x_bar - avg)/(std/math.sqrt(100)))
print(T)
```
```python
def tall_pvalue(x_bar):
    from sympy import exp, sqrt, pi, Integral, Symbol, S
    #x_bar = int(input(''))
    x = Symbol('x')
    # sampling distribution of the mean: N(148.5, 7.8/sqrt(100)) = N(148.5, 0.78)
    f = exp(-(x-148.5)**2/(2*0.78**2))/(0.78*sqrt(2*pi))
    result = Integral(f,(x,x_bar,S.Infinity)).doit().evalf()
    print(result)
tall_pvalue(150) #0.0272351950137387
```
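For reference, the same upper-tail probability can be obtained with `scipy.stats.norm`; this cross-check is not part of the original notebook and assumes the sampling distribution N(148.5, 0.78) used in `tall_pvalue`.
```python
# Cross-check of the sympy integral using scipy's normal survival function.
from scipy.stats import norm
print(norm.sf(150, loc=148.5, scale=0.78))    # ~0.0272, matches tall_pvalue(150)
print(norm.sf(148.7, loc=148.5, scale=0.78))  # matches tall_pvalue(148.7)
```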
```python
tall_pvalue(148.7)
```
```python
import numpy as np
a = []
height = np.random.randn(1000000)*7.8+ 148.5
for i in range(100000):
    a.append(np.random.choice(height,100).mean())
a
```
[148.33570678877675,
 148.79478961630835,
 147.6023245580204,
 148.4446903186962,
 ...
 150.52580985652045,
 ...]
```python
import numpy as np
avg = 148.5
std = 7.8
N =1000000
height = np.random.randn(N)*std+ avg
a.append(np.random.choice(height,100).mean())
```
```python
import numpy as np
from scipy import stats
listage = [34]*10 + [2]*10
a = np.array(listage)
me = np.mean(a)  # scipy.stats has no Mean function; use numpy's mean instead
print(me)
```
# Convolutional Neural Networks
## Introduction
A neural network's hidden layers are used to learn feature detectors from input data. For simple input data types we can use fully connected hidden layers to allow us to learn features across any combination of input feature values. Thus one feature might be learned which is activated only when the first and last input feature values are both active. Another feature might be learned which is active when the average intensity across all input feature values is above a certain threshold.
The figure below illustrates this situations. Here neurons $\delta^{2}_{1}$ and $\delta^{2}_{2}$ for example might be defined to depend on the first and last input neurons, and mean values of input neurons respectively.
<!-- conv-1.png -->
While fully connected layers like this are very useful at learning features that might depend on any particular combination of input data values, they are computationally costly. The number of weights to be learned in a fully connected layer between input units $L1$ and hidden units $L2$ is:
\begin{equation}
(size(L1)+1) \times size(L2)
\end{equation}
where the addition of 1 to the first factor accounts for the weight from a bias unit on the input layer.
Let's consider for a moment the case of working with image data. Here if we assume a low resolution square image of 256 pixels on each side, and assume we want a hidden layer that can pick out 64 features, then we require $((256 \times 256) + 1) \times 64$ weights, i.e., 4,194,368 weights. This is a very significant number of weights in comparison to the examples we have considered to date.
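The arithmetic behind this figure is easy to check; the small helper below simply evaluates the formula above and is included only as an illustration.
```python
# Number of trainable weights in a fully connected layer, including the bias unit.
def fully_connected_weights(n_inputs, n_hidden):
    return (n_inputs + 1) * n_hidden

print(fully_connected_weights(256 * 256, 64))  # 4194368
```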
For complex input data types such as images we deal with this rapid growth in the number of weights by taking advantage of specific properties of the input data. In the case of images we can take advantage of two facts:
1. That in images the input is often translation invariant; and
2. In early layers we can focus on learning local features rather than global features.
To illustrate consider the case of analyising an image to find edges. An edge can be found by looking for a steep gradient in image pixel intensities in a local block of pixels. For example simple features for horizontal and vertical edge detection in a 3x3 block of pixels can be illustrated by the following weight matrices for linear feature detectors.
<!-- edge-detector-unattributed.png -->
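As a concrete sketch, the two 3x3 edge-detection weight matrices described above can be written out as NumPy arrays; these are standard Prewitt-style masks and may differ slightly in scale from the exact values shown in the figure.
```python
import numpy as np

# Vertical-edge detector: responds to intensity changes in the horizontal direction.
Gx = np.array([[-1, 0, 1],
               [-1, 0, 1],
               [-1, 0, 1]])

# Horizontal-edge detector: responds to intensity changes in the vertical direction.
Gy = np.array([[-1, -1, -1],
               [ 0,  0,  0],
               [ 1,  1,  1]])
```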
Such a local feature is translation invariant -- a horizontal edge at the bottom left of the picture generally looks like a horizontal edge in any other part of the image when viewed as a steep gradient in pixel intensities. Such a property has been taken advantage of in the image processing domain for many years. For example consider the image below where vertical and horizontal edges are extracted from an image.
<!-- edges-unattributed.jpg -->
We can illustrate the use of a simple edge detector below by scanning over an image from the MNIST dataset with an edge-detection filter of the kind shown above.
```python
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib as mp
import numpy as np
import tensorflow as tf
# Get a copy of the mnist dataset container
mnist = tf.keras.datasets.mnist
# Pull out the training and test data
(x_train, y_train),(x_test, y_test) = mnist.load_data()
image = x_train[5]
# Reshape the image into 2D and normalize pixel intensities to between 0 and 1
flattened_image = np.reshape(image, (-1, 28))
flattened_image = flattened_image / 255.
# For illustration purposes display the flattened image of a digit
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.matshow(flattened_image, cmap = mp.cm.binary)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.show()
# design a 3x3 edge-detection filter
# the commented-out weights pick out vertical edges; the active weights pick out horizontal edges
#weights = [-1,0,1,-1,0,1,-1,0,1]
weights = [-1,-1,-1,0,0,0,1,1,1]
# Construct a container for the result of filter application.
# Note that our output image will be slightly smaller than our input image
num_rows, num_cols = np.ma.shape(flattened_image)
edge_output = np.zeros((num_rows-2,num_cols-2))
# iterate over the rows in the image and apply the filter
for i,row in enumerate(flattened_image):
    if i < (num_rows - 2):
        # iterate over each cell in the given row
        for j,val in enumerate(row):
            if j < (num_cols - 2):
                # manually isolate the pixels that the filter will be applied to
                sample = [flattened_image[i][j],
                          flattened_image[i][j+1],
                          flattened_image[i][j+2],
                          flattened_image[i+1][j],
                          flattened_image[i+1][j+1],
                          flattened_image[i+1][j+2],
                          flattened_image[i+2][j],
                          flattened_image[i+2][j+1],
                          flattened_image[i+2][j+2],
                          ]
                # calculate and store a logistic activation based on the sample and weights
                logit = np.matmul(sample,weights)
                edge_output[i,j] = 1 / (1 + np.exp(-logit))
# for illustration purposes display the results of the feature detector
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.matshow(edge_output, cmap = mp.cm.binary)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.show()
```
In the example above we say that our filter is being *convolved* over the input image. In other words, the same filter is applied independently at every position of the input.
Note that in the example code above we design our filter to only look at complete 3x3 blocks of our input image. This is a simplification to allow a quick and transparent implementation of the process. This results in our output image being slightly smaller than the input image. In this case our output image is 26x26 pixels rather than the 28x28 of our input image.
It is also worth noting that the filters above have no bias value. This is acceptable in this simple case since we are not training the filter. However later when we switch to a Neural Network implementation we need to introduce a bias unit.
## Convolutional Layer Design
In convolutional neural networks we apply the principle above to the design of hidden layers in the network. Specifically, instead of relying on neurons in a hidden layer that are fully connected to each neuron in the preceding layer, we instead design our hidden layer neurons in such a way that they are only exposed to a small sample of the input units. However we introduce clones of these neurons so that the entire set of inputs is covered by a given hidden layer neuron type.
We can achieve this goal in a Deep Neural Network architecture by simultaneously learning copies of simple feature detectors that are applied across a complete input vector (2D in the case of images). Each of these detectors share the same weights and can thus be thought of as being copies or clones of each other. The features detected are local and assumed to be translational invariant.
Rather than having a hidden layer that is fully connected to the input layer we end up with a hidden layer that consists of a set of neurons that each look at a slightly different subsample of the input image. The sub-sample might for example be a 3x3 grid of the input image. Each neuron in the convolutional neuron set is looking at a different part of the input, but crucially since all these neurons are forced to share the same weights, they all are looking for the exact same thing.
This principle is illustrated in the figure below where the same feature detector is applied to different samples of the input image to generate a feature map. Note that the weight marked is fixed to the same value across each application.
<!-- cnn-animated.gif -->
In a convolutional layer we typically train a number of these small local features. Each of these features in turn results in individual feature maps. Considering our example earlier we might for example construct a very simple convolutional layer with two features: one feature for vertical line detection and another feature for horizontal line detection. Such a simple convolutional layer is illustrated below.
<!-- cnn_example.png -->
We see in this example that there are in fact two neuron types in our convolutional layer. Each of these is characterised by having its own set of weights, but is applied iteratively over the input image to produce a given feature map. The feature itself is a simple 3x3 feature, i.e. it contains only 9 weights that are applied to a local block of the input. Applying the feature across the whole input produces a Feature Map, which is the resultant image shown above.
### Convolutional Layer Parameters
Looking again at the example we can see that a convolutional layer has 3 key parameters that describe the dimensionality of the layer's output. The first two of these are **width** and **height** and are used to describe the dimensionality of a given feature map in the convolutional layer. The third dimensionality feature is **depth**. Depth is the number of filter or neuron types in the convolution layer. Each index in the depth of the convolution layer allows another filter type / another feature type to be learned. All neurons at the same depth index share the same weights.
The figure below shows a traditional illustration of one layer of a convolutional neural network. From this illustration we can see where the term depth has come to mean the number of features / feature maps in a convolutional layer.
<!-- cnn_depth.png.png -->
It should be noted however that CNNs are sometimes visualised as being orthogonal to our viewing plane. In this case the different feature types move from left to right across the screen as illustrated in this wikimedia image below. In that image the depth of the convolutional layer is 5 units and we see that all 5 units with the same height and width values are projections from the same scanned section of the input image.
<!-- cnn_alt.png -->
In our examples above our filter was applied with a sliding window of 1 over our input image. Thus the overlap between our application of the filters was maximized. It is possible to reduce this overlap and thus result in smaller convolutional feature maps. The amount of overlap is controlled by a parameter called **stride**. In our example above we used a stride of 1. A stride of 3 in the example above would result in no actual overlap of our filter over the input images. In our examples below we will stick with a stride of 1 for notational simplicity.
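For a convolution without padding, the width of the resulting feature map follows directly from the input size, the kernel size, and the stride. The small helper below is only an illustration of that relationship.
```python
# Output width of a convolution without padding: floor((input - kernel) / stride) + 1
def conv_output_size(input_size, kernel_size, stride=1):
    return (input_size - kernel_size) // stride + 1

print(conv_output_size(28, 3, stride=1))  # 26 -- matches the 26x26 feature map above
print(conv_output_size(28, 3, stride=3))  # 9  -- no overlap between filter applications
```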
### From One to Multiple Channels
In our MNIST example, our input is a black and white image. We talk about this image type as just having one channel -- just black and white intensity. Colour images on the other hand are usually built around three individual maps of red, green, and blue intensities. Our feature based approach is easily expanded to deal with multiple channels. In that case the information from all 3 channels is accounted for in a single kernel computation. In the example above we use a 3x3 filter applied to our 1 channel information. This results in 9 parameters to be trained (plus a single bias value). We can apply the same kernel type instead to 3 channel information. In this case the number of parameters that our kernel has to learn is (3 x 3 x 3) + 1, i.e., 28. A single feature detector will still produce a single image map. The advantage of this approach is that our filters not only get the opportunity to learn spatial features, but they can learn features dependent on particular combinations of colour.
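The parameter counts quoted above can be checked with a one-line calculation:
```python
# Weights in a single convolutional kernel: kernel_h * kernel_w * channels, plus one bias.
def kernel_params(kernel_h, kernel_w, channels):
    return kernel_h * kernel_w * channels + 1

print(kernel_params(3, 3, 1))  # 10 -- one grayscale channel (9 weights + bias)
print(kernel_params(3, 3, 3))  # 28 -- three colour channels (27 weights + bias)
```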
### Multiple Convolutional Layers
Rather than having a single convolutional layer in a network, we usually have a number of layers configured in a feedforward arrangement. Here a feature map at layer $k$ can be used as input to any number of feature detectors in the next convolutional layer.
In a typical design, we treat all the image maps in a layer as individual channels being passed into the next layer. This gives our network the opportunity to combine information from multiple trained features.
### Linking to Fully Connected Layers
Layers of convolutional neurons create more and more sophisticated feature detectors that operate locally. At a certain point we want to be able to investigate global connections in an image. For example we might want one feature that activates if a straight vertical line is detected in one area of an image and a horizontal line is detected to its upper right. For these global connections to be detected we need to once again introduce standard non-convolutional layers.
<!-- cnn_full.png -->
In the context of CNNs we refer to standard layers as fully connected layers. The reason for this is simply that all neurons in a standard layer are fully connected back to all units in the prior layer. Where the prior layer is a convolutional layer this means that all units in the convolution are connected. This can lead to an explosion of connections / weights. Consider the case where the convolutional layer has depth 20, height 20, and width 20. If our fully connected layer has say 30 neurons then we are talking about 240000 connections in one layer alone.
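Again the arithmetic is straightforward (the rough figure above ignores the 30 bias weights):
```python
# Connections from a 20x20x20 convolutional output into a 30-unit fully connected layer.
conv_units = 20 * 20 * 20
dense_units = 30
print(conv_units * dense_units)  # 240000 connections (plus 30 bias weights)
```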
## Pooling Layers
With a stride of 1 our convolutional feature maps are almost as big as our input images. With a potentially large number of features in our convolutional layer this can result in a very large number of output values which would then have to be fed into layers further down the network. To limit this growth in connection number we can subsample our convolutional feature maps to reduce their size. In neural network terminology this subsampling is referred to as pooling.
We can illustrate this pooling method by applying a pooling layer to our image detection feature map as follows.
```python
# Construct a container for the result of pooling application.
num_rows, num_cols = np.ma.shape(edge_output)
mean_pool = np.zeros((num_rows//2,num_cols//2))
max_pool = np.zeros((num_rows//2,num_cols//2))
# iterate over the rows in the image and apply 2x2 pooling
for i,row in enumerate(edge_output):
    if i % 2 == 0:
        # iterate over each cell in the given row
        for j,val in enumerate(row):
            if j % 2 == 0:
                # manually isolate the pixels that we will pool
                sample = [edge_output[i][j],
                          edge_output[i][j+1],
                          edge_output[i+1][j],
                          edge_output[i+1][j+1],
                          ]
                # store the pooled values
                mean_pool[i//2,j//2] = np.mean(sample)
                max_pool[i//2,j//2] = np.amax(sample)
# for illustration purposes display the results of the feature detector
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
ax.matshow(max_pool, cmap = mp.cm.binary)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
ax = fig.add_subplot(1, 2, 2)
ax.matshow(mean_pool, cmap = mp.cm.binary)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.show()
```
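The explicit loops above make the mechanics of pooling clear. For reference, the same 2x2 pooling can also be written in vectorised NumPy by reshaping the 26x26 feature map into 2x2 blocks; this sketch assumes the `edge_output`, `max_pool` and `mean_pool` arrays from the cells above are still in memory.
```python
# Vectorised 2x2 pooling: group the 26x26 map into 13x13 blocks of 2x2 and aggregate each block.
blocks = edge_output.reshape(13, 2, 13, 2)
vec_max_pool = blocks.max(axis=(1, 3))
vec_mean_pool = blocks.mean(axis=(1, 3))

# Both results should agree with the loop-based versions above.
print(np.allclose(vec_max_pool, max_pool), np.allclose(vec_mean_pool, mean_pool))
```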
We pool by taking a clump of units and producing one aggregate value for those units. There are a number of ways in which we can aggregate or pool these values. One straightforward way is to simply calculate the mean activation over the clump of units. This is referred to as mean-pooling. Another popular method is to take the maximum value across the clump of units as the activation for the new aggregate unit. This is referred to as max-pooling. Max pooling rather than mean pooling has become the dominant method for pooling. One 'hand-waving' interpretation of why max-pooling might be most useful is that it preserves the identification of strong features rather than smoothing out the feature map.
It is important to note that whereas a convolutional layer has an actual activation function which can be anything from a logistic function to RELU, a pooling layer does not apply any function to the input data apart from the pooling function itself (usually max or mean).
The primary motivation for pooling is thus simply to reduce the size of feature maps and therefore reduce the number of connections in the network. Not only is this important in terms of reducing computational cost, but it also helps to reduce the potential for overfitting.
The figure below illustrates the application of pooling in a complete workflow.
<!-- cnn_pooling.png -->
## Convolution and Pooling in Tensorflow
TensorFlow provides a very neat implementation of Convolutional Neural Network and Pooling functionality where once again we just need to add in some extra layer definitions.
### MNIST in TensorFlow without Convolution
To illustrate the TensorFlow approach we will swap over to the use of the MNIST digits corpus. In the code below we first provide an implementation without the use of convolutional or pooling layers. This implementation uses 2 hidden layers of 128 RELU units each. The Adam optimizer is also used. This is basically the same example we have seen a couple of times over the last few weeks.
```python
# import tensorflow library
import tensorflow as tf
# Get a copy of the mnist dataset container
mnist = tf.keras.datasets.mnist
# Pull out the training and test data
(x_train, y_train),(x_test, y_test) = mnist.load_data()
# Normalize the training and test datasets
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
# Create a simple sequential network object
model = tf.keras.models.Sequential()
# Add layers to the network for processing the input data
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
# Compile the model
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
# Start the training process
model.fit(x=x_train, y=y_train, epochs=5)
model.summary()
# Evaluate the model performance with test data
test_loss, test_acc = model.evaluate(x=x_test, y=y_test,verbose=0)
# Print out the model accuracy
print('\nTest accuracy: ' + str(test_acc*100) + "%" )
```
Epoch 1/5
1875/1875 [==============================] - 5s 2ms/step - loss: 0.4701 - accuracy: 0.8648
Epoch 2/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.1156 - accuracy: 0.9641
Epoch 3/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.0730 - accuracy: 0.9778
Epoch 4/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.0524 - accuracy: 0.9836
Epoch 5/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.0389 - accuracy: 0.9873
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten (Flatten) (32, 784) 0
_________________________________________________________________
dense (Dense) (32, 128) 100480
_________________________________________________________________
dense_1 (Dense) (32, 128) 16512
_________________________________________________________________
dense_2 (Dense) (32, 10) 1290
=================================================================
Total params: 118,282
Trainable params: 118,282
Non-trainable params: 0
_________________________________________________________________
Test accuracy: 97.22999930381775%
The implementation above achieves very high accuracy in just 5 epochs. In fact we can see that the error even after completion of the first epoch is already below 20%. This isn't an error in the code, but is rather due to the use of mini-batch learning. In the code above we did not wait until collecting all of our error before making adjustments to our weights. Instead the training set was split into mini-batches (of the Keras default size, 32). After collecting the errors for each mini-batch we made adjustments to our weights. Given our training data contains 60,000 examples, this means that we in fact make 1,875 applications of back-propagation over every full epoch of training. Before proceeding, try adjusting the batch size and observe how the reported loss changes.
With a batch size of 10000, how many epochs does it take for the error to reduce below 10%?
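One way to run that experiment is to pass `batch_size` explicitly to `fit`. The sketch below simply re-trains the model defined above with a much larger batch size, so for a fair comparison the model should be re-created first; the exact numbers you see will vary from run to run.
```python
# Re-train with an explicit, much larger batch size.
# With batch_size=10000 and 60,000 training images, each epoch makes only 6 weight updates.
model.fit(x=x_train, y=y_train, epochs=5, batch_size=10000)
```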
### MNIST in TensorFlow with Convolution
Implementing a convolutional layer in TensorFlow is very straightforward. We can take our MNIST code from above and very quickly update it to incorporate a CNN. The major changes are in the model definition below.
Note that we have to add an explicit channel dimension to our input images for use with the convolutional layer. Similarly, following the convolutional and pooling layers we have to flatten the data back to 1D to feed it into a fully-connected layer.
```python
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
input_shape = (28, 28, 1)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(12, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
# Compile the model
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.summary()
# Start the training process
model.fit(x=x_train, y=y_train, epochs=5)
# Evaluate the model performance with test data
test_loss, test_acc = model.evaluate(x=x_test, y=y_test,verbose=0)
# Print out the model accuracy
print('\nTest accuracy: ' + str(test_acc*100) + "%" )
```
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 26, 26, 12) 120
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 13, 13, 12) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 2028) 0
_________________________________________________________________
dense_3 (Dense) (None, 16) 32464
_________________________________________________________________
dense_4 (Dense) (None, 10) 170
=================================================================
Total params: 32,754
Trainable params: 32,754
Non-trainable params: 0
_________________________________________________________________
Epoch 1/5
1875/1875 [==============================] - 18s 9ms/step - loss: 0.6760 - accuracy: 0.7947
Epoch 2/5
1875/1875 [==============================] - 17s 9ms/step - loss: 0.1739 - accuracy: 0.9513
Epoch 3/5
1875/1875 [==============================] - 17s 9ms/step - loss: 0.1069 - accuracy: 0.9687
Epoch 4/5
1875/1875 [==============================] - 17s 9ms/step - loss: 0.0900 - accuracy: 0.9730
Epoch 5/5
1875/1875 [==============================] - 17s 9ms/step - loss: 0.0713 - accuracy: 0.9789
Test accuracy: 97.49000072479248%
## Skip Connections & Residual Networks
CNNs allow us to build networks with general purpose feature detectors that can be applied over an entire image while only requiring a relatively small number of weights. This versatility led to many different network architectures being developed around the basic CNN architecture over the last 10 years. Deeper and deeper networks with 50+ layers were often found to be very useful as they could detect more and more complex features from the image data fed in. However, well known challenges in building deeper and deeper networks were also seen. Problems such as the 'vanishing gradient' problem limited the depth of deep learning networks in general and deep CNN networks in particular.
The so-called skip connection provided an essential next step in the development of very deep CNN based networks. The basic idea of the skip connection is that instead of simply having a single stack of neurons in a given hidden block, we build a special block type that includes a typical layer architecture but also a direct link between the block's inputs and outputs, which allows the signal to flow from inputs to outputs without the alteration caused by the activation function. These skip connections have become an integral part of so-called Residual Networks and underpin the standard deep CNN architectures at this point.
The figure below illustrates the general layout of a skip connection as used in a residual network.
<!-- cnn_pooling.png -->
The top of the image shows an extract from two standard layers of a network. Here we have an input x. For the moment we can assume this is either directly from an actual input, or perhaps is an output from another hidden layer that is not shown. In the usual way, that value x passes through two hidden layers in sequence. In each layer we first of all compute the value z from x and the weights, before passing this value, the logit, on to the unit's activation function. Here we assume the activation function is the Rectified Linear Unit. The output of this unit is then passed forward as input to the next unit which in turn calculates the logit with its own weights before again calculating the activation function. This second activation function can be thought of as the output of this extract, or block.
In the bottom of the image we show the altered form as used in a residual network. Again we have an input x, but in this case as well as being passed into the first unit, it is also copied around the first hidden unit and fed into the second unit directly. When it is fed into the second unit, it is important to note that the input is not fed into the unit's standard inputs before calculation of the logit. Instead it is added to the output of the logit calculation just before the calculation of the activation function.
While this explains the mechanism of what a skip connection is, it says little about the intuition of why this would work. In short, the intuition is that in backpropagation the skip connection allows more of the error signal to continue to propagate back through the network than would be possible otherwise. The two RELU functions can still learn complex functions, but importantly we can now build longer, deeper networks.
It should be noted that this is just one example of what a residual block can look like. There are many more variations of this general theme.
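As an illustration only, a residual block of the kind described above can be sketched in the Keras functional API. The layer sizes and the use of `Dense` layers here are assumptions made for clarity; in a CNN the same pattern is normally applied with `Conv2D` layers.
```python
import tensorflow as tf

def residual_block(x, units=64):
    # First unit: logits followed by a ReLU activation, as in a standard layer.
    h = tf.keras.layers.Dense(units, activation='relu')(x)
    # Second unit: compute the logits only, with no activation yet.
    z = tf.keras.layers.Dense(units, activation=None)(h)
    # Skip connection: add the block input to the logits, then apply the ReLU.
    # (Assumes x already has `units` features so the shapes match.)
    return tf.keras.layers.Activation('relu')(tf.keras.layers.Add()([z, x]))

inputs = tf.keras.Input(shape=(64,))
outputs = residual_block(inputs)
res_model = tf.keras.Model(inputs, outputs)
```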
## The Inception Networks
While residual blocks solve many of the challenges in deep CNNs, they are not the end of the story. Making image classifiers more generic required a number of other engineering tricks that increased performance. The Inception Networks led the way in many of these changes.
One of the most important changes introduced by the Inception Network concerned the issue of scale.
The core of the CNN is the feature kernel. In the examples above we used small kernels such as 3x3 filters for image processing. In practice however different kernel sizes are better for different object analysis types. For example small kernels are good at identifying small patterns in data, while large kernels are better at identifying more global information. The most important contribution of the Inception networks was the idea of having kernels of different sizes within the same layer of the network. This allowed the network to be far more scale invariant in training than was possible previously.
<!-- cnn_pooling.png -->
The figure above, from Inception Network v1, illustrates how multiple convolution filter sizes were used in a typical layer alongside max pooling layers to provide a wider range of analysis than was possible previously.
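As a rough illustration of this idea, the sketch below builds a single inception-style block in Keras that applies 1×1, 3×3 and 5×5 convolutions plus a max-pooling branch to the same input and concatenates the results. The filter counts and the helper name `inception_block` are assumptions for illustration only; the real Inception v1 module also uses extra 1×1 convolutions to reduce dimensionality before the larger kernels.

```python
import tensorflow as tf
from tensorflow.keras import layers

def inception_block(x, filters=32):
    # Parallel branches with different kernel sizes analyse the input at
    # different spatial scales; 'same' padding keeps their outputs aligned.
    b1 = layers.Conv2D(filters, (1, 1), padding="same", activation="relu")(x)
    b3 = layers.Conv2D(filters, (3, 3), padding="same", activation="relu")(x)
    b5 = layers.Conv2D(filters, (5, 5), padding="same", activation="relu")(x)
    bp = layers.MaxPooling2D((3, 3), strides=1, padding="same")(x)
    # Concatenate along the channel axis so later layers see all scales at once
    return layers.Concatenate(axis=-1)([b1, b3, b5, bp])

inputs = tf.keras.Input(shape=(28, 28, 1))
outputs = inception_block(inputs)
model = tf.keras.Model(inputs, outputs)
model.summary()
```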
Inception introduced many other useful improvements that are beyond what we can cover here. For anyone interested, a good blog review of the Inception network versions and their individual improvements can be found here:
https://towardsdatascience.com/a-simple-guide-to-the-versions-of-the-inception-network-7fc52b863202#:~:text=The%20Inception%20network%20was%20an,Designing%20CNNs%20in%20a%20nutshell.
## CIFAR 10 Tutorial
The TensorFlow Website has a number of excellent tutorials which demonstrate the power of TensorFlow. CIFAR 10 is a basic dataset for image classification which is often used as a next step after MNIST classification.
Read, study and put to work the TensorFlow CIFAR 10 Tutorial as described here:
https://www.tensorflow.org/tutorials/images/cnn
In order to get your code to run on non-GPU hardware it may be necessary to reduce the complexity of the network, or to train for a shorter period of time than would otherwise be ideal.
## Suggested Tasks
1. Add a second CNN layer to the MNIST example above. How does performance and training compare to the 1 CNN variant?
2. Add dropout to the MNIST example. Once again, how does training speed and accuracy compare to the original variant?
3. Integrate the CIFAR-10 dataset into the MNIST example above.
# Function
### Projectile
\begin{equation}
R=\frac{u^2\sin 2\theta}{g}
\end{equation}
\begin{equation}
TF=\frac{2u\sin \theta}{g}
\end{equation}
\begin{equation}
H=\frac{u^2\sin^2\theta}{2g}
\end{equation}
```python
import numpy as np # from numpy import* not required to write np
import pandas as pd # from pandas import*
import matplotlib.pyplot as plt # from matplotlib import*
%matplotlib inline
```
```python
def projectile(u,theta):
g=9.8 # acceleration due to gravity
R = u**2*np.sin(2*np.pi*theta/180)/g # range
H= u**2*np.sin(np.pi*theta/180)**2/(2*g) # max. height
TF = 2*u*np.sin(np.pi*theta/180)/g # time of flight
return [R,H,TF]
```
```python
p=projectile(100,60)
p
```
[883.699391616774, 382.6530612244897, 17.67398783233548]
```python
Angle=[] #list for angle
R=[] #list for range
H=[] # list for height
TF=[] #list for time of flight
for angle in range(1,91):
a=projectile(100,angle)
Angle.append(angle)
R.append(a[0]) # added element in list R
H.append(a[1]) # added element in list H
TF.append(a[2]) # added element in list TF
```
```python
plt.plot(Angle,R,'g-.',label='Range')
plt.plot(Angle,H,'b^',label='Max.height')
plt.xlabel('Angle(degree)')
plt.ylabel('Distance(m)')
plt.title('Projectile')
plt.legend()
plt.show()
```
```python
plt.plot(Angle,TF,'k*')
plt.xlabel('Angle(degree)')
plt.ylabel('Time of flight(sec)')
plt.title('Projectile')
plt.savefig('projectile.eps')
plt.show()
```
```python
data={} # dictionary
data.update({"Angle":Angle,"Range":R ,"Time of flight":TF,"Max.Height": H})
```
```python
DF = pd.DataFrame(data)
DF
```
```python
DF.to_csv("projectile.csv") #save data in csv format in excel
```
```python
```
```python
import ipywidgets as ipw
import json
import random
import time
import pandas as pd
import os
import webbrowser
import math
from IPython.display import display, Markdown
# set kinetic parameters
with open("rate_parameters.json") as infile:
jsdata = json.load(infile)
params = jsdata["kin1"]
```
Copyright **Jacob Martin and Paolo Raiteri**, January 2021
## Numerical solution of chemical equilibrium problems #1
Imagine a simple dimerisation reaction
\begin{equation}
2A \to B
\end{equation}
whose equilibrium constant can be written as
\begin{equation}
K_{eq} = \frac{[B]}{[A]^2} = 0.156
\end{equation}
and suppose we want to calculate the equilibrium concentrations $[A]_{eq}$ and $[B]_{eq}$ given the initial concentrations $[A]_{0}$ and $[B]_{0}$.
Although this is a simple problem that can be solved analytically, in this workshop we will learn how we can use an iterative method to numerically solve it.
We will use a relatively simple minimisation procedure that can be applied to a large number of problems, for which it is not possible or it is too complicated to get an analytic solution.
Imagine mixing the reagents and then being able to monitor the concentration of all the species in the system at discrete time intervals (*timesteps*). What you will see is that the concentrations change and tend to their equilibrium values. As you have learnt in first year, the reaction quotient, $Q$, can be used to decide which way the reaction will proceed, and at equilibrium the reaction quotient is equal to the equilibrium constant. Hence, as we have discussed in class, the reaction quotient and the equilibrium constant can be used to define a *driving force* that pulls the system towards equilibrium.
This *driving force* can then be used in conjunction with an *ICE* table to numerically compute the equilibrium concentration of reactant and products.
| | [A] | [B]
| :--- | :--------: |:---------:
| *I* | [A]$_0$ | [B]$_0$
| *C* | -2x | x
| *E* | [A]$_0$-2x | [B]$_0$+x
Here below you can see the working principle of the minimisation procedure that we will employ.
1. compute the reaction quotient at beginning of the experiment
\begin{equation}
Q = \dfrac{\mathrm{[B]}_0}{\mathrm{[A]}^2_0}
\end{equation}
2. compute the driving force.
\begin{equation}
F \propto \ln\bigg[\frac{K_{eq}}{Q}\bigg]
\end{equation}
If $K_{eq}/Q<1$ the reaction proceeds towards the reactants, which we can call the *negative* direction, while if $K_{eq}/Q>1$ the reaction proceeds towards the products, *i.e.* in the positive direction.
You can indeed see that because the *driving force* is defined as the logarithm of the $K_{eq}/Q$ it correctly changes sign, *i.e.* direction, when $Q$ become smaller or larger than $K_{eq}$.
3. compute the new concentrations after a *timestep* has passed.
We now assume that for an arbitrarily short time interval the *driving force* doesn't change, and compute the concentrations of all the species in the system after that small amount of time has passed, which corresponds to the $x$ in the *ICE* table above.
\begin{equation}
x = \delta\ln\bigg[\frac{K_{eq}}{Q}\bigg]
\end{equation}
There is no unique way to compute x; the only requirement is that it should be a comparatively small change in the system composition, otherwise the procedure will become unstable. This is because our assumption that the *driving force* is constant will break down. In the formula above we have introduced a new parameter $\delta$ that allows us to control how much the concentrations can change before we recompute the *driving force*.
A reasonable choice for delta could be a value around 20% of the smallest concentration we have in the system.
The larger the value of $\delta$, the faster our procedure will converge, up to the point where the calculation becomes unstable and the method fails to converge.
On the contrary, small values of $\delta$ will always converge to the correct solutions, but it may take a longer number of cycles.
4. Repeat from step 1 until there are no more change in the concentrations of all the species.
Follow the demonstrator, who will now explain how to create an Excel spreadsheet that implements those steps.
5. The calculation has converged when the concentrations of the species don't change anymore, *i.e.* the *driving force* is zero and the reaction quotient is equal to the equilibrium constant.
Now try to solve this problem yourself using an Excel spreadsheet.
The Python program below can be used to verify your result.
### Important note on the first cycle:
In some cases one (or more) of the species involved in the reaction may have a zero initial concentration.
Therefore, the calculation of the reaction quotient would give $Q=0$ or $Q=\infty$, which makes the calculation of the force, $\ln[K_{eq}/Q]$, impossible. In order to circumvent that problem, you can perform a "manual" first step of the minimisation cycle using an arbitrary (small) value for the force; *e.g.* $F=1$ or smaller. If a reactant has a zero concentration, you would have to use a small negative force.
From then on, once the concentrations of all the species are different from zero and positive, you can follow the procedure outlined above.
- Click `Download CSV` to export the data as a CSV file to verify your result.
```python
def initialise():
global nPoints
global concA, concB
global Keq
global delta
nPoints = 20
concA = 1
concB = 0.1
Keq = 0.156
delta = 0.2
def addLine(t,x,y,res,Q):
var_list = []
var_list.append(t)
var_list.append(x)
var_list.append(y)
var_list.append(Q)
res.loc[len(res)] = var_list
initialise()
```
```python
respath = os.path.join(os.getcwd(), "..", "results.csv")
out_P = ipw.Output()
out_L = ipw.Output()
with out_L:
display(Markdown("[Download CSV](../results.csv)"))
def force(Q,k):
if (abs(Q) > 1.e-6):
force = - math.log(Q/k)
else:
force = 1.
return force
def calc1(btn):
out_P.clear_output()
if os.path.exists(respath):
os.remove(respath)
res = pd.DataFrame(columns=["step" , "[A]" , "[B]", "Q"])
A = float(concA_text.value)
B = float(concB_text.value)
dx = float(delta_text.value)
k = float(Keq_text.value)
n = int(nPoints_text.value)
Q = B / math.pow(A,2)
addLine(0,A,B,res,Q)
for i in range(0, n):
f = force(Q,k)
cc = min(A,B)
A = A - 2 * dx * f * cc
B = B + dx * f * cc
Q = B / math.pow(A,2)
addLine(i,A,B,res,Q)
# Append result
res.to_csv(respath, index=False)
with out_P:
display(res.tail(n))
btn_calc1 = ipw.Button(description="Get Data", layout=ipw.Layout(width="150px"))
btn_calc1.on_click(calc1)
rows = []
# Equilibrium constant
Keq_text = ipw.Text(str(Keq))
# Initial concentrations
concA_text = ipw.Text(str(concA))
concB_text = ipw.Text(str(concB))
# delta concentration
delta_text = ipw.Text(str(delta))
# Nmber of data points
nPoints_text = ipw.Text(str(nPoints))
rows.append(ipw.HBox([ipw.Label('Initial concentration of A : '),concA_text]))
rows.append(ipw.HBox([ipw.Label('Initial concentration of B : '),concB_text]))
rows.append(ipw.HBox([ipw.Label('Equilibrium constant : '),Keq_text]))
rows.append(ipw.HBox([ipw.Label('Delta concentration : '),delta_text]))
rows.append(ipw.HBox([ipw.Label('Number of data point required: '),nPoints_text]))
rows.append(ipw.HBox([btn_calc1]))
rows.append(ipw.HBox([out_L]))
rows.append(ipw.HBox([out_P]))
ipw.VBox(rows)
```
```python
```
# Module 2 Exam - Differential equations. <font color=blue>Exam type 1</font>.
Read the following **instructions** carefully before starting the exam:
- To answer the exam, edit this same file and rename it as follows: *Examen1_ApellidoNombre*, where *ApellidoNombre* is your last name with the first letter capitalised, followed by your first name with the first letter capitalised, **without accents**. For example, in my case the file name would be *Examen1_JimenezEsteban*.
- Solve the problems in the space provided. If you need to add more cells for code or text, do so.
- Remember that your ability to interpret the results is also being assessed. Write your interpretations/conclusions in cells using *Markdown*.
- The overall presentation of the exam must be adequate. Use font sizes, colours, labels, etcetera.
- The course instructors will not answer questions of any kind. Please refrain from asking.
- Check the exam type carefully. You will only be graded if you complete the exam type assigned to you.
## Matters of love.
We consider a simple model of the love/hate dynamics in a couple, proposed by Strogatz in 1988.
- Strogatz, S.H. (1988) *Love affairs and differential equations*, Math. Magazine 61, 35.
___
### First problem (50 points).
Romeo is in love with Julieta, but in this case Julieta's feelings are fickle. The more Romeo loves her, the more she wants to run away from him. But when Romeo pays her no attention, Julieta starts to find him very attractive.
On the other hand, Romeo's feelings are imitative. This means that he becomes affectionate when she loves him and turns cold when she hates him.
Let:
- $x_1(t)$: Romeo's love/hate towards Julieta at time $t$.
- $x_2(t)$: Julieta's love/hate towards Romeo at time $t$.
We will take a positive value of $x_1$ or $x_2$ to correspond to love, and a negative value to correspond to hate.
A model of the romance described above is
\begin{align}
\frac{dx_1}{dt}&=a\;x_2\\
\frac{dx_2}{dt}&=-b\;x_1,
\end{align}
with $a$ and $b$ positive constants. We define $x=\left[x_1\quad x_2\right]^T$.
1. (20 points) Simulate this system choosing the parameters $a=b=1$ and initial conditions $x(0)=\left[x_1(0)\quad x_2(0)\right]^T=\left[5\quad 5\right]^T$, using a time vector from 0 to 50 (time units).
2. (15 points)
- (10 points) Plot the solutions in time, that is, $x_1$ vs. $t$ and $x_2$ vs. $t$.
- (5 points) What shape do these solutions have? What can be said about Romeo's love/hate towards Julieta? What can be said about Julieta's love/hate towards Romeo?
3. (15 points)
- (10 points) Plot the phase portrait, that is, $x_2$ vs. $x_1$.
- (5 points) What is the love between Romeo and Julieta like? Do you consider this kind of love healthy?
```python
#### Primer punto
# Importamos librerías
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
%matplotlib inline
```
```python
# Parámetros a y b
a, b = 1, 1
# Condiciones iniciales
x0 = np.array([5, 5])
# Vector de tiempo
t = np.linspace(0, 50, 1000)
```
```python
# Definir la funcion a integrar
def amor(x, t, a, b):
x1, x2 = x[0], x[1]
return np.array([a*x2, -b*x1])
```
```python
# Resolver la ecuacion numericamente
x = odeint(amor, x0, t, args=(a,b))
```
```python
#### Segundo punto
# Separar soluciones
x1 = x[:,0]
x2 = x[:,1]
# Graficar
plt.figure(figsize=(8,6))
plt.plot(t, x1, label='Amor de Romeo hacia Julieta $x_1(t)$')
plt.plot(t, x2, label='Amor de Julieta hacia Romeo $x_2(t)$')
plt.legend(loc='best')
plt.xlabel('Tiempo (unidades de tiempo)')
plt.ylabel('Sentimientos Amor/Odio')
plt.grid()
plt.show()
```
The solutions have a sinusoidal shape.
- Their feelings are oscillatory. However, the feelings never die out; they keep switching constantly between love and hate and back again.
```python
#### Tercer punto punto
# Graficar
plt.figure(figsize=(6,6))
plt.plot(x1, x2)
plt.xlabel('Amor de Romeo hacia Julieta $x_1$')
plt.ylabel('Amor de Julieta hacia Romeo $x_2$')
plt.grid()
plt.show()
```
- This kind of love is not healthy.
- Or perhaps it is healthy, because the intensity is maintained.
- There are always feelings present.
- They go through all the stages.
___
Note that more general romances can be considered with the following model
\begin{align}
\frac{dx_1}{dt}&=a\;x_1+b\;x_2\\
\frac{dx_2}{dt}&=c\;x_1+d\;x_2,
\end{align}
where $a$, $b$, $c$ and $d$ are constants that can be either positive or negative.
For example, if $a>0$ and $b>0$, we could call Romeo a "passionate lover". Or, if $a<0$ and $b>0$, we could call Romeo a "cautious lover".
___
### Second problem - Identical lovers.
In this problem we will consider Romeo and Julieta as two identical cautious lovers.
Then, a model describing this romance is
\begin{align}
\frac{dx_1}{dt}&=-a\;x_1+b\;x_2\\
\frac{dx_2}{dt}&=b\;x_1-a\;x_2,
\end{align}
with $a$ and $b$ positive constants. We define $x=\left[x_1\quad x_2\right]^T$.
Simulate this system and obtain plots of $x_1$ vs. $t$, $x_2$ vs. $t$ and $x_2$ vs. $x_1$ in each of the following cases. In addition, draw a conclusion about what the love between Romeo and Julieta is like when $t\to\infty$.
1. (25 points) For $a=2$ and $b=1$.
- $x(0)=\left[x_1(0)\quad x_2(0)\right]^T=\left[2\quad 1\right]^T$.
- $x(0)=\left[x_1(0)\quad x_2(0)\right]^T=\left[-2\quad 1\right]^T$.
- $x(0)=\left[x_1(0)\quad x_2(0)\right]^T=\left[-2\quad -1\right]^T$.
- $x(0)=\left[x_1(0)\quad x_2(0)\right]^T=\left[2\quad -1\right]^T$.
2. (25 points) For $a=1$ and $b=2$.
- $x(0)=\left[x_1(0)\quad x_2(0)\right]^T=\left[2\quad 1\right]^T$.
- $x(0)=\left[x_1(0)\quad x_2(0)\right]^T=\left[-2\quad 1\right]^T$.
- $x(0)=\left[x_1(0)\quad x_2(0)\right]^T=\left[-2\quad -1\right]^T$.
- $x(0)=\left[x_1(0)\quad x_2(0)\right]^T=\left[2\quad -1\right]^T$.
```python
# Definir la funcion a integrar
def amor2(x, t, a, b):
x1, x2 = x[0], x[1]
return np.array([-a*x1+b*x2, b*x1-a*x2])
```
```python
# Parámetros a y b
a, b = 2, 1
# Vector de tiempo
t = np.linspace(0, 50, 1000)
```
```python
# Primer condicion inicial
x0 = np.array([2, 1])
# Simulacion
x = odeint(amor2, x0, t, args=(a,b))
```
```python
# Separar soluciones
x1 = x[:,0]
x2 = x[:,1]
# Graficar x1,x2 vs t
plt.figure(figsize=(8,6))
plt.plot(t, x1, label='Amor de Romeo hacia Julieta $x_1(t)$')
plt.plot(t, x2, label='Amor de Julieta hacia Romeo $x_2(t)$')
plt.legend(loc='best')
plt.xlabel('Tiempo (unidades de tiempo)')
plt.ylabel('Sentimientos Amor/Odio')
plt.grid()
plt.show()
```
```python
# Graficar x2 vs x1
plt.figure(figsize=(6,6))
plt.plot(x1, x2, label='Trayectoria de sentimientos')
plt.plot(x1[0], x2[0], 'd', label='Sentimientos iniciales')
plt.plot(x1[-1], x2[-1], 'ob', label='Sentimientos finales')
plt.xlabel('Amor de Romeo hacia Julieta $x_1$')
plt.ylabel('Amor de Julieta hacia Romeo $x_2$')
plt.legend(loc='best')
plt.grid()
plt.show()
```
At first they love each other and later they become indifferent (they stop loving each other).
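A compact way to cover the remaining cases is sketched below (it simply reuses the `amor2` function and the time vector `t` defined above, looping over both parameter sets and the four initial conditions of the exam):

```python
# Sweep over both parameter sets and the four initial conditions
casos = [(2, 1), (1, 2)]
x0s = [np.array([2, 1]), np.array([-2, 1]), np.array([-2, -1]), np.array([2, -1])]
fig, axes = plt.subplots(1, 2, figsize=(12, 5))
for ax, (a, b) in zip(axes, casos):
    for x0 in x0s:
        sol = odeint(amor2, x0, t, args=(a, b))
        ax.plot(sol[:, 0], sol[:, 1], label=f'x0={x0}')
    ax.set_title(f'a={a}, b={b}')
    ax.set_xlabel('$x_1$')
    ax.set_ylabel('$x_2$')
    ax.legend()
plt.show()
```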
___
Example from class
```python
# Definir la funcion
def ejemplo(x, t, k1, k2, k3):
x1, x2, x3, x4 = x[0], x[1], x[2], x[3]
return np.array([-k1*x1+x2**2+x3**3,
x1-k2*x2+x3**3,
x1+x2**2-k3*x3,
x1+x2+x3])
```
```python
# Parametros
k1, k2, k3 = 10, 20, 30
# Condicion inicial
x0 = np.array([1, 1, 1, 0])
# Vector de tiempo
t = np.linspace(0, 0.7, 1000)
# Simular
x = odeint(ejemplo, x0, t, args=(k1,k2,k3))
# Separamos soluciones
x1 = x[:,0]
x2 = x[:,1]
x3 = x[:,2]
x4 = x[:,3]
```
```python
# Graficar x1,x2 vs t
plt.figure(figsize=(8,6))
plt.plot(t, x1, label='$x_1(t)$')
plt.plot(t, x2, label='$x_2(t)$')
plt.plot(t, x3, label='$x_3(t)$')
plt.plot(t, x4, label='$x_4(t)$')
plt.legend(loc='best')
plt.xlabel('Tiempo (unidades de tiempo)')
plt.grid()
plt.show()
```
```python
# Ejemplo gráfico 3d
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
# Esta parte es la que permite graficar z vs (x,y)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(x1, x2, x3, label='ejemplo 3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.legend()
plt.show()
```
```python
```
```python
```
```python
```
```python
```
# Announcements
## https://docs.google.com/spreadsheets/d/1QYtUajeyHoE-2jEKZ7yilkzu923br28Rqg0FCnQ1zHk/edit?usp=sharing
<footer id="attribution" style="float:right; color:#808080; background:#fff;">
Created with Jupyter by Esteban Jiménez Rodríguez.
</footer>
<b>Sketch the graph and obtain an equation of the parabola that satisfies the given conditions.</b>
<b>17. Vertex: $V(0,0)$; Axis $y=0$; Passes through the point $(4,5)$</b><br><br>
<b>Since the parabola's axis lies along the $x$ axis, the equation that represents it is given by </b>$y^2 = 2px$<br><br>
<b>Substituting the given point into the equation we have: </b><br><br>
$5^2 = 2\cdot p \cdot 4$<br><br>
$25 = 8p$<br><br>
$\frac{25}{8} = p$<br><br>
<b>Finding the value of the focus</b><br><br>
$F = \frac{p}{2}$<br><br>
$F = \frac{\frac{25}{8}}{2}$<br><br>
$F = \frac{25}{8} \cdot \frac{1}{2}$<br><br>
$F = \frac{25}{16}$<br><br>
$F(\frac{25}{16},0)$<br><br>
<b>Finding the value of the directrix</b><br><br>
$D = -\frac{p}{2}$<br><br>
$D = -\frac{25}{16}$<br><br>
$D : x = -\frac{25}{16}$<br><br>
<b>Assembling the equation</b><br><br>
$y^2 = 2 \cdot \frac{25}{8} \cdot x$<br><br>
$y^2 = \frac{50}{8}x$<br><br>
$y^2 = \frac{25}{4}x$<br><br>
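A quick numerical check that the point $(4,5)$ indeed satisfies the equation found above:

```python
# Check: does (4, 5) satisfy y^2 = (25/4) x ?
x0, y0 = 4, 5
print(y0**2 == 25/4 * x0)   # True
```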
<b>Graph of the parabola</b><br><br>
```python
from sympy import *
from sympy.plotting import plot_implicit
x, y = symbols("x y")
plot_implicit(Eq((y-0)**2, 25/4*(x+0)), (x,-10,10), (y,-10,10),
title=u'Gráfico da parábola', xlabel='x', ylabel='y');
```
# Elliptic curve cryptography
What is an Elliptic curve (EC)? An elliptic curve is a plane algebraic curve over a [finite field](https://en.wikipedia.org/wiki/Finite_field) which is defined by an equation of the form:
\begin{equation}
y^2 = x^3+ax+b \quad \textrm{where} \quad 4a^3+27b^2 ≠ 0
\label{eq:ecurve}
\tag{1}
\end{equation}
The $4a^3+27b^2 ≠ 0$ constraint is required to avoid singular points.
A finite field is a set where operations of multiplication, addition, subtraction and division are defined according to basic rules. Examples of finite fields are [integers mod p](https://en.wikipedia.org/wiki/Modular_arithmetic#Integers_modulo_n) when p is a prime number.
```python
# Import the necessary libraries
# to remove code in browser, press f12 and in console type: document.querySelectorAll("div.input").forEach(function(a){a.remove()})
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import seaborn as sns
import numpy as np
import pandas as pd
from typing import Callable, Tuple
from scipy import optimize
def ecurve_power2(a: float, b: float, x: float) -> float:
# y²=x³+ax+b and 4a³+27b²≠0
# secp256k1 curve is y² = x³+7
return x**3 + x*a + b
def ecurve(domain: pd.array, ecurve_power2_func: Callable[[float], float]) -> pd.DataFrame:
# y = sqrt(x³+ax+b)
# Only return domain where y>0
y2 = ecurve_power2_func(domain)
x_ = domain[y2>0]
y2 = y2[y2>0]
y = np.sqrt(y2)
dataset = pd.DataFrame({'x': x_, 'y': y, 'y_neg': (-1)*y})
return dataset
def domain(x1: float, x2: float, step: float = 0.1) -> np.ndarray:
return np.arange(x1, x2, step).astype(np.float64)
def straight_line(m: float, c: float, x: float) -> float:
# y = xm + c
return m*x + c
def calc_straight_line_params(point1: Tuple[float, float], point2: Tuple[float, float]) -> Tuple[float, float]:
# Calculate the gradient(m) and y intercept(c) in: y = xm + c
x1, y1 = point1
x2, y2 = point2
m = (y2 - y1)/(x2 - x1)
c = -1*x2*m + y2
return m, c
def plot_elliptic_curve(axs: plt.axes, domain: pd.array, ecurve_power2_partial: Callable[[float], float], title="") -> None:
# data must have x and y coloms
data = ecurve(domain, ecurve_power2_partial)
# to display as a continues function, the grid needs to go past the cut of values for the ec, hence the -1's
X, Y = np.mgrid[min(data.x)-1:max(data.x):100j, min(data.y_neg)-1:max(data.y):100j]
axs.contour(X, Y, Y**2 - ecurve_power2_partial(X), levels=[0]) # pos graph
axs.contour(X, Y*-1, Y**2 - ecurve_power2_partial(X), levels=[0]) # pos graph
axs.set_title(title)
axs.set_xlim(min(data.x)-1, max(data.x)+1)
axs.set_ylim(min(data.y_neg)-1, max(data.y)+1)
def plot_straight_line(axs: plt.axes, domain: pd.array, straight_line_partial_func: Callable[[float], float], title="") -> None:
axs.plot(domain, straight_line_partial_func(domain))
if title != "":
axs.set_title(title)
def roots(f: Callable[[float], float], g: Callable[[float], float], domain: pd.array) -> np.array:
d = lambda x: f(x) - g(x)
roots_index = np.argwhere(np.diff(np.sign(d(domain)))).flatten()
return domain[roots_index].to_numpy()
def calc_intersection(domain: pd.array, ecurve_power2_partial_func: Callable[[float], float], straight_line_partial_func: Callable[[float], float]) -> Tuple[float, float]:
data = ecurve(domain, ecurve_power2_partial_func)
ecurve_pos_partial = lambda x: np.sqrt(ecurve_power2_partial_func(x))
ecurve_neg_partial = lambda x: np.sqrt(ecurve_power2_partial_func(x))*-1
roots_pos = roots(ecurve_pos_partial, straight_line_partial_func, data.x)
roots_neg = roots(ecurve_neg_partial, straight_line_partial_func, data.x)
intersections = pd.DataFrame({'x': roots_pos, 'y': ecurve_pos_partial(roots_pos)})
intersections2 = pd.DataFrame({'x': roots_neg, 'y': ecurve_neg_partial(roots_neg)})
return intersections.append(intersections2).reset_index()
```
Example of elliptic curves with different a and b values:
```python
# Setup domain and Elliptic Curve function
dom = domain(-5,5) # Domain
secp256k1_pow2 = lambda x: ecurve_power2(0, 7, x) # EllipticCurve function y^2 with secp256k1 parameters
```
```python
# calc_point_on_ec = ecurve(dom, secp256k1_pow2) # EllipticCurve function sqrrt(y^2)
fig_example, (ax1_example, ax2_example, ax3_example, ax4_example) = plt.subplots(1,4, sharex='col', sharey='row', gridspec_kw={'hspace': 0.2, 'wspace': 0.1},figsize=(20,5))
plot_elliptic_curve(ax1_example, dom, lambda x: ecurve_power2(1, -1, x), 'y² = x³+x-1')
plot_elliptic_curve(ax2_example, dom, lambda x: ecurve_power2(1, 1, x), 'y² = x³+x+1')
plot_elliptic_curve(ax3_example, dom, lambda x: ecurve_power2(-3, 3, x), 'y² = x³-3x+3')
plot_elliptic_curve(ax4_example, dom, lambda x: ecurve_power2(-4, 0, x), 'y² = x³-4x')
```
The elliptic curve used by most cryptocurrencies is called the secp256k1 and takes the form
\begin{equation}
y^2 = x^3+7
\label{eq:secp256k1}
\tag{2}
\end{equation}
```python
fig_secp256k1, (ax_secp256k1) = plt.subplots(1,1, sharex='col', sharey='row', gridspec_kw={'hspace': 0.2, 'wspace': 0.1},figsize=(4,5))
plot_elliptic_curve(ax_secp256k1, dom, secp256k1_pow2, 'y² = x³+7')
```
## Finite field
The elliptic curve operation for point addition is different from normal addition. With normal addition you would expect that point1 (x1, y1) + point2 (x2, y2) would equal (x1+x2, y1+y2). This is not so with elliptic curves, where the add operation is defined differently: when you add two points on an elliptic curve together, you get a third point on the curve.
The process can be described as follows: when you have 2 points on an elliptic curve, you draw a line between the points and determine where it intersects the curve. This intersection point is then reflected across the x-axis (i.e. multiply the y-coordinate by -1 to get (x, y*-1)).
An example of addition would be:
```python
fig_intersec, (ax1_intersec, ax2_intersec, ax3_intersec, ax4_intersec) = plt.subplots(1,4, sharex='col', sharey='row', gridspec_kw={'hspace': 0.2, 'wspace': 0.1},figsize=(20,5))
plot_elliptic_curve(ax1_intersec, dom, secp256k1_pow2, 'To add point P and Q')
plot_elliptic_curve(ax2_intersec, dom, secp256k1_pow2, 'Draw a line between the points')
plot_elliptic_curve(ax3_intersec, dom, secp256k1_pow2, 'Reflect that point across the x-axis')
plot_elliptic_curve(ax4_intersec, dom, secp256k1_pow2, ' P+Q=R')
# Arbitrary points on elliptic curve
points = ecurve(pd.array([-1, 4]), secp256k1_pow2)
point1 = (points.x[0], points.y[0])
point2 = (points.x[1], points.y_neg[1])
m, c = calc_straight_line_params(point1=point1, point2=point2) # Calculate straight line paramaters giving the points
straight_line_partial = lambda x: straight_line(m, c, x) # Straight line function with paramaters
# Calculate intersections between the Straight line function and the EllipticCurve function
intersections = calc_intersection(domain=dom, ecurve_power2_partial_func=secp256k1_pow2, straight_line_partial_func=straight_line_partial)
# First plot
ax1_intersec.plot(intersections.x[0], intersections.y[0], "o", label="P", c='b')
ax1_intersec.plot(intersections.x[2], intersections.y[2], "o", label="Q", c='k')
ax1_intersec.legend()
ax1_intersec.set_xlabel("Fig 1")
ax1_intersec.axhline(linewidth=1, color='k')
# Second plot
plot_straight_line(axs=ax2_intersec, domain=dom, straight_line_partial_func=straight_line_partial, title="")
ax2_intersec.plot(intersections.x[0], intersections.y[0], "o", label="P", c='b')
ax2_intersec.plot(intersections.x[2], intersections.y[2], "o", label="Q", c='k')
ax2_intersec.plot(intersections.x[1], intersections.y[1], "o", label="Intersection", c='g')
ax2_intersec.legend()
ax2_intersec.set_xlabel("Fig 2")
ax2_intersec.axhline(linewidth=1, color='k')
# Third plot
plot_straight_line(axs=ax3_intersec, domain=dom, straight_line_partial_func=straight_line_partial, title="")
ax3_intersec.plot(intersections.x[0], intersections.y[0], "o", label="P", c='b')
ax3_intersec.plot(intersections.x[2], intersections.y[2], "o", label="Q", c='k')
ax3_intersec.plot(intersections.x[1], intersections.y[1], "o", label="Intersection", c='g')
ax3_intersec.plot(intersections.x[1], intersections.y[1]*-1, "o", label="R", c='r')
ax3_intersec.legend()
ax3_intersec.set_xlabel("Fig 3")
ax3_intersec.axhline(linewidth=1, color='k')
ax3_intersec.vlines(intersections.x[1], ymin=intersections.y[1], ymax=intersections.y[1]*-1, colors='r', linestyles='dashed')
# Fourth plot
ax4_intersec.plot(intersections.x[0], intersections.y[0], "o", label="P", c='b')
ax4_intersec.plot(intersections.x[2], intersections.y[2], "o", label="Q", c='k')
ax4_intersec.plot(intersections.x[1], intersections.y[1]*-1, "o", label="R", c='r')
ax4_intersec.legend()
ax4_intersec.set_xlabel("Fig 4")
ax4_intersec.axhline(linewidth=1, color='k')
print("")
```
Steps to find $P+Q$
- Fig1: If you have point $P$ (-1, 2.5) and $Q$ (4.0, -8.4) on the elliptic curve
- Fig2: Draw a line between the points, find the intersect point at (1.7, -3.5)
- Fig3: Reflect the intersect point across the x-axis to found the new point, $R$ (1.7, 3.5)
- Fig4: $P+Q=R$
With elliptic curve cryptography, you do not just add two arbitrary points together, but rather you start with a base point on the curve and add that point to itself. If we start with a base point $P$ then we have to find a line that goes through $P$ and $P$. Unfortunately there are infinitely many such lines, so in this special case the tangent line is used. The same process is followed now to calculate $P+P=2P$:
```python
fig_ecurve, (ax1_ecurve, ax2_ecurve, ax3_ecurve, ax4_ecurve) = plt.subplots(1,4, sharex='col', sharey='row', gridspec_kw={'hspace': 0.2, 'wspace': 0.1},figsize=(20,5))
plot_elliptic_curve(ax1_ecurve, dom, secp256k1_pow2, 'Initial point P')
plot_elliptic_curve(ax2_ecurve, dom, secp256k1_pow2, 'Find tangent line that goes through P and P')
plot_elliptic_curve(ax3_ecurve, dom, secp256k1_pow2, 'Reflect the intersection point across the x-axis')
plot_elliptic_curve(ax4_ecurve, dom, secp256k1_pow2, 'P+P=2P')
# Choose a arbitrary point P on the elliptic curve
p_points = ecurve(pd.array([-1.3, -1.31]), secp256k1_pow2)
p_point1 = (p_points.x[0], p_points.y[0])
p_point2 = (p_points.x[1], p_points.y[1])
m, c = calc_straight_line_params(point1=p_point1, point2=p_point2) # Calculate straight line paramaters giving the points
straight_line_partial = lambda x: straight_line(m, c, x) # Straight line function with paramaters
# Calculate intersections between the Straight line function and the EllipticCurve function
intersections_ecurve = calc_intersection(domain=dom, ecurve_power2_partial_func=secp256k1_pow2, straight_line_partial_func=straight_line_partial)
# First plot
ax1_ecurve.plot(p_points.x[0], p_points.y[0], "o", label="P", c='b')
ax1_ecurve.legend()
ax1_ecurve.set_xlabel("Fig 1")
ax1_ecurve.axhline(linewidth=1, color='k')
# Second plot
plot_straight_line(axs=ax2_ecurve, domain=dom, straight_line_partial_func=straight_line_partial, title="")
ax2_ecurve.plot(p_points.x[0], p_points.y[0], "o", label="P", c='b')
ax2_ecurve.plot(intersections_ecurve.x[0], intersections_ecurve.y[0], "o", label="Intersection", c='g')
ax2_ecurve.legend()
ax2_ecurve.set_xlabel("Fig 2")
ax2_ecurve.axhline(linewidth=1, color='k')
# Third plot
plot_straight_line(axs=ax3_ecurve, domain=dom, straight_line_partial_func=straight_line_partial, title="")
# ax3_ecurve.plot(intersections_ecurve.x[0], intersections_ecurve.y[0], "o", label="P", c='b')
ax3_ecurve.plot(p_points.x[0], p_points.y[0], "o", label="P", c='b')
ax3_ecurve.plot(intersections_ecurve.x[0], intersections_ecurve.y[0], "o", label="Intersection", c='g')
ax3_ecurve.plot(intersections_ecurve.x[0], intersections_ecurve.y[0]*-1, "o", label="P+P=2P", c='r')
ax3_ecurve.legend()
ax3_ecurve.set_xlabel("Fig 3")
ax3_ecurve.axhline(linewidth=1, color='k')
ax3_ecurve.vlines(intersections_ecurve.x[0], ymin=intersections_ecurve.y[0], ymax=intersections_ecurve.y[0]*-1, colors='r', linestyles='dashed')
# Fourth plot
ax4_ecurve.plot(p_points.x[0], p_points.y[0], "o", label="P", c='b')
# ax4_ecurve.plot(intersections_ecurve.x[0], intersections_ecurve.y[0], "o", label="P", c='b')
ax4_ecurve.plot(intersections_ecurve.x[0], intersections_ecurve.y[0]*-1, "o", label="P+P=2P", c='r')
ax4_ecurve.legend()
ax4_ecurve.set_xlabel("Fig 4")
ax4_ecurve.axhline(linewidth=1, color='k')
print("")
```
Now that we have $2P$, we can add $P$ again to get $3P$, see the example below which follows the same process as before. Draw a line between $P$ and $2P$, find the intersect and reflect this intersect value across the x-axis to find $3P$.
```python
fig_ecurve3P, (ax1_ecurve3P, ax2_ecurve3P, ax3_ecurve3P, ax4_ecurve3P) = plt.subplots(1,4, sharex='col', sharey='row', gridspec_kw={'hspace': 0.2, 'wspace': 0.1},figsize=(20,5))
plot_elliptic_curve(ax1_ecurve3P, dom, secp256k1_pow2, 'P + 2P')
plot_elliptic_curve(ax2_ecurve3P, dom, secp256k1_pow2, 'Draw a line that goes through P and 2P')
plot_elliptic_curve(ax3_ecurve3P, dom, secp256k1_pow2, 'Reflect the intersection point across the x-axis')
plot_elliptic_curve(ax4_ecurve3P, dom, secp256k1_pow2, '2P+P=3P')
# Use P and 2P from previous run
p_point1 = (p_points.x[0], p_points.y[0])
p_point2 = (intersections_ecurve.x[0], intersections_ecurve.y[0]*-1)
m, c = calc_straight_line_params(point1=p_point1, point2=p_point2) # Calculate straight line paramaters giving the points
straight_line_partial = lambda x: straight_line(m, c, x) # Straight line function with paramaters
# Calculate intersections between the Straight line function and the EllipticCurve function
intersections = calc_intersection(domain=dom, ecurve_power2_partial_func=secp256k1_pow2, straight_line_partial_func=straight_line_partial)
# First plot
ax1_ecurve3P.plot(p_points.x[0], p_points.y[0], "o", label="P", c='b')
ax1_ecurve3P.plot(intersections_ecurve.x[0], intersections_ecurve.y[0]*-1, "o", label="2P", c='r')
ax1_ecurve3P.legend()
ax1_ecurve3P.set_xlabel("Fig 1")
ax1_ecurve3P.axhline(linewidth=1, color='k')
# Second plot
plot_straight_line(axs=ax2_ecurve3P, domain=dom, straight_line_partial_func=straight_line_partial, title="")
ax2_ecurve3P.plot(p_points.x[0], p_points.y[0], "o", label="P", c='b')
ax2_ecurve3P.plot(intersections_ecurve.x[0], intersections_ecurve.y[0]*-1, "o", label="2P", c='r')
ax2_ecurve3P.plot(intersections.x[1], intersections.y[1], "o", label="Intersection", c='g')
ax2_ecurve3P.legend()
ax2_ecurve3P.set_xlabel("Fig 2")
ax2_ecurve3P.axhline(linewidth=1, color='k')
# Third plot
plot_straight_line(axs=ax3_ecurve3P, domain=dom, straight_line_partial_func=straight_line_partial, title="")
ax3_ecurve3P.plot(p_points.x[0], p_points.y[0], "o", label="P", c='b')
ax3_ecurve3P.plot(intersections_ecurve.x[0], intersections_ecurve.y[0]*-1, "o", label="2P", c='r')
ax3_ecurve3P.plot(intersections.x[1], intersections.y[1], "o", label="Intersection", c='g')
ax3_ecurve3P.plot(intersections.x[1], intersections.y[1]*-1, "o", label="2P+P=3P", c='m')
ax3_ecurve3P.legend()
ax3_ecurve3P.set_xlabel("Fig 3")
ax3_ecurve3P.axhline(linewidth=1, color='k')
ax3_ecurve3P.vlines(intersections.x[1], ymin=intersections.y[1], ymax=intersections.y[1]*-1, colors='r', linestyles='dashed')
# Fourth plot
ax4_ecurve3P.plot(p_points.x[0], p_points.y[0], "o", label="P", c='b')
ax4_ecurve3P.plot(intersections_ecurve.x[0], intersections_ecurve.y[0]*-1, "o", label="2P", c='r')
ax4_ecurve3P.plot(intersections.x[1], intersections.y[1]*-1, "o", label="3P", c='m')
ax4_ecurve3P.legend()
ax4_ecurve3P.set_xlabel("Fig 4")
ax4_ecurve3P.axhline(linewidth=1, color='k')
print("")
```
The same process now can be used to calculate $4P, 5P ... nP$.
The base point used in secp256k1 curve has the following ($x, y$) coordinates:<br>
$x:$ 55066263022277343669578718895168534326250603453777594175500187360389116729240<br>
$y:$ 32670510020758816978083085130507043184471273380659243275938904335757337482424
In the examples above an arbitrary base point was chosen so that all the calculated points are visible in a small graph.
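The chord-and-tangent construction described above also has a simple algebraic form. The sketch below (an illustrative helper named `ec_add`, not part of the original notebook) implements point addition and doubling over the real numbers for the curve $y^2=x^3+7$; over a finite field the divisions would be replaced by multiplication with a modular inverse.

```python
# Chord-and-tangent point addition on y^2 = x^3 + a*x + b over the reals.
# Illustrative sketch only; real ECC works modulo a prime (see below).
def ec_add(P, Q, a=0):
    x1, y1 = P
    x2, y2 = Q
    if (x1, y1) == (x2, y2):
        m = (3 * x1**2 + a) / (2 * y1)   # slope of the tangent line (doubling)
    else:
        m = (y2 - y1) / (x2 - x1)        # slope of the chord through P and Q
    x3 = m**2 - x1 - x2                   # x of the third intersection with the curve
    y3 = m * (x3 - x1) + y1
    return (x3, -y3)                      # reflect across the x-axis

P = (-1.0, 6**0.5)               # a point on y^2 = x^3 + 7
print(ec_add(P, P))               # 2P
print(ec_add(ec_add(P, P), P))    # 3P
```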
## Addition properties
In this finite field, addition also has the property of
\begin{equation}
nP+rP = (n+r)P
\label{eq:addition}
\tag{3}
\end{equation}
An example is $4P+6P = (4+6)P = 10P$. Thanks to this property, the easiest way to calculate for example $10P$ requires only 4 calculations:
$$
\begin{align}
P+P &= 2P \\
2P+2P &= 4P \\
4P+4P &= 8P \\
8P+2P &= 10P \\
\end{align}
$$
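This repeated-doubling idea is exactly how $nP$ is computed efficiently in practice. Below is a short sketch of the double-and-add algorithm (reusing the hypothetical `ec_add` helper from the sketch above); it needs only about $\log_2 n$ curve operations instead of $n-1$.

```python
def scalar_mult(n, P):
    """Compute nP by double-and-add, scanning the bits of n."""
    result = None                 # None plays the role of the point at infinity
    addend = P
    while n:
        if n & 1:                 # if the current bit of n is set, add the addend
            result = addend if result is None else ec_add(result, addend)
        addend = ec_add(addend, addend)   # double for the next bit
        n >>= 1
    return result

print(scalar_mult(10, P))         # 10P, using 4 additions/doublings as above
```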
## Diffie–Hellman key exchange
In this section we will explore the Diffie–Hellman key exchange (DH). This will serve as a basis to understand elliptic curve cryptography. DH is one of the earliest practical examples of public key exchange implemented within the field of cryptography.
Encryption is the process of converting information or data into a code to allow only the intended recipient to decode and read the message. Oftentimes this encryption/decryption is done with a shared secret key. In the following example we will show how two parties can obtain a shared key (in the past, they physically shared the key on a piece of paper).
Let's start with the example of Nick and Connie, who want to send messages to each other without being eavesdropped on. They will share an arbitrary number: $g$. This is sent over the internet and could have been intercepted, but that does not matter. Nick and Connie also create their own secret key (a big number) and do not share it with anybody:
$$
\begin{align}
Nick&: n \\
Connie&: c
\end{align}
$$
Then they will raise the arbitrary number $g$ to the power of their secret key:
$$
\begin{align}
Nick: g^n &= H_n \\
Connie: g^c &= H_c
\tag{4}
\end{align}
$$
Once they have their $H$ term, they exchange it with each other. $g$, $H_n$ and $H_c$ are publicly sent and anybody can view these values. Once they have the other party's $H$ term, they raise it to the power of their own secret key.
$$
\begin{align}
Nick: H_c^n &= S \\
Connie: H_n^c &= S
\tag{5}
\end{align}
$$
By doing this, they end up with the same number $S$: this is the shared key, and neither of them had to send it to the other explicitly. Now, for example, to encrypt a message using a Caesar cipher (a simple cipher) you can shift all the letters in your message by $S$ letters, and shift them back by $S$ letters to decrypt (decipher) it. You now have a method to encrypt your communication with each other.
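As a toy illustration (the helper below is ours, not part of any real cryptographic library), a Caesar cipher simply shifts each letter by the shared secret $S$:

```python
def caesar(text, shift):
    # Shift each letter by 'shift' positions, wrapping around the alphabet
    return "".join(chr((ord(ch) - ord('a') + shift) % 26 + ord('a'))
                   if ch.isalpha() else ch
                   for ch in text.lower())

S = 10                                    # an example shared secret
secret = caesar("meet at noon", S)        # encrypt
print(secret, "->", caesar(secret, -S))   # decrypt by shifting back
```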
To prove equation 5, you can substitute equation 4 into 5:
$$
Nick: H_c^n = (g^c)^n = g^{cn} = S
$$
$$
Connie: H_n^c = (g^n)^c = g^{nc} = S
$$
Unfortunately we are not done yet; remember that the values $g$, $H_n$ and $H_c$ are sent publicly. Calculating, for example, Nick's private $n$ would be trivial since the equation is
$$
Nick: g^n = H_n
$$
Calculating $n$ is as easy as solving this log problem $2^n=16$
What about this discrete log problem: $2^n mod 17 = 16$. This becomes difficult because of the [modulus](https://en.wikipedia.org/wiki/Modular_arithmetic), you do not know how many times over 17 we have gone. Another example of modulus is a clock. If I told you that the start time is 12 o'clock and the end time is 1 o'clock and I ask you how many hours has passed you would not know because you do not know how many times the clock went round. It could be 1 hour, 13 hours or 25 hours and so on. It is because of this fact that you have to start guessing, the discrete log problem is the basis for the DH key exchange. The calculations to create the shared key is simple, but it very difficult to solve for the private key.
Now you can just use the modulus operator in equations 4 and 5
$$
\begin{align}
Nick: g^n \, mod(p) &= H_n \\
Connie: g^c \, mod(p) &= H_c
\tag{6}
\end{align}
$$
You will end up with a shared key again, but this time it is very difficult, almost impossible, to figure out what the private keys are if the private keys are very big.
$$
\begin{align}
Nick: H_c^n \, mod(p) &= S \\
Connie: H_n^c \, mod(p) &= S
\tag{7}
\end{align}
$$
A more practical example: Nick and Connie both decide publicly on a generator, $G=3$, and a prime modulus, $P=17$. Then Connie decides on a random private key, $c=15$ and Nick does the same $n=13$.
$$
\begin{align}
Nick: G^n \, mod(p) &= H_n \\
3^{13} mod 17 &= 12\\
Connie: G^c \, mod(p) &= H_c \\
3^{15} mod 17 &= 6
\tag{6}
\end{align}
$$
Nick sends $H_n=12$ publicly to Connie, and Connie sends $H_c=6$ publicly to Nick. Now comes the heart of the trick: Nick takes Connie's publicly sent value and raises it to the power of his private number, and vice versa, to obtain the same shared secret of 10.
$$
\begin{align}
Nick: H_c^n \, mod(p) &= S \\
6^{13} mod 17 &= 10 \\
Connie: H_n^c \, mod(p) &= S \\
12^{15}mod 17 &= 10
\tag{7}
\end{align}
$$
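These numbers can be checked in one line each with Python's built-in modular exponentiation `pow(base, exp, mod)`:

```python
g, p = 3, 17          # public generator and prime modulus
n, c = 13, 15         # Nick's and Connie's private keys
Hn, Hc = pow(g, n, p), pow(g, c, p)     # publicly exchanged values
print(Hn, Hc)                            # -> 12 6
print(pow(Hc, n, p), pow(Hn, c, p))      # both give the shared secret 10
```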
For DH, the reason that we choose a prime number for the modulus is that this guarantees the group is cyclic. It also has another property: because the modulus is a prime ($p$), a generator ($g$) exists. A generator is smaller than the prime ($<p$), and it will produce all the numbers from $1$ to $p-1$ exactly once with $g^x \bmod p$ where $x = 1, 2, ..., p-1$. An example with a prime of 7, for which 5 is a generator:
$$
\begin{align}
5^1 mod \, 7 &= 5 \\
5^2 mod \, 7 &= 4 \\
5^3 mod \, 7 &= 6 \\
5^4 mod \, 7 &= 2 \\
5^5 mod \, 7 &= 3 \\
5^6 mod \, 7 &= 1 \\
\end{align}
$$
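A quick check that 5 really does produce every residue from 1 to 6 modulo 7:

```python
p, g = 7, 5
print(sorted(pow(g, x, p) for x in range(1, p)))   # -> [1, 2, 3, 4, 5, 6]
```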
## Elliptic curve discrete log problem
If you look back now at the elliptic curve addition rule in equation 3, $nP$ is represented by $nP=P+P+P+...$<br>
We can use the same form of equation 4 in the DH and apply it to EC:
$$
\begin{align}
nG &= H_n
\tag{8}
\end{align}
$$
where $G$ is the starting point and $H_n$ is the end point. $n$ is the number of times that $G$ is added to itself. Even if you know what $G$ and $H_n$ are, it is very difficult to figure out what $n$ is.
Knowing this, we can just use the DH procedure with these elliptic curve equations and it ends up working the same way.
$$
\begin{align}
Nick: nG &= H_n \\
Connie: cG &= H_c
\tag{9}
\end{align}
$$
where $G$, $H_n$ and $H_c$ are publicly sent. The shared secret can also be calculated in the same way as in DH:
$$
\begin{align}
Nick: nH_c &= S \\
Connie: cH_n &= S
\tag{10}
\end{align}
$$
With DH, the modulus takes the possible answers to the exponent problem and reduces them to a finite set of numbers. Using the $2^n \bmod 17 = 16$ example again: because of the mod 17, you limit the possible answers to 16. Now in EC, you can also take the modulus of the curve, turning it from a function with infinitely many values into a finite set of values.
\begin{align}
y^2 &= x^3+ax+b\\
y^2 mod p &= (x^3+ax+b)\, mod p
\label{eq:ecurve}
\tag{11}
\end{align}
where $p$ is a prime number; a prime is chosen to ensure that addition and multiplication operations can always be undone.<br>
In secp256k1, $p$ is the largest prime that is smaller than $2^{256}$, this would be $2^{256}–2^{32}–977 = $
```python
p = 2**256 - 2**32 - 977
p
```
115792089237316195423570985008687907853269984665640564039457584007908834671663
This means that x and y coordinates of the elliptic curve can be any number up to this prime.
```python
# http://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
def egcd(a, b):
x,y, u,v = 0,1, 1,0
while a != 0:
q, r = b//a, b%a
m, n = x-u*q, y-v*q
b,a, x,y, u,v = a,r, u,v, m,n
return b, x, y
# calculate modular inverse
def modinv(a, m):
g, x, y = egcd(a, m)
if g != 1:
return None # modular inverse does not exist
else:
return x % m
# ecurve(dom, secp256k1_pow2).y % 7777
# secp256k1_pow2(dom) % 5
```
```python
# https://andrea.corbellini.name/2015/05/23/elliptic-curve-cryptography-finite-fields-and-discrete-logarithms/
def extended_euclidean_algorithm(a, b):
"""
Returns a three-tuple (gcd, x, y) such that
a * x + b * y == gcd, where gcd is the greatest
common divisor of a and b.
This function implements the extended Euclidean
algorithm and runs in O(log b) in the worst case.
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r != 0:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
def inverse_of(n, p):
"""
Returns the multiplicative inverse of
n modulo p.
This function returns an integer m such that
(n * m) % p == 1.
"""
gcd, x, y = extended_euclidean_algorithm(n, p)
assert (n * x + p * y) % p == gcd
if gcd != 1:
# Either n is 0, or p is not a prime number.
raise ValueError(
'{} has no multiplicative inverse '
'modulo {}'.format(n, p))
else:
return x % p
```
```python
# https://andrea.corbellini.name/2015/05/23/elliptic-curve-cryptography-finite-fields-and-discrete-logarithms/
# https://www.youtube.com/watch?v=NnyZZw8d1wI
p = 19
ecurve_pow2_mod = lambda x: ecurve_power2(-7, 10, x) % p
dom = domain(0,p, 1) # Domain
dom = pd.array(dom)
dom = dom[dom>0]
y2_mod = pd.DataFrame({'x': dom, 'y_1': ecurve_pow2_mod(dom), 'y_2': ecurve_pow2_mod(dom)})
# plt.plot(y2_mod.x, y, 'ro')
# plt.show()
y2_mod
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>x</th>
<th>y_1</th>
<th>y_2</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>1.0</td>
<td>4.0</td>
<td>4.0</td>
</tr>
<tr>
<th>1</th>
<td>2.0</td>
<td>4.0</td>
<td>4.0</td>
</tr>
<tr>
<th>2</th>
<td>3.0</td>
<td>16.0</td>
<td>16.0</td>
</tr>
<tr>
<th>3</th>
<td>4.0</td>
<td>8.0</td>
<td>8.0</td>
</tr>
<tr>
<th>4</th>
<td>5.0</td>
<td>5.0</td>
<td>5.0</td>
</tr>
<tr>
<th>5</th>
<td>6.0</td>
<td>13.0</td>
<td>13.0</td>
</tr>
<tr>
<th>6</th>
<td>7.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>7</th>
<td>8.0</td>
<td>10.0</td>
<td>10.0</td>
</tr>
<tr>
<th>8</th>
<td>9.0</td>
<td>11.0</td>
<td>11.0</td>
</tr>
<tr>
<th>9</th>
<td>10.0</td>
<td>9.0</td>
<td>9.0</td>
</tr>
<tr>
<th>10</th>
<td>11.0</td>
<td>10.0</td>
<td>10.0</td>
</tr>
<tr>
<th>11</th>
<td>12.0</td>
<td>1.0</td>
<td>1.0</td>
</tr>
<tr>
<th>12</th>
<td>13.0</td>
<td>7.0</td>
<td>7.0</td>
</tr>
<tr>
<th>13</th>
<td>14.0</td>
<td>15.0</td>
<td>15.0</td>
</tr>
<tr>
<th>14</th>
<td>15.0</td>
<td>12.0</td>
<td>12.0</td>
</tr>
<tr>
<th>15</th>
<td>16.0</td>
<td>4.0</td>
<td>4.0</td>
</tr>
<tr>
<th>16</th>
<td>17.0</td>
<td>16.0</td>
<td>16.0</td>
</tr>
<tr>
<th>17</th>
<td>18.0</td>
<td>16.0</td>
<td>16.0</td>
</tr>
</tbody>
</table>
</div>
```python
ecurve_pow2_mod(5)
```
5
```python
inverse_of(16, 19)
```
6
```python
inverse_of(5, 19)
```
4
```python
x = 6
g = [5,2,3]
gp = pd.array(g)
df = pd.DataFrame({'x': gp, 'diff': gp-4})
df = df.sort_values(by ='diff' )
f = lambda x: x + 1
[f(a) for a in df.x]  # apply f element-wise to the sorted x values
```
```python
# https://medium.com/asecuritysite-when-bob-met-alice/nothing-up-my-sleeve-creating-a-more-trust-world-with-the-elliptic-curve-pedersen-commitment-7b363d136579
2 ^ 1  # bitwise XOR of 2 and 1
```
```python
a = 1
b = 2
a ^= b
a
```
3
```python
b
```
2
```python
```
# Four level system
We study population dynamics and the effects of optical pumping on the spectral line signal for near-resonant cyclic $D_{2}$ line transitions in Cs and Na atoms with the help of a simplified four-level system. A classical laser field of frequency $ω_{L}$ is in resonance with the transition between the highest energy ground state hyperfine component $\left|2\right>$ and the highest energy excited state hyperfine component $\left|4\right>$. The intensity of the excitation laser field follows a Gaussian pulse shape that is characterized by the Rabi frequency coupling strengths $Ω_{24}(t)$ and $Ω_{23}(t)$ between states $\left|2\right> ↔ \left|4\right>$ and $\left|2\right> ↔ \left|3\right>$ respectively. State $\left|1\right>$ is a population trapping ground state (dark state) that does not interact with the excitation laser field.
```python
%%svg
4-level-schematics.svg
```
Corresponding $D_{2}$ line transition hyperfine level schematics in Na and Cs atoms.
```python
%%svg
hyperfine-level-shematics.svg
```
## Atomic and interaction Hamiltonian using rotating wave approximation (RWA)
### Lindblad master equation
The standard approach for deriving the equation of motion for a system interacting with its environment is to expand the scope of the system to include the environment. The combined quantum system is closed and its evolution is governed by the von Neumann equation
\begin{equation}
\begin{aligned}
\dot{ρ}(t) &=-\frac{i}{ℏ}[H_{tot},ρ_{tot}(t)]\\
H_{tot} &= H_{sys} + H_{env} + H_{int}\text{,}
\end{aligned}
\end{equation}
where the total Hamiltonian $H_{tot}$ includes the original system Hamiltonian $H_{sys}$, the Hamiltonian for the environment $H_{env}$ and the interaction Hamiltonian $H_{int}$ between the system and its environment. To obtain the dynamics of system $H_{sys}$, we can perform a partial trace over the environmental degrees of freedom in von Neumann equation. The most general trace-preserving and completely positive form of this evolution is the Lindblad master equation <cite data-cite="communmathphys.48.119">[[Lindblad(1976)]][communmathphys.48.119]</cite>, <cite data-cite="Gardiner_2004">[[Gardiner and Zoller(2004)]][Gardiner_2004]</cite>, <cite data-cite="Walls_2008">[[Walls and Milburn(2008)]][Walls_2008]</cite>
\begin{equation}
\begin{aligned}
\dot{ρ}(t) &=-\frac{i}{ℏ}[H(t),ρ(t)] + \sum_n \frac{1}{2} \left(2 C_n \rho(t) C_n^{\dagger} - ρ(t) C_n^{\dagger} C_n - C_n^{\dagger} C_n ρ(t)\right)\\
H(t) &= H_{sys} + H_{int}\\
C_n &= \sqrt{γ_{n}} A_{n}\text{,}
\end{aligned}
\end{equation}
where $C_n$ are wave function collapse operators, $A_{n}$ are operators through which the environment couples to the system in $H_{int}$ and $γ_{n}$ are the corresponding decay rates.
### Atomic Hamiltonian $H_{sys}$ (RWA)
Let us obtain the optical Bloch equations for the four-level system. Using the dressed state notation given in the figure, the system Hamiltonian can be written as
\begin{equation}
\begin{aligned}
H_{sys} &= ε_{1} \left|1,n+1\right>\left< 1,n+1\right| + ε_{2} \left|2,n+1\right>\left< 2,n+1\right| + ε_{3} \left|3,n\right>\left< 3,n\right| + ε_{4} \left|4,n\right>\left< 4,n\right|\\
&= \left[\begin{smallmatrix} ε_{1} & 0 & 0 & 0\\0 & ε_{2} & 0 & 0\\0 & 0 & ε_{3} & 0\\0 & 0 & 0 & ε_{4} \end{smallmatrix}\right]\text{.}
\end{aligned}
\end{equation}
And the energies of the system Hamiltonian can be written as
\begin{equation}
\begin{aligned}
ε_{1} &= -ℏ ω_{21} &
ε_{2} &= 0 &
ε_{3} &= Δ_{23} = -ω_{43} - Δ &
ε_{4} &= Δ_{24} = - Δ \text{,}
\end{aligned}
\end{equation}
where $ω_{ab} = ΔE_{ab}/ℏ = (E_{a} - E_{b})/ℏ$, and the energy level difference in SI units, expressed in terms of the frequency $ν$ or the cyclic frequency $ω$, is written as $ΔE = h ν = ℏ ω$.
The corresponding four-level system decay channels are the following: $A_{1}$ from state $\left|4,n\right>$ to state $\left|2,n+1\right>$, $A_{2}$ from state $\left|3,n\right>$ to state $\left|2,n+1\right>$, and $A_{3}$ from state $\left|3,n\right>$ to state $\left|1,n+1\right>$, such that
\begin{equation}
\begin{aligned}
A_{1} &= \left|2,n+1\right>\left< 4,n\right| = \left[\begin{smallmatrix}0 & 0 & 0 & 0\\0 & 0 & 0 & 1\\0 & 0 & 0 & 0\\0 & 0 & 0 & 0\end{smallmatrix}\right] \\
A_{2} &= \left|2,n+1\right>\left< 3,n\right| = \left[\begin{smallmatrix}0 & 0 & 0 & 0\\0 & 0 & 1 & 0\\0 & 0 & 0 & 0\\0 & 0 & 0 & 0\end{smallmatrix}\right] \\
A_{3} &= \left|1,n+1\right>\left< 3,n\right| = \left[\begin{smallmatrix}0 & 0 & 1 & 0\\0 & 0 & 0 & 0\\0 & 0 & 0 & 0\\0 & 0 & 0 & 0\end{smallmatrix}\right]\text{.}
\end{aligned}
\end{equation}
Corresponding wave function collapse operators are
\begin{equation}
\begin{aligned}
C_{1} &= \sqrt{γ_{1}} A_{1} &
C_{2} &= \sqrt{γ_{2}} A_{2} &
C_{3} &= \sqrt{γ_{3}} A_{3} \\
γ_{1} &= Π_{42} Γ_{4} &
γ_{2} &= Π_{32} Γ_{3} &
γ_{3} &= Π_{31} Γ_{3}\\
Π_{42} &= 1 & & &
Π_{31} &= 1-Π_{32} \text{,}
\end{aligned}
\end{equation}
where $Γ_{4} = Γ_{3} = Γ = 1/τ_{e}$ is the decay rate of the excited states, $τ_{e}$ is the natural lifetime of the excited state and $Π_{F_{e} F_{g}}$ is the branching ratio for the transition $\left|F_{e}\right> → \left|F_{g}\right>$.
### Laser-atom interaction Hamiltonian $H_{int}$ using rotating wave approximation (RWA)
The interaction of bound particles with laser light most often originates with the electric-dipole interaction. Therefore, the atom-laser interaction Hamiltonian $H_{int}$ in the dipole approximation is given by interaction energy operator, that is the projection of the electric dipole moment $\vec{d}$ onto the electric field
\begin{equation}
\begin{aligned}
H_{int} &= -\vec{d} · \vec{E}(t)\\
\vec{E}(t) &= \hat{\mathbf{e}} E(t)\cos\left(ω_{L} t - φ\right)\\
E(t) &= E_{0} \exp\left(-2\left(\frac{t}{τ_{tr}}\right)^{2}\right)
\end{aligned}
\end{equation}
and where the atomic dipole operator $\vec{d}$ is given in terms of the atomic electron position $\vec{r}_{e}$ as
\begin{equation}
\vec{d} = - \boldsymbol{e} \vec{r}_{e} \text{,}
\end{equation}
where we denote the fundamental charge by $\boldsymbol{e}$, so that the electron charge is $q = - \boldsymbol{e}$. The dipole transition moment between states $\left|ψ_{a}\right>$ and $\left|ψ_{b}\right>$, projected onto the field unit vector $\hat{\mathbf{e}}$, is
\begin{equation}
d_{ψ_{a}, ψ_{b}} = \left< ψ_{a}\,\middle|\, \vec{d} · \hat{\mathbf{e}}\,\middle|\,ψ_{b}\right> \text{.}
\end{equation}
For linear polarization we can take the unit vector $\hat{\mathbf{e}}$ to be real and write the interaction Hamiltonian between states $\left|ψ_{a}\right>$ and $\left|ψ_{b}\right>$ as <cite data-cite="Shore_2011">[[Shore(2011)]][Shore_2011]</cite>
\begin{equation}
\begin{aligned}
H_{int,ab} &= -d_{ψ_{a}, ψ_{b}} E(t) \cos\left(ω_{L} t - φ\right) ≡ ℏ Ω_{ab}(t) \cos\left(ω_{L}t - φ\right) \text{,}
\end{aligned}
\end{equation}
where we denote the Rabi frequency by $Ω_{ab}(t)$. There are many definitions of the Rabi frequency $Ω$ in the literature. The chosen Rabi frequency $Ω_{ab}(t)$ refers to the frequency of population oscillations of a resonant two-state system, i.e., when the Rabi frequency remains constant, the populations of a resonant two-state system undergo periodic Rabi oscillations at the Rabi frequency $Ω$. The Rabi frequency is defined by
\begin{equation}
Ω_{ab}(t) ≡ -\frac{d_{ψ_{a}, ψ_{b}} E(t)}{ℏ} \text{.}
\end{equation}
In the rotating reference frame, the off-diagonal elements of the atom-laser interaction Hamiltonian $H_{int, ab}$ <cite data-cite="Shore_2011">[[Shore(2011)]][Shore_2011]</cite> are
\begin{equation}
H_{int, ab} = H_{int, ba}^{*} = -d_{ψ_{a}, ψ_{b}} E(t) \cos\left(ω_{L} t - φ\right) e^{-iω_{L}t} = \frac{1}{2} ℏ Ω_{ab}(t) \left(e^{-i φ} + e^{-2i ω_{L}t + iφ}\right)
\end{equation}
The rotating wave approximation (RWA) of the atom-laser interaction is performed by disregarding the terms that vary as $2ω_{L}t$ (counter-rotating terms) compared to the constant terms
\begin{equation}
\cos\left(ω_{L} t - φ\right) e^{-iω_{L}t} = \frac{1}{2} \left(e^{-i φ} + e^{-2i ω_{L}t + iφ}\right) \xrightarrow{RWA} \frac{1}{2} e^{-i φ}
\end{equation}
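As a consistency check of the RWA, the counter-rotating term indeed averages to zero over one optical period while the constant term survives; a minimal SymPy sketch:
```python
from sympy import symbols, integrate, exp, I, pi, simplify

t, wL, phi = symbols('t omega_L varphi', real=True, positive=True)
T = 2*pi/wL                                    # one optical period
counter = exp(-2*I*wL*t + I*phi)/2             # counter-rotating term
co = exp(-I*phi)/2                             # constant (co-rotating) term
print(simplify(integrate(counter, (t, 0, T))/T))   # -> 0
print(simplify(integrate(co, (t, 0, T))/T))        # -> exp(-I*varphi)/2
```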
Note that the absolute value of the phase $φ$ is not controllable; only when one compares two pulses, both within a coherence time, or pulses affecting two locations, does one need to keep track of phases. Therefore, the phase $φ$ can be taken as zero at any convenient time
\begin{equation}
\frac{1}{2} e^{-i φ} → \frac{1}{2}\text{.}
\end{equation}
Thus the Rabi frequency can be made real, $Ω_{ab}^{*}(t) = Ω_{ab}(t)$, and we obtain the atom-laser interaction Hamiltonian in the rotating wave approximation between states $\left|ψ_{a}\right>$ and $\left|ψ_{b}\right>$ as
\begin{equation}
H_{int, ab} = \frac{1}{2} ℏ Ω_{ab}(t) \text{.}
\end{equation}
And the atom-laser interaction Hamiltonian in the rotating wave approximation is given by
\begin{equation}
H_{int} = \frac{ℏ Ω_{23}(t)}{2}\left|2,n+1\right>\left< 3,n\right| + \frac{ℏ Ω_{23}^{*}(t)}{2}\left|3,n\right>\left< 2,n+1\right| + \frac{ℏ Ω_{24}(t)}{2}\left|2,n+1\right>\left< 4,n\right| + \frac{ℏ Ω_{24}^{*}(t)}{2}\left|4,n\right>\left< 2,n+1\right|
\end{equation}
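For constant Rabi frequencies this interaction Hamiltonian has the simple off-diagonal matrix structure shown above; a minimal QuTiP sketch with assumed numerical values (the full time-dependent version is constructed in the simulation code further down):
```python
import numpy as np
from qutip import basis

hbar = 1.0                                   # QuTiP convention used later in this notebook
O23, O24 = 2*np.pi*1.0e6, 2*np.pi*2.0e6      # assumed constant Rabi frequencies in s^-1
st1, st2, st3, st4 = [basis(4, k) for k in range(4)]
H_int = hbar*O23/2*(st2*st3.dag() + st3*st2.dag()) \
      + hbar*O24/2*(st2*st4.dag() + st4*st2.dag())
print(np.round(H_int.full().real/(2*np.pi*1e6), 3))   # in units of 2*pi MHz
```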
When simulating the dynamics of the Hamiltonian we model hyperfine transitions between states $\left|F_{g}\right> → \left|F_{e}\right>$ without taking into account Zeeman sublevels; therefore, for a linearly polarized excitation laser, the effective reduced dipole moment $d_{eff (F_{g} → F_{e})}$ <cite data-cite="Steck_Cs_2010">[[Steck(2010a)]][Steck_Cs_2010]</cite>, <cite data-cite="Steck_Na_2010">[[Steck(2010b)]][Steck_Na_2010]</cite> can be expressed as
\begin{equation}
\begin{aligned}
\left|d_{eff (F_{g} → F_{e})}\right|^{2} &= \left|\left< F_{g}\,\middle|\middle|\, \vec{d} · \hat{\mathbf{e}} \,\middle|\middle|\,F_{e}\right>\right|^{2} = S_{F_{g} F_{e}} \left|\left< J_{g}\,\middle|\middle|\, \vec{d} · \hat{\mathbf{e}}\,\middle|\middle|\,J_{e}\right>\right|^{2} \\
S_{F_{g} F_{e}} &= ( 2 F_{e} + 1)(2 J_{g} + 1 )
\begin{Bmatrix}
J_{g} & J_{e} & 1\\
F_{e} & F_{g} & I
\end{Bmatrix}^{2}\\
\sum_{F_{e}} S_{F_{g} F_{e}} &= 1 \text{,}
\end{aligned}
\end{equation}
where $S_{F_{g} F_{e}}$ are the dimensionless relative hyperfine transition-strength factors of the $\left|F_{g}\right> → \left|F_{e}\right>$ transitions and $I$ is the nuclear spin. The numerical value of the reduced dipole matrix element $\left< J_{g}\,\middle|\middle|\, \vec{d} · \hat{\mathbf{e}}\,\middle|\middle|\,J_{e}\right>$ can be calculated from the excited-state lifetime using the expression <cite data-cite="Loudon_2000">[[Loudon(2000)]][Loudon_2000]</cite>
\begin{equation}
\frac{1}{τ_{e}} = Γ_{J_{e} J_{g}} = \frac{ω_{0}^{3}}{3 π \mathit{ε}_{0} ℏ c^3} \frac{2 J_{g} + 1}{2 J_{e} + 1} \left|\left< J_{g}\,\middle|\middle|\, \vec{d}\,\middle|\middle|\,J_{e}\right>\right|^{2} \text{.}
\end{equation}
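The strength factors and the sum rule can be verified numerically with SymPy's Wigner 6-j symbol; a minimal sketch for the cesium $F_{g}=4$ ground state (quantum numbers from Steck (2010a)):
```python
from sympy import Rational
from sympy.physics.wigner import wigner_6j

Jg, Je, I = Rational(1, 2), Rational(3, 2), Rational(7, 2)   # cesium D2 line
Fg = 4
S = {Fe: (2*Fe + 1)*(2*Jg + 1)*wigner_6j(Jg, Je, 1, Fe, Fg, I)**2 for Fe in (3, 4, 5)}
print(S)                 # {3: 7/72, 4: 7/24, 5: 11/18}
print(sum(S.values()))   # 1, i.e. the sum rule holds
```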
It is convenient to rewrite the Rabi frequency $Ω_{ab}(t)$ using the reduced Rabi frequency $Ω_{red}(t)$ and the relative hyperfine transition-strength factors $S_{F_{g} F_{e}}$, where the reduced Rabi frequency is defined by
\begin{equation}
Ω_{red}(t) ≡ \frac{ E(t) \left< J_{g}\,\middle|\middle|\, \vec{d}\,\middle|\middle|\,J_{e}\right>}{ℏ} \text{.}
\end{equation}
This allows us to obtain the Rabi frequency between states $\left|ψ_{a}\right>$ and $\left|ψ_{b}\right>$ as
\begin{equation}
Ω_{ab}(t) = Ω_{red}(t) \sqrt{S_{F_{a} F_{b}}}
\end{equation}
## Remarks
### Critical Rabi frequencies for reduced Rabi frequency $Ω_{red}$
The critical Rabi frequency $Ω_{red,cr}$ is used to estimate at what reduced Rabi frequency value $≈ 63 \%$ of the population will be trapped in a dark state after the laser-atom interaction and the effects of optical pumping become pronounced in the spectral line signal.
\begin{equation}
\begin{aligned}
Ω_{red,cr} = \sqrt{\frac{4 τ_{e} ω_{43}^{2}}{τ_{tr} \Pi_{31} S_{32} \sqrt{\pi}}}
\end{aligned}
\end{equation}
The critical Rabi frequency $Ω_{red,cr99}$ is used to estimate at what reduced Rabi frequency value $≈ 99 \%$ of the population will be trapped in a dark state after the laser-atom interaction, leading to population depletion in a time period smaller than the interaction time $τ_{tr}$.
\begin{equation}
\begin{aligned}
Ω_{red,cr99} = \sqrt{-\ln\left( 1-\frac{99}{100}\right)} Ω_{red,cr} ≈ 2.15 Ω_{red,cr}
\end{aligned}
\end{equation}
### Spectral line signal
The spectral line signal $J(Δ, Ω_{red})$ is given by the number of photons emitted from an atom during its transit time through the excitation laser zone; therefore, we can express the spectral line signal as
\begin{equation}
J(Δ, Ω_{red}) = Γ \int\limits_{-∞}^{∞} (ρ_{4,4}(t) + ρ_{3,3}(t) )\, dt \text{.}
\end{equation}
By using the adiabatic approximation and the optical Bloch equations obtained from the Lindblad master equation, we can derive an approximate analytic expression for the spectral line signal $J(Δ, Ω_{red})$ as
\begin{equation}
\begin{aligned}
J(Δ, Ω_{red}) &= \frac{S_{24} 4 \left(- ω_{43} - Δ \right)^{2} }{S_{23} \left( 4 \left(-Δ \right)^{2} + S_{24} Ω_{red}^{2} + Γ^{2} \right) Π_{31} } \\
&× \left(1- \exp\left( -τ_{tr} \frac{\sqrt{π} Γ Π_{31} S_{23} Ω_{red}^{2}}{8 \left(- ω_{43} - Δ - ½\sqrt{S_{24}} Ω_{red} - ½\sqrt{S_{23}} Ω_{red}\right)^{2}} \right) \right)\text{.}
\end{aligned}
\end{equation}
[communmathphys.48.119]: http://dx.doi.org/10.1007/bf01608499 "G. Lindblad, Commun. Math. Phys. 48, 119 (1976)."
[Gardiner_2004]: http://www.springer.com/gp/book/9783540223016 "C. Gardiner and P. Zoller, Quantum Noise, Springer Series in Synergetics (Springer-Verlag Berlin Heidelberg, 2004)."
[Walls_2008]: http://dx.doi.org/10.1007/978-3-540-28574-8 "D. Walls and G. J. Milburn, Quantum Optics (Springer Science + Business Media, 2008)."
[Shore_2011]: http://dx.doi.org/10.1017/cbo9780511675713 "B. W. Shore, Manipulating Quantum Structures Using Laser Pulses (Cambridge University Press (CUP), 2011)."
[Steck_Cs_2010]: http://steck.us/alkalidata "D. A. Steck, Cesium D Line Data (http://steck.us/alkalidata, 2010)."
[Steck_Na_2010]: http://steck.us/alkalidata "D. A. Steck, Sodium D Line Data (http://steck.us/alkalidata, 2010)."
[Loudon_2000]: http://global.oup.com/academic/product/the-quantum-theory-of-light-9780198501763 "R. Loudon, The Quantum Theory of Light Third Edition (Oxford University Press, 2000)"
<!--bibtex
@article{communmathphys.48.119,
doi = {10.1007/bf01608499},
url = {http://dx.doi.org/10.1007/bf01608499},
year = 1976,
month = {jun},
publisher = {Springer Science $\mathplus$ Business Media},
volume = {48},
number = {2},
pages = {119--130},
author = {G. Lindblad},
title = {On the generators of quantum dynamical semigroups},
journal = {Commun. Math. Phys.}
}
@book{Gardiner_2004,
isbn = {978-3-540-22301-6},
url = {http://www.springer.com/gp/book/9783540223016},
year = 2004,
publisher = {Springer-Verlag Berlin Heidelberg},
author = {Gardiner, Crispin and Zoller, Peter},
title = {Quantum Noise},
series = {Springer Series in Synergetics},
}
@book{Walls_2008,
doi = {10.1007/978-3-540-28574-8},
url = {http://dx.doi.org/10.1007/978-3-540-28574-8},
year = 2008,
publisher = {Springer Science $\mathplus$ Business Media},
author = {D.F. Walls and Gerard J. Milburn},
title = {Quantum Optics}
}
@book{Shore_2011,
doi = {10.1017/cbo9780511675713},
url = {http://dx.doi.org/10.1017/cbo9780511675713},
year = 2011,
publisher = {Cambridge University Press ({CUP})},
author = {Bruce W. Shore},
title = {Manipulating Quantum Structures Using Laser Pulses}
}
@book{Steck_Cs_2010,
url = {http://steck.us/alkalidata},
year = 2010,
publisher = {http://steck.us/alkalidata},
author = {Daniel A. Steck},
title = {Cesium D Line Data},
}
@book{Steck_Na_2010,
url = {http://steck.us/alkalidata},
year = 2010,
publisher = {http://steck.us/alkalidata},
author = {Daniel A. Steck},
title = {Sodium D Line Data},
}
@book{Loudon_2000,
url = {http://global.oup.com/academic/product/the-quantum-theory-of-light-9780198501763},
year = 2000,
publisher = {Oxford University Press},
author = {Rodney Loudon},
title = {The Quantum Theory of Light Third Edition},
}
-->
# Results
## Compare and review $J(Δ,Ω_{red})$ evaluated numerically from density matrix with approximate analytic expression.
### Compare spectral signal $J(Ω_{red})$ and review the corresponding population dynamics for Na atoms
```python
las_plot_jored(na_jored_expcase_list_1)
plot4lme(expcase_list[0], expcase_list[0].result)
plot4lme(expcase_list[1], expcase_list[1].result)
```
### Compare spectral signal $J(Ω_{red})$ and review the corresponding population dynamics for Cs atoms
```python
las_plot_jored(cs_jored_expcase_list_1)
plot4lme(expcase_list[2], expcase_list[2].result)
plot4lme(expcase_list[3], expcase_list[3].result)
```
### Compare spectral signal $J(Δ)$ of Na and Cs atoms
```python
las_plot_jdelta(na_jdelta_expcase_list_1)
las_plot_jdelta(na_jdelta_expcase_list_2)
las_plot_jdelta(cs_jdelta_expcase_list_1)
las_plot_jdelta(cs_jdelta_expcase_list_2)
```
# Simulating the dynamics of 4-level system with [QuTiP](http://qutip.org/)
Please note that when simulating the dynamics of the total Hamiltonian:
1. We use the reduced Planck constant convention of the QuTiP backend, i.e., it is set to
\begin{equation}
ℏ = 1
\end{equation}
2. For the laser-atom interaction Hamiltonian we use a precomputed product of all of the coefficients in front of the time-dependent exponential function
\begin{equation}
hcf_{ab} = \frac{ℏ Ω_{red} \sqrt{S_{F_{a} F_{b}}} }{2}
\end{equation}
## Simulating the dynamics for the following experimental conditions:
1. Compute population dynamics for $Ω_{red} = Ω_{red,cr}$ and $Ω_{red} = Ω_{red,cr99}$.
2. Compute $J(Ω_{red})$ for $Ω_{red}$ from $0.25$ MHz to $50$ MHz.
3. Compute $J(Δ)$ for $Δ$ from $-40$ MHz to $40$ MHz.
4. Compare $J(Δ,Ω_{red})$ evaluated numerically from density matrix with approximate analytic expression.
## Setup imports
```python
%pylab inline
%config InlineBackend.figure_formats = {'svg',}
import seaborn as sns
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
# rc('axes', prop_cycle=cycler('color', flatui))
# sns.palplot(sns.color_palette(flatui))
sns.set(palette=flatui)
rc('svg', fonttype='none')
from qutip import *
from fractions import Fraction
from IPython.display import Markdown
```
Populating the interactive namespace from numpy and matplotlib
## Define functions for computing 4-level RWA Hamiltonian
```python
def solve4lme(args):
'''
Solve RWA Hamiltonian for 4-level system.
RWA Hamiltonian states |1,n+1⟩, |2,n+1⟩, |3,n⟩, |4,n⟩.
RWA Hamiltonian energies ε1=-w21, ε2=0, ε3=-w43-Δ, ε4=-Δ.
A classical laser field is in resonance between states |2⟩↔|4⟩.
Excitation schematics (bare atomic states picture)
# |4⟩
# -----------E4
# ↓Δ
# ----------- |3⟩
# / ↓Γ*Π42 ---------------E3
# Ω24 ↕ ↓ / ↓Π32*Γ ↓(1-Π32)*Γ
# / ↓ Ω23↕ ↓ ↓
# -----------------------E2 ↓
# |2⟩ -----------E1
# |1⟩
Usage::
>>> args = {'delta': 0, # detuning Δ from excited state |4⟩ in s^-1
'gamma': 1, # excited state decay rate Γ
'tint': 5920, # laser-atom interaction time, np.float32
'w21': 1756, # hyperfine splitting 1↔2
'w43': 48, # hyperfine splitting 3↔4
'hcf23': 3, # interaction coeff 2↔3, np.float32
'hcf24': 5, # interaction coeff 2↔4, np.float32
'cbr1': 1, # branching ratio Π42 for |2⟩→|4⟩
'cbr2': 7/12, # branching ratio Π32 for |3⟩→|2⟩
'cbr3': 5/12, # branching ratio Π31=(1-Π32) for |3⟩→|1⟩
'nsteps': 50000} # Max. number of internal ode steps/call.
>>> results = solve4lme(args)
:param args: A dictionary of the form ``(parameter, value)``.
:return: QuTiP Result object with mesolve data.
:rtype: A :class:`qutip.solver.Result`
'''
# Define atomic states |1>, |2>, |3>, |4>
st1, st2, st3, st4 = map(lambda st: basis(4, st), range(4))
# Operators for the diagonal elements of atomic Hamiltonian
# outer product of |1><1|, |2><2|, |3><3|, |4><4|
sig11, sig22, sig33, sig44 = map(ket2dm, [st1, st2, st3, st4])
# Operators for the off-diagonal elements of Hamiltonian
# Interaction Hamiltonian |2>↔|3> and |2>↔|4>
# Decay modes (collapse operators) for |4>→|2>, |3>→|2>, |3>→|1>
sig24 = st2 * st4.dag() # |2><4|
sig23 = st2 * st3.dag() # |2><3|
sig13 = st1 * st3.dag() # |1><3|
# Define time independent collapse operators
# collapse operator for 4->2, sqrt(Π42*Γ) * |2><4|
C1 = np.sqrt(args['cbr1'] * args['gamma']) * sig24
# collapse operator for 3->2, sqrt(Π32*Γ) * |2><3|
C2 = np.sqrt(args['cbr2'] * args['gamma']) * sig23
# collapse operator for 3->1, sqrt(Π31*Γ) * |1><3|
C3 = np.sqrt(args['cbr3'] * args['gamma']) * sig13
# Define list of collapse operators
c_op_list = [C1, C2, C3]
# Define time vector
t = linspace(-args['tint']*2, args['tint']*2, 101)
# Set up the time independent system Hamiltonians
# ε1=-w21, ε2 = 0, ε3=-w43-Δ, ε4=-Δ.
# state1 energy level position ε1=-w21
HS1 = -args['w21'] * sig11
# state3 energy level position ε3=-w43-Δ
HS3 = (-args['w43'] - args['delta']) * sig33
# state4 energy level position ε4=-Δ
HS4 = -args['delta'] * sig44
# Set up operators for the time varying Hamiltonians
# for laser-atom interaction Ω23(t) and Ω24(t)
HI1 = sig23.dag() + sig23
HI2 = sig24.dag() + sig24
# Set up the time varying RWA Hamiltonian with time dependant
# coefficients based on QuTip Cython string functions format
HRWA = [HS1, HS3, HS4,
[HI1, 'hcf23 * exp(-2*(t / tint) ** 2)'],
[HI2, 'hcf24 * exp(-2*(t / tint) ** 2)']]
# Define initial state as state |2>
psi0 = st2
# Define ODE solver options
opts=Odeoptions()
opts.nsteps = args['nsteps']
# Workaround for QuTip version 3.1.0 error with Cython string functions
# Convert Cython string function variable values to numpy.float32
# # error: two or more data types in declaration specifiers
# # typedef npy_double _Complex __pyx_t_npy_double_complex;
args['hcf23'] = np.float32(args['hcf23'])
args['hcf24'] = np.float32(args['hcf24'])
# Solve RWA Hamiltonian
output = mesolve(HRWA, psi0, t, c_op_list, [sig11, sig22, sig33, sig44],
args=args, options=opts, progress_bar=True)
# return mesolve
return output
def plot4lme(atomdata, results=None, saveplot=None):
'''Plot QuTiP Result object.'''
if results != None:
rs = results
else:
rs = qload(atomdata.filename)
rho11, rho22, rho33, rho44 = rs.expect
timescale = 1e6
t = rs.times * timescale
# Define pump strength as a function of time for plotting
wp = lambda t, tw, A: A * np.exp(-2*(t / tw) ** 2)
# Plot the results
fig = figure()
subplot(211)
if atomdata != None:
# colors #a6cee3 #2078b4 #afdd8a #35a12e #fa9897 #e31a1c
detuning = atomdata.delta
detuning_MHz = detuning / (timescale * 2*pi)
detuning_MHz_str = str(round(detuning_MHz,2))
fig.suptitle(atomdata.symbol + ', $τ_{int} = %s$ (s), ' % str(atomdata.tint) \
+ '$Δ = %s$ (MHz)' % detuning_MHz_str)
ored_sat_24_MHz = atomdata.osat / (timescale * 2 * pi * np.sqrt(atomdata.st24))
ored_sat_24_MHz_str = str(round(ored_sat_24_MHz,2))
axhline(y=ored_sat_24_MHz, color='#a6cee3',
label='$Ω_{red,sat,24} ≈ %s$ MHz' % ored_sat_24_MHz_str)
ored_cr_MHz = atomdata.__class__(atomdata.tint).ocr \
/ (timescale * 2 * pi)
ored_cr_MHz_str = str(round(ored_cr_MHz,2))
axhline(y=ored_cr_MHz, color='#afdd8a',
label='$Ω_{red,cr} ≈ %s$ MHz' % ored_cr_MHz_str)
ored_cr_pop_99_MHz = atomdata.__class__(atomdata.tint, pop=99).ocr \
/ (timescale * 2 * pi)
ored_cr_pop_99_MHz_str = str(round(ored_cr_pop_99_MHz,2))
axhline(y=ored_cr_pop_99_MHz, color='#fa9897',
label='$Ω_{red,cr}|_{pop=99} ≈ %s$ MHz' % ored_cr_pop_99_MHz_str)
plot(t, wp(t, atomdata.tint * timescale,
atomdata.hcf23 * 2 / (timescale * 2*pi)), '-', label='$Ω_{23}$')
plot(t, wp(t, atomdata.tint * timescale,
atomdata.hcf24 * 2 / (timescale * 2*pi)), '-', label='$Ω_{24}$')
ylabel('Coupling strength $Ω$ (MHz)')
lg_1 = legend()
lg_1.draw_frame(True)
subplot(212)
plot(t, rho11, '-', label='$ρ_{1,1}$')
plot(t, rho22, '-', label='$ρ_{2,2}$')
plot(t, rho33, '-', label='$ρ_{3,3}$')
plot(t, rho44, '-', label='$ρ_{4,4}$')
ylabel('Population $ρ$ (rel. units)')
xlabel('Time ($\mu s$)')
lg_2 = legend()
lg_2.draw_frame(True)
# check if filename for exporting figure is provided
if saveplot != None:
savefig(saveplot)
show()
class AtomData4Levels:
'''Setup parameters for 4-level system RWA Hamiltonian
Usage::
>>> class Atom4levelD2line(AtomData4Levels):
gamma = 1 # excited state decay rate Γ in s^-1
taue = 1 # excited state lifetime in s
w21 = 1756 # hyperfine splitting 1↔2 in s^-1
w43 = 48 # hyperfine splitting 3↔4 in s^-1
cbr1 = 1 # branching ratio Π42 for |2⟩→|4⟩
cbr2 = 1/2 # branching ratio Π32 for |3⟩→|2⟩
cbr3 = 1/2 # branching ratio Π31=(1-Π32) for |3⟩→|1⟩
st23 = 1 # rel. HF trans. strength factor
st24 = 1 # rel. HF trans. strength factor
>>> tint = 5920 # laser-atom interaction time in s
>>> csd2 = Atom4levelD2line(tint, delta)
:param tint : laser-atom interaction time in s
:param delta: detuning Δ from excited state |4⟩ in s^-1
:param gamma: excited state decay rate Γ in s^-1
:param taue : excited state lifetime in s
:param w21 : hyperfine splitting 1↔2 in s^-1
:param w43 : hyperfine splitting 3↔4 in s^-1
:param cbr1 : branching ratio Π42 for |2⟩→|4⟩
:param cbr2 : branching ratio Π32 for |3⟩→|2⟩
:param cbr3 : branching ratio Π31=(1-Π32) for |3⟩→|1⟩
:param st23 : HF transition strength factors divided by
:param st24 : square of reduced dipole matrix element
:return: Parameters for 4-level system RWA Hamiltonian.
:rtype: A :class:`AtomData4Levels`
'''
listargs = ('delta', 'tint', 'gamma', 'taue', 'w21', 'w43', 'hcf23', 'hcf24',
'cbr1', 'cbr2', 'cbr3', 'nsteps')
def __init__(self, tint, delta=0, ored=None, pop=None, nsteps=50000):
self.tint = tint
self.delta = delta
self.ored = ored
self.pop = pop
self.nsteps = nsteps
self.update()
def update(self):
'''Update parameters'''
self.osat = self.omega_saturation(self.gamma)
if self.pop == None:
self.pop = (1-np.exp(-1))*100
self.pop_coef = 1
else:
self.pop_coef = - np.log(1-self.pop/100)
self.ocr = self.omega_critical(self.tint, self.taue, self.w43-self.delta,
self.st23, self.cbr3, self.pop_coef)
if self.ored == None:
self.ored = self.ocr
self.hcf23 = self.hcf(self.ored, self.st23)
self.hcf24 = self.hcf(self.ored, self.st24)
self.argsaslist = (self.delta, self.tint, self.gamma, self.taue, self.w21,
self.w43, self.hcf23, self.hcf24, self.cbr1, self.cbr2,
self.cbr3, self.nsteps)
self.args = self.gendict(self.listargs, self.argsaslist)
self.filename = self.args_to_filename(self.listargs, **self.args)
def args_to_filename(self, listargs, **kwargs):
'''Return filename from list of args'''
return ''.join(list(map(lambda i: i + ':' + str(kwargs[i]), listargs)))
def omega_saturation(self, gamma):
'''Return saturation Rabi frequency value for the reduced Rabi frequency'''
return gamma / np.sqrt(2)
def omega_critical(self, ti, te, w43, s23, p3, pop_coef):
'''Return critical Rabi frequency value for reduced Rabi frequency'''
return np.sqrt(4 * pop_coef * te * (w43 ** 2) / (ti * p3 * s23))
def hcf(self, ored, strength):
'''Return interaction Hamiltonian coeff'''
return ored * np.sqrt(strength) / 2
def gendict(self, listargs, args):
'''Return solver arguments as a dictionary'''
return dict(zip(listargs, args))
class CsD2data:
'''Store cesium D2 line data.
Data obtained from Daniel A. Steck, Cesium D Line Data (2010).
Available online at http://steck.us/alkalidata
'''
symbol = 'Cs'
nspin = Fraction(7,2) # nuclear spin
jg = Fraction(1,2) # ground state J
je = Fraction(3,2) # excited state J
# relative hyperfine transition strength factors [D. Steck, Alkali D Line Data]
# S_{F_{g} F_{e}}
# F_{g} = 3
s32, s33, s34 = Fraction(20,56), Fraction(21,56), Fraction(15,56)
# F_{g} = 4
s43, s44, s45 = Fraction(7,72), Fraction(21,72), Fraction(44,72)
skeys = ((3,2), (3,3), (3,4), (4,3), (4,4), (4,5))
svals = (s32, s33, s34, s43, s44, s45)
sge = dict(zip(skeys, svals))
gamma = 32.889e+6 # decay rate Γ 32.889(84)e+6 in s^-1
taue = 30.405e-9 # lifetime 30.405(77) in s
wg43 = 2*pi * 9192.631770e+6 # F_{g} hfs 3↔4 9192.631770e+6 in s^-1
we54 = 2*pi * 251.0916e+6 # F_{e} hfs 4↔5 251.0916(20)e+6 in s^-1
we43 = 2*pi * 201.2871e+6 # F_{e} hfs 3↔4 201.2871(11)e+6 in s^-1
we32 = 2*pi * 151.2247e+6 # F_{e} hfs 2↔3 151.2247(16)e+6 in s^-1
cbr54 = 1 # branching ratio Π54 for |F_{5}⟩→|F_{4}⟩
cbr43 = Fraction(5,12) # branching ratio Π43 for |F_{4}⟩→|F_{3}⟩
cbr44 = Fraction(7,12) # branching ratio Π44 for |F_{4}⟩→|F_{4}⟩
cbr33 = Fraction(3,4) # branching ratio Π33 for |F_{3}⟩→|F_{3}⟩
cbr34 = Fraction(1,4) # branching ratio Π34 for |F_{3}⟩→|F_{4}⟩
cbr23 = 1 # branching ratio Π23 for |F_{2}⟩→|F_{3}⟩
class Cs4L(AtomData4Levels,CsD2data):
'''Store cesium D2 line data parameters for 4-level system RWA Hamiltonian'''
w21 = np.float32(CsD2data.wg43)
w43 = np.float32(CsD2data.we54)
cbr1 = np.float32(CsD2data.cbr54)
cbr2 = np.float32(CsD2data.cbr44)
cbr3 = np.float32(CsD2data.cbr43)
st24 = np.float32(CsD2data.sge[4,5])
st23 = np.float32(CsD2data.sge[4,4])
class NaD2data:
'''Store sodium D2 line data
Data obtained from Daniel A. Steck, Sodium D Line Data (2010).
Available online at http://steck.us/alkalidata
'''
symbol = 'Na'
nspin = Fraction(3,2) # nuclear spin
jg = Fraction(1,2) # ground state J
je = Fraction(3,2) # excited state J
# relative hyperfine transition strength factors [D. Steck, Alkali D Line Data]
# S_{F_{g} F_{e}}
# F_{g} = 1
s10 = Fraction(2,12)
s11 = Fraction(5,12)
s12 = Fraction(5,12)
# F_{g} = 2
s21 = Fraction(1,20)
s22 = Fraction(5,20)
s23 = Fraction(14,20)
skeys = ((1,0), (1,1), (1,2), (2,1), (2,2), (2,3))
svals = (s10, s11, s12, s21, s22, s23)
sge = dict(zip(skeys, svals))
gamma = 61.542e+6 # decay rate Γ 61.542(29)e+6 in s^-1
taue = 16.2492e-9 # lifetime 16.2492(77) in s
wg21 = 2*pi * 1771.6261288e+6 # F_{g} hfs 1↔2 1771.6261288(10)e+6 in s^-1
we32 = 2*pi * 58.326e+6 # F_{e} hfs 2↔3 58.326(43)e+6 in s^-1
we21 = 2*pi * 34.344e+6 # F_{e} hfs 1↔2 34.344(49)e+6 in s^-1
we10 = 2*pi * 15.810e+6 # F_{e} hfs 0↔1 15.810(80)e+6 in s^-1
cbr32 = 1 # branching ratio Π32 for |F_{3}⟩→|F_{2}⟩
cbr21 = Fraction(1,2) # branching ratio Π21 for |F_{2}⟩→|F_{1}⟩
cbr22 = Fraction(1,2) # branching ratio Π22 for |F_{2}⟩→|F_{2}⟩
cbr11 = Fraction(5,6) # branching ratio Π11 for |F_{1}⟩→|F_{1}⟩
cbr12 = Fraction(1,6) # branching ratio Π12 for |F_{1}⟩→|F_{2}⟩
cbr01 = 1 # branching ratio Π01 for |F_{0}⟩→|F_{1}⟩
class Na4L(AtomData4Levels,NaD2data):
'''Store sodium D2 line data parameters for 4-level system RWA Hamiltonian'''
w21 = np.float32(NaD2data.wg21);
w43 = np.float32(NaD2data.we32)
cbr1 = np.float32(NaD2data.cbr32)
cbr2 = np.float32(NaD2data.cbr22)
cbr3 = np.float32(NaD2data.cbr21)
st24 = np.float32(NaD2data.sge[2,3])
st23 = np.float32(NaD2data.sge[2,2])
def lorentz_norm(delta, deltapeak, gamma):
'''Return Lorentzian profile of natural linewidth normalized to 1 at peak.'''
g, w, w0 = gamma, delta, deltapeak
return g ** 2 / (4 * ((w - w0)**2 + (g ** 2)/4))
def las_analytic(delta, omegared, atom):
'''Return laser absorption signal J using approximate analytic expression.'''
delta23 = - atom.w43 - delta
delta24 = - delta
delta23eff = delta23 - omegared * (np.sqrt(atom.st23) + np.sqrt(atom.st24)) / 2
delta24effsq = (delta24 ** 2) + (atom.st24 * omegared ** 2)/4
tau_pump = (atom.taue * 8 * (delta23eff ** 2)) \
/ (np.sqrt(pi) * atom.cbr3 * atom.st23 * omegared ** 2)
J = (atom.st24 * 4 * (delta23 ** 2) * (1 - np.exp(-atom.tint/tau_pump))) \
/ (atom.st23 * (4 * (delta24effsq) + (atom.gamma) ** 2) * atom.cbr3)
return J
def las_numeric_single(results, gamma):
'''Return laser absorption signal J by integrating populations
rho33, rho44 from QuTiP result object using Trapezoidal rule
and multiplying it by decay rate Γ.'''
rho11, rho22, rho33, rho44 = results.expect
times = results.times
return (numpy.trapz(rho33, times) + numpy.trapz(rho44, times)) * gamma
def solve4lme_list(atomlist, precomputed=True):
'''Compute and add RWA Hamiltonian QuTiP result object
to AtomData4Levels object in atomlist.'''
for atom in atomlist:
if precomputed:
atom.result = qload(atom.filename)
else:
atom.result = solve4lme(atom.args)
qsave(atom.result, atom.filename)
atom.jnumval = las_numeric_single(atom.result, atom.gamma)
def las_plot_jored(jored_expcase_list):
'''Plot J(Ω_{red}) for fixed Δ'''
# colors #a6cee3 #2078b4 #afdd8a #35a12e #fa9897 #e31a1c
atom = jored_expcase_list[0]
delta = atom.delta
delta_MHz = delta * AFtoMHz
delta_MHz_str = str(round(delta_MHz,2))
jored_list = np.array(list(map(lambda atom: atom.ored, jored_expcase_list)))
jored_list_vals = np.array(list(map(lambda atom: atom.jnumval, jored_expcase_list)))
jored_num_list = np.linspace(jored_list[0], jored_list[-1], 201)
ored_sat_24 = atom.osat / np.sqrt(atom.st24)
ored_sat_24_MHz = ored_sat_24 * AFtoMHz
ored_sat_24_MHz_str = str(round(ored_sat_24_MHz,2))
orec_cr = atom.__class__(atom.tint).ocr
ored_cr_MHz = orec_cr * AFtoMHz
ored_cr_MHz_str = str(round(float(ored_cr_MHz),2))
orec_cr_pop_99 = atom.__class__(atom.tint,pop=99).ocr
ored_cr_pop_99_MHz = orec_cr_pop_99 * AFtoMHz
ored_cr_pop_99_MHz_str = str(round(float(ored_cr_pop_99_MHz),2))
figure()
axvline(x=ored_sat_24_MHz, color='#a6cee3',
label='$Ω_{red,sat,24} ≈$ %s MHz' % ored_sat_24_MHz_str)
axvline(x=ored_cr_MHz, color='#afdd8a',
label='$Ω_{red,cr} ≈$ %s MHz' % ored_cr_MHz_str)
axvline(x=ored_cr_pop_99_MHz, color='#fa9897',
label='$Ω_{red,cr}|_{pop=0.01} ≈$ %s MHz' % ored_cr_pop_99_MHz_str)
plot(jored_list * AFtoMHz,jored_list_vals, '-', label='numeric calc.')
plot(jored_num_list * AFtoMHz, las_analytic(delta, jored_num_list, atom),
'--', label='analytic approx.')
ylabel('$J(Ω_{red})$ (arb. units)')
xlabel('$Ω_{red}$ (MHz)')
title('%s, $Δ$ = %s (MHz), $τ_{int}$ = %s (s)' \
% (atom.symbol, delta_MHz_str, str(atom.tint)))
lg = legend()
lg.draw_frame(True)
show()
def las_plot_jdelta(jdelta_expcase_list):
'''Plot J(Δ) for fixed Ω_{red}'''
# colors #a6cee3 #2078b4 #afdd8a #35a12e #fa9897 #e31a1c
atom = jdelta_expcase_list[0]
ored = atom.ored
ored_MHz = ored * AFtoMHz
ored_MHz_str = str(round(ored_MHz,2))
gamma = atom.gamma
gamma_MHz = gamma * AFtoMHz
gamma_MHz_str = str(round(gamma_MHz,2))
jdelta_list = np.array(list(map(lambda atom: atom.delta, jdelta_expcase_list)))
jdelta_list_vals = np.array(list(map(lambda atom: atom.jnumval, jdelta_expcase_list)))
jdelta_num_list = np.linspace(jdelta_list[0], jdelta_list[-1], 201)
figure()
axvline(x=-gamma_MHz/2, color='#afdd8a',
label='±$Γ/2$, $Γ≈$ %s MHz (FWHM)' % gamma_MHz_str)
axvline(x=gamma_MHz/2, color='#afdd8a')
plot(jdelta_num_list * AFtoMHz,
lorentz_norm(jdelta_num_list, 0, gamma) * jdelta_list_vals.max(),
'-', label='natural linewidth')
plot(jdelta_list * AFtoMHz, jdelta_list_vals, '-', label='numeric calc.')
plot(jdelta_num_list * AFtoMHz, las_analytic(jdelta_num_list, ored, atom),
'--', label='analytic approx.')
ylabel('$J(Δ)$ (arb. units)')
xlabel('$Δ$ (MHz)')
title('%s, $Ω_{red}$ = %s (MHz), $τ_{int}$ = %s (s)' \
% (atom.symbol, ored_MHz_str, str(atom.tint)))
lg = legend()
lg.draw_frame(True)
show()
# converting from/to frequency in MHz to/from angular frequency s^{-1}
MHztoAF = 2 * pi * 1e+6
AFtoMHz = 1/(MHztoAF)
```
## Laser-atom interaction time 0.0002 s
```python
%%capture
expcase_list = Na4L(0.0002), Na4L(0.0002, pop=99), Cs4L(0.0002), Cs4L(0.0002, pop=99)
# set precomputed to False to avoid loading precomputed and saved qutip results
solve4lme_list(expcase_list, precomputed=True)
```
```python
%%capture
# Calculate numerical laser absorption signal J(Ω_{red})|Δ=0
# laser-atom interaction time 0.0002 s,
# Ω_{red} from 0.25 MHz to 50 MHz with variable step size
na_jored_list = np.linspace(0.25,2.25, 9)
na_jored_list = np.concatenate((na_jored_list, np.linspace(2.5,14.5, 25)), axis=0)
na_jored_list = np.concatenate((na_jored_list, np.linspace(15,50, 15)), axis=0)
na_jored_MHz_list_1 = na_jored_list
na_jored_expcase_list_1 = list(map(lambda omegaMHz:
Na4L(0.0002, ored = omegaMHz * MHztoAF),
na_jored_MHz_list_1))
# set precomputed to False to avoid loading precomputed and saved qutip results
solve4lme_list(na_jored_expcase_list_1, precomputed=True)
# Calculate numerical laser absorption signal J(Ω_{red})|Δ=0
# laser-atom interaction time 0.0002 s,
# Ω_{red} from 0.25 MHz to 50 MHz with variable step size
cs_jored_list = np.linspace(0.25,2.25, 9)
cs_jored_list = np.concatenate((cs_jored_list, np.linspace(2.5,14.5, 13)), axis=0)
cs_jored_list = np.concatenate((cs_jored_list, np.linspace(15,50, 15)), axis=0)
cs_jored_MHz_list_1 = cs_jored_list
cs_jored_expcase_list_1 = list(map(lambda omegaMHz:
Cs4L(0.0002, ored = omegaMHz * MHztoAF),
cs_jored_MHz_list_1))
# set precomputed to False to avoid loading precomputed and saved qutip results
solve4lme_list(cs_jored_expcase_list_1, precomputed=True)
# Calculate numerical laser absorption signal J(Δ)|Ω_{red}=Ω_{red,cr}|Δ=0
# laser-atom interaction time 0.0002 s,
# Δ from -40 MHz to 40 MHz with variable step size
na_jdelta_list = np.linspace(-40,-5, 15)
na_jdelta_list = np.concatenate((na_jdelta_list, np.linspace(-4,-1, 4)), axis=0)
na_jdelta_list = np.concatenate((na_jdelta_list, np.linspace(-0.5,0.5, 5)), axis=0)
na_jdelta_list = np.concatenate((na_jdelta_list, np.linspace(1,4, 4)), axis=0)
na_jdelta_list = np.concatenate((na_jdelta_list, np.linspace(5,40, 15)), axis=0)
na_jdelta_MHz_list_1 = na_jdelta_list
na_jdelta_expcase_list_1 = list(map(lambda deltaMHz:
Na4L(0.0002, delta = deltaMHz * MHztoAF,
ored = Na4L(0.0002).ocr),
na_jdelta_MHz_list_1))
# set precomputed to False to avoid loading precomputed and saved qutip results
solve4lme_list(na_jdelta_expcase_list_1, precomputed=True)
# Calculate numerical laser absorption signal J(Δ)|Ω_{red}=Ω_{red,cr}|Δ=0
# laser-atom interaction time 0.0002 s,
# Δ from -40 MHz to 40 MHz with variable step size
na_jdelta_list_2 = np.linspace(-40,-5, 15)
na_jdelta_list_2 = np.concatenate((na_jdelta_list_2, np.linspace(-4,-1, 4)), axis=0)
na_jdelta_list_2 = np.concatenate((na_jdelta_list_2, np.linspace(-0.5,0.5, 5)), axis=0)
na_jdelta_list_2 = np.concatenate((na_jdelta_list_2, np.linspace(1,4, 4)), axis=0)
na_jdelta_list_2 = np.concatenate((na_jdelta_list_2, np.linspace(5,40, 15)), axis=0)
na_jdelta_MHz_list_2 = na_jdelta_list_2
na_jdelta_expcase_list_2 = list(map(lambda deltaMHz:
Na4L(0.0002, delta = deltaMHz * MHztoAF,
ored = Na4L(0.0002, pop=99).ocr),
na_jdelta_MHz_list_2))
# set precomputed to False to avoid loading precomputed and saved qutip results
solve4lme_list(na_jdelta_expcase_list_2, precomputed=True)
# Calculate numerical laser absorption signal J(Δ)|Ω_{red}=Ω_{red,cr}|Δ=0
# laser-atom interaction time 0.0002 s,
# Δ from -40 MHz to 40 MHz with variable step size
cs_jdelta_list = np.linspace(-40,-5, 15)
cs_jdelta_list = np.concatenate((cs_jdelta_list, np.linspace(-4,-1, 4)), axis=0)
cs_jdelta_list = np.concatenate((cs_jdelta_list, np.linspace(-0.5,0.5, 5)), axis=0)
cs_jdelta_list = np.concatenate((cs_jdelta_list, np.linspace(1,4, 4)), axis=0)
cs_jdelta_list = np.concatenate((cs_jdelta_list, np.linspace(5,40, 15)), axis=0)
cs_jdelta_MHz_list_1 = cs_jdelta_list
cs_jdelta_expcase_list_1 = list(map(lambda deltaMHz:
Cs4L(0.0002, delta = deltaMHz * MHztoAF,
ored = Cs4L(0.0002).ocr,
nsteps=100000),
cs_jdelta_MHz_list_1))
# set precomputed to False to avoid loading precomputed and saved qutip results
solve4lme_list(cs_jdelta_expcase_list_1, precomputed=True)
# Calculate numerical laser absorption signal J(Δ)|Ω_{red}=Ω_{red,cr}|Δ=0
# laser-atom interaction time 0.0002 s,
# Δ from -40 MHz to 40 MHz with variable step size
cs_jdelta_list_2 = np.linspace(-40,-5, 15)
cs_jdelta_list_2 = np.concatenate((cs_jdelta_list_2, np.linspace(-4,-1, 4)), axis=0)
cs_jdelta_list_2 = np.concatenate((cs_jdelta_list_2, np.linspace(-0.5,0.5, 5)), axis=0)
cs_jdelta_list_2 = np.concatenate((cs_jdelta_list_2, np.linspace(1,4, 4)), axis=0)
cs_jdelta_list_2 = np.concatenate((cs_jdelta_list_2, np.linspace(5,40, 15)), axis=0)
cs_jdelta_MHz_list_2 = cs_jdelta_list_2
cs_jdelta_expcase_list_2 = list(map(lambda deltaMHz:
Cs4L(0.0002, delta = deltaMHz * MHztoAF,
ored = Cs4L(0.0002, pop=99).ocr,
nsteps=100000),
cs_jdelta_MHz_list_2))
# set precomputed to False to avoid loading precomputed and saved qutip results
solve4lme_list(cs_jdelta_expcase_list_2, precomputed=True)
```
\begin{thebibliography}{7}
\bibitem[1]{communmathphys.48.119}
G.~Lindblad, \href{http://dx.doi.org/10.1007/bf01608499}{Commun. Math. Phys. \textbf{48}, 119 (1976)}
\bibitem[2]{Gardiner_2004}
C.~Gardiner and P.~Zoller, \href{http://www.springer.com/gp/book/9783540223016}{\emph{Quantum Noise}}, Springer Series in Synergetics (Springer-Verlag Berlin Heidelberg, 2004)
\bibitem[3]{Walls_2008}
D.~Walls and G.~J. Milburn, \href{http://dx.doi.org/10.1007/978-3-540-28574-8}{\emph{Quantum Optics}} (Springer Science $\mathplus$ Business Media, 2008)
\bibitem[4]{Shore_2011}
B.~W. Shore, \href{http://dx.doi.org/10.1017/cbo9780511675713}{\emph{Manipulating Quantum Structures Using Laser Pulses}} (Cambridge University Press, 2011)
\bibitem[5]{Steck_Cs_2010}
D.~A. Steck, \href{http://steck.us/alkalidata}{\emph{Cesium D Line Data} (http://steck.us/alkalidata, 2010)}
\bibitem[6]{Steck_Na_2010}
D.~A. Steck, \href{http://steck.us/alkalidata}{\emph{Sodium D Line Data} (http://steck.us/alkalidata, 2010)}
\bibitem[7]{Loudon_2000}
R.~Loudon, \href{http://global.oup.com/academic/product/the-quantum-theory-of-light-9780198501763}{\emph {The Quantum Theory of Light Third Edition}} (Oxford University Press, 2000)
\end{thebibliography}
# How to Draw Ellipse of Covariance Matrix
Given a 2x2 covariance matrix, how do we draw the ellipse representing it? The following function implements the method, which is useful for visualizing multivariate normal distributions and correlation matrices. Formulae for the radii and rotation are provided for the covariance matrix shown below.
\begin{align}
\Sigma = \begin{bmatrix} a & b \\ b & c \end{bmatrix}
\end{align}
## Radii and Rotation
\begin{align}
\lambda_{1,2} &= \frac{a+c}{2} \pm \sqrt{\left( \frac{a-c}{2} \right)^{2} + b^{2}} \\
\theta &= \begin{cases} 0 & \text{ if } b = 0 \text{ and } a \geq c \\
\frac{\pi}{2} & \text{ if } b = 0 \text{ and } a < c \\
\text{atan2}(\lambda_{1} - a, b) & \text{ if } b \neq 0
\end{cases}
\end{align}
Here, $\theta$ is the angle in radians from positive x-axis to the ellipse's major axis in the counterclockwise direction. $\sqrt{\lambda_{1}}$ is the radius of the major axis (the longer radius) and $\sqrt{\lambda_{2}}$ is the radius of the minor axis (shorter radius). In $atan2(\cdot, \cdot)$, the first parameter is $y$ and second is $x$.
```python
import random
import math
import numpy as np
import matplotlib.pyplot as plt
import time
from scipy.linalg import block_diag
from scipy.special import erfinv
from scipy.stats import t as tdist
from numpy.linalg import inv
from numpy import linalg as LA
from matplotlib.patches import Ellipse
```
```python
def GetAngleAndRadii(covar):
"""
Given a covariance matrix, the function GetAngleAndRadii() calculates
the major axis and minor axis radii and the orientation of the ellipse.
Inputs:
covar: 2x2 matrix
Output:
major_radius: Radius of the major axis of ellipse
minor_radius: Radius of the minor axis of ellipse
theta : Orientation angle in radians from positive x-axis
to the ellipse's major axis in the counterclockwise direction
"""
# Infer the a,b,c values
a = covar[0,0]
b = covar[0,1]
c = covar[1,1]
if a < 0 or c < 0 or b**2 > a*c:
raise Exception("Sorry, covariance matrix is invalid - it must be symmetric positive semidefinite")
lambda_1 = (a+c)/2 + math.sqrt(((a-c)/2)**2 + b**2)
lambda_2 = (a+c)/2 - math.sqrt(((a-c)/2)**2 + b**2)
# Infer the radii
major_radius = math.sqrt(lambda_1)
minor_radius = math.sqrt(lambda_2)
# Infer the rotation
if b == 0:
if a >= c:
theta = 0
else:
theta = math.pi/2
else:
theta = math.atan2(lambda_1-a, b)
return major_radius, minor_radius, theta
```
```python
# Check the above code
covar_check = np.array([[9,5],[5,4]])
major_radius_check, minor_radius_check, theta_check = GetAngleAndRadii(covar_check)
print('major axis radius = ', round(major_radius_check,2),
'minor axis radius = ', round(minor_radius_check,2),
'orientation = ', round(theta_check,2), 'rad')
```
major axis radius = 3.48 minor axis radius = 0.95 orientation = 0.55 rad
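The same radii and orientation can be cross-checked with an eigendecomposition of the covariance matrix; a minimal sketch:
```python
# Cross-check using numpy's eigendecomposition
eig_val, eig_vec = np.linalg.eigh(covar_check)
idx = np.argsort(eig_val)[::-1]            # sort eigenvalues in descending order
eig_val, eig_vec = eig_val[idx], eig_vec[:, idx]
if eig_vec[0, 0] < 0:                      # fix the sign so the angle lies in (-pi/2, pi/2]
    eig_vec[:, 0] = -eig_vec[:, 0]
major_r, minor_r = np.sqrt(eig_val)
theta_eig = math.atan2(eig_vec[1, 0], eig_vec[0, 0])
print(round(major_r, 2), round(minor_r, 2), round(theta_eig, 2))
```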
```python
def plot_ellipse(center, cov = None):
# Get the center of ellipse
x_cent, y_cent = center
print('center x at: ', x_cent)
print('center y at: ', y_cent)
# Get Ellipse Properties from cov matrix
if cov is not None:
major_radius, minor_radius, theta_orient = GetAngleAndRadii(cov)
print('major axis radius = ', round(major_radius,2),
'minor axis radius = ', round(minor_radius,2),
'orientation = ', round(theta_orient,2), 'rad')
# Generate data for ellipse structure (axis-aligned ellipse, then rotated by theta)
t = np.linspace(0,2*np.pi,1000)
x = major_radius*np.cos(t)
y = minor_radius*np.sin(t)
data = np.array([x,y])
R = np.array([[np.cos(theta_orient),-np.sin(theta_orient)],
[np.sin(theta_orient),np.cos(theta_orient)]])
# R already aligns the major axis with the leading eigenvector of cov,
# so no additional rotation by the SVD eigenvectors is needed
data = np.dot(R,data)
# Center the ellipse at given center
data[0] += x_cent
data[1] += y_cent
# Plot the ellipse
fig,ax = plt.subplots()
ax.plot(data[0],data[1],color='b',linestyle='-')
ax.fill(data[0],data[1])
```
```python
covar_check = np.array([[9,4],[4,3]])
plot_ellipse(center = (1,2), cov=covar_check)
```
```python
```
```python
surf_choice = 'torus'
```
```python
from sympy import init_printing; init_printing();
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
```
```python
from silkpy.symbolic.surface.surface import ParametricSurface
from sympy import symbols, sin, cos, pi, cot, Array, refine, Q
from silkpy.sympy_utility import dot
u, v = symbols('u, v', real=True)
```
```python
if surf_choice=='cylindrical':
R = symbols('R', positive=True)
s = ParametricSurface([u, v], [R*cos(u), R*sin(u), v])
elif surf_choice=='cone':
w = symbols('omega', real=True)
s = ParametricSurface([u, v], [v*cos(u), v*sin(u), v*cot(w)])
elif surf_choice=='Mobius':
theta = symbols('theta', real=True)
s = ParametricSurface([theta, v],
Array([cos(theta), sin(theta), 0 ]) +
Array([sin(theta/2) * cos(theta), sin(theta/2) * sin(theta), cos(theta/2)]) * v)
elif surf_choice=='torus':
from sympy import Q, ask
from sympy.assumptions import global_assumptions
a, r = symbols('a, r', real=True, positive=True)
global_assumptions.add(Q.positive(a + r*cos(u)))
s = ParametricSurface([u, v], [ (a+r*cos(u)) * cos(v), (a+r*cos(u)) * sin(v), r*sin(u)])
```
```python
s.christoffel_symbol.tensor()
```
$\displaystyle \left[\begin{matrix}\left[\begin{matrix}0 & 0\\0 & \frac{\left(a + r \cos{\left(u \right)}\right) \sin{\left(u \right)}}{r}\end{matrix}\right] & \left[\begin{matrix}0 & - \frac{r \left(a + r \cos{\left(u \right)}\right) \sin{\left(u \right)}}{a^{2} + 2 a r \cos{\left(u \right)} + r^{2} \cos^{2}{\left(u \right)}}\\- \frac{r \left(a + r \cos{\left(u \right)}\right) \sin{\left(u \right)}}{a^{2} + 2 a r \cos{\left(u \right)} + r^{2} \cos^{2}{\left(u \right)}} & 0\end{matrix}\right]\end{matrix}\right]$
```python
```
```python
s.metric_tensor.tensor()
s.metric_tensor.change_config('uu').tensor()
s.christoffel_symbol.tensor()
r_u, r_v = s.exprs.diff(u), s.exprs.diff(v); r_u, r_v
a_, b_ = r_u, r_v
s.weingarten_matrix
```
$\displaystyle \left[\begin{matrix}r^{2} & 0\\0 & \left(a + r \cos{\left(u \right)}\right)^{2}\end{matrix}\right]$
$\displaystyle \left[\begin{matrix}\frac{1}{r^{2}} & 0\\0 & \frac{1}{\left(a + r \cos{\left(u \right)}\right)^{2}}\end{matrix}\right]$
$\displaystyle \left[\begin{matrix}\left[\begin{matrix}0 & 0\\0 & \frac{\left(a + r \cos{\left(u \right)}\right) \sin{\left(u \right)}}{r}\end{matrix}\right] & \left[\begin{matrix}0 & - \frac{r \left(a + r \cos{\left(u \right)}\right) \sin{\left(u \right)}}{a^{2} + 2 a r \cos{\left(u \right)} + r^{2} \cos^{2}{\left(u \right)}}\\- \frac{r \left(a + r \cos{\left(u \right)}\right) \sin{\left(u \right)}}{a^{2} + 2 a r \cos{\left(u \right)} + r^{2} \cos^{2}{\left(u \right)}} & 0\end{matrix}\right]\end{matrix}\right]$
$\displaystyle \left( \left[\begin{matrix}- r \sin{\left(u \right)} \cos{\left(v \right)} & - r \sin{\left(u \right)} \sin{\left(v \right)} & r \cos{\left(u \right)}\end{matrix}\right], \ \left[\begin{matrix}- \left(a + r \cos{\left(u \right)}\right) \sin{\left(v \right)} & \left(a + r \cos{\left(u \right)}\right) \cos{\left(v \right)} & 0\end{matrix}\right]\right)$
$\displaystyle \left[\begin{matrix}\frac{1}{r} & 0\\0 & \frac{\cos{\left(u \right)}}{a + r \cos{\left(u \right)}}\end{matrix}\right]$
```python
Wa = s.weingarten_transform(a_)
Wb = s.weingarten_transform(b_)
dot(Wa, b_), dot(a_, Wb)
s.K_H
s.prin_curvature_and_vector
from silkpy.sympy_utility import dot
(_, vec1), (_, vec2) = s.prin_curvature_and_vector
dot(vec1, vec2) # The two principal curvature vectors are perpendicular to each other.
```
```python
InteractiveShell.ast_node_interactivity = "last"
```
```python
from sympy import sin, cos, pi
from silkpy.numeric.surface.geodesic import geodesic_ncurve
theta = pi / 24 # symbols('theta', real=True)
t_arr, (u_arr, v_arr) = geodesic_ncurve(
s.subs({a:5, r:2}), [pi/4, pi/4], [cos(theta), sin(theta)])
```
```python
from sympy import sin, cos, pi
from silkpy.numeric.surface.geodesic import geodesic_polar_ncoordinate
rho_arr, theta_arr, u_grid, v_grid = geodesic_polar_ncoordinate(
s.subs({a:5, r:2}), [pi/4, pi/4], rho1=2.4, nrho=12, ntheta=48)
x_grid, y_grid, z_grid = s.subs({a:5, r:2}).lambdified()(u_grid, v_grid)
```
```python
from silkpy.symbolic.surface.draw import draw_surface_plotly
import plotly.graph_objects as go
if surf_choice=='torus':
fig = draw_surface_plotly(s.subs({a: 5, r:2}), domain=[(-float(pi), float(pi)), (-float(pi), float(pi))])
else:
raise NotImplementedError()
# map the geodesic (u, v) samples onto the surface to obtain 3D coordinates
x_arr, y_arr, z_arr = s.subs({a:5, r:2}).lambdified()(u_arr, v_arr)
fig.add_trace(go.Scatter3d(
x=x_arr, y=y_arr, z=z_arr,
mode='lines',
line=dict(color=t_arr, width=2)
))
# import numpy as np
# for i in range(len(theta_arr)):
# fig.add_trace(go.Scatter3d(
# x=x_grid[:, i],
# y=y_grid[:, i],
# z=z_grid[:, i],
# mode='lines',
# line=dict(#color=rho_arr,
# width=2)
# ))
# for i in range(len(rho_arr)):
# fig.add_trace(go.Scatter3d(
# x=np.r_[x_grid[i,:], x_grid[i,:]],
# y=np.r_[y_grid[i,:], y_grid[i,:]],
# z=np.r_[z_grid[i,:], z_grid[i,:]],
# mode='lines',
# line=dict(#color=rho_arr[i],
# width=2)
# ))
# fig.show()
```
## Not yet done
```python
from sympy import series, Eq
t0 = symbols('t_0', real=True)
```
```python
t0 = 0
exprs[0].subs(t, t0) + (t-t0) * exprs[0].diff(t, 1).subs(t, t0)
exprs[1].subs(t, t0) + (t-t0) * exprs[1].diff(t, 1).subs(t, t0)
```
```python
exprs[0].evalf(subs={t:0}) + exprs[0].diff(t, 1).evalf(subs={t:0})
```
```python
from sympy import Eq
import sympy.solvers.ode as ode
ode.systems.dsolve_system([
Eq(linearized_exprs[0], 0),
Eq(linearized_exprs[1], 0)], funcs=[u1, u2])
```
```python
```
```python
def curvature_curve(surface):
from sympy import Matrix, Array, Eq
from sympy import Function, symbols
import sympy.solvers.ode as ode
t = symbols('t', real=True)
# u1, u2 = symbols('u1, u2', real=True, cls=Function)
u1 = Function(surface.sym(0), real=True)(t)
u2 = Function(surface.sym(1), real=True)(t)
curvature_curve_mat = Matrix([
[u1.diff(t)**2, -u1.diff(t) * u2.diff(t), u2.diff(t)**2],
Array(surface.E_F_G).subs(surface.sym(0), u1),
Array(surface.L_M_N).subs(surface.sym(1), u2)])
# typically there would be two solutions
sol_with_u1_equal_t = ode.systems.dsolve_system(
[Eq(curvature_curve_mat.det(), 0 ), Eq(u1.diff(t), 1)])[0]
sol_with_u2_equal_t = ode.systems.dsolve_system(
[Eq(curvature_curve_mat.det(), 0 ), Eq(u2.diff(t), 1)])[0]
return [sol_with_u1_equal_t, sol_with_u2_equal_t]
```
```python
curvature_curve(s)
```
```python
```
# Statistical parameters using probability density function
### Given probability density function, $p(x)$
$ p = 2x/b^2$, $0 < x < b$
### The mean value of $x$ is estimated analytically:
$\overline{x} = \int\limits_0^b x\, p(x)\, dx = \int\limits_0^b 2x^2/b^2 = \left. 2x^3/3b^2\right|_0^b =2b^3/3b^2 = 2b/3$
### the median
median: $ \int\limits_0^m p(x)\,dx = 1/2 = \int\limits_0^m 2x/b^2\,dx = \left. x^2/b^2 \right|_0^m = m^2/b^2 = 1/2$, $m = b/\sqrt(2)$
### the second moment
second moment: $x^{(2)} = \int\limits_0^b x^2\, p(x)\, dx = \int\limits_0^b 2x^3/b^2 = \left. x^4/2b^2\right|_0^b =b^4/2b^2 = b^2/2$
### the variance is the second moment less the squared mean value
$var(x) = x^{(2)} - \overline{x}^2 = b^2/2 - 4b^2/9 = b^2/18$
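These analytic results can also be cross-checked by sampling from $p(x)$ with inverse-transform sampling: since the CDF is $x^2/b^2$, a uniform sample $u$ maps to $x = b\sqrt{u}$. A minimal sketch:
```
import numpy as np
b = 2.0
u = np.random.rand(1000000)
x = b * np.sqrt(u)                   # inverse-transform sampling, CDF(x) = x^2/b^2
print x.mean(), 2*b/3                # mean     -> 2b/3
print x.var(), b**2/18               # variance -> b^2/18
print np.median(x), b/np.sqrt(2)     # median   -> b/sqrt(2)
```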
```
def p(x,b):
return 2*x/(b**2)
```
```
b = 2
x = linspace(0,b,200)
y = p(x,b)
```
```
plot(x,y)
xlabel('$x$')
ylabel('$p(x)$')
```
```
# approximate using the numerical integration
print trapz(y*x,x)
print 2.*b/3
```
1.33335016793
1.33333333333
```
print trapz(y*x**2,x)
print b**2/2
```
2.00005050378
2
```
import sympy
```
```
sympy.var('x,b,p,m')
p = 2*x/b**2
print p
```
2*x/b**2
```
sympy.integrate(p*x,(x,0,b))
```
2*b/3
```
sympy.integrate(p*x**2,(x,0,b))
```
b**2/2
```
sympy.integrate(p,(x,0,m))
```
m**2/b**2
```
sympy.solve(m**2/b**2 - 0.5,m)
```
[-0.707106781186548*b, 0.707106781186548*b]
```
```
```python
%matplotlib notebook
```
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import ScalarFormatter
import math
```
This notebook assumes you have completed the notebook [Introduction of sine waves](TDS_Introduction-sine_waves.ipynb). This notebook follows the same pattern of time domain waveform generation: instantaneous frequency -> angle step -> total angle -> time domain waveform.
Our goal is to track features of different acoustic impedance in material using a low power time domain waveform. Time delay spectrometry (TDS) is one implementation of this goal. To understand TDS we need to understand the waveform which is used by TDS called a chirp. A chirp is a sinusoid that is constantly varying in frequency. The chirp is generated by integrating a varying angle step which is derived from an instantaneous frequency profile. We will generate a chirp in this notebook. An overview of this technique is given [here](https://www.youtube.com/watch?v=RQplkt0bw_c).
The angle of the chirp can be found by integrating the instantaneous frequency:
\begin{equation}
f(t)=\frac{f_{end}-f_{start}}{T_c}t + f_{start}
\end{equation}
\begin{equation}
\Delta\phi(t) = 2\pi f(t)\Delta t
\end{equation}
\begin{equation}
\phi (t)=\int_{}^{} \Delta\phi(t) = \int_{}^{} 2\pi f(t)\, dt = 2\pi\left(\int_{}^{}\frac{f_{end}-f_{start}}{T_c}t\,dt + \int_{}^{}f_{start}\,dt\right)
\end{equation}
\begin{equation}
\phi (t)= 2\pi\left(\frac{f_{end}-f_{start}}{T_c}\int_{}^{}t\,dt + f_{start}\int_{}^{}dt\right)
\end{equation}
\begin{equation}
\phi (t)= 2\pi\left(\frac{f_{end}-f_{start}}{T_c}\frac{t^2}{2} + f_{start}t\right)
\end{equation}
This gives the time series value of
\begin{equation}
x(t) = e^{j\phi (t)} = e^{j2\pi\left(\frac{f_{end}-f_{start}}{T_c}\frac{t^2}{2} + f_{start}t\right)}
\end{equation}
But the formula for the angle requires squaring time, which will cause numeric errors as the time increases. Another approach is to implement the formula for the angle as a cumulative summation.
\begin{equation}
\phi_{sum} (N)=\sum_{k=1}^{N} \Delta\phi(k) = \sum_{k=1}^{N} 2\pi f(k\, t_s)\, t_s = \sum_{k=1}^{N}2\pi\left(\frac{f_{end}-f_{start}}{T_c}k\, t_s + f_{start}\right)t_s
\end{equation}
This allows the angle to always stay between 0 and $2\pi$ by subtracting $2\pi$ whenever the angle exceeds that value. We will work with the cumulative sum of the angle, and then compare it to the integral to determine how accurate the cumulative sum is.
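A minimal sketch of this idea (using the same chirp parameters as in the next cell): accumulating the phase and wrapping it into $[0, 2\pi)$ produces exactly the same complex samples as the unbounded cumulative sum, because $e^{j\phi}$ is $2\pi$-periodic.
```python
import numpy as np

fs = 16e5 * 8; ts = 1 / fs                     # 8 samples per cycle at f_stop
f_start, f_stop, Tc = 3e5, 16e5, 0.00003
t = np.arange(int(np.ceil(fs * Tc))) * ts
dphi = 2 * np.pi * ((f_stop - f_start) / Tc * t + f_start) * ts   # phase step per sample
phi_total = np.cumsum(dphi)                    # unbounded total phase
phi_wrapped = np.mod(phi_total, 2 * np.pi)     # kept within [0, 2*pi)
print(np.allclose(np.exp(1j * phi_total), np.exp(1j * phi_wrapped)))   # True
```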
```python
#max free 8 points per sample
#Tc is the max depth we are interested in
Tc_sec=0.00003
f_start_Hz=3e5
#talk about difference and similarity of sine wave example, answer why not 32 samples
f_stop_Hz=16e5
#We choose 8 samples per cycle at the maximum frequency to not require steep pulse shaping filter profiles on the output of the
#digital to analog converter
samplesPerCycle=8
fs=f_stop_Hz*samplesPerCycle
ts=1/fs
total_samples= math.ceil(fs*Tc_sec)
n = np.arange(0,total_samples, step=1, dtype=np.float64)
t_sec=n*ts
t_usec = t_sec *1e6
#This is the frequency of the chirp over time. We assume linear change in frequency
chirp_freq_slope_HzPerSec=(f_stop_Hz-f_start_Hz)/Tc_sec
#Compute the instantaneous frequency which is a linear function
chirp_instantaneous_freq_Hz=chirp_freq_slope_HzPerSec*t_sec+f_start_Hz
chirp_instantaneous_angular_freq_radPerSec=2*np.pi*chirp_instantaneous_freq_Hz
#Since frequency is a change in phase the we can plot it as a phase step
chirp_phase_step_rad=chirp_instantaneous_angular_freq_radPerSec*ts
#The phase step can be summed (or integrated) to produce the total phase which is the phase value
#for each point in time for the chirp function
chirp_phase_rad=np.cumsum(chirp_phase_step_rad)
#The time domain chirp function
chirp = np.exp(1j*chirp_phase_rad)
```
```python
#We can see, unlike the complex exponential, the chirp's instantaneous frequency is linearly increasing.
#This corresponds with the linearly increasing phase step.
fig, ax = plt.subplots(2, 1, sharex=True,figsize = [8, 8])
lns1=ax[0].plot(t_usec,chirp_instantaneous_freq_Hz,linewidth=4, label='instantaneous frequency');
ax[0].set_title('Comparing the instantaneous frequency and phase step')
ax[0].set_ylabel('instantaneous frequency (Hz)')
axt = ax[0].twinx()
lns2=axt.plot(t_usec,chirp_phase_step_rad,linewidth=2,color='black', linestyle=':', label='phase step');
axt.set_ylabel('phase step (rad)')
#ref: https://stackoverflow.com/questions/5484922/secondary-axis-with-twinx-how-to-add-to-legend
lns = lns1+lns2
labs = [l.get_label() for l in lns]
ax[0].legend(lns, labs, loc=0)
#We see that summing or integrating the linearly increasing phase step gives a quadratic function of total phase.
ax[1].plot(t_usec,chirp_phase_rad,linewidth=4,label='chirp');
ax[1].plot([t_usec[0], t_usec[-1]],[chirp_phase_rad[0], chirp_phase_rad[-1]],linewidth=1, linestyle=':',label='linear (x=y)');
ax[1].set_title('Cumulative quadratic phase function of chirp')
ax[1].set_xlabel('time ($\mu$sec)')
ax[1].set_ylabel('total phase (rad)')
ax[1].legend();
```
<IPython.core.display.Javascript object>
```python
#The complex exponential of each phase value gives us the time domain chirp signal.
#We have highlighted the beginning and end of the chirp where it starts at a low frequency and linearly increases to a high frequency
samplesToShowSlow=np.arange(5*samplesPerCycle,dtype=np.int32)
samplesToShowFast=np.flip(np.ceil(t_sec.shape[0]).astype(np.int32) - np.arange(5*samplesPerCycle,dtype=np.int32))-1
fig2 = plt.figure(constrained_layout=True,figsize = [8, 6])
gs = fig2.add_gridspec(2, 3)
f2_ax1 = fig2.add_subplot(gs[0, :])
f2_ax2 = fig2.add_subplot(gs[1, :])
f2_ax1.plot(t_usec,chirp_phase_rad, color='#27A4A3', label='chirp');
f2_ax1.plot(t_usec[samplesToShowSlow],chirp_phase_rad[samplesToShowSlow],color=(1,0,0),linewidth=4, label='slow');
f2_ax1.plot(t_usec[samplesToShowFast],chirp_phase_rad[samplesToShowFast],color=(0,0,1),linewidth=4, label='fast');
f2_ax1.set_title('Cumulative quadratic phase function of chirp')
f2_ax1.set_xlabel('time ($\mu$sec)')
f2_ax1.set_ylabel('total phase (rad)')
f2_ax1.legend();
f2_ax2.plot(t_usec,np.real(chirp),color='#27A4A3', label='real');
f2_ax2.plot(t_usec,np.imag(chirp),color='#27A4A3', linestyle=':', label='imag');
f2_ax2.plot(t_usec[samplesToShowSlow],np.real(chirp[samplesToShowSlow]),color=(1,0,0));
f2_ax2.plot(t_usec[samplesToShowSlow],np.imag(chirp[samplesToShowSlow]),color=(1,0,0), linestyle=':');
f2_ax2.plot(t_usec[samplesToShowFast],np.real(chirp[samplesToShowFast]),color=(0,0,1));
f2_ax2.plot(t_usec[samplesToShowFast],np.imag(chirp[samplesToShowFast]),color=(0,0,1), linestyle=':');
f2_ax2.set_title('Time domain chirp')
f2_ax2.set_xlabel('time ($\mu$sec)')
f2_ax2.set_ylabel('amplitude')
f2_ax2.get_xaxis().get_major_formatter().set_useOffset(False)
f2_ax2.legend();
```
<IPython.core.display.Javascript object>
```python
#With perfect (continuous-time) integration of the instantaneous frequency we obtain the phase in closed form
#This is the frequency slope of the chirp over time. We assume a linear change in frequency
chirp_freq_slope_HzPerSec=(f_stop_Hz-f_start_Hz)/Tc_sec
#The continuous-time phase is the integral of the (linear) instantaneous frequency
chirp_phase_continous_time_rad=2*np.pi*(chirp_freq_slope_HzPerSec/2*np.power(t_sec,2)+f_start_Hz*t_sec)
chirp = np.exp(1j*chirp_phase_continous_time_rad)
#The complex exponential of each phase value gives us the time domain chirp signal.
#We have highlighted the beginning and end of the chirp where it starts at a low frequency and linearly increases to a high frequency
fig2 = plt.figure(constrained_layout=True,figsize = [8, 6])
gs = fig2.add_gridspec(2, 3)
f2_ax1 = fig2.add_subplot(gs[0, :])
f2_ax2 = fig2.add_subplot(gs[1, :])
f2_ax1.plot(t_usec,chirp_phase_rad, color='#27A4A3', label='chirp');
f2_ax1.plot(t_usec,chirp_phase_continous_time_rad,color=(1,0,0),linewidth=4, linestyle=':', label='chirp continuous');
f2_ax1.set_title('Cumulative quadratic phase function of chirp')
f2_ax1.set_xlabel('time ($\mu$sec)')
f2_ax1.set_ylabel('total phase (rad)')
f2_ax1.legend();
f2_ax2.plot(t_usec,chirp_phase_rad-chirp_phase_continous_time_rad, color='#27A4A3', label='difference');
f2_ax2.set_title('Difference between summed and continuous-time phase')
f2_ax2.set_xlabel('time ($\mu$sec)')
f2_ax2.set_ylabel('phase difference (rad)')
f2_ax2.legend();
```
<IPython.core.display.Javascript object>
We now examine the error between the summed phase and the phase obtained by perfect integration. The total summed phase after $N$ samples is
\begin{equation}
\phi_{sum} (N)=\sum_{k=1}^{N} \Delta\phi(k) = \sum_{k=1}^{N} 2\pi f(k t_s)\, t_s = \sum_{k=1}^{N} 2\pi\left(\frac{f_{end}-f_{start}}{T_c}k t_s + f_{start}\right)t_s
\end{equation}
To analyze the error we collect the constant factors into A and B
\begin{equation}
A = 2\pi\left(\frac{f_{end}-f_{start}}{T_c}\right) t_s^2
\end{equation}
\begin{equation}
B = 2\pi f_{start}\, t_s
\end{equation}
This gives a summation of
\begin{equation}
\phi_{sum} (N)= \sum_{k=1}^{N} 2\pi f(k t_s)\, t_s = \sum_{k=1}^{N}\left(Ak + B\right)
\end{equation}
Which allows us to write
\begin{equation}
\phi_{sum} (N)= \sum_{k=1}^{N}\left(Ak\right) + \sum_{k=1}^{N}\left(B\right) = A\sum_{k=1}^{N}k + BN
\end{equation}
We solve the below summation by recognizing it is half the area of a rectangle with sides N and N+1 so
\begin{equation}
\sum_{k=1}^{N}k = \frac{(N+1)N}{2}
\end{equation}
This formula can be illustrated graphically as half of an $N \times (N+1)$ rectangle of unit squares. Collecting the terms, we eliminate the sum:
\begin{equation}
\phi_{sum} (N)= A\frac{(N+1)N}{2} + BN =\frac{A}{2}N^2 + \frac{A+2B}{2}N
\end{equation}
Using the same A and B we can write the integral of the instantaneous frequency as
\begin{equation}
\phi (t)= 2\pi\left(\frac{f_{end}-f_{start}}{T_c}\frac{t^2}{2} + f_{start}t\right) =\frac{A}{2t_s^2}t^2 + \frac{B}{t_s}t
\end{equation}
We can also relate N and t by $t = Nt_s$, which lets us rewrite $\phi (t)$ as
\begin{equation}
\phi (N)= \frac{A}{2t_s^2}\left(Nt_s\right)^2 + \frac{B}{t_s}(Nt_s)= \frac{A}{2}N^2 + BN
\end{equation}
Now we can compute the error, which is
\begin{equation}
\phi (N) - \phi_{sum} (N)= \left(\frac{A}{2}N^2 + BN\right) - \left(\frac{A}{2}N^2 + \frac{A+2B}{2}N\right)
\end{equation}
This simplifies to
\begin{equation}
\phi (N) - \phi_{sum} (N)= -\frac{A}{2}N = -\pi\left(\frac{f_{end}-f_{start}}{T_c}\right) t_s^2\, N
\end{equation}
so the summed phase slightly overshoots the continuously integrated phase, by an amount that grows linearly with the sample index, which is consistent with the difference plotted above.
|
0e356bf0ad95a9dcddff4e8b8bc8570b3ba9a09b
| 639,659 |
ipynb
|
Jupyter Notebook
|
course/tds-200/week_01/notebooks/TDS_Part_1-chirp_basics.ipynb
|
potto216/tds-tutorials
|
2acf2002ac5514dc60781c3e2e6797a4595104e6
|
[
"MIT"
] | 6 |
2020-07-12T19:17:59.000Z
|
2020-09-24T22:19:02.000Z
|
course/tds-200/week_01/notebooks/TDS_Part_1-chirp_basics.ipynb
|
potto216/tds-tutorials
|
2acf2002ac5514dc60781c3e2e6797a4595104e6
|
[
"MIT"
] | 7 |
2020-09-16T12:18:01.000Z
|
2020-12-17T23:04:37.000Z
|
course/tds-200/week_01/notebooks/TDS_Part_1-chirp_basics.ipynb
|
potto216/tds-tutorials
|
2acf2002ac5514dc60781c3e2e6797a4595104e6
|
[
"MIT"
] | null | null | null | 236.998518 | 296,297 | 0.879209 | true | 3,295 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.92523 | 0.868827 | 0.803865 |
__label__eng_Latn
| 0.887876 | 0.705979 |
#### MIT License (c) 2018 by Andrew Lyasoff
#### Jupyter notebook written in Python 3. It illustrates the use of SymPy to compute the distribution function of the Gaussian law and its inverse, which is then used to transform a uniform Monte Carlo sample into a Gaussian Monte Carlo sample.
First, compute the distribution function and its inverse in symbolic form:
```python
import numpy as np
import matplotlib.pyplot as plt
from numpy import *
from sympy import *
x,y,n,u=symbols('x y n u')
init_printing()
```
```python
integrate((1/sqrt(2*pi))*exp(-u**2/2),(u,-oo,+oo))
```
```python
AA=integrate(sqrt(1/(2*pi))*exp(-u**2/2),(u,-oo,x))
AA
```
```python
(1+erf(sqrt(2)*x/2))/2-y
```
```python
BB=solve((1+erf(sqrt(2)*x/2))/2-y,x)[0]
BB
```
```python
sqrt(2)
```
```python
sqrt(2.)
```
```python
def F(xxx):
return erf(sqrt(2)*xxx/2)/2 + 1/2
def Finv(yyy):
return sqrt(2)*erfinv(2*yyy - 1)
```
```python
F(Finv(0.8))
```
```python
Finv(F(5))
```
```python
Finv(F(5.))
```
Now plot the distribution function for the standard normal ${\cal N}(0,1)$ distribution law.
```python
t = arange(-3, 3, 0.01)
s=[F(u) for u in t]
fig, ax = plt.subplots()
ax.plot(t, s)
plt.show()
```
```python
t = arange(.001, 1, 0.001)
s=[Finv(u) for u in t]
fig, ax = plt.subplots()
ax.plot(t, s)
plt.show()
```
Plot the inverse:
Compute the "exact" probability for falling outside the interval $[-3\sigma,3\sigma]$.
```python
2*(1-F(3.0))
```
```python
1-(_)
```
Now try to compute the above probability by using Monte Carlo simulation and sampling from the uniform distribution in $[0,1]$.
```python
2*(1-F(3.0))-0.002641
```
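For reference, here is a minimal sketch of how such a Monte Carlo estimate could be produced with the `Finv` defined above. The sample size is an assumption; with the SymPy-based `Finv` this loop is slow, so a modest `N` is used.
```python
import random

N = 20000                      # assumed sample size (kept small; sympy's erfinv is slow)
count = 0
for _ in range(N):
    u = random.random()        # uniform sample in [0, 1]
    z = float(Finv(u))         # transform to a Gaussian sample via the inverse CDF
    if abs(z) > 3.0:
        count += 1
print(count / N)               # should be close to 2*(1 - F(3.0)), about 0.0027
```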
```python
Finv(F(8))
```
```python
1+1
```
```python
```
|
5ed4c3761cef0d203f4bc7c34606e88fcb2e5ace
| 43,785 |
ipynb
|
Jupyter Notebook
|
Inverse_of_a_Distribution_Function_Example_Python.ipynb
|
AndrewLyasoff/SMAP
|
6eeea8953a26a05b1e23387067109d23b2011824
|
[
"MIT"
] | 21 |
2018-09-04T19:12:32.000Z
|
2022-03-20T02:05:44.000Z
|
Inverse_of_a_Distribution_Function_Example_Python.ipynb
|
lhyzh/SMAP
|
f6687291769d4c16a0d51a06a941384f646bb432
|
[
"MIT"
] | 1 |
2019-02-13T14:24:38.000Z
|
2019-02-13T14:24:38.000Z
|
Inverse_of_a_Distribution_Function_Example_Python.ipynb
|
lhyzh/SMAP
|
f6687291769d4c16a0d51a06a941384f646bb432
|
[
"MIT"
] | 6 |
2019-02-10T03:43:40.000Z
|
2021-03-28T03:53:22.000Z
| 87.047714 | 11,248 | 0.854494 | true | 565 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.927363 | 0.879147 | 0.815288 |
__label__eng_Latn
| 0.846532 | 0.732521 |
# In this demo
This demo presents the design and functionality of `Signal`s, which are the core objects for representing coefficients in models. `Signal`s are structured to represent the mathematical formula:
\begin{equation}
s(t) = Re[f(t)e^{i(2 \pi \nu t + \phi)}],
\end{equation}
where
- $f(t)$ is a complex-valued envelope,
- $\nu$ is the carrier frequency, and
- $\phi$ is the phase.
Using this formula as a basis for the representation of `Signal`s is motivated by:
- This is a very common representation when modelling control sequences in quantum systems; in particular, for representing signals whose Fourier spectrum is localized about a particular frequency.
- It is general; even when the previous point is not immediately applicable, the full generality of the envelope $f(t)$ allows for specification of arbitrary real-valued functions.
Taking the above as a given, the design for `Signal`s is further motivated to facilitate different modes of operation:
- Completely general envelopes: The user specifies $f(t)$ as python-callable function.
- This allows for arbitrary signal definitions, including continuous functions.
- Piecewise constant, or *discrete* envelopes, in which $f(t)$ is a piecewise constant function of time.
- This allows for sample-based construction and computation on `Signals`.
- Signal processing and evaluation can be performed purely as collective array-based operations, which is more efficient than the function-based products required in the fully general case.
```python
import numpy as np
from qiskit_ode.signals import Signal, DiscreteSignal, SignalList, DiscreteSignalSum
from matplotlib import pyplot as plt
%matplotlib inline
```
# 1. `Signal` and its subclasses
## 1.1 `Signal`
The most general signal object takes the envelope as a python callable function.
```python
sig = Signal(envelope=lambda t: t**2, carrier_freq=2.)
```
We can evaluate the signal by directly calling the object, or we can evaluate its complex envelope or full complex value.
```python
print(sig(1.1))
print(sig.envelope(1.1))
print(sig.complex_value(1.1))
```
0.373910563193685
1.2100000000000002
(0.373910563193685+1.1507783847171364j)
The evaluation functions are all set up for vectorized evaluation, and we assume that the user-specified envelope is vectorized. If not, it can be vectorized using `numpy.vectorize`, for example as sketched below.
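Here is a small sketch of wrapping a scalar-only envelope (a hypothetical function, not part of the demo above) before handing it to `Signal`:
```python
def scalar_envelope(t):
    # a plain Python conditional: works only for scalar t
    return 1.0 if t < 0.5 else 0.0

vec_sig = Signal(envelope=np.vectorize(scalar_envelope), carrier_freq=2.)
print(vec_sig(np.array([0.1, 1.1])))
```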
```python
t_vals = np.array([0.1, 1.1])
print(sig(t_vals))
print(sig.envelope(t_vals))
print(sig.complex_value(t_vals))
```
[0.00309017 0.37391056]
[0.01 1.21]
[0.00309017+0.00951057j 0.37391056+1.15077838j]
We can also plot the evaluation functions.
```python
# plot the signal
sig.draw(0, 5, 100)
```
```python
# plot the complex value
sig.draw(0, 5, 100, function='complex_value')
```
```python
# plot just the envelope
sig.draw(0, 5, 100, function='envelope')
```
## 1.2 Constant `Signal`
This is not a subclass, but an alternate mode of behaviour for `Signal`. Generally a user shouldn't need to do this, but internally, constant values are converted to a `Signal` with constant envelope and zero carrier frequency/phase.
This can be instantiated by passing a numeric value to the constructor of `Signal`.
```python
constant = Signal(2.)
constant.draw(0, 5, 100)
```
```python
constant.is_constant
```
True
## 1.3 `DiscreteSignal(Signal)`
A `DiscreteSignal` represents a `Signal` whose envelope is piecewise constant, and is specified as an array of samples.
```python
discrete_signal = DiscreteSignal(dt=0.5, samples=[1., 2., 3.])
discrete_signal.draw(0, 1.5, 100)
```
A `DiscreteSignal` can still have an analog carrier.
```python
discrete_signal = DiscreteSignal(dt=0.5, samples=[1., 2., 3.], carrier_freq=1.)
discrete_signal.draw(0, 1.5, 100)
```
# 2. Algebraic operations on `Signal`s
It is natural to add and multiply signals, as these mathematical operations correspond to the behaviour of physical circuit elements. To facilitate this, we introduce the `SignalSum` object, which represents a sum of `Signal`s. The purpose of this class is to allow higher level algebraic operations on `Signal`s while preserving frequency information.
Algebraic operations have been implemented using python dunder methods, so that a user can add and multiply signals using standard python syntax.
## 2.1 Adding general `Signal`s
```python
sig1 = Signal(lambda t: t, carrier_freq=10.)
sig2 = Signal(lambda t: 10 * t, carrier_freq=20.)
# add using standard python syntax
sig_sum = sig1 + sig2
```
`sig_sum` is now a `SignalSum` object:
```python
print(type(sig_sum))
str(sig_sum)
```
<class 'qiskit_ode.signals.signals.SignalSum'>
'Signal(carrier_freq=10.0, phase=0.0) + Signal(carrier_freq=20.0, phase=0.0)'
This is a container class storing the original signals in the sum in the `components` attribute:
```python
sig_sum.components
```
[<qiskit_ode.signals.signals.Signal at 0x7fb8d05b0370>,
<qiskit_ode.signals.signals.Signal at 0x7fb8d05b0430>]
We can interact with this object in the same way as a regular `Signal`, e.g. we can draw it:
```python
sig_sum.draw(0, 1, 100)
```
We can produce the same plot by evaluating the individual signals in the sum separately:
```python
t_vals = np.linspace(0, 1, 100)
plt.plot(t_vals, sig1(t_vals) + sig2(t_vals))
```
## 2.2 Multiplying signals
Multiplication of signals is implemented via the formula:
\begin{equation}
Re[f(t)e^{i(2 \pi \nu t + \phi)}] \times Re[g(t)e^{i(2 \pi \omega t + \psi)}]
= Re[\frac{1}{2} f(t)g(t)e^{i(2\pi (\omega + \nu)t + (\phi + \psi))} ]
+ Re[\frac{1}{2} f(t)\overline{g(t)}e^{i(2\pi (\omega - \nu)t + (\phi - \psi))} ].
\end{equation}
When two signals are multiplied, a `SignalSum` representing the right hand side of the above equation is constructed.
```python
sig_prod = sig1 * sig2
str(sig_prod)
```
'Signal(carrier_freq=30.0, phase=0.0) + Signal(carrier_freq=-10.0, phase=0.0)'
We see that the above product consists of a sum of signals centred at frequencies `30.0` and `-10.0`, the sum and difference of the original carrier frequencies.
```python
sig_prod(2.) - sig1(2.) * sig2(2.)
```
0.0
## 2.3 `DiscreteSignalSum` and operations on `DiscreteSignal`s
The previous sections show how addition/multiplication work for general `Signal`s. In the general case, as the envelope is a user-specified function, we can only multiply envelopes as functions. For `DiscreteSignal`s with compatible sample structure, we can multiply envelopes as arrays.
```python
discrete_sig1 = DiscreteSignal(dt=1., samples=[1., 2., 3.], carrier_freq=2.)
discrete_sig2 = DiscreteSignal(dt=1., samples=[4j, 5j, 6j], carrier_freq=2.)
discrete_prod = discrete_sig1 * discrete_sig2
```
```python
print(type(discrete_prod))
str(discrete_prod)
```
<class 'qiskit_ode.signals.signals.DiscreteSignalSum'>
'DiscreteSignal(dt=1.0, carrier_freq=4.0, phase=0.0) + DiscreteSignal(dt=1.0, carrier_freq=0.0, phase=0.0)'
A `DiscreteSignalSum` stores the samples of its constituent components in a single array.
```python
discrete_prod.samples
```
Array([[0.+2.j, 0.-2.j],
[0.+5.j, 0.-5.j],
[0.+9.j, 0.-9.j]], backend='numpy')
## 2.4 Frequency preservation
One of the benefits of `SignalSum` is that it "preserves frequency information", in the sense that, in applications where each `Signal` has a well-defined central frequency, the algebraic operations will result in `SignalSum`s whose components have well-defined central frequencies. This is useful for operations that require analyzing the frequency components of signals, e.g. the rotating wave approximation.
We can visualize this by examining the FFT of the product of signals above. The product of signals results in a component whose central frequency is at the sum of the frequencies in the product, as well as a component at the difference:
```python
N = 1000
T = 10.
t_vals = np.linspace(0., T, N)
fft_freqs = np.fft.fftfreq(N, T / N)
sig_prod_fft = np.fft.fft(sig_prod(t_vals))
plt.plot(fft_freqs, np.real(sig_prod_fft), fft_freqs, np.imag(sig_prod_fft))
```
We see components at `30.` and `10.`, as expected. Now, we can look at the FFT of each component of the product:
```python
N = 1000
T = 10.
t_vals = np.linspace(0., T, N)
fft_freqs = np.fft.fftfreq(N, T / N)
sig_prod0_fft = np.fft.fft(sig_prod[0](t_vals))
plt.plot(fft_freqs, np.real(sig_prod0_fft), fft_freqs, np.imag(sig_prod0_fft))
```
We see the first component in the sum is the term whose frequency is at the sum of the product term frequencies.
```python
N = 1000
T = 10.
t_vals = np.linspace(0., T, N)
fft_freqs = np.fft.fftfreq(N, T / N)
sig_prod1_fft = np.fft.fft(sig_prod[1](t_vals))
plt.plot(fft_freqs, np.real(sig_prod1_fft), fft_freqs, np.imag(sig_prod1_fft))
```
The second component in the sum is at the difference.
# 3. IQ mixer via algebraic operations
```python
# define some envelope function
def env(t):
return t * (1 - t)
w_d = 2.
w_lo = 1.9
w_if = 0.1
I = Signal(env, carrier_freq=w_if, name='I') # envelope times cosine at w_if
Q = Signal(env, carrier_freq=w_if, phase=-np.pi/2, name='Q') # envelope times sine at w_if
slo_I = Signal(1., carrier_freq=w_lo, name='slo_I') # local oscillator for I
slo_Q = Signal(1., carrier_freq=w_lo, phase=-np.pi/2, name='slo_Q') # local oscillator for Q
```
The IQ output is a sum of two products. Hence, it produces a `SignalSum` with 4 outputs.
```python
IQ_output = (I * slo_I) - (Q * slo_Q)
str(IQ_output)
```
'Signal(carrier_freq=2.0, phase=0.0) + Signal(carrier_freq=-1.7999999999999998, phase=0.0) + Signal(carrier_freq=2.0, phase=-3.141592653589793) + Signal(carrier_freq=-1.7999999999999998, phase=0.0)'
```python
IQ_output.draw(0, 1, 100, title='IQ output')
```
The $0^{th}$ and $2^{nd}$ terms in the sum are at frequency `2.0`, which we can plot:
```python
IQ_output[[0, 2]].draw(0, 1, 100, title='freq==2.0 terms')
```
The $1^{st}$ and $3^{rd}$ terms are at frequency `1.8`:
```python
IQ_output[[1, 3]].draw(0, 1, 100, title='freq==1.8 terms')
```
This demonstrates the cancellation at the lower sideband, with the remaining signal at the upper sideband. When modelling imperfect operation of an IQ mixer, this cancellation may not occur perfectly.
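As a quick illustration of that remark, here is a sketch with an assumed 5% gain error on the Q path; the lower-sideband terms then no longer cancel exactly.
```python
# Hypothetical imperfect Q path: same envelope scaled by an assumed gain error
Q_imperfect = Signal(lambda t: 0.95 * env(t), carrier_freq=w_if, phase=-np.pi/2, name='Q_imperfect')
IQ_imperfect = (I * slo_I) - (Q_imperfect * slo_Q)
IQ_imperfect[[1, 3]].draw(0, 1, 100, title='freq==1.8 terms (imperfect mixer)')
```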
# 4. `SignalList`
A model of a Hamiltonian contains a list of `Signal`s representing coefficients of operators. Each coefficient may be any subclass of `Signal` (including `SignalSum` and `DiscreteSignalSum`). The `SignalList` object is similar to `SignalSum`, in that it stores a list of `Signal`s, but the evaluation functions return an array of each component evaluated separately.
```python
sig_list = SignalList([2. + Signal(lambda t: t, carrier_freq=1.),
1.,
DiscreteSignal(dt=1., samples=[1., 2., 3.])])
```
```python
sig_list(2.)
```
array([4., 1., 3.])
`SignalList` also contains a helper `drift` property for evaluating the constant part:
```python
sig_list.drift
```
Array([2., 1., 0.], backend='numpy')
# 5. Transfer functions
```python
from qiskit_ode.signals.transfer_functions import Convolution
```
```python
def gaus(t):
sigma = 10
dt = 0.1
return 2.*dt/np.sqrt(2.*np.pi*sigma**2)*np.exp(-t**2/(2*sigma**2))
```
```python
ts = np.linspace(0, 100, 1000)
np.sum([gaus(_) for _ in ts])
```
1.0029894228040142
```python
plt.plot(ts, gaus(ts))
```
```python
convolution = Convolution(gaus)
```
```python
samples = [0. if t<20. or t > 80. else 1 for t in ts]
piecewise_const = DiscreteSignal(dt=ts[1]-ts[0], samples=samples, carrier_freq=0.0, start_time=0)
```
```python
piecewise_const.draw(0, 100, 200)
piecewise_const.carrier_freq
```
```python
convolved = convolution(piecewise_const)
convolved.draw(0, 150, 200)
convolved.carrier_freq
```
```python
convolved.carrier_freq = 0.3
convolved.draw(0, 200, 500)
convolved.carrier_freq
```
# 6. Fancy indexing
`Signal` subclasses representing collections of signals can be subscripted in the same manner as a 1d numpy array.
```python
sig_sum = 1. + Signal(lambda t: t) + DiscreteSignal(dt=0.5, samples=[1, 2, 3, 4, 5])
str(sig_sum)
```
'Signal(carrier_freq=0.0, phase=0.0) + Constant(1.0) + DiscreteSignal(dt=0.5, carrier_freq=0.0, phase=0.0)'
We can access the first two components of the sum by subscripting with the list `[0, 1]`.
```python
str(sig_sum[[0, 1]])
```
'Signal(carrier_freq=0.0, phase=0.0) + Constant(1.0)'
This works for both `DiscreteSignalSum` and `SignalList`. Subscripting these results in an object of the appropriate type.
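For example, a small sketch reusing the `sig_list` defined in Section 4:
```python
sub_list = sig_list[[0, 2]]   # keep the first and third entries
sub_list(2.)
```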
# 7. Sampling
We can generate `DiscreteSignal`s and `DiscreteSignalSum`s by sampling, respectively, general `Signal`s and `SignalSum`s. When performing this discretization, we can choose to sample the carrier as well, or keep it analog.
```python
sig = Signal(lambda t: t, carrier_freq=2.)
disc1 = DiscreteSignal.from_Signal(sig, dt=0.1, start_time=0., n_samples=10)
disc2 = DiscreteSignal.from_Signal(sig, dt=0.1, start_time=0., n_samples=10, sample_carrier=True)
```
```python
# plot the signal and the discretized version when keeping the carrier analog
sig.draw(0, 1, 100)
disc1.draw(0, 1, 100)
```
```python
# plot the signal and the discretized version when also sampling the carrier
sig.draw(0, 1, 100)
disc2.draw(0, 1, 100)
```
The same can be done with `DiscreteSignalSum`.
```python
sig1 = Signal(lambda t: t, carrier_freq=2.)
sig2 = Signal(lambda t: 1j * t**2, carrier_freq=3.)
sig_sum = sig1 + sig2
disc1 = DiscreteSignalSum.from_SignalSum(sig_sum, dt=0.1, start_time=0., n_samples=10)
disc2 = DiscreteSignalSum.from_SignalSum(sig_sum, dt=0.1, start_time=0., n_samples=10, sample_carrier=True)
```
```python
sig_sum.draw(0, 1, 100)
disc1.draw(0, 1, 100)
```
```python
sig_sum.draw(0, 1, 100)
disc2.draw(0, 1, 100)
```
|
76e2132adf2c3c15e9e65f283d35e91fa5313fb6
| 480,911 |
ipynb
|
Jupyter Notebook
|
example_notebooks/Signals.ipynb
|
divshacker/qiskit-ode
|
3b5d7afb1a80faea9b489f1d79b09c1e52580107
|
[
"Apache-2.0"
] | null | null | null |
example_notebooks/Signals.ipynb
|
divshacker/qiskit-ode
|
3b5d7afb1a80faea9b489f1d79b09c1e52580107
|
[
"Apache-2.0"
] | null | null | null |
example_notebooks/Signals.ipynb
|
divshacker/qiskit-ode
|
3b5d7afb1a80faea9b489f1d79b09c1e52580107
|
[
"Apache-2.0"
] | null | null | null | 344.245526 | 40,324 | 0.937047 | true | 4,020 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.857768 | 0.782662 | 0.671343 |
__label__eng_Latn
| 0.970735 | 0.398086 |
<center>
<h1><b>Lab 3</b></h1>
<h1>PHYS 580 - Computational Physics</h1>
<h2>Professor Molnar</h2>
</br>
<h3><b>Ethan Knox</b></h3>
<h4>https://www.github.com/ethank5149</h4>
<h4>ethank5149@gmail.com</h4>
</br>
</br>
<h3><b>September 17, 2020</b></h3>
</center>
### Imports
```python
import numpy as np
import sympy as sp
from scipy.special import ellipk
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
from functools import partial
```
### Support Functions
```python
def euler_step(f, y, t, dt):
y = y + f(t, y) * dt
return y
def rk2_step(f, y, t, dt):
k1 = dt * f(t, y)
k2 = dt * f(t + dt, y + k1)
y = y + (k1 + k2) / 2.0
return y
def euler_cromer_step(f, y, dy, t, dt):
dy = dy + f(t, y, dy) * dt
y = y + dy * dt
return y, dy
def dsolve(f, t, y0, step = euler_step):
t = np.asarray(t) # Ensure t is a Numpy array
y0 = np.asarray(y0)
y = np.zeros((np.size(t), np.size(y0))) # Create our output data container
y[0] = y0 # Set initial condition
for i in range(np.size(t)-1):
y[i+1] = step(f, y[i], t[i], t[i+1] - t[i]) # Step forward
return t, np.hsplit(y, np.size(y0))
def dsolve_simplectic(f, t, y0, dy0, step = euler_cromer_step):
t = np.asarray(t) # Ensure t is a Numpy array
y0 = np.asarray(y0)
y = np.zeros((np.size(t), np.size(y0))) # Create our output data container
dy = np.zeros((np.size(t), np.size(dy0))) # Create our output data container
y[0] = y0 # Set initial condition
dy[0] = dy0 # Set initial condition
for i in range(np.size(t)-1):
y[i+1], dy[i+1] = step(f, y[i], dy[i], t[i], t[i+1] - t[i]) # Step forward
return t, y, dy
def get_kinetic_energy(I, omega):
return 0.5 * I * omega ** 2
def get_potential_energy(m, g, l, theta):
return m * g * l * (1.0 - np.cos(theta))
def get_total_energy(m, I, l, g, theta, omega):
return get_kinetic_energy(I, omega) + get_potential_energy(m, g, l, theta)
def global_error(exact, calculated):
error = np.zeros_like(exact)
for i in range(len(error)):
error[i] = calculated[i] - exact[i]
return error
def local_error(y_exact, y_approx, x):
error = np.zeros_like(x)
for i in np.arange(1, len(error)):
error[i-1] = y_exact[i] - y_exact[i-1] - (y_approx[i] - y_approx[i-1])
return error
```
### Analytical Calculations
$$I\ddot{\theta}+c\dot{\theta}+mgl\theta=F_0\cos(\omega_Dt)\rightarrow\ddot{\theta}+\frac{c}{I}\dot{\theta}+\frac{mgl}{I}\theta=\frac{F_0}{I}\cos(\omega_Dt)$$
Using:
$$A=\frac{F_0}{I},\quad\beta=\frac{c}{2\sqrt{mglI}},\quad\omega_0=\sqrt{\frac{mgl}{I}}$$
Gives:
$$\ddot{\theta}+2\beta\omega_0\dot{\theta}+\omega_0^2\theta=A\cos(\omega_Dt)$$
```python
def df_linear_pendula(t, x, zeta, w0, A, wd):
return np.asarray([x[1], -2 * zeta * w0 * x[1] - w0 ** 2 * x[0] + A * np.cos(wd * t)])
def df_linear_pendula_simplectic(t, x, dx, zeta, w0, A, wd):
return -2 * zeta * w0 * dx - w0 ** 2 * x + A * np.cos(wd * t)
```
# Number 1
## Analytical Solution
```python
omega_0, t, theta0, dtheta0 = sp.symbols(r'\omega_0 t \theta_0 \dot{\theta}_0')
theta = sp.Function(r'\theta')
ode = sp.Eq(sp.Derivative(theta(t), t, t) + omega_0**2*theta(t),0)
ics = {theta(0): theta0, theta(t).diff(t).subs(t, 0): dtheta0}
soln = sp.dsolve(ode, theta(t), ics=ics).rewrite(sp.cos).simplify()
theta_func = soln.rhs
omega_func = theta_func.diff(t)
m, g, l, I = sp.symbols(r'm g l I')
V = m * g * l * (1 - sp.cos(theta_func))
T = I * omega_func ** 2 / 2
H = V + T
```
```python
theta_func
```
$\displaystyle \frac{\dot{\theta}_0 \sin{\left(\omega_0 t \right)}}{\omega_0} + \theta_0 \cos{\left(\omega_0 t \right)}$
```python
H
```
$\displaystyle \frac{I \left(\dot{\theta}_0 \cos{\left(\omega_0 t \right)} - \omega_0 \theta_0 \sin{\left(\omega_0 t \right)}\right)^{2}}{2} + g l m \left(1 - \cos{\left(\frac{\dot{\theta}_0 \sin{\left(\omega_0 t \right)}}{\omega_0} + \theta_0 \cos{\left(\omega_0 t \right)} \right)}\right)$
```python
def theta_exact(t, theta0, dtheta0, w0):
t = np.asarray(t)
return dtheta0 * np.sin(w0 * t) / w0 + theta0 * np.cos(w0 * t)
def total_energy_exact(t, theta0, dtheta0, w0, m, g, l, I):
t = np.asarray(t)
return I * (dtheta0 * np.cos(w0 * t) - w0 * theta0 * np.sin(w0 * t))**2 / 2 + m*g*l*(1-np.cos(dtheta0 * np.sin(w0 * t) / w0 + theta0 * np.cos(w0 * t)))
```
## Parameters
```python
m = 1.0
g = 9.81
l = 1.0
I = m*l**2
c = 0.0
F0 = 0.0
A = F0/I
zeta = c/(2*np.sqrt(m*g*l*I)) # Damping ratio
w0 = np.sqrt(m*g*l/I)
wd = 1.0
theta0 = np.pi/2.0
dtheta0 = 0.0
ti = 0
tf = 10
dt = 0.001
t = np.arange(ti, tf, dt)
state0 = np.asarray([theta0, dtheta0])
```
## Calculate Trajectories
```python
## Curried differential equation
df = partial(df_linear_pendula, zeta=zeta, w0=w0, A=A, wd=wd)
df_simplectic = partial(df_linear_pendula_simplectic, zeta=zeta, w0=w0, A=A, wd=wd)
## Solutions
t, pendula_euler = dsolve(df, t, state0, step=euler_step)
t, pendula_rk2 = dsolve(df, t, state0, step=rk2_step)
t, *pendula_euler_cromer = dsolve_simplectic(df_simplectic, t, theta0, dtheta0)
## Energies
pendula_euler_energy = get_total_energy(m, I, l, g, *pendula_euler)
pendula_rk2_energy = get_total_energy(m, I, l, g, *pendula_rk2)
pendula_euler_cromer_energy = get_total_energy(m, I, l, g, *pendula_euler_cromer)
theta_analytic = theta_exact(t, theta0, dtheta0, w0)
total_energy_analytic = total_energy_exact(t, theta0, dtheta0, w0, m, g, l, I)
```
## Plotting
```python
fig, ax = plt.subplots(3, 2, figsize=(16, 9), constrained_layout=True)
ax[0,0].plot(t, pendula_euler[0], label='Euler Method')
ax[0,0].plot(t, pendula_rk2[0], label='RK2 Method')
ax[0,0].plot(t, pendula_euler_cromer[0], label='Euler-Cromer Method')
ax[0,0].set_xlabel(r't [s]')
ax[0,0].set_ylabel(r'$\theta$ [rad]')
ax[0,0].set_title(r'$\theta$ vs Time')
ax[0,0].grid()
ax[0,0].legend()
ax[0,1].plot(t, pendula_euler_energy, label='Euler Method')
ax[0,1].plot(t, pendula_rk2_energy,label='RK2 Method')
ax[0,1].plot(t, pendula_euler_cromer_energy, label='Euler-Cromer Method')
ax[0,1].set_xlabel(r't [s]')
ax[0,1].set_ylabel(r'$E$ [J]')
ax[0,1].set_title('Total Energy vs Time')
ax[0,1].grid()
ax[0,1].legend()
ax[1,0].plot(t, local_error(theta_analytic, pendula_euler[0], t), label='Euler Method')
ax[1,0].plot(t, local_error(theta_analytic, pendula_rk2[0], t), label='RK2 Method')
ax[1,0].plot(t, local_error(theta_analytic, pendula_euler_cromer[0], t), label='Euler-Cromer Method')
ax[1,0].set_xlabel(r't [s]')
ax[1,0].set_ylabel(r'$\theta$ [rad]')
ax[1,0].set_title(r'$\theta$ Local Error')
ax[1,0].grid()
ax[1,0].legend()
ax[1,1].plot(t, local_error(total_energy_analytic, pendula_euler_energy, t), label='Euler Method')
ax[1,1].plot(t, local_error(total_energy_analytic, pendula_rk2_energy, t),label='RK2 Method')
ax[1,1].plot(t, local_error(total_energy_analytic, pendula_euler_cromer_energy, t), label='Euler-Cromer Method')
ax[1,1].set_xlabel(r't [s]')
ax[1,1].set_ylabel(r'$E$ [J]')
ax[1,1].set_title('Total Energy Local Error')
ax[1,1].grid()
ax[1,1].legend()
ax[2,0].plot(t, global_error(theta_analytic, pendula_euler[0]), label='Euler Method')
ax[2,0].plot(t, global_error(theta_analytic, pendula_rk2[0]), label='RK2 Method')
ax[2,0].plot(t, global_error(theta_analytic, pendula_euler_cromer[0]), label='Euler-Cromer Method')
ax[2,0].set_xlabel(r't [s]')
ax[2,0].set_ylabel(r'$\theta$ [rad]')
ax[2,0].set_title(r'$\theta$ Global Error')
ax[2,0].grid()
ax[2,0].legend()
ax[2,1].plot(t, global_error(total_energy_analytic, pendula_euler_energy), label='Euler Method')
ax[2,1].plot(t, global_error(total_energy_analytic, pendula_rk2_energy),label='RK2 Method')
ax[2,1].plot(t, global_error(total_energy_analytic, pendula_euler_cromer_energy), label='Euler-Cromer Method')
ax[2,1].set_xlabel(r't [s]')
ax[2,1].set_ylabel(r'$E$ [J]')
ax[2,1].set_title('Total Energy Global Error')
ax[2,1].grid()
ax[2,1].legend()
plt.show()
```
## Repeat With Different Initial Conditions
```python
theta0 = 0.0
dtheta0 = np.pi/2.0
state0 = np.asarray([theta0, dtheta0])
```
```python
## Curried differential equation
df = partial(df_linear_pendula, zeta=zeta, w0=w0, A=A, wd=wd)
df_simplectic = partial(df_linear_pendula_simplectic, zeta=zeta, w0=w0, A=A, wd=wd)
## Solutions
t, pendula_euler = dsolve(df, t, state0, step=euler_step)
t, pendula_rk2 = dsolve(df, t, state0, step=rk2_step)
t, *pendula_euler_cromer = dsolve_simplectic(df_simplectic, t, theta0, dtheta0)
## Energies
pendula_euler_energy = get_total_energy(m, I, l, g, *pendula_euler)
pendula_rk2_energy = get_total_energy(m, I, l, g, *pendula_rk2)
pendula_euler_cromer_energy = get_total_energy(m, I, l, g, *pendula_euler_cromer)
theta_analytic = theta_exact(t, theta0, dtheta0, w0)
total_energy_analytic = total_energy_exact(t, theta0, dtheta0, w0, m, g, l, I)
```
```python
fig, ax = plt.subplots(3, 2, figsize=(16, 9), constrained_layout=True)
ax[0,0].plot(t, pendula_euler[0], label='Euler Method')
ax[0,0].plot(t, pendula_rk2[0], label='RK2 Method')
ax[0,0].plot(t, pendula_euler_cromer[0], label='Euler-Cromer Method')
ax[0,0].set_xlabel(r't [s]')
ax[0,0].set_ylabel(r'$\theta$ [rad]')
ax[0,0].set_title(r'$\theta$ vs Time')
ax[0,0].grid()
ax[0,0].legend()
ax[0,1].plot(t, pendula_euler_energy, label='Euler Method')
ax[0,1].plot(t, pendula_rk2_energy,label='RK2 Method')
ax[0,1].plot(t, pendula_euler_cromer_energy, label='Euler-Cromer Method')
ax[0,1].set_xlabel(r't [s]')
ax[0,1].set_ylabel(r'$E$ [J]')
ax[0,1].set_title('Total Energy vs Time')
ax[0,1].grid()
ax[0,1].legend()
ax[1,0].plot(t, local_error(theta_analytic, pendula_euler[0], t), label='Euler Method')
ax[1,0].plot(t, local_error(theta_analytic, pendula_rk2[0], t), label='RK2 Method')
ax[1,0].plot(t, local_error(theta_analytic, pendula_euler_cromer[0], t), label='Euler-Cromer Method')
ax[1,0].set_xlabel(r't [s]')
ax[1,0].set_ylabel(r'$\theta$ [rad]')
ax[1,0].set_title('Theta Local Error')
ax[1,0].grid()
ax[1,0].legend()
ax[1,1].plot(t, local_error(total_energy_analytic, pendula_euler_energy, t), label='Euler Method')
ax[1,1].plot(t, local_error(total_energy_analytic, pendula_rk2_energy, t),label='RK2 Method')
ax[1,1].plot(t, local_error(total_energy_analytic, pendula_euler_cromer_energy, t), label='Euler-Cromer Method')
ax[1,1].set_xlabel(r't [s]')
ax[1,1].set_ylabel(r'$E$ [J]')
ax[1,1].set_title('Total Energy Local Error')
ax[1,1].grid()
ax[1,1].legend()
ax[2,0].plot(t, global_error(theta_analytic, pendula_euler[0]), label='Euler Method')
ax[2,0].plot(t, global_error(theta_analytic, pendula_rk2[0]), label='RK2 Method')
ax[2,0].plot(t, global_error(theta_analytic, pendula_euler_cromer[0]), label='Euler-Cromer Method')
ax[2,0].set_xlabel(r't [s]')
ax[2,0].set_ylabel(r'$\theta$ [rad]')
ax[2,0].set_title('Theta Global Error')
ax[2,0].grid()
ax[2,0].legend()
ax[2,1].plot(t, global_error(total_energy_analytic, pendula_euler_energy), label='Euler Method')
ax[2,1].plot(t, global_error(total_energy_analytic, pendula_rk2_energy),label='RK2 Method')
ax[2,1].plot(t, global_error(total_energy_analytic, pendula_euler_cromer_energy), label='Euler-Cromer Method')
ax[2,1].set_xlabel(r't [s]')
ax[2,1].set_ylabel(r'$E$ [J]')
ax[2,1].set_title('Total Energy Global Error')
ax[2,1].grid()
ax[2,1].legend()
plt.show()
```
# Number 2
## Parameters
```python
m = 1.0
g = 9.81
l = 1.0
I = m*l**2
c1 = 2*np.sqrt(m*g*l*I) / 10
c2 = 2*np.sqrt(m*g*l*I)
c3 = 2*np.sqrt(m*g*l*I) * 10
F0 = 1.0
A = F0/I
zeta1 = c1/(2*np.sqrt(m*g*l*I)) # Damping ratio
zeta2 = c2/(2*np.sqrt(m*g*l*I)) # Damping ratio
zeta3 = c3/(2*np.sqrt(m*g*l*I)) # Damping ratio
w0 = np.sqrt(m*g*l/I)
wd = 1.0
ti = 0
tf = 50
dt = 0.001
t = np.arange(ti, tf, dt)
state0 = np.asarray([-np.pi / 2.0, np.pi / 2.0])
```
## Calculate Trajectories
```python
## Curried differential equation
df1_simplectic = partial(df_linear_pendula_simplectic, zeta=zeta1, w0=w0, A=A, wd=wd)
df2_simplectic = partial(df_linear_pendula_simplectic, zeta=zeta2, w0=w0, A=A, wd=wd)
df3_simplectic = partial(df_linear_pendula_simplectic, zeta=zeta3, w0=w0, A=A, wd=wd)
## Solutions
t, *pendula_euler_cromer_1 = dsolve_simplectic(df1_simplectic, t, state0[0], state0[1])
t, *pendula_euler_cromer_2 = dsolve_simplectic(df2_simplectic, t, state0[0], state0[1])
t, *pendula_euler_cromer_3 = dsolve_simplectic(df3_simplectic, t, state0[0], state0[1])
```
## Plotting
```python
fig, ax = plt.subplots(2, 3, figsize=(16, 9), constrained_layout=True)
plt.suptitle(r'Euler-Cromer Method, Initial Conditions: $\psi_0=\left<-\frac{\pi}{2},\frac{\pi}{2}\right>$')
ax[0,0].plot(t, pendula_euler_cromer_1[0])
ax[0,0].set_xlabel(r't [s]')
ax[0,0].set_ylabel(r'$\theta$ [rad]')
ax[0,0].set_title(r'Underdamped')
ax[0,0].grid()
ax[0,1].plot(t, pendula_euler_cromer_2[0])
ax[0,1].set_xlabel(r't [s]')
ax[0,1].set_ylabel(r'$\theta$ [rad]')
ax[0,1].set_title(r'Critically Damped')
ax[0,1].grid()
ax[0,2].plot(t, pendula_euler_cromer_3[0])
ax[0,2].set_xlabel(r't [s]')
ax[0,2].set_ylabel(r'$\theta$ [rad]')
ax[0,2].set_title(r'Overdamped')
ax[0,2].grid()
ax[1,0].plot(*pendula_euler_cromer_1)
ax[1,0].set_xlabel(r'$\theta$ [rad]')
ax[1,0].set_ylabel(r'$\dot{\theta}$ [rad]/[s]')
ax[1,0].grid()
ax[1,1].plot(*pendula_euler_cromer_2)
ax[1,1].set_xlabel(r'$\theta$ [rad]')
ax[1,1].set_ylabel(r'$\dot{\theta}$ [rad]/[s]')
ax[1,1].grid()
ax[1,2].plot(*pendula_euler_cromer_3)
ax[1,2].set_xlabel(r'$\theta$ [rad]')
ax[1,2].set_ylabel(r'$\dot{\theta}$ [rad]/[s]')
ax[1,2].grid()
plt.show()
```
# Number 3
$$I\ddot{\theta}=-mgl\sin\left(\theta\right)\rightarrow\ddot{\theta}=-\frac{g}{l}\sin\left(\theta\right)\rightarrow\ddot{\theta}=-\omega_0^2\sin\left(\theta\right)$$
$$T=4\sqrt{\frac{l}{g}}K\left(\sin\left(\frac{\theta_m}{2}\right)\right)=\frac{4}{\omega_0}K\left(\sin\left(\frac{\theta_m}{2}\right)\right)$$
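Note that `scipy.special.ellipk` expects the *parameter* $m=k^2$ rather than the modulus $k$, so a small sketch of evaluating the exact period for an amplitude $\theta_m$ would be:
```python
from scipy.special import ellipk
import numpy as np

def exact_period(w0, theta_m):
    k = np.sin(theta_m / 2)          # modulus
    return (4 / w0) * ellipk(k**2)   # scipy's ellipk takes m = k**2
```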
## Parameters
```python
w0 = np.linspace(0,3*np.pi,500)
ti = 0
tf = 50
dt = 0.001
t = np.arange(ti, tf, dt)
state0 = np.asarray([-np.pi / 2.0, np.pi / 2.0])
```
## Functions
```python
def df(t, x, dx, w0):
return - w0 ** 2 * np.sin(x)
def get_period(t, x):
peak_indices = find_peaks(x.flatten())[0]
times = [t[i] for i in peak_indices]
diffs = np.ediff1d(times)
return np.mean(diffs)
def get_amplitude(x):
peak_indices = find_peaks(x.flatten())[0]
amps = [x[i] for i in peak_indices]
return np.mean(amps)
```
## Part A: Amplitude vs. Period
```python
amps = []
from tqdm import tqdm
for _,w in enumerate(tqdm(w0)):
df_1 = partial(df, w0=w)
t, *soln = dsolve_simplectic(df_1, t, state0[0], state0[1])
theta_m = get_amplitude(soln[0])
amps.append(theta_m)
```
0%| | 0/500 [00:00<?, ?it/s]C:\Users\ethan\anaconda3\lib\site-packages\numpy\core\fromnumeric.py:3334: RuntimeWarning: Mean of empty slice.
return _methods._mean(a, axis=axis, dtype=dtype,
C:\Users\ethan\anaconda3\lib\site-packages\numpy\core\_methods.py:161: RuntimeWarning: invalid value encountered in double_scalars
ret = ret.dtype.type(ret / rcount)
100%|██████████| 500/500 [10:24<00:00, 1.25s/it]
```python
fig = plt.figure(figsize=(16, 9))
ax = plt.axes()
ax.plot(w0**(-1),amps)
ax.set_xlabel('Period [s]')
ax.set_ylabel('Amplitude [rad]')
ax.set_title('Effect of Oscillation Period On Amplitude')
ax.grid()
plt.show()
```
## Part B: Period Accuracy
```python
ti = 0
tf = 150
dt = 0.001
t = np.arange(ti, tf, dt)
state0 = np.asarray([np.pi / 2.0, -np.pi/8])
w01 = 0.0885*np.pi
w02 = 0.09*np.pi
w03 = 0.2*np.pi
## Curried differential equation
df_1 = partial(df, w0=w01)
df_2 = partial(df, w0=w02)
df_3 = partial(df, w0=w03)
## Solutions
t, *soln1 = dsolve_simplectic(df_1, t, state0[0], state0[1])
t, *soln2 = dsolve_simplectic(df_2, t, state0[0], state0[1])
t, *soln3 = dsolve_simplectic(df_3, t, state0[0], state0[1])
theta_m1 = get_amplitude(soln1[0])
theta_m2 = get_amplitude(soln2[0])
theta_m3 = get_amplitude(soln3[0])
T_exact1 = (4/w01)*ellipk(np.sin(theta_m1/2))
T_exact2 = (4/w02)*ellipk(np.sin(theta_m2/2))
T_exact3 = (4/w03)*ellipk(np.sin(theta_m3/2))
T_approx1 = get_period(t, soln1[0])
T_approx2 = get_period(t, soln2[0])
T_approx3 = get_period(t, soln3[0])
print(f'Exact Period | Approx. Period | % Error ')
print(f' {T_exact1:0.4f} s | {T_approx1:0.4f} s | {100*(T_approx1-T_exact1)/T_exact1:0.4f}%')
print(f' {T_exact2:0.4f} s | {T_approx2:0.4f} s | {100*(T_approx2-T_exact2)/T_exact2:0.4f}%')
print(f' {T_exact3:0.4f} s | {T_approx3:0.4f} s | {100*(T_approx3-T_exact3)/T_exact3:0.4f}%')
```
Exact Period | Approx. Period | % Error
73.5437 s | 68.5660 s | -6.7683%
53.1268 s | 48.3200 s | -9.0478%
14.0068 s | 12.3960 s | -11.4999%
## Plotting
```python
fig, ax = plt.subplots(2, 3, figsize=(16, 9), constrained_layout=True)
plt.suptitle(r'Nonlinear Pendulum, Euler-Cromer Method, Initial Conditions: $\psi_0=\left<\frac{\pi}{2},-\frac{\pi}{8}\right>$')
ax[0,0].plot(t, soln1[0])
ax[0,0].set_xlabel(r't [s]')
ax[0,0].set_ylabel(r'$\theta$ [rad]')
ax[0,0].set_title(rf'$\omega_0={w01:0.4f}$')
ax[0,0].grid()
ax[0,1].plot(t, soln2[0])
ax[0,1].set_xlabel(r't [s]')
ax[0,1].set_ylabel(r'$\theta$ [rad]')
ax[0,1].set_title(rf'$\omega_0={w02:0.4f}$')
ax[0,1].grid()
ax[0,2].plot(t, soln3[0])
ax[0,2].set_xlabel(r't [s]')
ax[0,2].set_ylabel(r'$\theta$ [rad]')
ax[0,2].set_title(rf'$\omega_0={w03:0.4f}$')
ax[0,2].grid()
ax[1,0].plot(*soln1)
ax[1,0].set_xlabel(r'$\theta$ [rad]')
ax[1,0].set_ylabel(r'$\dot{\theta}$ [rad]/[s]')
ax[1,0].grid()
ax[1,1].plot(*soln2)
ax[1,1].set_xlabel(r'$\theta$ [rad]')
ax[1,1].set_ylabel(r'$\dot{\theta}$ [rad]/[s]')
ax[1,1].grid()
ax[1,2].plot(*soln3)
ax[1,2].set_xlabel(r'$\theta$ [rad]')
ax[1,2].set_ylabel(r'$\dot{\theta}$ [rad]/[s]')
ax[1,2].grid()
plt.show()
```
```python
```
```python
```
|
815a0d3091b871a87090696ac2f4a7189e2be03b
| 747,657 |
ipynb
|
Jupyter Notebook
|
Labs/Lab03/Lab3out.ipynb
|
ethank5149/PurduePHYS580
|
54d5d75737aa0d31ed723dd0e79c98dc01e71ca7
|
[
"MIT"
] | null | null | null |
Labs/Lab03/Lab3out.ipynb
|
ethank5149/PurduePHYS580
|
54d5d75737aa0d31ed723dd0e79c98dc01e71ca7
|
[
"MIT"
] | null | null | null |
Labs/Lab03/Lab3out.ipynb
|
ethank5149/PurduePHYS580
|
54d5d75737aa0d31ed723dd0e79c98dc01e71ca7
|
[
"MIT"
] | null | null | null | 651.269164 | 222,368 | 0.947459 | true | 6,874 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.859664 | 0.863392 | 0.742226 |
__label__eng_Latn
| 0.106986 | 0.562773 |
# Exercise 1.1: Compute 1+1
```python
a = 1 + 1
print(a)
```
2
# Exercise 1.2: Write a Hello World program
```python
print("Hello, World!")
```
Hello, World!
# Exercise 1.3: Derive and compute a formula
```python
from sympy import Symbol
t = Symbol('t') # Symbol: time [s]
t = t / 60 / 60 / 24 / 365.24225 # Unit conversion: t: [s] -> [a]
# Result
result = t.subs('t', 1e9)
print(f"10^9 seconds are {result:.3g} years!")
```
10^9 seconds are 31.7 years!
# Exercise 1.4: Convert from meters to British length units
```python
x = 640 # length [m]
print(f"""
{x} m in British units are:
{x * 100 / 2.54 :.6g} inches or
{x * 100 / 2.54 / 12 :.6g} feet or
{x * 100 / 2.54 / 12 / 3 :.6g} yards or
{x * 100 / 2.54 / 12 / 3 / 1760 :.6g} miles
""")
```
640 m in British units are:
25196.9 inches or
2099.74 feet or
699.913 yards or
0.397678 miles
# Exercise 1.5: Compute the mass of various substances
```python
from sympy import Symbol, solve
# Constants and symbols
V = 1 # volume [dm^3]
rho = Symbol('rho') # density [g / cm^3]
m = Symbol('m') # mass [kg]
# Unit conversions:
V = V / 1000 # V: [dm^3 -> m^3]
rho = rho * 1000 # rho: [g / cm^3 -> kg / m^3]
e = m/V - rho
m = solve(e, m)[0]
print(f"One liter of air has a mass of {m.subs('rho', 0.0012):.3g} kg")
print(f"One liter of gasoline has a mass of {m.subs('rho', 0.67):.3g} kg")
print("...")
print(f"One liter of protons has a mass of {m.subs('rho', 2.3e14):.3g} kg")
```
One liter of air has a mass of 0.00120 kg
One liter of gasoline has a mass of 0.670 kg
...
One liter of protons has a mass of 2.30e+14 kg
# Exercise 1.6: Compute the growth of money in a bank
```python
# Constants
A = 1000 # initial amount [€]
p = 5.0 # interest rate [% / a]
n = 3 # years
result = A * (1 + p/100)**n
print(f"The initial {A}€ will have turned into {result:.6g}€ after {n} years.")
```
The initial 1000€ will have turned into 1157.63€ after 3 years.
# Exercise 1.7: Find error(s) in a program
```python
# Faulty Program:
"""
x=1; print("sin(%g)=%g" % (x, sin(x)))
"""
# Correct Program:
from math import sin
x=1; print("sin(%g)=%g" % (x, sin(x)))
```
sin(1)=0.841471
# Exercise 1.8: Type in program text
```python
from math import pi
h = 5.0 # height
b = 2.0 # base
r = 1.5 # radius
area_parallelogram = h * b
print("The area of the parallelogram is %.3f" % area_parallelogram)
area_square = b ** 2
print("The area of the square is %g" % area_square)
area_circle = pi * r**2
print("The area of the circle is %.3f" % area_circle)
volume_cone = 1.0 / 3 * pi * r**2 * h
print("The volume of the cone is %.3f" % volume_cone)
```
The area of the parallelogram is 10.000
The area of the square is 4
The area of the circle is 7.069
The volume of the cone is 11.781
# Exercise 1.9: Type in programs and debug them
```python
from math import sin, cos, pi
x = pi/4
val = sin(x)**2 + cos(x)**2
print(val)
```
1.0
```python
v0 = 3
t=1
a = 2
s = v0*t + (0.5 * a * t**2)
print(s)
```
4.0
```python
a = 3.3; b = 5.3
a2 = a**2
b2 = b**2
eq1_sum = a2 + 2*a*b + b2
eq2_sum = a2 - 2*a*b + b2
eq1_pow = (a + b)**2
eq2_pow = (a - b)**2
print("First equation: %g = %g" % (eq1_sum, eq1_pow))
print("Second equation: %g = %g" % (eq2_pow, eq2_pow))
```
First equation: 73.96 = 73.96
Second equation: 4 = 4
# Exercise 1.10: Evaluate a Gaussian function
```python
from math import sqrt, pi, exp
m = 0 # mean
s = 2 # standard deviation
x = 1 # independent variable
f = 1 / sqrt(2 * pi) / s * exp(-1 / 2 * ((x - m) / s)**2)
print(f)
```
0.17603266338214976
# Exercise 1.11: Compute the air resistance on a football
```python
from math import pi
# Constants
CD = 0.4 # drag coefficient [1]
rho = 1.2 # air density [kg / m^3]
a = 11e-2 # radius of the ball [m]
m = 0.43 # mass of the ball [kg]
g = 9.81 # acceleration of gravity [m / s^2]
V1 = 120 # (1) velocity of the ball [km / h]
V2 = 30 # (2) velocity of the ball [km / h]
# Unit conversions
V1_ = V1 * 1000 / 60 / 60 # V1: [km / h] -> [m / s]
V2_ = V2 * 1000 / 60 / 60 # V2: [km / h] -> [m / s]
# Calculations
A = pi * a**2 # area of the ball [m^2]
Fg = m * g # force of gravity [N]
Fd1 = 1 / 2 * CD * rho * A * V1_**2 # drag force on the ball (with V1)
Fd2 = 1 / 2 * CD * rho * A * V2_**2 # drag force on the ball (with V2)
print(f"""
Force of gravity on the ball: {Fg:.1f} N
Drag Force on the ball (V = {V1} km / h): {Fd1:.1f} N
Drag Force on the ball (V = {V2} km / h): {Fd2:.1f} N
""")
```
Force of gravity on the ball: 4.2 N
Drag Force on the ball (V = 120 km / h): 10.1 N
Drag Force on the ball (V = 30 km / h): 0.6 N
# Exercise 1.12: How to cook the perfect egg
```python
from sympy import Symbol, log, lambdify
from math import pi
from numpy import *
# Constants and symbols
Ty = 70 # maximum temperature of the yolk [°C]
Tw = 100 # temperature of the boiling water [°C]
M = 67e-3 # mass of the egg [kg]
rho = 1.038e3 # density of the egg [kg / m^3]
c = 3.7e3 # specific heat capacity of the egg [J / kg / K]
K = 0.54 # thermal conductivity [W / K]
T0 = Symbol('T0') # initial temperature of the egg [°C] (Symbol)
T01 = 4 # initial temperature of the egg when taken from the fridge [°C]
T02 = 20 # initial temperature of the egg at room temperature [°C]
t = M**(2 / 3) * c * rho**(1 / 3) / K / pi**2 / (4 * pi / 3)**(2 / 3) \
* log(0.76 * (T0 - Tw) / (Ty - Tw)) # time to boil the egg (t(T0))
t = lambdify((T0), t, dummify = False)
t1 = t(T0 = T01) # time to boil the egg taken from the fridge
t2 = t(T0 = T02) # time to boil the egg at room temperature
print(f"""
Time to boil the egg from the fridge: {t1:.3g} s
Time to boil the egg at room temperature: {t2:.3g} s
""")
```
Time to boil the egg from the fridge: 397 s
Time to boil the egg at room temperature: 315 s
# Exercise 1.13: Derive the trajectory of a ball
Prerequisites:
$$
\begin{align*}
a_x &= \frac{d^2x}{dt^2}\\
a_y &= \frac{d^2y}{dt^2}\\
\\
\frac{d}{dt}x(0) &= v_0 \cos\theta\\
\frac{d}{dt}y(0) &= v_0 \sin\theta\\
x(0) &= 0\\
y(0) &= y_0\\~\\
\end{align*}
$$
Calculations for $x(t)$ and $y(t)$:
$$
\begin{align*}
\int\int a_x &= \frac{1}{2} a_x t^2 + v_0 \cos\theta \dot~ t + x_0\\
&= v_0 \cos\theta \dot~ t\\
&= x(t)\\
\int\int a_y &= \frac{1}{2} a_y t^2 + v_0 \sin\theta \dot~ t + y_0\\
&= \frac{-1}{2} g t^2 + v_0 \sin\theta \dot~ t + y_0\\
&= y(t)\\~\\
\end{align*}
$$
$\theta = \frac{\pi}{2}$:
$$
\begin{align*}
x(t) &= v_0 \cos\theta \dot~ t\\
&= 0\\
y(t) &= \frac{-1}{2} g t^2 + v_0 \sin\theta \dot~ t + y_0\\
&= \frac{-1}{2} g t^2 + v_0 t + y_0\\~\\
\end{align*}
$$
Elimination of $t$:
$$
\begin{align*}
&~ &x(t) &= v_0 \cos\theta \dot~ t\\
&\Leftrightarrow &t(x) &= \frac{x}{v_0 \cos\theta}\\
&\Rightarrow &y(x) &= \frac{-1}{2} g \frac{x^2}{v_0^2 \cos^2\theta} + v_0 \sin\theta \frac{x}{v_0 \cos\theta} + y_0\\
&&&= x \tan{\theta} - \frac{1}{2 v_0^2} \frac{g x^2}{\cos^2{\theta}} + y_0
\end{align*}
$$
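A quick optional check of the elimination step with SymPy (not part of the original exercise):
```python
from sympy import symbols, cos, sin, tan, solve, simplify

t, v0, theta, g, y0, x = symbols('t v0 theta g y0 x', positive=True)
x_t = v0 * cos(theta) * t
y_t = -g * t**2 / 2 + v0 * sin(theta) * t + y0

t_of_x = solve(x_t - x, t)[0]              # t expressed through x
y_of_x = simplify(y_t.subs(t, t_of_x))     # substitute into y(t)
expected = x * tan(theta) - g * x**2 / (2 * v0**2 * cos(theta)**2) + y0
print(simplify(y_of_x - expected))         # 0
```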
# Exercise 1.14: Find errors in the coding of formulas
Correct formula:
$$
F = \frac{9}{5}C + 32
$$
```python
# Adapted for Python 3
C = 21; F = 9/5*C + 32; print(F) # works
C = 21.0; F = (9/5)*C + 32; print(F) # works
C = 21.0; F = 9*C/5 + 32; print(F) # works
C = 21.0; F = 9.*(C/5.0) + 32; print(F) # works
C = 21.0; F = 9.0*C/5.0 + 32; print(F) # works
C = 21; F = 9*C/5 + 32; print(F) # works
C = 21; F = (1/5)*9*C + 32; print(F) # works
C = 21; F = (1./5)*9*C + 32; print(F) # works
# yeah, Python 3!
# Note for Python 2: watch out for integer divisions!
```
69.80000000000001
69.80000000000001
69.8
69.80000000000001
69.8
69.8
69.80000000000001
69.80000000000001
# Exercise 1.15: Explain why a program does not work
```python
# Code (illegal):
"""
C = A + B
A = 3
B = 2
print(C)
"""
# Code (correct):
A = 3
B = 2
C = A + B
print(C)
# Objects can only be used after they have been defined!
```
5
# Exercise 1.16: Find errors in Python statements
```python
# 1a = 2
# Variable names cannot start with a number.
a1 = 2
# a1 = b
# b is not defined.
a1 = 3
x = 2
# y = X + 4
# python differentiates between upper- and lowercase
y = x + 4
# from Math import tan
# the module is named math (lowercase)
from math import tan
# print(tan(pi))
# pi also has to be imported.
from math import pi
print(tan(pi))
# pi = "3.14159’
# strings can be defined using single or double quotes, but not both for the same string.
pi = "3.14159"
# print(tan(pi))
# pi is now a string. firs, it has to be converted to float.
print(tan(float(pi)))
c = 4**3**2**3
# _ = ((c-78564)/c + 32))
# a number cannot be assigned anything. it only can be assigned to a variable. also, there is an extra parenthesis
d = ((c-78564)/c + 32)
# discount = 12%
# the percent sign is not defined as such in python.
discount = 12e-2
# AMOUNT = 120.-
# same goes for that .- thingy.
AMOUNT = 120.0
# amount = 120$
# again for the dollar sign
amount = 120 # [$]
# address = hpl@simula.no
# that is not a valid string.
address = "hpl@simula.no"
# and = duck
# much wrong here. a variable cannot be named "and", because that is a reserved name in python.
# also, duck is not defined / not a valid string.
and_ = "duck"
# class = ’INF1100, gr 2"
# wow. ok, class is again reserved, and you still can't use different quotes to define one string
class_ = "INF1100, gr 2"
continue_ = x > 0
rev = fox = True
Norwegian = ["a human language"]
true = fox is rev in Norwegian
```
-1.2246467991473532e-16
-2.653589793362073e-06
# Exercise 1.17: Find errors in the coding of a formula
```python
# Code (illegal)
"""
a = 2; b = 1; c = 2
from math import sqrt
q = b*b - 4*a*c
q_sr = sqrt(q)
x1 = (-b + q_sr)/2*a
x2 = (-b - q_sr)/2*a
print(x1, x2)
"""
# Code (correct)
a = 2; b = 1; c = 2
from cmath import sqrt
q = b*b - 4*a*c
q_sr = sqrt(q)
x1 = (-b + q_sr)/(2*a)
x2 = (-b - q_sr)/(2*a)
print(x1, x2)
```
    (-0.25+0.9682458365518542j) (-0.25-0.9682458365518542j)
# Exercise 1.18: Find errors in a program
```python
# Code (wrong)
"""
from math import pi, tan
tan = tan(pi/4)
tan2 = tan(pi/3)
print(tan, tan2)
"""
# Code (correct)
from math import pi, tan
tan1 = tan(pi/4)
tan2 = tan(pi/3)
print(tan1, tan2)
```
0.9999999999999999 1.7320508075688767
|
874b98b064f30bb76965d1ff309ee7638a97eac6
| 19,551 |
ipynb
|
Jupyter Notebook
|
Exercises 1.ipynb
|
onnoeberhard/scipro-primer-notebooks
|
e9cf1069d62f8c50549d9912403c640f08a99392
|
[
"MIT"
] | 16 |
2019-04-18T13:35:42.000Z
|
2021-09-05T21:01:06.000Z
|
Exercises 1.ipynb
|
onnoeberhard/scipro-primer-notebooks
|
e9cf1069d62f8c50549d9912403c640f08a99392
|
[
"MIT"
] | 1 |
2021-02-15T16:26:00.000Z
|
2021-02-15T16:26:00.000Z
|
Exercises 1.ipynb
|
onnoeberhard/scipro-primer-notebooks
|
e9cf1069d62f8c50549d9912403c640f08a99392
|
[
"MIT"
] | 17 |
2018-09-30T07:35:00.000Z
|
2021-08-28T19:15:09.000Z
| 23.871795 | 138 | 0.44944 | true | 4,090 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.90599 | 0.76908 | 0.696779 |
__label__eng_Latn
| 0.881366 | 0.457182 |
# Lists
In practice, calculations often have to be carried out not only for a single value but for several values of the same kind. An apartment can serve as an example: the exhaust-air volume flow has to be computed for each individual exhaust room, and several further calculations depend on it as well.
Assume that a bathroom ($45\frac{m^3}{h}$), two WCs ($25\frac{m^3}{h}$ each), a storage room ($25\frac{m^3}{h}$) and a sauna room ($100\frac{m^3}{h}$) are to be supplied with exhaust air.
The required calculations then have to be carried out for every single room. Lists are used for this:
`# list of rooms` <br>
`raum = ['Bad','WC_1','WC_2','Abstellkammer', 'Saunaraum']`
`# list of exhaust-air volume flows per room` <br>
`dV_ab = [45,25,25,25,100]`
The calculations can now be carried out with the list:
`dV_ges_ab = sum(dV_ab)`
# Example
For the values given above, compute the total volume flow `dV_ges_ab` and determine what percentage of the exhaust air is routed to each room.
# Solution
First, the lists are created.
Then the sum of all list entries is computed and displayed:
```python
raum = ['Bad','WC_1','WC_2','Abstellkammer', 'Saunaraum']
dV_ab = [45,25,25,25,100]
dV_ges_ab = sum(dV_ab)
dV_ges_ab
```
220
To compute the percentages, a new list has to be built. A `for` loop is often used for this.
In principle, the approach looks like this:
```
# initialize a new list
neue_liste = []
# loop to compute the new values
for wert in alte_liste:
    neuer_wert = ...
    neue_liste.append(neuer_wert)
# display the result
neue_liste
```
```python
# create an empty list (initialize)
dV_prozent = []
# loop over all elements of dV_ab
for dV in dV_ab:
    # compute the percentage for this dV
    prozent = dV/dV_ges_ab * 100
    # append the computed value
    # to the list dV_prozent
    dV_prozent.append(prozent)
# display the result
dV_prozent
```
[20.454545454545457,
11.363636363636363,
11.363636363636363,
11.363636363636363,
45.45454545454545]
Often the computation rule is very simple. In that case it is easier to use a *list comprehension* for the same calculation:
`neue_liste = [Berechnungs_vorschrift(wert) for wert in alte_liste]`
Such list comprehensions are often easy to read, and they can usually be evaluated considerably faster than a `for` loop.
```python
dV_prozent = [dV/dV_ges_ab * 100 for dV in dV_ab]
dV_prozent # percentage distribution
```
[20.454545454545457,
11.363636363636363,
11.363636363636363,
11.363636363636363,
45.45454545454545]
The values in the list `dV_prozent` must add up to 100%:
```python
sum(dV_prozent) # total exhaust-air volume flow in percent
```
100.0
For displaying the result, the so-called zipper is often used. This is done with the function
`zip(liste_1, liste_2, ...)`
```python
list(zip(raum,dV_prozent)) # percentage distribution of the exhaust air over the rooms
```
[('Bad', 20.454545454545457),
('WC_1', 11.363636363636363),
('WC_2', 11.363636363636363),
('Abstellkammer', 11.363636363636363),
('Saunaraum', 45.45454545454545)]
# Task
Compute the list of required connection diameters `d_erf # in mm` from the given exhaust-air volume flows in the list `dV_ab # in m**3/h`, using the formula
$$
d = \sqrt{\dfrac{4\,\dot V}{\pi\,v}}
$$
for each exhaust room. Assume a permissible flow velocity of `v=2 # m/s` in the ventilation ducts.
Note: do not forget the unit conversions $1h = 3600\,s$ and $1m=1000\,mm$!
```python
import math
v = 2 # m/s
```
```python
# Your solution starts here
d_erf = [math.sqrt(4*dV/(3600*math.pi*v))*1000 for dV in dV_ab]
d_erf # required diameters in mm
```
[89.20620580763855,
66.49038006690546,
66.49038006690546,
66.49038006690546,
132.98076013381092]
# Task
You can use your own functions to keep the structure of a list comprehension simple.
In the module `utilities.py` I have provided a function `ermittle_normdurchmesser(d)` that determines, for a given diameter `d` in mm, the next larger standard diameter. The list of standard diameters was taken from a manufacturer, see
https://www.msl-bauartikel.de/fileadmin/user_upload/produktbilder/Rohrsysteme/Lueftungsrohr/System_Lueftungsrohrde_eng_.pdf
For example, for `d=87 # mm` this gives
`ermittle_normdurchmesser(d)` <br>
`90`
Use the diameters you computed in the previous task to determine the corresponding standard diameters. Use a list comprehension for the calculation.
```python
from utilities import ermittle_normdurchmesser
# usage example
ermittle_normdurchmesser(87)
```
90
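`utilities.py` itself is not included here; a minimal sketch of what such a lookup function might look like (assumed behaviour: return the next larger standard diameter; the list below is truncated for brevity) is:
```python
def ermittle_normdurchmesser_sketch(d, normreihe=(80, 90, 100, 112, 125, 140, 150, 160, 180, 200, 224, 250)):
    # return the smallest standard diameter that is >= d
    for dn in normreihe:
        if dn >= d:
            return dn
    raise ValueError("no standard diameter large enough")

ermittle_normdurchmesser_sketch(87)   # -> 90
```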
```python
# Your solution starts here.
d_gew = [ermittle_normdurchmesser(d) for d in d_erf]
list(zip(raum,d_erf,d_gew))
```
[('Bad', 89.20620580763855, 90),
('WC_1', 66.49038006690546, 80),
('WC_2', 66.49038006690546, 80),
('Abstellkammer', 66.49038006690546, 80),
('Saunaraum', 132.98076013381092, 140)]
# Example
The list of standard diameters and wall thicknesses of ventilation ducts can be specified as follows:
```python
normdurchmesser = [
80,90,100,112,125,140,
150,160,180,200,224,250,
280,300,315,355,400,450,
500,560,600,630,710,800,
900,1000,1120,1250,1400,
1600,1800,2000
]
# to avoid having to type 32 values by hand here:
wandstaerken=2*[0.4]+12*[0.6]+7*[0.8]+4*[1.0]+3*[1.2]+4*[1.5]
```
```python
# You can display the result
# by evaluating the next line
list(zip(normdurchmesser,wandstaerken))
```
[(80, 0.4),
(90, 0.4),
(100, 0.6),
(112, 0.6),
(125, 0.6),
(140, 0.6),
(150, 0.6),
(160, 0.6),
(180, 0.6),
(200, 0.6),
(224, 0.6),
(250, 0.6),
(280, 0.6),
(300, 0.6),
(315, 0.8),
(355, 0.8),
(400, 0.8),
(450, 0.8),
(500, 0.8),
(560, 0.8),
(600, 0.8),
(630, 1.0),
(710, 1.0),
(800, 1.0),
(900, 1.0),
(1000, 1.2),
(1120, 1.2),
(1250, 1.2),
(1400, 1.5),
(1600, 1.5),
(1800, 1.5),
(2000, 1.5)]
Compute the weight per meter of spiral-seam ventilation duct. The density of steel is
$\varrho=7.85\,\frac{kg}{dm^3}$
The mass is computed with the formula
$$
m = \varrho\,V
$$
with the volume
\begin{align}
V &= \dfrac{\pi \left(d_a^2 - d_i^2\right)}{4}\,\ell \\[2ex]
&= \pi\,d_m\,s\,\ell
\end{align}
Use a `for` loop for the calculation.
```python
# Your solution starts here
l = 1e1 # length l in dm
rho = 7.85 # kg/dm**3
m_je_meter = []
for d_a,s in zip(normdurchmesser,wandstaerken):
    d_a = d_a*1e-2 # conversion from mm to dm
    s = s*1e-2 #
    d_i = d_a-2*s
    m = rho*math.pi*(d_a**2 - d_i**2)/4*l # mass in kg
    m_je_meter.append(m)
# results
list(zip(normdurchmesser,wandstaerken,m_je_meter))
```
[(80, 0.4, 0.7852222342088522),
(90, 0.4, 0.8838682435315676),
(100, 0.6, 1.4708119990017468),
(112, 0.6, 1.6483748157826348),
(125, 0.6, 1.840734533961952),
(140, 0.6, 2.062688054938074),
(150, 0.6, 2.2106570689221368),
(160, 0.6, 2.3586260829062273),
(180, 0.6, 2.6545641108743805),
(200, 0.6, 2.950502138842534),
(224, 0.6, 3.305627772404323),
(250, 0.6, 3.6903472087629305),
(280, 0.6, 4.134254250715174),
(300, 0.6, 4.4301922786833545),
(315, 0.8, 6.19891522583965),
(355, 0.8, 6.988083300421538),
(400, 0.8, 7.875897384325915),
(450, 0.8, 8.862357477553221),
(500, 0.8, 9.848817570780307),
(560, 0.8, 11.03256968265303),
(600, 0.8, 11.821737757234699),
(630, 1.0, 15.51208496599741),
(710, 1.0, 17.48500515245202),
(800, 1.0, 19.704540362212857),
(900, 1.0, 22.170690595280355),
(1000, 1.2, 29.558290233458454),
(1120, 1.2, 33.10954656907684),
(1250, 1.2, 36.95674093266182),
(1400, 1.5, 51.73366651418207),
(1600, 1.5, 59.13211721338631),
(1800, 1.5, 66.53056791259405),
(2000, 1.5, 73.92901861179831)]
<a href="https://colab.research.google.com/github/alanexplorer/Robotic-Algorithm-Tutorial/blob/master/kalmanFIlter.ipynb" target="_parent"></a>
# Kalman Filter
## Introduction
Kalman filtering is an algorithm that provides estimates of unknown variables given measurements observed over time. Kalman filters have demonstrated their usefulness in a wide range of applications; they have a relatively simple form and require little computational power.
## Problem definition
Kalman filters are used to estimate the states of linear dynamical systems in state-space form. The Kalman filter represents beliefs by the moments parameterization: at time $t$, the belief is represented by the mean $\mu_t$ and the covariance $\Sigma_t$. The process model defines the evolution of the state from time $t - 1$ to time $t$. The state transition probability $p(x_t | u_t, x_{t−1})$ must be a linear function in its arguments with added Gaussian noise. This is expressed by the following equation:
$x_t = A_tx_{t−1} + B_tu_t + \varepsilon_t$
Here $x_t$ and $x_{t−1}$ are state vectors, and $u_t$ is the control vector at time $t$.
In our notation, both of these vectors are column vectors. They are of the
form
$x_{t} = \begin{pmatrix} x_{t}^{1}\\ x_{t}^{2}\\ \vdots \\ x_{t}^{n} \end{pmatrix}$ and $u_{t} = \begin{pmatrix} u_{t}^{1}\\ u_{t}^{2}\\ \vdots \\ u_{t}^{m} \end{pmatrix}$
where $A_t$ is the state transition matrix applied to the previous state vector $x_{t−1}$; $A_t$ is a square matrix of size $n \times n$, where $n$ is the dimension of the state vector $x_t$. $B_t$ is the control-input matrix applied to the control vector $u_t$; it has size $n \times m$, with $m$ being the dimension of the control vector $u_t$. Finally, $\varepsilon_t$ is the process noise vector, assumed to be zero-mean Gaussian with covariance $R_t$, i.e. $\varepsilon_t \sim \mathcal{N}(0,R)$.
The measurement probability $p(z_t | x_t)$ must also be linear in its arguments, with added Gaussian noise. The process model is paired with the measurement model that describes the relationship between the state and the measurement at the current time step t as:
$z_t = C_tx_t + \delta_t$
where $z_t$ is the measurement vector and $C_t$ is the measurement matrix of size $k \times n$, with $k$ being the dimension of the measurement vector $z_t$. $\delta_t$ is the measurement noise vector, assumed to be zero-mean Gaussian with covariance $Q_t$, i.e. $\delta_t \sim \mathcal{N}(0,Q)$.
The role of the Kalman filter is to provide an estimate of $x_t$ at time $t$, given the initial estimate $x_0$, the series of measurements $z_1,z_2,…,z_t$, and the information about the system described by $A_t$, $B_t$, $C_t$, $Q$, and $R$. Note that the subscripts of these matrices can be omitted by assuming that they are invariant over time, as in most applications. Although the covariance matrices are supposed to reflect the statistics of the noise, the true noise statistics are often unknown or non-Gaussian in practical applications. Therefore, $Q$ and $R$ are usually used as tuning parameters that the user can adjust to obtain the desired performance.
## Pseudocode
$1:\ \textbf{Algorithm Kalman\_filter}(\mu_{t-1}, \Sigma_{t-1}, u_t, z_t):$
$2:\ \bar{\mu}_t = A_t \mu_{t-1} + B_t u_t$
$3:\ \bar{\Sigma}_t = A_t \Sigma_{t-1} A^T_t + R_t$
$4:\ K_t = \bar{\Sigma}_t C^T_t (C_t \bar{\Sigma}_t C^T_t + Q_t)^{-1}$
$5:\ \mu_t = \bar{\mu}_t + K_t(z_t - C_t \bar{\mu}_t)$
$6:\ \Sigma_t = (I - K_t C_t)\bar{\Sigma}_t$
$7:\ \textbf{return}\ (\mu_t, \Sigma_t)$
## Summary
### Prediction:
| Description | Representation in the pseudocodigo|
|----------------------------|-------------------------------------------------------|
| Predicted state estimate | $\bar{\mu} _t = A_t \mu_ {t−1} + B_t u_t$ |
| Predicted error covariance | $\bar{\Sigma} _t = A_t \Sigma_ {t−1} A^T_t + R_t$ |
### Update:
| Description | Representation in the pseudocodigo |
|--------------------------|----------------------------------------------------------------|
| Measurement residual | $(z_t − C_t \bar{\mu} _t)$ |
| Kalman gain              | $K_t = \bar{\Sigma} _t C^T_t (C_t \bar{\Sigma} _t C^T_t + Q_t)^{-1} $  |
| Updated state estimate | $\mu_t = \bar{\mu} _t + K_t(z_t − C_t \bar{\mu} _t)$ |
| Updated error covariance | $\Sigma_t = (I − K_t C_t)\bar{\Sigma} _t$ |
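The pseudocode above translates almost line by line into NumPy. The following is a minimal, self-contained sketch of a single predict/correct cycle; the function and variable names are chosen here only for illustration and are not used in the examples later in this notebook.
```python
import numpy as np

def kalman_step(mu, Sigma, u, z, A, B, C, R, Q):
    """One predict/correct cycle of the Kalman filter (lines 2-7 of the pseudocode)."""
    # Prediction
    mu_bar = A @ mu + B @ u                        # line 2: predicted state estimate
    Sigma_bar = A @ Sigma @ A.T + R                # line 3: predicted error covariance
    # Correction
    S = C @ Sigma_bar @ C.T + Q                    # innovation covariance
    K = Sigma_bar @ C.T @ np.linalg.inv(S)         # line 4: Kalman gain
    mu_new = mu_bar + K @ (z - C @ mu_bar)         # line 5: updated state estimate
    Sigma_new = (np.eye(len(mu_bar)) - K @ C) @ Sigma_bar  # line 6: updated error covariance
    return mu_new, Sigma_new                       # line 7
```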
## Kalman Filter for Sensor Fusion
## The Kalman Filter 1-D
Kalman filters are discrete systems that allow us to relate a dependent variable to an independent variable: given measurements (the dependent variable), we solve for an estimate of the independent variable, assuming that noise exists both in the input measurements and in how we have modeled the world with our equations, because of inevitably unaccounted-for factors in the real world. Input variables become more valuable when modeled as a system of equations, or a matrix, which makes it possible to determine the relationships between those values. Every variable in every dimension contains noise, so the introduction of related inputs allows a weighted averaging to take place, based on the predicted change at the next step, the noise unaccounted for in the system model, and the noise introduced by the sensor inputs.
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
from scipy import stats
from scipy.stats import norm  # used below to plot the measurement noise densities
import time
from numpy.linalg import inv
import scipy.stats as scs
```
```python
%matplotlib inline
fw = 10 # figure width
```
#### Plot the Distributions in this range:
```python
x = np.linspace(-100,100,1000)
```
```python
mean0 = 0.0 # e.g. meters or miles
var0 = 20.0
```
```python
plt.figure(figsize=(fw,5))
plt.plot(x, scs.norm.pdf(x, mean0, var0), 'b', label='Normal Distribution')
plt.ylim(0, 0.1);
plt.legend(loc='best');
plt.xlabel('Position');
```
## Now we have something which estimates the moved distance
#### The mean is the moved distance in meters, calculated from velocity*dt, a step counter, a wheel encoder, ...
#### varMove is the uncertainty of the movement, estimated or determined with static measurements
```python
meanMove = 25.0
varMove = 10.0
```
```python
plt.figure(figsize=(fw,5))
plt.plot(x,scs.norm.pdf(x, meanMove, varMove), 'r', label='Normal Distribution')
plt.ylim(0, 0.1);
plt.legend(loc='best');
plt.xlabel('Distance moved');
```
Both Distributions have to be merged together
$\mu_\text{new}=\mu_\text{0}+\mu_\text{move}$ is the new mean and $\sigma^2_\text{new}=\sigma^2_\text{0}+\sigma^2_\text{move}$ is the new variance.
```python
def predict(var, mean, varMove, meanMove):
new_var = var + varMove
new_mean= mean+ meanMove
return new_var, new_mean
```
```python
new_var, new_mean = predict(var0, mean0, varMove, meanMove)
```
```python
plt.figure(figsize=(fw,5))
plt.plot(x,scs.norm.pdf(x, mean0, var0), 'b', label='Beginning Normal Distribution')
plt.plot(x,scs.norm.pdf(x, meanMove, varMove), 'r', label='Movement Normal Distribution')
plt.plot(x,scs.norm.pdf(x, new_mean, new_var), 'g', label='Resulting Normal Distribution')
plt.ylim(0, 0.1);
plt.legend(loc='best');
plt.title('Normal Distributions of 1st Kalman Filter Prediction Step');
plt.savefig('Kalman-Filter-1D-Step.png', dpi=150)
```
### What you see: the resulting distribution is flatter, i.e. more uncertain.
The more often you run the predict step, the flatter (more uncertain) the distribution gets.
Now the first sensor measurement (position) comes in...
#### Sensor Defaults for Position Measurements
(Estimated or determined with static measurements)
```python
meanSensor = 25.0
varSensor = 12.0
```
```python
plt.figure(figsize=(fw,5))
plt.plot(x,scs.norm.pdf(x, meanSensor, varSensor), 'c')
plt.ylim(0, 0.1);
```
Now both Distributions have to be merged together
$\sigma^2_\text{new}=\cfrac{1}{\cfrac{1}{\sigma^2_\text{old}}+\cfrac{1}{\sigma^2_\text{Sensor}}}$ is the new variance and the new mean value is $\mu_\text{new}=\cfrac{\sigma^2_\text{Sensor} \cdot \mu_\text{old} + \sigma^2_\text{old} \cdot \mu_\text{Sensor}}{\sigma^2_\text{old}+\sigma^2_\text{Sensor}}$
```python
def correct(var, mean, varSensor, meanSensor):
new_mean=(varSensor*mean + var*meanSensor) / (var+varSensor)
new_var = 1/(1/var +1/varSensor)
return new_var, new_mean
```
```python
var, mean = correct(new_var, new_mean, varSensor, meanSensor)
```
```python
plt.figure(figsize=(fw,5))
plt.plot(x,scs.norm.pdf(x, new_mean, new_var), 'g', label='Beginning (after Predict)')
plt.plot(x,scs.norm.pdf(x, meanSensor, varSensor), 'c', label='Position Sensor Normal Distribution')
plt.plot(x,scs.norm.pdf(x, mean, var), 'm', label='New Position Normal Distribution')
plt.ylim(0, 0.1);
plt.legend(loc='best');
plt.title('Normal Distributions of 1st Kalman Filter Update Step');
```
###### This is called the measurement or correction step! The filter gets more certain about the actual state.
#### Let's put everything together: The 1D Kalman Filter
"Kalman-Filter: Predicting the Future since 1960"
Let's say, we have some measurements for position and for distance traveled. Both have to be fused with the 1D-Kalman Filter.
```python
positions = (10, 20, 30, 40, 50)+np.random.randn(5)
distances = (10, 10, 10, 10, 10)+np.random.randn(5)
```
```python
positions
```
array([10.3266042 , 20.89387408, 29.3947916 , 39.875685 , 48.93473369])
```python
distances
```
array([ 9.07429798, 10.3234591 , 11.58789189, 9.57728332, 11.52106393])
```python
for m in range(len(positions)):
# Predict
var, mean = predict(var, mean, varMove, distances[m])
#print('mean: %.2f\tvar:%.2f' % (mean, var))
plt.plot(x,scs.norm.pdf(x, mean, var), label='%i. step (Prediction)' % (m+1))
# Correct
var, mean = correct(var, mean, varSensor, positions[m])
print('After correction: mean= %.2f\tvar= %.2f' % (mean, var))
plt.plot(x,scs.norm.pdf(x, mean, var), label='%i. step (Correction)' % (m+1))
plt.ylim(0, 0.1);
plt.xlim(-20, 120)
plt.legend();
```
The sensors are represented as normal distributions with their parameters ($\mu$ and $\sigma^2$) and are combined: the prediction step adds means and variances (a convolution of densities), while the correction step multiplies the densities. The prediction decreases the certainty about the state, the correction increases the certainty.
Prediction: Certainty $\downarrow$
Correction: Certainty $\uparrow$
## Kalman Filter - Multi-Dimensional Measurement
### Kalman Filter Implementation for Constant Velocity Model (CV) in Python
Situation covered: you drive your car through a tunnel and the GPS signal is lost. Now the car has to determine where it is in the tunnel. The only information it has is the velocity in the driving direction. The x and y components of the velocity ($\dot x$ and $\dot y$) can be calculated from the absolute velocity (revolutions of the wheels) and the heading of the vehicle (yaw rate sensor).
First, we have to initialize the matrices and vectors. Setting up the math.
## State Vector
Constant Velocity Model for Ego Motion
$$x_t= \left[ \matrix{ x \\ y \\ \dot x \\ \dot y} \right] = \matrix{ \text{Position x} \\ \text{Position y} \\ \text{Velocity in x} \\ \text{Velocity in y}}$$
Formal Definition (Motion of Law):
$$x_{t} = \textbf{$A_t$} \cdot x_{t-1}$$
which is
$$x_{t} = \begin{bmatrix}1 & 0 & \Delta t & 0 \\ 0 & 1 & 0 & \Delta t \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} \cdot \begin{bmatrix} x \\ y \\ \dot x \\ \dot y \end{bmatrix}_{t-1}$$
Observation Model:
$$z_t = \textbf{$C_t$}\cdot x_t$$
which is
$$z_t = \begin{bmatrix}0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1\end{bmatrix} \cdot x_t$$ means: You observe the velocity directly in the correct unit
### Initial State $x_0$
$$x_{0} = \begin{bmatrix}0 \\ 0 \\ 0 \\ 0\end{bmatrix}$$
```python
x = np.matrix([[0.0, 0.0, 0.0, 0.0]]).T
print(x, x.shape)
plt.scatter(float(x[0]),float(x[1]), s=100)
plt.title('Initial Location')
```
### Covariance Matrix $P_0$ ($\Sigma_0$)
An uncertainty must be given for the initial state $x_0$. In the 1D case this was the variance $\sigma_0^2$; here it becomes the covariance matrix $P_0$, which defines an initial uncertainty for all states.
This matrix will be changed during the filter passes; it is updated in both the predict and the correct step. If one is quite sure about the states at the beginning, one can use low values here; if one does not know exactly what the values of the state vector are, the covariance matrix should be initialized with very large values (1 million or so) to allow the filter to converge relatively quickly (i.e. find the right values based on the measurements).
$$P_{0} = \begin{bmatrix}\sigma^2_x & 0 & 0 & 0 \\ 0 & \sigma^2_y & 0 & 0 \\ 0 & 0 & \sigma^2_{\dot x} & 0 \\ 0 & 0 & 0 & \sigma^2_{\dot y} \end{bmatrix}$$
with $\sigma$ as the standard deviation
```python
P = np.diag([1000.0, 1000.0, 1000.0, 1000.0])
print(P, P.shape)
```
[[1000. 0. 0. 0.]
[ 0. 1000. 0. 0.]
[ 0. 0. 1000. 0.]
[ 0. 0. 0. 1000.]] (4, 4)
```python
fig = plt.figure(figsize=(6, 6))
im = plt.imshow(P, interpolation="none", cmap=plt.get_cmap('binary'))
plt.title('Initial Covariance Matrix $P$')
ylocs, ylabels = plt.yticks()
# set the locations of the yticks
plt.yticks(np.arange(7))
# set the locations and labels of the yticks
plt.yticks(np.arange(6),('$x$', '$y$', '$\dot x$', '$\dot y$'), fontsize=22)
xlocs, xlabels = plt.xticks()
# set the locations of the yticks
plt.xticks(np.arange(7))
# set the locations and labels of the yticks
plt.xticks(np.arange(6),('$x$', '$y$', '$\dot x$', '$\dot y$'), fontsize=22)
plt.xlim([-0.5,3.5])
plt.ylim([3.5, -0.5])
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(im, cax=cax);
```
### Dynamic Matrix $A$
It is calculated from the dynamics of the Egomotion.
$$x_{t} = x_{t-1} + \dot x_{t-1} \cdot \Delta t$$
$$y_{t} = y_{t-1} + \dot y_{t-1} \cdot \Delta t$$
$$\dot x_{t} = \dot x_{t-1}$$
$$\dot y_{t} = \dot y_{t-1}$$
```python
dt = 0.1 # Time Step between Filter Steps
A = np.matrix([[1.0, 0.0, dt, 0.0],
[0.0, 1.0, 0.0, dt],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
print(A, A.shape)
```
[[1. 0. 0.1 0. ]
[0. 1. 0. 0.1]
[0. 0. 1. 0. ]
[0. 0. 0. 1. ]] (4, 4)
### Measurement Matrix $C_t$
We directly measure the Velocity $\dot x$ and $\dot y$
$$C = \begin{bmatrix}0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1\end{bmatrix}$$
```python
C = np.matrix([[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
print(C, C.shape)
```
[[0. 0. 1. 0.]
[0. 0. 0. 1.]] (2, 4)
### Measurement Noise Covariance $Q_t$
Tells the Kalman Filter how 'bad' the sensor readings are.
$$Q_t = \begin{bmatrix}\sigma^2_{\dot x} & 0 \\ 0 & \sigma^2_{\dot y} \end{bmatrix}$$
```python
ra = 10.0**2
Q = np.matrix([[ra, 0.0],
[0.0, ra]])
print(Q, Q.shape)
```
[[100. 0.]
[ 0. 100.]] (2, 2)
```python
# Plot between -10 and 10 with .001 steps.
xpdf = np.arange(-10, 10, 0.001)
plt.subplot(121)
plt.plot(xpdf, norm.pdf(xpdf,0,Q[0,0]))
plt.title('$\dot x$')
plt.subplot(122)
plt.plot(xpdf, norm.pdf(xpdf,0,Q[1,1]))
plt.title('$\dot y$')
plt.tight_layout()
```
### Process Noise Covariance $R$
The Position of the car can be influenced by a force (e.g. wind), which leads to an acceleration disturbance (noise). This process noise has to be modeled with the process noise covariance matrix R.
$$R = \begin{bmatrix}\sigma_{x}^2 & \sigma_{xy} & \sigma_{x \dot x} & \sigma_{x \dot y} \\ \sigma_{yx} & \sigma_{y}^2 & \sigma_{y \dot x} & \sigma_{y \dot y} \\ \sigma_{\dot x x} & \sigma_{\dot x y} & \sigma_{\dot x}^2 & \sigma_{\dot x \dot y} \\ \sigma_{\dot y x} & \sigma_{\dot y y} & \sigma_{\dot y \dot x} & \sigma_{\dot y}^2 \end{bmatrix}$$
One can calculate R as
$$R = G\cdot G^T \cdot \sigma_v^2$$
with $G = \begin{bmatrix}0.5dt^2 & 0.5dt^2 & dt & dt\end{bmatrix}^T$ and $\sigma_v$ as the acceleration process noise, which can be assumed for a vehicle to be $8.8m/s^2$, according to: Schubert, R., Adam, C., Obst, M., Mattern, N., Leonhardt, V., & Wanielik, G. (2011). [Empirical evaluation of vehicular models for ego motion estimation](http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5940526). 2011 IEEE Intelligent Vehicles Symposium (IV), 534–539. doi:10.1109/IVS.2011.5940526
```python
sv = 8.8
G = np.matrix([[0.5*dt**2],
[0.5*dt**2],
[dt],
[dt]])
R = G*G.T*sv**2
```
```python
from sympy import Symbol, Matrix
from sympy.interactive import printing
printing.init_printing()
dts = Symbol('dt')
Rs = Matrix([[0.5*dts**2],[0.5*dts**2],[dts],[dts]])
Rs*Rs.T
```
```python
fig = plt.figure(figsize=(6, 6))
im = plt.imshow(R, interpolation="none", cmap=plt.get_cmap('binary'))
plt.title('Process Noise Covariance Matrix $R$')
ylocs, ylabels = plt.yticks()
# set the locations of the yticks
plt.yticks(np.arange(7))
# set the locations and labels of the yticks
plt.yticks(np.arange(6),('$x$', '$y$', '$\dot x$', '$\dot y$'), fontsize=22)
xlocs, xlabels = plt.xticks()
# set the locations of the yticks
plt.xticks(np.arange(7))
# set the locations and labels of the yticks
plt.xticks(np.arange(6),('$x$', '$y$', '$\dot x$', '$\dot y$'), fontsize=22)
plt.xlim([-0.5,3.5])
plt.ylim([3.5, -0.5])
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(im, cax=cax);
```
### Identity Matrix $I$
```python
I = np.eye(4)
print(I, I.shape)
```
[[1. 0. 0. 0.]
[0. 1. 0. 0.]
[0. 0. 1. 0.]
[0. 0. 0. 1.]] (4, 4)
## Measurements
For example, we are using some random generated measurement values
```python
m = 200 # Measurements
vx= 20 # in X
vy= 10 # in Y
mx = np.array(vx+np.random.randn(m))
my = np.array(vy+np.random.randn(m))
measurements = np.vstack((mx,my))
print(measurements.shape)
print('Standard Deviation of Velocity Measurements=%.2f' % np.std(mx))
print('You assumed %.2f in Q.' % Q[0,0])
```
(2, 200)
Standard Deviation of Velocity Measurements=0.96
You assumed 100.00 in Q.
```python
fig = plt.figure(figsize=(16,5))
plt.step(range(m),mx, label='$\dot x$')
plt.step(range(m),my, label='$\dot y$')
plt.ylabel(r'Velocity $m/s$')
plt.title('Measurements')
plt.legend(loc='best',prop={'size':18})
```
```python
# Preallocation for Plotting
xt = []
yt = []
dxt= []
dyt= []
Zx = []
Zy = []
Px = []
Py = []
Pdx= []
Pdy= []
Rdx= []
Rdy= []
Kx = []
Ky = []
Kdx= []
Kdy= []
def savestates(x, Z, P, Q, K):
xt.append(float(x[0]))
yt.append(float(x[1]))
dxt.append(float(x[2]))
dyt.append(float(x[3]))
Zx.append(float(Z[0]))
Zy.append(float(Z[1]))
Px.append(float(P[0,0]))
Py.append(float(P[1,1]))
Pdx.append(float(P[2,2]))
Pdy.append(float(P[3,3]))
Rdx.append(float(Q[0,0]))
Rdy.append(float(Q[1,1]))
Kx.append(float(K[0,0]))
Ky.append(float(K[1,0]))
Kdx.append(float(K[2,0]))
Kdy.append(float(K[3,0]))
```
# Kalman Filter
```python
for n in range(len(measurements[0])):
# Time Update (Prediction)
# ========================
# Project the state ahead
x = A*x
# Project the error covariance ahead
P = A*P*A.T + R
# Measurement Update (Correction)
# ===============================
# Compute the Kalman Gain
S = C*P*C.T + Q
K = (P*C.T) * np.linalg.pinv(S)
# Update the estimate via z
Z = measurements[:,n].reshape(2,1)
y = Z - (C*x) # Innovation or Residual
x = x + (K*y)
# Update the error covariance
P = (I - (K*C))*P
# Save states (for Plotting)
savestates(x, Z, P, Q, K)
```
# Let's take a look at the filter performance
### Kalman Gains $K$
```python
def plot_K():
fig = plt.figure(figsize=(16,9))
plt.plot(range(len(measurements[0])),Kx, label='Kalman Gain for $x$')
plt.plot(range(len(measurements[0])),Ky, label='Kalman Gain for $y$')
plt.plot(range(len(measurements[0])),Kdx, label='Kalman Gain for $\dot x$')
plt.plot(range(len(measurements[0])),Kdy, label='Kalman Gain for $\dot y$')
plt.xlabel('Filter Step')
plt.ylabel('')
plt.title('Kalman Gain (the lower, the more the measurement fullfill the prediction)')
plt.legend(loc='best',prop={'size':22})
```
```python
plot_K()
```
### Uncertainty Matrix $P$
```python
def plot_P():
fig = plt.figure(figsize=(16,9))
plt.plot(range(len(measurements[0])),Px, label='$x$')
plt.plot(range(len(measurements[0])),Py, label='$y$')
plt.plot(range(len(measurements[0])),Pdx, label='$\dot x$')
plt.plot(range(len(measurements[0])),Pdy, label='$\dot y$')
plt.xlabel('Filter Step')
plt.ylabel('')
plt.title('Uncertainty (Elements from Matrix $P$)')
plt.legend(loc='best',prop={'size':22})
```
```python
plot_P()
```
### State Estimate $x$
```python
def plot_x():
fig = plt.figure(figsize=(16,9))
plt.step(range(len(measurements[0])),dxt, label='$\dot x$')
plt.step(range(len(measurements[0])),dyt, label='$\dot y$')
plt.axhline(vx, color='#999999', label='$\dot x_{real}$')
plt.axhline(vy, color='#999999', label='$\dot y_{real}$')
plt.xlabel('Filter Step')
plt.title('Estimate (Elements from State Vector $x$)')
plt.legend(loc='best',prop={'size':22})
plt.ylim([0, 30])
plt.ylabel('Velocity')
```
```python
plot_x()
```
## Position x/y
```python
def plot_xy():
fig = plt.figure(figsize=(16,16))
plt.scatter(xt,yt, s=20, label='State', c='k')
plt.scatter(xt[0],yt[0], s=100, label='Start', c='g')
plt.scatter(xt[-1],yt[-1], s=100, label='Goal', c='r')
plt.xlabel('X')
plt.ylabel('Y')
plt.title('Position')
plt.legend(loc='best')
plt.axis('equal')
```
```python
plot_xy()
```
```python
```
<a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/main/notebooks/advi_beta_binom_jax.ipynb" target="_parent"></a>
# ADVI from scratch in JAX
Authors: karm-patel@, murphyk@
In this notebook we apply ADVI (automatic differentiation variational inference) to the beta-binomial model, using a normal distribution as the variational posterior. This involves a change of variables from the unconstrained $z \in \mathbb{R}$ to the constrained $\theta \in [0,1]$.
```python
try:
import jax
except ModuleNotFoundError:
%pip install -qqq jax jaxlib
import jax
import jax.numpy as jnp
from jax import lax
try:
from tensorflow_probability.substrates import jax as tfp
except ModuleNotFoundError:
%pip install -qqq tensorflow_probability
from tensorflow_probability.substrates import jax as tfp
try:
import optax
except ModuleNotFoundError:
%pip install -qqq optax
import optax
try:
from rich import print
except ModuleNotFoundError:
%pip install -qqq rich
from rich import print
try:
from tqdm import trange
except:
%pip install -qqq tqdm
from tqdm import trange
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
dist = tfp.distributions
plt.rc("font", size=10) # controls default text sizes
plt.rc("axes", labelsize=12) # fontsize of the x and y labels
plt.rc("legend", fontsize=12) # legend fontsize
plt.rc("figure", titlesize=15) # fontsize of the figure title
```
## Functions
Helper functions which will be used later
```python
def prior_dist():
return dist.Beta(concentration1 = 1.0, concentration0 = 1.0)
def likelihood_dist(theta):
return dist.Bernoulli(probs=theta)
def transform_fn(x):
return 1 / (1 + jnp.exp(-x)) # sigmoid
def positivity_fn(x):
return jnp.log(1 + jnp.exp(x)) # softplus
def variational_distribution_q(params):
loc = params["loc"]
scale = positivity_fn(params["scale"]) # apply softplus
return dist.Normal(loc, scale)
jacobian_fn = jax.jacfwd(transform_fn) # define function to find jacobian for tranform_fun
```
## Dataset
Now we will create the dataset. We sample the random variable `theta_true` (the probability of heads) from the prior distribution, which is Beta in this case. Then we sample `n_samples` coin tosses from the likelihood distribution, which is Bernoulli in this case.
```python
# preparing dataset
# key = jax.random.PRNGKey(128)
# n_samples = 12
# theta_true = prior_dist().sample((5,),key)[0]
# dataset = likelihood_dist(theta_true).sample(n_samples,key)
# print(f"Dataset: {dataset}")
# n_heads = dataset.sum()
# n_tails = n_samples - n_heads
```
```python
# Use same data as https://github.com/probml/probml-notebooks/blob/main/notebooks/beta_binom_approx_post_pymc.ipynb
key = jax.random.PRNGKey(128)
dataset = np.repeat([0, 1], (10, 1))
n_samples = len(dataset)
print(f"Dataset: {dataset}")
n_heads = dataset.sum()
n_tails = n_samples - n_heads
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">Dataset: <span style="font-weight: bold">[</span><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1</span><span style="font-weight: bold">]</span>
</pre>
## Prior, Likelihood, and True Posterior
For the coin-toss problem, since we know the closed-form solution of the posterior, we compare the distributions of the prior, likelihood, and true posterior below.
```python
# closed form of beta posterior
a = prior_dist().concentration1
b = prior_dist().concentration0
exact_posterior = dist.Beta(concentration1 = a + n_heads, concentration0 = b + n_tails)
theta_range = jnp.linspace(0.01, 0.99, 100)
ax = plt.gca()
ax2 = ax.twinx()
plt2, = ax2.plot(theta_range, exact_posterior.prob(theta_range), "g--", label="True Posterior")
plt3, = ax2.plot(theta_range, prior_dist().prob(theta_range), label="Prior")
likelihood = jax.vmap(lambda x: jnp.prod(likelihood_dist(x).prob(dataset)))(theta_range)
plt1, = ax.plot(theta_range, likelihood, "r-.", label="Likelihood")
ax.set_xlabel("theta")
ax.set_ylabel("Likelihood")
ax2.set_ylabel("Prior & Posterior")
ax2.legend(handles=[plt1,plt2,plt3],bbox_to_anchor=(1.6,1));
```
## Optimizing the ELBO
In order to minimize the KL divergence between the true posterior and the variational distribution, we need to minimize the negative ELBO, as we describe below.
We start with the ELBO, which is given by:
\begin{align}
ELBO(\psi) &= E_{z \sim q(z|\psi)} \left[
p(\mathcal{D}|z) + \log p(z) - \log q(z|\psi) \right]
\end{align}
where
$\psi = (\mu, \sigma)$ are the variational parameters,
$p(\mathcal{D}|z) = p(\mathcal{D}|\theta=\sigma(z))$
is the likelihood,
and the prior is given by the change of variables formula:
\begin{align}
p(z) &= p(\theta) | \frac{\partial \theta}{\partial z} |
= p(\theta) | J |
\end{align}
where $J$ is the Jacobian of the $z \rightarrow \theta$ mapping.
We will use a Monte Carlo approximation of the expectation over $z$.
We also apply the reparameterization trick
to replace $z \sim q(z|\psi)$ with
\begin{align}
\epsilon &\sim \mathcal{N}(0,1 ) \\
z &= \mu + \sigma \epsilon
\end{align}
Putting it all together, our estimate of the negative ELBO (for a single sample of $\epsilon$) is
\begin{align}
-L(\psi; z) &= -( \log p(\mathcal{D}|\theta )
+\log p( \theta) + \log|J_\boldsymbol{\sigma}(z)|)
+ \log q(z|\psi)
\end{align}
```python
def log_prior_likelihood_jacobian(normal_sample, dataset):
theta = transform_fn(normal_sample) # transform normal sample to beta sample
likelihood_log_prob = likelihood_dist(theta).log_prob(dataset).sum() # log probability of likelihood
prior_log_prob = prior_dist().log_prob(theta) # log probability of prior
log_det_jacob = jnp.log(jnp.abs(jnp.linalg.det(jacobian_fn(normal_sample).reshape(1,1)))) # log of determinant of jacobian
return likelihood_log_prob + prior_log_prob + log_det_jacob
```
```python
# reference: https://code-first-ml.github.io/book2/notebooks/introduction/variational.html
def negative_elbo(params, dataset, n_samples = 10, key=jax.random.PRNGKey(1)):
q = variational_distribution_q(params) # Normal distribution.
q_loc, q_scale = q.loc, q.scale
std_normal = dist.Normal(0, 1)
sample_set = std_normal.sample(seed=key, sample_shape=[n_samples,])
sample_set = q_loc + q_scale * sample_set # reparameterization trick
#calculate log joint for each sample of z
p_log_prob = jax.vmap(log_prior_likelihood_jacobian, in_axes=(0, None))(sample_set, dataset)
return jnp.mean(q.log_prob(sample_set) - p_log_prob)
```
We now apply stochastic gradient descent to minimize the negative ELBO and optimize the variational parameters (`loc` and `scale`).
```python
loss_and_grad_fn = jax.value_and_grad(negative_elbo, argnums=(0))
loss_and_grad_fn = jax.jit(loss_and_grad_fn) # jit the loss_and_grad function
params = {"loc": 0.0, "scale": 0.5}
elbo, grads = loss_and_grad_fn(params, dataset)
print(f"loss: {elbo}")
print(f"grads:\n loc: {grads['loc']}\n scale: {grads['scale']} ")
optimizer = optax.adam(learning_rate = 0.01)
opt_state = optimizer.init(params)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">loss: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">9.09147834777832</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">grads:
loc: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">4.692040920257568</span>
scale: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0.6960504651069641</span>
</pre>
```python
#jax scannable function for training
def train_step(carry, data_output):
#take carry data
key = carry["key"]
elbo = carry["elbo"]
grads = carry["grads"]
params = carry["params"]
opt_state = carry["opt_state"]
updates = carry["updates"]
#training
key, subkey = jax.random.split(key)
elbo, grads = loss_and_grad_fn(params, dataset, key = subkey)
updates, opt_state = optimizer.update(grads, opt_state)
params = optax.apply_updates(params, updates)
#forward carry to next iteration by storing it
carry = { "key": subkey, "elbo": elbo, "grads": grads, "params": params,
"opt_state": opt_state, "updates": updates }
output = { "elbo": elbo, "params": params }
return carry, output
```
```python
%%time
#dummy iteration to pass carry to jax scannale function train()
key, subkey = jax.random.split(key)
elbo, grads = loss_and_grad_fn(params, dataset, key = subkey)
updates, opt_state = optimizer.update(grads, opt_state)
params = optax.apply_updates(params, updates)
carry = {
"key": key,
"elbo": elbo,
"grads": grads,
"params": params,
"opt_state": opt_state,
"updates": updates
}
num_iter = 1000
elbos = np.empty(num_iter)
#apply scan() to optimize training loop
last_carry, output = lax.scan(train_step, carry, elbos)
elbo = output["elbo"]
params = output["params"]
optimized_params = last_carry["params"]
```
CPU times: user 2.09 s, sys: 20.1 ms, total: 2.11 s
Wall time: 2.03 s
```python
print(params['loc'].shape)
print(params['scale'].shape)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">(</span><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1000</span>,<span style="font-weight: bold">)</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">(</span><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1000</span>,<span style="font-weight: bold">)</span>
</pre>
We now plot the ELBO
```python
plt.plot(elbo)
plt.xlabel("Iterations")
plt.ylabel("Negative ELBO")
sns.despine()
plt.savefig('advi_beta_binom_jax_loss.pdf')
```
We can see that after about 200 iterations the ELBO has converged and is no longer changing much.
## Samples using Optimized parameters
Now we take 1000 samples from the variational distribution (Normal) and transform them to the space of the true posterior (Beta) by applying `transform_fn` (the sigmoid) to the samples. Then we compare the density of the samples with the exact posterior.
```python
q_learned = variational_distribution_q(optimized_params)
key = jax.random.PRNGKey(128)
q_learned_samples = q_learned.sample(1000, seed=key) # q(z|D)
transformed_samples = transform_fn(q_learned_samples) #transform Normal samples into Beta samples
theta_range = jnp.linspace(0.01,0.99,100)
plt.plot(theta_range, exact_posterior.prob(theta_range),"r" , label="$p(x)$: true posterior")
sns.kdeplot(transformed_samples, color="blue" ,label="$q(x)$: learned", bw_adjust=1.5, clip=(0.0,1.0), linestyle="--")
plt.xlabel("theta")
plt.legend() #bbox_to_anchor=(1.5, 1));
sns.despine();
plt.savefig('advi_beta_binom_jax_posterior.pdf')
```
We can see that the learned `q(x)` is a reasonably good approximation to the true posterior. It seems to have support over negative theta but this is an artefact of KDE.
```python
#print(transformed_samples)
print(len(transformed_samples))
print(jnp.sum(transformed_samples < 0)) # all samples of thetas should be in [0,1]
print(jnp.sum(transformed_samples > 1)) # all samples of thetas should be in [0,1]
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1000</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span>
</pre>
```python
print(q_learned)
print(q_learned.mean())
print(jnp.sqrt(q_learned.variance()))
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #800080; text-decoration-color: #800080; font-weight: bold">tfp.distributions.Normal</span><span style="font-weight: bold">(</span><span style="color: #008000; text-decoration-color: #008000">"Normal"</span>, <span style="color: #808000; text-decoration-color: #808000">batch_shape</span>=<span style="font-weight: bold">[]</span>, <span style="color: #808000; text-decoration-color: #808000">event_shape</span>=<span style="font-weight: bold">[]</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #800080; text-decoration-color: #800080">float32</span><span style="font-weight: bold">)</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-1.9198773</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0.8108946</span>
</pre>
```python
locs, scales = params["loc"], params["scale"]
sigmas = positivity_fn(jnp.array(scales))
plt.plot(locs, label="mu")
plt.xlabel("Iterations")
plt.ylabel("$E_q[z]$")
plt.legend()
sns.despine();
plt.savefig('advi_beta_binom_jax_post_mu_vs_time.pdf')
plt.show()
plt.plot(sigmas, label="sigma")
plt.xlabel("Iterations")
#plt.ylabel(r'$\sqrt{\text{var}(z)}')
plt.ylabel("$std_{q}[z]$")
plt.legend()
sns.despine();
plt.savefig('advi_beta_binom_jax_post_sigma_vs_time.pdf')
plt.show()
```
## Comparison with pymc.ADVI()
Now, we compare our implementation with pymc's ADVI implementation.
**Note**: For pymc implementation, the code is taken from this notebook: https://github.com/probml/probml-notebooks/blob/main/notebooks/beta_binom_approx_post_pymc.ipynb
```python
try:
import pymc3 as pm
except ModuleNotFoundError:
%pip install pymc3
import pymc3 as pm
try:
import scipy.stats as stats
except ModuleNotFoundError:
%pip install scipy
import scipy.stats as stats
import scipy.special as sp
try:
import arviz as az
except ModuleNotFoundError:
%pip install arviz
import arviz as az
import math
```
```python
a = prior_dist().concentration1
b = prior_dist().concentration0
with pm.Model() as mf_model:
theta = pm.Beta("theta", a, b)
y = pm.Binomial("y", n=1, p=theta, observed=dataset) # Bernoulli
advi = pm.ADVI()
tracker = pm.callbacks.Tracker(
mean=advi.approx.mean.eval, # callable that returns mean
std=advi.approx.std.eval, # callable that returns std
)
approx = advi.fit(callbacks=[tracker],n=20000)
trace_approx = approx.sample(1000)
thetas = trace_approx["theta"]
```
<style>
/* Turns off some styling */
progress {
/* gets rid of default border in Firefox and Opera. */
border: none;
/* Needs to be in here for Safari polyfill so background images work as expected. */
background-size: auto;
}
.progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {
background: #F44336;
}
</style>
<div>
<progress value='20000' class='' max='20000' style='width:300px; height:20px; vertical-align: middle;'></progress>
100.00% [20000/20000 00:01<00:00 Average Loss = 4.9174]
</div>
```python
plt.plot(advi.hist, label="ELBO")
plt.xlabel("Iterations")
plt.ylabel("ELBO")
plt.legend()
sns.despine();
plt.savefig('advi_beta_binom_pymc_loss.pdf')
plt.show()
```
```python
print(f"ELBO comparison for last 1% iterations:\nJAX ELBO: {elbo[-10:].mean()}\nPymc ELBO: {advi.hist[-100:].mean()}")
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">ELBO comparison for last <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1</span>% iterations:
JAX ELBO: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">4.899871826171875</span>
Pymc ELBO: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">4.941951796589394</span>
</pre>
## True posterior, JAX q(x), and pymc q(x)
```python
plt.plot(theta_range, exact_posterior.prob(theta_range), "b--", label="$p(x)$: True Posterior")
sns.kdeplot(transformed_samples, color="red", label="$q(x)$: learnt - jax", clip=(0.0,1.0), bw_adjust=1.5)
sns.kdeplot(thetas, label="$q(x)$: learnt - pymc", clip=(0.0,1.0), bw_adjust=1.5)
plt.xlabel("theta")
plt.legend(bbox_to_anchor=(1.3, 1))
sns.despine()
```
## Plot of loc and scale for variational distribution
```python
fig1, (ax1,ax2) = plt.subplots(1,2, figsize = (10,4), sharey=True)
locs, scales = params["loc"], params["scale"]
#plot loc
#JAX
ax1.plot(locs, label="JAX: loc")
ax1.set_ylabel("loc")
ax1.legend();
#pymc
ax2.plot(tracker['mean'], label="Pymc: loc")
ax2.legend()
sns.despine()
#plot scale
fig2, (ax3,ax4) = plt.subplots(1,2, figsize = (10,4), sharey=True)
#JAX
ax3.plot(positivity_fn(jnp.array(scales)), label="JAX: scale"); #apply softplus on scale
ax3.set_xlabel("Iterations")
ax3.set_ylabel("scale")
ax3.legend();
#pymc
ax4.plot(tracker['std'], label="Pymc: scale")
ax4.set_xlabel("Iterations")
ax4.legend();
sns.despine();
```
## References:
1. ADVI paper: https://arxiv.org/abs/1603.00788
2. Blog: https://code-first-ml.github.io/book2/notebooks/introduction/variational.html
3. Blog: https://luiarthur.github.io/statorial/varinf/introvi/
4. Video: https://www.youtube.com/watch?v=HxQ94L8n0vU
5. Github issue: https://github.com/pyro-ppl/pyro/issues/3016#:~:text=loc%3D%27upper%20right%27)-,Bandwidth%20adjustment,-Another%20thing%20to
6. Blog: https://ericmjl.github.io/dl-workshop/02-jax-idioms/02-loopy-carry.html
```python
```
```python
import import_ipynb
from Data_Analysis_1 import *
```
importing Jupyter notebook from Data_Analysis_1.ipynb
# $e^x$
```python
from sympy import *
import numpy as np
x = Symbol('x')
y = exp(x)
# yprime = y.diff(x)
def factorial(i):
if i == 1 or i == 0:
return 1
else:
return i*factorial(i-1)
def Taylor(point_of_expansion, order_of_polynomial):
    # Build the Taylor polynomial of exp(x) around x_0 up to the given order
    x = Symbol('x')
    y = exp(x)
    x_0 = point_of_expansion
    i = order_of_polynomial
    polinomio = 0
    for j in range(0, i+1):
        f_1 = lambdify(x, y)          # evaluate the current (j-th) derivative numerically
        y_0 = f_1(x_0)                # derivative value at the expansion point
        polinomio += pow((x-x_0),j)*Rational(y_0, factorial(j))  # add the j-th Taylor term
        y = y.diff(x)                 # differentiate for the next iteration
    return polinomio
```
```python
polinomio_1 = Taylor(0, 1)
f = lambdify(x, polinomio_1, 'numpy')
xaxis = np.linspace(-2, 2, 100)
yaxis_1 = f(xaxis)
```
```python
polinomio_2 = Taylor(0, 2)
f = lambdify(x, polinomio_2, 'numpy')
yaxis_2 = f(xaxis)
```
```python
polinomio_3 = Taylor(0, 3)
f = lambdify(x, polinomio_3, 'numpy')
yaxis_3 = f(xaxis)
```
```python
polinomio_4 = Taylor(0, 4)
f = lambdify(x, polinomio_4, 'numpy')
yaxis_4 = f(xaxis)
```
```python
polinomio_5 = Taylor(0, 5)
f = lambdify(x, polinomio_5, 'numpy')
yaxis_5 = f(xaxis)
```
```python
polinomio_6 = Taylor(0, 6)
f = lambdify(x, polinomio_6, 'numpy')
yaxis_6 = f(xaxis)
```
```python
layout = Layout()
traccia_1 = traccia_relazione(yaxis_1, None, xaxis, None, "lines", "Taylor I")
traccia_2 = traccia_relazione(yaxis_2, None, xaxis, None, "lines", "Taylor II")
traccia_3 = traccia_relazione(yaxis_3, None, xaxis, None, "lines", "Taylor III")
traccia_4 = traccia_relazione(yaxis_4, None, xaxis, None, "lines", "Taylor IV")
traccia_5 = traccia_relazione(yaxis_5, None, xaxis, None, "lines", "Taylor V")
traccia_6 = traccia_relazione(yaxis_6, None, xaxis, None, "lines", "Taylor VI")
```
```python
e = exp(x)
f_real_exp = lambdify(x, e, 'numpy')
yaxis_prime = f_real_exp(xaxis)
traccia_e = traccia_relazione(yaxis_prime, None ,xaxis, None, "lines", "e^x")
```
```python
data = [traccia_1, traccia_2, traccia_3, traccia_4, traccia_5, traccia_6, traccia_e, X(-2, 2), Y(-2, 7.4)]
Grafico(data, layout)
```
# $log(x)$
```python
x = Symbol('x')
y = log(x)
def Taylor(point_of_expansion, order_of_polynomial):
x = Symbol('x')
y = log(x)
x_0 = point_of_expansion
i = order_of_polynomial
polinomio = 0
for j in range(0, i+1):
f_1 = lambdify(x, y)
y_0 = f_1(x_0)
polinomio += pow((x-x_0),j)*Rational(y_0, factorial(j))
y = y.diff(x)
return polinomio
```
```python
polinomio_1 = Taylor(1, 1)
f = lambdify(x, polinomio_1, 'numpy')
xaxis = np.linspace(0.5, 2, 100)
yaxis_1 = f(xaxis)
```
```python
polinomio_2 = Taylor(1, 2)
f = lambdify(x, polinomio_2, 'numpy')
yaxis_2 = f(xaxis)
```
```python
polinomio_3 = Taylor(1, 3)
f = lambdify(x, polinomio_3, 'numpy')
yaxis_3 = f(xaxis)
```
```python
polinomio_4 = Taylor(1, 4)
f = lambdify(x, polinomio_4, 'numpy')
yaxis_4 = f(xaxis)
```
```python
polinomio_5 = Taylor(1, 5)
f = lambdify(x, polinomio_5, 'numpy')
yaxis_5 = f(xaxis)
```
```python
polinomio_6 = Taylor(1, 6)
f = lambdify(x, polinomio_6, 'numpy')
yaxis_6 = f(xaxis)
```
```python
layout = Layout()
traccia_1 = traccia_relazione(yaxis_1, None, xaxis, None, "lines", "Taylor I")
traccia_2 = traccia_relazione(yaxis_2, None, xaxis, None, "lines", "Taylor II")
traccia_3 = traccia_relazione(yaxis_3, None, xaxis, None, "lines", "Taylor III")
traccia_4 = traccia_relazione(yaxis_4, None, xaxis, None, "lines", "Taylor IV")
traccia_5 = traccia_relazione(yaxis_5, None, xaxis, None, "lines", "Taylor V")
traccia_6 = traccia_relazione(yaxis_6, None, xaxis, None, "lines", "Taylor VI")
```
```python
logarithm = log(x)
f_real_log = lambdify(x, logarithm, 'numpy')
yaxis_prime = f_real_log(xaxis)
traccia_log = traccia_relazione(yaxis_prime, None ,xaxis, None, "lines", "log(x)")
```
```python
data = [traccia_1, traccia_2, traccia_3, traccia_4, traccia_5, traccia_6, traccia_log, X(0, 2), Y(-0.7, 1)]
Grafico(data, layout)
```
# Deep Learning
**CS5483 Data Warehousing and Data Mining**
___
```python
%reset -f
%load_ext tensorboard
%matplotlib inline
import jupyter_manim
from manimlib.imports import *
import pprint as pp
import tensorflow_datasets as tfds
import tensorflow.compat.v2 as tf
import tensorflow_addons as tfa
import os, datetime, pytz
import tensorboard as tb
from matplotlib import pyplot as plt
import numpy as np
from IPython import display
# produce vector inline graphics
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg')
```
## Introduction
In this notebook, we will train a handwritten-digit classifier like the one in the demo below using deep learning:
1. Handwrite a digit from 0, ..., 9.
1. Click predict to see if the app can recognize the digit.
```python
display.IFrame(src="https://www.cs.cityu.edu.hk/~ccha23/mnist/", width=805, height=450)
```
**What is deep learning?**
[Deep learning](https://en.wikipedia.org/wiki/Deep_learning) is a technique of training a neural network with many layers of computational units called neurons. The following videos showcase some interesting applications of the technique.
```python
display.IFrame(src="https://slides.com/ccha23/dl_intro/embed", width=805, height=450)
```
**How to train a neural network?**
We can visualize the training process in the following application, which trains a neural network that predicts the color of a given point when given its coordinate $(x_1,x_2)$.
- Choose a data set from the `DATA` column.
- Click the `play` button to start training the network.
- `Epoch` is the number of times a neural network is trained iteratively using the data selected.
- In the `OUTPUT` column,
- points in the blue region are predicted blue, and
- points in the orange region are predicted orange.
```python
display.IFrame(src="https://www.cs.cityu.edu.hk/~ccha23/playground", width=900, height=800)
```
The above app is a slight modification of the open source project [Tensorflow Playground](https://playground.tensorflow.org) with the additional features that:
- You can save your configuration to the browser session by clicking the button `Save to browser session`. If you reopen the browser, it will load your previously saved configuration automatically.
- You can reset the configuration by clicking the `reset` button.
- The last button copies the permalink to your configuration to the clipboard. You can save multiple configurations and share them by keeping the permalinks.
For instance, the following uses the permalink to initialize the simplest neural network for the linearly separable data:
```python
display.IFrame(src="https://www.cs.cityu.edu.hk/~ccha23/playground/#activation=linear&batchSize=10&dataset=gauss®Dataset=reg-plane&learningRate=0.03®ularizationRate=0&noise=0&networkShape=&seed=0.15891&showTestData=false&discretize=false&percTrainData=50&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false", width=900, height=800)
```
**Exercise** Try to tune the parameters to classify the Spiral dataset until the testing loss reaches 0.02. Include a screenshot below and give the permalink to your configuration.
YOUR ANSWER HERE
You can also [fork and modify the code on GitHub](https://github.com/tensorflow/tensorflow/pulls) to add new features, e.g., to customize the datasets and store the trained neural network. However, since the visualization is limited to 2D, it is difficult to extend the app to higher-dimensional datasets with multiple class values.
Nevertheless, it is possible to train a practical neural network without any coding, by using a service called the [Teachable Machine](https://teachablemachine.withgoogle.com/). E.g., you may follow the interactive slides below to learn to train a machine that recognizes musical notes.
(Click the `play` button at the bottom of the slides to start the presentation.)
```python
%%html
```
**Exercise** Use [Teachable Machine](https://teachablemachine.withgoogle.com/) to train your machine. Explain what your machine does and include a link to it.
YOUR ANSWER HERE
## Preliminaries
The neural network is trained using an iterative algorithm called *stochastic gradient descent*, which requires some background on vector calculus and probability theory. The lecture series below explains the idea nicely with mathematical animations made using the python library [mathematical animation engine (`manim`)](https://github.com/3b1b/manim).
```python
display.IFrame(src="https://www.youtube.com/embed/videoseries?list=PLZHQObOWTQDNU6R1_67000Dx_ZCJB-3pi", width=805, height=450)
```
Instead of using the mean-squared error for the loss function, we will consider the [cross-entropy](https://en.wikipedia.org/wiki/Cross_entropy) loss, which is more suitable for training a neural network classifier. We will have a glimpse of the information theory involved.
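As a quick illustration of the cross-entropy loss (a minimal sketch with made-up numbers, not the training code used later), for a one-hot label it reduces to the negative log-probability assigned to the true class:
```python
import numpy as np

q = np.array([0.7, 0.2, 0.1])   # predicted class probabilities, e.g. a softmax output
p = np.array([1.0, 0.0, 0.0])   # one-hot encoded true label: the true class is class 0

cross_entropy = -np.sum(p * np.log(q))  # = -log q(true class)
print(cross_entropy)  # about 0.357; it shrinks as more probability is put on the true class
```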
$\def\abs#1{\left\lvert #1 \right\rvert}
\def\Set#1{\left\{ #1 \right\}}
\def\mc#1{\mathcal{#1}}
\def\M#1{\boldsymbol{#1}}
\def\R#1{\mathsf{#1}}
\def\RM#1{\boldsymbol{\mathsf{#1}}}
\def\op#1{\operatorname{#1}}
\def\E{\op{E}}
\def\d{\mathrm{\mathstrut d}}
$
**What to know about vector calculus?**
To help explain the theory, we will use some notations from vector linear algebra and probability theory.
- [Vectors](https://en.wikipedia.org/wiki/Vector_(mathematics_and_physics)) in lowercase boldface font:
$$\M{x}=\begin{bmatrix}x_1 \\ x_2 \\ \vdots \end{bmatrix}$$
- [Matrices](https://en.wikipedia.org/wiki/Matrix_(mathematics)) in uppercase boldface font:
$$\M{W}=\begin{bmatrix}w_{11} & w_{12} & \cdots \\
w_{21} & \ddots & \\
\vdots & & \end{bmatrix}$$
- [Matrix multiplication](https://en.wikipedia.org/wiki/Matrix_multiplication) (a small numerical check follows this list):
$$\M{W}\M{x} = \begin{bmatrix}w_{11} & w_{12} & \cdots \\
w_{21} & \ddots & \\
\vdots & & \end{bmatrix}
\begin{bmatrix}x_1 \\ x_2 \\ \vdots \end{bmatrix}
=
\begin{bmatrix}w_{11}x_1 + w_{12}x_2 + \cdots \\ w_{21}x_1+\cdots \\ \vdots \end{bmatrix}
$$
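For concreteness, the matrix-vector product above can be checked numerically; the numbers below are arbitrary and chosen only for illustration.
```python
import numpy as np

W = np.array([[1., 2., 3.],
              [4., 5., 6.]])   # a 2x3 matrix
x = np.array([1., 0., -1.])    # a vector

print(W @ x)  # [1*1 + 2*0 + 3*(-1), 4*1 + 5*0 + 6*(-1)] = [-2. -2.]
```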
**What to know about Probability Theory?**
- [Random variables](https://en.wikipedia.org/wiki/Random_variable) in sans-serif font:
$$\underbrace{\R{y}}_{\text{random variable}}, \underbrace{\RM{x}=\begin{bmatrix}\R{x}_1 \\ \R{x}_2 \\ \vdots \end{bmatrix}}_{\text{random vector}}$$
- [Support sets](https://en.wikipedia.org/wiki/Support_(mathematics)) in calligraphic font:
$$\underbrace{\mc{Y}=\Set{0,1,\dots,k-1}}_{\text{finite set}}, \underbrace{\mc{X}=\mc{X}_1\times \mc{X}_2 \times \cdots}_{\text{product space}}$$
- [Joint distribution](https://en.wikipedia.org/wiki/Joint_probability_distribution#Mixed_case):
$$p_{\RM{x}\R{y}}(\M{x},y)= \underbrace{p_{\R{y}|\RM{x}}(y|\M{x})}_{\underbrace{\Pr}_{\text{probability measure}\kern-3em}\Set{\R{y}=y|\RM{x}=\M{x}}} \cdot \underbrace{p_{\RM{x}}(\M{x})}_{(\underbrace{\partial_{x_1}}_{\text{partial derivative w.r.t. $x_1$}\kern-5em} \partial_{x_2}\cdots)\Pr\Set{\RM{x} \leq \M{x}}\kern-4em}\kern1em \text{where}$$
- $p_{\R{y}|\RM{x}}(y|\M{x})$ is the *probability mass function [(pmf)](https://en.wikipedia.org/wiki/Probability_mass_function)* of $\R{y}=y\in \mc{Y}$ [conditioned](https://en.wikipedia.org/wiki/Conditional_probability_distribution) on $\RM{x}=\M{x}\in \mc{X}$, and
- $p_{\RM{x}}(\M{x})$ is the *(multivariate) probability density function [(pdf)](https://en.wikipedia.org/wiki/Probability_density_function#Densities_associated_with_multiple_variables)* of $\RM{x}=\M{x}\in \mc{X}$.
- For any function $g$ of $(\RM{x},y)$, the expectations are (a small simulation follows this list):
\begin{align}
\E[g(\RM{x},\R{y})|\RM{x}]&=\sum_{y\in \mc{Y}} g(\RM{x},y)\cdot p_{\R{y}|\RM{x}}(y|\RM{x})\tag{conditional expectation}
\\
\E[g(\RM{x},\R{y})] &=\int_{\mc{X}} \underbrace{\sum_{y\in \mc{Y}} g(\RM{x},y)\cdot \underbrace{p_{\RM{x},\R{y}}(\M{x},y)}_{p_{\R{y}|\RM{x}}(y|\M{x}) p_{\R{x}}(\M{x})}\kern-1.7em}_{\E[g(\RM{x},\R{y})|\RM{x}]}\kern1.4em\,\d \M{x} \tag{expectation}\\
&= \E[\E[g(\RM{x},\R{y})|\RM{x}]] \tag{iterated expectation}
\end{align}
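The iterated expectation can be sanity-checked with a small Monte Carlo simulation; the joint distribution below is made up for the purpose of this sketch and is unrelated to MNIST.
```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=100_000)                     # x ~ N(0, 1)
p_y1 = 1 / (1 + np.exp(-x))                      # P(y=1 | x), a sigmoid of x
y = (rng.random(x.shape) < p_y1).astype(float)   # y | x ~ Bernoulli(p_y1)

# E[y] estimated directly vs. via the iterated expectation E[E[y|x]]
print(y.mean(), p_y1.mean())  # the two numbers should be close (about 0.5)
```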
The following are some manims created to introduce the above notions.
```python
%%html
<b>Notation:</b><br>
```
<b>Notation:</b><br>
```python
%%html
<b>Distribution:</b><br>
```
<b>Distribution:</b><br>
```python
%%html
<b>Expectation:<br>
```
<b>Expectation:<br>
You may also create your own animations with `manim` in the jupyter notebook using `jupyter_manim` and `manimlib` as described [here](https://ccha23.github.io/CS1302ICP/Lecture5/Objects.html).
```python
import jupyter_manim
from manimlib.imports import *
```
**Exercise** Run the following cell and see the effect when changing
- Mobjects: `TextMobject('Hello, World!')` to `TexMobject(r'E=mc^2')` or `Circle()` or `Square()`.
- Animation objects: `Write` to `FadeIn` or `GrowFromCenter`.
You may take a look at the documentation [here](https://docs.manim.community/en/v0.2.0/index.html) and a more detailed [tutorial](https://talkingphysics.wordpress.com/2019/01/08/getting-started-animating-with-manim-and-python-3-7/) here.
```python
%%manim HelloWorld -l
class HelloWorld(Scene):
def construct(self):
self.play(Write(TextMobject('Hello, World!')))
```
## Classification Problem
A neural network learns from many examples collected together as a *dataset*. For instance, the [MNIST (Modified National Institute of Standards and Technology)](https://en.wikipedia.org/wiki/MNIST_database) dataset consists of labeled handwritten digits.
<a title="Josef Steppan, CC BY-SA 4.0 <https://creativecommons.org/licenses/by-sa/4.0>, via Wikimedia Commons" href="https://commons.wikimedia.org/wiki/File:MnistExamples.png"></a>
**What is an example in a dataset?**
A dataset is a sequence
\begin{align}
(\RM{x}_1,\R{y}_1),(\RM{x}_2,\R{y}_2), \dots\tag{dataset}
\end{align}
of *tuples/instances* $(\RM{x}_i,\R{y}_i)$, each of which consists of
- an *input feature vector* $\RM{x}_i$ such as an image of a handwritten digit and
- a *label* $\R{y}_i$ such as the digit type of the handwritten digit.
**What to learn from the dataset?**
One of the problems in machine learning is *classification*: The goal is to train a *classifier* that predicts a label $\R{y}$ for an input feature $\RM{x}$:
- A hard-decision classifier is a function $f:\mc{X}\to \mc{Y}$ such that
$f(\RM{x})$ predicts $\R{y}$.
- A probabilistic classifier is a conditional distribution $q_{\R{y}|\RM{x}}$ that estimates $p_{\R{y}|\RM{x}}$.
For MNIST, the goal is to classify the digit type of a handwritten digit. When given a handwritten digit,
- a hard-decision classifier returns a digit type, and
- a probabilistic classifier returns a distribution of the digit types.
**Why consider a probabilistic classifier?**
We often train a neural network as a probabilistic classifier because:
- A probabilistic classifier is more general and can also give a hard decision
$$f(\RM{x}):=\arg\max_{y\in \mc{Y}} q_{\R{y}|\RM{x}}(y|\RM{x})$$
by returning the estimated most likely digit type (a small numerical sketch of this rule is given after this list).
- A neural network can model the distribution $p_{\R{y}|\RM{x}}(\cdot|\RM{x})$ better than $\R{y}$ because its output is continuous.
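The following sketch (plain `numpy`, with a hypothetical output vector `q`) illustrates how a hard decision is obtained from a probabilistic classifier by taking the argmax:
```python
import numpy as np

# hypothetical estimated probabilities q(y|x) over the 10 digit types for one image
q = np.array([0.01, 0.02, 0.05, 0.60, 0.05, 0.10, 0.05, 0.04, 0.05, 0.03])
hard_decision = q.argmax()   # index of the most likely digit type under q
print(hard_decision)         # 3
```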
**Why can we learn from examples?**
For the problem to be meaningful, $(\RM{x},\R{y})$ is assumed to be random with some unknown joint distribution $p_{\RM{x},\R{y}}$.
- If we always had $\R{y}=y$ instead, then a perfect classifier could just return $y$ without even looking at $\RM{x}$.
- If $p_{\RM{x},\R{y}}$ were known instead, then $p_{\R{y}|\RM{x}}$ would also be known and therefore would not need to be estimated.
Examples are often collected randomly and independently from a population, i.e., as [i.i.d. samples](https://en.wikipedia.org/wiki/Independent_and_identically_distributed_random_variables) of $(\RM{x},\R{y})$:
$$(\RM{x}_1,\R{y}_1), (\RM{x}_2,\R{y}_2), \dots\sim_{\text{i.i.d.}} p_{\RM{x},\R{y}}.$$
- If all the examples were the same, they could not reveal how $\R{y}$ depends on $\RM{x}$.
- The observed distribution of i.i.d. samples converges to the unknown distribution by the [law of large numbers](https://en.wikipedia.org/wiki/Law_of_large_numbers).
**How to determine if a classifier is good?**
Ultimately, we desire a classifier with the maximum accuracy in predicting $\R{y}$ but doing so is [computationally too difficult](https://en.wikipedia.org/wiki/Loss_functions_for_classification).
Instead, we regard a classification algorithm to be reasonably good if it can achieve the maximum possible accuracy as the number of training samples goes to $\infty$. This is more formally stated below:
**Definition** A probabilistic classifier for the input feature $\RM{x}$ and label $\R{y}$ with unknown joint distribution is a conditional pmf
$$
\R{q}_{\R{y}|\RM{x}}(y|\M{x})\qquad \text{for }\M{x}\in \mc{X}, y\in \mc{Y}
$$
defined as a function of the i.i.d. samples
$$\{(\RM{x}_i,\R{y}_i)\}_{i=1}^N$$
of $(\RM{x},\R{y})$ (but independent of $(\RM{x},\R{y})$).
The classifier is said to be a *consistent* estimate (of $p_{\R{y}|\RM{x}}$) if
\begin{align}
\lim_{N\to \infty} \Pr\Set{\R{q}_{\R{y}|\RM{x}}(y|\RM{x})=p_{\R{y}|\RM{x}}(y|\RM{x})\text{ for all } y\in \mc{Y}}=1,\tag{consistency}
\end{align}
namely, $\R{q}_{\R{y}|\RM{x}}(y|\RM{x})$ converges almost surely (a.s.) to $p_{\R{y}|\RM{x}}$. $\square$
A consistent probabilistic classifier gives rise to an asymptotically optimal hard-decision classifier that achieves the maximum accuracy.
**Proposition** If, for some $\epsilon\geq 0$,
$$\Pr\Set{\R{q}_{\R{y}|\RM{x}}(y|\RM{x})=p_{\R{y}|\RM{x}}(y|\RM{x}) \text{ for all } y\in \mc{Y}}=1-\epsilon,$$
the (hard-decision) classifier
\begin{align}\R{f}(\RM{x}):=\arg\max_{y\in \mc{Y}} \R{q}_{\R{y}|\RM{x}}(y|\RM{x})\tag{hard decision}\end{align}
achieves an accuracy
\begin{align}
\sup_{f:\mc{X}\to \mc{Y}} \Pr(\R{y}= f(\RM{x})) &\geq \E\left[\max_{y\in \mc{Y}} p_{\R{y}|\M{x}}(y|\RM{x})\right] - \epsilon.\tag{accuracy lower bound}
\end{align}
where the expectation is the maximum possible accuracy. $\square$
*Proof:* For any classifier $f$,
$$ \begin{align*}
\Pr(\R{y}= f(\RM{x}))
&= \E[p_{\R{y}|\M{x}}(f(\RM{x})|\RM{x})] \\
&\leq \E\left[\max_{y\in \mc{Y}} p_{\R{y}|\M{x}}(y|\RM{x})\right]\end{align*}
$$
where the last inequality holds with equality for the hard-decision classifier with $\R{q}$ replaced by $p$. This implies the desired accuracy lower bound for the case $\epsilon=0$. The more general case with $\epsilon\geq 0$ can be derived similarly. $\blacksquare$
**How can we obtain a consistent classifier?**
We train a neural network to minimize certain *loss*. A common loss function for classification uses the [cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) measure in information theory.
The theoretical underpinning is the following identity that relates three information quantities:
\begin{align}
\overbrace{\E\left[\log \frac{1}{q_{\R{y}|\RM{x}}(\R{y}|\RM{x})}\right]}^{ \text{Cross entropy}\\ H(p_{\R{y}|\RM{x}}\|q_{\R{y}|\RM{x}}|p_{\RM{x}}):=} &\equiv \overbrace{\E\left[\log \frac{1}{p_{\R{y}|\RM{x}}(\R{y}|\RM{x})}\right]}^{\text{Conditional entropy}\\ H(\R{y}|\RM{x}):=} + \overbrace{\E\left[\log \frac{p_{\R{y}|\RM{x}}(\R{y}|\RM{x})}{q_{\R{y}|\RM{x}}(\R{y}|\RM{x})}\right].}^{\text{Divergence}\\ D(p_{\R{y}|\RM{x}}\|q_{\R{y}|\RM{x}}|p_{\RM{x}}):=}\tag{identity}
\end{align}
The identity can be proved quite easily using the linearity of expectation
$$ \E[\R{u}+\R{v}]=\E[\R{u}]+\E[\R{v}],$$
and a property of logarithm that
$$\log uv = \log u+ \log v.$$
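A quick numerical check of the identity for a single value of $\RM{x}$, using a hypothetical pair of pmfs `p` and `q` (illustrative values only): the cross entropy equals the entropy plus the divergence.
```python
import numpy as np

p = np.array([0.7, 0.2, 0.1])   # hypothetical true conditional pmf p(y|x)
q = np.array([0.5, 0.3, 0.2])   # hypothetical estimate q(y|x)

cross_entropy = -(p * np.log(q)).sum()       # H(p||q)
entropy       = -(p * np.log(p)).sum()       # H(p)
divergence    =  (p * np.log(p / q)).sum()   # D(p||q)
print(np.isclose(cross_entropy, entropy + divergence))  # True
```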
```python
%%html
<b>Information Identity</b><br>
```
<b>Information Identity</b><br>
**Proposition** With $q_{\R{y}|\RM{x}}(y|\M{x})$ being a valid pmf of a random variable taking values from $\mc{Y}$ conditioned on a random variable taking values from $\mc{X}$,
\begin{align}
\min_{q_{\R{y}|\RM{x}}} H(p_{\R{y}|\RM{x}}\|q_{\R{y}|\RM{x}}|p_{\RM{x}})
&= H(\R{y}|\RM{x})
\end{align}
and the optimal solution equals $p_{\R{y}|\RM{x}}(y|\RM{x})$ a.s. for all $y\in \mc{Y}$. $\square$
Hence, the output of a neural network that minimizes the cross entropy equals $p_{\R{y}|\RM{x}}(y|\RM{x})$ a.s. for all $y\in \mc{Y}$ and any possible input image $\RM{x}$.
*Proof:* It suffices to show that
\begin{align}
D(p_{\R{y}|\RM{x}}\|q_{\R{y}|\RM{x}}|p_{\RM{x}})\geq 0 \tag{positivity of divergence}
\end{align}
with equality iff $q_{\R{y}|\RM{x}}(y|\RM{x})=p_{\R{y}|\RM{x}}(y|\RM{x})$ a.s. This, in turn, can be proved using the [log-sum inequality](https://en.wikipedia.org/wiki/Log_sum_inequality):
\begin{align}
\sum_{i} a_i \log\left(\frac{a_i}{b_i}\right) \geq (\textstyle \sum_{i} a_i) \log\left(\frac{\sum_{i} a_i}{\sum_{i} b_i}\right)\tag{log-sum inequality}
\end{align}
for any non-negative sequences $\{a_i\}$ and $\{b_i\}$. $\blacksquare$
## Data Preparation
### Load the dataset
Like the iris dataset, the MNIST dataset can be obtained in many ways due to its popularity in image recognition. For instance, one may use `tensorflow.keras.datasets.mnist.load_data` to load the data as tuples/arrays and convert it to `DataFrame`. However, training a neural network often requires a lot of data and computational power. It may be inefficient or impossible to load all data into memory.
A better way is to use the package [`tensorflow_datasets`](https://blog.tensorflow.org/2019/02/introducing-tensorflow-datasets.html), which lazily loads the dataset and prepares the data as [`Tensor`s](https://www.tensorflow.org/guide/tensor), which can be operated on faster by a GPU or TPU than by a CPU.
```python
import tensorflow_datasets as tfds # give a shorter name tfds for convenience
import os
user_home = os.getenv("HOME") # get user home directory
data_dir = os.path.join(user_home, "data") # download folder for data
ds, ds_info = tfds.load(
'mnist',
data_dir=data_dir, # download location
as_supervised=True, # separate input features and label
with_info=True, # return information of the dataset
)
# print information related to loading the dataset
import pprint as pp
print('-' * 79)
print(f'Data downloaded to {data_dir}')
print(f'Data to be loaded in memory:')
pp.pprint(ds)
```
The function `tfds.load` downloads the data to `data_dir` and prepares it for loading via the variable `ds`. In particular, the dataset is split into
- a training set `ds["train"]` and
- a test set `ds["test"]`.
`tfds.load?` shows more information about the function. E.g., we can control the split ratio using the argument [`split`](https://www.tensorflow.org/datasets/splits).
**Why split the data?**
The test set is used to evaluate the performance of a neural network trained using the training set (separate from the test set).
The purpose of separating the test set from the training set is to avoid an *overly-optimistic* performance estimate. Why?
Suppose the final exam questions (test set) are the same as the previous homework questions (training set).
- Students may get a high exam score simply by studying the model answers to the homework instead of understanding the entire subject.
- The exam score is therefore an overly-optimistic estimate of the students' understanding of the subject.
**Exercise** Assign to `train_size` and `test_size` the numbers of instances in the training set and test set respectively.
*Hint: Both the training and test sets are loaded as [`Dataset` objects](https://www.tensorflow.org/api_docs/python/tf/data/Dataset). Since the loading is lazy, i.e., the data is not yet in memory, we cannot count the number of instances directly. Instead, we obtain such information from `ds_info`.*
```python
# YOUR CODE HERE
raise NotImplementedError()
train_size, test_size
```
```python
# tests
```
Note that the training set is often much larger than the test set especially for deep learning because
- training a neural network requires many examples but
- estimating its performance does not.
### Show an example
The following retrieves an example from the training set.
```python
for image, label in ds["train"].take(1):
print(
f'image dtype: {type(image)} shape: {image.shape} element dtype: {image.dtype}'
)
print(f'label dtype: {label.dtype}')
```
The for loop above takes one example from `ds["train"]` using the method [`take`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#take) and prints its data types.
- The handwritten digit is represented by a 28x28x1 [`EagerTensor`](https://www.tensorflow.org/guide/eager), which is essentially a 2D array of bytes (8-bit unsigned integers `uint8`).
- The digit type is an integer.
The following function plots the image using the `imshow` function from `matplotlib.pyplot`. We set the parameter `cmap` to `gray_r` so the color is darker if the pixel value is larger. The slice operator `[:,:,0]` reshapes the numpy array from 3 dimensions to 2 dimensions.
```python
def plot_mnist_image(example, ax=None, pixel_format=None):
(image, label) = example
if ax == None:
ax = plt.gca()
ax.title.set_text(label.numpy()) # show digit type as plot title
ax.imshow(image[:, :, 0], cmap="gray_r") # show image
# Major ticks
ax.set_xticks(np.arange(0, 28, 3))
ax.set_yticks(np.arange(0, 28, 3))
# Minor ticks
ax.set_xticks(np.arange(-.5, 28, 1), minor=True)
ax.set_yticks(np.arange(-.5, 28, 1), minor=True)
if pixel_format is not None:
for i in range(28):
for j in range(28):
ax.text(
j,
i,
pixel_format.format(image[i, j,
0].numpy()), # show pixel value
va='center',
ha='center',
color='white',
fontweight='bold',
fontsize='small')
ax.grid(color='lightblue', linestyle='-', linewidth=1, which='minor')
ax.set_xlabel('2nd dimension')
ax.set_ylabel('1st dimension')
ax.title.set_text('Image with label ' + ax.title.get_text())
if input('Execute? [Y/n]').lower() != 'n':
plt.figure(figsize=(11, 11), dpi=80)
for example in ds["train"].take(1):
plot_mnist_image(example, pixel_format='{}')
plt.show()
```
**Exercise** Complete the following code to generate a matrix plot of the first 50 examples from the training sets.
The parameters `nrows` and `ncols` specify the number of rows and columns respectively. Your code may look like
```Python
...
for ax, example in zip(axes.flat, ds["train"].____(nrows * ncols)):
plot_mnist_image(_______, ax)
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
...
```
and the output image should look like
```python
if input('Execute? [Y/n]').lower() != 'n':
def plot_mnist_image_matrix(ds, nrows=5, ncols=10):
fig, axes = plt.subplots(nrows=nrows, ncols=ncols)
# YOUR CODE HERE
raise NotImplementedError()
fig.tight_layout() # adjust spacing between subplots automatically
return fig, axes
fig, axes = plot_mnist_image_matrix(ds, nrows=5)
fig.set_figwidth(9)
fig.set_figheight(6)
fig.set_dpi(80)
# plt.savefig('mnist_examples.svg')
plt.show()
```
### Preprocess the data
We will use the [`tensorflow`](https://www.tensorflow.org/) library to process the data and train the neural network. (Another popular library is [PyTorch](https://pytorch.org/).)
```python
import tensorflow.compat.v2 as tf # explicitly use tensorflow version 2
```
Each pixel is stored as an integer from $\Set{0,\dots,255}$ ($2^8$ possible values). However, for computations by the neural network, we need to convert it to a floating point number. We will also normalize each pixel value to be within the unit interval $[0,1]$:
\begin{align}
v \mapsto \frac{v - v_{\min}}{v_{\max} - v_{\min}} = \frac{v}{255}\tag{min-max normalization}
\end{align}
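As a toy illustration of the formula (independent of the exercise below, and using plain `numpy` rather than TensorFlow), dividing 8-bit pixel values by 255 maps them into $[0,1]$:
```python
import numpy as np

pixels = np.array([0, 31, 127, 255], dtype=np.uint8)   # toy pixel values
normalized = pixels.astype(np.float32) / 255           # map {0,...,255} into [0, 1]
print(normalized)   # [0.         0.12156863 0.49803922 1.        ]
```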
**Exercise** Using the function `map`, normalize each element of an image to the unit interval $[0,1]$ after converting them to `tf.float32` using [`tf.cast`](https://www.tensorflow.org/api_docs/python/tf/cast).
*Hint:* The normalization factor is NOT 256. Your code may look like
```Python
...
ds_n[part] = ds[part].map(
lambda image, label: (_____(image, _____) / ___, label),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
...
```
and the output image should look like
```python
def normalize_mnist(ds):
"""
Returns:
MNIST Dataset with image pixel values normalized to float32 in [0,1].
"""
ds_n = dict.fromkeys(ds.keys()) # initialize the normalized dataset
for part in ds.keys():
# normalize pixel values to [0,1]
# YOUR CODE HERE
raise NotImplementedError()
return ds_n
ds_n = normalize_mnist(ds)
pp.pprint(ds_n)
plt.figure(figsize=(11, 11), dpi=80)
for example in ds_n["train"].take(1):
plot_mnist_image(example,
pixel_format='{:.2f}') # show pixel value to 2 d.p.s
plt.savefig('mnist_example_normalized.svg')
plt.show()
```
```python
# tests
```
To avoid overfitting, the training of a neural network uses *stochastic gradient descent* which
- divides the training into many steps where
- each step uses a *randomly* selected minibatch of samples
- to improve the neural network *bit-by-bit*.
The following code specifies the batch size and enables caching and prefetching to reduce the latency in loading examples repeatedly for training and testing.
```python
def batch_mnist(ds_n):
ds_b = dict.fromkeys(ds_n.keys()) # initialize the batched dataset
for part in ds_n.keys():
ds_b[part] = (
ds_n[part].batch(
128) # Use a minibatch of examples for each training step
.shuffle(
ds_info.splits[part].num_examples,
reshuffle_each_iteration=True) # shuffle data for each epoch
.cache() # cache current elements
.prefetch(tf.data.experimental.AUTOTUNE)
) # preload subsequent elements
return ds_b
ds_b = batch_mnist(ds_n)
pp.pprint(ds_b)
```
**Exercise** The output to the above cell should look like
```Python
{'test': <PrefetchDataset shapes: ((None, 28, 28, 1), (None,)), types: (tf.float32, tf.int64)>,
'train': <PrefetchDataset shapes: ((None, 28, 28, 1), (None,)), types: (tf.float32, tf.int64)>}
```
with a new first dimension of unknown size `None`. Why?
YOUR ANSWER HERE
## Training and Testing
### Define the neural network architecture
As mentioned earlier, the neural network computes an estimate $q_{\R{y}|\RM{x}}(y|\M{x})$ of the unknown probability $p_{\R{y}|\RM{x}}(y|\M{x})$ for any image $\M{x}\in \mc{X}$ and label $y\in \mc{Y}$. The computation is organized into layers of computation units called the *neurons*.
For $\ell\in \{0,\dots,L\}$ and integer $L\geq 1$, let
- $\M{a}^{(\ell)}$ be the output of the $\ell$-th layer of the neural network, and
- $a^{(\ell)}_i$ be the $i$-th element of $\M{a}^{(\ell)}$. The element is computed from the output $\M{a}^{(\ell-1)}$ of its previous layer except for $\ell=0$.
The $0$-th layer is called the *input layer* while the $L$-th layer is called the *output layer*. All other layers are called the *hidden layers*.
A basic neural network architecture computes $q_{\R{y}|\RM{x}}(y|\M{x})$ as
\begin{align}
[q_{\R{y}|\RM{x}}(y|\M{x})]_{y\in \mc{Y}} &:= \M{a}^{(L)}
\end{align}
where
\begin{align}
\M{a}^{(\ell)}&:=\begin{cases}
\M{x} & \ell=0\\
\sigma^{(\ell)}(\overbrace{\M{W}^{(\ell)}\M{a}^{(\ell-1)}+\M{b}^{(\ell)}}^{\RM{z}^{(\ell)}:=})& \ell>0;
\end{cases}\tag{net}
\end{align}
- $\M{W}^{(\ell)}$ is a matrix of weights;
- $\M{b}^{(\ell)}$ is a vector called bias; and
- $\sigma^{(\ell)}$ is a real-valued function called the *activation function*.
To ensure $\M{a}^{(L)}$ is a valid probability vector, the soft-max activation function is often used for the last layer:
$$ \sigma^{(L)}\left(\left[\begin{smallmatrix}z^{(L)}_1 \\ \vdots \\ z^{(L)}_k\end{smallmatrix}\right]\right) := \frac{1}{\sum_{i=1}^k e^{z^{(L)}_i}} \left[\begin{smallmatrix}e^{z^{(L)}_1} \\ \vdots \\ e^{z^{(L)}_k}\end{smallmatrix}\right] $$
where $k:=\abs{\mc{Y}}=10$ is the number of distinct class labels.
This ensures $\M{a}^{(L)}$ (the output of soft-max) is stochastic, i.e.,
$$\sum_{i=1}^k a_i^{(L)} = 1\kern1em \text{and} \kern1em a_i^{(L)}\geq 0\qquad \forall i\in \{1,\dots,k\}.$$
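A minimal `numpy` sketch (with a hypothetical vector of pre-activations `z`) showing that the soft-max output is indeed a stochastic vector:
```python
import numpy as np

def softmax(z):
    e = np.exp(z - z.max())   # subtract the maximum for numerical stability
    return e / e.sum()

z = np.array([2.0, 1.0, 0.1, -1.0])   # hypothetical pre-activations z^(L)
a = softmax(z)
print(a, a.sum())                     # non-negative entries that sum to 1
```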
The activation functions $\sigma^{(\ell)}$ for the other layers $1\leq \ell<L$ are often the vectorized version of
- sigmoid:
$$\sigma_{\text{sigmoid}}(z) = \frac{1}{1+e^{-z}}$$
- rectified linear unit (ReLU):
$$ \sigma_{\text{ReLU}}(z) = \max\{0,z\}. $$
```python
%%html
<b>Activation function:</b><br>
```
<b>Activation function:</b><br>
The following plots the ReLU activation function.
```python
def ReLU(z):
return np.max([np.zeros(z.shape), z], axis=0)
z = np.linspace(-5, 5, 100)
plt.plot(z, ReLU(z))
plt.xlim(-5, 5)
plt.title(r'ReLU: $\max\{0,z\}$')
plt.xlabel(r'$z$')
plt.show()
```
**Exercise** Complete the vectorized function `sigmoid` using the vectorized exponentiation `np.exp`.
```python
def sigmoid(z):
# YOUR CODE HERE
raise NotImplementedError()
plt.plot(z, sigmoid(z))
plt.xlim(-5, 5)
plt.title(r'Sigmoid function: $\frac{1}{1+e^{-z}}$')
plt.xlabel(r'$z$')
plt.show()
```
```python
# tests
```
The following uses the [`keras`](https://keras.io/) library to define the basic neural network architecture. `keras` runs on top of `tensorflow` and offers a higher-level abstraction to simplify the construction and training of a neural network. ([`tflearn`](https://github.com/tflearn/tflearn) is another library that provides a higher-level API for `tensorflow`.)
```python
def create_simple_model():
tf.keras.backend.clear_session() # clear keras cache.
# See https://github.com/keras-team/keras/issues/7294
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
tf.keras.layers.Dense(16, activation=tf.keras.activations.relu),
tf.keras.layers.Dense(16, activation=tf.keras.activations.relu),
tf.keras.layers.Dense(10, activation=tf.keras.activations.softmax)
], 'Simple_sequential')
return model
model = create_simple_model()
model.summary()
```
The above defines [a linear stack](https://www.tensorflow.org/api_docs/python/tf/keras/layers) of [fully-connected layers](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense) after [flattening the input](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Flatten). The method `summary` is useful for [debugging in Keras](https://keras.io/examples/keras_recipes/debugging_tips/).
**Exercise** Assign to `n_hidden_layers` the number of hidden layers for the above simple sequential model.
*Hint:* The layer `Flatten` does not count as a hidden layer since it simply reshapes the input without using any trainable parameters. The output layer also does not count as a hidden layer since its output is the output of the neural network, not intermediate (hidden) values that require further processing by the neurons.
```python
# YOUR CODE HERE
raise NotImplementedError()
n_hidden_layers
```
```python
# tests
```
### Train and validate
Recall that a neural network that minimizes the cross entropy gives $p_{\R{y}|\RM{x}}(y|\RM{x})$ a.s. for all $y\in \mc{Y}$ and any possible input image $\RM{x}$:
\begin{align}
\min_{q_{\R{y}|\RM{x}}} \overbrace{\E\left[\log \frac{1}{q_{\R{y}|\RM{x}}(\R{y}|\RM{x})}\right]}^{ \text{Cross entropy}\\ H(p_{\R{y}|\RM{x}}\|q_{\R{y}|\RM{x}}|p_{\RM{x}}):=}
&= H(\R{y}|\RM{x})
\end{align}
The cross entropy cannot be computed exactly without knowing the joint distribution $p_{\RM{x}\R{y}}$. Nevertheless, it can be estimated from a batch of i.i.d. samples $(\RM{x}_{\R{j}_i},\R{y}_{\R{j}_i})$ for $1\leq i\leq n$:
\begin{align}
\R{L}(\theta)&:=\frac1n \sum_{i=1}^n \log \frac{1}{q_{\R{y}|\RM{x}}(\R{y}_{\R{j}_i}|\RM{x}_{\R{j}_i})}\tag{empirical loss}
\end{align}
where
$$\theta := \operatorname{flat}(\M{W}^{(\ell)},\M{b}^{(\ell)}\mid 1\leq \ell \leq L)$$
is the vector of parameters of the neural network defined in (net).
A mini-batch [gradient descent algorithm](https://en.wikipedia.org/wiki/Stochastic_gradient_descent) is often used to reduce the loss. It iteratively updates/trains the neural network parameters:
$$\theta \leftarrow \theta -s\nabla \R{L}(\theta)$$
by computing the gradient $\nabla \R{L}(\theta)$ on a randomly selected minibatch of examples and choosing an appropriate learning rate $s$.
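The update rule can be illustrated on a hypothetical one-parameter quadratic loss (not the neural-network loss) to see the role of the learning rate `s`:
```python
loss = lambda theta: (theta - 3.0) ** 2      # toy loss with minimum at theta = 3
grad = lambda theta: 2.0 * (theta - 3.0)     # its gradient

theta, s = 0.0, 0.1                          # initial parameter and learning rate
for step in range(50):
    theta = theta - s * grad(theta)          # gradient-descent update
print(theta, loss(theta))                    # theta approaches 3, loss approaches 0
```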
As explained in the [lecture series on deep learning](https://www.youtube.com/embed/videoseries?list=PLZHQObOWTQDNU6R1_67000Dx_ZCJB-3pi) in the introduction section:
- The gradient can be computed systematically using a technique called *backpropagation* due to the structure of the neural network in (net).
- The learning rate can affect the convergence rate of the loss to a local minimum:
  - $\theta$ may overshoot its optimal value if $s$ is too large, and
  - the convergence can be very slow if $s$ is too small.
A more advanced method called [Adam (Adaptive Moment Estimation)](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) can adaptively choose $s$ to speed up the convergence.
```python
%%html
<div>
<b>What is gradient descent?</b><br>
</div>
<div>
<b>How to choose the step size?</b><br>
</div>
```
<div>
<b>What is gradient descent?</b><br>
</div>
<div>
<b>How to choose the step size?</b><br>
</div>
The [loss function](https://keras.io/api/losses/), gradient descent algorithm, and the performance metrics can be specified using the [`compile` method](https://keras.io/api/losses/).
```python
def compile_model(model):
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
optimizer=tf.keras.optimizers.Adam(0.001),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return model
compile_model(model)
model.loss, model.optimizer
```
We can now train the neural network using the method [`fit`](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit) of the compiled model:
```python
if input('Train? [Y/n]').lower() != 'n':
model.fit(ds_b["train"])
```
**Exercise** By default, the neural network is trained for 1 epoch. What happens to the training accuracy if you rerun the above cell to train the model for another epoch?
YOUR ANSWER HERE
We can set the parameter `epochs` to train the neural network for multiple epochs since it is quite unlikely to train a neural network well with just one epoch. To determine whether the neural network is well-trained (and when to stop training), we should also use a separate validation set to evaluate the performance of the neural network. The validation set can be specified using the parameter `validation_data` as follows:
```python
if input('Train? [Y/n]').lower() != 'n':
model.fit(ds_b["train"], epochs=6, validation_data=ds_b["test"])
```
**Exercise** Is the maximum validation accuracy `val_sparse_categorical_accuracy` (over different epochs) an unbiased estimate of the performance of deep learning for the given dataset? If not, how to avoid the bias?
YOUR ANSWER HERE
**Exercise** Is it a good idea to use cross-validation to evaluate the neural network? Why or why not?
YOUR ANSWER HERE
### Log the results
To call additional functions during training, we can add the functions to the `callbacks` parameter of the model `fit` method. For instance:
```python
import tqdm.keras
if input('Train? [Y/n]').lower() != 'n':
model.fit(ds_b["train"],
epochs=6,
validation_data=ds_b["test"],
verbose=0,
callbacks=[tqdm.keras.TqdmCallback(verbose=2)])
```
The above code uses [`tqdm.keras.TqdmCallback()`](https://tqdm.github.io/docs/keras/) to return a callback function that displays a graphical progress bar:
- Setting `verbose=0` for the method `fit` disables the default text-based progress bar.
- Setting `verbose=2` for the class `TqdmCallback` shows and keeps the progress bars for training each batch. Try changing `verbose` to other values to see different effects.
An important use of callback functions is to save the models and results during training for further analysis. We define the following function `train_model` for this purpose:
- Take a look at the docstring to learn its basic usage, and then
- learn the implementations in the source code.
```python
import os, datetime, pytz
def train_model(model,
fit_params={},
log_root='.',
save_log_params=None,
save_model_params=None,
debug_params=None):
'''Train and test the model, and return the log directory path name.
Parameters:
----------
log_root (str): the root directory for creating log directory
fit_params (dict): dictionary of parameters to pass to model.fit.
save_log_params (dict): dictionary of parameters to pass to
tf.keras.callbacks.TensorBoard to save the results for TensorBoard.
The default value None means no logging of the results.
save_model_params (dict): dictionary of parameters to pass to
tf.keras.callbacks.ModelCheckpoint to save the model to checkpoint
files.
The default value None means no saving of the models.
debug_params (dict): dictionary of parameters to pass to
tf.debugging.experimental.enable_dump_debug_info for debugger
v2 in tensorboard.
The default value None means no logging of the debug information.
Returns:
-------
str: log directory path that points to a subfolder of log_root named
using the current time.
'''
# use a subfolder named by the current time to distinguish repeated runs
log_dir = os.path.join(
log_root,
datetime.datetime.now(
tz=pytz.timezone('Asia/Hong_Kong')).strftime("%Y%m%d-%H%M%S"))
callbacks = fit_params.pop('callbacks', []).copy()
if save_log_params is not None:
# add callback to save the training log for further analysis by tensorboard
callbacks.append(
tf.keras.callbacks.TensorBoard(log_dir,
**save_log_params))
if save_model_params is not None:
# save the model as checkpoint files after each training epoch
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(os.path.join(log_dir, '{epoch}.ckpt'),
**save_model_params))
if debug_params is not None:
# save information for debugger v2 in tensorboard
tf.debugging.experimental.enable_dump_debug_info(
log_dir, **debug_params)
# training + testing (validation)
model.fit(ds_b['train'],
validation_data=ds_b['test'],
callbacks=callbacks,
**fit_params)
return log_dir
```
For example:
```python
fit_params = {'epochs': 6, 'callbacks': [tqdm.keras.TqdmCallback()], 'verbose': 0}
log_root = 'private/demo/'
save_log_params = {'update_freq': 100, 'histogram_freq': 1}
save_model_params = {'save_weights_only': True, 'verbose': 1}
debug_params = {'tensor_debug_mode': "FULL_HEALTH", 'circular_buffer_size': -1}
if input('Train? [Y/n]').lower() != 'n':
model = compile_model(create_simple_model())
log_dir = train_model(model,
fit_params = fit_params,
log_root=log_root,
save_log_params=save_log_params,
save_model_params=save_model_params,
debug_params=debug_params)
```
By providing the `save_model_params` to the callback [`tf.keras.callbacks.ModelCheckpoint`](https://www.tensorflow.org/tutorials/keras/save_and_load#save_checkpoints_during_training), the model is saved at the end of each epoch to `log_dir`.
```python
!ls {log_dir}
```
Saving the model is useful because it often takes a long time to train a neural network. To reload the model from the latest checkpoint and continue to train it:
```python
if input('Continue to train? [Y/n]').lower() != 'n':
# load the weights of the previously trained model
restored_model = compile_model(create_simple_model())
restored_model.load_weights(tf.train.latest_checkpoint(log_dir))
# continue to train
train_model(restored_model,
log_root=log_root,
save_log_params=save_log_params)
```
By providing [`tf.keras.callbacks.TensorBoard`](https://www.tensorflow.org/tensorboard/get_started#using_tensorboard_with_keras_modelfit) as a callback function to the `fit` method earlier, the training logs can be analyzed using TensorBoard.
```python
if input('Execute? [Y/n]').lower() != 'n':
%load_ext tensorboard
%tensorboard --logdir {log_dir}
```
The `SCALARS` tab shows the curves of training and validation losses/accuracies after different batches/epochs. The curves often show jitter because the gradient descent is stochastic (random). To see the typical performance, a smoothing factor $\theta\in [0,1]$ can be applied on the left panel. The smoothed curve $\bar{l}(t)$ of the original curve $l(t)$ is defined as
\begin{align}
\bar{l}(t) = \theta \bar{l}(t-1) + (1-\theta) l(t)
\end{align}
which is an exponentially weighted moving average. Try changing the smoothing factor on the left panel to see the effect.
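A minimal sketch of the smoothing rule applied to a synthetic noisy loss curve (illustrative data, not an actual training log):
```python
import numpy as np

rng = np.random.default_rng(0)
l = 1.0 / np.arange(1, 101) + 0.05 * rng.standard_normal(100)  # noisy decreasing "loss"

theta = 0.9                        # smoothing factor
smoothed = np.empty_like(l)
smoothed[0] = l[0]
for t in range(1, len(l)):
    smoothed[t] = theta * smoothed[t - 1] + (1 - theta) * l[t]
print(l[-5:], smoothed[-5:])       # the smoothed curve has much smaller jitter
```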
**Exercise** If the smoothing factor $\theta$ is too large, would it cause bias when using the empirical loss or performance to estimate the actual loss or performance? If so, is the estimate overly optimistic or pessimistic?
YOUR ANSWER HERE
We can also visualize the input images in TensorBoard:
- Run the following cell to write the images to the log directory.
- Click the `refresh` button on the top of the previous TensorBoard panel.
- Click the `IMAGE` tab to show the images.
```python
if input('Execute? [Y/n]').lower() != 'n':
file_writer = tf.summary.create_file_writer(log_dir)
with file_writer.as_default():
# Don't forget to reshape.
images = np.reshape([image for (image, label) in ds["train"].take(25)],
(-1, 28, 28, 1))
tf.summary.image("25 training data examples",
images,
max_outputs=25,
step=0)
```
In addition to presenting the results, TensorBoard is useful for debugging deep learning. In particular, learn
- to check the model graph under the [`GRAPHS`](https://www.tensorflow.org/tensorboard/graphs) tab,
- to debug using the [`DEBUGGER v2` tab](https://www.tensorflow.org/tensorboard/debugger_v2), and
- to [publish your results](https://www.tensorflow.org/tensorboard/get_started#tensorboarddev_host_and_share_your_ml_experiment_results).
TensorBoard can also show simultaneously the logs of different runs stored in different subfolders of the log directory:
```python
if input('Execute? [Y/n]').lower() != 'n':
%load_ext tensorboard
%tensorboard --logdir {log_root}
```
You can select different runs on the left panel to compare their performance.
Note that loading the log to TensorBoard may consume a lot of memory. You can list the TensorBoard notebook instances and kill those you do not need anymore by running `!kill {pid}`.
```python
import tensorboard as tb
tb.notebook.list() # list all the running TensorBoard notebooks.
pids_to_kill = [] # modify the list of pid to kill
for pid in pids_to_kill:
!kill {pid}
```
**Exercise** Train the following network with [dropout](https://en.wikipedia.org/wiki/Dilution_(neural_networks)#Dropout). Try to tune the network for the best accuracy. Use `log_root='logs/dropout/'` so that your log will also be submitted along with your notebook. Put your training code inside the body of the conditional `if input...` for autograding to work.
```python
def create_dropout_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
tf.keras.layers.Dense(128, activation=tf.keras.activations.relu),
tf.keras.layers.Dropout(0.2), # dropout
tf.keras.layers.Dense(10, activation=tf.keras.activations.softmax)
], name="Dropout")
return model
model = compile_model(create_dropout_model())
print(model.summary())
log_root = 'logs/dropout/'
if input('Train? [Y/n]').lower() != 'n':
# YOUR CODE HERE
raise NotImplementedError()
```
**Exercise** Explore the [convolutional neural network (CNN)](https://en.wikipedia.org/wiki/Convolutional_neural_network). Try to tune the network for the best accuracy.
```python
def create_cnn_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32,
3,
activation='relu',
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
], name="CNN")
return model
model = compile_model(create_cnn_model())
print(model.summary())
log_root = 'logs/cnn/'
if input('Train? [Y/n]').lower() != 'n':
# YOUR CODE HERE
raise NotImplementedError()
```
**Exercise** Launch TensorBoard to show the best performances of each of the two neural network architectures. Note that to clean up the log of the inferior results, you may need to kill the TensorBoard instance. It is easier to use the vscode interface or the terminal in the lab interface to remove folders.
```python
if input('Execute? [Y/n]').lower() != 'n':
# YOUR CODE HERE
raise NotImplementedError()
```
## Deployment
Once you are satisfied with the result, you can deploy the model as a web application.
```python
model.save('model.h5')
```
Convert the model to files in `mnist/model` that can be loaded by [`tensorflow.js`](https://www.tensorflow.org/js) on the page `mnist/index.html`.
```python
!tensorflowjs_converter --input_format keras 'model.h5' 'mnist/model'
```
To download the `mnist` folder, we first compress it as follows:
```python
!zip -r mnist.zip mnist/* index.html
```
Finally, you can download the zip file [here](./mnist.zip) and host the web application on a static web server.
*(dataset metadata: Tutorial7/Deep Learning.ipynb, repo ccha23/cs5483, MIT license)*
```python
import pyprob
from pyprob import Model
from pyprob import InferenceEngine
from pyprob.distributions import Normal
from pyprob.dis import ModelDIS
import torch
import numpy as np
import math
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure();
```
<Figure size 432x288 with 0 Axes>
# Defining the Model
```python
class GaussianUnknownMean(Model):
def __init__(self):
super().__init__(name='Gaussian with unknown mean') # give the model a name
self.prior_mean = 1
self.prior_std = math.sqrt(5)
self.likelihood_std = math.sqrt(2)
    def forward(self): # Needed to specify how the generative model is run forward
# sample the (latent) mean variable to be inferred:
mu = pyprob.sample(Normal(self.prior_mean, self.prior_std)) # NOTE: sample -> denotes latent variables
# define the likelihood
likelihood = Normal(mu, self.likelihood_std)
# Lets add two observed variables
# -> the 'name' argument is used later to assignment values:
#pyprob.observe(likelihood, name='obs0') # NOTE: observe -> denotes observable variables
# return the latent quantity of interest
return mu
class GaussianDIS(GaussianUnknownMean, ModelDIS):
def __init__(self):
super().__init__()
self.obs = 8
#self.bool_func = bool_func
def dummy_bernoulli(self, x):
return True
def forward(self):
x = super().forward()
likelihood = Normal(x, self.likelihood_std)
sample_obs = pyprob.sample(likelihood)
euclidean_dist = np.abs(sample_obs - self.obs)
delta_val = self.dummy_bernoulli(x)
bool_func_dist = pyprob.distributions.Bernoulli(delta_val)
pyprob.observe(bool_func_dist, name = "bool_func")
dummy = pyprob.distributions.Normal(euclidean_dist,0.1)
pyprob.observe(dummy, euclidean_dist, name = "distance") # Any other way to do this?
return x
model = GaussianUnknownMean()
simulator = GaussianDIS()
```
# Finding the correct posterior analytically
Since all distributions in this model are Gaussian, we can compute the posterior analytically and compare the true posterior with the inferred one.
Assuming that the prior and likelihood are $p(x) = \mathcal{N}(\mu_0, \sigma_0)$ and $p(y|x) = \mathcal{N}(x, \sigma)$ respectively, and $y_1, y_2, \ldots, y_n$ are the observed values, the posterior would be $p(x|y) = \mathcal{N}(\mu_p, \sigma_p)$ where,
$$
\begin{align}
\sigma_{p}^{2} & = \frac{1}{\frac{n}{\sigma^2} + \frac{1}{\sigma_{0}^{2}}} \\
\mu_p & = \sigma_{p}^{2} \left( \frac{\mu_0}{\sigma_{0}^{2}} + \frac{n\overline{y}}{\sigma^2} \right)
\end{align}
$$
The following class implements computing this posterior distribution. We also implement some helper functions and variables for plotting the correct posterior and prior.
```python
def plot_function(min_val, max_val, func, *args, **kwargs):
x = np.linspace(min_val,max_val,int((max_val-min_val)*50))
plt.plot(x, np.vectorize(func)(x), *args, **kwargs)
def get_dist_pdf(dist):
return lambda x: math.exp(dist.log_prob(x))
class CorrectDistributions:
def __init__(self, model):
self.prior_mean = model.prior_mean
self.prior_std = model.prior_std
self.likelihood_std = model.likelihood_std
self.prior_dist = Normal(self.prior_mean, self.prior_std)
@property
def observed_list(self):
return self.__observed_list
@observed_list.setter
def observed_list(self, new_observed_list):
self.__observed_list = new_observed_list
self.construct_correct_posterior()
def construct_correct_posterior(self):
n = len(self.observed_list)
posterior_var = 1/(n/self.likelihood_std**2 + 1/self.prior_std**2)
posterior_mu = posterior_var * (self.prior_mean/self.prior_std**2 + n*np.mean(self.observed_list)/self.likelihood_std**2)
self.posterior_dist = Normal(posterior_mu, math.sqrt(posterior_var))
    def prior_pdf(self, model, x):
        p = Normal(model.prior_mean, model.prior_std)
        return math.exp(p.log_prob(x))
def plot_posterior(self, min_val, max_val):
if not hasattr(self, 'posterior_dist'):
raise AttributeError('observed values are not set yet, and posterior is not defined.')
plot_function(min_val, max_val, get_dist_pdf(self.posterior_dist), label='correct posterior', color='orange')
def plot_prior(self, min_val, max_val):
plot_function(min_val, max_val, get_dist_pdf(self.prior_dist), label='prior', color='green')
```
```python
correct_dists = CorrectDistributions(model)
correct_dists.observed_list=[8]
```
# Training
For a sanity check, start with one training round.
```python
simulator.train(iterations = 1,
importance_sample_size=5000,
proposal_mixture_components=5,
observe_embeddings={'bool_func': {'dim': 1, 'depth': 1}} # Dummy value as we currently have to observe something
)
```
Creating new inference network...
Observable bool_func: reshape not specified, using shape torch.Size([]).
Observable bool_func: using embedding dim torch.Size([1]).
Observable bool_func: observe embedding not specified, using the default FEEDFORWARD.
Observable bool_func: using embedding depth 1.
Observe embedding dimension: 1
Train. time | Epoch| Trace | Init. loss| Min. loss | Curr. loss| T.since min | Learn.rate| Traces/sec
New layers, address: 16__forward__mu__Normal__1, distribution: Normal
New layers, address: 28__forward__sample_obs__Normal__1, distribution: Normal
Total addresses: 2, parameters: 308
0d:00:00:00 | 1 | 300 | +4.03e+00 | +3.95e+00 | +4.16e+00 | 0d:00:00:00 | +1.00e-03 | 1,006.0
Stop condition reached. num_traces: 500
0d:00:00:00 | 1 | 500 | +4.03e+00 | +3.95e+00 | +4.10e+00 | 0d:00:00:00 | +1.00e-03 | 1,163.2
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:00:20 | 0d:00:00:00 | #################### | 5000/5000 | 4807.00 | 240.37
OfflineDataset at: .
Num. traces : 500
Sorted on disk : False
No pre-computed hashes found, generating: ./pyprob_hashes
Hashing offline dataset for sorting
Time spent | Time remain.| Progress | Traces | Traces/sec
0d:00:00:00 | 0d:00:00:00 | #################### | 500/500 | 1,032.81
Sorting offline dataset
Sorting done
Num. trace types : 1
Trace hash Count
2.94810057 500
Continuing to train existing inference network...
Total number of parameters: 308
Train. time | Epoch| Trace | Init. loss| Min. loss | Curr. loss| T.since min | Learn.rate| Traces/sec
0d:00:00:00 | 1 | 900 | +4.03e+00 | +3.95e+00 | +4.99e+00 | 0d:00:00:00 | +1.00e-03 | 770.5
Stop condition reached. num_traces: 500
0d:00:00:01 | 1 | 1,000 | +4.03e+00 | +3.95e+00 | +5.14e+00 | 0d:00:00:00 | +1.00e-03 | 852.5
Training iterations 1 epsilon 1.92 ESS 500.0
```python
prior = simulator.prior_results(num_traces = 2000)
posterior_check = simulator.posterior(num_traces = 2000,
inference_engine = InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK,
observe={"bool_func": 1})
posterior_check = simulator.update_DIS_posterior_weights(posterior_check)
results_check = np.array([x.result for x in posterior_check.values])
```
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:00:03 | 0d:00:00:00 | #################### | 2000/2000 | 2000.00 | 643.56
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:00:08 | 0d:00:00:00 | #################### | 2000/2000 | 1920.54 | 230.02
Now do 50 more iterations.
```python
simulator.train(iterations = 50,
importance_sample_size=5000,
proposal_mixture_components=5,
observe_embeddings={'bool_func': {'dim': 1, 'depth': 1}} # Dummy value as we currently have to observe something
)
```
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:00:18 | 0d:00:00:00 | #################### | 5000/5000 | 4801.09 | 271.60
OfflineDataset at: .
Num. traces : 500
Sorted on disk : False
No pre-computed hashes found, generating: ./pyprob_hashes
Hashing offline dataset for sorting
Time spent | Time remain.| Progress | Traces | Traces/sec
0d:00:00:00 | 0d:00:00:00 | #################### | 500/500 | 1,204.59
Sorting offline dataset
Sorting done
Num. trace types : 1
Trace hash Count
2.94810057 500
Continuing to train existing inference network...
Total number of parameters: 308
Train. time | Epoch| Trace | Init. loss| Min. loss | Curr. loss| T.since min | Learn.rate| Traces/sec
0d:00:00:01 | 1 | 1,300 | +4.03e+00 | +3.95e+00 | +4.96e+00 | 0d:00:00:00 | +1.00e-03 | 439.9
Stop condition reached. num_traces: 500
0d:00:00:01 | 1 | 1,500 | +4.03e+00 | +3.95e+00 | +5.01e+00 | 0d:00:00:00 | +1.00e-03 | 989.9
Training iterations 1 epsilon 1.30 ESS 500.0
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:00:20 | 0d:00:00:00 | #################### | 5000/5000 | 4817.46 | 249.91
OfflineDataset at: .
Num. traces : 500
Sorted on disk : False
No pre-computed hashes found, generating: ./pyprob_hashes
Hashing offline dataset for sorting
Time spent | Time remain.| Progress | Traces | Traces/sec
0d:00:00:00 | 0d:00:00:00 | #################### | 500/500 | 829.38
Sorting offline dataset
Sorting done
Num. trace types : 1
Trace hash Count
2.94810057 500
Continuing to train existing inference network...
Total number of parameters: 308
Train. time | Epoch| Trace | Init. loss| Min. loss | Curr. loss| T.since min | Learn.rate| Traces/sec
0d:00:00:02 | 1 | 1,800 | +4.03e+00 | +3.95e+00 | +4.99e+00 | 0d:00:00:00 | +1.00e-03 | 693.1
Stop condition reached. num_traces: 500
0d:00:00:02 | 1 | 2,000 | +4.03e+00 | +3.95e+00 | +5.02e+00 | 0d:00:00:00 | +1.00e-03 | 351.1
Training iterations 2 epsilon 1.05 ESS 500.0
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:00:57 | 0d:00:00:00 | #################### | 5000/5000 | 4819.09 | 87.53
OfflineDataset at: .
Num. traces : 500
Sorted on disk : False
No pre-computed hashes found, generating: ./pyprob_hashes
Hashing offline dataset for sorting
Time spent | Time remain.| Progress | Traces | Traces/sec
0d:00:00:02 | 0d:00:00:00 | #################### | 500/500 | 234.92
Sorting offline dataset
Sorting done
Num. trace types : 1
Trace hash Count
2.94810057 500
Continuing to train existing inference network...
Total number of parameters: 308
Train. time | Epoch| Trace | Init. loss| Min. loss | Curr. loss| T.since min | Learn.rate| Traces/sec
0d:00:00:04 | 1 | 2,400 | +4.03e+00 | +3.95e+00 | +4.99e+00 | 0d:00:00:02 | +1.00e-03 | 171.6
Stop condition reached. num_traces: 500
0d:00:00:05 | 1 | 2,500 | +4.03e+00 | +3.95e+00 | +5.05e+00 | 0d:00:00:02 | +1.00e-03 | 173.9
Training iterations 3 epsilon 0.90 ESS 500.0
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:01:19 | 0d:00:00:00 | #################### | 5000/5000 | 4805.33 | 62.69
OfflineDataset at: .
Num. traces : 500
Sorted on disk : False
No pre-computed hashes found, generating: ./pyprob_hashes
Hashing offline dataset for sorting
Time spent | Time remain.| Progress | Traces | Traces/sec
0d:00:00:02 | 0d:00:00:00 | #################### | 500/500 | 209.79
Sorting offline dataset
Sorting done
Num. trace types : 1
Trace hash Count
2.94810057 500
Continuing to train existing inference network...
Total number of parameters: 308
Train. time | Epoch| Trace | Init. loss| Min. loss | Curr. loss| T.since min | Learn.rate| Traces/sec
0d:00:00:06 | 1 | 2,900 | +4.03e+00 | +3.95e+00 | +4.99e+00 | 0d:00:00:01 | +1.00e-03 | 351.6
Stop condition reached. num_traces: 500
0d:00:00:06 | 1 | 3,000 | +4.03e+00 | +3.95e+00 | +4.83e+00 | 0d:00:00:01 | +1.00e-03 | 481.1
Training iterations 4 epsilon 0.79 ESS 500.0
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:00:55 | 0d:00:00:00 | #################### | 5000/5000 | 4784.43 | 90.89
OfflineDataset at: .
Num. traces : 500
Sorted on disk : False
No pre-computed hashes found, generating: ./pyprob_hashes
Hashing offline dataset for sorting
Time spent | Time remain.| Progress | Traces | Traces/sec
0d:00:00:01 | 0d:00:00:00 | #################### | 500/500 | 499.84
Sorting offline dataset
Sorting done
Num. trace types : 1
Trace hash Count
2.94810057 500
Continuing to train existing inference network...
Total number of parameters: 308
Train. time | Epoch| Trace | Init. loss| Min. loss | Curr. loss| T.since min | Learn.rate| Traces/sec
0d:00:00:07 | 1 | 3,300 | +4.03e+00 | +3.95e+00 | +4.96e+00 | 0d:00:00:00 | +1.00e-03 | 325.8
Stop condition reached. num_traces: 500
0d:00:00:08 | 1 | 3,500 | +4.03e+00 | +3.95e+00 | +4.80e+00 | 0d:00:00:01 | +1.00e-03 | 178.2
Training iterations 5 epsilon 0.71 ESS 500.0
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:01:11 | 0d:00:00:00 | #################### | 5000/5000 | 4758.18 | 69.80
OfflineDataset at: .
Num. traces : 500
Sorted on disk : False
No pre-computed hashes found, generating: ./pyprob_hashes
Hashing offline dataset for sorting
Time spent | Time remain.| Progress | Traces | Traces/sec
0d:00:00:01 | 0d:00:00:00 | #################### | 500/500 | 263.98
Sorting offline dataset
Sorting done
Num. trace types : 1
Trace hash Count
2.94810057 500
Continuing to train existing inference network...
Total number of parameters: 308
Train. time | Epoch| Trace | Init. loss| Min. loss | Curr. loss| T.since min | Learn.rate| Traces/sec
0d:00:00:09 | 1 | 3,900 | +4.03e+00 | +3.95e+00 | +4.77e+00 | 0d:00:00:01 | +1.00e-03 | 238.9
Stop condition reached. num_traces: 500
0d:00:00:10 | 1 | 4,000 | +4.03e+00 | +3.95e+00 | +4.90e+00 | 0d:00:00:01 | +1.00e-03 | 250.5
Training iterations 6 epsilon 0.65 ESS 500.0
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:01:08 | 0d:00:00:00 | #################### | 5000/5000 | 4746.82 | 73.42
OfflineDataset at: .
Num. traces : 500
Sorted on disk : False
No pre-computed hashes found, generating: ./pyprob_hashes
Hashing offline dataset for sorting
Time spent | Time remain.| Progress | Traces | Traces/sec
0d:00:00:01 | 0d:00:00:00 | #################### | 500/500 | 415.50
Sorting offline dataset
Sorting done
Num. trace types : 1
Trace hash Count
2.94810057 500
Continuing to train existing inference network...
Total number of parameters: 308
Train. time | Epoch| Trace | Init. loss| Min. loss | Curr. loss| T.since min | Learn.rate| Traces/sec
0d:00:00:11 | 1 | 4,400 | +4.03e+00 | +3.95e+00 | +4.87e+00 | 0d:00:00:01 | +1.00e-03 | 353.2
Stop condition reached. num_traces: 500
0d:00:00:12 | 1 | 4,500 | +4.03e+00 | +3.95e+00 | +4.82e+00 | 0d:00:00:01 | +1.00e-03 | 358.4
Training iterations 7 epsilon 0.60 ESS 500.0
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:00:59 | 0d:00:00:00 | #################### | 5000/5000 | 4705.87 | 84.16
OfflineDataset at: .
Num. traces : 500
Sorted on disk : False
No pre-computed hashes found, generating: ./pyprob_hashes
Hashing offline dataset for sorting
Time spent | Time remain.| Progress | Traces | Traces/sec
0d:00:00:01 | 0d:00:00:00 | #################### | 500/500 | 351.63
Sorting offline dataset
Sorting done
Num. trace types : 1
Trace hash Count
2.94810057 500
Continuing to train existing inference network...
Total number of parameters: 308
Train. time | Epoch| Trace | Init. loss| Min. loss | Curr. loss| T.since min | Learn.rate| Traces/sec
0d:00:00:13 | 1 | 4,900 | +4.03e+00 | +3.95e+00 | +4.70e+00 | 0d:00:00:01 | +1.00e-03 | 348.4
Stop condition reached. num_traces: 500
0d:00:00:13 | 1 | 5,000 | +4.03e+00 | +3.95e+00 | +4.86e+00 | 0d:00:00:01 | +1.00e-03 | 378.2
Training iterations 8 epsilon 0.55 ESS 500.0
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:00:36 | 0d:00:00:36 | ##########---------- | 2521/5000 | 2344.68 | 68.41
```python
posterior = simulator.posterior(num_traces = 2000,
inference_engine = InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK,
observe={"bool_func": 1})
posterior = simulator.update_DIS_posterior_weights(posterior)
results = np.array([x.result for x in posterior.values])
```
Time spent | Time remain.| Progress | Trace | ESS | Traces/sec
0d:00:00:06 | 0d:00:00:00 | #################### | 2000/2000 | 99.34 | 296.71
# Plots
First, do checks for the single training round. This is to make sure that the algorithm isn't somehow 'cheating'.
```python
posterior_check = simulator.update_DIS_posterior_weights(posterior_check)
results_check = np.array([x.result for x in posterior_check.values])
```
```python
plt.hist(results_check, bins=50, weights=np.array(posterior_check.weights), label='posterior',density=True)
plt.hist(results_check, bins=50, label='proposal',density = True)
correct_dists.plot_prior(min(prior.values_numpy()),max(prior.values_numpy()))
correct_dists.plot_posterior(min(results), max(results))
plt.legend()
```
Now check full results
```python
plt.hist(results, bins=50, weights=np.array(posterior.weights), label='posterior',density=True)
plt.hist(results, bins=50, label='proposal',density = True)
correct_dists.plot_prior(min(prior.values_numpy()),max(prior.values_numpy()))
correct_dists.plot_posterior(min(results), max(results))
plt.legend()
```
# Roughs
```python
importance_weights = np.exp(np.array([x.log_importance_weight for x in posterior.values]))
importance_weights = importance_weights/sum(importance_weights)
weights = posterior.weights
probs = posterior._categorical.probs
print(importance_weights[:10])
print(np.array(weights[:10].detach()))
print(np.array(probs[:10].detach()))
```
[4.43934743e-04 1.12809762e-03 6.99781531e-05 3.08431125e-04
2.07236835e-04 1.65965433e-04 2.79483327e-04 1.51536002e-03
4.46125723e-04 3.05139416e-04]
[5.16142642e-11 2.43493937e-16 3.47589046e-11 2.37514305e-08
7.04764763e-05 8.20254264e-05 1.54813143e-05 2.09902897e-24
4.43681561e-06 7.53436269e-09]
[5.16142642e-11 2.43493937e-16 3.47589046e-11 2.37514305e-08
7.04764763e-05 8.20254264e-05 1.54813143e-05 2.09902897e-24
4.43681561e-06 7.53436269e-09]
```python
print(importance_weights.sum())
print(weights.sum())
```
1.0000000000000004
tensor(0.0158, dtype=torch.float64)
```python
plt.hist(results, bins=20, weights=importance_weights, label='posterior')
plt.hist(results, bins=20, label='proposal',density = True)
correct_dists.plot_prior(min(prior.values_numpy()),max(prior.values_numpy()))
correct_dists.plot_posterior(min(results), max(results))
plt.legend()
```
Why do importance weights bring the posterior back to the prior?
*(dataset metadata: examples/Gaussian_DIS.ipynb, repo SRagy/pyprob, BSD-2-Clause license)*
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
sys.path.append('../../pyutils')
import metrics
import utils
```
# When $p$ is much bigger than $N$
High variance and overfitting are a major concern in this setting.
Simple, highly regularized models are often used.
Let's suppose we are trying to fit a linear model.
With $p \ll N$, we can identify as many non-zero coefficients as we want without shrinkage.
With $p=N$, we can identify some non-zero coefficients with moderate shrinkage.
With $p \gg N$, even though there may be many non-zero coefficients, we cannot hope to find them all, so we need to shrink a lot.
# Diagonal LDA and Nearest Shrunken Centroids
The simplest form of regularization assumes that the features are independant within each class (the within class covariance matris is diagonal).
It greatly reduces the number of parameters and often result in an effective and interpretable classifier.
The discriminant score for class $k$ is:
$$\theta_k(x) = - \sum_{j=1}^p \frac{(x_j - \bar{x}_{kj})^2}{s_j^2} + 2 \log \pi_k$$
with $s_j$ the within-class standard deriavtion for feature $j$, and:
$$\bar{x}_{kj} = \frac{1}{N_k} \sum_{i \in C_k} x_{ij}$$
We call $\bar{x}_k$ the centroid of class $k$. Diagonal LDA can be seen as a nearest centroid classifier with appropriate standardization.
To regularize in a way that drops out features, we can shrink each classwise mean toward the overall mean for each feature separately. This method is called Nearest Shrunken Centroids (NSC).
Let
$$d_{jk} = \frac{\bar{x}_{kj} - \bar{x}_j}{m_k(s_j + s_0)}$$
with $m_k^2 = 1/N_k - 1/N$ and $s_0$ a small positive constant.
We can shrink $d_{kj}$ toward zero using soft thresholding:
$$d'_{kj} = \text{sign}(d_{kj})(|d_{kj}| - \Delta)_+$$
with $\Delta$ a parameter to be determined.
The shruken centroids are obtained by:
$$\bar{x}'_{kj} = \bar{x}_j + m_k(s_j + s_0)d'_{kj}$$
We use the shrunken centroids $\bar{x}'_{kj}$ instead of the original $\bar{x}_{kj}$ in the discriminant score.
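A minimal `numpy` sketch of the shrinkage step (toy interface; using a pooled within-class standard deviation for $s_j$ and a constant `s0` are assumptions following the formulas above):
```python
import numpy as np

def shrunken_centroids(X, y, delta, s0=0.0):
    """Toy nearest-shrunken-centroids shrinkage for data matrix X (N x p) and labels y."""
    classes = np.unique(y)
    N, p = X.shape
    xbar = X.mean(axis=0)                                   # overall mean per feature
    # pooled within-class standard deviation s_j per feature
    s = np.sqrt(sum(((X[y == k] - X[y == k].mean(axis=0)) ** 2).sum(axis=0)
                    for k in classes) / (N - len(classes)))
    centroids = {}
    for k in classes:
        Nk = (y == k).sum()
        mk = np.sqrt(1.0 / Nk - 1.0 / N)
        d = (X[y == k].mean(axis=0) - xbar) / (mk * (s + s0))
        d_shrunk = np.sign(d) * np.maximum(np.abs(d) - delta, 0.0)   # soft thresholding
        centroids[k] = xbar + mk * (s + s0) * d_shrunk               # shrunken centroid
    return centroids
```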
# Linear Classifiers with Quadratic Regularization
## Regularized Discriminant Analysis
LDA involves the inversion of the $p \times p$ within-class covariance matrix $\hat{\Sigma}$. When $p > N$, this matrix is singular.
RDA solves the issue by shrinking $\Sigma$ towards its diagonal:
$$\hat{\Sigma}(\gamma) = \gamma \hat{\Sigma} + (1 - \gamma) \text{diag}(\hat{\Sigma})$$
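A one-line illustration of the shrinkage on a toy covariance estimate (hypothetical numbers):
```python
import numpy as np

Sigma = np.array([[2.0, 0.8],
                  [0.8, 1.0]])        # toy within-class covariance estimate
gamma = 0.3
Sigma_rda = gamma * Sigma + (1 - gamma) * np.diag(np.diag(Sigma))
# shrinking toward the diagonal improves the conditioning in this toy example
print(np.linalg.cond(Sigma_rda), np.linalg.cond(Sigma))
```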
## Logistic Regression with Quadratic Regularization
The multiclass logistic model is expressed as:
$$P(G=k|X=x) = \frac{\exp (\beta_{k0} + x^T \beta_k)}{\sum_{l=1}^K \exp (\beta_{l0} + x^T \beta_l)}$$
This has $K$ coefficient vectors $\beta_k$. We regularize the fitting by maximizing the penalized log-likelihood:
$$\max_{ \{ \beta_{0k}, \beta_k \}_1^K} \left[ \sum_{i=1}^N \log P(g_i|x_i) - \frac{\lambda}{2} \sum_{k=1}^K ||\beta_k||_2^2 \right]$$
## The Support Vector Classifier
When $p > N$, the classes are perfectly separable, unless there are identical feature vectors in different classes.
Surprisingly, the unregularized SVC often works about as well as the best regularized version.
## Feature Selection
RDA, regularized logistic regression, and the SVC shrink weights toward zero, but they keep all features.
Recursive feature elimination removes features with small weights and retrains the classifier.
All three approaches can be modified using kernels to increase model complexity. With $p > N$, overfitting is always a danger, and yet using kernels may sometimes give better results.
## Computational shorcuts when $p \gg N$
Instead of working with $X \in \mathbb{R}^{N*p}$ matrix, we can work with a matrix of size $N*N$, using the SVD:
$$
\begin{equation}
\begin{split}
X & = UDV^T \\
& = RV^T
\end{split}
\end{equation}
$$
with $R \in \mathbb{R}^{N \times N}$:
$$R = UD$$
We can usually work with $R$ instead of $X$.
For example, let's consider the estimates from a ridge regression:
$$\hat{\beta} = (X^TX + \delta I)^{-1}X^T y$$
We can instead compute $\hat{\theta}$, the ridge regression estimate based on the pairs $(r_i, y_i)$, and then recover $\hat{\beta} = V \hat{\theta}$.
This idea can be generalized to any linear model with a quadratic penalty on the weights.
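A small NumPy check of this shortcut on synthetic data; the full $p$-dimensional ridge estimate and the one recovered from the reduced $N \times N$ problem coincide:
```python
import numpy as np

rng = np.random.default_rng(0)
N, p, delta = 30, 500, 2.0
X = rng.normal(size=(N, p))
y = rng.normal(size=N)

# full ridge estimate in p dimensions (expensive when p is very large)
beta_full = np.linalg.solve(X.T @ X + delta * np.eye(p), X.T @ y)

# reduced N-dimensional problem: X = U D V^T, R = U D
U, d, Vt = np.linalg.svd(X, full_matrices=False)
R = U * d                                           # N x N matrix of reduced predictors
theta = np.linalg.solve(R.T @ R + delta * np.eye(N), R.T @ y)
beta_reduced = Vt.T @ theta                         # map back: beta = V theta

print(np.allclose(beta_full, beta_reduced))         # True
```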
# Linear classifiers with $L_1$ Regularization
The lasso for linear regression is:
$$\min_{\beta} \frac{1}{2} \sum_{i=1}^N \left( y_i - \beta_0 - \sum_{j=1}^p x_{ij}\beta_j \right) ^2 + \lambda \sum_{j=1}^p |\beta_j|$$
$L_1$ penalty causes a subset of the $\hat{\beta}_j$ to be exactly zero for a sufficiently large value of $\lambda$, and hence performs feature selection.
When $p > N$, as $\lambda \to 0$ the model fits the data perfectly.
When $p > N$, the number of non-zero coefficients is at most $N$ for any value of $\lambda$.
Linear regression can be applied to two-class classification using $\pm 1$ as labels, and using the sign of the prediction.
A more natural approach is to use the lasso penalty on logistic regression. We can use a symmetric multinomial logistic regression model, and maximize the penalized log-likelihood:
$$\max_{ \{ \beta_{0k}, \beta_k \}_1^K} \left[ \sum_{i=1}^N \log P(g_i|x_i) - \lambda \sum_{k=1}^K \sum_{j=1}^p |\beta_{kj}| \right]$$
The lasso tends to encourage a sparse solution, while ridge tends to shrink the coefficients of correlated variables toward each other.
The elastic net penalty is a compromise:
$$\sum_{j=1}^p (\alpha |\beta|_j + (1 - \alpha) \beta_j^2)$$
with $\alpha \in [0,1]$ a parameter that determines the mix of the penalties.
The logistic regression problem above with the elastic net penalty becomes:
$$\max_{ \{ \beta_{0k}, \beta_k \}_1^K} \left[ \sum_{i=1}^N \log P(g_i|x_i) - \lambda \sum_{k=1}^K \sum_{j=1}^p (\alpha|\beta_{kj}| + (1 - \alpha) \beta_{kj}^2) \right]$$
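With scikit-learn, an elastic-net penalized logistic regression of this form can be sketched as follows (synthetic data; `l1_ratio` plays the role of $\alpha$ and `C` roughly $1/\lambda$):
```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(1)
X = rng.normal(size=(60, 300))       # p >> N
y = rng.integers(0, 2, size=60)

clf = LogisticRegression(penalty="elasticnet", solver="saga",
                         l1_ratio=0.5, C=0.5, max_iter=5000).fit(X, y)
print((clf.coef_ != 0).sum(), "non-zero coefficients out of", clf.coef_.size)
```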
## The Fused Lasso
The fused lasso is a method that tends to smooth the coefficients uniformly. We add a penalty that takes into account the ordering of the features:
$$\min_{\beta} \sum_{i=1}^N \left( y_i - \beta_0 - \sum_{j=1}^p x_{ij}\beta_j \right) ^2 + \lambda_1 \sum_{j=1}^p |\beta_j| + \lambda_2 \sum_{j=1}^{p-1} |\beta_{j-1} - \beta_j|$$
# Classification When Features are Unavailable
Instead of working with features, we can work with an $N \times N$ proximity matrix, and we can interpret the proximities as inner products.
For example, it can be considered as the kernel matrix $K$, and can be used with kernel methods such as the SVM.
## Classification and other methods using Kernels
There are a number of other classifiers, besides the SVM, that can be implemented using only inner-product matrices. This also implies that they can be kernelized like the SVM.
For nearest-neighbor classification, we can transform inner products into distances:
$$||x_i - x_{i'}||^2 = \langle x_i, x_i \rangle + \langle x_{i'}, x_{i'} \rangle - 2\langle x_i, x_{i'} \rangle$$
For nearest-centroid classification, with training pairs $(x_i, g_i)$, and class centroids $\bar{x}_k$, we can compute the distance of the test point to each centroid:
$$||x_0 - \bar{x}_k||^2 = \langle x_0, x_0 \rangle - \frac{2}{N_k} \sum_{g_i=k} \langle x_0, x_i \rangle + \frac{1}{N_k^2} \sum_{g_i=k} \sum_{g_{i'}=k} \langle x_i, x_{i'} \rangle$$
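A sketch of nearest-centroid classification from inner products only, following the formula above (all argument names are placeholders):
```python
import numpy as np

def kernel_centroid_distances(K_train, k_test_train, k_test_test, y):
    """Squared distance from one test point to each class centroid,
    using only inner products:
      K_train:      (N, N) matrix of <x_i, x_i'> among training points
      k_test_train: (N,)   vector of <x_0, x_i>
      k_test_test:  scalar  <x_0, x_0>
      y:            (N,)   class labels
    """
    dists = {}
    for k in np.unique(y):
        idx = np.where(y == k)[0]
        Nk = len(idx)
        dists[k] = (k_test_test
                    - 2.0 / Nk * k_test_train[idx].sum()
                    + K_train[np.ix_(idx, idx)].sum() / Nk ** 2)
    return dists   # predict the class with the smallest distance
```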
We can also perform kernel PCA. Let $X$ be the centered data matrix, with SVD decomposition:
$$X=UDV^T$$
We get the matrix of principal components $Z$:
$$Z = UD$$
When $K=XX^T$, it follows that $K=UD^2U^T$, and hence we can compute $Z$ from the eigendecomposition of $K$.
If $X$ is not centered, we need to use the double-centered kernel instead:
$$\tilde{K} = (I-M)K(I-M)$$
with $M = \frac{1}{N} 1 1^T$.
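A NumPy check that the principal components obtained from the double-centered kernel match those from the SVD of the centered data (synthetic data):
```python
import numpy as np

rng = np.random.default_rng(0)
N = 20
X = rng.normal(size=(N, 100))

# principal components via the (uncentered) kernel matrix
K = X @ X.T
M = np.ones((N, N)) / N
K_tilde = (np.eye(N) - M) @ K @ (np.eye(N) - M)        # double centering

vals, vecs = np.linalg.eigh(K_tilde)                   # K_tilde = U D^2 U^T
order = np.argsort(vals)[::-1]
Z_kernel = vecs[:, order] * np.sqrt(np.clip(vals[order], 0, None))   # Z = U D

# direct computation from the SVD of the centered data
Xc = X - X.mean(axis=0)
U, d, Vt = np.linalg.svd(Xc, full_matrices=False)
Z_svd = U * d

print(np.allclose(np.abs(Z_kernel[:, :5]), np.abs(Z_svd[:, :5])))    # True up to sign
```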
But there are some things that we cannot do with kernels:
- We cannot standardize the variables.
- We cannot assess directly the contribution of individual variables (i.e. we cannot use the lasso penalty)
- We cannot separate the good variables from the noise: they all get an equal say.
# High-Dimensional Regression: Supervised Principal Components
PCA is an effective method to find linear combinations of features that exhibit large variation in the data.
Supervised PCA finds linear combinations with both high variance and significant correlation with the outcome.
Supervised PCA can be related to latent-variable modeling.
Suppose we have a response variable $Y$ related to an underlying latent variable $U$ by a linear model:
$$Y = \beta_0 + \beta_1 U + \sigma$$
We have measurements on a set of features $X_j$, $j \in \mathcal{P}$:
$$X_j = \alpha_{0j} + \alpha_{1j}U + \sigma_j, \quad j \in \mathcal{P}$$
We also have many additional features $X_k$, $k \notin \mathcal{P}$, which are independent of $U$.
This is a three-step process, similar to supervised PCA:
- Estimate the set $\mathcal{P}$
- Given $\hat{\mathcal{P}}$, estimate $U$
- Perform a regression fit to estimate $\beta$, $\beta_0$.
# Feature Assessment and the Multiple-Testing Problem
Feature assessment assesses the significance of each feature; it is related to multiple hypothesis testing.
Let's suppose we have a dataset of $N$ observations, each with $M$ features, separated into $K=2$ groups.
To identify which features are informative to guess the group, we construct a two-sample t-statistic for each feature:
$$t_j = \frac{\bar{x}_{2j} - \bar{x}_{1j}}{\text{se}_j}$$
where:
$$\bar{x}_{kj} = \frac{1}{N_k} \sum_{i \in C_k} x_{ij}$$
$\text{se}_j$ is the pooled within-group standard error for feature $j$:
$$\text{se}_j = \hat{\sigma}_j \sqrt{\frac{1}{N_1} + \frac{1}{N_2}}$$
$$\hat{\sigma}_j^2 = \frac{1}{N_1 + N_2 - 2} \left( \sum_{i \in C_1} (x_{ij} - \bar{x}_{1j})^2 + \sum_{i \in C_2} (x_{ij} - \bar{x}_{2j})^2 \right)$$
We could consider any value $> 2$ in absolute value to be significantly large. However, with $M$ large, we would expect many large values to occur by chance.
We can assess the results for all $M$ features within the multiple-testing framework.
We can compute the p-value for each feature $p_j$:
$$p_j = \frac{1}{K} \sum_{k=1}^K I(|t_j^k| > |t_j|)$$
where $t_j^k$ is the statistic computed from the $k$-th of $K$ random permutations of the sample labels.
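A sketch of the two-sample t-statistics and permutation p-values on synthetic data (no real signal, so the p-values should look roughly uniform):
```python
import numpy as np

def two_sample_t(X, groups):
    """Two-sample t-statistic for every feature (column of X), as defined above."""
    X1, X2 = X[groups == 1], X[groups == 2]
    n1, n2 = len(X1), len(X2)
    pooled_var = (((X1 - X1.mean(0)) ** 2).sum(0) +
                  ((X2 - X2.mean(0)) ** 2).sum(0)) / (n1 + n2 - 2)
    se = np.sqrt(pooled_var) * np.sqrt(1.0 / n1 + 1.0 / n2)
    return (X2.mean(0) - X1.mean(0)) / se

rng = np.random.default_rng(0)
X = rng.normal(size=(40, 1000))                # N = 40 samples, M = 1000 features
groups = np.repeat([1, 2], 20)

t_obs = two_sample_t(X, groups)
K = 200                                        # number of label permutations
perm_t = np.array([two_sample_t(X, rng.permutation(groups)) for _ in range(K)])
p_values = (np.abs(perm_t) > np.abs(t_obs)).mean(axis=0)   # permutation p-value per feature
```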
Using p-values, we can test the hypotheses:
$H_{0j} = $ label has no effect on feature $j$.
$H_{1j} = $ label has an effect on feature $j$.
We reject $H_{0j}$ at level $\alpha$ if $p_j < \alpha$.
Let $A_j$ be the event that $H_{0j}$ is falsely rejected: $P(A_j) = \alpha$.
The family-wise error rate (FWER) is the probability of at least one false rejection.
## The False Discovery Rate
Possible outcomes from $M$ hypothesis tests:
| |Called not significant|Called significant|Total|
|---|---|---|---|
|$H_0$ True| $U$ | $V$ | $M_0$|
|$H_0$ False|$T$|$S$|$M_1$|
|Total|$M-R$|$R$|$M$|
The false discovery rate is:
$$\text{FDR} = E(V/R)$$
The expectation is taken over the sampled data.
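The procedure itself is not derived here, but the standard Benjamini-Hochberg step-up rule, which controls the FDR at a chosen level $\alpha$ given a set of p-values, can be sketched as:
```python
import numpy as np

def benjamini_hochberg(p_values, alpha=0.1):
    """Reject the hypotheses with the L smallest p-values, where L is the largest j
    such that p_(j) <= alpha * j / M (standard BH step-up rule)."""
    M = len(p_values)
    order = np.argsort(p_values)
    below = p_values[order] <= alpha * np.arange(1, M + 1) / M
    rejected = np.zeros(M, dtype=bool)
    if below.any():
        L = np.where(below)[0].max() + 1       # largest index satisfying the bound
        rejected[order[:L]] = True
    return rejected
```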
## Asymmetric Cutpoints and the SAM Procedure
In previous approaches, we used the absolute value of $t_j$, hence applying the same cutpoint to both positive and negative values.
Significance analysis of microarrays (SAM) derives separate cut-points for the two classes.
|
58d342ac59acb80652e991040435f3f03b357f97
| 14,364 |
ipynb
|
Jupyter Notebook
|
refs/elements-of-statistical-learning/eosl18_high_dim_problems.ipynb
|
obs145628/ml-notebooks
|
08a64962e106ec569039ab204a7ae4c900783b6b
|
[
"MIT"
] | 1 |
2020-10-29T11:26:00.000Z
|
2020-10-29T11:26:00.000Z
|
refs/elements-of-statistical-learning/eosl18_high_dim_problems.ipynb
|
obs145628/ml-notebooks
|
08a64962e106ec569039ab204a7ae4c900783b6b
|
[
"MIT"
] | 5 |
2021-03-18T21:33:45.000Z
|
2022-03-11T23:34:50.000Z
|
refs/elements-of-statistical-learning/eosl18_high_dim_problems.ipynb
|
obs145628/ml-notebooks
|
08a64962e106ec569039ab204a7ae4c900783b6b
|
[
"MIT"
] | 1 |
2019-12-23T21:50:02.000Z
|
2019-12-23T21:50:02.000Z
| 46.335484 | 202 | 0.595377 | true | 3,299 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.894789 | 0.851953 | 0.762318 |
__label__eng_Latn
| 0.989191 | 0.609453 |
# Exercise 6.2 in JT: More on pooling in credit markets.
Import packages, classes and settings:
```python
import numpy as np
import math
import itertools
from scipy import optimize
import scipy.stats as stats
import PS1 as func
# For plots:
import matplotlib.pyplot as plt
import matplotlib as mpl
%matplotlib inline
plt.style.use('seaborn-whitegrid')
mpl.style.use('seaborn')
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
import ipywidgets as widgets
from ipywidgets import interact, interact_manual
```
Compared to exercise 6.1, alter the setup as follows:
* Continuum of types instead of good/bad. For entrepreneur $i$ the value $B_i$ is distributed according to the CDF function $H(B)$, with support on $[0, \bar{B}]$
* Monopoly lender offers credit to borrowers (entrepreneurs).
The lender offers $R_b$ for a successful investment, otherwise 0. Borrower $i$ then behaves **if** the IC constraint holds: $B_i\leq \Delta p R_b$. The expected profits from offering the contract are then defined by:
$$\begin{align}
\pi(R_b) = H\big(\Delta p R_b\big)p_H(R-R_b)+\left[1-H\big(\Delta p R_b\big)\right] p_L(R-R_b)-I, \tag{Profits}
\end{align} $$
where $H(\Delta p R_b)$ measures the share of borrowers that behave, i.e. those with $B_i<\Delta p R_b$. From this, note:
* The share of high-quality borrowers increase with $R_b$ (bad types start to behave).
* Same dynamics as before: Adverse selection reduces quality of lending, induces cross-subsidies between types.
### Example with uniform distribution:
```python
Model = func.poolingCredit() # create model instance with default values
Model.plot_distr()
```
### Expected profits as a function of $B$:
```python
Model.plot_exp_profits()
```
## Q: Show a number of results:
1. Proportion of high-quality borrowers is endogenous and depends on $R_b$.
2. Adverse selection reduces quality of lending
3. Externality among types: the presence of low-quality borrowers forces the lender to charge an interest rate that generates strictly positive profits on high-quality types.
4. Show an example of 'bad lemons' breaking down the market.
The first 3 are answered straightforwardly:
* A high-quality borrower is one that behaves. The share of high-quality borrowers is defined as $H(\Delta p R_b)$. If $R_b$ increases, so does this share.
* Adverse selection means that the lender cannot, for instance, offer the contract $R_b$ **only** to certain types, or offer individual contracts $R_b^i(B_i)$ based on their types.
Lastly, we give an example where the market breaks down. All we need for the market to break down is to find a scenario where the lender's profits are negative:
* Assume that $p_L=0$, and that borrowers are **uniformly distributed**, that is, $H(B) = B/\bar{B}$, for $B\in[0,\bar{B}]$.
* In this case the expected profits are given by:
$$\begin{align}
\pi(R_b) &= \overbrace{\dfrac{\Delta p R_b}{\bar{B}}}^{\equiv H(\Delta p R_b)} p_H (R-R_b)-I \\
&= \dfrac{p_H R_b}{\bar{B}}p_H(R-R_b)-I.
\end{align} $$
The profit maximizing level of $R_b$ is then given by: $R_b^* = R/2$, which yields profits of
$$\begin{align}
\pi(R_b^*) &= \dfrac{p_H R/2}{\bar{B}}p_H\left(R-\dfrac{R}{2}\right)-I \\
&= \dfrac{p_H^2R^2}{4\bar{B}}-I.
\end{align} $$
Thus as long as $I>p_H^2R^2/(4\bar{B})$ the market breaks down.
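A quick numerical check of this breakdown condition with illustrative (made-up) values for $p_H$, $R$ and $\bar{B}$:
```python
import numpy as np

pH, R, B_bar = 0.8, 10.0, 5.0                    # illustrative parameter values
Rb = np.linspace(0, min(R, B_bar / pH), 201)     # keep H(pH * Rb) = pH*Rb/B_bar <= 1
profits = (pH * Rb / B_bar) * pH * (R - Rb)      # expected gross profits, before the fixed cost I

print(Rb[np.argmax(profits)])   # maximizer ~ R/2
print(profits.max())            # = pH**2 * R**2 / (4 * B_bar); the market breaks down if I exceeds this
```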
We can show this by updating parameter values as follows:
* $p_L = 0$ (this is 0.5 as default),
* $I = 5$ (this is 2 as default).
```python
par = {'pL': 0, # set pL = 0, which is set to 0.5 as default
'I': 5} #set I=5, which is set to 2 as default
Model.upd_par(par)
```
Next, we can plot the expected profit functions:
```python
# Profits with default values
Model_st = func.poolingCredit()
Model_st.plot_exp_profits()
```
```python
# Profits with updated parameter values:
Model.plot_exp_profits()
```
|
9145162b9180e64b567beabaf2c1f280876aa293
| 115,347 |
ipynb
|
Jupyter Notebook
|
PS1/PS1_3.ipynb
|
ChampionApe/FinancialFriction
|
eb5be29c7951871972b55fd863c89b83bb50d295
|
[
"MIT"
] | null | null | null |
PS1/PS1_3.ipynb
|
ChampionApe/FinancialFriction
|
eb5be29c7951871972b55fd863c89b83bb50d295
|
[
"MIT"
] | null | null | null |
PS1/PS1_3.ipynb
|
ChampionApe/FinancialFriction
|
eb5be29c7951871972b55fd863c89b83bb50d295
|
[
"MIT"
] | null | null | null | 353.825153 | 29,300 | 0.938438 | true | 1,103 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.894789 | 0.782662 | 0.700318 |
__label__eng_Latn
| 0.989322 | 0.465405 |
# Integer Programming: Heuristics for the Facility Location Problem
## Description
In this work the facility location problem is studied in detail, focusing on location with fixed costs, analyzing the linear programming formulation as well as the *ADD* heuristic. All of this relies on the *R* language and the *glpk* solver. Finally, the validity of the methods has been checked on different problems by way of example.
This document is part of a group of works related to heuristics for solving integer programming problems. There are three other works devoted to the study of the [assignment problem](https://nbviewer.jupyter.org/github/garciparedes/integer-programming-heuristics/blob/master/integer-programming-assignment-heuristics.ipynb), the [knapsack problem](https://nbviewer.jupyter.org/github/garciparedes/integer-programming-heuristics/blob/master/integer-programming-knapsack-heuristics.ipynb) and [network problems](https://nbviewer.jupyter.org/github/garciparedes/integer-programming-heuristics/blob/master/integer-programming-network-heuristics.ipynb). The complete content of the work is published in the following repository: https://github.com/garciparedes/integer-programming-heuristics
## Author
* Sergio García Prado - [garciparedes.me](https://garciparedes.me)
## Date
* May 2018
## Contents
* [Introduction](#Introduction)
* [Facility Location Problems](#Facility-Location-Problems)
* [Location with Fixed Costs](#Location-with-Fixed-Costs)
* [Conclusions](#Conclusiones)
## Introduction
Facility location problems are a class of mixed linear programming problems (with integer and continuous variables) that allow solving a large number of optimization problems that arise in our society. Broadly speaking, these models support the study of decision problems based on the search for the optimal place to locate a service, in such a way that a given metric is minimized. That is why their scope of application is so wide.
The prototypical case is one in which a company is going to expand its branches. It therefore needs to select the alternative that provides the greatest benefit. To do so, it can define the problem in different ways, such as covering the area in which all its customers are located or, more modestly, a proportion of it, as well as reducing the distance according to some error function, among other situations. The most common are the following:
* Set Covering.
* Maximal Covering.
* Median Location.
* Emergency Center Location.
* Location with Fixed Costs.
* Districting.
In this work, we will focus on the *Location with Fixed Costs* problem, which is based on the search for the optimal configuration between a set of demand points and a set of possible supply points, from which a subset must be chosen to satisfy the needs of the demand points.
The rest of the work is organized as follows: first, different constants and functions used in the remaining sections are defined. Then, the model of facility location problems is described in more detail, focusing afterwards on the *Location with Fixed Costs* case, describing the model in detail and analyzing the *ADD* heuristic (widely used in real settings). The *WebBook* problem is then solved, describing step by step the constraints added to the problem. Finally, different aspects of this type of problem are discussed in the conclusions section.
#### Environment Setup
```R
rm(list = ls())
```
```R
library(slam, warn.conflicts = FALSE)
library(Rglpk, warn.conflicts = FALSE)
library(purrr, warn.conflicts = FALSE)
library(dplyr, warn.conflicts = FALSE)
library(magrittr, warn.conflicts = FALSE)
library(ggplot2, warn.conflicts = FALSE)
library(ggthemes, warn.conflicts = FALSE)
library(scales, warn.conflicts = FALSE)
```
Using the GLPK callable library version 4.65
```R
options(repr.matrix.max.rows = 600, repr.matrix.max.cols = 600)
```
#### Constants and Helper Functions
```R
Solve <- function(...) {
Rglpk_solve_LP(...)
}
```
```R
FormatUncapacitedFacicilityLocationSolution <- function(s, p) {
list(
min.cost = s$optimum,
ratios = matrix(round(s$solution[1:(p$m * p$n)], digits = 3), nrow = p$n),
services = matrix(s$solution[(p$m * p$n + 1):(p$n * (1 + p$m))], nrow = 1),
amount = round(t(t(matrix(s$solution[1:(p$m * p$n)], nrow = p$n)) * p$d), digits = 3)
)
}
```
## Facility Location Problems
As indicated above, facility location problems try to find the optimal place to locate supply points, chosen from a set of possible points, in order to fulfil the needs of a set of demand points while reducing costs as much as possible.
### Location with Fixed Costs
#### Description
The fixed-cost location problem is based on the following: suppose we have a set of $m$ demand points, each with a certain amount of demand denoted by $d_i$ (representing the demand of the $i$-th point). In addition, there is a set of $n$ possible supply points, each defined by a tuple $(f_j, s_j)$ representing the fixed cost of use and the maximum capacity of the $j$-th point. The problem then consists of deciding the optimal assignment between demand and supply points, taking the fixed usage costs into account. In addition, there is a matrix $C$ with $m$ rows and $n$ columns that stores the cost of using the $j$-th supply point by the $i$-th demand point.
This problem can be modelled in different ways; however, one of the most common formulations uses the following decision variables: a binary vector $X$ of length $n$ indicating whether or not a given supply point is used (which accounts for the fixed costs), together with a continuous matrix $Y$ with $m$ rows and $n$ columns indicating the ratio of use of the $j$-th supply point by the $i$-th demand point. Therefore, these variables only take values in the interval $[0, 1]$.
#### Model
Below is the linear programming model for the fixed-cost location problem, using the notation described in the previous section.
\begin{equation}
\begin{array}{ll@{}ll}
\text{Minimize} & \displaystyle \sum\limits_{j=1}^{n} f_{j} \cdot x_{j} + \sum\limits_{i=1}^{m}\sum\limits_{j=1}^{n} c_{ij} \cdot d_{i} \cdot y_{ij} \\
\text{subject to} & \sum\limits_{j=1}^{n} y_{ij} = 1, & \forall i \in \{1,...,m\}\\
& \sum\limits_{i=1}^{m} d_{i} \cdot y_{ij} - s_j \cdot x_{j} \leq 0, & \forall j \in \{1,...,n\}\\
& y_{ij} \geq 0, & \forall i \in \{1,...,m\}, \forall j \in \{1,...,n\} \\
& x_{j} \in \{0, 1\}, & \forall j \in \{1,...,n\}
\end{array}
\end{equation}
```R
SolveUncapacitedFacicilityLocationExact <- function(p, filtered = NULL) {
conditions <- matrix(0, nrow = p$n + p$m, ncol = (1 + p$m) * p$n)
for (i in 1:p$m) {
conditions[i, seq(p$n * (i - 1) + 1, length.out = p$n)] <- 1
}
for (i in 1:p$n) {
conditions[p$m + i, seq(i, length.out = p$m, by = p$n)] <- p$d
if (is.null(filtered) || i %in% filtered) {
conditions[p$m + i, p$m * p$n + i] <- -p$s[i]
}
}
rhs <- rep(c(1, 0), c(p$m, p$n))
obj <- c(t(t(p$c) * p$d), p$f)
dir <- rep(c("==", "<="), c(p$m, p$n))
types <- rep(c("C", "B"), c(p$m, 1) * p$n)
s <- Solve(obj = obj,
mat = conditions,
dir = dir,
rhs = rhs,
types = types)
return(s)
}
```
#### Heuristics
To solve the fixed-cost problem with a lower computational cost, there are different heuristics that provide reasonable results using less effort. In this case, we study the ADD heuristic for the *fixed-cost facility location* problem.
##### ADD
TODO(@garciparedes): Add a description of the ADD heuristic.
```R
SolveUncapacitedFacicilityLocationADD <- function(p, ...) {
stop("SolveUncapacitedFacicilityLocationADD() unimplemented yet!")
}
```
#### Example: WebBook
WebBook sells books over the internet. Management wants to know at which sites distribution warehouses should be located, out of $5$ possible sites, which we denote by *A1*, *A2*, *A3*, *A4* and *A5*. The sales region is divided into $5$ zones denoted by *NO*, *SO*, *MO*, *SE* and *NE*. The following data are provided: the average weekly demand of each region (in number of books), the average shipping cost of one book (Euros/book), the weekly fixed cost (in Euros) if the warehouse is in operation, and the capacity of each warehouse (in number of books).
The data needed to solve the problem are defined below, where `n` is the number of demand points, `m` the number of possible supply points, `c` the cost matrix, `f` the fixed costs, `s` the capacities of each supply point and `d` the demand of each demand point.
```R
p <- list(n = 5,
m = 5,
c = matrix(c(2.40, 3.50, 4.80, 6.80, 5.75,
3.25, 2.30, 3.40, 5.25, 6.00,
4.05, 3.25, 2.85, 4.30, 4.75,
5.25, 6.05, 4.30, 3.25, 2.75,
6.95, 5.85, 4.80, 2.10, 3.50),
nrow = 5, byrow = TRUE),
f = c(4000, 3000, 2500, 4000, 3000),
s = c(20000, 20000, 15000, 25000, 15000),
d = c(8000, 12000, 9000, 14000, 17000)
)
```
##### WebBook: Question 1
In this section the problem is solved exactly, yielding the following results (note that the solution obtained for the matrix $Y$, i.e. the ratio matrix, is binary):
```R
s <- SolveUncapacitedFacicilityLocationExact(p)
FormatUncapacitedFacicilityLocationSolution(s, p)
```
<dl>
<dt>$min.cost</dt>
<dd>165100</dd>
<dt>$ratios</dt>
<dd><table>
<tbody>
<tr><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>0</td><td>0</td><td>1</td><td>0</td><td>0</td></tr>
<tr><td>0</td><td>0</td><td>0</td><td>0</td><td>1</td></tr>
<tr><td>0</td><td>0</td><td>0</td><td>1</td><td>0</td></tr>
</tbody>
</table>
</dd>
<dt>$services</dt>
<dd><table>
<tbody>
<tr><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td></tr>
</tbody>
</table>
</dd>
<dt>$amount</dt>
<dd><table>
<tbody>
<tr><td>8000 </td><td> 0</td><td> 0 </td><td> 0</td><td> 0</td></tr>
<tr><td> 0 </td><td>12000</td><td> 0 </td><td> 0</td><td> 0</td></tr>
<tr><td> 0 </td><td> 0</td><td>9000 </td><td> 0</td><td> 0</td></tr>
<tr><td> 0 </td><td> 0</td><td> 0 </td><td> 0</td><td>17000</td></tr>
<tr><td> 0 </td><td> 0</td><td> 0 </td><td>14000</td><td> 0</td></tr>
</tbody>
</table>
</dd>
</dl>
##### WebBook: Question 2
*"A company executive thinks that the best option is to open warehouses *A1*, *A2* and *A4*. For this option, compute its corresponding cost manually."*
In this case, the space of possible solutions is restricted to those based on warehouses 1, 2 and 4. The solution obtained is the following (note that in this case the solutions obtained in the matrix $Y$, i.e. the ratio matrix, are not binary):
```R
s <- SolveUncapacitedFacicilityLocationExact(p, c(1, 2, 4))
FormatUncapacitedFacicilityLocationSolution(s, p)
```
<dl>
<dt>$min.cost</dt>
<dd>199850</dd>
<dt>$ratios</dt>
<dd><table>
<tbody>
<tr><td>1 </td><td>0.083</td><td>0 </td><td>0 </td><td>0.353</td></tr>
<tr><td>0 </td><td>0.917</td><td>1 </td><td>0 </td><td>0.000</td></tr>
<tr><td>0 </td><td>0.000</td><td>0 </td><td>0 </td><td>0.000</td></tr>
<tr><td>0 </td><td>0.000</td><td>0 </td><td>1 </td><td>0.647</td></tr>
<tr><td>0 </td><td>0.000</td><td>0 </td><td>0 </td><td>0.000</td></tr>
</tbody>
</table>
</dd>
<dt>$services</dt>
<dd><table>
<tbody>
<tr><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr>
</tbody>
</table>
</dd>
<dt>$amount</dt>
<dd><table>
<tbody>
<tr><td>8000 </td><td> 1000</td><td> 0 </td><td> 0</td><td> 6000</td></tr>
<tr><td> 0 </td><td>11000</td><td>9000 </td><td> 0</td><td> 0</td></tr>
<tr><td> 0 </td><td> 0</td><td> 0 </td><td> 0</td><td> 0</td></tr>
<tr><td> 0 </td><td> 0</td><td> 0 </td><td>14000</td><td>11000</td></tr>
<tr><td> 0 </td><td> 0</td><td> 0 </td><td> 0</td><td> 0</td></tr>
</tbody>
</table>
</dd>
</dl>
##### WebBook: Question 3
*"Add the following constraints:*
* *Due to geographic proximity, warehouses *A1* and *A3* should not be open simultaneously.*
* *To keep a balance among distributors, if warehouse *A4* is assigned more than $22000$ books per week, warehouses *A1* and *A2* must, together, be assigned at least $15000$ books.*"
In this case, we will solve the problem step by step, building the constraint matrix appropriately. The initial constraint matrix is built below:
```R
conditions <- matrix(0, nrow = p$n + p$m, ncol = (1 + p$m) * p$n)
for (i in 1:p$m) {
conditions[i, seq(p$n * (i - 1) + 1, length.out = p$n)] <- 1
}
for (i in 1:p$n) {
conditions[p$m + i, seq(i, length.out = p$m, by = p$n)] <- p$d
conditions[p$m + i, p$m * p$n + i] <- -p$s[i]
}
rhs <- rep(c(1, 0), c(p$m, p$n))
obj <- c(t(t(p$c) * p$d), p$f)
dir <- rep(c("==", "<="), c(p$m, p$n))
types <- rep(c("C", "B"), c(p$m, 1) * p$n)
cbind(conditions, dir, rhs)
```
<table>
<thead><tr><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col>dir</th><th scope=col>rhs</th></tr></thead>
<tbody>
<tr><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>-20000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><= </td><td>0 </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span></td><td>-20000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><= </td><td>0 </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>-15000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><= </td><td>0 </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>-25000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><= </td><td>0 </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>-15000 </td><td><= </td><td>0 </td></tr>
</tbody>
</table>
The next step is to add the constraint that only warehouse A1 or A3 can be built, obtaining the following constraint matrix:
```R
a1 <- rep(0, (1 + p$m) * p$n)
a1[p$m * p$n + c(1, 3)] <- 1
conditions <- rbind(conditions, a1)
dir <- c(dir, "<=")
rhs <- c(rhs, 1)
rownames(conditions) <- NULL
cbind(conditions, dir, rhs)
```
<table>
<thead><tr><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col>dir</th><th scope=col>rhs</th></tr></thead>
<tbody>
<tr><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>-20000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><= </td><td>0 </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span></td><td>-20000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><= </td><td>0 </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>-15000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><= </td><td>0 </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>-25000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><= </td><td>0 </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>-15000 </td><td><= </td><td>0 </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>1 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>1 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><= </td><td>1 </td></tr>
</tbody>
</table>
Finally, we add the constraints needed to state that if the number of books stored in *A4* exceeds $22000$, then *A1* and *A2* together must store more than $15000$ books. In this case it is necessary to add an additional binary variable to model the new implication logic. The constraint matrix is shown below:
```R
obj <- c(obj, 0)
conditions <- cbind(conditions, rep(0, nrow(conditions)))
types <- c(types, "B")
a2 <- conditions[4, ]
a2[length(a2)] <- -1
conditions <- rbind(conditions, a2)
dir <- c(dir, "<=")
rhs <- c(rhs, 22000 / p$s[4])
a3 <- colSums(conditions[c(1, 2), ])
conditions <- rbind(conditions, a3)
dir <- c(dir, ">=")
rhs <- c(rhs, 15000/sum(p$s[c(1, 2)]))
rownames(conditions) <- NULL
cbind(conditions, dir, rhs)
```
<table>
<thead><tr><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col></th><th scope=col>dir</th><th scope=col>rhs</th></tr></thead>
<tbody>
<tr><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>1 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>0 </td><td>== </td><td>1 </td></tr>
<tr><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>-20000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>0 </td><td><= </td><td><span style=white-space:pre-wrap>0 </span> </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span></td><td>-20000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>0 </td><td><= </td><td><span style=white-space:pre-wrap>0 </span> </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>-15000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>0 </td><td><= </td><td><span style=white-space:pre-wrap>0 </span> </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>-25000 </td><td><span style=white-space:pre-wrap>0 </span></td><td>0 </td><td><= </td><td><span style=white-space:pre-wrap>0 </span> </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>8000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>12000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>9000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>14000 </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td>17000 </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>-15000 </td><td>0 </td><td><= </td><td><span style=white-space:pre-wrap>0 </span> </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>1 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>1 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>0 </td><td><= </td><td><span style=white-space:pre-wrap>1 </span> </td></tr>
<tr><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>-1 </td><td><= </td><td>0.88 </td></tr>
<tr><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>1 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span> </td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td><span style=white-space:pre-wrap>0 </span></td><td>0 </td><td>>= </td><td>0.375 </td></tr>
</tbody>
</table>
Finally, we solve the problem with the indicated constraint matrix, obtaining the following solution, whose matrix $Y$ is binary even though it was not constrained to be so. This solution is therefore also the one that would be obtained by modelling the problem as single-source. The results are shown below.
```R
s <- Solve(obj = obj,
mat = conditions,
dir = dir,
rhs = rhs,
types = types)
FormatUncapacitedFacicilityLocationSolution(s, p)
```
<dl>
<dt>$min.cost</dt>
<dd>167900</dd>
<dt>$ratios</dt>
<dd><table>
<tbody>
<tr><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>0</td><td>0</td><td>1</td><td>0</td><td>0</td></tr>
<tr><td>0</td><td>0</td><td>0</td><td>0</td><td>1</td></tr>
<tr><td>0</td><td>0</td><td>0</td><td>1</td><td>0</td></tr>
</tbody>
</table>
</dd>
<dt>$services</dt>
<dd><table>
<tbody>
<tr><td>0</td><td>1</td><td>1</td><td>1</td><td>1</td></tr>
</tbody>
</table>
</dd>
<dt>$amount</dt>
<dd><table>
<tbody>
<tr><td> 0 </td><td> 0</td><td> 0 </td><td> 0</td><td> 0</td></tr>
<tr><td>8000 </td><td>12000</td><td> 0 </td><td> 0</td><td> 0</td></tr>
<tr><td> 0 </td><td> 0</td><td>9000 </td><td> 0</td><td> 0</td></tr>
<tr><td> 0 </td><td> 0</td><td> 0 </td><td> 0</td><td>17000</td></tr>
<tr><td> 0 </td><td> 0</td><td> 0 </td><td>14000</td><td> 0</td></tr>
</tbody>
</table>
</dd>
</dl>
##### WebBook: Question 4
*How should the distribution model be modified if each sales region is required to be assigned to a single warehouse?*
To model the problem as single-source, it suffices to specify that all decision variables must be binary, which is done with the following command:
```R
types.unique <- rep("B", (p$m + 1) * p$n)
```
The next step is to solve the problem which, as we can see, gives the same results as before, since that solution was already single-source:
```R
s <- Solve(obj = obj,
mat = conditions,
dir = dir,
rhs = rhs,
types = types.unique)
FormatUncapacitedFacicilityLocationSolution(s, p)
```
<dl>
<dt>$min.cost</dt>
<dd>167900</dd>
<dt>$ratios</dt>
<dd><table>
<tbody>
<tr><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td></tr>
<tr><td>0</td><td>0</td><td>1</td><td>0</td><td>0</td></tr>
<tr><td>0</td><td>0</td><td>0</td><td>0</td><td>1</td></tr>
<tr><td>0</td><td>0</td><td>0</td><td>1</td><td>0</td></tr>
</tbody>
</table>
</dd>
<dt>$services</dt>
<dd><table>
<tbody>
<tr><td>0</td><td>1</td><td>1</td><td>1</td><td>1</td></tr>
</tbody>
</table>
</dd>
<dt>$amount</dt>
<dd><table>
<tbody>
<tr><td> 0 </td><td> 0</td><td> 0 </td><td> 0</td><td> 0</td></tr>
<tr><td>8000 </td><td>12000</td><td> 0 </td><td> 0</td><td> 0</td></tr>
<tr><td> 0 </td><td> 0</td><td>9000 </td><td> 0</td><td> 0</td></tr>
<tr><td> 0 </td><td> 0</td><td> 0 </td><td> 0</td><td>17000</td></tr>
<tr><td> 0 </td><td> 0</td><td> 0 </td><td>14000</td><td> 0</td></tr>
</tbody>
</table>
</dd>
</dl>
## Conclusions
Service-location problems formulated with linear programming are a very rich modelling approach that makes it possible to study the problem from a mathematical point of view in a relatively simple way. These problems are nevertheless computationally very hard because of their binary-programming character: the combinatorial explosion means that, when a large number of demand and supply points must be modelled, finding solutions in reasonable times becomes very difficult.
For these reasons, the literature has proposed different heuristics that try to cope with such situations by offering a reasonable trade-off between the optimal results obtained with exact methods and a reduced use of computational resources. As indicated, the most basic greedy heuristics are *ADD* and *DROP*, yet even so they provide efficient results, which is why these techniques should be taken into account.
## References
* [TRC13] Team, R.C., 2013. R: A language and environment for statistical computing.
* [GP18] Sergio García Prado. Programación Entera: Heurísticas, 2018. [github.com/garciparedes/integer-programming-heuristics](https://github.com/garciparedes/integer-programming-heuristics).
* [SA18] Jesús Sáez Aguado. Programación Entera, 2017/18. Facultad de Ciencias: Departamento de Estadística e Investigación Operativa.
* [THBSST17] Theussl, S., Hornik, K., Buchta, C., Schwendinger, F., Schuchardt, H. and Theussl, M.S., 2017. Package ‘Rglpk’.
|
b8e494ab982839ad2a369bdaced5a78475fe73ff
| 115,042 |
ipynb
|
Jupyter Notebook
|
integer-programming-service-location-heuristics.ipynb
|
garciparedes/linear-programming-heuristics
|
2b4aa26f4c68f800f93cc7530daf3272d7f67b14
|
[
"Apache-2.0"
] | 1 |
2019-06-24T02:14:25.000Z
|
2019-06-24T02:14:25.000Z
|
integer-programming-service-location-heuristics.ipynb
|
garciparedes/linear-programming-heuristics
|
2b4aa26f4c68f800f93cc7530daf3272d7f67b14
|
[
"Apache-2.0"
] | null | null | null |
integer-programming-service-location-heuristics.ipynb
|
garciparedes/linear-programming-heuristics
|
2b4aa26f4c68f800f93cc7530daf3272d7f67b14
|
[
"Apache-2.0"
] | null | null | null | 85.724292 | 1,838 | 0.3939 | true | 21,644 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.752013 | 0.760651 | 0.572019 |
__label__kor_Hang
| 0.222616 | 0.167321 |
```python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import factorial
from collections import Counter
```
### G-M tube high voltage
```python
#the amplitude, counting rate and background noise
A = np.array([0.440,0.586,0.624,0.728,0.808,0.952,1.002])
R = np.array([0.02,10.7,11.3,11.7,12.3,12.2,12.2])
N = np.array([0,.25,.37,.33,.46,.57,.53])
```
```python
mean = np.mean(R)
sd = np.std(R)
error_on_mean = (sd**2)/6
print("mean:",mean)
print("standard deviation:",sd)
print("error on the mean:", error_on_mean)
fig = plt.figure(figsize=(7,7))
#set the voltage from 780V to 900V
V=np.linspace(780,900,7)
plt.plot(V,A)
plt.title('Figure 2.1')
plt.xlabel('Voltage')
plt.ylabel('Amplitude')
plt.savefig("Amplitude_voltage.png")
```
```python
#voltage vs rate
fig = plt.figure(figsize=(7,7))
plt.plot(V,R-N)
plt.title('Figure 2.2')
plt.xlabel('Voltage')
plt.ylabel('Rate - Noise')
plt.savefig("Voltage_rate.png")
```
## Frequency Distribution
```python
#import the measured 100 rates
Rate = np.array([13,9,10,12,5,6,7,13,16,11,6,11,9,8,10,11,13,12,10,12,9,7,8,14,8,7,9,8,9,12,8,7,15,11,10,9,8,14,9,12,9,11,9,15,14,10,11,12,9,
5,12,13,19,11,10,16,14,13,16,11,8,10,11,4,9,16,10,8,12,13,10,9,12,12,10,19,9,12,13,11,12,14,15,11,15,6,11,5,7,14,8,13,18,10,9
,12,11,12,14,10])
counts_of_rates = dict(Counter(sorted(Rate,key=int)))
print(counts_of_rates)
ratecounts = np.array([1,3,3,5,9,14,12,13,14,8,7,4,4,1,2])
print(ratecounts)
```
{4: 1, 5: 3, 6: 3, 7: 5, 8: 9, 9: 14, 10: 12, 11: 13, 12: 14, 13: 8, 14: 7, 15: 4, 16: 4, 18: 1, 19: 2}
[ 1 3 3 5 9 14 12 13 14 8 7 4 4 1 2]
```python
#caculate the mean of the rates
m = np.mean(Rate)
print("mean:",m)
```
mean: 10.83
```python
#define the factorial function (non-exact so it works on float arrays)
def fac(x):
    return factorial(x, exact=False)
```
```python
#define the function for the Poisson distribution
def P(n):
return ((m**n)*(np.exp(-m)))/fac(n)
```
```python
#plot the histogram and the Poisson distribution
plt.figure(figsize=(7,7))
plt.hist(Rate,bins=15,label="F(n)")
n = np.linspace(4,19,15)
plt.plot(n,100*P(n),color="red",label="P(n)")
plt.legend()
plt.title('Figure 3.1')
plt.xlabel('Rate')
plt.ylabel('Frequency')
plt.savefig("Poission_of_counting.png")
```
```python
ratecounts - 100*P(n)
```
array([-0.13473211, 0.42173075, -1.76393821, -2.40233238, -0.90891765,
2.36253292, -0.16344536, 1.5555674 , 4.21513825, 0.33785491,
1.46808087, 0.296458 , 1.68935414, -0.3493233 , 1.25965045])
```python
#compute the chi-square statistic
chi_square = np.sum((ratecounts-100*P(n))**2/(100*P(n)))
print('chi_square:',chi_square)
print("confindence level:",0.9235)
```
chi_square: 8.007176844614612
    confidence level: 0.9235
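The confidence level quoted above is entered by hand. As a sketch (not part of the original analysis), it could be computed from the chi-square statistic with `scipy.stats`; the exact value depends on the number of degrees of freedom assumed (bins minus fitted constraints), which is a guess here:
```python
from scipy.stats import chi2

dof = len(ratecounts) - 1           # assumed degrees of freedom: 15 bins minus one constraint
p_value = chi2.sf(chi_square, dof)  # probability of a chi-square at least this large
print("p-value for", dof, "dof:", p_value)
```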
## Absorption of gamma rays by lead
```python
#import the data
#x in density times thickness
x = np.array([30.71,26.89,22.52,18.59,13.88,7.41,2.74])
#intensity = rate
I = np.array([673/831,555/501,700/479,728/428,1120/401,1003/187,999/114])
#I0 (I-naught), the unattenuated intensity
I0 = 8.517
#background intensity = background rate * 1000
BI = 0.451
#the total error of measurement and background, sqrt(sigma_i^2 + sigma_b^2)
yerr = np.array([0.0459,0.0578,0.0646,0.0714,0.0900,0.1727,0.2793])
```
```python
#show that x and ln(I) have a linear relationship
plt.figure(figsize=(7,7))
plt.plot(x,np.log(I-BI))
plt.title('Figure 4.1')
plt.xlabel('ρx')
plt.ylabel('lnI')
plt.savefig("linear_relation.png")
```
```python
#the model function is u*x + b
def func(x,u,b):
return u*x + b
#the linearized intensity is ln(I - BI)
lnI = np.log(I-BI)
#the error on ln(I - BI) is the measured error divided by (I - BI)
lin_yerr = yerr/(I-BI)
#define the terms needed to find u,b and find u,b
wi = 1/(lin_yerr)**2
W = np.sum(wi)
X_1 = np.sum(wi*x)
Y_1 = np.sum(wi*lnI)
X_2 = np.sum(wi*x**2)
Y_2 = np.sum(wi*lnI**2)
P = np.sum(wi*x*lnI)
delta = (W*X_2 - X_1**2)
u = (W*P - X_1*Y_1) / delta
b = (X_2*Y_1 - X_1*P) / delta
```
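For completeness, the standard closed-form error estimates of a weighted straight-line fit could be evaluated directly from the sums already computed above (a sketch only; the notebook instead extracts the uncertainty on $u$ numerically further down):
```python
# Standard weighted least-squares uncertainties on the fitted slope u and intercept b
sigma_u = np.sqrt(W / delta)    # uncertainty on the slope (mass absorption coefficient)
sigma_b = np.sqrt(X_2 / delta)  # uncertainty on the intercept
print("sigma_u:", sigma_u, " sigma_b:", sigma_b)
```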
```python
#plot the data points and fitting line
plt.figure(figsize=(7,7))
plt.errorbar(x,lnI,lin_yerr,fmt=".",color="blue",label="data points")
plt.plot(x, func(x,u,b) ,color="red",label="fit")
plt.legend()
plt.title('Figure 4.2')
plt.xlabel('ρx')
plt.ylabel('ln(I-BI)')
plt.savefig("line_fitting.png")
print("mass absorption coefficient:",-u)
```
```python
# calculate the chi-square
chi_square = np.sum((lnI - func(x,u,b))**2/lin_yerr**2)
degree_freedom = 7-2
normalized_chisqr = chi_square/degree_freedom
print("chi-square:",chi_square)
print("normalized chi-square:", normalized_chisqr)
print("confindence level:",0.06289)
```
```python
from sympy.solvers import solve
from sympy import Symbol
z = Symbol('z')
w = solve(np.sum((lnI - func(x,z,b))**2/lin_yerr**2)-11.473266437659785,z)
print("uncertainty on u:", [wi - u for wi in w])
```
|
bcd282bb160c16d88460954379c610bfa056a29b
| 102,658 |
ipynb
|
Jupyter Notebook
|
Appendix.LAB_1.ipynb
|
LorenzoZhu/Phys133
|
df818055e279ccbdd92851ad1cb4d6851735912d
|
[
"Apache-2.0"
] | 1 |
2021-07-24T04:01:08.000Z
|
2021-07-24T04:01:08.000Z
|
Appendix.LAB_1.ipynb
|
PengpeiZhu/Phys133
|
df818055e279ccbdd92851ad1cb4d6851735912d
|
[
"Apache-2.0"
] | null | null | null |
Appendix.LAB_1.ipynb
|
PengpeiZhu/Phys133
|
df818055e279ccbdd92851ad1cb4d6851735912d
|
[
"Apache-2.0"
] | null | null | null | 235.995402 | 22,200 | 0.921263 | true | 1,879 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.763484 | 0.833325 | 0.63623 |
__label__eng_Latn
| 0.370578 | 0.316506 |
**NOTE:** *The slope-deflection sign convention may seem strange to those used to matrix stiffness analysis, but it makes sense. None of the slope deflection equations explicitly state a
member 'direction' and it doesn't matter. For example, whether you consider the column AB as
going from A to B or as going from B to A, a +ive shear at end A is still directed toward the
left. In matrix analysis, that direction matters.*
## Kulak & Grondin - Example 8.2
This solves a close approximation to the first-order example of Kulak and Grondin. The major
differences are:
* the lateral resistance of the outside column stacks are ignored; only the central rigid frame is included.
* this method does not account for axial changes of length in any of the members.
The results obtained here are
compared with those obtained via a [first-order matrix analysis](../matrix-methods/frame2d/50-test-KG82sd.ipynb),
and found to agree to about 6 significant figures.
```python
from IPython import display
display.SVG('KG-8.2sd.svg')
```
### Solve for joint rotations and storey translations using slope-deflection
```python
import sympy as sy # use symbolic algebra
sy.init_printing() # print pretty math
from sdutil2 import SD, FEF # slope-deflection library
```
```python
sy.var('theta_a theta_b theta_c theta_d theta_e theta_f Delta_b Delta_c')
```
```python
E = 200000
Ic = 222E6 # W310x97
Ib = 488E6 # W460x106
Hf = 21900 # Horizontal load at F
He = 43400 # Horizontal load at E
Lab = 6500 # Length of column
Lbc = 5500 # Length of column
Lbe = 10500 # Length of beam
```
```python
Mab,Mba,Vab,Vba = SD(Lab,E*Ic,theta_a,theta_b,Delta_b) # column AB, BC
Mbc,Mcb,Vbc,Vcb = SD(Lbc,E*Ic,theta_b,theta_c,Delta_c-Delta_b)
Mde,Med,Vde,Ved = SD(Lab,E*Ic,theta_d,theta_e,Delta_b) # column DE, EF
Mef,Mfe,Vef,Vfe = SD(Lbc,E*Ic,theta_e,theta_f,Delta_c-Delta_b)
Mbe,Meb,Vbe,Veb = SD(Lbe,E*Ib,theta_b,theta_e) + FEF.udl(Lbe,55) # beams BE, CF
Mcf,Mfc,Vcf,Vfc = SD(Lbe,E*Ib,theta_c,theta_f) + FEF.udl(Lbe,45)
```
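The `sdutil2` helper is not listed in this notebook, so its exact sign conventions are not visible here. As a rough sketch (an assumption, not the actual library code), `SD` presumably returns the standard slope-deflection end moments and the end shears that balance them:
```python
def SD_sketch(L, EI, theta_j, theta_k, Delta=0):
    """Hypothetical stand-in for sdutil2.SD: end moments and shears of member j-k."""
    psi = Delta / L                                # chord rotation from relative end translation
    Mjk = (2*EI/L)*(2*theta_j + theta_k - 3*psi)   # slope-deflection end moments
    Mkj = (2*EI/L)*(theta_j + 2*theta_k - 3*psi)
    V = (Mjk + Mkj) / L                            # end shear from moment equilibrium (sign convention may differ)
    return Mjk, Mkj, V, V
```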
```python
eqns = [ Mba+Mbe+Mbc, # sum of moments at B = 0
Mcb+Mcf, # sum of moments at C = 0
Med+Meb+Mef, # sum of moments at E = 0
Mfe+Mfc, # sum of moments at F = 0
-Vab-Vde+Hf+He, # sum of Fx @ base of storey 1 = 0
-Vbc-Vef+Hf, # sum of Fx @ base of storey 2 = 0
theta_a, # fixed support at A, rotation = 0
theta_d] # fixed support at F, rotation = 0
```
```python
soln = sy.solve(eqns)
soln
```
### Determine end moments and shears
#### Demonstrate how to access and convert one end moment:
```python
Mab
```
```python
Mab.subs(soln)
```
```python
Mab.subs(soln).n(4) * 1E-6
```
```python
V = globals() # another way to access global variables
V['Mab']
```
```python
Mab is V['Mab']
```
True
```python
V['Mab'].subs(soln).n()
```
#### Determine end moments by back substitution:
```python
# collect the end moments in all 6 members
allm = []
V = globals()
for m in 'ab,bc,de,ef,be,cf'.split(','):
mj = V['M'+m].subs(soln).n()*1E-6 # Mxy
mk = V['M'+m[::-1]].subs(soln).n()*1E-6 # Myx
allm.append((m.upper(),mj,mk))
allm
```
[('AB', -49.9080183705027, 47.4941396356060),
('BC', 250.526060428376, 252.096857589821),
('DE', -189.782099213905, -232.254022051198),
('EF', -290.011616822927, -333.061301195269),
('BE', -298.020200063982, 522.265638874126),
('CF', -252.096857589821, 333.061301195269)]
```python
[(m,round(a,1),round(b,1)) for m,a,b in allm] # display to one decimal place
```
[('AB', -49.9, 47.5),
('BC', 250.5, 252.1),
('DE', -189.8, -232.3),
('EF', -290.0, -333.1),
('BE', -298.0, 522.3),
('CF', -252.1, 333.1)]
#### Determine end shears by back substitution:
```python
# collect the end shears in all 6 members
allv = []
V = globals()
for m in 'ab,bc,de,ef,be,cf'.split(','):
mj = V['V'+m].subs(soln).n()*1E-3 # Mxy
mk = V['V'+m[::-1]].subs(soln).n()*1E-3 # Myx
allv.append((m.upper(),mj,mk))
allv
```
[('AB', 0.371365959214883, 0.371365959214883),
('BC', -91.3859850942176, -91.3859850942176),
('DE', 64.9286340407851, 64.9286340407851),
('EF', 113.285985094218, 113.285985094218),
('BE', 267.393291541891, -310.106708458109),
('CF', 228.539100609005, -243.960899390995)]
```python
[(m,round(a,1),round(b,1)) for m,a,b in allv] # display to one decimal place
```
[('AB', 0.4, 0.4),
('BC', -91.4, -91.4),
('DE', 64.9, 64.9),
('EF', 113.3, 113.3),
('BE', 267.4, -310.1),
('CF', 228.5, -244.0)]
### Now compare to matrix method solution (Frame2D):
```python
import pandas as pd
dd = '../matrix-methods/frame2d/data/KG82sd.d/all' # location of mm solution
```
### Compare Moments:
#### Convert current member end moments solution to tabular form:
```python
mems = pd.DataFrame(allm,columns=['ID','MZJ','MZK']).set_index('ID')
mems
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>MZJ</th>
<th>MZK</th>
</tr>
<tr>
<th>ID</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>AB</th>
<td>-49.9080183705027</td>
<td>47.4941396356060</td>
</tr>
<tr>
<th>BC</th>
<td>250.526060428376</td>
<td>252.096857589821</td>
</tr>
<tr>
<th>DE</th>
<td>-189.782099213905</td>
<td>-232.254022051198</td>
</tr>
<tr>
<th>EF</th>
<td>-290.011616822927</td>
<td>-333.061301195269</td>
</tr>
<tr>
<th>BE</th>
<td>-298.020200063982</td>
<td>522.265638874126</td>
</tr>
<tr>
<th>CF</th>
<td>-252.096857589821</td>
<td>333.061301195269</td>
</tr>
</tbody>
</table>
</div>
#### Fetch solution from Frame2D:
```python
mefs = pd.read_csv(dd+'/member_end_forces.csv').set_index('MEMBERID').loc[mems.index]
mems2 = mefs[['MZJ','MZK']] * -1E-6 # convert sign and to kN-m
mems2
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>MZJ</th>
<th>MZK</th>
</tr>
<tr>
<th>ID</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>AB</th>
<td>-49.908014</td>
<td>47.494144</td>
</tr>
<tr>
<th>BC</th>
<td>250.526061</td>
<td>252.096859</td>
</tr>
<tr>
<th>DE</th>
<td>-189.782098</td>
<td>-232.254022</td>
</tr>
<tr>
<th>EF</th>
<td>-290.011612</td>
<td>-333.061297</td>
</tr>
<tr>
<th>BE</th>
<td>-298.020204</td>
<td>522.265634</td>
</tr>
<tr>
<th>CF</th>
<td>-252.096859</td>
<td>333.061297</td>
</tr>
</tbody>
</table>
</div>
#### Compare member end moments in the two solutions:
```python
mdiff = (100*(1-mems/mems2)) # calculate % diff
mdiff
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>MZJ</th>
<th>MZK</th>
</tr>
<tr>
<th>ID</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>AB</th>
<td>-9.17049660653646e-6</td>
<td>8.35047321290361e-6</td>
</tr>
<tr>
<th>BC</th>
<td>9.14187947564926e-8</td>
<td>6.57745258259013e-7</td>
</tr>
<tr>
<th>DE</th>
<td>-5.40915578994827e-7</td>
<td>4.17979983957650e-8</td>
</tr>
<tr>
<th>EF</th>
<td>-1.72059335667996e-6</td>
<td>-1.36026414576662e-6</td>
</tr>
<tr>
<th>BE</th>
<td>1.40762708156217e-6</td>
<td>-9.36849420263286e-7</td>
</tr>
<tr>
<th>CF</th>
<td>6.57745302667934e-7</td>
<td>-1.36026414576662e-6</td>
</tr>
</tbody>
</table>
</div>
```python
mdiff.abs().max()
```
MZJ 0.000009
MZK 0.000008
dtype: float64
The maximum difference in member end moments is 0.000009% (about 7 or 8 sig figs).
### Compare Shears:
#### Convert our end shears to tabular form:
```python
mevs = pd.DataFrame(allv,columns=['ID','FYJ','FYK']).set_index('ID')
mevs
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>FYJ</th>
<th>FYK</th>
</tr>
<tr>
<th>ID</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>AB</th>
<td>0.371365959214883</td>
<td>0.371365959214883</td>
</tr>
<tr>
<th>BC</th>
<td>-91.3859850942176</td>
<td>-91.3859850942176</td>
</tr>
<tr>
<th>DE</th>
<td>64.9286340407851</td>
<td>64.9286340407851</td>
</tr>
<tr>
<th>EF</th>
<td>113.285985094218</td>
<td>113.285985094218</td>
</tr>
<tr>
<th>BE</th>
<td>267.393291541891</td>
<td>-310.106708458109</td>
</tr>
<tr>
<th>CF</th>
<td>228.539100609005</td>
<td>-243.960899390995</td>
</tr>
</tbody>
</table>
</div>
#### Extract the end shears from Frame2D results:
```python
mevs2 = mefs[['FYJ','FYK']] * 1E-3
mevs2[['FYK']] *= -1 # change sign on end k
mevs2
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>FYJ</th>
<th>FYK</th>
</tr>
<tr>
<th>ID</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>AB</th>
<td>0.371365</td>
<td>0.371365</td>
</tr>
<tr>
<th>BC</th>
<td>-91.385985</td>
<td>-91.385985</td>
</tr>
<tr>
<th>DE</th>
<td>64.928634</td>
<td>64.928634</td>
</tr>
<tr>
<th>EF</th>
<td>113.285983</td>
<td>113.285983</td>
</tr>
<tr>
<th>BE</th>
<td>267.393292</td>
<td>-310.106708</td>
</tr>
<tr>
<th>CF</th>
<td>228.539101</td>
<td>-243.960899</td>
</tr>
</tbody>
</table>
</div>
#### Compare end shears in the two results:
```python
vdiff = 100*(1-mevs/mevs2)
vdiff
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>FYJ</th>
<th>FYK</th>
</tr>
<tr>
<th>ID</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>AB</th>
<td>-0.000353904629113444</td>
<td>-0.000353904629113444</td>
</tr>
<tr>
<th>BC</th>
<td>3.75466946422875e-7</td>
<td>3.75466946422875e-7</td>
</tr>
<tr>
<th>DE</th>
<td>-2.20237894588138e-7</td>
<td>-2.20237894588138e-7</td>
</tr>
<tr>
<th>EF</th>
<td>-1.52798038666191e-6</td>
<td>-1.52798038666191e-6</td>
</tr>
<tr>
<th>BE</th>
<td>3.23684223868526e-7</td>
<td>-2.79100675903976e-7</td>
</tr>
<tr>
<th>CF</th>
<td>2.57897703193066e-7</td>
<td>-2.41594921845945e-7</td>
</tr>
</tbody>
</table>
</div>
```python
vdiff.abs().max()
```
FYJ 0.000354
FYK 0.000354
dtype: float64
The maximum difference is about 0.0004%, which is comparatively high, but the end shears on column AB are very small.
### Compare Displacements
```python
deltw = pd.DataFrame([('B', soln[Delta_b], soln[theta_b]),
('C', soln[Delta_c], soln[theta_c])],columns=['ID','DX','RZ']).set_index('ID')
deltw
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>DX</th>
<th>RZ</th>
</tr>
<tr>
<th>ID</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>B</th>
<td>23.3628188885579</td>
<td>0.00712966246666336</td>
</tr>
<tr>
<th>C</th>
<td>34.3068306465552</td>
<td>0.00722695283139248</td>
</tr>
</tbody>
</table>
</div>
```python
disp = pd.read_csv(dd+'/node_displacements.csv').set_index('NODEID').loc[deltw.index][['DX','RZ']]
disp['RZ'] *= -1
disp
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>DX</th>
<th>RZ</th>
</tr>
<tr>
<th>ID</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>B</th>
<td>23.362818</td>
<td>0.007130</td>
</tr>
<tr>
<th>C</th>
<td>34.306830</td>
<td>0.007227</td>
</tr>
</tbody>
</table>
</div>
```python
diffd = (100*(1-deltw/disp))
diffd
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>DX</th>
<th>RZ</th>
</tr>
<tr>
<th>ID</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>B</th>
<td>-3.52157611960280e-6</td>
<td>-6.27118601492782e-7</td>
</tr>
<tr>
<th>C</th>
<td>-2.71776707805316e-6</td>
<td>6.06124439528344e-7</td>
</tr>
</tbody>
</table>
</div>
```python
diffd.abs().max()
```
DX 3.521576e-06
RZ 6.271186e-07
dtype: float64
Max difference in displacement is about 0.000004%.
Note that the matrix method solution was accomplished by setting A very high in order to minimize
effects of axial deformation. But there is a limit to how high this can be set, due to numerical
instability in equation solving (probably). Setting $A=10^{10}$ seems to be about as high as is possible - larger values lead to worse results.
```python
```
|
b526c70b426f9255bd45731097ea3d6f4afba8f4
| 93,147 |
ipynb
|
Jupyter Notebook
|
slope-deflection/KG-Example-8.2.ipynb
|
nholtz/structural-analysis
|
246d6358355bd9768e30075d1f6af282ceb995be
|
[
"CC0-1.0"
] | 3 |
2016-05-26T07:01:51.000Z
|
2019-05-31T23:48:11.000Z
|
slope-deflection/KG-Example-8.2.ipynb
|
nholtz/structural-analysis
|
246d6358355bd9768e30075d1f6af282ceb995be
|
[
"CC0-1.0"
] | null | null | null |
slope-deflection/KG-Example-8.2.ipynb
|
nholtz/structural-analysis
|
246d6358355bd9768e30075d1f6af282ceb995be
|
[
"CC0-1.0"
] | 1 |
2016-08-30T06:08:03.000Z
|
2016-08-30T06:08:03.000Z
| 60.485065 | 9,360 | 0.657047 | true | 5,621 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.921922 | 0.831143 | 0.766249 |
__label__eng_Latn
| 0.520126 | 0.618585 |
# A model for stock return calculation
The purpose of this model is to help us estimate, what size a stock position need to have, to be considered lucrative given the boundary conditions (investment costs & market conditions).
### Notation:
$R$: Return<br>
$I$: Income<br>
$E$: Expenditure<br>
$_b$ or $_s$: buy or sell annotation<br>
$n$: Number of stocks<br>
$p$: Price per stock<br>
$c$: Cost per position transaction (not per stock)<br>
$m$: Total capital available<br>
$r$: Maximum risk per position<br>
$x$: Relative stock price change<br>
$\Delta x$: Uncertainty for x<br>
$<x>$, $<\Delta x>$: Average values of $x$ and $\Delta x$<br>
### Definitions
\begin{equation*}
R = I - E \tag{1}
\end{equation*}
- <b>Assumption 1:</b> $c_{b} \approx c_{s} = c$
\begin{equation*}
E = n_b \cdot p_{b} + c = \frac{m r - c}{p_{b}} p_{b} + c = m r \tag{2}
\end{equation*}
- <b>Assumption 2:</b> $p_{s} = (1+x)\cdot p_{b}$
\begin{equation*}
I = n_{b} \cdot p_{s} - c = \frac{m r - c}{p_{b}} p_{s} - c = \frac{m r - c}{p_{b}} (1+x)\cdot p_{b} - c\\ \tag{3}
I = (m r - c)\cdot(1+x) - c = (m r - c)\cdot(1+x) - c\\
\end{equation*}
This yields us the formula for the return:
\begin{equation}
R = (m r - c)\cdot(1+x) - c - m r = \left[ (m r - c)\cdot x - 2c \right],
\end{equation}
having an uncertainty of
\begin{equation*}
\sigma^2_R = \left[ \Delta x \cdot (m r - c) \right]^2 \tag{4}
\end{equation*}
The ensemble return is given by:<br>
$$<R_{tot}> = \sum_{i=1}^{n} R_{i} = n \cdot \left[ \left( m r - c\right) \cdot <x> - 2c \right], \tag{5}$$
- <b>Assumption 3:</b> Weighting relative stock price change by amount invested per position $w_i = mr - c$
$$<x> = \frac{\sum_{i=1}^{n} w_i \cdot x_i}{\sum_{i=1}^{n} w_i} = \frac{\sum_{i=1}^{n} (mr - c)\cdot x_i}{\sum_{i=1}^{n} (mr - c)} \tag{6}$$
with an uncertainty for the ensemble
\begin{equation*}
<\sigma^2_{R_{tot}}> = \sum_{i=1}^{n} \sigma^2_{R,i} = \left[ (m r - c) \cdot \sqrt{n} \cdot <\Delta x>\right]^2 \tag{7}
\end{equation*}
$$<\Delta x> = \frac{\sum_{i=1}^{n} \Delta x_i \cdot (mr - c) }{\sum_{i=1}^{n} (mr - c)} \tag{7} $$
The risk is defined by (helps us to eliminate n or r in the above formulae):
\begin{equation*}
n = \frac{1}{r} \,\,\, , \,\, r \in{(0.005, 0.05)} \tag{8}
\end{equation*}
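Setting $R = 0$ in the return formula above gives the break-even yield of a single position, which is a useful reference when reading the plots below:
$$x_{be} = \frac{2c}{m r - c}$$
With the example parameters used in the next cell ($m = 10000$, $r = 0.05$, $c = 15$) this gives $x_{be} = 30/485 \approx 6.2\%$, while $r = 0.02$ with the same costs would already require $\approx 16.2\%$.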
```python
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap
from matplotlib.patches import Ellipse
from matplotlib.ticker import FixedLocator
%matplotlib inline
import numpy as np
```
```python
risk = 0.05 # maximum risk you are willing to take per position
n_pos = 1/risk
capital = 10000 # total amount of money
costs = 15 # transaction costs (buying, selling)
total_yield = 0.07 # yield = change in stock price + dividends
delta_total_yield = 0.01 # standard deviation on the yield
def R(capital, risk, cost, total_yield):
"""Expected return value"""
return (capital * risk - cost) * total_yield - 2 * cost
def R2(capital, n_pos, cost, total_yield):
"""As R, independent of risk"""
return (capital / n_pos - cost) * total_yield - 2 * cost
def dR(capital, risk, cost, delta_total_yield):
"""Expected uncertainty on the return value"""
return (capital * risk - cost) * delta_total_yield
def dR2(capital, n_pos, cost, delta_total_yield):
"""As dR, independent of risk"""
return (capital / n_pos - cost) * delta_total_yield
def R_tot(capital, risk, cost, total_yield_avg, n_pos):
"""Expected total return value"""
return n_pos * ( (capital * risk - cost) * total_yield_avg - 2 * cost)
def R_tot2(capital, cost, total_yield_avg, n_pos):
"""As R_tot, independent of risk"""
return ( (capital - cost * n_pos) * total_yield_avg - 2 * cost * n_pos)
def dR_tot(capital, risk, cost, delta_total_yield_avg, n_pos):
"""Expected uncertainty on R_tot"""
return np.abs(np.sqrt(n_pos) * ( (capital * risk - cost
)*delta_total_yield_avg ))
def dR_tot2(capital, cost, delta_total_yield_avg, n_pos):
"""As dR_tot, independent of risk"""
return np.abs(( (capital / np.sqrt(n_pos) - cost* np.sqrt(n_pos)
)*delta_total_yield_avg ))
```
```python
# Example values
print('ROI: {:.2f} - Delta ROI: {:.2f}'.format(
R(capital, risk, costs, total_yield
), dR(capital, risk, costs, delta_total_yield)
))
print('Total ROI: {:.2f} - Delta Total ROI: {:.2f}'.format(
R_tot(capital, risk, costs, total_yield, n_pos
), dR_tot(capital, risk, costs,
delta_total_yield, n_pos)))
```
ROI: 3.95 - Delta ROI: 4.85
Total ROI: 79.00 - Delta Total ROI: 21.69
# ROI
```python
risks, total_yields = np.meshgrid(
np.arange(0.01, 0.1, 0.001),
np.arange(-0.15, 0.15, 0.001) )
returns = R(capital, risks, costs, total_yields)
def r_to_c(r, capital=capital):
return capital * r
def c_to_r(r, capital=capital):
return r / capital
# We find that, given the boundary conditions, the rule of thumb of risking up to 0.02 per position
# does not become profitable even under exceptionally good market conditions (yields > 7% p.a.)
fig, ax = plt.subplots(figsize=(18, 10))
seismic = cm.get_cmap('jet', 25)
newcolors = seismic(np.linspace(0, 1, 25))
newcolors[:15, :] = np.array([0.5, .5, .5, 0.2])
newcmp = ListedColormap(newcolors)
cs = ax.contour(risks, total_yields, returns, levels=25,
cmap=cm.get_cmap('binary', 1))
cs = ax.contourf(risks, total_yields, returns, levels=25, cmap=newcmp,
)
ax.add_patch(Ellipse([0.02, 0.07], 0.02, 0.14, edgecolor='k',
facecolor='black', fill=True, alpha=1))
ax.add_patch(Ellipse([0.02, 0.07], 0.018, 0.13, edgecolor='k',
facecolor='green', fill=True, alpha=1))
ax.text(0.015, 0.05, 'Benchmark', fontsize=15)
cbar = fig.colorbar(cs)
ax.set_xlabel('Risk per position')
ax.set_ylabel('Total yield')
ax.set_title('Return as function of risk and yield')
ax.set_xlim([0.01, 0.10])
ax.set_ylim([-0.15, 0.15])
secax = ax.secondary_xaxis('top',
                           functions=(r_to_c, c_to_r))
secax.get_xaxis().set_major_locator(FixedLocator(
np.arange(0, 10000, 100)))
secax.set_xlabel('Investment per position')
```
# Delta ROI
```python
risks, delta_total_yields = np.meshgrid(
np.arange(0.01, 0.1, 0.001),
np.arange(0.01, 0.15, 0.001) )
delta_returns = dR(capital, risks, costs, delta_total_yields)
fig, ax = plt.subplots(figsize=(18, 10))
cs = ax.contour(risks, delta_total_yields, delta_returns,
levels=25, cmap='gist_gray',
)
cs = ax.contourf(risks, delta_total_yields, delta_returns,
levels=25, cmap='jet',
)
cbar = fig.colorbar(cs)
ax.set_xlabel('Risk per position')
ax.set_ylabel('Delta total yield')
ax.set_title('Delta ROI')
secax = ax.secondary_xaxis('top',
functions=(r_to_c, c_to_r), )
secax.get_xaxis().set_major_locator(FixedLocator(
np.arange(0, 1100, 100)))
secax.set_xlabel('Investment per position')
```
# Total ROI
```python
ns, total_yields = np.meshgrid(
np.arange(10, 50, 1),
np.arange(0.0, 0.15, 0.01) )
returns_tot = R_tot2(capital, costs, total_yields, ns)
def n_to_c(x, capital=capital):
return capital / x
def c_to_n(x, capital=capital):
return x * capital
fig, ax = plt.subplots(figsize=(18, 10))
seismic = cm.get_cmap('jet', 25)
newcolors = seismic(np.linspace(0, 1, 25))
newcolors[:14, :] = np.array([0.5, .5, .5, 0.2])
newcmp = ListedColormap(newcolors)
cs = ax.contour(ns, total_yields, returns_tot,
levels=25, cmap='gray',
)
cs = ax.contourf(ns, total_yields, returns_tot,
levels=25, cmap=newcmp,
)
cbar = fig.colorbar(cs)
ax.set_xlabel('No. of positions')
ax.set_ylabel('Total yield')
ax.set_title('Total ROIs')
secax = ax.secondary_xaxis('top',
functions=(n_to_c, n_to_c)) # the inverse function does not work properly here
secax.get_xaxis().set_major_locator(FixedLocator(
np.arange(0, 1100, 100)))
secax.set_xlabel('Investment per position')
```
# Delta Total ROI
```python
ns, delta_total_yields = np.meshgrid(
np.arange(10, 50, 1),
np.arange(0.01, 0.07, 0.001) )
delta_tot_returns = dR_tot2(capital, costs, delta_total_yields,
ns)
fig, ax = plt.subplots(figsize=(18, 10))
cs = ax.contour(ns, delta_total_yields, delta_tot_returns,
levels=25, cmap='gist_gray',
)
cs = ax.contourf(ns, delta_total_yields, delta_tot_returns,
levels=25, cmap='jet',
)
cbar = fig.colorbar(cs)
ax.set_xlabel('No. of positions')
ax.set_ylabel('Average delta total yield')
ax.set_title('Delta total ROI')
secax = ax.secondary_xaxis('top',
functions=(n_to_c, n_to_c), )
secax.get_xaxis().set_major_locator(FixedLocator(
np.arange(0, 1100, 100)))
secax.set_xlabel('Investment per position')
```
|
0850175a6411706197eafa0095c18ae23a163df3
| 749,489 |
ipynb
|
Jupyter Notebook
|
Stock return model.ipynb
|
jkotula89/StockAnalysis
|
b8e56feb34693a042d599a16aa9d5bcfc9cc4f4a
|
[
"MIT"
] | null | null | null |
Stock return model.ipynb
|
jkotula89/StockAnalysis
|
b8e56feb34693a042d599a16aa9d5bcfc9cc4f4a
|
[
"MIT"
] | null | null | null |
Stock return model.ipynb
|
jkotula89/StockAnalysis
|
b8e56feb34693a042d599a16aa9d5bcfc9cc4f4a
|
[
"MIT"
] | null | null | null | 1,411.467043 | 209,440 | 0.957164 | true | 2,837 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.901921 | 0.795658 | 0.71762 |
__label__eng_Latn
| 0.380702 | 0.505604 |
# Finding Roots of Equations
## Calculus review
```python
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy as scipy
from scipy.interpolate import interp1d
```
Let's review the theory of optimization for multivariate functions. Recall that in the single-variable case, extreme values (local extrema) occur at points where the first derivative is zero; however, the vanishing of the first derivative is not a sufficient condition for a local max or min. Generally, we apply the second derivative test to determine whether a candidate point is a max or min (sometimes it fails - if the second derivative either does not exist or is zero). In the multivariate case, the first and second derivatives are *matrices*. In the case of a scalar-valued function on $\mathbb{R}^n$, the first derivative is an $n\times 1$ vector called the *gradient* (denoted $\nabla f$). The second derivative is an $n\times n$ matrix called the *Hessian* (denoted $H$).
Just to remind you, the gradient and Hessian are given by:
$$\nabla f(x) = \left(\begin{matrix}\frac{\partial f}{\partial x_1}\\ \vdots \\\frac{\partial f}{\partial x_n}\end{matrix}\right)$$
$$H = \left(\begin{matrix}
\dfrac{\partial^2 f}{\partial x_1^2} & \dfrac{\partial^2 f}{\partial x_1\,\partial x_2} & \cdots & \dfrac{\partial^2 f}{\partial x_1\,\partial x_n} \\[2.2ex]
\dfrac{\partial^2 f}{\partial x_2\,\partial x_1} & \dfrac{\partial^2 f}{\partial x_2^2} & \cdots & \dfrac{\partial^2 f}{\partial x_2\,\partial x_n} \\[2.2ex]
\vdots & \vdots & \ddots & \vdots \\[2.2ex]
\dfrac{\partial^2 f}{\partial x_n\,\partial x_1} & \dfrac{\partial^2 f}{\partial x_n\,\partial x_2} & \cdots & \dfrac{\partial^2 f}{\partial x_n^2}
\end{matrix}\right)$$
One of the first things to note about the Hessian - it's symmetric. This structure leads to some useful properties in terms of interpreting critical points.
The multivariate analog of the test for a local max or min turns out to be a statement about the gradient and the Hessian matrix. Specifically, a function $f:\mathbb{R}^n\rightarrow \mathbb{R}$ has a critical point at $x$ if $\nabla f(x) = 0$ (where zero is the zero vector!). Furthermore, the second derivative test at a critical point is as follows:
* If $H(x)$ is positive-definite ($\iff$ it has all positive eigenvalues), $f$ has a local minimum at $x$
* If $H(x)$ is negative-definite ($\iff$ it has all negative eigenvalues), $f$ has a local maximum at $x$
* If $H(x)$ has both positive and negative eigenvalues, $f$ has a saddle point at $x$.
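A quick numerical illustration of this test (a small sketch, not tied to any particular problem below): build the Hessian of $f(x, y) = x^2 - y^2$ at the origin and inspect its eigenvalues.
```python
import numpy as np

H = np.array([[2.0, 0.0],
              [0.0, -2.0]])            # Hessian of f(x, y) = x**2 - y**2 (constant here)
eigvals = np.linalg.eigvalsh(H)        # eigvalsh exploits the symmetry of the Hessian

if np.all(eigvals > 0):
    kind = "local minimum"
elif np.all(eigvals < 0):
    kind = "local maximum"
elif (eigvals > 0).any() and (eigvals < 0).any():
    kind = "saddle point"
else:
    kind = "inconclusive (zero eigenvalue)"

print(eigvals, "->", kind)             # [-2.  2.] -> saddle point
```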
If you have $m$ equations with $n$ variables, then the $m \times n$ matrix of first partial derivatives is known as the Jacobian $J(x)$. For example, for two equations $f(x, y)$ and $g(x, y)$, we have
$$
J(x) = \begin{bmatrix}
\frac{\partial f}{\partial x} & \frac{\partial f}{\partial y} \\
\frac{\partial g}{\partial x} & \frac{\partial g}{\partial y}
\end{bmatrix}
$$
We can now express the multivariate form of Taylor polynomials in a familiar format.
$$
f(x + \delta x) = f(x) + \delta x \cdot J(x) + \frac{1}{2} \delta x^T H(x) \delta x + \mathcal{O}(\delta x^3)
$$
## Main Issues in Root Finding in One Dimension
* Separating close roots
* Numerical Stability
* Rate of Convergence
* Continuity and Differentiability
## Bisection Method
The bisection method is one of the simplest methods for finding zeros of a non-linear function. It is guaranteed to find a root - but it can be slow. The main idea comes from the intermediate value theorem: If $f(a)$ and $f(b)$ have different signs and $f$ is continuous, then $f$ must have a zero between $a$ and $b$. We evaluate the function at the midpoint, $c = \frac12(a+b)$. $f(c)$ is either zero, has the same sign as $f(a)$ or the same sign as $f(b)$. Suppose $f(c)$ has the same sign as $f(a)$ (as pictured below). We then repeat the process on the interval $[c,b]$.
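A bare-bones implementation of this idea (a sketch only; `scipy.optimize.bisect` provides a robust version):
```python
def bisect(f, a, b, tol=1e-10, maxiter=100):
    """Bracketed bisection: f(a) and f(b) must have opposite signs."""
    if f(a) * f(b) > 0:
        raise ValueError("root is not bracketed")
    for _ in range(maxiter):
        c = 0.5 * (a + b)
        if f(c) == 0 or 0.5 * (b - a) < tol:
            break
        if f(a) * f(c) < 0:
            b = c            # the root lies in [a, c]
        else:
            a = c            # the root lies in [c, b]
    return 0.5 * (a + b)

# For the cubic plotted below, bisect(lambda x: x**3 + 4*x**2 - 3, -3.0, -0.5) returns about -1.0.
```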
```python
def f(x):
return x**3 + 4*x**2 -3
x = np.linspace(-3.1, 0, 100)
plt.plot(x, x**3 + 4*x**2 -3)
a = -3.0
b = -0.5
c = 0.5*(a+b)
plt.text(a,-1,"a")
plt.text(b,-1,"b")
plt.text(c,-1,"c")
plt.scatter([a,b,c], [f(a), f(b),f(c)], s=50, facecolors='none')
plt.scatter([a,b,c], [0,0,0], s=50, c='red')
xaxis = plt.axhline(0)
pass
```
```python
x = np.linspace(-3.1, 0, 100)
plt.plot(x, x**3 + 4*x**2 -3)
d = 0.5*(b+c)
plt.text(d,-1,"d")
plt.text(b,-1,"b")
plt.text(c,-1,"c")
plt.scatter([d,b,c], [f(d), f(b),f(c)], s=50, facecolors='none')
plt.scatter([d,b,c], [0,0,0], s=50, c='red')
xaxis = plt.axhline(0)
pass
```
We can terminate the process whenever the function evaluated at the new midpoint is 'close enough' to zero. This method is an example of what are known as 'bracketed methods'. This means the root is 'bracketed' by the end-points (it is somewhere in between). Another class of methods are 'open methods' - the root need not be somewhere in between the end-points (but it usually needs to be close!)
## Secant Method
The secant method also begins with two initial points, but without the constraint that the function values are of opposite signs. We use the secant line to extrapolate the next candidate point.
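In code, the update is simply the zero of the line through the last two iterates (a sketch; `scipy.optimize.newton` called without a derivative uses the secant method, as shown later in this notebook):
```python
def secant(f, x0, x1, tol=1e-10, maxiter=50):
    """Open secant iteration starting from two (not necessarily bracketing) points."""
    for _ in range(maxiter):
        f0, f1 = f(x0), f(x1)
        x2 = x1 - f1 * (x1 - x0) / (f1 - f0)   # root of the secant line
        if abs(x2 - x1) < tol:
            return x2
        x0, x1 = x1, x2
    return x1
```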
```python
def f(x):
return (x**3-2*x+7)/(x**4+2)
x = np.arange(-3,5, 0.1);
y = f(x)
p1=plt.plot(x, y)
plt.xlim(-3, 4)
plt.ylim(-.5, 4)
plt.xlabel('x')
plt.axhline(0)
t = np.arange(-10, 5., 0.1)
x0=-1.2
x1=-0.5
xvals = []
xvals.append(x0)
xvals.append(x1)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--']
while (notconverge==1 and count < 3):
slope=(f(xvals[count+1])-f(xvals[count]))/(xvals[count+1]-xvals[count])
intercept=-slope*xvals[count+1]+f(xvals[count+1])
plt.plot(t, slope*t + intercept, cols[count])
nextval = -intercept/slope
if abs(f(nextval)) < 0.001:
notconverge=0
else:
xvals.append(nextval)
count = count+1
plt.show()
```
The secant method has the advantage of fast convergence. While the bisection method has a linear convergence rate (i.e. the error goes to zero at the rate that $h(x) = x$ goes to zero), the secant method has a convergence rate that is faster than linear, but not quite quadratic (i.e. $\sim x^\alpha$, where $\alpha = \frac{1+\sqrt{5}}2 \approx 1.6$); however, the trade-off is that the secant method is not guaranteed to find a root in the brackets.
A variant of the secant method is known as the **method of false position** (*regula falsi*). Conceptually it is identical to the secant method, except that instead of always using the last two values of $x$ for linear interpolation, it chooses the two most recent values that maintain the bracket property (i.e. $f(a) f(b) < 0$). It is slower than the secant method but, like bisection, it is safe.
## Newton-Raphson Method
We want to find the value $\theta$ so that some (differentiable) function $g(\theta)=0$.
Idea: start with a guess, $\theta_0$. Let $\tilde{\theta}$ denote the value of $\theta$ for which $g(\theta) = 0$ and define $h = \tilde{\theta} - \theta_0$. Then:
$$
\begin{eqnarray*}
g(\tilde{\theta}) &=& 0 \\\\
&=&g(\theta_0 + h) \\\\
&\approx& g(\theta_0) + hg'(\theta_0)
\end{eqnarray*}
$$
This implies that
$$ h\approx -\frac{g(\theta_0)}{g'(\theta_0)}$$
So that
$$\tilde{\theta}\approx \theta_0 - \frac{g(\theta_0)}{g'(\theta_0)}$$
Thus, we set our next approximation:
$$\theta_1 = \theta_0 - \frac{g(\theta_0)}{g'(\theta_0)}$$
and we have developed an iterative procedure with:
$$\theta_n = \theta_{n-1} - \frac{g(\theta_{n-1})}{g'(\theta_{n-1})}$$
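The iteration translates directly into code (a minimal sketch without any safeguards):
```python
def newton_raphson(g, gprime, theta0, tol=1e-10, maxiter=50):
    """Newton-Raphson iteration for a root of g, starting from theta0."""
    theta = theta0
    for _ in range(maxiter):
        step = g(theta) / gprime(theta)
        theta -= step
        if abs(step) < tol:
            break
    return theta
```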
#### Example
Let $$g(x) = \frac{x^3-2x+7}{x^4+2}$$
```python
x = np.arange(-5,5, 0.1);
y = (x**3-2*x+7)/(x**4+2)
p1=plt.plot(x, y)
plt.xlim(-4, 4)
plt.ylim(-.5, 4)
plt.xlabel('x')
plt.axhline(0)
plt.title('Example Function')
plt.show()
```
```python
x = np.arange(-5,5, 0.1);
y = (x**3-2*x+7)/(x**4+2)
p1=plt.plot(x, y)
plt.xlim(-4, 4)
plt.ylim(-.5, 4)
plt.xlabel('x')
plt.axhline(0)
plt.title('Good Guess')
t = np.arange(-5, 5., 0.1)
x0=-1.5
xvals = []
xvals.append(x0)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--','c--','m--','k--','w--']
while (notconverge==1 and count < 6):
funval=(xvals[count]**3-2*xvals[count]+7)/(xvals[count]**4+2)
slope=-((4*xvals[count]**3 *(7 - 2 *xvals[count] + xvals[count]**3))/(2 + xvals[count]**4)**2) + (-2 + 3 *xvals[count]**2)/(2 + xvals[count]**4)
intercept=-slope*xvals[count]+(xvals[count]**3-2*xvals[count]+7)/(xvals[count]**4+2)
plt.plot(t, slope*t + intercept, cols[count])
nextval = -intercept/slope
if abs(funval) < 0.01:
notconverge=0
else:
xvals.append(nextval)
count = count+1
```
From the graph, we see the zero is near -2. We make an initial guess of $$x=-1.5$$
We have made an excellent choice for our first guess, and we can see rapid convergence!
```python
funval
```
0.007591996330867034
In fact, the Newton-Raphson method converges quadratically. However, NR (and the secant method) have a fatal flaw:
```python
x = np.arange(-5,5, 0.1);
y = (x**3-2*x+7)/(x**4+2)
p1=plt.plot(x, y)
plt.xlim(-4, 4)
plt.ylim(-.5, 4)
plt.xlabel('x')
plt.axhline(0)
plt.title('Bad Guess')
t = np.arange(-5, 5., 0.1)
x0=-0.5
xvals = []
xvals.append(x0)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--','c--','m--','k--','w--']
while (notconverge==1 and count < 6):
funval=(xvals[count]**3-2*xvals[count]+7)/(xvals[count]**4+2)
slope=-((4*xvals[count]**3 *(7 - 2 *xvals[count] + xvals[count]**3))/(2 + xvals[count]**4)**2) + (-2 + 3 *xvals[count]**2)/(2 + xvals[count]**4)
intercept=-slope*xvals[count]+(xvals[count]**3-2*xvals[count]+7)/(xvals[count]**4+2)
plt.plot(t, slope*t + intercept, cols[count])
nextval = -intercept/slope
if abs(funval) < 0.01:
notconverge = 0
else:
xvals.append(nextval)
count = count+1
```
We have stumbled on the horizontal asymptote. The algorithm fails to converge.
### Convergence Rate
The following is a derivation of the convergence rate of the NR method:
Suppose $x_k \; \rightarrow \; x^*$ and $g'(x^*) \neq 0$. Then we may write:
$$x_k = x^* + \epsilon_k$$.
Now expand $g$ at $x^*$:
$$g(x_k) = g(x^*) + g'(x^*)\epsilon_k + \frac12 g''(x^*)\epsilon_k^2 + ...$$
$$g'(x_k)=g'(x^*) + g''(x^*)\epsilon_k$$
We have that
\begin{eqnarray}
\epsilon_{k+1} &=& \epsilon_k + \left(x_{k+1}-x_k\right)\\
&=& \epsilon_k -\frac{g(x_k)}{g'(x_k)}\\
&\approx & \epsilon_k - \frac{g'(x^*)\epsilon_k + \frac12g''(x^*)\epsilon_k^2}{g'(x^*)+g''(x^*)\epsilon_k}\\
&\approx & \frac{g''(x^*)}{2g'(x^*)}\epsilon_k^2
\end{eqnarray}
## Gauss-Newton
For 1D, the Newton method is
$$
x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)}
$$
We can generalize to $k$ dimensions by
$$
x_{n+1} = x_n - J^{-1} f(x_n)
$$
where $x$ and $f(x)$ are now vectors, and $J^{-1}$ is the inverse Jacobian matrix. In general, the Jacobian is not a square matrix, and we use the generalized inverse $(J^TJ)^{-1}J^T$ instead, giving
$$
x_{n+1} = x_n - (J^TJ)^{-1}J^T f(x_n)
$$
In multivariate nonlinear estimation problems, we can find the vector of parameters $\beta$ by minimizing the residuals $r(\beta)$,
$$
\beta_{n+1} = \beta_n - (J^TJ)^{-1}J^T r(\beta_n)
$$
where the entries of the Jacobian matrix $J$ are
$$
J_{ij} = \frac{\partial r_i(\beta)}{\partial \beta_j}
$$
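A small self-contained sketch of the iteration on a made-up exponential fit (the data and model are illustrative only, not from the original text):
```python
import numpy as np

# Synthetic data for the model y = b0 * exp(b1 * t)
rng = np.random.default_rng(0)
t = np.linspace(0, 1, 20)
y = 2.0 * np.exp(1.5 * t) + 0.01 * rng.standard_normal(t.size)

beta = np.array([1.0, 1.0])                       # initial guess
for _ in range(20):
    pred = beta[0] * np.exp(beta[1] * t)
    r = pred - y                                  # residual vector
    J = np.column_stack([np.exp(beta[1] * t),                  # d r / d b0
                         beta[0] * t * np.exp(beta[1] * t)])   # d r / d b1
    step = np.linalg.solve(J.T @ J, J.T @ r)      # Gauss-Newton step
    beta = beta - step
    if np.linalg.norm(step) < 1e-12:
        break

print(beta)                                       # should be close to (2.0, 1.5)
```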
## Inverse Quadratic Interpolation
Inverse quadratic interpolation is a type of polynomial interpolation. Polynomial interpolation simply means we find the polynomial of least degree that fits a set of points. In quadratic interpolation, we use three points, and find the quadratic polynomial that passes through those three points.
```python
def f(x):
return (x - 2) * x * (x + 2)**2
x = np.arange(-5,5, 0.1);
plt.plot(x, f(x))
plt.xlim(-3.5, 0.5)
plt.ylim(-5, 16)
plt.xlabel('x')
plt.axhline(0)
plt.title("Quadratic Interpolation")
#First Interpolation
x0=np.array([-3,-2.5,-1.0])
y0=f(x0)
f2 = interp1d(x0, y0,kind='quadratic')
#Plot parabola
xs = np.linspace(-3, -1, num=10000, endpoint=True)
plt.plot(xs, f2(xs))
#Plot first triplet
plt.plot(x0, f(x0),'ro');
plt.scatter(x0, f(x0), s=50, c='yellow');
#New x value
xnew=xs[np.where(abs(f2(xs))==min(abs(f2(xs))))]
plt.scatter(np.append(xnew,xnew), np.append(0,f(xnew)), c='black');
#New triplet
x1=np.append([-3,-2.5],xnew)
y1=f(x1)
f2 = interp1d(x1, y1,kind='quadratic')
#New Parabola
xs = np.linspace(min(x1), max(x1), num=100, endpoint=True)
plt.plot(xs, f2(xs))
xnew=xs[np.where(abs(f2(xs))==min(abs(f2(xs))))]
plt.scatter(np.append(xnew,xnew), np.append(0,f(xnew)), c='green');
```
So that's the idea behind quadratic interpolation. Use a quadratic approximation, find the zero of interest, use that as a new point for the next quadratic approximation.
Inverse quadratic interpolation means we do quadratic interpolation on the *inverse function*. So, if we are looking for a root of $f$, we approximate $f^{-1}(x)$ using quadratic interpolation. This just means fitting $x$ as a function of $y$, so that the quadratic is turned on its side and we are guaranteed that it cuts the x-axis somewhere. Note that the secant method can be viewed as a *linear* interpolation on the inverse of $f$. We can write:
$$f^{-1}(y) = \frac{(y-f(x_n))(y-f(x_{n-1}))}{(f(x_{n-2})-f(x_{n-1}))(f(x_{n-2})-f(x_{n}))}x_{n-2} + \frac{(y-f(x_n))(y-f(x_{n-2}))}{(f(x_{n-1})-f(x_{n-2}))(f(x_{n-1})-f(x_{n}))}x_{n-1} + \frac{(y-f(x_{n-2}))(y-f(x_{n-1}))}{(f(x_{n})-f(x_{n-2}))(f(x_{n})-f(x_{n-1}))}x_{n}$$
We use the above formula to find the next guess $x_{n+1}$ for a zero of $f$ (so $y=0$):
$$x_{n+1} = \frac{f(x_n)f(x_{n-1})}{(f(x_{n-2})-f(x_{n-1}))(f(x_{n-2})-f(x_{n}))}x_{n-2} + \frac{f(x_n)f(x_{n-2})}{(f(x_{n-1})-f(x_{n-2}))(f(x_{n-1})-f(x_{n}))}x_{n-1} + \frac{f(x_{n-2})f(x_{n-1})}{(f(x_{n})-f(x_{n-2}))(f(x_{n})-f(x_{n-1}))}x_{n}$$
We aren't so much interested in deriving this as we are understanding the procedure:
```python
x = np.arange(-5,5, 0.1);
plt.plot(x, f(x))
plt.xlim(-3.5, 0.5)
plt.ylim(-5, 16)
plt.xlabel('x')
plt.axhline(0)
plt.title("Inverse Quadratic Interpolation")
#First Interpolation
x0=np.array([-3,-2.5,1])
y0=f(x0)
f2 = interp1d(y0, x0,kind='quadratic')
#Plot parabola
xs = np.linspace(min(f(x0)), max(f(x0)), num=10000, endpoint=True)
plt.plot(f2(xs), xs)
#Plot first triplet
plt.plot(x0, f(x0),'ro');
plt.scatter(x0, f(x0), s=50, c='yellow');
```
Convergence rate is approximately $1.8$. The advantage of the inverse method is that we will *always* have a real root (the parabola will always cross the x-axis). A serious disadvantage is that the initial points must be very close to the root or the method may not converge.
That is why it is usually used in conjunction with other methods.
## Brentq Method
Brent's method is a combination of bisection, secant and inverse quadratic interpolation. Like bisection, it is a 'bracketed' method (it starts with points $(a,b)$ such that $f(a)f(b)<0$).
Roughly speaking, the method begins by using the secant method to obtain a third point $c$, then uses inverse quadratic interpolation to generate the next possible root. Without going into too much detail, the algorithm attempts to assess when interpolation will go awry, and if so, performs a bisection step. Also, it has certain criteria to reject an iterate. If that happens, the next step will be linear interpolation (secant method).
To find zeros, first plot the function and then call `scipy.optimize.brentq` on an interval that brackets each root:
```python
x = np.arange(-5,5, 0.1);
p1=plt.plot(x, f(x))
plt.xlim(-4, 4)
plt.ylim(-10, 20)
plt.xlabel('x')
plt.axhline(0)
pass
```
```python
scipy.optimize.brentq(f,-1,.5)
```
-7.864845203343107e-19
```python
scipy.optimize.brentq(f,.5,3)
```
2.0
## Roots of polynomials
One method for finding roots of polynomials converts the problem into an eigenvalue one by using the **companion matrix** of a polynomial. For a polynomial
$$
p(x) = a_0 + a_1x + a_2 x^2 + \ldots + a_m x^m
$$
the companion matrix is
$$
A = \begin{bmatrix}
-a_{m-1}/a_m & -a_{m-2}/a_m & \ldots & -a_1/a_m & -a_0/a_m \\
1 & 0 & \ldots & 0 & 0 \\
0 & 1 & \ldots & 0 & 0 \\
\vdots & \vdots & \ddots & \vdots & \vdots \\
0 & 0 & \ldots & 1 & 0
\end{bmatrix}
$$
The characteristic polynomial of the companion matrix is $\lvert \lambda I - A \rvert$, which expands (up to the constant factor $1/a_m$) to
$$
a_0 + a_1 \lambda + a_2 \lambda^2 + \ldots + a_m \lambda^m
$$
In other words, the roots we are seeking are the eigenvalues of the companion matrix.
For example, to find the cube roots of unity, we solve $x^3 - 1 = 0$. The `roots` function uses the companion matrix method to find roots of polynomials.
```python
# Coefficients of $x^3, x^2, x^1, x^0$
poly = np.array([1, 0, 0, -1])
```
```python
x = np.roots(poly)
x
```
array([-0.5+0.8660254j, -0.5-0.8660254j, 1. +0. j])
```python
plt.scatter([z.real for z in x], [z.imag for z in x])
theta = np.linspace(0, 2*np.pi, 100)
u = np.cos(theta)
v = np.sin(theta)
plt.plot(u, v, ':')
plt.axis('square')
pass
```
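The same roots can be recovered by building the companion matrix explicitly and taking its eigenvalues — a sketch of what `np.roots` does internally:
```python
a = np.array([1.0, 0.0, 0.0, -1.0])   # coefficients a_m, ..., a_0 of x**3 - 1
m = len(a) - 1
A = np.zeros((m, m))
A[0, :] = -a[1:] / a[0]               # first row: -a_{m-1}/a_m, ..., -a_0/a_m
A[1:, :-1] = np.eye(m - 1)            # ones on the subdiagonal
np.linalg.eigvals(A)                  # the three cube roots of unity again
```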
## Using `scipy.optimize`
### Finding roots of univariate equations
```python
def f(x):
return x**3-3*x+1
```
```python
x = np.linspace(-3,3,100)
plt.axhline(0, c='red')
plt.plot(x, f(x))
pass
```
```python
from scipy.optimize import brentq, newton
```
#### `brentq` is the recommended method
```python
brentq(f, -3, 0), brentq(f, 0, 1), brentq(f, 1,3)
```
(-1.8793852415718166, 0.3472963553337031, 1.532088886237956)
#### Secant method
```python
newton(f, -3), newton(f, 0), newton(f, 3)
```
(-1.8793852415718169, 0.34729635533385395, 1.5320888862379578)
#### Newton-Raphson method
```python
fprime = lambda x: 3*x**2 - 3
newton(f, -3, fprime), newton(f, 0, fprime), newton(f, 3, fprime)
```
(-1.8793852415718166, 0.34729635533386066, 1.532088886237956)
### Finding fixed points
Finding the fixed points of a function $g$, i.e. points where $g(x) = x$, is the same as finding the roots of $g(x) - x$. However, specialized algorithms also exist - e.g. `scipy.optimize.fixed_point`.
```python
from scipy.optimize import fixed_point
```
```python
x = np.linspace(-3,3,100)
plt.plot(x, f(x), color='red')
plt.plot(x, x)
pass
```
```python
fixed_point(f, 0), fixed_point(f, -3), fixed_point(f, 3)
```
(array(0.25410169), array(-2.11490754), array(1.86080585))
### Multivariate roots and fixed points
Use `root` or `fsolve` to solve systems of non-linear equations.
```python
from scipy.optimize import root, fsolve
```
Suppose we want to solve a system of $m$ equations in $n$ unknowns
\begin{align}
f(x_0, x_1) &= x_1 - 3x_0(x_0+1)(x_0-1) \\
g(x_0, x_1) &= 0.25 x_0^2 + x_1^2 - 1
\end{align}
Note that the equations are non-linear and there can be multiple solutions. These can be interpreted as fixed points of a system of differential equations.
```python
def f(x):
return [x[1] - 3*x[0]*(x[0]+1)*(x[0]-1),
.25*x[0]**2 + x[1]**2 - 1]
```
```python
sol = root(f, (0.5, 0.5))
sol.x
```
array([1.11694147, 0.82952422])
```python
fsolve(f, (0.5, 0.5))
```
array([1.11694147, 0.82952422])
```python
r0 = root(f,[1,1])
r1 = root(f,[0,1])
r2 = root(f,[-1,1.1])
r3 = root(f,[-1,-1])
r4 = root(f,[2,-0.5])
roots = np.c_[r0.x, r1.x, r2.x, r3.x, r4.x]
```
```python
Y, X = np.mgrid[-3:3:100j, -3:3:100j]
U = Y - 3*X*(X + 1)*(X-1)
V = .25*X**2 + Y**2 - 1
plt.streamplot(X, Y, U, V, color=U, linewidth=2, cmap=plt.cm.autumn)
plt.scatter(roots[0], roots[1], s=50, c='none', edgecolors='k', linewidth=2)
pass
```
#### We can also give the Jacobian
```python
def jac(x):
    # df/dx for f = [x1 - 3*x0*(x0+1)*(x0-1), 0.25*x0**2 + x1**2 - 1]
    return [[3 - 9*x[0]**2, 1], [0.5*x[0], 2*x[1]]]
```
```python
sol = root(f, (0.5, 0.5), jac=jac)
sol.x, sol.fun
```
(array([1.11694147, 0.82952422]), array([-4.23383550e-12, -3.31612515e-12]))
#### Check that values found are really roots
```python
np.allclose(f(sol.x), 0)
```
True
#### Starting from other initial conditions, different roots may be found
```python
sol = root(f, (12,12))
sol.x
```
array([ 0.77801314, -0.92123498])
```python
np.allclose(f(sol.x), 0)
```
True
```python
```
--- source file: notebooks/S09A_Root_Finding.ipynb | repo: ZhechangYang/STA663 | license: BSD-3-Clause ---
# Energy storage convex loss model
Objective: explore the *possibility* of using convex loss models (i.e. $P_{losses}(...)$ below):
$$E_b(k+1) = E_b(k) + (P_b(k) - P_{losses}(P_b, E_b)) \Delta_t$$
Reminder: to preserve convexity, we need to replace the equality constraint with an **inequality**:
$$ E_b(k+1) ≤ ...$$
Next step: explore the interest of such models in some ESS optimization use cases:
- arbitrage
- with an optimal power split between two batteries with different loss curves?
Also, maybe the same reasoning applies to model battery aging?
$$A(k+1) = A(k) + f(P,E)$$
PH, May 2020
```python
import sympy
from sympy import symbols, Function, Matrix, simplify
```
```python
sympy.init_printing()
```
```python
e = symbols('e', positive=True) # we are interested in the positive domain e = SoE - SoE_singular (below)
p = symbols('p')
Re = Function('R')(e)
Rep = Function('R')(e,p)
```
Losses: Joule model, dependent on SoE
```python
losses = Re*p**2
losses
```
Gradient
```python
x = [p, e]
grad = [losses.diff(xi) for xi in x]
grad
```
Hessian
```python
H = [[losses.diff(xi).diff(xj) for xi in x] for xj in x]
H = Matrix(H)
H
```
Eigen values
```python
λ1,λ2 = H.eigenvals().keys()
λ1
```
```python
λ2
```
For positivity, the first eigenvalue (λ1) may be critical.
**TODO**: solve $λ ≥ 0$ for the general function R(e)
## Try with an affine R(e) function
```python
r0, r1 = symbols('r_0 r_1')
Rlin = r0 + r1*e
Rlin
```
```python
λ1_lin = simplify(λ1.subs(Re, Rlin))
λ1_lin
```
```python
simplify(H.subs(Re, Rlin))
```
```python
simplify(H.subs(Re, Rlin)).eigenvals()
```
## Try with a negative power R(e) function
$$R(e) = \frac{1}{e^a}$$
Conclusion as of May 14, 2020 (to be proved with a better pen-and-paper analysis of the eigenvalue):
- convex function for $a \in [0,1]$
- *quasi*-convex but not convex for $a \in [1,2]$ (cf. §3.4 in Boyd 2004 book)
- not even quasi convex for $a≥2$ (sublevel sets not convex)
Definition:
A function $f : \mathbf{R}^n \to \mathbf{R}$ is called quasiconvex (or unimodal) if its domain and all its *sublevel sets*
$$ S_\alpha = \{x \in \mathbf{dom} f \;|\; f(x) \leq \alpha \},$$
for $\alpha \in \mathbf{R}$, are convex.
```python
a = symbols('a')
Rpow = 1/e**a
Rpow
```
Hessian with the power R(e) function
```python
Hpow = simplify(H.subs(Re, Rpow))
Hpow
```
```python
λ1pow,_ = Hpow.eigenvals().keys()
λ1pow
```
Subcase: $a=1$:
$$λ = 0$$
although sympy doesn't find it (unless using assumption $p≥0$)
```python
simplify(
(λ1pow * e**3).subs(a,1)
)
```
Subcase: $a=2$
```python
simplify(
(λ1pow*e**4).subs(a,2)
)
```
Numerical value:
Conclusion: for $a=2$, λ is **always negative** (cf. Xournal++ doc). Also, see the numerical value at the point (0.5, 0.5) below.
TODO: prove that λ≥0 for a≤1 and λ≤0 for a≥1
```python
λ1pow.subs({
e:0.5,
p:0.5,
a:1.5,
})
```
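To complement the symbolic analysis, here is a small numerical sketch (an addition, not part of the original analysis) that scans a grid of $(e, p)$ points and reports the most negative Hessian eigenvalue of $p^2/e^a$ for a few exponents; a non-negative minimum over the grid is consistent with convexity on that region.
```python
import numpy as np

def min_hessian_eig(a, n=40):
    """Smallest eigenvalue of the Hessian of p**2 / e**a over a grid of (e, p)."""
    worst = np.inf
    for e_ in np.linspace(0.05, 1.0, n):
        for p_ in np.linspace(-1.0, 1.0, n):
            # Hessian of p^2 * e^(-a): entries d2/dp2, d2/dpde, d2/de2
            H_ = np.array([
                [2 * e_**(-a),               -2 * a * p_ * e_**(-a - 1)],
                [-2 * a * p_ * e_**(-a - 1),  a * (a + 1) * p_**2 * e_**(-a - 2)],
            ])
            worst = min(worst, np.linalg.eigvalsh(H_).min())
    return worst

{a_: round(min_hessian_eig(a_), 4) for a_ in [0.5, 1.0, 1.5, 2.0]}
```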
## Plot
```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
```
```python
%matplotlib inline
```
### Contour and 3D plots
```python
def losses_contour(a):
n = 100
x = np.linspace(1e-3, 1, n) # e
y = np.linspace(-1, 1, n) # p
X, Y = np.meshgrid(x, y)
Z = Y**2/X**a
fig = plt.figure(figsize=(8,4))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122, projection='3d')
levels = [0.125,0.25, 0.5,1, 2]
CS = ax1.contour(X, Y, Z, levels)
ax1.clabel(CS, inline=1, fontsize=10)
ax1.grid()
x = np.linspace(1e-3, 1, n) # e
y = np.linspace(-1, 1, n) # p
X, Y = np.meshgrid(x, y)
Z = Y**2/X**a
Z[Z>2.1] = np.nan
ax2.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
ax2.set_zlim(0,2)
ax1.set(
title = f'losses contours, a={a}',
xlabel = 'SoE',
ylabel = 'Power',
)
ax2.set(
title = f'losses, a={a}',
xlabel = 'SoE',
ylabel = 'Power',
)
fig.tight_layout()
losses_contour(0.5)
plt.savefig('losses_3D_0.5.png', dpi=200)
losses_contour(1.0)
plt.savefig('losses_3D_1.0.png', dpi=200)
losses_contour(1.5)
plt.savefig('losses_3D_1.5.png', dpi=200)
losses_contour(2.0)
plt.savefig('losses_3D_2.0.png', dpi=200)
```
Non convex sublevel sets: $a > 2$
```python
losses_contour(3.0)
plt.savefig('losses_3D_3.0.png', dpi=200)
```
Interactive plot
```python
%matplotlib qt5
```
```python
losses_contour(2)
```
```python
%matplotlib inline
```
### 2D cuts on lines
because of the property that a convex function evaluated on a line is a 1D convex function
```python
def plot_line_cut(a, ang, e0=0.5, p0=0.5, r = 0.45):
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(8,4))
n = 100
x = np.linspace(1e-3, 1, n) # e
y = np.linspace(-1, 1, n) # p
X, Y = np.meshgrid(x, y)
Z = Y**2/X**a
levels = [0.125,0.25, 0.5,1, 2]
CS = ax1.contour(X, Y, Z, levels)
ax1.clabel(CS, inline=1, fontsize=10)
t = np.linspace(-r, r, 300)
x = e0 + t*np.cos(ang)
y = p0 + t*np.sin(ang)
l = y**2/x**a
ax1.plot(x,y, 'k:')
ax1.plot(e0,p0, 'ko')
ax2.plot(t, l)
ax2.plot(0, p0**2/e0**a, 'ko')
ax1.set(
title = f'losses contours, a={a}',
xlabel = 'SoE',
ylabel = 'Power',
)
ax1.grid()
ax2.set(
title = f'losses cut, a={a}',
xlabel = 't',
)
ax2.grid()
fig.tight_layout()
```
```python
plot_line_cut(0.5, 0.8)
plt.savefig('losses_cut_0.5.png', dpi=200)
```
```python
plot_line_cut(1.0, 0.8)
plt.savefig('losses_cut_1.0.png', dpi=200)
```
Concavity for $a=1.5$
```python
plot_line_cut(1.5, 0.8)
plt.savefig('losses_cut_1.5.png', dpi=200)
```
Non convexity for $a=2$ (although sublevel sets are convex)
```python
plot_line_cut(2, 0.9)
plt.savefig('losses_cut_2.0.png', dpi=200)
```
```python
```
--- source file: ESS Convex loss model.ipynb | repo: pierre-haessig/convex-storage-loss | license: CC-BY-4.0 ---
# Reverchon model
The Reverchon model results from writing the mass balances over the extraction bed under the assumption of plug flow inside the bed, neglecting axial dispersion, and taking the fluid flow rate, pressure and temperature to be constant.
These mass balances are shown in equations 1 and 2; they are differential equations solved with the initial conditions of equation 3.
\begin{equation}
uV \frac{\partial c}{\partial h}+eV \frac{\partial c}{\partial t}+ AK(q-q^*) = 0
\end{equation}
\begin{equation}
(1-e)V \frac{\partial q}{\partial t}= -AK(q-q^*)
\end{equation}
\begin{eqnarray}
c = 0, & q = q_0 & \text{at } t = 0; \qquad c = 0 \ \text{at}\ h = 0
\end{eqnarray}
The phase equilibrium is modelled by assuming that Henry's law holds, as shown in equation 4:
\begin{equation}
c = kq^*
\end{equation}
A common simplification is to approximate the supercritical extraction bed as a series of $n$ subdivisions, which turns the mass balances into a system of $2n$ ordinary differential equations, as shown in the equations below.
\begin{equation}
D_n = \left( \frac{v}{n} \right) \left( e \left( \frac{dC_n}{dt} \right) +(1-e) \left( \frac{dq_n}{dt} \right) \right)
\end{equation}
\begin{equation}
\left(\frac{W}{p} \right)
\left( C_n- C_{n-1} \right) + D_n = 0
\end{equation}
\begin{equation}
\left( \frac{dq_n}{dt} \right)= - \left(\frac{1}{t_i}\right) \left(q_n-q_n^* \right)
\end{equation}
initial conditions:
\begin{eqnarray}
C_n = C_n^0, & q_n = q_0 & \text{at } t = 0
\end{eqnarray}
```python
```
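As an illustration only (not part of the original document), the discretized balances can be integrated with `scipy.integrate.solve_ivp`; every parameter value below (`n`, `eps`, `V`, `W`, `rho`, `k`, `ti`, `q0`) is a placeholder chosen just to make the sketch run, not data for a real extraction.
```python
import numpy as np
from scipy.integrate import solve_ivp

n, eps, V, W, rho, k, ti, q0 = 10, 0.4, 1e-3, 1e-4, 300.0, 0.2, 100.0, 0.05  # placeholder values

def rhs(t, y):
    C, q = y[:n], y[n:]
    dq = -(q - C / k) / ti                      # dq_n/dt = -(q_n - q_n*)/t_i with q* = c/k (Henry's law)
    dC = np.empty(n)
    for i in range(n):
        C_prev = 0.0 if i == 0 else C[i - 1]    # fresh solvent enters the first subdivision
        # (W/rho)(C_i - C_{i-1}) + (V/n)(eps*dC_i + (1-eps)*dq_i) = 0, solved for dC_i
        dC[i] = (-(W / rho) * (C[i] - C_prev) / (V / n) - (1 - eps) * dq[i]) / eps
    return np.concatenate([dC, dq])

y0 = np.concatenate([np.zeros(n), q0 * np.ones(n)])
sol = solve_ivp(rhs, (0.0, 3600.0), y0)
sol.y[:n, -1]   # fluid-phase concentrations in each subdivision at the final time
```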
## Nomenclature
$a_0$ Surface, $\frac{m^2}{m^3}$
```python
```
```python
```
--- source file: docs/source/reverchon_doc.ipynb | repo: pysg/sepya | license: MIT ---
```python
# Models from Introduction to Algorithmic Marketing
# https://algorithmicweb.wordpress.com/
#
# Markov chain-based LTV model predicts customer lifetime value
# using the probabilities of transition between different customer states
```
```python
%matplotlib inline
import sympy as sy
import numpy as np
import matplotlib.pyplot as plt
```
```python
p1 = 0.7 # Probability of a purchase after 1 month of silence
p2 = 0.5 # Probability of a purchase after 2 months of silence
p3 = 0.3 # Probability of a purchase after 3 months of silence
r = 100 # Revenue associated with one purchase, $
c = 5 # Cost of one promotion
d = 0.001 # Inflation rate for one month
projection_horizon = 10 # LTV projection horizon, months
```
```python
# Transition probabilities
P = np.matrix([
[ p1, 1-p1, 0, 0 ],
[ p2, 0, 1-p2, 0 ],
[ p3, 0, 0, 1-p3 ],
[ 0, 0, 0, 0]
])
# Expected revenue
G = np.matrix([
[ r-c ], # If customer goes to the initial state (purchases), we get the revenue minus promotion cost
[ -c ], # If customer is silent for one month, we send promotion and lose c dollars
[ -c ], # If customer is silent for two months, we send promotion and lose c dollars
[ 0 ] # If customer is silent for three month, we stop to send promotions
])
```
```python
# Calculate and visualize the LTV. Each line on the plot corresponds to a customer state
#
# For example, the topmost line corresponds to the first state (just purchased)
# It indicates that the cumulative revenue from this customer is likely to grow over
# the next projection_horizon months.
#
# The bottommost line corresponds to customers who are silent for 3 months.
# These customers are considered lost, so this line is constantly zero.
ltv_dynamics = [
sum(
(1/((1+d)**(t-1))) * (P**t)*G
for t in range(1, T+1)
) for T in range(1, projection_horizon)
]
plt.plot(np.concatenate(ltv_dynamics, axis=1).T);
```
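Since the chain eventually absorbs every customer into the silent state (whose row in `P` is zero), the discounted series converges, and the infinite-horizon LTV per starting state has the closed form $\mathrm{LTV} = P\,(I - P/(1+d))^{-1} G$. A small added sketch to compare with the truncated projection above:
```python
# Infinite-horizon LTV: sum over t >= 1 of P^t G / (1+d)^(t-1), summed as a geometric series
ltv_infinite = P * np.linalg.inv(np.identity(4) - P / (1 + d)) * G
ltv_infinite
```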
--- source file: promotions/markov-ltv.ipynb | repo: sayandesarkar/algorithmic-marketing-examples | license: Apache-2.0 ---
```python
%pylab inline
```
Populating the interactive namespace from numpy and matplotlib
```python
N = 11
h = 1/(N-1)
x = linspace(0,1,N)
```
```python
f = ones((N,))
```
```python
A = zeros((N,N))
for i in range(1,N-1):
A[i, i-1] = A[i, i+1] = -1
A[i,i] = 2
A[0,0] = A[-1,-1] = 1
f[0] = f[-1] = 0
A = A/h**2
```
```python
A, f
```
(array([[ 100., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0.],
[-100., 200., -100., 0., 0., 0., 0., 0., 0.,
0., 0.],
[ 0., -100., 200., -100., 0., 0., 0., 0., 0.,
0., 0.],
[ 0., 0., -100., 200., -100., 0., 0., 0., 0.,
0., 0.],
[ 0., 0., 0., -100., 200., -100., 0., 0., 0.,
0., 0.],
[ 0., 0., 0., 0., -100., 200., -100., 0., 0.,
0., 0.],
[ 0., 0., 0., 0., 0., -100., 200., -100., 0.,
0., 0.],
[ 0., 0., 0., 0., 0., 0., -100., 200., -100.,
0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., -100., 200.,
-100., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., -100.,
200., -100.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 100.]]),
array([0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0.]))
```python
u = linalg.solve(A, f)
```
```python
plot(x, u, 'o-b')
plot(x, 0.5*(x*(1-x)), 'r')
```
```python
exact = 0.5*(x*(1-x))
error = max(abs(exact-u))
```
```python
error
```
4.163336342344337e-17
```python
exact = x*(1-x)*(x-.85)
```
```python
import sympy as sym
t = sym.var('x')
exact_t = t*(1-t)*(t-.85)
fsymbol = sym.lambdify(t, -exact_t.diff(t, 2) )
x = linspace(0,1,N)
f = fsymbol(x)
```
```python
x = linspace(0,1,N)
f = fsymbol(x)
f[0] = f[-1] = 0
```
```python
plot(x,f)
```
```python
u = linalg.solve(A, f)
```
```python
plot(x, u, 'ob-')
plot(x, exact)
```
```python
max(abs(u - exact))
```
3.469446951953614e-17
```python
x = sym.var('x')
h = sym.var('h')
g = sym.Function('g')
```
```python
def cfd_II(x,h,g):
    # central second-order difference for g''
    return (g(x+h)- 2*g(x) + g(x-h))/h**2
def forward_fd(x,h,g):
    # forward first difference for g'
    return (g(x+h)- g(x))/h
def back_fd(x,h,g):
    # backward first difference for g'
    return (g(x)- g(x-h))/h
def central_fd(x,h,g):
    # central first difference for g'
    return (g(x+h)- g(x-h))/(2*h)
```
```python
sym.series(forward_fd(x, h, g), x=h, x0=0, n=2)
```
$\displaystyle \left. \frac{d}{d \xi_{1}} g{\left(\xi_{1} \right)} \right|_{\substack{ \xi_{1}=x }} + \frac{h \left. \frac{d^{2}}{d \xi_{1}^{2}} g{\left(\xi_{1} \right)} \right|_{\substack{ \xi_{1}=x }}}{2} + O\left(h^{2}\right)$
```python
sym.series(back_fd(x, h, g), x=h, x0=0, n=2)
```
$\displaystyle \left. \frac{d}{d \xi_{1}} g{\left(\xi_{1} \right)} \right|_{\substack{ \xi_{1}=x }} - \frac{h \left. \frac{d^{2}}{d \xi_{1}^{2}} g{\left(\xi_{1} \right)} \right|_{\substack{ \xi_{1}=x }}}{2} + O\left(h^{2}\right)$
```python
sym.series(central_fd(x, h, g), x=h, x0=0, n=3)
```
$\displaystyle \left. \frac{d}{d \xi_{1}} g{\left(\xi_{1} \right)} \right|_{\substack{ \xi_{1}=x }} + \frac{h^{2} \left. \frac{d^{3}}{d \xi_{1}^{3}} g{\left(\xi_{1} \right)} \right|_{\substack{ \xi_{1}=x }}}{6} + O\left(h^{3}\right)$
```python
sym.series(cfd_II(x, h, g), x=h, x0=0, n=5)
```
$\displaystyle \left. \frac{d^{2}}{d \xi_{1}^{2}} g{\left(\xi_{1} \right)} \right|_{\substack{ \xi_{1}=x }} + \frac{h^{2} \left. \frac{d^{4}}{d \xi_{1}^{4}} g{\left(\xi_{1} \right)} \right|_{\substack{ \xi_{1}=x }}}{12} + \frac{h^{4} \left. \frac{d^{6}}{d \xi_{1}^{6}} g{\left(\xi_{1} \right)} \right|_{\substack{ \xi_{1}=x }}}{360} + O\left(h^{5}\right)$
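Both model problems above have polynomial exact solutions, which the centered scheme reproduces to machine precision (its truncation error involves the fourth derivative, which vanishes for those polynomials). To actually observe the $O(h^2)$ error of `cfd_II`, a non-polynomial solution is needed; a short added sketch for $-u'' = \pi^2\sin(\pi x)$, whose exact solution is $u=\sin(\pi x)$:
```python
def solve_poisson(N):
    """Second-order FD solve of -u'' = pi^2 sin(pi x) with u(0)=u(1)=0; returns max error."""
    h = 1/(N-1)
    x = linspace(0, 1, N)
    f = pi**2*sin(pi*x)
    A = zeros((N, N))
    for i in range(1, N-1):
        A[i, i-1] = A[i, i+1] = -1
        A[i, i] = 2
    A[0, 0] = A[-1, -1] = 1
    f[0] = f[-1] = 0
    A = A/h**2
    u = linalg.solve(A, f)
    return max(abs(u - sin(pi*x)))

# halving h should divide the error by roughly 4
[solve_poisson(N) for N in (11, 21, 41)]
```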
--- source file: slides/Lecture 10 - LH - LAB - Introduction to PDEs - Finite Differences in 1D.ipynb | repo: vitturso/numerical-analysis-2021-2022 | license: CC-BY-4.0 ---
# Practice session 5 - Damped 1-DoF swing arm
2021.03.08
## Problem:
```python
from IPython.display import Image
Image(filename='gyak_5_1.png',width=500)
```
The attached figure shows a swing arm consisting of two rods of different mass and length and a disc of radius $R$ attached to them. The two rods are connected to the environment through two springs of stiffness $k_1$ and $k_2$, respectively, and a damper with damping coefficient $c_1$. The swing arm can only rotate about the hinge $A$. The generalized coordinate describing the motion is the angle $\varphi$, measured from the horizontal. The swing arm is placed in the Earth's gravitational field; its equilibrium position is at $\varphi=0$, where the spring of stiffness $k_2$ is unstretched.
### Data:
|||
|-------------------|------------------|
| $l$ = 0,2 m | $k_1$ = 300 N/m |
| $R$ = 0,1 m | $k_2$ = 10 N/m |
| $m$ = 0,12 kg | $c_1$ = 2 Ns/m |
### Tasks:
1. Write down the equation of motion and compute the undamped ($\omega_n$) and damped ($\omega_d$) natural angular frequencies and the relative damping ratio ($\zeta$).
2. Compute the critical damping coefficient ($c_{cr}$)!
3. Compute the maximum force in the spring of stiffness $k_1$ for the following initial conditions! ($\varphi(t=0)=\varphi_0=0.01$ [rad]; $\dot{\varphi}(t=0)=0$ [rad/s])
## Solution:
```python
from IPython.display import Image
Image(filename='gyak5_2.png',width=900)
```
### Task 1:
The figure above shows the free-body diagram of the swing arm displaced from its equilibrium position. An earlier example (practice session 3) already showed which simplifications can be used in order to linearize such an oscillating system. Briefly summarizing the simplifications:
- the gravitational force acting on the horizontal rod element has no effect on the natural angular frequency (it may, however, affect the maximum spring forces)
- the deformations of the springs are well approximated by the arc lengths measured from the equilibrium position
The equation of motion, based on Newton's second law:
\begin{equation}
\dot{\mathbf{I}}=\mathbf{F}
\end{equation}
From which, based on the free-body diagram (and applying the approximations cos$\varphi\approx 1$ and sin$\varphi\approx \varphi$), we obtain
\begin{equation}
\Theta_A\ddot{\varphi}=-F_{r,1}3l-F_{r,2}l-F_{cs,1}3l-3mg\frac{3}{2}l-2mgl\varphi-mg(2l+R)\varphi,
\end{equation}
where the damping force and the spring forces are
\begin{equation}
F_{cs,1}\cong c_13l\dot{\varphi}; \hspace{10pt} F_{r,1}\cong F_{r,1st}+k_13l\varphi; \hspace{10pt} F_{r,2}\cong k_2l\varphi
\end{equation}
(note: the $-3mg\frac{3}{2}l$ term appearing in the equation of motion and the force due to the static deformation of the spring of stiffness $k_1$ ($F_{r,1st}$) balance each other, so they drop out of the equation). The equation of motion is therefore
\begin{equation}
\Theta_A\ddot{\varphi}=-9l^2k_1\varphi-l^2k_2\varphi-9l^2c_1\dot{\varphi}-2mgl\varphi-mg(2l+R)\varphi
\end{equation}
The moment of inertia of the structure about point $A$ can be determined with the help of the parallel-axis (Steiner) theorem:
\begin{equation}
\Theta_A=\frac{1}{3}3ml(3l)^2+\frac{1}{3}2ml(2l)^2+\frac{1}{2}mR^2+m(2l+R)^2
\end{equation}
```python
import sympy as sp
from IPython.display import display, Math
sp.init_printing()
```
```python
l, R, m, k1, k2, c1, Θ_A, g = sp.symbols("l, R, m, k1, k2, c1, Θ_A, g", real=True)
# Készítsünk behelyettesítési listát az adatok alapján, SI-ben
adatok = [(l, 0.2), (R, 0.1), (m, 0.12), (k1, 300), (k2, 10), (c1, 2), (g, 9.81)]
# Az általános koordináta definiálása az idő függvényeként
t = sp.symbols("t",real=True, positive=True)
φ_t = sp.Function('φ')(t)
# A z tengelyre számított perdület derivált az A pontban
dΠ_Az = Θ_A*φ_t.diff(t,2)
# A z tengelyre számított nyomaték az A pontban
M_Az = -9*l**2*k1*φ_t-l**2*k2*φ_t-9*l**2*c1*φ_t.diff(t)-2*m*g*l*φ_t-m*g*(2*l+R)*φ_t
# A dinamika alapegyenlete
# (nullára rendezve)
mozgegy = dΠ_Az-M_Az
mozgegy
```
```python
# Osszunk le a főegyütthatóval:
foegyutthato = mozgegy.coeff(sp.diff(φ_t,t,2))
mozgegy = (mozgegy/foegyutthato).expand().apart(φ_t)
mozgegy
```
```python
# Írjuk be a tehetetlenségi nyomatékot
# a rúdhosszakkal és tömegekkelkifejezve
mozgegy = mozgegy.subs(Θ_A, 1/3*3*m*(3*l)**2+1/3*2*m*(2*l)**2+1/2*m*R**2+m*(2*l+R)**2)
```
```python
# A mozgásegyenletnek ebben az alakjában
# d/dt(φ(t)) együtthatója 2ζω_n -nel
# a φ(t) együtthatója pedig ω_n^2-tel egyezik meg
# tehát mind a három kérdéses paraméter megadható
ω_n_num = sp.sqrt((mozgegy.coeff(φ_t)).subs(adatok)).evalf(6)
ζ_num = ((mozgegy.coeff(φ_t.diff(t))).subs(adatok)/(2*ω_n_num)).evalf(4)
ω_d_num = (ω_n_num*sp.sqrt(1-ζ_num**2)).evalf(6)
display(Math('\omega_n = {}'.format(sp.latex(ω_n_num))),Math('\zeta = {}'.format(sp.latex(ζ_num))),Math('\omega_d = {}'.format(sp.latex(ω_d_num))))
# [rad/s]
# [1]
# [rad/s]
```
$\displaystyle \omega_n = 35.5523$
$\displaystyle \zeta = 0.1169$
$\displaystyle \omega_d = 35.3085$
```python
# Később még szükség lesz a csillapított frekvenciára és periódusidőre is
T_d_num = (2*sp.pi/ω_d_num).evalf(4)
f_d_num = (ω_d_num/(2*sp.pi)).evalf(5)
display(Math('T_d = {}'.format(sp.latex(T_d_num))),Math('f_d = {}'.format(sp.latex(f_d_num))))
# [s]
# [1/s]
```
$\displaystyle T_d = 0.178$
$\displaystyle f_d = 5.6195$
## Task 2
We speak of critical damping when the relative damping ratio is exactly 1.
```python
# A mozgásegyenletnek d/dt(φ(t)) együtthatóját kell vizsgálni
mozgegy.coeff(φ_t.diff(t)).evalf(5)
```
```python
# Ez az együttható pont 2ζω_n -nel egyenlő
# Az így adódó egyenlet megoldásásval kapjuk
# a kritikus csillapítshoz tartozó c1 értéket
ζ_cr = 1
# itt még nem helyettesítünk be, hanem csakkifejezzük a kritikus csillapítási együtthatót
c1_cr = sp.solve(mozgegy.coeff(φ_t.diff(t))-2*ζ_cr*ω_n_num,c1)[0]
# most már be lehet helyettesíteni
c1_cr_num = c1_cr.subs(adatok)
display(Math('c_{{1,cr}} = {:.6}'.format(sp.latex(c1_cr_num))))
# [1]
```
$\displaystyle c_{1,cr} = 17.104$
## Task 3
The force arising in the spring of stiffness $k_1$ can be written as
\begin{equation}
F_{r,1}(t) = F_{r,1st}+k_13l\varphi(t).
\end{equation}
The spring is in a pre-stressed state in the equilibrium position. The static deformation can therefore be determined from the equilibrium equation
\begin{equation}
\sum M_A=0:\hspace{20pt} -F_{r,1st}3l-3mg\frac{3}{2}l=0,
\end{equation}
```python
Fr_1st = sp.symbols("Fr_1st")
Fr_1st_num = (sp.solve(-Fr_1st*3*l-3*m*g*3/2*l,Fr_1st)[0]).subs(adatok)
Fr_1st_num
# [N]
```
The dynamic spring force is largest where the displacement is largest, so as a first step the law of motion has to be determined.
```python
kezdeti_ert = {φ_t.subs(t,0): 0.01, φ_t.diff(t).subs(t,0): 0}
display(kezdeti_ert)
mozg_torv = (sp.dsolve(mozgegy.subs(adatok),φ_t,ics=kezdeti_ert)).evalf(6)
mozg_torv
```
Let us find the maximum of the displacement with numerical methods. For this it is useful to plot the function first. (For an analytical solution, see the solution of the similar example in practice session 4!)
```python
import numpy as np
import matplotlib.pyplot as plt
```
```python
# A matplotlib plottere az általunk megadott pontokat fogja egyenes vonalakkal összekötni.
# Elegendően kis lépésközt választva az így kapott görbe simának fog tűnni.
# Állítsuk elő az (x,y) koordinátákat!
t_val = np.linspace(0,0.5,1000) # lista létrehozása a [0 ; 0,5] intervallum 1000 részre való bontásával
φ_val = np.zeros(len(t_val)) # nulla lista létrehozása (ugyanannyi elemszámmal)
# for ciklus segítségével írjuk felül a nulla listában szerplő elemelet az adott x értékhez tartozó y értékekkel
for i in range(len(t_val)):
φ_val[i] = mozg_torv.rhs.subs(t,t_val[i])
# rajzterület létrehozása
plt.figure(figsize=(40/2.54,30/2.54))
# függvény kirajzolása az x és y kordináta értékeket tartalmazó listák megadásásval
plt.plot(t_val,φ_val,color='b',label=r'num_sim')
# tengelyek
axes = plt.gca()
axes.set_xlim([0,t_val[-1]])
axes.set_ylim([-0.01, 0.01])
# rácsozás
plt.grid()
# tengely feliratozás
plt.xlabel(r'$ t [s] $',fontsize=30)
plt.ylabel(r'$ \varphi(t) [rad] $',fontsize=30)
plt.show()
```
The static spring force has a negative sign, so two extremum locations have to be examined: the first local maximum and the minimum. These values can easily be extracted from the list determined earlier.
```python
lok_max = max(φ_val)
lok_min = min(φ_val)
# Rugóerők meghatározása
Fr_11 = (Fr_1st_num+k1*3*l*φ_t).subs(adatok).subs(φ_t,lok_max).evalf(5)
Fr_12 = (Fr_1st_num+k1*3*l*φ_t).subs(adatok).subs(φ_t,lok_min).evalf(5)
display(Math('F_{{r,1}} = {}'.format(sp.latex(Fr_11))),Math('F_{{r,2}} = {}'.format(sp.latex(Fr_12))))
# [N]
```
$\displaystyle F_{r,1} = 0.0342$
$\displaystyle F_{r,2} = -3.0093$
The maximum force arising in spring 1 is therefore 3.009 N (in magnitude).
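As a quick cross-check (added here, not part of the original worked solution): with the initial conditions $\varphi(0)=\varphi_0$, $\dot{\varphi}(0)=0$, the velocity of the underdamped free response is proportional to $\sin(\omega_d t)$, so the extrema occur exactly at $t_k = k\pi/\omega_d$ and the first minimum is $\varphi_{min} = -\varphi_0\, e^{-\zeta\omega_n \pi/\omega_d}$. Evaluating the spring force there reproduces the value found numerically.
```python
φ_min = -0.01*sp.exp(-ζ_num*ω_n_num*sp.pi/ω_d_num)
(Fr_1st_num + (k1*3*l).subs(adatok)*φ_min).evalf(5)
# ≈ -3.009 N, matching the extremum extracted from the simulated response
```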
Prepared by:
Juhos-Kiss Álmos (Applied Mechanics Student Section)
based on the derivation and figures of Takács Dénes (BME MM).
Errors, suggestions:
amsz.bme@gmail.com
csuzdi02@gmail.com
almosjuhoskiss@gmail.com
2021.03.07
```python
```
--- source file: otodik_het/.ipynb_checkpoints/gyak_5-checkpoint.ipynb | repo: barnabaspiri/RezgestanPython | license: MIT ---
```python
%pylab inline
import numpy as np
import pandas as pd
import sympy as sp
```
Populating the interactive namespace from numpy and matplotlib
```python
font = {'size' : 14}
matplotlib.rc('font', **font)
sp.init_printing()
```
# Approximate integration methods
Recall that the definite integral is defined as a limit of Riemann sums, so any Riemann sum could be used as an approximation to the integral: If we divide $[a,b]$ into $n$ subintervals of equal length $\Delta x = (b-a)/n$, then we have
$$ \int_a^b f(x)\ dx \approx \sum\limits_{i=1}^n f(x_i^*) \Delta x$$
where $x_i^*$ is any point in the $i$th subinterval $[x_{i-1}, x_i]$.
## Left endpoint approximation
If $x_i^*$ is chosen to be the left-endpoint of the interval, then
$$ \tag{1} \int_a^b f(x)\ dx \approx L_n = \sum\limits_{i=1}^n f(x_{i-1}) \Delta x$$
This is the implementation of equation $(1)$.
```python
def L(f, a, b, n):
dx = (b - a) / n
X = np.linspace(a, b - dx, n)
return sum([f(x)*dx for x in X])
```
## Right endpoint approximation
If we choose $x_i^*$ to be the right endpoint, then $x_i^*=x_i$ and we have
$$ \tag{2} \int_a^b f(x)\ dx \approx R_n = \sum\limits_{i=1}^n f(x_{i}^*) \Delta x$$
The approximations $L_n$ and $R_n$ are called the **left endpoint approximation** and **right endpoint approximation** respectively. Next is the implementation of equation $(2)$.
```python
def R(f, a, b, n):
dx = (b - a) / n
X = np.linspace(a, b - dx, n) + dx
return sum([f(x)*dx for x in X])
```
## Midpoint rule
Let's look at the midpoint approximation $M_n$, which appears to be better than either $L_n$ or $R_n$.
$$ \tag{3} \int_a^b f(x)\ dx \approx M_n = \Delta x\left[f(\bar{x}_1) + f(\bar{x}_2) + \ldots + f(\bar{x}_n)\right] $$ where $$\Delta x = \frac{b-a}{n}$$ and $$\bar{x}_i = \frac{1}{2}(x_{i-1} + x_i) $$ is the midpoint of $[x_{i-1}, x_i]$. Equation $(3)$ also has an implementation below.
```python
def M(f, a, b, n):
dx = (b - a) / n
X = np.linspace(a, b - dx, n) + dx / 2
return sum([f(x)*dx for x in X])
```
## Trapezoidal rule
Another approximation, called the Trapezoidal Rule, results from averaging the appromations in equations 1 and 2.
$$ \int_a^b f(x)\ dx \approx \dfrac{1}{2}\left[ \sum\limits_{i=1}^n f(x_{i-1})\Delta x + \sum\limits_{i=1}^n f(x_{i})\Delta x \right] = \dfrac{\Delta x}{2} \sum\limits_{i=1}^n \left( f(x_{i-1})+f(x_{i})\right) $$
```python
def T(f, a, b, n):
dx = (b - a) / n
X = np.linspace(a, b - dx, n)
return sum( [f(x) + f(x+dx) for x in X] ) * dx / 2
```
It can also be defined as:
$$ \tag{4} \int_a^b f(x)\ dx \approx T_n = \dfrac{\Delta x}{2} \left[ f(x_0) + 2f(x_1) + 2f(x_2) + \ldots + 2f(x_{n-1}) + f(x_n)\right]$$
where $\Delta x = (b-a)/n$ and $x_i=a+i\Delta x$.
# Approximations
Suppose we have the following integral:
$$\tag{Example 1}\int^2_1 \dfrac{1}{x}\ dx = \begin{bmatrix} \ln\ |\ x\ | \end{bmatrix}^2_1 = \ln 2 - \ln 1 = \ln 2 \approx 0.693147.$$
It is an easy integral, because we want to check the error between the approximations methods and the exact value. Estimating the integral with each of the methods gives:
```python
f=lambda x: 1/x
a=1; b=2
N=[]; Ln=[]; Rn=[]; Mn=[]; Tn=[];
for n in [5, 10, 15, 20, 25]:
N.append(n)
Ln.append(L(f, a, b, n))
Rn.append(R(f, a, b, n))
Mn.append(M(f, a, b, n))
Tn.append(T(f, a, b, n))
```
Now we add everything to a datatable and calculate the error between the exact value of the integral and each method.
```python
df=pd.DataFrame()
df['n'] = N
df['ln 2'] = [math.log(2)] * len(N)
df['Ln'] = Ln
df['Rn'] = Rn
df['Mn'] = Mn
df['Tn'] = Tn
df['L_error'] = math.log(2) - df['Ln']
df['R_error'] = math.log(2) - df['Rn']
df['M_error'] = math.log(2) - df['Mn']
df['T_error'] = math.log(2) - df['Tn']
df
```
        n      ln 2        Ln        Rn        Mn        Tn   L_error   R_error   M_error   T_error
    0   5  0.693147  0.745635  0.645635  0.691908  0.695635 -0.052488  0.047512  0.001239 -0.002488
    1  10  0.693147  0.718771  0.668771  0.692835  0.693771 -0.025624  0.024376  0.000312 -0.000624
    2  15  0.693147  0.710091  0.676758  0.693008  0.693425 -0.016944  0.016389  0.000139 -0.000278
    3  20  0.693147  0.705803  0.680803  0.693069  0.693303 -0.012656  0.012344  0.000078 -0.000156
    4  25  0.693147  0.703247  0.683247  0.693097  0.693247 -0.010100  0.009900  0.000050 -0.000100
```python
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharex='col', sharey='row',
                                         figsize=(14,6))
cols = ['L_error', 'R_error', 'M_error', 'T_error']
titles = ['Left endpoint', 'Right endpoint', 'Midpoint', 'Trapezoidal']
axes = [ax1, ax2, ax3, ax4]
for i in range(len(cols)):
ax = axes[i]
ax.bar(df['n'], df[cols[i]], width=3, alpha=0.33, fc='b')
ax.set_title(titles[i])
ax.set_ylabel('Error')
ax.set_xlabel('$n$ approximations')
ax.axhline(0, c='black', lw=1, ls='dashed')
ax.grid(ls='dashed', alpha=0.5)
```
We can make several observations from these tables:
1. In all methods we get more accurate approximations when we increase the value of $n$. However, very large numbers of $n$ result in so many arithmetic operations that we have to beware of accumulated round-off error.
2. The errors in the left and right endpoint approximations are opposite in sign and appear to decrease by a factor of about 2 when we double the value of $n$.
3. The Trapezoidal and Midpoint Rules are much more accurate than the endpoint approximations.
4. The errors in the Trapezoidal and Midpoint Rules are opposite in sign and appear to decrease by a factor of about 4 when we double the value of $n$.
5. The size of error in the Midpoint Rule is about half the size of the error in the Trapezoidal Rule.
# Error bounds
Suppose $|\ f''(x)\ | \leq K$ for $a \leq x \leq b$. If $E_t$ and $E_m$ are the errors in the Trapezoidal and Midpoint Rules, then $$\tag{5} |\ E_t\ | \leq \dfrac{K(b-a)^3}{12n^2} \quad \text{and} \quad |\ E_m\ | \leq \dfrac{K(b-a)^3}{24n^2}.$$ Let's apply this error estimate to the Trapezoidal Rule approximation in Example 1 equation. First we find $f''(x)$:
```python
x = sp.symbols('x')
f = 1/x
sp.diff(sp.diff(f))
```
Because $1 \leq x \leq 2$, we have $1/x \leq 1$, so
$$ |\ f''(x)\ | = \left|\dfrac{2}{x^3}\right| \leq \dfrac{2}{1^3}=2 $$
Therefore, taking $K=2,a=1,b=2$, and $n=5$ in the error estimate equation 5, we see that
$$ |\ E_t |\ \leq \dfrac{2(2-1)^3}{12(5)^2}=\dfrac{1}{150}\approx0.006667 $$
**Guaranteed error** How large should we take $n$ in order to guarantee that the Trapezoidal and Midpoint Rule approximations for $\int_1^2(1/x)\ dx$ are accurate to within $0.0001$?
We saw in the preceding calculation that $|\ f''(x)\ | \leq 2$ for $1\leq x\leq 2$, so we take $K=2, a=1$, and $b=2$ in equation 5. Accuracy to within $0.0001$ means that the size of the error should be less than $0.0001$. Therefore we choose $n$ so that $$ \dfrac{2(1)^3}{12n^2}\leq0.0001.$$
Solving the inequality for $n$, we get $$ n \gt \dfrac{1}{\sqrt{0.0006}}\approx 40.8 $$ Thus $n=41$ will ensure the desired accuracy.
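A quick numerical confirmation of this guarantee, using the `T` function defined earlier (this check is an addition to the text; `f_num` is a fresh callable so the symbolic `f` above is not overwritten):
```python
f_num = lambda x: 1/x
abs(np.log(2) - T(f_num, 1, 2, 41)) < 0.0001
```
This evaluates to `True`; the actual error is roughly $4\times10^{-5}$, comfortably inside the guaranteed bound.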
# Simpson's rule
...
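The section was left unfinished; as a hedged sketch in the same style as the functions above (an addition, not the original author's text), Simpson's Rule fits parabolic segments over pairs of subintervals and requires an even $n$:
$$ S_n = \dfrac{\Delta x}{3}\left[ f(x_0) + 4f(x_1) + 2f(x_2) + 4f(x_3) + \ldots + 2f(x_{n-2}) + 4f(x_{n-1}) + f(x_n)\right] $$
```python
def S(f, a, b, n):
    """Simpson's rule with n (even) subintervals."""
    if n % 2 != 0:
        raise ValueError("Simpson's rule needs an even number of subintervals")
    dx = (b - a) / n
    X = np.linspace(a, b, n + 1)
    w = np.ones(n + 1)
    w[1:-1:2] = 4     # odd interior nodes
    w[2:-1:2] = 2     # even interior nodes
    return sum(wi * f(xi) for wi, xi in zip(w, X)) * dx / 3

abs(np.log(2) - S(lambda x: 1/x, 1, 2, 10))
```
For the Example 1 integral the error with only $n=10$ is already of the order of $10^{-6}$, reflecting the fourth-order accuracy of Simpson's Rule.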
```python
from sympy.integrals.manualintegrate import integral_steps
```
```python
x = sp.symbols('x')
integral_steps(x*sp.sin(3*x), x)
```
PartsRule(u=x, dv=sin(3*x), v_step=URule(u_var=_u, u_func=3*x, constant=1/3, substep=ConstantTimesRule(constant=1/3, other=sin(_u), substep=TrigRule(func='sin', arg=_u, context=sin(_u), symbol=_u), context=sin(_u), symbol=_u), context=sin(3*x), symbol=x), second_step=ConstantTimesRule(constant=-1/3, other=cos(3*x), substep=URule(u_var=_u, u_func=3*x, constant=1/3, substep=ConstantTimesRule(constant=1/3, other=cos(_u), substep=TrigRule(func='cos', arg=_u, context=cos(_u), symbol=_u), context=cos(_u), symbol=_u), context=cos(3*x), symbol=x), context=-cos(3*x)/3, symbol=x), context=x*sin(3*x), symbol=x)
```python
```
--- source file: Notebooks/Approximate integration.ipynb | repo: darkeclipz/jupyter-notebooks | license: Unlicense ---
# Harmonic oscillator
```python
import sympy
sympy.init_printing()
from IPython.display import display
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, './code')
from gauss_legendre import gauss_legendre
from evaluate_functional import evaluate_functional
```
```python
# state vector (symbolic)
q = sympy.Symbol('q')
p = sympy.Symbol('p')
x = [q, p]
# structure matrix
J = sympy.Matrix([[0, 1],
[-1, 0]])
# physical parameters of Hamiltonian function
# (system mass and spring stiffness)
m = sympy.Symbol('m')
k = sympy.Symbol('k')
params = {
m: 0.1,
k: 3.0,
}
# Hamiltonian function
H = sympy.Rational(1,2) * p * 1/m * p + sympy.Rational(1, 2) * q * k * q
# storage efforts
dH = sympy.Matrix([H.diff(x_i) for x_i in x])
# time evolution
xdot = J @ dH
# initial condition
x_0 = numpy.array([numpy.pi/6, 0.0])
# duration of simulation in seconds
t_f = 30.0
# time step in seconds
dt = 5e-2
# order of collocation polynomial
s = 2
```
```python
# run the simulation
%time time, solution = gauss_legendre(x, xdot, x_0, t_f, dt, s=s, params=params)
```
CPU times: user 212 ms, sys: 3.56 ms, total: 215 ms
Wall time: 215 ms
```python
fig, ax = plt.subplots(nrows=2, dpi=200)
fig.tight_layout(pad=1.5)
# upper plot shows q over t
ax[0].set_title("q")
ax[0].plot(time, solution[:, 0])
# plot analytical solution for q in dashed
ω = numpy.sqrt(params[k] / params[m])
ax[0].plot(time, solution[0,0]*numpy.cos(time*ω), '--')
# lower plot shows p over t
ax[1].set_title("p")
ax[1].plot(time, solution[:, 1])
ax[1].set_xlabel("time");
```
```python
# relative error of energy conservation
energy = evaluate_functional(x, H, solution, params=params)
abs(energy[0] - energy[-1]) / energy[0]
```
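Gauss-Legendre collocation is a symplectic method and, for this linear system, conserves the quadratic Hamiltonian exactly (up to round-off), which is what the tiny relative error above reflects. For contrast, a short added sketch (independent of the thesis code) integrating the same oscillator with a general-purpose Runge-Kutta method, whose energy error is set by the integration tolerance rather than by structure preservation:
```python
from scipy.integrate import solve_ivp

m_val, k_val = params[m], params[k]
rhs = lambda t, y: [y[1] / m_val, -k_val * y[0]]            # dq/dt = p/m, dp/dt = -k q
rk = solve_ivp(rhs, (0.0, t_f), x_0, max_step=dt, rtol=1e-8, atol=1e-10)

E = 0.5 * rk.y[1]**2 / m_val + 0.5 * k_val * rk.y[0]**2     # energy along the RK45 trajectory
abs(E[0] - E[-1]) / E[0]
```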
--- source file: harmonic_oscillator.ipynb | repo: MarkusLohmayer/master-thesis-code | license: MIT ---
# Combination - Passwords & Students
> This document is written in *R*.
>
> ***GitHub***: https://github.com/czs108
## Question A
> If passwords can consist of **6** *letters*, find the probability that a randomly chosen password will *not* have any *repeated* letters.
\begin{equation}
P = \frac{26 \times 25 \times \cdots \times 21}{26^{6}}
\end{equation}
```R
norpt <- 0
count <- 100000
for (i in c(1:count)) {
pwd <- sample(x=letters, size=6, replace=TRUE)
if (length(unique(pwd)) == 6) {
norpt <- norpt + 1
}
}
norpt / count
```
0.53777
## Question B
> How many ways can you get a sample of **6** letters *without* *repeated* letters, if the order does *not* matter?
\begin{equation}
^{26}C_6 = \frac{26!}{6! \times 20!}
\end{equation}
```R
choose(n=26, k=6)
```
230230
## Question C
> Use the `sample` command to simulate tossing a coin **10000** times. You can use a `for` loop and record the result of each toss. Then you can use the `table` command to find how often you got *heads* or *tails*.
```R
res <- sample(x=c("Head", "Tail"), size=10000, replace=TRUE)
prop.table(table(res))
```
res
Head Tail
0.5015 0.4985
## Question D
> If a class contains **60** *females* and **40** *males* and you choose a random sample of **5** students from the class, what is the probability of getting **5** *females*?
\begin{equation}
P = \frac{^{60}C_5}{^{100}C_{5}}
\end{equation}
```R
choose(n=60, k=5) / choose(n=100, k=5)
```
0.0725420627482483
Check the *Hypergeometric Distribution*.
```R
dhyper(x=c(0:5), m=60, n=40, k=5)
```
    0.00873993458676817  0.072832788223068  0.232277540819514  0.354528878092943  0.259078795529458  0.0725420627482483
## Question E
> Use the `sample` command to simulate the situation in *Question D*. Repeat the sample **10000** times. How often do you get **5** *females*?
```R
students <- c(rep("Male", 40), rep("Female", 60))
noman <- 0
count <- 10000
for (i in c(1:count)) {
group <- sample(x=students, size=5)
if (group[1] == "Female" && length(unique(group)) == 1) {
noman <- noman + 1
}
}
noman / count
```
0.0771
--- source file: exercises/Combination - Passwords & Students.ipynb | repo: czs108/Probability-Theory-Exercises | license: MIT ---
# Least-Squares Method (MMQ)
## License
All content can be freely used and adapted under the terms of the
[Creative Commons Attribution 4.0 International License](http://creativecommons.org/licenses/by/4.0/).
## Imports
Put **all** of the `import` statements in the cell below. Don't forget `%matplotlib inline` so that the plots appear in the notebook.
```python
import numpy as np
```
## IMPORTANT
Now that you know defensive programming techniques, I expect all the code you write below to use them. Create docstrings for your functions, check the inputs (when possible) and check the outputs. **Don't forget the comments**.
## Fabricating test data
To know whether our code is working, we need to fabricate some data with known parameters. We will generate data that follow the equation of a straight line:
$$
d_i = a x_i + b
$$
**IMPORTANT**: I will use the numpy library to generate the data below.
You **may not** use numpy to compute your solution.
Use of numpy must be earned with ~~blood~~effort.
The code below serves as an example of what you will be able to do with Python in your own work (outside class).
```python
a = 10
b = 50
N = 50
# I will use numpy's linspace function to make life easier
# This function creates N equally spaced values between two numbers (5 and 50)
x = np.linspace(5, 50, N)
# Now we can use the values of x, a and b above to simulate observed data
dados_obs = a*x + b
# Let's add random error to the data to make things more interesting
# The error will follow a normal distribution with the following parameters
media_erro = 0
std_erro = 20
# The line below makes the random values not truly random
# see https://en.wikipedia.org/wiki/Pseudorandom_number_generator
np.random.seed(42)
# Generate the list of random numbers
erro = np.random.normal(loc=media_erro, scale=std_erro, size=len(dados_obs))
# Now we can add the error to the observed data
dados_obs += erro
```
Use the cell below to make a plot of your data as black circles (`ok`).
```python
```
## Matrix form of the line equation and the Jacobian matrix
We have one line equation for each value of $x_i$:
$$
\begin{align}
d_1 &= ax_1 + b \\
d_2 &= ax_2 + b \\
\vdots \\
d_N &= ax_N + b \\
\end{align}
$$
Esse sistema pode ser escrito de forma matricial com os parâmetros sendo $a$ e $b$:
$$
\begin{bmatrix}
d_1 \\ d_2 \\ \vdots \\ d_N
\end{bmatrix} =
\begin{bmatrix}
x_1 & 1 \\
x_2 & 1 \\
\vdots & \vdots \\
x_N & 1
\end{bmatrix}
\begin{bmatrix}
a \\ b
\end{bmatrix}
$$
$$
\bar{d} = \bar{\bar{A}}\bar{p}
$$
## Task
Write a function called `jacobiana` that computes and returns the Jacobian matrix ($\bar{\bar{A}}$).
**To think about**: what should this function receive as an argument? (**Hint**: it only needs 1)
```python
```
```python
```
```python
```
### Expected result
The cell below tests your Jacobian against one produced with numpy.
```python
assert np.allclose(jacobiana(x), np.transpose([x, np.ones_like(x)]))
```
```python
```
```python
```
## Task
Compute predicted data for the parameter vector defined below **using the matrix form of the equation**. Store the result in a variable called `preditos`.
Make a plot of the observed data (generated above) as black dots and the predicted data you computed as a red line.
**Hint**: use the functions you created in the previous class.
```python
p = [5, 15]
```
```python
```
```python
```
```python
```
### Expected result
The cell below tests your results against one computed with numpy.
```python
assert np.allclose(preditos, np.dot(jacobiana(x), p))
```
```python
```
```python
```
The plot should look like the one below:
## Normal equations system
The least-squares solution is the vector $\bar{p}$ that solves the linear system below (called the system of normal equations):
$$
\bar{\bar{A}}^T\bar{\bar{A}}\bar{p} = \bar{\bar{A}}^T\bar{d}^o
$$
To solve this system, we first need to compute the system matrix $\bar{\bar{A}}^T\bar{\bar{A}}$ and the right-hand-side vector $\bar{\bar{A}}^T\bar{d}^o$.
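Just to illustrate what the normal equations do (using numpy, which the exercise forbids for your own solution, so treat this only as a reference check in the same spirit as the `assert` cells below):
```python
A = np.transpose([x, np.ones_like(x)])                 # Jacobian of the straight line
p_chk = np.linalg.solve(A.T @ A, A.T @ dados_obs)      # solve A^T A p = A^T d
p_chk
```
For the data fabricated above this returns approximately `[9.74, 52.58]`, the same values quoted in the expected result further down.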
## Task
Write a function called `eqnormais_sistema` that computes and returns the matrix $\bar{\bar{A}}^T\bar{\bar{A}}$ given the Jacobian matrix.
Use the functions created in the previous class.
**Hint**: it is possible to know how many rows and columns the system must have. Check that your result has those numbers.
```python
```
```python
```
```python
```
### Expected result
The cell below tests your results against one computed with numpy.
```python
assert np.allclose(eqnormais_sistema(jacobiana(x)), np.transpose(jacobiana(x)).dot(jacobiana(x)))
```
```python
```
```python
```
## Task
Write a function called `eqnormais_lado_direito` that computes and returns the right-hand-side vector of the normal equations system.
**Hints**:
* This function must receive 2 arguments.
* Should this function return a vector or a matrix?
* It is possible to know the number of elements the result must contain. Check that number.
```python
```
```python
```
```python
```
### Expected result
The cell below tests your results against one computed with numpy.
```python
assert np.allclose(eqnormais_lado_direito(jacobiana(x), dados_obs), np.transpose(jacobiana(x)).dot(dados_obs))
```
```python
```
```python
```
## Least-squares solution
Now that we have the system of normal equations, we can solve it numerically to find the values of $a$ and $b$ that produce the line that best fits our data.
## Task
Write a function called `elim_gauss` that solves a system of equations using Gaussian elimination. This function must receive the system matrix and the right-hand-side vector as arguments and return the solution vector.
**Hints**:
* Check the number of elements in the matrix and in the vector.
* The matrix must be square.
```python
```
```python
```
```python
```
### Expected result
The cell below tests your results against one computed with numpy.
```python
np.random.seed(42)
A_teste = np.random.uniform(10, 50, size=(21, 21))
x_teste = np.random.uniform(5, 20, size=21)
y_teste = A_teste.dot(x_teste)
assert np.allclose(elim_gauss(A_teste, y_teste), x_teste)
```
```python
```
```python
```
## Task
Write a function `ajuste_reta` that receives a vector of x values and one of observed data and returns the least-squares solution $\bar{p}$ (a vector with the estimated values of $a$ and $b$).
Apply this function to the simulated observed data above. Check that the solution matches the expected value (you can do this with an `assert`).
Make a plot of the observed data (black dots) against the data predicted by the solution you just obtained (red line). The plot must contain a legend. The legend for the predicted data should have the form "y = 234x + 244" (replacing the numbers with the values you estimated).
**Hints**:
* How many elements should the returned vector have?
* To insert numbers into a string: `"y = {}".format(123.1)` $\to$ `"y = 123.1"`
* To format the numbers you want to insert into a string: `"y = {:.4f}".format(123.242524536362446353436335)` $\to$ `"y = 123.2425"`
```python
```
```python
```
```python
```
### Expected result
The estimated values of $\bar{p}$ should be approximately:
[9.742296, 52.57738183]
The plot should look like the one below:
## Bonus task
We can use the least-squares method to fit any equation that is linear in the parameters ($a$ and $b$ in the case of the straight line). That means we can fit a parabola:
$$
d_i = ax_i^2 + bx_i + c
$$
This time, the parameters we want to estimate are $a$, $b$ and $c$. Note that we now have 3 parameters, not 2. Because of this, the Jacobian will have 3 columns instead of 2.
Write at least the following functions:
* `jacobiana_parabola`: computes and returns the Jacobian matrix for the parabola case. It should receive only the vector of x coordinates as an argument.
* `ajuste_parabola`: computes the least-squares solution for the case of a parabola. It should receive the vector of x coordinates and the data vector as arguments, and return the estimated parameter vector $\bar{p}$ (containing the values of $a$, $b$ and $c$).
Test your functions with the data generated below. Note that we are using the same x vector. Make plots of the fabricated data and also of the data predicted by the estimate (like the ones made above).
What happens if you try to fit a straight line to the parabola data? And if you try to fit a parabola to the straight-line data?
**Hints**:
* Do you need to create other functions to assemble the normal-equations system and compute the solution of the system?
```python
a_par, b_par, c_par = 2, 20, 200
dados_parabola = a_par*x**2 + b_par*x + c_par + erro
```
```python
```
```python
```
```python
```
```python
```
### Expected result
The plots you generate should look like the ones below:
--- source file: minimos-quadrados.ipynb | repo: mat-esp-uerj/minimos-quadrados-leovsf | license: CC-BY-4.0 ---
```python
#import krotov
#import qutip as qt# NBVAL_IGNORE_OUTPUT
#import qutip
#import numpy as np
#import scipy
#import matplotlib
#import matplotlib.pylab as plt
#import krotov
import numpy as np
import sympy as sp
from sympy import Function,Symbol,symbols,zeros,Matrix,sqrt,simplify,solve,diff,dsolve,lambdify
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from sympy.physics.quantum.dagger import Dagger
```
```python
t=Symbol('t')
gamma=Symbol('gamma',Real=True,positive=True)
Nb=Symbol('N_b',Real=True,positive=True)
x = Function('x')(t)
c= Function('c')(t)
x1 = Function('x1')(t)
x2 = Function('x2')(t)
p1= Function('p1')(t)
p2 = Function('p2')(t)
e=Function('e')(t)
mu,g=symbols('mu g')
x11,x12,p11,p12,e1,de1,c1=symbols('x1 x2 p1 p2 e de c')
de=diff(e,t)
F=zeros(2,2)
F[0,1]=1
F[1,0]=-1
B=zeros(2,2)
B[0,0]=1
B[1,1]=-1
#A=np.block([
# [g*B+F, Matrix([[0,0],[0,0]])],
# [Matrix([[0,0],[0,0]]), g*B +F ]
# ])
Omega=sp.Matrix([[0,0,0,0,0],[0,0,0,1,0],[0,0,0,0,1],[0,-1,0,0,0],[0,0,-1,0,0]])
G=sp.Matrix([[0,0,0,0,0],[0,1+2*mu*e,g,0,0],[0,g,1,0,0],[0,0,0,1-2*mu*e,g],[0,0,0,g,1]])
C=sp.Matrix([[0,0,0,0,0],[0,sp.sqrt(gamma*(Nb+1)/2),0,1j*sp.sqrt(gamma*(Nb+1)/2),0],[0,sp.sqrt(gamma*(Nb)/2),0,-1j*sp.sqrt(gamma*(Nb)/2),0],[0,0,0,0,0],[0,0,0,0,0]])
A=Omega*G+Omega*((Dagger(C)*C).as_real_imag()[1])
D=Omega*((Dagger(C)*C).as_real_imag()[0])*Omega.T
d=[]
d.append(c)
d.append(0)
d.append(0)
d.append(0)
d.append(0)
v=[]
v.append(c)
v.append(x1)
v.append(x2)
v.append(p1)
v.append(p2)
```
```python
def to_symbols(expr):
return expr.subs({x1:x11,x2:x12,p1:p11,p2:p12,e:e1,diff(e,t):de1,c:c1})
```
```python
v=Matrix(v)
to_symbols(v)
```
$\displaystyle \left[\begin{matrix}c\\x_{1}\\x_{2}\\p_{1}\\p_{2}\end{matrix}\right]$
```python
r=simplify(diff(v,t)-Matrix(A)*Matrix(v)-Matrix(Omega)*Matrix(d))
r
```
$\displaystyle \left[\begin{matrix}\frac{d}{d t} c{\left(t \right)}\\- g \operatorname{p_{2}}{\left(t \right)} + 0.5 \gamma \operatorname{x_{1}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} - 1\right) \operatorname{p_{1}}{\left(t \right)} + \frac{d}{d t} \operatorname{x_{1}}{\left(t \right)}\\- g \operatorname{p_{1}}{\left(t \right)} - \operatorname{p_{2}}{\left(t \right)} + \frac{d}{d t} \operatorname{x_{2}}{\left(t \right)}\\g \operatorname{x_{2}}{\left(t \right)} + 0.5 \gamma \operatorname{p_{1}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} + 1\right) \operatorname{x_{1}}{\left(t \right)} + \frac{d}{d t} \operatorname{p_{1}}{\left(t \right)}\\g \operatorname{x_{1}}{\left(t \right)} + \operatorname{x_{2}}{\left(t \right)} + \frac{d}{d t} \operatorname{p_{2}}{\left(t \right)}\end{matrix}\right]$
```python
vector=diff(Matrix([c,x1,x2,p1,p2]),t)
```
```python
import sympy
#eqs=list(to_symbols(Matrix(A)*Matrix(v)))
#t=sympy.linear_eq_to_matrix(eqs, [x11,x12,p11,p12,c1])[0]
```
```python
#t2=sympy.linear_eq_to_matrix(eqs, [x11,x12,p11,p12,c1])[1]
```
```python
#t*Matrix([x11,x12,p11,p12,c1])
```
```python
sol=simplify(r-diff(Matrix([c,x1,x2,p1,p2]),t))
sol
```
$\displaystyle \left[\begin{matrix}0\\- g \operatorname{p_{2}}{\left(t \right)} + 0.5 \gamma \operatorname{x_{1}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} - 1\right) \operatorname{p_{1}}{\left(t \right)}\\- g \operatorname{p_{1}}{\left(t \right)} - \operatorname{p_{2}}{\left(t \right)}\\g \operatorname{x_{2}}{\left(t \right)} + 0.5 \gamma \operatorname{p_{1}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} + 1\right) \operatorname{x_{1}}{\left(t \right)}\\g \operatorname{x_{1}}{\left(t \right)} + \operatorname{x_{2}}{\left(t \right)}\end{matrix}\right]$
```python
import sympy
eqs=list(to_symbols(sol))
# use names that do not shadow the time symbol t defined above
Asys = sympy.linear_eq_to_matrix(eqs, [c1,x11,x12,p11,p12])[0]
bsys = sympy.linear_eq_to_matrix(eqs, [c1,x11,x12,p11,p12])[1]
```
```python
Asys*Matrix([c1,x11,x12,p11,p12])+vector
```
$\displaystyle \left[\begin{matrix}\frac{d}{d t} c{\left(t \right)}\\- g p_{2} + 0.5 \gamma x_{1} + p_{1} \left(2 e \mu - 1\right) + \frac{d}{d t} \operatorname{x_{1}}{\left(t \right)}\\- g p_{1} - p_{2} + \frac{d}{d t} \operatorname{x_{2}}{\left(t \right)}\\g x_{2} + 0.5 \gamma p_{1} + x_{1} \left(2 e \mu + 1\right) + \frac{d}{d t} \operatorname{p_{1}}{\left(t \right)}\\g x_{1} + x_{2} + \frac{d}{d t} \operatorname{p_{2}}{\left(t \right)}\end{matrix}\right]$
```python
def to_func(expr):
return expr.subs({x11:x1,x12:x2,p11:p1,p12:p2,e1:e,de1:de,c1:c})
```
```python
new_eqs=to_func(Asys*Matrix([c1,x11,x12,p11,p12])+vector)
new_eqs
```
$\displaystyle \left[\begin{matrix}\frac{d}{d t} c{\left(t \right)}\\- g \operatorname{p_{2}}{\left(t \right)} + 0.5 \gamma \operatorname{x_{1}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} - 1\right) \operatorname{p_{1}}{\left(t \right)} + \frac{d}{d t} \operatorname{x_{1}}{\left(t \right)}\\- g \operatorname{p_{1}}{\left(t \right)} - \operatorname{p_{2}}{\left(t \right)} + \frac{d}{d t} \operatorname{x_{2}}{\left(t \right)}\\g \operatorname{x_{2}}{\left(t \right)} + 0.5 \gamma \operatorname{p_{1}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} + 1\right) \operatorname{x_{1}}{\left(t \right)} + \frac{d}{d t} \operatorname{p_{1}}{\left(t \right)}\\g \operatorname{x_{1}}{\left(t \right)} + \operatorname{x_{2}}{\left(t \right)} + \frac{d}{d t} \operatorname{p_{2}}{\left(t \right)}\end{matrix}\right]$
```python
#field=np.linspace(0,100,1000)
lista=np.random.rand(5)
time,dt=np.linspace(1,10,5,retstep=True)
def field_discrete(lista,t,dt):
return lista[int(t/dt)]
def derivative(lista,t,dt):
return np.array([(lista[j]-lista[j-1])/(dt) if (j!=0)&(j!=len(lista)) else lista[j] for j in range(0,len(lista))])[int(t/dt)]
```
```python
lista
```
array([0.18773669, 0.66300483, 0.99995879, 0.72723594, 0.38313259])
```python
derivative(lista,time[1],dt)
```
0.21123028467962757
```python
(lista[1]-lista[0])/dt
```
0.21123028467962757
```python
field_discrete(lista,10,dt)
```
0.38313258597515254
```python
#sol=solve([r[0],r[1],r[2],r[3]],diff(x1,t),diff(x2,t),diff(p1,t),diff(p2,t))
#sol
```
```python
#d1=simplify(sol[diff(x1,t)])
#d2=simplify(sol[diff(x2,t)])
#d3=simplify(sol[diff(p1,t)])
#d4=simplify(sol[diff(p2,t)])
```
```python
#x1_diff(x1,x2,p1,p2,g,diff(e,t),e,mu)
```
```python
#p1_diff(x1,x2,p1,p2,g,diff(e,t),e,mu)
```
```python
#p2_diff(x1,x2,p1,p2,g,diff(e,t),e,mu)
```
```python
#x2_diff(x1,x2,p1,p2,g,diff(e,t),e,mu)
```
```python
#x1_diff=lambdify((x1,x2,p1,p2,g,diff(e,t),e,mu),d1)
#x2_diff=lambdify((x1,x2,p1,p2,g,diff(e,t),e,mu),d2)
#p1_diff=lambdify((x1,x2,p1,p2,g,diff(e,t),e,mu),d3)
#p2_diff=lambdify((x1,x2,p1,p2,g,diff(e,t),e,mu),d4)
```
```python
#x1_diff(x1,x2,g,diff(e,t),e,mu)
##def x1_diff(x1,x2,g,rec,e,mu):
# return x1**2 #x1*x2*g*rec*e*mu
#def x2_diff(x1,x2,g,rec,e,mu):
# return x2*g*rec**2 *e*mu#(x1)**2 + x2 *g*rec*e*mu
```
```python
#x2_diff(x1,x2,g,diff(e,t),e,mu)
```
```python
def vectorfield( t,w, p):
"""
    Defines the differential equations for the system.
    Arguments:
        w :  vector of the state variables:
                  w = [x1, x2, p1, p2]
t : time
p: vector of parameters
p=[g,field,dt,mu]
"""
x1, x2,p1,p2 = w
g,field,dt,mu=p
# Create f = (x1',y1',x2',y2'):
f = [x1_diff(x1,x2,p1,p2,g,derivative(field,t,dt),field_discrete(field,t,dt),mu),x2_diff(x1,x2,p1,p2,g,derivative(field,t,dt),field_discrete(field,t,dt),mu),
p1_diff(x1,x2,p1,p2,g,derivative(field,t,dt),field_discrete(field,t,dt),mu),p2_diff(x1,x2,p1,p2,g,derivative(field,t,dt),field_discrete(field,t,dt),mu)]
return f
```
```python
#from scipy.integrate import solve_ivp as ivp
#ini=0
#g=0.2
#fin=np.pi/g
#lt=1000
#ini_x1=0.05
#ini_x2=1
#field=np.random.rand(lt)
#np.loadtxt("EFieldValuesFINAL.txt")
#time=np.linspace(ini,fin,lt)
#field=guess_field(time,ini,fin,lt)
#dt=fin/(lt-1)
#p=[g,field,dt,0.1]
#wsol = ivp(vectorfield, [ini,fin],[ini_x1,ini_x1,10,10], args=(p,),t_eval=time)
```
```python
#wsol.message
```
```python
#%timeit wsol = ivp(vectorfield, [ini,fin],[ini_x1,ini_x2], args=(p,),t_eval=time)
```
```python
#from edos import guess_field
```
```python
#import matplotlib.pyplot as plt
#plt.figure(figsize=(18, 6), dpi=80)
#plt.plot(wsol.t,wsol.y[0],label=r'$x_{1}$')
#plt.plot(wsol.t,wsol.y[1],'-.',label=r'$x_{2}$')
#plt.plot(wsol.t,wsol.y[2],'-.',label=r'$p_{1}$')
#plt.plot(wsol.t,wsol.y[3],'-.',label=r'$p_{2}$')
#plt.legend()
#plt.show()
```
```python
#Now with CM Matrix
```
```python
t=Symbol('t')
v11 = Function('v11')(t)
v12 = Function('v12')(t)
v13= Function('v13')(t)
v14 = Function('v14')(t)
v22= Function('v22')(t)
v23 = Function('v23')(t)
v24= Function('v24')(t)
v33 = Function('v33')(t)
v34= Function('v34')(t)
v44 = Function('v44')(t)
x1 = Function('x1')(t)
x2 = Function('x2')(t)
p1= Function('p1')(t)
p2 = Function('p2')(t)
g= symbols('g')
e=Function('e')(t)
mu=symbols('mu')
Omega=sp.Matrix([[0,0,1,0],[0,0,0,1],[-1,0,0,0],[0,-1,0,0]])
G=sp.Matrix([[1+2*mu*e,g,0,0],[g,1,0,0],[0,0,1-2*mu*e,g],[0,0,g,1]])
C=sp.Matrix([[sp.sqrt(gamma*(Nb+1)/2),0,1j*sp.sqrt(gamma*(Nb+1)/2),0],[sp.sqrt(gamma*(Nb)/2),0,-1j*sp.sqrt(gamma*(Nb)/2),0],[0,0,0,0],[0,0,0,0]])
A=Omega*G+Omega*((Dagger(C)*C).as_real_imag()[1])
D=Omega*((Dagger(C)*C).as_real_imag()[0])*Omega.T
#V=Matrix([[v11-2*x1+alpha**2,v12-alpha*x2+beta*x1-alpha*beta,v13-alpha*p1,v14-alpha*p2],[v12-alpha*x2+beta*x1-alpha*beta,v22+beta**2+2*x2,v23+beta*p1,v24+beta*p2],[v13-alpha*p1,v23+beta*p1,v33,v34],[v14-alpha*p2,v24+beta*p2,v34,v44]])
V=Matrix([[v11,v12,v13,v14],[v12,v22,v23,v24],[v13,v23,v33,v34],[v14,v24,v34,v44]])
```
```python
V
```
$\displaystyle \left[\begin{matrix}\operatorname{v_{11}}{\left(t \right)} & \operatorname{v_{12}}{\left(t \right)} & \operatorname{v_{13}}{\left(t \right)} & \operatorname{v_{14}}{\left(t \right)}\\\operatorname{v_{12}}{\left(t \right)} & \operatorname{v_{22}}{\left(t \right)} & \operatorname{v_{23}}{\left(t \right)} & \operatorname{v_{24}}{\left(t \right)}\\\operatorname{v_{13}}{\left(t \right)} & \operatorname{v_{23}}{\left(t \right)} & \operatorname{v_{33}}{\left(t \right)} & \operatorname{v_{34}}{\left(t \right)}\\\operatorname{v_{14}}{\left(t \right)} & \operatorname{v_{24}}{\left(t \right)} & \operatorname{v_{34}}{\left(t \right)} & \operatorname{v_{44}}{\left(t \right)}\end{matrix}\right]$
```python
r=simplify(diff(V,t)-Matrix(A)*Matrix(V)-Matrix(V)*Matrix(A).T-D)
simplify(r)
```
$\displaystyle \left[\begin{matrix}- 1.0 N_{b} \gamma - 2 g \operatorname{v_{14}}{\left(t \right)} + 1.0 \gamma \operatorname{v_{11}}{\left(t \right)} - 0.5 \gamma + 4 \mu e{\left(t \right)} \operatorname{v_{13}}{\left(t \right)} - 2 \operatorname{v_{13}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{11}}{\left(t \right)} & - g \operatorname{v_{13}}{\left(t \right)} - g \operatorname{v_{24}}{\left(t \right)} + 0.5 \gamma \operatorname{v_{12}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} - 1\right) \operatorname{v_{23}}{\left(t \right)} - \operatorname{v_{14}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{12}}{\left(t \right)} & g \operatorname{v_{12}}{\left(t \right)} - g \operatorname{v_{34}}{\left(t \right)} + 1.0 \gamma \operatorname{v_{13}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} - 1\right) \operatorname{v_{33}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} + 1\right) \operatorname{v_{11}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{13}}{\left(t \right)} & g \operatorname{v_{11}}{\left(t \right)} - g \operatorname{v_{44}}{\left(t \right)} + 0.5 \gamma \operatorname{v_{14}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} - 1\right) \operatorname{v_{34}}{\left(t \right)} + \operatorname{v_{12}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{14}}{\left(t \right)}\\- g \operatorname{v_{13}}{\left(t \right)} - g \operatorname{v_{24}}{\left(t \right)} + 0.5 \gamma \operatorname{v_{12}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} - 1\right) \operatorname{v_{23}}{\left(t \right)} - \operatorname{v_{14}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{12}}{\left(t \right)} & - 2 g \operatorname{v_{23}}{\left(t \right)} - 2 \operatorname{v_{24}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{22}}{\left(t \right)} & g \operatorname{v_{22}}{\left(t \right)} - g \operatorname{v_{33}}{\left(t \right)} + 0.5 \gamma \operatorname{v_{23}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} + 1\right) \operatorname{v_{12}}{\left(t \right)} - \operatorname{v_{34}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{23}}{\left(t \right)} & g \operatorname{v_{12}}{\left(t \right)} - g \operatorname{v_{34}}{\left(t \right)} + \operatorname{v_{22}}{\left(t \right)} - \operatorname{v_{44}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{24}}{\left(t \right)}\\g \operatorname{v_{12}}{\left(t \right)} - g \operatorname{v_{34}}{\left(t \right)} + 1.0 \gamma \operatorname{v_{13}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} - 1\right) \operatorname{v_{33}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} + 1\right) \operatorname{v_{11}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{13}}{\left(t \right)} & g \operatorname{v_{22}}{\left(t \right)} - g \operatorname{v_{33}}{\left(t \right)} + 0.5 \gamma \operatorname{v_{23}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} + 1\right) \operatorname{v_{12}}{\left(t \right)} - \operatorname{v_{34}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{23}}{\left(t \right)} & - N_{b} \gamma + 2 g \operatorname{v_{23}}{\left(t \right)} + \gamma \operatorname{v_{33}}{\left(t \right)} - \frac{\gamma}{2} + 4 \mu e{\left(t \right)} \operatorname{v_{13}}{\left(t \right)} + 2 \operatorname{v_{13}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{33}}{\left(t \right)} & g \operatorname{v_{13}}{\left(t \right)} + g \operatorname{v_{24}}{\left(t \right)} + 0.5 \gamma \operatorname{v_{34}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} + 1\right) \operatorname{v_{14}}{\left(t \right)} + \operatorname{v_{23}}{\left(t 
\right)} + \frac{d}{d t} \operatorname{v_{34}}{\left(t \right)}\\g \operatorname{v_{11}}{\left(t \right)} - g \operatorname{v_{44}}{\left(t \right)} + 0.5 \gamma \operatorname{v_{14}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} - 1\right) \operatorname{v_{34}}{\left(t \right)} + \operatorname{v_{12}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{14}}{\left(t \right)} & g \operatorname{v_{12}}{\left(t \right)} - g \operatorname{v_{34}}{\left(t \right)} + \operatorname{v_{22}}{\left(t \right)} - \operatorname{v_{44}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{24}}{\left(t \right)} & g \operatorname{v_{13}}{\left(t \right)} + g \operatorname{v_{24}}{\left(t \right)} + 0.5 \gamma \operatorname{v_{34}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} + 1\right) \operatorname{v_{14}}{\left(t \right)} + \operatorname{v_{23}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{34}}{\left(t \right)} & 2 g \operatorname{v_{14}}{\left(t \right)} + 2 \operatorname{v_{24}}{\left(t \right)} + \frac{d}{d t} \operatorname{v_{44}}{\left(t \right)}\end{matrix}\right]$
```python
new_eqs
```
$\displaystyle \left[\begin{matrix}\frac{d}{d t} c{\left(t \right)}\\- g \operatorname{p_{2}}{\left(t \right)} + 0.5 \gamma \operatorname{x_{1}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} - 1\right) \operatorname{p_{1}}{\left(t \right)} + \frac{d}{d t} \operatorname{x_{1}}{\left(t \right)}\\- g \operatorname{p_{1}}{\left(t \right)} - \operatorname{p_{2}}{\left(t \right)} + \frac{d}{d t} \operatorname{x_{2}}{\left(t \right)}\\g \operatorname{x_{2}}{\left(t \right)} + 0.5 \gamma \operatorname{p_{1}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} + 1\right) \operatorname{x_{1}}{\left(t \right)} + \frac{d}{d t} \operatorname{p_{1}}{\left(t \right)}\\g \operatorname{x_{1}}{\left(t \right)} + \operatorname{x_{2}}{\left(t \right)} + \frac{d}{d t} \operatorname{p_{2}}{\left(t \right)}\end{matrix}\right]$
```python
c_eqs=[r[0,0],r[0,1],r[0,2],r[0,3],r[1,1],r[1,2],r[1,3],r[2,2],r[2,3],r[3,3]]
```
```python
gaga=new_eqs-vector
gaga
```
$\displaystyle \left[\begin{matrix}0\\- g \operatorname{p_{2}}{\left(t \right)} + 0.5 \gamma \operatorname{x_{1}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} - 1\right) \operatorname{p_{1}}{\left(t \right)}\\- g \operatorname{p_{1}}{\left(t \right)} - \operatorname{p_{2}}{\left(t \right)}\\g \operatorname{x_{2}}{\left(t \right)} + 0.5 \gamma \operatorname{p_{1}}{\left(t \right)} + \left(2 \mu e{\left(t \right)} + 1\right) \operatorname{x_{1}}{\left(t \right)}\\g \operatorname{x_{1}}{\left(t \right)} + \operatorname{x_{2}}{\left(t \right)}\end{matrix}\right]$
```python
dx1=diff(x1,t)
dx2=diff(x2,t)
dp1=diff(p1,t)
dp2=diff(p2,t)
dc=diff(c,t)
```
```python
despejadas=[(c_eqs[i]).subs({dc:gaga[0],dx1:gaga[1],dx2:gaga[2],dp1:gaga[3],dp2:gaga[4]}) for i in range(10)]
```
```python
despejadas=[despejadas[i].simplify() for i in range(10)]
```
```python
#eqs=list(to_symbols(sol.subs(diff(c,t),0)))
#t=sympy.linear_eq_to_matrix(eqs, [c1,x11,x12,p11,p12])[0]
#t2=sympy.linear_eq_to_matrix(eqs, [c1,x11,x12,p11,p12])[1]
```
```python
covariance=[v11,v12,v13,v14,v22,v23,v24,v33,v34,v44]
coeffs=[(Matrix(despejadas).expand()[i]).collect(diff(covariance[i],t)).coeff(diff(covariance[i],t)) for i in range(len(despejadas))]
coeffs
```
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```python
despeje=[diff(covariance[i],t)*coeffs[i] for i in range(len(covariance))]
```
```python
kara=simplify(Matrix(despejadas).expand()-Matrix(despeje))
kara
```
$\displaystyle \left[\begin{matrix}- 1.0 N_{b} \gamma - 2 g \operatorname{v_{14}}{\left(t \right)} + 1.0 \gamma \operatorname{v_{11}}{\left(t \right)} - 0.5 \gamma + 4 \mu e{\left(t \right)} \operatorname{v_{13}}{\left(t \right)} - 2 \operatorname{v_{13}}{\left(t \right)}\\- g \operatorname{v_{13}}{\left(t \right)} - g \operatorname{v_{24}}{\left(t \right)} + 0.5 \gamma \operatorname{v_{12}}{\left(t \right)} + 2 \mu e{\left(t \right)} \operatorname{v_{23}}{\left(t \right)} - \operatorname{v_{14}}{\left(t \right)} - \operatorname{v_{23}}{\left(t \right)}\\g \operatorname{v_{12}}{\left(t \right)} - g \operatorname{v_{34}}{\left(t \right)} + 1.0 \gamma \operatorname{v_{13}}{\left(t \right)} + 2 \mu e{\left(t \right)} \operatorname{v_{11}}{\left(t \right)} + 2 \mu e{\left(t \right)} \operatorname{v_{33}}{\left(t \right)} + \operatorname{v_{11}}{\left(t \right)} - \operatorname{v_{33}}{\left(t \right)}\\g \operatorname{v_{11}}{\left(t \right)} - g \operatorname{v_{44}}{\left(t \right)} + 0.5 \gamma \operatorname{v_{14}}{\left(t \right)} + 2 \mu e{\left(t \right)} \operatorname{v_{34}}{\left(t \right)} + \operatorname{v_{12}}{\left(t \right)} - \operatorname{v_{34}}{\left(t \right)}\\- 2 g \operatorname{v_{23}}{\left(t \right)} - 2 \operatorname{v_{24}}{\left(t \right)}\\g \operatorname{v_{22}}{\left(t \right)} - g \operatorname{v_{33}}{\left(t \right)} + 0.5 \gamma \operatorname{v_{23}}{\left(t \right)} + 2 \mu e{\left(t \right)} \operatorname{v_{12}}{\left(t \right)} + \operatorname{v_{12}}{\left(t \right)} - \operatorname{v_{34}}{\left(t \right)}\\g \operatorname{v_{12}}{\left(t \right)} - g \operatorname{v_{34}}{\left(t \right)} + \operatorname{v_{22}}{\left(t \right)} - \operatorname{v_{44}}{\left(t \right)}\\- N_{b} \gamma + 2 g \operatorname{v_{23}}{\left(t \right)} + \gamma \operatorname{v_{33}}{\left(t \right)} - \frac{\gamma}{2} + 4 \mu e{\left(t \right)} \operatorname{v_{13}}{\left(t \right)} + 2 \operatorname{v_{13}}{\left(t \right)}\\g \operatorname{v_{13}}{\left(t \right)} + g \operatorname{v_{24}}{\left(t \right)} + 0.5 \gamma \operatorname{v_{34}}{\left(t \right)} + 2 \mu e{\left(t \right)} \operatorname{v_{14}}{\left(t \right)} + \operatorname{v_{14}}{\left(t \right)} + \operatorname{v_{23}}{\left(t \right)}\\2 g \operatorname{v_{14}}{\left(t \right)} + 2 \operatorname{v_{24}}{\left(t \right)}\end{matrix}\right]$
```python
v111,v112,v113,v114 = symbols('v_11:15')
v122,v123,v124 = symbols('v_22:25')
v133,v134=symbols('v_33 v_34')
v144=symbols('v_44')
```
```python
subsss={v11:v111,v12:v112,v13:v113,v14:v114,v22:v122,v23:v123,v24:v124,v33:v133,v34:v134,v44:v144,diff(c1,t):0,(c1**2):c1}
```
```python
to_symbols(kara.subs(subsss))
```
$\displaystyle \left[\begin{matrix}- 1.0 N_{b} \gamma + 4 e \mu v_{13} - 2 g v_{14} + 1.0 \gamma v_{11} - 0.5 \gamma - 2 v_{13}\\2 e \mu v_{23} - g v_{13} - g v_{24} + 0.5 \gamma v_{12} - v_{14} - v_{23}\\2 e \mu v_{11} + 2 e \mu v_{33} + g v_{12} - g v_{34} + 1.0 \gamma v_{13} + v_{11} - v_{33}\\2 e \mu v_{34} + g v_{11} - g v_{44} + 0.5 \gamma v_{14} + v_{12} - v_{34}\\- 2 g v_{23} - 2 v_{24}\\2 e \mu v_{12} + g v_{22} - g v_{33} + 0.5 \gamma v_{23} + v_{12} - v_{34}\\g v_{12} - g v_{34} + v_{22} - v_{44}\\- N_{b} \gamma + 4 e \mu v_{13} + 2 g v_{23} + \gamma v_{33} - \frac{\gamma}{2} + 2 v_{13}\\2 e \mu v_{14} + g v_{13} + g v_{24} + 0.5 \gamma v_{34} + v_{14} + v_{23}\\2 g v_{14} + 2 v_{24}\end{matrix}\right]$
```python
full_dvect=Matrix(list(vector)+list(diff(Matrix(covariance),t)))
full_dvect
```
$\displaystyle \left[\begin{matrix}\frac{d}{d t} c{\left(t \right)}\\\frac{d}{d t} \operatorname{x_{1}}{\left(t \right)}\\\frac{d}{d t} \operatorname{x_{2}}{\left(t \right)}\\\frac{d}{d t} \operatorname{p_{1}}{\left(t \right)}\\\frac{d}{d t} \operatorname{p_{2}}{\left(t \right)}\\\frac{d}{d t} \operatorname{v_{11}}{\left(t \right)}\\\frac{d}{d t} \operatorname{v_{12}}{\left(t \right)}\\\frac{d}{d t} \operatorname{v_{13}}{\left(t \right)}\\\frac{d}{d t} \operatorname{v_{14}}{\left(t \right)}\\\frac{d}{d t} \operatorname{v_{22}}{\left(t \right)}\\\frac{d}{d t} \operatorname{v_{23}}{\left(t \right)}\\\frac{d}{d t} \operatorname{v_{24}}{\left(t \right)}\\\frac{d}{d t} \operatorname{v_{33}}{\left(t \right)}\\\frac{d}{d t} \operatorname{v_{34}}{\left(t \right)}\\\frac{d}{d t} \operatorname{v_{44}}{\left(t \right)}\end{matrix}\right]$
```python
full_vect=Matrix([c1,x11,x12,p11,p12,v111,v112,v113,v114,v122,v123,v124,v133,v134,v144])
full_vect
```
$\displaystyle \left[\begin{matrix}c\\x_{1}\\x_{2}\\p_{1}\\p_{2}\\v_{11}\\v_{12}\\v_{13}\\v_{14}\\v_{22}\\v_{23}\\v_{24}\\v_{33}\\v_{34}\\v_{44}\end{matrix}\right]$
```python
full_eqs=Matrix(list(to_symbols(sol))+list(to_symbols(kara.subs(subsss))))
```
```python
tocheck=[full_eqs[i].expand().subs(Nb,Nb*c1).subs(c1**2,c1).subs(c1*x12,x12).subs(c1*x11,x11).subs(c1*p11,p11).subs(c1*p12,p12) for i in range(len(full_eqs))]
Matrix(tocheck)
```
$\displaystyle \left[\begin{matrix}0\\2 e \mu p_{1} - g p_{2} + 0.5 \gamma x_{1} - p_{1}\\- g p_{1} - p_{2}\\2 e \mu x_{1} + g x_{2} + 0.5 \gamma p_{1} + x_{1}\\g x_{1} + x_{2}\\- 1.0 N_{b} c \gamma + 4 e \mu v_{13} - 2 g v_{14} + 1.0 \gamma v_{11} - 0.5 \gamma - 2 v_{13}\\2 e \mu v_{23} - g v_{13} - g v_{24} + 0.5 \gamma v_{12} - v_{14} - v_{23}\\2 e \mu v_{11} + 2 e \mu v_{33} + g v_{12} - g v_{34} + 1.0 \gamma v_{13} + v_{11} - v_{33}\\2 e \mu v_{34} + g v_{11} - g v_{44} + 0.5 \gamma v_{14} + v_{12} - v_{34}\\- 2 g v_{23} - 2 v_{24}\\2 e \mu v_{12} + g v_{22} - g v_{33} + 0.5 \gamma v_{23} + v_{12} - v_{34}\\g v_{12} - g v_{34} + v_{22} - v_{44}\\- N_{b} c \gamma + 4 e \mu v_{13} + 2 g v_{23} + \gamma v_{33} - \frac{\gamma}{2} + 2 v_{13}\\2 e \mu v_{14} + g v_{13} + g v_{24} + 0.5 \gamma v_{34} + v_{14} + v_{23}\\2 g v_{14} + 2 v_{24}\end{matrix}\right]$
```python
len(tocheck)
```
15
```python
t=sympy.linear_eq_to_matrix(tocheck, list(full_vect))[0]
t2=sympy.linear_eq_to_matrix(tocheck, list(full_vect))[1]
```
```python
vector
```
$\displaystyle \left[\begin{matrix}\frac{d}{d t} c{\left(t \right)}\\\frac{d}{d t} \operatorname{x_{1}}{\left(t \right)}\\\frac{d}{d t} \operatorname{x_{2}}{\left(t \right)}\\\frac{d}{d t} \operatorname{p_{1}}{\left(t \right)}\\\frac{d}{d t} \operatorname{p_{2}}{\left(t \right)}\end{matrix}\right]$
```python
sol
```
$\displaystyle \left[\begin{matrix}0\\- g \operatorname{p_{2}}{\left(t \right)} + 0.5 \gamma \operatorname{x_{1}}{\left(t \right)} - \operatorname{p_{1}}{\left(t \right)}\\- g \operatorname{p_{1}}{\left(t \right)} - \operatorname{p_{2}}{\left(t \right)}\\g \operatorname{x_{2}}{\left(t \right)} + 0.5 \gamma \operatorname{p_{1}}{\left(t \right)} - \sqrt{2} \mu c{\left(t \right)} e{\left(t \right)} + \operatorname{x_{1}}{\left(t \right)}\\g \operatorname{x_{1}}{\left(t \right)} + \operatorname{x_{2}}{\left(t \right)}\end{matrix}\right]$
```python
full_vect[11]
```
$\displaystyle v_{24}$
```python
((t*full_vect)[11]).subs({p12:0,x11:0,p11:0,v122:0,x12:0,v114:0,v124:0}).subs({e1:e,de1:de}).factor()
```
$\displaystyle g v_{12} - g v_{34} - v_{44}$
```python
ricard=r[1,3].subs(vector[4],sol[4]).expand().collect(covariance[4]).collect(p1).collect(p2).subs(c*x1,x1).collect(x1).subs(c*x2,x2).collect(x2).collect(v12).subs(vector[0],0).subs(c**2,c).collect(c)
simplify(ricard.subs({p2:0,x1:0,p1:0,full_dvect[11]:0,v22:0,x2:0,v14:0,v24:0})).expand().factor()
```
$\displaystyle g \operatorname{v_{12}}{\left(t \right)} - g \operatorname{v_{34}}{\left(t \right)} - \operatorname{v_{44}}{\left(t \right)}$
```python
lista=[]
for i in range(15):
dummy=[simplify(t[i,j].subs({e1:e,de1:de})) for j in range(15)]
lista.append(dummy)
```
```python
matriz=Matrix(lista)
matriz
```
$\displaystyle \left[\begin{array}{ccccccccccccccc}0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0.5 \gamma & 0 & 2 \mu e{\left(t \right)} - 1 & - g & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & - g & -1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 2 \mu e{\left(t \right)} + 1 & g & 0.5 \gamma & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & g & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\- 1.0 N_{b} \gamma & 0 & 0 & 0 & 0 & 1.0 \gamma & 0 & 4 \mu e{\left(t \right)} - 2 & - 2 g & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0.5 \gamma & - g & -1 & 0 & 2 \mu e{\left(t \right)} - 1 & - g & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 2 \mu e{\left(t \right)} + 1 & g & 1.0 \gamma & 0 & 0 & 0 & 0 & 2 \mu e{\left(t \right)} - 1 & - g & 0\\0 & 0 & 0 & 0 & 0 & g & 1 & 0 & 0.5 \gamma & 0 & 0 & 0 & 0 & 2 \mu e{\left(t \right)} - 1 & - g\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & - 2 g & -2 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 2 \mu e{\left(t \right)} + 1 & 0 & 0 & g & 0.5 \gamma & 0 & - g & -1 & 0\\0 & 0 & 0 & 0 & 0 & 0 & g & 0 & 0 & 1 & 0 & 0 & 0 & - g & -1\\- N_{b} \gamma & 0 & 0 & 0 & 0 & 0 & 0 & 4 \mu e{\left(t \right)} + 2 & 0 & 0 & 2 g & 0 & \gamma & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & g & 2 \mu e{\left(t \right)} + 1 & 0 & 1 & g & 0 & 0.5 \gamma & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 2 g & 0 & 0 & 2 & 0 & 0 & 0\end{array}\right]$
```python
matriz[5,0]+=-gamma/2
matriz[12,0]+=-gamma/2
matriz
```
$\displaystyle \left[\begin{array}{ccccccccccccccc}0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0.5 \gamma & 0 & 2 \mu e{\left(t \right)} - 1 & - g & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & - g & -1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 2 \mu e{\left(t \right)} + 1 & g & 0.5 \gamma & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & g & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\- 1.0 N_{b} \gamma - \frac{\gamma}{2} & 0 & 0 & 0 & 0 & 1.0 \gamma & 0 & 4 \mu e{\left(t \right)} - 2 & - 2 g & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0.5 \gamma & - g & -1 & 0 & 2 \mu e{\left(t \right)} - 1 & - g & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 2 \mu e{\left(t \right)} + 1 & g & 1.0 \gamma & 0 & 0 & 0 & 0 & 2 \mu e{\left(t \right)} - 1 & - g & 0\\0 & 0 & 0 & 0 & 0 & g & 1 & 0 & 0.5 \gamma & 0 & 0 & 0 & 0 & 2 \mu e{\left(t \right)} - 1 & - g\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & - 2 g & -2 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 2 \mu e{\left(t \right)} + 1 & 0 & 0 & g & 0.5 \gamma & 0 & - g & -1 & 0\\0 & 0 & 0 & 0 & 0 & 0 & g & 0 & 0 & 1 & 0 & 0 & 0 & - g & -1\\- N_{b} \gamma - \frac{\gamma}{2} & 0 & 0 & 0 & 0 & 0 & 0 & 4 \mu e{\left(t \right)} + 2 & 0 & 0 & 2 g & 0 & \gamma & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & g & 2 \mu e{\left(t \right)} + 1 & 0 & 1 & g & 0 & 0.5 \gamma & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 2 g & 0 & 0 & 2 & 0 & 0 & 0\end{array}\right]$
```python
t0=Symbol("t0")
A=matriz[5:,5:]
A
```
$\displaystyle \left[\begin{matrix}1.0 \gamma & 0 & -2.0 & - 2.0 g & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0.5 \gamma & - g & -1 & 0 & -1 & - g & 0 & 0 & 0\\1 & g & \gamma & 0 & 0 & 0 & 0 & -1 & - g & 0\\g & 1 & 0 & 0.5 \gamma & 0 & 0 & 0 & 0 & -1 & - g\\0 & 0 & 0 & 0 & 0 & - 2 g & -2 & 0 & 0 & 0\\0 & 1 & 0 & 0 & g & 0.5 \gamma & 0 & - g & -1 & 0\\0 & g & 0 & 0 & 1 & 0 & 0 & 0 & - g & -1\\0 & 0 & 2 & 0 & 0 & 2 g & 0 & \gamma & 0 & 0\\0 & 0 & g & 1 & 0 & 1 & g & 0 & 0.5 \gamma & 0\\0 & 0 & 0 & 2 g & 0 & 0 & 2 & 0 & 0 & 0\end{matrix}\right]$
```python
matriz*full_vect
```
$\displaystyle \left[\begin{matrix}0\\- g p_{2} + 0.5 \gamma x_{1} + p_{1} \left(2 \mu e{\left(t \right)} - 1\right)\\- g p_{1} - p_{2}\\g x_{2} + 0.5 \gamma p_{1} + x_{1} \left(2 \mu e{\left(t \right)} + 1\right)\\g x_{1} + x_{2}\\c \left(- 1.0 N_{b} \gamma - \frac{\gamma}{2}\right) - 2 g v_{14} + 1.0 \gamma v_{11} + v_{13} \left(4 \mu e{\left(t \right)} - 2\right)\\- g v_{13} - g v_{24} + 0.5 \gamma v_{12} - v_{14} + v_{23} \left(2 \mu e{\left(t \right)} - 1\right)\\g v_{12} - g v_{34} + 1.0 \gamma v_{13} + v_{11} \left(2 \mu e{\left(t \right)} + 1\right) + v_{33} \left(2 \mu e{\left(t \right)} - 1\right)\\g v_{11} - g v_{44} + 0.5 \gamma v_{14} + v_{12} + v_{34} \left(2 \mu e{\left(t \right)} - 1\right)\\- 2 g v_{23} - 2 v_{24}\\g v_{22} - g v_{33} + 0.5 \gamma v_{23} + v_{12} \left(2 \mu e{\left(t \right)} + 1\right) - v_{34}\\g v_{12} - g v_{34} + v_{22} - v_{44}\\c \left(- N_{b} \gamma - \frac{\gamma}{2}\right) + 2 g v_{23} + \gamma v_{33} + v_{13} \left(4 \mu e{\left(t \right)} + 2\right)\\g v_{13} + g v_{24} + 0.5 \gamma v_{34} + v_{14} \left(2 \mu e{\left(t \right)} + 1\right) + v_{23}\\2 g v_{14} + 2 v_{24}\end{matrix}\right]$
```python
Matrix(tocheck)
```
$\displaystyle \left[\begin{matrix}0\\2 e \mu p_{1} - g p_{2} + 0.5 \gamma x_{1} - p_{1}\\- g p_{1} - p_{2}\\2 e \mu x_{1} + g x_{2} + 0.5 \gamma p_{1} + x_{1}\\g x_{1} + x_{2}\\- 1.0 N_{b} c \gamma + 4 e \mu v_{13} - 2 g v_{14} + 1.0 \gamma v_{11} - 0.5 \gamma - 2 v_{13}\\2 e \mu v_{23} - g v_{13} - g v_{24} + 0.5 \gamma v_{12} - v_{14} - v_{23}\\2 e \mu v_{11} + 2 e \mu v_{33} + g v_{12} - g v_{34} + 1.0 \gamma v_{13} + v_{11} - v_{33}\\2 e \mu v_{34} + g v_{11} - g v_{44} + 0.5 \gamma v_{14} + v_{12} - v_{34}\\- 2 g v_{23} - 2 v_{24}\\2 e \mu v_{12} + g v_{22} - g v_{33} + 0.5 \gamma v_{23} + v_{12} - v_{34}\\g v_{12} - g v_{34} + v_{22} - v_{44}\\- N_{b} c \gamma + 4 e \mu v_{13} + 2 g v_{23} + \gamma v_{33} - \frac{\gamma}{2} + 2 v_{13}\\2 e \mu v_{14} + g v_{13} + g v_{24} + 0.5 \gamma v_{34} + v_{14} + v_{23}\\2 g v_{14} + 2 v_{24}\end{matrix}\right]$
```python
H0=simplify(matriz.subs({e:0}))
He=simplify(matriz.subs({de:0})-H0)
```
```python
He=He/e
```
```python
H0
```
$\displaystyle \left[\begin{array}{ccccccccccccccc}0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0.5 \gamma & 0 & -1 & - g & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & - g & -1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 1 & g & 0.5 \gamma & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & g & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\- \gamma \left(1.0 N_{b} + 0.5\right) & 0 & 0 & 0 & 0 & 1.0 \gamma & 0 & -2 & - 2 g & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0.5 \gamma & - g & -1 & 0 & -1 & - g & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 1 & g & 1.0 \gamma & 0 & 0 & 0 & 0 & -1 & - g & 0\\0 & 0 & 0 & 0 & 0 & g & 1 & 0 & 0.5 \gamma & 0 & 0 & 0 & 0 & -1 & - g\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & - 2 g & -2 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & g & 0.5 \gamma & 0 & - g & -1 & 0\\0 & 0 & 0 & 0 & 0 & 0 & g & 0 & 0 & 1 & 0 & 0 & 0 & - g & -1\\- \gamma \left(N_{b} + \frac{1}{2}\right) & 0 & 0 & 0 & 0 & 0 & 0 & 2 & 0 & 0 & 2 g & 0 & \gamma & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & g & 1 & 0 & 1 & g & 0 & 0.5 \gamma & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 2 g & 0 & 0 & 2 & 0 & 0 & 0\end{array}\right]$
```python
He
```
$\displaystyle \left[\begin{array}{ccccccccccccccc}0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 2 \mu & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 2 \mu & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 4 \mu & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 2 \mu & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 2 \mu & 0 & 0 & 0 & 0 & 0 & 0 & 2 \mu & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 2 \mu & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 2 \mu & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 4 \mu & 0 & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 2 \mu & 0 & 0 & 0 & 0 & 0 & 0\\0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\end{array}\right]$
```python
hlineal=lambdify((mu),He)
import dill
dill.settings['recurse'] = True
```
```python
dill.dump(hlineal, open("H_campo", "wb"))
```
```python
# Hde (the part of the matrix proportional to de, the field derivative) is assumed
# to have been extracted in an earlier cell, analogously to He above
Hde=Hde/de
```
```python
Hde=simplify(Hde)
```
```python
Hde
```
```python
func=lambdify((mu,g),Hde)
```
```python
Matrix(func(2,0.1))
```
```python
H0
```
```python
h0lineal=lambdify((g,gamma,Nb),H0)
h0lineal(0.2,1,0.5)
```
array([[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 0. , 0.5, 0. , -1. , -0.2, 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , -0.2, -1. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 0. , 1. , 0.2, 0.5, 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 0. , 0.2, 1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ],
[-1. , 0. , 0. , 0. , 0. , 1. , 0. , -2. , -0.4, 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0.5, -0.2, -1. , 0. , -1. ,
-0.2, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 1. , 0.2, 1. , 0. , 0. , 0. ,
0. , -1. , -0.2, 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0.2, 1. , 0. , 0.5, 0. , 0. ,
0. , 0. , -1. , -0.2],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , -0.4,
-2. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 1. , 0. , 0. , 0.2, 0.5,
0. , -0.2, -1. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0.2, 0. , 0. , 1. , 0. ,
0. , 0. , -0.2, -1. ],
[-1. , 0. , 0. , 0. , 0. , 0. , 0. , 2. , 0. , 0. , 0.4,
0. , 1. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.2, 1. , 0. , 1. ,
0.2, 0. , 0.5, 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.4, 0. , 0. ,
2. , 0. , 0. , 0. ]])
```python
dill.dump(h0lineal, open("H_0", "wb"))
```
```python
H0
```
```python
from Integrals import overlap2
```
```python
vector1=[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
vector2=[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
```
```python
V=[[vector1[5],vector1[6],vector1[7],vector1[8]],[1,vector1[9],vector1[10],vector1[11]],[1,1,vector1[12],vector1[13]],[1,1,1,vector1[14]]]
R=[vector1[i] for i in range(1,5)]
B=[[vector2[5],vector2[6],vector2[7],vector2[8]],[1,vector2[9],vector2[10],vector2[11]],[1,1,vector2[12],vector2[13]],[1,1,1,vector2[14]]]
S=[vector2[i] for i in range(1,5)]
```
```python
V
```
```python
R
```
```python
B
```
```python
S
```
```python
overlap2(vector1,vector2)
```
```python
(e*de)
```
```python
sp.sin(2)*sp.cos(2)
```
# Objective: To filter a textured image without losing its texture
```python
# Import the required libs
from torch_pdegraph.pdes import pdeanisodiff
from torch_pdegraph.utilities import *
import numpy as np
from matplotlib import image as mpimg
import faiss
import matplotlib.pyplot as plt
import torch
```
```python
# load the texture based image, add some noise
# and extract patches
x = mpimg.imread("../data/barbara.png")
x = addNoise(5,x)
# feature that shall be used to create the knn-graph
patches = imgPatches(x,(5,5))
dispImg(x)
```
# Create the graph
- In order to simply show the effectiveness of PDEs on graphs, I am only creating a simple K-NN based graph. This may or may not be the best graph for a given problem at hand.
- One can create the graph using whatever approach is apt, or even use third-party network datasets and run a PDE on that graph. PDEs extend to any given graph/network at hand as long as that graph has edges and weights (`edge_index` and `edge_attr`).

Although torch_cluster comes with a knn-graph method, I found it to be limited and slow when the node features have high dimensions. We shall be using Facebook's faiss library, which is blazingly fast for KNN-graph construction.
```python
# Create a knn graph of the patches of the texture-based image
# and assign weights to the edges
res = faiss.StandardGpuResources()
index = faiss.IndexFlatL2(patches.shape[1])
gpu_index_flat = faiss.index_cpu_to_gpu(res,0,index)
gpu_index_flat.add(patches.astype(np.float32))
k = 30
D, I = gpu_index_flat.search(patches.astype(np.float32),k+1)
#Graph
edge_index = np.vstack((I[:,1:].flatten(), np.repeat(I[:,0].flatten(),k)))
#edge_attr = np.exp(-(D[:,1:].flatten()/np.inf))
edge_attr = np.ones(edge_index.shape[1]) # Let the weights be equal to 1 !
edge_index = torch.tensor(edge_index, dtype=torch.long).to('cuda:0')
edge_attr = torch.tensor(edge_attr, dtype=torch.float32).to('cuda:0')
edge_attr = edge_attr.view(-1,1)
graph = Graph(edge_index, edge_attr)
```
# Run a predefined PDE on the graph
We shall be using the following iterative scheme for anisotropic diffusion (a Gauss-Jacobi type update, so no explicit time step $dt$ appears)
\begin{equation}
\mathbf{x}^{n+1}_{i} = \frac{\lambda \mathbf{x}^{0}_{i} + \sum_{j \in N(i)} w_{i,j}^{p/2}|\mathbf{x}^{n}_{j}-\mathbf{x}^{n}_{i}|^{p-2}\mathbf{x}^{n}_{j}}{\lambda+ \sum_{j \in N(i)} w_{i,j}^{p/2}|\mathbf{x}^{n}_{j}-\mathbf{x}^{n}_{i}|^{p-2}}
\end{equation}
- $\mathbf{x}_{i}$ is the node feature/signal at the $i^{th}$ node
- $w_{i,j}$ is the scalar edge_attr on the edge $e(i\to j)$
**Example:**
```python
from torch_pdegraph.pdes import pdeanisodiff
# create a pde instance
pde = pdeanisodiff.PDE(graph, **hp)
# run the pde on the features on nodes for some iterations
new_features = pde(features, itr)
"""
graph is a structure which has graph.edge_index and graph.edge_attr(weights)
hp is hyperparams in the equation (\lambda and p)
"""
```
The above equation is basically a Gauss-Jacobi based iterative scheme on graphs for this energy:
\begin{equation}
\mathcal{E}_{w,p}(\mathbf{x, x^{0}, \lambda}) = \frac{\lambda}{2}\|\mathbf{x} - \mathbf{x}^{0}\|_{2}^{2} + \frac{1}{p}\sum_{i \in V}\left( \sum_{j \in N(i)} w_{i,j}^{p/2}\bigl |\mathbf{x}_{j}{-}\mathbf{x}_{i}\bigr|^p \right)
\end{equation}
To know more about it, see [Elmoataz](https://hal.archives-ouvertes.fr/hal-00163573/document).
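As a quick sketch of why (with the nonlinear weights frozen at iteration $n$ and the constant factor coming from the symmetric edge terms absorbed into $\gamma$), setting $\partial\mathcal{E}/\partial\mathbf{x}_{i}=0$ gives
\begin{equation}
\lambda\,(\mathbf{x}_{i}-\mathbf{x}^{0}_{i}) + \sum_{j \in N(i)} \gamma^{n}_{i,j}\,(\mathbf{x}_{i}-\mathbf{x}^{n}_{j}) = 0,
\qquad
\gamma^{n}_{i,j} = w_{i,j}^{p/2}\,\bigl|\mathbf{x}^{n}_{j}-\mathbf{x}^{n}_{i}\bigr|^{p-2},
\end{equation}
and solving this for $\mathbf{x}_{i}$ reproduces exactly the fixed-point update $\mathbf{x}^{n+1}_{i}$ written above.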
```python
#hyperparams
hp = dict(x0=torch.tensor(toFmat(x),dtype=torch.float32).cuda(), lamb=0, epsilon=5e-07, p_val=1)
# Run the implicit PDE on graph
itr = 30
features = torch.tensor(toFmat(x),dtype=torch.float32).cuda()
pde = pdeanisodiff.PDE(graph, **hp)
new_features = pde(features, itr)
```
100%|██████████| 30/30 [00:00<00:00, 468.53it/s]
```python
# Now reshape the new_features to the square shape
y = new_features.cpu().numpy()
y = toImg(y,x.shape)
#compare the noisy and filtered image
f = plt.figure(figsize=(8,8))
f.add_subplot(1,2, 1)
plt.imshow(y, cmap="gray")
f.add_subplot(1,2, 2)
plt.imshow(x, cmap="gray")
plt.show(block=True)
```
# Filter an image over a surface with same pde
```python
# Filter an image over a surface
p = np.load("../data/position_lena.npz")
t = np.load("../data/texture_lena.npz")
p = p["position"].astype(np.float32)
signal = t["texture"]
#param = dict(position=p, texture=signal)
#displaySur(**param)
#add noise
signal = addNoise(10, signal)
# Create a knn graph
res = faiss.StandardGpuResources()
index = faiss.IndexFlatL2(p.shape[1])
gpu_index_flat = faiss.index_cpu_to_gpu(res,0,index)
gpu_index_flat.add(p)
k = 10
_, I = gpu_index_flat.search(p,k+1)
edge_index = np.vstack((I[:,1:].flatten(), np.repeat(I[:,0].flatten(),k)))
edge_attr = np.exp(-np.sum(((signal[I]-signal[I][:,0,None])**2),axis=2)/(0.5)**2)[:,1:].flatten()
edge_index = torch.tensor(edge_index, dtype=torch.long).to('cuda:0')
edge_attr = torch.tensor(edge_attr, dtype=torch.float32).to('cuda:0')
edge_attr = edge_attr.view(-1,1)
graph = Graph(edge_index, edge_attr)
#hyperparams
hp = dict(x0=torch.tensor(signal,dtype=torch.float32).cuda(), lamb=0, epsilon=5e-07, p_val=1)
# Run the implicit PDE on graph
itr = 100
features = torch.tensor(signal,dtype=torch.float32).cuda()
pde = pdeanisodiff.PDE(graph, **hp)
new_sig = pde(features, itr)
```
100%|██████████| 100/100 [00:00<00:00, 1439.50it/s]
```python
displayJSur(**dict(position=p, texture=new_sig.cpu().numpy()))
```
JVisualizer with 1 geometries
```python
displayJSur(**dict(position=p, texture=signal))
```
JVisualizer with 1 geometries
```python
# Unlike an image, a pcd display cannot be saved in an ipython notebook, hence the notebook must be running
# to view the pcd. Expect to see the surface filtered as shown in the following image.
from matplotlib import image as mpimg
from matplotlib import pyplot as plt
res = mpimg.imread("../gallery/lena_sur.png")
f = plt.figure(figsize=(12,12))
plt.imshow(res)
```
# Exercises
<!-- --- begin exercise --- -->
## Problem 11: Define nodes and elements
<div id="fem:approx:fe:exer:mesh1"></div>
Consider a domain $\Omega =[0,2]$ divided into the three elements
$[0,1]$, $[1,1.2]$, and $[1.2,2]$.
For P1 and P2 elements, set up the list of coordinates and nodes
(`nodes`) and the numbers of the nodes that belong to each element
(`elements`) in two cases: 1) nodes and elements numbered from left to
right, and 2) nodes and elements numbered from right to left.
<!-- --- begin solution of exercise --- -->
**Solution.**
We can write up figure sketches and the data structure in code:
```python
# P1 elements
# Left to right numbering
"""
elements: |--0--|--1--|--2--|
nodes: 0 1 2 3
"""
nodes = [0, 1, 1.2, 2]
elements = [[0,1], [1,2], [2,3]]
# Right to left numbering
"""
elements: |--2--|--1--|--0--|
nodes: 3 2 1 0
"""
nodes = [2, 1.2, 1, 0]
elements = [[1,0], [2,1], [3,2]]
# P2 elements
# Left to right numbering
"""
elements: |--0--|--1--|--2--|
nodes: 0 1 2 3 4 5 6
"""
nodes = [0, 0.5, 1, 1.1, 1.2, 1.6, 2]
elements = [[0,1,2], [2,3,4], [4,5,6]]
# Right to left numbering
"""
elements: |--2--|--1--|--0--|
nodes: 6 5 4 3 2 1 0
"""
nodes = [2, 1.6, 1.2, 1.1, 1, 0.5, 0]
elements = [[2,1,0], [4,3,2], [6,5,4]]
```
<!-- --- end solution of exercise --- -->
Filename: `fe_numberings1`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Problem 12: Define vertices, cells, and dof maps
<div id="fem:approx:fe:exer:mesh2"></div>
Repeat [Problem 11: Define nodes and elements](#fem:approx:fe:exer:mesh1), but define the
data structures `vertices`, `cells`, and `dof_map` instead of
`nodes` and `elements`.
<!-- --- begin solution of exercise --- -->
**Solution.**
Written in Python, the solution becomes
```python
# P1 elements
# Left to right numbering
"""
elements: |--0--|--1--|--2--|
vertices: 0 1 2 3
dofs: 0 1 2 3
"""
# elements: 0 1 2
# vertices: 0 1 2 3
vertices = [0, 1, 1.2, 2]
cells = [[0,1], [1,2], [2,3]]
dof_map = [[0,1], [1,2], [2,3]]
# Right to left numbering
"""
elements: |--2--|--1--|--0--|
vertices: 3 2 1 0
dofs: 3 2 1 0
"""
vertices = [2, 1.2, 1, 0]
cells = [[1,0], [2,1], [3,2]]
dof_map = [[1,0], [2,1], [3,2]]
# P2 elements
# Left to right numbering
# elements: 0 1 2
"""
elements: |--0--|--1--|--2--|
vertices: 0 1 2 3
dofs: 0 1 2 3 4 5 6
"""
vertices = [0, 1, 1.2, 2]
cells = [[0,1], [1,2], [2,3]]
dof_map = [[0,1,2], [2,3,4], [4,5,6]]
# Right to left numbering
# elements: 2 1 0
"""
elements: |--2--|--1--|--0--|
vertices: 3 2 1 0
dofs: 6 5 4 3 2 1 0
"""
vertices = [2, 1.2, 1, 0]
cells = [[1,0], [2,1], [3,2]]
dof_map = [[2,1,0], [4,3,2], [6,5,4]]
```
<!-- --- end solution of exercise --- -->
Filename: `fe_numberings2`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Problem 13: Construct matrix sparsity patterns
<div id="fem:approx:fe:exer:defmesh:sparsity"></div>
[Problem 11: Define nodes and elements](#fem:approx:fe:exer:mesh1) describes an element mesh
with a total of three elements, but with two different element and
node orderings. For each of the two orderings,
make a matrix with one row and one column per node ($4\times 4$ for P1,
$7\times 7$ for P2 elements) and fill in the entries that will be nonzero.
<!-- --- begin hint in exercise --- -->
**Hint.**
A matrix entry $(i,j)$ is nonzero if $i$ and $j$ are nodes in the
same element.
<!-- --- end hint in exercise --- -->
<!-- --- begin solution of exercise --- -->
**Solution.**
If we create an empty matrix, we can run through all elements and
then over all local node pairs and mark that the corresponding
entry $(i,j)$ in the global matrix is a nonzero entry.
The `elements` data structure is sufficient. Below is a program
that fills matrix entries with an `X` and prints the matrix sparsity
pattern.
```python
def sparsity_pattern(elements, N_n):
import numpy as np
matrix = np.zeros((N_n, N_n), dtype=str)
matrix[:,:] = '0'
for e in elements:
for i in e:
for j in e:
matrix[i,j] = 'X'
matrix = matrix.tolist()
matrix = '\n'.join([' '.join([matrix[i][j]
for j in range(len(matrix[i]))])
for i in range(len(matrix))])
return matrix
print('\nP1 elements, left-to-right numbering')
N_n = 4
elements = [[0,1], [1,2], [2,3]]
print((sparsity_pattern(elements, N_n)))
print('\nP1 elements, right-to-left numbering')
elements = [[1,0], [2,1], [3,2]]
print((sparsity_pattern(elements, N_n)))
print('\nP2 elements, left-to-right numbering')
N_n = 7
elements = [[0,1,2], [2,3,4], [4,5,6]]
print((sparsity_pattern(elements, N_n)))
print('\nP2 elements, right-to-left numbering')
elements = [[2,1,0], [4,3,2], [6,5,4]]
print((sparsity_pattern(elements, N_n)))
```
The output becomes
P1 elements, left-to-right numbering
X X 0 0
X X X 0
0 X X X
0 0 X X
P1 elements, right-to-left numbering
X X 0 0
X X X 0
0 X X X
0 0 X X
P2 elements, left-to-right numbering
X X X 0 0 0 0
X X X 0 0 0 0
X X X X X 0 0
0 0 X X X 0 0
0 0 X X X X X
0 0 0 0 X X X
0 0 0 0 X X X
P2 elements, right-to-left numbering
X X X 0 0 0 0
X X X 0 0 0 0
X X X X X 0 0
0 0 X X X 0 0
0 0 X X X X X
0 0 0 0 X X X
0 0 0 0 X X X
<!-- --- end solution of exercise --- -->
Filename: `fe_sparsity_pattern`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Problem 14: Perform symbolic finite element computations
<div id="fem:approx:fe:exer:Asinwt:symbolic"></div>
Perform symbolic calculations to find formulas for the coefficient
matrix and right-hand side when approximating $f(x) = \sin (x)$ on
$\Omega=[0, \pi]$ by two P1 elements of size $\pi/2$. Solve the
system and compare $u(\pi/2)$ with the exact value 1.
<!-- --- begin solution of exercise --- -->
**Solution.**
Here are suitable `sympy` commands:
```python
import sympy as sym
# Mesh: |--------|-------|
# 0 pi/2 pi
#
# Basis functions:
#
# phi_0 phi_1 phi_2
# \ /\ /
# \ / \ /
# \ / \ /
# \/ \/
# |-------|-------|
# 0 pi/2 pi
x = sym.Symbol('x')
A = sym.zeros(3,3)
f = sym.sin
phi_0 = 1 - (2*x)/sym.pi
phi_1l = 2*x/sym.pi # left part of phi_1
phi_1r = 2 - (2*x)/sym.pi # right part of phi_1
phi_2 = x/(sym.pi/2) - 1
node_0 = 0
node_1 = sym.pi/2
node_2 = sym.pi
# Diagonal terms
A[0,0] = sym.integrate(phi_0**2, (x, node_0, node_1))
A[1,1] = sym.integrate(phi_1l**2, (x, node_0, node_1)) + \
sym.integrate(phi_1r**2, (x, node_1, node_2))
A[2,2] = sym.integrate(phi_2**2, (x, node_1, node_2))
# Off-diagonal terms
A[0,1] = sym.integrate(phi_0*phi_1l, (x, node_0, node_1))
A[1,0] = A[0,1]
A[1,2] = sym.integrate(phi_1r*phi_2, (x, node_1, node_2))
A[2,1] = A[1,2]
print(('A:\n', A)) # Can compare with general matrix, h=pi/2
b = sym.zeros(3,1)
b[0] = sym.integrate(phi_0*f(x), (x, node_0, node_1))
b[1] = sym.integrate(phi_1l*f(x), (x, node_0, node_1)) + \
sym.integrate(phi_1r*f(x), (x, node_1, node_2))
b[2] = sym.integrate(phi_2*f(x), (x, node_1, node_2))
print(('b:\n', b))
c = A.LUsolve(b)
print(('c:\n', c))
for i in range(len(c)):
print(('c[%d]=%g' % (i, c[i].evalf())))
print(('u(pi/2)=%g' % c[1]))
# For reports
print((sym.latex(A)))
print((sym.latex(b)))
print((sym.latex(c)))
```
Running the program, we get the matrix system $Ac=b$, here displayed with the computed coefficient vector $c$ inserted:
$$
\left[\begin{matrix}\frac{\pi}{6} & \frac{\pi}{12} & 0\\\frac{\pi}{12} & \frac{\pi}{3} & \frac{\pi}{12}\\0 & \frac{\pi}{12} & \frac{\pi}{6}\end{matrix}\right]
\left[\begin{matrix}\frac{1}{\pi} \left(- \frac{24}{\pi} + 8\right)\\\frac{-28 + \frac{168}{\pi}}{7 \pi}\\\frac{1}{\pi} \left(- \frac{24}{\pi} + 8\right)\end{matrix}\right]
=
\left[\begin{matrix}- \frac{2}{\pi} + 1\\\frac{4}{\pi}\\- \frac{2}{\pi} + 1\end{matrix}\right]
$$
The solution at the midpoint is $1.15847$, i.e., 16% error.
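For completeness, the quoted value follows directly from the middle coefficient in $c$:

$$
u(\pi/2) = c_1 = \frac{168/\pi - 28}{7\pi} \approx \frac{25.48}{21.99} \approx 1.158 .
$$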
<!-- --- end solution of exercise --- -->
Filename: `fe_sin_P1`.
<!-- Hint: wolframalpha or sympy can help with (1-x)*sin(a*x+b), -->
<!-- which is the integral -->
<!-- that arises on the right-hand side. -->
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Problem 15: Approximate a steep function by P1 and P2 elements
<div id="fem:approx:exer:tanh:P1P2"></div>
Given
$$
f(x) = \tanh(s(x-\frac{1}{2}))
$$
use the Galerkin or least squares method with finite elements to find
an approximate function $u(x)$. Choose $s=20$ and try
$N_e=4,8,16$ P1 elements and
$N_e=2,4,8$ P2 elements.
Integrate $f{\varphi}_i$ numerically.
<!-- --- begin hint in exercise --- -->
**Hint.**
You can automate the computations by calling the `approximate` method
in the `fe_approx1D_numint` module.
<!-- --- end hint in exercise --- -->
<!-- --- begin solution of exercise --- -->
**Solution.**
The set of calls to `approximate` becomes
```python
from fe_approx1D_numint import approximate
from sympy import tanh, Symbol
x = Symbol('x')
steepness = 20
arg = steepness*(x-0.5)
approximate(tanh(arg), symbolic=False, numint='GaussLegendre2',
d=1, N_e=4, filename='fe_p1_tanh_4e')
approximate(tanh(arg), symbolic=False, numint='GaussLegendre2',
d=1, N_e=8, filename='fe_p1_tanh_8e')
approximate(tanh(arg), symbolic=False, numint='GaussLegendre2',
d=1, N_e=16, filename='fe_p1_tanh_16e')
approximate(tanh(arg), symbolic=False, numint='GaussLegendre3',
d=2, N_e=2, filename='fe_p2_tanh_2e')
approximate(tanh(arg), symbolic=False, numint='GaussLegendre3',
d=2, N_e=4, filename='fe_p2_tanh_4e')
approximate(tanh(arg), symbolic=False, numint='GaussLegendre3',
d=2, N_e=8, filename='fe_p2_tanh_8e')
```
<!-- dom:FIGURE: [fig/fe_p1_tanh.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- dom:FIGURE: [fig/fe_p2_tanh.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- --- end solution of exercise --- -->
Filename: `fe_tanh_P1P2`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Problem 16: Approximate a steep function by P3 and P4 elements
<div id="fem:approx:exer:tanh:P3P4"></div>
**a)**
Solve [Problem 15: Approximate a steep function by P1 and P2 elements](#fem:approx:exer:tanh:P1P2) using $N_e=1,2,4$ P3 and P4
elements.
<!-- --- begin solution of exercise --- -->
**Solution.**
We can easily adopt the code from [Problem 15: Approximate a steep function by P1 and P2 elements](#fem:approx:exer:tanh:P1P2):
```python
from fe_approx1D_numint import approximate, u_glob
from sympy import tanh, Symbol, lambdify
x = Symbol('x')
steepness = 20
arg = steepness*(x-0.5)
approximate(tanh(arg), symbolic=False, numint='GaussLegendre4',
d=3, N_e=1, filename='fe_p3_tanh_1e')
approximate(tanh(arg), symbolic=False, numint='GaussLegendre4',
d=3, N_e=2, filename='fe_p3_tanh_2e')
approximate(tanh(arg), symbolic=False, numint='GaussLegendre4',
d=3, N_e=4, filename='fe_p3_tanh_4e')
approximate(tanh(arg), symbolic=False, numint='GaussLegendre5',
d=4, N_e=1, filename='fe_p4_tanh_1e')
approximate(tanh(arg), symbolic=False, numint='GaussLegendre5',
d=4, N_e=2, filename='fe_p4_tanh_2e')
approximate(tanh(arg), symbolic=False, numint='GaussLegendre5',
d=4, N_e=4, filename='fe_p4_tanh_4e')
```
<!-- dom:FIGURE: [fig/fe_p3_tanh.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- dom:FIGURE: [fig/fe_p4_tanh.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- --- end solution of exercise --- -->
**b)**
How will an interpolation method work in
this case with the same number of nodes?
<!-- --- begin solution of exercise --- -->
**Solution.**
The coefficients arising from the interpolation method are trivial to compute
since $c_i=f(x_i)$, where $x_i$ are the global nodes. The function
`u_glob` in the `fe_approx1D_numint` module can be used to compute
appropriate arrays for plotting the resulting finite element function.
We create plots where the finite element approximation is shown along
with $f(x)$ and the interpolation points.
Since `u_glob` requires the `vertices`, `cells`, and `dof_map` data
structures, we must compute these for each combination of the number of
elements ($N_e$) and the polynomial degree ($d$).
```python
# Interpolation method
import numpy as np
import matplotlib.pyplot as plt
f = lambdify([x], tanh(arg), modules='numpy')
# Compute exact f on a fine mesh
x_fine = np.linspace(0, 1, 101)
f_fine = f(x_fine)
for d in 3, 4:
for N_e in 1, 2, 4:
h = 1.0/N_e # element length
vertices = [i*h for i in range(N_e+1)]
cells = [[e, e+1] for e in range(N_e)]
dof_map = [[d*e + i for i in range(d+1)] for e in range(N_e)]
N_n = d*N_e + 1 # Number of nodes
x_nodes = np.linspace(0, 1, N_n) # Node coordinates
U = f(x_nodes) # Interpolation method samples node values
x, u, _ = u_glob(U, vertices, cells, dof_map,
resolution_per_element=51)
plt.figure()
plt.plot(x, u, '-', x_fine, f_fine, '--',
x_nodes, U, 'bo')
plt.legend(['%d P%d elements' % (N_e, d),
'exact', 'interpolation points'],
loc='upper left')
plt.savefig('tmp_%d_P%d.pdf' % (N_e, d))
plt.savefig('tmp_%d_P%d.png' % (N_e, d))
plt.show()
```
<!-- dom:FIGURE: [fig/tanh_fe_interpol_P3.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- dom:FIGURE: [fig/tanh_fe_interpol_P4.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- --- end solution of exercise --- -->
Filename: `fe_tanh_P3P4`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 17: Investigate the approximation error in finite elements
<div id="fem:approx:fe:exer:Asinwt:interpol:error"></div>
The theory ([109](#fem:approx:fe:error:theorem)) from the section [Computing the error of the approximation](#fem:approx:fe:error) predicts that the error in the P$d$
approximation of a function should behave as $h^{d+1}$, where $h$ is
the length of the element. Use experiments to verify this asymptotic
behavior (i.e., for small enough $h$). Choose three examples:
$f(x)=Ae^{-\omega x}$ on $[0,3/\omega]$, $f(x) = A\sin (\omega x)$ on
$\Omega=[0, 2\pi/\omega]$ for constant $A$ and $\omega$, and
$f(x)=\sqrt{x}$ on $[0,1]$.
<!-- --- begin hint in exercise --- -->
**Hint 1.**
Run a series of experiments: $(h_i,E_i)$, $i=0,\ldots,m$, where $E_i$
is the $L^2$ norm of the error corresponding to element length $h_i$.
Assume an error model $E=Ch^r$ and compute $r$ from two successive
experiments:
$$
r_i = \ln (E_{i+1}/E_i)/\ln (h_{i+1}/h_i),\quad i=0,\ldots,m-1{\thinspace .}
$$
Hopefully, the sequence $r_0,\ldots,r_{m-1}$ converges to the true
$r$, and $r_{m-1}$ can be taken as an approximation to $r$.
Run such experiments for different $d$ for the different $f(x)$ functions.
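
A minimal sketch of this rate computation, assuming the experimental results
are already collected in two equally long lists `h` and `E`:

```python
from math import log

def convergence_rates(h, E):
    """Return r_i = ln(E[i+1]/E[i])/ln(h[i+1]/h[i]) for successive experiments."""
    return [log(E[i+1]/E[i])/log(h[i+1]/h[i]) for i in range(len(h)-1)]

# Example: an error series behaving as E = C*h^2 gives rates close to 2
h = [0.1, 0.05, 0.025]
E = [1e-2, 2.5e-3, 6.25e-4]
print(convergence_rates(h, E))  # [2.0, 2.0]
```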
<!-- --- end hint in exercise --- -->
<!-- --- begin hint in exercise --- -->
**Hint 2.**
The `approximate` function in `fe_approx1D_numint.py` is handy for
calculating the numerical solution. This function returns the
finite element solution as the coefficients $\left\{ {c}_i \right\}_{i\in{\mathcal{I}_s}}$.
To compute $u$, use `u_glob` from the same module.
Use the Trapezoidal rule to integrate the $L^2$ error:
```python
xc, u = u_glob(c, vertices, cells, dof_map)
e = f_func(xc) - u
L2_error = 0
e2 = e**2
for i in range(len(xc)-1):
L2_error += 0.5*(e2[i+1] + e2[i])*(xc[i+1] - xc[i])
L2_error = np.sqrt(L2_error)
```
The reason for this Trapezoidal integration is
that `u_glob` returns coordinates `xc` and corresponding `u` values
where some of the coordinates (the cell vertices) coincide, because
the solution is computed one element at a time, using all local
nodes. Also note that there are many coordinates in `xc` per cell,
so that we can accurately compute the error inside each cell.
<!-- --- end hint in exercise --- -->
<!-- --- begin solution of exercise --- -->
**Solution.**
Here is an appropriate program:
```python
from fe_approx1D_numint import approximate, mesh_uniform, u_glob
from sympy import sqrt, exp, sin, Symbol, lambdify, simplify
import numpy as np
from math import log
x = Symbol('x')
A = 1
w = 1
cases = {'sqrt': {'f': sqrt(x), 'Omega': [0,1]},
'exp': {'f': A*exp(-w*x), 'Omega': [0, 3.0/w]},
'sin': {'f': A*sin(w*x), 'Omega': [0, 2*np.pi/w]}}
results = {}
d_values = [1, 2, 3, 4]
for case in cases:
f = cases[case]['f']
f_func = lambdify([x], f, modules='numpy')
Omega = cases[case]['Omega']
results[case] = {}
for d in d_values:
results[case][d] = {'E': [], 'h': [], 'r': []}
for N_e in [4, 8, 16, 32, 64, 128]:
try:
c = approximate(
f, symbolic=False,
numint='GaussLegendre%d' % (d+1),
d=d, N_e=N_e, Omega=Omega,
filename='tmp_%s_d%d_e%d' % (case, d, N_e))
            except np.linalg.LinAlgError as e:
print((str(e)))
continue
vertices, cells, dof_map = mesh_uniform(
N_e, d, Omega, symbolic=False)
xc, u, _ = u_glob(c, vertices, cells, dof_map, 51)
e = f_func(xc) - u
# Trapezoidal integration of the L2 error over the
# xc/u patches
e2 = e**2
L2_error = 0
for i in range(len(xc)-1):
L2_error += 0.5*(e2[i+1] + e2[i])*(xc[i+1] - xc[i])
L2_error = np.sqrt(L2_error)
h = (Omega[1] - Omega[0])/float(N_e)
results[case][d]['E'].append(L2_error)
results[case][d]['h'].append(h)
# Compute rates
h = results[case][d]['h']
E = results[case][d]['E']
for i in range(len(h)-1):
r = log(E[i+1]/E[i])/log(h[i+1]/h[i])
results[case][d]['r'].append(round(r, 2))
print(results)
for case in results:
for d in sorted(results[case]):
print(('case=%s d=%d, r: %s' % \
(case, d, results[case][d]['r'])))
```
The output becomes
case=sqrt d=1, r: [1.0, 1.0, 1.0, 1.0, 1.0]
case=sqrt d=2, r: [1.0, 1.0, 1.0, 1.0, 1.0]
case=sqrt d=3, r: [1.0, 1.0, 1.0, 1.0, 1.0]
case=sqrt d=4, r: [1.0, 1.0, 1.0, 1.0, 1.0]
case=exp d=1, r: [2.01, 2.01, 2.0, 2.0, 2.0]
case=exp d=2, r: [2.81, 2.89, 2.94, 2.97, 2.98]
case=exp d=3, r: [3.98, 4.0, 4.0, 4.0, 4.0]
case=exp d=4, r: [4.87, 4.93, 4.96, 4.98, 4.99]
case=sin d=1, r: [2.15, 2.06, 2.02, 2.0, 2.0]
case=sin d=2, r: [2.68, 2.83, 2.93, 2.97, 2.99]
case=sin d=3, r: [4.06, 4.04, 4.01, 4.0, 4.0]
case=sin d=4, r: [4.79, 4.9, 4.96, 4.98, 4.99]
showing that the convergence rate stabilizes quite quickly at $N_e=128$
cells. While the theory predicts the rate $d+1$, this is only
fulfilled for the exponential and sine functions; the square root
function gives a rate of 1 regardless of $d$. The reason is that the
estimate ([109](#fem:approx:fe:error:theorem)) contains the integral of
the derivatives of $f$ over $[0,1]$. For $f=\sqrt{x}$, we
have $f'=\frac{1}{2} x^{-1/2}$, $f''=-\frac{1}{4}x^{-3/2}$, and all integrals
of $f''$ and higher derivatives are infinite on $[0,L]$. Our experiments
show that the method still converges, but $f$ is not smooth enough for
higher-order elements to give superior convergence rates.
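For instance, already the second derivative of $f=\sqrt{x}$ fails to be
square integrable:

$$
\|f''\|_{L^2(0,L)}^2 = \int_0^L \Big(\tfrac{1}{4}x^{-3/2}\Big)^2\,{\rm d}x
= \frac{1}{16}\int_0^L x^{-3}\,{\rm d}x = \infty,
$$

so the bound in ([109](#fem:approx:fe:error:theorem)) is infinite and gives
no information for this $f$.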
<!-- --- end solution of exercise --- -->
Filename: `Pd_approx_error`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Problem 18: Approximate a step function by finite elements
<div id="fem:approx:fe:exer:Heaviside"></div>
Approximate the step function
$$
f(x) = \left\lbrace\begin{array}{ll}
0 & \mbox{ if } 0\leq x < {1/2},\\
1 & \mbox{ if } {1/2} \leq x \leq 1
\end{array}\right.
$$
by 2, 4, 8, and 16 elements and P1, P2, P3, and P4. Compare approximations visually.
<!-- --- begin hint in exercise --- -->
**Hint.**
This $f$ can also be expressed in terms of the Heaviside function $H(x)$:
$f(x) = H(x-{1/2})$.
Therefore, $f$ can be defined by
```python
f = sym.Heaviside(x - sym.Rational(1,2))
```
making the `approximate` function in the
`fe_approx1D.py` module an obvious candidate to solve the
problem. However, `sympy` does not handle symbolic integration
with this particular integrand, and the `approximate` function faces a problem
when converting `f` to a Python function (for plotting) since
`Heaviside` is not an available function in `numpy`.
An alternative is to perform hand calculations. This is an instructive
task, but in practice only feasible for a few elements and for P1 and P2 elements.
It is better to copy the functions `element_matrix`, `element_vector`,
`assemble`, and `approximate` from the `fe_approx1D_numint.py` file
and edit these functions such that they can compute approximations
with `f` given as a Python function and not a symbolic expression.
Also assume that `phi` computed by the `basis` function is a Python
callable function. Remove all instances of the `symbolic` variable
and associated code.
<!-- --- end hint in exercise --- -->
<!-- --- begin solution of exercise --- -->
**Solution.**
The modifications of `element_matrix`, `element_vector`,
`assemble`, and `approximate` from the `fe_approx1D_numint.py` file
are listed below.
```python
import sys
from fe_approx1D_numint import mesh_uniform, u_glob
from fe_approx1D import basis
import numpy as np
def element_matrix(phi, Omega_e, numint):
n = len(phi)
A_e = np.zeros((n, n))
h = Omega_e[1] - Omega_e[0]
detJ = h/2 # dx/dX
for r in range(n):
for s in range(r, n):
for j in range(len(numint[0])):
Xj, wj = numint[0][j], numint[1][j]
A_e[r,s] += phi[r](Xj)*phi[s](Xj)*detJ*wj
A_e[s,r] = A_e[r,s]
return A_e
def element_vector(f, phi, Omega_e, numint):
n = len(phi)
b_e = np.zeros(n)
h = Omega_e[1] - Omega_e[0]
detJ = h/2
for r in range(n):
for j in range(len(numint[0])):
Xj, wj = numint[0][j], numint[1][j]
xj = (Omega_e[0] + Omega_e[1])/2 + h/2*Xj # mapping
b_e[r] += f(xj)*phi[r](Xj)*detJ*wj
return b_e
def assemble(vertices, cells, dof_map, phi, f, numint):
N_n = len(list(set(np.array(dof_map).ravel())))
N_e = len(cells)
A = np.zeros((N_n, N_n))
b = np.zeros(N_n)
for e in range(N_e):
Omega_e = [vertices[cells[e][0]], vertices[cells[e][1]]]
A_e = element_matrix(phi[e], Omega_e, numint)
b_e = element_vector(f, phi[e], Omega_e, numint)
#print('element', e)
#print(b_e)
for r in range(len(dof_map[e])):
for s in range(len(dof_map[e])):
A[dof_map[e][r],dof_map[e][s]] += A_e[r,s]
b[dof_map[e][r]] += b_e[r]
return A, b
def approximate(f, d, N_e, numint, Omega=[0,1], filename='tmp'):
"""
Compute the finite element approximation, using Lagrange
    elements of degree d, to a Python function f on a domain
Omega. N_e is the number of elements.
numint is the name of the numerical integration rule
(Trapezoidal, Simpson, GaussLegendre2, GaussLegendre3,
GaussLegendre4, etc.). numint=None implies exact
integration.
"""
from math import sqrt
numint_name = numint # save name
if numint == 'Trapezoidal':
numint = [[-1, 1], [1, 1]]
elif numint == 'Simpson':
numint = [[-1, 0, 1], [1./3, 4./3, 1./3]]
elif numint == 'Midpoint':
numint = [[0], [2]]
elif numint == 'GaussLegendre2':
numint = [[-1/sqrt(3), 1/sqrt(3)], [1, 1]]
elif numint == 'GaussLegendre3':
numint = [[-sqrt(3./5), 0, sqrt(3./5)],
[5./9, 8./9, 5./9]]
elif numint == 'GaussLegendre4':
numint = [[-0.86113631, -0.33998104, 0.33998104,
0.86113631],
[ 0.34785485, 0.65214515, 0.65214515,
0.34785485]]
elif numint == 'GaussLegendre5':
numint = [[-0.90617985, -0.53846931, -0. ,
0.53846931, 0.90617985],
[ 0.23692689, 0.47862867, 0.56888889,
0.47862867, 0.23692689]]
elif numint is not None:
print(('Numerical rule %s is not supported '\
'for numerical computing' % numint))
sys.exit(1)
vertices, cells, dof_map = mesh_uniform(N_e, d, Omega)
# phi is a list where phi[e] holds the basis in cell no e
# (this is required by assemble, which can work with
# meshes with different types of elements).
# len(dof_map[e]) is the number of nodes in cell e,
# and the degree of the polynomial is len(dof_map[e])-1
phi = [basis(len(dof_map[e])-1) for e in range(N_e)]
A, b = assemble(vertices, cells, dof_map, phi, f,
numint=numint)
print(('cells:', cells))
print(('vertices:', vertices))
print(('dof_map:', dof_map))
print(('A:\n', A))
print(('b:\n', b))
c = np.linalg.solve(A, b)
print(('c:\n', c))
if filename is not None:
title = 'P%d, N_e=%d' % (d, N_e)
title += ', integration: %s' % numint_name
x_u, u, _ = u_glob(np.asarray(c), vertices, cells, dof_map,
resolution_per_element=51)
x_f = np.linspace(Omega[0], Omega[1], 10001) # mesh for f
import scitools.std as plt
plt.plot(x_u, u, '-',
x_f, f(x_f), '--')
plt.legend(['u', 'f'])
plt.title(title)
plt.savefig(filename + '.pdf')
plt.savefig(filename + '.png')
return c
```
With a purely numerical version of the `approximate` function, we can
easily investigate the suggested approximations in this exercise:
```python
import os

def exercise():
def f(x):
if isinstance(x, (float,int)):
return 0 if x < 0.5 else 1
elif isinstance(x, np.ndarray):
return np.where(x < 0.5, 0, 1)
N_e_values = [2, 4, 8, 16]
for d in 1, 2, 3, 4:
for N_e in N_e_values:
approximate(f, numint='GaussLegendre%d' % (d+1),
d=d, N_e=N_e,
filename='fe_Heaviside_P%d_%de' % (d, N_e))
for ext in 'pdf', 'png':
cmd = 'doconce combine_images '
cmd += ext + ' -2 '
cmd += ' '.join(['fe_Heaviside_P%d_%de' % (d, N_e)
for N_e in N_e_values])
cmd += ' fe_Heaviside_P%d' % d
print(cmd)
os.system(cmd)
```
Running this function reveals that even finite elements
(and not only sines, as demonstrated in [Exercise 8: Fourier series as a least squares approximation](#fem:approx:exer:Fourier))
give oscillations around a discontinuity.
<!-- dom:FIGURE: [fig/fe_Heaviside_P1.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- dom:FIGURE: [fig/fe_Heaviside_P2.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- dom:FIGURE: [fig/fe_Heaviside_P3.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- dom:FIGURE: [fig/fe_Heaviside_P4.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
**Remarks.**
It is of extreme importance to use a Gauss-Legendre numerical integration
rule that matches the degree of polynomials in the basis.
Using a rule with fewer points may lead to very strange results.
<!-- --- end solution of exercise --- -->
Filename: `fe_Heaviside_P1P2`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 19: 2D approximation with orthogonal functions
<div id="fem:approx:fe:exer:2Dsines:symbolic"></div>
**a)**
Assume we have basis functions ${\varphi}_i(x,y)$ in 2D that are
orthogonal such that $({\varphi}_i,{\varphi}_j)=0$ when $i\neq j$. The
function `least_squares` in the file [`approx2D.py`](${fem_src}/fe_approx2D.py) will then spend much time on computing
off-diagonal terms in the coefficient matrix that we know are zero.
To speed up the computations, make a version `least_squares_orth` that
utilizes the orthogonality among the basis functions.
<!-- --- begin solution of exercise --- -->
**Solution.**
We 1) remove the `j` loop in the `least_squares` function and set
`j = i`,
2) make `A` a vector (i.e., an $(N+1, 1)$ matrix, like `b` and `c`), and
3) solve for `c[i,0]` as soon as `A[i,0]` and `b[i,0]` are computed.
```python
import sympy as sym
import mpmath
def least_squares_orth(f, psi, Omega, symbolic=True,
print_latex=False):
"""
Given a function f(x,y) on a rectangular domain
Omega=[[xmin,xmax],[ymin,ymax]],
return the best approximation to f(x,y) in the space V
spanned by the functions in the list psi.
This function assumes that psi are orthogonal on Omega.
"""
# Modification of least_squares function: drop the j loop,
# set j=i, compute c on the fly in the i loop.
N = len(psi) - 1
    # Note that A, b, c become (N+1)x(N+1); only the 1st column is used
A = sym.zeros(N+1)
b = sym.zeros(N+1)
c = sym.zeros(N+1)
x, y = sym.symbols('x y')
print(('...evaluating matrix...', A.shape, b.shape, c.shape))
for i in range(N+1):
j = i
print(('(%d,%d)' % (i, j)))
integrand = psi[i]*psi[j]
if symbolic:
I = sym.integrate(integrand,
(x, Omega[0][0], Omega[0][1]),
(y, Omega[1][0], Omega[1][1]))
if not symbolic or isinstance(I, sym.Integral):
# Could not integrate symbolically, use numerical int.
print(('numerical integration of', integrand))
integrand = sym.lambdify([x,y], integrand, 'mpmath')
I = mpmath.quad(integrand,
[Omega[0][0], Omega[0][1]],
[Omega[1][0], Omega[1][1]])
A[i,0] = I
integrand = psi[i]*f
if symbolic:
I = sym.integrate(integrand,
(x, Omega[0][0], Omega[0][1]),
(y, Omega[1][0], Omega[1][1]))
if not symbolic or isinstance(I, sym.Integral):
# Could not integrate symbolically, use numerical int.
print(('numerical integration of', integrand))
integrand = sym.lambdify([x,y], integrand, 'mpmath')
I = mpmath.quad(integrand,
[Omega[0][0], Omega[0][1]],
[Omega[1][0], Omega[1][1]])
b[i,0] = I
c[i,0] = b[i,0]/A[i,0]
print()
print(('A:\n', A, '\nb:\n', b))
c = [c[i,0] for i in range(c.shape[0])] # make list
print(('coeff:', c))
# c is a sympy Matrix object, numbers are in c[i,0]
u = sum(c[i]*psi[i] for i in range(len(psi)))
print(('approximation:', u))
print(('f:', sym.expand(f)))
if print_latex:
print((sym.latex(A, mode='plain')))
print((sym.latex(b, mode='plain')))
print((sym.latex(c, mode='plain')))
return u, c
```
<!-- --- end solution of exercise --- -->
**b)**
Apply the function to approximate
$$
f(x,y) = x(1-x)y(1-y)e^{-x-y}
$$
on $\Omega = [0,1]\times [0,1]$ via basis functions
$$
{\varphi}_i(x,y) = \sin ((p+1)\pi x)\sin((q+1)\pi y),\quad i=q(N_x+1) + p,
$$
where $p=0,\ldots,N_x$ and $q=0,\ldots,N_y$.
<!-- --- begin hint in exercise --- -->
**Hint.**
Get ideas from the function `least_squares_orth` in
the section [Orthogonal basis functions](#fem:approx:global:orth) and
file [`approx1D.py`](${fem_src}/fe_approx1D.py).
<!-- --- end hint in exercise --- -->
<!-- --- begin solution of exercise --- -->
**Solution.**
A function for computing the basis functions may look like this:
```python
def sine_basis(Nx, Ny):
"""
Compute basis sin((p+1)*pi*x)*sin((q+1)*pi*y),
p=0,...,Nx, q=0,...,Ny.
"""
x, y = sym.symbols('x y')
psi = []
for q in range(0, Ny+1):
for p in range(0, Nx+1):
r = sym.sin((p+1)*sym.pi*x)*sym.sin((q+1)*sym.pi*y)
psi.append(r)
return psi
```
Application of this basis to approximate the given function is coded in
the following function:
```python
def demo(N):
"""
Find the approximation of f by the least squares method.
    The basis is sin((p+1)*pi*x)*sin((q+1)*pi*y) where
    0 <= p <= N, 0 <= q <= N.
"""
x, y = sym.symbols('x y')
f = x*(1-x)*y*(1-y)*sym.exp(-x-y)
psi = sine_basis(N, N)
Omega = [[0,1], [0,1]]
u, c = least_squares_orth(f, psi, Omega, symbolic=False)
from approx2D import comparison_plot
comparison_plot(f, u, Omega, title='N=%d' % N)
print(c)
if __name__=='__main__':
#test_least_squares_orth()
demo(N=2)
```
A lesson learned is that `symbolic=False` is important, otherwise `sympy`
consumes a lot of CPU time on trying to integrate symbolically.
The figure below shows the error in the approximation for $N=0$ (left)
and $N=2$ (right). The coefficients for $N=2$ decay rapidly:
[0.025, 0.0047, 0.0014, 0.0047, 0.0009, 0.0003, 0.0014, 0.0003,
8.2e-5]
<!-- dom:FIGURE: [fig/approx2D_ls_orth_sine_c.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- --- end solution of exercise --- -->
**c)**
Make a unit test for the `least_squares_orth` function.
<!-- --- begin solution of exercise --- -->
**Solution.**
Let us use the basis in b), fix the coefficients of some function
$f$, and check that the computed approximation, with the
same basis, has the same coefficients (this test employs the principle
that if $f\in V$, then $u=f$).
```python
def test_least_squares_orth():
# Use sine functions
x, y = sym.symbols('x y')
N = 2 # (N+1)**2 = 9 basis functions
psi = sine_basis(N, N)
f_coeff = [0]*len(psi)
f_coeff[3] = 2
f_coeff[4] = 3
f = sum(f_coeff[i]*psi[i] for i in range(len(psi)))
# Check that u exactly reproduces f
u, c = least_squares_orth(f, psi, Omega=[[0,1], [0,1]],
symbolic=False)
import numpy as np
diff = np.abs(np.array(c) - np.array(f_coeff)).max()
print(('diff:', diff))
tol = 1E-15
assert diff < tol
```
<!-- --- end solution of exercise --- -->
Filename: `approx2D_ls_orth`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 20: Use the Trapezoidal rule and P1 elements
<div id="fem:approx:fe:exer:1D:trapez"></div>
Consider the approximation of some $f(x)$ on an interval $\Omega$ using
the least squares or Galerkin methods with P1 elements. Derive
the element matrix and vector using the
Trapezoidal rule ([117](#fem:approx:fe:numint1:trapez)) for calculating
integrals on the reference element. Assemble the contributions, assuming
a uniform cell partitioning, and show that the resulting linear system
has the form $c_i=f(x_{i})$ for $i\in{\mathcal{I}_s}$.
<!-- --- begin solution of exercise --- -->
**Solution.**
The Trapezoidal rule for integrals on $[-1,1]$
is given by ([117](#fem:approx:fe:numint1:trapez)).
The expressions for the entries in the element matrix
are given by ([82](#fem:approx:fe:mapping:Ae)) in
the section [Mapping to a reference element](#fem:approx:fe:mapping):
$$
\begin{align*} \tilde A^{(e)}_{r,s} &=
\int_{-1}^1 {\tilde{\varphi}}_r(X){\tilde{\varphi}}_s(X)\det J\,{\, \mathrm{d}X}\\
&\approx \frac{h}{2}({\tilde{\varphi}}_r(-1){\tilde{\varphi}}_s(-1)
+ {\tilde{\varphi}}_r(1){\tilde{\varphi}}_s(1)){\thinspace .}
\end{align*}
$$
We know that ${\tilde{\varphi}}_r(\pm 1)$ is either 0 or 1, so evaluating
the formula above for $r,s=0,1$ gives
$$
\tilde A^{(e)} = \frac{h}{2}\left(\begin{array}{cc}
1 & 0\\
0 & 1
\end{array}\right){\thinspace .}
$$
As usual, $h$ is the length of the element in physical coordinates.
The element vector in the reference element is given by
([83](#fem:approx:fe:mapping:be)):
$$
\begin{align*}
\tilde b^{(e)}_{r} &= \int_{-1}^1 f(x(X)){\tilde{\varphi}}_r(X)\det J\,{\, \mathrm{d}X}\\
&\approx \frac{h}{2}(f(x(-1)){\tilde{\varphi}}_r(-1)
+ f(x(1)){\tilde{\varphi}}_r(1)){\thinspace .}
\end{align*}
$$
Evaluating the formula for $r=0,1$ leads to
$$
\tilde b^{(e)} = \frac{h}{2}\left(\begin{array}{c}
f(x_L)\\
f(x_R)
\end{array}\right),
$$
where $x_L$ and $x_R$ are the $x$ coordinates of the local points
$X=-1$ and $X=1$, respectively.
With a uniform mesh with nodes $x_{i}=ih$, the element matrix and
vectors assemble to a coefficient matrix
$$
\frac{h}{2}\hbox{diag}(1, 2, \ldots, 2, 1),
$$
and right-hand side vector
$$
\frac{h}{2}(f(x_{0}), 2f(x_{1}), \ldots, 2f(x_{N_n-1}),
f(x_{N_n})){\thinspace .}
$$
The factors $h/2$ and $2$ cancel, so we are left with the solution of
the system as
$$
c_i = f(x_{i}){\thinspace .}
$$
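A quick numerical check of this result (a small self-contained sketch, independent of the module files; the test function `f` below is an arbitrary choice):
```python
import numpy as np

def p1_trapezoidal_coefficients(f, N_e, Omega=(0, 1)):
    """Assemble and solve the P1/Trapezoidal system on a uniform mesh."""
    h = (Omega[1] - Omega[0]) / N_e
    nodes = np.linspace(Omega[0], Omega[1], N_e + 1)
    A = np.zeros((N_e + 1, N_e + 1))
    b = np.zeros(N_e + 1)
    A_e = h / 2 * np.identity(2)              # element matrix from the Trapezoidal rule
    for e in range(N_e):                      # assemble element contributions
        b_e = h / 2 * np.array([f(nodes[e]), f(nodes[e + 1])])
        for r in range(2):
            b[e + r] += b_e[r]
            for s in range(2):
                A[e + r, e + s] += A_e[r, s]
    return np.linalg.solve(A, b), nodes

f = lambda x: np.sin(2 * np.pi * x) + x**2    # arbitrary smooth test function
c, nodes = p1_trapezoidal_coefficients(f, N_e=8)
print(np.allclose(c, f(nodes)))               # True: the coefficients equal f at the nodes
```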
<!-- --- end solution of exercise --- -->
Filename: `fe_P1_trapez`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 21: Compare P1 elements and interpolation
<div id="fem:approx:fe:exer:1D:P1:vs:interp"></div>
We shall approximate the function
$$
f(x) = 1 + \epsilon\sin (2\pi nx),\quad x\in \Omega = [0,1],
$$
where $n\in\mathbb{Z}$ and $\epsilon \geq 0$.
**a)**
Plot $f(x)$ for $n=1,2,3$ and find the wavelength of the function.
**b)**
We want to use $N_P$ elements per wavelength. Show that the number
of elements is then $nN_P$.
**c)**
The critical quantity for accuracy is the number of elements per
wavelength, not the element size in itself. It therefore suffices
to study an $f$ with just one wavelength in $\Omega = [0,1]$.
Set $\epsilon = 0.5$.
Run the least squares or projection/Galerkin method for
$N_P=2,4,8,16,32$. Compute the error $E=||u-f||_{L^2}$.
<!-- --- begin hint in exercise --- -->
**Hint 1.**
Use the `fe_approx1D_numint` module to compute $u$ and use
the technique from the section [Computing the error of the approximation](#fem:approx:fe:error) to
compute the norm of the error.
<!-- --- end hint in exercise --- -->
<!-- --- begin hint in exercise --- -->
**Hint 2.**
Read up on the Nyquist–Shannon sampling theorem.
<!-- --- end hint in exercise --- -->
**d)**
Repeat the set of experiments in the above point, but
use interpolation/collocation based on the node points to
compute $u(x)$ (recall that $c_i$ is now simply $f(x_{i})$).
Compute the error $E=||u-f||_{L^2}$.
Which method seems to be most accurate?
Filename: `fe_P1_vs_interp`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 22: Implement 3D computations with global basis functions
<div id="fem:approx:fe:exer:3D:approx3D"></div>
Extend the [`approx2D.py`](${fem_src}/approx2D.py) code to 3D
by applying ideas from the section [Extension to 3D](#fem:approx:3D:global).
Construct some 3D problem to make a test function for the
implementation.
<!-- --- begin hint in exercise --- -->
**Hint.**
Drop symbolic integration since it is in general too slow for 3D problems.
Also use `scipy.integrate.nquad` instead of `mpmath.quad`
for numerical integration, since it is much faster.
<!-- --- end hint in exercise --- -->
<!-- --- begin solution of exercise --- -->
**Solution.**
We take a copy of `approx2D.py` and drop the `comparison_plot` function since
plotting in 3D is much more complicated (could make a special version with
curves through lines in the 3D domain, for instance).
Furthermore, we remove the lines with symbolic integration and replace
the calls to `mpmath.quad` by calls to
`scipy.integrate.nquad`. The resulting function becomes
```python
import sympy as sym
import numpy as np
import scipy.integrate
def least_squares(f, psi, Omega):
"""
Given a function f(x,y,z) on a rectangular domain
Omega=[[xmin,xmax],[ymin,ymax],[zmin,zmax]],
return the best approximation to f in the space V
spanned by the functions in the list psi.
f and psi are symbolic (sympy) expressions, but will
be converted to numeric functions for faster integration.
"""
N = len(psi) - 1
A = np.zeros((N+1, N+1))
b = np.zeros(N+1)
x, y, z = sym.symbols('x y z')
f = sym.lambdify([x, y, z], f, modules='numpy')
psi_sym = psi[:] # take a copy, needed for forming u later
psi = [sym.lambdify([x, y, z], psi[i]) for i in range(len(psi))]
print('...evaluating matrix...')
for i in range(N+1):
for j in range(i, N+1):
print(('(%d,%d)' % (i, j)))
integrand = lambda x, y, z: psi[i](x,y,z)*psi[j](x,y,z)
I, err = scipy.integrate.nquad(
integrand,
[[Omega[0][0], Omega[0][1]],
[Omega[1][0], Omega[1][1]],
[Omega[2][0], Omega[2][1]]])
A[i,j] = A[j,i] = I
integrand = lambda x, y, z: psi[i](x,y,z)*f(x,y,z)
I, err = scipy.integrate.nquad(
integrand,
[[Omega[0][0], Omega[0][1]],
[Omega[1][0], Omega[1][1]],
[Omega[2][0], Omega[2][1]]])
b[i] = I
print()
c = np.linalg.solve(A, b)
if N <= 10:
print(('A:\n', A, '\nb:\n', b))
print(('coeff:', c))
u = sum(c[i]*psi_sym[i] for i in range(len(psi_sym)))
print(('approximation:', u))
return u, c
```
As test example, we can use the basis
$$
{\psi}_{p,q,r} = \sin((p+1)\pi x)\sin((q+1)\pi y)\sin((r+1)\pi z),
$$
for $p=0,\ldots,N_x$, $q=0,\ldots,N_y$, $r=0,\ldots,N_z$.
We choose $f$ as some prescribed combination of these functions and
check that the computed $u$ is exactly equal to $f$.
```python
def sine_basis(Nx, Ny, Nz):
"""
Compute basis sin((p+1)*pi*x)*sin((q+1)*pi*y)*sin((r+1)*pi*z),
p=0,...,Nx, q=0,...,Ny, r=0,...,Nz.
"""
x, y, z = sym.symbols('x y z')
psi = []
for r in range(0, Nz+1):
for q in range(0, Ny+1):
for p in range(0, Nx+1):
s = sym.sin((p+1)*sym.pi*x)*\
sym.sin((q+1)*sym.pi*y)*sym.sin((r+1)*sym.pi*z)
psi.append(s)
return psi
def test_least_squares():
# Use sine functions
x, y, z = sym.symbols('x y z')
N = 1 # (N+1)**3 = 8 basis functions
psi = sine_basis(N, N, N)
f_coeff = [0]*len(psi)
f_coeff[3] = 2
f_coeff[4] = 3
f = sum(f_coeff[i]*psi[i] for i in range(len(psi)))
# Check that u exactly reproduces f
u, c = least_squares(f, psi, Omega=[[0,1], [0,1], [0,1]])
diff = np.abs(np.array(c) - np.array(f_coeff)).max()
print(('diff:', diff))
tol = 1E-15
assert diff < tol
```
<!-- --- end solution of exercise --- -->
Filename: `approx3D`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 23: Use Simpson's rule and P2 elements
<div id="fem:approx:fe:exer:1D:simpson"></div>
Redo [Exercise 20: Use the Trapezoidal rule and P1 elements](#fem:approx:fe:exer:1D:trapez), but use P2
elements and Simpson's rule based on sampling the integrands at
the nodes in the reference cell.
<!-- --- begin solution of exercise --- -->
**Solution.**
Simpson's rule for integrals on $[-1,1]$
is given by ([118](#fem:approx:fe:numint1:Simpson)).
The expressions for the entries in the element matrix
are given by ([82](#fem:approx:fe:mapping:Ae)):
$$
\begin{align*} \tilde A^{(e)}_{r,s} &=
\int_{-1}^1 {\tilde{\varphi}}_r(X){\tilde{\varphi}}_s(X)\det J\,{\, \mathrm{d}X}\\
&\approx \frac{1}{3}\frac{h}{2}({\tilde{\varphi}}_r(-1){\tilde{\varphi}}_s(-1)
+ 4{\tilde{\varphi}}_r(0){\tilde{\varphi}}_s(0)
+ {\tilde{\varphi}}_r(1){\tilde{\varphi}}_s(1)){\thinspace .}
\end{align*}
$$
The expressions for ${\tilde{\varphi}}_r(X)$ are given by
([84](#fem:approx:fe:mapping:P1:phi0))-([85](#fem:approx:fe:mapping:P1:phi1)).
Evaluating the formula for $r,s=0,1,2$ gives the element matrix
$$
\tilde A^{(e)} = \frac{h}{6}\left(\begin{array}{ccc}
1 & 0 & 0\\
0 & 4 & 0\\
0 & 0 & 1
\end{array}\right){\thinspace .}
$$
As usual, $h$ is the length of the element in physical coordinates.
The element vector in the reference element is given by
([83](#fem:approx:fe:mapping:be)):
$$
\begin{align*}
\tilde b^{(e)}_{r} &= \int_{-1}^1 f(x(X)){\tilde{\varphi}}_r(X)\det J\,{\, \mathrm{d}X}\\
&\approx \frac{1}{3}\frac{h}{2}(f(x(-1)){\tilde{\varphi}}_r(-1)
+ 4f(x(0)){\tilde{\varphi}}_r(0)
+ f(x(1)){\tilde{\varphi}}_r(1)){\thinspace .}
\end{align*}
$$
Evaluating the formula for $r=0,1,2$ leads to
$$
\tilde b^{(e)} = \frac{h}{6}\left(\begin{array}{c}
f(x_L)\\
4f(x_c)\\
f(x_R)
\end{array}\right),
$$
where $x_L$, $x_c$, and $x_R$ are the $x$ coordinates of the local points
$X=-1$, $X=0$, and $X=1$, respectively. These correspond to the nodes
in the element.
With a uniform mesh with nodes $x_{i}=ih$, the element matrix and
vectors assemble to a coefficient matrix
$$
\frac{h}{6}\hbox{diag}(1, 4, 2, 4, 2, 4, \ldots, 2, 4, 1),
$$
and right-hand side vector
$$
\frac{h}{6}(f(x_{0}), 4f(x_{1}), 2f(x_{2}),
4f(x_{3}), 2f(x_{4}), \ldots, 2f(x_{N_n-2}),
4f(x_{N_n-1}), f(x_{N_n})){\thinspace .}
$$
The factors $h/6$, $2$ and $4$ all cancel, so we are left with the solution of
the system as
$$
c_i = f(x_{i}){\thinspace .}
$$
<!-- --- end solution of exercise --- -->
Filename: `fe_P2_simpson`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 24: Make a 3D code for Lagrange elements of arbitrary order
Extend the code from the section [Refined code with curve plotting](#fem:approx:fenics:2D:2) to 3D.
<!-- --- end exercise --- -->
```python
from sympy import *
init_printing()
```
```python
Uo,x,s=symbols('Uo x s', real=True)
```
```python
# Pair potential of Lennard-Jones form, with energy scale Uo and length scale s
U=Uo*((s/x)**12-(s/x)**6)
U
```
```python
Up=U.diff(x)
Up
```
```python
roots = solve(Up,x)
roots
```
```python
x0=roots[1]
print(x0)
x0
```
```python
Um=U.subs(x,x0)
print(Um)
Um
```
```python
Upp=Up.diff(x)
Upp
```
```python
# Effective spring constant: the second derivative of U evaluated at the equilibrium x0
k=Upp.subs(x,x0)
print(k)
print(latex(k))
k
```
```python
m=symbols("m")
```
```python
# Angular frequency of small oscillations about the minimum
omega=sqrt(k/m)
omega
```
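As a small follow-up sketch (the displacement symbol `u` is introduced here for illustration and is not part of the cells above), we can Taylor-expand $U$ about the equilibrium and check that the quadratic term is $\frac{k}{2}u^2$, i.e., the harmonic approximation:

```python
u = symbols('u', real=True)      # small displacement from the equilibrium position x0
Uq = U.subs(x, x0 + u).series(u, 0, 3).removeO()
simplify(Uq.coeff(u, 2) - k/2)   # expect 0: the quadratic Taylor term of U about x0 is (k/2)*u**2
```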
```python
```
# Jupyter like a pro
In this third notebook of the tutorial ["The World of Jupyter"](https://github.com/barbagroup/jupyter-tutorial/blob/master/World-of-Jupyter.md), we want to leave you with pro tips for using Jupyter in your future work.
## Importing libraries
First, a word on importing libraries. Previously, we used the following command to load all the functions in the **NumPy** library:
```python
import numpy
```
Once you execute that command in a code cell, you call any **NumPy** function by prepending the library name, e.g., `numpy.linspace()`, [`numpy.ones()`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ones.html#numpy.ones), [`numpy.zeros()`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html#numpy.zeros), [`numpy.empty()`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.empty.html#numpy.empty), [`numpy.copy()`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.copy.html#numpy.copy), and so on (explore the documentation for these very useful functions!).
But, you will find _a lot_ of sample code online that uses a different syntax for importing. They will do:
```python
import numpy as np
```
All this does is create an alias for `numpy` with the shorter string `np`, so you then would call a **NumPy** function like this: `np.linspace()`. This is just an alternative way of doing it, for lazy people that find it too long to type `numpy` and want to save 3 characters each time. For the not-lazy, typing `numpy` is more readable and beautiful. We like it better like this:
```python
import numpy
```
## Make your plots beautiful
When you make a plot using **Matplotlib**, you have many options to make your plots beautiful and publication-ready. Here are some of our favorite tricks.
First, let's load the `pyplot` module—and remember, `%matplotlib notebook` gets our plots inside the notebook (instead of a pop-up).
Our first trick is `rcparams`: we use it to customize the appearance of the plots. Here, we set the default font to a serif type of size 14 pt and make the size of the font for the axes labels 18 pt. Honestly, the default font is too small.
```python
from matplotlib import pyplot
%matplotlib notebook
pyplot.rcParams['font.family'] = 'serif'
pyplot.rcParams['font.size'] = 14
pyplot.rcParams['axes.labelsize'] = 18
```
The following example is from a tutorial by [Dr. Justin Bois](http://www.justinbois.info), a lecturer in Biology and Biological Engineering at Caltech, for his class in [Data Analysis in the Biological Sciences](http://bebi103.caltech.edu/2015/) (2015). He has given us permission to use it.
```python
# Get an array of 100 evenly spaced points from 0 to 2*pi
x = numpy.linspace(0.0, 2.0 * numpy.pi, 100)
# Make a pointwise function of x with exp(sin(x))
y = numpy.exp(numpy.sin(x))
```
Here, we added comments in the Python code with the `#` mark. Comments are often useful not only for others who read the code, but as a "note to self" for the future you!
Let's see how the plot looks with the new font settings we gave **Matplotlib**, and make the plot more friendly by adding axis labels. This is always a good idea!
```python
pyplot.figure()
pyplot.plot(x, y, color='k', linestyle='-')
pyplot.xlabel('$x$')
pyplot.ylabel('$\mathrm{e}^{\sin(x)}$')
pyplot.xlim(0.0, 2.0 * numpy.pi);
```
Did you see how **Matplotlib** understands LaTeX mathematics? That is beautiful. The function `pyplot.xlim()` specifies the limits of the x-axis (you can also manually specify the y-axis, if the defaults are not good for you).
Continuing with the tutorial example by Justin Bois, let's have some mathematical fun and numerically compute the derivative of this function, using finite differences. We need to apply the following mathematical formula on all the discrete points of the `x` array:
\begin{equation}
\frac{\mathrm{d}y(x_i)}{\mathrm{d}x} \approx \frac{y(x_{i+1}) - y(x_i)}{x_{i+1} - x_i}.
\end{equation}
By the way, did you notice how we can typeset beautiful mathematics within a markdown cell? The Jupyter notebook is happy typesetting mathematics using LaTeX syntax.
Since this notebook is _"Jupyter like a pro,"_ we will define a custom Python function to compute the forward difference. It is good form to define custom functions to make your code modular and reusable.
```python
def forward_diff(y, x):
"""Compute derivative by forward differencing."""
# Use numpy.empty to make an empty array to put our derivatives in
deriv = numpy.empty(y.size - 1)
# Use a for-loop to go through each point and compute the derivative.
for i in range(deriv.size):
deriv[i] = (y[i+1] - y[i]) / (x[i+1] - x[i])
# Return the derivative (a NumPy array)
return deriv
# Call the function to perform finite differencing
deriv = forward_diff(y, x)
```
Notice how we define a function with the `def` statement, followed by our custom name for the fuction, the function arguments in parenthesis, and ending the statement with a colon. The contents of the function are indicated by the indentation (four spaces, in this case), and the `return` statement indicates what the function returns to the code that called it (in this case, the contents of the variable `deriv`). Right after the function definition (in between triple quotes) is the _docstring_, a short text documenting what the function does. It is good form to always write docstrings for your functions!
In our custom `forward_diff()` function, we used `numpy.empty()` to create an empty array of length `y.size-1`, that is, one less than the length of the array `y`. Then, we start a for-loop that iterates over values of `i` using the [**`range()`**](https://docs.python.org/3/library/functions.html#func-range) function of Python. This is a very useful function that you should think about for a little bit. What it does is create a list of integers. If you give it just one argument, it's a _"stop"_ argument: `range(stop)` creates a list of integers from `0` to `stop-1`, i.e., the list has `stop` numbers in it because it always starts at zero. But you can also give it a _"start"_ and _"step"_ argument.
Experiment with this, if you need to. It's important that you internalize the way `range()` works. Go ahead and create a new code cell, and try things like:
```python
for i in range(5):
print(i)
```
changing the arguments of `range()`. (Note how we end the `for` statement with a colon.) Now think for a bit: how many numbers does the list have in the case of our custom function `forward_diff()`?
Now, we will make a plot of the numerical derivative of $\exp(\sin(x))$. We can also compare with the analytical derivative:
\begin{equation}
\frac{\mathrm{d}y}{\mathrm{d}x} = \mathrm{e}^{\sin x}\,\cos x = y \cos x,
\end{equation}
```python
deriv_exact = y * numpy.cos(x) # analytical derivative
pyplot.figure()
pyplot.plot((x[1:] + x[:-1]) / 2.0, deriv,
label='numerical',
marker='.', color='gray',
linestyle='None', markersize=10)
pyplot.plot(x, deriv_exact,
label='analytical',
color='k', linestyle='-') # analytical derivative in black line
pyplot.xlabel('$x$')
pyplot.ylabel('$\mathrm{d}y/\mathrm{d}x$')
pyplot.xlim(0.0, 2.0 * numpy.pi)
pyplot.legend(loc='upper center', numpoints=1);
```
Stop for a bit and look at the first `pyplot.plot()` call above. The square brackets normally are how you access a particular element of an array via its index: `x[0]` is the first element of `x`, and `x[i+1]` is the `i`-th element. What's very cool is that you can also use _negative_ indices: they indicate counting backwards from the end of the array, so `x[-1]` is the last element of `x`.
A neat trick of arrays is called [_slicing_](http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html): picking elements using the _colon notation_. Its general form is `x[start:stop:step]`. Note that, like the `range()` function, the `stop` index is _exclusive_, i.e., `x[stop]` is not included in the result.
For example, this code will give the odd numbers from `1` to `7`:
```python
x = numpy.array( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] )
x[1:-1:2]
```
Try it! Remember, Python arrays are indexed from `0`, so `x[1]` is the second element. The end-point in the slice above is index `-1`, that's the last array element (not included in the result), and we're stepping by `2`, i.e., every other element. If the `step` is not given, it defaults to `1`. If `start` is not given, it defaults to the first array element, and if `stop` is not given, it defaults to the last element. Try several variations on the slice, until you're comfortable with it.
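For instance, here are a few slices to experiment with (a quick sketch; the expected results are shown as comments):

```python
x = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
print(x[::2])    # [0 2 4 6 8]            every other element, starting at index 0
print(x[3:])     # [3 4 5 6 7 8 9]        from index 3 to the end
print(x[:-3])    # [0 1 2 3 4 5 6]        everything except the last three elements
print(x[::-1])   # [9 8 7 6 5 4 3 2 1 0]  the array reversed
```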
## There's a built-in for that
Here's another pro tip: whenever you find yourself writing a custom function for something that seems that a lot of people might use, find out first if there's a built-in for that. In this case, **NumPy** does indeed have a built-in for taking the numerical derivative by differencing! Check it out. We also use the function [`numpy.allclose()`](http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.allclose.html) to check if the two results are close.
```python
numpy_deriv = numpy.diff(y) / numpy.diff(x)
print('Are the two results close? {}'.format(numpy.allclose(numpy_deriv, deriv)))
```
Are the two results close? True
Not only is the code much more compact and easy to read with the built-in **NumPy** function for the numerical derivative ... it is also much faster:
```python
%timeit numpy_deriv = numpy.diff(y) / numpy.diff(x)
%timeit deriv = forward_diff(y, x)
```
100000 loops, best of 3: 13.4 µs per loop
10000 loops, best of 3: 75.2 µs per loop
**NumPy** functions will always be faster than equivalent code you write yourself because at the heart they use pre-compiled code and highly optimized numerical libraries, like BLAS and LAPACK.
## Do math like a pro
Do you want to compute the integral of $y(x) = \mathrm{e}^{\sin x}$? Of course you do. We find the analytical integral using the integral formulas for modified Bessel functions:
\begin{equation}
\int_0^{2\pi}\mathrm{d} x\, \mathrm{e}^{\sin x} = 2\pi \,I_0(1),
\end{equation}
where $I_0$ is the modified Bessel function of the first kind. But if you don't have your special-functions handbook handy, we can find the integral with Python. We just need the right modules from the [**SciPy**](http://docs.scipy.org/doc/scipy/reference/) library. **SciPy** has a module of special functions, including Bessel functions, called `scipy.special`. Let's get that loaded, then use it to compute the exact integral:
```python
import scipy.special
exact_integral = 2.0 * numpy.pi * scipy.special.iv(0, 1.0)
print('Exact integral: {}'.format(exact_integral))
```
Exact integral: 7.95492652101
Or instead, we may want to compute the integral numerically, via the trapezoid rule. The integral is over one period of a periodic function, so only the constant term of its Fourier series will contribute (the periodic terms integrate to zero). The constant Fourier term is the mean of the function over the interval, and the integral is the area of a rectangle: $2\pi \langle y(x)\rangle_x$. Sampling $y$ at $n$ evenly spaced points over the interval of length $2\pi$, we have:
\begin{align}
\int_0^{2\pi}\mathrm{d} x\, y(x) \approx \frac{2\pi}{n}\sum_{i=0}^{n-1} y(x_i),
\end{align}
**NumPy** gives as a `mean` method to quickly get the sum:
```python
approx_integral = 2.0 * numpy.pi * y[:-1].mean()
print('Approximate integral: {}'.format(approx_integral))
print('Error: {}'.format(exact_integral - approx_integral))
```
Approximate integral: 7.95492652101
Error: 0.0
```python
approx_integral = 2.0 * numpy.pi * numpy.mean(y[:-1])
print('Approximate integral: {}'.format(approx_integral))
print('Error: {}'.format(exact_integral - approx_integral))
```
Approximate integral: 7.95492652101
Error: 0.0
The syntax `y.mean()` applies the `mean()` **NumPy** method to the array `y`. Here, we apply the method to a _slice_ of `y` that does not include the last element (see discussion of _slicing_ above). We could have also done `numpy.mean(y[:-1])` (the function equivalent of the method `mean()` applied to an array); they give equivalent results and which one you choose is a matter of style.
## Beautiful interactive plots with Bokeh
**Matplotlib** will be your workhorse for creating plots in notebooks. But it's not the only game in town! A recent new player is [**Bokeh**](http://nbviewer.jupyter.org/github/bokeh/bokeh-notebooks/blob/master/index.ipynb), a visualization library to make amazing interactive plots and share them online. It can also handle very large data sets with excellent performance.
If you installed **Anaconda** in your system, you will probably already have **Bokeh**. You can check if it's there by running the `conda list` command. If you installed **Miniconda**, you will need to install it with `conda install bokeh`.
After installing **Bokeh**, we have many modules available: [`bokeh.plotting`](http://bokeh.pydata.org/en/latest/docs/reference/plotting.html#bokeh-plotting) gives you the ability to create interactive figures with zoom, pan, resize, save, and other tools.
```python
from bokeh import plotting as bplotting
```
**Bokeh** integrates with Jupyter notebooks by calling the output function, as follows:
```python
bplotting.output_notebook()
```
```python
# create a new Bokeh plot with axis labels, name it "bop"
bop = bplotting.figure(x_axis_label='x', y_axis_label='dy/dx')
# add a title, change the font
bop.title = "Derivative of exp(sin(x))"
bop.title_text_font = "palatino"
# add a line with legend and line thickness to "bop"
bop.line(x, deriv_exact, legend="analytical", line_width=2)
# add circle markers with legend, specify color
bop.circle((x[1:] + x[:-1]) / 2.0, deriv, legend="numerical", fill_color="gray", size=8, line_color=None)
bop.grid.grid_line_alpha=0.3
bplotting.show(bop);
```
**Note**—As of June 2016 (v.0.11.1), Bokeh does not support LaTeX on axis labels. This is an [issue](https://github.com/bokeh/bokeh/issues/647) they are working on, so stay tuned!
Look at the neat tools on the **Bokeh** figure: you can zoom in to any portion to explore the data, you can drag the plot area around, resize and finally save the figure to a file. You also have many beautiful [styling](http://bokeh.pydata.org/en/latest/docs/user_guide/styling.html#userguide-styling) options!
# Optional next step: get interactive with Lorenz
We found two really cool ways for you to get interactive with the Lorenz equations! Try out the interactive blog post by Tim Head on [Exploring the Lorenz equations](https://betatim.github.io/posts/interactive-posts/) (January 2016), and learn about IPython widgets. Or, check out the [Lorentz example on **Bokeh** plots](http://nbviewer.jupyter.org/github/bokeh/bokeh-notebooks/blob/master/gallery/lorenz.ipynb). Better yet, try them both.
---
<p style="font-size:smaller">(c) 2016 Lorena A. Barba. Free to use under Creative Commons Attribution <a href="https://creativecommons.org/licenses/by/4.0/">CC-BY 4.0 License</a>. This notebook was written for the tutorial <a href="https://github.com/barbagroup/jupyter-tutorial/blob/master/World-of-Jupyter.md">"The world of Jupyter"</a> at the Huazhong University of Science and Technology (HUST), Wuhan, China.
</p>
<p style="font-size:smaller">Example from Justin Bois (c) 2015 also under a <a href="https://creativecommons.org/licenses/by/4.0/">CC-BY 4.0 License</a>.
</p>
# M6803 Assignment3
## Wang Longqi
## Ex.1.
(a). Apply LDLT decomposition on the matrix A. Since the matrix is symmetrical, we only need to calculate L.
$$L_{21}=\frac{20}{8}=2.5 \\ L_{31}=\frac{15}{8}=1.875\\U_{22}=30\\U_{13}=15
\\L_{32}=\frac{A_{32}-L_{31}U_{12}}{U_{22}}=0.4167\\U_{23}=12.5\\U_{33}=A_{33}-L_{31}U_{13}-L_{32}U_{23}=26.667$$
Therefore, we have following LDLT decomposition:
\begin{equation}A=\left[
\begin{array}{ccc}
1 & 0 & 0 \\
2.5 & 1 & 0 \\
1.875 & 0.4167 & 1
\end{array} \right]
\left[
\begin{array}{ccc}
8 & 0 & 0 \\
0 & 30 & 0 \\
0 & 0 & 26.667
\end{array} \right]
\left[
\begin{array}{ccc}
1 & 2.5 & 1.875 \\
0 & 1 & 0.4167 \\
0 & 0 & 1
\end{array} \right]
\end{equation}
Then, transform the result to Cholesky decomposition
\begin{equation}A=\left[
\begin{array}{ccc}
2.8284 & 0 & 0 \\
7.0711 & 5.4772 & 0 \\
5.3033 & 2.2822 & 5.1640
\end{array} \right]
\left[
\begin{array}{ccc}
2.8284 & 7.0711 & 5.3033 \\
0 & 5.4772 & 2.2822 \\
0 & 0 & 5.1640
\end{array} \right]
\end{equation}
The equation can be solved as $Ax=L(L^Tx)=b$. By forward substitution, we get
\begin{equation} L^Tx=
\left[
\begin{array}{c}
17.677\\22.822\\-8.876
\end{array} \right]
\end{equation}
Then, by back substitution, we obtain
\begin{equation} x=
\left[
\begin{array}{c}
-2.734 \\ 4.883\\ -1.719
\end{array} \right]
\end{equation}
(b). Because the matrix is not diagonally dominant, the Jacobi method cannot converge. What is worse, the two largest elements in row one and row two lie in the same column, so we cannot adjust rows or columns to make the Jacobi method converge. The Jacobi iteration for the original matrix is as follows:
\begin{equation} x^{(n+1)}=
\left[
\begin{array}{c}
6.25\\31.25\\12.5
\end{array} \right]
-\left[
\begin{array}{ccc}
0 & 2.5 & 1.875\\
0.25 & 0 & 0.625\\
0.25 & 0.83333333& 0\\
\end{array} \right] x^{(n)}
\end{equation}
Therefore,
\begin{equation} x^{(1)}=
\left[
\begin{array}{c}
6.25\\31.25\\12.5
\end{array} \right]
-\left[
\begin{array}{ccc}
0 & 2.5 & 1.875\\
0.25 & 0 & 0.625\\
0.25 & 0.83333333& 0\\
\end{array} \right]
\left[
\begin{array}{c}
-1\\2\\-1
\end{array} \right]
=\left[
\begin{array}{c}
3.125 \\ 32.125 \\ 11.0833
\end{array} \right]
\end{equation}
\begin{equation} x^{(2)}=
\left[
\begin{array}{c}
6.25\\31.25\\12.5
\end{array} \right]
-\left[
\begin{array}{ccc}
0 & 2.5 & 1.875\\
0.25 & 0 & 0.625\\
0.25 & 0.83333333& 0\\
\end{array} \right]
\left[
\begin{array}{c}
3.125 \\ 32.125 \\ 11.0833
\end{array} \right]
=\left[
\begin{array}{c}
-94.843\\
23.541\\
-15.052
\end{array} \right]
\end{equation}\begin{equation} x^{(3)}=
\left[
\begin{array}{c}
6.25\\31.25\\12.5
\end{array} \right]
-\left[
\begin{array}{ccc}
0 & 2.5 & 1.875\\
0.25 & 0 & 0.625\\
0.25 & 0.83333333& 0\\
\end{array} \right]
\left[
\begin{array}{c}
-94.843\\
23.541\\
-15.052
\end{array} \right]
=\left[
\begin{array}{c}
-24.3815\\
64.3684\\
16.5928
\end{array} \right]
\end{equation}\begin{equation} x^{(4)}=
\left[
\begin{array}{c}
6.25\\31.25\\12.5
\end{array} \right]
-\left[
\begin{array}{ccc}
0 & 2.5 & 1.875\\
0.25 & 0 & 0.625\\
0.25 & 0.83333333& 0\\
\end{array} \right]
\left[
\begin{array}{c}
-24.3815\\
64.3684\\
16.5928
\end{array} \right]
=\left[
\begin{array}{c}
-185.782\\
26.974\\
-35.045
\end{array} \right]
\end{equation}
## Ex.2
We know the Taylor expansion of $f(x+h)$ is
$$f(x+h)=f{\left (x \right )} + h \frac{d}{d x} f{\left (x \right )} + \frac{h^{2}}{2} \frac{d^{2}}{d x^{2}} f{\left (x \right )} + \frac{h^{3}}{6} \frac{d^{3}}{d x^{3}} f{\left (x \right )} + \frac{h^{4}}{24} \frac{d^{4}}{d x^{4}} f{\left (x \right )} +\mathcal{O}\left(h^{5}\right)$$
Therefore, we write down the Taylor expansions of
$$f(x-2h)=f{\left (x \right )} -2 h \frac{d}{d x} f{\left (x \right )} + \frac{4h^{2}}{2} \frac{d^{2}}{d x^{2}} f{\left (x \right )} - \frac{8h^{3}}{6} \frac{d^{3}}{d x^{3}} f{\left (x \right )} + \frac{16 h^{4}}{24} \frac{d^{4}}{d x^{4}} f{\left (x \right )} +\mathcal{O}\left(h^{5}\right)\\
f(x-h)=f{\left (x \right )} - h \frac{d}{d x} f{\left (x \right )} + \frac{h^{2}}{2} \frac{d^{2}}{d x^{2}} f{\left (x \right )} - \frac{h^{3}}{6} \frac{d^{3}}{d x^{3}} f{\left (x \right )} + \frac{h^{4}}{24} \frac{d^{4}}{d x^{4}} f{\left (x \right )} +\mathcal{O}\left(h^{5}\right)\\
f(x+h)=f{\left (x \right )} + h \frac{d}{d x} f{\left (x \right )} + \frac{h^{2}}{2} \frac{d^{2}}{d x^{2}} f{\left (x \right )} + \frac{h^{3}}{6} \frac{d^{3}}{d x^{3}} f{\left (x \right )} + \frac{h^{4}}{24} \frac{d^{4}}{d x^{4}} f{\left (x \right )} +\mathcal{O}\left(h^{5}\right)\\
f(x+2h)=f{\left (x \right )} + 2h \frac{d}{d x} f{\left (x \right )} + \frac{4h^{2}}{2} \frac{d^{2}}{d x^{2}} f{\left (x \right )} + \frac{8h^{3}}{6} \frac{d^{3}}{d x^{3}} f{\left (x \right )} + \frac{16h^{4}}{24} \frac{d^{4}}{d x^{4}} f{\left (x \right )} +\mathcal{O}\left(h^{5}\right)$$
It can be easily observed that
$$\begin{array}{l}
f(x+2h)-f(x-2h)-2\left(f(x+h)-f(x-h)\right)\\
=4 h \frac{d}{d x} f{\left (x \right )} + \frac{16h^{3}}{6} \frac{d^{3}}{d x^{3}} f{\left (x \right )}-2\left(2 h \frac{d}{d x} f{\left (x \right )} + \frac{2h^{3}}{6} \frac{d^{3}}{d x^{3}} f{\left (x \right )}\right) +\mathcal{O}\left(h^{5}\right)\\
= 2h^3 \frac{d^{3}}{d x^{3}} f{\left (x \right )} +\mathcal{O}\left(h^{5}\right)
\end{array}$$
Therefore, we obtain the centered finite-difference approximation to the third derivative that is second-order accurate:
$$ \frac{d^{3}}{d x^{3}} f{\left (x \right )} =\frac{f(x+2h)-2f(x+h)+2f(x-h)-f(x-2h)}{2h^{3}} +\mathcal{O}\left(h^{2}\right)$$
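A quick numerical sanity check of this formula (a sketch; taking $f=\sin$ as an arbitrary test function, for which $f'''=-\cos$):

```python
import numpy as np

def third_derivative(f, x, h):
    # centered, second-order approximation derived above
    return (f(x + 2*h) - 2*f(x + h) + 2*f(x - h) - f(x - 2*h)) / (2*h**3)

x0, exact = 1.0, -np.cos(1.0)
for h in [0.1, 0.05, 0.025]:
    err = abs(third_derivative(np.sin, x0, h) - exact)
    print(h, err)   # the error drops by roughly a factor of 4 each time h is halved
```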
## Ex.3
Firstly, we derive the governing equation of the given system.
$$V=\pi D^2 h/4 \\
\frac{dV}{dt}=-Q_{out}=-CA\sqrt{2gh}$$
Substitute the given parameters into the equation, we obtain:
$$\frac{dh}{dt}=-\frac{0.55 \sqrt{2gh}}{10^4}\\
h(0)=2.75$$
The exact time at which the tank will be empty is 13620.9 seconds.
Implementing the 4th-order RK method in Excel for this problem, we obtain the following plot for $h(t)$.
The RK iteration with step $\Delta t=1000$ s is listed below:
<table>
<tr>
<td>t</td>
<td>h</td>
<td>f</td>
<td>k1</td>
<td>k2</td>
<td>k3</td>
<td>k4</td>
</tr>
<tr>
<td>0</td>
<td>2.75</td>
<td>-0.000403799</td>
<td>-0.403799068</td>
<td>-0.388693465</td>
<td>-0.3892691</td>
<td>-0.374129719</td>
</tr>
<tr>
<td>1000</td>
<td>2.361024347</td>
<td>-0.000374153</td>
<td>-0.374152971</td>
<td>-0.359024038</td>
<td>-0.359648126</td>
<td>-0.344479461</td>
</tr>
<tr>
<td>2000</td>
<td>2.001694887</td>
<td>-0.000344507</td>
<td>-0.344506885</td>
<td>-0.329350421</td>
<td>-0.330031862</td>
<td>-0.314827988</td>
</tr>
<tr>
<td>3000</td>
<td>1.672011647</td>
<td>-0.000314861</td>
<td>-0.314860815</td>
<td>-0.299671369</td>
<td>-0.300421766</td>
<td>-0.285174771</td>
</tr>
<tr>
<td>4000</td>
<td>1.371974671</td>
<td>-0.000285215</td>
<td>-0.28521477</td>
<td>-0.269985097</td>
<td>-0.270819965</td>
<td>-0.255518962</td>
</tr>
<tr>
<td>5000</td>
<td>1.101584029</td>
<td>-0.000255569</td>
<td>-0.255568769</td>
<td>-0.240288934</td>
<td>-0.241229683</td>
<td>-0.225859126</td>
</tr>
<tr>
<td>6000</td>
<td>0.86083984</td>
<td>-0.000225923</td>
<td>-0.225922843</td>
<td>-0.210578713</td>
<td>-0.211656061</td>
<td>-0.196192678</td>
</tr>
<tr>
<td>7000</td>
<td>0.649742329</td>
<td>-0.000196277</td>
<td>-0.196277061</td>
<td>-0.180847533</td>
<td>-0.182107814</td>
<td>-0.166514572</td>
</tr>
<tr>
<td>8000</td>
<td>0.468291941</td>
<td>-0.000166632</td>
<td>-0.166631578</td>
<td>-0.151083097</td>
<td>-0.152600965</td>
<td>-0.136813845</td>
</tr>
<tr>
<td>9000</td>
<td>0.316489683</td>
<td>-0.000136987</td>
<td>-0.136986807</td>
<td>-0.121261112</td>
<td>-0.123168435</td>
<td>-0.10706284</td>
</tr>
<tr>
<td>10000</td>
<td>0.194338226</td>
<td>-0.000107344</td>
<td>-0.107344076</td>
<td>-0.091325871</td>
<td>-0.093889789</td>
<td>-0.077173919</td>
</tr>
<tr>
<td>11000</td>
<td>0.101846673</td>
<td>-7.77092E-05</td>
<td>-0.077709191</td>
<td>-0.061114172</td>
<td>-0.065014768</td>
<td>-0.046731644</td>
</tr>
<tr>
<td>12000</td>
<td>0.039063554</td>
<td>-4.81266E-05</td>
<td>-0.048126563</td>
<td>-0.029822809</td>
<td>-0.037842242</td>
<td>-0.008509661</td>
</tr>
<tr>
<td>13000</td>
<td>0.007069167</td>
<td>-2.04731E-05</td>
<td>-0.020473075</td>
<td>#NUM!</td>
<td>#NUM!</td>
<td>#NUM!</td>
</tr>
</table>
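For comparison, here is a minimal Python sketch of the same RK4 integration (assuming $g=9.81\ \mathrm{m/s^2}$ and the ODE exactly as stated above; the 100 s step matches the one used for the final answer below):

```python
import math

def dhdt(h):
    # right-hand side of dh/dt; the sqrt argument is clipped at zero to guard against overshoot
    return -0.55 * math.sqrt(2 * 9.81 * max(h, 0.0)) / 1e4

def rk4_step(h, dt):
    k1 = dhdt(h)
    k2 = dhdt(h + dt/2 * k1)
    k3 = dhdt(h + dt/2 * k2)
    k4 = dhdt(h + dt * k3)
    return h + dt/6 * (k1 + 2*k2 + 2*k3 + k4)

t, h, dt = 0.0, 2.75, 100.0
while h > 0.0:
    h = rk4_step(h, dt)
    t += dt
print(t)   # roughly 13600 s, close to the reported exact emptying time of 13620.9 s
```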
Therefore, the tank will be empty after approximately 13500 seconds (using a step of 100 s). Compared to the exact
solution, this result is acceptable.
The online version of this homework (including the [Excel worksheet](https://wanglongqi.github.io/public/res/RK4.xlsx)) is uploaded to [http://goo.gl/ALDR1E](http://goo.gl/ALDR1E).
# Decision Tree & Ensemble Learning
Classification And Regression Trees (CART for short) is a term introduced by [Leo Breiman](https://en.wikipedia.org/wiki/Leo_Breiman) to refer to Decision Tree algorithms that can be used for classification or regression predictive modeling problems.
In this lab assignment, you will implement various ways to calculate impurity which is used to split data in constructing the decision trees and apply the Decision Tree and ensemble learning algorithms to solve two real-world problems: a classification one and a regression one.
```python
# import packages
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from matplotlib.legend_handler import HandlerLine2D
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
import math as math
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
# make this notebook's output stable across runs
np.random.seed(0)
```
## Gini impurity and Entropy
#### Gini impurity
The CART algorithm recursively splits the training set into two subsets using a single feature k and a threshold $t_k$. The best feature and threshold are chosen to produce the purest subsets weighted by their size. **Gini impurity** measures the impurity of the data points in a set and is used to evaluate how good a split is when the CART algorithm searches for the best pair of feature and the threshold.
To compute Gini impurity for a set of items with J classes, suppose $i \in \{1, 2, \dots, J\}$ and let $p_i$ be the fraction of items labeled with class i in the set.
\begin{align}
I(p) = 1 - \sum_{i=1}^J p_i^2
\end{align}
The following function calculates the gini impurity for a given set of data points.
```python
def gini_impurity(x):
"""
    This function calculates the Gini impurity for a given set of data points.
Args:
x: a numpy ndarray
"""
unique, counts = np.unique(x, return_counts=True)
probabilities = counts / sum(counts)
gini = 1 - sum([p*p for p in probabilities])
return gini
```
```python
np.testing.assert_equal(0, gini_impurity(np.array([1, 1, 1])))
np.testing.assert_equal(0.5, gini_impurity(np.array([1, 0, 1, 0])))
np.testing.assert_equal(3/4, gini_impurity(np.array(['a', 'b', 'c', 'd'])))
np.testing.assert_almost_equal(2.0/3, gini_impurity(np.array([1, 2, 3, 1, 2, 3])))
```
#### Entropy
Another popular measure of impurity is called **entropy**, which measures the average information content of a message. Entropy is zero when all messages are identical. When it applied to CART, a set's entropy is zero when it contains instances of only one class. Entropy is calculated as follows:
\begin{align}
I(p) = - \sum_{i=1}^J p_i \log_2{p_i}
\end{align}
<span style="color:orange">**Question 1: In this exercise, you will implement the entropy function.**
```python
def entropy(x):
"""
    TODO: This function calculates the entropy of an array.
Args:
x: a numpy ndarray
"""
unique, counts = np.unique(x, return_counts=True)
probabilities = counts / sum(counts)
e = abs(-(sum(p* math.log(p,2) for p in probabilities)))
return e
```
```python
np.testing.assert_equal(0, entropy(np.array([1, 1, 1])))
np.testing.assert_equal(1.0, entropy(np.array([1, 0, 1, 0])))
np.testing.assert_equal(2.0, entropy(np.array(['a', 'b', 'c', 'd'])))
np.testing.assert_almost_equal(1.58496, entropy(np.array([1, 2, 3, 1, 2, 3])), 4)
```
---
## Iris dataset
The Iris data set contains the morphologic variation of Iris flowers of three related species (Iris setosa, Iris virginica and Iris versicolor). Four features were measured from each observation (see image below):
- Sepal.Length: sepal length in centimeters.
- Sepal.Width: sepal width in centimeters.
- Petal.Length: petal length in centimeters.
- Petal.Width: petal width in centimeters.
<table>
<tr>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td>Iris setosa</td>
<td>Iris versicolor</td>
<td>Iris virginica</td>
</tr>
</table>
```python
# load the iris train and test data from CSV files
train = pd.read_csv('https://raw.githubusercontent.com/zariable/data/master/iris_train.csv')
test = pd.read_csv('https://raw.githubusercontent.com/zariable/data/master/iris_test.csv')
train_x = train.iloc[:,0:4]
train_y = train.iloc[:,4]
test_x = test.iloc[:,0:4]
test_y = test.iloc[:,4]
# print the number of instances in each class
print(train_y.value_counts().sort_index())
print(test_y.value_counts().sort_index())
```
Iris-setosa 34
Iris-versicolor 32
Iris-virginica 39
Name: species, dtype: int64
Iris-setosa 16
Iris-versicolor 18
Iris-virginica 11
Name: species, dtype: int64
### Decision Tree Classifier
<span style="color:orange">**In this exercise, we will apply the Decision Tree classifier to classify the Iris flower data.**
#### Train and visualize a simple Decision Tree
<span style="color:orange">**Question 2: create a decision tree with max_depth of 2.**
```python
# TODO: read the scikit-learn doc on DecisionTreeClassifier and train a Decision Tree with max depth of 2
dtc = DecisionTreeClassifier(max_depth=2)
dtc.fit(train_x,train_y)
```
DecisionTreeClassifier(ccp_alpha=0.0, class_weight=None, criterion='gini',
max_depth=2, max_features=None, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, presort='deprecated',
random_state=None, splitter='best')
Now let's visualize the decision tree we just trained on the iris dataset and see how it makes predictions. Note that if the following code does not work for you because graphviz is missing, do not worry about it; you should still be able to move on.
```python
from sklearn.externals.six import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
dot_data = StringIO()
feature_names = train_x.columns
class_names = train_y.unique()
class_names.sort()
export_graphviz(dtc, out_file=dot_data, feature_names=feature_names, class_names=class_names, filled=True, rounded=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
```
Decision trees are easy to interpret and are often referred to as *whitebox* machine learning algorithms. Let's see how the decision tree represented above makes predictions. Suppose you find an iris flower and want to classify it into setosa, versicolor or virginica. You start at the root node (the very top node in the tree). In this node, we check if the flower's petal length is smaller than or equal to 2.35 cm. If it is, we move to the left child and predict setosa to be its class. Otherwise, we move to the right child node. Then similarly we check if the petal length is smaller than or equal to 4.95 cm. If it is, we move to its left child node and predict versicolor to be its class. Otherwise, we move to its right child and predict virginica to be its class.
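To make the traversal concrete, these rules can be written as plain `if` statements (a sketch for illustration only; the thresholds 2.35 and 4.95 are the ones shown in the plotted tree, and only petal length is used on these paths):

```python
def classify_iris_by_rules(petal_length):
    # hand-written version of the fitted depth-2 tree shown above
    if petal_length <= 2.35:
        return 'Iris-setosa'
    elif petal_length <= 4.95:
        return 'Iris-versicolor'
    else:
        return 'Iris-virginica'

print(classify_iris_by_rules(1.4))  # Iris-setosa
print(classify_iris_by_rules(4.5))  # Iris-versicolor
print(classify_iris_by_rules(5.8))  # Iris-virginica
```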
#### Prediction with Decision tree
With this simple decision tree above, we can apply it to make predictions on the test dataset and evaluate its performance.
<span style="color:orange">**Question 3: make prediction using the trained decision tree model on the test data.**
```python
# TODO: use the trained decision tree model to make predictions on the test data and evaluate the model performance.
test_z = dtc.predict(test_x)
print("model accuracy: {}".format(accuracy_score(test_y, test_z)))
print("model confusion matrix:\n {}".format(confusion_matrix(test_y, test_z, labels=['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])))
```
model accuracy: 0.9111111111111111
model confusion matrix:
[[16 0 0]
[ 0 17 1]
[ 0 3 8]]
***Ans 3: model accuracy: 0.9111111111111111
model confusion matrix:
[[16 0 0]
[ 0 17 1]
[ 0 3 8]] ***
#### Hyper-parameters
Hyper-parameters control the complexity of the decision tree model. For example, the deeper the tree is, the more complex the patterns the model will be able to capture. In this exercise, we train decision trees with increasing maximum depth and plot their performance. We should see the accuracy on the training data increase as the tree grows deeper, but the accuracy on the test data might not, as the model will eventually start to overfit and will not generalize well to the unseen test data.
<span style="color:orange">**Question 4: for each value of max_depth, we train a decision tree model and evaluate its accuracy on both train and test data, and plot both accuracies in the figure.**
```python
```
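One compact way to run this sweep (a sketch; the depth range 1-10 is an arbitrary choice) is to loop over `max_depth`, record both accuracies, and plot them. A step-by-step version follows below.

```python
depths = range(1, 11)
train_acc, test_acc = [], []
for d in depths:
    model = DecisionTreeClassifier(max_depth=d, random_state=0)
    model.fit(train_x, train_y)
    train_acc.append(accuracy_score(train_y, model.predict(train_x)))
    test_acc.append(accuracy_score(test_y, model.predict(test_x)))

plt.plot(depths, train_acc, marker='o', label='train accuracy')
plt.plot(depths, test_acc, marker='s', label='test accuracy')
plt.xlabel('max_depth')
plt.ylabel('accuracy')
plt.legend()
plt.show()
```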
```python
dtc = DecisionTreeClassifier(max_depth=2)
dtc.fit(train_x,train_y)
test_z = dtc.predict(test_x)
print("model on train data: {}".format(accuracy_score(train_y, dtc.predict(train_x))))
print("model on test data: {}".format(accuracy_score(test_y, test_z)))
#print("model confusion matrix:\n {}".format(confusion_matrix(test_y, test_z, labels=['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])))
dep2_train= round((accuracy_score(train_y, dtc.predict(train_x))*100),2)
dep2_test=round((accuracy_score(test_y, test_z)*100),2)
```
model on train data: 0.9619047619047619
model on test data: 0.9111111111111111
```python
# TODO: train the decision tree model with various max_depth, make predictions and evaluate on both train and test data.
dtc = DecisionTreeClassifier(max_depth=3)
dtc.fit(train_x,train_y)
test_z = dtc.predict(test_x)
print("model on train data: {}".format(accuracy_score(train_y, dtc.predict(train_x))))
print("model on test data: {}".format(accuracy_score(test_y, test_z)))
#print("model confusion matrix:\n {}".format(confusion_matrix(test_y, test_z, labels=['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])))
dep3_train= round((accuracy_score(train_y, dtc.predict(train_x))*100),2)
dep3_test=round((accuracy_score(test_y, test_z)*100),2)
```
model on train data: 0.9809523809523809
model on test data: 0.9777777777777777
```python
dtc = DecisionTreeClassifier(max_depth=4)
dtc.fit(train_x,train_y)
test_z = dtc.predict(test_x)
print("model on train data: {}".format(accuracy_score(train_y, dtc.predict(train_x))))
print("model accuracy: {}".format(accuracy_score(test_y, test_z)))
dep4_train= round((accuracy_score(train_y, dtc.predict(train_x))*100),2)
dep4_test=round((accuracy_score(test_y, test_z)*100),2)
```
model on train data: 1.0
model accuracy: 0.9777777777777777
```python
dtc = DecisionTreeClassifier(max_depth=5)
dtc.fit(train_x,train_y)
test_z = dtc.predict(test_x)
print("model on train data: {}".format(accuracy_score(train_y, dtc.predict(train_x))))
print("model accuracy: {}".format(accuracy_score(test_y, test_z)))
dep5_train= round((accuracy_score(train_y, dtc.predict(train_x))*100),2)
dep5_test=round((accuracy_score(test_y, test_z)*100),2)
```
model on train data: 1.0
model accuracy: 0.9777777777777777
```python
accuracy_train= {"labels":[2,3,4,5], "accuracy": [dep2_train,dep3_train,dep4_train,dep5_train]}
ax=sns.barplot(x="labels",y="accuracy",data=accuracy_train)
ax.set(title="Train data accuracy",xlabel="Max_depth", ylabel="Accuracy",ylim=(96,100))
```
```python
accuracy_test= {"labels":[2,3,4,5], "accuracy": [dep2_test,dep3_test,dep4_test,dep5_test]}
ax=sns.barplot(x="labels",y="accuracy",data=accuracy_test)
ax.set(title="Test_data accuracy",xlabel="Max_depth", ylabel="Accuracy",ylim=(90,100))
```
*** Ans 4: With the training data, accuracy keeps increasing with max depth and reaches 100% at depths 4 and 5, which indicates overfitting beyond depth 3. With the test data, accuracy reaches its maximum of 97.78% at max depth 3 and remains constant beyond that. ***
#### Fine-tune the decision tree classifier
Decision trees are very powerful models with very few assumptions about the incoming training data (unlike linear models, which assume the data are linear); however, they are more likely to overfit the data and won't generalize well to unseen data. To avoid overfitting, we need to restrict the decision tree's freedom during training via regularization (e.g. max_depth, min_samples_split, max_leaf_nodes, etc.).
To fine-tune the model and combat overfitting, use grid search with cross-validation (with the help of the GridSearchCV class) to find the best hyper-parameter settings for the DecisionTreeClassifier. In particular, we would like to fine-tune the following hyper-parameters:
- **criterion**: this defines how we measure the quality of a split. We can choose either "gini" for the Gini impurity or "entropy" for the information gain.
- **max_depth**: the maximum depth of the tree. This indicates how deep the tree can be. The deeper the tree, the more splits it has and it captures more information about the data. But meanwhile, deeper trees are more likely to overfit the data. For this practice, we will choose from {1, 2, 3} given there are only 4 features in the iris dataset.
- **min_samples_split**: This value represents the minimum number of samples required to split an internal node. The smaller this value is, the deeper the tree will grow, thus more likely to overfit. On the other hand, if the value is really large (the size of the training data in the extreme case), the tree will be very shallow and could suffer from underfit. In this practice, we choose from {0.01, 0.05, 0.1, 0.2}.
<span style="color:orange">**Question 5: Use grid search with 3-fold cross-validation to fine-tune the decision tree model and output the best hyper-parameters.**
```python
# TODO: fine-tune the model, use grid search with 3-fold cross-validation.
parameters = {"criterion" : ["gini","entropy"],
"max_depth" : [1,2,3],
"min_samples_split" : [0.01,0.05,0.1,0.2]
}
dt = DecisionTreeClassifier()
grid = GridSearchCV(dt, parameters, cv=3)
grid.fit(train_x, train_y)
# summarize the results of the grid search
print("The best score is {}".format(grid.best_score_))
print("The best hyper parameter setting is {}".format(grid.best_params_))
```
The best score is 0.9619047619047619
The best hyper parameter setting is {'criterion': 'gini', 'max_depth': 3, 'min_samples_split': 0.01}
*** Ans 5: The best score is 0.9619047619047619
The best hyper parameter setting is {'criterion': 'gini', 'max_depth': 3, 'min_samples_split': 0.01} ***
#### Prediction and Evaluation
Now we have a fine-tuned decision tree classifier based on the training data, let's apply this model to make predictions on the test data and evaluate its performance.
```python
test_z= grid.predict(test_x)
print("model accuracy: {}".format(accuracy_score(test_y, test_z)))
print("model confusion matrix:\n {}".format(confusion_matrix(test_y, test_z, labels=['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])))
```
model accuracy: 0.9777777777777777
model confusion matrix:
[[16 0 0]
[ 0 17 1]
[ 0 0 11]]
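For a per-class breakdown of the same predictions, scikit-learn's `classification_report` can optionally be printed as well (not part of the original assignment):
```python
# Optional: per-class precision, recall and F1 for the fine-tuned tree.
from sklearn.metrics import classification_report
print(classification_report(test_y, test_z))
```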
### Random Forest
**Question 6: Apply Random Forest together with Gridsearch to the Iris dataset and evaluate its accuracy.**
```python
### TODO
params = {
"criterion" :['gini', 'entropy'],
"n_estimators" : [100, 200],
"max_depth" : [2,3,4],
"min_samples_split" :[0.01,0.05,0.1,0.2]
}
rfc_grid= GridSearchCV(RandomForestClassifier(), params, cv=3)
rfc_grid.fit(train_x,train_y)
print("The best score is {}".format(rfc_grid.best_score_))
print("The best hyper parameter setting is {}".format(rfc_grid.best_params_))
```
The best score is 0.9619047619047619
The best hyper parameter setting is {'criterion': 'gini', 'max_depth': 2, 'min_samples_split': 0.05, 'n_estimators': 100}
```python
test_z=rfc_grid.predict(test_x)
print("model accuracy: {}".format(accuracy_score(test_y, test_z)))
print("model confusion matrix:\n {}".format(confusion_matrix(test_y, test_z, labels=['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])))
```
model accuracy: 0.9777777777777777
model confusion matrix:
[[16 0 0]
[ 0 17 1]
[ 0 0 11]]
*** The best score is 0.9619047619047619
The best hyper parameter setting is {'criterion': 'gini', 'max_depth': 2, 'min_samples_split': 0.05, 'n_estimators': 100}
model accuracy: 0.9777777777777777
model confusion matrix:
[[16 0 0]
[ 0 17 1]
[ 0 0 11]]***
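As an optional aside (not part of the original assignment), the best random forest found by the grid search also exposes feature importances, which hint at which measurements drive the splits:
```python
# Optional: feature importances of the best random forest from the grid search.
best_rf = rfc_grid.best_estimator_
for name, score in zip(train_x.columns, best_rf.feature_importances_):
    print("{}: {:.3f}".format(name, score))
```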
### Adaboost
**Question 7: Apply Adaboost together with Gridsearch to the Iris dataset and evaluate its accuracy.**
```python
### TODO
paramsada = {"n_estimators" : [100, 200],
             "learning_rate" : [0.05, 0.1, 0, 2]  # note: 0 is invalid for AdaBoost (likely a typo for 0.2) and triggers the FitFailedWarning below
}
ada = AdaBoostClassifier(DecisionTreeClassifier(max_depth=4))
adaboost_grid= GridSearchCV(ada, paramsada, cv=3)
adaboost_grid.fit(train_x,train_y)
# summarize the results of the grid search
print("The best score is {}".format(adaboost_grid.best_score_))
print("The best hyper parameter setting is {}".format(adaboost_grid.best_params_))
```
The best score is 0.9523809523809522
The best hyper parameter setting is {'learning_rate': 0.05, 'n_estimators': 100}
/usr/local/lib/python3.7/dist-packages/sklearn/model_selection/_validation.py:536: FitFailedWarning: Estimator fit failed. The score on this train-test partition for these parameters will be set to nan. Details:
ValueError: learning_rate must be greater than zero
FitFailedWarning)
```python
test_z=adaboost_grid.predict(test_x)
print("model accuracy: {}".format(accuracy_score(test_y, test_z)))
print("model confusion matrix:\n {}".format(confusion_matrix(test_y, test_z, labels=['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])))
```
model accuracy: 0.9777777777777777
model confusion matrix:
[[16 0 0]
[ 0 17 1]
[ 0 0 11]]
*** The best score is 0.9619047619047619
The best hyper parameter setting is {'learning_rate': 2, 'n_estimators': 200}
Model accuracy: 0.9777777777777777
Model confusion matrix:
[[16 0 0]
[ 0 17 1]
[ 0 0 11]]***
### Gradient Boosting
**Question 8: Apply Boosting together with Gridsearch to the Iris dataset and evaluate its accuracy.**
```python
### TODO
grd= GradientBoostingClassifier()
parameters1 = {
"loss":["deviance"],
"learning_rate": [0.01, 0.1],
"min_samples_split": [0.01,0.05,0.1,0.2],
"max_depth":[2, 3, 4],
"n_estimators":[100]
}
gbc_grid = GridSearchCV(grd, parameters1, cv=3)
gbc_grid.fit(train_x, train_y)
# summarize the results of the grid search
print("The best score is {}".format(gbc_grid.best_score_))
print("The best hyper parameter setting is {}".format(gbc_grid.best_params_))
```
The best score is 0.9619047619047619
The best hyper parameter setting is {'learning_rate': 0.01, 'loss': 'deviance', 'max_depth': 2, 'min_samples_split': 0.1, 'n_estimators': 100}
```python
test_z=gbc_grid.predict(test_x)
print("model accuracy: {}".format(accuracy_score(test_y, test_z)))
print("model confusion matrix:\n {}".format(confusion_matrix(test_y, test_z, labels=['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])))
```
model accuracy: 0.9777777777777777
model confusion matrix:
[[16 0 0]
[ 0 17 1]
[ 0 0 11]]
*** The best score is 0.9619047619047619
The best hyper parameter setting is {'learning_rate': 0.01, 'loss': 'deviance', 'max_depth': 2, 'min_samples_split': 0.1, 'n_estimators': 100}
Model accuracy: 0.9777777777777777
Model confusion matrix:
[[16 0 0]
[ 0 17 1]
[ 0 0 11]] ***
---
**BONUS POINT: we will apply the supervised learning models we learnt so far to predict the California housing prices.**
## California Housing Dataset
The California Housing dataset appeared in a 1997 paper titled Sparse Spatial Autoregressions by Pace, R. Kelley and Ronald Barry, published in the Statistics and Probability Letters journal. They built it using the 1990 California census data. It contains one row per census block group. A block group is the smallest geographical unit for which the U.S. Census Bureau publishes sample data (a block group typically has a population of 600 to 3,000 people).
```python
# Load train and test data from CSV files.
train = pd.read_csv('https://raw.githubusercontent.com/zariable/data/master/housing_train.csv')
test = pd.read_csv('https://raw.githubusercontent.com/zariable/data/master/housing_test.csv')
train_x = train.iloc[:,0:8]
train_y = train.iloc[:,8]
test_x = test.iloc[:,0:8]
test_y = test.iloc[:,8]
```
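Before fitting any model, a quick optional look at the loaded data can catch obvious issues (not part of the original assignment):
```python
# Optional sanity check on the California housing data.
print(train.shape, test.shape)
print(list(train_x.columns))
print(train_y.describe())
```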
```python
grd= GradientBoostingRegressor()
parameters1 = {
"learning_rate": [0.1, 0.2],
"min_samples_split": [0.01],
"max_depth":[4,7],
"n_estimators":[100]
}
gbc_grid = GridSearchCV(grd, parameters1, cv=3)
gbc_grid.fit(train_x, train_y)
# summarize the results of the grid search
print("The best score is {}".format(gbc_grid.best_score_))
print("The best hyper parameter setting is {}".format(gbc_grid.best_params_))
test_z= gbc_grid.predict(test_x)
rsq= sm.r2_score(test_y,test_z)
print(rsq)
```
The best score is 0.8167181654156304
The best hyper parameter setting is {'learning_rate': 0.2, 'max_depth': 7, 'min_samples_split': 0.01, 'n_estimators': 100}
0.8236932422217006
```python
from xgboost import XGBRegressor
xgb= XGBRegressor(objective='reg:squarederror')
params = {"learning_rate" :[0.1,0.2],
"max_depth" : [5],
"n_estimators" : [500]
}
xgb_grid = GridSearchCV(xgb,params, cv=2, n_jobs= -1)
xgb_grid.fit(train_x,train_y)
test_z=xgb_grid.predict(test_x)
```
```python
print("The best score is {}".format(xgb_grid.best_score_))
print("The best hyper parameter setting is {}".format(xgb_grid.best_params_))
```
The best score is 0.8182037370882838
The best hyper parameter setting is {'learning_rate': 0.1, 'max_depth': 5, 'n_estimators': 500}
```python
rsq= sm.r2_score(test_y,test_z)
print(rsq)
```
0.8334510273653781
*** XGBRegressor model gives better R squared value compared to other models ***
### End of Assignment 2
---
|
09fb6b1d3215dfd91a65ef90b91e92a7b043cc43
| 141,184 |
ipynb
|
Jupyter Notebook
|
Iris Dataset Ensemble learning.ipynb
|
jessiececilya/ensemble-irisdataset
|
a66de222086b14fd0730d6e57152de5ca48a3d8d
|
[
"MIT"
] | null | null | null |
Iris Dataset Ensemble learning.ipynb
|
jessiececilya/ensemble-irisdataset
|
a66de222086b14fd0730d6e57152de5ca48a3d8d
|
[
"MIT"
] | null | null | null |
Iris Dataset Ensemble learning.ipynb
|
jessiececilya/ensemble-irisdataset
|
a66de222086b14fd0730d6e57152de5ca48a3d8d
|
[
"MIT"
] | null | null | null | 109.870817 | 77,882 | 0.823634 | true | 6,068 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.879147 | 0.803174 | 0.706108 |
__label__eng_Latn
| 0.900503 | 0.478856 |
###### Content under Creative Commons Attribution license CC-BY 4.0, code under MIT license © 2014 L.A. Barba, C.D. Cooper, G.F. Forsyth. Based on [CFD Python](https://github.com/barbagroup/CFDPython), © 2013 L.A. Barba, also under CC-BY license.
# Relax and hold steady
Welcome to the second notebook of *"Relax and hold steady: elliptic problems"*, **Module 5** of the course [**"Practical Numerical Methods with Python"**](https://openedx.seas.gwu.edu/courses/course-v1:MAE+MAE6286+2017/about). Are you relaxed yet?
In the [previous notebook](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/05_relax/05_01_2D.Laplace.Equation.ipynb), you learned to use Jacobi iterations to solve Laplace's equation. The iterations *relax* the solution from an initial guess to the final, steady-state solution. You also saw again that the way we treat boundary conditions can influence our solution. Using a first-order approximation of the Neumann boundary messed up our spatial convergence in the whole domain! (We expected second-order spatial convergence from the central difference scheme, but we got closer to first order.) This was easily fixed by using a second-order scheme for the Neumann boundary. *It's always good to check that you get the expected order of convergence.*
A word of warning: in this course module, we will introduce a different use of the word *"convergence"*. Before, we used it to refer to the decay of the truncation errors (in space and time) with a decrease in the grid spacing ($\Delta x$ and $\Delta t$). Now, we also have a relaxation scheme, and we use the word convergence to mean that the iterative solution approaches the exact solution of the linear system. Sometimes, this is called *algebraic convergence*. We'll concern ourselves with this in the next lesson. But first, let's play with Poisson.
## Poisson equation
The **Poisson equation** has a forcing function that drives the solution to its steady state. Unlike the Laplace equation, Poisson's equation involves imposed values inside the field (a.k.a., sources):
$$
\frac{\partial ^2 p}{\partial x^2} + \frac{\partial ^2 p}{\partial y^2} = b
$$
In discretized form, this looks almost the same as [the Laplace Equation](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/05_relax/05_01_2D.Laplace.Equation.ipynb), except for the source term on the right-hand side:
$$
\frac{p_{i+1,j}^{k}-2p_{i,j}^{k}+p_{i-1,j}^{k}}{\Delta x^2}+\frac{p_{i,j+1}^{k}-2 p_{i,j}^{k}+p_{i,j-1}^{k}}{\Delta y^2}=b_{i,j}^{k}
$$
As before, we rearrange this to obtain an equation for $p$ at point $i,j$, based on its neighbors:
$$
p_{i,j}^{k+1}=\frac{(p_{i+1,j}^{k}+p_{i-1,j}^{k})\Delta y^2+(p_{i,j+1}^{k}+p_{i,j-1}^{k})\Delta x^2-b_{i,j}^{k}\Delta x^2\Delta y^2}{2(\Delta x^2+\Delta y^2)}
$$
It's slightly more complicated than the Laplace equation, but nothing we can't handle.
### An example problem
Let's consider the following Poisson equation:
$$
\begin{equation}
\nabla^2 p = -2\,\frac{\pi}{L_x}\,\frac{\pi}{L_y}\sin\left( \frac{\pi x}{L_x} \right) \cos\left(\frac{\pi y}{L_y}\right)
\end{equation}
$$
in the domain
$$
\left\lbrace \begin{align*}
0 &\leq x\leq 1 \\
-0.5 &\leq y \leq 0.5
\end{align*} \right.
$$
where $L_x = L_y = 1$ and with Dirichlet boundary conditions
$$p=0 \text{ at } \left\lbrace
\begin{align*}
x&=0\\
y&=0\\
y&=-0.5\\
y&=0.5
\end{align*} \right.$$
To solve this equation, we assume an initial state of $p=0$ everywhere, apply the boundary conditions and then iteratively relax the system until we converge on a solution.
To start, let's import the libraries and set up our spatial mesh.
```python
import numpy
from matplotlib import pyplot
%matplotlib inline
```
```python
# Set the font family and size to use for Matplotlib figures.
pyplot.rcParams['font.family'] = 'serif'
pyplot.rcParams['font.size'] = 16
```
```python
# Set parameters.
nx = 41 # number of points in the x direction
ny = 41 # number of points in the y direction
xmin, xmax = 0.0, 1.0 # domain limits in the x direction
ymin, ymax = -0.5, 0.5 # domain limits in the y direction
Lx = (xmax - xmin) # domain length in the x direction
Ly = (ymax - ymin) # domain length in the y direction
dx = Lx / (nx - 1) # grid spacing in the x direction
dy = Ly / (ny - 1) # grid spacing in the y direction
# Create the gridline locations.
x = numpy.linspace(xmin, xmax, num=nx)
y = numpy.linspace(ymin, ymax, num=ny)
```
```python
def poisson_source(x, y, Lx, Ly):
"""
Computes and returns the source term (right-hand side)
of the Poisson equation.
Parameters
----------
x : numpy.ndarray
The gridline locations in the x direction
as a 1D array of floats.
y : numpy.ndarray
The gridline locations in the y direction
as a 1D array of floats.
Lx : float
Domain length in the x direction.
Ly : float
Domain length in the y direction.
Returns
-------
b : numpy.ndarray of floats
The forcing function as a 2D array.
"""
X, Y = numpy.meshgrid(x, y)
b = (-2.0 * numpy.pi / Lx * numpy.pi / Ly *
numpy.sin(numpy.pi * X / Lx) *
numpy.cos(numpy.pi * Y / Ly))
return b
```
The Jacobi iterations need an exit condition, based on some norm of the difference between two consecutive iterations. We can use the same relative L2-norm that we wrote for the Laplace exit condition, so we saved the function into a helper Python file (`helper.py`) for easy importing.
```python
from helper import l2_norm
```
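For reference, here is a minimal sketch of what the imported `l2_norm` computes; this is an assumed definition for illustration, and the actual implementation in `helper.py` may differ slightly.
```python
def l2_norm_sketch(p, p_ref):
    # Relative L2-norm of the difference between p and a reference field.
    return numpy.sqrt(numpy.sum((p - p_ref)**2) / numpy.sum(p_ref**2))
```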
Now, what value to choose for the exit condition? We saw in the previous notebook that with an exit tolerance of $10^{-8}$, we could converge well for the different grids we tried, and observe second-order spatial convergence (with the second-order Neumann BC). We speculated in the end that we might be able to use a less stringent exit tolerance, since the spatial error was a lot larger (around $0.0002$ for the finer grid). Here, we'll try with $2\times 10^{-7}$. Go ahead and try with different values and see what you get!
It's time to write the function to solve the Poisson equation. Notice that all of the boundaries in this problem are Dirichlet boundaries, so no BC updates required!
There's also one extra piece we're adding in here. To later examine the convergence of the iterative process, we will save the L2-norm of the difference between successive solutions. A plot of this quantity with respect to the iteration number will be an indication of how fast the relaxation scheme is converging.
```python
def poisson_2d_jacobi(p0, b, dx, dy, maxiter=20000, rtol=1e-6):
"""
Solves the 2D Poisson equation for a given forcing term
using Jacobi relaxation method.
The function assumes Dirichlet boundary conditions with value zero.
The exit criterion of the solver is based on the relative L2-norm
of the solution difference between two consecutive iterations.
Parameters
----------
p0 : numpy.ndarray
The initial solution as a 2D array of floats.
b : numpy.ndarray
The forcing term as a 2D array of floats.
dx : float
Grid spacing in the x direction.
dy : float
Grid spacing in the y direction.
maxiter : integer, optional
Maximum number of iterations to perform;
default: 20000.
rtol : float, optional
Relative tolerance for convergence;
default: 1e-6.
Returns
-------
p : numpy.ndarray
The solution after relaxation as a 2D array of floats.
ite : integer
The number of iterations performed.
conv : list
The convergence history as a list of floats.
"""
p = p0.copy()
conv = [] # convergence history
diff = rtol + 1.0 # initial difference
ite = 0 # iteration index
while diff > rtol and ite < maxiter:
pn = p.copy()
p[1:-1, 1:-1] = (((pn[1:-1, :-2] + pn[1:-1, 2:]) * dy**2 +
(pn[:-2, 1:-1] + pn[2:, 1:-1]) * dx**2 -
b[1:-1, 1:-1] * dx**2 * dy**2) /
(2.0 * (dx**2 + dy**2)))
# Dirichlet boundary conditions at automatically enforced.
# Compute and record the relative L2-norm of the difference.
diff = l2_norm(p, pn)
conv.append(diff)
ite += 1
return p, ite, conv
```
We can use the `plot_3d` function we wrote in the previous notebook to explore the field $p$, before and after the relaxation. We saved this plotting function into the helper Python file, so we can re-use it here.
```python
from helper import plot_3d
```
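For readers without the helper file, a minimal sketch of a `plot_3d`-style surface plot is shown below; this is an assumption for illustration and the actual `helper.py` version may differ.
```python
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, registers the 3D projection

def plot_3d_sketch(x, y, p, label='$p$', elev=30.0, azim=45.0):
    # Plot the scalar field p as a surface over the (x, y) grid.
    fig = pyplot.figure(figsize=(8.0, 6.0))
    ax = fig.add_subplot(111, projection='3d')
    X, Y = numpy.meshgrid(x, y)
    ax.plot_surface(X, Y, p, cmap='viridis')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$y$')
    ax.set_zlabel(label)
    ax.view_init(elev=elev, azim=azim)
```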
Now we initialize all of the problem variables and plot!
```python
# Set the initial conditions.
p0 = numpy.zeros((ny, nx))
# Compute the source term.
b = poisson_source(x, y, Lx, Ly)
# Plot the initial scalar field.
plot_3d(x, y, p0)
```
That looks suitably boring. Zeros everywhere and boundaries held at zero. If this were a Laplace problem we would already be done!
But the Poisson problem has a source term that will evolve this zero initial guess to something different. Let's run our relaxation scheme and see what effect the forcing function has on `p`.
```python
# Compute the solution using Jacobi relaxation method.
p, ites, conv = poisson_2d_jacobi(p0, b, dx, dy, rtol=2e-7)
print('Jacobi relaxation: {} iterations '.format(ites) +
'to reach a relative difference of {}'.format(conv[-1]))
```
Jacobi relaxation: 3125 iterations to reach a relative difference of 1.9958631078740742e-07
It took 3,125 iterations to converge to the exit criterion (that's quite a lot, don't you think?). Let's now take a look at a plot of the final field:
```python
# Plot the solution.
plot_3d(x, y, p)
```
Something has definitely happened. That looks good, but what about the error? This problem has the following analytical solution:
$$
\begin{equation}
p(x,y) = \sin{\left(\frac{x\pi}{L_x} \right)}\cos{\left(\frac{y\pi}{L_y} \right)}
\end{equation}
$$
Time to compare the calculated solution to the analytical one. Let's do that.
```python
def poisson_solution(x, y, Lx, Ly):
"""
Computes and returns the analytical solution of the Poisson equation
on a given two-dimensional Cartesian grid.
Parameters
----------
x : numpy.ndarray
The gridline locations in the x direction
as a 1D array of floats.
y : numpy.ndarray
The gridline locations in the y direction
as a 1D array of floats.
Lx : float
Length of the domain in the x direction.
Ly : float
Length of the domain in the y direction.
Returns
-------
p : numpy.ndarray
The analytical solution as a 2D array of floats.
"""
X, Y = numpy.meshgrid(x, y)
p = numpy.sin(numpy.pi * X / Lx) * numpy.cos(numpy.pi * Y / Ly)
return p
```
```python
# Compute the analytical solution.
p_exact = poisson_solution(x, y, Lx, Ly)
```
```python
# Compute the relative L2-norm of the error.
l2_norm(p, p_exact)
```
0.00044962635351970283
That seems small enough. Of course, each application problem can have different accuracy requirements.
### Algebraic convergence
Remember that we saved the L2-norm of the difference between two consecutive iterations. The purpose of that was to look at how the relaxation scheme *converges*, in algebraic sense: with consecutive solutions getting closer and closer to each other. Let's use a line plot for this.
```python
# Plot the convergence history.
pyplot.figure(figsize=(9.0, 4.0))
pyplot.xlabel('Iterations')
pyplot.ylabel('Relative $L_2$-norm\nof the difference')
pyplot.grid()
pyplot.semilogy(conv, color='C0', linestyle='-', linewidth=2)
pyplot.xlim(0, len(conv));
```
It looks like in the beginning, iterations started converging pretty fast, but they quickly adopted a slower rate. As we saw before, it took more than 3,000 iterations to get to our target difference between two consecutive solutions (in L2-norm). That is a *lot* of iterations, and we would really like to relax faster! No worries, we'll learn to do that in the next notebook.
### Spatial convergence
For a sanity check, let's make sure the solution is achieving the expected second-order convergence in space.
```python
# List of the grid sizes to investigate.
nx_values = [11, 21, 41, 81]
# Create an empty list to record the error on each grid.
errors = []
# Compute the solution and error for each grid size.
for nx in nx_values:
    ny = nx  # same number of points in all directions
    dx = Lx / (nx - 1)  # grid spacing in the x direction
    dy = Ly / (ny - 1)  # grid spacing in the y direction
    # Create the gridline locations.
    x = numpy.linspace(xmin, xmax, num=nx)
    y = numpy.linspace(ymin, ymax, num=ny)
    # Set the initial conditions.
    p0 = numpy.zeros((ny, nx))
    # Compute the source term.
    b = poisson_source(x, y, Lx, Ly)
    # Relax the solution.
    # We do not return number of iterations
    # or the convergence history.
    p, ites, _ = poisson_2d_jacobi(p0, b, dx, dy, rtol=2e-7)
    print('[nx = {}] Number of Jacobi iterations: {}'.format(nx, ites))
    # Compute the analytical solution.
    p_exact = poisson_solution(x, y, Lx, Ly)
    # Compute and record the relative L2-norm of the error.
    errors.append(l2_norm(p, p_exact))
```
[nx = 11] Number of Jacobi iterations: 249
[nx = 21] Number of Jacobi iterations: 892
[nx = 41] Number of Jacobi iterations: 3125
[nx = 81] Number of Jacobi iterations: 10708
```python
# Plot the error versus the grid-spacing size.
pyplot.figure(figsize=(6.0, 6.0))
pyplot.xlabel(r'$\Delta x$')
pyplot.ylabel('Relative $L_2$-norm\nof the error')
pyplot.grid()
dx_values = Lx / (numpy.array(nx_values) - 1)
pyplot.loglog(dx_values, errors,
color='black', linestyle='--', linewidth=2, marker='o')
pyplot.axis('equal');
```
That looks pretty much second order! Remember that the boundary conditions can adversely affect convergence, but Dirichlet boundaries are "exact" and will never impact your convergence.
## Final word
We have used the difference between two consecutive solutions in the iterative process as a way to indicate convergence. However, this is *not* in general the best idea. For some problems and some iterative methods, you could experience iterates *stagnating* but the solution *not converging*.
Convergence of an iterative solution of a system $A \mathbf{x} = \mathbf{b}$ means that:
$$
\begin{equation}
\lim_{k \rightarrow \infty} \mathbf{x}^k = \mathbf{x}
\end{equation}
$$
The error in the solution is actually $\mathbf{x}-\mathbf{x}^k$, but we're looking at $\mathbf{x}^{k+1}-\mathbf{x}^k$ for our exit criterion. They are not the same thing and the second could tend to zero (or machine precision) without the first being comparably small.
A discussion of better ways to apply stopping criteria for iterative methods is a more advanced topic than we want to cover in this course module. Just keep this in mind as you continue your exploration of numerical methods in the future!
---
###### The cell below loads the style of the notebook
```python
from IPython.core.display import HTML
css_file = '../../styles/numericalmoocstyle.css'
HTML(open(css_file, 'r').read())
```
|
27ee70949e812e32f3e412dc374494d309d06332
| 396,727 |
ipynb
|
Jupyter Notebook
|
lessons/05_relax/05_02_2D.Poisson.Equation.ipynb
|
mcarpe/numerical-mooc
|
62b3c14c2c56d85d65c6075f2d7eb44266b49c17
|
[
"CC-BY-3.0"
] | 748 |
2015-01-04T22:50:56.000Z
|
2022-03-30T20:42:16.000Z
|
lessons/05_relax/05_02_2D.Poisson.Equation.ipynb
|
mcarpe/numerical-mooc
|
62b3c14c2c56d85d65c6075f2d7eb44266b49c17
|
[
"CC-BY-3.0"
] | 62 |
2015-02-02T01:06:07.000Z
|
2020-11-09T12:27:41.000Z
|
lessons/05_relax/05_02_2D.Poisson.Equation.ipynb
|
mcarpe/numerical-mooc
|
62b3c14c2c56d85d65c6075f2d7eb44266b49c17
|
[
"CC-BY-3.0"
] | 1,270 |
2015-01-02T19:19:52.000Z
|
2022-02-27T01:02:44.000Z
| 467.838443 | 175,360 | 0.934189 | true | 5,160 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.851953 | 0.727975 | 0.620201 |
__label__eng_Latn
| 0.972995 | 0.279265 |
<a href="https://colab.research.google.com/github/annissatessffaaye/QAPython/blob/master/02_Python_Numpy.ipynb" target="_parent"></a>
```python
```
The first thing we want to do is import numpy.
```python
import numpy as np
```
Let us first define a Python list containing the ages of 6 people.
```python
ages_list = [10, 5, 8, 32, 65, 43]
print(ages_list)
```
[10, 5, 8, 32, 65, 43]
There are 3 main ways to instantiate a Numpy ndarray object. One of these is to use `np.array(<collection>)`
```python
ages = np.array(ages_list)
print(type(ages))
print(ages)
```
<class 'numpy.ndarray'>
[10 5 8 32 65 43]
```python
print(ages)
print("Size:\t" , ages.size)
print("Shape:\t", ages.shape)
```
[10 5 8 32 65 43]
Size: 6
Shape: (6,)
```python
zeroArr = np.zeros(5)
print(zeroArr)
```
[0. 0. 0. 0. 0.]
### Multi-dim
Now let us define a new list containing the weights of these 6 people.
```python
weight_list = [32, 18, 26, 60, 55, 65]
```
Now, we define an ndarray containing all fo this information, and again print the size and shape of the array.
```python
people = np.array([ages_list, weight_list])
print("People:\t" , people)
print("Size:\t" , people.size)
print("Shape:\t", people.shape)
```
People: [[10 5 8 32 65 43]
[32 18 26 60 55 65]]
Size: 12
Shape: (2, 6)
```python
people = people.reshape(12,1)
print("People:\t" , people)
print("Size:\t" , people.size)
print("Shape:\t", people.shape)
```
People: [[10]
[ 5]
[ 8]
[32]
[65]
[43]
[32]
[18]
[26]
[60]
[55]
[65]]
Size: 12
Shape: (12, 1)
###### Note: The new shape must be the same "size" as the old shape
### Exercise
* Generate a 1D numpy array with the values [7, 9, 65, 33, 85, 99]
* Generate a matrix (2D numpy array) of the values:
\begin{align}
\mathbf{A} =
\begin{pmatrix}
1 & 2 & 4 \\
2 & 3 & 0 \\
0 & 5 & 1
\end{pmatrix}
\end{align}
* Change the dimensions of this array to another permitted shape
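One possible solution sketch for the exercise above (many variations are equally valid):
```python
arr = np.array([7, 9, 65, 33, 85, 99])
A = np.array([[1, 2, 4],
              [2, 3, 0],
              [0, 5, 1]])
print(A.reshape(9, 1).shape)  # any shape with 9 elements is permitted
```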
## Array Generation
Instead of defining an array manually, we can ask numpy to do it for us.
The `np.arange()` method creates a range of numbers with user defined steps between each.
```python
five_times_table = np.arange(0, 55, 5)
five_times_table
```
array([ 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50])
The `np.linspace()` method produces a specified number of evenly spaced values between a start point and an end point (both included).
```python
five_spaced = np.linspace(0,50,11)
print(five_spaced)
```
[ 0. 5. 10. 15. 20. 25. 30. 35. 40. 45. 50.]
The `np.repeat()` function repeats an object you pass it a specified number of times.
```python
twoArr = np.repeat(2, 10)
print(twoArr)
```
[2 2 2 2 2 2 2 2 2 2]
The `np.eye()` function creates an identity matrix/array for us.
```python
identity_matrix = np.eye(6)
print(identity_matrix)
```
[[1. 0. 0. 0. 0. 0.]
[0. 1. 0. 0. 0. 0.]
[0. 0. 1. 0. 0. 0.]
[0. 0. 0. 1. 0. 0.]
[0. 0. 0. 0. 1. 0.]
[0. 0. 0. 0. 0. 1.]]
# Operations
There are many, many operations which we can perform on arrays. Below, we demonstrate a few.
What is happening in each line?
```python
five_times_table
```
array([ 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50])
```python
print("1:", 2 * five_times_table)
print("2:", 10 + five_times_table)
print("3:", five_times_table - 1)
print("4:", five_times_table/5)
print("5:", five_times_table **2)
print("6:", five_times_table < 20)
```
1: [ 0 10 20 30 40 50 60 70 80 90 100]
2: [10 15 20 25 30 35 40 45 50 55 60]
3: [-1 4 9 14 19 24 29 34 39 44 49]
4: [ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10.]
5: [ 0 25 100 225 400 625 900 1225 1600 2025 2500]
6: [ True True True True False False False False False False False]
### Speed Test
If we compare the speed of these operations with the equivalent core Python code, we will notice a substantial difference.
```python
fives_list = list(range(0,5001,5))
fives_list
```
    [0,
     5,
     10,
     15,
     20,
     25,
     ...
     4985,
     4990,
     4995,
     ...]
```python
five_times_table_lge = np.arange(0,5001,5)
five_times_table_lge
```
array([ 0, 5, 10, ..., 4990, 4995, 5000])
```python
%timeit five_times_table_lge + 5
```
The slowest run took 26.25 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 5: 1.66 µs per loop
```python
%timeit [e + 5 for e in fives_list]
```
10000 loops, best of 5: 51 µs per loop
Boolean string operations can also be performed on ndarrays.
```python
words = np.array(["ten", "nine", "eight", "seven", "six"])
print(np.isin(words, 'e'))
print("e" in words)
["e" in word for word in words]
```
[False False False False False]
False
[True, True, True, True, False]
# Transpose
```python
people.shape = (2, 6)
print(people, "\n")
print(people.T)
```
[[10 5 8 32 65 43]
[32 18 26 60 55 65]]
[[10 32]
[ 5 18]
[ 8 26]
[32 60]
[65 55]
[43 65]]
# Data Types
As previously mentioned, ndarrays can only have one data type. If we want to obtain or change this, we use the `.dtype` attribute.
```python
people.dtype
```
dtype('int64')
What is the data type of the below ndarray?
```python
ages_with_strings = np.array([10, 5, 8, '32', '65', '43'])
ages_with_strings
```
array(['10', '5', '8', '32', '65', '43'], dtype='<U21')
What is the dtype of this array?
```python
ages_with_strings = np.array([10, 5, 8, '32', '65', '43'], dtype='int32')
ages_with_strings
```
array([10, 5, 8, 32, 65, 43], dtype=int32)
What do you think has happened here?
```python
ages_with_strings = np.array([10, 5, 8, '32', '65', '43'])
print(ages_with_strings)
```
['10' '5' '8' '32' '65' '43']
```python
ages_with_strings.dtype = 'int32'
print(ages_with_strings)
```
[49 48 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 53 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 56 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 51 50 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 54 53 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 52 51 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0]
```python
ages_with_strings.size
```
126
```python
ages_with_strings.size/21
```
6.0
```python
np.array([10, 5, 8, '32', '65', '43']).size
```
6
The correct way to have changed the data type of the ndarray would have been to use the `.astype()` method, demonstrated below.
```python
ages_with_strings = np.array([10, 5, 8, '32', '65', '43'])
print(ages_with_strings)
print(ages_with_strings.astype('int32'))
```
['10' '5' '8' '32' '65' '43']
[10 5 8 32 65 43]
### Exercise
* #### Create an array of string numbers, but use dtype to make it an array of floats.
* #### Transpose the matrix, printing the new size and shape.
* #### Use the .astype() method to convert the array to boolean.
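One possible solution sketch for the exercise above:
```python
float_arr = np.array(['1.5', '0', '3.25', '4'], dtype=float)  # strings parsed as floats
mat = float_arr.reshape(2, 2).T
print("Size:\t", mat.size)
print("Shape:\t", mat.shape)
print(float_arr.astype(bool))  # 0.0 -> False, everything else -> True
```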
## Array Slicing Operations
As before, we can use square brackets and indices to access individual values, and the colon operator to slice the array.
```python
five_times_table
```
array([ 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50])
```python
five_times_table[0]
```
0
```python
five_times_table[-1]
```
50
```python
five_times_table[:4]
```
array([ 0, 5, 10, 15])
```python
five_times_table[4:]
```
array([20, 25, 30, 35, 40, 45, 50])
We can also slice an n-dimensional ndarray, specifying the slice operation across each axis.
```python
print(people)
people[:3, :3]
```
[[10 5 8 32 65 43]
[32 18 26 60 55 65]]
array([[10, 5, 8],
[32, 18, 26]])
### Exercise
* Create a numpy array with 50 zeros
* Create a np array of 2 repeated 20 times
* Create a numpy array from 0 to 2 $\pi$ in steps of 0.1
For one of the arrays generated:
* Get the first five values
* Get the last 3 values
* Get the 4th value to the 7th value
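One possible solution sketch for the exercise above:
```python
zeros_arr = np.zeros(50)
twos_arr = np.repeat(2, 20)
angles = np.arange(0, 2 * np.pi, 0.1)
print(angles[:5])   # first five values
print(angles[-3:])  # last three values
print(angles[3:7])  # 4th value to the 7th value
```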
We can reverse an array by using `np.flip()` or by using the `::` operator.
```python
reverse_five_times_table = np.flip(five_times_table)
reverse_five_times_table
```
array([50, 45, 40, 35, 30, 25, 20, 15, 10, 5, 0])
```python
reverse_five_times_table = five_times_table[-1::-1]
print(reverse_five_times_table)
five_times_table
```
[50 45 40 35 30 25 20 15 10 5 0]
array([ 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50])
We can also use the `::` operator to select steps of the original array.
```python
five_times_table[0::3] #Every 3rd element starting from 0
```
array([ 0, 15, 30, 45])
### Exercise
Take one of the arrays you defined and
* #### Reverse it
* #### Only keep every 4th element.
* #### Get every 2nd element, starting from the last and moving backwards.
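One possible solution sketch for the exercise above, using a fresh example array:
```python
arr = np.arange(0, 40, 2)
print(np.flip(arr))  # reversed
print(arr[::4])      # every 4th element
print(arr[::-2])     # every 2nd element, starting from the last
```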
# Stats
```python
np.array([1.65432, 5.98765]).round(2)
```
array([1.65, 5.99])
```python
nums = np.arange(0, 4, 0.2555)
```
### Exercise
* Compute min, max, sum, mean, median, variance, and standard deviation of the above array, all to to 2 decimal places.
```python
print("min = ", np.min(nums).round(2))
print("max = ", np.max(nums).round(2))
print("sum = ", np.sum(nums).round(2))
print("mean = ", np.mean(nums).round(2))
print("median = ", np.median(nums).round(2))
print("var = ", np.var(nums).round(2))
print("std = ", np.std(nums).round(2))
```
min = 0.0
max = 3.83
sum = 30.66
mean = 1.92
median = 1.92
var = 1.39
std = 1.18
## Random
With `np.random`, we can generate a number of types of dataset, and create training data.
The below code simulates a fair coin toss.
```python
flip = np.random.choice([0,1], 10)
flip
```
array([0, 0, 0, 1, 0, 1, 0, 1, 0, 0])
```python
np.random.rand(10,20,9)
```
array([[[0.57903607, 0.70658326, 0.46823039, ..., 0.67872556,
0.97668838, 0.59296004],
[0.68265612, 0.35568777, 0.09006245, ..., 0.28822124,
0.51443494, 0.52815057],
[0.47576313, 0.19055337, 0.53289686, ..., 0.9720602 ,
0.59957457, 0.00136381],
...,
[0.96615173, 0.11924133, 0.27622075, ..., 0.60268858,
0.76115508, 0.60700918],
[0.7947058 , 0.60565686, 0.89898931, ..., 0.24012824,
0.86142456, 0.26890588],
[0.08503049, 0.85532359, 0.04862309, ..., 0.83191984,
0.12922411, 0.05534613]],
[[0.0571167 , 0.2627298 , 0.24012262, ..., 0.17173116,
0.09829425, 0.78794406],
[0.14690905, 0.71135404, 0.16426146, ..., 0.74632793,
0.53593656, 0.4414745 ],
[0.43014199, 0.74044911, 0.28382775, ..., 0.59250707,
0.50027838, 0.29761281],
...,
[0.24179913, 0.74530331, 0.74018769, ..., 0.14497582,
0.26258718, 0.8021661 ],
[0.72357274, 0.02393067, 0.03247564, ..., 0.55356794,
0.41219468, 0.23650561],
[0.49846368, 0.03640285, 0.50449892, ..., 0.38784041,
0.1143238 , 0.09864435]],
[[0.89092575, 0.67273482, 0.58401463, ..., 0.05977424,
0.08851958, 0.38219284],
[0.59141116, 0.36883351, 0.47516797, ..., 0.76518547,
0.13250162, 0.46123038],
[0.97293218, 0.82266408, 0.16817342, ..., 0.45949225,
0.41081166, 0.56793835],
...,
[0.16166455, 0.51404214, 0.21340341, ..., 0.07144955,
0.96286045, 0.49441027],
[0.01173436, 0.62760475, 0.44478 , ..., 0.51227539,
0.1095556 , 0.77223662],
[0.46022167, 0.96680813, 0.23408062, ..., 0.94361105,
0.36161187, 0.22280426]],
...,
[[0.7621445 , 0.71656463, 0.62140542, ..., 0.78161937,
0.54050741, 0.35509936],
[0.88235509, 0.57271812, 0.84588271, ..., 0.17100842,
0.68256992, 0.49224554],
[0.3103567 , 0.39843377, 0.32951047, ..., 0.38696677,
0.74197158, 0.63489489],
...,
[0.87627107, 0.58826 , 0.85497237, ..., 0.96820957,
0.49715622, 0.44814367],
[0.30571801, 0.03233591, 0.47085265, ..., 0.75714441,
0.44313711, 0.08418118],
[0.71783251, 0.90823314, 0.72704044, ..., 0.59965653,
0.4231835 , 0.30740116]],
[[0.76174456, 0.71447451, 0.77809227, ..., 0.48855986,
0.9367547 , 0.38798618],
[0.13485896, 0.07370139, 0.83190798, ..., 0.28765397,
0.73623527, 0.97569682],
[0.14505399, 0.84622162, 0.2885453 , ..., 0.7665278 ,
0.54913027, 0.84625343],
...,
[0.32134819, 0.56332715, 0.27872278, ..., 0.52081844,
0.93898686, 0.39460526],
[0.50629651, 0.36927313, 0.47709624, ..., 0.75562799,
0.34733057, 0.44587717],
[0.36130956, 0.07595216, 0.48705111, ..., 0.64247632,
0.51827511, 0.40092295]],
[[0.62058256, 0.54825669, 0.05118057, ..., 0.69456006,
0.83958809, 0.9723071 ],
[0.11303148, 0.6988732 , 0.27029805, ..., 0.57086763,
0.76399898, 0.00617371],
[0.57886927, 0.51730271, 0.77585789, ..., 0.2332254 ,
0.16303748, 0.28319879],
...,
[0.73697707, 0.48755375, 0.71565277, ..., 0.07981335,
0.75923367, 0.93050114],
[0.29318998, 0.50255749, 0.62045676, ..., 0.4489524 ,
0.84108982, 0.50832608],
[0.83918305, 0.65324551, 0.47826852, ..., 0.94658946,
0.23852473, 0.08114226]]])
We can produce 1000 datapoints of a normally distributed data set by using `np.random.normal()`
```python
mu, sigma = 0, 0.1 # mean and standard deviation
s = np.random.normal(mu, sigma, 1000)
```
### Exercise
* Simulate a six-sided die using numpy.random.choice() and generate the values you would obtain from 10 throws.
* Simulate a two-sided coin toss that is NOT fair: it is twice as likely to come up heads as tails. (One possible solution sketch follows the empty cell below.)
```python
```
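One possible solution sketch for the exercise above (results vary from run to run):
```python
dice_throws = np.random.choice(np.arange(1, 7), 10)   # fair six-sided die
print(dice_throws)
biased_coin = np.random.choice(['H', 'T'], 10, p=[2/3, 1/3])  # heads twice as likely as tails
print(biased_coin)
```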
|
b1e595733a7c4e3832e0d7f5e20516ddba4bba09
| 70,359 |
ipynb
|
Jupyter Notebook
|
02_Python_Numpy.ipynb
|
annissatessffaaye/QA-Data-Engineering-Bootcamp-Azure-Python-SQL
|
9204dfb4b1e9ac4dd477200a2a79f9aed49c6a11
|
[
"MIT"
] | 1 |
2021-11-23T19:57:34.000Z
|
2021-11-23T19:57:34.000Z
|
02_Python_Numpy.ipynb
|
annissatessffaaye/QA-Data-Engineering-Bootcamp-Azure-Python-SQL
|
9204dfb4b1e9ac4dd477200a2a79f9aed49c6a11
|
[
"MIT"
] | null | null | null |
02_Python_Numpy.ipynb
|
annissatessffaaye/QA-Data-Engineering-Bootcamp-Azure-Python-SQL
|
9204dfb4b1e9ac4dd477200a2a79f9aed49c6a11
|
[
"MIT"
] | null | null | null | 26.001109 | 240 | 0.327805 | true | 10,262 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.835484 | 0.861538 | 0.719801 |
__label__krc_Cyrl
| 0.712371 | 0.510671 |
<a id='iterative-methods-sparsity'></a>
<div id="qe-notebook-header" style="text-align:right;">
<a href="https://quantecon.org/" title="quantecon.org">
</a>
</div>
# Krylov Methods and Matrix Conditioning
## Contents
- [Krylov Methods and Matrix Conditioning](#Krylov-Methods-and-Matrix-Conditioning)
- [Overview](#Overview)
- [Ill-Conditioned Matrices](#Ill-Conditioned-Matrices)
- [Stationary Iterative Algorithms for Linear Systems](#Stationary-Iterative-Algorithms-for-Linear-Systems)
- [Krylov Methods](#Krylov-Methods)
- [Iterative Methods for Linear Least Squares](#Iterative-Methods-for-Linear-Least-Squares)
- [Iterative Methods for Eigensystems](#Iterative-Methods-for-Eigensystems)
- [Krylov Methods for Markov-Chain Dynamics](#Krylov-Methods-for-Markov-Chain-Dynamics)
## Overview
This lecture takes the structure of [numerical methods for linear algebra](numerical_linear_algebra.html) and builds further
toward working with large, sparse matrices. In the process, we will examine foundational numerical analysis such as
ill-conditioned matrices.
### Setup
```julia
using InstantiateFromURL
# optionally add arguments to force installation: instantiate = true, precompile = true
github_project("QuantEcon/quantecon-notebooks-julia", version = "0.8.0")
```
```julia
using LinearAlgebra, Statistics, BenchmarkTools, Random
Random.seed!(42); # seed random numbers for reproducibility
```
### Applications
In this section, we will consider variations on classic problems
1. Solving a linear system for a square $ A $ where we will maintain throughout that there is a unique solution to
$$
A x = b
$$
1. [Linear least-squares](https://en.wikipedia.org/wiki/Linear_least_squares) solution, for a rectangular $ A $
$$
\min_x \| Ax -b \|^2
$$
From theory, we know that if $ A $ has linearly independent columns, then the solution is the [normal equation](https://en.wikipedia.org/wiki/Linear_least_squares#Derivation_of_the_normal_equations)
$$
x = (A'A)^{-1}A'b
$$
1. In the case of a square matrix $ A $, the eigenvalue problem is that of finding $ x $ and $ \lambda $ such that
$$
A x = \lambda x
$$
For eigenvalue problems, keep in mind that you do not always require all of the $ \lambda $, and sometimes the largest (or smallest) would be enough. For example, calculating the spectral radius requires only the eigenvalue with maximum absolute value.
## Ill-Conditioned Matrices
An important consideration in numerical linear algebra, and iterative methods in general, is the [condition number](https://en.wikipedia.org/wiki/Condition_number#Matrices).
An ill-conditioned matrix is one where the basis eigenvectors are close to, but not exactly, collinear. While this poses no problem on pen and paper,
or with infinite-precision numerical methods, it is important in practice, for two reasons:
1. Ill-conditioned matrices introduce numerical errors roughly in proportion to the base-10 log of the condition number.
1. The convergence speed of many iterative methods is based on the spectral properties of the matrices (e.g., the basis formed by the eigenvectors), and hence ill-conditioned systems can converge slowly.
The solutions to these problems are to
- be careful with operations which introduce error based on the condition number (e.g., matrix inversions when the condition number is high)
- choose, where possible, alternative representations which have less collinearity (e.g., an orthogonal polynomial basis rather than a monomial one)
- use a preconditioner for iterative methods, which changes the spectral properties to increase convergence speed
### Condition Number
First, let’s define and explore the condition number $ \kappa $
$$
\kappa(A) \equiv \|A\| \|A^{-1}\|
$$
where you can use the Cauchy–Schwarz inequality to show that $ \kappa(A) \geq 1 $. While the condition number can be calculated with any norm, we will focus on the 2-norm.
First, a warning on calculations: Calculating the condition number for a matrix can be an expensive operation (as would calculating a determinant)
and should be thought of as roughly equivalent to doing an eigendecomposition. So use it for detective work judiciously.
Let’s look at the condition number of a few matrices using the `cond` function (which allows a choice of the norm, but we’ll stick with the default 2-norm).
```julia
A = I(2)
cond(A)
```
1.0
Here we see an example of the best-conditioned matrix, the identity matrix with its completely orthonormal basis, which has a condition number of 1.
On the other hand, notice that
```julia
ϵ = 1E-6
A = [1.0 0.0
1.0 ϵ]
cond(A)
```
2.0000000000005004e6
has a condition number of order `1E6`, and hence (taking the base-10 log) you would expect to lose roughly 6 significant digits to numerical error if you
are not careful. For example, note that the inverse mixes entries of order `1` with entries of order `1E6`, including large negative values
```julia
inv(A)
```
2×2 Array{Float64,2}:
1.0 0.0
-1.0e6 1.0e6
Since we know that the determinant of nearly collinear matrices is close to zero, this shows another symptom of poor conditioning
```julia
det(A)
```
1.0e-6
However, be careful since the determinant has a scale, while the condition number is dimensionless. That is,
```julia
@show det(1000 * A)
@show cond(1000 * A);
```
det(1000A) = 1.0
cond(1000A) = 2.0000000000005001e6
In that case, the determinant of `1000 * A` is 1, while the condition number is unchanged. This example also provides some
intuition that ill-conditioned matrices typically occur when a matrix has radically different scales (e.g., contains both `1` and `1E-6`, or `1000` and `1E-3`). This can occur frequently with both function approximation and linear least squares.
### Condition Numbers and Matrix Operations
Multiplying a matrix by a constant does not change the condition number. What about other operations?
For this example, we see that the inverse has the same condition number (though this will not always be the case).
```julia
@show cond(A)
@show cond(inv(A));
```
cond(A) = 2.0000000000005004e6
cond(inv(A)) = 2.0000000002463197e6
The condition number of the product of two matrices can change radically and lead things to becoming
even more ill-conditioned.
This comes up frequently when calculating the product of a matrix and its transpose (e.g., forming the covariance matrix). A classic example is the [Läuchli matrix](https://link.springer.com/article/10.1007%2FBF01386022).
```julia
lauchli(N, ϵ) = [ones(N)'; ϵ * I(N)]'
ϵ = 1E-8
L = lauchli(3, ϵ) |> Matrix
```
3×4 Array{Float64,2}:
1.0 1.0e-8 0.0 0.0
1.0 0.0 1.0e-8 0.0
1.0 0.0 0.0 1.0e-8
Note that the condition number increases substantially
```julia
@show cond(L)
@show cond(L' * L);
```
cond(L) = 1.732050807568878e8
cond(L' * L) = 5.345191558726545e32
You can show that the analytic eigenvalues of $ L L' $ are $ \{3 + \epsilon^2, \epsilon^2, \epsilon^2\} $, but the poor conditioning
means it is difficult to distinguish the small ones from $ 0 $.
This comes up when conducting [Principal Component Analysis](https://en.wikipedia.org/wiki/Principal_component_analysis#Singular_value_decomposition), which
requires calculations of the eigenvalues of the covariance matrix
```julia
sort(sqrt.(Complex.(eigen(L*L').values)), lt = (x,y) -> abs(x) < abs(y))
```
3-element Array{Complex{Float64},1}:
0.0 + 4.870456104375987e-9im
4.2146848510894035e-8 + 0.0im
1.7320508075688772 + 0.0im
Note that these are significantly different than the known analytic solution and, in particular, are difficult to distinguish from 0.
```julia
sqrt.([3 + ϵ^2, ϵ^2, ϵ^2]) |> sort
```
3-element Array{Float64,1}:
1.0e-8
1.0e-8
1.7320508075688772
Alternatively, we could calculate these directly as the singular values of $ L $ itself, which is much more accurate
and lets us clearly distinguish them from zero
```julia
svd(L).S |> sort
```
3-element Array{Float64,1}:
9.999999999999997e-9
1.0e-8
1.7320508075688774
Similarly, we are better off calculating least squares directly rather than forming the normal equation (i.e., $ A' A x = A' b $) ourselves
```julia
N = 3
A = lauchli(N, 1E-7)' |> Matrix
b = rand(N+1)
x_sol_1 = A \ b # using a least-squares solver
x_sol_2 = (A' * A) \ (A' * b) # forming the normal equation ourselves
norm(x_sol_1 - x_sol_2)
```
2502.05373776057
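One way to see why forming the normal equations is so harmful here: for this kind of matrix, the condition number of $ A' A $ is roughly the square of the condition number of $ A $. A small sketch, reusing the `A` constructed in the previous cell:
```julia
# a sketch, reusing A = lauchli(N, 1E-7)' from the cell above
@show cond(A)
@show cond(A' * A)
@show cond(A)^2;   # cond(A' * A) is roughly cond(A) squared
```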
### Why a Monomial Basis Is a Bad Idea
A classic example of a poorly conditioned matrix arises from using a monomial basis for polynomial interpolation.
Take a grid of points, $ x_0, \ldots x_N $ and values $ y_0, \ldots y_N $ where we want to calculate the
interpolating polynomial.
If we were to use the simplest, and most obvious, polynomial basis, then the calculation consists of finding the coefficients $ c_0, \ldots, c_N $ where
$$
P(x) = \sum_{i=0}^N c_i x^i
$$
To solve for the coefficients, we notice that this is a simple system of equations
$$
\begin{align}
y_0 &= c_0 + c_1 x_0 + \ldots + c_N x_0^N\\
&\vdots\\
y_N &= c_0 + c_1 x_N + \ldots + c_N x_N^N
\end{align}
$$
Or, stacking $ c = \begin{bmatrix} c_0 & \ldots & c_N\end{bmatrix}, y = \begin{bmatrix} y_0 & \ldots & y_N\end{bmatrix} $ and
$$
A = \begin{bmatrix} 1 & x_0 & x_0^2 & \ldots &x_0^N\\
\vdots & \vdots & \vdots & \vdots & \vdots \\
1 & x_N & x_N^2 & \ldots & x_N^N
\end{bmatrix}
$$
We can then calculate the interpolating coefficients as the solution to
$$
A c = y
$$
Implementing this for the interpolation of the $ \exp(x) $ function
```julia
N = 5
f(x) = exp(x)
x = range(0.0, 10.0, length = N+1)
y = f.(x) # generate some data to interpolate
A = [x_i^n for x_i in x, n in 0:N]
A_inv = inv(A)
c = A_inv * y
norm(A * c - f.(x), Inf)
```
1.356966095045209e-9
The final step just checks the interpolation vs. the analytic function at the nodes. Keep in mind that this should be very close to zero
since we are interpolating the function precisely at those nodes.
In our example, the Inf-norm (i.e., maximum difference) of the interpolation errors at the nodes is around `1E-9`, which
is reasonable for many problems.
But note that with $ N=5 $ the condition number is already of order `1E6`.
```julia
cond(A)
```
564652.3214053963
What if we increase the degree of the polynomial with the hope of increasing the precision of the
interpolation?
```julia
N = 10
f(x) = exp(x)
x = range(0.0, 10.0, length = N+1)
y = f.(x) # generate some data to interpolate
A = [x_i^n for x_i in x, n in 0:N]
A_inv = inv(A)
c = A_inv * y
norm(A * c - f.(x), Inf)
```
8.61171429278329e-7
Here, we see that hoping to increase the precision between points by adding extra polynomial terms is backfiring. By going to a 10th-order polynomial, we have
introduced an error of about `1E-6`, even at the interpolation points themselves.
This blows up quickly
```julia
N = 20
f(x) = exp(x)
x = range(0.0, 10.0, length = N+1)
y = f.(x) # generate some data to interpolate
A = [x_i^n for x_i in x, n in 0:N]
A_inv = inv(A)
c = A_inv * y
norm(A * c - f.(x), Inf)
```
19978.410967681375
To see the source of the problem, note that the condition number is astronomical.
```julia
cond(A)
```
2.0386741019186427e24
At this point, you should be suspicious of the use of `inv(A)`, since we have considered solving
linear systems by taking the inverse as verboten. Indeed, this made things much worse. The
error drops dramatically if we solve it as a linear system
```julia
c = A \ y
norm(A * c - f.(x), Inf)
```
1.864464138634503e-10
But an error of `1E-10` at the interpolating nodes themselves can be a problem in many applications, and if you increase `N`
then the error will become non-trivial eventually - even without taking the inverse.
The heart of the issue is that the monomial basis leads to a [Vandermonde matrix](https://en.wikipedia.org/wiki/Vandermonde_matrix), which
is especially ill-conditioned.
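To get a sense of how quickly this basis deteriorates, here is a small sketch tabulating the condition number of the Vandermonde matrix as `N` grows (using the same grid on $ [0, 10] $ as above):
```julia
using LinearAlgebra
for N in (5, 10, 15, 20)
    x_grid = range(0.0, 10.0, length = N + 1)
    A_N = [x_i^n for x_i in x_grid, n in 0:N]
    println("N = $N, cond = $(cond(A_N))")
end
```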
#### Aside on Runge’s Phenomenon
The monomial basis is also a good opportunity to look at a separate type of error due to [Runge’s Phenomenon](https://en.wikipedia.org/wiki/Runge%27s_phenomenon). It is an important
issue in approximation theory, albeit not one driven by numerical approximation errors.
It turns out that using a uniform grid of points is, in general, the worst possible choice of interpolation nodes for a polynomial approximation. This phenomenon can be seen with the interpolation of the seemingly innocuous Runge’s function, $ g(x) = \frac{1}{1 + 25 x^2} $.
Let’s calculate the interpolation with a monomial basis to find the $ c_i $ such that
$$
\frac{1}{1 + 25 x^2} \approx \sum_{i=0}^N c_i x^i,\, \text{ for } -1 \leq x \leq 1
$$
First, interpolate with $ N = 5 $. As long as we avoid taking an inverse, the numerical errors from the ill-conditioned matrix are manageable.
```julia
using Plots
N_display = 100
g(x) = 1/(1 + 25x^2)
x_display = range(-1, 1, length = N_display)
y_display = g.(x_display)
# interpolation
N = 5
x = range(-1.0, 1.0, length = N+1)
y = g.(x)
A_5 = [x_i^n for x_i in x, n in 0:N]
c_5 = A_5 \ y
# use the coefficients to evaluate on x_display grid
B_5 = [x_i^n for x_i in x_display, n in 0:N] # calculate monomials for display grid
y_5 = B_5 * c_5 # calculates for each in x_display_grid
plot(x_display, y_5, label = "P_5(x)")
plot!(x_display, y_display, w = 3, label = "g(x)")
```
Note that while the function, $ g(x) $, and the approximation with a 5th-order polynomial, $ P_5(x) $, coincide at the 6 nodes, the
approximation has a great deal of error everywhere else.
The oscillations near the boundaries are the hallmarks of Runge’s Phenomenon. You might guess that increasing the number
of grid points and the order of the polynomial will lead to better approximations:
```julia
N = 9
x = range(-1.0, 1.0, length = N+1)
y = g.(x)
A_9 = [x_i^n for x_i in x, n in 0:N]
c_9 = A_9 \ y
# use the coefficients to evaluate on x_display grid
B_9 = [x_i^n for x_i in x_display, n in 0:N] # calculate monomials for display grid
y_9 = B_9 * c_9 # calculates for each in x_display_grid
plot(x_display, y_9, label = "P_9(x)")
plot!(x_display, y_display, w = 3, label = "g(x)")
```
While the approximation is better near `x=0`, the oscillations near the boundaries have become worse. Adding on extra polynomial terms will not
globally increase the quality of the approximation.
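Part of the problem is the uniform grid itself, a point the next subsection returns to. As a rough sketch - assuming `g`, `x_display`, and `y_display` from the cells above, and writing out endpoint-clustered (Chebyshev-style) nodes directly - the boundary oscillations are typically much smaller even with the same monomial basis:
```julia
# a sketch, assuming g, x_display, y_display are defined as in the cells above
N = 9
x_cheb = [cos((2k - 1) * π / (2 * (N + 1))) for k in 1:N+1]  # nodes clustered toward ±1
y_cheb = g.(x_cheb)
A_cheb = [x_i^n for x_i in x_cheb, n in 0:N]
c_cheb = A_cheb \ y_cheb
B_cheb = [x_i^n for x_i in x_display, n in 0:N]
plot(x_display, B_cheb * c_cheb, label = "P_9(x), Chebyshev-style nodes")
plot!(x_display, y_display, w = 3, label = "g(x)")
```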
#### Using an Orthogonal Polynomial Basis
We can minimize the numerical problems of an ill-conditioned basis matrix by choosing a different basis for the polynomials.
For example, [Chebyshev polynomials](https://en.wikipedia.org/wiki/Chebyshev_polynomials) form an orthogonal basis under an appropriate inner product, and we can form precise high-order approximations, with very little numerical error
```julia
using ApproxFun
N = 10000
S = Chebyshev(-1.0..1.0) # form Chebyshev basis
x = points(S, N) # chooses Chebyshev nodes
y = g.(x)
g_approx = Fun(S,ApproxFun.transform(S,y)) # transform fits the polynomial
@show norm(g_approx.(x) - g.(x), Inf)
plot(x_display, g_approx.(x_display), label = "P_10000(x)")
plot!(x_display, g.(x_display), w = 3, label = "g(x)")
```
norm(g_approx.(x) - g.(x), Inf) = 4.440892098500626e-16
Besides the use of a different polynomial basis, we are approximating at different nodes (i.e., [Chebyshev nodes](https://en.wikipedia.org/wiki/Chebyshev_nodes)). Interpolation with Chebyshev polynomials at the Chebyshev nodes ends up minimizing (but not eliminating) Runge’s Phenomenon.
#### Lessons for Approximation and Interpolation
To summarize:
1. Check the condition number on systems you suspect might be ill-conditioned (based on intuition of collinearity).
1. If you are working with ill-conditioned matrices, be especially careful not to take the inverse or multiply by the transpose.
1. Avoid a monomial polynomial basis. Instead, use polynomials (e.g., Chebyshev or Lagrange) orthogonal under an appropriate inner product, or use a non-global basis such as cubic splines.
1. If possible, avoid using a uniform grid for interpolation and approximation, and choose nodes appropriate for the basis.
However, sometimes you can’t avoid ill-conditioned matrices. This is especially common with discretization of PDEs and with linear least squares.
## Stationary Iterative Algorithms for Linear Systems
As before, consider solving the equation
$$
A x = b
$$
We will now
focus on cases where $ A $ is both massive (e.g., potentially millions of equations) and sparse, and sometimes ill-conditioned - but where there is always a unique solution.
While this may seem excessive, it occurs in practice due to the curse of dimensionality, discretizations
of PDEs, and when working with big data.
The methods in the previous lectures (e.g., factorization and approaches similar to Gaussian elimination) are called direct methods, and are able
in theory to converge to the exact solution in a finite number of steps while directly working with the matrix in memory.
Instead, iterative solutions start with a guess on a solution and iterate until convergence. The benefit will be that
each iteration uses a lower-order operation (e.g., an $ O(N^2) $ matrix-vector product) which will make it possible to
1. solve much larger systems, even if done less precisely.
1. define linear operators in terms of the matrix-vector products, rather than storing as a matrix.
1. get approximate solutions in progress prior to the completion of all algorithm steps, unlike the direct methods, which provide a solution only at the end.
Of course, there is no free lunch, and the computational order of the iterations themselves would be comparable to the direct methods for a given level of tolerance (e.g., $ O(N^3) $ operations may be required to solve a dense unstructured system).
There are two types of iterative methods we will consider. The first type is stationary methods, which iterate on a map in a way that’s similar to fixed-point problems, and the second type is [Krylov](https://en.wikipedia.org/wiki/Krylov_subspace) methods, which iteratively solve using left-multiplications of the linear operator.
For our main examples, we will use the valuation of the continuous-time Markov chain from the [numerical methods for linear algebra](numerical_linear_algebra.html) lecture. That is, given a payoff vector $ r $, a
discount rate $ \rho $, and the infinitesimal generator of the Markov chain $ Q $, solve the equation
$$
\rho v = r + Q v
$$
With the sizes and types of matrices here, iterative methods are inappropriate in practice, but they will help us understand
the characteristics of convergence and how they relate to matrix conditioning.
### Stationary Methods
First, we will solve with a direct method, which will give the solution to machine precision.
```julia
using LinearAlgebra, IterativeSolvers, Statistics
α = 0.1
N = 100
Q = Tridiagonal(fill(α, N-1), [-α; fill(-2α, N-2); -α], fill(α, N-1))
r = range(0.0, 10.0, length=N)
ρ = 0.05
A = ρ * I - Q
v_direct = A \ r
mean(v_direct)
```
100.00000000000004
Without proof, consider that given the discount rate of $ \rho > 0 $, this problem could be set up as a contraction for solving the Bellman
equation through methods such as value-function iteration.
The condition we will examine here is called [**diagonal dominance**](https://en.wikipedia.org/wiki/Diagonally_dominant_matrix).
$$
|A_{ii}| \geq \sum_{j\neq i} |A_{ij}| \quad\text{for all } i = 1\ldots N
$$
That is, in every row, the absolute value of the diagonal element is weakly greater than the sum of the absolute values of all the other elements in the row. In cases
where it is strictly greater, we say that the matrix is strictly diagonally dominant.
With our example, given that $ Q $ is the infinitesimal generator of a Markov chain, we know that each row sums to 0, and hence
it is weakly diagonally dominant.
However, notice that when $ \rho > 0 $, and since the diagonal of $ Q $ is negative, $ A = ρ I - Q $ makes the matrix strictly diagonally dominant.
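We can check this numerically. A small sketch, reusing the `A = ρ * I - Q` matrix constructed above, computes each row's diagonal-dominance gap; a strictly positive minimum confirms strict diagonal dominance:
```julia
# a sketch, reusing A = ρ * I - Q from the cell above
A_dense = Matrix(A)
gaps = [abs(A_dense[i, i]) - sum(abs(A_dense[i, j]) for j in 1:size(A_dense, 2) if j != i)
        for i in 1:size(A_dense, 1)]
@show minimum(gaps);   # strictly positive => strictly diagonally dominant
```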
### Jacobi Iteration
For matrices that are **strictly diagonally dominant**, you can prove that a simple decomposition and iteration procedure
will converge.
To solve a system $ A x = b $, split the matrix $ A $ into its diagonal and off-diagonal elements. That is,
$$
A = D + R
$$
where
$$
D = \begin{bmatrix} A_{11} & 0 & \ldots & 0\\
0 & A_{22} & \ldots & 0\\
\vdots & \vdots & \vdots & \vdots\\
0 & 0 & \ldots & A_{NN}
\end{bmatrix}
$$
and
$$
R = \begin{bmatrix} 0 & A_{12} & \ldots & A_{1N} \\
A_{21} & 0 & \ldots & A_{2N} \\
\vdots & \vdots & \vdots & \vdots\\
A_{N1} & A_{N2} & \ldots & 0
\end{bmatrix}
$$
Rearrange the $ (D + R)x = b $ as
$$
\begin{align}
D x &= b - R x\\
x &= D^{-1} (b - R x)
\end{align}
$$
where, since $ D $ is diagonal, its inverse is trivial to calculate with $ O(N) $ complexity.
To solve, take an iteration $ x^k $, starting from $ x^0 $, and then form a new guess with
$$
x^{k+1} = D^{-1}(b - R x^k)
$$
The complexity here is $ O(N^2) $ for the matrix-vector product, and $ O(N) $ for the vector subtraction and division.
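Before turning to the packaged implementation, here is a minimal hand-rolled sketch of the same update $ x^{k+1} = D^{-1}(b - R x^k) $, reusing the `A`, `r`, and `v_direct` from above; it should give an error comparable to the packaged version below:
```julia
# a sketch of Jacobi iteration, reusing A, r, v_direct from above
function jacobi_sketch(A, b; iterations = 40)
    D = Diagonal(A)             # diagonal part
    R = A - D                   # off-diagonal part
    x = zeros(length(b))
    for _ in 1:iterations
        x = D \ (b - R * x)     # D is diagonal, so this solve is O(N)
    end
    return x
end
@show norm(jacobi_sketch(A, r) - v_direct, Inf);
```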
The [IterativeSolvers.jl](https://github.com/JuliaMath/IterativeSolvers.jl) package implements this method.
For our example, we start with a guess for the value function and iterate
```julia
using IterativeSolvers, LinearAlgebra, SparseArrays
v = zeros(N)
jacobi!(v, A, r, maxiter = 40)
@show norm(v - v_direct, Inf)
```
norm(v - v_direct, Inf) = 0.022858373200932647
0.022858373200932647
With this, after 40 iterations we see that the error is of the order of `1E-2`.
### Other Stationary Methods
In practice, there are many methods that are better than Jacobi iteration. One example is [Gauss–Seidel](https://en.wikipedia.org/wiki/Gauss%E2%80%93Seidel_method), which
splits the matrix as $ A = L + U $, where $ L $ is lower triangular (including the diagonal) and $ U $ is strictly upper triangular.
The iteration becomes
$$
L x^{k+1} = b - U x^k
$$
In that case, since the $ L $ matrix is triangular, the system can be solved in $ O(N^2) $ operations after $ b - U x^k $ is formed
```julia
v = zeros(N)
gauss_seidel!(v, A, r, maxiter = 40)
@show norm(v - v_direct, Inf);
```
norm(v - v_direct, Inf) = 1.5616376089155892e-5
The accuracy increases substantially. After 40 iterations, we see that the error is of the order of `1E-5`
Another example is [Successive Over-relaxation (SOR)](https://en.wikipedia.org/wiki/Successive_over-relaxation), which takes a relaxation parameter $ \omega > 1 $ and decomposes the matrix as $ A = L + D + U $, where $ L $ and $ U $ are strictly lower- and upper-triangular matrices and $ D $ is diagonal.
Decompose the $ A $ matrix, multiply the system by $ \omega $, and rearrange to find
$$
(D + \omega L) x^{k+1} = \omega b - \left(\omega U +(\omega - 1)D \right)x^k
$$
In that case, $ D + \omega L $ is a triangular matrix, and hence the linear solution is $ O(N^2) $.
```julia
v = zeros(N)
sor!(v, A, r, 1.1, maxiter = 40)
@show norm(v - v_direct, Inf);
```
norm(v - v_direct, Inf) = 3.745356593753968e-7
The accuracy is now `1E-7`. If we change the parameter to $ \omega = 1.2 $, the accuracy further increases to `1E-9`.
This technique is common with iterative methods: Frequently, adding a damping or a relaxation parameter will counterintuitively speed up the convergence process.
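A quick sketch sweeping the relaxation parameter illustrates this sensitivity (reusing `A`, `r`, and `v_direct` from above; $ \omega = 1 $ reduces to Gauss–Seidel):
```julia
for ω in (1.0, 1.1, 1.2, 1.3)
    v_sor = zeros(N)
    sor!(v_sor, A, r, ω, maxiter = 40)
    println("ω = $ω, error = $(norm(v_sor - v_direct, Inf))")
end
```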
**Note:** The stationary iterative methods are not always used directly, but are sometimes used as a “smoothing” step (e.g., running 5-10 times) prior to using other Krylov methods.
## Krylov Methods
A more commonly used set of iterative methods is based on [Krylov subspaces](https://en.wikipedia.org/wiki/Krylov_subspace), which involve iterating the $ A^k x $ matrix-vector product, and orthogonalizing to ensure that the resulting iteration is not too collinear.
The prototypical Krylov method is [Conjugate Gradient](https://en.wikipedia.org/wiki/Conjugate_gradient_method), which requires the $ A $ matrix to be
symmetric and positive definite.
Solving an example:
```julia
N = 100
A = sprand(100, 100, 0.1) # 10 percent non-zeros
A = A * A' # easy way to generate a symmetric positive-definite matrix
@show isposdef(A)
b = rand(N)
x_direct = A \ b # sparse direct solver more appropriate here
cond(Matrix(A * A'))
```
isposdef(A) = true
3.5791585364800934e10
Notice that the condition numbers tend to be large for large random matrices.
Solving this system with the conjugate gradient method:
```julia
x = zeros(N)
sol = cg!(x, A, b, log=true, maxiter = 1000)
sol[end]
```
Converged after 174 iterations.
### Introduction to Preconditioning
If you tell a numerical analyst that you are using direct methods, their first question may be, “which factorization?” But if you tell them you
are using an iterative method, they may ask “which preconditioner?”.
As discussed at the beginning of the lecture, the spectral properties of matrices determine the rate of convergence
of iterative methods. In particular, ill-conditioned matrices can converge slowly with iterative methods, for the same
reasons that naive value-function iteration will converge slowly if the discount rate is close to `1`.
Preconditioning solves this problem by adjusting the spectral properties of the matrix, at the cost of some extra computational
operations.
To see an example of a right-preconditioner, consider a matrix $ P $ which has a convenient and numerically stable inverse. Then
$$
\begin{align}
A x &= b\\
A P^{-1} P x &= b\\
A P^{-1} y &= b\\
P x &= y
\end{align}
$$
That is, solve $ (A P^{-1})y = b $ for $ y $, and then solve $ P x = y $ for $ x $.
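As a purely mechanical illustration of those two steps (not how the packaged solvers apply preconditioners internally), here is a sketch using a diagonal $ P $ on a small, made-up dense system:
```julia
using LinearAlgebra
A_small = [4.0 1.0; 1.0 3.0]
b_small = [1.0, 2.0]
P_small = Diagonal(A_small)           # a convenient, trivially invertible preconditioner
y = (A_small / P_small) \ b_small     # step 1: solve (A P^{-1}) y = b
x = P_small \ y                       # step 2: solve P x = y
@show norm(A_small * x - b_small);    # recovers the solution of A x = b
```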
There are all sorts of preconditioners, and they are specific to the particular problem at hand. The key features are that they have convenient (and lower-order!) ways to solve the
resulting system and they lower the condition number of the matrix. To see this in action, we can look at a simple preconditioner.
The diagonal preconditioner is simply `P = Diagonal(A)`. Depending on the matrix, this can change the condition number a little or a lot.
```julia
AP = A * inv(Diagonal(A))
@show cond(Matrix(A))
@show cond(Matrix(AP));
```
cond(Matrix(A)) = 189186.6473381337
cond(Matrix(AP)) = 175174.59095330362
But it may or may not decrease the number of iterations
```julia
using Preconditioners
x = zeros(N)
P = DiagonalPreconditioner(A)
sol = cg!(x, A, b, Pl = P, log=true, maxiter = 1000)
sol[end]
```
Converged after 174 iterations.
Another classic preconditioner is the incomplete LU decomposition
```julia
using IncompleteLU
x = zeros(N)
P = ilu(A, τ = 0.1)
sol = cg!(x, A, b, Pl = P, log=true, maxiter = 1000)
sol[end]
```
Converged after 86 iterations.
The `τ` parameter is the drop tolerance of the incomplete factorization, providing a tradeoff between the quality of the preconditioner and the cost of constructing and applying it.
A good rule of thumb is that you should almost always be using a preconditioner with iterative methods, and you should experiment to find preconditioners that are appropriate for your problem.
Finally, naively trying another preconditioning approach (called [Algebraic Multigrid](https://en.wikipedia.org/wiki/Multigrid_method#Algebraic_MultiGrid_%28AMG%29)) gives us a further drop in the number of iterations.
```julia
x = zeros(N)
P = AMGPreconditioner{RugeStuben}(A)
sol = cg!(x, A, b, Pl = P, log=true, maxiter = 1000)
sol[end]
```
Converged after 59 iterations.
*Note:* Preconditioning is also available for stationary, iterative methods (see [this example](https://en.wikipedia.org/wiki/Preconditioner#Preconditioned_iterative_methods)), but
is frequently not implemented since such methods are not often used for the complete solution.
### Methods for General Matrices
There are many algorithms which exploit matrix structure (e.g., the conjugate gradient method for positive-definite matrices, and MINRES for matrices that are only symmetric/Hermitian).
On the other hand, if there is no structure to a sparse matrix, then GMRES is a good approach.
To experiment with these methods, we will use our ill-conditioned interpolation problem with a monomial basis.
```julia
using IterativeSolvers
N = 10
f(x) = exp(x)
x = range(0.0, 10.0, length = N+1)
y = f.(x) # generate some data to interpolate
A = sparse([x_i^n for x_i in x, n in 0:N])
c = zeros(N+1) # initial guess required for iterative solutions
results = gmres!(c, A, y, log=true, maxiter = 1000)
println("cond(A) = $(cond(Matrix(A))), $(results[end]) Norm error $(norm(A*c - y, Inf))")
```
cond(A) = 4.462833495403007e12, Converged after 11 iterations. Norm error 7.62520357966423e-8
That method converged in 11 iterations. Now if we try it with an incomplete LU preconditioner, we see that it converges immediately.
```julia
N = 10
f(x) = exp(x)
x = range(0.0, 10.0, length = N+1)
y = f.(x) # generate some data to interpolate
A = [x_i^n for x_i in x, n in 0:N]
P = ilu(sparse(A), τ = 0.1)
c = zeros(N+1) # initial guess required for iterative solutions
results = gmres!(c, A, y, Pl = P,log=true, maxiter = 1000)
println("$(results[end]) Norm error $(norm(A*c - y, Inf))")
```
Converged after 1 iterations. Norm error 4.5034175855107605e-7
With other preconditioners (e.g., `DiagonalPreconditioner`), we may save only one or two iterations. Keep in mind,
however, the cost of the preconditioning process itself for your problem.
### Matrix-Free Methods
First, let's use a Krylov method to solve our simple valuation problem
```julia
α = 0.1
N = 100
Q = Tridiagonal(fill(α, N-1), [-α; fill(-2α, N-2); -α], fill(α, N-1))
r = range(0.0, 10.0, length=N)
ρ = 0.05
A = ρ * I - Q
v = zeros(N)
results = gmres!(v, A, r, log=true)
v_sol = results[1]
println("$(results[end])")
```
Converged after 20 iterations.
While direct methods need the `A` matrix itself in memory, Krylov methods such as GMRES are built on matrix-vector products, i.e., $ A x $ evaluated for the iterates $ x $.
This product can be written directly for a given $ x $,
$$
A x = \begin{bmatrix} (\rho + \alpha) x_1 - \alpha x_2\\
- \alpha x_1 + (\rho + 2 \alpha) x_2 - \alpha x_3\\
\vdots\\
- \alpha x_{N-2} + (\rho + 2 \alpha) x_{N-1} - \alpha x_{N}\\
- \alpha x_{N-1} + (\rho + \alpha) x_N
\end{bmatrix}
$$
This can be implemented as a function (either in-place or out-of-place) which calculates $ y = A x $
```julia
A_mul(x) = [ (ρ + α) * x[1] - α * x[2];
[-α * x[i-1] + (ρ + 2*α) * x[i] - α * x[i+1] for i in 2:N-1]; # comprehension
- α * x[end-1] + (ρ + α) * x[end]]
x = rand(N)
@show norm(A * x - A_mul(x)) # compare to matrix;
```
norm(A * x - A_mul(x)) = 0.0
The final line verifies that the `A_mul` function provides the same result as the matrix multiplication with our original `A` for a random vector.
In abstract mathematics, a finite-dimensional [linear operator](https://en.wikipedia.org/wiki/Linear_map) is a mapping $ A : R^N \to R^N $
that satisfies a number of criteria such as $ A (c_1 x_1 + c_2 x_2) = c_1 A x_1 + c_2 A x_2 $ for scalars $ c_i $ and vectors $ x_i $.
Moving from abstract mathematics to [generic programming](../more_julia/generic_programming.html), we can think of a linear operator
as a map that satisfies a number of requirements (e.g., it has a left-multiply to apply the map `*`, an in-place left-multiply `mul!`, an associated `size`). A Julia matrix
is just one possible implementation of the abstract concept of a linear operator.
Convenience wrappers can provide some of the boilerplate which turns the `A_mul` function into something that behaves like a matrix. One
package is [LinearMaps.jl](https://github.com/Jutho/LinearMaps.jl) and another is [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl)
```julia
using LinearMaps
A_map = LinearMap(A_mul, N) # map uses the A_mul function
```
LinearMaps.FunctionMap{Float64}(A_mul, 100, 100; ismutating=false, issymmetric=false, ishermitian=false, isposdef=false)
Now, with the `A_map` object, we can fulfill many of the operations we would expect from a matrix
```julia
x = rand(N)
@show norm(A_map * x - A * x)
y = similar(x)
mul!(y, A_map, x) # in-place multiplication
@show norm(y - A * x)
@show size(A_map)
@show norm(Matrix(A_map) - A)
@show nnz(sparse(A_map));
```
norm(A_map * x - A * x) = 0.0
norm(y - A * x) = 0.0
size(A_map) = (100, 100)
norm(Matrix(A_map) - A) = 0.0
nnz(sparse(A_map)) = 298
**Note:** In the case of `sparse(A_map)` and `Matrix(A_map)`, the code is using the left-multiplication operator with `N` standard basis vectors to construct
the full matrix. This should be used only for testing purposes.
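To see what that construction is doing under the hood, here is a sketch that materializes the operator column by column by applying it to each standard basis vector (again, only sensible for testing on small problems):
```julia
# a sketch: materialize the operator by applying it to standard basis vectors (testing only)
basis(i, N) = (e = zeros(N); e[i] = 1.0; e)
A_dense = reduce(hcat, [A_map * basis(i, N) for i in 1:N])
@show norm(A_dense - A);
```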
But notice that as the linear operator does not have indexing operations, it is not an array or a matrix.
```julia
typeof(A_map) <: AbstractArray
```
false
As long as algorithms using linear operators are written generically (e.g., using the matrix-vector `*` or `mul!` functions) and argument types are not
constrained to be `Matrix` or `AbstractArray` when it isn’t strictly necessary, the `A_map` type can work in places which would otherwise require a matrix.
For example, the Krylov methods in `IterativeSolvers.jl` are written for generic left-multiplication
```julia
results = gmres(A_map, r, log = true) # Krylov method using the matrix-free type
println("$(results[end])")
```
Converged after 20 iterations.
These methods are typically not competitive with sparse, direct methods unless the problems become very large. In that case,
we often want to work with pre-allocated vectors. Instead of using `y = A * x` for matrix-vector products,
we would use the in-place `mul!(y, A, x)` function. The wrappers for linear operators all support in-place non-allocating versions for this purpose.
```julia
function A_mul!(y, x) # in-place version
y[1] = (ρ + α) * x[1] - α * x[2]
for i in 2:N-1
y[i] = -α * x[i-1] + (ρ + 2α) * x[i] -α * x[i+1]
end
y[end] = - α * x[end-1] + (ρ + α) * x[end]
return y
end
A_map_2 = LinearMap(A_mul!, N, ismutating = true) # ismutating == in-place
v = zeros(N)
@show norm(A_map_2 * v - A * v) # can still call with * and have it allocate
results = gmres!(v, A_map_2, r, log=true) # in-place gmres
println("$(results[end])")
```
norm(A_map_2 * v - A * v) = 0.0
Converged after 20 iterations.
Finally, keep in mind that the linear operators can compose, so that $ A (c_1 x) + B (c_2 x) + x = (c_1 A + c_2 B + I) x $ is well defined for any linear operators - just as
it would be for matrices $ A, B $ and scalars $ c_1, c_2 $.
For example, take $ 2 A x + x = (2 A + I) x \equiv B x $ as a new linear map,
```julia
B = 2.0 * A_map + I # composite linear operator
B * rand(N) # left-multiply works with the composition
typeof(B)
```
LinearMaps.LinearCombination{Float64,Tuple{LinearMaps.CompositeMap{Float64,Tuple{LinearMaps.FunctionMap{Float64,typeof(A_mul),Nothing},LinearMaps.UniformScalingMap{Float64}}},LinearMaps.UniformScalingMap{Bool}}}
Wrappers such as `LinearMap` make this composition possible by keeping the composition
graph of the expression (i.e., `LinearCombination`) and implementing the left-multiply recursively using the rules of linearity.
Another example is to solve the $ \rho v = r + Q v $ equation for $ v $ by composing matrix-free operators for $ Q $ and $ I $,
rather than by creating the full $ A = \rho I - Q $ operator directly, which we implemented as `A_mul`
```julia
Q_mul(x) = [ -α * x[1] + α * x[2];
[α * x[i-1] - 2*α * x[i] + α*x[i+1] for i in 2:N-1]; # comprehension
α * x[end-1] - α * x[end];]
Q_map = LinearMap(Q_mul, N)
A_composed = ρ * I - Q_map # map composition, performs no calculations
@show norm(A - sparse(A_composed)) # test produces the same matrix
gmres(A_composed, r, log=true)[2]
```
norm(A - sparse(A_composed)) = 0.0
Converged after 20 iterations.
In this example, the left-multiply of the `A_composed` used by `gmres` uses the left-multiply of `Q_map` and `I` with the rules
of linearity. The `A_composed = ρ * I - Q_map` operation simply creates the `LinearMaps.LinearCombination` type, and doesn’t perform any calculations on its own.
## Iterative Methods for Linear Least Squares
In theory, the solution to the least-squares problem, $ \min_x \| Ax -b \|^2 $, is simply the solution to the normal equations $ (A'A) x = A'b $.
We saw, however, that in practice, direct methods use a QR decomposition - in part because an ill-conditioned matrix $ A $ becomes even worse when $ A' A $ is formed.
For large problems, we can also consider Krylov methods for solving the linear least-squares problem. One formulation is the [LSMR](https://stanford.edu/group/SOL/software/lsmr/LSMR-SISC-2011.pdf) algorithm,
which can solve the regularized
$$
\min_x \| Ax -b \|^2 + \| \lambda x\|^2
$$
The purpose of the $ \lambda \geq 0 $ parameter is to dampen the iteration process and/or regularize the solution. This isn’t required, but can help convergence for ill-conditioned matrices $ A $. With the
damping parameter, the normal equations become $ (A'A + \lambda^2 I) x = A'b $.
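One way to think about the damping: the regularized problem is equivalent to ordinary least squares on a stacked system $ \begin{bmatrix} A \\ \lambda I \end{bmatrix} x \approx \begin{bmatrix} b \\ 0 \end{bmatrix} $. A small sketch of that equivalence on made-up data (the dimensions and $ \lambda $ here are arbitrary):
```julia
using LinearAlgebra
let M = 5, N = 20, λ = 0.1
    A = rand(N, M)                                            # made-up data
    b = rand(N)
    x_augmented = [A; λ * Matrix(I, M, M)] \ [b; zeros(M)]    # least squares on the stacked system
    x_normal = (A' * A + λ^2 * I) \ (A' * b)                  # damped normal equations
    @show norm(x_augmented - x_normal)
end;
```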
We can compare solving the least-squares problem with LSMR and direct methods
```julia
M = 1000
N = 10000
σ = 0.1
β = rand(M)
# simulate data
X = sprand(N, M, 0.1)
y = X * β + σ * randn(N)
β_direct = X \ y
results = lsmr(X, y, log = true)
β_lsmr = results[1]
@show norm(β_direct - β_lsmr)
println("$(results[end])")
```
norm(β_direct - β_lsmr) = 9.139228893911292e-6
Converged after 14 iterations.
Note that rather than forming this version of the normal equations, the LSMR algorithm uses the $ A x $ and $ A' y $ (i.e., the matrix-vector product and the matrix-transpose vector product) to implement an iterative
solution. Unlike the previous versions, the left-multiply is insufficient since the least squares also deals with the transpose of the operator. For this reason, in order to use
matrix-free methods, we need to define the `A * x` and `transpose(A) * y` functions separately.
```julia
# Could implement as matrix-free functions.
X_func(u) = X * u # matrix-vector product
X_T_func(v) = X' * v # i.e., adjoint-vector product
X_map = LinearMap(X_func, X_T_func, N, M)
results = lsmr(X_map, y, log = true)
println("$(results[end])")
```
Converged after 14 iterations.
## Iterative Methods for Eigensystems
When you use `eigen` on a dense matrix, it calculates an eigendecomposition and provides all the eigenvalues and eigenvectors.
While this is sometimes necessary, a spectral decomposition of a dense, unstructured matrix is one of the costliest $ O(N^3) $ operations (i.e., it has
one of the largest constants). For large matrices, it is often infeasible.
Luckily, we frequently need only a few eigenvectors/eigenvalues (in some cases just one), which enables a different set of algorithms.
For example, in the case of a discrete-time Markov chain, in order to find the stationary distribution, we are looking for the
eigenvector associated with the eigenvalue 1. As usual, a little linear algebra goes a long way.
From the [Perron-Frobenius theorem](https://en.wikipedia.org/wiki/Perron%E2%80%93Frobenius_theorem#Stochastic_matrices), the largest eigenvalue of an irreducible stochastic matrix is 1 - the same eigenvalue we are looking for.
Iterative methods for solving eigensystems allow targeting the smallest magnitude, the largest magnitude, and many others. The easiest library
to use is [Arpack.jl](https://julialinearalgebra.github.io/Arpack.jl/latest/).
As an example,
```julia
using Arpack, LinearAlgebra
N = 1000
A = Tridiagonal([fill(0.1, N-2); 0.2], fill(0.8, N), [0.2; fill(0.1, N-2);])
A_adjoint = A'
λ, ϕ = eigs(A_adjoint, nev=1, which=:LM, maxiter=1000) # Find 1 of the largest magnitude eigenvalue
ϕ = real(ϕ) ./ sum(real(ϕ))
@show λ
@show mean(ϕ);
```
λ = Complex{Float64}[1.0000000000000189 + 0.0im]
mean(ϕ) = 0.0010000000000000002
Indeed, the `λ` is equal to `1`. If we choose `nev = 2`, it will provide the eigenpairs with the two eigenvalues of largest absolute value.
*Hint*: If you get errors using `Arpack`, increase the `maxiter` parameter for your problems.
Iterative methods for eigensystems rely on matrix-vector products rather than decompositions, and are amenable to matrix-free approaches. For example,
take the Markov chain for a simple counting process:
1. The count starts at $ 1 $ and has a maximum of $ N $.
1. With probability $ \theta \geq 0 $ a count is gained, and with probability $ \zeta \geq 0 $ an existing count is lost, where $ \theta + \zeta \leq 1 $.
1. If the count is at $ 1 $, then the only transition is to add a count with probability $ \theta $.
1. If the current count is $ N $, then the only transition is to lose the count with probability $ \zeta $.
First, finding the transition matrix $ P $ and its adjoint directly as a check
```julia
θ = 0.1
ζ = 0.05
N = 5
P = Tridiagonal(fill(ζ, N-1), [1-θ; fill(1-θ-ζ, N-2); 1-ζ], fill(θ, N-1))
P'
```
5×5 Tridiagonal{Float64,Array{Float64,1}}:
0.9 0.05 ⋅ ⋅ ⋅
0.1 0.85 0.05 ⋅ ⋅
⋅ 0.1 0.85 0.05 ⋅
⋅ ⋅ 0.1 0.85 0.05
⋅ ⋅ ⋅ 0.1 0.95
Implementing the adjoint-vector product directly, and verifying that it gives the same matrix as the adjoint
```julia
P_adj_mul(x) = [ (1-θ) * x[1] + ζ * x[2];
[θ * x[i-1] + (1-θ-ζ) * x[i] + ζ * x[i+1] for i in 2:N-1]; # comprehension
θ * x[end-1] + (1-ζ) * x[end];]
P_adj_map = LinearMap(P_adj_mul, N)
@show norm(P' - sparse(P_adj_map))
```
norm(P' - sparse(P_adj_map)) = 0.0
0.0
Finally, solving for the stationary distribution using the matrix-free method (which could be verified against the decomposition approach of $ P' $)
```julia
λ, ϕ = eigs(P_adj_map, nev=1, which=:LM, maxiter=1000)
ϕ = real(ϕ) ./ sum(real(ϕ))
@show λ
@show ϕ
```
λ = Complex{Float64}[1.0 + 0.0im]
ϕ = [0.03225806451612657; 0.06451612903225695; 0.1290322580645172; 0.25806451612903425; 0.516129032258065]
5×1 Array{Float64,2}:
0.03225806451612657
0.06451612903225695
0.1290322580645172
0.25806451612903425
0.516129032258065
Of course, for a problem this simple, the direct eigendecomposition will be significantly faster. Use matrix-free iterative methods only for large systems where
you do not need all of the eigenvalues.
## Krylov Methods for Markov-Chain Dynamics
This example applies the methods in this lecture to a large continuous-time Markov chain, and provides some practice working with arrays of arbitrary dimensions.
Consider a version of the Markov-chain dynamics in [[Per19]](../zreferences.html#perla2019), where a firm has a discrete number of customers of different types. To keep things as simple as possible, assume that there are $ m=1, \ldots M $ types of customers and that the firm may have $ n = 1, \ldots N $ customers of each type.
To set the notation, let $ n_m \in \{1, \ldots N\} $ be the number of customers of type $ m $, so that the state of a firm is $ \{n_1, \ldots n_m \ldots, n_M\} $. The cardinality of possible states is then $ \mathbf{N}\equiv N^M $, which can blow up quickly as the number of types increases.
The stochastic process is a simple counting/forgetting process, as follows:
1. For every $ 1 \leq n_m(t) < N $, there is a $ \theta $ intensity of arrival of a new customer, so that $ n_m(t+\Delta) = n_m(t) + 1 $.
1. For every $ 1 < n_m(t) \leq N $, there is a $ \zeta $ intensity of losing a customer, so that $ n_m(t+\Delta) = n_m(t) - 1 $.
### Matrix-free Infinitesimal Generator
In order to define an intensity matrix $ Q $ of size $ \mathbf{N}\times \mathbf{N} $, we need to choose a consistent ordering of the states. But
before we enumerate them linearly, take a $ v\in R^{\mathbf{N}} $ interpreted as a multidimensional array and look at the left product of the linear operator $ Q v \to R^{\mathbf{N}} $.
For example, if we were implementing the product at the row of $ Q $ corresponding to the $ (n_1, \ldots, n_M) $ state, then
$$
\begin{align}
Q_{(n_1, \ldots n_M)} \cdot v &=
\theta \sum_{m=1}^M (n_m < N) v(n_1, \ldots, n_m + 1, \ldots, n_M)\\
&+ \zeta \sum_{m=1}^M (1 < n_m) v(n_1, \ldots, n_m - 1, \ldots, n_M)\\
&-\left(\theta\, \text{Count}(n_m < N) + \zeta\, \text{Count}( n_m > 1)\right)v(n_1, \ldots, n_M)
\end{align}
$$
Here:
- the first term includes all of the arrivals of new customers into the various $ m $
- the second term is the loss of a customer for the various $ m $
- the last term is the intensity of all exits from this state (i.e., counting the intensity of all other transitions, to ensure that the row will sum to $ 0 $)
In practice, rather than working with $ v $ as a multidimensional type, we will need to enumerate the discrete states linearly, so that we can iterate over $ v $ between $ 1 $ and $ \mathbf{N} $. An especially convenient
approach is to enumerate them in the same order as the $ M $-dimensional Cartesian product of the $ N $ states in the multi-dimensional array above.
This can be done with the `CartesianIndices` function, which is used internally in Julia for the `eachindex` function. For example,
```julia
N = 2
M = 3
shape = Tuple(fill(N, M))
v = rand(shape...)
@show typeof(v)
for ind in CartesianIndices(v)
println("v$(ind.I) = $(v[ind])") # .I gets the tuple to display
end
```
typeof(v) = Array{Float64,3}
v(1, 1, 1) = 0.639089412234831
v(2, 1, 1) = 0.4302368488000152
v(1, 2, 1) = 0.21490768283644002
v(2, 2, 1) = 0.7542051014748841
v(1, 1, 2) = 0.4330861190374067
v(2, 1, 2) = 0.07556766967902084
v(1, 2, 2) = 0.2143739072351467
v(2, 2, 2) = 0.43231874437572815
The added benefit of this approach is that it will be the most efficient way to iterate through vectors in the implementation.
For the counting process with arbitrary dimensions, we will frequently be incrementing or decrementing the $ m $ unit vectors of the `CartesianIndex` type with
```julia
e_m = [CartesianIndex((1:M .== i)*1...) for i in 1:M]
```
3-element Array{CartesianIndex{3},1}:
CartesianIndex(1, 0, 0)
CartesianIndex(0, 1, 0)
CartesianIndex(0, 0, 1)
and then use the vector to increment. For example, if the current count is `(1, 2, 2)` and we want to add a count of `1` to the first index and remove a count
of `1` from the third index, then
```julia
ind = CartesianIndex(1, 2, 2) # example counts coming from CartesianIndices
@show ind + e_m[1] # increment the first index
@show ind - e_m[3]; # decrement the third index
```
ind + e_m[1] = CartesianIndex(2, 2, 2)
ind - e_m[3] = CartesianIndex(1, 2, 1)
This works, of course, because the `CartesianIndex` type is written to support efficient addition and subtraction. Finally, to implement the operator, we need to count the indices in the states where increment and decrement occurs.
```julia
@show ind
@show count(ind.I .> 1)
@show count(ind.I .< N);
```
ind = CartesianIndex(1, 2, 2)
count(ind.I .> 1) = 2
count(ind.I .< N) = 1
With this, we are now able to write the $ Q $ operator applied to the $ v $ vector, which is enumerated by the Cartesian indices. First, collect the
parameters in a named tuple generator
```julia
using Parameters, BenchmarkTools
default_params = @with_kw (θ = 0.1, ζ = 0.05, ρ = 0.03, N = 10, M = 6,
shape = Tuple(fill(N, M)), # for reshaping vector to M-d array
e_m = ([CartesianIndex((1:M .== i)*1...) for i in 1:M]))
```
##NamedTuple_kw#256 (generic function with 2 methods)
Next, implement the in-place matrix-free product
```julia
function Q_mul!(dv, v, p)
@unpack θ, ζ, N, M, shape, e_m = p
v = reshape(v, shape) # now can access v, dv as M-dim arrays
dv = reshape(dv, shape)
@inbounds for ind in CartesianIndices(v)
dv[ind] = 0.0
for m in 1:M
n_m = ind[m]
if(n_m < N)
dv[ind] += θ * v[ind + e_m[m]]
end
if(n_m > 1)
dv[ind] += ζ *v[ind - e_m[m]]
end
end
dv[ind] -= (θ * count(ind.I .< N) + ζ * count(ind.I .> 1)) * v[ind]
end
end
p = default_params()
v = zeros(p.shape)
dv = similar(v)
@btime Q_mul!($dv, $v, $p)
```
53.485 ms (0 allocations: 0 bytes)
From the output of the benchmarking, note that the implementation of the left-multiplication takes less than 100 milliseconds, and allocates little or no memory, even though the Markov chain has a million possible states (i.e., $ N^M = 10^6 $).
### Solving a Valuation Problem
As before, we could use this Markov chain to solve a Bellman equation. Assume that the firm discounts at rate $ \rho > 0 $ and gets a flow payoff of a different $ z_m $ per
customer of type $ m $. For example, if the state of the firm is $ (n_1, n_2, n_3) = (2,3,2) $, then it gets $ \begin{bmatrix}2 & 3 & 2\end{bmatrix} \cdot \begin{bmatrix}z_1& z_2 & z_3\end{bmatrix} $ in flow profits.
Given this profit function, we can write the simple Bellman equation in our standard form of $ \rho v = r + Q v $, defining the appropriate payoff $ r $. For example, if $ z_m = m^2 $, then
```julia
function r_vec(p)
z = (1:p.M).^2 # payoffs per type m
r = [0.5 * dot(ind.I, z) for ind in CartesianIndices(p.shape)]
return reshape(r, p.N^p.M) # return as a vector
end
@show typeof(r_vec(p))
r_vec(p) |> mean
```
typeof(r_vec(p)) = Array{Float64,1}
250.25
Note that the returned $ r $ is a vector, enumerated in the same order as the $ n_m $ states.
Since the ordering of $ r $ is consistent with that of $ Q $, we can solve $ (\rho I - Q) v = r $ as a linear system.
Below, we create a linear operator and compare a few different iterative methods [(GMRES, BiCGStab(l), IDR(s), etc.)](https://juliamath.github.io/IterativeSolvers.jl/dev/#What-method-should-I-use-for-linear-systems?-1) with a small problem
of only 10,000 possible states.
```julia
p = default_params(N=10, M=4)
Q = LinearMap((df, f) -> Q_mul!(df, f, p), p.N^p.M, ismutating = true)
A = p.ρ * I - Q
A_sparse = sparse(A) # expensive: use only in tests
r = r_vec(p)
v_direct = A_sparse \ r
iv = zero(r)
@btime $A_sparse \ $r # direct
@show norm(gmres(A, r) - v_direct)
@btime gmres!(iv, $A, $r) setup = (iv = zero(r))
@show norm(bicgstabl(A, r) - v_direct)
@btime bicgstabl!(iv, $A, $r) setup = (iv = zero(r))
@show norm(idrs(A, r) - v_direct)
@btime idrs($A, $r);
```
679.258 ms (75 allocations: 181.85 MiB)
norm(gmres(A, r) - v_direct) = 2.390837794373907e-5
5.272 ms (226 allocations: 3.37 MiB)
norm(bicgstabl(A, r) - v_direct) = 1.108634674599104e-5
23.129 ms (432 allocations: 9.86 MiB)
norm(idrs(A, r) - v_direct) = 4.992408654237078e-8
7.443 ms (312 allocations: 4.91 MiB)
Here, we see that even if the $ A $ matrix has been created, the direct sparse solver (which uses a sparse LU or QR) is at least an order of magnitude slower and allocates over an order of magnitude more memory. This is in addition to the allocation for the `A_sparse` matrix itself, which is not needed for iterative methods.
The different iterative methods have tradeoffs when it comes to accuracy, speed, convergence rate, memory requirements, and usefulness of preconditioning. Going much above $ \mathbf{N} = 10^4 $, the direct methods quickly become infeasible.
Putting everything together, we now solve much larger systems with GMRES as our linear solver
```julia
function solve_bellman(p; iv = zeros(p.N^p.M))
@unpack ρ, N, M = p
Q = LinearMap((df, f) -> Q_mul!(df, f, p), N^M, ismutating = true)
A = ρ * I - Q
r = r_vec(p)
sol = gmres!(iv, A, r, log = false) # iterative solver, matrix-free
return sol
end
p = default_params(N=10, M=6)
@btime solve_bellman($p);
```
1.581 s (270 allocations: 366.23 MiB)
This solves a value function with a Markov chain of a million states in a little over a second! This general approach seems to scale roughly linearly. For example, try $ N=10, M=8 $
to solve an equation with a Markov chain with 100 million possible states, which can be solved in about 3-4 minutes. Above that order of magnitude, you may need to tinker with the linear solver parameters to ensure that you are not memory limited (e.g., change the `restart` parameter of GMRES).
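For example, a minimal sketch of passing a larger `restart` value through to `gmres!` (reusing `Q_mul!` and `r_vec` from above; the wrapper name is just for illustration):
```julia
function solve_bellman_restart(p; restart = 50, iv = zeros(p.N^p.M))
    Q = LinearMap((df, f) -> Q_mul!(df, f, p), p.N^p.M, ismutating = true)
    return gmres!(iv, p.ρ * I - Q, r_vec(p), restart = restart)
end
```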
### Markov Chain Steady State and Dynamics
Recall that given an $ N $-dimensional intensity matrix $ Q $ of a CTMC, the evolution of the pdf from an initial condition $ \psi(0) $ is the system of linear differential equations
$$
\dot{\psi}(t) = Q^T \psi(t)
$$
If $ Q $ is a matrix, we could just take its transpose to find the adjoint. However, with matrix-free methods, we need to implement the
adjoint-vector product directly.
The logic for the adjoint is that for a given $ n = (n_1,\ldots, n_m, \ldots n_M) $, the $ Q^T $ product for that row has terms entering when
1. $ 1 < n_m \leq N $, from the state identical to $ n $ except with one less customer in the $ m $th position
1. $ 1 \leq n_m < N $, from the state identical to $ n $ except with one more customer in the $ m $th position
Implementing this logic, first in math and then in code,
$$
\begin{align}
Q^T_{(n_1, \ldots, n_M)} \cdot \psi &=
\theta \sum_{m=1}^M (n_m > 1) \psi(n_1, \ldots, n_m - 1, \ldots, n_M)\\
&+ \zeta \sum_{m=1}^M (n_m < N) \psi(n_1, \ldots, n_m + 1, \ldots, n_M)\\
&-\left(\theta\, \text{Count}(n_m < N) + \zeta\, \text{Count}( n_m > 1)\right)\psi(n_1, \ldots, n_M)
\end{align}
$$
```julia
function Q_T_mul!(dψ, ψ, p)
@unpack θ, ζ, N, M, shape, e_m = p
ψ = reshape(ψ, shape)
dψ = reshape(dψ, shape)
@inbounds for ind in CartesianIndices(ψ)
dψ[ind] = 0.0
for m in 1:M
n_m = ind[m]
if(n_m > 1)
dψ[ind] += θ * ψ[ind - e_m[m]]
end
if(n_m < N)
dψ[ind] += ζ *ψ[ind + e_m[m]]
end
end
dψ[ind] -= (θ * count(ind.I .< N) + ζ * count(ind.I .> 1)) * ψ[ind]
end
end
```
Q_T_mul! (generic function with 1 method)
The `sparse` function applied to the operator is useful for testing that the function is correct and that it is indeed the adjoint of
our `Q` operator.
```julia
p = default_params(N=5, M=4) # sparse is too slow for the full matrix
Q = LinearMap((df, f) -> Q_mul!(df, f, p), p.N^p.M, ismutating = true)
Q_T = LinearMap((dψ, ψ) -> Q_T_mul!(dψ, ψ, p), p.N^p.M, ismutating = true)
@show norm(sparse(Q)' - sparse(Q_T)); # reminder: use sparse only for testing!
```
norm((sparse(Q))' - sparse(Q_T)) = 0.0
As discussed previously, the steady state can be found as the eigenvector associated with the zero eigenvalue (i.e., the one that solves $ Q^T \psi = 0 \psi $). We could
do this with a dense eigenvalue solution for relatively small matrices
```julia
p = default_params(N=5, M=4)
eig_Q_T = eigen(Matrix(Q_T))
vec = real(eig_Q_T.vectors[:,end])
direct_ψ = vec ./ sum(vec)
@show eig_Q_T.values[end];
```
eig_Q_T.values[end] = -4.163336342344337e-16 + 0.0im
This approach relies on a full factorization of the underlying matrix, delivering the entire spectrum. For our purposes, this is not necessary.
Instead, we could use the `Arpack.jl` package to target the eigenvalue of smallest absolute value, which relies on an iterative method.
A final approach in this case is to notice that the $ \mathbf{N}\times\mathbf{N} $ matrix is of
rank $ \mathbf{N} - 1 $ when the Markov chain is irreducible. The stationary solution is a vector in the $ 1 $-dimensional nullspace
of the matrix.
Using Krylov methods to solve a linear system with a right-hand side of all $ 0 $ (i.e., $ \min_x \|A x - 0\|_2 $), solved
iteratively from a non-zero initial condition, will converge to a point in the nullspace.
We can use various Krylov methods for this trick (e.g., if the matrix is symmetric and positive definite, we could use Conjugate Gradient) but in our case we will
use GMRES since we do not have any structure.
```julia
p = default_params(N=5, M=4) # sparse is too slow for the full matrix
Q_T = LinearMap((dψ, ψ) -> Q_T_mul!(dψ, ψ, p), p.N^p.M, ismutating = true)
ψ = fill(1/(p.N^p.M), p.N^p.M) # can't use 0 as initial guess
sol = gmres!(ψ, Q_T, zeros(p.N^p.M)) # i.e., solve Ax = 0 iteratively
ψ = ψ / sum(ψ)
@show norm(ψ - direct_ψ);
```
norm(ψ - direct_ψ) = 6.098250476301026e-11
The speed and memory differences between these methods can be orders of magnitude.
```julia
p = default_params(N=4, M=4) # Dense and sparse matrices are too slow for the full dataset.
Q_T = LinearMap((dψ, ψ) -> Q_T_mul!(dψ, ψ, p), p.N^p.M, ismutating = true)
Q_T_dense = Matrix(Q_T)
Q_T_sparse = sparse(Q_T)
b = zeros(p.N^p.M)
@btime eigen($Q_T_dense)
@btime eigs($Q_T_sparse, nev=1, which=:SM, v0 = iv) setup = (iv = fill(1/(p.N^p.M), p.N^p.M))
@btime gmres!(iv, $Q_T, $b) setup = (iv = fill(1/(p.N^p.M), p.N^p.M));
```
38.993 ms (270 allocations: 2.28 MiB)
4.485 ms (341 allocations: 1.21 MiB)
180.634 μs (359 allocations: 66.03 KiB)
The differences become even more stark as the matrix grows. With `default_params(N=5, M=5)`, the `gmres` solution is at least 3 orders of magnitude faster, and uses close to 3 orders of magnitude less memory than the dense solver. In addition, the `gmres` solution is about an order of magnitude faster than the iterative sparse eigenvalue solver.
The algorithm can solve for the steady state of $ 10^5 $ states in a few seconds
```julia
function stationary_ψ(p)
Q_T = LinearMap((dψ, ψ) -> Q_T_mul!(dψ, ψ, p), p.N^p.M, ismutating = true)
ψ = fill(1/(p.N^p.M), p.N^p.M) # can't use 0 as initial guess
sol = gmres!(ψ, Q_T, zeros(p.N^p.M)) # i.e., solve Ax = 0 iteratively
return ψ / sum(ψ)
end
p = default_params(N=10, M=5)
@btime stationary_ψ($p);
```
3.183 s (4880 allocations: 19.32 MiB)
As a final demonstration, consider calculating the full evolution of the $ ψ(t) $ Markov chain. For the constant
$ Q' $ matrix, the solution to this system of equations is $ \psi(t) = \exp(Q' t)\, \psi(0) $.
Matrix-free Krylov methods using a technique called [exponential integration](https://en.wikipedia.org/wiki/Exponential_integrator) can solve this for high-dimensional problems.
For this, we can set up a `MatrixFreeOperator` for our `Q_T_mul!` function (equivalent to the `LinearMap`, but with some additional requirements for the ODE solver) and use the [LinearExponential](http://docs.juliadiffeq.org/latest/solvers/ode_solve.html#Exponential-Methods-for-Linear-and-Affine-Problems-1) time-stepping method.
```julia
using OrdinaryDiffEq, DiffEqOperators
function solve_transition_dynamics(p, t)
@unpack N, M = p
ψ_0 = [1.0; fill(0.0, N^M - 1)]
O! = MatrixFreeOperator((dψ, ψ, p, t) -> Q_T_mul!(dψ, ψ, p), (p, 0.0), size=(N^M,N^M), opnorm=(p)->1.25)
# define the corresponding ODE problem
prob = ODEProblem(O!,ψ_0,(0.0,t[end]), p)
return solve(prob, LinearExponential(krylov=:simple), tstops = t)
end
t = 0.0:5.0:100.0
p = default_params(N=10, M=6)
sol = solve_transition_dynamics(p, t)
v = solve_bellman(p)
plot(t, [dot(sol(tval), v) for tval in t], xlabel = "t", label = ["E_t(v)"])
```
The above plot (1) calculates the full dynamics of the Markov chain from the $ n_m = 1 $ for all $ m $ initial condition; (2) solves the dynamics of a system of a million ODEs; and (3) uses the calculation of the Bellman equation to find the expected valuation during that transition. The entire process takes less than 30 seconds.
# Linear Vs. Non-Linear Functions
** October 2017 **
** Andrew Riberio @ [AndrewRib.com](http://www.andrewrib.com) **
Resources
* https://en.wikipedia.org/wiki/Linear_function
* https://www.montereyinstitute.org/courses/Algebra1/COURSE_TEXT_RESOURCE/U03_L2_T5_text_final.html
* https://en.wikipedia.org/wiki/Linear_combination
## Libraries
```python
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
```
## Linear Functions
A linear function of one variable has the geometric interpretation of a line when we plot the space of inputs against the output space: y = f(x). In three dimensions we can do a similar thing, z = f(x,y), but we arrive at the generalization of a line in 3d space called a *plane.*
```python
# There are no functions in one dimension since we cannot delineate inputs/outputs.
# A single dimension is just a constant value of existence with no notion of difference.
# Two dimensional function
def f2(x):
    return x
# Three dimensional function. Output is z.
def f3(x,y):
return x + y
# Four dimensional function. We cannot visualize this easily.
def f4(x,y,z):
return x + y + z
t1 = np.arange(0.0, 5.0, 0.1)
plt.figure(1)
plt.subplot(211)
plt.plot(t1, f2(t1))
X,Y = np.meshgrid(t1,t1)
plt.subplot(212,projection="3d")
plt.plot(X,Y, f3(t1,t1))
plt.show()
```
**Theorem:** The derivative of every linear function is a constant.
```python
x_sym, y_sym = sp.symbols('x y')
print("dy/dx f2 = {0}".format(sp.diff(f2(x_sym))))
print("∂z/∂x f3 = {0}".format(sp.diff(f3(x_sym,y_sym),x_sym)))
```
dy/dx f2 = 1
∂z/∂x f3 = 1
**Definition:** A *linear combination* of variables (x<sub>1</sub>, x<sub>2</sub>, ... , x<sub>n</sub>) = ∂<sub>1</sub>x<sub>1</sub> + ∂<sub>2</sub>x<sub>2</sub>+ ... + ∂<sub>n</sub>x<sub>n</sub> where the ∂<sub>i</sub> are constants.
**Theorem:** All linear functions can be represented as a linear combination.
## Non-Linear Functions
Non-linear functions are more unpredictable than linear functions because the derivative of a non-linear function is always a function, not a constant. The geometric interpretation of non-linear functions encompasses the diverse space of curves.
```python
# Two dimensional function
def f2(x):
    return x**2
# Three dimensional function. Output is z.
def f3(x,y):
return x * y
t1 = np.arange(0.0, 5.0, 0.1)
plt.figure(1)
plt.subplot(211)
plt.plot(t1, f2(t1))
plt.subplot(212,projection="3d")
X,Y = np.meshgrid(t1,t1)
plt.plot(X,Y, f3(t1,t1))
plt.show()
```
**Theorem:** The derivative of every non-linear function is **not a constant**.
```python
x_sym, y_sym = sp.symbols('x y')
print("dy/dx f2 = {0}".format(sp.diff(f2(x_sym))))
print("∂z/∂x f3 = {0}".format(sp.diff(f3(x_sym,y_sym),x_sym)))
```
dy/dx f2 = 2*x
∂z/∂x f3 = y
**Theorem:** Non-linear functions cannot be represented by a linear combination.
```python
from logicqubit.logic import *
from cmath import *
import numpy as np
import sympy as sp
import scipy
from scipy.optimize import *
import matplotlib.pyplot as plt
```
Cuda is not available!
logicqubit version 1.5.8
https://arxiv.org/abs/1304.3061
https://cpb-us-w2.wpmucdn.com/voices.uchicago.edu/dist/0/2327/files/2020/10/SimultaneousMeasurementVQE.pdf1
```python
gates = Gates(1)
ID = gates.ID()
X = gates.X()
Y = gates.Y()
Z = gates.Z()
```
```python
II = ID.kron(ID)
XX = X.kron(X)
XI = X.kron(ID)
IX = ID.kron(X)
XY = X.kron(Y)
XZ = X.kron(Z)
YY = Y.kron(Y)
YI = Y.kron(ID)
IY = ID.kron(Y)
YX = Y.kron(X)
YZ = Y.kron(Z)
ZZ = Z.kron(Z)
ZI = Z.kron(ID)
IZ = ID.kron(Z)
ZX = Z.kron(X)
ZY = Z.kron(Y)
sig_is = np.kron([1, 1], [1, -1])
sig_si = np.kron([1, -1], [1, 1])
ZZ.get()
```
array([[ 1, 0, 0, 0],
[ 0, -1, 0, 0],
[ 0, 0, -1, 0],
[ 0, 0, 0, 1]])
```python
#H = XZ + ZI*2 # -ZX + 2*ZI
H = XX*3 + ZZ*7
min(scipy.linalg.eig(H.get())[0])
```
(-10+0j)
```python
def ansatz(reg, params):
n_qubits = len(reg)
depth = n_qubits
for i in range(depth):
for j in range(n_qubits):
if(j < n_qubits-1):
reg[j+1].CNOT(reg[j])
reg[i].RY(params[j])
def ansatz_2q(q1, q2, params):
q2.CNOT(q1)
q1.RY(params[0])
q2.RY(params[1])
q1.CNOT(q2)
q1.RY(params[2])
q2.RY(params[3])
q2.CNOT(q1)
q1.RY(params[4])
q2.RY(params[5])
q1.CNOT(q2)
```
```python
def expectation_2q(params):
logicQuBit = LogicQuBit(2)
q1 = Qubit()
q2 = Qubit()
ansatz_2q(q1,q2,params)
psi = logicQuBit.getPsi()
return (psi.adjoint()*H*psi).get()[0][0]
minimum = minimize(expectation_2q, [0,0,0,0,0,0], method='Nelder-Mead', options={'xtol': 1e-10, 'ftol': 1e-10})
print(minimum)
```
/home/cleoner/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py:586: ComplexWarning: Casting complex values to real discards the imaginary part
fsim[k] = func(sim[k])
/home/cleoner/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py:611: ComplexWarning: Casting complex values to real discards the imaginary part
fsim[-1] = fxe
/home/cleoner/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py:618: ComplexWarning: Casting complex values to real discards the imaginary part
fsim[-1] = fxr
/home/cleoner/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py:614: ComplexWarning: Casting complex values to real discards the imaginary part
fsim[-1] = fxr
/home/cleoner/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py:637: ComplexWarning: Casting complex values to real discards the imaginary part
fsim[-1] = fxcc
/home/cleoner/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py:627: ComplexWarning: Casting complex values to real discards the imaginary part
fsim[-1] = fxc
final_simplex: (array([[ 5.31847022, -0.42327127, -3.1307354 , 1.06588762, 1.01423211,
-3.42077752],
[ 5.31847022, -0.42327127, -3.1307354 , 1.06588762, 1.01423211,
-3.42077752],
[ 5.31847022, -0.42327127, -3.1307354 , 1.06588762, 1.01423211,
-3.42077752],
[ 5.31847022, -0.42327127, -3.1307354 , 1.06588762, 1.01423211,
-3.42077752],
[ 5.31847022, -0.42327127, -3.1307354 , 1.06588762, 1.01423211,
-3.42077752],
[ 5.31847022, -0.42327127, -3.1307354 , 1.06588762, 1.01423211,
-3.42077752],
[ 5.31847022, -0.42327127, -3.1307354 , 1.06588762, 1.01423211,
-3.42077752]]), array([-10., -10., -10., -10., -10., -10., -10.]))
fun: -10.000000000000004
message: 'Optimization terminated successfully.'
nfev: 1065
nit: 614
status: 0
success: True
x: array([ 5.31847022, -0.42327127, -3.1307354 , 1.06588762, 1.01423211,
-3.42077752])
/home/cleoner/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py:644: ComplexWarning: Casting complex values to real discards the imaginary part
fsim[j] = func(sim[j])
```python
def expectation_value(measurements, base = np.array([1,-1,-1,1])):
probabilities = np.array(measurements)
expectation = np.sum(base * probabilities)
return expectation
def sigma_xx(params):
logicQuBit = LogicQuBit(2, first_left = False)
q1 = Qubit()
q2 = Qubit()
ansatz_2q(q1,q2,params)
    # measurements in the XX basis
q1.RY(-pi/2)
q2.RY(-pi/2)
result = logicQuBit.Measure([q1,q2])
result = expectation_value(result)
return result
def sigma_yy(params):
logicQuBit = LogicQuBit(2, first_left = False)
q1 = Qubit()
q2 = Qubit()
ansatz_2q(q1,q2,params)
    # measurements in the YY basis
q1.RX(pi/2)
q2.RX(pi/2)
result = logicQuBit.Measure([q1,q2])
yy = expectation_value(result)
iy = expectation_value(result, sig_is)
yi = expectation_value(result, sig_si)
return yy, iy, yi
def sigma_zz(params):
logicQuBit = LogicQuBit(2, first_left = False)
q1 = Qubit()
q2 = Qubit()
ansatz_2q(q1,q2,params)
result = logicQuBit.Measure([q1,q2])
zz = expectation_value(result)
iz = expectation_value(result, sig_is) # [zz, iz] = 0
zi = expectation_value(result, sig_si) # [zz, zi] = 0
return zz, iz, zi
def sigma_zx(params):
logicQuBit = LogicQuBit(2, first_left = False)
q1 = Qubit()
q2 = Qubit()
ansatz_2q(q1,q2,params)
    # measurements in X (only q2 is rotated into the X basis)
#q1.RY(-pi/2)
q2.RY(-pi/2)
result = logicQuBit.Measure([q1,q2])
zx = expectation_value(result)
return zx
def sigma_xx_zz(params):
logicQuBit = LogicQuBit(2)
q1 = Qubit()
q2 = Qubit()
ansatz_2q(q1,q2,params)
    # measurement in the Bell basis
q2.CX(q1)
q1.H() # Hx1
result = logicQuBit.Measure([q1,q2])
zi = expectation_value(result, sig_si)
iz = expectation_value(result, sig_is)
xx = zi
zz = iz
return xx, zz
def expectation_energy(params):
#xx = sigma_xx(params)
#yy = sigma_yy(params)
#zz, iz, zi = sigma_zz(params)
#zx = sigma_zx(params)
#result = -zx + zi*2
xx, zz = sigma_xx_zz(params)
result = xx*3 + zz*7
return result
```
```python
minimum = minimize(expectation_energy, [0,0,0,0,0,0], method='Nelder-Mead', options={'xtol': 1e-10, 'ftol': 1e-10})
print(minimum)
```
final_simplex: (array([[ 2.19396352, -0.53207923, 0.91245827, -3.03402174, 0.31749551,
-1.89342844],
[ 2.19396352, -0.53207923, 0.91245827, -3.03402174, 0.31749551,
-1.89342844],
[ 2.19396352, -0.53207923, 0.91245827, -3.03402174, 0.31749551,
-1.89342844],
[ 2.19396352, -0.53207923, 0.91245827, -3.03402174, 0.31749551,
-1.89342844],
[ 2.19396352, -0.53207923, 0.91245828, -3.03402174, 0.31749551,
-1.89342844],
[ 2.19396352, -0.53207923, 0.91245827, -3.03402174, 0.31749551,
-1.89342844],
[ 2.19396352, -0.53207923, 0.91245827, -3.03402174, 0.31749551,
-1.89342844]]), array([-10., -10., -10., -10., -10., -10., -10.]))
fun: -10.000000000000005
message: 'Optimization terminated successfully.'
nfev: 932
nit: 543
status: 0
success: True
x: array([ 2.19396352, -0.53207923, 0.91245827, -3.03402174, 0.31749551,
-1.89342844])
```python
def gradient(params, evaluate, shift = pi/2):
n_params = params.shape[0]
gradients = np.zeros(n_params)
for i in range(n_params):
#parameter shift rule
shift_vect = np.array([shift if j==i else 0 for j in range(n_params)])
shift_right = params + shift_vect
shift_left = params - shift_vect
expectation_right = evaluate(shift_right)
expectation_left = evaluate(shift_left)
gradients[i] = expectation_right - expectation_left
return gradients
```
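For context, the `gradient` function above follows the parameter-shift idea: each partial derivative of the expectation value is estimated from two circuit evaluations, one with the parameter shifted up and one shifted down. In its standard form (shift $s=\pi/2$) the rule is

$$
\frac{\partial \langle H \rangle}{\partial \theta_i} = \frac{1}{2}\left[\langle H \rangle\left(\theta_i + \frac{\pi}{2}\right) - \langle H \rangle\left(\theta_i - \frac{\pi}{2}\right)\right].
$$

Note that the code returns the raw difference without the factor of $1/2$ (and allows other shift values); that constant scaling is effectively absorbed into the learning rate of the gradient-descent loop below.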
```python
params = np.random.uniform(-np.pi, np.pi, 6)
last_params = np.zeros(6)
```
```python
lr = 0.1
err = 1
while err > 1e-3:
grad = gradient(params, expectation_energy, pi/1.3)
params = params - lr*grad
err = abs(sum(params - last_params))
last_params = np.array(params)
print(err)
```
2.740998722971736
0.7664059946733903
2.6551149987384
1.2608214802431843
1.6463140203823585
0.9882522116289967
0.7597045148619697
0.7288509454263662
0.48616133345178747
0.5674605061569935
0.38406516697773163
0.43028564541804637
0.29331113347599436
0.3180409806639399
0.22291660083314208
0.23564204761260749
/tmp/ipykernel_13066/2399136664.py:14: ComplexWarning: Casting complex values to real discards the imaginary part
gradients[i] = expectation_right - expectation_left
0.17091061131625096
0.17690361978602842
0.13286436551320124
0.1350978736282606
0.1047919107121954
0.10491825192171336
0.0837058159106186
0.08266834303990159
0.06753875093640191
0.06589648464516769
0.05490283477970613
0.0529959455511186
0.04486783316222952
0.04290418378493158
0.03679987111384837
0.03490346410358293
0.030255093195600533
0.02849544969493395
0.024912658187603715
0.023323817622205006
0.0205332435402516
0.019126519558116256
0.016933316866161174
0.015705975012091533
0.013968936519843905
0.012910073278434617
0.011525275889824105
0.010619738007284787
0.009509607723144992
0.008740537754570302
0.007846417010252585
0.0071968454167906926
0.006473860085199415
0.005927646503809669
0.005341106487352892
0.004883446257502394
0.004406284195538374
0.004023932295785418
0.0036348551808148
0.0033161729228793763
0.002998310149989969
0.002733206203465899
0.0024731080036946296
0.0022529213447672614
0.002039807784688666
0.0018571634170972545
0.00168235488315098
0.0015310116825909825
0.0013874924506602632
0.001262194729184235
0.0011442752984405363
0.0010406145568409575
0.0009436681184822188
```python
expectation_energy(params)
```
(-9.999986367379375+0j)
```python
```
<!-- dom:TITLE: Project 4, deadline November 18 -->
# Project 4, deadline November 18
<!-- dom:AUTHOR: [Computational Physics I FYS3150/FYS4150](http://www.uio.no/studier/emner/matnat/fys/FYS3150/index-eng.html) at Department of Physics, University of Oslo, Norway -->
<!-- Author: -->
**[Computational Physics I FYS3150/FYS4150](http://www.uio.no/studier/emner/matnat/fys/FYS3150/index-eng.html)**, Department of Physics, University of Oslo, Norway
Date: **Fall semester 2019**
## Studies of phase transitions in magnetic systems
### Introduction
The aim of this project is to study a widely popular model to simulate phase transitions, the so-called Ising model in two dimensions. At a given critical temperature, this model exhibits a phase transition from a magnetic phase (a system with a finite magnetic moment) to a phase with zero magnetization.
This is a so-called binary system where the objects at each lattice site can only take two values. These could be $0$ and $1$ or other values. Here we will use spins pointing up or down as the model for our system. But we could replace the spins with blue and green balls for example.
The [Ising model](https://en.wikipedia.org/wiki/Ising_model) has been extremely popular, with applications spanning from studies of phase transitions to simulations in statistics. In one and two dimensions it has analytical solutions for several expectation values, and it gives a qualitatively good understanding of several types of phase transitions.
In its simplest form
the energy of the Ising model is expressed as, without an externally applied magnetic field,
$$
E=-J\sum_{< kl >}^{N}s_ks_l
$$
with
$s_k=\pm 1$. The quantity $N$ represents the total number of spins and $J$ is a coupling
constant expressing the strength of the interaction between
neighboring spins. The symbol $<kl>$ indicates that we sum over
nearest neighbors only. We will assume that we have a ferromagnetic
ordering, viz $J> 0$. We will use periodic boundary conditions and
the Metropolis algorithm only. The material on the Ising model can be found in chapter 13 of the lecture notes. The Metropolis algorithm is discussed in chapter 12.
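To make the notation above concrete, here is a minimal (unoptimized) Python sketch of how the energy of a single spin configuration can be evaluated with periodic boundary conditions. The function name, lattice size and coupling below are our own choices for illustration and are not part of the project template.

```python
import numpy as np

def ising_energy(spins, J=1.0):
    """Total energy E = -J * sum over nearest-neighbour pairs s_k*s_l for a 2D
    lattice of +/-1 spins with periodic boundary conditions."""
    # np.roll wraps around the lattice edges, and using only the 'right' and
    # 'down' neighbours counts every bond exactly once.
    right = np.roll(spins, -1, axis=1)
    down = np.roll(spins, -1, axis=0)
    return -J * np.sum(spins * (right + down))

L = 4
spins = np.random.choice([-1, 1], size=(L, L))
print(ising_energy(spins))
```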
**For this project you can hand in collaborative reports and programs.**
This project (together with projects 3 and 5) counts 1/3 of the final mark.
### Project 4a): A simple $2\times 2$ lattice, analytical expressions
Assume we have only two spins in each dimension, that is $L=2$.
Find the analytical expression
for the partition function and the corresponding
expectations values for the energy $E$, the mean absolute value of the magnetic moment $\vert M\vert$ (we will refer to this as the mean magnetization),
the specific heat $C_V$ and the susceptibility $\chi$
as functions of $T$ using periodic boundary conditions. These results will serve as benchmark calculations for our next steps.
### Project 4b): Writing a code for the Ising model
Write now a code for the Ising model which computes the mean energy
$E$, mean magnetization
$\vert M\vert$, the specific heat $C_V$ and the susceptibility $\chi$
as functions of $T$ using periodic boundary conditions for
$L=2$ in the $x$ and $y$ directions.
Compare your results with the expressions from a)
for a temperature $T=1.0$ (in units of $kT/J$).
How many Monte Carlo cycles do you need in order to achieve a
good agreement?
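As a starting point for this code (only a sketch under our own naming conventions, not a complete solution), a single Metropolis sweep of the lattice can look like this in Python:

```python
import numpy as np

def metropolis_sweep(spins, T, rng=None):
    """One Monte Carlo sweep: L*L attempted single-spin flips with the Metropolis
    rule, in units where J = k = 1 (so T is kT/J)."""
    rng = np.random.default_rng() if rng is None else rng
    L = spins.shape[0]
    for _ in range(L * L):
        i, j = rng.integers(0, L, size=2)
        # Energy change from flipping spin (i, j); periodic boundaries via modulo
        neighbours = (spins[(i + 1) % L, j] + spins[(i - 1) % L, j] +
                      spins[i, (j + 1) % L] + spins[i, (j - 1) % L])
        dE = 2.0 * spins[i, j] * neighbours
        if dE <= 0 or rng.random() < np.exp(-dE / T):
            spins[i, j] *= -1
    return spins
```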
### Project 4c): When is the most likely state reached?
We choose now a square lattice with $L=20$ spins in the $x$ and $y$ directions.
In the previous exercise we did not study carefully how many Monte Carlo cycles were needed in order to reach the most likely state. Here
we want to perform a study of the time (here it corresponds to the number
of Monte Carlo sweeps of the lattice) one needs before one reaches an equilibrium situation
and can start computing various expectations values. Our
first attempt is a rough and plain graphical
one, where we plot various expectations values as functions of the number of Monte Carlo cycles.
Choose first a temperature of $T=1.0$ (in units of $kT/J$) and study the
mean energy and magnetisation (absolute value) as functions of the number of Monte Carlo cycles. Let the number of Monte Carlo cycles (sweeps per lattice)
represent time.
Use both an ordered (all spins pointing in one direction) and a random
spin orientation as starting configuration.
How many Monte Carlo cycles do you need before you reach an equilibrium situation?
Repeat this analysis for $T=2.4$.
Can you, based on these values, estimate an equilibration time?
Make also a plot of the total number of accepted configurations
as function of the total number of Monte Carlo cycles. How does the number of
accepted configurations behave as function of temperature $T$?
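A possible skeleton for this equilibration study (again only a sketch; it reuses the illustrative `ising_energy` and `metropolis_sweep` helpers from the sketches above, which are our own names rather than part of the project template):

```python
import numpy as np

L, T, n_cycles = 20, 1.0, 10000
spins = np.ones((L, L), dtype=int)        # ordered start; use random +/-1 spins for a disordered start

E_per_cycle, M_per_cycle = [], []
for cycle in range(n_cycles):
    spins = metropolis_sweep(spins, T)    # one cycle = one sweep of L*L attempted flips
    E_per_cycle.append(ising_energy(spins))
    M_per_cycle.append(abs(spins.sum()))

# Running means show how many cycles are needed before the estimates stabilize
running_E = np.cumsum(E_per_cycle) / np.arange(1, n_cycles + 1)
running_M = np.cumsum(M_per_cycle) / np.arange(1, n_cycles + 1)
```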
### Project 4d): Analyzing the probability distribution
Compute thereafter the probability
$P(E)$ for the previous system with $L=20$ and the same temperatures, that is at $T=1.0$ and $T=2.4$.
You compute this probability by simply counting the number of times a
given energy appears in your computation. Start the computation after
the steady state situation has been reached.
Compare your results with the computed variance in energy
$\sigma^2_E$ and discuss the behavior you observe.
### Studies of phase transitions
Near $T_C$ we can characterize the behavior of many physical quantities
by a power law behavior.
As an example, for the Ising class of models,
the mean magnetization is given by
$$
\langle M(T) \rangle \sim \left(T-T_C\right)^{\beta},
$$
where $\beta=1/8$ is a so-called critical exponent. A similar relation
applies to the heat capacity
$$
C_V(T) \sim \left|T_C-T\right|^{-\alpha},
$$
and the susceptibility
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
\chi(T) \sim \left|T_C-T\right|^{-\gamma},
\label{_auto1} \tag{1}
\end{equation}
$$
with $\alpha = 0$ and $\gamma = 7/4$.
Another important quantity is the correlation length, which is expected
to be of the order of the lattice spacing for $T>> T_C$. Because the spins
become more and more correlated as $T$ approaches $T_C$, the correlation
length increases as we get closer to the critical temperature. The divergent
behavior of $\xi$ near $T_C$
is
<!-- Equation labels as ordinary links -->
<div id="eq:xi"></div>
$$
\begin{equation}
\xi(T) \sim \left|T_C-T\right|^{-\nu}.
\label{eq:xi} \tag{2}
\end{equation}
$$
A second-order phase transition is characterized by a
correlation length which spans the whole system.
Since we are always limited to a finite lattice, $\xi$ will
be proportional with the size of the lattice.
Through so-called finite size scaling relations
it is possible to relate the behavior at finite lattices with the
results for an infinitely large lattice.
The critical temperature scales then as
<!-- Equation labels as ordinary links -->
<div id="eq:tc"></div>
$$
\begin{equation}
T_C(L)-T_C(L=\infty) = aL^{-1/\nu},
\label{eq:tc} \tag{3}
\end{equation}
$$
with $a$ a constant and $\nu$ defined in Eq. ([2](#eq:xi)).
We set $T=T_C$ and obtain a mean magnetisation
<!-- Equation labels as ordinary links -->
<div id="eq:scale1"></div>
$$
\begin{equation}
\langle {\cal M}(T) \rangle \sim \left(T-T_C\right)^{\beta}
\rightarrow L^{-\beta/\nu},
\label{eq:scale1} \tag{4}
\end{equation}
$$
a heat capacity
<!-- Equation labels as ordinary links -->
<div id="eq:scale2"></div>
$$
\begin{equation}
C_V(T) \sim \left|T_C-T\right|^{-\alpha} \rightarrow L^{\alpha/\nu},
\label{eq:scale2} \tag{5}
\end{equation}
$$
and susceptibility
<!-- Equation labels as ordinary links -->
<div id="eq:scale3"></div>
$$
\begin{equation}
\chi(T) \sim \left|T_C-T\right|^{-\gamma} \rightarrow L^{\gamma/\nu}.
\label{eq:scale3} \tag{6}
\end{equation}
$$
### Project 4e): Numerical studies of phase transitions
We wish to study the behavior of the Ising model
in two dimensions close to the critical temperature as a function of
the lattice size $L\times L$. Calculate the expectation values for
$\langle E\rangle$ and $\langle \vert M\vert \rangle$, the specific heat
$C_V$ and the susceptibility $\chi$ as functions of $T$ for $L=40$,
$L=60$, $L=80$ and $L=100$ for $T\in [2.0,2.3]$ with a step in
temperature $\Delta T=0.05$ or smaller. You may find it convenient to narrow the domain for $T$.
Plot $\langle E\rangle$,
$\langle \vert M\vert\rangle$, $C_V$ and $\chi$ as functions of $T$. Can
you see an indication of a phase transition? Use the absolute value
$\langle \vert M\vert\rangle$ when you evaluate $\chi$. For these production runs you should
parallelize the code using MPI (recommended). Alternatively OpenMP can be used. Use optimization flags when compiling. Perform a timing analysis of some selected runs in order to see that you get an optimal speedup when parallelizing your code.
### Project 4f): Extracting the critical temperature
Use Eq. ([3](#eq:tc)) and the exact result
$\nu=1$ in order to estimate $T_C$ in the thermodynamic limit
$L\rightarrow \infty$
using your simulations with $L=40$, $L=60$, $L=80$ and $L=100$
The exact result for the critical temperature ([after Lars Onsager](http://journals.aps.org/pr/abstract/10.1103/PhysRev.65.117)) is
$kT_C/J=2/ln(1+\sqrt{2})\approx 2.269$ with $\nu=1$.
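One simple way to carry out this extrapolation is a straight-line fit of your estimates $T_C(L)$ against $1/L$, since with $\nu=1$ the scaling relation reads $T_C(L)=T_C(L=\infty)+a/L$. The sketch below assumes you have already extracted $T_C(L)$ for each lattice size (for instance from the position of the peak in $C_V$ or $\chi$); the numbers shown are placeholders, not actual results.

```python
import numpy as np

L_values = np.array([40, 60, 80, 100])
Tc_of_L = np.array([2.29, 2.285, 2.28, 2.278])   # placeholders: replace with your own estimates

# Fit T_C(L) = T_C(inf) + a*(1/L); the intercept is the thermodynamic-limit estimate
a, Tc_inf = np.polyfit(1.0 / L_values, Tc_of_L, 1)
print("Estimated T_C(L -> infinity):", Tc_inf)
```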
## Background literature
If you wish to read more about the Ising model and statistical physics here are three suggestions.
* [M. Plischke and B. Bergersen](http://www.worldscientific.com/worldscibooks/10.1142/5660), *Equilibrium Statistical Physics*, World Scientific, see chapters 5 and 6.
* [D. P. Landau and K. Binder](http://www.cambridge.org/no/academic/subjects/physics/computational-science-and-modelling/guide-monte-carlo-simulations-statistical-physics-4th-edition?format=HB), *A Guide to Monte Carlo Simulations in Statistical Physics*, Cambridge, see chapters 2,3 and 4.
* [M. E. J. Newman and T. Barkema](https://global.oup.com/academic/product/monte-carlo-methods-in-statistical-physics-9780198517979?cc=no&lang=en&), *Monte Carlo Methods in Statistical Physics*, Oxford, see chapters 3 and 4.
## Introduction to numerical projects
Here follows a brief recipe and recommendation on how to write a report for each
project.
* Give a short description of the nature of the problem and the eventual numerical methods you have used.
* Describe the algorithm you have used and/or developed. Here you may find it convenient to use pseudocoding. In many cases you can describe the algorithm in the program itself.
* Include the source code of your program. Comment your program properly.
* If possible, try to find analytic solutions, or known limits in order to test your program when developing the code.
* Include your results either in figure form or in a table. Remember to label your results. All tables and figures should have relevant captions and labels on the axes.
* Try to evaluate the reliability and numerical stability/precision of your results. If possible, include a qualitative and/or quantitative discussion of the numerical stability, eventual loss of precision etc.
* Try to give an interpretation of your results in your answers to the problems.
* Critique: if possible include your comments and reflections about the exercise, whether you felt you learnt something, ideas for improvements and other thoughts you've made when solving the exercise. We wish to keep this course at the interactive level and your comments can help us improve it.
* Try to establish a practice where you log your work at the computerlab. You may find such a logbook very handy at later stages in your work, especially when you don't properly remember what a previous test version of your program did. Here you could also record the time spent on solving the exercise, various algorithms you may have tested or other topics which you feel worthy of mentioning.
## Format for electronic delivery of report and programs
The preferred format for the report is a PDF file. You can also use DOC or postscript formats or as an ipython notebook file. As programming language we prefer that you choose between C/C++, Fortran2008 or Python. The following prescription should be followed when preparing the report:
* Use Devilry to hand in your projects, log in at <http://devilry.ifi.uio.no> with your normal UiO username and password and choose either 'fys3150' or 'fys4150'. There you can load up the files within the deadline.
* Upload **only** the report file! For the source code file(s) you have developed please provide us with your link to your github domain. The report file should include all of your discussions and a list of the codes you have developed. Do not include library files which are available at the course homepage, unless you have made specific changes to them.
* In your git repository, please include a folder which contains selected results. These can be in the form of output from your code for a selected set of runs and input parameters.
* In this and all later projects, you should include tests (for example unit tests) of your code(s).
* Comments from us on your projects, approval or not, corrections to be made etc can be found under your Devilry domain and are only visible to you and the teachers of the course.
Finally,
we encourage you to work together in pairs. Optimal working groups consist of
2-3 students.
## How to install openmpi and/or OpenMP on your PC/laptop
If you use your own laptop, for linux/ubuntu users, you need to install two packages (alternatively use the synaptic package manager)
sudo apt-get install libopenmpi-dev
sudo apt-get install openmpi-bin
For OS X users, install brew (after having installed xcode and gcc, needed for the
gfortran compiler of openmpi) and then run
brew install open-mpi
When compiling from the command line, depending on your choice of programming language you need to compile and link as for example
mpic++ -O3 -o <executable> <programname.cpp>
if you use c++ (alternatively mpicxx) and
mpif90 -O3 -o <executable> <programname.f90>
if you use Fortran90.
When running an executable, run as
mpirun -n 10 ./<executable>
where -n indicates the number of processes, 10 here.
With openmpi installed, when using Qt, add to your .pro file the instructions at [Svenn-Arne Dragly's site](http://dragly.org/2012/03/14/developing-mpi-applications-in-qt-creator/)
You may need to tell Qt where openmpi is stored.
For the machines at the computer lab, openmpi is located at /usr/lib64/openmpi/bin
Add to your .bashrc file the following
export PATH=/usr/lib64/openmpi/bin:$PATH
For Windows users we recommend to follow the instructions at the [Open MPI site](https://www.open-mpi.org/software/ompi/v1.6/ms-windows.php).
If you use OpenMP, for linux users, compile and link with for example
c++ -O3 -fopenmp -o <executable> <programname.cpp>
For OS X users, you need to install libomp using brew, that is
brew install clang-omp
and then compile and link with for example
c++ -O3 -lomp -o <executable> <programname.cpp>
If you program in Fortran and use **gfortran**, compile as for example
gfortran -O3 -fopenmp -o <executable> <programname.f90>
If you have access to Intel's **ifort** compiler, compile as
ifort -O3 -fopenmp -o <executable> <programname.f90>
```python
%pylab inline
```
Populating the interactive namespace from numpy and matplotlib
```python
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import proj3d, Axes3D
```
```python
from sympy.parsing.sympy_parser import parse_expr
from sympy import Matrix, symbols, expand
```
```python
from matplotlib.patches import FancyArrowPatch
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
```
```python
P = parse_expr('Matrix([0,0])')
u = parse_expr('Matrix([-1,-1])')
#f = parse_expr('((x+2)**2 + x*y + y**2 + 10) / ((x+2)**2 + y**2 + 1)')
f = parse_expr('x**2 + y**3')
if not ('Matrix' in str(type(P)) and P.shape == (2, 1)):
raise ValueError('Point P must be a 2x1 matrix.')
if len(P.free_symbols) != 0:
raise ValueError('Point P cannot contain any variables.')
if not ('Matrix' in str(type(u)) and u.shape == (2, 1)):
raise ValueError('Vector u must be a 2x1 matrix.')
if len(u.free_symbols) != 0:
raise ValueError('Vector u cannot contain any variables.')
if len(f.free_symbols) != 2:
raise ValueError('Function f requires two variables.')
variables = [str(s) for s in f.free_symbols]
variables.sort()
x, y = variables[0], variables[1]
t = symbols('t')
grad = Matrix([f.diff(x), f.diff(y)])
P_grad = grad.subs(x, P[0] + t).subs(y, P[1] + t)
Df = Matrix(np.dot(P_grad.T, u))
# create surface plot
fig = plt.figure(figsize=(12,12))
plt.subplot(211)
xs = np.linspace(-4, 4, 32)
ys = np.linspace(-4, 4, 32)
X, Y = np.meshgrid(xs, ys)
zs = np.array([expand(f).subs(x, xp).subs(y, yp).evalf() for xp, yp in zip(np.ravel(X), np.ravel(Y))]).astype('float')
Z = zs.reshape(X.shape)
minz = np.floor(np.min(Z))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, cmap=cm.magma, lw=3, alpha=0.3)
ax.contour(X, Y, Z, 10, lw=3, cmap="magma", linestyles="solid", offset=minz)
# plot directional derivative
px, py, pz = P[0], P[1], expand(f).subs(x, P[0]).subs(y, P[1]).evalf()
dx, dy, dz = u[0] + px, u[1] + py, expand(f).subs(x, u[0] + px).subs(y, u[1] + py).evalf()
plot([px], [py], [pz], 'o', zorder=3, c='purple', markersize=8)
ax.add_artist(Arrow3D([float(px), float(u[0])], [float(py), float(u[1])], [minz]*2, mutation_scale=15, lw=2, arrowstyle="-|>", ls='dashed', color="gray", zorder=5))
ax.add_artist(Arrow3D([float(px), float(px)], [float(py), float(py)], [minz, float(pz)], mutation_scale=15, lw=2, ls='dashed', arrowstyle="-", color="gray", zorder=5))
```
```python
P[0]
```
0
```python
```
# Mine-Sweeper and Neural Networks
## Getting Started
The goals of this project were to gain experience in trying to translate a problem into one solvable with neural networks. Beating a game of Mine-Sweeper, through predicting mine spaces, is not something that can be solved with simple iterative functions, so neural nets must be used. Even using neural nets, the problem cannot be completely “solved”, as it is NP-complete, but with the proper training a net can reach a point where it reliably does fairly well.
The data from Dataproduction.ipynb will be stored in "test.csv" and "train.csv". We will also need to import sympy, numpy, and keras. We will load the files into X and Y.
```python
import sympy as sp
import numpy as np
import keras
# '/path/to/train.csv'
X=np.loadtxt('/home/djc6w/S20-team4-project/Project Stuff/train.csv', delimiter = ',')
X=X.astype('float32')
# 'path/to/test.csv'
Y=np.loadtxt('/home/djc6w/S20-team4-project/Project Stuff/test.csv', delimiter = ',')
Y=Y.astype('float32')
```
Using TensorFlow backend.
Now we need to reshape the data to make it trainable.
```python
x_train = np.zeros((X.shape[0],9,9,1))
for i in range(X.shape[0]):
x_train[i,:,:,0] = X[i].reshape(9,9)
y_train = Y
```
## The Data
Let's see what our training data looks like!
```python
temp = x_train[2,:,:,0].round(decimals=1)
display(sp.Matrix(temp))
display(sp.Matrix(y_train[2].reshape(9,9)))
```
$\displaystyle \left[\begin{matrix}0.1 & 1.0 & 0.1 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0\\1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0\\1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0\\1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 1.0\\1.0 & 1.0 & 1.0 & 1.0 & 1.0 & 0.1 & 0.1 & 0.1 & 0.1\\1.0 & 1.0 & 1.0 & 0.2 & 0.1 & 0.1 & 0.0 & 0.0 & 0.0\\1.0 & 1.0 & 1.0 & 0.1 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0\\0.1 & 0.2 & 0.2 & 0.1 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0\\0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0\end{matrix}\right]$
$\displaystyle \left[\begin{matrix}0.1 & 1.0 & 0.1 & 0.0 & 0.0 & 0.1 & 0.1 & 0.1 & 0.0\\0.1 & 0.1 & 0.2 & 0.1 & 0.2 & 0.3 & 1.0 & 0.2 & 0.0\\0.0 & 0.1 & 0.2 & 1.0 & 0.2 & 1.0 & 1.0 & 0.3 & 0.1\\0.0 & 0.1 & 1.0 & 0.3 & 0.3 & 0.3 & 0.3 & 1.0 & 0.1\\0.0 & 0.1 & 0.1 & 0.2 & 1.0 & 0.1 & 0.1 & 0.1 & 0.1\\0.1 & 0.2 & 0.2 & 0.2 & 0.1 & 0.1 & 0.0 & 0.0 & 0.0\\0.1 & 1.0 & 1.0 & 0.1 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0\\0.1 & 0.2 & 0.2 & 0.1 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0\\0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0\end{matrix}\right]$
Beautiful, 9x9 grids full of a few known values based on essentially a couple of random moves in the game.
## The Model
Now let's develop our model.
```python
model = keras.Sequential()
model.add(keras.layers.Conv2D(18, kernel_size=(6,6),
activation = 'relu',
data_format='channels_last',
input_shape=[x_train.shape[1],
x_train.shape[2],
x_train.shape[3]]))
model.add(keras.layers.Conv2D(3,(3,3),activation='relu'))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(162,activation='relu'))
model.add(keras.layers.Dense(81, activation='sigmoid'))
model.compile(loss=keras.losses.binary_crossentropy,
optimizer=keras.optimizers.SGD(lr=0.01),
metrics=['accuracy'])
model.summary()
```
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_1 (Conv2D) (None, 4, 4, 18) 666
_________________________________________________________________
conv2d_2 (Conv2D) (None, 2, 2, 3) 489
_________________________________________________________________
flatten_1 (Flatten) (None, 12) 0
_________________________________________________________________
dense_1 (Dense) (None, 162) 2106
_________________________________________________________________
dense_2 (Dense) (None, 81) 13203
=================================================================
Total params: 16,464
Trainable params: 16,464
Non-trainable params: 0
_________________________________________________________________
For our demo, the weights of a previously trained model are available to load. This is only needed if you want to skip training and view the results directly.
```python
#Uncomment and run this to load weights from the previously trained model
#Make sure to adjust the path to where you have it stored
#model.load_weights('/home/djc6w/S20-team4-project/Project Stuff/model.h5')
```
The code below is used to display a plot model of our convolutional network.
```python
from keras.utils.vis_utils import plot_model
# Visualization
plot_model(model,to_file='encdec.png',show_shapes=True,expand_nested=True)
```
## Training
Now let's train! It is recommended that you train this net with a small batch size (1-10) and in several shorter runs of epochs spread over time, rather than one long run, in order to avoid instability from sudden, unexpected jumps in the loss. Be patient though! It may take a good number of training sessions to see satisfying results.
```python
batch_size = 4
epochs = 300
history = model.fit(x_train,y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1)
```
This will display the accuracy and loss curves for the model in its currently trained state.
```python
import matplotlib.pyplot as plt
plt.figure(1)
plt.subplot(211)
plt.plot(history.history['accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train'],loc='upper left')
plt.subplot(212)
plt.plot(history.history['loss'])
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['train'], loc='upper left')
plt.tight_layout()
plt.show()
```
```python
score = model.evaluate(x_train, Y, verbose=1)
print('Test loss: ', score[0])
print('Test accuracy: ', score[1])
```
5339/5339 [==============================] - 0s 53us/step
Test loss: 0.2663592856895792
Test accuracy: 0.46650430560112
## Results
Below, use this to display what the net is actually doing in comparison to the reality of the board state.
The first matrix shown is the incomplete boardstate that was fed into the net. Keep in mind that in the training matrix, the 1s are representative of completely unrevealed spaces, while in the predictions and the final board, the 1s are representative of known mines.
These matrices are made up of probabilities, so when the net predicts say a 0.8 then it is stating that it believes there is an 80% chance that a mine is in that space.
The second matrix shown is the predictions made by the net, aiming to guess the completed board state.
The third matrix shown is the completed board state.
```python
preds = model.predict(x_train[:,:,:,:]).astype("float32")
preds = preds.round(decimals=1)
temp = x_train[42,:,:,0].round(decimals=1)
print("Play Area")
display(sp.Matrix(temp))
print("Predicted Values")
display(sp.Matrix(preds[42].reshape(9,9)))
print("Actual Values")
display(sp.Matrix(y_train[42].reshape(9,9)))
```
Play Area
$\displaystyle \left[\begin{matrix}0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.1 & 1.0 & 0.1\\0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.1 & 0.2 & 0.2 & 0.1\\0.0 & 0.0 & 0.1 & 0.1 & 0.1 & 0.1 & 1.0 & 0.1 & 0.0\\0.0 & 0.0 & 0.1 & 1.0 & 0.1 & 0.1 & 0.1 & 0.1 & 0.0\\0.0 & 0.0 & 0.1 & 0.1 & 0.2 & 0.1 & 0.1 & 0.0 & 0.0\\0.1 & 0.1 & 0.1 & 0.0 & 0.2 & 1.0 & 0.3 & 0.1 & 0.1\\0.2 & 1.0 & 0.2 & 0.1 & 0.2 & 1.0 & 0.3 & 1.0 & 0.1\\1.0 & 0.3 & 1.0 & 0.1 & 0.2 & 0.2 & 0.3 & 0.1 & 0.1\\0.1 & 0.2 & 0.1 & 0.1 & 0.1 & 1.0 & 1.0 & 1.0 & 1.0\end{matrix}\right]$
Predicted Values
$\displaystyle \left[\begin{matrix}0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.2 & 1.0 & 0.1\\0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.1 & 0.2 & 0.3 & 0.3\\0.0 & 0.0 & 0.2 & 0.1 & 0.1 & 0.1 & 0.8 & 0.1 & 0.0\\0.0 & 0.0 & 0.1 & 0.9 & 0.2 & 0.1 & 0.1 & 0.0 & 0.0\\0.0 & 0.0 & 0.0 & 0.1 & 0.1 & 0.2 & 0.1 & 0.1 & 0.0\\0.1 & 0.2 & 0.2 & 0.0 & 0.1 & 0.8 & 0.2 & 0.1 & 0.1\\0.3 & 0.9 & 0.2 & 0.2 & 0.4 & 0.8 & 0.3 & 1.0 & 0.3\\1.0 & 0.2 & 0.9 & 0.3 & 0.3 & 0.4 & 0.5 & 0.1 & 0.2\\0.2 & 0.3 & 0.0 & 0.1 & 0.1 & 0.9 & 0.1 & 0.0 & 0.0\end{matrix}\right]$
Actual Values
$\displaystyle \left[\begin{matrix}0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.1 & 1.0 & 0.1\\0.0 & 0.0 & 0.0 & 0.0 & 0.0 & 0.1 & 0.2 & 0.2 & 0.1\\0.0 & 0.0 & 0.1 & 0.1 & 0.1 & 0.1 & 1.0 & 0.1 & 0.0\\0.0 & 0.0 & 0.1 & 1.0 & 0.1 & 0.1 & 0.1 & 0.1 & 0.0\\0.0 & 0.0 & 0.1 & 0.1 & 0.2 & 0.1 & 0.1 & 0.0 & 0.0\\0.1 & 0.1 & 0.1 & 0.0 & 0.2 & 1.0 & 0.3 & 0.1 & 0.1\\0.2 & 1.0 & 0.2 & 0.1 & 0.2 & 1.0 & 0.3 & 1.0 & 0.1\\1.0 & 0.3 & 1.0 & 0.1 & 0.2 & 0.2 & 0.3 & 0.1 & 0.1\\0.1 & 0.2 & 0.1 & 0.1 & 0.1 & 1.0 & 0.1 & 0.0 & 0.0\end{matrix}\right]$
## The Game
This is where the game implementation begins.
These functions are used to predict a move to make through the net and convert it into an acceptable input for the game.
```python
def min_net(pred, env):
    # Return the coordinates of the unrevealed cell (env value 1.0) with the
    # lowest predicted mine probability, relaxing the probability threshold
    # in steps of 0.1 until a candidate is found.
    retx = -1
rety = -1
temp = 1.0
for lorge in range(9):
for x in range(9):
for y in range(9):
if(float(env[x,y])==float(1.0)):
if(float(pred[x,y]) < float(lorge*0.1)):
if(float(pred[x,y])<temp):
retx = x
rety = y
temp = pred[x,y]
if(retx > -1):
return(retx,rety)
return(x,y)
#print(pred.shape)
def coordinate_net(y,x):
#print(x)
#print(y)
a = chr(x+97)
#print(a)
b = chr(y+49)
#print(a+b)
return (a+b)
#this,that = min_net(sp.Matrix(preds[2].reshape(9,9)), sp.Matrix(x_train[2,:,:,0]))
#print(this,that)
#coordinate_net(this,that)
```
This is the code for the actual game. It generates a simple implementation of Minesweeper, represented through text.
At the end of this code block, the playgame() function is called, beginning the net's attempt at playing through the given boardstate.
```python
import numpy as np
import random
import csv
import re
import time
import random
from string import ascii_lowercase
def setupgrid(gridsize, start, numberofmines):
# Initialize empty grid
emptygrid = [['0' for i in range(gridsize)] for i in range(gridsize)]
mines = getmines(emptygrid, start, numberofmines)
# Label mine spaces
for i, j in mines:
emptygrid[i][j] = 'X'
grid = getnumbers(emptygrid)
return (grid, mines)
# Output the grid
def showgrid(grid):
gridsize = len(grid)
horizontal = ' ' + (4 * gridsize * '-') + '-'
# Print top column letters
toplabel = ' '
for i in ascii_lowercase[:gridsize]:
toplabel = toplabel + i + ' '
print(toplabel + '\n' + horizontal)
# Print left row numbers
for idx, i in enumerate(grid):
row = '{0:2} |'.format(idx + 1)
for j in i:
row = str(row) + ' ' + str(j) + ' |'
print(row + '\n' + horizontal)
print('')
def getrandomcell(grid):
gridsize = len(grid)
a = random.randint(0, gridsize - 1)
b = random.randint(0, gridsize - 1)
return (a, b)
# Used to initialize neighboring cells / safe cells
def getneighbors(grid, rowno, colno):
gridsize = len(grid)
neighbors = []
for i in range(-1, 2):
for j in range(-1, 2):
if i == 0 and j == 0:
continue
elif -1 < (rowno + i) < gridsize and -1 < (colno + j) < gridsize:
neighbors.append((rowno + i, colno + j))
return neighbors
# Once the neighbors are initialized, this fills in random remaining spaces,
# excluding the starting cell and neighbors, up to the requested number of mines
def getmines(grid, start, numberofmines):
mines = []
neighbors = getneighbors(grid, *start) # initialize unavailable spaces
for i in range(numberofmines):
cell = getrandomcell(grid)
while cell == start or cell in mines or cell in neighbors:
cell = getrandomcell(grid)
mines.append(cell)
return mines
def getnumbers(grid):
for rowno, row in enumerate(grid):
for colno, cell in enumerate(row):
if cell != 'X':
# Gets the values of the neighbors
values = [grid[r][c] for r, c in getneighbors(grid,
rowno, colno)]
# Counts how many are mines
grid[rowno][colno] = str(values.count('X'))
return grid
def showcells(grid, currgrid, rowno, colno):
# Exit function if the cell was already shown
if currgrid[rowno][colno] != ' ':
return
# Show current cell
currgrid[rowno][colno] = grid[rowno][colno]
# Get the neighbors if the cell is empty
if grid[rowno][colno] == '0':
for r, c in getneighbors(grid, rowno, colno):
# Repeat function for each neighbor that doesn't have a flag
if currgrid[r][c] != 'F':
showcells(grid, currgrid, r, c)
def playagain():
choice = input('Play again? (y/n): ')
return choice.lower() == 'y'
def parseinput(inputstring, gridsize, helpmessage):
cell = ()
flag = False
message = "Invalid cell. " + helpmessage
# Reformat input for more flexible acceptance
pattern = r'([a-{}])([0-9]+)(f?)'.format(ascii_lowercase[gridsize - 1])
validinput = re.match(pattern, inputstring)
# Enter input into the associated grid space
if inputstring == 'help':
message = helpmessage
elif validinput:
rowno = int(validinput.group(2)) - 1
colno = ascii_lowercase.index(validinput.group(1))
flag = bool(validinput.group(3))
if -1 < rowno < gridsize:
cell = (rowno, colno)
message = ''
return {'cell': cell, 'flag': flag, 'message': message}
def getmove(grid, currgrid):
if not grid:
rand = random.randrange(10)
rand2 = str(chr(random.randrange(0,9)+97))
ret = rand2 + str(rand)
return ret
for x in range(0,9):
for y in range(0,9):
if (currgrid[x][y] == ' ' and grid[x][y] != 'X'):
ret = str(chr(y+97)) + str(x+1)
return ret
def pushtest(grid):
newgrid = np.array(csvprep(grid))
with open ('test.csv', mode='a') as grid_file:
file_writer = csv.writer(grid_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(newgrid.flatten())
grid_file.close()
def pushtrain(grid):
newgrid = np.array(csvprep(grid))
with open ('train.csv', mode='a') as grid_file:
file_writer = csv.writer(grid_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(newgrid.flatten())
grid_file.close()
##############################
#Change encoding values here
##############################
def csvprep(grid):
newgrid = [row[:]for row in grid]
for x in range(0,9):
for y in range(0,9):
if grid[x][y] == ' ':
newgrid[x][y] = 1
if grid[x][y] == '0':
newgrid[x][y] = 0.0
if grid[x][y] == '1':
newgrid[x][y] = 0.1
if grid[x][y] == '2':
newgrid[x][y] = 0.2
if grid[x][y] == '3':
newgrid[x][y] = 0.3
if grid[x][y] == '4':
newgrid[x][y] = 0.4
if grid[x][y] == '5':
newgrid[x][y] = 0.5
if grid[x][y] == '6':
newgrid[x][y] = 0.6
if grid[x][y] == '7':
newgrid[x][y] = 0.7
if grid[x][y] == '8':
newgrid[x][y] = 0.8
if grid[x][y] == 'X':
newgrid[x][y] = 1
return newgrid
def promptread():
fstream=open('response.txt','r')
thing = fstream.read()
fstream.close()
return thing
#
def feedbackload(flagcheck):
fstream=open('response.txt','w')
fstream.write(flagcheck)
fstream.close()
def playgame():
count = 0
gridsize = 9
numberofmines = 10
flagcheck = True
currgrid = [[' ' for i in range(gridsize)] for i in range(gridsize)]
grid = []
flags = []
starttime = 0
helpmessage = ("Type the column followed by the row (eg. a5). "
"To put or remove a flag, add 'f' to the cell (eg. a5f).")
#showgrid(currgrid)
print(helpmessage + " Type 'help' to show this message again.\n")
while True:
minesleft = numberofmines - len(flags)
newgrid = np.array(csvprep(currgrid))
i = np.zeros((1,9,9,1))
i[0,:,:,0] = newgrid
x,y = min_net(model.predict(i).reshape(9,9),newgrid)
prompt = coordinate_net(x,y)
print(prompt)
#print(prompt)
if not prompt:
playgame()
#prompt = input('Enter the cell ({} mines left): '.format(minesleft))
#prompt=promptread()
result = parseinput(prompt, gridsize, helpmessage + '\n')
message = result['message']
cell = result['cell']
if cell:
#print('\n\n')
rowno, colno = cell
currcell = currgrid[rowno][colno]
flag = result['flag']
if not grid:
grid, mines = setupgrid(gridsize, cell, numberofmines)
if not starttime:
starttime = time.time()
if flag:
# Add a flag if the cell is empty
if currcell == ' ':
currgrid[rowno][colno] = 'F'
flags.append(cell)
# Remove the flag if there is one
elif currcell == 'F':
currgrid[rowno][colno] = ' '
flags.remove(cell)
else:
message = 'Cannot put a flag there'
# If there is a flag there, show a message
elif cell in flags:
message = 'There is a flag there'
elif grid[rowno][colno] == 'X':
print('Game Over\n')
showgrid(grid)
flagcheck=False
if playagain():
playgame()
return
elif currcell == ' ':
showcells(grid, currgrid, rowno, colno)
else:
message = "That cell is already shown"
#if set(flags) == set(mines):
# minutes, seconds = divmod(int(time.time() - starttime), 60)
# print(
# 'You Win. '
# 'It took you {} minutes and {} seconds.\n'.format(minutes,
# seconds))
# showgrid(grid)
# if playagain():
# playgame()
# return
showgrid(currgrid)
print(message)
playgame()
```
Type the column followed by the row (eg. a5). To put or remove a flag, add 'f' to the cell (eg. a5f). Type 'help' to show this message again.
i1
a b c d e f g h i
-------------------------------------
1 | | | | | 1 | 0 | 0 | 0 | 0 |
-------------------------------------
2 | | | | | 1 | 0 | 0 | 0 | 0 |
-------------------------------------
3 | | | | | 1 | 1 | 1 | 1 | 0 |
-------------------------------------
4 | | | | | | | | 2 | 0 |
-------------------------------------
5 | | | | | | | | 2 | 0 |
-------------------------------------
6 | | | | | | | | 2 | 1 |
-------------------------------------
7 | | | | | | | | | |
-------------------------------------
8 | | | | | | | | | |
-------------------------------------
9 | | | | | | | | | |
-------------------------------------
h9
a b c d e f g h i
-------------------------------------
1 | | | | | 1 | 0 | 0 | 0 | 0 |
-------------------------------------
2 | | | | | 1 | 0 | 0 | 0 | 0 |
-------------------------------------
3 | | | | | 1 | 1 | 1 | 1 | 0 |
-------------------------------------
4 | | | | | | | | 2 | 0 |
-------------------------------------
5 | | | | | | | | 2 | 0 |
-------------------------------------
6 | | | | | | | | 2 | 1 |
-------------------------------------
7 | | | | | | | | | |
-------------------------------------
8 | | | | | | | | | |
-------------------------------------
9 | | | | | | | | 1 | |
-------------------------------------
d7
a b c d e f g h i
-------------------------------------
1 | | | | | 1 | 0 | 0 | 0 | 0 |
-------------------------------------
2 | | | | | 1 | 0 | 0 | 0 | 0 |
-------------------------------------
3 | | | | | 1 | 1 | 1 | 1 | 0 |
-------------------------------------
4 | | | | | | | | 2 | 0 |
-------------------------------------
5 | | | | | | | | 2 | 0 |
-------------------------------------
6 | | | | | | | | 2 | 1 |
-------------------------------------
7 | | | | 1 | | | | | |
-------------------------------------
8 | | | | | | | | | |
-------------------------------------
9 | | | | | | | | 1 | |
-------------------------------------
a9
a b c d e f g h i
-------------------------------------
1 | | | | | 1 | 0 | 0 | 0 | 0 |
-------------------------------------
2 | 1 | 1 | 1 | | 1 | 0 | 0 | 0 | 0 |
-------------------------------------
3 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 |
-------------------------------------
4 | 0 | 0 | 0 | 0 | 0 | 2 | | 2 | 0 |
-------------------------------------
5 | 0 | 0 | 0 | 0 | 1 | 3 | | 2 | 0 |
-------------------------------------
6 | 0 | 0 | 0 | 0 | 1 | | | 2 | 1 |
-------------------------------------
7 | 0 | 1 | 1 | 1 | 2 | | | | |
-------------------------------------
8 | 0 | 2 | | | | | | | |
-------------------------------------
9 | 0 | 2 | | | | | | 1 | |
-------------------------------------
a1
Game Over
a b c d e f g h i
-------------------------------------
1 | X | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 |
-------------------------------------
2 | 1 | 1 | 1 | X | 1 | 0 | 0 | 0 | 0 |
-------------------------------------
3 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 |
-------------------------------------
4 | 0 | 0 | 0 | 0 | 0 | 2 | X | 2 | 0 |
-------------------------------------
5 | 0 | 0 | 0 | 0 | 1 | 3 | X | 2 | 0 |
-------------------------------------
6 | 0 | 0 | 0 | 0 | 1 | X | 2 | 2 | 1 |
-------------------------------------
7 | 0 | 1 | 1 | 1 | 2 | 2 | 2 | 1 | X |
-------------------------------------
8 | 0 | 2 | X | 2 | 1 | X | 1 | 2 | 2 |
-------------------------------------
9 | 0 | 2 | X | 2 | 1 | 1 | 1 | 1 | X |
-------------------------------------
In some cases, the net gets pretty far! With enough training on some differing data sets, we may be able to complete a game!
# The Linear Classifier
```{eval-rst}
Last updated |lastupdate|
```
[Open this notebook in Colab](https://colab.research.google.com/github/vanvalenlab/bebi205/blob/master/bebi205/notebooks/linear-classifier.ipynb)
[Answer key notebook in Colab](https://colab.research.google.com/github/vanvalenlab/bebi205/blob/master/bebi205/notebooks/linear-classifier-key.ipynb)
To illustrate the workflow for training a deep learning model in a supervised manner, this notebook will walk you through the simple case of training a linear classifier to recognize images of cats and dogs. While deep learning might seem intimidating, don't worry. Its conceptual underpinnings are rooted in linear algebra and calculus - if you can perform matrix multiplication and take derivatives you can understand what is happening in a deep learning workflow.
Some code cells will be marked with
```
##########################
######## To Do ###########
##########################
```
This indicates that you are being asked to write a piece of code to complete the notebook.
## Load packages
In this cell, we load the python packages we need for this notebook.
```python
import imageio
import skimage
import sklearn.model_selection
import skimage.color
import skimage.transform
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
```
## The supervised machine learning workflow
Recall from class the conceptual workflow for a supervised machine learning project.
- First, we create a <em>training dataset</em>, a paired collection of raw data and labels where the labels contain information about the "insight" we wish to extract from the raw data.
- Once we have training data, we can then use it to train a <em>model</em>. The model is a mathematical black box - it takes in data and transforms it into an output. The model has some parameters that we can adjust to change how it performs this mapping.
- Adjusting these parameters to produce outputs that we want is called training the model. To do this we need two things. First, we need a notion of what we want the output to look like. This notion is captured by a <em>loss function</em>, which compares model outputs and labels and produces a score telling us if the model did a "good" job or not on our given task. By convention, low values of the loss function's output (e.g. the loss) correspond to good performance and high values to bad performance. We also need an <em>optimization algorithm</em>, which is a set of rules for how to adjust the model parameters to reduce the loss
- Using the training data, loss function, and optimization algorithm, we can then train the model
- Once the model is trained, we need to evaluate its performance to see how well it performs and what kinds of mistakes it makes. We can also perform this kind of monitoring during training (this is actually a standard practice).
Because this workflow defines the lifecycle of most machine learning projects, this notebook is structured to go over each of these steps while constructing a linear classifier.
## Create training data
The starting point of every machine learning project is data. In this case, we will start with a collection of grayscale images of cats and dogs. Each image is a multi-dimensional array with size (128, 128, 1) - the first two dimensions are spatial while the last is a channel dimension (one channel because it is a grayscale image - for an RGB image there would be 3 channels). The dataset that we are working with is a subset of [Kaggle's Dogs vs. Cats dataset](https://www.kaggle.com/c/dogs-vs-cats/overview).
```python
!wget https://storage.googleapis.com/datasets-spring2021/cats-and-dogs-bw.npz
```
```python
# Load data from the downloaded npz file
with np.load('cats-and-dogs-bw.npz') as f:
X = f['X']
y = f['y']
print(X.shape, y.shape)
```
In the previous cell, you probably observed that there are 4 dimensions rather than the 3 you might have been expecting. This is because while each image is (128, 128, 1), the full dataset has many images. The different images are stacked along the first dimension. The full size of the training images is (# images, 128, 128, 1) - the first dimension is often called the batch dimension.
```python
##########################
######## To Do ###########
##########################
# Use matplotlib to visualze several images randomly drawn from the dataset
# For each image, set the title to be the y label for the image
```
For this exercise, we will want to flatten the training data into a vector.
```python
# Flatten the images into vectors of size (# images, 16384, 1)
X = np.reshape(X, (-1, 128*128, 1))
print(X.shape)
```
### Split the training dataset into training, validation, and testing datasets
How do we know how well our model is doing? A common practice to evaluate models is to evaluate them on splits of the original training dataset. Splitting the data is important, because we want to see how models perform on data that wasn't used to train them. This splitting practice usually produces 3 splits.
- The <em>training</em> dataset used to train the model
- A <em>validation </em> dataset used to evaluate the model during training.
- A held out <em>testing</em> dataset used to evaluate the final trained version of the model
While there is no hard and fast rule, 80%, 10%, 10% splits are a reasonable starting point.
```python
# Split the dataset into training, validation, and testing splits
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, train_size=0.75)
```
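The call above only produces a training and a testing split; if you also want the validation split described earlier, one option (a sketch, with illustrative split fractions and variable names that are not used by the rest of this notebook) is to apply `train_test_split` twice:
```python
# Illustrative 80/10/10 three-way split (not wired into the rest of the notebook)
X_tr, X_hold, y_tr, y_hold = sklearn.model_selection.train_test_split(X, y, train_size=0.8)
X_val, X_te, y_val, y_te = sklearn.model_selection.train_test_split(X_hold, y_hold, train_size=0.5)
```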
## The linear classifier
The linear classifier produces class scores that are a linear function of the pixel values. Mathematically, this can be written as $\vec{y} = W \vec{x}$, where $\vec{y}$ is the vector of class scores, $W$ is a matrix of weights and $\vec{x}$ is the image vector. The shape of the weights matrix is determined by the number of classes and the length of the image vector. In this case $W$ is 2 by 16384. Our learning task is to find a set of weights that maximize our performance on our classification task. We will solve this task by doing the following steps
- Randomly initializing a set of weights
- Defining a loss function that measures our performance on the classification task
- Use stochastic gradient descent to find "optimal" weights
### Create the matrix of weights
Properly initializing weights is essential for getting deep learning methods to work correctly. The two most common initialization methods you'll see in this class are [glorot uniform (also known as Xavier) initialization](http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf?hc_location=ufi]) and [he initialization](http://openaccess.thecvf.com/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) - both papers are worth reading. For this exercise, we will randomly initialize weights by using glorot uniform initialization. In this initialization method, we sample our weights according to the formula
\begin{equation}
W_{ij} \sim U\left[ -\frac{1}{\sqrt{n}}, \frac{1}{\sqrt{n}} \right],
\end{equation}
where $n$ is the number of columns in the weight matrix (16384 in our case).
Let's create the linear classifier using object-oriented programming, which will help with organization.
```python
class LinearClassifier(object):
def __init__(self, image_size=16384):
self.image_size=image_size
# Initialize weights
self._initialize_weights()
def _initialize_weights(self):
##########################
######## To Do ###########
##########################
# Randomly initialize the weights matrix acccording to the glorot uniform initialization
self.W = # Add weights matrix here
```
### Apply the softmax transform to complete the model outputs
Our LinearClassifier class needs a method to perform predictions - which in our case is performing matrix multiplication and then applying the softmax transform. Recall from class that the softmax transform is given by
\begin{equation}
softmax(y_i) = \frac{e^{y_i}}{\sum_j e^{y_j}}
\end{equation}
and provides a convenient way to convert our class scores into probabilities
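As a quick numerical sanity check of this formula (a standalone sketch, separate from the `predict` method you are asked to complete below):
```python
import numpy as np

scores = np.array([2.0, -1.0])                    # example class scores y_i
probs = np.exp(scores) / np.sum(np.exp(scores))   # softmax
print(probs, probs.sum())                         # the probabilities sum to 1
```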
```python
##########################
######## To Do ###########
##########################
# Complete the predict function below to predict a label y from an input X
# Pay careful attention to the shape of your data at each step
def predict(self, X, epsilon=1e-5):
y = # matrix multiplication
y = # Apply softmax
return y
# Assign methods to class
setattr(LinearClassifier, 'predict', predict)
```
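For reference, a possible completion of the two To Do lines, assuming `X` is a flattened array of shape (batch, image_size); subtracting the maximum score before exponentiating is a standard trick for numerical stability:
```python
# Reference sketch of predict: linear class scores followed by a softmax
def predict(self, X, epsilon=1e-5):
    # Matrix multiplication: (batch, image_size) x (image_size, 2) -> (batch, 2)
    y = np.dot(X, self.W.T)
    # Apply softmax: exponentiate shifted scores and normalize each row
    y = np.exp(y - np.max(y, axis=-1, keepdims=True))
    y = y / (np.sum(y, axis=-1, keepdims=True) + epsilon)
    return y
```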
Now let's see what happens when we try to predict the class of images in our training dataset using randomly initialized weights.
```python
lc = LinearClassifier()
fig, axes = plt.subplots(4, 2, figsize=(20,20))
for i in range(8):
# Get an example image
X_sample = X[[i],...]
# Reshape flattened vector to image
X_reshape = np.reshape(X_sample, (128,128))
# Predict the label
y_pred = lc.predict(X_sample)
# Display results
axes.flatten()[i].imshow(X_reshape, cmap='gray')
axes.flatten()[i].set_title('Label ' + str(y[i]) +', Prediction ' + str(y_pred))
```
What do you notice about the initial results of the model?
## Stochastic gradient descent
To train this model, we will use stochastic gradient descent. In its simplest version, this algorithm consists of the following steps:
- Select several images from the training dataset at random
- Compute the gradient of the loss function with respect to the weights, given the selected images
- Update the weights using the update rule $W_{ij} \rightarrow W_{ij} - lr\frac{\partial loss}{\partial W_{ij}}$
Recall that the origin of this update rule is from multivariable calculus - the gradient tells us the direction in which the loss function increases the most. So to minimize the loss function we move in the opposite direction of the gradient.
Also recall from the course notes that for this problem we can compute the gradient analytically. The gradient is given by
\begin{equation}
\frac{\partial loss}{\partial W_{ij}} = \left(p_i - 1(i \mbox{ is correct}) \right)x_j,
\end{equation}
where $1$ is an indicator function that is 1 if the statement inside the parentheses is true and 0 if it is false.
```python
def grad(self, X, y):
# Get class probabilities
p = self.predict(X)
# Compute class 0 gradients
temp_0 = np.expand_dims(p[...,0] - (1-y), axis=-1)
grad_0 = temp_0 * X[...,0]
# Compute class 1 gradients
temp_1 = np.expand_dims(p[...,1] - y, axis=-1)
grad_1 = temp_1 * X[...,0]
gradient = np.stack([grad_0, grad_1], axis=1)
return gradient
def loss(self, X, y_true):
y_pred = self.predict(X)
# Convert y_true to one hot
y_true = np.stack([y_true, 1-y_true], axis=-1)
loss = np.mean(-y_true * np.log(y_pred))
return loss
def fit(self, X_train, y_train, n_epochs, batch_size=1, learning_rate=1e-5):
# Iterate over epochs
for epoch in range(n_epochs):
n_batches = np.int(np.floor(X_train.shape[0] / batch_size))
# Generate random index
index = np.arange(X_train.shape[0])
np.random.shuffle(index)
# Iterate over batches
loss_list = []
for batch in range(n_batches):
beg = batch*batch_size
end = (batch+1)*batch_size if (batch+1)*batch_size < X_train.shape[0] else -1
X_batch = X_train[beg:end]
y_batch = y_train[beg:end]
# Compute the loss
loss = self.loss(X_batch, y_batch)
loss_list.append(loss)
# Compute the gradient
gradient = self.grad(X_batch, y_batch)
# Compute the mean gradient over all the example images
gradient = np.mean(gradient, axis=0, keepdims=False)
# Update the weights
self.W -= learning_rate * gradient
return loss_list
# Assign methods to class
setattr(LinearClassifier, 'grad', grad)
setattr(LinearClassifier, 'loss', loss)
setattr(LinearClassifier, 'fit', fit)
lc = LinearClassifier()
loss = lc.fit(X_train, y_train, n_epochs=10, batch_size=16)
```
## Evaluate the model
Benchmarking performance is a critical part of the model development process. For this problem, we will use 3 different benchmarks
- Recall: the fraction of positive examples detected by a model. Mathematically, for a two-class classification problem, recall is calculated as (True positives)/(True positives + False negatives).
- Precision: the percentage of positive predictions from a model that are true. Mathematically, for a two-class prediction problem, precision is calculated as (True positives)/(True positives + False positives).
- F1 score: The harmonic mean between the recall and precision
We will evaluate these metrics on both the training dataset (the examples used during training) and our testing dataset (the examples that we held out). We can also use a confusion matrix to visualize the prediction results.
```python
# Visualize some predictions
fig, axes = plt.subplots(4, 2, figsize=(20,20))
for i in range(8):
# Get an example image
X_sample = X_test[[i],...]
# Reshape flattened vector to image
X_reshape = np.reshape(X_sample, (128,128))
# Predict the label
y_pred = lc.predict(X_sample)
# Display results
axes.flatten()[i].imshow(X_reshape, cmap='gray')
    axes.flatten()[i].set_title('Label ' + str(y_test[i]) + ', Prediction ' + str(y_pred))
```
```python
# Generate predictions
y_pred = lc.predict(X_train)
y_pred = np.argmax(y_pred, axis=-1)
# Compute metrics
recall = sklearn.metrics.recall_score(y_train, y_pred)
precision = sklearn.metrics.precision_score(y_train, y_pred)
f1 = sklearn.metrics.f1_score(y_train, y_pred)
print('Training Recall: {}'.format(recall))
print('Training Precision: {}'.format(precision))
print('Training F1 Score: {}'.format(f1))
# Generate predictions
y_pred = lc.predict(X_test)
y_pred = np.argmax(y_pred, axis=-1)
# Compute metrics
recall = sklearn.metrics.recall_score(y_test, y_pred)
precision = sklearn.metrics.precision_score(y_test, y_pred)
f1 = sklearn.metrics.f1_score(y_test, y_pred)
print('Testing Recall: {}'.format(recall))
print('Testing Precision: {}'.format(precision))
print('Testing F1 Score: {}'.format(f1))
```
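The confusion matrix mentioned above can be obtained from `sklearn` as well; a short sketch for the held-out test split:
```python
# Confusion matrix on the test set (rows: true labels, columns: predicted labels)
y_pred = np.argmax(lc.predict(X_test), axis=-1)
print(sklearn.metrics.confusion_matrix(y_test, y_pred))
```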
## Exercise
Try running your training algorithm a few times and record the results. What do you note about the overall performance? What about the differences between training runs? What about the difference in performance when evaluated on training data as opposed to validation data?
```julia
using Catalyst
# NOTE: both models MUST preserve the same ordering of reactions in order to detect
# how the nonlinear reactions are to be transformed using LMA
rn_nonlinear = @reaction_network begin
σ_b, g + p → 0
σ_u*(1-g), 0 ⇒ g + p
ρ_u, g → g + p
ρ_b*(1-g), 0 ⇒ p
1, p → 0
end σ_b σ_u ρ_b ρ_u
rn_linear = @reaction_network begin
σ_b_LMA, g → 0 # typing ̄σ_b is not allowed it seems
σ_u*(1-g), 0 ⇒ g
ρ_u, g → g+p
(ρ_b*(1-g)), 0 ⇒ p
1, p → 0
end σ_b_LMA σ_u ρ_b ρ_u
```
\begin{align}
\require{mhchem}
\ce{ g &<=>[\sigma_{b\_LMA}][\sigma_{u} \left( 1 - g\left( t \right) \right)] \varnothing}\\
\ce{ g &->[\rho_{u}] g + p}\\
\ce{ \varnothing &<=>[\rho_{b} \left( 1 - g\left( t \right) \right)][1] p}
\end{align}
```julia
using MomentClosure
# NOTE: we have to provide the indices of binary variables in the system
# as they are ordered in the *nonlinear* GRN.
# The distinction here between linear and nonlinear GRNs is important as in some cases
# the internal ordering of variables of the two Catalyst models can differ
@parameters t
@variables g(t)
binary_vars = [speciesmap(rn_nonlinear)[g]]
LMA_eqs, effective_params = linear_mapping_approximation(rn_nonlinear, rn_linear, binary_vars, combinatoric_ratelaw=false)
display(effective_params)
```
OrderedCollections.OrderedDict{Any, Any} with 1 entry:
σ_b_LMA => σ_b*μ₁₁(t)*(μ₁₀(t)^-1)
```julia
using Latexify
latexify(LMA_eqs)
```
\begin{align*}
\frac{d\mu{_{10}}}{dt} =& \sigma_{u} - \sigma_{b} \mu{_{11}} - \sigma_{u} \mu{_{10}} \\
\frac{d\mu{_{01}}}{dt} =& \rho_{b} + \rho_{u} \mu{_{10}} - \mu{_{01}} - \rho_{b} \mu{_{10}} \\
\frac{d\mu{_{11}}}{dt} =& \rho_{u} \mu{_{10}} + \sigma_{u} \mu{_{01}} - \mu{_{11}} - \sigma_{u} \mu{_{11}} - \sigma_{b} \mu{_{10}}^{-1} \mu{_{11}}^{2} \\
\frac{d\mu{_{02}}}{dt} =& \rho_{b} + \rho_{u} \mu{_{10}} + 2 \rho_{b} \mu{_{01}} + 2 \rho_{u} \mu{_{11}} + \mu{_{01}} - 2 \mu{_{02}} - \rho_{b} \mu{_{10}} - 2 \rho_{b} \mu{_{11}}
\end{align*}
```julia
println(latexify(LMA_eqs))
```
\begin{align*}
\frac{d\mu{_{10}}}{dt} =& \sigma_{u} - \sigma_{b} \mu{_{11}} - \sigma_{u} \mu{_{10}} \\
\frac{d\mu{_{01}}}{dt} =& \rho_{b} + \rho_{u} \mu{_{10}} - \mu{_{01}} - \rho_{b} \mu{_{10}} \\
\frac{d\mu{_{11}}}{dt} =& \rho_{u} \mu{_{10}} + \sigma_{u} \mu{_{01}} - \mu{_{11}} - \sigma_{u} \mu{_{11}} - \sigma_{b} \mu{_{10}}^{-1} \mu{_{11}}^{2} \\
\frac{d\mu{_{02}}}{dt} =& \rho_{b} + \rho_{u} \mu{_{10}} + 2 \rho_{b} \mu{_{01}} + 2 \rho_{u} \mu{_{11}} + \mu{_{01}} - 2 \mu{_{02}} - \rho_{b} \mu{_{10}} - 2 \rho_{b} \mu{_{11}}
\end{align*}
```julia
using OrdinaryDiffEq, Sundials, Plots
# [g, p] as in species(rn_nonlinear)
u₀ = [1.0, 0.001]
p = [0.004, 0.25, 25.0, 60.0]
tspan = (0., 15.)
dt = 0.1
u₀map = deterministic_IC(u₀, LMA_eqs)
oprob_LMA = ODEProblem(LMA_eqs, u₀map, tspan, p)
sol_LMA = solve(oprob_LMA, CVODE_BDF(), saveat=dt)
plot(sol_LMA, vars=(0, [2]), label="LMA", ylabel="⟨p⟩", xlabel="time", fmt="svg", guidefontsize=12)
```
```julia
#savefig("../docs/src/assets/LMA_feedback_loop_mean_protein_number.svg")
```
```julia
using FiniteStateProjection
fsp_sys = FSPSystem(rn_nonlinear, combinatoric_ratelaw=false)
# Truncate the state space of the system
# The gene has two states (G or G*) whereas we consider protein number from 0 to 100
state_space = [2, 201]
# The initial condition is the matrix of probabilities representing the state of the system
# We assume zero protein and the gene to be in the state G, hence the probability of this
# specific state should be set to 1 initially
u0 = zeros(state_space...)
u0[2, 1] = 1.0
# construct an ODE problem from the FSPSystem and solve it
fsp_prob = ODEProblem(fsp_sys, u0, tspan, p)
sol_FSP = solve(fsp_prob, CVODE_BDF(), saveat=dt)
# extract the 1st order raw moments from the FSP solution
μ_FSP = get_moments_FSP(sol_FSP, 1, "raw")
plot!(sol_FSP.t, μ_FSP[(0,1)], label="FSP", legend=:bottomright)
```
```julia
#savefig("../docs/src/assets/LMA+FSP_feedback_loop_mean_protein_number.svg")
```
```julia
using TaylorSeries, HypergeometricFunctions
function t_pFq(α::AbstractVector, β::AbstractVector, a::Taylor1)
order = a.order
aux = pFq(α, β, constant_term(a))
c = Taylor1(aux, order)
iszero(order) && return c
coeffs = t_pFq(α.+1, β.+1, Taylor1(a[0:end-1], a.order-1))
factor = prod(α)/prod(β)
for k in 1:length(a)-1
c[k] = sum(i * a[i] * coeffs[k-i] for i in 1:k) * factor / k
end
return c
end
```
t_pFq (generic function with 1 method)
```julia
# calculate the raw moments up to time t at a fine temporal resolution
T = 15.0
tspan = (0., T)
dt = 0.001
oprob_LMA = remake(oprob_LMA, tspan=tspan)
sol_LMA = solve(oprob_LMA, CVODE_BDF(), saveat=dt)
# rebuild the symbolic expression for the effective parameter as a function of raw moments
μ_sym = LMA_eqs.odes.states
p_sub = Pair.(LMA_eqs.odes.ps, p)
avg_σ_b_sym = collect(values(effective_params))[1]
fn = build_function(substitute(avg_σ_b_sym, p_sub), μ_sym)
avg_σ_b = eval(fn)
# evaluate the time-averaged value of the effective parameter
@time σ_b_avg = sum(avg_σ_b.(sol_LMA[:])) * dt / T
```
0.166009 seconds (303.68 k allocations: 20.226 MiB, 23.01% gc time, 99.56% compilation time)
0.20985247655841632
```julia
# need higher-precision numerics as Float64 can be unstable here due to very small numbers
# DoubleFloats is sufficient for this example and much more efficient than BigFloat
using DoubleFloats
# define the numerical values of the parameters
σ_u = p[2]; ρ_b = p[3]; ρ_u = p[4]
Σ = 1 + σ_b_avg + σ_u
ρ_Δ = ρ_b - ρ_u
n = 100 # expansion order (or max protein number to evaluate)
w₀ = -1 # value around which to expand
# compute the Taylor expansion (note the use of Double64)
w = w₀ + Taylor1(Double64, n)
@time f = σ_b_avg/(Σ-1)*exp(-T*(Σ-1))*exp(-ρ_u*w*exp(-T))*t_pFq([σ_u], [Σ], -ρ_Δ*w*exp(-T))
@time g = σ_u/(Σ-1)*exp(-ρ_u*w*exp(-T))*t_pFq([-σ_b_avg], [2-Σ], -ρ_Δ*w*exp(-T))
@time G00 = exp(ρ_b*w)*(f * t_pFq([1-σ_b_avg], [2-Σ], -ρ_Δ*w) +
g * t_pFq([1+σ_u], [Σ], -ρ_Δ*w) )
@time G11 = σ_u^(-1) * exp(ρ_b*w) * (-σ_u*f*t_pFq([-σ_b_avg], [2-Σ], -ρ_Δ*w) +
σ_b_avg*g*t_pFq([σ_u], [Σ], -ρ_Δ*w))
probs = (G00+G11).coeffs
# check that the probability distribution is more or less normalised to 1
# need higher numerical precision if not
isapprox(sum(probs), 1.0, rtol=1e-2)
```
4.622658 seconds (7.25 M allocations: 377.657 MiB, 2.09% gc time, 99.73% compilation time)
0.110327 seconds (42.19 k allocations: 1.676 MiB, 77.20% gc time, 12.72% compilation time)
0.081298 seconds (85.06 k allocations: 3.613 MiB)
0.085832 seconds (85.81 k allocations: 3.670 MiB, 3.77% compilation time)
true
```julia
plot(0:n, probs, xlabel="n", ylabel="P(n, t=4)", label="LMA", fmt="svg")
# plot the FSP probability of protein number by marginalising over the gene states
plot!(0:n, sum(sol_FSP[:, 151], dims=1)'[1:n+1], label="FSP")
```
```julia
#savefig("../docs/src/assets/LMA+FSP_feedback_loop_distribution.svg")
```
# Section 2.1 $\quad$ Echelon Form of a Matrix
## Definitions
An $m\times n$ matrix $A$ is said to be in $\underline{\hspace{3in}}$ if <br />
(a) <br /><br /><br /><br />
(b) <br /><br /><br /><br />
(c) <br /><br /><br /><br />
(d) <br /><br /><br /><br />
An $m\times n$ matrix satisfying properties **a**, **b**, **c** is said to be in $\underline{\hspace{2in}}$.
We can define $\underline{\hspace{2in}}$ and $\underline{\hspace{2.5in}}$ in a similar manner.
### Example 1
Determine whether the following matrices are in (reduced) row echelon form
$$
A =
\left[
\begin{array}{cccc}
1 & 2 & 0 & 4 \\
0 & 0 & 0 & 0 \\
0 & 0 & 1 & -3 \\
\end{array}
\right],~~~
B =
\left[
\begin{array}{cccc}
1 & 0 & 0 & 0 \\
0 & 1 & 1 & 0 \\
0 & 0 & 0 & 1 \\
\end{array}
\right],~~~
C =
\left[
\begin{array}{cccc}
1 & 0 & 3 & 4 \\
0 & 2 & -2 & 5 \\
0 & 0 & 1 & 2 \\
\end{array}
\right],~~~
D =
\left[
\begin{array}{ccccc}
1 & 2 & 0 & 0 & 1\\
0 & 0 & 1 & 2 & 3\\
0 & 0 & 0 & 1 & 0 \\
\end{array}
\right].~~~
$$
<br /><br /><br /><br /><br /><br /><br />
An **elementary row operation** on a matrix $A$ is any one of the following operations: <br />
(a) <br /><br /><br /> $\qquad$ **notation:** <br />
(b) <br /><br /><br /> $\qquad$ **notation:** <br />
(c) <br /><br /><br /> $\qquad$ **notation:** <br />
### Example 2
Let
\begin{equation*}
A = \left[
\begin{array}{cccc}
0 & 0 & 1 & 2 \\
2 & 3 & 0 & -2 \\
3 & 3 & 6 & -9 \\
\end{array}
\right],~~~
\end{equation*}
Then
<br /><br /><br />
$$B = A_{r_1\leftrightarrow r_3} = \hspace{3in}$$
<br /><br /><br />
$$C = A_{\frac{1}{3}r_3\rightarrow r_3} = \hspace{3in}$$
<br /><br /><br />
$$D = A_{-2r_2+r_3\rightarrow r_3} = \hspace{3in}$$
<br /><br /><br />
An $m\times n$ matrix $B$ is said to be $\underline{\hspace{2in}}$ to an $m\times n$ matrix A if
<br /><br /><br /><br />
```python
from sympy import *
A = Matrix([[0, 0, 1, 2], [2, 3, 0 ,-2], [3, 3, 6, -9]]);
A.row_swap(0, 2);
A
```
Matrix([
[3, 3, 6, -9],
[2, 3, 0, -2],
[0, 0, 1, 2]])
```python
from sympy import *
A = Matrix([[0, 0, 1, 2], [2, 3, 0 ,-2], [3, 3, 6, -9]]);
A[2, :] = A[2, :]/3;
A
```
Matrix([
[0, 0, 1, 2],
[2, 3, 0, -2],
[1, 1, 2, -3]])
```python
from sympy import *
A = Matrix([[0, 0, 1, 2], [2, 3, 0 ,-2], [3, 3, 6, -9]]);
A[2, :] = A[2, :] - 2 * A[1, :];
A
```
Matrix([
[ 0, 0, 1, 2],
[ 2, 3, 0, -2],
[-1, -3, 6, -5]])
>**Theorem** Every nonzero $m\times n$ matrix $A = [a_{ij}]$
<br /><br /><br /><br />
### Example 3
Find a matrix in row echelon form that is row equivalent to the matrix
$$
A = \left[
\begin{array}{ccccc}
0 & 2 & 3 & -4 & 1 \\
0 & 0 & 2 & 3 & 4 \\
2 & 2 & -5 & 2 & 4 \\
2 & 0 & -6 & 9 & 7 \\
\end{array}
\right]
$$
```python
from sympy import *
A = Matrix([[0, 2, 3, -4, 1], [0, 0, 2, 3, 4], [2, 2, -5, 2, 4], [2, 0, -6, 9, 7]]);
A.rref()
```
(Matrix([
[1, 0, 0, 9, 19/2],
[0, 1, 0, -17/4, -5/2],
[0, 0, 1, 3/2, 2],
[0, 0, 0, 0, 0]]), [0, 1, 2])
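Note that `rref()` returns the *reduced* row echelon form. If only a (non-reduced) row echelon form is needed, recent SymPy versions also provide an `echelon_form()` method; a short sketch with the same matrix:
```python
from sympy import *
A = Matrix([[0, 2, 3, -4, 1], [0, 0, 2, 3, 4], [2, 2, -5, 2, 4], [2, 0, -6, 9, 7]]);
A.echelon_form()
```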
>**Theorem** Every nonzero $m\times n$ matrix $A=[a_{ij}]$
<br /><br /><br /><br />
**Remark:**
<br /><br /><br /><br />
### Example 4
Find the reduced row echelon form of the matrix
$$
A = \left[
\begin{array}{ccc}
1 & 0 & -2 \\
-2 & 1 & 9 \\
3 & 2 & -4 \\
\end{array}
\right]
$$
```python
from sympy import *
A = Matrix([[1, 0, -2], [-2, 1, 9], [3, 2, -4]]);
A.rref()
```
(Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]), [0, 1, 2])
|
9bb33d4a003307f9b23fe157413c36c05cde52f8
| 8,111 |
ipynb
|
Jupyter Notebook
|
Jupyter_Notes/Lecture05_Sec2-1_EchelonForm.ipynb
|
xiuquan0418/MAT341
|
2fb7ec4e5f0771f10719cb5e4a00a7ab07c49b59
|
[
"MIT"
] | null | null | null |
Jupyter_Notes/Lecture05_Sec2-1_EchelonForm.ipynb
|
xiuquan0418/MAT341
|
2fb7ec4e5f0771f10719cb5e4a00a7ab07c49b59
|
[
"MIT"
] | null | null | null |
Jupyter_Notes/Lecture05_Sec2-1_EchelonForm.ipynb
|
xiuquan0418/MAT341
|
2fb7ec4e5f0771f10719cb5e4a00a7ab07c49b59
|
[
"MIT"
] | null | null | null | 21.921622 | 117 | 0.372087 | true | 1,653 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.743168 | 0.819893 | 0.609318 |
__label__eng_Latn
| 0.48535 | 0.253981 |
# Linear Regression
# Simple Linear Regression
Running a SLR in Python is fairly simple once you know how to use the relevant functions. What might be confusing is that there exist several packages which provide functions for linear regression. We will use functions from the `statsmodels` (sub-)package. Other packages such as e.g. `scikit-learn` have linear regression functions too, but what makes `statsmodels` stand out from other packages is its broad set of auxiliary functions for regression diagnostics. As usual we start by importing the packages needed for our task.
```python
# Load relevant packages
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
plt.rcParams['font.size'] = 14
```
As toy data we will use the 'Advertising' data set in this section, introduced in the previous chapter. The data is taken from James et al. (2013). A copy is provided on the book's website where we will download it from.
```python
# From Advertising data set read cols 2:4
url = 'http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv'
ad = pd.read_csv(url, sep=',', usecols=(np.arange(1, 5, 1)))
print(ad.head())
```
TV radio newspaper sales
0 230.1 37.8 69.2 22.1
1 44.5 39.3 45.1 10.4
2 17.2 45.9 69.3 9.3
3 151.5 41.3 58.5 18.5
4 180.8 10.8 58.4 12.9
Next we run a linear regression to calculate the coefficients and print a summary output.
```python
# Run regression and calculate fit
reg = sm.OLS(ad.sales, exog=sm.add_constant(ad.TV)).fit()
# Alternatively: reg = sm.OLS(ad.sales, sm.add_constant(ad.TV)).fit()
print(reg.summary())
```
OLS Regression Results
==============================================================================
Dep. Variable: sales R-squared: 0.612
Model: OLS Adj. R-squared: 0.610
Method: Least Squares F-statistic: 312.1
Date: Sun, 11 Feb 2018 Prob (F-statistic): 1.47e-42
Time: 20:34:34 Log-Likelihood: -519.05
No. Observations: 200 AIC: 1042.
Df Residuals: 198 BIC: 1049.
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
const 7.0326 0.458 15.360 0.000 6.130 7.935
TV 0.0475 0.003 17.668 0.000 0.042 0.053
==============================================================================
Omnibus: 0.531 Durbin-Watson: 1.935
Prob(Omnibus): 0.767 Jarque-Bera (JB): 0.669
Skew: -0.089 Prob(JB): 0.716
Kurtosis: 2.779 Cond. No. 338.
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
**Side note:** If you come from `R` and prefer its formula syntax, you can import the statsmodels.formula.api subpackage and run something like:
> `from statsmodels.formula.api import ols
reg = ols("Sales ~ TV", data=ad).fit()
reg.summary()`
Intercept will automatically be calculated in the above setting.
Both p-values for intercept ($\hat{\beta}_0$) and slope ($\hat{\beta}_1$) are smaller than any reasonable significance level and thus we can reject the null hypothesis that either of the coefficients is zero (or irrelevant).
Instead of printing the whole summary, we could also access each of the three summary tables individually as follows:
```python
reg.summary().tables[1]
```
<table class="simpletable">
<tr>
<td></td> <th>coef</th> <th>std err</th> <th>t</th> <th>P>|t|</th> <th>[0.025</th> <th>0.975]</th>
</tr>
<tr>
<th>const</th> <td> 7.0326</td> <td> 0.458</td> <td> 15.360</td> <td> 0.000</td> <td> 6.130</td> <td> 7.935</td>
</tr>
<tr>
<th>TV</th> <td> 0.0475</td> <td> 0.003</td> <td> 17.668</td> <td> 0.000</td> <td> 0.042</td> <td> 0.053</td>
</tr>
</table>
## Plotting the Fit
Many details were calculated by calling the `sm.OLS` function. All information is attached to the `reg` object. For example to plot the data we would need the model's fitted values. These can be accessed by combining the regression variable/object with the attribute `.fittedvalues` as in `reg.fittedvalues`. In below plot it is shown how this can be of use. We plot the data and fit using the standard plotting functions.
```python
# Plot scatter & lm
plt.figure(figsize=(12,8))
plt.scatter(ad.TV, ad.sales, marker='.', label='Sample')
plt.plot(ad.TV, reg.fittedvalues, c='k', label='Fit')
plt.ylabel('Sales')
plt.xlabel('TV')
plt.legend();
```
Let us plot the residuals against the predictor (TV) to do some visual regression diagnostics:
```python
plt.figure(figsize=(12, 8))
plt.scatter(ad.TV, reg.resid)
plt.axhline(y=0, c='k')
plt.xlabel('TV')
plt.ylabel('Residuals');
```
The above two plots are just standard `matplotlib` plots serving the purpose of visual diagnostics. Beyond that, the `statsmodels` package has separate built-in plotting functions suited for visual regression diagnostics. We will not discuss them here but if you are interested you'll find plenty of sources on the web (e.g. [here](http://www.statsmodels.org/dev/examples/notebooks/generated/regression_plots.html), [here](http://www.statsmodels.org/stable/graphics.html) or [here](http://mpastell.com/2013/04/19/python_regression/))
## Accessing the Output
The `reg` object contains a ton of information, all of which you can access. To see what is available, type `reg.` and press tab. Two examples are shown below. Notice that some are attributes (like `x.shape`, `x.size`) and do not need parentheses to call them. Others are methods (similar to `.sum()`, `.min()`) and require parentheses.
```python
# Regression coefficients
print(reg.params, '\n')
print(reg.resid.head())
```
const 7.032594
TV 0.047537
dtype: float64
0 4.129225
1 1.252026
2 1.449776
3 4.265605
4 -2.727218
dtype: float64
# Confidence Intervals & Hypothesis Testing
The 95%-confidence interval (CI) is printed in the summary above. If one wishes to calculate it for a different significance level (`alpha`), it is done as follows:
```python
# 99% CI (alpha = 1%) based on t-distribution
reg.conf_int(alpha=0.01)
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
</tr>
</thead>
<tbody>
<tr>
<th>const</th>
<td>5.841796</td>
<td>8.223391</td>
</tr>
<tr>
<th>TV</th>
<td>0.040539</td>
<td>0.054535</td>
</tr>
</tbody>
</table>
</div>
The regression summary provides $t$-statistic and p-value for the null hypothesis $H_0: \hat{\beta}_j = 0$, $H_a: \hat{\beta}_j \neq 0$. You can call the resulting $t$-statistic and p-value with its attributes.
```python
print(reg.tvalues, '\n')
print(reg.pvalues)
```
const 15.360275
TV 17.667626
dtype: float64
const 1.406300e-35
TV 1.467390e-42
dtype: float64
If you wish to test a different null hypothesis, e.g. $H_0: \hat{\beta}_{TV} = 0.054$ vs. $H_1: \hat{\beta}_{TV} \neq 0.054$ use the following code:
```python
reg.t_test('TV=0.054')
```
<class 'statsmodels.stats.contrast.ContrastResults'>
Test for Constraints
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
c0 0.0475 0.003 -2.402 0.017 0.042 0.053
==============================================================================
As far as I know, `Statsmodels` does not provide a function to test 'greater than' or 'smaller than' alternative hypotheses. The reason is that with symmetric distributions, the one-sided test can be backed out from the two-sided test: a one-sided p-value is just half of the two-sided p-value. This means that given p and $t$ values from a two-tailed test, you would reject the null hypothesis of a greater-than test when p/2 < alpha and $t$ > 0, and of a less-than test when p/2 < alpha and $t$ < 0.
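Following this logic, a one-sided p-value can be backed out directly from the fitted model. A small sketch for a 'greater than' test on the TV coefficient ($H_a: \beta_{TV} > 0$):
```python
# One-sided test H0: beta(TV) <= 0 vs Ha: beta(TV) > 0
t_stat = reg.tvalues['TV']
p_one_sided = reg.pvalues['TV'] / 2
# Reject H0 at level alpha if t_stat > 0 and p_one_sided < alpha
print('t-statistic: {0:.3f}, one-sided p-value: {1:.3g}'.format(t_stat, p_one_sided))
```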
## Coefficient of Determination
The $R^2$ measure, or "coefficient of determination", displays the proportion of the variability in $y$ that is well explained by the regression fit. It is defined as
$$\begin{equation}
R^2 = \frac{TSS - SSR}{TSS} = 1 - \frac{SSR}{TSS}
\end{equation}$$
where TSS is the *total sum of squares*, defined as $TSS = \sum (y_i - \bar{y})^2$, and SSR is the *sum of squared residuals*, given by $SSR = \sum (y_i - \hat{y}_i)^2$.
It is easy to call the $R^2$ value from the regression object `reg` as the following line shows.
```python
# R squared measure
reg.rsquared
```
0.61187505085007099
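As a sanity check, the same value can be computed by hand from the definition above:
```python
# R squared from its definition: 1 - SSR/TSS
ssr = np.sum(reg.resid**2)                     # sum of squared residuals
tss = np.sum((ad.sales - ad.sales.mean())**2)  # total sum of squares
print(1 - ssr / tss)                           # should match reg.rsquared
```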
## Regression Diagnostics
### Test of Homoskedasticity
In general we assume a constant variance of the error term (homoskedasticity; $Var(\epsilon_i) = \sigma^2$ for $i = 1, \ldots, N$). From the residuals vs. fitted plot we have to question this assumption. To test it mathematically, you can run a heteroskedasticity test. The stats package offers several test options; the more common ones are White's or the one from Breusch-Pagan. See [here for more details](http://www.statsmodels.org/dev/diagnostic.html) on tests on heteroskedasticity.
Below the White test is applied as an example. The parameter 'reg.model.exog' simply contains the X matrix (here a 200x2 matrix with constant 1 in first column and values for TV in second). Instead of calling the `reg` object, we could also use `Xconst` from above. The output becomes clear when you check the function's help page (use `?sm.stats.diagnostic.het_white`).
The null hypothesis is that the error variance does **not** depend on x, i.e. that it is homoskedastic. Based on the large f-statistic value (and the corresponding p-value) we can safely reject the null hypothesis that the error variance is homoskedastic.
```python
# Test for heteroskedasticity with White test
wht = sm.stats.diagnostic.het_white(resid=reg.resid, exog=reg.model.exog)
print('f-statistic: {0:>19.4f} \n'
'p-value for f-statistic: {1:>7.4f}'.format(wht[2], wht[3]))
```
f-statistic: 35.1683
p-value for f-statistic: 0.0000
If you wish to run tests with heteroskedasticity-robust standard errors you can either access the `reg` object's robust standard errors (`reg.HC0_se`, `reg.HC1_se`, `reg.HC2_se`, `reg.HC3_se`) or, more conveniently, directly specify the covariance estimator (e.g. 'HC3' as below) when you generate the object in the first place. The example below shows how you can do this. See also [here](http://www.statsmodels.org/dev/generated/statsmodels.regression.linear_model.RegressionResults.html) or [here](https://stackoverflow.com/questions/30553838/getting-statsmodels-to-use-heteroskedasticity-corrected-standard-errors-in-coeff) for some further information.
```python
regRobust = sm.OLS(ad.sales, exog=sm.add_constant(ad.TV)).fit(cov_type='HC3')
print(regRobust.HC3_se, '\n')
print(reg.HC3_se)
```
const 0.336426
TV 0.002890
dtype: float64
const 0.336426
TV 0.002890
dtype: float64
### Other Relevant Checks
The `statsmodels` package offers many more functions to run regression diagnostics, e.g. checks for autocorrelation, non-linearity, normality of residuals etc. These functions are applicable to both simple as well as multiple linear regression models. There's a short [Jupyter notebook](http://www.statsmodels.org/dev/examples/notebooks/generated/regression_diagnostics.html) detailing some of the options.
## Application: Stock Beta
A stock beta measures the systematic risk of a security, the tendency of a security to respond to swings in the broad market. Typically, a large, well diversified index is taken as a proxy for the market portfolio (e.g. S&P500, Euro Stoxx 50, SPI, etc.). There are different ways to calculate a stock beta. We will show the regression approach, where a stock's beta is the slope of the following linear regression:
$$\begin{equation}
r - r_f = \alpha + \beta(r_M - r_f) + e
\end{equation}$$
Let us look into Geberit's stock beta. As a proxy for the market portfolio we use the Swiss market index (SMI). The risk free rate is set to $r_f=0$, which is a fairly reasonable approach in light of [the Swiss national bank's (SNB) interest rates](https://data.snb.ch/de/topics/ziredev#!/cube/zimoma) for the past five years. We will work with monthly returns for the past five years (60 months) - though other approaches (e.g. last 24 monthly returns, weekly returns for last 2 years, etc.) are reasonable choices too. The data we will load from a csv that was sourced through a financial data provider.
```python
df = pd.read_csv('Data/SMIDataMonthly.csv', sep=',',
parse_dates=['Date'], dayfirst=True,
index_col=['Date'])
df.head()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>ABBN</th>
<th>ADEN</th>
<th>BAER</th>
<th>CFR</th>
<th>CSGN</th>
<th>GEBN</th>
<th>GIVN</th>
<th>LHN</th>
<th>LONN</th>
<th>NESN</th>
<th>...</th>
<th>ROG</th>
<th>SCMN</th>
<th>SGSN</th>
<th>SLHN</th>
<th>SREN</th>
<th>UBSG</th>
<th>UHR</th>
<th>ZURN</th>
<th>SIK</th>
<th>SMI</th>
</tr>
<tr>
<th>Date</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>2018-01-31</th>
<td>25.95</td>
<td>76.60</td>
<td>63.9</td>
<td>89.32</td>
<td>17.96</td>
<td>441.0</td>
<td>2240.0</td>
<td>57.00</td>
<td>258.9</td>
<td>80.42</td>
<td>...</td>
<td>229.45</td>
<td>508.4</td>
<td>2503</td>
<td>349.5</td>
<td>91.80</td>
<td>18.90</td>
<td>426.5</td>
<td>305.9</td>
<td>8065</td>
<td>9335.40</td>
</tr>
<tr>
<th>2017-12-29</th>
<td>26.12</td>
<td>74.55</td>
<td>59.6</td>
<td>88.30</td>
<td>17.40</td>
<td>429.1</td>
<td>2252.0</td>
<td>54.95</td>
<td>263.3</td>
<td>83.80</td>
<td>...</td>
<td>246.50</td>
<td>518.5</td>
<td>2541</td>
<td>345.0</td>
<td>91.25</td>
<td>17.94</td>
<td>397.4</td>
<td>296.6</td>
<td>7740</td>
<td>9381.87</td>
</tr>
<tr>
<th>2017-11-30</th>
<td>25.19</td>
<td>74.40</td>
<td>57.8</td>
<td>84.70</td>
<td>16.66</td>
<td>427.8</td>
<td>2238.0</td>
<td>53.80</td>
<td>257.1</td>
<td>84.25</td>
<td>...</td>
<td>248.20</td>
<td>519.0</td>
<td>2432</td>
<td>330.2</td>
<td>92.20</td>
<td>16.99</td>
<td>358.7</td>
<td>297.2</td>
<td>7600</td>
<td>9318.77</td>
</tr>
<tr>
<th>2017-10-31</th>
<td>26.08</td>
<td>79.15</td>
<td>59.0</td>
<td>92.00</td>
<td>15.73</td>
<td>451.6</td>
<td>2228.0</td>
<td>56.35</td>
<td>265.0</td>
<td>83.90</td>
<td>...</td>
<td>230.50</td>
<td>504.0</td>
<td>2464</td>
<td>346.8</td>
<td>93.85</td>
<td>16.98</td>
<td>391.0</td>
<td>304.5</td>
<td>7385</td>
<td>9242.18</td>
</tr>
<tr>
<th>2017-09-29</th>
<td>23.94</td>
<td>75.40</td>
<td>57.3</td>
<td>88.50</td>
<td>15.33</td>
<td>458.1</td>
<td>2107.0</td>
<td>56.60</td>
<td>254.0</td>
<td>81.10</td>
<td>...</td>
<td>247.20</td>
<td>496.2</td>
<td>2323</td>
<td>341.1</td>
<td>87.70</td>
<td>16.55</td>
<td>402.8</td>
<td>295.4</td>
<td>7205</td>
<td>9157.46</td>
</tr>
</tbody>
</table>
<p>5 rows × 21 columns</p>
</div>
The dataframe `df` contains monthly closing prices on all SMI-stocks (incl. SMI index) with a date index in descending order. Let us create a separate Pandas dataframe with the returns of the past 60 months (`dfRets`).
```python
# Calculate returns and assign to variable dfRets
dfRets = pd.DataFrame()
dfRets['GEBNrets'] = np.log(df['GEBN'] / df['GEBN'].shift(-1))
dfRets['SMIrets'] = np.log(df['SMI'] / df['SMI'].shift(-1))
print(dfRets.head())
```
GEBNrets SMIrets
Date
2018-01-31 0.027355 -0.004965
2017-12-29 0.003034 0.006748
2017-11-30 -0.054141 0.008253
2017-10-31 -0.014291 0.009209
2017-09-29 0.044640 0.025662
Having done that, we are already set to run the regression and print the results.
```python
# Set observation period (last 60 monthly returns)
months = 60
# Create OLS object, run regression and calculate fit
regBeta = sm.OLS(endog=dfRets.iloc[:months, 0],
exog=sm.add_constant(dfRets.iloc[:months, 1])).fit()
# Show table on coefficients
print(regBeta.summary())
```
OLS Regression Results
==============================================================================
Dep. Variable: GEBNrets R-squared: 0.311
Model: OLS Adj. R-squared: 0.300
Method: Least Squares F-statistic: 26.23
Date: Sun, 11 Feb 2018 Prob (F-statistic): 3.62e-06
Time: 20:34:35 Log-Likelihood: 107.89
No. Observations: 60 AIC: -211.8
Df Residuals: 58 BIC: -207.6
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
const 0.0086 0.005 1.617 0.111 -0.002 0.019
SMIrets 0.9156 0.179 5.121 0.000 0.558 1.273
==============================================================================
Omnibus: 4.808 Durbin-Watson: 2.225
Prob(Omnibus): 0.090 Jarque-Bera (JB): 4.204
Skew: 0.405 Prob(JB): 0.122
Kurtosis: 4.013 Cond. No. 34.0
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
Based on the regression output, Geberit's stock beta is highly significant: we reject the null hypothesis of the beta being equal to zero at any reasonable level, while we fail to reject the null that the intercept (the alpha) is zero. The $R^2$ measure, though, shows that only a small amount of the variation in Geberit's monthly returns is explained by SMI's monthly returns.
```python
# Get relevant information
beta = regBeta.params['SMIrets']
alpha = regBeta.params["const"]
rsqr = regBeta.rsquared
# Plot scatter & lm; add text with alpha, beta , R2
plt.figure(figsize=(12, 8))
plt.scatter(dfRets.iloc[:months, 1],
            dfRets.iloc[:months, 0],
            marker='.', label='Monthly Returns')
plt.plot(dfRets.iloc[:months, 1], regBeta.fittedvalues, c='k', label='Fit')
plt.axis('equal')
plt.ylabel('Geberit Monthly Returns')
plt.xlabel('SMI Monthly Returns')
plt.legend(loc='lower right')
plt.text(-0.07, 0.06, 'Beta: {0: .2f}'.format(beta))
plt.text(-0.07, 0.05, 'Alpha: {0: .2f}'.format(alpha))
plt.text(-0.07, 0.04, 'R^2: {0: .2f}'.format(rsqr));
```
What we calculated above is often referred to as the raw beta. The beta value of a stock has been found to be on average closer to the mean value of 1.0, the beta of an average-systematic-risk portfolio, than to the value of the raw beta (Pinto et al. (2016)). This is why data providers such as Bloomberg publish the adjusted beta as first introduced by Blume (1971), which is calculated as
$$ \text{Adjusted beta} = 2/3 \cdot \text{raw beta} + 1/3 \cdot 1$$
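With the raw beta estimated above, the adjustment is a one-liner (a quick sketch using the `beta` variable extracted earlier):
```python
# Blume-adjusted beta from the raw regression beta
adjBeta = 2/3 * beta + 1/3
print('Adjusted beta: {0:.2f}'.format(adjBeta))
```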
Now, let us assume we are given the task to investigate whether a beta indeed regresses to 1 over time. For that we could, as a starting point, assess a stock's rolling beta over the past years. Note that this is just an example of use. Computationally it would be much faster to calculate the stock beta via the covariance/variance formula.
```python
def rollingBeta(df, window=60):
'''Calculates the running beta of a stock.
Parameters
==========
df : [n x 2] pandas dataframe with log-returns for
stock and market portfolio. Index should be
datetime series.
window : rolling window with default value 60 [optional]
Returns
=======
rb : Pandas dataframe with (backward-looking) rolling beta.
'''
# Drop NA rows from df
df = df.dropna()
# Set up empty results array
res = np.empty(len(df) - window + 1)
# Loop through df
for i in range(0, len(df)):
# As long as remaining subset is >= window, we proceed
if (len(df) - i) >= window:
# Subset df
sub = df.iloc[i:window+i, :]
# Run Regression
model = sm.OLS(endog=sub.iloc[:, 0],
exog=sm.add_constant(sub.iloc[:, 1])).fit()
# Read out beta coefficient
res[i] = model.params[1]
# Format output to dataframe
rb = pd.DataFrame(data=res, index=df.index[:(len(df)-window+1)])
rb.columns = ['RollingBeta']
return rb
```
Now we are ready to call the function. This time we use the last two years of monthly returns. Thus we set `window=24` to overwrite the default value of 60.
```python
# Call function and save output to 'rollBeta'
rollBeta = rollingBeta(df=dfRets, window=24)
```
```python
# Import 'mdates' library to format dates in x-axis
import matplotlib.dates as mdates
# Plot rolling beta
fig, ax = plt.subplots(1, figsize=(12, 8))
ax.plot(rollBeta, label='Geberit Rolling Beta')
ax.axhline(y=1, c='gray', linestyle=':') # Horizontal line
ax.legend(fontsize=12)
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=6))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%Y'))
fig.autofmt_xdate(); # Autorotate x-axis for readability
plt.savefig('C:/Users/Ben Zimmermann/Dropbox/Uni/MA/ML Seminar/LaTeX_Script/graphics/0205_RollingBetaPlot.pdf')
```
Though this is far from a thorough analysis, plotting the results shows that, at least in Geberit's case, there is indeed some truth to the claim that the beta exhibits some reversion to the market beta value of 1.
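As noted above, the same beta could be obtained much faster through the covariance/variance formula $\beta = \text{Cov}(r_i, r_M)/\text{Var}(r_M)$; a quick sketch for the most recent 24 monthly returns:
```python
# Beta via the covariance/variance formula (matches the OLS slope of a simple regression)
sub = dfRets.dropna().iloc[:24, :]
betaCov = sub['GEBNrets'].cov(sub['SMIrets']) / sub['SMIrets'].var()
print('Geberit beta (cov/var, last 24 months): {0:.2f}'.format(betaCov))
```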
# Multiple Linear Regression
## Estimating the Regression Coefficients
Simple linear regression serves well to introduce the concept and to build a good understanding. However, in reality we often have to work with more than one predictor. In the advertising data set for example we had not only data on TV advertising spendings but also on radio newspaper. It thus makes sense to extend the simple to a multiple linear regression model.
We again use the Advertising data set to see how this is done in Python. The same functions from the `statsmodels` package apply to multiple linear regression. We run the following regression
$$\text{sales} = \beta_0 + \beta_1 \text{TV} + \beta_2 \text{radio} + \beta_3 \text{newspaper} + \epsilon$$
```python
# Assign features and response to X and y
y = ad.sales
X = ad[['TV', 'radio', 'newspaper']]
X = sm.add_constant(X)
```
```python
# Run regression and print summary
mlReg = sm.OLS(endog=y, exog=X).fit()
print(mlReg.summary())
```
OLS Regression Results
==============================================================================
Dep. Variable: sales R-squared: 0.897
Model: OLS Adj. R-squared: 0.896
Method: Least Squares F-statistic: 570.3
Date: Sun, 11 Feb 2018 Prob (F-statistic): 1.58e-96
Time: 20:34:36 Log-Likelihood: -386.18
No. Observations: 200 AIC: 780.4
Df Residuals: 196 BIC: 793.6
Df Model: 3
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
const 2.9389 0.312 9.422 0.000 2.324 3.554
TV 0.0458 0.001 32.809 0.000 0.043 0.049
radio 0.1885 0.009 21.893 0.000 0.172 0.206
newspaper -0.0010 0.006 -0.177 0.860 -0.013 0.011
==============================================================================
Omnibus: 60.414 Durbin-Watson: 2.084
Prob(Omnibus): 0.000 Jarque-Bera (JB): 151.241
Skew: -1.327 Prob(JB): 1.44e-33
Kurtosis: 6.332 Cond. No. 454.
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
The coefficient for radio (0.1885) tells us, that - holding all other factors fixed - an additional 1'000 dollars in radio advertising spendings will boost the product's sales by 188.5 units.
Again, if you come from `R` and prefer its formula syntax, you could import the statsmodels.formula.api subpackage and run something like:
> `from statsmodels.formula.api import ols
mlReg = ols("Sales ~ TV + radio + newspaper", data=ad).fit()
mlReg.summary()`
## Hypothesis Tests
Again the summary above provides $t$-statistic and p-value for each individual regression coefficient. As was the case for the simple linear regression, the underlying null hypothesis is that each parameter is zero ($H_0: \beta_{j,\, H_0} = 0$). For TV and Radio we reject the null even at the 1% significance level. However, given the large p-value for Newspaper we fail to reject the null for $\beta_{\text{Newspaper}} = 0$ at any reasonable level. Thus we can conclude that leaving Newspaper data out might be a reasonable option. If other null hypothesis' ought to be tested, we can use the same command as shown above.
```python
# t-test on H0: beta(TV) = 0.0475
mlReg.t_test('TV=0.0475')
```
<class 'statsmodels.stats.contrast.ContrastResults'>
Test for Constraints
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
c0 0.0458 0.001 -1.244 0.215 0.043 0.049
==============================================================================
The output shows that we fail to reject the null hypothesis that $\beta_{TV} = 0.0475$.
Beyond the element-wise hypothesis tests the regression summary also provides the **F-statistic** (and the corresponding p-value) on the combined hypothesis that
$$\begin{align}
H_0&: \quad \beta_j = \beta_1, \beta_2, \ldots \beta_p = 0 \\
H_a&: \quad \beta_j \neq 0 \text{ for at least one $j$}
\end{align}$$
On the basis of the corresponding p-value (i.e. 1.58e-96) we can reject the null at any reasonable significance level. Should we be interested in assessing a particular hypothesis, say
$$\begin{align}
H_0&: \quad \beta_{TV} = \beta_{\text{Radio}} \\
H_a&: \quad \beta_{TV} \neq \beta_{\text{Radio}}
\end{align}$$
we use the `.f_test()` method.
```python
# Example: test the joint hypothesis const = beta(radio) = beta(newspaper) = 0.1
mlReg.f_test('const = radio = newspaper = 0.1')
```
<class 'statsmodels.stats.contrast.ContrastResults'>
<F test: F=array([[ 126.9935335]]), p=1.0186277412639764e-45, df_denom=196, df_num=3>
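The particular hypothesis stated above, $\beta_{TV} = \beta_{\text{Radio}}$, would be tested analogously (output omitted here):
```python
# Test H0: beta(TV) = beta(radio)
mlReg.f_test('TV = radio')
```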
See the [documentation page](http://www.statsmodels.org/dev/generated/statsmodels.regression.linear_model.RegressionResults.f_test.html) for further examples on how this function can be used.
### Coefficient of Determination
The $R^2$ measure for the MLR is the same as for the SLR. However, in the case of MLR it has one drawback: the value will always increase when more explanatory variables are added to the model - even if those variables are only weakly associated with the response. To make good on this disadvantage a modificated measure is often used: **adjusted $R^2$**.
$$\begin{equation}
R^2_{adj} = 1 - (1-R^2) \frac{n-1}{n-p-1}
\end{equation}$$
To get this measure in Python, simply use the OLS object and call the `.rsquared_adj` attribute.
```python
mlReg.f_test('TV=radio=newspaper=0')
```
<class 'statsmodels.stats.contrast.ContrastResults'>
<F test: F=array([[ 570.27070366]]), p=1.575227256093874e-96, df_denom=196, df_num=3>
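Note that the cell above simply re-runs the overall F-test from before. The adjusted $R^2$ itself can be read off the fitted model directly and reproduced from the formula; a short sketch:
```python
# Adjusted R squared, directly and via the formula
n, p = mlReg.nobs, mlReg.df_model
print(mlReg.rsquared_adj)
print(1 - (1 - mlReg.rsquared) * (n - 1) / (n - p - 1))
```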
## Application: Factor Models
### Fama-French Three Factor Model
We will apply the concept of multiple linear regression in the context of Fama-French's three factor model (Fama and French (1993)). Their model follows Ross' arbitrage pricing theory, which postulates that excess returns are linearly related to a set of systematic risk factors (Ross et al. (1973)). The factors can be returns on other assets, such as the market portfolio, or any other variable (e.g. interest rates, inflation, consumption growth, market sentiment, hedging demands, etc.). Fama-French empirically identified three factors that capture the systematic risk: firm size, book-to-market ratio (B/M) and market risk. To quantify their findings, Fama-French constructed zero-net-investment factor portfolios capturing the systematic risk of firm size ('small minus big' (SMB), i.e. long on small and short on big size stocks) and B/M ('high minus low' (HML), i.e. long on high B/M, short on low B/M stocks). For details on how these portfolios are constructed see e.g. Bodie et al. (2014). The third factor, market risk, is simply the excess return on a well diversified market portfolio.
With that, the sensitivity of individual stocks to the three factors is given by the estimated coefficients of a multiple linear regression. As a group they predict the total risk premium. The expected excess return $R_{it}$ of asset $i$ at time $t$ in the Fama-French three-factor model is described by
$$\begin{equation}
R_{it} = \alpha_i + \beta_i^{M} (r_{M,t} - r_{f,t}) + \beta_i^{SMB} SMB_t + \beta_i^{HML} HML_t + \epsilon_{it}
\end{equation}$$
The above Fama-French factors are calculated on a monthly basis and published on [Kenneth R. French's website](http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html). There you will also find information on the methodology of the model and lots of other possible factors. To run this regression in Python we use a shortcut. The `pandas_datareader` package is capable of loading the data without having to download a txt or csv file in a separate step.
We will calculate the factor beta for all 20 SMI stocks. For that we use Fama-French's 'European 3 Factors' data. Following our 'Stock Beta' example from above, the risk free rate for Switzerland is again assumed to be zero over the past 5 years.
### Prepare Data for Fama-French Model
We start by importing the `pandas_datareader.data` package and defining some key parameter.
```python
import pandas_datareader as web
# Define obs. period, start & enddate
months = 60
startdate = '2012-06-01'
enddate = '2017-12-31'
```
** Side Note: ** If you want to know what data is available (and their labels), you can run the `get_available_datasets()` function.
> `from pandas_datareader.famafrench import get_available_datasets
get_available_datasets()`
or simply check [Kenneth R. French's website](http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html)
```python
# Load FF factors
data = web.DataReader('Europe_3_Factors', data_source='famafrench',
start=startdate, end=enddate)
```
Variable `data` is a dictionary with three entries: monthly data, annual data and a description. We select the monthly data in dictionary entry 0 and format it.
```python
# Select monthly data
ff = data[0]
# Sort data in descending order
ff = ff.sort_index(ascending=False)
# Convert returns to decimal percentages
ff = ff/100
print(ff.head(3))
```
Mkt-RF SMB HML RF
Date
2017-12 0.0147 0.0156 0.0046 0.0009
2017-11 -0.0002 -0.0040 0.0108 0.0008
2017-10 0.0060 -0.0090 -0.0020 0.0009
Next we calculate the log-returns of all SMI stocks. The share prices are taken from dataframe `df` which we loaded above.
```python
shsRets = np.log(df / df.shift(-1))
shsRets = shsRets['2017-12-31':'2012-06-01']
shsRets = shsRets.iloc[:, :-1] # We exclude last column (with SMI data)
shsRets.head(3)
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>ABBN</th>
<th>ADEN</th>
<th>BAER</th>
<th>CFR</th>
<th>CSGN</th>
<th>GEBN</th>
<th>GIVN</th>
<th>LHN</th>
<th>LONN</th>
<th>NESN</th>
<th>NOVN</th>
<th>ROG</th>
<th>SCMN</th>
<th>SGSN</th>
<th>SLHN</th>
<th>SREN</th>
<th>UBSG</th>
<th>UHR</th>
<th>ZURN</th>
<th>SIK</th>
</tr>
<tr>
<th>Date</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>2017-12-29</th>
<td>0.036254</td>
<td>0.002014</td>
<td>0.030667</td>
<td>0.041625</td>
<td>0.043460</td>
<td>0.003034</td>
<td>0.006236</td>
<td>0.021150</td>
<td>0.023829</td>
<td>-0.005356</td>
<td>-0.021609</td>
<td>-0.006873</td>
<td>-0.000964</td>
<td>0.043844</td>
<td>0.043846</td>
<td>-0.010357</td>
<td>0.054408</td>
<td>0.102457</td>
<td>-0.002021</td>
<td>0.018253</td>
</tr>
<tr>
<th>2017-11-30</th>
<td>-0.034722</td>
<td>-0.061889</td>
<td>-0.020549</td>
<td>-0.082673</td>
<td>0.057441</td>
<td>-0.054141</td>
<td>0.004478</td>
<td>-0.046309</td>
<td>-0.030265</td>
<td>0.004163</td>
<td>0.024040</td>
<td>0.073984</td>
<td>0.029328</td>
<td>-0.013072</td>
<td>-0.049050</td>
<td>-0.017738</td>
<td>0.000589</td>
<td>-0.086221</td>
<td>-0.024266</td>
<td>0.028697</td>
</tr>
<tr>
<th>2017-10-31</th>
<td>0.085618</td>
<td>0.048538</td>
<td>0.029237</td>
<td>0.038786</td>
<td>0.025758</td>
<td>-0.014291</td>
<td>0.055839</td>
<td>-0.004427</td>
<td>0.042396</td>
<td>0.033943</td>
<td>-0.008480</td>
<td>-0.069947</td>
<td>0.015597</td>
<td>0.058927</td>
<td>0.016573</td>
<td>0.067776</td>
<td>0.025650</td>
<td>-0.029733</td>
<td>0.030341</td>
<td>0.024676</td>
</tr>
</tbody>
</table>
</div>
### Calculate Fama-French Coefficients
In order to run the regression we need to have matching indices. Fama-French's index is 'yyyy-mm' while our dataframe with returns has format 'yyyy-mm-dd'. Since we know that the length of both dataframes is equal, we can simply overwrite the index of one of the dataframes.
```python
# Create matching indices
ff.index = shsRets.index
```
We are now in a position to run the multiple linear regression. We will again use the past 60 months. From the Fama French set we just need the first three columns. Column 4 is the risk free rate which we do not use.
```python
# Add constant to matrix for alphas (=intercept)
X = sm.add_constant(ff.iloc[:months, :3])
```
```python
# Assign ticker to variable
tickers = shsRets.columns
```
```python
# Create results matrix to paste beta factors
res = np.empty(shape=(5, len(tickers)))
```
```python
# Run regression for each ticker
for i in range(0, len(tickers)):
# Select returns of share i
sub = shsRets.iloc[:months, i]
# Run regression
model = sm.OLS(endog=sub, exog=X).fit()
# Paste beta factors to 'res' matrix
res[0:4, i] = model.params
res[4, i] = model.rsquared_adj
```
```python
# Format output to dataframe
ff3f = pd.DataFrame(data=res, index=['Alpha', 'BetaMkt', 'BetaSMB', 'BetaHML', 'R2_adj'])
ff3f.columns = tickers
```
And here are our factors:
```python
ff3f
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>ABBN</th>
<th>ADEN</th>
<th>BAER</th>
<th>CFR</th>
<th>CSGN</th>
<th>GEBN</th>
<th>GIVN</th>
<th>LHN</th>
<th>LONN</th>
<th>NESN</th>
<th>NOVN</th>
<th>ROG</th>
<th>SCMN</th>
<th>SGSN</th>
<th>SLHN</th>
<th>SREN</th>
<th>UBSG</th>
<th>UHR</th>
<th>ZURN</th>
<th>SIK</th>
</tr>
</thead>
<tbody>
<tr>
<th>Alpha</th>
<td>-0.001025</td>
<td>-0.001959</td>
<td>0.005041</td>
<td>-0.001922</td>
<td>-0.011472</td>
<td>0.011149</td>
<td>0.013058</td>
<td>-0.011969</td>
<td>0.022697</td>
<td>0.003409</td>
<td>0.003714</td>
<td>0.002670</td>
<td>0.005087</td>
<td>0.000832</td>
<td>0.010098</td>
<td>0.005727</td>
<td>-0.002099</td>
<td>-0.009045</td>
<td>0.002611</td>
<td>0.018352</td>
</tr>
<tr>
<th>BetaMkt</th>
<td>0.702400</td>
<td>0.848991</td>
<td>0.645388</td>
<td>0.915350</td>
<td>0.960051</td>
<td>0.457179</td>
<td>0.500261</td>
<td>1.177939</td>
<td>0.804210</td>
<td>0.505555</td>
<td>0.504823</td>
<td>0.449106</td>
<td>0.340536</td>
<td>0.522752</td>
<td>0.485764</td>
<td>0.255057</td>
<td>0.750614</td>
<td>0.929691</td>
<td>0.415227</td>
<td>0.653636</td>
</tr>
<tr>
<th>BetaSMB</th>
<td>0.293945</td>
<td>0.703264</td>
<td>0.048114</td>
<td>-0.401858</td>
<td>0.369227</td>
<td>-0.524710</td>
<td>-0.699341</td>
<td>0.114422</td>
<td>0.077635</td>
<td>-0.411104</td>
<td>-0.403297</td>
<td>-0.317893</td>
<td>-0.795906</td>
<td>-0.273266</td>
<td>0.911996</td>
<td>-0.577227</td>
<td>0.034922</td>
<td>-0.149020</td>
<td>-0.640211</td>
<td>-0.437440</td>
</tr>
<tr>
<th>BetaHML</th>
<td>0.122113</td>
<td>0.444320</td>
<td>0.628217</td>
<td>0.273710</td>
<td>1.694172</td>
<td>-0.138601</td>
<td>-0.551688</td>
<td>0.353747</td>
<td>-0.210786</td>
<td>-0.582850</td>
<td>-0.378915</td>
<td>-0.306853</td>
<td>-0.204705</td>
<td>-0.438750</td>
<td>0.569497</td>
<td>-0.287004</td>
<td>1.019220</td>
<td>0.288909</td>
<td>0.040235</td>
<td>0.023124</td>
</tr>
<tr>
<th>R2_adj</th>
<td>0.188357</td>
<td>0.339020</td>
<td>0.173241</td>
<td>0.293771</td>
<td>0.354041</td>
<td>0.103003</td>
<td>0.228293</td>
<td>0.434920</td>
<td>0.214598</td>
<td>0.310364</td>
<td>0.165969</td>
<td>0.108944</td>
<td>0.160359</td>
<td>0.102738</td>
<td>0.159904</td>
<td>0.043001</td>
<td>0.289225</td>
<td>0.216220</td>
<td>0.099337</td>
<td>0.171639</td>
</tr>
</tbody>
</table>
</div>
```python
# Transpose matrix (.T) and display stats summary
print(ff3f.T.describe())
```
Alpha BetaMkt BetaSMB BetaHML R2_adj
count 20.000000 20.000000 20.000000 20.000000 20.000000
mean 0.003248 0.641227 -0.153888 0.117856 0.207847
std 0.008978 0.239535 0.465744 0.567100 0.101348
min -0.011969 0.255057 -0.795906 -0.582850 0.043001
25% -0.001931 0.478618 -0.459258 -0.291966 0.147164
50% 0.003040 0.584070 -0.295580 0.031680 0.180799
75% 0.006820 0.815405 0.086832 0.376390 0.290361
max 0.022697 1.177939 0.911996 1.694172 0.434920
### Hedge a Portolio
Now that we have all the factors, let's assume we want to build a portfolio with all 20 SMI stocks that maximizes the Sharpe ratio (SR). As a further condition we want to limit our exposure to the SMB factor to, let's say, $\beta_p^{SMB} = 0$. How would we allocate our investment under these conditions? In mathematical terms we have the following optimization problem:
$$\begin{equation}
\max_{w_i} SR = \frac{\mathbb{E}[r_p] - r_f}{\sigma_p} \qquad s.t. \qquad
\begin{cases}
\sum w_i &= 1 \\
\beta_p^{SMB} &= 0
\end{cases}
\end{equation}$$
Usually, to calculate the expected return $\mathbb{E}[r_p]$, historical returns are taken. For our case here, we will take the expected returns given by our Fama-French 3 Factor model (denoted $\mathbf{R_{ff}}$). The portfolio volatility $\sigma_p$, however, we estimate using historical data. Alternatively one could think of taking the SMI volatility index value as a proxy. But this is only an approximation because our portfolio will not have the same weights per stock as the SMI, which makes it a questionable choice. With that we have
$$\begin{equation}
\max_{w_i} SR = \frac{\mathbf{w}^T \left(r_f + \mathbf{\alpha} + \mathbf{\beta}^{M} (r_{M} - r_{f}) + \mathbf{\beta}^{SMB} SMB + \mathbf{\beta}^{HML} HML \right) - r_f}{\sqrt{\mathbf{w}^T \mathbf{\Sigma}\mathbf{w}}} \qquad s.t. \qquad
\begin{cases}
\sum w_i &= 1 \\
\beta_p^{SMB} &= 0
\end{cases}
\end{equation}$$
Python can solve this problem numerically. We first set the stage by defining an auxiliary function `pfStats` that returns the expected portfolio return, volatility and Sharpe ratio given a vector of weights. Note that the function also makes use of other data like monthly returns and the riskfree rate as previously defined (which is again set to 0), but only the weights are passed as a function input. This is necessary for the optimization function.
```python
# Define rf and (equally spread) start weights
rf = 0
wghts = np.repeat(1. / len(tickers), len(tickers))
# Expected stock returns based on ff3f model
expShsRets = rf + ff3f.T.Alpha + \
ff3f.T.BetaMkt * ff['Mkt-RF'].mean() + \
ff3f.T.BetaSMB * ff.SMB.mean() + \
ff3f.T.BetaHML * ff.HML.mean()
```
```python
def pfStats(weights):
'''Returns basic measures for a portfolio
Parameters
==========
weights : array-like
weights for different securities in portfolio
Returns
=======
expPfRet : float
weighted, annualized expected portfolio return based on ff3f model
pfVol : float
historical annualized portfolio volatility
SR : float
portfolio Sharpe ratio for given riskfree rate
'''
expPfRet = np.sum(weights * expShsRets) * 12
pfVol = np.sqrt(np.dot(weights.T, np.dot(shsRets.cov() * 12, weights)))
SR = (expPfRet - rf) / pfVol
return np.array([expPfRet, pfVol, SR])
```
Python's scipy package has a sublibrary for constrained optimization problems. We will use the `minimize` function and minimize the negative value of the Sharpe ratio (which is equivalent to maximizing the SR).
```python
import scipy.optimize as sco
def minSR(wghts):
return -pfStats(wghts)[2]
```
Our constraints are as stated above: $\sum w_i = 1$, $\beta_P^{SMB} = 0$. Additionally, we set bounds for the weights such that short/long positions are allowed, but only up to 100% per share ($w_i \in [-1, 1]\; \forall i \in [1, 2, \ldots, n]$).
```python
# Constraints and bounds
constr = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1},
{'type': 'eq', 'fun': lambda x: np.sum(x * ff3f.T.BetaSMB) - 0}]
bnds = tuple((-1,1) for x in range(len(tickers)))
```
```python
# Minimization function
optPf = sco.minimize(minSR, x0=wghts, method='SLSQP', bounds=bnds, constraints=constr)
```
```python
# Check if conditions are actually met
print('Sum of weights: ', np.sum(optPf['x']))
print('Beta SMB factor: ', np.sum(optPf['x'] * ff3f.T.BetaSMB))
```
Sum of weights: 1.0
Beta SMB factor: 6.940115149234316e-12
```python
# Calculate portfolio stats given optimal weights
rsltsOptPf = pfStats(optPf['x'])
# Format weights into dataframe with Tickers as heading
optWghts = pd.DataFrame(data=optPf['x'], index=tickers)
optWghts.columns = ['optimalWghts']
# Print results
print('Portfolio return: ', str(rsltsOptPf[0]))
print('Portfolio volatility: ', str(rsltsOptPf[1]))
print('Portfolio SR: ', str(rsltsOptPf[2]), '\n')
print(str(optWghts))
```
Portfolio return: 0.66978301121
Portfolio volatility: 0.202898675338
Portfolio SR: 3.30107138499
optimalWghts
ABBN -0.051382
ADEN -0.255488
BAER 0.099283
CFR 0.263565
CSGN -0.191812
GEBN 0.075917
GIVN 0.609015
LHN -0.471116
LONN 0.533256
NESN 0.232630
NOVN 0.395617
ROG -0.690954
SCMN -0.121551
SGSN -0.269585
SLHN 0.755819
SREN -0.055902
UBSG 0.076473
UHR -0.282089
ZURN -0.474419
SIK 0.822725
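As an additional sanity check, we can look at all remaining factor exposures of the optimal portfolio; only the SMB exposure is forced to zero by the constraint, while the market and HML betas stay unconstrained. The following is a sketch assuming `np`, `optPf` and `ff3f` from the cells above are still in scope.
```python
# Remaining factor exposures of the optimal portfolio (sketch; assumes optPf, ff3f, np from above)
for factor in ['BetaMkt', 'BetaSMB', 'BetaHML']:
    exposure = np.sum(optPf['x'] * ff3f.T[factor])
    print('Portfolio {}: {:.4f}'.format(factor, exposure))
```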
# Further Resources
In writing this notebook, many resources were consulted. For internet resources the links are provided within the text above and will therefore not be listed again. Beyond these links, the following resources were consulted and are recommended as further reading on the discussed topics:
* Blume, Marshall E., 1971, On the Assessment of Risk, *The Journal of Finance* 26, 1-10.
* Fama, Eugene F, and Kenneth R French, 1993, Common risk factors in the returns on stocks and bonds, *Journal of Financial Economics* 33, 3–56.
* Hilpisch, Yves, 2015, Python for Finance (O'Reilly Media, Sebastopol, CA).
* James, Gareth, Daniela Witten, Trevor Hastie, and Robert Tibshirani, 2013, *An Introduction to Statistical Learning: With Applications in R* (Springer Science & Business Media, New York, NY).
* Müller, Andreas C., and Sarah Guido, 2017, *Introduction to Machine Learning with Python* (O’Reilly Media, Sebastopol, CA).
* Pinto, Jerald E., Elaine Henry, Thomas R. Robinson, and John D. Stowe, 2016, *Equity Asset Valuation* (MIT Press, Cambridge, MA).
* Ross, Stephen A., et al., 1973, Return, risk and arbitrage (Rodney L. White Center for Financial Research, The Wharton School, University of Pennsylvania).
* Sheppard, Kevin, 2017, Introduction to Python for Econometrics, Statistics and Data Analysis from Website https://www.kevinsheppard.com/images/b/b3/Python_introduction-2016.pdf, 07/07/2017.
* Wooldridge, Jeffrey M, 2015, *Introductory Econometrics: A modern approach* (Cengage Learning, Boston, MA).
|
fb1a363f3ebe7431d210622f1ed073a9aa889de8
| 235,587 |
ipynb
|
Jupyter Notebook
|
0205_LinearRegression.ipynb
|
mauriciocpereira/ML_in_Finance_UZH
|
d99fa0f56b92f4f81f9bbe024de317a7949f0d38
|
[
"MIT"
] | null | null | null |
0205_LinearRegression.ipynb
|
mauriciocpereira/ML_in_Finance_UZH
|
d99fa0f56b92f4f81f9bbe024de317a7949f0d38
|
[
"MIT"
] | null | null | null |
0205_LinearRegression.ipynb
|
mauriciocpereira/ML_in_Finance_UZH
|
d99fa0f56b92f4f81f9bbe024de317a7949f0d38
|
[
"MIT"
] | null | null | null | 104.473171 | 57,242 | 0.792582 | true | 15,282 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.79053 | 0.795658 | 0.628992 |
__label__eng_Latn
| 0.853723 | 0.299689 |
```python
import sympy
from einsteinpy.symbolic import MetricTensor, ChristoffelSymbols, RiemannCurvatureTensor
import astropy.units as u
from einsteinpy import constant
from einsteinpy.utils import scalar_factor as sf
from einsteinpy.utils import scalar_factor_derivative as sfd
from einsteinpy.utils import time_velocity
sympy.init_printing()
```
```python
syms = sympy.symbols('t r theta phi')
t, r, theta, phi = syms
# define the FLRW metric with curvature parameter k; the scale factor is kept
# here as a generic symbolic function a(t) (einsteinpy's scalar_factor utility
# could be substituted instead)
k = 1.0
a = sympy.Function('a')(t)
den = 1 - k * r**2
metric = [[0 for i in range(4)] for i in range(4)]
metric[0][0] = -1
metric[1][1] = a**2 / den
metric[2][2] = a**2 * r**2
metric[3][3] = a**2 * r**2 * sympy.sin(theta)**2
m_obj = MetricTensor(metric, syms)
m_obj.tensor()
```
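As a next step one could derive curvature quantities from this metric. A minimal sketch, assuming `m_obj` from the cell above is defined, using the `ChristoffelSymbols` class imported at the top:
```python
# Sketch: Christoffel symbols of the FLRW metric defined above (assumes m_obj exists)
ch = ChristoffelSymbols.from_metric(m_obj)
ch.tensor()
```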
```python
```
```python
```
|
d4356a2ed46c1d8139dc8c08e0f8b04254169f62
| 3,874 |
ipynb
|
Jupyter Notebook
|
Friedman-Robertson-Walker Spacetime.ipynb
|
SheepWaitForWolf/General-Relativity
|
e9eb0f8cc65be9368c6648c8afaa1f8e631516a8
|
[
"MIT"
] | 1 |
2021-06-04T11:01:54.000Z
|
2021-06-04T11:01:54.000Z
|
Friedman-Robertson-Walker Spacetime.ipynb
|
SheepWaitForWolf/General-Relativity
|
e9eb0f8cc65be9368c6648c8afaa1f8e631516a8
|
[
"MIT"
] | null | null | null |
Friedman-Robertson-Walker Spacetime.ipynb
|
SheepWaitForWolf/General-Relativity
|
e9eb0f8cc65be9368c6648c8afaa1f8e631516a8
|
[
"MIT"
] | null | null | null | 41.655914 | 1,584 | 0.603511 | true | 245 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.887205 | 0.689306 | 0.611555 |
__label__eng_Latn
| 0.540223 | 0.259178 |
```
import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse as sps
```
# Dataset, binary data and continuous data
```
def digit_basis(geometry):
num_bit = np.prod(geometry)
M = 2**num_bit
x = np.arange(M)
return x
def binary_basis(geometry):
num_bit = np.prod(geometry)
M = 2**num_bit
x = np.arange(M)
return unpacknbits(x[:,None], num_bit).reshape((-1,)+geometry)
def unpacknbits(arr, nbit, axis=-1):
'''unpack numbers to bitstrings.'''
nd = np.ndim(arr)
if axis < 0:
axis = nd + axis
return (((arr & (1 << np.arange(nbit - 1, -1, -1)).reshape([-1] + [1] * (nd - axis - 1)))) > 0).astype('int8')
def packnbits(arr, axis=-1):
'''pack bitstrings to numbers.'''
nd = np.ndim(arr)
nbit = np.shape(arr)[axis]
if axis < 0:
axis = nd + axis
return (arr * (1 << np.arange(nbit - 1, -1, -1)).reshape([-1] + [1] * (nd - axis - 1))\
).sum(axis=axis, keepdims=True).astype('int')
```
```
def gaussian_pdf(geometry, mu, sigma):
'''get gaussian distribution function'''
x = digit_basis(geometry)
pl = 1. / np.sqrt(2 * np.pi * sigma**2) * \
np.exp(-(x - mu)**2 / (2. * sigma**2))
return pl/pl.sum()
```
```
def barstripe_pdf(geometry):
'''get bar and stripes PDF'''
x = binary_basis(geometry)
pl = is_bs(x)
return pl/pl.sum()
def is_bs(samples):
'''a sample is a bar or a stripe.'''
return (np.abs(np.diff(samples,axis=-1)).sum(axis=(1,2))==0\
)|((np.abs(np.diff(samples, axis=1)).sum(axis=(1,2)))==0)
```
```
num_bit = 9
pl1 = gaussian_pdf((num_bit,), mu=2**(num_bit-1)-0.5, sigma=2**(num_bit-2))
plt.plot(pl1)
plt.show()
```
```
geometry = (3,3)
pl2 = barstripe_pdf(geometry)
plt.plot(pl2)
# show ticks
basis = binary_basis(geometry)
plt.xticks([0, 511], [basis[0], basis[511]])
plt.show()
# generate samples
samples = basis[pl2>1e-2]
# show bar and stripes
def plot_bs(samples, size):
plt.figure(facecolor='#777777')
gs = plt.GridSpec(*size)
for i in range(size[0]):
for j in range(size[1]):
if i*size[1]+j == len(samples): break
plt.subplot(gs[i,j]).imshow(samples[i*size[1]+j], vmin=0, vmax=1)
plt.axis('equal')
plt.axis('off')
plt.show()
size = (4, 4)
plot_bs(samples, size)
```
# Kernel Two Sample Test
## Kernel Method
Given a vector $x$ in $n$-dimensional space, we perform a mapping into a reproducing kernel Hilbert space (RKHS), $x\rightarrow \phi(x)$.
Define the inner product as $\phi(x)^T\phi(x') = K(x,x')$; this is the kernel function.
Let's see a simple example: the polynomial kernel function $K(x,y)=(x^T y)^2=\sum_{ij} x_i y_i x_j y_j$
The mapping should be $\phi(x)=x\otimes x$.
#### Ref
Hofmann, T., Scholkopf, B., & Smola, A. J. (2008). Kernel methods in machine learning. Annals of Statistics, 36(3), 1171–1220. https://doi.org/10.1214/009053607000000677
```
# define the kernel and mapping
K = lambda x, y: x.dot(y)**2
phi = lambda x: (x[:,None]*x).ravel()
# randomly generate some vectors
num_sample = 10
num_dim = 100
x = np.random.randn(num_sample, num_dim)
print("Size of x = %d"%x[0].size)
print("Size of phi(x) = %d"%phi(x[0]).size)
kmat = np.zeros([num_sample, num_sample])
for i in range(num_sample):
for j in range(num_sample):
kmat[i, j] = K(x[i], x[j])
np.testing.assert_almost_equal(kmat[i, j], phi(x[i]).dot(phi(x[j])))
```
Size of x = 100
Size of phi(x) = 10000
## Properties
* K is usually chosen to be **positive definite**; the Moore–Aronszajn theorem states that for every positive definite kernel there exists a unique RKHS, and vice versa.
* sometimes the RKHS can be $\infty$-dimensional, e.g. the RBF kernel $K(x,y) = \exp\left(-\frac{\|x-y\|^2}{2\sigma}\right)$
```
# get all eigen values
print(np.linalg.eigvalsh(kmat), "are all positive!")
```
[ 6705.47440995 8237.20293944 9835.08782136 10220.3915002
11378.71166636 11835.40403696 13443.38452008 13744.29091067
15806.48534292 16826.94894898] are all positive!
## Kernel Two Sample Test (MMD)
Defined as the distance in RKHS between two mean embeddings
\begin{align}
\mathcal{L} =& \left\|\sum_{x} p_\theta(x) \phi(x)- \sum_{x} \pi(x) \phi(x) \right\|^2 \\
=&\langle K(x,y)\rangle_{x\sim p_\theta, y\sim p_\theta}-2\langle K(x,y)\rangle_{x\sim p_\theta,y\sim \pi}+\langle K(x, y)\rangle_{x\sim \pi,y\sim \pi}
\end{align}
#### Ref
Gretton, A. (2012). A Kernel Two-Sample Test. Journal of Machine Learning Research, 13, 723–773. Retrieved from http://kyb.tuebingen.mpg.de/publications/attachments/NIPS2009-Gretton_[0].pdf
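In this notebook the MMD is evaluated from the full probability vectors, but the same quantity can be estimated directly from samples. Below is a minimal sketch of the biased (V-statistic) estimator; the Gaussian toy data, sample size and bandwidth are illustrative assumptions, not part of the original notebook.
```
import numpy as np
# biased (V-statistic) estimate of MMD^2 from two sample sets with an RBF kernel
def rbf_kernel(x, y, sigma=1.0):
    return np.exp(-np.abs(x[:, None] - y[None, :])**2 / (2 * sigma))

def mmd2_from_samples(x, y, sigma=1.0):
    return rbf_kernel(x, x, sigma).mean() \
        - 2 * rbf_kernel(x, y, sigma).mean() \
        + rbf_kernel(y, y, sigma).mean()

np.random.seed(0)
x = np.random.randn(500)        # samples from p
y = np.random.randn(500) + 0.5  # samples from a shifted distribution pi
print(mmd2_from_samples(x, np.random.randn(500)))  # ~ 0 for identical distributions
print(mmd2_from_samples(x, y))                     # > 0 when p != pi
```
This mirrors the structure of the `kernel_expect` terms used in the exact loss below.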
## Probability embedding
We next extend the notion of feature map to the embedding of a probability distribution: we will define an element $\mu(p)\in \mathcal H$ such that $E_x(f) = f^T \mu_p$ for all $f \in \mathcal H$.
**Theorem 5**
Let $\mathcal F$ be a unit ball in a **universal** RKHS $\mathcal{H}$, defined on the compact metric space $\mathcal X$, with associated continuous kernel $k(·, ·)$. Then MMD$[\mathcal F, p,q]=0$ if and only if $p=q$.
* universal RKHS: By the universality of $\mathcal H$, for any given $\epsilon > 0$ and $f \in C(X)$ (bounded continuous function) there exists a $g \in \mathcal H$ such that $\|f-g\|_\infty\leq \epsilon$. e.g. Gaussian and Laplace kernels (I. Steinwart. On the influence of the kernel on the consistency of support vector machines. Journal of Machine Learning Research, 2:67–93, 2001.).
#### Ref
A.Berlinet and C. Thomas-Agnan. Reproducing KernelHilbert Spaces in Probability and Statistics. Kluwer, 2004. Chapter 4.
```
class RBFMMD2(object):
def __init__(self, sigma_list, basis):
self.sigma_list = sigma_list
self.basis = basis
self.K = mix_rbf_kernel(self.basis, self.basis, self.sigma_list)
def __call__(self, px, py):
'''
Args:
px (1darray, default=None): probability for data set x, used only when self.is_exact==True.
py (1darray, default=None): same as px, but for data set y.
Returns:
float, loss.
'''
pxy = px-py
return self.kernel_expect(pxy, pxy)
def kernel_expect(self, px, py):
return px.dot(self.K.dot(py))
def witness(self, px, py):
'''witness function of this kernel.'''
return self.K.dot(px-py)
def mix_rbf_kernel(x, y, sigma_list):
ndim = x.ndim
if ndim == 1:
exponent = np.abs(x[:, None] - y[None, :])**2
elif ndim == 2:
exponent = ((x[:, None, :] - y[None, :, :])**2).sum(axis=2)
else:
raise
K = 0.0
for sigma in sigma_list:
gamma = 1.0 / (2 * sigma)
K = K + np.exp(-gamma * exponent)
return K
```
### Witness Function
To see how sensitive the MMD loss is to the probability difference $\delta (x) = p(x)-\pi(x)$, we define the witness function
$$W_\pi(x)\propto \langle \phi(x), \mu_p-\mu_\pi \rangle_{\rm RKHS}=E_y[K(y\sim p,x)]-E_y[K(y\sim\pi, x)]$$
So that $\mathcal{L}=\delta (x)^TW_\pi(x)$.
Given a target empirical distribution $\hat{\pi}(x) = \sum\limits_{d_i \in \mathcal{D}} \delta(x-d_i)$, we can plot the witness function.
```
basis = digit_basis(num_bit)
mmd = RBFMMD2([20], basis)
print("If p = q, then mmd = %.4f"%mmd(pl1, pl1))
print("If p != q, then mmd = %.4f"%mmd(pl1, pl2))
def show_witness(p, pi):
wit = mmd.witness(p, pi)
plt.plot(basis, np.transpose([pi, p, wit/20]))
plt.legend(['$\pi$', 'p', 'witness'])
plt.ylim(-0.008,0.008)
plt.show()
def _empirical(pi, num_sample):
'''turn a distribution function to empirical distribution function.'''
samples = np.random.choice(np.arange(2**num_bit), (num_sample,), p=pl1)
px = np.bincount(samples, minlength=2**num_bit)
px = px / float(np.sum(px))
return px
# witness function using band-width 20
show_witness(pl1, pl2)
# witness function using empirical (spiky) distribution
pl1_ = _empirical(pl1, 100)
show_witness(pl2, pl1_)
# witness function using narrower bandwidth
mmd = RBFMMD2([1], basis)
show_witness(pl1, pl2)
```
# Building Differentiable Circuits
<div>
$$
\newcommand{\dataset}{{\mathcal{D}}}
\newcommand{\wfunc}{{\psi}}
\newcommand{\thetav}{{\boldsymbol{\theta}}}
\newcommand{\gammav}{{\boldsymbol{\gamma}}}
\newcommand{\thetai}{{\theta^\alpha_l}}
\newcommand{\Expect}{{\mathbb{E}}}
\newcommand{\etc}{{\it etc~}}
\newcommand{\etal}{{\it etal~}}
\newcommand{\xset}{\mathbf{X}}
\newcommand{\gammaset}{\boldsymbol{\Gamma}}
\newcommand{\ei}{\mathbf{e}_l^\alpha}
\newcommand{\sigmag}{{\nu}}
\newcommand{\BAS}{Bars-and-Stripes}
\newcommand{\qexpect}[1]{{\left\langle #1\right\rangle}}
\newcommand{\expect}[2]{{\mathop{\mathbb{E}}\limits_{\substack{#2}}\left[#1\right]}}
\newcommand{\pshift}[1]{{p_{\thetav+#1}}}
$$
</div>
### For an Obserable
Consider the expectation of $B$ on the state <span>$\vert\psi_N\rangle = U_{N:k+1} U_k(\eta)U_{k-1:1}\vert\psi_0\rangle$</span> with $U_k(\eta)=e^{i\Xi(\eta)}$.
The first task is to derive the gradient of such an expectation value, as appears in Refs. [2-4]; briefly, the gradient is
<div>$$\begin{align}\frac{\partial \langle B\rangle_\eta}{\partial \eta} &=i\langle \psi_0\vert U_{N:1}^\dagger BU_{N:k+1} \frac{\partial \Xi(\eta)}{\partial \eta} U_{k:1}\vert \psi_0\rangle-i\langle \psi_0\vert U_{k:1}^\dagger \frac{\partial \Xi(\eta)}{\partial \eta} U_{N:k+1}^\dagger BU_{N:1}\vert \psi_0\rangle\end{align}$$</div>
Here, we have used the fact that $\Xi(\eta)$ is Hermitian. Define $O_{k+1}\equiv U_{N:k+1}^\dagger BU_{N:k+1}$ and $\vert \psi_{k}\rangle\equiv U_{k:1}\vert \psi_0\rangle$, we have
<div>$$\begin{equation}\frac{\partial \langle B\rangle_\eta}{\partial \eta} =\langle\psi_{k}\vert i\left[O_{k+1}, \frac{\partial \Xi(\eta)}{\partial \eta}\right]\vert \psi_{k}\rangle.\end{equation}$$</div>
Define $A_\pm\equiv\frac{1}{\sqrt{2}} (1\pm i\frac{\partial \Xi(\eta)}{\partial \eta})$, we can easily verify that $ i\left[O_{k+1}, \frac{\partial \Xi(\eta)}{\partial \eta}\right]= A^\dagger_+ O_{k+1}A_+-A_-^\dagger O_{k+1}A_-$,
which can be estimated unbiasedly by constructing $\vert \psi_N\rangle_\pm = U_{N:k+1}A_\pm U_{k:1}\vert \psi_0\rangle$.
Notice that for a non-dissipative system we further require $A_\pm$ to be unitary, which means $\left(\frac{\partial \Xi(\eta)}{\partial \eta}\right)^2=1$ (e.g. Pauli operators, CNOT and SWAP).
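Before building the full machinery below, here is a minimal self-contained sketch that checks the $\pm\pi/2$ shift rule on a single qubit; the test angle $\theta=0.7$ is an arbitrary choice. With $|\psi\rangle = R_x(\theta)|0\rangle$ and $B=Z$ we have $\langle B\rangle = \cos\theta$, so the exact derivative is $-\sin\theta$.
```
import numpy as np
# single-qubit check of the pi/2 shift rule: <Z> = cos(theta) for Rx(theta)|0>
Z = np.diag([1., -1.])
def rx_mat(theta):
    return np.array([[np.cos(theta/2), -1j*np.sin(theta/2)],
                     [-1j*np.sin(theta/2), np.cos(theta/2)]])
def expect_z(theta):
    psi = rx_mat(theta).dot(np.array([1., 0.]))
    return np.real(psi.conj().dot(Z).dot(psi))
theta = 0.7
shift_grad = 0.5 * (expect_z(theta + np.pi/2) - expect_z(theta - np.pi/2))
print(shift_grad, -np.sin(theta))  # both ~ -0.6442
```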
```
###### Pauli Matrices ########
I2 = sps.eye(2).tocsr()
sx = sps.csr_matrix([[0,1],[1,0.]])
sy = sps.csr_matrix([[0,-1j],[1j,0.]])
sz = sps.csr_matrix([[1,0],[0,-1.]])
p0 = (sz + I2) / 2
p1 = (-sz + I2) / 2
h = (sx + sz) / np.sqrt(2.)
sxyz = [I2, sx, sy, sz]
# single bit rotation matrices
def _ri(si, theta):
return np.cos(theta/2.)*I2 - 1j*np.sin(theta/2.)*si
def rx(theta):
return _ri(sx, theta)
def ry(theta):
return _ri(sy, theta)
def rz(theta):
return _ri(sz, theta)
def rot(t1, t2, t3):
'''
a general rotation gate rz(t3)rx(r2)rz(t1).
Args:
t1, t2, t3 (float): three angles.
Returns:
2x2 csr_matrix: rotation matrix.
'''
return rz(t3).dot(rx(t2)).dot(rz(t1))
# multiple bit construction
def CNOT(ibit, jbit, n):
res = _([p0, I2], [ibit, jbit], n)
res = res + _([p1, sx], [ibit, jbit], n)
return res
def _(ops, locs, n):
'''
Put operators in a circuit and compile them.
notice the big end are high loc bits!
Args:
ops (list): list of single bit operators.
locs (list): list of positions.
n (int): total number of bits.
Returns:
csr_matrix: resulting matrix.
'''
if np.ndim(locs) == 0:
locs = [locs]
if not isinstance(ops, (list, tuple)):
ops = [ops]
locs = np.asarray(locs)
locs = n - locs
order = np.argsort(locs)
locs = np.concatenate([[0], locs[order], [n + 1]])
return _wrap_identity([ops[i] for i in order], np.diff(locs) - 1)
def _wrap_identity(data_list, num_bit_list):
if len(num_bit_list) != len(data_list) + 1:
raise Exception()
res = sps.eye(2**num_bit_list[0])
for data, nbit in zip(data_list, num_bit_list[1:]):
res = sps.kron(res, data)
res = sps.kron(res, sps.eye(2**nbit, dtype='complex128'))
return res
def initial_wf(num_bit):
wf = np.zeros(2**num_bit, dtype='complex128')
wf[0] = 1.
return wf
```
```
class ArbitraryRotation(object):
def __init__(self, num_bit):
self.num_bit = num_bit
self.mask = np.array([True] * (3*num_bit), dtype='bool')
def __str__(self):
return 'Rotate[%d]'%(self.num_param)
@property
def num_param(self):
return self.mask.sum()
def tocsr(self, theta_list):
'''transform this block to csr_matrix.'''
theta_list_ = np.zeros(3*self.num_bit)
theta_list_[self.mask] = theta_list
rots = [rot(*ths) for ths in theta_list_.reshape([self.num_bit,3])]
res = [_([r], [i], self.num_bit) for i,r in enumerate(rots)]
return res
class CNOTEntangler(object):
def __init__(self, num_bit, pairs):
self.num_bit = num_bit
self.pairs = pairs
def __str__(self):
pair_str = ','.join(['%d-%d'%(i,j) for i,j in self.pairs])
return 'CNOT(%s)'%(pair_str)
@property
def num_param(self):
return 0
def tocsr(self, theta_list):
'''transform this block to csr_matrix.'''
i, j = self.pairs[0]
res = CNOT(i, j, self.num_bit)
for i, j in self.pairs[1:]:
res = CNOT(i,j,self.num_bit).dot(res)
res.eliminate_zeros()
return [res]
```
```
class BlockQueue(list):
'''
Block queue that keeps track of the theta_list change history, for fast updates.
'''
def __init__(self, *args):
list.__init__(self, *args)
self.theta_last = None
self.memo = None
def __call__(self, qureg, theta_list):
# cache: if at most one parameter of theta_list changed, don't touch the memoized matrices.
remember = self.theta_last is None or (abs(self.theta_last-theta_list)>1e-12).sum() > 1
mats = []
theta_last = self.theta_last
if remember:
self.theta_last = theta_list.copy()
qureg_ = qureg
for iblock, block in enumerate(self):
# generate or use a block matrix
num_param = block.num_param
theta_i, theta_list = np.split(theta_list, [num_param])
if theta_last is not None:
theta_o, theta_last = np.split(theta_last, [num_param])
if self.memo is not None and (num_param==0 or np.abs(theta_i-theta_o).max()<1e-12):
# use data cached in memory
mat = self.memo[iblock]
else:
if self.memo is not None and not remember:
# update the changed gate, but not touching memory.
mat = _rot_tocsr_update1(block, self.memo[iblock], theta_o, theta_i)
else:
# regenerate one
mat = block.tocsr(theta_i)
for mat_i in mat:
qureg_ = mat_i.dot(qureg_)
mats.append(mat)
if remember:
# cache data
self.memo = mats
# update register
qureg[...] = qureg_
@property
def num_bit(self):
return self[0].num_bit
@property
def num_param(self):
return sum([b.num_param for b in self])
def __str__(self):
return '\n'.join([str(b) for b in self])
def _rot_tocsr_update1(layer, old, theta_old, theta_new):
'''
rotation layer csr_matrix update method.
Args:
layer (ArbitraryRotation): rotation layer.
old (csr_matrix): old matrices.
theta_old (1darray): old parameters.
theta_new (1darray): new parameters.
Returns:
csr_matrix: new rotation matrices after the theta changed.
'''
idiff_param = np.where(abs(theta_old-theta_new)>1e-12)[0].item()
idiff = np.where(layer.mask)[0][idiff_param]
# get rotation parameters
isite = idiff//3
theta_list_ = np.zeros(3*layer.num_bit)
theta_list_[layer.mask] = theta_new
new = old[:]
new[isite] = _(rot(*theta_list_[isite*3:isite*3+3]), isite, layer.num_bit)
return new
```
```
def get_nn_pairs(geometry):
'''define the qubit pairs that CNOT gates will be applied to.'''
num_bit = np.prod(geometry)
if len(geometry) == 2:
nrow, ncol = geometry
res = []
for ij in range(num_bit):
i, j = ij // ncol, ij % ncol
res.extend([(ij, i_ * ncol + j_)
for i_, j_ in [((i + 1) % nrow, j), (i, (j + 1) % ncol)]])
return res
elif len(geometry) == 1:
res = []
for inth in range(2):
for i in range(inth, num_bit, 2):
res = res + [(i, i_ % num_bit) for i_ in range(i + 1, i + 2)]
return res
else:
raise NotImplementedError('')
```
```
def get_diff_circuit(num_bit, depth, pairs):
'''
build a differentiable circuit
Args:
num_bit (int): number of qubit.
depth (int): depth of circuit.
pairs (list): list of tuples that represent where CNOT gates applied.
'''
blocks = []
# build circuit
for idepth in range(depth+1):
blocks.append(ArbitraryRotation(num_bit))
if idepth!=depth:
blocks.append(CNOTEntangler(num_bit, pairs))
# set leading and trailing Rz to disabled
blocks[0].mask[::3] = False
blocks[-1].mask[2::3] = False
return BlockQueue(blocks)
```
```
depth = 2
geometry = (4,)
num_bit = np.prod(geometry)
pairs = get_nn_pairs(geometry)
circuit = get_diff_circuit(num_bit, depth, pairs)
print(circuit)
```
Rotate[8]
CNOT(0-1,2-3,1-2,3-0)
Rotate[12]
CNOT(0-1,2-3,1-2,3-0)
Rotate[8]
```
theta_list = np.random.random(circuit.num_param)*np.pi*2
wf = initial_wf(num_bit)
circuit(wf, theta_list)
print(wf)
```
[ 0.00056575+0.09888234j 0.19134579-0.30111828j -0.1689071 +0.31456431j
-0.03176943+0.12489008j -0.0963301 +0.2707479j -0.10215699-0.2329753j
-0.25322989+0.02847686j -0.21620301-0.08289819j 0.1910678 -0.08112196j
-0.12659273-0.319008j 0.16075672+0.14750737j -0.18150813+0.20632003j
-0.17513398-0.16878919j -0.04635411-0.27225991j 0.14554747-0.00530493j
-0.06474349+0.0903669j ]
```
# construct an observable
observable = np.random.randn(*[2**num_bit]*2)+1j*np.random.randn(*[2**num_bit]*2)
observable += observable.T.conj()
def expect_val(theta_list):
wf = initial_wf(num_bit)
circuit(wf, theta_list)
return wf.conj().dot(observable.dot(wf))
def gradient_numerical(theta_list, delta=1e-2):
'''
numerical differentiation.
'''
grad = []
for i in range(len(theta_list)):
theta_list[i] += delta/2.
loss_pos = expect_val(theta_list)
theta_list[i] -= delta
loss_neg = expect_val(theta_list)
theta_list[i] += delta/2.
grad_i = (loss_pos - loss_neg)/delta
grad.append(grad_i)
return np.array(grad)
def gradient(theta_list):
'''
cheat and get gradient.
'''
grad = []
for i in range(len(theta_list)):
# pi/2 phase
theta_list[i] += np.pi/2.
mean_pos = expect_val(theta_list)
# -pi/2 phase
theta_list[i] -= np.pi
mean_neg = expect_val(theta_list)
# recover
theta_list[i] += np.pi/2.
grad.append((mean_pos - mean_neg)/2.)
return np.array(grad)
g1 = gradient(theta_list)
print("Exact = ", g1)
g2 = gradient_numerical(theta_list)
print("Diff = ", g1 - g2)
```
Exact = [-0.89463303-1.38777878e-16j -0.11217499-1.11022302e-16j
-1.79280291-5.55111512e-17j -0.5864455 -1.11022302e-16j
1.26719505-1.52655666e-16j -0.09476827-3.88578059e-16j
0.72094707+1.45716772e-16j 1.93379526+5.55111512e-17j
-2.4973808 -2.77555756e-17j 0.35375861+2.49800181e-16j
-1.48451599+5.55111512e-17j -0.68485904-1.11022302e-16j
0.690804 -5.55111512e-17j -0.59058008-5.55111512e-17j
0.44002793-1.11022302e-16j -2.35894687+4.16333634e-17j
0.59581721+4.16333634e-17j -2.59120551-1.66533454e-16j
0.05863847+5.55111512e-17j -2.62490017+0.00000000e+00j
-0.22833212+4.16333634e-17j 0.56197748-1.11022302e-16j
-1.62767971-5.55111512e-17j -0.82558004+1.38777878e-16j
-0.99766406-8.32667268e-17j 0.67353332-2.63677968e-16j
-0.13788348+9.02056208e-17j 2.27469931+1.11022302e-16j]
Diff = [-3.72763313e-06+1.65145675e-14j -4.67395367e-07-1.95399252e-14j
-7.47000269e-06-3.05866443e-14j -2.44351980e-06-1.39888101e-14j
5.27997280e-06-1.52655666e-16j -3.94867201e-07+2.38697950e-15j
3.00394229e-06+8.47238946e-15j 8.05747028e-06+2.22599716e-14j
-1.04057402e-05-8.35442826e-15j 1.47399244e-06-3.02813330e-14j
-6.18547567e-06+2.78110868e-14j -2.85357564e-06-1.95399252e-14j
2.87834626e-06+4.99045250e-14j -2.46074718e-06+4.43534098e-14j
1.83344727e-06-2.23154828e-14j -9.82893314e-06-1.93872696e-14j
2.48256844e-06-2.73392420e-15j -1.07966763e-05+2.20379270e-14j
2.44326573e-07-2.72004641e-15j -1.09370703e-05-6.38378239e-14j
-9.51382633e-07+3.88994392e-14j 2.34156982e-06-1.12132525e-14j
-6.78199028e-06+3.60267371e-14j -3.43991266e-06-2.63677968e-15j
-4.15692840e-06+2.21211938e-14j 2.80638530e-06-1.41414658e-14j
-5.74513745e-07+1.95191086e-14j 9.47790194e-06+1.39888101e-14j]
### For statistic functional (MMD)
Next, we describe a new class of differentiable losses which cannot easily be written as an observable: the statistic functionals. For simplicity, we consider an arbitrary statistic functional $f(\xset)$, with a sequence of bit strings $\xset\equiv\{x_1,x_2,\ldots, x_r\}$ as its arguments.
Let's define the following expectation of this function
<div>$$\begin{equation}\Expect_f(\gammaset)\equiv\expect{f(\xset)}{\{x_i\sim \pshift{\gammav_i}\}_{i=1}^{r}}. \end{equation}$$</div>
Here, $\gammaset=\{\gammav_1, \gammav_2,\ldots,\gammav_r\}$ is the set of offset angles applied to the circuit parameters;
each element $\gammav_i$ lives in the same parameter space as $\thetav$ and represents a shift of $\thetav$,
which means the probability distributions of the generated samples are
$\{\pshift{\gammav_1}, \pshift{\gammav_2},\ldots ,\pshift{\gammav_r}\}$.
Writing out the above expectation explicitly, we have
<div>$$\begin{equation}\Expect_f(\gammaset)=\sum\limits_\xset f(\xset)\prod\limits_i \pshift{\gammav_i}(x_i),\end{equation}$$</div>
where index $i$ runs from $1$ to $r$. Its partial derivative with respect to $\thetai$ is
<div>$$\begin{equation}\frac{\partial \Expect_f(\gammaset)}{\partial \thetai}=\sum\limits_\xset f(\xset)\sum\limits_j\frac{\partial \pshift{\gammav_j}(x_j)}{\partial\thetai}\prod\limits_{i\neq j} \pshift{\gammav_i}(x_i)\end{equation}$$</div>
Again, using the gradient of probability, we have
<div>$$\begin{align}\frac{\partial \Expect_f(\gammaset)}{\partial \thetai}&=\frac{1}{2}\sum\limits_{j,s=\pm}s\sum\limits_\xset f(\xset){\pshift{\gammav_j+s\frac{\pi}{2}\ei}(x_j)}\prod\limits_{i\neq j} \pshift{\gammav_i}(x_i)\\&=\frac{1}{2}\sum\limits_{j,s=\pm}s\,\Expect_f(\{\gammav_i+s\delta_{ij}\frac{\pi}{2}\ei\}_{i=1}^{r})\end{align}$$</div>
If $f$ is symmetric, $\Expect_f(\mathbf{0})$ becomes a V-statistic, and the gradient can be further simplified to
<div>$$\begin{align}\frac{\partial \Expect_f(\gammaset)}{\partial \thetai}=\frac{r}{2}\sum\limits_{s=\pm}s\,\Expect_f\left(\{\gammav_0+s\frac{\pi}{2}\ei,\gammav_1,\ldots,\gammav_r\}\right),\end{align}$$</div>
which contains only two terms. This result can be readily verified by calculating the gradient of MMD loss,
noticing the expectation of a kernel function is a V-statistic of degree $2$.
By repeatedly applying the gradient formula, we will be able to obtain higher order gradients.
# Build The Gradient training framework for Born Machine
```
class QCBM(object):
'''
Quantum Circuit Born Machine,
Args:
circuit (BlockQueue): the circuit architecture.
batch_size (int|None): introducing sampling error, None for no sampling error.
'''
def __init__(self, circuit, mmd, p_data, batch_size=None):
self.circuit = circuit
self.mmd = mmd
self.p_data = p_data
self.batch_size = batch_size
@property
def depth(self):
return (len(self.circuit)-1)//2
def pdf(self, theta_list):
'''get probability distribution function'''
wf = initial_wf(self.circuit.num_bit)
self.circuit(wf, theta_list)
pl = np.abs(wf)**2
# introducing sampling error
if self.batch_size is not None:
pl = prob_from_sample(sample_from_prob(np.arange(len(pl)), pl, self.batch_size),
len(pl), False)
return pl
def mmd_loss(self, theta_list):
'''get the loss'''
# get and cache the probability distribution of the Born Machine
self._prob = self.pdf(theta_list)
# use wave function to get mmd loss
return self.mmd(self._prob, self.p_data)
def gradient(self, theta_list):
'''
cheat and get gradient.
'''
prob = self.pdf(theta_list)
grad = []
for i in range(len(theta_list)):
# pi/2 phase
theta_list[i] += np.pi/2.
prob_pos = self.pdf(theta_list)
# -pi/2 phase
theta_list[i] -= np.pi
prob_neg = self.pdf(theta_list)
# recover
theta_list[i] += np.pi/2.
grad_pos = self.mmd.kernel_expect(prob, prob_pos) - self.mmd.kernel_expect(prob, prob_neg)
grad_neg = self.mmd.kernel_expect(self.p_data, prob_pos) - self.mmd.kernel_expect(self.p_data, prob_neg)
grad.append(grad_pos - grad_neg)
return np.array(grad)
def gradient_numerical(self, theta_list, delta=1e-2):
'''
numerical differentiation.
'''
grad = []
for i in range(len(theta_list)):
theta_list[i] += delta/2.
loss_pos = self.mmd_loss(theta_list)
theta_list[i] -= delta
loss_neg = self.mmd_loss(theta_list)
theta_list[i] += delta/2.
grad_i = (loss_pos - loss_neg)/delta
grad.append(grad_i)
return np.array(grad)
def sample_from_prob(x, pl, num_sample):
'''
sample x from probability.
'''
pl = 1. / pl.sum() * pl
indices = np.arange(len(x))
res = np.random.choice(indices, num_sample, p=pl)
return np.array([x[r] for r in res])
def prob_from_sample(dataset, hndim, packbits):
'''
empirical probability from data.
'''
if packbits:
dataset = packnbits(dataset).ravel()
p_data = np.bincount(dataset, minlength=hndim)
p_data = p_data / float(np.sum(p_data))
return p_data
```
```
def load_gaussian(num_bit, depth, batch_size=None):
'''gaussian distribution.'''
geometry = (num_bit,)
hndim = 2**num_bit
# standard circuit
pairs = get_nn_pairs(geometry)
circuit = get_diff_circuit(num_bit, depth, pairs)
# bar and stripe
p_bs = gaussian_pdf(geometry, mu=hndim/2., sigma=hndim/4.)
# mmd loss
mmd = RBFMMD2(sigma_list=[0.25,4], basis=digit_basis(geometry))
# Born Machine
bm = QCBM(circuit, mmd, p_bs, batch_size=batch_size)
return bm
```
```
bm = load_gaussian(6, depth)
theta_list = np.random.random(bm.circuit.num_param)*2*np.pi
print(bm.mmd_loss(theta_list))
```
0.03315822936156594
```
g1 = bm.gradient(theta_list)
print(g1)
g2 = bm.gradient_numerical(theta_list)
print(g1-g2)
```
[ 5.42728081e-03 9.10485946e-03 -1.30299761e-02 -7.05065456e-05
4.31816240e-03 -4.79203832e-05 3.56761090e-03 3.84413542e-03
-4.35883820e-03 1.38246431e-03 3.58551616e-03 4.92344825e-03
2.20604531e-03 -5.04621581e-04 1.39651786e-02 1.90459756e-03
-3.13879420e-03 -1.11116678e-02 -2.89498314e-03 2.55574896e-03
-9.70312153e-04 -3.34279719e-03 -1.60688966e-02 2.08389163e-03
8.82359256e-03 -9.58209336e-03 2.78738079e-03 -2.52477728e-03
-5.77484406e-03 -3.89837130e-03 -3.46751009e-03 2.82087082e-03
2.96495644e-04 -6.05037633e-03 -8.88916704e-05 -7.05716254e-03
1.12644988e-03 1.52902085e-04 8.85146949e-03 5.39032545e-03
-1.66131241e-03 7.14929170e-04]
[ 1.04943764e-07 2.65206660e-07 -1.97721522e-07 -8.47951755e-09
8.58076832e-08 -4.73371911e-10 8.58643784e-08 3.15749608e-08
-1.46184334e-07 -5.40492179e-10 1.63113497e-07 5.62470692e-08
-1.53586267e-08 -3.59366711e-08 1.50532489e-07 1.49675920e-07
-1.19718795e-07 1.23396361e-07 -1.65125155e-07 9.87286134e-08
-1.76287260e-09 -3.05225820e-08 -1.53085829e-07 2.56574020e-08
6.25544895e-08 -1.63353482e-07 7.40252018e-08 -4.93648129e-08
2.41868066e-08 -4.79960755e-08 -4.39826612e-08 5.15779414e-08
1.70194484e-10 -8.99080903e-08 1.81158089e-10 -1.31713408e-07
2.71245671e-08 9.93821506e-08 2.71624440e-08 1.51837504e-09
-1.17952479e-07 1.17750322e-07]
# Different Training Strategies
```
from scipy.optimize import OptimizeResult
def spsa(fun, x0, args=(), bounds=None, ac=(0.2, 0.5), alpha=0.602,
A=None, gamma=0.101, maxiter=5000, callback=None):
'''
simultaneous perturbation stochastic approximation.
Spall, J. C. (1998).
Implementation of the simultaneous perturbation algorithm for stochastic optimization.
IEEE Transactions on Aerospace and Electronic Systems.
https://doi.org/10.1109/7.705889
Args:
fun (func): loss function.
x0 (ndarray): initial variables.
args (tuple, default=()): additional input parameters for func.
bounds (tuple|None, default=None): lower bound and higher bound for variables, None for no bounds.
ac (tuple, default=(0.2,0.5)): initial learning rate and initial perturbation strength.
A (number, default=0.1*maxiter): stability constant.
alpha (float, default=0.602): decay rate for learning speed.
gamma (float, default=0.101): decay rate for perturbation strength.
maxiter (int, default=5000): maximum number of iteration.
callback (func): func(iiter, x) called after each update, with `iiter` the iteration step and `x` the value of variables.
Note:
The choice of parameters,
* `alpha` and `gamma` have theoretically valid default values 0.602 and 0.101.
* in a high-noise setting, pick a smaller `a` and a larger `c`.
'''
if A is None:
A = 0.1 * maxiter
a, c = ac
p = len(x0)
for k in range(maxiter):
ak = a / (k + 1 + A)**alpha
ck = c / (k + 1)**gamma
g = _get_g(fun, x0, args, ck, return_hessian=False)
x0 -= ak * g
if bounds is not None:
np.clip(x0, *bounds, out=x0)
if callback is not None:
callback(x0)
return OptimizeResult(x=x0, fun=fun(x0, *args), success=True)
def _get_g(fun, x0, args, ck, return_hessian):
'''calculate gradient'''
p = len(x0)
delta = (2 * np.random.randint(0, 2, p) - 1) * ck
xpos, xneg = x0 + delta, x0 - delta
fpos, fneg = fun(xpos, *args), fun(xneg, *args)
g = (fpos - fneg) / (2 * delta)
if return_hessian:
delta1 = (2 * np.random.randint(0, 2, p) - 1) * ck
fneg_ = fun(xneg + delta1, *args)
fpos_ = fun(xpos + delta1, *args)
g1n = (fneg_ - fneg) / delta1
g1p = (fpos_ - fpos) / delta1
hessian = (g1p - g1n) / 4. / delta[:, None]
hessian += hessian.T
return g, hessian
return g
```
```
def train(bm, theta_list, method, max_iter=1000, popsize=50, step_rate=0.1):
'''train a Born Machine.'''
step = [0]
def callback(x, *args, **kwargs):
step[0] += 1
if step[0]%(max_iter//10) == 1:
print('step = %d, loss = %s'%(step[0], bm.mmd_loss(x)))
theta_list = np.array(theta_list)
if method == 'SPSA':
res = spsa(bm.mmd_loss, x0=theta_list,
maxiter=max_iter, callback=callback,
)
return res.fun, res.x
elif method == 'Adam':
try:
from climin import Adam
except:
!pip install git+https://github.com/BRML/climin.git
from climin import Adam
optimizer = Adam(wrt=theta_list, fprime=bm.gradient,step_rate=step_rate)
for info in optimizer:
callback(theta_list)
if step[0] == max_iter:
break
return bm.mmd_loss(theta_list), theta_list
else:
from scipy.optimize import minimize
res = minimize(bm.mmd_loss, x0=theta_list,
method=method, jac = bm.gradient, tol=1e-12,
options={'maxiter': max_iter, 'disp': 0, 'gtol':1e-10, 'ftol':0},
callback=callback,
)
return res.fun, res.x
```
```
depth = 6
np.random.seed(2)
bm = load_gaussian(6, depth)
theta_list = np.random.random(bm.circuit.num_param)*2*np.pi
loss, theta_list = train(bm, theta_list, 'L-BFGS-B', max_iter=20)
pl = bm.pdf(theta_list)
# display
plt.plot(bm.p_data)
plt.plot(pl)
plt.legend(['Data', 'Gradient Born Machine'])
plt.show()
```
```
def load_barstripe(geometry, depth):
'''3 x 3 bar and stripes.'''
num_bit = np.prod(geometry)
# standard circuit
pairs = get_nn_pairs(geometry)
circuit = get_diff_circuit(num_bit, depth, pairs)
# bar and stripe
p_bs = barstripe_pdf(geometry)
# mmd loss
mmd = RBFMMD2(sigma_list=[2.], basis=binary_basis((num_bit,)))
# Born Machine
bm = QCBM(circuit, mmd, p_bs)
return bm
```
# Training with Noisy Data
A finite sample (batch) size — here $N=2000$ — introduces sampling noise.
We compare training using Adam and SPSA.
```
np.random.seed(2)
depth = 4
geometry = (2, 3)
bm = load_barstripe(geometry, depth)
bm.batch_size = 2000
theta_list = np.random.random(bm.circuit.num_param)*2*np.pi
loss, theta_list = train(bm, theta_list, 'Adam', max_iter=100, popsize=10)
pl = bm.pdf(theta_list)
# display
plt.plot(bm.p_data)
plt.plot(pl)
plt.legend(['Data', 'Gradient Born Machine'])
plt.show()
```
```
# generate samples
size = (7, 5)
pl = bm.pdf(theta_list)
indices = np.random.choice(np.arange(len(pl)), np.prod(size), p=pl)
samples = binary_basis(geometry)[indices]
# show
plot_bs(samples, size)
```
```
theta_list = np.random.random(bm.circuit.num_param)*2*np.pi
loss, theta_list = train(bm, theta_list, 'SPSA', max_iter=100*bm.circuit.num_param, popsize=10)
pl = bm.pdf(theta_list)
# display
plt.plot(bm.p_data)
plt.plot(pl)
plt.legend(['Data', 'Gradient Born Machine (SPSA)'])
plt.show()
```
# SPSA not Working?
Let's optimize a problem with 24 parameters.
```
test_func = lambda x: np.cos(x).sum()
max_iter = 2000
step = [0]
def callback(x, *args, **kwargs):
step[0] += 1
if step[0]%(max_iter//50) == 1:
print('step = %d, loss = %s'%(step[0], test_func(x)))
res = spsa(test_func, x0=np.random.random(24),
maxiter=max_iter, callback=callback,
)
```
step = 1, loss = 20.796581751465737
step = 41, loss = 17.67989562802431
step = 81, loss = 14.08103567206227
step = 121, loss = 8.745535205647682
step = 161, loss = 4.02103044679887
step = 201, loss = -0.07336469454172878
step = 241, loss = -2.797866790716694
step = 281, loss = -5.78570627582404
step = 321, loss = -8.718937120989885
step = 361, loss = -11.091224065672787
step = 401, loss = -13.055019525299844
step = 441, loss = -14.502209466639925
step = 481, loss = -16.099918437533287
step = 521, loss = -17.25192374217347
step = 561, loss = -18.078309988841006
step = 601, loss = -18.986836301802008
step = 641, loss = -19.573928884899914
step = 681, loss = -20.243853615671508
step = 721, loss = -21.268868044065307
step = 761, loss = -21.74049667232036
step = 801, loss = -22.37825658815569
step = 841, loss = -22.60812656731494
step = 881, loss = -22.882512300222977
step = 921, loss = -23.094404364598198
step = 961, loss = -23.30299976104161
step = 1001, loss = -23.454831268909643
step = 1041, loss = -23.553484173799554
step = 1081, loss = -23.62627573507488
step = 1121, loss = -23.661460288637393
step = 1161, loss = -23.74388550471457
step = 1201, loss = -23.78603435687063
step = 1241, loss = -23.815359056210152
step = 1281, loss = -23.841780252815113
step = 1321, loss = -23.86293791434619
step = 1361, loss = -23.8795268652448
step = 1401, loss = -23.900278455052
step = 1441, loss = -23.914259581091514
step = 1481, loss = -23.92589747248898
step = 1521, loss = -23.93845703198031
step = 1561, loss = -23.950247653366162
step = 1601, loss = -23.958551052042164
step = 1641, loss = -23.965449023306057
step = 1681, loss = -23.970440896625725
step = 1721, loss = -23.974319086828523
step = 1761, loss = -23.978254013848492
step = 1801, loss = -23.981154360769914
step = 1841, loss = -23.983476436793808
step = 1881, loss = -23.98612699154696
step = 1921, loss = -23.98821088452821
step = 1961, loss = -23.989658502179193
SPSA is ok, but not for noisy quantum circuits.
# Inference using Amplitude Amplification
```
```
# Gradient Vanishing?
* Using random circuit
* the derivative with respect to the parameters $\theta$ is Lipschitz continuous with some parameter $\eta$ that depends on the operator $H$.
$$\langle \partial_k E\rangle =\int dU p(U)\partial _k\langle0|U(\theta)^\dagger HU(\theta)|0\rangle$$
McClean, J. R., Boixo, S., Smelyanskiy, V. N., Babbush, R., & Neven, H. (2018). Barren plateaus in quantum neural network training landscapes. arXiv:1803.11173, 1–7. Retrieved from https://arxiv.org/pdf/1803.11173.pdf
```
depth = 6
np.random.seed(2)
num_bit_list = np.arange(2, 14, 2)
grad_absmean_list = []
for num_bit in num_bit_list:
print('number of bit = %d'%num_bit)
bm = load_gaussian(num_bit, depth)
grad = bm.gradient(np.random.random(bm.circuit.num_param)*2*np.pi)
grad_absmean_list.append(np.abs(grad).mean())
```
number of bit = 2
number of bit = 4
number of bit = 6
number of bit = 8
number of bit = 10
number of bit = 12
```
plt.plot(num_bit_list, grad_absmean_list)
plt.yscale('log')
plt.ylabel("gradient", fontsize=18)
plt.xlabel("number of qubit", fontsize=18)
plt.show()
```
# Inference and Learning Wave Functions
to be added...
#### Ref
Low, Guang Hao, Theodore J. Yoder, and Isaac L. Chuang. "Quantum inference on Bayesian networks." Physical Review A 89.6 (2014): 062315.
```
```
|
11bde836fbb8a20d4d076bfafb9670731b66eb6b
| 395,481 |
ipynb
|
Jupyter Notebook
|
notebooks/qcbm_advanced.ipynb
|
GiggleLiu/QuantumCircuitBornMachine
|
cb0648843210a249392b3600577eb69455b78b3d
|
[
"MIT"
] | 32 |
2018-04-10T14:34:28.000Z
|
2021-08-20T15:03:21.000Z
|
notebooks/qcbm_advanced.ipynb
|
GiggleLiu/QuantumCircuitBornMachine
|
cb0648843210a249392b3600577eb69455b78b3d
|
[
"MIT"
] | null | null | null |
notebooks/qcbm_advanced.ipynb
|
GiggleLiu/QuantumCircuitBornMachine
|
cb0648843210a249392b3600577eb69455b78b3d
|
[
"MIT"
] | 8 |
2018-06-17T01:09:50.000Z
|
2021-04-19T20:53:19.000Z
| 174.067342 | 63,538 | 0.84182 | true | 12,857 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.824462 | 0.766294 | 0.63178 |
__label__eng_Latn
| 0.388405 | 0.306167 |
# Taylor series
> ### $ f(x) = a_0 + a_1x + a_2x^2 + a_3x^3 + a_4x^4 + ... = \displaystyle \sum_{i=0}^{\infty} a_{i}x^{i} $
> ### $ f(x) = a_0(x-a)^0 + a_1(x-a)^1 + a_2(x-a)^2 + a_3(x-a)^3 + a_4(x-a)^4 + ... = \displaystyle \sum_{i=0}^{\infty} a_{i}(x-a)^{i} $
$$\require{cancel}$$
# differential
> ### $ \therefore f(x)|_{x=a} = \color{red}{f(a) = a_0} \\ f(x) = f(a) + a_1(x-a) + a_2(x-a)^2 + a_3(x-a)^3 + a_4(x-a)^4 + ...$
> ### $ f'(x)= \cancelto{}{a_0} + a_1 + 2a_2(x-a) + 3a_3(x-a)^2 + 4a_4(x-a)^3 + \dots \\ \therefore f'(x)|_{x=a} = \color{red}{f'(a) = a_1} $
> ### $ f''(x)= 2a_2 + 3\cdot 2\, a_3(x-a) + 4\cdot 3\, a_4(x-a)^2 + \dots \\ \therefore f''(x)|_{x=a}= \color{red}{f''(a) = 2a_2 \;\Rightarrow\; a_2 = \frac{f''(a)}{2}} $
> ### $ f^{(3)}(x)= 3\cdot 2\, a_3 + 4\cdot 3\cdot 2\, a_4(x-a) + \dots \\ \therefore f^{(3)}(x)|_{x=a} = \color{red}{f^{(3)}(a) = 3!\,a_3 \;\Rightarrow\; a_3 = \frac{f^{(3)}(a)}{3!}} $
---
> ### $
f(x) = f(a) + f'(a)(x-a) + \frac{f''(a)(x-a)^2}{2} + \frac{f^{(3)}(a)(x-a)^3}{3*2} + \frac{f^{(4)}(a) (x-a)^4}{4*3*2} + \frac{f^{(5)}(a) (x-a)^5}{5*4*3*2} + ...
\\
f(x) = \frac{f(a)}{0!} + \frac{f'(a)(x-a)}{1!} + \frac{f''(a)(x-a)^2}{2!} + \frac{f^{(3)}(a)(x-a)^3}{3!} + \frac{f^{(4)}(a) (x-a)^4}{4!} + \frac{f^{(5)}(a) (x-a)^5}{5!} + ...
$
> ## $ \displaystyle \therefore f(x)|_{x=a} = \color{red}{\sum_{n=0}^{\infty} \frac{(x-a)^n}{n!} f^{(n)}(a)}\;$
> ## $ \displaystyle \therefore f(x)|_{x=0} = \color{red}{\sum_{n=0}^{\infty} \frac{x^n}{n!}f^{(n)}(0)} $
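A quick sympy sketch of this coefficient formula (the test function $\sin$, the expansion point $a=1$ and the truncation order are arbitrary choices): summing $\frac{f^{(n)}(a)}{n!}h^n$ and checking that the difference to $f(a+h)$ vanishes up to the truncated order.
```python
import sympy as sm
x, h = sm.symbols('x h')
a, f = 1, sm.sin(x)
# partial Taylor sum built from the derivative coefficients f^(n)(a)/n!
taylor = sum(f.diff(x, n).subs(x, a) / sm.factorial(n) * h**n for n in range(8))
exact = sm.sin(a + h)
print(sm.series(exact - taylor, h, 0, 8))  # -> O(h**8): all lower orders cancel
```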
# $\color{red}{e^{x}|_{x=0}}$
> ### $
\left .
\begin{array}{}
e^0 = 1, \therefore a_0 = 1 \\
\frac{d}{dx}e^x|_{x=0} \to e^{0} \therefore a_1 = 1 \\
\frac{d^2}{dx^2}e^x|_{x=0} \to e^{0} \therefore a_2 = 1 \\
\frac{d^3}{dx^3}e^x|_{x=0} \to e^{0} \therefore a_3 = 1 \\
\end{array}\right \}
\begin{array}{}
\displaystyle \sum_{n=0}^{\infty} \frac{x^n}{n!}f^{(n)}(0) \\
e^x = 1 + \frac{x}{1} + \frac{x^2}{2!} + \frac{x^3}{3!} + ..,
= \displaystyle \sum_{n=0}^{\infty} \frac{x^{n}}{n!}
\end{array}
$
# [$\cos(x)|_{x=0}$](https://www.youtube.com/watch?v=LDBnS4c7YbA&t=160s)
> ### $
\left .
\begin{array}{}
cos(0) = 1 \therefore a_0 =1 \\
\frac{d}{dx}\cos x|_{x=0} = -sin(x)|_{x=0} \therefore a_1 = 0 \\
\frac{d^2}{dx^2}\cos x|_{x=0} = -cos(x)|_{x=0} \therefore a_2 = -1 \\
\frac{d^3}{dx^3}\cos x|_{x=0} = sin(x)|_{x=0} \therefore a_3 = 0 \\
\frac{d^4}{dx^4}\cos x|_{x=0} = cos(x)|_{x=0} \therefore a_4 = 1 \\
\frac{d^5}{dx^5}\cos x|_{x=0} = a_1 \\
\frac{d^6}{dx^6}\cos x|_{x=0} = a_2 \\
\end{array}\right \}
\begin{array}{}
1 + 0\frac{x}{1} + -1\frac{x^2}{2!} + 0\frac{x^3}{3!} + 1\frac{x^4}{4!} + 0 \frac{x^5}{5!} + -1\frac{x^6}{6!} + .., \\
1 - \frac{x^2}{2!} + \frac{x^4}{4!} - \frac{x^6}{6!} + \frac{x^8}{8!} .., = \displaystyle \sum_{n=0}^{\infty} \frac{x^{2n}}{(2n)!}(-1)^n
\end{array}
$
# $\color{red}{\sin(x)|_{x=0}}$
> ### $
\left .
\begin{array}{}
sin(0) = 0 \therefore a_0 = 0 \\
\frac{d}{dx}\sin x|_{x=0} = cos(x) \therefore a_1 = 1 \\
\frac{d^2}{dx^2}\sin x|_{x=0} = -sin(x) \therefore a_2 = 0 \\
\frac{d^3}{dx^3}\sin x|_{x=0} = -cos(x) \therefore a_3 = -1 \\
\frac{d^4}{dx^4}\sin x|_{x=0} = sin(x) \therefore a_4 = a_0 = 0 \\
\frac{d^5}{dx^5}\sin x|_{x=0} \therefore a_1 = a_5 = 1 \\
\frac{d^6}{dx^6}\sin x \therefore a_2 = a_6 = 0 \\
\end{array}\right \}
\begin{array}{}
0 + 1\frac{x}{1} + (0)\frac{x^2}{2!} + (-1)\frac{x^3}{3!} + (0)\frac{x^4}{4!} + (1) \frac{x^5}{5!} + (0)\frac{x^6}{6!} + .., \\
\frac{x^1}{1!} - \frac{x^3}{3!} + \frac{x^5}{5!} - \frac{x^7}{7!} ..,
= \displaystyle \sum_{n=0}^{\infty} \frac{x^{2n+1}}{(2n+1)!}(-1)^n
\end{array}
$
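A numerical sketch comparing truncated partial sums of the three series above with numpy's `exp`, `cos` and `sin`; the evaluation point $x=1.2$ and the number of terms are arbitrary choices.
```python
import numpy as np
from math import factorial
x, N = 1.2, 10
exp_sum = sum(x**n / factorial(n) for n in range(N))
cos_sum = sum((-1)**n * x**(2*n) / factorial(2*n) for n in range(N))
sin_sum = sum((-1)**n * x**(2*n + 1) / factorial(2*n + 1) for n in range(N))
# all three differences are ~ 0
print(exp_sum - np.exp(x), cos_sum - np.cos(x), sin_sum - np.sin(x))
```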
# [$\Delta f$](https://www.youtube.com/watch?v=6fBiTe3LA8c)
> ### $[x_0-h] - [x_0] - [x_0+h]$
>> ### $[x_0]$ is constant, but x is independent variable
>>> ### $ f(x_0) = f(x_0) + (x-x_0)\,f'(x_0) + \frac{(x-x_0)^2}{2!}f''(x_0) + \frac{(x-x_0)^3}{3!}f^{(3)}(x_0) + ...$
>> ### $[x_0 + h]$ - substitude $x$ to $[x_0 + h]$
>>> ### $ f(x_0 + h) = f(x_0) + (x_0+h - x_0)f'(x_0) + \frac{((x_0 + h) - x_0)^2}{2!}f''(x_0) + \frac{((x_0+h)-x_0)^3}{3!}f^{(3)}(x_0) + ...$
>>> ### $ f(x_0 + h) = f(x_0) + hf'(x_0) + \frac{h^2}{2!}f''(x_0) + \frac{h^3}{3!}f^{(3)}(x_0) + ...$
>> ### generalization
>>> ### $ f(x+h) = f(x) + h\, f'(x) + \frac{h^2}{2!}f''(x) + \frac{h^3}{3!}f^{(3)}(x) + ...$
> ### $\because f(x+h) - f(x) = h f'(x) + \frac{h^2}{2!}f''(x) + \frac{h^3}{3!}f^{(3)}(x) + ... $
> ### $ h = \Delta x$
> ### $ \because \color{magenta} {\Delta f} = f(\color{magenta} {x+h}) - f(\color{magenta}{x}) = \color{magenta} {\Delta{x}} f'(x) + \frac{\color{magenta}{\Delta {x}^2}}{2!}f''(x) + \frac{\color{magenta}{\Delta{x}^3}}{3!}f^{(3)}(x) \, + ... $
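A small numerical sketch of this expansion of $\Delta f$; the test function $f(x)=x^3$, the point $x=2$ and the step $\Delta x = 0.1$ are arbitrary choices.
```python
f = lambda x: x**3
fp = lambda x: 3*x**2   # f'
fpp = lambda x: 6*x     # f''
x0, dx = 2.0, 0.1
delta_f = f(x0 + dx) - f(x0)          # exact change: 1.261
first = dx * fp(x0)                   # first-order term: 1.2
second = first + dx**2 / 2 * fpp(x0)  # up to second order: 1.26
print(delta_f, first, second)
```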
# [Taylor series](https://www.youtube.com/watch?v=of6_jCLW5HI&list=PLxjFN6S-kT9-BUCQ0oTKW1X6KG2gkomwp&index=62&t=558s)
## $f(x,y,z) = \begin{cases}
f(a,b,c)\: +
\\
\frac{(x-a)}{1!}\; \frac{\partial f}{\partial{x}}\; \big|_{(a,b,c)}\: +\:
\frac{(y-b)}{1!}\; \frac{\partial f}{\partial{y}}\; \big|_{(a,b,c)}\: +\:
\frac{(z-c)}{1!}\; \frac{\partial f}{\partial{z}}\; \big|_{(a,b,c)}\: +\:
\\
\frac{(x-a)^2}{2!}\frac{\partial^2 f}{\partial{x}^2}\,\big|_{(a,b,c)}\: +\:
\frac{(y-b)^2}{2!}\frac{\partial^2 f}{\partial{y}^2}\,\big|_{(a,b,c)}\: +\:
\frac{(z-c)^2}{2!}\frac{\partial^2 f}{\partial{z}^2}\,\big|_{(a,b,c)}\: +\:
\frac{(x-a)(x-b)}{2!}\frac{\partial f}{\partial{x}}\, \frac{\partial f}{\partial{y}}\,\big|_{(a,b,c)}\: +\:
\\
\frac{(x-a)^3}{3!}\frac{\partial^3 f}{\partial{x}^3}\,\big|_{(a,b,c)}\: +\:
\frac{(y-b)^3}{3!}\frac{\partial^3 f}{\partial{y}^3}\,\big|_{(a,b,c)}\: +\:
\frac{(z-c)^3}{3!}\frac{\partial^3 f}{\partial{z}^3}\,\big|_{(a,b,c)}\: +\: \dots
\end{cases}
$
> ## $ \text{such that }\begin{cases}
\delta x = x-a\\
\delta y = y-b\\
\delta z = z-c\\
\end{cases}$
> ### $
df = f(x,y,z) - f(a,b,c) \\
\quad =\frac{(x-a)}{1!}\frac{\partial f}{\partial x}\big|_{(a,b,c)}
+
\frac{(y-b)}{1!}\frac{\partial f}{\partial y}\big|_{(a,b,c)}
+
\frac{(z-c)}{1!}\frac{\partial f}{\partial z}\big|_{(a,b,c)}
+
\frac{(x-a)^2}{2!}\frac{\partial^2f}{\partial x^2}\big|_{(a,b,c)}
+
\frac{(y-b)^2}{2!}\frac{\partial^2f}{\partial y^2}\big|_{(a,b,c)}
+
\frac{(z-c)^2}{2!}\frac{\partial^2f}{\partial z^2}\big|_{(a,b,c)}\\
\quad =\frac{dx}{1!}\frac{\partial f}{\partial x}\big|_{(a,b,c)}
+
\frac{dy}{1!}\frac{\partial f}{\partial y}\big|_{(a,b,c)}
+
\frac{dz}{1!}\frac{\partial f}{\partial z}\big|_{(a,b,c)}
+
\frac{(dx)^2}{2!}\frac{\partial^2f}{\partial x^2}\big|_{(a,b,c)}
+
\frac{(dy)^2}{2!}\frac{\partial^2f}{\partial y^2}\big|_{(a,b,c)}
+
\frac{(dz)^2}{2!}\frac{\partial^2f}{\partial z^2}\big|_{(a,b,c)}
\\
\quad =\frac{dx}{1!}\frac{\partial f}{\partial x}\big|_{(a,b,c)}
+
\frac{dy}{1!}\frac{\partial f}{\partial y}\big|_{(a,b,c)}
+
\frac{dz}{1!}\frac{\partial f}{\partial z}\big|_{(a,b,c)}
+ \dots
\\
\quad =
(\frac{\partial f}{\partial x},
\frac{\partial f}{\partial y},
\frac{\partial f}{\partial z}) \cdot (dx, dy, dz) \\
\quad =
(\frac{\partial }{\partial x}f,
\frac{\partial }{\partial y}f,
\frac{\partial }{\partial z}f) \cdot (dx, dy, dz) \\
\quad =
(\frac{\partial }{\partial x},
\frac{\partial }{\partial y},
\frac{\partial }{\partial z})f \cdot (dx, dy, dz) \\
\begin{cases}
\quad \because \nabla =
(\frac{\partial}{\partial x},
\frac{\partial}{\partial y},
\frac{\partial}{\partial z}) \\
\nabla f \cdot (dx,dy,dz) \\
\nabla f \cdot d\vec{r} \\
\quad d\vec{r} = \text{infinitesimal displacement vector}
\end{cases}
$
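A sympy sketch of $df \approx \nabla f \cdot d\vec{r}$; the test function $f = xy + z^2$, the point $(1,2,3)$ and the displacement are arbitrary choices.
```python
import sympy as sm
x, y, z = sm.symbols('x y z')
f = x*y + z**2
grad = sm.Matrix([f.diff(x), f.diff(y), f.diff(z)])
point = {x: 1, y: 2, z: 3}
dr = sm.Matrix([0.01, -0.02, 0.03])
df_linear = (grad.subs(point).T * dr)[0]
df_exact = f.subs({x: 1.01, y: 1.98, z: 3.03}) - f.subs(point)
print(df_linear, df_exact)  # 0.18 vs ~ 0.1807
```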
## Del() nabla ($\nabla$)
> sympy.vector.Del()
>> sympy.vector.Del(f),...dot(u), ..cross(u)
>>> where, u is potential field vector, f is scalar function
> ### $ \nabla = \; <
\frac{\partial}{\partial x_0},
\frac{\partial}{\partial x_1},
\frac{\partial}{\partial x_2},
\frac{\partial}{\partial x_3},
\dots>
$
>> ### $\nabla f(x,y,z) = [f(x,y,z)_x, f(x,y.z)_y, f(x,y,z)_z]$
>>> ### $ \because f(x,y,z) \text{ is scalar function}$
>> ### $\nabla \cdot f(x,y,z) = <g(t)_x, h(t)_y, k(t)_z>$
>>> #### $ \because f(x,y,z) \begin{cases} x=g(t),\\y=h(t),\\z=k(t)\end{cases}
\text{ is vector function of parameterized}$
## Gradient ($\nabla f$)
> ## f is scalar function
>> ## the result turns out a vector
> ## sympy.vector.Del(f)
>> $\therefore f $ has to return a real number
---
# Total differential (df)
> ## $ df = \frac{\partial f}{\partial x}dx + \frac{\partial f}{\partial y}dy + \frac{\partial f}{\partial z}dz$
>> ## $ (\frac{\partial f}{\partial x} , \frac{\partial f}{\partial y} , \frac{\partial f}{\partial z}) \cdot (dx,dy,dz)$
>> ## $ (\frac{\partial}{\partial x} , \frac{\partial}{\partial y} , \frac{\partial}{\partial z})f \cdot (dx,dy,dz)$
>> ## $ (\frac{\partial}{\partial x} , \frac{\partial}{\partial y} , \frac{\partial}{\partial z})f \cdot d\vec{r}$
>> ## $ \nabla f \cdot d\vec{r} $
> ## Removing the displacement vector from the total differential leaves the gradient
>> ## $\nabla f \cdot d\vec{r} \Rightarrow \nabla f\\
\because \nabla f = (\frac{\partial}{\partial x} , \frac{\partial}{\partial y} , \frac{\partial}{\partial z})f $
> ## Removing the potential (scalar) field function from the gradient leaves del (nabla).
>> ## $\nabla f \cdot d\vec{r} \Rightarrow \nabla f \Rightarrow \nabla \\
\because \nabla = (\frac{\partial}{\partial x} , \frac{\partial}{\partial y} , \frac{\partial}{\partial z}) $
---
# Directional Derivative
> ## $ \nabla_v f(x) = \nabla f(x) \cdot \vec{v}
$
>> ### We want the derivative of the function f at a point (the slope, i.e. the rate of change of the function value) when moving from that point in the direction of the vector v.
> ## $ \nabla_v f(x) = \nabla f(x) \cdot \frac{\vec{v}}{||\vec{v}||}
$
> ## $ \quad \text{ where } \vec{u} = <a,b,c>, \\
\nabla_uf(x_0,x_1,x_2)
=
\lim\limits_{h\to0}
\frac{f(x_0 + ha,\; x_1+hb,\; x_2+hc) - f(x_0,\;x_1,\;x_2)}{h} \\
\quad = f_{x_0}a + f_{x_1}b + f_{x_2}c \\
\quad = \;< f_{x_0}, \;f_{x_1}, \; f_{x_2}> \cdot < a,\; b,\; c > \\
\quad = \;<
\frac{\partial}{\partial x_0},
\frac{\partial}{\partial x_1},
\frac{\partial}{\partial x_2}>
f(x_0,\;x_1\;,x_2) \; \cdot < a,\; b,\; c >
\\
$
> ## $ \quad \text{ restriction to a unit vector } \vec{u} = <a,b,c>, \\
\nabla_{\hat{u}}f(x_0,x_1,x_2)
=
\lim\limits_{h\to0}
\frac{f(x_0 + ha,\; x_1+hb,\; x_2+hc) - f(x_0,\;x_1,\;x_2)}{h|v|}
$
> # $ D_uf(x,y) = \lim\limits_{h\to 0} \frac{f(x+hcos\theta,\; y + hsin\theta) - f(x,y)}{h}$
> [link](https://www.youtube.com/watch?v=r0NL1s6urhs)
>> ### if $\theta = 0$ then $D_uf(x,y) = f_x $
>> ### if $\theta = \frac{\pi}{2}$ then $D_uf(x,y) = f_y$
# [problem](https://www.youtube.com/watch?v=H2eND512nQQ&t=596s)
> ## $ f(x,y) = x^2y^3 - 4y, \quad v=<2,5> $
> ## find $ D_{u}F(3,-1) $
> ## Find the derivative of the function f at the point (3,-1) in the direction of $\vec{v}$
```python
# https://www.youtube.com/watch?v=H2eND512nQQ&t=596s
# f(x,y) = x^2*y^3 - 4y, v = <2,5>, find D_u F(3,-1)
## find the derivative of f at the point (3,-1) in the direction of v
import sympy as sm
import sympy.vector
B = sm.vector.CoordSys3D('')
n,x,y,z = sm.symbols('n x y z')
f = B.x**2 * B.y**3 - 4*B.y
# direction vector, as a matrix and as a sympy vector
# (note: the problem statement above uses v = <2,5>, but this cell takes <3,-1,0>,
#  which is what yields the value printed below)
umat = sm.Matrix([3, -1, 0])
uvec = sm.vector.matrix_to_vector(umat, B)
# normalized direction vector and matrix
uvn = uvec/sm.sqrt(uvec.dot(uvec))
umn = umat/sm.sqrt(umat.dot(umat))
# \nabla f(x,y) = gradient
nabla = sm.vector.Del()
nabla.gradient(f).doit()
# gradient = f_x + f_y + f_z
gradient = sm.Matrix([f.diff(B.x), f.diff(B.y), f.diff(B.z)])
grad = gradient.subs({B.x:3,B.y:-1,B.z:0})
# \nabla f(x,y)|_{x=3,y=-1,z=0} \iff matrix.subs|_{}
nabla.gradient(f).subs({B.x:3,B.y:-1,B.z:0}).doit()
nabla.gradient(f).subs({B.x:3,B.y:-1,B.z:0}).doit().dot(uvn)
gradient.subs({B.x:3,B.y:-1,B.z:0}).dot(umn)
```
$\displaystyle - \frac{41 \sqrt{10}}{10}$
# Mean value theorem $\iff$ total derivative
> ## $ f'(c) = \frac{f(b)-f(a)}{(b-a)}$
>> ## $ \quad a < c < b$
>> ## $ f'(c)(b-a) = f(b) - f(a)$
>> ## $ f'(c)(b-a) + f(a) = f(b)$
> ## $\therefore f(b) = f(a) + f'(c)(b-a)$
> ## $\therefore f(a+h) = f(a) + f'(a+(c-a))h, $
> ## $\therefore f(x + \Delta x) = f(x) + f'(x+\Delta x - \epsilon_0)\Delta x$
> ## $\therefore f(x + dx) \approx f(x) + f'(x)\;dx$
> ## $\therefore df \approx f'(x)\;dx$
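A small sympy sketch of the mean value theorem; the function $f(x)=x^3$ and the interval $[0,2]$ are arbitrary choices.
```python
import sympy as sm
x = sm.symbols('x')
f = x**3
a, b = 0, 2
# solve f'(c) = (f(b) - f(a)) / (b - a)
c = sm.solve(sm.Eq(f.diff(x), (f.subs(x, b) - f.subs(x, a)) / (b - a)), x)
print(c)  # [-2*sqrt(3)/3, 2*sqrt(3)/3]; the positive root lies inside (0, 2)
```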
# Sequences and Series
> ## $$ \sum_{n=0}^{\infty}\frac{6}{4^n}$$
```python
sm.Sum(6/4**n,(n,0,sm.oo))
```
$\displaystyle \sum_{n=0}^{\infty} 6 \cdot 4^{- n}$
```python
sm.Sum(6/4**n,(n,0,sm.oo)).doit()
```
$\displaystyle 8$
> ## approximation = n()
>> `n()` : numerical evaluation (n stands for number)
>> ## $$ \sum_{n=1}^{\infty} \frac{tan^{-1}(n)}{n^{1.1}}$$
```python
eq = sm.atan(n) / n**sm.Rational(11,10)
eq
```
$\displaystyle \frac{\operatorname{atan}{\left(n \right)}}{n^{\frac{11}{10}}}$
```python
sm.Sum(eq,(n,1,sm.oo)).doit()
```
$\displaystyle \sum_{n=1}^{\infty} \frac{\operatorname{atan}{\left(n \right)}}{n^{\frac{11}{10}}}$
```python
sm.Sum(eq,(n,1,sm.oo)).n()
```
$\displaystyle 15.3028821020457$
```python
import sympy as sm
x,a = sm.symbols('x a')
f = sm.symbols('f',cls=sm.Function)
sm.series(f(a-x),x,a,6,"+")
```
$\displaystyle f{\left(0 \right)} - \left(- a + x\right) \left. \frac{d}{d \xi} f{\left(\xi \right)} \right|_{\substack{ \xi=0 }} + \frac{\left(- a + x\right)^{2} \left. \frac{d^{2}}{d \xi^{2}} f{\left(\xi \right)} \right|_{\substack{ \xi=0 }}}{2} - \frac{\left(- a + x\right)^{3} \left. \frac{d^{3}}{d \xi^{3}} f{\left(\xi \right)} \right|_{\substack{ \xi=0 }}}{6} + \frac{\left(- a + x\right)^{4} \left. \frac{d^{4}}{d \xi^{4}} f{\left(\xi \right)} \right|_{\substack{ \xi=0 }}}{24} - \frac{\left(- a + x\right)^{5} \left. \frac{d^{5}}{d \xi^{5}} f{\left(\xi \right)} \right|_{\substack{ \xi=0 }}}{120} + O\left(\left(- a + x\right)^{6}; x\rightarrow a\right)$
# [Taylor series](https://www.youtube.com/watch?v=UT4QY-H4Dyk&list=PLIxff5DJJR7oBEy0Kdg12WWSlS6XFtr6r&index=18)
> ### $
f(x+hx, y+hy) =
f(x,y) +
\Big(hx \frac{\partial f}{\partial x} + hy \frac{\partial f}{\partial y}\big) +
\frac{1}{2}\Big({hx}^2 \frac{\partial^2 f}{\partial x^2} +
2\,hx\,hy\, \frac{\partial^2 f}{\partial x \partial y} +
{hy}^2 \frac{\partial^2 f}{\partial y^2} \Big) + ...
$
> ### $
f(x+hx, y+hy) = f(x,y) +
\begin{bmatrix}
\frac {\partial f}{\partial x} & \frac{\partial f}{\partial y}
\end{bmatrix}
\begin{bmatrix} hx \\ hy \end{bmatrix}
+ \frac{1}{2}
\begin{bmatrix} hx & hy \end{bmatrix}
\begin{bmatrix}
\frac{\partial^2 f}{\partial x^2} & \frac{\partial^2 f}{\partial x \partial y} \\
\frac{\partial^2 f}{\partial y \partial x} & \frac{\partial^2 f}{\partial y^2}
\end{bmatrix}
\begin{bmatrix} hx \\ hy \end{bmatrix}
$
# Infinitesimal change of the independent variables
>> ### $ d = \begin{bmatrix}hx \\ hy \end{bmatrix}$
# gradient matrix
>> ### $ g = \frac{\partial f}{\partial \vec{x}}
=
\Big(
\frac{\partial f}{\partial x_i}
\Big)$
>> ### $ df = g^T[d\vec{x}]$
# Hessian matrix
> ### $
H = \frac{\partial^2 f}{\partial \vec{x} \partial \vec{x}}
= \Big(\frac{\partial^2 f}{\partial x_i \partial x_j}\Big)
=
\begin{bmatrix}
\frac{\partial^2 f}{\partial x^2} &
\frac{\partial^2 f}{\partial x \partial y }\\
\frac{\partial^2 f}{\partial y \partial x } &
\frac{\partial^2 f}{\partial y2}\\
\end{bmatrix}
$
> ### $ d^2f = [d\vec{x}]^TH[d\vec{x}]$
# $
f(x+d) = f(x) + \Big( \frac{\partial f}{\partial\vec{x}}\Big)^T d + \frac{1}{2}\, d^THd
$
# $
f(x+dx) = f(x) + \Big( \frac{\partial f}{\partial\vec{x}}\Big)^T dx + \frac{1}{2}\,(dx)^TH(dx)
$
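A sympy sketch of this second-order expansion using `sm.hessian`; the test function $f = x^2y + y^3$, the point $(1,2)$ and the step are arbitrary choices.
```python
import sympy as sm
x, y = sm.symbols('x y')
f = x**2*y + y**3
g = sm.Matrix([f.diff(x), f.diff(y)])   # gradient
H = sm.hessian(f, (x, y))               # Hessian matrix
point = {x: 1, y: 2}
d = sm.Matrix([0.1, -0.2])
approx = f.subs(point) + (g.subs(point).T*d)[0] + sm.Rational(1, 2)*(d.T*H.subs(point)*d)[0]
exact = f.subs({x: 1.1, y: 1.8})
print(approx, exact)  # ~ 8.02 vs 8.01
```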
# Multi function
> ## multi independent variable
>> ### $x = \begin{bmatrix} x \\ y \end{bmatrix}$
> ## multi dependent variable
>> ### $ f = \begin{bmatrix} f_1 \\ f_2\end{bmatrix}$
> ### $
df_1(x,y) =
\frac{\partial f_1}{\partial x}dx +
\frac{\partial f_1}{\partial y}dy$
> ### $
df_2(x,y) =
\frac{\partial f_2}{\partial x}dx +
\frac{\partial f_2}{\partial y}dy$
> ### $\begin{bmatrix} df_1 \\ df_2 \end{bmatrix}
= \begin{bmatrix}
\frac{\partial f_1}{\partial x} &
\frac{\partial f_1}{\partial y} \\
\frac{\partial f_2}{\partial x} &
\frac{\partial f_2}{\partial y}
\end{bmatrix}
\begin{bmatrix}
dx \\ dy
\end{bmatrix}$
> ### $ \frac{\partial \vec{f}}{\partial \vec{x}}
= \Big( \frac{\partial f_i}{\partial x_j} \Big)
= \begin{bmatrix}
\frac{\partial f_1}{\partial x_1} &
\frac{\partial f_1}{\partial x_2} \\
\frac{\partial f_2}{\partial x_1} &
\frac{\partial f_2}{\partial x_2}
\end{bmatrix} $
# Jacobian
> ### $ J = \frac{\partial \vec{f}}{\partial \vec{x}}
= \Big( \frac{\partial f_i}{\partial x_j}\Big)$
> ### $ [d\vec{f}] = J[d \vec{x}]$
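A small `sympy` sketch of this relation (the component functions are arbitrary examples):
```python
# Jacobian of a vector-valued function and the linear relation [df] = J [dx].
# The component functions f1, f2 are arbitrary examples.
import sympy as sm

x, y = sm.symbols('x y')
F = sm.Matrix([x**2 + y, sm.sin(x*y)])       # f = [f1, f2]
J = F.jacobian([x, y])                       # matrix of partial derivatives (df_i/dx_j)

dx, dy = sm.symbols('dx dy')
dF = J * sm.Matrix([dx, dy])                 # first-order change of f
print(J)
print(sm.simplify(dF))
```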
# gradient in n-space
>### $
d\vec{r} = \vec{e}_1dx_1 + \vec{e}_2dx_2 + ... + \vec{e}_ndx_n,
\quad \vec{e}_i\cdot \vec{e}_j = \delta_{ij}$
> ### $
f(\vec{r} + d\vec{r}) = f(\vec{r}) + dx_1\frac{\partial f}{\partial x_1} +
dx_2\frac{\partial f}{\partial x_2} + ... +
dx_n\frac{\partial f}{\partial x_n} \\
= f(\vec{r}) + d\vec{r}\cdot \frac{\partial f}{\partial \vec{r}} + ...
$
> ### $
\frac{df}{d\vec{r}} = \frac{d}{d\vec{r}}(f) = \Big(\vec{e}_1 \frac{\partial}{\partial x_1} + \vec{e}_2 \frac{\partial}{\partial x_2}+...+ \vec{e}_n \frac{\partial}{\partial x_n} \Big)(f) = \nabla f
$
# $ \nabla f = \frac{df}{dr} \\ \therefore d = dr \cdot \nabla$
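The identity $df = d\vec{r}\cdot\nabla f$ can be checked componentwise; a small sketch with an arbitrary scalar field:
```python
# df = dr · ∇f : componentwise check for an arbitrary scalar field in R^3.
import sympy as sm

x1, x2, x3 = sm.symbols('x1 x2 x3')
dx1, dx2, dx3 = sm.symbols('dx1 dx2 dx3')
f = x1*x2**2 + sm.exp(x3)                    # arbitrary scalar field

grad_f = [f.diff(v) for v in (x1, x2, x3)]   # components of ∇f
df = sum(d*c for d, c in zip((dx1, dx2, dx3), grad_f))
print(df)                                    # dx1*∂f/∂x1 + dx2*∂f/∂x2 + dx3*∂f/∂x3
```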
```python
```
# Elementary arithmetic operations
In this tutorial we construct quantum networks that effect basic arithmetic operations, from plain addition up to modular exponentiation, and provide some executable examples that run on the simulator and on a five-qubit device.
\begin{align}
Plain\; addition \rightarrow Modular\; addition \rightarrow Modular\; multiplication \rightarrow Modular\; exponentiation
\end{align}
### Contributors
Carlos Bravo Prieto
## Introduction
There is no known efficient classical algorithm for factoring. The mathematician and MIT professor Peter Shor discovered an efficient algorithm for factoring large numbers using quantum effects, showing the potential power of quantum computation.
There is a lot of interest in an efficient factoring algorithm because much of modern encryption relies on the practical impossibility of factoring large numbers. Therefore, Shor's algorithm could represent the fall of much of today's cryptography.
Shor's algorithm uses the quantum Fourier transform to find the order $r$ of a randomly chosen number $x$ in the multiplicative group modulo $N$. This is done by exponentiating $x$ modulo $N$. Here we focus on the quantum modular exponentiation, which is the most demanding part. Although quantum modular exponentiation offers no speed-up with respect to its classical analog, it has to be implemented in order to run Shor's algorithm on a quantum computer.
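For intuition, the order $r$ can be found classically by brute force for tiny numbers (a sketch; the choice $x=7$, $N=15$ is just an example, and this brute-force search is exactly what becomes infeasible for large $N$):
```python
# Classical brute-force order finding: the smallest r > 0 with x^r ≡ 1 (mod N).
# Feasible only for tiny N; Shor's algorithm obtains r efficiently via the QFT.
from math import gcd

def order(x, N):
    assert gcd(x, N) == 1, "x and N must be coprime"
    r, acc = 1, x % N
    while acc != 1:
        acc = (acc * x) % N
        r += 1
    return r

r = order(7, 15)
print(r, gcd(7**(r//2) - 1, 15), gcd(7**(r//2) + 1, 15))   # -> 4 3 5
```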
## Plain addition
Our first step is to build the most basic operation, the plain addition, which can be written as
\begin{align}
|a, b \rangle \, \rightarrow \, |a, a+b \rangle \,.
\end{align}
To prevent overflows, if $a$ and $b$ are encoded in $n$ qubits, the second register must be of size $n+1$. We will provisionally write the carries of the addition in a temporary register of size $n-1$ initially in state $|0\rangle$. The operation of the plain addition can be understood as:
1. First we compute the most significant bit of the result $a + b$. We have to compute all the carries $c_i$ through the relation $c_i$ $\leftarrow$ MAJ($a_{i-1}$, $b_{i-1}$, $c_{i-1}$), i.e. the majority of the two addend bits and the previous carry, where $a_i$, $b_i$ and $c_i$ represent the $i$th qubit of the first, second and temporary register respectively.
2. Finally we reverse all these operations, except for the last one, which computed the leading bit of the result. This way we can reuse the same temporary register if we require repeated additions. While this resetting takes place, the other qubits of the result are computed through the relation $b_i$ $\leftarrow$ $a_i$ XOR $b_i$ XOR $c_{i−1}$.
The total number of qubits needed is $3n$.
The network above generalizes the algorithm to $n$ qubits; note that the first carry qubit $c_0$ is not really needed, because its value will always be $0$.
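For reference, the same carry/sum bookkeeping can be traced classically; the sketch below mirrors the register layout (it is only a classical illustration, not part of the quantum circuit):
```python
# Classical ripple-carry addition, the arithmetic that the quantum plain adder reproduces.
# a_bits, b_bits are little-endian bit lists (like q[0]..q[n-1]); the result uses n+1 bits.
def ripple_add(a_bits, b_bits):
    result, carry = [], 0
    for ai, bi in zip(a_bits, b_bits):
        result.append(ai ^ bi ^ carry)                      # sum bit
        carry = (ai & bi) | (carry & (ai ^ bi))             # next carry (majority)
    result.append(carry)                                    # leading bit of a + b
    return result

a, b = 9, 14
n = max(a.bit_length(), b.bit_length())
a_bits = [(a >> i) & 1 for i in range(n)]
b_bits = [(b >> i) & 1 for i in range(n)]
print(sum(bit << i for i, bit in enumerate(ripple_add(a_bits, b_bits))))   # -> 23
```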
### Implementation in IBM Q quantum device. Example 1.
We are going to start with the easiest example, the implementation of the plain addition of two one-qubit numbers $a$ and $b$. This program can be executed a 5-qubit device.
```python
# importing Qiskit
from qiskit import Aer, IBMQ, execute
from qiskit.backends.ibmq import least_busy
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
# import basic plotting tools
from qiskit.tools.visualization import plot_histogram
from qiskit.tools.qcvv.tomography import marginal_counts
```
```python
IBMQ.load_accounts()
```
```python
# Use local qasm simulator
# backend = Aer.get_backend('qasm_simulator')
# Use the IBM Quantum Experience
backend = least_busy(IBMQ.backends(simulator=False))
# Use the IBM qasm simulator
# backend = IBMQ.get_backend('ibmq_qasm_simulator')
```
First let's define the plain addition algorithm for two one-qubit numbers $a$ and $b$. We implement the quantum plain addition algorithm based on the previous figure. We also define a function that creates those initial states (assuming that our input in Qiskit code is integer). We have to build the Toffoli gate from the elementary gates of the quantum computer.
```python
# quantum plain addition algorithm for 1-qubit numbers
def addition_1bit(circuit, q):
circuit.h(q[2])
circuit.cx(q[1], q[2])
circuit.tdg(q[2])
circuit.cx(q[0], q[2])
circuit.t(q[2])
circuit.cx(q[1], q[2])
circuit.tdg(q[2])
circuit.cx(q[0], q[2])
circuit.t(q[2])
circuit.h(q[2])
circuit.t(q[1])
circuit.cx(q[0], q[1])
circuit.t(q[0])
circuit.tdg(q[1])
# n-qubit number input state
def number_state(circuit, q, a, b):
if a == 1:
circuit.x(q[0]) # q[0] contains the value of a
if b == 1:
        circuit.x(q[1]) # q[1] contains the value of b
```
Let's now implement the plain addition of two one-qubit numbers $a+b$, for instance $1+1$, that should return $2 \,(10)$.
In the Quantum Experience composer this circuit would look like:
```python
# we define the values (0 or 1)
a = 1
b = 1
# one single quantum register which contains 'a' (1 qubit) and 'b' (2 qubits)
q = QuantumRegister(3, name="q") # 3 qubits
# classical register
c = ClassicalRegister(2, name="cr") # 2 bits
# quantum circuit involving the quantum register and the classical register
add1bit_circuit = QuantumCircuit(q ,c, name="add")
# create the state containing a and b
number_state(add1bit_circuit, q, a, b)
# addition
addition_1bit(add1bit_circuit, q)
# measurements to see the result, which has been written in b (q[1]q[2])
add1bit_circuit.measure(q[1], c[0])
add1bit_circuit.measure(q[2], c[1])
# compile and execute the quantum program in the backend
result = execute(add1bit_circuit, backend=backend, shots=1024).result()
# show the results
print(result)
print(result.get_data(add1bit_circuit))
counts = marginal_counts(result.get_counts(add1bit_circuit), [0, 1])
plot_histogram(counts)
print("Backend:", backend.name())
print("Highest probability outcome: {}".format(int(max(counts, key = lambda x: counts[x]).replace(" ", ""), 2)))
```
We see that the highest probability outcome is $2 \;(10)$ when we execute the code on the IBM Q quantum device.
### Implementation in the quantum simulator. Example 2.
The second example is the implementation of the plain addition of two $n$-qubit numbers $a$ and $b$. This program can be executed in the quantum simulator.
First let's define different modules that we are going to use repeatedly, as well as the plain addition algorithm for two $n$-qubit numbers.
```python
# Use local qasm simulator
backend = Aer.get_backend('qasm_simulator')
# Use the IBM qasm simulator
# backend = IBMQ.get_backend('ibmq_qasm_simulator')
```
```python
def carry(circuit, q0, q1, q2, q3):
"carry module"
circuit.ccx(q1, q2, q3)
circuit.cx(q1, q2)
circuit.ccx(q0, q2, q3)
def carry_inv(circuit, q0, q1, q2, q3):
"carry module but running backwards"
circuit.ccx(q0, q2, q3)
circuit.cx(q1, q2)
circuit.ccx(q1, q2, q3)
def summation(circuit, q0, q1, q2):
"summation module"
circuit.cx(q1, q2)
circuit.cx(q0, q2)
# quantum plain addition algorithm for n-qubit numbers
def addition_nbit(circuit, qa, qb, qcar, n):
if n == 1:
circuit.ccx(qa[0], qb[0], qb[1])
circuit.cx(qa[0], qb[0])
else:
circuit.ccx(qa[0], qb[0], qcar[0])
circuit.cx(qa[0], qb[0])
for i in range(n-2):
carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1])
carry(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n])
circuit.cx(qa[n-1], qb[n-1])
for i in range(n-1, 1, -1):
summation(circuit, qcar[i-1], qa[i], qb[i])
carry_inv(circuit, qcar[i-2], qa[i-1], qb[i-1], qcar[i-1])
summation(circuit, qcar[0], qa[1], qb[1])
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qcar[0])
circuit.cx(qa[0], qb[0])
```
Now we are going to define a function that creates the states containing $a$ and $b$.
```python
# n-qubit number input state
def number_state(circuit, q, x, n):
# integer to binary
x = "{0:b}".format(x)
x = x.zfill(n)
# creating the state
for i in range(n):
if int(x[n-1-i]) == 1:
circuit.x(q[i])
```
Let's now implement the plain addition of two numbers $a+b$, for instance $9+14$, that should return $23$ $(10111)$:
```python
# we define the values
a = 9
b = 14
# computing the number of qubits n needed
n = len("{0:b}".format(a))
n2 = len("{0:b}".format(b))
if n2 > n:
n = n2
# classical register with n+1 bits.
c = ClassicalRegister(n+1, name="cr")
# quantum registers
qa = QuantumRegister(n, name="qa") # a qubits
qb = QuantumRegister(n+1, name="qb") # b qubits
# if n = 1, no need of carry register
if n == 1:
qcar = 0
# quantum circuit involving the quantum registers and the classical register
addnbit_circuit = QuantumCircuit(qa, qb,c, name="add")
else:
qcar = QuantumRegister(n-1, name="qcar") # carry qubits
# quantum circuit involving the quantum registers and the classical register
addnbit_circuit = QuantumCircuit(qa, qb, qcar,c, name="add")
# create the state containing a
number_state(addnbit_circuit, qa, a, n)
# create the state containing b
number_state(addnbit_circuit, qb, b, n)
# addition
addition_nbit(addnbit_circuit, qa, qb, qcar, n)
# measurements to see the result
for i in range(n+1):
addnbit_circuit.measure(qb[i], c[i])
# compile and execute the quantum program in the backend
result = execute(addnbit_circuit, backend=backend, shots=1024).result()
# show the results.
print(result)
print(result.get_data(addnbit_circuit))
counts = result.get_counts(addnbit_circuit)
plot_histogram(counts)
print("Backend:", backend.name())
print("Highest probability outcome: {}".format(int(max(counts, key = lambda x: counts[x]).replace(" ", ""), 2)))
```
We indeed see that the only appearing outcome is $23\,(10111)$ when we execute the code on local_qasm_simulator.
## Modular addition
As you may already expect, as we increase the complexity of the arithmetic operations we also increase the complexity of the circuit network. Our next goal is to build a network that effects
\begin{align}
| a, b \rangle\, \rightarrow \, | a, a+b\, mod \,N \rangle
\end{align}
where $a+b < 2N$ (this condition suffices for building the following arithmetic operations). The approach consists essentially of taking the output of the plain adder network and then subtracting $N$, depending on whether the value $a+b$ is bigger or smaller than $N$. In order to do so, we use a temporary qubit which indicates whether there has been an overflow in the subtraction, and we add $N$ back depending on this. The last part of the network takes care of resetting every register (except the second register, which contains the result) to its initial state.
The total number of qubits needed is $5n + 2$.
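As a classical reference for this add / conditionally-subtract-$N$ strategy (only a sketch of the bookkeeping, not the quantum network):
```python
# Classical reference for the modular addition strategy: add b, subtract N,
# and add N back if the subtraction "overflowed" (the role of the temporary qubit).
def mod_add(a, b, N):
    assert a + b < 2 * N              # condition assumed by the network
    s = a + b                         # plain adder
    s -= N                            # subtract N
    if s < 0:                         # overflow: the temporary qubit would be set
        s += N                        # conditionally add N back
    return s

print(mod_add(2, 3, 3))               # the example from the text: 2 + 3 mod 3 -> 2
```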
### Implementation in the quantum simulator. Example 3.
The third example is the implementation of the modular addition of two $n$-qubit numbers $a$ and $b$ (modulo $N$). This program can be executed in the quantum simulator.
Now we define again the different modules that we are going to use repeatedly, as well as the modular addition algorithm for two $n$-qubit numbers. This time we also have to subtract or use conditional adders (in order to subtract $N$ or not), therefore we have to implement new modules such as the conditional plain addition and the plain subtraction.
```python
# Use local qasm simulator
backend = Aer.get_backend('qasm_simulator')
# Use the IBM qasm simulator
# backend = IBMQ.get_backend('ibmq_qasm_simulator')
```
```python
def carry(circuit, q0, q1, q2, q3):
"carry module"
circuit.ccx(q1, q2, q3)
circuit.cx(q1, q2)
circuit.ccx(q0, q2, q3)
def carry_inv(circuit, q0, q1, q2, q3):
"carry module running backwards"
circuit.ccx(q0, q2, q3)
circuit.cx(q1, q2)
circuit.ccx(q1, q2, q3)
def summation(circuit, q0, q1, q2):
"summation module"
circuit.cx(q1, q2)
circuit.cx(q0, q2)
def summation_inv(circuit, q0, q1, q2):
"summation module running backwards"
circuit.cx(q0, q2)
circuit.cx(q1, q2)
# quantum plain addition algorithm for n-qubit numbers
def addition_nbit(circuit, qa, qb, qcar, n):
if n == 1:
circuit.ccx(qa[0], qb[0], qb[1])
circuit.cx(qa[0], qb[0])
else:
circuit.ccx(qa[0], qb[0], qcar[0])
circuit.cx(qa[0], qb[0])
for i in range(n-2):
carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1])
carry(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n])
circuit.cx(qa[n-1], qb[n-1])
for i in range(n-1, 1, -1):
summation(circuit, qcar[i-1], qa[i], qb[i])
carry_inv(circuit, qcar[i-2], qa[i-1], qb[i-1], qcar[i-1])
summation(circuit, qcar[0], qa[1], qb[1])
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qcar[0])
circuit.cx(qa[0], qb[0])
# quantum plain subtraction algorithm for n-qubit numbers
def subs_nbit(circuit, qa, qb, qcar, n):
"same circuit as the plain addition but going backwards"
if n == 1:
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qb[1])
else:
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qcar[0])
circuit.cx(qa[0], qb[0])
summation_inv(circuit, qcar[0], qa[1], qb[1])
for i in range(n-2):
carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1])
summation_inv(circuit, qcar[i+1], qa[i+2], qb[i+2])
circuit.cx(qa[n-1], qb[n-1])
carry_inv(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n])
for i in range(n-2, 0, -1):
carry_inv(circuit, qcar[i-1], qa[i], qb[i], qcar[i])
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qcar[0])
def cond_toffoli(circuit, qcond, q1, q2, q3):
"toffoli gate conditioned by an external qubit"
circuit.h(q3)
circuit.ccx(qcond, q2, q3)
circuit.tdg(q3)
circuit.ccx(qcond, q1, q3)
circuit.t(q3)
circuit.ccx(qcond, q2, q3)
circuit.tdg(q3)
circuit.ccx(qcond, q1, q3)
circuit.t(q3)
circuit.h(q3)
circuit.t(q2)
circuit.ccx(qcond, q1, q2)
circuit.t(q1)
circuit.tdg(q2)
circuit.ccx(qcond, q1, q2)
def cond_carry(circuit, q0, q1, q2, q3, qcond):
"conditional carry module"
cond_toffoli(circuit, qcond, q1, q2, q3)
circuit.ccx(qcond, q1, q2)
cond_toffoli(circuit, qcond, q0, q2, q3)
def cond_carry_inv(circuit, q0, q1, q2, q3, qcond):
"conditional carry module running backwards"
cond_toffoli(circuit, qcond, q0, q2, q3)
circuit.ccx(qcond, q1, q2)
cond_toffoli(circuit, qcond, q1, q2, q3)
def cond_summation(circuit, q0, q1, q2, qcond):
"conditional summation module"
circuit.ccx(qcond, q1, q2)
circuit.ccx(qcond, q0, q2)
def cond_summation_inv(circuit, q0, q1, q2, qcond):
"conditional summation module running backwards"
circuit.ccx(qcond, q0, q2)
circuit.ccx(qcond, q1, q2)
# quantum conditional plain addition algorithm for n-qubit numbers
def cond_addition_nbit(circuit, qa, qb, qcar, qcond, n):
"plain addition algorithm conditioned by an external qubit"
if n == 1:
cond_toffoli(circuit, qcond[0], qa[0], qb[0], qb[1])
circuit.ccx(qcond[0], qa[0], qb[0])
else:
cond_toffoli(circuit, qcond[0], qa[0], qb[0], qcar[0])
circuit.ccx(qcond[0], qa[0], qb[0])
for i in range(n-2):
cond_carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1], qcond[0])
cond_carry(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n], qcond[0])
circuit.ccx(qcond[0], qa[n-1], qb[n-1])
for i in range(n-1, 1, -1):
cond_summation(circuit, qcar[i-1], qa[i], qb[i], qcond[0])
cond_carry_inv(circuit, qcar[i-2], qa[i-1], qb[i-1], qcar[i-1], qcond[0])
cond_summation(circuit, qcar[0], qa[1], qb[1], qcond[0])
circuit.ccx(qcond[0], qa[0], qb[0])
cond_toffoli(circuit, qcond[0], qa[0], qb[0], qcar[0])
circuit.ccx(qcond[0], qa[0], qb[0])
# quantum conditional plain subtraction algorithm for n-qubit numbers
def cond_subs_nbit(circuit, qa, qb, qcar, qcond, n):
"same circuit as the conditional plain addition but going backwards"
if n == 1:
circuit.ccx(qcond[0], qa[0], qb[0])
cond_toffoli(circuit, qcond[0], qa[0], qb[0], qb[1])
else:
circuit.ccx(qcond[0], qa[0], qb[0])
cond_toffoli(circuit, qcond[0], qa[0], qb[0], qcar[0])
circuit.ccx(qcond[0], qa[0], qb[0])
cond_summation_inv(circuit, qcar[0], qa[1], qb[1], qcond[0])
for i in range(n-2):
cond_carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1], qcond[0])
cond_summation_inv(circuit, qcar[i+1], qa[i+2], qb[i+2], qcond[0])
circuit.ccx(qcond[0], qa[n-1], qb[n-1])
cond_carry_inv(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n], qcond[0])
for i in range(n-2, 0, -1):
cond_carry_inv(circuit, qcar[i-1], qa[i], qb[i], qcar[i], qcond[0])
circuit.ccx(qcond[0], qa[0], qb[0])
cond_toffoli(circuit, qcond[0], qa[0], qb[0], qcar[0])
# quantum modular addition algorithm for n-qubit numbers
def mod_addition_nbit(circuit, qa, qb, qN, qNtemp, qcar, qtemp, n):
addition_nbit(circuit, qa, qb, qcar, n)
subs_nbit(circuit, qN, qb, qcar, n)
circuit.x(qb[n])
circuit.cx(qb[n], qtemp[0])
circuit.x(qb[n])
cond_subs_nbit(circuit, qNtemp, qN, qcar, qtemp, n)
addition_nbit(circuit, qN, qb, qcar, n)
cond_addition_nbit(circuit, qNtemp, qN, qcar, qtemp, n)
subs_nbit(circuit, qa, qb, qcar, n)
circuit.cx(qb[n], qtemp[0])
addition_nbit(circuit, qa, qb, qcar, n)
```
Now we define the function that creates the states containing $a$, $b$ and $N$.
```python
# n-qubit number input state
def number_state(circuit, q, x, n):
# integer to binary
x = "{0:b}".format(x)
x = x.zfill(n)
# creating the state
for i in range(n):
if int(x[n-1-i]) == 1:
circuit.x(q[i])
```
Let's implement the modular addition of two numbers $a+b \, mod \, N$, for instance $2+3 \,mod\, 3$, that should return $2 \, (010)$:
```python
# we define the values
a = 2
b = 3
N = 3
# computing number of qubits n needed
n = len("{0:b}".format(a))
n2 = len("{0:b}".format(b))
n3 = len("{0:b}".format(N))
if n2 > n:
n = n2
if n3 > n:
n = n3
# classical register with n+1 bits.
c = ClassicalRegister(n+1, name="cr")
# quantum registers
qa = QuantumRegister(n, name="qa") # a qubits
qb = QuantumRegister(n+1, name="qb") # b qubits
qN = QuantumRegister(n+1, name="qN") # N qubits
qNtemp = QuantumRegister(n, name="qNtemp") # temporary N qubits
qtemp = QuantumRegister(1, name="qtemp") # temporary qubit
# if n = 1, no need of carry register
if n == 1:
qcar = 0
# quantum circuit involving the quantum registers and the classical register
mod_add_circuit = QuantumCircuit(qa, qb, qN, qNtemp, qtemp,c, name="mod_add")
else:
qcar = QuantumRegister(n-1, name="qcar") # carry qubits
# quantum circuit involving the quantum registers and the classical register
mod_add_circuit = QuantumCircuit(qa, qb, qN, qcar, qNtemp, qtemp, c, name="mod_add")
# create the state containing 'a'
number_state(mod_add_circuit, qa, a, n)
# create the state containing 'b'
number_state(mod_add_circuit, qb, b, n)
# create the state containing 'N'
number_state(mod_add_circuit, qN, N, n)
# create the temporary state containing 'N'
number_state(mod_add_circuit, qNtemp, N, n)
# modular addition
mod_addition_nbit(mod_add_circuit, qa, qb, qN, qNtemp, qcar, qtemp, n)
# measurements to see the result
for i in range(n+1):
mod_add_circuit.measure(qb[i], c[i])
# compile and execute the quantum program in the backend
result = execute(mod_add_circuit, backend=backend, shots=1024).result()
# show the results.
print(result)
print(result.get_data("mod_add"))
counts = result.get_counts("mod_add")
plot_histogram(counts)
print("Backend:", backend.name())
print("Highest probability outcome: {}".format(int(max(counts, key = lambda x: counts[x]).replace(" ", ""), 2)))
```
Indeed the result is what we were expecting, $2\, (010)$.
## Controlled modular multiplication
The modular multiplication can be implemented by repeated conditional additions modulo $N$: $ax = 2^0 ax_0 + 2^1 ax_1 + ... + 2^{n-1} ax_{n-1}$. We start from a register initialized in the state $|0\rangle$. Then the network consists of conditionally adding the value $2^i a$, depending on the state of the qubit $|x_i\rangle$. However, in order to implement the modular exponentiation, we need a slightly more complicated circuit, because we want the multiplication to be effected conditionally, depending on the value of some external qubit $|c\rangle$. If $|c\rangle = |1\rangle$
\begin{align}
|c; x,0\rangle \, \rightarrow \, |c;x, a \times x\; mod\, N\rangle
\end{align}
This is done by applying Toffoli gates with the control qubit $|c\rangle$ and each $|x_i\rangle$ as controls and the corresponding target qubit. The resetting of the register to its initial state is done by applying the same Toffoli gates again. If the control qubit is $|0\rangle$, no operation is performed, giving the state $|c; x,0\rangle$. Since in that case we want the state to be $|c;x,x\rangle$, we copy the values of the input register to the result register. This is done in the final part of the network.
However, this circuit requires that the value to be multiplied $a$ be hardwired into the circuit.
\begin{align}
a \times x = (a_{n-1} 2^{n-1} + ... + a_0 2^0)\times (x_{n-1} 2^{n-1} + ... + x_0 2^0) = 2^{2n-2}(a_{n-1} x_{n-1}) + ...+ 2^{n-1}(a_0 x_{n-1} + a_1 x_{n-2} + .... + a_{n-2} x_1 + a_{n-1} x_0) + ... + 2^0 (a_0 x_0).
\end{align}
Notice that the indices of each pair of qubits $a_i x_j$ indicate the corresponding power of $2$ ($2^{i+j}$). It is important to note that $2^{i+j}$ can exceed our modulus $N$. Thus, we classically compute $2^{i+j} \, mod \, N$ at each step and create a temporary register to hold these values.
Summarizing, the controlled multiplication proceed as follows:
1. We begin with $n$-qubit inputs $a$ and $x$ and examine each qubit individually. We apply a Toffoli gate with three controls to the qubits $a_i$, $x_j$ and the control qubit. If both qubits, along with the control qubit, are $|1\rangle$, we XOR the precomputed value $2^{i+j} \, mod \, N$ into the temporary register.
2. The temporary register is then fed into an addition modulo $N$ module, which adds its content to the register holding the running total. After this, the temporary register is reset to $|0\rangle$.
The total number of qubits needed is $7n + 3$.
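Tracing the same decomposition classically makes the role of the precomputed values $2^{i+j} \, mod \, N$ explicit (a sketch; the numbers used in the example are arbitrary):
```python
# Classical trace of the controlled modular multiplication decomposition:
# a*x mod N accumulates 2^(i+j) mod N whenever the bits a_i and x_j are both 1.
def cont_mod_mult(a, x, N, control=1):
    if control == 0:                  # control |0>: the network just copies x
        return x
    n = max(a.bit_length(), x.bit_length())
    total = 0
    for i in range(n):
        for j in range(n):
            if (a >> i) & 1 and (x >> j) & 1:
                total = (total + pow(2, i + j, N)) % N   # modular addition of 2^(i+j) mod N
    return total

print(cont_mod_mult(5, 6, 7), (5 * 6) % 7)    # both -> 2
```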
### Implementation in the quantum simulator. Example 4.
The fourth example is the implementation of the controlled modular multiplication of two $n$-qubit numbers $a$ and $x$. This program can be executed in the quantum simulator.
We define the same modules as in the modular addition, as well as the controlled modular multiplication algorithm for $n$-qubit numbers.
```python
# Use local qasm simulator
backend = Aer.get_backend('qasm_simulator')
# Use the IBM qasm simulator
# backend = IBMQ.get_backend('ibmq_qasm_simulator')
```
```python
def carry(circuit, q0, q1, q2, q3):
"carry module"
circuit.ccx(q1, q2, q3)
circuit.cx(q1, q2)
circuit.ccx(q0, q2, q3)
def carry_inv(circuit, q0, q1, q2, q3):
"carry module running backwards"
circuit.ccx(q0, q2, q3)
circuit.cx(q1, q2)
circuit.ccx(q1, q2, q3)
def summation(circuit, q0, q1, q2):
"summation module"
circuit.cx(q1, q2)
circuit.cx(q0, q2)
def summation_inv(circuit, q0, q1, q2):
"summation module running backwards"
circuit.cx(q0, q2)
circuit.cx(q1, q2)
# quantum plain addition algorithm for n-qubit numbers
def addition_nbit(circuit, qa, qb, qcar, n):
if n == 1:
circuit.ccx(qa[0], qb[0], qb[1])
circuit.cx(qa[0], qb[0])
else:
circuit.ccx(qa[0], qb[0], qcar[0])
circuit.cx(qa[0], qb[0])
for i in range(n-2):
carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1])
carry(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n])
circuit.cx(qa[n-1], qb[n-1])
for i in range(n-1, 1, -1):
summation(circuit, qcar[i-1], qa[i], qb[i])
carry_inv(circuit, qcar[i-2], qa[i-1], qb[i-1], qcar[i-1])
summation(circuit, qcar[0], qa[1], qb[1])
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qcar[0])
circuit.cx(qa[0], qb[0])
# quantum plain subtraction algorithm for n-qubit numbers
def subs_nbit(circuit, qa, qb, qcar, n):
"same circuit as the addition but going backwards"
if n == 1:
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qb[1])
else:
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qcar[0])
circuit.cx(qa[0], qb[0])
summation_inv(circuit, qcar[0], qa[1], qb[1])
for i in range(n-2):
carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1])
summation_inv(circuit, qcar[i+1], qa[i+2], qb[i+2])
circuit.cx(qa[n-1], qb[n-1])
carry_inv(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n])
for i in range(n-2, 0, -1):
carry_inv(circuit, qcar[i-1], qa[i], qb[i], qcar[i])
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qcar[0])
def cond_toffoli(circuit, qcond, q1, q2, q3):
"toffoli gate conditioned by an external qubit"
circuit.h(q3)
circuit.ccx(qcond, q2, q3)
circuit.tdg(q3)
circuit.ccx(qcond, q1, q3)
circuit.t(q3)
circuit.ccx(qcond, q2, q3)
circuit.tdg(q3)
circuit.ccx(qcond, q1, q3)
circuit.t(q3)
circuit.h(q3)
circuit.t(q2)
circuit.ccx(qcond, q1, q2)
circuit.t(q1)
circuit.tdg(q2)
circuit.ccx(qcond, q1, q2)
def cond_carry(circuit, q0, q1, q2, q3, qcond):
"conditional carry module"
cond_toffoli(circuit, qcond, q1, q2, q3)
circuit.ccx(qcond, q1, q2)
cond_toffoli(circuit, qcond, q0, q2, q3)
def cond_carry_inv(circuit, q0, q1, q2, q3, qcond):
"conditional carry module running backwards"
cond_toffoli(circuit, qcond, q0, q2, q3)
circuit.ccx(qcond, q1, q2)
cond_toffoli(circuit, qcond, q1, q2, q3)
def cond_summation(circuit, q0, q1, q2, qcond):
"conditional summation module"
circuit.ccx(qcond, q1, q2)
circuit.ccx(qcond, q0, q2)
def cond_summation_inv(circuit, q0, q1, q2, qcond):
"conditional summation module running backwards"
circuit.ccx(qcond, q0, q2)
circuit.ccx(qcond, q1, q2)
# quantum conditional plain addition algorithm for n-qubit numbers
def cond_addition_nbit(circuit, qa, qb, qcar, qcond, n):
if n == 1:
cond_toffoli(circuit, qcond, qa[0], qb[0], qb[1])
circuit.ccx(qcond, qa[0], qb[0])
else:
cond_toffoli(circuit, qcond, qa[0], qb[0], qcar[0])
circuit.ccx(qcond, qa[0], qb[0])
for i in range(n-2):
cond_carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1], qcond)
cond_carry(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n], qcond)
circuit.ccx(qcond, qa[n-1], qb[n-1])
for i in range(n-1, 1, -1):
cond_summation(circuit, qcar[i-1], qa[i], qb[i], qcond)
cond_carry_inv(circuit, qcar[i-2], qa[i-1], qb[i-1], qcar[i-1], qcond)
cond_summation(circuit, qcar[0], qa[1], qb[1], qcond)
circuit.ccx(qcond, qa[0], qb[0])
cond_toffoli(circuit, qcond, qa[0], qb[0], qcar[0])
circuit.ccx(qcond, qa[0], qb[0])
# quantum conditional plain subtraction algorithm for n-qubit numbers
def cond_subs_nbit(circuit, qa, qb, qcar, qcond, n):
"same circuit as the conditional plain addition but going backwards"
if n == 1:
circuit.ccx(qcond, qa[0], qb[0])
cond_toffoli(circuit, qcond, qa[0], qb[0], qb[1])
else:
circuit.ccx(qcond, qa[0], qb[0])
cond_toffoli(circuit, qcond, qa[0], qb[0], qcar[0])
circuit.ccx(qcond, qa[0], qb[0])
cond_summation_inv(circuit, qcar[0], qa[1], qb[1], qcond)
for i in range(n-2):
cond_carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1], qcond)
cond_summation_inv(circuit, qcar[i+1], qa[i+2], qb[i+2], qcond)
circuit.ccx(qcond, qa[n-1], qb[n-1])
cond_carry_inv(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n], qcond)
for i in range(n-2, 0, -1):
cond_carry_inv(circuit, qcar[i-1], qa[i], qb[i], qcar[i], qcond)
circuit.ccx(qcond, qa[0], qb[0])
cond_toffoli(circuit, qcond, qa[0], qb[0], qcar[0])
# quantum modular addition algorithm for n-qubit numbers
def mod_addition_nbit(circuit, qa, qb, qN, qNtemp, qcar, qtemp, n):
addition_nbit(circuit, qa, qb, qcar, n)
subs_nbit(circuit, qN, qb, qcar, n)
circuit.x(qb[n])
circuit.cx(qb[n], qtemp)
circuit.x(qb[n])
cond_subs_nbit(circuit, qNtemp, qN, qcar, qtemp, n)
addition_nbit(circuit, qN, qb, qcar, n)
cond_addition_nbit(circuit, qNtemp, qN, qcar, qtemp, n)
subs_nbit(circuit, qa, qb, qcar, n)
circuit.cx(qb[n], qtemp)
addition_nbit(circuit, qa, qb, qcar, n)
# quantum controlled modular multiplication algorithm for n-qubit numbers
def cont_mod_mult_nbit(circuit, qa, qb, qN, qNtemp, qcar, qtemp, qtempst, qX, qext, N, n):
for i in range(n):
for j in range(n):
classical_mod = (2**(i+j))%N
cond_number_state(circuit, qtempst, classical_mod, qext, qa[i], qX[j], n)
mod_addition_nbit(circuit, qtempst, qb, qN, qNtemp, qcar, qtemp, n)
cond_number_state(circuit, qtempst, classical_mod, qext, qa[i], qX[j], n)
circuit.x(qext)
cond_addition_nbit(circuit, qX, qb, qcar, qext, n)
circuit.x(qext)
```
We define the function that creates the different states. This time we also define another function, which does essentially the same but is controlled by 3 control qubits.
```python
# n-qubit number input state
def number_state(circuit, q, x, n):
# integer to binary
x = "{0:b}".format(x)
x = x.zfill(n)
# creating the state
for i in range(n):
if int(x[n-1-i]) == 1:
circuit.x(q[i])
# n-qubit number input state, controlled by an external qubit and 2 control qubits
def cond_number_state(circuit, q, x, ext, control1, control2, n):
# integer to binary
x = "{0:b}".format(x)
x = x.zfill(n)
# creating the state
for i in range(n):
if int(x[n-1-i]) == 1:
cond_toffoli(circuit, ext, control1, control2, q[i])
```
Let's now implement the controlled modular multiplication of two numbers $a \times x \, mod \, N$, for instance $1 \times 1 \,mod\, 1$ (in order to avoid long computing times, but you can try with two-qubit numbers), which should return $0 \, (00)$ if the control qubit is $|1\rangle$ (otherwise the result would be $1 \, (01)$):
```python
# we define the values
a = 1
x = 1
N = 1
# computing number of qubits n needed
n = len("{0:b}".format(a))
n2 = len("{0:b}".format(x))
n3 = len("{0:b}".format(N))
if n2 > n:
n = n2
if n3 > n:
n = n3
# classical register with n+1 bits.
c = ClassicalRegister(n+1, name="cr")
# quantum registers
qa = QuantumRegister(n, name="qa") # a qubits
qb = QuantumRegister(n+1, name="qb") # result register
qN = QuantumRegister(n+1, name="qN") # N qubits
qNtemp = QuantumRegister(n, name="qNtemp") # temporary N qubits
qtemp = QuantumRegister(1, name="qtemp") # temporary qubit
qtempst = QuantumRegister(n, name="qtempst") # temporary register
qX = QuantumRegister(n, name="qX") # x register
qext = QuantumRegister(1, name="qext")
# if n = 1, no need of carry register
if n == 1:
qcar = 0
# quantum circuit involving the quantum registers and the classical register
mod_mult_circuit = QuantumCircuit(qa, qb, qN, qNtemp, qtemp, qtempst, qX, qext, c, name="mod_mult")
else:
qcar = QuantumRegister(n-1, name="qcar") # carry qubits
# quantum circuit involving the quantum register and the classical register
mod_mult_circuit = QuantumCircuit(qa, qb, qN, qcar, qNtemp, qtemp, qtempst, qX, qext, c, name="mod_mult")
# create the state containing 'a'
number_state(mod_mult_circuit, qa, a, n)
# create the state containing 'x'
number_state(mod_mult_circuit, qX, x, n)
# create the state containing 'N'
number_state(mod_mult_circuit, qN, N, n+1)
# create a temporary state containing 'N'
number_state(mod_mult_circuit, qNtemp, N, n)
mod_mult_circuit.x(qext[0]) # we set the control qubit to |1>
# controlled modular multiplication
cont_mod_mult_nbit(mod_mult_circuit, qa, qb, qN, qNtemp, qcar, qtemp[0], qtempst, qX, qext[0], N, n)
# measurements to see the result
for i in range(n+1):
mod_mult_circuit.measure(qb[i], c[i])
# compile and execute the quantum program in the backend
result = execute(mod_mult_circuit, backend=backend, shots=1024).result()
# show the results.
print(result)
print(result.get_data(mod_mult_circuit))
counts = result.get_counts(mod_mult_circuit)
plot_histogram(counts)
print("Backend:", backend.name())
print("Highest probability outcome: {}".format(int(max(counts, key = lambda x: counts[x]).replace(" ", ""), 2)))
```
We indeed obtain the expected result $0 \, (00)$.
## Modular exponentiation
Using the previous constructions, we can finally implement the exponentiation modulo $N$: $a^x \,mod \,N$. The value $a^x$ can be written as $a^x = a^{2^{0}x_0} · a^{2^{1}x_1} · ... · a^{2^{m-1}x_{m-1}}$. The modular exponentiation can be implemented by initially setting a register to $|1\rangle$ and then effecting $n$ modular multiplications by $a^{2^i}$ depending on the value of the qubit $|x_i\rangle$. Let's look into the operations carefully. If $x_i = 1$:
\begin{align}
|a^{2^{0}x_{0} + ... + 2^{i-1}x_{i-1}}, 0 \rangle \;\rightarrow\; |a^{2^{0}x_{0} + ... + 2^{i-1}x_{i-1}}, a^{2^{0}x_{0} + ... + 2^{i-1}x_{i-1}} · a^{2^i}\rangle
\end{align}
otherwise, if $x_i = 0$:
\begin{align}
|a^{2^{0}x_{0} + ... + 2^{i-1}x_{i-1}}, 0 \rangle \;\rightarrow\; |a^{2^{0}x_{0} + ... + 2^{i-1}x_{i-1}}, a^{2^{0}x_{0} + ... + 2^{i-1}x_{i-1}}\rangle \,.
\end{align}
In both cases the result can be written as $|a^{2^{0}x_{0} + ... + 2^{i-1}x_{i-1}}, a^{2^{0}x_{0} + ... + 2^{i}x_{i}}\rangle$. We must take care of the accumulation of intermediate data. To erase this partial information, we run a controlled multiplication network backwards with the value $a^{-2^i} \,mod\, N$. This quantity is precomputed classically, which requires $a$ and $N$ to be coprime.
The total number of qubits needed is $7n + 3 + n_x$, where $n_x$ are the number of qubits of the value $x$.
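A classical trace of the same schedule, conditional multiplications by the precomputed powers $a^{2^k} \, mod \, N$, may help to follow the network (a sketch only; the numbers are arbitrary examples):
```python
# Classical trace of the modular exponentiation schedule:
# a^x mod N is built by conditionally multiplying by a^(2^k) mod N for each bit x_k.
def mod_exp(a, x, N):
    result = 1 % N                     # register initialised to |1>
    for k in range(x.bit_length()):
        factor = pow(a, 2**k, N)       # a^(2^k) mod N, precomputed classically
        if (x >> k) & 1:               # conditioned on the qubit |x_k>
            result = (result * factor) % N
    return result

print(mod_exp(7, 11, 15), pow(7, 11, 15))   # both -> 13
```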
### Implementation in the quantum simulator. Example 5.
As you may expect, we do not want to finish this Qiskit tutorial on basic arithmetic operations without the keystone of Shor's factorization algorithm. The final example is the implementation of the modular exponentiation $a^x \; mod\, N$. This program can be executed in the quantum simulator.
This time we have to run the controlled modular multiplication block backwards, therefore we will need to define new modules, such as the modular addition run backwards (modular subtraction).
```python
# Use local qasm simulator
backend = Aer.get_backend('qasm_simulator')
# Use the IBM qasm simulator
# backend = IBMQ.get_backend('ibmq_qasm_simulator')
```
```python
def carry(circuit, q0, q1, q2, q3):
"carry module"
circuit.ccx(q1, q2, q3)
circuit.cx(q1, q2)
circuit.ccx(q0, q2, q3)
def carry_inv(circuit, q0, q1, q2, q3):
"carry module running backwards"
circuit.ccx(q0, q2, q3)
circuit.cx(q1, q2)
circuit.ccx(q1, q2, q3)
def summation(circuit, q0, q1, q2):
"summation module"
circuit.cx(q1, q2)
circuit.cx(q0, q2)
def summation_inv(circuit, q0, q1, q2):
"summation module running backwards"
circuit.cx(q0, q2)
circuit.cx(q1, q2)
# quantum plain addition algorithm for n-qubit numbers
def addition_nbit(circuit, qa, qb, qcar, n):
if n == 1:
circuit.ccx(qa[0], qb[0], qb[1])
circuit.cx(qa[0], qb[0])
else:
circuit.ccx(qa[0], qb[0], qcar[0])
circuit.cx(qa[0], qb[0])
for i in range(n-2):
carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1])
carry(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n])
circuit.cx(qa[n-1], qb[n-1])
for i in range(n-1, 1, -1):
summation(circuit, qcar[i-1], qa[i], qb[i])
carry_inv(circuit, qcar[i-2], qa[i-1], qb[i-1], qcar[i-1])
summation(circuit, qcar[0], qa[1], qb[1])
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qcar[0])
circuit.cx(qa[0], qb[0])
# quantum plain subtraction algorithm for n-qubit numbers
def subs_nbit(circuit, qa, qb, qcar, n):
"same as the plain addition but running backwards"
if n == 1:
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qb[1])
else:
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qcar[0])
circuit.cx(qa[0], qb[0])
summation_inv(circuit, qcar[0], qa[1], qb[1])
for i in range(n-2):
carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1])
summation_inv(circuit, qcar[i+1], qa[i+2], qb[i+2])
circuit.cx(qa[n-1], qb[n-1])
carry_inv(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n])
for i in range(n-2, 0, -1):
carry_inv(circuit, qcar[i-1], qa[i], qb[i], qcar[i])
circuit.cx(qa[0], qb[0])
circuit.ccx(qa[0], qb[0], qcar[0])
def cond_toffoli(circuit, qcond, q1, q2, q3):
"conditional toffoli gate"
circuit.h(q3)
circuit.ccx(qcond, q2, q3)
circuit.tdg(q3)
circuit.ccx(qcond, q1, q3)
circuit.t(q3)
circuit.ccx(qcond, q2, q3)
circuit.tdg(q3)
circuit.ccx(qcond, q1, q3)
circuit.t(q3)
circuit.h(q3)
circuit.t(q2)
circuit.ccx(qcond, q1, q2)
circuit.t(q1)
circuit.tdg(q2)
circuit.ccx(qcond, q1, q2)
def cond_carry(circuit, q0, q1, q2, q3, qcond):
"conditional carry module"
cond_toffoli(circuit, qcond, q1, q2, q3)
circuit.ccx(qcond, q1, q2)
cond_toffoli(circuit, qcond, q0, q2, q3)
def cond_carry_inv(circuit, q0, q1, q2, q3, qcond):
"conditional carry module running backwards"
cond_toffoli(circuit, qcond, q0, q2, q3)
circuit.ccx(qcond, q1, q2)
cond_toffoli(circuit, qcond, q1, q2, q3)
def cond_summation(circuit, q0, q1, q2, qcond):
"conditional summation"
circuit.ccx(qcond, q1, q2)
circuit.ccx(qcond, q0, q2)
def cond_summation_inv(circuit, q0, q1, q2, qcond):
"conditional summation running backwards"
circuit.ccx(qcond, q0, q2)
circuit.ccx(qcond, q1, q2)
# quantum conditional plain addition algorithm for n-qubit numbers
def cond_addition_nbit(circuit, qa, qb, qcar, qcond, n):
if n == 1:
cond_toffoli(circuit, qcond, qa[0], qb[0], qb[1])
circuit.ccx(qcond, qa[0], qb[0])
else:
cond_toffoli(circuit, qcond, qa[0], qb[0], qcar[0])
circuit.ccx(qcond, qa[0], qb[0])
for i in range(n-2):
cond_carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1], qcond)
cond_carry(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n], qcond)
circuit.ccx(qcond, qa[n-1], qb[n-1])
for i in range(n-1, 1, -1):
cond_summation(circuit, qcar[i-1], qa[i], qb[i], qcond)
cond_carry_inv(circuit, qcar[i-2], qa[i-1], qb[i-1], qcar[i-1], qcond)
cond_summation(circuit, qcar[0], qa[1], qb[1], qcond)
circuit.ccx(qcond, qa[0], qb[0])
cond_toffoli(circuit, qcond, qa[0], qb[0], qcar[0])
circuit.ccx(qcond, qa[0], qb[0])
# quantum conditional plain subtraction algorithm for n-qubit numbers
def cond_subs_nbit(circuit, qa, qb, qcar, qcond, n):
"same as conditional plain addition but running backwards"
if n == 1:
circuit.ccx(qcond, qa[0], qb[0])
cond_toffoli(circuit, qcond, qa[0], qb[0], qb[1])
else:
circuit.ccx(qcond, qa[0], qb[0])
cond_toffoli(circuit, qcond, qa[0], qb[0], qcar[0])
circuit.ccx(qcond, qa[0], qb[0])
cond_summation_inv(circuit, qcar[0], qa[1], qb[1], qcond)
for i in range(n-2):
cond_carry(circuit, qcar[i], qa[i+1], qb[i+1], qcar[i+1], qcond)
cond_summation_inv(circuit, qcar[i+1], qa[i+2], qb[i+2], qcond)
circuit.ccx(qcond, qa[n-1], qb[n-1])
cond_carry_inv(circuit, qcar[n-2], qa[n-1], qb[n-1], qb[n], qcond)
for i in range(n-2, 0, -1):
cond_carry_inv(circuit, qcar[i-1], qa[i], qb[i], qcar[i], qcond)
circuit.ccx(qcond, qa[0], qb[0])
cond_toffoli(circuit, qcond, qa[0], qb[0], qcar[0])
# quantum modular addition algorithm for n-qubit numbers
def mod_addition_nbit(circuit, qa, qb, qN, qNtemp, qcar, qtemp, n):
addition_nbit(circuit, qa, qb, qcar, n)
subs_nbit(circuit, qN, qb, qcar, n)
circuit.x(qb[n])
circuit.cx(qb[n], qtemp)
circuit.x(qb[n])
cond_subs_nbit(circuit, qNtemp, qN, qcar, qtemp, n)
addition_nbit(circuit, qN, qb, qcar, n)
cond_addition_nbit(circuit, qNtemp, qN, qcar, qtemp, n)
subs_nbit(circuit, qa, qb, qcar, n)
circuit.cx(qb[n], qtemp)
addition_nbit(circuit, qa, qb, qcar, n)
# quantum modular subtraction algorithm for n-qubit numbers
def mod_subs_nbit(circuit, qa, qb, qN, qNtemp, qcar, qtemp, n):
"same as modular addition but running backwards"
subs_nbit(circuit, qa, qb, qcar, n)
circuit.cx(qb[n], qtemp)
addition_nbit(circuit, qa, qb, qcar, n)
cond_subs_nbit(circuit, qNtemp, qN, qcar, qtemp, n)
subs_nbit(circuit, qN, qb, qcar, n)
cond_addition_nbit(circuit, qNtemp, qN, qcar, qtemp, n)
circuit.x(qb[n])
circuit.cx(qb[n], qtemp)
circuit.x(qb[n])
addition_nbit(circuit, qN, qb, qcar, n)
subs_nbit(circuit, qa, qb, qcar, n)
# quantum controlled modular multiplication algorithm for n-qubit numbers
def cont_mod_mult_nbit(circuit, qa, qb, qN, qNtemp, qcar, qtemp, qtempst, qX, qext, N, n):
for i in range(n):
for j in range(n):
classical_mod = (2**(i+j))%N
cond_number_state(circuit, qtempst, classical_mod, qext, qa[i], qX[j], n)
mod_addition_nbit(circuit, qtempst, qb, qN, qNtemp, qcar, qtemp, n)
cond_number_state(circuit, qtempst, classical_mod, qext, qa[i], qX[j], n)
circuit.x(qext)
cond_addition_nbit(circuit, qX, qb, qcar, qext, n)
circuit.x(qext)
def cont_inv_mod_mult_nbit(circuit, qa, qb, qN, qNtemp, qcar, qtemp, qtempst, qX, qext, N, n):
"same as the controlled modular multiplication but running backwards"
circuit.x(qext)
cond_subs_nbit(circuit, qX, qb, qcar, qext, n)
circuit.x(qext)
for i in range(n):
for j in range(n):
classical_mod = (2**(i+j))%N
cond_number_state(circuit, qtempst, classical_mod, qext, qa[i], qX[j], n)
mod_subs_nbit(circuit, qtempst, qb, qN, qNtemp, qcar, qtemp, n)
cond_number_state(circuit, qtempst, classical_mod, qext, qa[i], qX[j], n)
# quantum modular exponentiation algorithm for n-qubit numbers
def mod_exp_nbit(circuit, qa, qb, qN, qNtemp, qcar, qtemp, qtempst, q1, qX, N, a, n):
for k in range(len(qX)):
clas_value = (a**(2**(k)))%N
if k % 2 == 0:
number_state(circuit, qa, clas_value, n)
cont_mod_mult_nbit(circuit, qa, qb, qN, qNtemp, qcar, qtemp, qtempst, q1, qX[k], N, n)
number_state(circuit, qa, clas_value, n)
clas_value = modinv(a**(2**(k)), N)
number_state(circuit, qa, clas_value, n)
cont_inv_mod_mult_nbit(circuit, qa, q1, qN, qNtemp, qcar, qtemp, qtempst, qb, qX[k], N, n)
number_state(circuit, qa, clas_value, n)
else:
number_state(circuit, qa, clas_value, n)
cont_mod_mult_nbit(circuit, qa, q1, qN, qNtemp, qcar, qtemp, qtempst, qb, qX[k], N, n)
number_state(circuit, qa, clas_value, n)
clas_value = modinv(a**(2**(k)), N)
number_state(circuit, qa, clas_value, n)
cont_inv_mod_mult_nbit(circuit, qa, qb, qN, qNtemp, qcar, qtemp, qtempst, q1, qX[k], N, n)
number_state(circuit, qa, clas_value, n)
```
We define the function that creates the different states. This time we also define another two functions, which compute the value of the modular multiplicative inverse $a^{-1} \, mod \, m$.
```python
# n-qubit number input state
def number_state(circuit, q, x, n):
# integer to binary
x = "{0:b}".format(x)
x = x.zfill(n)
# creating the state
for i in range(n):
if int(x[n-1-i]) == 1:
circuit.x(q[i])
# n-qubit number input state, controlled by an external qubit and 2 control qubits
def cond_number_state(circuit, q, x, ext, control1, control2, n):
# integer to binary
x = "{0:b}".format(x)
x = x.zfill(n)
# creating the state
for i in range(n):
if int(x[n-1-i]) == 1:
cond_toffoli(circuit, ext, control1, control2, q[i])
# efficient algorithm for computing the modular multiplicative inverse a^-1 mod m
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
def modinv(a, m):
g, x, y = egcd(a, m)
if g != 1:
raise Exception('modular inverse does not exist')
else:
return x % m
```
Let's now implement the modular exponentiation $a^x \, mod \, N$ for $n$-qubit numbers, for instance $1^1 \,mod\, 1$ (in order to avoid long computing times, but you can try with two-qubit numbers), which should return $0 \, (00)$:
```python
# we define the values, a and N must be coprimes
a = 1
x = 1
N = 1
# computing number of qubits n needed
n = len("{0:b}".format(a))
n2 = len("{0:b}".format(x))
n3 = len("{0:b}".format(N))
if n3 > n:
n = n3
# classical register with n+1 bits.
c = ClassicalRegister(n+1, name="cr")
# quantum registers
qa = QuantumRegister(n, name="qa") # a qubits
qb = QuantumRegister(n+1, name="qb") # initial state |0>
qN = QuantumRegister(n+1, name="qN") # N qubits
qNtemp = QuantumRegister(n, name="qNtemp") # temporary N qubits
qtemp = QuantumRegister(1, name="qtemp") # temporary qubit
qtempst = QuantumRegister(n, name="qtempst") # temporary register
q1 = QuantumRegister(n+1, name="q1") # initial state |1>
qX = QuantumRegister(n2, name="qX") # x register
# if n = 1, no need of carry register
if n == 1:
qcar = 0
# quantum circuit involving the quantum registers and the classical register
mod_exp_circuit = QuantumCircuit(qa, qb, qN, qNtemp, qtemp, qtempst, q1, qX, c, name="mod_exp")
else:
    qcar = QuantumRegister(n-1, name="qcar") # carry qubits
# quantum circuit involving the quantum registers and the classical register
mod_exp_circuit = QuantumCircuit(qa, qb, qN, qcar, qNtemp, qtemp, qtempst, q1, qX, c, name="mod_exp")
# create the initial state |1>. If N = 1, initial state is |0>
if N != 1:
number_state(mod_exp_circuit, q1, 1, 1)
# create the state containing 'x'
number_state(mod_exp_circuit, qX, x, n2)
# create the state containing 'N'
number_state(mod_exp_circuit, qN, N, n+1)
# create a temporary state containing 'N'
number_state(mod_exp_circuit, qNtemp, N, n)
# modular exponentiation
mod_exp_nbit(mod_exp_circuit, qa, qb, qN, qNtemp, qcar, qtemp[0], qtempst, q1, qX, N, a, n)
# measurements to see the result, the result would be in one of those registers, q1 or qb
if n2 % 2 == 0:
for i in range(n+1):
mod_exp_circuit.measure(q1[i], c[i])
else:
for i in range(n+1):
mod_exp_circuit.measure(qb[i], c[i])
# compile and execute the quantum program in the backend
result = execute(mod_exp_circuit, backend=backend, shots=1024).result()
# show the results.
print(result)
print(result.get_data(mod_exp_circuit))
counts = result.get_counts(mod_exp_circuit)
plot_histogram(counts)
print("Backend:", backend.name())
print("Highest probability outcome: {}".format(int(max(counts, key = lambda x: counts[x]).replace(" ", ""), 2)))
```
We indeed have obtained the expected result $0 \, (00)$.
# Homework 01
Congratulations! You've managed to open this Jupyter notebook on either Github or on your local machine.
Help for Jupyter Notebooks can be found in the Jupyter Lab by going to `Help > Notebook Reference`. You can also go to the [Notebook basics](https://jupyter-notebook.readthedocs.io/en/latest/examples/Notebook/Notebook%20Basics.html) documentation.
The basics are that you can write *markdown* cells that have math, words, and other markdown (like Rmarkdown!)... and *code* cells that, well, have code and display the output below (as in Rmarkdown). Switch *mode* to change between the two (sidebar will change colors).
If you want to export your document to .pdf (you don't have to!), you can go to `File > Export Notebook As... > PDF`. To do this, I had to install the *Inkscape* application. (I did this with the [Chocolatey](https://chocolatey.org/) package manager on Windows. You can probably do this with [Homebrew](https://brew.sh/) on a Mac)
# Instructions
Your homework is to generate 3d plots of a quadratic function in $\mathbb R^2$ and to examine the relationship between eigenvalues of the Hessian matrices, shapes of the functions, and the (possible) existence of minima and maxima.
You can find the documentation for `Plots.jl` at <http://docs.juliaplots.org/latest/>
For the following functions
\begin{align}
f^a(x,y) &= -x^2 - y^2 \\
f^b(x,y) &= -x^2 + xy - y^2 \\
f^c(x,y) &= -x^2 + 2xy - y^2 \\
f^d(x,y) &= -x^2 + 3xy - y^2
\end{align}
1. Write the Hessian matrix in \LaTeX
2. Compute the determinants by hand. Are the Hessians PD, PSD, NSD, or ND? What does this imply about convexity / concavity of the function? What about the existence of a minimum or maximum over the domain $\mathbb R^2$?
3. `@assert` statements are wonderful to include in your functions because they make sure that the inputs meet certain assumptions... such as that the length of two vectors is the same. Using them regularly can help you avoid errors
Use an `@assert` statement to check that your determinants computed by hand are correct. See what Julia does when you put the wrong determinant in. See [`LinearAlgebra.det`](https://docs.julialang.org/en/v1/stdlib/LinearAlgebra/index.html#LinearAlgebra.det) docs
```julia
@assert det(Ha) == ???
```
4. Compute the eigenvalues of your matrix using [`LinearAlgebra.eigvals`](https://docs.julialang.org/en/v1/stdlib/LinearAlgebra/index.html#LinearAlgebra.eigvals)
5. Create a function in Julia to compute $f^a, f^b, f^c, f^d$ as above. Plot them!
To submit your homework, commit this notebook to your personal homework repo, push it, and issue a pull request to turn it in to me.
# Top of your Julia Code
```julia
# Load libraries first of all
using LinearAlgebra # load LinearAlgebra standard library
using Plots # loads the Plots module
```
┌ Info: Precompiling Plots [91a5bcdd-55d7-5caf-9e0b-520d859cae80]
└ @ Base loading.jl:1242
```julia
# tells Plots to use the GR() backend.
# Note: for interactive 3d plots, you can also install
# PyPlot or PlotlyJS and try using those. You might
# need to use Atom or the REPL to get the interactivity
gr()
```
Plots.GRBackend()
```julia
# define a range we can iterate over
xrange = -3.0 : 0.1 : 3.0
```
-3.0:0.1:3.0
# Question 1
$$
f^a(x,y) = -x^2 - y^2
$$
## Part 1a
Hessian is $H^a = \begin{bmatrix} -2 & 0 \\ 0 & -2 \end{bmatrix}$
## Part 1b
The determinant is $|H^a| = 4$ and the diagonal entries are negative, so the matrix is negative definite; $f^a$ is concave and has a global maximum at $(0,0)$
```julia
# define the Hessian for H^a.
# Note:
# Julia in Jupyter Notebooks and Atom can handle latexy characters
# I got fancy by typing H\^a [tab] and getting a superscript
# We could have also gotten greek letters with \beta [tab]
# or (very important) approximately equals with \approx [tab]
Hᵃ = [-2 0 ; 0 -2]
```
2×2 Array{Int64,2}:
-2 0
0 -2
## Part 1c
```julia
@assert det(Hᵃ) == 4
```
## Part 1d
```julia
eigvals(Hᵃ)
```
2-element Array{Float64,1}:
-2.0
-2.0
```julia
# functions to plot
fa(x,y) = -x^2 - y^2
```
fa (generic function with 1 method)
```julia
plot(xrange, xrange, fa, st = :surface, title = "\$a=0\$")
```
# Question 2
$$
f^b(x,y) = -x^2 +xy - y^2
$$
## Part 2a
Hessian is $H^b = \begin{bmatrix} -2 & 1 \\ 1 & -2 \end{bmatrix}$
## Part 2b
Determinant is $|H^b| = 3$ and the diagonal entries are negative, so the matrix is negative definite, and $f^b$ has a global maximum at $(0,0)$
```julia
Hᵇ= [-2 1; 1 -2]
```
2×2 Array{Int64,2}:
-2 1
1 -2
```julia
@assert det(Hᵇ)== 3
```
```julia
eigvals(Hᵇ)
```
2-element Array{Float64,1}:
-3.0
-1.0
```julia
# functions to plot
fb(x,y) = -x^2 + x*y - y^2
```
fb (generic function with 1 method)
```julia
plot(xrange, xrange, fb, st = :surface, title = "\$a=0\$")
```
# Question 3
$$
f^c(x,y) = -x^2 +2xy -y^2
$$
## Part 3a
Hessian is $H^c = \begin{bmatrix} -2 & 2 \\ 2 & -2 \end{bmatrix}$
## Part 3b
Determinant is $|H^c| = 0$, so the matrix is negative semidefinite, and $f^c$ has a non-unique global maximum (attained along the whole line $y = x$, including $(0,0)$)
```julia
Hᶜ= [-2 2; 2 -2]
```
2×2 Array{Int64,2}:
-2 2
2 -2
```julia
@assert det(Hᶜ)== 0
```
```julia
eigvals(Hᶜ)
```
2-element Array{Float64,1}:
-4.0
0.0
```julia
# functions to plot
fᶜ(x,y) = -x^2 + 2*x*y - y^2
```
fᶜ (generic function with 1 method)
```julia
plot(xrange, xrange, fᶜ, st = :surface, title = "\$a=0\$")
```
# Question 4
$$
f^d(x,y) = -x^2 + 3xy - y^2
$$
## Part 4a
Hessian is $H^d = \begin{bmatrix} -2 & 3 \\ 3 & -2 \end{bmatrix}$
## Part 4b
Determinant is $|H^d| = -5$, so the matrix is indefinite, and $f^d$ has no global maximum.
```julia
Hᵈ = [-2 3; 3 -2]
```
2×2 Array{Int64,2}:
 -2   3
  3  -2
```julia
@assert det(Hᵈ) == -5
```
```julia
eigvals(Hᵈ)
```
2-element Array{Float64,1}:
 -5.0
  1.0
```julia
fᵈ(x,y) = -x^2 + 3*x*y - y^2
```
fᵈ (generic function with 1 method)
```julia
plot(xrange, xrange, fᵈ, st = :surface, title = "\$a=0\$")
```
```julia
```
```python
# A geometric algebra for the unit sphere in R^3
# as a submanifold of R^3 with spherical coordinates.
# Make SymPy available to this program:
import sympy
from sympy import *
# Make GAlgebra available to this program:
from galgebra.ga import *
from galgebra.mv import *
from galgebra.printer import Fmt, GaPrinter, Format
# Fmt: sets the way that a multivector's basis expansion is output.
# GaPrinter: makes GA output a little more readable.
# Format: turns on latex printer.
from galgebra.gprinter import gFormat, gprint
gFormat()
```
```python
# A geometric algebra for the unit sphere in R^3
# as a submanifold of R^3 with spherical coordinates.
# sp3: Base manifold
sp3coords = (r, phi, theta) = symbols('r phi theta', real=True)
sp3 = Ga('e', g=None, coords=sp3coords, \
X=[r*sin(phi)*cos(theta), r*sin(phi)*sin(theta), r*cos(phi)], norm=True)
(er, ephi, etheta) = sp3.mv()
# sp2: Submanifold
sp2coords = (p,t) = symbols('phi theta', real=True) # they output as Greek phi and theta
# Parameterize the unit sphere using the spherical coordinates of sp3:
sp2param = [1, p, t]
# Map the sp3 coordinates of the sphere to its sp2 coordinates:
sp2 = sp3.sm(sp2param, sp2coords, norm=True)
(ep, et) = sp2.mv()
(rp, rt) = sp2.mvr()
# Derivatives
grad = sp2.grad
from galgebra.dop import *
pdph = Pdop(p)
pdth = Pdop(t)
```
```python
gprint(grad)
```
```python
```
# Free Body Diagram for Rigid Bodies
Renato Naville Watanabe
```python
import numpy as np
import matplotlib.pyplot as plt
%matplotlib notebook
```
## Equivalent systems
Two sets of forces and moments are considered equivalent if their resultant force and their net moment computed relative to a given point are the same. Normally, we want to reduce all the forces and moments applied to a body to a single force and a single moment.
We have done this with particles for the resultant force. The resultant force is simply the sum of all the forces being applied to the body.
\begin{equation}
\vec{\bf{F}} = \sum\limits_{i=1}^n \vec{\bf{F_i}}
\end{equation}
where $\vec{\bf{F_i}}$ is each force applied to the body.
Similarly, the total moment applied to the body relative to a point O is:
\begin{equation}
\vec{\bf{M_O}} = \sum\limits_{i}\vec{\bf{r_{i/O}}} \times \vec{\bf{F_i}}
\end{equation}
where $\vec{\bf{r_{i/O}}} $ is the vector from the point O to the point where the force $\vec{\bf{F_i}}$ is being applied.
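A small numerical sketch of these two sums (the forces and their application points are arbitrary example values):
```python
# Resultant force and total moment about a point O for a set of forces.
# The forces F_i and their application points r_i are arbitrary example values.
import numpy as np

O = np.array([0.0, 0.0, 0.0])                      # reference point O
forces = [np.array([1.0, -2.0, 0.0]),              # F_1
          np.array([0.0,  3.0, 0.0])]              # F_2
points = [np.array([0.5,  0.0, 0.0]),              # point of application of F_1
          np.array([1.0,  0.2, 0.0])]              # point of application of F_2

F  = sum(forces)                                                # resultant force
MO = sum(np.cross(r - O, f) for r, f in zip(points, forces))    # total moment about O
print(F, MO)
```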
But where should the resultant force be applied on the body? If the resultant force were applied to any point other than the point O, it would produce an additional moment on the body relative to point O. So, the resultant force must be applied at the point O.
So, any set of forces can be reduced to a moment relative to a chosen point O and a resultant force applied to the point O.
To compute the resultant force and moment relative to another point O', the new moment is:
\begin{equation}
\vec{\bf{M_{O'}}} = \vec{\bf{M_O}} + \vec{\bf{r_{O/O'}}} \times \vec{\bf{F}}
\end{equation}
And the resultant force is the same.
It is worth noting that if the resultant force $\vec{\bf{F}}$ is zero, then the moment is the same relative to any point.
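As a minimal numerical sketch (not part of the original text), the reduction of a set of forces to a resultant force and a moment about a point, and the transfer of that moment to another point, can be computed directly; the forces and points below are assumed example values.
```python
import numpy as np

# Assumed example: three forces (N) applied at three points (m)
forces = np.array([[1.0, -2.0, 0.0], [0.0, 3.0, 0.0], [-1.5, 0.5, 0.0]])
points = np.array([[0.2, 0.0, 0.0], [0.5, 0.1, 0.0], [1.0, 0.0, 0.0]])
O  = np.array([0.0, 0.0, 0.0])
O2 = np.array([0.3, 0.2, 0.0])

F   = forces.sum(axis=0)                          # resultant force
MO  = np.cross(points - O, forces).sum(axis=0)    # total moment about O
MO2 = MO + np.cross(O - O2, F)                    # moment about O' (r_{O/O'} x F)
print(F, MO, MO2)
```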
### Steps to draw a free-body diagram
The steps to draw the free-body diagram of a body are very similar to those for particles.
1 - Draw separately each object considered in the problem. How you separate depends on what questions you want to answer.
2 - Identify the forces acting on each object. If you are analyzing more than one object, remember Newton's third law (action and reaction), and identify where the reaction of each force is applied. Whenever a translation of the body is constrained, a force in the direction of the constraint must exist.
3 - Identify the moments acting on each object. If you are analyzing more than one object, you must again consider the action and reaction law (third Newton-Euler law). Whenever a rotation of the body is constrained, a moment in the direction of the constraint must exist.
4 - Draw all the identified forces, representing them as vectors. The vectors should be represented with the origin in the object.
5 - Draw all the identified moments, representing them as vectors. In planar movements, the moments will be orthogonal to the considered plane. In these cases, the moment vectors are normally represented as curved arrows.
6 - If necessary, you should represent the reference frame (or references frames in case you use more than one reference frame) in the free-body diagram.
7 - After this, you can solve the problem using the First and Second Newton-Euler Laws (see, e.g, [Newton-Euler Laws](newton_euler_equations.ipynb)) to find the motion of the body.
\begin{equation}
\vec{\bf{F}} = m\vec{\bf{a_{cm}}} = m\frac{d^2\vec{\bf{r_{cm}}}}{dt^2}
\end{equation}
\begin{equation}
\vec{\bf{M_O}} = I_{zz}^{cm}\alpha + m \vec{\bf{r_{cm/O}}} \times \vec{\bf{a_{cm}}}=I_{zz}^{cm}\frac{d^2\theta}{dt^2} + m \vec{\bf{r_{cm/O}}} \times \frac{d^2\vec{\bf{r_{cm}}}}{dt^2}
\end{equation}
Below, we will see some examples of how to draw the free-body diagram and obtain the equation of motion.
## 1) Horizontal fixed bar
The first example is an example of statics. The bar has no velocity and no acceleration.
The free-body diagram of the bar is depicted below. At the point where the bar is connected to the wall, there is a force $\vec{\bf{F_1}}$ constraining the translation movement of the point O and a moment $\vec{\bf{M}}$ constraining the rotation of the bar.
The resultant force being applied to the bar is:
\begin{equation}
\vec{\bf{F}} = -mg\hat{\bf{j}} + \vec{\bf{F_1}}
\end{equation}
And the total moment in the z direction around the point O is:
\begin{equation}
\vec{\bf{M_O}} = \vec{\bf{r_{C/O}}}\times-mg\hat{\bf{j}} + \vec{\bf{M}}
\end{equation}
The vector from the point O to the point C is given by $\vec{\bf{r_{C/O}}} =\frac{l}{2}\hat{\bf{i}}$.
As the bar is fixed, all the accelerations are zero. So we can find the forces and the moment at the constraint.
\begin{equation}
\vec{\bf{F}} = \vec{\bf{0}} \rightarrow -mg\hat{\bf{j}} + \vec{\bf{F_1}} = \vec{\bf{0}} \rightarrow \vec{\bf{F_1}} = mg\hat{\bf{j}}
\end{equation}
\begin{equation}
\vec{\bf{M_O}} = \vec{\bf{0}} \rightarrow \vec{\bf{r_{C/O}}}\times-mg\hat{\bf{j}} + \vec{\bf{M}} = \vec{\bf{0}} \rightarrow \frac{l}{2}\hat{\bf{i}}\times-mg\hat{\bf{j}} + \vec{\bf{M}} = \vec{\bf{0}} \rightarrow -\frac{mgl}{2}\hat{\bf{k}} + \vec{\bf{M}} = \vec{\bf{0}} \rightarrow \vec{\bf{M}} = \frac{mgl}{2}\hat{\bf{k}}
\end{equation}
## 2) Rotating ball with drag force
A basketball has a mass of $m = 0.63$ kg and a radius of $R = 12$ cm. A basketball player shoots the ball from the free-throw line (4.6 m from the basket) with a speed of 9.5 m/s, an angle of 51 degrees with the court, a release height of 2 m and an angular velocity of 42 rad/s. Acting on the ball are a drag force proportional to the magnitude of the ball velocity, in the direction opposite to the velocity, and a drag moment proportional to, and in the direction opposite to, the angular velocity of the ball. Consider the moment of inertia of the ball as $I_{zz}^{cm} = \frac{2mR^2}{3}$.
Below is depicted the free-body diagram of the ball.
The resultant force being applied at the ball is:
\begin{equation}
\vec{\bf{F}} = -mg\hat{\bf{j}} - b_l\vec{\bf{v}} = -mg\hat{\bf{j}} - b_l\frac{d\vec{\bf{r}}}{dt}
\end{equation}
\begin{equation}
\vec{\bf{M_C}} = - b_r\omega\hat{\bf{k}}=- b_r\frac{d\theta}{dt}\hat{\bf{k}}
\end{equation}
\begin{equation}
\frac{d\vec{\bf{H_C}}}{dt} = I_{zz}^{C}\frac{d^2\theta}{dt^2}\hat{\bf{k}}
\end{equation}
So, by the second Newton-Euler law:
\begin{equation}
\frac{d\vec{\bf{H_C}}}{dt}=\vec{\bf{M_C}} \rightarrow I_{zz}^{C}\frac{d^2\theta}{dt^2} = - b_r\frac{d\theta}{dt}
\end{equation}
and by the first Newton-Euler law (for a revision on Newton-Euler laws, [see this notebook](newton_euler_equations.ipynb)):
\begin{equation}
m\frac{d^2\vec{\bf{r}}}{dt^2}=\vec{\bf{F}} \rightarrow \frac{d^2\vec{\bf{r}}}{dt^2} = -g\hat{\bf{j}} - \frac{b_l}{m}\frac{d\vec{\bf{r}}}{dt}
\end{equation}
So, we can split the differential equations above in three equations:
\begin{equation}
\frac{d^2\theta}{dt^2} = - \frac{b_r}{I_{zz}^{C}}\frac{d\theta}{dt}
\end{equation}
\begin{equation}
\frac{d^2x}{dt^2} = - \frac{b_l}{m}\frac{dx}{dt}
\end{equation}
\begin{equation}
\frac{d^2y}{dt^2} = -g - \frac{b_l}{m}\frac{dy}{dt}
\end{equation}
To solve these equations numerically, we can split each of them into first-order equations and then use a numerical method to integrate them. The first-order equations will be written in matrix form, considering the moment of inertia of the ball as $I_{zz}^{C}=\frac{2mR^2}{3}$:
\begin{equation}
\left[\begin{array}{c}\frac{d\omega}{dt}\\\frac{dv_x}{dt}\\\frac{dv_y}{dt}\\\frac{d\theta}{dt}\\\frac{dx}{dt}\\\frac{dy}{dt} \end{array}\right] = \left[\begin{array}{c}- \frac{3b_r}{2mR^2}\omega\\- \frac{b_l}{m}v_x\\-g - \frac{b_l}{m}v_y\\\omega\\v_x\\v_y\end{array}\right]
\end{equation}
Below, the equations were solved numerically by using the Euler method (for a revision on numerical methods to solve ordinary differential equations, [see this notebook](OrdinaryDifferentialEquation.ipynb)).
```python
m = 0.63
R = 0.12
I = 2.0/3*m*R**2
bl = 0.5
br = 0.001
g = 9.81
x0 = 0
y0 = 2
v0 = 9.5
angle = 51*np.pi/180.0
vx0 = v0*np.cos(angle)
vy0 = v0*np.sin(angle)
dt = 0.001
t = np.arange(0, 2.1, dt)
x = x0
y = y0
vx = vx0
vy = vy0
omega = 42
theta = 0
r = np.array([x,y])
ballAngle = np.array([theta])
state = np.array([omega, vx, vy, theta, x, y])
while state[4]<=4.6:
dstatedt = np.array([-br/I*state[0],-bl/m*state[1], -g-bl/m*state[2],state[0], state[1], state[2] ])
state = state + dt * dstatedt
r = np.vstack((r, [state[4], state[5]]))
ballAngle = np.vstack((ballAngle, [state[3]]))
plt.figure()
plt.plot(r[0:-1:50,0], r[0:-1:50,1], 'o', color = np.array([1, 0.6,0]), markersize=10)
plt.plot(np.array([4, 4.45]), np.array([3.05, 3.05]))
for i in range(len(r[0:-1:50,0])):
plt.plot(r[i*50,0]+np.array([-0.05*(np.cos(ballAngle[i*50])-np.sin(ballAngle[i*50])), 0.05*(np.cos(ballAngle[i*50])-np.sin(ballAngle[i*50]))]),
r[i*50,1] + np.array([-0.05*(np.sin(ballAngle[i*50])+np.cos(ballAngle[i*50])), 0.05*(np.sin(ballAngle[i*50])+np.cos(ballAngle[i*50]))]),'k')
plt.ylim((0,4.5))
plt.show()
print(state[0])
```
<IPython.core.display.Javascript object>
34.5034544615
Above is the trajectory of the ball until it reaches the basket (height of 3.05 m, marked with a blue line).
<a id='pendulum'></a>
## 3) Pendulum
Now, we will analyze a pendulum. It consists of a bar with its upper part linked to a hinge.
Below is the free-body diagram of the bar. The gravitational force acts at the center of mass of the bar. At the point O, where there is a hinge, a force $\vec{\bf{F_1}}$ restrains the point from translating. As the hinge does not constrain the rotational movement of the bar, there is no moment applied by the hinge.
To find the equation of motion of the bar we can use the second Newton-Euler law. So, we must find the sum of the moments and the resultant force being applied to the bar. The sum of moments could be computed relative to any point, but if we choose the fixed point O, it is easier because we can ignore the force $\vec{\bf{F_1}}$.
The moment around the fixed point is:
\begin{equation}
\vec{\bf{M_O}} = \vec{\bf{r_{cm/O}}} \times (-mg\hat{\bf{j}})
\end{equation}
The resultant force applied in the bar is:
\begin{equation}
\vec{\bf{F}} = -mg\hat{\bf{j}} + \vec{\bf{F_1}}
\end{equation}
The angular momentum derivative of the bar around point O is:
\begin{equation}
\frac{d\vec{\bf{H_O}}}{dt} = I_{zz}^{cm} \frac{d^2\theta}{dt^2} \hat{\bf{k}} + m \vec{\bf{r_{cm/O}}} \times \vec{\bf{a_{cm}}}
\end{equation}
The vector from point O to the center of mass is:
\begin{equation}
\vec{\bf{r_{cm/O}}} = \frac{l}{2}\sin{\theta}\hat{\bf{i}}-\frac{l}{2}\cos{\theta}\hat{\bf{j}}
\end{equation}
The position of the center of mass is, considering the point O as the origin, equal to $\vec{\bf{r_{cm/O}}}$. So, the center of mass acceleration is obtained by deriving it twice.
\begin{equation}
\vec{\bf{v_{cm}}} = \frac{d\vec{\bf{r_{cm/O}}}}{dt} = \frac{l}{2}(\cos{\theta}\hat{\bf{i}}+\sin{\theta}\hat{\bf{j}})\frac{d\theta}{dt} \rightarrow \vec{\bf{a_{cm}}} = \frac{l}{2}(-\sin{\theta}\hat{\bf{i}}+\cos{\theta}\hat{\bf{j}})\left(\frac{d\theta}{dt}\right)^2 + \frac{l}{2}(\cos{\theta}\hat{\bf{i}}+\sin{\theta}\hat{\bf{j}})\frac{d^2\theta}{dt^2}
\end{equation}
So, the moment around the point O:
\begin{equation}
\vec{\bf{M_O}} = \left(\frac{l}{2}\sin{\theta}\hat{\bf{i}}-\frac{l}{2}\cos{\theta}\hat{\bf{j}}\right) \times (-mg\hat{\bf{j}}) = \frac{-mgl}{2}\sin{\theta}\hat{\bf{k}}
\end{equation}
And the derivative of the angular momentum is:
\begin{equation}
\frac{d\vec{\bf{H_O}}}{dt} = I_{zz}^{cm} \frac{d^2\theta}{dt^2}\hat{\bf{k}} + m \frac{l}{2}(\sin{\theta}\hat{\bf{i}}-\cos{\theta}\hat{\bf{j}}) \times \left[ \frac{l}{2}(-\sin{\theta}\hat{\bf{i}}+\cos{\theta}\hat{\bf{j}})\left(\frac{d\theta}{dt}\right)^2 + \frac{l}{2}(\cos{\theta}\hat{\bf{i}}+\sin{\theta}\hat{\bf{j}})\frac{d^2\theta}{dt^2} \right] = I_{zz}^{cm} \frac{d^2\theta}{dt^2}\hat{\bf{k}} + m \frac{l^2}{4}\frac{d^2\theta}{dt^2} \hat{\bf{k}} =\left(I_{zz}^{cm} + \frac{ml^2}{4}\right)\frac{d^2\theta}{dt^2} \hat{\bf{k}}
\end{equation}
Now, by using the Newton-Euler laws, we can obtain the differential equation that describes the bar angle along time:
\begin{equation}
\frac{d\vec{\bf{H_O}}}{dt} = \vec{\bf{M_O}} \rightarrow \frac{-mgl}{2}\sin{\theta} = \left(I_{zz}^{cm} + \frac{ml^2}{4}\right)\frac{d^2\theta}{dt^2} \rightarrow \frac{d^2\theta}{dt^2} = \frac{-2mgl}{\left(4I_{zz}^{cm} + ml^2\right)}\sin{\theta}
\end{equation}
The moment of inertia of the bar relative to its center of mass is $I_{zz}^{cm} = \frac{ml^2}{12}$ (for a revision on moment of inertia see [this notebook](CenterOfMassAndMomentOfInertia.ipynb)). So, the equation of motion of the pendulum is:
\begin{equation}
\frac{d^2\theta}{dt^2} = \frac{-3g}{2l}\sin{\theta}
\end{equation}
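As a quick numerical illustration (not in the original text), this equation can be integrated with an Euler-type scheme like the one used in the basketball example above; the bar length and the initial angle below are assumed values.
```python
g = 9.81
l = 1.0                        # assumed bar length (m)
dt = 0.001
t = np.arange(0, 5, dt)
theta, omega = np.pi/6, 0.0    # assumed initial angle (rad) and angular velocity (rad/s)
thetas = np.zeros_like(t)
for i in range(len(t)):
    thetas[i] = theta
    omega = omega + dt*(-3*g/(2*l)*np.sin(theta))
    theta = theta + dt*omega
plt.figure()
plt.plot(t, thetas)
plt.xlabel('time (s)')
plt.ylabel('angle (rad)')
plt.show()
```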
## 4) Inverted Pendulum
In this example, we analyze the inverted pendulum. It consists of a bar with its lower extremity linked to a hinge.
The free-body diagram of the bar is depicted below. At the point where the bar is linked to the hinge, a force $\vec{\bf{F_1}}$ acts at the bar due to the restraint of the translation imposed to the point O by the hinge. Additionally, the gravitational force acts at the center of mass of the bar.
Similarly to pendulum, in this case we can find the equation of motion of the bar by using the second Newton-Euler law. To do that we must find the sum of the moments and the resultant force being applied to the bar. The sum of moments could be computed relative to any point, but if we choose the fixed point O, it is easier because we can ignore the force $\vec{\bf{F_1}}$.
The moment around the fixed point is:
\begin{equation}
\vec{\bf{M_O}} = \vec{\bf{r_{cm/O}}} \times (-mg\hat{\bf{j}})
\end{equation}
The resultant force applied in the bar is:
\begin{equation}
\vec{\bf{F}} = -mg\hat{\bf{j}} + \vec{\bf{F_1}}
\end{equation}
The angular momentum derivative of the bar around point O is:
\begin{equation}
\frac{d\vec{\bf{H_O}}}{dt} = I_{zz}^{cm} \frac{d^2\theta}{dt^2} \hat{\bf{k}} + m \vec{\bf{r_{cm/O}}} \times \vec{\bf{a_{cm}}}
\end{equation}
Up to this point, the equations are exactly the same as in the pendulum example. The difference is in the kinematics of the bar. Now, the vector from point O to the center of mass is:
\begin{equation}
\vec{\bf{r_{cm/O}}} = -\frac{l}{2}\sin{\theta}\hat{\bf{i}}+\frac{l}{2}\cos{\theta}\hat{\bf{j}}
\end{equation}
The position of the center of mass of the bar is equal to the vector $\vec{\bf{r_{cm/O}}}$, since the point O has zero velocity relative to the global reference frame. So the center of mass acceleration can be obtained by deriving this vector twice:
\begin{equation}
\vec{\bf{v_{cm}}} = \frac{d\vec{\bf{r_{cm/O}}}}{dt} = -\frac{l}{2}(\cos{\theta}\hat{\bf{i}}+\sin{\theta}\hat{\bf{j}})\frac{d\theta}{dt} \rightarrow \vec{\bf{a_{cm}}} = \frac{l}{2}(\sin{\theta}\hat{\bf{i}}-\cos{\theta}\hat{\bf{j}})\left(\frac{d\theta}{dt}\right)^2 - \frac{l}{2}(\cos{\theta}\hat{\bf{i}}+\sin{\theta}\hat{\bf{j}})\frac{d^2\theta}{dt^2}
\end{equation}
So, the moment around the point O is:
\begin{equation}
\vec{\bf{M_O}} = \left(-\frac{l}{2}\sin{\theta}\hat{\bf{i}}+\frac{l}{2}\cos{\theta}\hat{\bf{j}}\right) \times (-mg\hat{\bf{j}}) = \frac{mgl}{2}\sin{\theta} \hat{\bf{k}}
\end{equation}
And the derivative of the angular momentum is:
\begin{equation}
\frac{d\vec{\bf{H_O}}}{dt} = I_{zz}^{cm} \frac{d^2\theta}{dt^2} \hat{\bf{k}} + m \left(-\frac{l}{2}\sin{\theta}\hat{\bf{i}}+\frac{l}{2}\cos{\theta}\hat{\bf{j}}\right) \times \left[\frac{l}{2}(\sin{\theta}\hat{\bf{i}}-\cos{\theta}\hat{\bf{j}})\left(\frac{d\theta}{dt}\right)^2 - \frac{l}{2}(\cos{\theta}\hat{\bf{i}}+\sin{\theta}\hat{\bf{j}})\frac{d^2\theta}{dt^2}\right] = I_{zz}^{cm} \frac{d^2\theta}{dt^2} \hat{\bf{k}} + m \frac{l^2}{4}\frac{d^2\theta}{dt^2}\hat{\bf{k}} = \left(I_{zz}^{cm} + \frac{ml^2}{4}\right)\frac{d^2\theta}{dt^2}\hat{\bf{k}}
\end{equation}
By using the Newton-Euler laws, we can find the equation of motion of the bar:
\begin{equation}
\frac{d\vec{\bf{H_O}}}{dt} = \vec{\bf{M_O}} \rightarrow \left(I_{zz}^{cm} + \frac{ml^2}{4}\right)\frac{d^2\theta}{dt^2} = \frac{mgl}{2}\sin{\theta} \rightarrow \frac{d^2\theta}{dt^2} = \frac{2mgl}{\left(4I_{zz}^{cm} + ml^2\right)}\sin(\theta)
\end{equation}
The moment of inertia of the bar is $I_{zz}^{cm} = \frac{ml^2}{12}$. So, the equation of motion of the bar is:
\begin{equation}
\frac{d^2\theta}{dt^2} = \frac{3g}{2l}\sin(\theta)
\end{equation}
<a id='quietstanding'></a>
## 5) Human quiet standing
A very simple model of the human quiet standing (and still today frequently used) is to use an inverted pendulum to model the human body. On [this notebook](http://nbviewer.jupyter.org/github/demotu/BMC/blob/master/notebooks/IP_Model.ipynb) there is a more comprehensive explanation of human standing.
<figure><figcaption><center>Adapted from [Elias, Watanabe and Kohn (2014)](http://dx.doi.org/10.1371/journal.pcbi.1003944) </center></figcaption></figure>
Below is depicted the free-body diagram of the foot and the rest of the body. At the ankle joint there is a constraint to the translation of the ankle joint. So, a force $\vec{\bf{F_1}}$ is applied to the body at the ankle joint. By the third Newton law, a force $-\vec{\bf{F_1}}$ is applied to the foot at the ankle joint. At the center of mass of the body and of the foot, gravitational forces are applied. Additionally, a ground reaction force is applied to the foot at the center of pressure of the forces applied at the foot.
Additionally, a moment $\vec{\bf{T_A}}$ is applied at the body and its reaction is applied to the foot. This moment $\vec{\bf{T_A}}$ comes from the muscles around the ankle joint. It is usual in Biomechanics to represent the net torque generated by all the muscles on a single joint as a single moment applied to the body.
The process to obtain the equation of motion of the angle is very similar. The moment around the ankle being applied to the body is:
\begin{equation}
\vec{\bf{M_A}} = \vec{\bf{T_A}} + \vec{\bf{r_{cm/A}}} \times (-m_Bg\hat{\bf{j}})
\end{equation}
And the derivative of the angular momentum is:
\begin{equation}
\frac{d\vec{\bf{H_A}}}{dt} = I_{zz}^{cm}\frac{d^2\theta_A}{dt^2}\hat{\bf{k}} + m_B\vec{\bf{r_{cm/A}}} \times \vec{\bf{a_{cm}}}
\end{equation}
To find the kinematics of the bar, we could do the same procedure we have used in the pendulum and inverted pendulum examples, but this time we will use polar coordinates (for a revision on polar coordinates, see [Polar coordinates notebook](PolarCoordinates.ipynb)).
\begin{equation}
\vec{\bf{r_{cm/A}}} = \vec{\bf{r_{cm}}} = h_G\hat{\bf{e_r}} \rightarrow \vec{\bf{v_{cm}}} = h_G\frac{d\theta_A}{dt}\hat{\bf{e_\theta}} \rightarrow \vec{\bf{a_{cm}}} = -h_G\left(\frac{d\theta_A}{dt}\right)^2\hat{\bf{e_r}} + h_G\frac{d^2\theta_A}{dt^2}\hat{\bf{e_\theta}}
\end{equation}
where $\hat{\bf{e_r}} = -\sin(\theta_A)\hat{\bf{i}} + \cos(\theta_A)\hat{\bf{j}}$ and $\hat{\bf{e_\theta}} = -\cos(\theta_A)\hat{\bf{i}} - \sin(\theta_A)\hat{\bf{j}}$
Having the kinematics computed, we can go back to the moment and derivative of the angular momentum:
\begin{equation}
\vec{\bf{M_A}} = \vec{\bf{T_A}} + h_G\hat{\bf{e_r}} \times (-m_Bg\hat{\bf{j}}) = T_A\hat{\bf{k}} + h_Gm_Bg\sin(\theta_A)\hat{\bf{k}}
\end{equation}
\begin{equation}
\frac{d\vec{\bf{H_A}}}{dt} = I_{zz}^{cm}\frac{d^2\theta_A}{dt^2} \hat{\bf{k}} + m_Bh_G\hat{\bf{e_r}} \times \left(-h_G\left(\frac{d\theta_A}{dt}\right)^2\hat{\bf{e_r}} + h_G\frac{d^2\theta_A}{dt^2}\hat{\bf{e_\theta}}\right) = I_{zz}^{cm}\frac{d^2\theta_A}{dt^2}\hat{\bf{k}} + m_Bh_G^2\frac{d^2\theta_A}{dt^2}\hat{\bf{k}} = \left(I_{zz}^{cm} + m_Bh_G^2\right)\frac{d^2\theta_A}{dt^2}\hat{\bf{k}}
\end{equation}
By using the Newton-Euler equations, we can now find the equation of motion of the body during quiet standing:
\begin{equation}
\vec{\bf{M_A}}=\frac{d\vec{\bf{H_A}}}{dt} \rightarrow \left(I_{zz}^{cm} + m_Bh_G^2\right)\frac{d^2\theta_A}{dt^2} = T_A + h_Gm_B g\sin(\theta_A) \rightarrow \frac{d^2\theta_A}{dt^2} = \frac{h_Gm_B g}{I_{zz}^{cm} + m_Bh_G^2}\sin(\theta_A)+ \frac{T_A}{I_{zz}^{cm} + m_Bh_G^2}
\end{equation}
If we consider the body as a rigid uniform bar, the moment of inertia is $I_{zz}^{cm} = \frac{m_Bh_G^2}{3}$. So, the equation of motion of the body is:
\begin{equation}
\frac{d^2\theta_A}{dt^2} = \frac{3 g}{4h_G}\sin(\theta_A)+ \frac{3 T_A}{4m_Bh_G^2}
\end{equation}
## 6) Force platform
From the same example above, now we will find the position of the center of pressure during the quiet standing. The center of pressure is the point where the ground reaction force is being applied. The point O is linked to the ground in a way that it constraints the translation and rotation movement of the platform. Also, it is in the point O that sensors of force and moments are located.
The free-body diagram of the foot and the force platform is depicted below.
For the platform, the distance $y_0$ is known. As the platform is in equilibrium, the derivative of its angular momentum is zero. Now, we must find the sum of the moments. As we can choose any point, we will choose the point where the force $\vec{\bf{F}}$ is being applied, and set the sum of the moments equal to zero.
\begin{equation}
\vec{\bf{M}} + \vec{\bf{r_{COP/O}}} \times (-\vec{\bf{GRF}}) = 0
\end{equation}
The vector $\vec{\bf{r_{COP/O}}}$ is:
\begin{equation}
\vec{\bf{r_{COP/O}}} = COP_x \hat{\bf{i}} + y_0 \hat{\bf{j}}
\end{equation}
So, from the equation of the sum of the moments we can isolate the position of the center of pressure:
\begin{equation}
M - COP_x GRF_y + y_0 GRF_x = 0 \rightarrow COP_x = \frac{M+y_0 GRF_x}{GRF_y}
\end{equation}
Using the expression above, we can track the position of the center of pressure during an experiment.
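A minimal sketch (not from the original text) of this computation with synthetic force-platform signals; the moment, forces and sensor offset below are assumed example values.
```python
import numpy as np

t = np.arange(0, 10, 0.01)                    # 10 s sampled at 100 Hz
GRFy = 700 + 10*np.random.randn(len(t))       # vertical ground reaction force (N)
GRFx = 5*np.sin(2*np.pi*0.3*t)                # horizontal ground reaction force (N)
M = -20*np.sin(2*np.pi*0.3*t)                 # moment measured at the sensor (N.m)
y0 = 0.04                                     # distance from the sensor to the surface (m)
COPx = (M + y0*GRFx) / GRFy                   # expression derived above
```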
## 7) Segway
The horizontal acceleration of the point A $a_A$ of the segway is known. There is a motor at point A that generates a moment $\vec{\bf{T}}$ at the bar.
The free-body diagrams of the upper part of the segway and the cart are depicted below. Applied to the upper part are the moment $\vec{\bf{T}}$ due to the motor, the gravitational force at the center of mass of the bar, and the constraint force at the hinge at the point A. Applied to the cart are the ground reaction force, the reaction of $\vec{\bf{F_1}}$ at the hinge, and the reaction of the moment due to the motor. As the cart has an acceleration, there is a force $\vec{\bf{F}}$ to produce this acceleration.
The equation of motion of the bar can be obtained by using the Newton-Euler equations.
Considering the bar, the moment around the point A is:
\begin{equation}
\vec{\bf{M_A}} = \vec{\bf{r_{cm/A}}} \times (-mg\hat{\bf{j}}) + \vec{\bf{T}}
\end{equation}
And the derivative of the angular momentum around the point A is:
\begin{equation}
\frac{d\vec{H_A}}{dt} = I_{zz}^{cm}\frac{d^2\theta}{dt^2}+m\vec{\bf{r_{cm/A}}}\times \vec{\bf{a_{cm}}}
\end{equation}
The vector from the point A to the center of mass is:
\begin{equation}
\vec{\bf{r_{cm/A}}} = \frac{l}{2}\hat{\bf{e_R}}
\end{equation}
and the acceleration of the center of mass is obtained by deriving the position of the center of mass twice:
\begin{equation}
\vec{\bf{r_{cm}}} = \vec{\bf{r_{A}}} + \vec{\bf{r_{cm/A}}} = \vec{\bf{r_{A}}} + \frac{l}{2}\hat{\bf{e_R}} \rightarrow \vec{\bf{v_{cm}}} = \vec{\bf{v_{A}}} + \frac{l}{2}\frac{d\theta}{dt}\hat{\bf{e_\theta}} \rightarrow \vec{\bf{a_{cm}}} = \vec{\bf{a_{A}}} - \frac{l}{2}\left(\frac{d\theta}{dt}\right)^2\hat{\bf{e_R}}+\frac{l}{2}\frac{d^2\theta}{dt^2}\hat{\bf{e_\theta}}
\end{equation}
where $\hat{\bf{e_R}} = -\sin(\theta)\hat{\bf{i}}+\cos(\theta)\hat{\bf{j}}$ and $\hat{\bf{e_\theta}} = -\cos(\theta)\hat{\bf{i}}-\sin(\theta)\hat{\bf{j}}$.
Using the Newton-Euler laws, we can find the equation of motion of the upper part of the segway:
\begin{equation}
\frac{d\vec{H_A}}{dt} = \vec{\bf{M_A}} \rightarrow I_{zz}^{cm}\frac{d^2\theta}{dt^2}\hat{\bf{k}}+m\vec{\bf{r_{cm/A}}}\times \vec{\bf{a_{cm}}} = \vec{\bf{r_{cm/A}}} \times (-mg\hat{\bf{j}}) + \vec{\bf{T}} \rightarrow I_{zz}^{cm}\frac{d^2\theta}{dt^2}+m\frac{l}{2}\hat{\bf{e_R}}\times\left(a_{A}\hat{\bf{i}} - \frac{l}{2}\left(\frac{d\theta}{dt}\right)^2\hat{\bf{e_R}}+\frac{l}{2}\frac{d^2\theta}{dt^2}\hat{\bf{e_\theta}}\right) = ( \frac{l}{2}\hat{\bf{e_R}} \times (-mg\hat{\bf{j}}) + \vec{\bf{T}}) \rightarrow I_{zz}^{cm}\frac{d^2\theta}{dt^2}\hat{\bf{k}}-\frac{ml}{2}a_{A}\cos(\theta)\hat{\bf{k}} +\frac{ml^2}{4}\frac{d^2\theta}{dt^2}\hat{\bf{k}} = \frac{mgl}{2}\sin(\theta)\hat{\bf{k}} + T\hat{\bf{k}}
\end{equation}
So, the equation of motion of the upper part of the segway is:
\begin{equation}
\frac{d^2\theta}{dt^2} = \frac{mgl}{2\left(I_{zz}^{cm}+\frac{ml^2}{4}\right)}\sin(\theta) + \frac{ml}{2\left(I_{zz}^{cm}+\frac{ml^2}{4}\right)}a_{A}\cos(\theta)+ \frac{T}{\left(I_{zz}^{cm}+\frac{ml^2}{4}\right)}
\end{equation}
Considering the moment of inertia of the bar is $I_{zz}^{cm}=\frac{ml^2}{12}$, the equation of motion of the bar is:
\begin{equation}
\frac{d^2\theta}{dt^2} = \frac{3g}{2l}\sin(\theta) + \frac{3}{2l}a_{A}\cos(\theta)+ \frac{3T}{ml^2}
\end{equation}
<a id='doublependulum'></a>
## 8) Double pendulum with actuators
The figure below shows a double pendulum. This model can represent, for example, the arm and forearm of a subject.
The free-body diagrams of both bars are depicted below. There are constraint forces at all joints and moments from the muscles from both joints.
The versors $\hat{\bf{e_{R_1}}}$, $\hat{\bf{e_{R_2}}}$, $\hat{\bf{e_{\theta_1}}}$ and $\hat{\bf{e_{\theta_2}}}$ are defined as:
\begin{equation}
\hat{\bf{e_{R_1}}} = \sin(\theta_1) \hat{\bf{i}} - \cos(\theta_1) \hat{\bf{j}}
\end{equation}
\begin{equation}
\hat{\bf{e_{\theta_1}}} = \cos(\theta_1) \hat{\bf{i}} + \sin(\theta_1) \hat{\bf{j}}
\end{equation}
\begin{equation}
\hat{\bf{e_{R_2}}} = \sin(\theta_2) \hat{\bf{i}} - \cos(\theta_2) \hat{\bf{j}}
\end{equation}
\begin{equation}
\hat{\bf{e_{\theta_2}}} = \cos(\theta_2) \hat{\bf{i}} + \sin(\theta_2) \hat{\bf{j}}
\end{equation}
First, we can analyze the sum of the moments and the derivative of the angular momentum at the upper bar. They will be computed relative to the point O1.
\begin{equation}
\vec{\bf{M_{O1}}} = \vec{\bf{r_{C1/O1}}} \times (-m_1g\hat{\bf{j}}) + \vec{\bf{r_{O2/O1}}} \times (-\vec{\bf{F_{12}}}) + \vec{\bf{M_{1}}} - \vec{\bf{M_{12}}} = \frac{l_1}{2} \hat{\bf{e_{R_1}}} \times (-m_1g\hat{\bf{j}}) + l_1\hat{\bf{e_{R_1}}} \times (-\vec{\bf{F_{12}}}) + \vec{\bf{M_{1}}} - \vec{\bf{M_{12}}} = \frac{-m_1gl_1}{2}\sin(\theta_1)\hat{\bf{k}} + l_1\hat{\bf{e_{R_1}}} \times (-\vec{\bf{F_{12}}}) + \vec{\bf{M_{1}}} - \vec{\bf{M_{12}}}
\end{equation}
\begin{equation}
\frac{d\vec{\bf{H_{O1}}}}{dt} = I_{zz}^{C1} \frac{d^2\theta_1}{dt^2} + m_1 \vec{\bf{r_{C1/O1}}} \times \vec{\bf{a_{C1}}} = I_{zz}^{C1} \frac{d^2\theta_1}{dt^2} + m_1 \frac{l_1}{2} \hat{\bf{e_{R_1}}} \times \left[-\frac{l_1}{2}\left(\frac{d\theta_1}{dt}\right)^2 \hat{\bf{e_{R_1}}} + \frac{l_1}{2}\frac{d^2\theta_1}{dt^2} \hat{\bf{e_{\theta_1}}}\right] = \left( I_{zz}^{C1} + m_1 \frac{l_1^2}{4}\right)\frac{d^2\theta_1}{dt^2}
\end{equation}
Note that the force $\vec{\bf{F_{12}}}$ is unknown. We will take care of this later.
Now, we analyze the the sum of the moments and the derivative of the angular momentum at the lower bar relative to the point O2.
\begin{equation}
\vec{\bf{M_{O2}}} = \vec{\bf{r_{C2/O2}}} \times (-m_2g\hat{\bf{j}}) + \vec{\bf{M_{12}}} = \frac{l_2}{2} \hat{\bf{e_{R_2}}} \times (-m_2g\hat{\bf{j}}) + \vec{\bf{M_{12}}} = -\frac{m_2gl_2}{2}\sin(\theta_2)\hat{\bf{k}} + \vec{\bf{M_{12}}}
\end{equation}
\begin{equation}
\frac{d\vec{\bf{H_{O2}}}}{dt} = I_{zz}^{C2} \frac{d^2\theta_2}{dt^2} + m_2 \vec{\bf{r_{C2/O2}}} \times \vec{\bf{a_{C2}}} = I_{zz}^{C2} \frac{d^2\theta_2}{dt^2} + m_2 \frac{l_2}{2} \hat{\bf{e_{R_2}}} \times \left[-l_1\left(\frac{d\theta_1}{dt}\right)^2 \hat{\bf{e_{R_1}}} + l_1\frac{d^2\theta_1}{dt^2} \hat{\bf{e_{\theta_1}}} -\frac{l_2}{2}\left(\frac{d\theta_2}{dt}\right)^2 \hat{\bf{e_{R_2}}} + \frac{l_2}{2}\frac{d^2\theta_2}{dt^2} \hat{\bf{e_{\theta_2}}}\right]
\end{equation}
To complete the computation of the derivative of the angular momentum we must find the products $\hat{\bf{e_{R_2}}} \times \hat{\bf{e_{R_1}}} $, $\hat{\bf{e_{R_2}}} \times \hat{\bf{e_{\theta_1}}}$ and $\hat{\bf{e_{R_1}}} \times \hat{\bf{e_{\theta_2}}}$:
\begin{equation}
\hat{\bf{e_{R_2}}} \times \hat{\bf{e_{\theta_1}}} = \left[\begin{array}{c}\sin(\theta_2)\\-\cos(\theta_2)\end{array}\right] \times \left[\begin{array}{c}\cos(\theta_1)\\\sin(\theta_1)\end{array}\right] = \sin(\theta_1)\sin(\theta_2)+\cos(\theta_1)\cos(\theta_2) = \cos(\theta_1-\theta_2)
\end{equation}
\begin{equation}
\hat{\bf{e_{R_2}}} \times \hat{\bf{e_{R_1}}} = \left[\begin{array}{c}\sin(\theta_2)\\-\cos(\theta_2)\end{array}\right] \times \left[\begin{array}{c}\sin(\theta_1)\\-\cos(\theta_1)\end{array}\right] = -\sin(\theta_2)\cos(\theta_1)+\cos(\theta_2)\sin(\theta_1) = \sin(\theta_1-\theta_2)
\end{equation}
\begin{equation}
\hat{\bf{e_{R_1}}} \times \hat{\bf{e_{\theta_2}}} =\left[\begin{array}{c}\sin(\theta_1)\\-\cos(\theta_1)\end{array}\right] \times \left[\begin{array}{c}\cos(\theta_2)\\\sin(\theta_2)\end{array}\right] = \sin(\theta_2)\sin(\theta_1)+\cos(\theta_2)\cos(\theta_1) = \cos(\theta_1-\theta_2)
\end{equation}
So, the derivative of the angular momentum is:
\begin{align}
\begin{split}
\frac{d\vec{\bf{H_{O2}}}}{dt} &= I_{zz}^{C2} \frac{d^2\theta_2}{dt^2} - \frac{m_2l_1l_2}{2}\left(\frac{d\theta_1}{dt}\right)^2 \sin(\theta_1-\theta_2) + \frac{m_2l_1l_2}{2}\frac{d^2\theta_1}{dt^2} \cos(\theta_1-\theta_2) + \frac{m_2l_2^2}{4}\frac{d^2\theta_2}{dt^2} \\
&= \frac{m_2l_1l_2}{2}\cos(\theta_1-\theta_2)\frac{d^2\theta_1}{dt^2} + \left(I_{zz}^{C2} + \frac{m_2l_2^2}{4} \right)\frac{d^2\theta_2}{dt^2}- \frac{m_2l_1l_2}{2}\left(\frac{d\theta_1}{dt}\right)^2 \sin(\theta_1-\theta_2)
\end{split}
\end{align}
It remains to find the force $\vec{\bf{F_{12}}}$ that is in the sum of moment of the upper bar. It can be found by finding the resultant force in the lower bar and use the First Newton-Euler law:
\begin{equation}
\vec{\bf{F_{12}}} - m_2g \hat{\bf{j}} = m_2\vec{\bf{a_{C2}}} \rightarrow \vec{\bf{F_{12}}} = m_2g \hat{\bf{j}} + m_2\left[-l_1\left(\frac{d\theta_1}{dt}\right)^2 \hat{\bf{e_{R_1}}} + l_1\frac{d^2\theta_1}{dt^2} \hat{\bf{e_{\theta_1}}} -\frac{l_2}{2}\left(\frac{d\theta_2}{dt}\right)^2 \hat{\bf{e_{R_2}}} + \frac{l_2}{2}\frac{d^2\theta_2}{dt^2} \hat{\bf{e_{\theta_2}}}\right]
\end{equation}
Now, we can go back to the moment $\vec{\bf{M_{O1}}}$:
\begin{align}
\begin{split}
\vec{\bf{M_{O1}}} &= \frac{-m_1gl_1}{2}\sin(\theta_1)\hat{\bf{k}} + l_1\hat{\bf{e_{R_1}}} \times (-\vec{\bf{F_{12}}}) + \vec{\bf{M_{1}}} - \vec{\bf{M_{12}}} \\
&= -\frac{m_1gl_1}{2}\sin(\theta_1)\hat{\bf{k}} - l_1\hat{\bf{e_{R_1}}} \times m_2\left[g \hat{\bf{j}}-l_1\left(\frac{d\theta_1}{dt}\right)^2 \hat{\bf{e_{R_1}}} + l_1\frac{d^2\theta_1}{dt^2} \hat{\bf{e_{\theta_1}}} -\frac{l_2}{2}\left(\frac{d\theta_2}{dt}\right)^2 \hat{\bf{e_{R_2}}} + \frac{l_2}{2}\frac{d^2\theta_2}{dt^2} \hat{\bf{e_{\theta_2}}}\right] + \vec{\bf{M_{1}}} - \vec{\bf{M_{12}}} \\
&= -\frac{m_1gl_1}{2}\sin(\theta_1)\hat{\bf{k}} - m_2l_1g \sin(\theta_1)\hat{\bf{k}} - m_2l_1^2\frac{d^2\theta_1}{dt^2}\hat{\bf{k}} - \frac{m_2l_1l_2}{2}\left(\frac{d\theta_2}{dt}\right)^2 \sin(\theta_1-\theta_2)\hat{\bf{k}} - \frac{m_2l_1l_2}{2}\frac{d^2\theta_2}{dt^2} \cos(\theta_1-\theta_2)\hat{\bf{k}} + M_1\hat{\bf{k}} - M_{12}\hat{\bf{k}}
\end{split}
\end{align}
Finally, using the second Newton-Euler law for both bars, we can find the equation of motion of both angles:
\begin{equation}
\frac{d\vec{\bf{H_{O1}}}}{dt} = \vec{\bf{M_{O1}}} \rightarrow \left( I_{zz}^{C1} + m_1 \frac{l_1^2}{4}\right)\frac{d^2\theta_1}{dt^2} = -\frac{m_1gl_1}{2}\sin(\theta_1)- m_2l_1g \sin(\theta_1) - m_2l_1^2\frac{d^2\theta_1}{dt^2} - \frac{m_2l_1l_2}{2}\left(\frac{d\theta_2}{dt}\right)^2 \sin(\theta_1-\theta_2) - \frac{m_2l_1l_2}{2}\frac{d^2\theta_2}{dt^2} \cos{(\theta_1-\theta_2)} + M_1 - M_{12}
\end{equation}
\begin{equation}
\frac{d\vec{\bf{H_{O2}}}}{dt} = \vec{\bf{M_{O2}}} \rightarrow \frac{m_2l_1l_2}{2}\cos(\theta_1-\theta_2)\frac{d^2\theta_1}{dt^2} + \left(I_{zz}^{C2} + \frac{m_2l_2^2}{4} \right)\frac{d^2\theta_2}{dt^2}- \frac{m_2l_1l_2}{2}\left(\frac{d\theta_1}{dt}\right)^2 \sin(\theta_1-\theta_2) = -\frac{m_2gl_2}{2}\sin(\theta_2) + M_{12}
\end{equation}
Considering the moment of inertia of the bars as $I_{zz}^{C1}=\frac{m_1l_1^2}{12}$ and $I_{zz}^{C2}=\frac{m_2l_2^2}{12}$ the differential equations above are:
\begin{equation}
\left(\frac{m_1l_1^2}{3} +m_2l_1^2\right)\frac{d^2\theta_1}{dt^2} + \frac{m_2l_1l_2}{2} \cos(\theta_1-\theta_2)\frac{d^2\theta_2}{dt^2} = -\frac{m_1gl_1}{2}\sin(\theta_1)- m_2l_1g \sin(\theta_1) - \frac{m_2l_1l_2}{2}\left(\frac{d\theta_2}{dt}\right)^2 \sin(\theta_1-\theta_2) + M_1 - M_{12}
\end{equation}
\begin{equation}
\frac{m_2l_1l_2}{2}\cos(\theta_1-\theta_2)\frac{d^2\theta_1}{dt^2} + \frac{m_2l_2^2}{3}\frac{d^2\theta_2}{dt^2} = -\frac{m_2gl_2}{2}\sin(\theta_2) + \frac{m_2l_1l_2}{2}\left(\frac{d\theta_1}{dt}\right)^2 \sin(\theta_1-\theta_2)+ M_{12}
\end{equation}
We could isolate the angular accelerations but this would lead to long equations.
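A sketch (not part of the original text) of how the accelerations can be obtained numerically without isolating them by hand: treat the two equations as a linear system in $\frac{d^2\theta_1}{dt^2}$ and $\frac{d^2\theta_2}{dt^2}$ and solve it at each instant; the default masses, lengths and torques in the function signature are assumed example values.
```python
import numpy as np

def angular_accelerations(theta1, theta2, dtheta1, dtheta2, M1, M12,
                          m1=1.0, m2=1.0, l1=0.3, l2=0.3, g=9.81):
    """Solve the two equations of motion for the angular accelerations."""
    c = np.cos(theta1 - theta2)
    s = np.sin(theta1 - theta2)
    A = np.array([[m1*l1**2/3 + m2*l1**2, m2*l1*l2/2*c],
                  [m2*l1*l2/2*c,          m2*l2**2/3]])
    b = np.array([-m1*g*l1/2*np.sin(theta1) - m2*l1*g*np.sin(theta1)
                  - m2*l1*l2/2*dtheta2**2*s + M1 - M12,
                  -m2*g*l2/2*np.sin(theta2)
                  + m2*l1*l2/2*dtheta1**2*s + M12])
    return np.linalg.solve(A, b)
```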
### Problems
- Solve the problems 6, 7 and 8 from [this Notebook](FreeBodyDiagram.ipynb).
- Solve the problems 16.2.9, 18.3.26, 18.3.29 from Ruina and Pratap book.
### References
- Ruina A., Rudra P. (2015) [Introduction to Statics and Dynamics](http://ruina.tam.cornell.edu/Book/index.html). Oxford University Press.
- Duarte M. (2017) [Free body diagram](FreeBodyDiagram.ipynb)
- Elias, L A, Watanabe, R N, Kohn, A F.(2014) [Spinal Mechanisms May Provide a Combination of Intermittent and Continuous Control of Human Posture: Predictions from a Biologically Based Neuromusculoskeletal Model.](http://dx.doi.org/10.1371/journal.pcbi.1003944) PLOS Computational Biology (Online)
- Winter D. A., (2009) Biomechanics and motor control of human movement. John Wiley and Sons.
- Duarte M. (2017) [The inverted pendulum model of the human standing](http://nbviewer.jupyter.org/github/demotu/BMC/blob/master/notebooks/IP_Model.ipynb)
### Introduction to ARCH and GARCH models
Various problems such as option pricing in finance have motivated the study of the volatility, or variability, of a time series. ARMA models were used to model the conditional mean of a process when the conditional variance was constant. In many problems, however, the assumption of a constant conditional variance will be violated. Models such as the autoregressive conditionally heteroscedastic or ARCH model were developed to model changes in volatility. These models were later extended to generalized ARCH, or GARCH models.
In these problems, we are concerned with modeling the return or growth rate of a series. For example, if $x_t$ is the value of an asset at time $t$, then the return or relative gain, $r_t$, of the asset at time $t$ is
$$
r_t = \frac{x_t-x_{t-1}}{x_{t-1}}.
$$
If the return represents a small (in magnitude) percentage change then
$$
\log(\frac{x_t}{x_{t-1}}) = \log(1+r_t) \approx r_t.
$$
Typically, for financial series, the return $r_t$ does not have a constant conditional variance, and highly volatile periods tend to be clustered together. In other words, sudden bursts of variability in a return depend strongly on the series' own past.
The simplest ARCH model, the **ARCH(1)**, models the return as
$$
\begin{equation}
r_t = \mu+\epsilon_t\\
\epsilon_t = \sigma_t e_t, \ \ e_t ∼ \ N(0, 1)\\
\sigma_t^2 = \alpha_0 + \alpha_1 r^2_{t-1}.
\end{equation}
$$
The Normal assumption may be relaxed. We must impose some constraints on the model parameters to obtain desirable properties. An obvious constraint is that $\alpha_0, \alpha_1 \geq 0$ because $\sigma^2$ is a variance.
The ARCH(1) model describes the return as a white noise process with nonconstant conditional variance, where the conditional variance depends on the previous return.
The ARCH(1) model can be extended to the general **ARCH(p) model** in an obvious way, by changing the last component in the definition
$$
\sigma_t^2 = \alpha_0 + \alpha_1 r^2_{t-1}+ ... +\alpha_p r^2_{t-p}.
$$
Estimation of the parameters $\alpha_0,\alpha_1,...,\alpha_p$ of the ARCH(p) model is typically accomplished by conditional MLE. The conditional likelihood of the data $r_{p+1},...,r_n$ given $r_1,...,r_p$, is given by
$$
L(\mu,\alpha) = p(r_{p+1},...,r_n \ | \ r_1,...,r_p;\ \mu,\alpha)= \prod_{t=p+1}^n f_{\alpha}(r_t \ | \ r_{t-1},...,r_{t-p}),
$$
where under the assumption of normality, the conditional densities $f_\alpha(\cdot | \cdot)$ are, for t > p, given by
$$
r_t | r_{t-1},...,r_{t-p} ∼ N(0,\alpha_0 +\alpha_1r_{t-1}^2 +···+\alpha_p r_{t-p}^2 ).
$$
Here the constraints are $\alpha_i ≥ 0, i = 1,2,...,p$ and $(\alpha_1 + · · · + \alpha_p ) < 1$.
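As an illustrative sketch (not from the original text), the conditional MLE of an ARCH(1) model can be coded directly by minimizing the negative conditional log-likelihood over $(\alpha_0, \alpha_1)$; the starting values and bounds below are assumptions, and in practice a dedicated package (as used later in this notebook) is preferable.
```python
import numpy as np
from scipy.optimize import minimize

def arch1_neg_loglik(params, r):
    """Negative Gaussian conditional log-likelihood of an ARCH(1) model."""
    a0, a1 = params
    sigma2 = a0 + a1 * r[:-1]**2              # conditional variances for t = 2..n
    ll = -0.5 * (np.log(2*np.pi) + np.log(sigma2) + r[1:]**2 / sigma2)
    return -ll.sum()

# Usage sketch, with r a demeaned return series as a NumPy array:
# res = minimize(arch1_neg_loglik, x0=[0.1, 0.1], args=(r,),
#                bounds=[(1e-8, None), (0.0, 1.0)], method='L-BFGS-B')
```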
The **GARCH(1,1) model** is defined by changing the last component in the following way
$$
\sigma_t^2 = \alpha_0 + \alpha_1 r^2_{t-1} + \beta_1\sigma^2_{t-1} .
$$
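A small simulation sketch (not part of the original text) makes the volatility clustering implied by the GARCH(1,1) recursion visible; the parameter values below are assumed for illustration only (setting $\beta_1 = 0$ reduces the simulation to an ARCH(1) process).
```python
import numpy as np
import matplotlib.pyplot as plt

np.random.seed(0)
n, a0, a1, b1 = 2000, 0.05, 0.05, 0.90      # assumed parameters with a1 + b1 < 1
r = np.zeros(n)
sigma2 = np.zeros(n)
sigma2[0] = a0 / (1 - a1 - b1)              # start at the unconditional variance
for t in range(1, n):
    sigma2[t] = a0 + a1*r[t-1]**2 + b1*sigma2[t-1]
    r[t] = np.sqrt(sigma2[t]) * np.random.standard_normal()

plt.plot(r)
plt.title('Simulated GARCH(1,1) returns')
plt.show()
```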
*Reference*:
- *Time series analysis and it's aplications*, fourth edition, Robert H. Shumway and David S. Stoffer (2016)
### Load libraries and Federal Reserve FX Data
```python
# Load packages
import pandas as pd
import numpy as np
import pandas_datareader.data as web
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima.model import ARIMA
import scipy.stats as scs
from arch.univariate import GARCH
plt.rc("figure", figsize=(12, 6))
plt.rc("font", family="sans-serif")
plt.rc("font", size=10)
plt.rc("lines", linewidth=1.5)
sns.set_style("darkgrid")
start = datetime.datetime(2005,10,15)
end = datetime.datetime(2015,10,15)
# Federal Reserve FX Data (FRED) from the St. Louis Federal Reserve
# U.S. Dollars to Euro Spot Exchange Rate
fx_useu = web.DataReader("DEXUSEU",'fred', start, end)
```
```python
fx_useu.plot();
```
```python
fx_useu.info()
```
<class 'pandas.core.frame.DataFrame'>
DatetimeIndex: 2609 entries, 2005-10-17 to 2015-10-15
Data columns (total 1 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 DEXUSEU 2513 non-null float64
dtypes: float64(1)
memory usage: 40.8 KB
```python
# Check number of missing values
print(fx_useu.isna().sum())
```
DEXUSEU 96
dtype: int64
```python
# Perform linear interpolation at missing data points
fx_useu = fx_useu.interpolate()
```
```python
print(fx_useu.isna().sum())
```
DEXUSEU 0
dtype: int64
```python
# Compute returns (log returns) of irregular daily time series
rets = np.log(fx_useu / fx_useu.shift(1))
rets.columns = ['Return']
rets = rets.dropna()
```
```python
rets.head().round(3)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Return</th>
</tr>
<tr>
<th>DATE</th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>2005-10-18</th>
<td>-0.008</td>
</tr>
<tr>
<th>2005-10-19</th>
<td>0.004</td>
</tr>
<tr>
<th>2005-10-20</th>
<td>-0.001</td>
</tr>
<tr>
<th>2005-10-21</th>
<td>-0.002</td>
</tr>
<tr>
<th>2005-10-24</th>
<td>0.003</td>
</tr>
</tbody>
</table>
</div>
```python
# Plot return
rets.plot(ylabel='Return');
```
Next we present two plots:
- A histogram of the returns is created and the Gaussian/normal density corresponding to the maximum-likelihood fit is drawn.
This graph may appear to display a reasonable fit, but other diagnostics are useful to evaluate whether the distribution is heavier-tailed than a Gaussian distribution.
- A normal qq-plot of the returns is created. The sample of returns is sorted from smallest to largest:
$$
y_{[1]} ≤ y_{[2]} ≤ ··· ≤ y_{[n]}.
$$
Consider a sample of size n from a $N(0,1)$ distribution, $X_1,X_2,...,X_n$.
Define the order statistics as the sorted sample elements:
$$
X_{[1]} ≤X_{[2]} ≤···≤X_{[n]}.
$$
Define $x_1,...,x_n$ such that
$$
x_j = E[X_{[j]} | n], \ \text{the expected value conditional on $n$}.
$$
The qq-plot, plots the points $(x_j , y_j)$. If the sample $\{y_j\}$ is consistent with the Gaussian assumption, then the points will fall close to a straight line (with slope equal to the Gaussian standard deviation, and y intercept equal to the Gaussian mean).
```python
plt.figure(figsize=(10,4))
sns.histplot(rets['Return'], kde=True);
```
```python
# Normal QQ plot of returns
fig, ax =plt.subplots(figsize=(10,4))
sm.graphics.qqplot(rets['Return'], line='s', fit=True, ax=ax)
plt.title('US/EU');
```
Obviously, the sample quantile values do not lie on a straight line, indicating non-normality. On the left and right sides there are many values that lie well below and above the line, respectively. In other words, the time series exhibits fat tails.
### Geometric Brownian Motion Model
The geometric Brownian motion model for $\{S(t), t\geq 0\}$ is defined by Itô stochastic differential equation,
$$
dS(t) = S(t)[\mu dt+\sigma dB(t)], \ \ S(0)>0.
$$
The solution of this equation satisfies
$$
S(t)=S(0)\exp\Big[(\mu-\frac{\sigma^2}{2})t+\sigma B(t)\Big],
$$
from which it follows at once that $X(t) = log(S(t))$ satisfies
$$
X(t) = X(0) + (\mu-\frac{\sigma^2}{2})t+\sigma B(t),
$$
or equivalently
$$
dX(t) = (\mu-\frac{\sigma^2}{2})dt+\sigma dB(t).
$$
This implies that the log returns over a time step $\Delta t$ are independent and normally distributed with mean $(\mu-\frac{\sigma^2}{2})\Delta t$ and variance $\sigma^2 \Delta t$.
The function gen_paths() below generates sample Monte Carlo paths for geometric Brownian motion.
```python
# Code credit: "Python for Finance: Mastering Data-Driven Finance" by Yves Hilpisch
def gen_paths(S0, r, sigma, T, M, I):
    '''Generate Monte Carlo paths for geometric Brownian motion.

    Parameters
    ==========
    S0: float
        initial index level
    r: float
        constant short rate
    sigma: float
        constant volatility
    T: float
        final time horizon
    M: int
        number of time steps
    I: int
        number of paths to be simulated

    Returns
    =======
    paths: ndarray, shape (M+1, I)
        simulated paths given the parameters
    '''
dt = T/M
paths = np.zeros((M + 1,I))
paths[0] = S0
for t in range(1, M + 1):
rand = np.random.standard_normal(I)
rand = (rand-rand.mean())/rand.std()
paths[t] = paths[t - 1]*np.exp((r - 0.5*sigma**2)*dt + sigma*np.sqrt(dt)*rand)
return paths
```
```python
S0 = 100.
r = 0.05
sigma = 0.2
T = 1.0
M = 50
I = 250000
np.random.seed(1000)
```
```python
paths = gen_paths(S0, r, sigma, T, M, I)
```
```python
S0*np.exp(r*T)
```
105.12710963760242
```python
paths[-1].mean()
```
105.12645392478755
```python
paths[:,0].round(4)
```
array([100. , 97.821 , 98.5573, 106.1546, 105.899 , 99.8363,
100.0145, 102.6589, 105.6643, 107.1107, 108.7943, 108.2449,
106.4105, 101.0575, 102.0197, 102.6052, 109.6419, 109.5725,
112.9766, 113.0225, 112.5476, 114.5585, 109.942 , 112.6271,
112.7502, 116.3453, 115.0443, 113.9586, 115.8831, 117.3705,
117.9185, 110.5539, 109.9687, 104.9957, 108.0679, 105.7822,
105.1585, 104.3304, 108.4387, 105.5963, 108.866 , 108.3284,
107.0077, 106.0034, 104.3964, 101.0637, 98.3776, 97.135 ,
95.4254, 96.4271, 96.3386])
```python
# Ten simulated paths of geometric Brownian motion
plt.figure()
plt.plot(paths[:,:10])
plt.xlabel('time steps')
plt.ylabel('index level');
```
```python
log_returns = np.log(paths[1:]/paths[:-1])
```
```python
fig, ax = plt.subplots(1,2, figsize=(16, 4))
ax[0].hist(log_returns.flatten(), bins=70, density=True, label='frequency', color='b');
ax[0].set_xlabel('log return')
ax[0].set_ylabel('frequency')
ax[0].set_title('Histogram of log returns of GBM and normal density function')
x = np.linspace(ax[0].axis()[0], ax[0].axis()[1])
ax[0].plot(x, scs.norm.pdf(x, loc=r/M, scale=sigma/np.sqrt(M)), 'r', lw=2.0, label='pdf')
ax[0].legend()
sm.graphics.qqplot(log_returns.flatten()[::500], line='s', ax=ax[1])
ax[1].set_title('QQ plot for log returns of geometric Brownian motion');
```
### Time Dependence in Squared-Returns
Nonlinear time dependence in the time series of exchange rate returns is exhibited with the time dependence of the squared returns.
The autocorrelation function (ACF) and the partial autocorrelation function (PACF) are computed for the exchange rate returns and for their squared values. Marginally significant time dependence is present in the returns, while highly significant time dependence is apparent in the squared returns. (The blue lines in the plots are at +/- two standard deviations for the sample correlation coefficients under the null hypothesis of no time-series dependence.)
```python
fig, ax = plt.subplots(1,2, figsize=(14, 4))
sm.graphics.tsa.plot_acf(rets.values.squeeze(), title ='returns', ax=ax[0])
ax[0].set_xlabel('Lag')
ax[0].set_ylabel('ACF')
sm.graphics.tsa.plot_pacf(rets.values.squeeze(), title ='returns', ax=ax[1])
ax[1].set_xlabel('Lag')
ax[1].set_ylabel('Partial ACF')
plt.show()
```
```python
fig, ax = plt.subplots(1,2, figsize=(14, 4))
sm.graphics.tsa.plot_acf(rets.values.squeeze()**2, title ='returns^2', ax=ax[0])
ax[0].set_xlabel('Lag')
ax[0].set_ylabel('ACF')
sm.graphics.tsa.plot_pacf(rets.values.squeeze()**2, title ='returns^2', ax=ax[1])
ax[1].set_xlabel('Lag')
ax[1].set_ylabel('Partial ACF')
plt.show()
```
### Gaussian ARCH and GARCH Models
The following models are fit to the EUR/USD exchange rate returns:
- ARCH(1)
- ARCH(2)
- ARCH(10)
- GARCH(1,1)
The Python function arch_model() fits both ARCH(p) and GARCH(p,q) models by maximum likelihood, assuming Gaussian distributions for the model innovations.
```python
#import sys
#!{sys.executable} -m pip install --user arch
```
```python
from arch import arch_model
import warnings
```
```python
warnings.filterwarnings("ignore")
am = arch_model(100*rets['Return'], p=1, q=0)
res1 = am.fit(update_freq=5)
print(res1.summary())
```
Iteration: 5, Func. Count: 38, Neg. LLF: 2191.6941027200633
Iteration: 10, Func. Count: 78, Neg. LLF: 2183.202659087334
Optimization terminated successfully. (Exit mode 0)
Current function value: 2183.086474157681
Iterations: 14
Function evaluations: 102
Gradient evaluations: 14
Constant Mean - GARCH Model Results
==============================================================================
Dep. Variable: Return R-squared: 0.000
Mean Model: Constant Mean Adj. R-squared: 0.000
Vol Model: GARCH Log-Likelihood: -2183.09
Distribution: Normal AIC: 4374.17
Method: Maximum Likelihood BIC: 4397.64
No. Observations: 2608
Date: Mon, Nov 22 2021 Df Residuals: 2607
Time: 12:01:36 Df Model: 1
Mean Model
=============================================================================
coef std err t P>|t| 95.0% Conf. Int.
-----------------------------------------------------------------------------
mu 4.8766e-03 9.866e-03 0.494 0.621 [-1.446e-02,2.421e-02]
Volatility Model
=============================================================================
coef std err t P>|t| 95.0% Conf. Int.
-----------------------------------------------------------------------------
omega 1.0609e-03 7.999e-04 1.326 0.185 [-5.068e-04,2.629e-03]
alpha[1] 0.0325 4.837e-03 6.724 1.764e-11 [2.304e-02,4.200e-02]
beta[1] 0.9650 5.294e-03 182.286 0.000 [ 0.955, 0.975]
=============================================================================
Covariance estimator: robust
plot() can be used to quickly visualize the standardized residuals and conditional volatility.
```python
fig = res1.plot(annualize="D")
```
```python
am = arch_model(100*rets['Return'], p=2, q=0)
res2 = am.fit(update_freq=5)
print(res2.summary())
```
Iteration: 5, Func. Count: 42, Neg. LLF: 2374.512902110685
Iteration: 10, Func. Count: 73, Neg. LLF: 2371.830066102596
Optimization terminated successfully. (Exit mode 0)
Current function value: 2371.8300661025714
Iterations: 10
Function evaluations: 73
Gradient evaluations: 10
Constant Mean - ARCH Model Results
==============================================================================
Dep. Variable: Return R-squared: 0.000
Mean Model: Constant Mean Adj. R-squared: 0.000
Vol Model: ARCH Log-Likelihood: -2371.83
Distribution: Normal AIC: 4751.66
Method: Maximum Likelihood BIC: 4775.13
No. Observations: 2608
Date: Sun, Nov 21 2021 Df Residuals: 2607
Time: 18:29:34 Df Model: 1
Mean Model
=============================================================================
coef std err t P>|t| 95.0% Conf. Int.
-----------------------------------------------------------------------------
mu 3.9064e-03 1.164e-02 0.336 0.737 [-1.890e-02,2.671e-02]
Volatility Model
==========================================================================
coef std err t P>|t| 95.0% Conf. Int.
--------------------------------------------------------------------------
omega 0.2799 1.656e-02 16.905 4.161e-64 [ 0.247, 0.312]
alpha[1] 0.1056 4.295e-02 2.460 1.391e-02 [2.146e-02, 0.190]
alpha[2] 0.1580 3.275e-02 4.826 1.396e-06 [9.384e-02, 0.222]
==========================================================================
Covariance estimator: robust
```python
am = arch_model(100*rets['Return'], p=10, q=0)
res10 = am.fit(update_freq=5)
print(res10.summary())
```
Iteration: 5, Func. Count: 80, Neg. LLF: 2303.4474151105883
Iteration: 10, Func. Count: 155, Neg. LLF: 2259.1464364755147
Iteration: 15, Func. Count: 233, Neg. LLF: 2251.63518588464
Iteration: 20, Func. Count: 312, Neg. LLF: 2250.616887357195
Iteration: 25, Func. Count: 383, Neg. LLF: 2250.557696484909
Optimization terminated successfully. (Exit mode 0)
Current function value: 2250.5576964850393
Iterations: 25
Function evaluations: 383
Gradient evaluations: 25
Constant Mean - ARCH Model Results
==============================================================================
Dep. Variable: Return R-squared: 0.000
Mean Model: Constant Mean Adj. R-squared: 0.000
Vol Model: ARCH Log-Likelihood: -2250.56
Distribution: Normal AIC: 4525.12
Method: Maximum Likelihood BIC: 4595.51
No. Observations: 2608
Date: Sun, Nov 21 2021 Df Residuals: 2607
Time: 18:29:34 Df Model: 1
Mean Model
=============================================================================
coef std err t P>|t| 95.0% Conf. Int.
-----------------------------------------------------------------------------
mu 6.8632e-05 1.100e-02 6.237e-03 0.995 [-2.150e-02,2.164e-02]
Volatility Model
=============================================================================
coef std err t P>|t| 95.0% Conf. Int.
-----------------------------------------------------------------------------
omega 0.1055 1.700e-02 6.207 5.405e-10 [7.221e-02, 0.139]
alpha[1] 0.0381 2.789e-02 1.367 0.172 [-1.654e-02,9.279e-02]
alpha[2] 0.0716 2.639e-02 2.715 6.637e-03 [1.992e-02, 0.123]
alpha[3] 0.0940 3.455e-02 2.720 6.528e-03 [2.626e-02, 0.162]
alpha[4] 0.0624 3.132e-02 1.993 4.628e-02 [1.029e-03, 0.124]
alpha[5] 0.1114 3.311e-02 3.366 7.622e-04 [4.656e-02, 0.176]
alpha[6] 0.1405 3.234e-02 4.344 1.396e-05 [7.712e-02, 0.204]
alpha[7] 0.0670 3.957e-02 1.693 9.041e-02 [-1.056e-02, 0.145]
alpha[8] 0.0223 2.758e-02 0.807 0.420 [-3.180e-02,7.630e-02]
alpha[9] 0.0754 3.327e-02 2.267 2.340e-02 [1.021e-02, 0.141]
alpha[10] 0.0749 2.467e-02 3.034 2.412e-03 [2.651e-02, 0.123]
=============================================================================
Covariance estimator: robust
```python
am = arch_model(100*rets['Return'], p=1, q=1)
res11 = am.fit(update_freq=5)
print(res11.summary())
```
Iteration: 5, Func. Count: 38, Neg. LLF: 2191.6941027200633
Iteration: 10, Func. Count: 78, Neg. LLF: 2183.202659087334
Optimization terminated successfully. (Exit mode 0)
Current function value: 2183.086474157681
Iterations: 14
Function evaluations: 102
Gradient evaluations: 14
Constant Mean - GARCH Model Results
==============================================================================
Dep. Variable: Return R-squared: 0.000
Mean Model: Constant Mean Adj. R-squared: 0.000
Vol Model: GARCH Log-Likelihood: -2183.09
Distribution: Normal AIC: 4374.17
Method: Maximum Likelihood BIC: 4397.64
No. Observations: 2608
Date: Sun, Nov 21 2021 Df Residuals: 2607
Time: 18:29:34 Df Model: 1
Mean Model
=============================================================================
coef std err t P>|t| 95.0% Conf. Int.
-----------------------------------------------------------------------------
mu 4.8766e-03 9.866e-03 0.494 0.621 [-1.446e-02,2.421e-02]
Volatility Model
=============================================================================
coef std err t P>|t| 95.0% Conf. Int.
-----------------------------------------------------------------------------
omega 1.0609e-03 7.999e-04 1.326 0.185 [-5.068e-04,2.629e-03]
alpha[1] 0.0325 4.837e-03 6.724 1.764e-11 [2.304e-02,4.200e-02]
beta[1] 0.9650 5.294e-03 182.286 0.000 [ 0.955, 0.975]
=============================================================================
Covariance estimator: robust
```python
df = pd.concat([.01*res1.conditional_volatility,
.01*res2.conditional_volatility,
.01*res10.conditional_volatility,
.01*res11.conditional_volatility],axis=1)
df.columns = ['ARCH(1)', 'ARCH(2)', 'ARCH(10)', 'GARCH(1,1)']
subplot = df.plot();
#subplot.set_xlim(xlim)
```
```python
```
The estimated mean is $\hat{\mu} = 0.0049$ and the minimum-AICC GARCH model (with Gaussian noise) for the residuals is found to be the GARCH(1,1) with estimated parameter values $\hat{\alpha}_0 = 0.0010, \hat{\alpha}_1 = 0.0325, \hat{\beta}_1 = 0.9650$.
Note:
- The ARCH models have a hard lower bound ($\hat{α}_0$) which gets lower with
higher-order p values.
- The GARCH(1,1) model provides an extremely parsimonious model compared to that of the ARCH(10) model.
- The GARCH(1,1) model is quite smooth when compared to every ARCH
model.
- The GARCH(1,1) model is very close to being non-stationary
$$
\hat{\alpha}_1 + \hat{\beta}_1 = 0.9975
$$
This near-nonstationarity is consistent with there being no long-term mean volatility. Instead, the volatility evolves slowly over time (i.e., with a high value of $\beta_1$) with no tendency to revert toward any specific mean volatility level.
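A small computational aside (not in the original text): the persistence $\hat{\alpha}_1 + \hat{\beta}_1$ and the implied half-life of a volatility shock, $\log(0.5)/\log(\hat{\alpha}_1 + \hat{\beta}_1)$, can be computed directly from the estimates reported above.
```python
import numpy as np

alpha1, beta1 = 0.0325, 0.9650                 # estimates from the GARCH(1,1) fit above
persistence = alpha1 + beta1                   # 0.9975
half_life = np.log(0.5) / np.log(persistence)  # roughly 277 trading days
print(persistence, half_life)
```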
### AR(1)-GARCH(1,1) Models with t Distributions
In this section, we fit AR(1)-GARCH(1,1) model with t distributions for the innovations. This model explicitly incorporates non-linear dependence in the residuals (i.e., volatility) and provides a specific distribution alternative to the Gaussian with excess kurtosis (i.e., heavier tails).
```python
am = arch_model(rets['Return'], mean='AR', lags=1, p=1, q=1, dist='StudentsT')
res11 = am.fit(update_freq=5)
print(res11.summary())
```
Iteration: 5, Func. Count: 62, Neg. LLF: -4044.097391968343
Iteration: 10, Func. Count: 133, Neg. LLF: 41834.76688127982
Iteration: 15, Func. Count: 183, Neg. LLF: 15256.291147191834
Iteration: 20, Func. Count: 239, Neg. LLF: 25101.459066206015
Iteration: 25, Func. Count: 283, Neg. LLF: 25066.252509233804
Iteration: 30, Func. Count: 323, Neg. LLF: 22649.57451713406
Optimization terminated successfully. (Exit mode 0)
Current function value: 10948.938144351581
Iterations: 36
Function evaluations: 341
Gradient evaluations: 32
AR - GARCH Model Results
====================================================================================
Dep. Variable: Return R-squared: -502145.785
Mean Model: AR Adj. R-squared: -502338.548
Vol Model: GARCH Log-Likelihood: -10948.9
Distribution: Standardized Student's t AIC: 21909.9
Method: Maximum Likelihood BIC: 21945.1
No. Observations: 2607
Date: Mon, Nov 22 2021 Df Residuals: 2605
Time: 14:06:32 Df Model: 2
Mean Model
============================================================================
coef std err t P>|t| 95.0% Conf. Int.
----------------------------------------------------------------------------
Const 0.7666 9.702e-02 7.901 2.762e-15 [ 0.576, 0.957]
Return[1] 697.8144 23.753 29.377 1.069e-189 [6.513e+02,7.444e+02]
Volatility Model
=============================================================================
coef std err t P>|t| 95.0% Conf. Int.
-----------------------------------------------------------------------------
omega 1.7089e-09 1.910e-06 8.949e-04 0.999 [-3.741e-06,3.744e-06]
alpha[1] 0.9714 3.162e-02 30.720 3.063e-207 [ 0.909, 1.033]
beta[1] 0.0252 2.417e-03 10.430 1.817e-25 [2.047e-02,2.994e-02]
Distribution
========================================================================
coef std err t P>|t| 95.0% Conf. Int.
------------------------------------------------------------------------
nu 11.2058 0.680 16.482 4.903e-61 [ 9.873, 12.538]
========================================================================
Covariance estimator: robust
```python
fig, ax =plt.subplots(figsize=(10,4))
sm.graphics.qqplot(res11.resid[1:], line='s', fit=True, ax=ax)
plt.title('Q−Q Plot of Standardized Residuals AR(1)−GARCH(1,1) With t−Dist.');
```
```python
fig, ax = plt.subplots(1,2, figsize=(14, 4))
sm.graphics.tsa.plot_acf(res11.resid[1:]**2, title='residuals^2', ax=ax[0])
ax[0].set_xlabel('Lag')
ax[0].set_ylabel('ACF')
sm.graphics.tsa.plot_pacf(res11.resid[1:]**2, title='residuals^2', ax=ax[1])
ax[1].set_xlabel('Lag')
ax[1].set_ylabel('Partial ACF')
plt.show()
```
It looks like we have achieved a good model fit, as there is no obvious autocorrelation in the squared residuals.
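As a complementary check (not in the original notebook), a Ljung–Box test on the squared standardized residuals can back up the visual ACF/PACF inspection; the lag choices below are assumptions.
```python
from statsmodels.stats.diagnostic import acorr_ljungbox

std_resid = res11.resid[1:] / res11.conditional_volatility[1:]
print(acorr_ljungbox(std_resid**2, lags=[10, 20], return_df=True))
```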
```python
import numpy as np
from scipy import ndimage
from scipy import spatial
from scipy import io
from scipy import sparse
from scipy.sparse import csgraph
from scipy import linalg
from matplotlib import pyplot as plt
import seaborn as sns
from skimage import data
from skimage import color
from skimage import img_as_float
import graph3d
%matplotlib inline
```
# Load data
```python
image = img_as_float(data.camera()[::2, ::2])
```
```python
fig, ax = plt.subplots()
plt.imshow(image, cmap='gray')
plt.grid('off')
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_title('Original image')
plt.savefig('../img/tikhonov_regularization_0.pdf', bbox_inches='tight')
```
# Crop and add noise
```python
image = image[40:80, 100:140]
noisy_image = image + 0.05*np.random.randn(*image.shape)
```
```python
fig, ax = plt.subplots(1, 2, figsize=(8, 4))
ax[0].imshow(image, cmap='gray')
ax[1].imshow(noisy_image, cmap='gray')
ax[0].grid('off')
ax[1].grid('off')
ax[0].xaxis.set_ticks([])
ax[0].yaxis.set_ticks([])
ax[1].xaxis.set_ticks([])
ax[1].yaxis.set_ticks([])
ax[0].set_title('Cropped image')
ax[1].set_title('Noisy image')
plt.savefig('../img/tikhonov_regularization_1.pdf', bbox_inches='tight')
```
# Perform graph filtering
#### Given a signal $f_0$ corrupted by Gaussian noise $\eta$
\begin{equation}
\mathbf{y} = \mathbf{f_0} + \mathbf{\eta}
\end{equation}
#### Solve the regularization problem
\begin{equation}
\underset{f}{\text{argmin}} \{ ||f - y||_2^2 + \gamma f^T L f\}
\end{equation}
#### Solution is given by
\begin{equation}
f_{*}(i) = \sum_{l=0}^{N-1} \bigg[ \frac{1}{1 + \gamma \lambda_l} \bigg] \hat{y}
(\lambda_l) u_l(i)
\end{equation}
#### Or equivalently
\begin{equation}
\mathbf{f} = \hat{h}(L) \mathbf{y}
\end{equation}
#### Where L is the laplacian of the adjacency matrix defined by:
\begin{equation}
W_{i,j} =
\begin{cases}
\exp \bigg( - \frac{[dist(i, j)]^2}{2 \theta^2} \bigg) & \text{if $dist(i,j)$ < $\kappa$} \\
0 & \text{otherwise}
\end{cases}
\end{equation}
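Equivalently, because the objective is quadratic, the minimizer satisfies $(I + \gamma L)\mathbf{f} = \mathbf{y}$, so the filtered signal can also be obtained with a direct (sparse) linear solve instead of a full eigendecomposition. A minimal sketch, assuming `L`, `gamma` and `noisy_image` as defined in the cells below:
```python
# Sketch: solve (I + gamma * L) f = y directly; same minimizer, no eigendecomposition.
from scipy import sparse
from scipy.sparse.linalg import spsolve

def tikhonov_denoise(L, y, gamma):
    n = y.size
    A = sparse.identity(n, format='csr') + gamma * sparse.csr_matrix(L)
    return spsolve(A, y.ravel()).reshape(y.shape)
```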
```python
# Parameters
kappa = np.sqrt(2)
theta = 20
gamma = 10
```
```python
# Query neighboring pixels for each pixel
yx = np.vstack(np.dstack(np.indices(noisy_image.shape)))
tree = spatial.cKDTree(yx)
q = tree.query_ball_point(yx, kappa)
# Get pixels I, and neighbors J
I = np.concatenate([np.repeat(k, len(q[k])) for k in range(len(q))])
J = np.concatenate(q)
# Distance metric is difference between neighboring pixels
dist_ij = np.sqrt(((noisy_image.flat[I] - noisy_image.flat[J])**2))
# Thresholded Gaussian kernel weighting function
W = np.exp(-(dist_ij**2) / (2 * theta**2))
```
```python
# Construct sparse adjacency matrix
A = sparse.lil_matrix((noisy_image.size, noisy_image.size))
for i, j, w in zip(I, J, W):
A[i, j] = w
A[j, i] = w
A = A.todense()
```
```python
# Compute Laplacian
L = csgraph.laplacian(A)
# Compute eigenvalues and eigenvectors of laplacian
l, u = linalg.eigh(L)
# Compute filtering kernel
h = u @ np.diag(1 / (1 + gamma*l)) @ u.T
# Filter the image using the kernel
graph_filtered_image = (h @ noisy_image.ravel()).reshape(noisy_image.shape)
```
```python
# Filter the image using traditional gaussian filtering
traditional_filtered_image = ndimage.gaussian_filter(noisy_image, 0.8)
```
```python
# Plot the result
fig, ax = plt.subplots(2, 2, figsize=(6, 6))
ax.flat[0].imshow(image, cmap='gray')
ax.flat[1].imshow(noisy_image, cmap='gray')
ax.flat[2].imshow(graph_filtered_image, cmap='gray')
ax.flat[3].imshow(traditional_filtered_image, cmap='gray')
ax.flat[0].grid('off')
ax.flat[1].grid('off')
ax.flat[2].grid('off')
ax.flat[3].grid('off')
ax.flat[0].xaxis.set_ticks([])
ax.flat[0].yaxis.set_ticks([])
ax.flat[1].xaxis.set_ticks([])
ax.flat[1].yaxis.set_ticks([])
ax.flat[2].xaxis.set_ticks([])
ax.flat[2].yaxis.set_ticks([])
ax.flat[3].xaxis.set_ticks([])
ax.flat[3].yaxis.set_ticks([])
ax.flat[0].set_title('Cropped Image')
ax.flat[1].set_title('Noisy Image')
ax.flat[2].set_title('Graph Filtered')
ax.flat[3].set_title('Gaussian Filtered')
plt.tight_layout()
plt.savefig('../img/tikhonov_regularization_2.pdf', bbox_inches='tight')
```
```python
```
|
dcd8908014717b8c56834699449c6194682808a1
| 128,538 |
ipynb
|
Jupyter Notebook
|
notebooks/tikhonov_regularization.ipynb
|
mdbartos/graph-signals
|
e598d8fe7bcef101f740f7b810414043c607f06e
|
[
"MIT"
] | 22 |
2018-10-23T12:13:38.000Z
|
2022-01-26T00:00:44.000Z
|
notebooks/tikhonov_regularization.ipynb
|
mdbartos/graph-signals
|
e598d8fe7bcef101f740f7b810414043c607f06e
|
[
"MIT"
] | 1 |
2021-03-25T07:49:54.000Z
|
2021-03-25T07:49:54.000Z
|
notebooks/tikhonov_regularization.ipynb
|
mdbartos/graph-signals
|
e598d8fe7bcef101f740f7b810414043c607f06e
|
[
"MIT"
] | 6 |
2018-10-23T13:37:49.000Z
|
2021-12-11T03:52:51.000Z
| 382.553571 | 75,888 | 0.927088 | true | 1,304 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.926304 | 0.831143 | 0.769891 |
__label__eng_Latn
| 0.410553 | 0.627047 |
# Driving a skyrmion with spin-polarised current
**Author:** Weiwei Wang (2014)
**Edited:** Marijan Beg (2016)
The implemented equation in finmag with STT is [1,2],
\begin{equation}
\frac{\partial \mathbf{m}}{\partial t} = - \gamma \mathbf{m} \times \mathbf{H} + \alpha \mathbf{m} \times \frac{\partial \mathbf{m}}{\partial t} + u (\mathbf{j}_s \cdot \nabla) \mathbf{m} - \beta u [\mathbf{m}\times (\mathbf{j}_s \cdot \nabla)\mathbf{m}]
\end{equation}
where $\mathbf{j}_s$ is the current density. $u$ is the material parameter, and by default,
$$u=u_{ZL}=\frac{u_0}{1+\beta^2}$$
There is a `using_u0` option in the `sim.set_zhangli` method: $u=u_0$ if `using_u0=True`, and
$$u_0=\frac{g \mu_B P}{2 |e| M_s}=\frac{g \mu_B P a^3}{2 |e| \mu_s}$$
where $\mu_B=|e|\hbar/(2m)$ is the Bohr magneton, $P$ is the polarization rate, $e$ is the electron charge.
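For orientation, $u_0$ can be evaluated numerically; a small sketch (the values of $P$ and $M_\mathrm{s}$ are only illustrative, mirroring the simulation parameters used further down):
```python
# Illustrative evaluation of u_0 = g * mu_B * P / (2 * |e| * Ms).
g = 2.0              # Lande g-factor (assumed)
mu_B = 9.274e-24     # Bohr magneton (J/T)
e = 1.602e-19        # elementary charge (C)
P, Ms = 0.5, 5.8e5   # polarisation and Ms (A/m), as in the simulation below
u0 = g * mu_B * P / (2 * e * Ms)
print(u0)            # in m^3/C; multiplied by a current density (A/m^2) it gives m/s
```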
The implemented Landau-Lifshitz-Gilbert equation with Slonczewski spin-transfer torque is [3],
\begin{equation}
\frac{\partial \mathbf{m}}{\partial t} = - \gamma \mathbf{m} \times \mathbf{H} + \alpha \mathbf{m} \times \frac{\partial \mathbf{m}}{\partial t}
+ \gamma \beta \epsilon (\mathbf{m} \times \mathbf{m}_p \times \mathbf{m})
\end{equation}
where
\begin{align*}
\beta&=\left|\frac{\hbar}{\mu_0 e}\right|\frac{J}{tM_\mathrm{s}}\,\,\, \mathrm{and}\\
\epsilon&=\frac{P\Lambda^2}{(\Lambda^2+1)+(\Lambda^2-1)(\mathbf{m}\cdot\mathbf{m}_p)}
\end{align*}
[1] S. Zhang and Z. Li, Roles of nonequilibrium conduction electrons on the magnetization dynamics of ferromagnets, Phys. Rev. Lett. 93, 127204 (2004).
[2] A. Thiaville, Y. Nakatani, J. Miltat and Y. Suzuki, Micromagnetic understanding of current-driven domain wall motion in patterned nanowires, Europhys. Lett. 69, 990 (2005).
[3] J. Xiao, A. Zangwill, and M. D. Stiles, “Boltzmann test of Slonczewski’s theory of spin-transfer torque,” Phys. Rev. B, 70, 172405 (2004).
## Skyrmion nucleation
Import the related modules and create a two-dimensional rectangular mesh:
```python
%matplotlib inline
import os
import matplotlib.pyplot as plt
import dolfin as df
import numpy as np
from finmag import Simulation as Sim
from finmag.energies import Exchange, DMI, UniaxialAnisotropy, Zeeman
from finmag.util.dmi_helper import find_skyrmion_center_2d
from finmag.util.helpers import set_logging_level
import finmag
mesh = df.RectangleMesh(df.Point(0, 0), df.Point(200, 40), 200, 40)
```
[2016-07-05 12:15:41] INFO: Finmag logging output will be appended to file: '/home/mb4e10/.finmag/global.log'
[2016-07-05 12:15:41] DEBUG: Building modules in 'native'...
[2016-07-05 12:15:42] DEBUG: FinMag f28296c990ff2e117b748be86dbe1c03d67bde7a
[2016-07-05 12:15:42] DEBUG: Dolfin 1.6.0 Matplotlib 1.5.1
[2016-07-05 12:15:42] DEBUG: Numpy 1.11.0 Scipy 0.15.1
[2016-07-05 12:15:42] DEBUG: IPython 4.2.0 Python 2.7.6
[2016-07-05 12:15:42] DEBUG: Paraview 4.0.1-1ubuntu1 Sundials 2.5.0
[2016-07-05 12:15:42] DEBUG: Boost-Python <unknown> Linux Ubuntu 14.04.4 LTS
[2016-07-05 12:15:42] DEBUG: Registering debug signal handler. Press Ctrl-Z any time to stop execution and jump into the debugger.
We define a function to generate a skyrmion in the track,
```python
def m_init_one(pos):
x, y = pos
x0 = 50
y0 = 20
if (x-x0)**2 + (y-y0)**2 < 10**2:
return (0, 0, -1)
else:
return (0, 0, 1)
```
Create function that can plot scalar field of one magnetisation component:
```python
def plot_2d_comp(sim, comp='z', title=None):
"""expects a simulation object sim and a component to plot. Component can be
'x' or 'y' or 'z'
Not optimised for speed.
"""
finmag.logger.info("plot_2d_comp: at t = {:g}".format(sim.t))
comps = {'x': 0, 'y': 1, 'z': 2}
assert comp in comps, "print unknown component {}, we know: {}".format(comp, comp.keys())
m = sim.get_field_as_dolfin_function('m')
# get mesh coordinates for plotting
coords = mesh.coordinates()
mym = []
for coord in coords:
mym.append(m(coord))
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import numpy as np
x = [ r[0] for r in coords]
y = [ r[1] for r in coords]
# extract i-ith component of magnetisation
mi = [ m[comps[comp]] for m in mym]
# Create the Triangulation; no triangles so Delaunay triangulation created.
triang = tri.Triangulation(x, y)
# tripcolor plot.
plt.figure()
plt.gca().set_aspect('equal')
plt.tripcolor(triang, mi, shading='flat', cmap=plt.cm.rainbow)
if title:
plt.title(title)
else:
plt.title('Plot of {} component of m at t={:.3f}ns'.format(comp, sim.t * 1e9))
```
```python
sim = Sim(mesh, Ms=5.8e5, unit_length=1e-9, pbc=None)
sim.add(UniaxialAnisotropy(K1=6e5, axis=[0, 0, 1]))
sim.add(Exchange(A=1.5e-11))
sim.add(DMI(D=3e-3))
sim.add(Zeeman((0, 0, 1e5)))
sim.alpha = 0.5
sim.set_m(m_init_one)
sim.relax()
plot_2d_comp(sim, comp='z', title='relaxed magnetisation (z-comp)')
```
## Moving a skyrmion with Zhang-Li term
We apply a spin-polarised current in the $x$ direction.
```python
Jx = (-2e12, 0, 0)
```
Now, we can add a Zhang-Li term to the LLG equation.
```python
#We use the zhang-li spin-transfer torque with parameters that polarisation=0.5 and beta=0.01
sim.set_zhangli(Jx, P=0.5, beta=0.01, using_u0=False)
# every 0.1ns save vtk data
sim.schedule('save_vtk', every=1e-10, filename='vtks/m.pvd', overwrite=True)
# every 0.1ns save raw data
sim.schedule('save_m', every=1e-10, filename='npys/m.pvd', overwrite=True)
# every 0.1ns create plot for notebook
sim.schedule(plot_2d_comp, every=1e-10)
# now do the calculation (events scheduled above will be done automatically)
sim.run_until(0.5e-9)
```
## Slonczewski spin-transfer torque
If we want to move a skyrmion with perpendicular current (Slonczewski STT term), the following line should be used instead of `set_zhangli` method.
```python
#sim.set_stt(current_density=1e10, polarisation=0.5, thickness=0.4e-9, direction=(0,1,0))
```
|
2025b0601ba319caa7701841ae4f7a4a8d334aa8
| 103,362 |
ipynb
|
Jupyter Notebook
|
doc/ipython_notebooks_src/tutorial-skyrmion-nucleation-and-manipulation.ipynb
|
davidcortesortuno/finmag
|
9ac0268d2c0e45faf1284cee52a73525aa589e2b
|
[
"BSL-1.0"
] | 10 |
2018-03-24T07:43:17.000Z
|
2022-03-26T10:42:27.000Z
|
doc/ipython_notebooks_src/tutorial-skyrmion-nucleation-and-manipulation.ipynb
|
davidcortesortuno/finmag
|
9ac0268d2c0e45faf1284cee52a73525aa589e2b
|
[
"BSL-1.0"
] | 21 |
2018-03-26T15:08:53.000Z
|
2021-07-10T16:11:14.000Z
|
doc/ipython_notebooks_src/tutorial-skyrmion-nucleation-and-manipulation.ipynb
|
davidcortesortuno/finmag
|
9ac0268d2c0e45faf1284cee52a73525aa589e2b
|
[
"BSL-1.0"
] | 7 |
2018-04-09T11:50:48.000Z
|
2021-06-10T09:23:25.000Z
| 218.063291 | 16,732 | 0.877508 | true | 2,110 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.757794 | 0.709019 | 0.537291 |
__label__eng_Latn
| 0.712471 | 0.086636 |
# Regression
## Linear Regression
### Step 1 : Setup the Environment
```python
# Import necessary Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
```
### Step 2 : Clean the data and Visually analyse the data
```python
x = 10 * np.random.random(50)
y = 2 * x - 1 + np.random.randn(50)
print("x = ",x)
print("y = ",y)
```
x = [4.71575962 2.79611957 6.53365325 6.89171715 1.79343983 3.40168302
2.75966938 6.47687307 4.66321201 7.44681371 8.62711037 4.93215967
0.55907761 6.34443264 1.39039105 6.18648423 7.48840008 1.55065097
8.22250359 6.35686719 8.57282616 5.26966386 2.56339095 6.60315119
3.15469759 9.32347228 9.20683701 2.55341251 5.05858655 5.0380703
1.17126163 1.98061133 6.10325314 2.00837555 4.8957142 7.17552295
5.41394894 2.23837019 5.32201578 0.22963054 7.16808765 9.33971103
9.98976715 5.21045821 5.16935013 7.52269385 9.7892179 2.47902801
1.63778159 6.15557107]
y = [10.03012272 4.49529193 12.74188777 12.72551805 1.43077755 6.65275082
4.86431874 12.11007062 7.54992829 12.89914534 16.6046464 10.44832372
-0.85667271 11.8215409 2.96789924 11.30315529 15.32275064 2.45224635
16.28522892 10.90016452 16.27444692 10.60256859 5.33915333 12.59200911
5.1195451 18.68928542 18.44462835 4.80234441 9.21489562 9.60413798
1.95439708 2.08977758 10.94412696 2.84986311 8.16031285 13.31348924
10.20114927 2.53016296 8.73657176 -1.51279597 13.24798197 18.51094194
20.27965885 10.72682573 11.09500694 13.20130847 18.60031984 3.51043453
4.14270882 10.35636608]
```python
x = 10 * np.random.random(50)
y = 2 * x - 1 + np.random.randn(50)
plt.figure(dpi=120)
sns.scatterplot(x,y)
plt.xlabel("X - Independent Variable")
plt.ylabel("Y - Dependent Variable")
plt.show()
```
### Step 3 : Generate X "Feature matrix" and y "Target vector"
```python
print(x.shape)
print(x.ndim)
print(y.shape)
```
(50,)
1
(50,)
```python
# Convert the x vector to X matrix
X=x[:,np.newaxis]
print(X.shape)
```
(50, 1)
### Step 4 : Choose a class of model
##### -> Linear Regression
```python
from sklearn.linear_model import LinearRegression
model=LinearRegression()
print(model)
```
LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None,
normalize=False)
### Step 5 : Fit the model using the labeled data
```python
model.fit(X,y)
```
LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None,
normalize=False)
```python
print("The slope value is ",model.coef_)
print("The Intercept value is ",model.intercept_)
```
The slope value is [1.96255648]
The Intercept value is -0.9280522283510404
### Step 6 : Predict the new data using unlabeled data
```python
y_pred=model.predict(X)
print(y_pred)
```
[ 9.1701522 11.82289582 5.97712395 1.84536583 14.12704812 12.9763063
2.45114674 5.62634149 3.68247813 3.87402249 8.70465804 3.48268106
16.97372721 6.34891233 6.70186044 14.64568883 3.70685779 13.52536676
10.14846648 15.29284059 -0.13688814 1.14230571 12.55199304 -0.76546266
16.33990974 4.22307493 5.02121503 3.37163395 12.85077063 5.59682773
2.08999748 7.71392691 9.33150718 11.10757381 18.47969758 18.27654415
16.02077928 12.20984107 8.87712953 3.3043861 16.72153174 13.60792001
7.68073427 17.67166163 4.50254112 17.55047799 6.45759014 2.68415955
-0.55791672 16.55194399]
### Step 7 : Compute the accuracy of the model
#### Visual method of accuracy
```python
plt.figure(dpi=120)
plt.scatter(x,y,c="Red",alpha=0.6,label="Actual Value")
plt.plot(x,y_pred,linewidth=5,c="green",alpha=0.4,label="Predicted Value")
plt.xlabel("X - Independent Variable",fontsize=18)
plt.ylabel("Y - Dependent Variable",fontsize=18)
plt.legend(loc="best",fontsize=20)
plt.show()
```
## Prediction using metric method
### Mean Absolute Error
Mean of the absolute difference between the actual value and the predicted value.
\begin{equation}
\frac{1}{N}\sum_{i=1}^N |y_i-\hat{y_i}|
\end{equation}
>Note: 0 -> Best
```python
from sklearn.metrics import mean_absolute_error
mean_absolute_error(y,y_pred)
```
0.7774688999128481
### Mean Squared Error
Mean of the square of the difference between the actual value and the predicted value.
\begin{equation}
\frac{1}{N}\sum_{i=1}^N (y_i-\hat{y_i})^2
\end{equation}
>Note: 0 -> Best
```python
from sklearn.metrics import mean_squared_error
mean_squared_error(y, y_pred)
```
0.8885217699569701
### Root Mean Squared Error
Square root of the mean of the squared differences between the actual and predicted values
\begin{equation}
\sqrt{\frac{1}{N}\sum_{i=1}^N (y_i-\hat{y_i})^2}
\end{equation}
>Note: 0 -> Best
```python
from sklearn.metrics import mean_squared_error
np.sqrt(mean_squared_error(y,y_pred))
```
0.9426143272606088
### R Squared
The total sum of squares (proportional to the variance of the data):
$$ SS_{tot}=\sum_{i} (y_i-\bar{y})^2 $$
The regression sum of squares, also called the explained sum of squares (with $f_i$ the fitted values):
$$ SS_{reg}=\sum_{i} (f_i-\bar{y})^2 $$
The most general definition of the coefficient of determination uses the residual sum of squares $SS_{res}=\sum_{i} (y_i-f_i)^2$:
$$ R^2 = 1 - \frac{SS_{res}}{SS_{tot}} $$
which, for least-squares regression with an intercept, equals $\frac{SS_{reg}}{SS_{tot}}$.
### R Squared
- Measure of the best estimate
- Coefficient of Determination
> Note: 1 -> Best
```python
from sklearn.metrics import r2_score
r2_score(y, y_pred)
```
0.9732624820891722
### Model's built-in score function
Calculates the $R^2$ value
```python
model.score(X,y)
```
0.9732624820891722
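For completeness, an adjusted $R^2$ (which penalises the number of predictors) can be derived from the same score; a short sketch:
```python
# Sketch: adjusted R^2 computed from the ordinary R^2.
n_samples, n_features = X.shape
r2 = model.score(X, y)
adj_r2 = 1 - (1 - r2) * (n_samples - 1) / (n_samples - n_features - 1)
print(adj_r2)
```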
## Input new data
```python
xnew=np.array([3,4,7])
Xnew=xnew[:,np.newaxis]
model.predict(Xnew)
```
array([ 4.95961722, 6.92217371, 12.80984316])
# Non-linear equations
```python
x = 10 * np.random.random(100)
y = 2 * np.sin(x) - 1 + np.random.randn(100)
print("x = ",x[:11])
print("y = ",y[:11])
plt.figure(dpi=120)
sns.scatterplot(x,y)
plt.xlabel("X - Independent Variable")
plt.ylabel("Y - Dependent Variable")
plt.show()
```
### Predict using the normal Linear Regression
```python
X=x[:,np.newaxis]
from sklearn.linear_model import LinearRegression
model=LinearRegression()
model.fit(X,y)
xnew=np.linspace(0,10,100)
Xnew=xnew[:,np.newaxis]
y_pred=model.predict(Xnew)
```
```python
plt.figure(dpi=120,figsize=(12,7))
plt.scatter(x,y,c="Red",alpha=0.6,label="Actual Value")
plt.plot(xnew,y_pred,linewidth=5,c="green",alpha=0.4,label="Predicted Value")
plt.xlabel("X - Independent Variable",fontsize=18)
plt.ylabel("Y - Dependent Variable",fontsize=18)
plt.legend(loc="best",fontsize=20)
plt.show()
```
### Calculate the accuracy
```python
model.score(X,y)
```
0.001899013622799095
```python
model.score(X,y)*100
```
0.1899013622799095
## Polynomial Regression
```python
from sklearn.preprocessing import PolynomialFeatures
```
```python
dummy=np.array([2,3,4])
poly=PolynomialFeatures(degree=4)
poly.fit_transform(dummy[:,np.newaxis])
```
array([[ 1., 2., 4., 8., 16.],
[ 1., 3., 9., 27., 81.],
[ 1., 4., 16., 64., 256.]])
### Transform our actual data
```python
poly=PolynomialFeatures(degree=3)
poly_out=poly.fit_transform(X)
print(poly_out.shape)
print(poly_out[:10])
```
(100, 4)
[[1.00000000e+00 4.84034425e+00 2.34289324e+01 1.13404098e+02]
[1.00000000e+00 8.24433792e+00 6.79691077e+01 5.60360292e+02]
[1.00000000e+00 5.00138680e+00 2.50138699e+01 1.25104039e+02]
[1.00000000e+00 2.83545135e+00 8.03978437e+00 2.27964174e+01]
[1.00000000e+00 9.35763097e+00 8.75652573e+01 8.19403364e+02]
[1.00000000e+00 3.61606590e+00 1.30759326e+01 4.72834340e+01]
[1.00000000e+00 7.03069465e+00 4.94306673e+01 3.47531928e+02]
[1.00000000e+00 2.94759886e+00 8.68833902e+00 2.56097382e+01]
[1.00000000e+00 3.38868882e+00 1.14832119e+01 3.89130319e+01]
[1.00000000e+00 2.90474480e-01 8.43754234e-02 2.45089072e-02]]
### Output of PolynomialFeatures is fitted to a normal Linear Regression
```python
model=LinearRegression()
model.fit(poly_out,y)
xnew=np.linspace(0,10,100)
Xnew=xnew[:,np.newaxis]
poly=PolynomialFeatures(degree=3)
poly_out_new=poly.fit_transform(Xnew)
y_pred=model.predict(poly_out_new)
```
```python
print("The slope value is ",model.coef_)
print("The Intercept value is ",model.intercept_)
```
The slope value is [ 0. -3.04072636 0.66144071 -0.03995414]
The Intercept value is 2.3936755999800496
```python
plt.figure(dpi=120,figsize=(15,7))
plt.scatter(x,y,c="Red",alpha=0.6,label="Actual Value")
plt.plot(xnew,y_pred,linewidth=5,c="green",alpha=0.4,label="Predicted Value")
plt.xlabel("X - Independent Variable",fontsize=18)
plt.ylabel("Y - Dependent Variable",fontsize=18)
plt.legend(loc="best",fontsize=20)
plt.show()
```
## Accuracy score
```python
model.score(poly_out,y)*100
```
29.76026958218042
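The transform-then-fit steps above can also be wrapped into a single estimator; a minimal sketch using scikit-learn's `Pipeline`, equivalent to the degree-3 fit above:
```python
# Sketch: the same degree-3 polynomial regression expressed as a Pipeline.
from sklearn.pipeline import make_pipeline

poly_model = make_pipeline(PolynomialFeatures(degree=3), LinearRegression())
poly_model.fit(X, y)
print(poly_model.score(X, y) * 100)  # same R^2-based score as the manual approach
```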
# Automate the polynomial degree
```python
deg,acc=[],[]
for i in np.arange(2,20):
poly=PolynomialFeatures(degree=i)
poly_out=poly.fit_transform(X)
model=LinearRegression()
model.fit(poly_out,y)
deg.append(i)
acc.append(model.score(poly_out,y)*100)
print("Degree :",deg[-1],"Accuracy score ->",acc[-1])
bha=pd.DataFrame({"Degree":deg,"Accuracy":acc})
```
Degree : 2 Accuracy score -> 7.846218787887116
Degree : 3 Accuracy score -> 29.76026958218042
Degree : 4 Accuracy score -> 63.81832646336568
Degree : 5 Accuracy score -> 67.1770768798496
Degree : 6 Accuracy score -> 71.38923906135227
Degree : 7 Accuracy score -> 71.94237240276576
Degree : 8 Accuracy score -> 72.80588501511075
Degree : 9 Accuracy score -> 73.10447113345143
Degree : 10 Accuracy score -> 73.45112588076599
Degree : 11 Accuracy score -> 73.45123595642593
Degree : 12 Accuracy score -> 73.64777733910813
Degree : 13 Accuracy score -> 73.73233739729207
Degree : 14 Accuracy score -> 73.84875220899892
Degree : 15 Accuracy score -> 73.3347529364348
Degree : 16 Accuracy score -> 72.61308273270183
Degree : 17 Accuracy score -> 72.44276162854572
Degree : 18 Accuracy score -> 72.35501554325751
Degree : 19 Accuracy score -> 71.69718613551315
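Note that these scores are computed on the same data used for fitting, so they tend to favour higher degrees; a held-out split (a sketch, not part of the original experiment) guards against overfitting when choosing the degree:
```python
# Sketch: pick the degree on a held-out test set instead of the training data.
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
for degree in range(2, 20):
    pipe = make_pipeline(PolynomialFeatures(degree=degree), LinearRegression())
    pipe.fit(X_train, y_train)
    print(degree, pipe.score(X_test, y_test) * 100)
```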
```python
plt.figure(dpi=120)
sns.lineplot(data=bha,x="Degree",y="Accuracy")
plt.show()
```
```python
degr=6
poly=PolynomialFeatures(degree=degr)
poly_out=poly.fit_transform(X)
model=LinearRegression()
model.fit(poly_out,y)
xnew=np.linspace(0,10,100)
Xnew=xnew[:,np.newaxis]
poly=PolynomialFeatures(degree=degr)
poly_out_new=poly.fit_transform(Xnew)
y_pred=model.predict(poly_out_new)
print(model.score(poly_out,y)*100)
```
71.38923906135227
```python
plt.figure(dpi=120,figsize=(15,7))
plt.scatter(x,y,c="Red",alpha=0.6,label="Actual Value")
plt.plot(xnew,y_pred,linewidth=5,c="green",alpha=0.4,label="Predicted Value")
plt.xlabel("X - Independent Variable",fontsize=18)
plt.ylabel("Y - Dependent Variable",fontsize=18)
plt.legend(loc="best",fontsize=20)
plt.show()
```
```python
print("Model Coeffients",model.coef_)
print("Model Intercept",model.intercept_)
```
Model Coeffients [ 0.00000000e+00 5.49235132e-01 1.35614195e+00 -1.22862598e+00
3.12573504e-01 -3.14694607e-02 1.10639586e-03]
Model Intercept -0.2794770369857815
# Questions ??
|
aca73995dcf3599260dfe94bdd06efe06309a886
| 456,937 |
ipynb
|
Jupyter Notebook
|
python/MachineLearning/Basic Linear regression.ipynb
|
BharathC15/NielitChennai
|
c817aaf63b741eb7a8e4c1df16b5038a0b4f0df7
|
[
"MIT"
] | null | null | null |
python/MachineLearning/Basic Linear regression.ipynb
|
BharathC15/NielitChennai
|
c817aaf63b741eb7a8e4c1df16b5038a0b4f0df7
|
[
"MIT"
] | null | null | null |
python/MachineLearning/Basic Linear regression.ipynb
|
BharathC15/NielitChennai
|
c817aaf63b741eb7a8e4c1df16b5038a0b4f0df7
|
[
"MIT"
] | 1 |
2020-06-11T08:04:43.000Z
|
2020-06-11T08:04:43.000Z
| 361.500791 | 112,772 | 0.93902 | true | 4,036 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.924142 | 0.868827 | 0.802919 |
__label__eng_Latn
| 0.250366 | 0.703783 |
# Two-Sided Test: Computing the Type II Error
## The parameter $p$ of $Bernoulli$ random variables
#### Author:
Sergio García Prado - [garciparedes.me](https://garciparedes.me)
#### Date:
April 2018
#### Acknowledgements:
I would like to thank professor [Pilar Rodríguez del Tío](http://www.eio.uva.es/~pilar/) for reviewing and correcting this work.
## Description:
$Bernoulli$ random variables arise when studying binary phenomena, such as whether or not a given event occurs. These variables are characterized by the occurrence rate of the event under study, denoted by $p \in [0, 1]$.
However, in most cases this parameter is unknown, so the question of its value naturally arises. There are several techniques for making inference about $p$, among them hypothesis tests. Tests involve two error parameters, known as $\alpha$ and $\beta$, which represent the probability of rejecting the hypothesis when it is true and of accepting it when it is false.
These errors are related to each other, and when one decreases the other increases, so their behaviour must be studied in detail in order to draw reasonable conclusions from the data.
In this work we focus on how the Type II error (the probability of accepting the hypothesis $p = c$ when it is actually false) varies in the two-sided test for the parameter $p$ based on a simple random sample of Bernoulli variables.
## Procedure:
Let:
$$X_1,..., X_i,..., X_n \ \text{be a simple random sample with} \ X_i \sim B(p) $$
We know that:
$$\widehat{p} = \bar{X} = \frac{\sum_{i = 1}^nX_i}{n}$$
To carry out the test:
$$H_0: p = c\\H_1:p \neq c$$
we know that:
\begin{align}
\widehat{p} \simeq N(p, \frac{p(1-p)}{n}) &\quad \text{(under any hypothesis)}\\
\widehat{p} \simeq N(c, \frac{c(1-c)}{n}) &\quad \text{(under $H_0$)}
\end{align}
If we standardize in both cases:
\begin{align}
\frac{\widehat{p} - p}{\sqrt{\frac{p(1-p)}{n}}} \simeq N(0, 1) &\quad \text{(under any hypothesis)}\\
\frac{\widehat{p} - c}{\sqrt{\frac{c(1-c)}{n}}} \simeq N(0, 1) &\quad \text{(under $H_0$)}
\end{align}
Now we want to construct the critical (rejection) region such that:
$$P_{H_0}(C) = \alpha$$
Therefore:
\begin{align}
C
&= \left\{ \left| \ N(0,1) \ \right| \geq Z_{1 - \frac{\alpha}{2}}\right\} \\
&= \left\{ \left| \ \frac{\widehat{p} - c}{\sqrt{\frac{c(1-c)}{n}}} \ \right| \geq Z_{1 - \frac{\alpha}{2}} \right\} \\
&= \left\{ \left| \ \widehat{p} - c \ \right| \geq Z_{1 - \frac{\alpha}{2}} \sqrt{\frac{c(1-c)}{n}}\right\} \\
&= \left\{ \widehat{p} \leq c - Z_{1 - \frac{\alpha}{2}} \sqrt{\frac{c(1-c)}{n}}\right\} \cup \left\{c + Z_{1 - \frac{\alpha}{2}} \sqrt{\frac{c(1-c)}{n}} \leq \widehat{p}\right\} \\
\end{align}
(Where $Z_{1 - \frac{\alpha}{2}}$ denotes the $1-\alpha/2$ quantile of the standard Normal distribution)
However, what we want to compute is the Type II error:
$$\beta\left(p\right) = P_p(\bar{C})$$
So we take the complement of the critical region:
\begin{align}
\bar{C}
&= \left\{ \left| \ N(0,1) \ \right| < Z_{1 - \frac{\alpha}{2}}\right\} \\
&= \left\{ \left| \ \frac{\widehat{p} - c}{\sqrt{\frac{c(1-c)}{n}}} \ \right| < Z_{1 - \frac{\alpha}{2}} \right\} \\
&= \left\{ \left| \ \widehat{p} - c\ \right| < Z_{1 - \frac{\alpha}{2}} \sqrt{\frac{c(1-c)}{n}}\right\} \\
&= \left\{ c - Z_{1-\frac{\alpha}{2}} \sqrt{\frac{c(1-c)}{n}} < \widehat{p} < c + Z_{1 - \frac{\alpha}{2}} \sqrt{\frac{c(1-c)}{n}}\right\} \\
\end{align}
We then work out the computation of this probability:
\begin{align}
\beta(p)
&= P_p(\bar{C}) \\
&= P_p\left(\left| \frac{\widehat{p} - c}{\sqrt{\frac{c(1-c)}{n}}} \right| < Z_{1 - \frac{\alpha}{2}}\right) \\
&= P_p\left(c - Z_{\frac{\alpha}{2}} \sqrt{\frac{c(1-c)}{n}} < \widehat{p} < c + Z_{ 1- \frac{\alpha}{2}} \sqrt{\frac{c(1-c)}{n}}\right) \\
&= \Phi\left(\frac{\left(c + Z_{1 - \frac{\alpha}{2}} \sqrt{\frac{c(1-c)}{n}}\right)-p}{\sqrt{\frac{p(1-p)}{n}}}\right) - \Phi\left(\frac{\left(c - Z_{1 - \frac{\alpha}{2}} \sqrt{\frac{c(1-c)}{n}}\right)-p}{\sqrt{\frac{p(1-p)}{n}}}\right)
\end{align}
(Where $\Phi(x)$ denotes the cumulative distribution function of the standard Normal)
The implementation of the computations above in __R__ is shown next:
## Implementation:
#### Computing the critical value at level $\alpha$ for Bernoulli variables:
```R
CriticalValue <- function(n, p, alpha) {
qnorm(alpha) * sqrt((p * (1 - p)) / n)
}
```
#### Computing the probability $P(\bar{C})$:
```R
PNegateC <- function(p, n, c, alpha) {
pnorm(
(
c + CriticalValue(n, c, 1 - alpha / 2) - p
) / sqrt((p * (1 - p)) / n)
) -
pnorm(
(
c - CriticalValue(n, c, 1 - alpha / 2) - p
) / sqrt((p * (1 - p)) / n)
)
}
```
#### Graphical representation of $\beta(p)$ for several values of $c$ and $n$ (with $\alpha$ fixed) to examine how it varies
```R
n.vec <- 10 ^ (1:3)
c.vec <- c(0.25, 0.5, 0.75)
p <- seq(0, 1, length = 200)
```
## Results:
```R
par(mfrow = c(length(n.vec), length(c.vec)))
for (n in n.vec) {
for (c in c.vec) {
plot(p, 1 - PNegateC(p, n, c, 0.05), type = "l",
main = paste("c =", c, "\nn =", n),
ylab = "A(p) = 1 - B(p)")
}
}
```
As can be seen, the error function is symmetric only in the case $c = \frac{1}{2}$, which is more noticeable for small values of $n$. Moreover, as $n$ grows, $\beta(p)$ becomes much more sharply peaked around $c$, so the Type II error stays very low except for values $p \simeq c$. This makes sense: the error made by accepting that the true value of $p$ is $c$ is small when $p$ really is not $c$, and large when it actually is. In the latter case this error is governed by the non-rejection error, i.e. the Type I error (in this example $\alpha = 0.05$).
|
e753369dd0ebdf2771f44ba295a525af5cec3745
| 90,656 |
ipynb
|
Jupyter Notebook
|
notebooks/beta-error-bernoulli-hypothesis-test.ipynb
|
garciparedes/r-examples
|
0e0e18439ad859f97eafb27c5e7f77d33da28bc6
|
[
"Apache-2.0"
] | 1 |
2017-09-15T19:56:31.000Z
|
2017-09-15T19:56:31.000Z
|
notebooks/beta-error-bernoulli-hypothesis-test.ipynb
|
garciparedes/r-examples
|
0e0e18439ad859f97eafb27c5e7f77d33da28bc6
|
[
"Apache-2.0"
] | 5 |
2018-03-23T09:34:55.000Z
|
2019-01-09T14:13:32.000Z
|
notebooks/beta-error-bernoulli-hypothesis-test.ipynb
|
garciparedes/r-examples
|
0e0e18439ad859f97eafb27c5e7f77d33da28bc6
|
[
"Apache-2.0"
] | null | null | null | 370.02449 | 81,660 | 0.907772 | true | 2,238 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.835484 | 0.793106 | 0.662627 |
__label__spa_Latn
| 0.945848 | 0.377835 |
[Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb)
# Designing Nonlinear Kalman Filters
```python
#format the book
%matplotlib inline
from __future__ import division, print_function
from book_format import load_style
load_style()
```
<style>
@import url('http://fonts.googleapis.com/css?family=Source+Code+Pro');
@import url('http://fonts.googleapis.com/css?family=Lora');
//@import url('http://fonts.googleapis.com/css?family=Open+Sans');
//@import url('http://fonts.googleapis.com/css?family=Vollkorn');
//@import url('http://fonts.googleapis.com/css?family=Karla');
//@import url('http://fonts.googleapis.com/css?family=Poppins');
//@import url('http://fonts.googleapis.com/css?family=Arimo');
//@import url('http://fonts.googleapis.com/css?family=Roboto');
//@import url('http://fonts.googleapis.com/css?family=Lato');
//@import url('http://fonts.googleapis.com/css?family=Domine');
//@import url('http://fonts.googleapis.com/css?family=Chivo');
//@import url('http://fonts.googleapis.com/css?family=Cardo');
//@import url('http://fonts.googleapis.com/css?family=Arvo');
//@import url('http://fonts.googleapis.com/css?family=Crimson+Text');
//@import url('http://fonts.googleapis.com/css?family=Ubuntu');
//@import url('http://fonts.googleapis.com/css?family=Fontin');
//@import url('http://fonts.googleapis.com/css?family=Raleway');
//@import url('http://fonts.googleapis.com/css?family=Merriweather');
.CodeMirror pre {
font-family: 'Source Code Pro', Consolas, monocco, monospace;
}
div.cell{
//width: 950px;
margin-left: 0% !important;
margin-right: auto;
}
div.text_cell_render{
font-family: 'Lora';
//font-family: 'Open Sans';
//font-family: 'Karla',verdana,arial,sans-serif;
//font-family: 'Roboto',verdana,arial,sans-serif;
//font-family: 'Lato',verdana,arial,sans-serif;
//font-family: 'Domine',verdana,arial,sans-serif;
//font-family: 'Chivo',verdana,arial,sans-serif;
//font-family: 'Cardo',verdana,arial,sans-serif;
//font-family: 'Arvo',verdana,arial,sans-serif;
//font-family: 'Poppins',verdana,arial,sans-serif;
//font-family: 'Ubuntu',verdana,arial,sans-serif;
//font-family: 'Fontin',verdana,arial,sans-serif;
//font-family: 'Raleway',verdana,arial,sans-serif;
//font-family: 'Merriweather',verdana,arial,sans-serif;
//font-family: 'Crimson Text', verdana,arial,sans-serif;
//font-family: verdana,arial,sans-serif;
//font-family: arial,sans-serif;
line-height: 125%;
font-size: 130%;
text-align: justify;
text-justify:inter-word;
}
div.text_cell code {
background: transparent;
color: #000000;
font-weight: 400;
font-size: 12pt;
//font-style: bold;
font-family: 'Source Code Pro', Consolas, monocco, monospace;
}
h1 {
font-family: 'Open sans',verdana,arial,sans-serif;
}
div.input_area {
background: #F6F6F9;
border: 1px solid #586e75;
}
.text_cell_render h1 {
font-weight: 200;
font-size: 30pt;
line-height: 100%;
color:#c76c0c;
margin-bottom: 0.5em;
margin-top: 1em;
display: block;
white-space: wrap;
text-align: left;
}
h2 {
font-family: 'Open sans',verdana,arial,sans-serif;
text-align: left;
}
.text_cell_render h2 {
font-weight: 200;
font-size: 16pt;
font-style: italic;
line-height: 100%;
color:#c76c0c;
margin-bottom: 0.5em;
margin-top: 1.5em;
display: block;
white-space: wrap;
text-align: left;
}
h3 {
font-family: 'Open sans',verdana,arial,sans-serif;
}
.text_cell_render h3 {
font-weight: 200;
font-size: 14pt;
line-height: 100%;
color:#d77c0c;
margin-bottom: 0.5em;
margin-top: 2em;
display: block;
white-space: wrap;
text-align: left;
}
h4 {
font-family: 'Open sans',verdana,arial,sans-serif;
}
.text_cell_render h4 {
font-weight: 100;
font-size: 14pt;
color:#d77c0c;
margin-bottom: 0.5em;
margin-top: 0.5em;
display: block;
white-space: nowrap;
}
h5 {
font-family: 'Open sans',verdana,arial,sans-serif;
}
.text_cell_render h5 {
font-weight: 200;
font-style: normal;
color: #1d3b84;
font-size: 16pt;
margin-bottom: 0em;
margin-top: 0.5em;
display: block;
white-space: nowrap;
}
div.output_subarea.output_text.output_pyout {
overflow-x: auto;
overflow-y: scroll;
max-height: 50000px;
}
div.output_subarea.output_stream.output_stdout.output_text {
overflow-x: auto;
overflow-y: scroll;
max-height: 50000px;
}
div.output_wrapper{
margin-top:0.2em;
margin-bottom:0.2em;
}
code{
font-size: 6pt;
}
.rendered_html code{
background-color: transparent;
}
ul{
margin: 2em;
}
ul li{
padding-left: 0.5em;
margin-bottom: 0.5em;
margin-top: 0.5em;
}
ul li li{
padding-left: 0.2em;
margin-bottom: 0.2em;
margin-top: 0.2em;
}
ol{
margin: 2em;
}
ol li{
padding-left: 0.5em;
margin-bottom: 0.5em;
margin-top: 0.5em;
}
ul li{
padding-left: 0.5em;
margin-bottom: 0.5em;
margin-top: 0.2em;
}
a:link{
color:#447adb;
}
a:visited{
color: #1d3b84;
}
a:hover{
color: #1d3b84;
}
a:focus{
color:#447adb;
}
a:active{
font-weight: bold;
color:#447adb;
}
.rendered_html :link {
text-decoration: underline;
}
.rendered_html :hover {
text-decoration: none;
}
.rendered_html :visited {
text-decoration: none;
}
.rendered_html :focus {
text-decoration: none;
}
.rendered_html :active {
text-decoration: none;
}
.warning{
color: rgb( 240, 20, 20 )
}
hr {
color: #f3f3f3;
background-color: #f3f3f3;
height: 1px;
}
blockquote{
display:block;
background: #fcfcfc;
border-left: 5px solid #c76c0c;
font-family: 'Open sans',verdana,arial,sans-serif;
width:680px;
padding: 10px 10px 10px 10px;
text-align:justify;
text-justify:inter-word;
}
blockquote p {
margin-bottom: 0;
line-height: 125%;
font-size: 100%;
}
</style>
## Introduction
** Author's note: I was initially planning to have a design nonlinear chapter that compares various approaches. This may or may not happen, but for now this chapter has no useful content and I suggest not reading it. **
We see that the Kalman filter reasonably tracks the ball. However, as already explained, this is a silly example; we can predict trajectories in a vacuum with arbitrary precision; using a Kalman filter in this example is a needless complication.
### Kalman Filter with Air Drag
I will dispense with the step 1, step 2, type approach and proceed in a more natural style that you would use in a non-toy engineering problem. We have already developed a Kalman filter that does excellently at tracking a ball in a vacuum, but that does not incorporate the effects of air drag into the model. We know that the process model is implemented with $\textbf{F}$, so we will turn our attention to that immediately.
Notionally, the computation that $\textbf{F}$ computes is
$$x' = Fx$$
With no air drag, we had
$$
\mathbf{F} = \begin{bmatrix}
1 & \Delta t & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 \\
0 & 0 & 1 & \Delta t & \frac{1}{2}{\Delta t}^2 \\
0 & 0 & 0 & 1 & \Delta t \\
0 & 0 & 0 & 0 & 1
\end{bmatrix}
$$
which corresponds to the equations
$$
\begin{aligned}
x &= x + v_x \Delta t \\
v_x &= v_x \\
\\
y &= y + v_y \Delta t + \frac{a_y}{2} {\Delta t}^2 \\
v_y &= v_y + a_y \Delta t \\
a_y &= a_y
\end{aligned}
$$
From the section above we know that our new Euler equations must be
$$
\begin{aligned}
x &= x + v_x \Delta t \\
v_x &= v_x \\
\\
y &= y + v_y \Delta t + \frac{a_y}{2} {\Delta t}^2 \\
v_y &= v_y + a_y \Delta t \\
a_y &= a_y
\end{aligned}
$$
## Realistic 2D Position Sensors
The position sensor in the last example are not very realistic. In general there is no 'raw' sensor that provides (x,y) coordinates. We have GPS, but GPS already uses a Kalman filter to create a filtered output; we should not be able to improve the signal by passing it through another Kalman filter unless we incorporate additional sensors to provide additional information. We will tackle that problem later.
Consider the following set up. In an open field we put two transmitters at a known location, each transmitting a signal that we can detect. We process the signal and determine how far we are from that signal, with some noise. First, let's look at a visual depiction of that.
```python
import matplotlib.pyplot as plt
circle1=plt.Circle((-4, 0), 5, color='#004080',
fill=False, linewidth=20, alpha=.7)
circle2=plt.Circle((4, 0), 5, color='#E24A33',
fill=False, linewidth=5, alpha=.7)
fig = plt.gcf()
ax = fig.gca()
plt.axis('equal')
plt.xlim((-10, 10))
plt.ylim((-10, 10))
plt.plot ([-4, 0], [0, 3], c='#004080')
plt.plot ([4, 0], [0, 3], c='#E24A33')
plt.text(-4, -.5, "A", fontsize=16, horizontalalignment='center')
plt.text(4, -.5, "B", fontsize=16, horizontalalignment='center')
ax.add_artist(circle1)
ax.add_artist(circle2)
plt.show()
```
Here I have attempted to show transmitter A, drawn in blue, at (-4,0) and a second one B, drawn in red, at (4,0). The red and blue circles show the range from the transmitters to the robot, with the width illustrating the effect of the $1\sigma$ angular error for each transmitter. Here I have given the blue transmitter more error than the red one. The most probable position for the robot is where the two circles intersect, which I have depicted with the red and blue lines. You will object that we have two intersections, not one, but we will see how we deal with that when we design the measurement function.
This is a very common sensor set up. Aircraft still use this system to navigate, where it is called DME (Distance Measuring Equipment). Today GPS is a much more common navigation system, but I have worked on an aircraft where we integrated sensors like this into our filter along with the GPS, INS, altimeters, etc. We will tackle what is called *multi-sensor fusion* later; for now we will just address this simple configuration.
The first step is to design our state variables. We will assume that the robot is traveling in a straight direction with constant velocity. This is unlikely to be true for a long period of time, but is acceptable for short periods of time. This does not differ from the previous problem - we will want to track the values for the robot's position and velocity. Hence,
$$\mathbf{x} =
\begin{bmatrix}x\\v_x\\y\\v_y\end{bmatrix}$$
The next step is to design the state transition function. This also will be the same as the previous problem, so without further ado,
$$
\mathbf{x}' = \begin{bmatrix}1& \Delta t& 0& 0\\0& 1& 0& 0\\0& 0& 1& \Delta t\\ 0& 0& 0& 1\end{bmatrix}\mathbf{x}$$
The next step is to design the control inputs. We have none, so we set ${\mathbf{B}}=0$.
The next step is to design the measurement function $\mathbf{z} = \mathbf{Hx}$. We can model the measurement using the Pythagorean theorem.
$$
z_a = \sqrt{(x-x_A)^2 + (y-y_A)^2} + v_a\\[1em]
z_b = \sqrt{(x-x_B)^2 + (y-y_B)^2} + v_b
$$
where $v_a$ and $v_b$ are white noise.
We see an immediate problem. The Kalman filter is designed for linear equations, and this is obviously nonlinear. In the next chapters we will look at several ways to handle nonlinear problems in a robust way, but for now we will do something simpler. If we know the approximate position of the robot then we can linearize these equations around that point. I could develop the generalized mathematics for this technique now, but instead let me just present the worked example to give context to that development.
Instead of computing $\mathbf{H}$ we will compute the partial derivative of $\mathbf{H}$ with respect to the robot's position $\mathbf{x}$. You are probably familiar with the concept of partial derivative, but if not, it just means how $\mathbf{H}$ changes with respect to the robot's position. It is computed as the partial derivative of $\mathbf{H}$ as follows:
$$\frac{\partial \mathbf{h}}{\partial \mathbf{x}} =
\begin{bmatrix}
\frac{\partial h_1}{\partial x_1} & \frac{\partial h_1}{\partial x_2} &\dots \\
\frac{\partial h_2}{\partial x_1} & \frac{\partial h_2}{\partial x_2} &\dots \\
\vdots & \vdots
\end{bmatrix}
$$
Let's work the first partial derivative. We want to find
$$\frac{\partial }{\partial x} \sqrt{(x-x_A)^2 + (y-y_A)^2}
$$
Which we compute as
$$
\begin{aligned}
\frac{\partial h_1}{\partial x} &= \frac{\partial}{\partial x}\left[(x-x_A)^2 + (y-y_A)^2\right]^\frac{1}{2} \\
&= \frac{1}{2}\times 2(x-x_A)\times \left[(x-x_A)^2 + (y-y_A)^2\right]^{-\frac{1}{2}} \\
&= \frac{x_r - x_A}{\sqrt{(x_r-x_A)^2 + (y_r-y_A)^2}}
\end{aligned}
$$
We continue this computation for the partial derivatives of the two distance equations with respect to $x$, $y$, $dx$ and $dy$, yielding
$$\frac{\partial\mathbf{h}}{\partial\mathbf{x}}=
\begin{bmatrix}
\frac{x_r - x_A}{\sqrt{(x_r-x_A)^2 + (y_r-y_A)^2}} & 0 &
\frac{y_r - y_A}{\sqrt{(x_r-x_A)^2 + (y_r-y_A)^2}} & 0 \\
\frac{x_r - x_B}{\sqrt{(x_r-x_B)^2 + (y_r-y_B)^2}} & 0 &
\frac{y_r - y_B}{\sqrt{(x_r-x_B)^2 + (y_r-y_B)^2}} & 0 \\
\end{bmatrix}
$$
That is pretty painful, and these are very simple equations. Computing the Jacobian can be extremely difficult or even impossible for more complicated systems. However, there is an easy way to get Python to do the work for you by using the SymPy module [1]. SymPy is a Python library for symbolic mathematics. The full scope of its abilities are beyond this book, but it can perform algebra, integrate and differentiate equations, find solutions to differential equations, and much more. We will use it to compute our Jacobian!
First, a simple example. We will import SymPy, initialize its pretty print functionality (which will print equations using LaTeX). We will then declare a symbol for NumPy to use.
```python
import sympy
from sympy import init_printing
init_printing(use_latex='png')
phi, x = sympy.symbols('\phi, x')
phi
```
Notice how we use a latex expression for the symbol `phi`. This is not necessary, but if you do it will render as LaTeX when output. Now let's do some math. What is the derivative of $\sqrt{\phi}$?
```python
sympy.diff('sqrt(phi)')
```
We can factor equations.
```python
sympy.factor('phi**3 -phi**2 + phi - 1')
```
SymPy has a remarkable list of features, and as much as I enjoy exercising its features we cannot cover them all here. Instead, let's compute our Jacobian.
```python
from sympy import symbols, Matrix
phi = symbols('\phi')
phi
x, y, xa, xb, ya, yb, dx, dy = symbols('x y x_a x_b y_a y_b dx dy')
H = Matrix([[sympy.sqrt((x-xa)**2 + (y-ya)**2)],
[sympy.sqrt((x-xb)**2 + (y-yb)**2)]])
state = Matrix([x, dx, y, dy])
H.jacobian(state)
```
In a nutshell, the entry (0,0) contains the difference between the x coordinate of the robot and transmitter A's x coordinate divided by the distance between the robot and A. (2,0) contains the same, except for the y coordinates of the robot and transmitters. The bottom row contains the same computations, except for transmitter B. The 0 entries account for the velocity components of the state variables; naturally the range does not provide us with velocity.
The values in this matrix change as the robot's position changes, so this is no longer a constant; we will have to recompute it for every time step of the filter.
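To make this concrete, the symbolic Jacobian can be evaluated at a particular geometry; a small sketch (the sample coordinates below are arbitrary, chosen only for illustration):
```python
# Sketch: evaluate the symbolic Jacobian at one example robot/transmitter geometry.
J = H.jacobian(state)
J.subs({x: 5, y: 10, xa: -100, ya: -20, xb: 100, yb: -20}).evalf(4)
```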
If you look at this you may realize that this is just a computation of x/dist and y/dist, so we can switch this to a trigonometic form with no loss of generality:
$$\frac{\partial\mathbf{h}}{\partial\mathbf{x}}=
\begin{bmatrix}
-\cos{\theta_A} & 0 & -\sin{\theta_A} & 0 \\
-\cos{\theta_B} & 0 & -\sin{\theta_B} & 0
\end{bmatrix}
$$
However, this raises a huge problem. We are no longer computing $\mathbf{H}$, but $\Delta\mathbf{H}$, the change of $\mathbf{H}$. If we passed this into our Kalman filter without altering the rest of the design the output would be nonsense. Recall, for example, that we multiply $\mathbf{Hx}$ to generate the measurements that would result from the given estimate of $\mathbf{x}$ But now that $\mathbf{H}$ is linearized around our position it contains the *change* in the measurement function.
We are forced, therefore, to use the *change* in $\mathbf{x}$ for our state variables. So we have to go back and redesign our state variables.
>Please note this is a completely normal occurrence in designing Kalman filters. The textbooks present examples like this as *fait accompli*, as if it is trivially obvious that the state variables needed to be velocities, not positions. Perhaps once you do enough of these problems it would be trivially obvious, but at that point why are you reading a textbook? I find myself reading through a presentation multiple times, trying to figure out why they made a choice, finally to realize that it is because of the consequences of something on the next page. My presentation is longer, but it reflects what actually happens when you design a filter. You make what seem reasonable design choices, and as you move forward you discover properties that require you to recast your earlier steps. As a result, I am going to somewhat abandon my **step 1**, **step 2**, etc., approach, since so many real problems are not quite that straightforward.
If our state variables contain the velocities of the robot and not the position then how do we track where the robot is? We can't. Kalman filters that are linearized in this fashion use what is called a *nominal trajectory* - i.e. you assume a position and track direction, and then apply the changes in velocity and acceleration to compute the changes in that trajectory. How could it be otherwise? Recall the graphic showing the intersection of the two range circles - there are two areas of intersection. Think of what this would look like if the two transmitters were very close to each other - the intersections would be two very long crescent shapes. This Kalman filter, as designed, has no way of knowing your true position from only distance measurements to the transmitters. Perhaps your mind is already leaping to ways of working around this problem. If so, stay engaged, as later sections and chapters will provide you with these techniques. Presenting the full solution all at once leads to more confusion than insight, in my opinion.
So let's redesign our *state transition function*. We are assuming constant velocity and no acceleration, giving state equations of
$$
\dot{x}' = \dot{x} \\
\ddot{x}' = 0 \\
\dot{y}' = \dot{y} \\
\ddot{y}' = 0$$
This gives us the *state transition function* of
$$
\mathbf{F} = \begin{bmatrix}0 &1 & 0& 0\\0& 0& 0& 0\\0& 0& 0& 1\\ 0& 0& 0& 0\end{bmatrix}$$
A final complication comes from the measurements that we pass in. $\mathbf{Hx}$ is now computing the *change* in the measurement from our nominal position, so the measurement that we pass in needs to be not the range to A and B, but the *change* in range from our measured range to our nominal position.
There is a lot here to take in, so let's work through the code bit by bit. First we will define a function to compute $\frac{\partial\mathbf{h}}{\partial\mathbf{x}}$ for each time step.
```python
from math import sin, cos, atan2
def H_of(pos, pos_A, pos_B):
""" Given the position of our object at 'pos' in 2D, and two
transmitters A and B at positions 'pos_A' and 'pos_B', return
the partial derivative of H
"""
    theta_a = atan2(pos_A[1] - pos[1], pos_A[0] - pos[0])
    theta_b = atan2(pos_B[1] - pos[1], pos_B[0] - pos[0])
return np.array([[0, -cos(theta_a), 0, -sin(theta_a)],
[0, -cos(theta_b), 0, -sin(theta_b)]])
```
Now we need to create our simulated sensor.
```python
from numpy.random import randn
class DMESensor(object):
def __init__(self, pos_a, pos_b, noise_factor=1.0):
self.A = pos_a
self.B = pos_b
self.noise_factor = noise_factor
def range_of(self, pos):
""" returns tuple containing noisy range data to A and B
given a position 'pos'
"""
ra = math.sqrt((self.A[0] - pos[0])**2 + (self.A[1] - pos[1])**2)
rb = math.sqrt((self.B[0] - pos[0])**2 + (self.B[1] - pos[1])**2)
return (ra + randn()*self.noise_factor,
rb + randn()*self.noise_factor)
```
Finally, we are ready for the Kalman filter code. I will position the transmitters at x=-100 and 100, both with y=-20. This gives me enough space to get good triangulation from both as the robot moves. I will start the robot at (0,0) and move by (1,1) each time step.
```python
import code.book_plots as bp
from filterpy.kalman import KalmanFilter
import math
import numpy as np
pos_a = (100, -20)
pos_b = (-100, -20)
f1 = KalmanFilter(dim_x=4, dim_z=2)
f1.F = np.array ([[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]], dtype=float)
f1.R *= 1.
f1.Q *= .1
f1.x = np.array([[1, 0, 1, 0]], dtype=float).T
f1.P = np.eye(4) * 5.
# initialize storage and other variables for the run
count = 30
xs, ys = [], []
pxs, pys = [], []
# create the simulated sensor
d = DMESensor(pos_a, pos_b, noise_factor=3.)
# pos will contain our nominal position since the filter does not
# maintain position.
pos = [0, 0]
for i in range(count):
# move (1,1) each step, so just use i
pos = [i, i]
# compute the difference in range between the nominal track
# and measured ranges
ra,rb = d.range_of(pos)
rx,ry = d.range_of((pos[0] + f1.x[0, 0], pos[1] + f1.x[2, 0]))
z = np.array([[ra - rx], [rb - ry]])
# compute linearized H for this time step
f1.H = H_of (pos, pos_a, pos_b)
# store stuff so we can plot it later
xs.append(f1.x[0, 0]+i)
ys.append(f1.x[2, 0]+i)
pxs.append(pos[0])
pys.append(pos[1])
# perform the Kalman filter steps
f1.predict()
f1.update(z)
bp.plot_filter(xs, ys)
bp.plot_track(pxs, pys)
plt.legend(loc=2)
plt.show()
```
## Linearizing the Kalman Filter
Now that we have seen an example of linearizing the Kalman filter we are in a position to better understand the math.
We start by assuming some function $\mathbf f$
## Example: A falling Ball
**author's note: ignore this section for now. **
In the **Designing Kalman Filters** chapter I first considered tracking a ball in a vacuum, and then in the atmosphere. The Kalman filter performed very well for vacuum, but diverged from the ball's path in the atmosphere. Let us look at the output; to avoid littering this chapter with code from that chapter I have placed it all in the file `ekf_internal.py'.
```python
import code.ekf_internal as ekf
ekf.plot_ball()
```
We can artificially force the Kalman filter to track the ball by making $Q$ large. That would cause the filter to mistrust its prediction, and scale the kalman gain $K$ to strongly favor the measurments. However, this is not a valid approach. If the Kalman filter is correctly predicting the process we should not 'lie' to the filter by telling it there are process errors that do not exist. We may get away with that for some problems, in some conditions, but in general the Kalman filter's performance will be substandard.
Recall from the **Designing Kalman Filters** chapter that the acceleration is
$$a_x = (0.0039 + \frac{0.0058}{1+\exp{[(v-35)/5]}})*v*v_x \\
a_y = (0.0039 + \frac{0.0058}{1+\exp{[(v-35)/5]}})*v*v_y- g
$$
These equations will be *very* unpleasant to work with while we develop this subject, so for now I will retreat to a simpler one dimensional problem using this simplified equation for acceleration that does not take the nonlinearity of the drag coefficient into account:
$$\begin{aligned}
\ddot{y} &= \frac{0.0034ge^{-y/20000}\dot{y}^2}{2\beta} - g \\
\ddot{x} &= \frac{0.0034ge^{-x/20000}\dot{x}^2}{2\beta}
\end{aligned}$$
Here $\beta$ is the ballistic coefficient, where a high number indicates a low drag.
This is still nonlinear, so we need to linearize this equation at the current state point. If our state is position and velocity, we need an equation for some arbitrarily small change in $\mathbf{x}$, like so:
$$ \begin{bmatrix}\Delta \dot{x} \\ \Delta \ddot{x} \\ \Delta \dot{y} \\ \Delta \ddot{y}\end{bmatrix} =
\large\begin{bmatrix}
\frac{\partial \dot{x}}{\partial x} &
\frac{\partial \dot{x}}{\partial \dot{x}} &
\frac{\partial \dot{x}}{\partial y} &
\frac{\partial \dot{x}}{\partial \dot{y}} \\
\frac{\partial \ddot{x}}{\partial x} &
\frac{\partial \ddot{x}}{\partial \dot{x}}&
\frac{\partial \ddot{x}}{\partial y}&
\frac{\partial \ddot{x}}{\partial \dot{y}}\\
\frac{\partial \dot{y}}{\partial x} &
\frac{\partial \dot{y}}{\partial \dot{x}} &
\frac{\partial \dot{y}}{\partial y} &
\frac{\partial \dot{y}}{\partial \dot{y}} \\
\frac{\partial \ddot{y}}{\partial x} &
\frac{\partial \ddot{y}}{\partial \dot{x}}&
\frac{\partial \ddot{y}}{\partial y}&
\frac{\partial \ddot{y}}{\partial \dot{y}}
\end{bmatrix}\normalsize
\begin{bmatrix}\Delta x \\ \Delta \dot{x} \\ \Delta y \\ \Delta \dot{y}\end{bmatrix}$$
The equations do not contain both an x and a y, so any partial derivative with both in it must be equal to zero. We also know that $\large\frac{\partial \dot{x}}{\partial x}\normalsize = 0$ and that $\large\frac{\partial \dot{x}}{\partial \dot{x}}\normalsize = 1$, so our matrix ends up being
$$\mathbf{F} = \begin{bmatrix}0&1&0&0 \\
\frac{0.0034e^{-x/22000}\dot{x}^2g}{44000\beta}&0&0&0
\end{bmatrix}$$
$$\begin{aligned}\ddot{x} &= -\frac{1}{2}C_d\rho A \dot{x}\\
\ddot{y} &= -\frac{1}{2}C_d\rho A \dot{y}-g\end{aligned}$$
```python
from sympy.abc import *
from sympy import *
init_printing(pretty_print=True, use_latex='mathjax')
x1 = (0.0034*g*exp(-x/22000)*((x)**2))/(2*b) - g
x2 = (a*g*exp(-x/c)*(Derivative(x)**2))/(2*b) - g
#pprint(x1)
#pprint(Derivative(x)*Derivative(x,n=2))
#pprint(diff(x2, x))
```
** orphan text
This approach has many issues. First, of course, is the fact that the linearization does not produce an exact answer. More importantly, we are not linearizing the actual path, but our filter's estimation of the path. We linearize the estimation because it is statistically likely to be correct; but of course it is not required to be. So if the filter's output is bad that will cause us to linearize an incorrect estimate, which will almost certainly lead to an even worse estimate. In these cases the filter will quickly diverge. This is where the 'black art' of Kalman filter comes in. We are trying to linearize an estimate, and there is no guarantee that the filter will be stable. A vast amount of the literature on Kalman filters is devoted to this problem. Another issue is that we need to linearize the system using analytic methods. It may be difficult or impossible to find an analytic solution to some problems. In other cases we may be able to find the linearization, but the computation is very expensive. **
## References
[1] http://sympy.org
|
5100e477137421ce9795850b0a68ac07e4dd5698
| 131,570 |
ipynb
|
Jupyter Notebook
|
Appendix-G-Designing-Nonlinear-Kalman-Filters.ipynb
|
asfaltboy/Kalman-and-Bayesian-Filters-in-Python
|
4669507d7a8274a40cff93a011d34b6171227ea6
|
[
"CC-BY-4.0"
] | 4 |
2017-10-17T06:53:41.000Z
|
2021-04-03T14:16:06.000Z
|
Appendix-G-Designing-Nonlinear-Kalman-Filters.ipynb
|
asfaltboy/Kalman-and-Bayesian-Filters-in-Python
|
4669507d7a8274a40cff93a011d34b6171227ea6
|
[
"CC-BY-4.0"
] | null | null | null |
Appendix-G-Designing-Nonlinear-Kalman-Filters.ipynb
|
asfaltboy/Kalman-and-Bayesian-Filters-in-Python
|
4669507d7a8274a40cff93a011d34b6171227ea6
|
[
"CC-BY-4.0"
] | 4 |
2017-12-08T09:27:49.000Z
|
2022-02-21T17:14:06.000Z
| 125.185538 | 34,930 | 0.819343 | true | 7,848 |
Qwen/Qwen-72B
|
1. YES
2. YES
| 0.685949 | 0.847968 | 0.581663 |
__label__eng_Latn
| 0.982423 | 0.189728 |
# Detectors Comparison
Detectors Comparison is a tool that collects statistics on keypoint detection and extraction algorithms applied to tourist photographs.
The collected statistics are used to evaluate the performance and accuracy of the algorithms [ORB](), [BRISK](), [AKAZE](), [SIFT]() and [SURF]() with respect to execution time, number of keypoints, number of matches and hit rate. The comparison is divided into four categories/cases of photo pairs: the same object at the same scale, the same object at different scales, different objects at the same scale, and different objects at different scales. All image pairs are taken from roughly the same viewing angle.
To carry out the proposed task we use [OpenCV]() as the main library, which provides the algorithms being compared. The remaining libraries ([NumPy](), [SciPy](), [SymPy](), [Time]() and [Matplotlib]()) act as helpers for data collection. The data, in turn, are stored with [SQLite 3]().
```python
import cv2
import sqlite3
import numpy as np
from scipy import stats
from sympy import Point, Line
from time import time, strftime
from matplotlib import pyplot as plt
```
## Data collection process
The main stage of the data collection process consists of:
+ Finding the keypoints;
+ Finding the matches through a brute-force (exhaustive) search;
+ Evaluating the hit rate;
+ Computing, for each keypoint, the angle between the horizontal line (through the image center and the rightmost pixel at the same height/y coordinate) and the line through the image center and that keypoint;
+ Computing the differences between the keypoint angles;
+ Computing the ratios between the distances from the image centers to their keypoints, which we call the scale between the images;
+ Computing the means and standard deviations of the keypoint angle differences and of the scales;
+ Rotating the left image by the mean of the keypoint angle differences;
+ Scaling the left image by the mean scale;
+ Finding the new keypoints and matches again;
+ Re-evaluating the hit rate;
+ Removing the false matches:
    + Filtering out angles and scales smaller than the mean minus one standard deviation or larger than the mean plus one standard deviation.
+ Recomputing the means and standard deviations of the keypoint angle differences and of the scales;
+ Re-applying the rotation and scaling with the new mean values;
+ Finding the new keypoints and matches again;
+ Re-evaluating the hit rate.
```python
NUM_OF_PAIRS = 1
TABLE_NAME = 'datas_{}'.format(strftime('%y%m%d_%H%M%S'))
```
```python
# Finds the image's center
def image_center(image):
return Point(image.shape[1] / 2, image.shape[0] / 2)
```
```python
# Finds the angles between the horizontal axis
# and the lines passing through the image center
# and each keypoint
def g_find_kp_angles(image, kps):
angles = []
center = image_center(image)
h_axis = Line(center, center.translate(center.x))
for kp in kps:
p = Point(kp.pt[0], kp.pt[1])
kp_line = Line(center, p)
angles.append(float(h_axis.angle_between(kp_line)))
return angles
```
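As a side note on the design, sympy's `Line.angle_between` is exact but slow for thousands of keypoints. A vectorized numpy sketch of the same idea is shown below (not part of the original pipeline, and it measures the signed ray angle rather than the undirected line angle, so it is not a drop-in replacement):
```python
import numpy as np

def np_kp_angles(image, kps):
    # Angle of the ray from the image center to each keypoint,
    # measured against the horizontal axis, via arctan2.
    cx, cy = image.shape[1] / 2, image.shape[0] / 2
    pts = np.array([kp.pt for kp in kps])  # (N, 2) array of (x, y) coordinates
    return np.arctan2(pts[:, 1] - cy, pts[:, 0] - cx)
```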
```python
def angles_dif(angles_img1, angles_img2, matches):
dif = []
for match in matches :
dif.append(angles_img1[match.queryIdx] - angles_img2[match.trainIdx])
return dif
```
```python
def remove_fake_matches(matches,dif_angles,angles_mean,angles_std,scales,scale_mean,scale_std):
new_scales,new_dif_angles = [],[]
for i in range(len(matches)):
        if dif_angles[i] < angles_mean + angles_std and dif_angles[i] > angles_mean - angles_std and scales[i] < scale_mean + scale_std and scales[i] > scale_mean - scale_std:
new_scales.append(scales[i])
new_dif_angles.append(dif_angles[i])
return new_dif_angles,new_scales
```
```python
# Finds the Key's points Angles
def find_kp_angles(kp1, kp2, matches, center1, center2):
central_line = Line(center1, center2.translate(2 * center2.x))
angles = []
for match in matches:
p1 = Point(kp1[match.queryIdx].pt[0], kp1[match.queryIdx].pt[1])
p2 = Point(kp2[match.trainIdx].pt[0], kp2[match.trainIdx].pt[1])
match_line = Line(p1, p2.translate(2 * center2.x))
angles.append(float(central_line.angle_between(match_line)))
return angles
```
```python
def g_find_scale(image, kps):
scale = []
center = image_center(image)
for kp in kps:
p = Point(kp.pt[0], kp.pt[1])
d = center.distance(p)
scale.append(d)
return scale
```
```python
# Finds the ratio of the keypoints scale between images
def find_scale_ratios(img1, kp1, img2, kp2, matches):
ratios = []
scale1 = g_find_scale(img1, kp1)
scale2 = g_find_scale(img2, kp2)
for match in matches:
# scale list preserves the ordering from keypoints list
d1 = scale1[match.queryIdx]
d2 = scale2[match.trainIdx]
ratios.append(float(d1 / d2))
return ratios
```
```python
# Finds the Scale between images
def find_scale(kp1, kp2, matches, center1, center2):
scale = []
for match in matches:
p1 = Point(kp1[match.queryIdx].pt[0], kp1[match.queryIdx].pt[1])
p2 = Point(kp2[match.trainIdx].pt[0], kp2[match.trainIdx].pt[1])
d1 = center1.distance(p1)
d2 = center2.distance(p2)
scale.append(float(d1 / d2))
return scale
```
```python
def affine_trans(img,angles,scale):
center = image_center(img)
m = cv2.getRotationMatrix2D((center.y, center.x), angles, scale)
return cv2.warpAffine(img, m, (img.shape[1],img.shape[0]))
```
```python
def save(conn,cursor,values):
cursor.execute("""
INSERT INTO {} (kp1,kp2,matches,time,anglesMean,anglesSD,scaleMean,scaleSD,technique,situation,pathImg1,pathImg2,phase)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)
""".format(TABLE_NAME), values)
conn.commit()
```
```python
def ploting_image_pair(left,right):
fig = plt.figure()
fig.add_subplot(1,2,1)
plt.imshow(left)
fig.add_subplot(1,2,2)
plt.imshow(right)
plt.show()
```
```python
def getStats(method,img1, img2):
timeI = time()
# find the keypoints and descriptors with ORB
kp1, des1 = method.detectAndCompute(img1, None)
kp2, des2 = method.detectAndCompute(img2, None)
timeF = time()
# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
# Match descriptors. (query,train)
matches = bf.match(des1, des2)
# Sort them in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance)
return [kp1,kp2, matches, timeF - timeI]
```
```python
def prep_values(img1,img2,method,name,case,pair):
values = getStats(method,img1,img2)
kp1,kp2,matches = values[0],values[1],values[2]
values[0],values[1],values[2] = len(kp1),len(kp2),len(matches)
angles_img1 = g_find_kp_angles(img1,kp1)
angles_img2 = g_find_kp_angles(img2,kp2)
    dif_angles = angles_dif(angles_img1,angles_img2,matches)
scales = find_scale_ratios(img1, kp1, img2, kp2, matches)
    angles_mean = stats.tmean(dif_angles)
    angles_std = stats.tstd(dif_angles)
scale_mean = stats.tmean(scales)
scale_std = stats.tstd(scales)
values.append(angles_mean)
values.append(angles_std)
values.append(scale_mean)
values.append(scale_std)
values.append(name)
values.append(case)
values.append('{}a.jpg'.format(pair))
values.append('{}b.jpg'.format(pair))
    return dif_angles,scales,matches, values
```
```python
def main():
executeTimeI = time()
conn = sqlite3.connect('banco.db')
cursor = conn.cursor()
cursor.execute(
"""CREATE TABLE {} (
technique TEXT,
situation TEXT,
kp1 INTEGER,
kp2 INTEGER,
matches INTEGER,
time FLOAT,
anglesMean FLOAT,
anglesSD FLOAT,
scaleMean FLOAT,
scaleSD FLOAT,
pathImg1 TEXT,
pathImg2 TEXT,
phase INTEGER
);""".format(TABLE_NAME)
)
# Initiate detectors
# SIFT = cv2.xfeatures2d.SIFT_create()
# SURF = cv2.xfeatures2d.SURF_create()
ORB = cv2.ORB.create()
# # KAZE = cv2.KAZE.create()
# AKAZE = cv2.AKAZE.create()
# BRISK = cv2.BRISK.create()
methods = {
# 'SIFT': SIFT,
# 'SURF': SURF,
'ORB': ORB,
# 'KAZE': KAZE,
# 'AKAZE': AKAZE,
# 'BRISK': BRISK
}
cases = [
'Same Object, Same Scale',
# 'Same Object, Different Scale',
# 'Different Object, Same Scale',
# 'Different Object, Different Scale',
]
for case in cases:
print(case)
for pair in range(NUM_OF_PAIRS):
print('Pair {}/{}'.format(pair + 1, NUM_OF_PAIRS))
img1 = cv2.imread('photos/{}/{}a.jpg'.format(case,pair),0)
img2 = cv2.imread('photos/{}/{}b.jpg'.format(case,pair),0)
for name, method in methods.items():
print(name)
print("Phase One: Compares unaltered images")
angles_dif,scales,matches,original_values = prep_values(img1,img2,method,name,case,pair)
original_values.append(1)
save(conn, cursor,tuple(original_values))
print('Phase two: Calculates the transformation')
angles_mean = original_values[4]
scale_mean = original_values[6]
                dst = affine_trans(img1,angles_mean,scale_mean)
ploting_image_pair(dst,img2)
_,_,_,values = prep_values(dst,img2,method,name,case,pair)
values.append(2)
save(conn, cursor,tuple(values))
print("Phase three: Removes fake matches")
angles_mean = original_values[4]
angles_std = original_values[5]
scale_mean = original_values[6]
scale_std = original_values[7]
                angles_dif,scales = remove_fake_matches(matches,angles_dif,angles_mean,angles_std,scales,scale_mean,scale_std)
                angles_mean = stats.tmean(angles_dif)
                angles_std = stats.tstd(angles_dif)
                scale_mean = stats.tmean(scales)
                scale_std = stats.tstd(scales)
                dst = affine_trans(img1,angles_mean,scale_mean)
ploting_image_pair(dst,img2)
_,_,_,values = prep_values(dst,img2,method,name,case,pair)
values.append(3)
save(conn, cursor,tuple(values))
del img1
del img2
conn.close()
executeTimeF = time()
print('Test executed in {} seconds'.format(executeTimeF-executeTimeI))
```
```python
if(__name__ == '__main__'):
main()
```
| hexsha: 87fbfe87c525bed6262ef8937f3cdec00c7007cc | size: 20051 | ext: ipynb | lang: Jupyter Notebook | path: Features.ipynb | repo: oraphaBorges/detectors_comparison | repo_head: e63ccddcf8ca8b6f5c0daa85b4b6f5491f82d288 | licenses: ["Unlicense"] | stars: null | issues: null | forks: null | avg_line_length: 39.862823 | max_line_length: 1615 | alphanum_fraction: 0.579173 | converted: true | num_tokens: 2890 | lm_name: Qwen/Qwen-72B | lm_label: 1. YES / 2. YES | lm_q1_score: 0.795658 | lm_q2_score: 0.692642 | lm_q1q2_score: 0.551106 | text_lang: __label__por_Latn | text_lang_conf: 0.346808 | label: 0.118734 |
```python
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
sns.set_context('notebook', font_scale=1.5)
```
```python
import warnings
warnings.simplefilter('ignore', FutureWarning)
```
**1**. (25 points)
In this exercise, we will practice using Pandas dataframes to explore and summarize a data set `heart`.
This data contains the survival time after receiving a heart transplant, the age of the patient and whether or not the survival time was censored
- Number of Observations - 69
- Number of Variables - 3
Variable name definitions::
- survival - Days after surgery until death
- censors - indicates if an observation is censored. 1 is uncensored
- age - age at the time of surgery
Answer the following questions (5 points each) with respect to the `heart` data set:
- How many patients were censored?
- What is the correlation coefficient between age and survival for uncensored patients?
- What is the average age for censored and uncensored patients?
- What is the average survival time for censored and uncensored patients under the age of 45?
- What is the survival time of the youngest and oldest uncensored patient?
```python
import statsmodels.api as sm
heart = sm.datasets.heart.load_pandas().data
heart.head(n=6)
```
|   | survival | censors | age |
| --- | --- | --- | --- |
| 0 | 15.0 | 1.0 | 54.3 |
| 1 | 3.0 | 1.0 | 40.4 |
| 2 | 624.0 | 1.0 | 51.0 |
| 3 | 46.0 | 1.0 | 42.5 |
| 4 | 127.0 | 1.0 | 48.0 |
| 5 | 64.0 | 1.0 | 54.6 |
```python
#How many patients were censored?
heart[heart.censors==0].shape[0]
```
24
24 patients were censored.
```python
#What is the correlation coefficient between age and survival for uncensored patients?
uncensored = heart[heart.censors==1]
scipy.stats.pearsonr(uncensored.age, uncensored.survival)[0]
```
0.0032564992832119144
The correlation coefficient between age and survival for uncensored patients is 0.003.
```python
#What is the average age for censored and uncensored patients?
heart.groupby("censors").agg({'age':"mean"})
```
| censors | age |
| --- | --- |
| 0.0 | 41.729167 |
| 1.0 | 48.484444 |
The average age for censored patients (censors = 0) is 41.73, and for uncensored patients (censors = 1) it is 48.48.
```python
#What is the average survival time for censored and uncensored patients under the age of 45?
heart[heart.age<45].groupby("censors").agg({"survival":"mean"})
```
| censors | survival |
| --- | --- |
| 0.0 | 712.818182 |
| 1.0 | 169.909091 |
The average survival time for censored patients (censors = 0) under 45 years old is 712.8 days.
The average survival time for uncensored patients (censors = 1) under 45 years old is 169.9 days.
```python
#What is the survival time of the youngest and oldest uncensored patient?
survival_min = uncensored[uncensored.age == uncensored.age.min()].survival
survival_max = uncensored[uncensored.age == uncensored.age.max()].survival
```
```python
survival_min
```
41 228.0
Name: survival, dtype: float64
The survival time of the youngest uncensored patient is 228.
```python
survival_max
```
17 60.0
Name: survival, dtype: float64
The survival time of the oldest uncensored patient is 60.
**2**. (35 points)
- Consider a sequence of $n$ Bernoulli trials with success probability $p$ per trial. A string of consecutive successes is known as a success *run*. Write a function that returns the counts for runs of length $k$ for each $k$ observed in a dictionary. (10 points)
For example: if the trials were [0, 1, 0, 1, 1, 0, 0, 0, 0, 1], the function should return
```
{1: 2, 2: 1}
```
Test that it does so.
- What is the probability of observing at least one run of length 5 or more when $n=100$ and $p=0.5$?. Estimate this from 100,000 simulated experiments. Is this more, less or equally likely than finding runs of length 7 or more when $p=0.7$? (10 points)
- There is an exact solution
$$
s_n = \sum_{i=1}^n{f_i} \\
f_n = u_n - \sum_{i=1}^{n-1} {f_i u_{n-i}} \\
u_n = p^k - \sum_{i=1}^{k-1} u_{n-i} p^i
$$
Implement the exact solution using caching to avoid re-calculations and calculate the same two probabilities found by simulation. (15 points)
```python
def len_run(str_list):
s_str = "".join([str(s) for s in str_list]).split("0")
run = [s for s in s_str if s != ""]
unique = set(run)
l = [len(u) for u in unique]
count = []
for e in unique:
count.append(run.count(e))
return dict(zip(l, count))
```
```python
s = [0, 1, 0, 1, 1, 0, 0, 0, 0, 1]
len_run(s)
```
{1: 2, 2: 1}
```python
n = 100
p = 0.5
N = 100000
result = []
for i in range(N):
s = [np.random.binomial(1,p) for j in range(n)]
run_len = len_run(s).keys()
whether_5 = np.sum([e >= 5 for e in run_len])
if whether_5>=1:
result.append(1)
else:
result.append(0)
```
```python
np.mean(result)
```
0.81115999999999999
```python
n = 100
p = 0.7
N = 100000
result = []
for i in range(N):
s = [np.random.binomial(1,p) for j in range(n)]
run_len = len_run(s).keys()
whether_5 = np.sum([e >= 7 for e in run_len])
if whether_5>=1:
result.append(1)
else:
result.append(0)
```
```python
np.mean(result)
```
0.94891999999999999
```python
from functools import lru_cache
@lru_cache()
def u(n,k,p):
if n < k:
return 0
else:
return p**k - sum(u(n-i, k, p)*p**i for i in range(1, k))
@lru_cache()
def f(n,k,p):
return u(n,k,p) - sum(f(i,k,p)*u(n-i,k,p) for i in range(1,n))
@lru_cache()
def s(n,k,p):
return sum(f(i,k,p) for i in range(1, n+1))
```
```python
s(100,5,0.5)
```
0.8101095991963579
```python
s(100,7,0.7)
```
0.9491817984156692
**3**. (40 points)
Given matrix $M$
```python
[[7, 8, 8],
[1, 3, 8],
[9, 2, 1]]
```
- Normalize the given matrix $M$ so that all rows sum to 1.0. (5 points)
- The normalized matrix can then be considered as a transition matrix $P$ for a Markov chain. Find the stationary distribution of this matrix in the following ways using `numpy` and `numpy.linalg` (or `scipy.linalg`):
- By repeated matrix multiplication of a random probability vector $v$ (a row vector normalized to sum to 1.0) with $P$ using matrix multiplication with `np.dot`. (5 points)
- By raising the matrix $P$ to some large power until it doesn't change with higher powers (see `np.linalg.matrix_power`) and then calculating $vP$ (10 points)
- From the equation for stationarity $wP = w$, we can see that $w$ must be a left eigenvector of $P$ with eigenvalue $1$ (Note: np.linalg.eig returns the right eigenvectors, but the left eigenvector of a matrix is the right eigenvector of the transposed matrix). Use this to find $w$ using `np.linalg.eig`. (20 points)
Suppose $w = (w_1, w_2, w_3)$. Then from $wP = w$, we have:
\begin{align}
w_1 P_{11} + w_2 P_{21} + w_3 P_{31} &= w_1 \\
w_1 P_{12} + w_2 P_{22} + w_3 P_{32} &= w_2 \\
w_1 P_{13} + w_2 P_{23} + w_3 P_{33} &= w_3 \\
\end{align}
This is a singular system, but we also know that $w_1 + w_2 + w_3 = 1$. Use these facts to set up a linear system of equations that can be solved with `np.linalg.solve` to find $w$.
```python
M = np.array([[7,8,8],[1,3,8],[9,2,1]])
M
```
array([[7, 8, 8],
[1, 3, 8],
[9, 2, 1]])
```python
sum_row = M.sum(axis=1)
normalized_M = M/sum_row[:,None]
normalized_M
```
array([[ 0.30434783, 0.34782609, 0.34782609],
[ 0.08333333, 0.25 , 0.66666667],
[ 0.75 , 0.16666667, 0.08333333]])
```python
P = normalized_M
v = np.array([0.2, 0.7, 0.1])
```
```python
v_old = v
v_new = np.dot(v_old,P)
while any(v_old != v_new):
v_old = v_new
v_new = np.dot(v_old, P)
w1 = v_old
w1
```
array([ 0.39862184, 0.2605972 , 0.34078096])
```python
P_s = np.linalg.matrix_power(P,20)
P_s
```
array([[ 0.39862184, 0.2605972 , 0.34078096],
[ 0.39862184, 0.2605972 , 0.34078096],
[ 0.39862184, 0.2605972 , 0.34078096]])
```python
w2 = np.dot(v, P_s)
w2
```
array([ 0.39862184, 0.2605972 , 0.34078096])
```python
W,V = np.linalg.eig(P.T)
w3 = V[:, 0]/V[:, 0].sum()
w3
```
array([ 0.39862184+0.j, 0.26059720+0.j, 0.34078096+0.j])
```python
A = np.r_[P.T-np.eye(3),[np.ones(3)]][1:,:]
b = np.array([0,0,1])
w4 = np.linalg.solve(A,b)
w4
```
array([ 0.39862184, 0.2605972 , 0.34078096])
```python
```
| hexsha: ec3eb999b468de04a42d65de361970cc71efe3f8 | size: 21288 | ext: ipynb | lang: Jupyter Notebook | path: HW03.ipynb | repo: lyz1206/STA663-Statistical-Computation-Python- | repo_head: ed36e9a76746afdf5800ebb6d6632def0964f21b | licenses: ["MIT"] | stars: null | issues: null | forks: null | avg_line_length: 24.30137 | max_line_length: 331 | alphanum_fraction: 0.460071 | converted: true | num_tokens: 3452 | lm_name: Qwen/Qwen-72B | lm_label: 1. YES / 2. YES | lm_q1_score: 0.851953 | lm_q2_score: 0.810479 | lm_q1q2_score: 0.69049 | text_lang: __label__eng_Latn | text_lang_conf: 0.944772 | label: 0.44257 |
# Part 3: Softmax Regression
```
# Execute this code block to install dependencies when running on colab
try:
import torch
except:
from os.path import exists
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/'
accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu'
!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-1.0.0-{platform}-linux_x86_64.whl torchvision
```
In the second part of the lab we saw how to make a linear binary classifier using logistic regression. In this part of the lab we'll turn our attention to multi-class classification.
Softmax regression (or multinomial logistic regression) is a generalisation of logistic regression to the case where we want to handle multiple classes. In logistic regression we assumed that the labels were binary: $y_i\in \{0,1\}$. We used such a classifier to distinguish between two kinds of hand-written digits. Softmax regression allows us to handle $y_i \in \{1,\dots,K\}$ where $K$ is the number of classes.
Recall that in logistic regression, we had a training set $\{(\mathbf{x}_1,y_1),\dots,(\mathbf{x}_m,y_m)\}$ of $m$ labeled examples, where the input features are $\mathbf{x}_i \in \mathbb{R}^n$. In logistic regression, our hypothesis took the form:
\begin{align}
h_\theta(\mathbf{x}) &= \frac{1}{1 + \exp(-\mathbf{x}^\top\theta)} \equiv \sigma(\mathbf{x}^\top\theta)
\end{align}
and the model parameters $\theta$ were trained to minimise the cost function
\begin{align}
J(\theta) & = \sum_i y_i \log(\sigma(\mathbf{x}_i^\top\theta)) + (1-y_i) \log(1-\sigma(\mathbf{x}_i^\top\theta))
\end{align}
In the softmax regression setting, we are interested in multi-class classification, and so the label $y$
can take on $K$ different values, rather than only two. Thus, in our training set $\{(\mathbf{x}_1,y_1),\dots,(\mathbf{x}_m,y_m)\}$, we now have that $y_i \in \{1,\dots,K\}$.
Given a test input $\mathbf{x}$, we want our hypothesis to estimate the probability that $P(y=k|\mathbf{x})$ for each value of $k=1,\dots,K$. That is to say, we want to estimate the probability of the class label taking on each of the $K$ different possible values. Thus, our hypothesis will output a $K$-dimensional vector (whose elements sum to 1) giving us our $K$ estimated probabilities. Concretely, our hypothesis $h_\theta(\mathbf{x})$ takes the form:
\begin{align}
h_\theta(\mathbf{x}) =
\begin{bmatrix}
P(y = 1 | \mathbf{x}; \theta) \\
P(y = 2 | \mathbf{x}; \theta) \\
\vdots \\
P(y = K | \mathbf{x}; \theta)
\end{bmatrix}
=
\frac{1}{ \sum_{j=1}^{K}{\exp(\theta^{(j)\top} \mathbf{x}) }}
\begin{bmatrix}
\exp(\theta^{(1)\top} \mathbf{x} ) \\
\exp(\theta^{(2)\top} \mathbf{x} ) \\
\vdots \\
\exp(\theta^{(K)\top} \mathbf{x} ) \\
\end{bmatrix}
\end{align}
Here $\theta^{(1)},\theta^{(2)},\dots,\theta^{(K)} \in \mathbb{R}^n$ are the parameters of our model. Notice that the term $\frac{1}{\sum_{j=1}^K exp(\theta^{(j)\top} \mathbf{x})}$ normalizes the distribution, so that it sums to one.
For convenience, we will also write $\theta$ to denote all the parameters of our model. When you implement softmax regression, it is usually convenient to represent $\theta$ as a $n$-by-$K$ matrix obtained by concatenating $\theta_{(1)},\theta^{(2)},\dots,\theta^{(K)}$ into columns, so that
\begin{align}
\theta = \left[\begin{array}{cccc}| & | & | & | \\
\theta^{(1)} & \theta^{(2)} & \cdots & \theta^{(K)} \\
| & | & | & |
\end{array}\right].
\end{align}
## Cost Function
We now describe the cost function that we’ll use for softmax regression. In the equation below, $1\{\cdot\}$
is an "indicator function", such that $1\{\text{a true statement}\}=1$, and $1\{\text{a false statement}\}=0$. For example, $1\{2+2=4\}$ evaluates to $1$; whereas $1\{1+1=5\}$ evaluates to $0$. Our cost function will be:
\begin{align}
J(\theta) = - \left[ \sum_{i=1}^{m} \sum_{k=1}^{K} 1\left\{y_{i} = k\right\} \log \frac{\exp(\theta^{(k)\top} \mathbf{x}_i)}{\sum_{j=1}^K \exp(\theta^{(j)\top} \mathbf{x}_i)}\right]
\end{align}
Notice that this generalises the logistic regression cost function, which could also have been written:
\begin{align}
J(\theta) &= - \left[ \sum_{i=1}^m (1-y^{(i)}) \log (1-h_\theta(\mathbf{x}_i)) + y^{(i)} \log h_\theta(\mathbf{x}_i) \right] \\
&= - \left[ \sum_{i=1}^{m} \sum_{k=0}^{1} 1\left\{y^{(i)} = k\right\} \log P(y^{(i)} = k | \mathbf{x}_i ; \theta) \right]
\end{align}
The softmax cost function is similar, except that we now sum over the $K$ different possible values of the class label. Note also that in softmax regression, we have that
\begin{equation}
P(y_i = k | \mathbf{x}_i ; \theta) = \frac{\exp(\theta^{(k)\top} \mathbf{x}_i)}{\sum_{j=1}^K \exp(\theta^{(j)\top} \mathbf{x}_i) }
\end{equation}
We cannot solve for the minimum of $J(\theta)$ analytically, and thus we'll resort to using gradient descent as before. Taking derivatives, one can show that the gradient is:
\begin{align}
\nabla_{\theta^{(k)}} J(\theta) = - \sum_{i=1}^{m}{ \left[ \mathbf{x}_i \left( 1\{ y_i = k\} - P(y_i = k | \mathbf{x}_i; \theta) \right) \right] }
\end{align}
Armed with this formula for the derivative, one can then use it directly with a gradient descent solver (or any other 1st-order gradient based optimiser).
__Use the code box below to complete the implementation of the functions that return the gradients of the softmax loss function, $\nabla_{\theta^{(k)}} J(\theta) \,\, \forall k$ and the loss function itself, $J(\theta)$:__
```
import torch
import torch.nn.functional as f
# we wouldn't normally do this, but for this lab we want to work in double precision
# as we'll need the numerical accuracy later on for doing checks on our gradients:
torch.set_default_dtype(torch.float64)
'''
Theta: f*K(20*10)
X: num_items*f(100*20)
y: num_items*1(100*1) value:0~9
out: f*K
'''
def softmax_regression_loss_grad(Theta, X, y):
'''Implementation of the gradient of the softmax loss function.
Theta is the matrix of parameters, with the parameters of the k-th class in the k-th column
X contains the data vectors (one vector per row)
y is a column vector of the targets
'''
# YOUR CODE HERE
num_classes = Theta.size()[1] #K
features_dim = Theta.size()[0] #f
num_items = y.size()[0]
y_label = torch.zeros(num_items,num_classes,dtype=torch.float64).scatter_(1,y,1) #num_items * K
prob = f.softmax(X @ Theta, dim=1) #num_items*K, dim=1 for column
grad = - X.t() @ (y_label - prob)
return grad
def exp_prob(Theta_k,Theta,x_i):
sum=0
for j in range(Theta.shape[1]): #K
sum+=torch.exp(x_i @ Theta[:,j])
prob=torch.div(torch.exp(x_i @ Theta_k),sum)
return prob
def softmax_regression_loss(Theta, X, y):
'''Implementation of the softmax loss function.
Theta is the matrix of parameters, with the parameters of the k-th class in the k-th column
X contains the data vectors (one vector per row)
y is a column vector of the targets
'''
# YOUR CODE HERE
loss=0.0
for i in range(X.shape[0]): #num_items
x_i=X[i,:] #the i-th row,1*f
y_i=y[i]
class_indicator=0
for k in range(Theta.shape[1]): #K
Theta_k = Theta[:,k] #the k-th column,f*1
class_inidicator = int(y_i.numpy()==k) #numpy
prob=exp_prob(Theta_k,Theta,x_i)
loss += float(class_inidicator) * torch.log(prob) #float,* seperate
return -loss #-
def softmax_regression_loss2(Theta, X, y):
num_classes = Theta.size()[1] #K
features_dim = Theta.size()[0] #f
num_items = y.size()[0]
y_label=torch.zeros(num_items,num_classes,dtype=torch.float64).scatter_(1,y,1) #num_items * K
prob=-f.log_softmax(X @ Theta, dim=1) #num_items * K
loss=torch.sum(y_label*prob)
return loss
```
__Use the following code block to confirm that your implementation is correct using gradient checking. If there are problems with your gradient or loss, go back and fix them!:__
```
# from torch.autograd import gradcheck
from random import randrange
def grad_check(f, x, analytic_grad, num_checks=10, h=1e-3):
sum_error = 0
for i in range(num_checks):
ix = tuple([randrange(m) for m in x.shape]) #randomly sample value to change
print("ix:",ix)
print("x[ix]:",x[ix])
oldval = x[ix].item()
x[ix] = oldval + h # increment by h
fxph = f(x) # evaluate f(x + h)
x[ix] = oldval - h # increment by h
fxmh = f(x) # evaluate f(x - h)
x[ix] = oldval # reset
grad_numerical = (fxph - fxmh) / (2 * h)
grad_analytic = analytic_grad[ix]
rel_error = abs(grad_numerical - grad_analytic) / (abs(grad_numerical) + abs(grad_analytic) + 1e-8)
sum_error += rel_error
print('numerical: %f\tanalytic: %f\trelative error: %e' % (grad_numerical, grad_analytic, rel_error))
return sum_error / num_checks
# Create some test data:
num_classes = 10 #K
features_dim = 20 #f
num_items = 100
Theta = torch.randn((features_dim, num_classes)) #features_dim * num_classes(f*K), filled with random numbers from a normal distribution
X = torch.randn((num_items,features_dim)) #num_items * features_dim(num_items*f)
y = torch.torch.randint(0, num_classes, (num_items, 1)) #0~num_classes-1, size:num_items*1
# compute the analytic gradient
grad = softmax_regression_loss_grad(Theta, X, y)
# run the gradient checker
grad_check(lambda th: softmax_regression_loss(th, X, y), Theta, grad)
```
ix: (7, 3)
x[ix]: tensor(0.0487)
numerical: -3.040584 analytic: -3.040584 relative error: 8.887084e-09
ix: (13, 4)
x[ix]: tensor(-1.0416)
numerical: -3.542292 analytic: -3.542292 relative error: 1.051560e-08
ix: (1, 7)
x[ix]: tensor(-2.0798)
numerical: -6.047417 analytic: -6.047417 relative error: 6.418247e-09
ix: (3, 4)
x[ix]: tensor(1.8285)
numerical: 1.309945 analytic: 1.309945 relative error: 7.116945e-08
ix: (2, 5)
x[ix]: tensor(1.9467)
numerical: 14.097420 analytic: 14.097420 relative error: 3.242193e-09
ix: (13, 0)
x[ix]: tensor(0.9674)
numerical: 9.549632 analytic: 9.549632 relative error: 4.199948e-09
ix: (1, 7)
x[ix]: tensor(-2.0798)
numerical: -6.047417 analytic: -6.047417 relative error: 6.418247e-09
ix: (12, 9)
x[ix]: tensor(0.0319)
numerical: 1.614008 analytic: 1.614008 relative error: 1.278455e-08
ix: (4, 9)
x[ix]: tensor(0.4010)
numerical: 0.039318 analytic: 0.039318 relative error: 3.090074e-06
ix: (6, 5)
x[ix]: tensor(0.4945)
numerical: 4.357503 analytic: 4.357503 relative error: 6.102861e-08
tensor(3.2747e-07)
```
Theta = torch.Tensor([[1, 0], [0, 1]])
X = torch.Tensor([[1, 0], [0, 1]])
y = torch.LongTensor([[0], [1]])
loss=softmax_regression_loss(Theta, X, y)
print("loss:",loss)
assert torch.abs(softmax_regression_loss(Theta, X, y) - 0.6265) < 0.0001
grad = softmax_regression_loss_grad(Theta, X, y)
assert torch.torch.allclose(torch.abs(grad/0.2689), torch.ones_like(grad), atol=0.001)
#This function checks if all self and other satisfy the condition:∣self−other∣≤atol+rtol×∣other∣, rtol=1e-08
```
loss: tensor(0.6265)
## Training Softmax regression with gradient descent on real data
We'll now try gradient descent with our softmax regression using the digits dataset. As before, when we looked at logistic regression, we load the data and create test and training sets. Note that this time we'll use all the classes:
```
from sklearn.datasets import load_digits
X, y = (torch.Tensor(z) for z in load_digits(10, True)) #convert to pytorch Tensors
X = torch.cat((X, torch.ones((X.shape[0], 1))), 1) # append a column of 1's to the X's
X /= 255
y = y.reshape(-1, 1) # reshape y into a column vector
y = y.type(torch.LongTensor)
# We're also going to break the data into a training set for computing the regression parameters
# and a test set to evaluate the predictive ability of those parameters
perm = torch.randperm(y.shape[0])
X_train = X[perm[0:260], :] #261*f
y_train = y[perm[0:260]]
X_test = X[perm[260:], :]
y_test = y[perm[260:]]
```
We now define a simple gradient descent loop to train the model:
```
alpha = 0.1
theta_gd = torch.rand((X_train.shape[1], 10)) #Returns a tensor filled with random numbers from a uniform distribution on the interval [0, 1)
#num_items*10
for e in range(0, 1000):
gr = softmax_regression_loss_grad(theta_gd, X_train, y_train)
theta_gd -= alpha * gr
if e%100 == 0:
print("Training Loss: ", softmax_regression_loss(theta_gd, X_train, y_train))
# Compute the accuracy of the test set
proba = torch.softmax(X_test @ theta_gd, 1)
print(float((proba.argmax(1)-y_test[:,0]==0).sum()) / float(proba.shape[0])) #argmax(1) returns the indices of the maximum values of prob
print()
```
Training Loss: tensor(592.3777)
Training Loss: tensor(270.9790)
Training Loss: tensor(174.5995)
Training Loss: tensor(132.5848)
Training Loss: tensor(109.0211)
Training Loss: tensor(93.7592)
Training Loss: tensor(82.9511)
Training Loss: tensor(74.8239)
Training Loss: tensor(68.4447)
Training Loss: tensor(63.2739)
0.9232270657124269
Running the above, you should observe that the training loss decreases over time. The final accuracy on the test set is also printed and should be around 90% (it will depend on the particular training/test splits you generated as well as the initial parameters for the softmax).
# Overparameterisation in softmax regression
Softmax regression has an unusual property that it has a "redundant" set of parameters. To explain what this means, suppose we take each of our parameter vectors $\theta^{(j)}$, and subtract some fixed vector $\psi$ from it, so that every $\theta^{(j)}$ is now replaced with $\theta^{(j)}−\psi$ (for every $j=1,\dots,k$). Our hypothesis now estimates the class label probabilities as
\begin{align}
P(y^{(i)} = k | x^{(i)} ; \theta)
&= \frac{\exp((\theta^{(k)}-\psi)^\top x^{(i)})}{\sum_{j=1}^K \exp( (\theta^{(j)}-\psi)^\top x^{(i)})} \\
&= \frac{\exp(\theta^{(k)\top} x^{(i)}) \exp(-\psi^\top x^{(i)})}{\sum_{j=1}^K \exp(\theta^{(j)\top} x^{(i)}) \exp(-\psi^\top x^{(i)})} \\
&= \frac{\exp(\theta^{(k)\top} x^{(i)})}{\sum_{j=1}^K \exp(\theta^{(j)\top} x^{(i)})}.
\end{align}
__In other words, subtracting $\psi$ from every $\theta^{(j)}$ does not affect our hypothesis’ predictions at all!__ This shows that softmax regression’s parameters are "redundant". More formally, we say that our softmax model is "overparameterised" meaning that for any hypothesis we might fit to the data, there are multiple parameter settings that give rise to exactly the same hypothesis function $h_\theta$ mapping from inputs $\mathbf{x}$ to the predictions.
Further, if the cost function $J(\theta)$ is minimized by some setting of the parameters $(\theta^{(1)},\theta^{(2)},\dots,\theta^{(k)})$, then it is also minimised by $\theta^{(1)}-\psi,\theta^{(2)}-\psi,\dots,\theta^{(k)}-\psi)$ for any value of $\psi$. Thus, the minimiser of $J(\theta)$ is not unique.
(Interestingly, $J(\theta)$ is still convex, and thus gradient descent will not run into local optima problems. The Hessian is however singular/non-invertible, which causes a straightforward implementation of Newton's method (a second-order optimiser) to run into numerical problems.)
Notice also that by setting $\psi=\theta^{(K)}$, one can always replace $\theta^{(K)}$ with $\theta^{(K)}-\psi=\mathbf{0}$ (the vector of all $0$’s), without affecting the hypothesis. Thus, one could "eliminate" the vector of parameters $\theta^{(K)}$ (or any other $\theta^{(k)}$, for any single value of $k$), without harming the representational power of our hypothesis. Indeed, rather than optimising over the $K \cdot n$ parameters $(\theta^{(1)},\theta^{(2)},\dots,\theta^{(k)})$ (where $\theta^{(k)} \in \mathbb{R}^n$, one can instead set $\theta^{(K)}=\mathbf{0}$ and optimize only with respect to the $(K-1) \cdot n$ remaining parameters.
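Before implementing this, the invariance claim above is easy to check numerically. The snippet below (a standalone sketch with arbitrary shapes, not part of the lab tasks) shifts every column of a random parameter matrix by the same vector $\psi$ and confirms the predicted probabilities are unchanged:
```
import torch

X = torch.randn(5, 3)        # 5 items, 3 features
Theta = torch.randn(3, 4)    # 4 classes
psi = torch.randn(3, 1)      # same shift applied to every theta^(j)

p1 = torch.softmax(X @ Theta, dim=1)
p2 = torch.softmax(X @ (Theta - psi), dim=1)
print(torch.allclose(p1, p2, atol=1e-6))  # True: identical class probabilities
```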
__Use the following block to implement the softmax gradients for the case where the final column of the parameters theta is fixed to be zero:__
```
import torch
import torch.nn.functional as f
'''
Theta: f*K(20*10)
X: num_items*f(100*20)
y: num_items*1(100*1) value:0~9
out: f*K
'''
def softmax_regression_loss_grad_0(Theta, X, y):
'''Implementation of the gradient of the softmax loss function, with the parameters of the
last class fixed to be zero.
Theta is the matrix of parameters, with the parameters of the k-th class in the k-th column;
K-1 classes are included, and the parameters of the last class are implicitly zero.
X contains the data vectors (one vector per row)
y is a column vector of the targets
'''
# add the missing column of zeros:
Theta = torch.cat((Theta, torch.zeros(Theta.shape[0],1)), 1)
# YOUR CODE HERE
num_classes = Theta.size()[1] #(K+1)
features_dim = Theta.size()[0] #f
num_items = y.size()[0]
y_label = torch.zeros(num_items,num_classes,dtype=torch.float64).scatter_(1,y,1) #num_items * (K+1)
prob = f.softmax(X @ Theta, dim=1) #num_items*K, dim=1 for column
grad = - X.t() @ (y_label - prob)
# remove the last column from the gradients
grad = grad[0:grad.shape[0], 0:grad.shape[1]-1]
return grad
```
```
Theta = torch.Tensor([[1, 0], [0, 0]])
X = torch.Tensor([[1, 0], [0, 1]])
y = torch.LongTensor([[0], [1]])
grad = softmax_regression_loss_grad(Theta, X, y)
grad0 = softmax_regression_loss_grad_0(Theta[:,0:grad.shape[1]-1], X, y)
assert torch.torch.allclose(grad[:,0:grad.shape[1]-1], grad0)
```
Finally, we can run gradient descent with our reduced paramter gradient function, and confirm that the results are similar to before:
```
alpha = 0.1
theta_gd = torch.rand((X_train.shape[1], 9))
for e in range(0, 1000):
gr = softmax_regression_loss_grad_0(theta_gd, X_train, y_train)
theta_gd -= alpha * gr
theta_gd = torch.cat((theta_gd, torch.zeros(theta_gd.shape[0], 1)), 1)
proba = torch.softmax(X_test @ theta_gd, 1)
print(float((proba.argmax(1)-y_test[:,0]==0).sum()) / float(proba.shape[0]))
print()
```
0.9108653220559532
```
```
| hexsha: 8c92d073da9901855b4194df2df12f93ef2a920a | size: 27276 | ext: ipynb | lang: Jupyter Notebook | path: 1_3_softmax_regression_submit_29299675.ipynb | repo: mjjackey/DL_Lab_Soton | repo_head: 5df0dc3124e6fae6c27bfb99d70c457dd77935c5 | licenses: ["Apache-2.0"] | stars: 1 (2021-11-09T09:49:16.000Z to 2021-11-09T09:49:16.000Z) | issues: null | forks: null | avg_line_length: 27276 | max_line_length: 27276 | alphanum_fraction: 0.666483 | converted: true | num_tokens: 5738 | lm_name: Qwen/Qwen-72B | lm_label: 1. YES / 2. YES | lm_q1_score: 0.896251 | lm_q2_score: 0.798187 | lm_q1q2_score: 0.715376 | text_lang: __label__eng_Latn | text_lang_conf: 0.918857 | label: 0.50039 |
# One-dimensional advection equation
We want to solve the following PDE:
\begin{equation}
\frac{\partial \phi}{\partial t} + u \frac{\partial \phi}{\partial x} = 0
\end{equation}
The independent variables (i.e., $x$ and $t$) are used as input values for the NN, and the solution (i.e., $\phi$) is the output. In order to find the solution, at each step the NN outputs are differentiated w.r.t. the inputs. Then, a loss function that matches the PDE is built and the weights are updated accordingly. If the loss function goes to zero, we can assume that our NN is indeed the solution to our PDE. We will try to find a general solution for different values of $u$, so it will also be set as an input.
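For reference, the exact solution used later for comparison follows from the method of characteristics: the initial profile is simply advected with speed $u$, so with $\phi(x, 0) = \sin(2\pi x)$,
\begin{equation}
\phi(x, t) = \phi(x - ut, 0) = \sin\big(2\pi(x - ut)\big).
\end{equation}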
```python
# autoreload nangs
%reload_ext autoreload
%autoreload 2
%matplotlib inline
```
```python
# imports
import math
import numpy as np
import matplotlib.pyplot as plt
import torch
cuda = True
device = "cuda" if torch.cuda.is_available() and cuda else "cpu"
```
## Define PDE
First we define our PDE and set the values for training.
```python
# imports
from nangs.pde import PDE
from nangs.bocos import PeriodicBoco, DirichletBoco
from nangs.solutions import MLP
# define our PDE
class MyPDE(PDE):
def __init__(self, inputs=None, outputs=None):
super().__init__(inputs, outputs)
# the loss of our NN is the PDE !
def computePDELoss(self, grads, inputs, outputs, params):
dpdt, dpdx = grads['p']['t'], grads['p']['x']
u = inputs['u']
return [dpdt + u*dpdx]
# instanciate pde with keys for inputs/outputs
pde = MyPDE(inputs=['x', 't', 'u'], outputs=['p'])
# define input values for training
x_t = np.linspace(0,1,20)
t_t = np.linspace(0,1,30)
u_t = np.linspace(0,1,10)
pde.setValues({'x': x_t, 't': t_t, 'u': u_t})
# periodic b.c for the space dimension
x1, x2 = np.array([0]), np.array([1])
boco = PeriodicBoco('boco', {'x': x1, 't': t_t, 'u': u_t}, {'x': x2, 't': t_t, 'u': u_t})
pde.addBoco(boco)
# initial condition (dirichlet for temporal dimension)
p00, p0 = np.sin(2.*math.pi*x_t), np.array([])
for i in u_t:
p0 = np.concatenate((p0,p00)) # one for each value of 'u', keeping the order (x, t, u)
boco = DirichletBoco('initial_condition', {'x': x_t, 't': np.array([0]), 'u': u_t}, {'p': p0})
pde.addBoco(boco)
# define input values for validation
x_v = np.linspace(0,1,25)
t_v = np.linspace(0,1,15)
u_v = np.linspace(0,1,5)
pde.setValues({'x': x_v, 't': t_v, 'u': u_v}, train=False)
```
Now we define a topology for our solution and set the training parameters. Then we can find a solution for our PDE.
```python
# define solution topology
mlp = MLP(pde.n_inputs, pde.n_outputs, 5, 2048)
optimizer = torch.optim.Adam(mlp.parameters(), lr=3e-4)
pde.compile(mlp, optimizer)
```
```python
# find the solution
hist = pde.solve(epochs=50)
```
Epoch 1/50 Losses 0.39949
PDE [ 0.03317 ]
boco 0.04164
initial_condition 0.32468
Val [ 0.07423 ]<p>Epoch 2/50 Losses 0.20675
PDE [ 0.04072 ]
boco 0.04337
initial_condition 0.12265
Val [ 0.01944 ]<p>Epoch 3/50 Losses 0.07403
PDE [ 0.01658 ]
boco 0.02439
initial_condition 0.03307
Val [ 0.03006 ]<p>Epoch 4/50 Losses 0.02846
PDE [ 0.00848 ]
boco 0.01021
initial_condition 0.00976
Val [ 0.00333 ]<p>Epoch 5/50 Losses 0.01836
PDE [ 0.00458 ]
boco 0.00837
initial_condition 0.00541
Val [ 0.00862 ]<p>Epoch 6/50 Losses 0.01901
PDE [ 0.00595 ]
boco 0.00749
initial_condition 0.00557
Val [ 0.00521 ]<p>Epoch 7/50 Losses 0.01683
PDE [ 0.00390 ]
boco 0.00801
initial_condition 0.00493
Val [ 0.00600 ]<p>Epoch 8/50 Losses 0.02357
PDE [ 0.00678 ]
boco 0.00945
initial_condition 0.00734
Val [ 0.00382 ]<p>Epoch 9/50 Losses 0.01394
PDE [ 0.00361 ]
boco 0.00594
initial_condition 0.00440
Val [ 0.00293 ]<p>Epoch 10/50 Losses 0.01103
PDE [ 0.00280 ]
boco 0.00510
initial_condition 0.00313
Val [ 0.00313 ]<p>Epoch 11/50 Losses 0.01071
PDE [ 0.00322 ]
boco 0.00450
initial_condition 0.00299
Val [ 0.00394 ]<p>Epoch 12/50 Losses 0.02089
PDE [ 0.00515 ]
boco 0.00923
initial_condition 0.00651
Val [ 0.00347 ]<p>Epoch 13/50 Losses 0.01311
PDE [ 0.00344 ]
boco 0.00597
initial_condition 0.00370
Val [ 0.00678 ]<p>Epoch 14/50 Losses 0.00898
PDE [ 0.00265 ]
boco 0.00375
initial_condition 0.00258
Val [ 0.00286 ]<p>Epoch 15/50 Losses 0.01491
PDE [ 0.00444 ]
boco 0.00622
initial_condition 0.00425
Val [ 0.01150 ]<p>Epoch 16/50 Losses 0.01701
PDE [ 0.00535 ]
boco 0.00659
initial_condition 0.00507
Val [ 0.00427 ]<p>Epoch 17/50 Losses 0.01294
PDE [ 0.00367 ]
boco 0.00566
initial_condition 0.00360
Val [ 0.00391 ]<p>Epoch 18/50 Losses 0.00776
PDE [ 0.00226 ]
boco 0.00336
initial_condition 0.00214
Val [ 0.00174 ]<p>Epoch 19/50 Losses 0.00909
PDE [ 0.00258 ]
boco 0.00394
initial_condition 0.00257
Val [ 0.00359 ]<p>Epoch 20/50 Losses 0.01046
PDE [ 0.00268 ]
boco 0.00459
initial_condition 0.00319
Val [ 0.00431 ]<p>Epoch 21/50 Losses 0.01084
PDE [ 0.00379 ]
boco 0.00388
initial_condition 0.00317
Val [ 0.00216 ]<p>Epoch 22/50 Losses 0.01509
PDE [ 0.00353 ]
boco 0.00677
initial_condition 0.00478
Val [ 0.00307 ]<p>Epoch 23/50 Losses 0.01632
PDE [ 0.00329 ]
boco 0.00706
initial_condition 0.00597
Val [ 0.00333 ]<p>Epoch 24/50 Losses 0.00865
PDE [ 0.00248 ]
boco 0.00396
initial_condition 0.00221
Val [ 0.00262 ]<p>Epoch 25/50 Losses 0.00918
PDE [ 0.00224 ]
boco 0.00411
initial_condition 0.00283
Val [ 0.00392 ]<p>Epoch 26/50 Losses 0.00998
PDE [ 0.00243 ]
boco 0.00462
initial_condition 0.00293
Val [ 0.00274 ]<p>Epoch 27/50 Losses 0.02604
PDE [ 0.00447 ]
boco 0.01039
initial_condition 0.01117
Val [ 0.00342 ]<p>Epoch 28/50 Losses 0.01921
PDE [ 0.00422 ]
boco 0.00789
initial_condition 0.00710
Val [ 0.00378 ]<p>Epoch 29/50 Losses 0.01376
PDE [ 0.00327 ]
boco 0.00586
initial_condition 0.00463
Val [ 0.00168 ]<p>Epoch 30/50 Losses 0.00627
PDE [ 0.00188 ]
boco 0.00245
initial_condition 0.00194
Val [ 0.00467 ]<p>Epoch 31/50 Losses 0.01165
PDE [ 0.00294 ]
boco 0.00474
initial_condition 0.00396
Val [ 0.00302 ]<p>Epoch 32/50 Losses 0.02776
PDE [ 0.00738 ]
boco 0.01073
initial_condition 0.00966
Val [ 0.00243 ]<p>Epoch 33/50 Losses 0.00955
PDE [ 0.00298 ]
boco 0.00384
initial_condition 0.00273
Val [ 0.00775 ]<p>Epoch 34/50 Losses 0.01129
PDE [ 0.00302 ]
boco 0.00428
initial_condition 0.00399
Val [ 0.00228 ]<p>Epoch 35/50 Losses 0.00927
PDE [ 0.00241 ]
boco 0.00418
initial_condition 0.00267
Val [ 0.00177 ]<p>Epoch 36/50 Losses 0.00856
PDE [ 0.00262 ]
boco 0.00368
initial_condition 0.00227
Val [ 0.00375 ]<p>Epoch 37/50 Losses 0.01694
PDE [ 0.00568 ]
boco 0.00632
initial_condition 0.00494
Val [ 0.00356 ]<p>Epoch 38/50 Losses 0.01008
PDE [ 0.00280 ]
boco 0.00408
initial_condition 0.00320
Val [ 0.00156 ]<p>Epoch 39/50 Losses 0.01129
PDE [ 0.00282 ]
boco 0.00491
initial_condition 0.00356
Val [ 0.00234 ]<p>Epoch 40/50 Losses 0.01286
PDE [ 0.00394 ]
boco 0.00490
initial_condition 0.00402
Val [ 0.00296 ]<p>Epoch 41/50 Losses 0.01104
PDE [ 0.00315 ]
boco 0.00437
initial_condition 0.00352
Val [ 0.00349 ]<p>Epoch 42/50 Losses 0.01064
PDE [ 0.00305 ]
boco 0.00416
initial_condition 0.00342
Val [ 0.00285 ]<p>Epoch 43/50 Losses 0.01263
PDE [ 0.00330 ]
boco 0.00509
initial_condition 0.00425
Val [ 0.00732 ]<p>Epoch 44/50 Losses 0.01184
PDE [ 0.00325 ]
boco 0.00442
initial_condition 0.00417
Val [ 0.00332 ]<p>Epoch 45/50 Losses 0.01118
PDE [ 0.00296 ]
boco 0.00478
initial_condition 0.00344
Val [ 0.00434 ]<p>Epoch 46/50 Losses 0.01478
PDE [ 0.00332 ]
boco 0.00602
initial_condition 0.00544
Val [ 0.00187 ]<p>Epoch 47/50 Losses 0.01488
PDE [ 0.00349 ]
boco 0.00615
initial_condition 0.00523
Val [ 0.01453 ]<p>Epoch 48/50 Losses 0.01409
PDE [ 0.00388 ]
boco 0.00528
initial_condition 0.00493
Val [ 0.00431 ]<p>Epoch 49/50 Losses 0.01371
PDE [ 0.00418 ]
boco 0.00523
initial_condition 0.00430
Val [ 0.00372 ]<p>Epoch 50/50 Losses 0.01060
PDE [ 0.00241 ]
boco 0.00470
initial_condition 0.00350
Val [ 0.00191 ]
/home/sebastianm/.local/lib/python3.7/site-packages/numpy/core/fromnumeric.py:3257: RuntimeWarning: Mean of empty slice.
out=out, **kwargs)
/home/sebastianm/.local/lib/python3.7/site-packages/numpy/core/_methods.py:161: RuntimeWarning: invalid value encountered in double_scalars
ret = ret.dtype.type(ret / rcount)
```python
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,5))
ax1.plot(hist['train_loss'], label="train_loss")
ax1.plot(hist['val_loss'], label="val_loss")
ax1.grid(True)
ax1.legend()
ax1.set_yscale("log")
for boco in pde.bocos:
ax2.plot(hist['bocos'][boco.name], label=boco.name)
ax2.legend()
ax2.grid(True)
ax2.set_yscale("log")
plt.show()
```
Finally, we can evaluate our solution. Since we also trained over the parameter *u*, we can obtain a solution for any value of *u* without having to perform additional computations.
```python
# evaluate the solution
x = np.linspace(0,1,50)
t = np.linspace(0,1,50)
u = np.linspace(0,1,5)
p, p0, l2 = [], [], []
for _t in t:
_p, _p0, _l2 = [], [], []
for _u in u:
_p0.append(np.sin(2.*math.pi*(x-_u*_t)))
pde.evaluate({'x': x, 't': np.array([_t]), 'u': np.array([_u])}, device)
_p.append(pde.outputs['p'])
_l2.append(np.mean((pde.outputs['p']-np.sin(2.*math.pi*(x-_u*_t)))**2))
p.append(_p)
p0.append(_p0)
l2.append(_l2)
from matplotlib import animation, rc
rc('animation', html='html5')
def plot(x, p, p0, t, l2, u):
ax.clear()
#tit = ax.set_title(f"t = {t:.2f}, l2 = {l2:.5f}", fontsize=14)
tit = ax.set_title(f"t = {t:.2f}", fontsize=14)
for i, _u in enumerate(u):
ax.plot(x, p0[i], label=f"Exact (u = {_u})")
ax.plot(x, p[i], ".k", label=f"NN (u = {_u}, l2 = {l2[i]:.5f})")
ax.set_xlabel("x", fontsize=14)
ax.set_ylabel("p", fontsize=14, rotation=np.pi/2)
ax.legend(loc="upper right")
ax.grid(True)
ax.set_xlim([0, 1])
ax.set_ylim([-1.2, 1.2])
return [tit]
def get_anim(fig, ax, x, p, p0, t, l2, u):
def anim(i):
return plot(x, p[i], p0[i], t[i], l2[i], u)
return anim
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111, autoscale_on=False)
animate = get_anim(fig, ax, x, p, p0, t, l2, u)
anim = animation.FuncAnimation(fig, animate, frames=len(t), interval=100, blit=True)
```
```python
anim
```
| hexsha: d3ccc6ec93f75a60af6203729c4d2f298b03c58c | size: 625051 | ext: ipynb | lang: Jupyter Notebook | path: examples/examples/01_adv1d.ipynb | repo: smatkovi/nangs | repo_head: b9ab6f32fe3632d9ee403f197742cc203670217d | licenses: ["Apache-2.0"] | stars: 2 (2021-02-26T17:44:52.000Z to 2021-04-05T10:27:44.000Z) | issues: null | forks: null | avg_line_length: 101.965905 | max_line_length: 61900 | alphanum_fraction: 0.851107 | converted: true | num_tokens: 4213 | lm_name: Qwen/Qwen-72B | lm_label: 1. YES / 2. YES | lm_q1_score: 0.851953 | lm_q2_score: 0.746139 | lm_q1q2_score: 0.635675 | text_lang: __label__yue_Hant | text_lang_conf: 0.320506 | label: 0.315217 |
```python
import numpy as np
from sympy.solvers import solve
from sympy import Symbol
import sympy
import matplotlib.pyplot as plt
```
#### Implement the explicit Euler method
```python
def euler_method(f, t0, tn, tau, y0):
eps = tau / 10000
while t0 < tn and abs(t0 - tn) > eps:
y0 += tau * f(t0, y0)
t0 += tau
return y0
```
##### Euler method testing
Function: $y'(t) = t\sqrt{y(t)}$ \
Initials: $t_0=0, \ y_0=y(t_0)=y(0)=1$ \
Step: $\tau=0.1$
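For reference, this test problem can be solved exactly by separation of variables:
\begin{equation}
\frac{dy}{\sqrt{y}} = t\,dt
\quad\Rightarrow\quad
2\sqrt{y} = \frac{t^2}{2} + C,
\qquad y(0) = 1 \Rightarrow C = 2,
\end{equation}
so that $y(t) = \left(\frac{t^2}{4} + 1\right)^2 = \frac{(t^2 + 4)^2}{16}$, which is the `y2` curve used for comparison below.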
```python
t = np.arange(0, 11, 1, int)
y1 = np.array([euler_method(lambda t, y: t * y ** 0.5, 0, i, 0.1, 1) for i in t])
y2 = (t ** 2 + 4) ** 2 / 16
plt.plot(t, y1, label='estimated', marker='.')
plt.plot(t, y2, label='calculated', marker='.')
plt.grid(linestyle='--')
plt.title("Euler method")
plt.xlabel('t')
plt.ylabel('y')
plt.legend()
plt.show()
for _t, _y1, _y2 in zip(t, y1, y2):
print(f"t = {_t}:\n\tEstimated: {_y1}\n\tCalculated: {_y2}\n")
```
```python
t = np.arange(0, 11, 1, int)
y1 = [euler_method(lambda t, y: t * y ** 0.5, 0, t[0], 0.1, 1)]
for i in range(1, len(t)):
y1 += [euler_method(lambda t, y: t * y ** 0.5, t[i - 1], t[i], 0.1, y1[-1])]
y2 = (t ** 2 + 4) ** 2 / 16
plt.plot(t, y1, label='estimated', marker='.')
plt.plot(t, y2, label='calculated', marker='.')
plt.grid(linestyle='--')
plt.title("Euler method")
plt.xlabel('t')
plt.ylabel('y')
plt.legend()
plt.show()
for _t, _y1, _y2 in zip(t, y1, y2):
print(f"t = {_t}:\n\tEstimated: {_y1}\n\tCalculated: {_y2}\n")
```
#### Implement the implicit Euler method
```python
def implicit_euler_method(f, t0, tn, tau, y0):
    eps = tau / 10000
    while t0 + tau < tn and abs(tn - t0) > eps:
        t0 += tau
        y = Symbol('y')
        # solve the implicit update y = y0 + tau * f(t_{n+1}, y) for y
        y0 = solve(y - tau * f(t0, y) - y0, y)[0]
    return y0
```
##### Implicit Euler method testing
Function: $y'(t) = t\sqrt{y(t)}$ \
Initials: $t_0=0, \ y_0=y(t_0)=y(0)=1$ \
Step: $\tau=0.1$
```python
t = np.arange(0, 11, 1, int)
y1 = [implicit_euler_method(lambda t, y: t * y ** 0.5, 0, t[0], 0.1, 1)]
for i in range(1, len(t)):
y1 += [implicit_euler_method(lambda t, y: t * y ** 0.5, t[i - 1], t[i], 0.1, y1[-1])]
y2 = (t ** 2 + 4) ** 2 / 16
plt.plot(t, y1, label='estimated', marker='.')
plt.plot(t, y2, label='calculated', marker='.')
plt.grid(linestyle='--')
plt.title("Implicit Euler method")
plt.xlabel('t')
plt.ylabel('y')
plt.legend()
plt.show()
for _t, _y1, _y2 in zip(t, y1, y2):
print(f"t = {_t}:\n\tEstimated: {_y1}\n\tCalculated: {_y2}\n")
```
#### Implement the 4th-order Runge-Kutta method
```python
def runge_kutta(f, t0, tn, tau, y0):
eps = tau / 10000
while t0 < tn and (tn - t0) > eps:
k1 = f(t0, y0)
k2 = f(t0 + tau / 2, y0 + tau * k1 / 2)
k3 = f(t0 + tau / 2, y0 + tau * k2 / 2)
k4 = f(t0 + tau, y0 + tau * k3)
y0 += tau / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
t0 += tau
return y0
```
```python
t = np.arange(0, 11, 1, int)
y1 = [runge_kutta(lambda t, y: t * y ** 0.5, 0, t[0], 0.1, 1)]
for i in range(1, len(t)):
y1 += [runge_kutta(lambda t, y: t * y ** 0.5, t[i - 1], t[i], 0.1, y1[-1])]
y2 = (t ** 2 + 4) ** 2 / 16
plt.plot(t, y1, label='estimated', marker='.')
plt.plot(t, y2, label='calculated', marker='.')
plt.grid(linestyle='--')
plt.title("Runge-Kutta method")
plt.xlabel('t')
plt.ylabel('y')
plt.legend()
plt.show()
for _t, _y1, _y2 in zip(t, y1, y2):
print(f"t = {_t}:\n\tEstimated: {_y1}\n\tCalculated: {_y2}\n")
```
```python
```
| hexsha: 5e8eecfaf78cfc8197f04d22b9f9a5749eea6328 | size: 114099 | ext: ipynb | lang: Jupyter Notebook | path: Damarad_Viktor/differencial_systems.ipynb | repo: droidroot1995/DAFE_Python_914 | repo_head: 0de65a84ab7f4c8f24b83a5747f71f52d57ecc20 | licenses: ["Unlicense"] | stars: null | issues: 7 (2021-05-08T22:02:59.000Z to 2021-05-13T22:44:27.000Z) | forks: 13 (2021-02-13T07:32:10.000Z to 2021-05-15T09:09:08.000Z) | avg_line_length: 219 | max_line_length: 26100 | alphanum_fraction: 0.910131 | converted: true | num_tokens: 1448 | lm_name: Qwen/Qwen-72B | lm_label: 1. YES / 2. YES | lm_q1_score: 0.935347 | lm_q2_score: 0.857768 | lm_q1q2_score: 0.80231 | text_lang: __label__eng_Latn | text_lang_conf: 0.162611 | label: 0.702369 |
```python
from decodes.core import *
from decodes.io.jupyter_out import JupyterOut
out = JupyterOut.unit_square( )
```
# Alternate Coordinate Geometry
todo
\begin{align}
x = r \ cos\theta \\
y = r \ sin\theta
\end{align}
### Cylindrical Coordinates
\begin{eqnarray}
x &=& r \ cos\theta \\
y &=& r \ sin\theta \\
z &=& z
\end{eqnarray}
```python
"""
Cylindrical Evaluation of an Orthonormal CS
Returns a Point relative to this CS given three cylindrical coordinates.
"""
def eval_cyl(self, radius, radians, z):
pt = Point( radius * cos(radians), radius * sin(radians), z)
return self.eval(pt)
```
### Spherical Coordinates
\begin{eqnarray}
x &=& \varrho \ sin\varphi \ cos\theta \\
y &=& \varrho \ sin\varphi \ sin\theta \\
z &=& \varrho \ cos\varphi
\end{eqnarray}
```python
"""
Spherical Evaluation of an Orthonormal CS
Returns a Point relative to this CS given three spherical coordinates.
"""
def eval_sph(self, rho, phi, theta):
x = rho * sin(phi) * cos(theta)
y = rho * sin(phi) * sin(theta)
z = rho * cos(phi)
return self.eval(Point(x,y,z))
```
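As a quick standalone check of the spherical formulas (plain Python only, independent of the decodes `CS` class), converting the evaluated coordinates back should recover the original $(\varrho, \varphi, \theta)$:
```python
from math import sin, cos, sqrt, acos, atan2, isclose

rho, phi, theta = 2.0, 0.7, 1.3            # arbitrary test values
x = rho * sin(phi) * cos(theta)
y = rho * sin(phi) * sin(theta)
z = rho * cos(phi)

rho_back = sqrt(x*x + y*y + z*z)
phi_back = acos(z / rho_back)
theta_back = atan2(y, x)

assert isclose(rho_back, rho) and isclose(phi_back, phi) and isclose(theta_back, theta)
```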
```python
```
| hexsha: 01e426468b930e59a3e841bb870f26e27ac28a6e | size: 4196 | ext: ipynb | lang: Jupyter Notebook | path: 103 - Points, Vectors, and Coordinate Systems/109 - Alternate Coordinate Geometry.ipynb | repo: ksteinfe/decodes_ipynb | repo_head: 2e4bb6b398472fc61ef8b88dad7babbdeb2a5754 | licenses: ["MIT"] | stars: 1 (2018-05-15T14:31:23.000Z to 2018-05-15T14:31:23.000Z) | issues: null | forks: 2 (2020-05-19T05:40:18.000Z to 2020-06-28T02:18:08.000Z) | avg_line_length: 22.928962 | max_line_length: 147 | alphanum_fraction: 0.517636 | converted: true | num_tokens: 326 | lm_name: Qwen/Qwen-72B | lm_label: 1. YES / 2. YES | lm_q1_score: 0.946597 | lm_q2_score: 0.787931 | lm_q1q2_score: 0.745853 | text_lang: __label__eng_Latn | text_lang_conf: 0.550333 | label: 0.571198 |
```python
%reload_ext nb_black
```
```python
import numpy as np
import matplotlib.pyplot as plt
from quantum_systems import ODQD, GeneralOrbitalSystem
```
```python
l = 10
grid_length = 10
num_grid_points = 2001
omega = 0.25
odho = ODQD(
l, grid_length, num_grid_points, a=0.25, alpha=1, potential=ODQD.HOPotential(omega)
)
```
\begin{align}
\epsilon_i = \hbar \omega \left(i + \frac{1}{2}\right)
\end{align}
```python
np.diag(odho.h)
```
array([0.1249998 +0.j, 0.37499902+0.j, 0.62499748+0.j, 0.87499542+0.j,
1.12499515+0.j, 1.37501276+0.j, 1.62513397+0.j, 1.87570984+0.j,
2.12783907+0.j, 2.38410631+0.j])
```python
fig = plt.figure(figsize=(16, 10))
plt.plot(odho.grid, ODQD.HOPotential(omega)(odho.grid))
for i in range(l):
plt.plot(
odho.grid,
np.abs(odho.spf[i]) ** 2 + odho.h[i, i].real,
label=r"$\psi_{" + f"{i}" + r"}$",
)
plt.grid()
plt.legend()
plt.show()
```
```python
print(f"l = {odho.l}")
print(f"grid shape = {odho.grid.shape}")
print(f"h shape = {odho.h.shape}")
print(f"u shape = {odho.u.shape}")
print(f"x shape = {odho.position.shape}")
print(f"spf shape = {odho.spf.shape}")
```
l = 10
grid shape = (2001,)
h shape = (10, 10)
u shape = (10, 10, 10, 10)
x shape = (1, 10, 10)
spf shape = (10, 2001)
```python
system = GeneralOrbitalSystem(n=2, basis_set=odho, anti_symmetrize_u=True)
```
```python
print(f"l = {system.l}")
print(f"grid shape = {system._basis_set.grid.shape}")
print(f"h shape = {system.h.shape}")
print(f"u shape = {system.u.shape}")
print(f"x shape = {system.position.shape}")
print(f"spf shape = {system.spf.shape}")
```
l = 20
grid shape = (2001,)
h shape = (20, 20)
u shape = (20, 20, 20, 20)
x shape = (1, 20, 20)
spf shape = (20, 2001)
__Note:__ `system.u` in the `GeneralOrbitalSystem` holds the anti-symmetrized Coulomb matrix elements.
\begin{align}
\hat{H}
&= \hat{t} + \hat{v} + \hat{u}
= \hat{h} + \hat{u}
= \sum_{i}\left(
-\frac{1}{2}\frac{d^2}{dx^2}
+ \frac{1}{2}\omega^2 x^2
\right)
+ \sum_{i < j} \frac{\alpha}{\sqrt{(x_i - x_j)^2 + a^2}}.
\end{align}
\begin{align}
\hat{H} \Psi(x_1, \dots, x_N) = E \Psi(x_1, \dots, x_N)
\end{align}
In Hartree-Fock:
\begin{align}
\Psi(x_1, \dots, x_N) \approx \Phi(x_1, \dots, x_N)
= \begin{vmatrix}
\phi_1(x_1) & \dots & \phi_1(x_N) \\
\vdots & \ddots & \vdots \\
\phi_N(x_1) & \dots & \phi_N(x_N)
\end{vmatrix}
\end{align}
Variational principle:
\begin{align}
E_{gs} \leq E[\Psi] = \langle \Psi | \hat{H} | \Psi \rangle
\end{align}
Minimization:
\begin{align}
L = \langle \Phi | \hat{H} | \Phi \rangle - \lambda_{ji}\left( \langle \phi_i | \phi_j \rangle - \delta_{ij} \right)
\end{align}
Carrying out the minimization (see *Modern Quantum Chemistry*, Szabo & Östlund) leads to the canonical Hartree-Fock equations:
\begin{align}
\hat{f}|\phi_i\rangle = \varepsilon_i |\phi_i\rangle.
\end{align}
Atomic orbitals (harmonic oscillator orbitals) $\{\chi_{\alpha}\}_{\alpha = 1}^{l}$ to the molecular orbitals (Hartree-Fock orbitals):
\begin{align}
|\phi_i \rangle = C_{\alpha i} | \chi_{\alpha} \rangle,
\end{align}
where $\{\phi_i\}_{i = 1}^{N}$ are the molecular orbitals and $N$ is not necessarily equal to $l$.
Inserting the basis transformation into the Hartree-Fock equations:
\begin{gather}
\hat{f} C_{\alpha i} | \chi_{\alpha} \rangle = \varepsilon_i C_{\alpha i} | \chi_{\alpha} \rangle
\end{gather}
Left-projecting with $\chi_{\beta}$:
\begin{gather}
C_{\alpha i} \langle \chi_{\beta} | \hat{f} | \chi_{\alpha} \rangle
= \varepsilon_i C_{\alpha i} \langle \chi_{\beta} | \chi_\alpha \rangle
= \varepsilon_i C_{\beta i},
\end{gather}
as $\langle \chi_{\beta} | \chi_\alpha \rangle = \delta_{\beta \alpha}$ in our case.
Define the matrices $\mathbf{F}$ and $\mathbf{C}$, and the vector $\boldsymbol{\varepsilon}$. The elements of $\mathbf{F}$ are
\begin{gather}
[\mathbf{F}]_{\beta \alpha} \equiv \langle \chi_{\beta} | \hat{f} | \chi_{\alpha} \rangle.
\end{gather}
This lets us write the Roothaan-Hall equations:
\begin{align}
\mathbf{F} \mathbf{C} = \mathbf{C} \boldsymbol{\varepsilon},
\end{align}
which is a generalized eigenvalue equation.
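Because the harmonic-oscillator orbitals are orthonormal (the overlap matrix is the identity, as noted above), this is in fact an ordinary Hermitian eigenvalue problem rather than a truly generalized one. A small, hedged illustration reusing `odho` from the cells above:
```python
# Since the overlap matrix is the identity here, F C = C eps is a plain Hermitian
# eigenvalue problem. E.g. for the non-interacting case (F -> h):
import numpy as np
eps = np.linalg.eigvalsh(odho.h)   # assumes odho.h (defined above) is Hermitian
print(eps[:5])                     # should be close to omega*(i + 1/2) = 0.125, 0.375, ...
```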
The Fock operator:
\begin{align}
\hat{f} = \hat{h} + \hat{u}^{direct} + \hat{u}^{exchange}
\end{align}
The matrix elements are:
\begin{align}
\langle \chi_{\beta} | \hat{f} | \chi_{\alpha} \rangle
= h_{\beta \alpha} + u^{direct}_{\beta \alpha} + u^{exchange}_{\beta \alpha},
\end{align}
where the one-body Hamiltonian is:
\begin{align}
h_{\beta \alpha}
= \int dx \chi_{\beta}(x) h \chi_{\alpha}(x),
\end{align}
(this is found in `odho.h` in the code.)
the direct interaction is:
\begin{align}
u^{direct}_{\beta \alpha}
= \sum_{i}^{N} \langle \chi_{\beta} \phi_i | \hat{u} | \chi_{\alpha} \phi_i \rangle,
\end{align}
the exchange interaction is:
\begin{align}
u^{exchange}_{\beta \alpha}
= \sum_{i}^{N} \langle \chi_{\beta} \phi_i | \hat{u} | \phi_i \chi_{\alpha} \rangle,
\end{align}
\begin{gather}
\langle \chi_{\beta} | \hat{f} | \chi_{\alpha} \rangle
= h_{\beta \alpha} + C^{*}_{??} C_{??} u_{\beta ? ? \alpha}
\end{gather}
The matrix elements $u^{\alpha \beta}_{\gamma \delta}$ can be found in `odho.u`. They are labelled:
\begin{align}
u^{\alpha \beta}_{\gamma \delta}
= \int d x_1 d x_2 \chi^{*}_{\alpha}(x_1) \chi^{*}_{\beta}(x_2) u(x_1, x_2) \chi_{\gamma}(x_1) \chi_{\delta}(x_2)
\end{align}
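Purely as an illustration (not the library's reference implementation), the pieces above — `h`, the anti-symmetrized `u`, and the coefficients $C$ — could be combined into a naive self-consistent field loop along the following lines. The contraction assumes `u[p, q, r, s]` $= \langle pq\|rs\rangle$ (anti-symmetrized, physicists' ordering) and that the lowest $N$ orbitals are occupied; adjust the indices if the actual storage convention differs:
```python
import numpy as np

def scf_sketch(h, u, n, max_iter=200, tol=1e-8):
    """Naive SCF loop (illustrative only).

    h : (l, l) one-body matrix; u : (l, l, l, l) anti-symmetrized two-body
    elements, assumed stored as u[p, q, r, s] = <pq||rs>; n : number of
    occupied orbitals.
    """
    eps, C = np.linalg.eigh(h)                # initial guess: non-interacting orbitals
    D = C[:, :n] @ C[:, :n].conj().T          # density matrix D_{dg} = sum_i C_{di} C*_{gi}
    for _ in range(max_iter):
        # Fock matrix: F_{ba} = h_{ba} + sum_{g,d} D_{dg} <b g || a d>
        F = h + np.einsum("dg,bgad->ba", D, u)
        eps, C = np.linalg.eigh(F)
        D_new = C[:, :n] @ C[:, :n].conj().T
        if np.linalg.norm(D_new - D) < tol:   # converged when the density stops changing
            break
        D = D_new
    return eps, C
```
With the system constructed earlier one might call something like `eps, C = scf_sketch(system.h, system.u, n=2)` — treat the call signature and index pattern as assumptions to verify against the library.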
# Travel Times in 1D Models
## Name:
## Date:
```python
# If using Anaconda3 on your machine you can do without this. This is for Azure people.
#!pip install obspy # TODO Uncomment if on Azure
```
## Computing the $X$ offset and travel time $T$ of the downgoing ray in a linear velocity gradient
To begin, recall that the offset, $X$, and travel time $T$ of the down-going ray path in $1D$ media is very generally given by
\begin{equation}
x(p) = \int_0^{h} \frac{p v(h)}{\sqrt{1 - p^2 v^2(h)}} dh
\end{equation}
\begin{equation}
t(p) = \int_0^{h} \frac{1}{v(h) \sqrt{1 - p^2 v^2(h)}} dh
\end{equation}
where $h$ is the thickness of the model, $v$ the velocity, and $p$ the ray parameter. We now introduce the velocity gradient
\begin{equation}
v = v(h) = v_0 + a h
\end{equation}
where $v_0$ is the velocity at $h=0$ and $a$ the velocity gradient. Inserting the velocity gradient into the offset and travel time equations
\begin{equation}
x(p) = \int_0^{h} \frac{p (v_0 + ah)}{\sqrt{1 - p^2(v_0 + ah)^2}} dh
\end{equation}
\begin{equation}
t(p) = \int_0^{h} \frac{1}{(v_0 + a h) \sqrt{1 - p^2 (v_0 + ah)^2} } dh
\end{equation}
Let's now change the integrals to be functions of $v$. To do this we use the change of variables $v = v_0 + a h$. The derivative requires that $dv = a dh$ or
\begin{equation}
dh = \frac{dv}{a}
\end{equation}
The limits of the integrands must also be reworked so that
\begin{equation}
v(h=0) = v_0
\end{equation}
and
\begin{equation}
v(h=h) = v_0 + \frac{(v_1 - v_0)}{h} h = v_1
\end{equation}
the offset and travel time integrals are then transformed into
\begin{equation}
x(p) = \frac{1}{a} \int_{v_0}^{v_1} \frac{pv}{\sqrt{1 - p^2 v^2}} dv
\end{equation}
\begin{equation}
t(p) = \frac{1}{a} \int_{v_0}^{v_1} \frac{1}{v \sqrt{1 - p^2 v^2} } dv
\end{equation}
The first integral has, for this entire time, corresponded to a fairly obvious u substitution. The second integral is less obviously the inverse hyperbolic cos. Let's make the mechanics of both integrals a little more clear by making one more substitution, $s = pv$. This requires that
\begin{equation}
dv = \frac{ds}{p}
\end{equation}
and new integral limits $s_1 = pv_0$ and $s_2 = pv_1$.
\begin{equation}
x(p) = \frac{1}{a} \int_{pv_0}^{pv_1} \frac{s}{\sqrt{1 - s^2}} \frac{ds}{p}
= \frac{1}{a p} \int_{pv_0}^{pv_1} \frac{s}{\sqrt{1 - s^2}} ds
\end{equation}
\begin{equation}
t(p) = \frac{1}{a} \int_{pv_0}^{pv_1} \frac{p}{s \sqrt{1 - s^2} } \frac{ds}{p}
= \frac{1}{a} \int_{pv_0}^{pv_1} \frac{1}{s \sqrt{1 - s^2} } ds
\end{equation}
The first integral is
\begin{equation}
x(p) =-\frac{1}{ap} \left . \sqrt{1 - s^2} \right |_{p v_0}^{p v_1}
= \frac{1}{ap} \left . \sqrt{1 - s^2} \right |_{p v_1}^{p v_0}
= \frac{1}{ap} \left . \sqrt{1 - v^2 p^2} \right |_{v_1}^{v_0}
= \frac{v}{a p} \left . \sqrt{\frac{1}{v^2} - p^2} \right |_{v_1}^{v_0}
\end{equation}
The second integral is a little more complicated so I'll write down the punchline.
<!--
In this case, you would say that $u=\sqrt{1 - s^2}$. Thus, $du =-\frac{s}{\sqrt{1 - s^2}} ds$ or $\frac{ds}{\sqrt{1 - s^2}} =-\frac{du}{s}$.
\begin{equation}
t(p) =-\frac{1}{a} \int \frac{du}{s^2}
=-\frac{1}{a} \int \frac{du}{1 - u^2}
= \frac{1}{a} \int \frac{du}{u^2 - 1}
= \frac{1}{a} \int \frac{1}{1 - u} - \frac{1}{u + 1} \, du
= \frac{1}{a} \left [ \log(1 - u) - \log(u + 1) \right ]
= \frac{1}{a} \left [ \log(1 - \sqrt{1-s^2}) - \log(\sqrt{1-s^2} + 1) \right ]
= \frac{1}{a} \left [ \frac{1 - \sqrt{1 - s^2}}{\sqrt{1 - s^2} + 1} \right ]
\end{equation}
-->
\begin{equation}
t(p) =-\frac{1}{a} \log \left ( \frac{1 + \sqrt{1 - s^2}}{s} \right )_{p v_0}^{p v_1}
= \frac{1}{a} \log \left ( \frac{1 + \sqrt{1 - p^2 v^2}}{p v} \right )_{v_1}^{v_0}
= \frac{1}{a} \log \left ( \frac{1}{vp} + \frac{1}{p} \sqrt{ \frac{1}{v^2} - p^2 } \right )_{v_1}^{v_0}
\end{equation}
Defining the vertical slowness $\eta = \sqrt{\frac{1}{v^2} - p^2}$ and $u=\frac{1}{v}$ we have that the offset is simply given by
\begin{equation}
x(p) = \left . \frac{1}{a u p} \eta \right |_{u_1}^{u_0}
\end{equation}
\begin{equation}
t(p) = \frac{1}{a} \log \left ( \frac{u + \eta}{p} \right )_{v_1}^{v_0}
\end{equation}
You would think that here Shearer calls it a day. But no. Let's add zero so that
\begin{equation}
t(p) = \frac{1}{a} \log \left ( \frac{u + \eta}{p} \right )_{v_1}^{v_0}
- p \left . \frac{1}{a u p} \eta \right |_{u_1}^{u_0}
+ p \left . \frac{1}{a u p} \eta \right |_{u_1}^{u_0}
= \frac{1}{a} \left ( \log \left ( \frac{u + \eta}{p} \right ) - \frac{\eta}{u} \right )_{v_1}^{v_0}
+ p x(p)
\end{equation}
## What Needs to be Programmed
And there you have it - the equations to implement for computing offset and travel time are
\begin{equation}
x(p) = \left . \frac{1}{a u p} \eta \right |_{u_1}^{u_0}
\end{equation}
\begin{equation}
t(p) = \frac{1}{a} \left ( \log \left ( \frac{u + \eta}{p} \right ) - \frac{\eta}{u} \right )_{v_1}^{v_0}
+ p x(p)
\end{equation}
In general we have to be somewhat careful. For example, if the slowness becomes undefined at the bottom of the layer we would say the ray has turned and only keep the first contribution. Additionally, we are not counting the upgoing contribution, thus, $x(p)$ would have to be multiplied by $2$ to obtain $X(p)$.
# Computing the Travel Time Integral
For the travel time integral we substitute $u=\sqrt{1 - s^2}$. The corresponding derivative is
$du =-\frac{s}{\sqrt{1 - s^2}} ds$ or
$$
\frac{ds}{\sqrt{1 - s^2}} =-\frac{du}{s}
$$
\begin{equation}
t(p) =-\frac{1}{a} \int \frac{du}{s^2}
=-\frac{1}{a} \int \frac{du}{1 - u^2}
\end{equation}
Using an integration table, $\int \frac{du}{1 - u^2} = \tanh^{-1}(u)$, so our integral becomes
\begin{equation}
t(p) =-\frac{1}{a} \tanh^{-1}(u)
\end{equation}
Now back-substitute
\begin{equation}
t(p) =-\frac{1}{a} \tanh^{-1} \left ( \sqrt{1 - s^2} \right )
\end{equation}
We now apply the definition of the inverse hyperbolic tangent,
$$
\tanh^{-1} x = \frac{1}{2} \left [ \log (1 + x) - \log(1 - x) \right ]
$$
so that
$$
\tanh^{-1} \left ( \sqrt{1 - s^2} \right )
= \frac{1}{2}
\left [ \log \left (1 + \sqrt{1 - s^2} \right ) - \log \left (1 - \sqrt{1 - s^2} \right ) \right ]
= \frac{1}{2}
\left [
\log \left (\frac{1 + \sqrt{1 - s^2}}{1 - \sqrt{1 - s^2}} \right )
\right ]
= \frac{1}{2}
\left [
\log
\left (
\left (\frac{1 + \sqrt{1 - s^2}}{1 - \sqrt{1 - s^2}} \right )
\left (\frac{1 + \sqrt{1 - s^2}}{1 + \sqrt{1 - s^2}} \right )
\right )
\right ]
$$
Notice the denominator is now $1^2 - \sqrt{1 - s^2}^2 = 1 - (1 - s^2) = s^2$. And the numerator is just
$\left ( 1 + \sqrt{1 - s^2} \right )^2$. We now apply the property of logarithms that $a \log x = \log x^a$.
$$
\tanh^{-1} \left ( \sqrt{1 - s^2} \right )
= \log \left ( \sqrt { \left ( \frac{(1 + \sqrt{1 - s^2})^2}{s^2} \right ) } \right )
= \log \left ( \frac{1 + \sqrt{1 - s^2}}{s} \right )
%= \log \left (1 + \sqrt{1 - s^2} \right ) - \log s
$$
Therefore, we obtain
\begin{equation}
t(p)
=\frac{1}{a} \int_{p v_0}^{p v_1} \frac{1}{s\sqrt{1 - s^2}} ds
=\left . -\frac{1}{a} \log \left ( \frac{1 + \sqrt{1 - s^2}}{s} \right ) \right |_{p v_0}^{p v_1}
\end{equation}
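A quick way to double-check the antiderivative (a hedged aside, not part of the original derivation) is to differentiate it symbolically and compare with the integrand:
```python
import sympy
s = sympy.symbols("s", positive=True)
antideriv = -sympy.log((1 + sympy.sqrt(1 - s**2)) / s)
integrand = 1 / (s * sympy.sqrt(1 - s**2))
print(sympy.simplify(sympy.diff(antideriv, s) - integrand))  # expected to print 0
```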
## Additional Notes
The cleanest derivations I've seen for the linear gradient problem are from Slotnick's Lessons in Seismic Computing and Sergey Fomel's 2008 paper - Fast sweeping method for the factored eikonal equation. I ultimately used Slotnick's derivation because it is simpler, but working from the ray-path equation defined as a first-order system does open more potential doors. I've tried to rework Slotnick's work to match the results given in Shearer's textbook.
<!---
If you want to have a go then uncomment the lines in this cell
$$
\cosh^{-1}(x) = \ln(x + \sqrt{x^2 -1})
$$
$$
\int \frac{1}{\sqrt{x^2 - 1}}
$$
$$
\ln \left ( \frac{1}{v(z)} + \sqrt{\frac{1}{v(z)^2} - p^2} \right )
- \frac{1}{v(z)} \sqrt{\frac{1}{v(z)^2} - p^2} - \ln(p)
$$
Using Wolfram this is really close to the integral of
$$
I=\int \sqrt{x^2 - p^2} dx =-\frac{p^2}{2} \ln(x^2 - p^2) + x\sqrt{x^2 - p^2} + constant
$$
which would be great because I can write
$$
t(p) = px(p) + \int \sqrt{\gamma(z)^2 - p^2} dz
$$
anyway...
$$
\ln \left ( x + \sqrt{x^2 - 1} \right )
-\frac{p}{v(z)} \sqrt{\frac{1}{p^2 v(z)^2} - 1}
$$
$$
\ln \left ( x + \sqrt{x^2 - 1} \right )
-\frac{p^2}{v(z)} \sqrt{\frac{1}{p^2 v(z)} - 1}
$$
$$
t(x) = \ln \left ( x + \sqrt{x^2 - 1} \right ) - p^2 x \sqrt{x^2 - 1}
$$
Differentiate and get
$$
\frac{dt}{dx}
=\frac{1}{\sqrt{x^2 - 1}} \frac{dx}{dv} \frac{dv}{dz}
- p^2 \sqrt{x^2 - 1} \frac{dx}{dv} \frac{dv}{dz}
- p^2 x^2 \frac{1}{\sqrt{x^2 - 1}} \frac{dx}{dv} \frac{dv}{dz}
$$
$$
\frac{1 - p^2 x^2}{\sqrt{x^2 - 1}} \frac{dx}{dz} - p^2 \sqrt{x^2 - 1} \frac{dx}{dz}
$$
$$
\frac{1 - p^2 x^2}{\sqrt{x^2 - 1}} \frac{dx}{dz} - \frac{p^2 (x^2 - 1)}{\sqrt{x^2 - 1}} \frac{dx}{dz}
$$
$$
\frac{1 + p^2(1 - 2 x^2)}{\sqrt{x^2 - 1}} \frac{dx}{dz}
$$
Maybe this is the time to evaluate $\frac{dv}{dz} = b$ and $\frac{dx}{dv} =-\frac{1}{p v(z)^2}$ so
that
$$
\frac{dx}{dv} \frac{dv}{dz} =-\frac{b}{p v(z)^2}
$$
And
$$
-\frac{1 + p^2(1 - 2 x^2)}{\sqrt{\frac{1}{pv(z)^2} - 1}} \frac{b}{p v(z)^2}
=-\frac{b + b p^2 \left (1 - 2 \frac{1}{p^2 v(z)^2} \right )}{v(z) \sqrt{1 - v(z)^2 p^2}}
=-\frac{b(1 + p^2) - 2 \frac{1}{v(z)^2} }{v(z) \sqrt{1 - v(z)^2 p^2}}
$$
--->
```python
from numpy import sqrt
from numpy import log
from numpy import linspace
from numpy import zeros
from numpy import asarray
def layerxt(p, h, vtop, vbottom):
"""
    Calculates the change in position and change in time for a ray in a layer with a linear
velocity gradient. This is a Python translation of Shearer's layerxt which was heavily
modified from a portion of Chris Chapman's WKBJ program.
Input
-----
p : float
Horizontal slowness (s/km).
h : float
Layer thickness (km).
vtop : float
Velocity (km/s) at the top of the layer.
vbottom : float
Velocity (km/s) at the bottom of the layer.
Returns
-------
dx : float
Range offset (km).
dt : float
Travel time change (seconds).
irtr : integer
This is a return code that can mean:
-1 -> zero thickness layer.
0 -> ray turned above the layer.
1 -> ray passed through the layer.
2 -> ray turned in the layer and only one leg in the dx and dt is counted.
"""
# Checks
irtr =-1
dx = None
dt = None
if (h < 0.0):
print("Layer thickness cannot be negative")
return dx, dt, irtr
if (vtop <= 0.0 or vbottom <= 0.0):
print("Velocities must be positive")
return dx, dt, irtr
# Compute slownesses
u1 = 1.0/vtop # Slowness at top of layer
u2 = 1.0/vbottom # Slowness at bottom of layer
dx = 0.0
dt = 0.0
# Ray turned above this layer
if (p >= u1):
irtr = 0
return dx, dt, irtr
# Zero layer thickness
if (h == 0):
irtr = -1
return dx, dt, irtr
b = (vtop - vbottom)/h # Velocity gradient; Eqn 4.20 of Shearer
eta1 = sqrt(u1**2 - p**2)
# Constant velocity layer - ray must pass through
if (abs(b) == 0.0):
dx = h*p/eta1
dt = h*u1**2/eta1
irtr = 1
return dx, dt, irtr
x1 = eta1/(u1*b*p) # Eqn 4.21 of Shearer
tau1 = (log((u1 + eta1)/p) - eta1/u1)/b # Eqn 4.22 of Shearer
# Ray turns within layer - no contribution to integral from bottom point
# Ray turned in the layer
if (p >= u2):
dx = x1
dtau = tau1
dt = dtau + p*dx
irtr = 2
return dx, dt, irtr
# Ray passed through the layer
irtr = 1
eta2 = sqrt(u2**2 - p**2)
x2 = eta2/(u2*b*p) # Eqn 4.21 of Shearer
tau2 = (log((u2 + eta2)/p) - eta2/u2)/b # Eqn 4.22 of Shearer
dx = x1 - x2
dtau = tau1 - tau2
dt = dtau + p*dx
return dx, dt, irtr
```
```python
def computeXTInLayeredGradientModel(rayParameters, deps, vp, redvel = 8.0):
"""
    Computes the offsets and reduced travel times in a layered model where each
    layer has a linear velocity gradient. Depths in the structural model increase
    positively downward.
Input
-----
rayParameters : array_like
Apparent slownesses (s/km)
deps : array_like
        Depth to each interface (km). Note, there are len(deps) - 1 layers and each
        layer thickness is deps[layer+1] - deps[layer].
vp : array_like
Compressional velocity (km/s) at the layer interfaces.
redvel : float
Reduction velocity (km/s). Shearer uses 8 km/s in the HW.
Returns
-------
offset : array_like
The offsets in x (km), i.e, distance traveled for each ray parameter.
ttimes : array_like
        The reduced travel times (s) for each ray parameter.
Copyright
---------
Ben Baker distributed under the MIT license.
"""
# Loop on ray parameters
xtimes = zeros(len(rayParameters))
ttimes = zeros(len(rayParameters))
for ip in range(len(rayParameters)):
# Loop on model depths
x = 0.0 # Offset
t = 0.0 # Travel time
p = rayParameters[ip] # Ray parameter
for depth in range(len(deps)-1):
h = deps[depth+1] - deps[depth]
vtop = vp[depth]
vbottom = vp[depth+1]
dx, dt, irtr = layerxt(p, h, vtop, vbottom)
# Typically the ray continues through the layer
if (irtr == 1):
x = x + dx*2.0 # Up and downgoing leg
t = t + dt*2.0 # Up and downgoing leg
elif (irtr == 2):
x = x + dx*2.0 # Up and downgoing leg
t = t + dt*2.0 # Up and downgoing leg
break
# Ray turned in layer above - stop calculating (I should hit this)
if (irtr == 0):
break
# Loop on depths
xtimes[ip] = x # Tabulate distance
ttimes[ip] = t # Tabulate time
# Loop on ray parameters
#TODO - Something screwy - but x and t are negative. I need to implement from Chapman's book
offset =-xtimes # Make xtimes positive
ttimes =-ttimes
if (redvel > 0.0):
reducedTimes = ttimes - offset/redvel # Reduce travel times with reduction velocity
else:
reducedTimes = ttimes
return offset, reducedTimes
```
## Programming Question - Shearer Question 8 From Section 4.11
Compute the P-wave traveltime, $T(X)$ curve for the velocity model
| Depth (km) | $\alpha$ (km/s) | $\beta$ (km/s) | $\rho$ (g/cm^3) |
| ----------- |-------------:| -----:|----:|
| 0.0 | 4.50 | 2.40 | 2.0 |
| 1.5 | 6.80 | 3.75 | 2.8 |
| 6.0 | 7.00 | 3.85 | 2.9 |
| 6.5 | 8.00 | 4.60 | 3.1 |
| 10.0 | 8.10 | 4.70 | 3.1 |
for $100$ ray parameters from $0.1236$ s/km to $0.2217$ s/km. Use a reduction velocity of $8$ km/s.
As a hint, your function call should look something like:
offset, ttimes = computeXTInLayeredGradientModel(rayParameters, deps, velocities, redvel)
You'll probably want to program something like:
deps = [0.0, 1.5, 6.0, 6.5, 10.0] # Layer interfaces
vs = [2.4, 3.75, 3.85, 4.6, 4.7] # Shear velocity (km/s) - THIS IS OF NO USE TO YOU - CHANGE TO VP
redvel = 8.0 # Reduction velocity (km/s). This trick increases the resolution of the curve.
pmin = 0.1236 # Minimum slowness in x (s/km)
pmax = 0.2217 # Maximum slowness in x (s/km)
np = 100 # Number of ray parameters
rayParameters = linspace(pmin, pmax, np)
offset, ttimes = computeXTInLayeredGradientModel(rayParameters, deps, vp, redvel)
## Insert Your Code Here
```python
# Put your code here
```
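One possible way to fill in the cell above (a sketch following the hint; the P velocities are the $\alpha$ column of the table):
```python
# One possible fill-in (a sketch, not the only solution):
deps = [0.0, 1.5, 6.0, 6.5, 10.0]        # layer interfaces (km)
vp = [4.50, 6.80, 7.00, 8.00, 8.10]      # P velocity (km/s) at the interfaces
redvel = 8.0                             # reduction velocity (km/s)
pmin, pmax, nrays = 0.1236, 0.2217, 100  # slowness range (s/km) and number of rays
rayParameters = linspace(pmin, pmax, nrays)
offset, ttimes = computeXTInLayeredGradientModel(rayParameters, deps, vp, redvel)
```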
## Question
Caustics are where $\frac{dp}{dx} = \infty$. Given this, plot a travel-time vs. offset curve from distance $0$ to $\max\{offset\}$ and identify the offsets at which the caustics are located. Be sure to fix the code so that your axes are correctly labeled.
```python
import matplotlib.pyplot as plt
plt.plot(offset, ttimes) # Plot travel time as a function of offset
plt.ylabel('label') # TODO put the correct label name
plt.xlabel('label') # TODO put the correct label name
plt.title('label') # TODO put an appropriate title for this type of plot
plt.xlim(0,max(offset))
plt.xticks(linspace(0,100,11))
plt.grid(True)
plt.show()
```
WRITE YOUR ANSWER HERE
## Question
We expect large amplitudes where rays are focused. A way to identify an abundance of rays is to look at places where the ratio of the change in ray parameter to the change in distance quickly changes to $0$ - i.e., places where the ray parameter/distance curve becomes flat. At which distances would you expect the greatest amount of energy to occur? As a hint, don't pick ~72 km - though there is energy focusing here it won't be the most significant because the ray density will decrease as distance increases (this is a way to describe geometrical spreading).
```python
import matplotlib.pyplot as plt
plt.plot(offset, rayParameters) # Plot ray-parameter as a function of offset
plt.ylabel('label') # TODO - put an appropriate label here
plt.xlabel('label') # TODO - put an appropriate label here
plt.title('label') # TODO - put an appropriate title here
plt.xlim(0,max(offset))
plt.xticks(linspace(0,100,11))
plt.grid(True)
plt.show()
```
WRITE YOUR ANSWER HERE
# Depth Phases and Event Depth Estimation
The ability to predict travel time as a function of distance is useful for a variety of tasks. One particularly important task is to locate earthquakes in $1D$ models. Here we will use the event epicenter as a-priori information and optimize for the depth and origin time. At teleseismic distances, as with local events, we face a trade-off: we could decrease the event depth by decreasing the origin time. A way to eliminate this trade-off is to use depth phases that first travel up from the source, reflect at the surface, and then travel to the receiver, thereby placing a constraint on the depth. For this activity we'll get some data from a deep earthquake in Alaska that has $P$ phases and $pP$ depth phases. Then, we will generate theoretical times using a global theoretical travel time table. Finally, we will tabulate an objective function and see how depth and origin time trade off at teleseismic distances with $P$ phases only and with both $P$ and $pP$ phases.
## Data
Here we will use data collected by some broadband Pacific Northwest Seismic Network stations. The information we extract is an estimate of the phase arrival time, or a travel time *pick*.
```python
# If using Anaconda3 on your machine you can do without this. This is for Azure people.
#!pip install obspy # TODO Uncomment if on Azure
```
```python
# A relatively deep earthquake in Alaska
from obspy import UTCDateTime
from obspy.clients.fdsn import Client
from obspy.geodetics.base import gps2dist_azimuth
from obspy import read
from obspy import Stream
from obspy.signal.trigger import recursive_sta_lta # Help with picking
from obspy.signal.trigger import plot_trigger # Visualizing what the picker
from obspy.taup import TauPyModel # This will let us compute global travel times
from numpy import meshgrid # Plotting stuff
from numpy import sqrt
import matplotlib.cm as cm
# Initialize some obspy things
model = TauPyModel(model="ak135") # For computing travel times
client = Client("IRIS")
# Pull waveforms BC earthquake from https://earthquake.usgs.gov/earthquakes/eventpage/us2000b2vq#executive
originTime = UTCDateTime("2017-10-08T22:34:33")
eventLat = 52.391 # Event latitude from catalog
eventLon =-176.769 # Event longitude from catalog
eventDepth = 119.0 # Event depth from catalog
def getStationLocation(network="IU", station="KEV", channel="BHZ", location="00"):
"""
A convenience function for getting the station latitude and longitude from the given station, network,
channel, and location.
Returns
stationLatitude : float
Station latitude in degrees.
stationLongitude : float
        Station longitude in degrees.
"""
# It's much more convenient to wildcard and get ZNE all at once
stRaw = client.get_waveforms(network, station, location, channel, originTime, originTime+3000) #t, t + 60 * 60)
    # Very awkward - but get the station lat/lon (I don't know why obspy isn't lifting this from the miniSEED header)
inventory = client.get_stations(network=network, station=station,
starttime=originTime, endtime=originTime+400)
for net in inventory:
for sta in net:
stationLatitude = sta.latitude
stationLongitude = sta.longitude
return stationLatitude, stationLongitude
print("Error getting station location")
return None, None
# Pull data from some puget sound stations
windowStart = 410
windowEnd = windowStart + 70
nets = ["UW", "UW", "UW", "UW", "UW"]
stations = ["GNW", "SP2", "RATT", "DOSE", "TOLT"]
channels = ["BHZ", "BHZ", "BHZ", "BHZ", "BHZ"]
locs = ["--", "--", "--", "--", "--"]
# Get the data
dataList = []
for k in range(len(nets)):
# Fetch the waveform data
st = client.get_waveforms(nets[k], stations[k], locs[k], channels[k],
originTime + windowStart, originTime + windowEnd, attach_response=True)[0]
# Fetch the station lat/lon
stla, stlo = getStationLocation(network=nets[k], station=stations[k], channel=channels[k], location=locs[k])
    # Compute the distance
d, az, baz = gps2dist_azimuth(eventLat, eventLon, stla, stlo)
distDeg = d*1.e-3/111.195
st.stats.coordinates = {'latitude': stla, 'longitude': stlo, 'distance': distDeg}
# Append the data to the data list
dataList.append(st)
# Make an obspy stream
data = Stream(traces=dataList)
```
```python
# Plot the picks and get my attention use an automatic picker
for k in range(len(data)):
cft = recursive_sta_lta(data[k].data, int(6 * data[k].stats.sampling_rate), int(20* data[k].stats.sampling_rate))
#plot_trigger(data[k].data, cft, 1.2, 0.5)
plot_trigger(data[k], cft, 2., 2.2)
```
```python
# Call these the pick times
from numpy import array
# Create an array of observed arrival times. Recall I'm requesting waveforms windowStart seconds after the
# catalog origin time so I need to add that back in for the full pick time.
observed_P_times = array([18, 22, 24, 16, 22]) + windowStart - 35.0 # I think NEIC's origin time is screwy
observed_pP_times = array([52, 56, 52, 48, 52]) + windowStart - 35.0 # Or it's possible ObsPy is grabbing waveforms awkwardly
print("The observed P pick times are:", observed_P_times, "seconds")
print("The observed pP pick times are:", observed_pP_times, "seconds")
```
The observed P pick times are: [ 393. 397. 399. 391. 397.] seconds
The observed pP pick times are: [ 427. 431. 427. 423. 427.] seconds
```python
def getPhasePickTime(eventLat, eventLon, eventDepth, stationLat, stationLon, phaseName=['P']):
"""
Convenience function to get the desired phase pick time from the ak135 travel time table.
Input
-----
eventLat : float
Event latitude in degrees
eventLon : float
Event longitude in degrees
eventDepth : float
        Event depth in km
stationLat : float
Station latitude in degrees
stationLon : float
Station longitude in degrees
phaseName : list
Phase name - e.g., ['P'] for a P phase or ['pP'] for a P depth phase.
Result
------
phaseTime : float
Theoretical travel time for phase in seconds
Copyright
---------
Ben Baker distributed under the MIT license.
"""
# Compute the source/receiver distance in meters
d, az, baz = gps2dist_azimuth(eventLat, eventLon, stationLat, stationLon)
# Convert kilometers to degrees where 111.195 is the number of kilometers per degree
distDeg = d*1.e-3/111.195 # Convert to kilometers then to degrees
# Compute the travel time for the phase - for example look at Figure 4.20 of Shearer
pArrival = model.get_travel_times(source_depth_in_km=eventDepth,
distance_in_degree=distDeg,
phase_list=phaseName) #["P"])
return pArrival[0].time
# This is some debugging code
#k = 0
#for k in range(len(data)):
# stationLat = data[k].stats.coordinates.latitude
# stationLon = data[k].stats.coordinates.longitude
# P_time = getPhasePickTime(eventLat, eventLon, eventDepth, stationLat, stationLon, phaseName=['P'])
# pP_time = getPhasePickTime(eventLat, eventLon, eventDepth, stationLat, stationLon, phaseName=['pP'])
# #print(P_time, pP_time, pP_time - P_time)
```
## Algorithm
Now that we've made observed travel time picks and can make theoretical picks we will tabulate an objective functions for each candidate origin time, $t_0$ and depth $d$ as the residual squared of the observed and estimate travel times for the $P$ phase only
\begin{equation}
\mathcal{C}_{P}(t_0,d)
=\sum_{i=1}^{n_{obs}} (P_i^{obs} - P_i^{est}(d, \Delta_i) + t_0)^2
\end{equation}
and the combined $P$ and $pP$ phases
\begin{equation}
\mathcal{C}_{pP}(t_0,d)
=\sum_{i=1}^{n_{obs}} (P_i^{obs} - P_i^{est}(d, \Delta_i) + t_0)^2
+ (pP_i^{obs} - pP_i^{est}(d, \Delta_i) + t_0)^2
\end{equation}
Here $i$ is the observation counter and $\Delta_i$ the great-circle distance between the source and i'th observation.
```python
# Now we have the observed picks and can compute estimated pick times - let's make an objective function. To do this, let's
# grid search over candidate depths from 60 - 180 km and origin times from [-30 s, 30 s]
nobs = len(data)
nt = 31
nd = 81
relativeOriginTime = linspace(-30, 30, nt)
depths = linspace(60, 180, nd)
# Build up a travel time table at each depth and station distance
print("Computing travel times...")
P_travelTimeTable = zeros([nd, nobs])
pP_travelTimeTable = zeros([nd, nobs])
for idep in range(nd):
for iobs in range(nobs):
# Compute the P theoretical travel time at this event depth/station distance
        stationLat = data[iobs].stats.coordinates.latitude
        stationLon = data[iobs].stats.coordinates.longitude
P_travelTimeTable[idep, iobs] = getPhasePickTime(eventLat, eventLon, depths[idep],
stationLat, stationLon, phaseName=['P'])
# Compute the pP theoretical travel time at this event depth/station distance
pP_travelTimeTable[idep, iobs] = getPhasePickTime(eventLat, eventLon, depths[idep],
stationLat, stationLon, phaseName=['pP'])
# Loop on observations
# Loop on depths
print("Tabulating objective function...")
# Now tabulate two objective functions. The first objective function
sumResidualsSquared_P_phase = zeros([nt, nd])
sumResidualsSquared_pPandP_phase = zeros([nt, nd])
for idep in range(nd):
for it in range(nt):
P_estimate = P_travelTimeTable[idep,:] + relativeOriginTime[it] # P estimate travel time
pP_estimate = pP_travelTimeTable[idep,:] + relativeOriginTime[it] # pP estimate travel time
# Tabulate the residuals
P_residuals = observed_P_times - P_estimate
pP_residuals = observed_pP_times - pP_estimate
# Compute the sum squared of the residuals
sumResidualsSquared_P_phase[it, idep] = sum(P_residuals**2)
sumResidualsSquared_pPandP_phase[it, idep] = sum(P_residuals**2) + sum(pP_residuals**2)
# Loop on travel times
# Loop on depths
# Compute the square root of the objective functions to make them l2 norms
sumResidualsSquared_P_phase = 0.5*sqrt(sumResidualsSquared_P_phase)
sumResidualsSquared_pPandP_phase = 0.5*sqrt(sumResidualsSquared_pPandP_phase)
```
Computing travel times...
Tabulating objective function...
```python
# Plot the results
[X, Y] = meshgrid(relativeOriginTime, depths)
extent = [min(relativeOriginTime), max(relativeOriginTime), min(depths), max(depths)]
im = plt.imshow(sumResidualsSquared_P_phase, interpolation='bilinear', cmap=cm.viridis,
extent=extent, aspect=1./2., origin='lower')
plt.title("P Phase Objective Function")
plt.xlabel('Relative Origin Time (s)')
plt.ylabel('Depths (km)')
plt.show()
im = plt.imshow(sumResidualsSquared_pPandP_phase, interpolation='bilinear', cmap=cm.viridis,
extent=extent, aspect=1/2., origin='lower')
plt.title("pP and P Phase Objective Function")
plt.xlabel('Relative Origin Time (s)')
plt.ylabel('Depths (km)')
plt.show()
```
## Question
In this color scheme dark-blue is good and bright yellow is bad. Which plot shows a clear trade-off with origin time and depth?
## Question
If the hypocenter of an earthquake observed at teleseismic distances only included $P$ primary phases in the inversion - would you have high-confidence in the recovered depth parameter?
## Question
At local distances we see the same trade-off with origin time and depth. Can you identify a phase in a regional network like the PNSN that would help to resolve this depth/origin time ambiguity? Note that all phases leave upward from the source, so depth phases are not a valid option.
```python
```
# 3.2.3 Multiple Regression From Simple Univariate Regression
Suppose we have a *univariate* (p = 1) model with no intercept (3.23):
$$Y=X\beta+\varepsilon$$
The least squares estimate and residuals are (3.24):
$$
\begin{equation}
\hat{\beta} = \cfrac{\sum_1^N {x_iy_i}}{\sum_1^N {x_i^2}} \\
r_i = y_i - x_i\hat{\beta}
\end{equation}
$$
With the inner product:
$$
\begin{equation}
\hat{\beta} = \cfrac{\langle \mathbf{x}, \mathbf{y} \rangle}{\langle \mathbf{x}, \mathbf{x}\rangle}\\
\mathbf{r} = \mathbf{y} - \mathbf{x}\hat{\beta}
\end{equation}
$$
Suppose that the columns of the matrix **X** are orthogonal; that is $\langle \mathbf{x}_j, \mathbf{x}_k \rangle = 0$
then it is easy to check that $\hat{\beta_j} = \langle \mathbf{x}_j, \mathbf{y} \rangle / \langle \mathbf{x}_j, \mathbf{x}_j \rangle$, i.e. the inputs have no effect on each other's parameter estimates.
Suppose next that we have an intercept and a single input x (3.27):
$$\hat{\beta}_1 = \cfrac{\langle \mathbf{x} - \overline{x}\mathbf{1}, \mathbf{y} \rangle}{ \langle \mathbf{x} - \overline{x}\mathbf{1}, \mathbf{x} - \overline{x}\mathbf{1} \rangle}$$
We can view the estimate as the result of two simple regressions:
1. Regress **x** on **1** to produce the residual $\mathbf{z} = \mathbf{x} - \overline{x}\mathbf{1}$
2. Regress **y** on the residual **z** to give the coefficient $\hat{\beta}_1$.
Regress **b** on **a** means $\hat{\gamma}=\langle \mathbf{a},\mathbf{b} \rangle / \langle \mathbf{a}, \mathbf{a}\rangle$ and the residual vector $\mathbf{b} - \hat{\gamma}\mathbf{a}$.
This recipe generalizes to the case of *p* inputs, as shown in Algorithm 3.1.
**Algorithm 3.1 Regression by Successive Orthogonalization**
1. $\mathbf{z}_0 = \mathbf{x}_0 = \mathbf{1}$
2. For $j = 1, 2, \cdots, p$
    * Regress $\mathbf{x}_j$ on $\mathbf{z}_0,...,\mathbf{z}_{j - 1}$ to produce $\hat{\gamma}_{lj}=\langle \mathbf{z}_l, \mathbf{x}_j \rangle / \langle \mathbf{z}_l,\mathbf{z}_l \rangle$ $l=0,\cdots,j-1$, and residual vector $\mathbf{z}_j=\mathbf{x}_j - \sum_{k=0}^{j-1} \hat{\gamma}_{kj}\mathbf{z}_k$
3. Regress $\mathbf{y}$ on the residual $\mathbf{z}_p$ to give the estimate $\hat{\beta}_p$
```python
import numpy as np
import pandas as pd
from scipy import stats, linalg
df = pd.read_csv('../data/prostate/prostate.data', delimiter='\t', index_col=0)
mask_train = df.pop('train')
df_y = df.pop('lpsa')
df = df.apply(stats.zscore)
def orthogonalize(X):
p = X.shape[1]
G = np.eye(p)
Z = X.copy()
for j in range(1, p):
for l in range(j):
G[l, j] = np.dot(Z[:, l], X[:, j]) / np.dot(Z[:, l], Z[:, l])
for k in range(j):
Z[:, j] -= G[k, j] * Z[:, k]
return Z, G
```
The result of this algorithm is (3.28):
$$\hat{\beta}_p=\cfrac{\langle \mathbf{z}_p, \mathbf{y} \rangle}{\langle \mathbf{z}_p,\mathbf{z}_p \rangle}$$
If $\mathbf{x}_p$ is highly correlated with some of the other $\mathbf{x}_k$'s, the residual vector $\mathbf{z}_p$ will be close to zero, and from (3.28) the coefficient $\hat{\beta}_p$ will be unstable.
From (3.28) we also obtain an alternative formula for the variance estimates, (3.29):
$$Var(\hat{\beta}_p) = \cfrac{\sigma^2}{\langle \mathbf{z}_p, \mathbf{z}_p \rangle}=\cfrac{\sigma^2}{||\mathbf{z}_p||^2} $$
In other words, the precision with which we can estimate $\hat{\beta}_p$ depends on the length of the residual vector $\mathbf{z}_p$.
Algorithm 3.1 is known as the *Gram–Schmidt* procedure for multiple regression. We can represent step 2 of Algorithm 3.1 in matrix form (3.30):
$$\mathbf{X}=\mathbf{Z\Gamma}$$
where $\mathbf{Z}$ has as columns the $z_j$ (in order), and $\mathbf{\Gamma}$ is the upper triangular matrix
with entries $\hat{\gamma}_{kj}$. Introducing the diagonal matrix $\mathbf{D}$ with $D_{jj}=||z_j||$, we get (3.31):
$$\mathbf{X}=\mathbf{Z}\mathbf{D}^{-1}\mathbf{D}\mathbf{\Gamma}=\mathbf{QR}$$
the so-called QR decomposition of $\mathbf{X}$. Here $\mathbf{Q}$ is an N × (p +1) orthogonal
matrix, $\mathbf{Q}^T\mathbf{Q} = \mathbf{I}$, and **R** is a (p + 1) × (p + 1) upper triangular matrix.
The least squares solution is given by:
$$
\hat{\beta}=\mathbf{R}^{-1}\mathbf{Q}^T\mathbf{y}
$$
*Proof*:
$$
\begin{equation}
\mathbf{X}^T\mathbf{y}=\mathbf{X}^T\mathbf{X}\hat{\beta}\\
\mathbf{R}^T\mathbf{Q}^T\mathbf{y}=\mathbf{R}^T\mathbf{Q}^T\mathbf{Q}\mathbf{R}\hat{\beta}\\
\mathbf{R}^T\mathbf{Q}^T\mathbf{y}=\mathbf{R}^T\mathbf{R}\hat{\beta}\\
\mathbf{Q}^T\mathbf{y}=\mathbf{R}\hat{\beta}\\
\end{equation}
$$
And the predicted training values:
$$
\hat{\mathbf{y}}=\mathbf{QQ}^T\mathbf{y}
$$
*Proof*:
$$
\begin{align}
\hat{\mathbf{y}}&=\mathbf{X}\hat{\beta}\\
&=\mathbf{QR}\mathbf{R}^{-1}\mathbf{Q}^T\mathbf{y}\\
&=\mathbf{QQ}^T\mathbf{y}
\end{align}
$$
We can obtain from it not just $\hat{\beta}_p$, but also the entire multiple least squares fit.
*Proof*:
We can easily derive that:
$$
\mathbf{R}\hat{\beta}=\mathbf{Q}^T\mathbf{y}
$$
which can be expanded into:
$$
\begin{equation}
\begin{bmatrix}
R_{00} & R_{01} & \dots & R_{0p} \\
0 & R_{11} & \dots & R_{1p} \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \dots & R_{pp}
\end{bmatrix}
\begin{bmatrix}
\hat{\beta_0} \\
\hat{\beta_1} \\
\vdots \\
\hat{\beta_p}
\end{bmatrix}
=
\begin{bmatrix}
{Q_{0}}^T\mathbf{y} \\
{Q_{1}}^T\mathbf{y} \\
\vdots \\
{Q_{p}}^T\mathbf{y}
\end{bmatrix}
\end{equation}
$$
Now by applying the backward substitution it is possible to obtain the entire multiple least squares fit. For example to find the $\hat{\beta}_p$:
$$
\begin{equation}
R_{pp}\hat{\beta}_p = {Q_{p}}^T\mathbf{y}\\
\hat{\beta}_p = \cfrac{\langle Q_p, \mathbf{y} \rangle}{R_{pp}}=\cfrac{\langle \mathbf{z}_p, \mathbf{y} \rangle}{\langle \mathbf{z}_p,\mathbf{z}_p \rangle}
\end{equation}
$$
```python
def least_squares_qr(data_x, data_y):
X = np.c_[np.ones((len(data_x), 1)), data_x]
Z, G = orthogonalize(X)
D = linalg.norm(Z, axis=0)
Q = Z / D
R = np.diag(D) @ G
beta = linalg.solve_triangular(R, Q.T @ data_y)
return beta
beta = least_squares_qr(df[mask_train == 'T'].to_numpy(), df_y[mask_train == 'T'].to_numpy())
print ("Coefficient: ", beta)
```
Coefficient: [ 2.46493292 0.67601634 0.26169361 -0.14073374 0.20906052 0.30362332
-0.28700184 -0.02119493 0.26557614]
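As a quick sanity check (a hedged aside), the same coefficients should come out of NumPy's built-in least-squares solver applied to the intercept-augmented design matrix:
```python
# Hedged sanity check against numpy's least-squares solver on the same training data.
data_x = df[mask_train == 'T'].to_numpy()
data_y = df_y[mask_train == 'T'].to_numpy()
X = np.c_[np.ones((len(data_x), 1)), data_x]
beta_lstsq, *_ = np.linalg.lstsq(X, data_y, rcond=None)
print("Max |beta_qr - beta_lstsq|:", np.abs(beta - beta_lstsq).max())  # should be numerically negligible
```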
# Jastrow Factor Derivatives
This notebook calculates the expressions for the derivatives of the following Jastrow function:
$$J(\mathbf{X}) = \prod_{i < j} \exp\left(\frac{\alpha r_{ij}}{1 + \beta r_{ij}}\right)$$
with $N$ particles in $D$ dimensions, $\mathbf{X}\in\mathbb{R}^{N\times D}$ and $r_{ij} = ||\mathbf{X_i} - \mathbf{X_j}||$.
```python
%reload_ext blackcellmagic
from sympy import *
init_printing()
```
```python
N, D, i, j, k, l, d = symbols("N D i j k l d", integer=True)
X = IndexedBase("X", shape=(N, D))
```
## Definition
```python
class r(Function):
@classmethod
def eval(cls, i, j):
if i.is_Number and j.is_Number:
return sqrt(Sum((X[i, d] - X[j, d]) ** 2, (d, 1, D)))
def doit(self, **kwargs):
i, j = self.args
return sqrt(Sum((X[i, d] - X[j, d]) ** 2, (d, 1, D)))
def _eval_derivative(self, x):
return self.doit()._eval_derivative(x)
def J(X):
alpha, beta = symbols(r"\alpha \beta", real=True)
exponent = Sum(alpha * r(i, j) / (1 + beta * r(i, j)), (i, 1, N - 1), (j, i + 1, N))
return exp(exponent), (alpha, beta)
Jastrow, (alpha, beta) = J(X)
Jastrow
```
```python
log_Jastrow = expand_log(log(Jastrow), force=True)
log_Jastrow
```
## Derivative w.r.t. $\beta$
```python
dlogJ_dBeta = diff(log_Jastrow, beta, 1)
dlogJ_dBeta
```
## Derivative w.r.t. $X_{kl}$
```python
dlogJ_dXkl = diff(log_Jastrow, X[k, l], 1)
dlogJ_dXkl
```
```python
# Manually simplify the above:
dlogJ_dXkl = Sum(
alpha
* beta
* (KroneckerDelta(k, i) - KroneckerDelta(k, j))
* (X[i, l] - X[j, l])
/ (beta * r(i, j) + 1)
/ (beta * r(i, j) * (beta * r(i, j) + 1)),
(i, 1, N - 1),
(j, i + 1, N),
)
dlogJ_dXkl
```
## Second derivative w.r.t. $X_{kl}$
```python
d2J_dXkl2 = dlogJ_dXkl ** 2 + diff(dlogJ_dXkl, X[k, l], 1)
d2J_dXkl2.subs(beta, 0)
```
```python
# Manually simplify the above:
d2J_dXkl2 = dlogJ_dXkl ** 2 + Sum(
alpha
* (KroneckerDelta(k, i) - KroneckerDelta(k, j)) ** 2
* (X[i, l] - X[j, l]) ** 2
/ (beta * r(i, j) + 1) ** 3
/ r(i, j) ** 3
* (
(beta * r(i, j) + 1) * r(i, j) ** 2 / (X[i, l] - X[j, l]) ** 2
- 3 * beta * r(i, j)
- 1
),
(i, 1, N - 1),
(j, i + 1, N),
)
d2J_dXkl2
```
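As a numerical cross-check of the derivative above (a hedged, self-contained sketch in plain NumPy, separate from the symbolic derivation), one can compare the simplified expression for $\partial \log J/\partial X_{kl}$ against a central finite difference of $\log J$:
```python
import numpy as np

def log_jastrow(Xc, a, b):
    """log of the Pade-Jastrow factor for a configuration Xc of shape (N, D)."""
    val, N = 0.0, Xc.shape[0]
    for i in range(N - 1):
        for j in range(i + 1, N):
            r = np.linalg.norm(Xc[i] - Xc[j])
            val += a * r / (1 + b * r)
    return val

def dlogJ_dXkl_numeric(Xc, a, b, kk, ll):
    """The simplified analytic derivative derived above, evaluated numerically."""
    val, N = 0.0, Xc.shape[0]
    for i in range(N - 1):
        for j in range(i + 1, N):
            r = np.linalg.norm(Xc[i] - Xc[j])
            delta = (kk == i) - (kk == j)
            val += a * delta * (Xc[i, ll] - Xc[j, ll]) / (r * (1 + b * r) ** 2)
    return val

rng = np.random.default_rng(0)
Xc = rng.normal(size=(4, 3))
a, b, kk, ll, h = 0.7, 0.3, 2, 1, 1e-6
Xp, Xm = Xc.copy(), Xc.copy()
Xp[kk, ll] += h
Xm[kk, ll] -= h
fd = (log_jastrow(Xp, a, b) - log_jastrow(Xm, a, b)) / (2 * h)
print(fd, dlogJ_dXkl_numeric(Xc, a, b, kk, ll))  # the two values should agree closely
```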
```python
```
```python
%matplotlib inline
import numpy as np
import pylab as pl
import sympy as sym
from sympy.functions import Abs
#from sympy import Abs, Symbol, S
```
## Goals of today:
- Check how good or bad are the estimates given in the theoretical lecture
- Compare Equispaced with Chebyshev
- Compute errors, plot error tables
- Implement Bernstein interpolation
- Compare Bernstein interpolation with Lagrangian interpolation
## Lagrangian part
The estimate we want to check:
$$
|| f -p || \leq ||f^{n+1}||_\infty \frac{||w(x)||_\infty}{(n+1)!}
$$
In order to do so we need to define symbolic and numerical functions. `Sympy` is a very useful package to handle symbolic expressions and to export them to numerical functions. At the beginning of this notebook it is imported with the command: `import sympy as sym`. Let's define a simple function:
**Hint:** Try to play around with derivatives, integrals...
```python
x = np.linspace(-10, 10, 2**10 + 1)
t = sym.symbols('t')
#We can define a function in these two equivalent ways
def runge(x):
return 1./(1. + x**2)
runge = lambda x:1. / (1 + x**2)
#We can now plot it
y = runge(x)
_ = pl.plot(x,y)
def l_infty(y):
return abs(y).max() # takes the abs of y values and then gets the max
l = np.linalg.norm(y, ord=np.inf)
print(l)
yy = np.random.rand(1000000)
print(len(yy))
l_infty = lambda y: np.linalg.norm(y, ord = np.inf)
l_infty(yy)
l_infty_0 = lambda y: abs(y).max()
def l_infty_1(y):
m = -1.0
for i in range(len(y)):
        m = max(m,abs(y[i]))
    return m
def l_infty_2(y):
    m = -1.0
    for i in y:
        m = max(m,abs(i))
    return m
%timeit l_infty(yy)
%timeit l_infty_0(yy) # the numpy solution is the best: it uses vectorized operations
%timeit l_infty_1(yy)
%timeit l_infty_2(yy)
```
```python
# Construct a symbolic function...
t = sym.var('t')
fs = 1.0/(1.0+t**2)
fs.diff(t,1)
# This will not work...
fs(x)
```
To make this function *digestible* by numpy we use the simple command `nf = sym.lambdify(t,f, 'numpy')`. This allows the function nf to be called with numpy arguments.
```python
nf = sym.lambdify(t, fs, "numpy")
nfprime = sym.lambdify(t, fs.diff(t,1), 'numpy')
_=pl.plot(x,nf(x))
_=pl.plot(x,nfprime(x))
```
```python
#def my_derivatives(f, n, t=t):
# return sym.lambdify(t,f.diff(t,n),'numpy')
my_derivatives = lambda f,n: sym.lambdify(t,f.diff(t,n), 'numpy')
fpp = my_derivatives(fs,2)
_=pl.plot(x,nf(x))
_=pl.plot(x,fpp(x))
```
```python
# Check for two functions..
function_set = [fs, sym.sin(2*sym.pi*t)]
for my_f in function_set:
print('**************')
    print(my_f)
    for i in range(5):
        print(l_infty(my_derivatives(my_f,i)(x)))
```
**************
1.0/(t**2 + 1.0)
1.0
0.649412355588
2.0
4.66644172318
24.0
**************
sin(2*pi*t)
1.0
6.28318530718
39.4784176044
248.050213442
1558.54545654
We aim at controlling all of the pieces of the inequality above, plot how terms behave with the degree, and see what happens `:)`
A good thing is to start from the beginning and control the term $||f-p||_\infty$. We recall that:
$$
p = \mathcal{L}^n f := \sum_{i=0}^n f(x_i) l^n_i(x),
$$
with
$$
l^n_i(x) := \prod_{j=0, j\neq i}^n \frac{(x-x_j)}{(x_i-x_j)} \qquad
i = 0, \dots, n.
$$
Let's implement this. We want to fill the matrix `Ln` with $n+1$ rows and as many columns as the number of points where we evaluate the function.
$$
\text{Ln}_{ij}:= l_i(x_j)
$$
so that
$$
\sum_{i=0}^n \text{Ln}_{ij}\, f(q_i) = \sum_{i=0}^n l_i(x_j) f(q_i) = (\mathcal{L}^nf)(x_j)
$$
A good idea would be to collect the few operations in a function, like this one:
```python
def lagrangian_interpolation_matrix(x,q):
...
return Ln
```
so that we can recall it whenever we need it.
**Hint:** I wouldn't call myself a good programmer, but I do my best to be like that. First construct the code in the main section of your program, run it, check that it works, then collect the precious commands you wrote into a function.
### Step 0
```python
n = 3
q = np.linspace(-5,5,n+1)
Ln = np.zeros((n+1, len(x)))
for i in range(n+1):
Ln[i] = np.ones_like(x)
for j in range(n+1):
if j!=i:
Ln[i] *= (x - q[j])/(q[i]-q[j])
#Equivalently
#Ln[i] = np.product([(x - q[j])/(q[i]-q[j]) for j in range(n+1) if j is not i], axis = 0)
_ = pl.plot(x, Ln.T)
```
## Step 1
Now we transform this into a function that takes the points where we want to compute the
matrix, and the interpolation points we use to define the basis.
```python
def lagrangian_interpolation_matrix(x,q):
Ln = np.zeros((len(q), len(x)))
for i in range(len(q)):
Ln[i] = np.ones_like(x)
for j in range(len(q)):
if j!=i:
Ln[i] *= (x - q[j])/(q[i]-q[j])
return Ln
def lagrangian_interpolation_matrix_one_liner(x,q):
Ln = np.zeros((len(q), len(x)))
for i in range(len(q)):
        Ln[i] = np.product([(x - q[j])/(q[i]-q[j]) for j in range(len(q)) if j != i], axis = 0)
return Ln
```
```python
Error = lagrangian_interpolation_matrix(x,q) - lagrangian_interpolation_matrix_one_liner(x,q)
print('Error:', np.linalg.norm(Error))
```
Error: 0.0
From the previous lecture we know that the mathematical expression:
$$
(\mathcal{L}^nf)(x_i) := \sum_{j=0}^n f(q_j) l^n_j(x_i) = (\text{Ln}^T f)_i
$$
can be easily translated into the `numpy` line:
```python
Ln.T.dot(f(q))
```
Let's give it a try:
```python
#f = lambda x: np.sin(2*np.pi*x)
f = lambda x: np.sin(x)
n = 3
q = np.linspace(-5,5,n+1)
Ln = lagrangian_interpolation_matrix(x,q)
_ = pl.plot(x, f(x))
_ = pl.plot(x, Ln.T.dot(f(q)))
_ = pl.plot(q, f(q),'ro')
err = f(x) - Ln.T.dot(f(q))
_ = pl.plot(x, err)
_ = pl.plot(q, f(q),'ro')
Error = l_infty(err)
print('Error:', Error)
```
We need a very simple function to evaluate the norm infinity of a function $||\cdot||_\infty$:
```python
def linfty_norm(fun):
# Your function here
return
```
Now compute the following
$||f-p||_\infty = $ `error`
$||f^{n+1}||_\infty = $ `nth_der`
$w(x) =\prod_{i=0}^n (x-q_i), \quad ||w(x)||_\infty = $ `w`
```python
w = lambda x, q: np.product([ x - q[i] for i in range(len(q))], axis = 0)
q = np.linspace(-5,5,10)
_ = pl.plot(x, w(x,q))
Ln = lagrangian_interpolation_matrix(x,q)
Error = l_infty(f(x) - Ln.T.dot(f(q)))
fs = sym.sin(2*sym.pi*t)
fp = my_derivatives(fs, len(q))
nth_der = l_infty(fp(q))
w_infty = l_infty(w(x,q))
UpperEstimate = nth_der * w_infty/np.math.factorial(len(q))
print(UpperEstimate)
```
```python
fs = sym.sin(2*sym.pi*t)
points = range(2,15)
UpperEstimate = []
for n in points:
q = np.linspace(-5,5,n)
Ln = lagrangian_interpolation_matrix(x,q)
Error = l_infty(f(x) - Ln.T.dot(f(q)))
fp = my_derivatives(fs, len(q))
nth_der = l_infty(fp(q))
w_infty = l_infty(w(x,q))
    UpperEstimate.append(nth_der * w_infty/np.math.factorial(len(q)))
    print(UpperEstimate[-1])
_= pl.semilogy(points, UpperEstimate)
```
If I didn't mess up the code, this is a good spot to play around with the function to be checked:
```python
# Test on one f
```
```python
# Make it a python function call
```
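One possible way to fill in the two cells above (a sketch, not the only solution — it reuses `x`, `t`, `l_infty`, `my_derivatives`, `w` and `lagrangian_interpolation_matrix` from earlier cells, and the Chebyshev nodes address one of the goals listed at the top of the notebook):
```python
def compare_error_and_estimate(fs, degrees, a=-5., b=5., chebyshev=False):
    """Return (true interpolation error, theoretical upper bound) for each degree."""
    nf = sym.lambdify(t, fs, 'numpy')
    errors, bounds = [], []
    for n in degrees:
        if chebyshev:
            # Chebyshev points of the first kind mapped to [a, b]
            q = 0.5*(a + b) + 0.5*(b - a)*np.cos((2*np.arange(n) + 1)*np.pi/(2*n))
        else:
            q = np.linspace(a, b, n)
        Ln = lagrangian_interpolation_matrix(x, q)
        errors.append(l_infty(nf(x) - Ln.T.dot(nf(q))))
        fp = my_derivatives(fs, len(q))
        bounds.append(l_infty(fp(x))*l_infty(w(x, q))/np.math.factorial(len(q)))
    return np.array(errors), np.array(bounds)

degrees = list(range(2, 15))
err_eq, _ = compare_error_and_estimate(sym.sin(2*sym.pi*t), degrees)
err_ch, _ = compare_error_and_estimate(sym.sin(2*sym.pi*t), degrees, chebyshev=True)
_ = pl.semilogy(degrees, err_eq, 'o-', degrees, err_ch, 's-')
_ = pl.legend(['equispaced', 'Chebyshev'])
```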
```python
```
## Computing partition function of the $2-$D Ising model using tensor network
The partition function of $2-$D ferromagnetic Ising model can be written as
$$Z(\beta)=\sum_\mathbf{s}\prod_{ij}e^{\beta s_is_j}=\mathbf{Tr}\left( \mathcal{A^{(1)}} \times \mathcal{A^{(2)}}\times\cdots\times \mathcal{A^{(L\times L)}}\right).$$ Here the last term denotes tensor contraction of all the tensors, each of which is given by $$\mathcal{A}^{(i)} = I\times_1\sqrt{\mathbf{B}}\times_2\sqrt{\mathbf{B}}\times_3\sqrt{\mathbf{B}}\times_4\sqrt{\mathbf{B}},$$ where $I$ is an identity tensor whose order equals the degree of node $i$, and $\mathbf{B}$ is the Boltzmann matrix $$
\mathbf{B}=\left[\begin{array}{l}e^{\beta }& e^{-\beta } \\ e^{-\beta}& e^{\beta}\end{array}\right].$$ $\sqrt{B}$ denotes the matrix square root of $B$, yielding $\sqrt{B}\times \sqrt{B} = B$.
Given the eigendecomposition of the semi-definite symmetric matrix $B$, $B=V\Sigma V^T$, a simple calculation gives
$$\sqrt{B}=V\sqrt{\Sigma}V^T=\frac{1}{2}\left[\begin{array}{l}\sqrt{2\cosh(\beta)}+\sqrt{2\sinh(\beta)} &\sqrt{2\cosh(\beta)} -\sqrt{2\sinh(\beta)} \\\sqrt{2\cosh(\beta)} -\sqrt{2\sinh(\beta)}& \sqrt{2\cosh(\beta)}+\sqrt{2\sinh(\beta)}\end{array}\right].$$
Also notice that when we are considering open boundary conditions, tensors on the boundaries are contracting to $\sqrt{\mathbf{B}}$ only from boundary edges.
```python
import torch,math
import numpy as np
from scipy.linalg import sqrtm
```
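As a quick sanity check of the closed form above (a hedged aside, using only the imports in the previous cell):
```python
# Check that sqrtm(B) squares back to B and matches the cosh/sinh closed form.
beta_check = 0.44068679350977147
B_check = np.array([[np.exp(beta_check), np.exp(-beta_check)],
                    [np.exp(-beta_check), np.exp(beta_check)]])
sqrtB = sqrtm(B_check)
c, s = np.sqrt(2*np.cosh(beta_check)), np.sqrt(2*np.sinh(beta_check))
closed_form = 0.5*np.array([[c + s, c - s], [c - s, c + s]])
print(np.allclose(sqrtB @ sqrtB, B_check), np.allclose(sqrtB, closed_form))  # both should be True
```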
## Boundary MPS method for contracting the $2-$ dimensional tensor network
* For convenience, the boundary tensors of the MPS are made into $3$-way tensors with an additional index of bond dimension $1$.
* Indexing of the $3$-way and $4$-way tensors is clockwise for tensors on the top half of the lattice.
```python
def get_lnZ(L,beta=0.44068679350977147,chi=16,mydtype=torch.float64,mydevice=torch.device('cpu')):
assert(L%2==0)
    B=torch.tensor(sqrtm(np.array([[np.exp(beta),np.exp(-beta)],[np.exp(-beta),np.exp(beta)]])),dtype=mydtype,device=mydevice) # square root of the Boltzmann matrix
A2=B@B #A2=torch.einsum("ij,ab,jb->ia",[B,B,I2])
A3=torch.einsum("i,j,k->ijk",B[:,0],B[:,0],B[:,0])+torch.einsum("i,j,k->ijk",B[:,1],B[:,1],B[:,1]) #A3=torch.einsum("ij,ab,cd,jbd->iac",[B,B,B,I3])
A4=torch.einsum("i,j,k,l->ijkl",B[:,0],B[:,0],B[:,0],B[:,0])+torch.einsum("i,j,k,l->ijkl",B[:,1],B[:,1],B[:,1],B[:,1]) # A4=torch.einsum("ij,ab,cd,xy,jbdy->iacx",[B,B,B,B,I4])
tensors=[]
tensors.append( [A2[None,:,:] if i==0 else( A2[:,:,None] if i==L-1 else A3) for i in range(L)] )
for j in range(1,L>>1):
tensors.append( [ A3[None,:,:,:] if i==0 else( A3[:,:,None,:] if i==L-1 else A4) for i in range(L) ] )
lnZ=0 # log of partition function
for head in range((L>>1)-1): # mps on the boundary is eating the next mpo, for L/2-1 times
[res,tensors[head+1]] = compress( eat(tensors[head][:],tensors[head+1][:]) , chi)
lnZ += res
return 2*lnZ
```
* Applying an MPO with bond dimension $2$ to an MPS results in a new MPS with a doubled bond dimension.
```python
def eat(mps,mpo):
return [ torch.einsum("ijk,abcj->iabkc",mps[i],mpo[i]).contiguous().view(mps[i].shape[0]*mpo[i].shape[0],2,-1) for i in range(len(mps))]
```
* Left canonicalization using sequential QR decompositions. After that, all tensors except the right-most one are isometries, i.e. their unfolded matrices are column-orthogonal.
* Then we do sequential singular value decompositions with a cut-off on the spectrum to reduce the bond dimensions.
```python
def compress(mps,chi):
residual=0
for i in range(len(mps)-1): # From left to right, sweep once doing qr decompositions
Q,R=torch.qr(mps[i].contiguous().view(mps[i].shape[0]*2,-1))
mps[i] = Q.contiguous().view(mps[i].shape[0],2,-1)
mps[i+1] = torch.einsum("ij,jab->iab",[R,mps[i+1]])
    for i in range(len(mps)-1,0,-1): # From right to left, sweep once using svd on the tensor merged from two consecutive tensors.
[U,s,V]=torch.svd( torch.einsum("ijk,kab->ijab",mps[i-1],mps[i]).view(mps[i-1].shape[0]*2,mps[i].shape[2]*2) )
mps[i] = V[:,:chi].t().contiguous().view(-1,2,mps[i].shape[2])
mps[i-1] = (U[:,:chi]@torch.diag(s[:chi])).contiguous().view(mps[i-1].shape[0],2,-1)
tnorm=mps[i-1].norm()
mps[i-1] /= tnorm
residual += math.log(tnorm)
return residual,mps
```
```python
import kacward
L=16
beta_c=0.44068679350977147
chi=16
print("L=",L," chi=",chi)
lnZ=get_lnZ(L=L,beta=beta_c,chi=chi);print("lnZ_TN=",lnZ/L**2)
lnZ_exact=kacward.lnZ_2d_ferro_Ising(L,beta_c);print("lnZ_Exact=",lnZ_exact/L**2)
print("|lnZ-lnZ_exact|=%.2g"%(abs(lnZ-lnZ_exact)/L**2))
```
L= 16 chi= 16
lnZ_TN= 0.9085922273659681
lnZ_Exact= 0.9085922273659632
|lnZ-lnZ_exact|=4.9e-15
```python
```
```python
%matplotlib inline
import matplotlib.pyplot as plt
L=16
lnZ_exact=kacward.lnZ_2d_ferro_Ising(L,beta_c);
chis=[2, 3, 4, 5, 6, 7, 8, 9, 10, 12 ,14];
diff=np.abs(np.array([get_lnZ(L=L,beta=beta_c,chi=chi)/L**2 for chi in chis])-lnZ_exact/L**2)
plt.rcParams['figure.figsize']=(15,3.5)
plt.rcParams['font.size']= 20
plt.plot(np.array(chis),diff,'ro-',linewidth=2,markersize=5)
plt.yscale('log')
plt.xlabel('Bond dimension',fontsize=30)
plt.ylabel('Error',fontsize=30)
plt.title('L=16',fontsize=30)
plt.show()
input("Press any key to compute L=32")
L=32
lnZ_exact=kacward.lnZ_2d_ferro_Ising(L,beta_c);
chis=[2, 3, 4, 5, 6, 7, 8, 9, 10, 12 ,14];
diff=np.abs(np.array([get_lnZ(L=L,beta=beta_c,chi=chi)/L**2 for chi in chis])-lnZ_exact/L**2)
plt.rcParams['figure.figsize']=(15,3.5)
plt.plot(np.array(chis),diff,'bx-',linewidth=2,markersize=5)
plt.yscale('log')
plt.xlabel('Bond dimension',fontsize=30)
plt.ylabel('Error',fontsize=30)
plt.title('L=32',fontsize=30);
```
## Onsager solution
What we are going to compare against is the Onsager solution, with $\ln Z$, the critical temperature $\beta_c$, and the spontaneous magnetization given as
\begin{align} \ln Z&=\ln 2 +\frac{1}{8\pi^2}\int_0^{2\pi}d\theta_1\int_0^{2\pi}d\theta_2\ln\left[ \cosh^2(2\beta)-\sinh(2\beta)\cos(\theta_1)-\sinh(2\beta)\cos(\theta_2) \right]\\
\beta_c&=\frac{\ln (1+\sqrt{2})}{2},\\
m_{\beta \gt \beta_c}&=\left[ 1-\sinh^{-4}(2\beta) \right]^{\frac{1}{8}}.
\end{align}
```python
from scipy.integrate import nquad
def m_exact(beta):
if(beta>0.5*math.log(1+math.sqrt(2))):
return math.pow((1- math.pow(math.sinh(2*beta),-4)),1/8 )
else:
return 0
def Onsager(beta):
if(beta>0.5*math.log(1+math.sqrt(2))):
m = math.pow((1- math.pow(math.sinh(2*beta),-4)),1/8 )
else:
m=0
def func_Osg(theta1,theta2):
return math.log( math.cosh(2*beta)*math.cosh(2*beta) - math.sinh(2*beta)*math.cos(theta1)-math.sinh(2*beta)*math.cos(theta2) )
lnZ=nquad(func_Osg,[[0, 2*math.pi],[0, 2*math.pi]])[0]/(8*math.pi*math.pi)+math.log(2)
return m,lnZ
[m,lnZ_Onsager]=Onsager(beta_c)
lnZ_Onsager
```
0.9296953983416107
```python
import time
chi=16
Ls=[4,8,16,32,64,128,170,200]
time_used=[]
plt.rcParams['figure.figsize']=(20,3.5)
plt.rcParams['font.size']= 20
g, ax = plt.subplots(1,2)
lnZ=[]
for L in Ls:
t1=time.time()
res=get_lnZ(L=L,beta=beta_c,chi=chi)/L**2
lnZ.append(res)
t=time.time()-t1
time_used.append(t)
print("L=%d\tLnZ=%.15f\t lnZ_Onsager=%.15f \t time %.2f sec."%(L,res,lnZ_Onsager,t))
Ls=np.array(Ls)
ax[0].plot(Ls,np.array(lnZ),'ro-',linewidth=2,markersize=5)
ax[0].plot(Ls,Ls-Ls+lnZ_Onsager,'k--',linewidth=2,markersize=5)
ax[0].set_xlabel('L',fontsize=30)
ax[0].set_xlim(min(Ls)-0.1,max(Ls)+0.1)
ax[0].set_ylabel('lnZ',fontsize=30)
ax[0].legend(['Tensor Network','Onsager'])
ax[1].plot(np.array(Ls),np.array(time_used),'bx-',linewidth=2,markersize=5)
ax[1].set_xlim(min(Ls)-0.1,max(Ls)+0.1)
ax[1].set_xlabel('L',fontsize=30)
ax[1].set_ylabel('time(sec.)',fontsize=30);
```
# Question: How to obtain $L\to\infty$ results using tensor networks efficiently?
# Controlling a system
**automated irrigation system**
* input: hydration level of soil
* desired reference: "kinda damp"
* disturbances: rain (do nothing until water evaporates); sun (output water); fauna
```python
# "magic" commands, prefaced with "%", changes settings in the notebook
# this ensures plots are embedded in notebook web page
%matplotlib inline
# pdb = Python debugger, so this command turns the debugger OFF
%pdb off
# numpy = numerical Python, implements arrays (/ matrices)
import numpy as np
# limit number of decimal places printed for floating-point numbers
np.set_printoptions(precision=3)
# scipy = scientific Python, implements operations on arrays / matrices
import scipy as sp
# linalg = linear algebra, implements eigenvalues, matrix inverse, etc
from scipy import linalg as la
# optimize = optimization, root finding, etc
from scipy import optimize as op
# produce matlab-style plots
import matplotlib as mpl
# increase font size on plots
mpl.rc('font',**{'size':18})
# use LaTeX to render symbols
mpl.rc('text',usetex=False)
# animation
from matplotlib import animation as ani
# Matlab-style plotting
import matplotlib.pyplot as plt
# symbolic computation, i.e. computer algebra (like Mathematica, Wolfram Alpha)
import sympy as sym
def Jacobian(g,y,d=1e-4):
"""
approximate derivative via finite-central-differences
input:
g - function - g : R^n -> R^m
y - n array
(optional)
d - scalar - finite differences displacement parameter
output:
Dg(y) - m x n - approximation of Jacobian of g at y
"""
# given $g:\mathbb{R}^n\rightarrow\mathbb{R}^m$:
# $$D_y g(y)e_j \approx \frac{1}{2\delta}(g(y+\delta e_j) - g(y - \delta e_j)),\ \delta\ll 1$$
e = np.identity(len(y))
Dyg = []
for j in range(len(y)):
Dyg.append((.5/d)*(g(y+d*e[j]) - g(y-d*e[j])))
return np.array(Dyg).T
def numerical_simulation(f,t,x,t0=0.,dt=1e-4,ut=None,ux=None,utx=None,return_u=False):
"""
simulate x' = f(x,u)
input:
f : R x X x U --> X - vector field
X - state space (must be vector space)
U - control input set
t - scalar - final simulation time
x - initial condition; element of X
(optional:)
t0 - scalar - initial simulation time
dt - scalar - stepsize parameter
return_u - bool - whether to return u_
(only one of:)
ut : R --> U
ux : X --> U
utx : R x X --> U
output:
t_ - N array - time trajectory
x_ - N x X array - state trajectory
(if return_u:)
u_ - N x U array - state trajectory
"""
t_,x_,u_ = [t0],[x],[]
inputs = sum([1 if u is not None else 0 for u in [ut,ux,utx]])
assert inputs <= 1, "more than one of ut,ux,utx defined"
if inputs == 0:
assert not return_u, "no input supplied"
else:
if ut is not None:
u = lambda t,x : ut(t)
elif ux is not None:
u = lambda t,x : ux(x)
elif utx is not None:
u = lambda t,x : utx(t,x)
while t_[-1]+dt < t:
if inputs == 0:
_t,_x = t_[-1],x_[-1]
dx = f(t_[-1],x_[-1]) * dt
else:
_t,_x,_u = t_[-1],x_[-1],u(t_[-1],x_[-1])
dx = f(_t,_x,_u) * dt
u_.append( _u )
x_.append( _x + dx )
t_.append( _t + dt )
if return_u:
return np.asarray(t_),np.asarray(x_),np.asarray(u_)
else:
return np.asarray(t_),np.asarray(x_)
```
Automatic pdb calling has been turned OFF
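As a quick sanity check of the `Jacobian` helper defined above, we can compare it against a hand-computed derivative (this example is my addition; the function `g` below is made up for illustration):
```python
# finite-difference Jacobian vs. the analytic one for a simple test function
def g(y):
    return np.array([y[0]**2 + y[1], np.sin(y[1])])

y0 = np.array([1.0, 0.5])
print(Jacobian(g, y0))
# analytic Jacobian at y0: [[2*y0[0], 1], [0, cos(y0[1])]]
print(np.array([[2*y0[0], 1.0], [0.0, np.cos(y0[1])]]))
```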
## plotting output from transfer function
$G_{yv}$
```python
b = 1
a = 1
k_i = 1
k_p = 1
x0 = np.array([5,0])
A = np.array([[0,1],[-b*k_i,-a-b*k_p]])
B = np.array([[0],[1]])
def f(t,x):
u = 1
xdot = np.squeeze(np.dot(x, A.T) + u * B.T)
return xdot
```
```python
t = 30. # sec; simulation duration
t_,x_ = numerical_simulation(f,t,x0)
y_,z_ = x_.T
v_ = np.dot(x_, np.array([0, b]))
plt.figure(figsize=(8,8))
ax = plt.subplot(3,1,1)
ax.plot(t_,y_)
ylim = ax.get_ylim()
# for _ in [5., 20.]:
# ax.plot(_*np.ones(2),ylim,'k--')
# ax.plot(t_,v_r(t_),'k--')
# ax.set_ylim(ylim)
# ax.set_xticklabels([])
# ax.set_ylabel(r'velocity $y$')
# ax = plt.subplot(3,1,2)
# ax.plot(t_,v(t_))
# ylim = ax.get_ylim()
# for _ in [5., 20.]:
# ax.plot(_*np.ones(2),ylim,'k--')
# ax.set_ylim(ylim)
# ax.set_xticklabels([])
# ax.set_ylabel(r'external force $v$')
# ax = plt.subplot(3,1,3)
# ax.plot(t_,u(t_,y_,z_))
# ylim = ax.get_ylim()
# for _ in [5., 20.]:
# ax.plot(_*np.ones(2),ylim,'k--')
# ax.set_ylim(ylim)
# ax.set_ylabel(r'input $u$')
# ax.set_xlabel(r'time $t$ (sec)')
plt.tight_layout(); # semicolon suppresses text printout
```
```python
v_ = np.dot(x_,np.array([0, b]))
plt.figure(figsize=(8,8))
ax = plt.subplot(3,1,1)
ax.plot(t_,v_)
ax.set_ylabel('y(t)')
```
```python
```
*Source: tutorial3.ipynb — my-13/447 (CC0-1.0 license)*
# Linear Algebra using SymPy
## Introduction
This notebook is a short tutorial of Linear Algebra calculation using SymPy. For further information refer to SymPy official [tutorial](http://docs.sympy.org/latest/tutorial/index.html).
You can also check the [SymPy in 10 minutes](./SymPy_in_10_minutes.ipynb) tutorial.
```python
from sympy import *
init_session()
```
IPython console for SymPy 1.0 (Python 2.7.13-64-bit) (ground types: python)
These commands were executed:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
>>> init_printing()
Documentation can be found at http://docs.sympy.org/1.0/
A matrix $A \in \mathbb{R}^{m\times n}$ is a rectangular array of real number with $m$ rows and $n$ columns. To specify a matrix $A$, we specify the values for its components as a list of lists:
```python
A = Matrix([
[3, 2, -1, 1],
[2, -2, 4, -2],
[-1, S(1)/2, -1, 0]])
display(A)
```
$$\left[\begin{matrix}3 & 2 & -1 & 1\\2 & -2 & 4 & -2\\-1 & \frac{1}{2} & -1 & 0\end{matrix}\right]$$
We can access the matrix elements using square brackets, we can also use it for submatrices
```python
A[0, 1] # row 0, column 1
```
```python
A[0:2, 0:3] # top-left 2x3 submatrix
```
$$\left[\begin{matrix}3 & 2 & -1\\2 & -2 & 4\end{matrix}\right]$$
We can also create some common matrices. Let us create an identity matrix
```python
eye(2)
```
$$\left[\begin{matrix}1 & 0\\0 & 1\end{matrix}\right]$$
```python
zeros(2, 3)
```
$$\left[\begin{matrix}0 & 0 & 0\\0 & 0 & 0\end{matrix}\right]$$
We can use algebraic operations like addition $+$, subtraction $-$, multiplication $*$, and exponentiation $**$ with ``Matrix`` objects.
```python
B = Matrix([
[2, -3, -8],
[-2, -1, 2],
[1, 0, -3]])
C = Matrix([
[sin(x), exp(x**2), 1],
[0, cos(x), 1/x],
[1, 0, 2]])
```
```python
B + C
```
$$\left[\begin{matrix}\sin{\left (x \right )} + 2 & e^{x^{2}} - 3 & -7\\-2 & \cos{\left (x \right )} - 1 & 2 + \frac{1}{x}\\2 & 0 & -1\end{matrix}\right]$$
```python
B ** 2
```
$$\left[\begin{matrix}2 & -3 & 2\\0 & 7 & 8\\-1 & -3 & 1\end{matrix}\right]$$
```python
C ** 2
```
$$\left[\begin{matrix}\sin^{2}{\left (x \right )} + 1 & e^{x^{2}} \sin{\left (x \right )} + e^{x^{2}} \cos{\left (x \right )} & \sin{\left (x \right )} + 2 + \frac{e^{x^{2}}}{x}\\\frac{1}{x} & \cos^{2}{\left (x \right )} & \frac{1}{x} \cos{\left (x \right )} + \frac{2}{x}\\\sin{\left (x \right )} + 2 & e^{x^{2}} & 5\end{matrix}\right]$$
```python
tan(x) * B ** 5
```
$$\left[\begin{matrix}52 \tan{\left (x \right )} & 27 \tan{\left (x \right )} & - 28 \tan{\left (x \right )}\\- 2 \tan{\left (x \right )} & - \tan{\left (x \right )} & - 78 \tan{\left (x \right )}\\11 \tan{\left (x \right )} & 30 \tan{\left (x \right )} & 57 \tan{\left (x \right )}\end{matrix}\right]$$
And the ``transpose`` of the matrix, that flips the matrix through its main diagonal:
```python
A.transpose() # the same as A.T
```
$$\left[\begin{matrix}3 & 2 & -1\\2 & -2 & \frac{1}{2}\\-1 & 4 & -1\\1 & -2 & 0\end{matrix}\right]$$
## Row operations
```python
M = eye(4)
```
```python
M[1, :] = M[1, :] + 5*M[0, :]
```
```python
M
```
$$\left[\begin{matrix}1 & 0 & 0 & 0\\5 & 1 & 0 & 0\\0 & 0 & 1 & 0\\0 & 0 & 0 & 1\end{matrix}\right]$$
The notation ``M[1, :]`` refers to an entire row of the matrix. The first argument specifies the 0-based row index; for example, the first row of ``M`` is ``M[0, :]``. The code example above implements the row operation $R_2 \leftarrow R_2 + 5R_1$. To scale a row by a constant $c$, use ``M[1, :] = c*M[1, :]``. To swap rows $i$ and $j$, we can use the Python tuple-assignment syntax ``M[i, :], M[j, :] = M[j, :], M[i, :]``.
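As a short illustration (not part of the original tutorial), the scaling and swapping operations look like this:
```python
M = eye(4)
M[2, :] = 7*M[2, :]                     # scale row 2 by the constant 7
M[0, :], M[3, :] = M[3, :], M[0, :]     # swap rows 0 and 3
# the same swap can also be done in place with M.row_swap(0, 3)
M
```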
## Reduced row echelon form
The Gauss-Jordan elimination procedure is a sequence of row operations that can be performed on any matrix to bring it to its _reduced row echelon form_ (RREF). In Sympy, matrices have a ``rref`` method that compute it:
```python
A.rref()
```
$$\left ( \left[\begin{matrix}1 & 0 & 0 & 1\\0 & 1 & 0 & -2\\0 & 0 & 1 & -2\end{matrix}\right], \quad \left [ 0, \quad 1, \quad 2\right ]\right )$$
It returns a tuple: the first entry is the RREF of the matrix $A$, and the second gives the locations of the leading ones (pivots). If we just want the RREF, we can take the first entry of the tuple, i.e.
```python
A.rref()[0]
```
$$\left[\begin{matrix}1 & 0 & 0 & 1\\0 & 1 & 0 & -2\\0 & 0 & 1 & -2\end{matrix}\right]$$
## Matrix fundamental spaces
Consider the matrix $A \in \mathbb{R}^{m\times n}$. The fundamental spaces of a matrix are its column space $\mathcal{C}(A)$, its null space $\mathcal{N}(A)$, and its row space $\mathcal{R}(A)$. These vector spaces are important when we consider the matrix product $A\mathbf{x} = \mathbf{y}$ as a linear transformation $T_A:\mathbb{R}^n\rightarrow \mathbb{R}^m$ of the input vector $\mathbf{x}\in\mathbb{R}^n$ to produce an output vector $\mathbf{y} \in \mathbb{R}^m$.
**Linear transformations** $T_A: \mathbb{R}^n \rightarrow \mathbb{R}^m$ can be represented as $m\times n$ matrices. The fundamental spaces of a matrix $A$ gives us information about the domain and image of the linear transformation $T_A$. The column space $\mathcal{C}(A)$ is the same as the image space $\mathrm{Im}(T_A)$ (the set of all possible outputs). The null space $\mathcal{N}(A)$ is also called kernel $\mathrm{Ker}(T_A)$, and is the set of all input vectors that are mapped to the zero vector. The row space $\mathcal{R}(A)$ is the orthogonal complement of the null space, i.e., the vectors that are mapped to vectors different from zero. Input vectors in the row space of $A$ are in a one-to-one correspondence with the output vectors in the column space of $A$.
Let us see how to compute these spaces, or a base for them!
The non-zero rows in the reduced row echelon form of $A$ are a basis for its row space, i.e.
```python
[A.rref()[0][row, :] for row in A.rref()[1]]
```
$$\left [ \left[\begin{matrix}1 & 0 & 0 & 1\end{matrix}\right], \quad \left[\begin{matrix}0 & 1 & 0 & -2\end{matrix}\right], \quad \left[\begin{matrix}0 & 0 & 1 & -2\end{matrix}\right]\right ]$$
The column space of $A$ is the span of the columns of $A$ that contain the pivots.
```python
[A[:, col] for col in A.rref()[1]]
```
$$\left [ \left[\begin{matrix}3\\2\\-1\end{matrix}\right], \quad \left[\begin{matrix}2\\-2\\\frac{1}{2}\end{matrix}\right], \quad \left[\begin{matrix}-1\\4\\-1\end{matrix}\right]\right ]$$
We can also use the ``columnspace`` method
```python
A.columnspace()
```
$$\left [ \left[\begin{matrix}3\\2\\-1\end{matrix}\right], \quad \left[\begin{matrix}2\\-2\\\frac{1}{2}\end{matrix}\right], \quad \left[\begin{matrix}-1\\4\\-1\end{matrix}\right]\right ]$$
Note that we took columns from the original matrix and not from its RREF.
To find (a base for) the null space of $A$ we use the ``nullspace`` method:
```python
A.nullspace()
```
$$\left [ \left[\begin{matrix}-1\\2\\2\\1\end{matrix}\right]\right ]$$
## Determinants
The determinant of a matrix, denoted by $\det(A)$ or $|A|$, is a useful value that can be computed from the elements of a square matrix. It can be viewed as the scaling factor of the transformation described by the matrix.
```python
M = Matrix([
[1, 2, 2],
[4, 5, 6],
[7, 8, 9]])
```
```python
M.det()
```
## Matrix inverse
For invertible matrices (those with $\det(A)\neq 0$), there is an inverse matrix $A^{-1}$ that have the _inverse_ effect (if we are thinking about linear transformations).
```python
A = Matrix([
[1, -1, -1],
[0, 1, 0],
[1, -2, 1]])
```
```python
A.inv()
```
$$\left[\begin{matrix}\frac{1}{2} & \frac{3}{2} & \frac{1}{2}\\0 & 1 & 0\\- \frac{1}{2} & \frac{1}{2} & \frac{1}{2}\end{matrix}\right]$$
```python
A.inv() * A
```
$$\left[\begin{matrix}1 & 0 & 0\\0 & 1 & 0\\0 & 0 & 1\end{matrix}\right]$$
```python
A * A.inv()
```
$$\left[\begin{matrix}1 & 0 & 0\\0 & 1 & 0\\0 & 0 & 1\end{matrix}\right]$$
## Eigenvectors and Eigenvalues
To find the eigenvalues of a matrix, use ``eigenvals``. ``eigenvals`` returns a dictionary of ``eigenvalue:algebraic multiplicity``.
```python
M = Matrix([
[3, -2, 4, -2],
[5, 3, -3, -2],
[5, -2, 2, -2],
[5, -2, -3, 3]])
M
```
$$\left[\begin{matrix}3 & -2 & 4 & -2\\5 & 3 & -3 & -2\\5 & -2 & 2 & -2\\5 & -2 & -3 & 3\end{matrix}\right]$$
```python
M.eigenvals()
```
This means that ``M`` has eigenvalues -2, 3, and 5, and that the eigenvalues -2 and 3 have algebraic multiplicity 1 and that the eigenvalue 5 has algebraic multiplicity 2.
To find the eigenvectors of a matrix, use ``eigenvects``. ``eigenvects`` returns a list of tuples of the form ``(eigenvalue:algebraic multiplicity, [eigenvectors])``.
```python
M.eigenvects()
```
$$\left [ \left ( -2, \quad 1, \quad \left [ \left[\begin{matrix}0\\1\\1\\1\end{matrix}\right]\right ]\right ), \quad \left ( 3, \quad 1, \quad \left [ \left[\begin{matrix}1\\1\\1\\1\end{matrix}\right]\right ]\right ), \quad \left ( 5, \quad 2, \quad \left [ \left[\begin{matrix}1\\1\\1\\0\end{matrix}\right], \quad \left[\begin{matrix}0\\-1\\0\\1\end{matrix}\right]\right ]\right )\right ]$$
This shows us that, for example, the eigenvalue 5 also has geometric multiplicity 2, because it has two eigenvectors. Because the algebraic and geometric multiplicities are the same for all the eigenvalues, ``M`` is diagonalizable.
To diagonalize a matrix, use ``diagonalize``. ``diagonalize`` returns a tuple $(P,D)$, where $D$ is diagonal and $M=PDP^{-1}$.
```python
P, D = M.diagonalize()
```
```python
P
```
$$\left[\begin{matrix}0 & 1 & 1 & 0\\1 & 1 & 1 & -1\\1 & 1 & 1 & 0\\1 & 1 & 0 & 1\end{matrix}\right]$$
```python
D
```
$$\left[\begin{matrix}-2 & 0 & 0 & 0\\0 & 3 & 0 & 0\\0 & 0 & 5 & 0\\0 & 0 & 0 & 5\end{matrix}\right]$$
```python
P * D * P.inv()
```
$$\left[\begin{matrix}3 & -2 & 4 & -2\\5 & 3 & -3 & -2\\5 & -2 & 2 & -2\\5 & -2 & -3 & 3\end{matrix}\right]$$
```python
P * D * P.inv() == M
```
True
Note that since ``eigenvects`` also includes the ``eigenvalues``, you should use it instead of ``eigenvals`` if you also want the ``eigenvectors``. However, as computing the eigenvectors may often be costly, ``eigenvals`` should be preferred if you only wish to find the eigenvalues.
If all you want is the characteristic polynomial, use ``charpoly``. This is more efficient than ``eigenvals``, because sometimes symbolic roots can be expensive to calculate.
```python
lamda = symbols('lamda')
p = M.charpoly(lamda)
factor(p)
```
**Note:** ``lambda`` is a reserved keyword in Python, so to create a Symbol called λ, while using the same names for SymPy Symbols and Python variables, use ``lamda`` (without the b). It will still pretty print as λ.
Non-square matrices don't have eigenvectors and therefore don't have an eigendecomposition. Instead, we can use the singular value decomposition to break up a non-square matrix $A$ into left singular vectors, right singular vectors, and a diagonal matrix of singular values. Use the ``singular_values`` method on any matrix to find its singular values.
```python
A
```
$$\left[\begin{matrix}1 & -1 & -1\\0 & 1 & 0\\1 & -2 & 1\end{matrix}\right]$$
```python
A.singular_values()
```
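If the singular vectors are also needed, one possibility (my addition, using NumPy rather than SymPy) is to convert the matrix to a numerical array and call ``numpy.linalg.svd``:
```python
import numpy as np
A_np = np.array(A.tolist(), dtype=float)
U, S, Vt = np.linalg.svd(A_np)            # A_np = U @ diag(S) @ Vt
print(S)                                  # numerical counterpart of A.singular_values()
print(np.allclose(U @ np.diag(S) @ Vt, A_np))
```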
## References
1. SymPy Development Team (2016). [Sympy Tutorial: Matrices](http://docs.sympy.org/latest/tutorial/matrices.html)
2. Ivan Savov (2016). [Taming math and physics using SymPy](https://minireference.com/static/tutorials/sympy_tutorial.pdf)
The following cell change the style of the notebook.
```python
from IPython.core.display import HTML
def css_styling():
styles = open('./styles/custom_barba.css', 'r').read()
return HTML(styles)
css_styling()
```
<link href='http://fonts.googleapis.com/css?family=Fenix' rel='stylesheet' type='text/css'>
<link href='http://fonts.googleapis.com/css?family=Alegreya+Sans:100,300,400,500,700,800,900,100italic,300italic,400italic,500italic,700italic,800italic,900italic' rel='stylesheet' type='text/css'>
<link href='http://fonts.googleapis.com/css?family=Source+Code+Pro:300,400' rel='stylesheet' type='text/css'>
<style>
/* Based on Lorena Barba template available at: https://github.com/barbagroup/AeroPython/blob/master/styles/custom.css*/
@font-face {
font-family: "Computer Modern";
src: url('http://mirrors.ctan.org/fonts/cm-unicode/fonts/otf/cmunss.otf');
}
div.cell{
width:800px;
margin-left:16% !important;
margin-right:auto;
}
h1 {
font-family: 'Alegreya Sans', sans-serif;
}
h2 {
font-family: 'Fenix', serif;
}
h3{
font-family: 'Fenix', serif;
margin-top:12px;
margin-bottom: 3px;
}
h4{
font-family: 'Fenix', serif;
}
h5 {
font-family: 'Alegreya Sans', sans-serif;
}
div.text_cell_render{
font-family: 'Alegreya Sans',Computer Modern, "Helvetica Neue", Arial, Helvetica, Geneva, sans-serif;
line-height: 135%;
font-size: 120%;
width:600px;
margin-left:auto;
margin-right:auto;
}
.CodeMirror{
font-family: "Source Code Pro";
font-size: 90%;
}
/* .prompt{
display: None;
}*/
.text_cell_render h1 {
font-weight: 200;
font-size: 50pt;
line-height: 100%;
color:#CD2305;
margin-bottom: 0.5em;
margin-top: 0.5em;
display: block;
}
.text_cell_render h5 {
font-weight: 300;
font-size: 16pt;
color: #CD2305;
font-style: italic;
margin-bottom: .5em;
margin-top: 0.5em;
display: block;
}
.warning{
color: rgb( 240, 20, 20 )
}
</style>
```python
```
*Source: notebooks/sympy/linear_algebra.ipynb — nicoguaro/AdvancedMath (MIT license)*
## Quantum circuit for an exponential of pauli strings
For SUSY QM, the Hamiltonian $H$ can be qubitized, which results in the Hamiltonian being written as a sum of terms, each containing a product of Pauli matrices acting on the qubits. Given some initial state, we can apply the time evolution operator,
\begin{equation}
e^{iHt}.
\end{equation}
To realize this on a quantum computer, we use the Suzuki-Trotter formula
\begin{equation}
e^{i\sum_j H_j \delta t}=\prod_j e^{i H_j \delta t} + \mathcal{O}(\delta t^2)
\end{equation}
Since qubitizing the Hamiltonian results in an expression for $H$ in terms of Pauli operators, we need to be able to write down the quantum circuit for an exponential of Pauli matrices. This is accomplished with the so-called "ladder" circuit, which we now detail.
First we go through some example cases showing that the exponential of the Hamiltonian can be realized as a quantum circuit.
```python
import numpy as np #we will use numpy's kron function for tensor products, and its matmul for matrix multiplication.
#definition of the identity, pauli X and Z matrices, and the two-qubit CNOT matrix.
ID=np.array([[1,0],[0,1]])
X=np.array([[0,1],[1,0]])
Z=np.array([[1,0],[0,-1]])
CNOT=np.array([[1,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]])
```
```python
#A quick check that we are doing kronecker products correctly
#The CNOT gate is the identity, if the control qubit is zero, and a NOT(X) gate otherwise
assert np.array_equal(CNOT, np.kron([[1,0],[0,0]],ID) + np.kron([[0,0],[0,1]],X))
```
```python
#To avoid using an algebraic library, like sci-py, I pick specific values for cos(t) and i sin(t)...
#cos(t) = A
#i sin(t) = B
A=0.2
B=0.3
RZ=A*ID + B*Z #a rotation around the z-axis with given values for t (that don't make sense)
```
Now we can check that the circuit for
\begin{equation}
e^{-i(Z \otimes Z)t} = \text{CNOT}\times(\mathcal{1}\otimes R_z)\times \text{CNOT}
\end{equation}
```python
LHS=A*np.kron(ID,ID) + B*np.kron(Z,Z)
RHS=np.matmul(CNOT,np.matmul(np.kron(ID,RZ),CNOT))
#print(LHS)
#print(RHS)
assert np.allclose(LHS, RHS)
```
```python
#We now repeat this for a pauli Z applied to 3 qubits.
LHS = A*np.kron(ID,np.kron(ID,ID)) + B*np.kron(Z,np.kron(Z,Z))
CNOT1=np.kron(CNOT,ID)
CNOT2=np.kron(ID,CNOT)
RZ3=np.kron(ID,np.kron(ID,RZ))
RHS=np.matmul(CNOT1,np.matmul(CNOT2,np.matmul(RZ3,np.matmul(CNOT2,CNOT1))))
assert np.allclose(LHS, RHS)
```
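The same ladder pattern generalizes to any number of qubits. Below is a small sketch (my addition) that builds the ladder circuit for $n$ qubits with plain NumPy and checks it against $A\,\mathbb{1}^{\otimes n} + B\,Z^{\otimes n}$, reusing the matrices and constants defined above:
```python
def kron_list(mats):
    out = np.array([[1.0]])
    for m in mats:
        out = np.kron(out, m)
    return out

def ladder_exp_zzz(n):
    # RZ acts on the last qubit; the CNOT ladder accumulates the parity onto it
    circuit = kron_list([ID] * (n - 1) + [A * ID + B * Z])
    for k in range(n - 2, -1, -1):
        cnot_k = kron_list([ID] * k + [CNOT] + [ID] * (n - k - 2))
        circuit = np.matmul(cnot_k, np.matmul(circuit, cnot_k))
    return circuit

n = 4
LHS = A * kron_list([ID] * n) + B * kron_list([Z] * n)
assert np.allclose(LHS, ladder_exp_zzz(n))
```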
Qiskit already contains a method for implementing Trotterization of an exponential written as a sum of Pauli matrices.
```python
from qiskit.aqua.operators import I,X,Y,Z, PauliTrotterEvolution
from qiskit import QuantumCircuit, transpile
```
```python
operator = ((Z^Z).exp_i())
trotter_op = PauliTrotterEvolution(trotter_mode='suzuki').convert(operator)
print(operator)
print(trotter_op)
qc = QuantumCircuit(2,2)
qc.append(trotter_op, [0,1])
transpile(qc, basis_gates = ['cx', 'u1', 'u2', 'u3', 'H', 'X', 'Y', 'Z', 'id']).draw('mpl')
```
```python
transpile(qc, basis_gates = ['cx', 'u1', 'u2', 'u3', 'H', 'X', 'Y', 'Z', 'id'],optimization_level=3).draw('mpl')
```
```python
operator = ((X^Z).exp_i())
trotter_op = PauliTrotterEvolution(trotter_mode='suzuki').convert(operator)
print(operator)
print(trotter_op)
qc = QuantumCircuit(2,2)
qc.append(trotter_op, [0,1])
transpile(qc, basis_gates = ['cx', 'u1', 'u2', 'u3', 'H', 'X', 'Y', 'Z', 'id']).draw('mpl')
```
```python
operator = ((X^Y^Z).exp_i())
trotter_op = PauliTrotterEvolution(trotter_mode='suzuki').convert(operator)
print(operator)
print(trotter_op)
qc = QuantumCircuit(3,3)
qc.append(trotter_op, [0,1,2])
transpile(qc, basis_gates = ['cx', 'u1', 'u2', 'u3', 'H', 'X', 'Y', 'Z', 'id']).draw('mpl')
```
*Source: tutorials/LadderCircuits.ipynb — daschaich/SUSY_QuantumComputing (MIT license)*
```python
from sympy import pi, cos, sin, symbols
from sympy.utilities.lambdify import implemented_function
import pytest
from sympde.calculus import grad, dot
from sympde.calculus import laplace
from sympde.topology import ScalarFunctionSpace
from sympde.topology import element_of
from sympde.topology import NormalVector
from sympde.topology import Square
from sympde.topology import Union
from sympde.expr import BilinearForm, LinearForm, integral
from sympde.expr import Norm
from sympde.expr import find, EssentialBC
from sympde.expr.expr import linearize
from psydac.fem.basic import FemField
from psydac.api.discretization import discretize
x,y,z = symbols('x1, x2, x3')
```
# Non-Linear Poisson in 2D
In this section, we consider the non-linear Poisson problem:
$$
-\nabla \cdot \left( (1+u^2) \nabla u \right) = f \quad \text{in } \Omega,
\\
u = 0 \quad \text{on } \partial \Omega,
$$
where $\Omega$ denotes the unit square.
For testing, we shall take a function $u$ that fulfills the boundary condition, then compute $f$ as
$$
f(x,y) = -\nabla \cdot \left( (1+u^2) \nabla u \right)
$$
The weak formulation is
$$
\int_{\Omega} (1+u^2) \nabla u \cdot \nabla v ~ d\Omega = \int_{\Omega} f v ~d\Omega, \quad \forall v \in \mathcal{V}
$$
For the sack of generality, we shall consider the linear form
$$
G(v;u,w) := \int_{\Omega} (1+w^2) \nabla u \cdot \nabla v ~ d\Omega, \quad \forall u,v,w \in \mathcal{V}
$$
Our problem is then
$$
\mbox{Find } u \in \mathcal{V}, \mbox{such that}\\
G(v;u,u) = l(v), \quad \forall v \in \mathcal{V}
$$
where
$$
l(v) := \int_{\Omega} f v ~d\Omega, \quad \forall v \in \mathcal{V}
$$
#### Topological domain
```python
domain = Square()
B_dirichlet_0 = domain.boundary
```
#### Function Space
```python
V = ScalarFunctionSpace('V', domain)
```
#### Defining the Linear form $G$
```python
u = element_of(V, name='u')
v = element_of(V, name='v')
w = element_of(V, name='w')
# Linear form g: V --> R
g = LinearForm(v, integral(domain, (1+w**2)*dot(grad(u), grad(v))))
```
#### Defining the Linear form L
```python
solution = sin(pi*x)*sin(pi*y)
f = 2*pi**2*(sin(pi*x)**2*sin(pi*y)**2 + 1)*sin(pi*x)*sin(pi*y) - 2*pi**2*sin(pi*x)**3*sin(pi*y)*cos(pi*y)**2 - 2*pi**2*sin(pi*x)*sin(pi*y)**3*cos(pi*x)**2
```
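As a sanity check (my addition), we can verify symbolically that this hard-coded $f$ equals $-\nabla \cdot \left( (1+u^2)\nabla u \right)$ for the manufactured solution:
```python
from sympy import diff, simplify

flux_x = (1 + solution**2) * diff(solution, x)
flux_y = (1 + solution**2) * diff(solution, y)
f_check = -(diff(flux_x, x) + diff(flux_y, y))
print(simplify(f_check - f))   # expected to print 0
```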
```python
# Linear form l: V --> R
l = LinearForm(v, integral(domain, f * v))
```
### Picard Method
$$
\mbox{Find } u_{n+1} \in \mathcal{V}_h, \mbox{such that}\\
G(v;u_{n+1},u_n) = l(v), \quad \forall v \in \mathcal{V}_h
$$
### Newton Method
Let's define
$$
F(v;u) := G(v;u,u) -l(v), \quad \forall v \in \mathcal{V}
$$
Newton method writes
$$
\mbox{Find } u_{n+1} \in \mathcal{V}_h, \mbox{such that}\\
F^{\prime}(\delta u,v; u_n) = - F(v;u_n), \quad \forall v \in \mathcal{V} \\
u_{n+1} := u_{n} + \delta u, \quad \delta u \in \mathcal{V}
$$
#### Computing $F^{\prime}$ the derivative of $F$
**SymPDE** allows you to linearize a linear form and get a bilinear form, using the function **linearize**
```python
F = LinearForm(v, g(v,w=u)-l(v))
du = element_of(V, name='du')
Fprime = linearize(F, u, trials=du)
```
## Picard Method
#### Abstract Model
```python
un = element_of(V, name='un')
# Bilinear form a: V x V --> R
a = BilinearForm((u, v), g(v, u=u,w=un))
# Dirichlet boundary conditions
bc = [EssentialBC(u, 0, B_dirichlet_0)]
# Variational problem
equation = find(u, forall=v, lhs=a(u, v), rhs=l(v), bc=bc)
# Error norms
error = u - solution
l2norm = Norm(error, domain, kind='l2')
```
#### Discretization
```python
# Create computational domain from topological domain
domain_h = discretize(domain, ncells=[16,16], comm=None)
# Discrete spaces
Vh = discretize(V, domain_h, degree=[2,2])
# Discretize equation using Dirichlet bc
equation_h = discretize(equation, domain_h, [Vh, Vh])
# Discretize error norms
l2norm_h = discretize(l2norm, domain_h, Vh)
```
#### Picard solver
```python
def picard(niter=10):
Un = FemField( Vh, Vh.vector_space.zeros() )
for i in range(niter):
Un = equation_h.solve(un=Un)
# Compute error norms
l2_error = l2norm_h.assemble(u=Un)
print('l2_error = ', l2_error)
return Un
```
```python
Un = picard(niter=5)
```
l2_error = 0.1041623200353605
l2_error = 0.019794500321162495
l2_error = 0.0032729508639899856
l2_error = 0.00043360362547357383
l2_error = 5.42731202704659e-05
```python
from matplotlib import pyplot as plt
from utilities.plot import plot_field_2d
nbasis = [w.nbasis for w in Vh.spaces]
p1,p2 = Vh.degree
x = Un.coeffs._data[p1:-p1,p2:-p2]
u = x.reshape(nbasis)
plot_field_2d(Vh.knots, Vh.degree, u) ; plt.colorbar()
```
## Newton Method
#### Abstract Model
```python
# Dirichlet boundary conditions
bc = [EssentialBC(du, 0, B_dirichlet_0)]
# Variational problem
equation = find(du, forall=v, lhs=Fprime(du, v,u=un), rhs=-F(v,u=un), bc=bc)
```
#### Discretization
```python
# Create computational domain from topological domain
domain_h = discretize(domain, ncells=[16,16], comm=None)
# Discrete spaces
Vh = discretize(V, domain_h, degree=[2,2])
# Discretize equation using Dirichlet bc
equation_h = discretize(equation, domain_h, [Vh, Vh])
# Discretize error norms
l2norm_h = discretize(l2norm, domain_h, Vh)
```
#### Newton Solver
```python
def newton(niter=10):
Un = FemField( Vh, Vh.vector_space.zeros() )
for i in range(niter):
delta_x = equation_h.solve(un=Un)
Un = FemField( Vh, delta_x.coeffs + Un.coeffs )
# Compute error norms
l2_error = l2norm_h.assemble(u=Un)
print('l2_error = ', l2_error)
return Un
```
```python
un = newton(niter=5)
```
l2_error = 0.1041623200353605
l2_error = 0.011366075929785831
l2_error = 0.00019827168123576672
l2_error = 2.6121623867937704e-05
l2_error = 2.612317723593809e-05
```python
nbasis = [w.nbasis for w in Vh.spaces]
p1,p2 = Vh.degree
x = un.coeffs._data[p1:-p1,p2:-p2]
u = x.reshape(nbasis)
plot_field_2d(Vh.knots, Vh.degree, u) ; plt.colorbar()
```
```python
```
```python
```
*Source: lessons/Chapter3/01_nonlinear_poisson_2d.ipynb — pyccel/IGA-Python (MIT license)*
# Solving systems of linear equations
Juan Pablo Echeagaray González
Data Science Club Academy
September 27, 2021
## Basic libraries
It is always worth having these 2 libraries at hand: numpy tends to be much more efficient than Python's native functions when it comes to mathematical operations, and matplotlib is basically the standard for producing visualizations in this language.
```python
import numpy as np
import sympy as sm
```
For more information, check the class OneNote notebook.
[Click here](https://tecmx-my.sharepoint.com/:o:/g/personal/a00830646_itesm_mx/EjsM5FC-YCxHnkM61ZZqfdwBub6PRS6cHAuM-EmeLETCEg?e=lLrH5s) to view the OneNote notebook.
## Examples
### Example 1
```python
A = np.array([[1, 1, 1], [1, -1, 2], [2, 0, 3]])
b = np.array([[3], [2], [1]])
np.linalg.solve(A, b)
```
```python
augmented = np.concatenate((A, b), axis=1)
augmented = sm.Matrix(augmented)
augmented.rref()[0]
```
$\displaystyle \left[\begin{matrix}1 & 0 & \frac{3}{2} & 0\\0 & 1 & - \frac{1}{2} & 0\\0 & 0 & 0 & 1\end{matrix}\right]$
Don't be alarmed by the error in the previous section: numpy worked as it should and warned us that it was impossible to compute the inverse of the matrix A, so the function ```np.linalg.solve(A,b)``` raised an error.
The next step we can take is to compute its **RREF**, the *reduced row echelon form*, and carry out the relevant analysis from there. In this case we have an inconsistency, since it is not possible to multiply a set of variables by 0, add them, and obtain 1.
### Example 2
```python
A = np.array([[1, 1, 1], [1, -1, 2], [0, 1, 1]])
b = np.array([[3], [2], [2]])
x = np.linalg.solve(A, b)
x
```
array([[1.],
[1.],
[1.]])
```python
augmented = np.concatenate((A, b), axis=1)
augmented = sm.Matrix(augmented)
augmented.rref()[0]
```
$\displaystyle \left[\begin{matrix}1 & 0 & 0 & 1\\0 & 1 & 0 & 1\\0 & 0 & 1 & 1\end{matrix}\right]$
Here we have a nice example: the function ```np.linalg.solve(A,b)``` was able to find a vector satisfying the system's matrix equation. No further operation is needed, but I would still like you to see what the matrix looks like in its RREF form.
You can notice that inside that matrix sits the identity matrix I<sub>3</sub> itself, and to its right is the solution vector x.
### Example 3
```python
A = np.array([[1, 1, 1], [1, -1, 2], [2, 0, 3]])
b = np.array([[3], [2], [5]])
x = np.linalg.solve(A, b)
x
```
```python
augmented = np.concatenate((A, b), axis=1)
augmented = sm.Matrix(augmented)
augmented.rref()[0]
```
$\displaystyle \left[\begin{matrix}1 & 0 & \frac{3}{2} & \frac{5}{2}\\0 & 1 & - \frac{1}{2} & \frac{1}{2}\\0 & 0 & 0 & 0\end{matrix}\right]$
This is by far the most interesting example of the three. The function ```np.linalg.solve(A,b)``` could not find a vector satisfying the system's matrix equation, and the **RREF** form of the matrix has an entire row of zeros. This does not mean that the system has no solutions; on the contrary, it has infinitely many!
Check the class notebook to identify the form that the solution of the system takes.
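As a sketch of what that general solution looks like (my addition, using sympy's `linsolve`; the symbol names are arbitrary):
```python
x_, y_, z_ = sm.symbols('x y z')
A_sym = sm.Matrix([[1, 1, 1], [1, -1, 2], [2, 0, 3]])
b_sym = sm.Matrix([3, 2, 5])
# linsolve returns the solution set parametrized by the free variable z_
sm.linsolve((A_sym, b_sym), [x_, y_, z_])
```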
*Source: Linear Algebra/lin_eq.ipynb — JuanEcheagaray75/DSC-scripts (Apache-2.0 license)*
```python
%matplotlib inline
from typing import List
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.linalg
import scipy.ndimage
import scipy.optimize
import scipy.special
import sklearn.datasets
from chmp.ds import mpl_set, get_color_cycle, Loop
```
```python
# helper for gradient checking
def approximate_gradient(x, func, eps=1e-5):
res = np.zeros(x.size)
for i in range(x.size):
d = np.zeros(x.size)
d[i] = eps
res[i] = (func(x + d) - func(x - d)) / (2 * eps)
return res
```
# Linear Models for classification
## Discriminant Functions
### Fisher's Linear Discriminant
```python
n_samples = 200
y = np.random.binomial(1, p=0.5, size=n_samples)
mu = np.asarray([[-1, 0.5], [1, 0]])
sigma = np.asarray([
[[+0.65, +0.35],
[+0.35, +0.65]],
[[+0.75, +0.35],
[+0.35, +0.75]],
])
x = mu[y] + np.einsum('ij...,ij...->i...', sigma[y], np.random.normal(size=(n_samples, 2)))
simple_w = np.mean(x[y == 0], axis=0) - np.mean(x[y == 1], axis=0)
simple_w = simple_w / (simple_w @ simple_w) ** 0.5
delta_0 = x[y == 0] - np.mean(x[y == 0], axis=0, keepdims=True)
delta_1 = x[y == 1] - np.mean(x[y == 1], axis=0, keepdims=True)
S_w = delta_0.T @ delta_0 + delta_1.T @ delta_1
fischer_w = np.linalg.pinv(S_w) @ simple_w
fischer_w = fischer_w / (fischer_w @ fischer_w) ** 0.5
```
```python
c0, c1 = get_color_cycle(2)
plt.figure(figsize=(12, 4))
plt.subplot(1, 3, 1)
plt.scatter(x[y == 0, 0], x[y == 0, 1], color=c0, alpha=0.4, marker='.')
plt.scatter(x[y == 1, 0], x[y == 1, 1], color=c1, alpha=0.4, marker='.')
plt.subplot(1, 3, 2)
plt.hist(x[y == 0] @ simple_w, alpha=0.4, range=(-3, +3), bins=21, color=c0)
plt.hist(x[y == 1] @ simple_w, alpha=0.4, range=(-3, +3), bins=21, color=c1)
plt.subplot(1, 3, 3)
plt.hist(x[y == 0] @ fischer_w, alpha=0.4, range=(-3, +3), bins=21, color=c0)
plt.hist(x[y == 1] @ fischer_w, alpha=0.4, range=(-3, +3), bins=21, color=c1)
pass
```
### Perceptron Algorithm
```python
n_samples = 200
y = np.random.binomial(1, p=0.5, size=n_samples)
mu = np.asarray([[-1, 0.5], [1, 0]])
sigma = np.asarray([
[[+0.65, +0.35],
[+0.35, +0.65]],
[[+0.75, +0.35],
[+0.35, +0.75]],
])
x = mu[y] + np.einsum('ij...,ij...->i...', sigma[y], np.random.normal(size=(n_samples, 2)))
x = np.concatenate([np.ones(n_samples)[:, None], x], axis=1)
y = 2 * y - 1
```
```python
w = np.random.normal(loc=1e-2, size=3)
errors: List[float] = []
for loop, _ in Loop.over(range(10)):
idx = np.arange(n_samples)
np.random.shuffle(idx)
for i in idx:
        if y[i] * np.sign(x[i, :] @ w) <= 0:  # update only on misclassified samples
w += y[i] * x[i, :]
errors += [np.mean(y * np.sign(x @ w) + 1) / 2]
print(f'{loop}'.ljust(20), end='\r')
```
[⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ 0.20s / 0.20s]
```python
plt.plot(errors, '.', alpha=0.1)
plt.plot(scipy.ndimage.gaussian_filter(errors, 20), '-')
mpl_set(ylabel='Accuracy', xlabel='Iteration')
```
## Probabilistic Generative Models
Assume probability of data given class $p(x|C_k)$. This results in the posterior:
$$
\begin{align}
p(C_k|x)
&= \frac{p(x|C_k) p(C_k)}{\sum_k p(x|C_k) p(C_k)} \\
&= \frac{e^{a_k(x)}}{\sum_k e^{a_k(x)}}
\end{align}
$$
with $a_k(x) = \log p(x|C_k) + \log p(C_k)$. For two classes this can be written as:
$$
\begin{align}
p(C_1|x) &= \sigma(a(x)) \\
a(x) &= a_1(x) - a_0(x)
\end{align}
$$
For example, for continuous features, $p(x|C_k)$ could be modeled as a Gaussian:
$$
a(x) =
-\frac{1}{2} (x - \mu_1)^T \Sigma_1^{-1} (x - \mu_1) +
\frac{1}{2} (x - \mu_0)^T \Sigma_0^{-1} (x - \mu_0) +
\frac{1}{2} \log \frac{|\Sigma_0|}{|\Sigma_1|} +
\log \frac{p(C_1)}{p(C_0)}
$$
To fit, parametrize $p(C_1) = \pi, p(C_0) = 1 - \pi$ and optimize the joint loglikelihood $p(x|C_k)p(C_k)$. Then use Bayes theorem to obtain class probabilities.
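A minimal sketch of this recipe (my addition, with made-up data and a shared covariance matrix for both classes):
```python
rng = np.random.RandomState(0)
x0 = rng.multivariate_normal([-1, 0], np.eye(2), size=200)
x1 = rng.multivariate_normal([+1, 0], np.eye(2), size=200)
xg, yg = np.concatenate([x0, x1]), np.concatenate([np.zeros(200), np.ones(200)])

pi_hat = yg.mean()                                        # ML estimate of p(C_1)
mu0, mu1 = xg[yg == 0].mean(axis=0), xg[yg == 1].mean(axis=0)
d0, d1 = xg[yg == 0] - mu0, xg[yg == 1] - mu1
P = np.linalg.pinv((d0.T @ d0 + d1.T @ d1) / len(xg))    # shared precision matrix

def posterior_c1(xq):
    # a(x) = log p(x|C_1) - log p(x|C_0) + log p(C_1)/p(C_0); the |Sigma| terms cancel
    a = (-0.5 * (xq - mu1) @ P @ (xq - mu1)
         + 0.5 * (xq - mu0) @ P @ (xq - mu0)
         + np.log(pi_hat / (1 - pi_hat)))
    return scipy.special.expit(a)

print(posterior_c1(np.array([+2., 0.])), posterior_c1(np.array([-2., 0.])))
```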
To reduce the number of parameters for discrete features, use the Naive Bayes approximation:
$$
\begin{align}
p(\{x\}|C_k)
&= \prod_i p(x_i|C_k) &\; \\
&= \prod_i \mu_{ik}^{x_i} (1 - \mu_{ik})^{1 - x_i} &&\text{for binary $x_i$}
\end{align}
$$
This results in
$$
\begin{align}
a(\{ x \}) &=
\sum_i x_i \log \frac{\mu_{i1} (1 - \mu_{i0})}{\mu_{i0} (1 - \mu_{i1})} +
\sum_i \log \frac{1 - \mu_{i1}}{1 - \mu_{i0}} +
\log \frac{p(C_1)}{p(C_0)}
\end{align}
$$
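A small sketch of the binary-feature case (my addition, with synthetic Bernoulli data; the ML estimates of $\mu_{ik}$ are simple per-class feature means):
```python
rng = np.random.RandomState(1)
mu_true = np.array([[0.2, 0.7, 0.5],    # mu_{i0}
                    [0.8, 0.3, 0.5]])   # mu_{i1}
y_nb = rng.binomial(1, 0.5, size=500)
x_nb = rng.binomial(1, mu_true[y_nb])

mu_hat = np.array([x_nb[y_nb == k].mean(axis=0) for k in (0, 1)])
pi_hat = y_nb.mean()

a_nb = (x_nb @ np.log(mu_hat[1] * (1 - mu_hat[0]) / (mu_hat[0] * (1 - mu_hat[1])))
        + np.log((1 - mu_hat[1]) / (1 - mu_hat[0])).sum()
        + np.log(pi_hat / (1 - pi_hat)))
print('accuracy:', np.mean((a_nb > 0) == y_nb))
```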
## Probabilistic Discriminative Models
Instead of modelling the data and using Bayes' theorem, approximate the class probabilities directly by using the functional form.
### Logistic Regression
Take $y \in \{-1, +1\}$. Then, the negative loglikelihood is given by:
$$
\begin{align}
NLL
&= -\sum_n \log \sigma(y_n x_n^T w) + \frac{1}{2} \lambda w^T w
\\
\frac{\partial}{\partial w_\alpha} NLL
&= -\sum_n \sigma(-y_n x_n^T w) y_n x_{n\alpha} + \lambda w_\alpha
\\
\frac{\partial^2}{\partial w_\alpha \partial w_\beta} NLL
&= \sum_n \sigma(y_n x_n^T w) \sigma(-y_n x_n^T w) x_{n\alpha} x_{n\beta} + \lambda \delta_{\alpha\beta}
\end{align}
$$
```python
def fit_logistic_regression(x, y, reg=0.0):
N, M = x.shape
y = 2 * y - 1
w0 = np.random.uniform(low=-1e-2, high=+1e-2, size=M)
def loss(w):
return -np.mean(np.log(sigmoid(y * (x @ w)))) + (0.5 * reg / N) * (w @ w)
def grad(w):
return (
-np.mean(sigmoid(-y * (x @ w))[:, None] * y[:, None] * x, axis=0) +
reg / N * w
)
res = scipy.optimize.minimize(loss, w0, jac=grad, method='BFGS')
if not res.success:
raise ValueError(f'could not fit: {res.message}')
return res.x
def sigmoid(x):
p = x > 0
s = 1.0 / (1.0 + np.exp(-np.abs(x)))
return p * s + (1 - p) * (1 - s)
```
```python
# do not shuffle to see posterior structure more clearly
x, y = sklearn.datasets.make_classification(random_state=42, shuffle=False)
w_logit = fit_logistic_regression(x, y)
w_logit_reg = fit_logistic_regression(x, y, reg=1e-3)
print('likelihood (no reg)', np.prod(sigmoid((2 * y - 1) * (x @ w_logit))))
print('likelihood (w/ reg)', np.prod(sigmoid((2 * y - 1) * (x @ w_logit_reg))))
```
likelihood (no reg) 0.999652286703
likelihood (w/ reg) 0.979098237265
### Probit regression
Assume noisy threshold model
$$
\begin{align}
p(t = 1 | x)
&= \mathbb{E}_\theta \left[ w^T x > \theta \right] \\
&= \int_{-\infty}^{+\infty}\mathrm{d}\theta\; p(\theta) \mathbb{1}\left[ w^T x > \theta \right] \\
&= \int_{-\infty}^{+w^T x}\mathrm{d}\theta\; p(\theta)
\end{align}
$$
This model is called probit regression for $\theta \sim \mathcal{N}(0, 1)$.
```python
def fit_probit_regression(x, y):
N, M = x.shape
y = 2 * y - 1
w0 = np.random.uniform(low=-1e-2, high=+1e-2, size=M)
def loss(w):
return -np.mean(np.log(probit(y * (x @ w))))
def grad(w):
arg = y * (x @ w)
scalar = probit_grad(arg) / probit(arg) * y
return -np.mean(scalar[:, None] * x, axis=0)
res = scipy.optimize.minimize(loss, w0, jac=grad, method='BFGS')
if not res.success:
raise ValueError(f'could not fit: {res.message}')
return res.x
def probit(u):
# NOTE the definition of the error function between Bishop and scipy differ
return 0.5 * (1 + scipy.special.erf(u / np.sqrt(2)))
def probit_grad(u):
return np.exp(-u ** 2.0 / 2) / (np.sqrt(2 * np.pi))
```
```python
u = np.linspace(-5, +5, 100)
plt.figure(figsize=(8, 4))
plt.subplot(1, 2, 1)
plt.plot(u, probit(u), label='probit function')
plt.plot(u, sigmoid(u), label='sigmoid function')
plt.legend(loc='best')
plt.subplot(1, 2, 2)
plt.plot(u, np.asarray([approximate_gradient(np.asarray([x]), probit) for x in u]), label='probit deriv.')
plt.plot(u, probit_grad(u), ls='--', c='k')
plt.plot(u, np.asarray([approximate_gradient(np.asarray([x]), sigmoid) for x in u]), label='sigmoid deriv.')
plt.plot(u, sigmoid(u) * (1 - sigmoid(u)), ls='--', c='k')
plt.legend(loc='best')
pass
```
```python
w_probit = fit_probit_regression(x, y)
print('probit likelihood', np.prod(probit((2 * y - 1) * (x @ w_probit))))
```
probit likelihood 0.999933414634
```python
pd.DataFrame({'probit': w_probit, 'logit': w_logit}).plot.bar()
plt.title('Comparision Probit / Logistic regression coefficients')
pass
```
## Laplace Approximation
Taylor-expand the log probability $\log p(z)$ around its maximum $z_0$, where $\partial_{z} \log p(z) \big|_{z_0} = 0$.
This results in:
$$
\begin{align}
\log p(z)
&\approx
\log p(z_0) +
\frac{1}{2} (z - z_0)_\alpha (z - z_0)_\beta \;
\left[ \frac{\partial^2}{\partial z_\alpha \partial z_\beta} \log p(z) \right]_{z=z_0}
\\
&=
\log p(z_0) - \frac{1}{2} (z - z_0)^T A (z - z_0)
\\
&\approx
\mathcal{N}(z|z_0, A^{-1})
\\
A_{\alpha\beta} &= -\left[ \frac{\partial^2}{\partial z_\alpha \partial z_\beta} \log p(z) \right]_{z=z_0}
\end{align}
$$
Note: As a result of the central limit theorem, the posterior distribution for a model is expected to become increasingly better approximated by a Gaussian as the number of observed data points is increased, and so we would expect the Laplace approximation to be most useful in situations where the number of data points is relatively large. (p. 216).
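As a small one-dimensional illustration (my addition, modeled after Bishop's Fig. 4.14), we can Laplace-approximate the unnormalized density $p(z) \propto e^{-z^2/2}\, \sigma(20 z + 4)$, reusing the `sigmoid` helper defined further below:
```python
def log_p(z):
    return -0.5 * z ** 2 + np.log(sigmoid(20 * z + 4))

res = scipy.optimize.minimize_scalar(lambda z: -log_p(z))
z0 = res.x                                                 # mode of p(z)
eps = 1e-4
A_lap = -(log_p(z0 + eps) - 2 * log_p(z0) + log_p(z0 - eps)) / eps ** 2

zs = np.linspace(-2, 4, 200)
plt.plot(zs, np.exp(log_p(zs)), label='unnormalized $p(z)$')
plt.plot(zs, np.exp(log_p(z0)) * np.exp(-0.5 * A_lap * (zs - z0) ** 2), '--', label='Laplace approx.')
plt.legend(loc='best')
pass
```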
The same approach can be used to estimate the normalization constant of the data likelihood:
$$
\begin{align}
\log p(\mathcal{D})
&=
\log p(\mathcal{D}|\theta) + \log p(\theta) -
\log \int \mathrm{d}\theta\; p(\mathcal{D}|\theta) p(\theta)
\\
&\approx
\log p(\mathcal{D}|\theta_\mathrm{MAP}) + \log p(\theta_\mathrm{MAP}) +
\frac{M}{2} \log 2\pi - \frac{1}{2} \log |A|
\\
A_{\alpha\beta} &=
-\left[
\frac{\partial^2}{\partial \theta_\alpha \partial \theta_\beta}
\log p(\mathcal{D}|\theta) p(\theta)
\right]_{\theta = \theta_\mathrm{MAP}}
\end{align}
$$
## Bayesian Logistic Regression
$$
\begin{align}
p(w) &= \mathcal{N}(w|0, \sigma_0^2)
\\
p(y|w, x) &= \sigma(y w^T x)
\\
\log p(w|\left\{y, x \right\})
&=
\sum_n \log \sigma (y_n w^T x_n) -
\frac{1}{2} \sigma_0^{-2} w^T w +
\mathrm{const}
\\
&\approx
\mathcal{N}(w|w_\mathrm{map}, A^{-1})
\end{align}
$$
Identify $\sigma_0^{-2} = \lambda$
```python
N, M = x.shape
```
```python
prior_var = 100
w_map = fit_logistic_regression(x, y, reg=1.0 / prior_var)
```
```python
arg = (2 * y - 1) * (x @ w_map)
hessian = np.eye(M) / prior_var + np.sum(  # prior precision contributes to the diagonal only
(sigmoid(arg) * sigmoid(-arg))[:, None, None] * x[:, None, :] * x[:, :, None],
axis=0
)
```
```python
posterior_var = np.linalg.pinv(hessian)
# NOTE: imaginary values can appear due to numerical inaccuracies
posterior_std = np.real(scipy.linalg.sqrtm(posterior_var))
# draw 10 posterior samples
posterior_sample = w_map[None, :] + np.random.normal(size=[10, M]) @ posterior_std
```
```python
plt.imshow(posterior_var, origin='lower')
plt.xticks(range(0, 20, 2))
plt.yticks(range(0, 20, 2))
plt.title('posterior variance')
pass
```
```python
```
```python
```
*Source: BuildingBlocks/Bishop_Notes_04.ipynb — chmp/misc-exp (MIT license)*
| |Pierre Proulx, Eng., Professor|
|:---|:---|
|Department of Chemical Engineering and Biotechnological Engineering |** GCH200 - Phénomènes d'échanges I **|
#### Details of transforming the partial differential equation into an ordinary differential equation.
#### To start part b), I have the derivatives computed that will be needed for the transformation; I then do the rest by hand.
```python
#
# Pierre Proulx
#
#
# Set up the display and the symbolic computation tools
#
import sympy as sp
from IPython.display import *
sp.init_printing(use_latex=True)
%matplotlib inline
eta,x,z=sp.symbols('eta x z')
f=sp.Function('f')(eta)
C_A=sp.Function('C_A')(x,z)
C_A0,x,z,D_AB,V_m=sp.symbols('C_A0,x,z,D_AB,V_m')
eq18511=sp.Derivative(C_A,z)-D_AB/V_m*sp.Derivative(C_A,x,x)
display(eq18511)
dfdeta=sp.diff(f,eta)
d2fdeta2=sp.diff(f,eta,eta)
eta=x/(4*D_AB/V_m*z)**(1/2)
display('eta',eta)
detadx=eta.diff(x)
display('deta/dx',detadx)
detadx=eta/x
detadz=eta.diff(z)
display('deta/dz',detadz)
detadz=-1/2*eta/z
display('deta/dz',detadz)
display('df/deta',dfdeta*detadz)
display('d2f/deta2',d2fdeta2*detadx**2)
display("en substituant dans l'équation originale")
eq=-D_AB/V_m*d2fdeta2*detadx**2+dfdeta*detadz
display(eq)
display("en substituant dans l'équation originale")
eq=-D_AB/V_m*d2fdeta2*detadx**2+dfdeta*detadz
display(eq)
```
#### I do the rest by hand, as it seems easier to me.
### $- \frac {D_{AB}}{V_m} \frac {\partial^2 C_A} {\partial x^2} + \frac {\partial C_A} {\partial z}= 0$
### $- \frac {D_{AB}}{V_m} \frac {\partial^2 C_A} {\partial \eta ^2} \left ( \frac {d \eta}{ dx} \right )^2 \frac {1}{ C_{A0}} + \frac {\partial C_A} {\partial \eta } \frac {d \eta}{ dz} \frac {1}{ C_{A0}}= 0$
### $- \frac {D_{AB}}{V_m} \frac {d^2 f} {d \eta^2} \left ( \frac {d \eta}{ dx} \right )^2 + \frac {df} {d \eta} \frac {d \eta}{ dz} = 0$
```python
eta=sp.symbols('eta') # redefine so that the differential equation is displayed
f=sp.Function('f')(eta)
eq=sp.diff(f,eta,eta)+2*eta**2*sp.diff(f,eta)
display(eq)
f=sp.dsolve(eq,f)
display(f)
f=f.rhs
cl1=sp.Eq(f.subs(eta,0)-1,0)
cl2=sp.Eq(f.subs(eta,sp.oo))
display(cl1,cl2)
constantes=sp.solve([cl1,cl2],sp.symbols('C1 C2'))
display(constantes)
fsolution=f.subs(constantes)
display(fsolution.simplify())
import matplotlib.pyplot as plt
%matplotlib inline
sp.plot(fsolution,(eta,0,2))
```
```python
```
*Source: Chap-18-Section-18-5-details.ipynb — pierreproulx/GCH200 (MIT license)*
# Catching Heuristics Are Robust to Systematic Disturbances and Can Be Found by Reinforcement Learning
## Supplementary Material: Proofs for Chapman's Strategy (Section 4)
This material accompanies my doctoral thesis *On Decomposability in Robot Reinforcement Learning* and the paper *Catching Heuristics Are Robust to Systematic Disturbances and Can Be Found by Reinforcement Learning*.
© 2017 Sebastian Höfer
In this notebook, we verify the proofs regarding Chapman's strategy in Section 4 of the paper (Thesis Chapter 4).
```python
from IPython.display import Image, display
import sympy as sp
import numpy as np
sp.init_printing()
import os
import sys
# load scripts
#sys.path.append(os.path.join(os.getcwd(), "../../src"))
#import multi_experiment_plot
#from utils import pdfcrop
def concrete_case(formula, V_, theta_, yb0_=0):
global GRAVITY
return formula.subs(g,GRAVITY).subs(V,V_).subs(theta,theta_).subs(yb0,yb0_)
def concrete_trajectory(formula, V_, theta_, T_, yb0_=0, N=100):
time = np.linspace(0, float(T_), N)
return np.array(map(lambda t_: concrete_case(formula, V_, theta_, yb0_).subs(t, t_), time)), time, time[1]-time[0]
```
```python
%pylab inline
import matplotlib
#matplotlib.rcParams['ps.useafm'] = True
#matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.preamble'] = r"\usepackage{charter}, \usepackage{tgheros}, \renewcommand*\familydefault{\sfdefault}"
matplotlib.rcParams['figure.autolayout'] = True
matplotlib.rcParams['font.size'] = 24
#from matplotlib import rc
#rc('font',**{'family':'serif','serif':['Computer Modern Roman'], 'size': 30})
matplotlib.use('pdf')
```
Populating the interactive namespace from numpy and matplotlib
/Users/Hoefer/anaconda/lib/python2.7/site-packages/matplotlib/__init__.py:1350: UserWarning: This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
warnings.warn(_use_error_msg)
```python
def plot_surface(fig, ax, title, X, Y, Z, labels, linewidth=0.2, vlim=None, zlim=[None,None], contour=False, elev=20, azi=45):
if vlim is None:
vlim = zlim
ax.set_title(title)
#ax = fig.gca(projection='3d')
xlabelpad, ylabelpad, zlabelpad = [35,35,10]
if zlim[1] is not None:
Z[np.where(Z > zlim[1])] = zlim[1]
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, vmin=vlim[0], vmax=vlim[1],
linewidth=linewidth, antialiased=False, )
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.01f'))
ax.set_xlabel(labels[0], labelpad=xlabelpad)
ax.set_ylabel(labels[1], labelpad=ylabelpad)
ax.set_zlabel(labels[2], labelpad=zlabelpad)
if zlim[0] == 0 and zlim[1] == 5:
# hacky: for agent_ball_distance
ax.set_zticks([0,1,2,3,4,5])
ax.set_zticklabels(map (lambda x: "$%d$" % x, [0,1,2,3,4,5]))
# reformat ticklabels
for ticks_and_labels, setter, in zip([ zip(ax.get_xticks(), ax.get_xticklabels()),
zip(ax.get_yticks(), ax.get_yticklabels()),
zip(ax.get_zticks(), ax.get_zticklabels())],
(ax.set_xticklabels, ax.set_yticklabels, ax.set_zticklabels)):
ticklbls = []
fnt_size = "huge"
tkz = zip(*ticks_and_labels)[0]
tick_range = np.max(tkz) - np.min(tkz)
for tick, lbl in ticks_and_labels:
#txt = lbl.get_text()
#if txt == "":
if tick_range <= 1.5:
tl = "%.1f" % tick
else:
tl = "%d" % tick
txt = r"\%s{$%s$}" % (fnt_size, tl)
#else:
# txt = r"\%s{%s}" % (fnt_size, tick)
lbl.set_text(txt)
ticklbls.append(lbl)
setter(ticklbls)
# move y ticks a bit to the left, so the -15 does not collide
[t.set_ha('right') for t in ax.get_yticklabels()]
vmin = 0.
vmax = 1.
ax.view_init(elev, azi)
if contour:
if np.min(Z) != np.max(Z):
cset = ax.contour(X, Y, Z, zdir='z', offset=np.min(Z), cmap=cm.coolwarm)
if np.min(X) != np.max(X):
#cset = ax.contour(X, Y, Z, zdir='x', offset=np.min(X), cmap=cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='x', offset=np.max(X), cmap=cm.coolwarm) # project to opposite side
if np.min(Y) != np.max(Y):
#cset = ax.contour(X, Y, Z, zdir='y', offset=np.min(Y), cmap=cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='y', offset=np.max(Y), cmap=cm.coolwarm) # project to opposite side
if zlim != [None,None]:
ax.set_zlim3d(zlim[0], zlim[1])
fig.colorbar(surf, shrink=0.75, aspect=20)
return ax
import subprocess
def pdfcrop(path):
#call = ["perl", "/usr/bin/pdfcrop", path, path ]
call = ["pdfcrop", path, path ]
print " ".join(call)
pipe = subprocess.Popen(call)
return pipe
```
```python
# HElPER FUNCTIONS
# sympy has problems of simplifying (V^2 sin(theta)^2)^{1/2} to V sin(theta)
def sympy_sqrt_fix(formula):
# return sp.simplify(formula.subs((V**2*sp.sin(theta)**2)**(0.5), V*sp.sin(theta)))
return sp.simplify(formula.subs(sp.sqrt( (V**2) * (sp.sin(theta))**2), V*sp.sin(theta)))
def zero_yb0(formula, val=0):
return sympy_sqrt_fix(formula.subs(yb0,val))
GRAVITY=9.81
# Variable
t, yb0, theta, psi, V, g, rv = sp.symbols("t, b_{y0}, \\varphi, \\psi, \\nu, g, \\vvaref")
#yb0 = 0
yb = V*sp.sin(theta)*t - 0.5*g*t*t + yb0
xb = V*sp.cos(theta)*t
T = sp.solve(yb, t)[1]
R = xb.subs(t, T)
```
$\newcommand{\vva}{\theta}
\newcommand{\dvva}{\dot{\theta}}
\newcommand{\ddvva}{\ddot{\theta}}
\newcommand{\dddvva}{\dddot{\theta}}
\newcommand{\vvaref}{\dot{\theta}_\mathrm{ref}}
\newcommand{\vvarefzero}{\dot{\theta}_{\mathrm{ref},0}}
\newcommand{\vvarefcon}{{\vvaref^*}}
\newcommand{\vvarefincon}{{\vvaref^{\sim}}}
\newcommand{\xyzb}{\mathbf{b}}
\newcommand{\dxyzb}{\dot{\mathbf{b}}}
\newcommand{\ddxyzb}{\ddot{\mathbf{b}}}
\newcommand{\xb}{{b_x}}
\newcommand{\dxb}{{\dot{b}_x}}
\newcommand{\ddxb}{{\dot{b}_x}}
\newcommand{\yb}{{b_y}}
\newcommand{\dyb}{{\dot{b}_y}}
\newcommand{\ddyb}{{\ddot{b}_y}}
\newcommand{\zb}{{b_z}}
\newcommand{\dzb}{{\dot{b}_z}}
\newcommand{\ddzb}{{\ddot{b}_z}}
\newcommand{\xbzero}{{b_{x,0}}}
\newcommand{\ybzero}{{b_{y,0}}}
\newcommand{\dxbzero}{{\dot{b}_{x,0}}}
\newcommand{\dybzero}{{\dot{b}_{y,0}}}
\newcommand{\xyza}{\mathbf{a}}
\newcommand{\dxyza}{\dot{\mathbf{a}}}
\newcommand{\ddxyza}{\ddot{\mathbf{a}}}
\newcommand{\xa}{{a_x}}
\newcommand{\dxa}{{\ddot{a}_x}}
\newcommand{\ddxa}{{\ddot{a}_x}}
\newcommand{\za}{{a_z}}
\newcommand{\dza}{{\ddot{a}_z}}
\newcommand{\ddza}{{\ddot{a}_z}}
\newcommand{\xazero}{{a_{x,0}}}
\newcommand{\aref}{a_\mathrm{ref}}
\newcommand{\daref}{\dot{a}_\mathrm{ref}}
\newcommand{\arefcon}{{\aref^*}}
\newcommand{\darefcon}{{\daref^*}}
\newcommand{\azero}{{a_0}}
\newcommand{\azeroincon}{{a_0^{\sim}}}
\newcommand{\damax}{\dot{a}_\mathrm{max}}
\newcommand{\ddamax}{\ddot{a}_\mathrm{max}}
\newcommand{\vvareftext}{tangent reference velocity\xspace}
\newcommand{\areftext}{agent reference\xspace}
\newcommand{\refcontext}{consistent\xspace}
\newcommand{\refincontext}{inconsistent\xspace}
\newcommand{\refcontextup}{Consistent\xspace}$
## Theorem 4.1.1 (Chapman's Proof)
We begin by formalizing the ball's trajectory. We assume that it is a parabola, parametrized by initial ball throwing velocity $\nu$, throwing angle $\varphi$ and initial height $b_{y,0}$:
\begin{align}
b_x(t) &= \nu \cos(\varphi) t + b_{x,0},\\
b_y(t) &= \nu \sin(\varphi) t - \frac{1}{2} g t^2 + b_{y,0}.
\end{align}
W.l.o.g. we assume $b_x(0)=b_{x,0}:=0$ and $b_y(0)=b_{y,0} \geq 0$.
We compute the impact time $T$ and the impact position (or ball range) $R$:
```python
T
```
```python
R
```
In the following we will inspect the case $b_{y,0}=0$
Then the previous expressions simplify to:
$T=$
```python
T = zero_yb0(T)
T
```
$R=$
```python
R = zero_yb0(R)
R
```
We now relate the tangent reference velocity $\vvaref$ to the motion of agent and ball, i.e. $\dvva=0$.
<i>Ansatz</i>: Assume $\tan \alpha$ as seen by the agent is a linear function with constant slope $\vvaref$.
\begin{align}
\frac{\yb(t)}{\aref(t)-\xb(t)} &= \vvaref t + \vva_{0} \\
&= \vvaref\ t + \frac{\ybzero}{\azero} \qquad (1).
\end{align}
with $\aref(0)=\azero \neq 0$, $\xb(0)=0$.
<!--
When does this equation have a solution for $rv$ or $a(t)$?
<ul>
<li>$t=0, y_{b,0}=0$<br/>
If $y_b(0)=y_{b,0}=0$, we get $0=0$ so we get infinitely many solutions.
</li>
<li>$t=0, y_{b,0}\neq0$<br/>
We do not get a solution for $rv$ but we get $\frac{y_b(t)}{a(t)-x_b(t)} = \frac{y_{b,0}}{a_0}$ which is true by assumption.
</li>
<li>$t>0, y_b(t)=0, y_{b,0}=0$<br/>
Since $y_b$ has only two null points, $t=T$. Now depending on $a(T)$ either $rv$ is undefined (if $a(T)=R$) or $rv=0$. WHY?
</li>
<li>$t=T, y_{b,0}=0$<br/>
Equivalent to last case.
</li>
</ul>
-->
<!--
Note that this equation has infinitely many solutions for $t=0$.
We see that this equation is undefined when $a_0=0$, i.e. when the agent and the ball are at the same horizontal position at time $t=0$.
-->
```python
# Set-up Eq.(1)
a0 = sp.symbols("a_0")
a = sp.Function(r"\aref")(t)
# we know rv0 = yb(0)/(a(0)-xb) = yb(0)/a(0) = yb0/a0 (since xb=0)
rv0 = yb0/a0
tan_alpha_eq = yb/(a-xb)
linear_rv_eq = rv*t + rv0
eq = tan_alpha_eq - linear_rv_eq
```
We now solve this Eq. (1) for $\aref(t)$ and verify that $\aref(T) = R$.
$\aref(t)=$
```python
#a_expr = sp.solve(eq, a)[0]
a_expr = sp.solve(zero_yb0(eq), a)[0]
#print sp.latex(a_expr)
a_expr
```
/Users/Hoefer/anaconda/lib/python2.7/site-packages/matplotlib/font_manager.py:1288: UserWarning: findfont: Font family [u'sans-serif'] not found. Falling back to Bitstream Vera Sans
(prop.get_family(), self.defaultFamily[fontext]))
$$\frac{1}{\vvaref} \left(\nu \vvaref t \cos{\left (\varphi \right )} + \nu \sin{\left (\varphi \right )} - 0.5 g t\right)$$
```python
# agent at terminal time
#sp.simplify(a_expr.subs(t, zero_yb0(T)))
```
$\aref(T) - R=$
```python
sp.simplify(a_expr.subs(t, T) - R)
```
$a(0)=$
```python
sp.simplify(a_expr.subs(t, 0))
#a_expr.subs(t, 0)
```
$$\frac{\nu}{\vvaref} \sin{\left (\varphi \right )}$$
That shows that $\aref(T) = R$. We can verify that this holds in the limit, too.
Assume $\ybzero=0$ and $t=T-\delta$, i.e. shortly before impact. How far is the agent away from impact point $R$?
```python
delta = sp.symbols("delta")
ateps_m_R = sp.simplify(zero_yb0(a_expr.subs(t, T-delta) - R))
#ateps_m_R = sp.simplify(a_expr.subs(t, T-delta) - R)
##print sp.latex(ateps_m_R)
ateps_m_R
#sp.limit(ateps_m_R, delta, 0)
```
$$- 1.0 \nu \delta \cos{\left (\varphi \right )} + \frac{0.5 \delta}{\vvaref} g$$
```python
# re-arrange
ateps_m_R = sp.simplify(zero_yb0(a_expr.subs(t, T-delta) - R))
ateps_m_R2 = delta * (-V*sp.cos(theta) + 0.5*g/rv)
assert(sp.simplify(ateps_m_R2-ateps_m_R) == 0.)
##print sp.latex(ateps_m_R2)
ateps_m_R2
```
$$\delta \left(- \nu \cos{\left (\varphi \right )} + \frac{0.5 g}{\vvaref}\right)$$
$= \aref(T-\delta) - R$. We see that $\delta$ is multiplied by a constant expression which depends on $\vvaref$. Hence, for $\delta \rightarrow 0: |\aref(T-\delta)-R| \rightarrow 0$.
However, we still need to make sure to choose $\vvaref$ s.t. $\aref(0)=a_0$.
## Lemma 4.1.2
If we use Chapman's result, the *consistent* tangent reference velocity is computed as $\vvarefcon=$
```python
rv_opt = sp.solve(zero_yb0(a_expr-a0).subs(t,0), rv)[0]
rv_opt
```
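The cell output is omitted here; solving $\aref(0) = \frac{\nu \sin\varphi}{\vvaref} = a_0$ (using the expression for $\aref(0)$ derived above) for $\vvaref$ gives
$$\vvarefcon = \frac{\nu \sin\varphi}{a_0},$$
which matches the expression quoted in the example of Lemma 4.1.4 below.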
Plugging this into $\aref(\delta)$, i.e. evaluating the agent reference shortly after the ball is thrown, we obtain:
```python
a_delta_raw = zero_yb0(a_expr.subs(t, delta).subs(rv, rv_opt))
# re-arrange
a_delta = a0 + delta * (V*sp.cos(theta) - a0*g/(2*V*sp.sin(theta)))
assert (sp.simplify(a_delta - a_delta_raw) == 0.)
a_delta
```
As $\delta \rightarrow 0$ only the term $a_0$ remains.
## Lemma 4.1.3
So there exists a unique $\vvaref$ which fulfills the constraints $\aref(0)=a_0, \aref(T)=R$.
What does the *consistent* agent reference trajectory $\arefcon(t)$ look like?
```python
#at_rvstar_yb0_zero = a_delta.subs(delta, t)
#print sp.latex(at_rvstar_yb0_zero)
#at_rvstar_yb0_zero
at_rvstar_yb0_zero = a_expr.subs(rv, rv_opt)
##print sp.latex(at_rvstar_yb0_zero)
at_rvstar_yb0_zero
```
$=\arefcon(t)$. We see that its derivative is constant:
```python
#adot0 = zero_yb0( (R - a0 ) / T)
at_rvstar_yb0_zero_diff = sp.diff(at_rvstar_yb0_zero, t)
at_rvstar_yb0_zero_diff = sp.simplify(at_rvstar_yb0_zero_diff)
##print sp.latex(at_rvstar_yb0_zero_diff)
at_rvstar_yb0_zero_diff
#sp.simplify(at_rvstar_yb0_zero_diff - adot0)
```
$= \darefcon$.
It corresponds to the velocity the agent needs to arrive at $R$ at time $T$ starting at $a_0$ at time $t=0$, which is $\frac{R-a_0}{T}$:
```python
adot0 = zero_yb0( (R - a0 ) / T)
sp.simplify(at_rvstar_yb0_zero_diff - adot0)
```
This shows that the consistent agent reference trajectory corresponds to motion with constant velocity.
Note that this result is similar to (Chapman, 1968), Eq. (9).
### Figure 4.1: Plotting $\vvaref$
Given this expression we can now plot what the consistent $\vvarefcon$ for reaching the target looks like, with varying initial ball velocity $\nu$ and distance from the impact point $d = a_0 - R$. We keep the throwing angle fixed at $\varphi=\frac{\pi}{4}$ since it only acts as a scaling factor ($\sin \frac{\pi}{4} \approx 0.7$):
```python
#from matplotlib import rcParams
#rcParams.update({'figure.subplot.bottom': 0.1})
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#%pylab tk
X = np.linspace(-50, 50, 15) # distance d
Y = np.linspace(5, 50, 25) # ball velocity V
expr = rv_opt
t_ = 0
#theta_ = np.pi/4
yb0_ = 0.0
for theta_ in [np.pi/4]: #[np.pi/4, np.pi/8, 3*np.pi/8]:
#fig = plt.figure(figsize=(15,10))
fig = plt.figure(figsize=(10,7))
ax = fig.add_subplot(111, projection='3d')
DIFF = np.zeros( (len(X), len(Y)) )
for i,d_ in enumerate(X):
for j,V_ in enumerate(Y):
R_ = concrete_case(R, V_, theta_, yb0_)
T_ = concrete_case(T, V_, theta_, yb0_)
cc = concrete_case(expr, V_, theta_, yb0_).subs(t, t_)
val = cc.subs(a0,R_+d_).evalf()
DIFF[i,j] = np.abs(float(val)) if val != sp.zoo else 0.
# clamp diff
DIFF[i,j] = DIFF[i,j] if DIFF[i,j] <= 1 else 1
DIFF = DIFF.T
X,Y = np.meshgrid(X,Y)
vvarefopt = r"\dot{\theta}_\mathrm{ref}^*"
# clamp diff
labels = [r"\begin{center}initial distance to \\impact point $D_0$ \end{center}",
r"\begin{center}initial ball \\ velocity $\nu$ \end{center}",
r"\begin{center}$%s$\end{center}" % vvarefopt ,]
if theta_ == np.pi/8:
pi_str = r"\frac{\pi}{8}"
pi_slug = "pi8"
elif theta_ == np.pi/4:
pi_str = r"\frac{\pi}{4}"
pi_slug = "pi4"
elif theta_ == 3*np.pi/8:
pi_str = r"\frac{3\pi}{8}"
pi_slug = "3pi8"
title = r"Consistent $%s$ ($\nu = %s$)" % (vvarefopt, pi_str)
ax = plot_surface(fig, ax, title, X, Y, DIFF, labels, zlim=(0,1), contour=False,
elev=20., azi=-40.)
#elev=15, azi=-135)
ax.zaxis.set_rotate_label(False)
#plt.tight_layout()
plt.subplots_adjust(top=9.5, bottom=8.5)
plt.tight_layout()
fn = "v_r_optimal_%s.pdf" % pi_slug
plt.savefig(fn,bbox_inches='tight', pad_inches=1.)
pdfcrop(fn)
#plt.show()
```
## Lemma 4.1.5: Disturbed Trajectories
Although the previous findings give support for Chapman's strategy, they only hold for perfectly parabolic ball trajectories. Realistic ball trajectories are not perfectly parabolic due to random disturbances such as spin and wind or systematic disturbances such as air resistance.
We therefore extend our previous results by showing that all VVA implementations are robust to any additive or multiplicative perturbation of the ball's horizontal component. If we assume that any random or systematic disturbance can be approximated by such perturbations this insight provides evidence that Chapman's strategy successfully copes with realistic ball trajectories.
#### Proof
We consider arbitrary additive perturbations to the ball trajectory:
$\xb'(t) = \xb(t) + \varepsilon(t)$
And then we solve for $\aref(t)$.
```python
eps_x = sp.Function("varepsilon")(t)
xb_e = xb+eps_x
tan_alpha_eq_e = yb/(a-xb_e)
eq_e = tan_alpha_eq_e - linear_rv_eq
agent_t_e = sp.solve(zero_yb0(eq_e), a)[0]
agent_t_e = sp.simplify(agent_t_e)
##print sp.latex(agent_t_e)
agent_t_e
```
$$\frac{1}{\vvaref} \left(\nu \sin{\left (\varphi \right )} + \vvaref \left(\nu t \cos{\left (\varphi \right )} + \varepsilon{\left (t \right )}\right) - 0.5 g t\right)$$
$= \aref'(t)$
How do the agent trajectories in the undisturbed and the disturbed ball flight relate?
```python
# undisturbed
agent_t = sp.solve(zero_yb0(eq), a)[0]
# difference between undisturbed and disturbed trajectory
sp.simplify(agent_t_e - agent_t)
```
We see that the disturbed trajectory differs only by $\varepsilon$. Hence, if the agent compensates the disturbance $\varepsilon$ in the $x$ direction, the same guarantees as before hold: the agent will intercept the ball and $rv$ will remain constant.
### Example: Approximating drag (Figure 4.4)
As an example, consider a noise term which varies quadratically with time. The characteristics of such a disturbance are very similar to those of trajectories with air resistance.
(We only show this by example; there is no closed-form equation for drag trajectories, therefore we cannot show this analytically.)
```python
eps_x = sp.Symbol("varepsilon")
xb_e = xb+eps_x*t**2
```
```python
# HELPER FUNCTIONS
def ball_trajectory_with_drag(v0, theta, dt, t1):
    # drag parameters
    mass = 0.15
    r = 0.0366
    c = 0.5
    A = np.pi * r * r
    rho = 1.293
    global GRAVITY
    x = 0
    y = 0
    vx = v0 * np.cos(theta)
    vy = v0 * np.sin(theta)
    ax = 0
    ay = -GRAVITY
    T = np.arange(dt,t1,dt)
    X = np.zeros( (T.shape[0]+1, 5) )
    X[0,:] = (0, x, y, vx, vy)
    print X[0,:]
    for i,t in enumerate(T):
        x += vx * dt + 0.5*ax*dt*dt
        y += vy * dt + 0.5*ay*dt*dt
        vx += ax*dt
        vy += ay*dt
        v = np.array( [vx,vy] )
        ax, ay = [0, -GRAVITY] - v*v * 0.5 * rho * c * A/mass
        y = 0.0 if y <= 0 else y
        X[i+1,:] = (t, x, y, vx, vy)
    return X[:i+1,:]
# %pylab inline
V_ = 30
theta_ = np.pi/4
T_ = concrete_case(T, V_, theta_)
tau_x, t_range, dt= concrete_trajectory(xb, V_, theta_, T_, 0)
tau_y, t_range, dt= concrete_trajectory(yb, V_, theta_, T_, 0)
tau_x_eps, t_range, dt= concrete_trajectory(xb_e.subs(eps_x,-1.11), V_, theta_, T_, 0)
tau_drag = ball_trajectory_with_drag(V_*1.11, theta_, dt=dt, t1=T_)
# import matplotlib
# matplotlib.rcParams['ps.useafm'] = True
# matplotlib.rcParams['pdf.use14corefonts'] = True
# matplotlib.rcParams['text.usetex'] = True
# #matplotlib.rcParams['text.latex.preamble'] = [\
# #r"\usepackage{accents}",]
# matplotlib.rcParams['figure.autolayout'] = True
# from matplotlib import rc
# rc('font',**{'family':'serif','serif':['Computer Modern Roman'], 'size': 30})
matplotlib.rcParams['font.size'] = 24
plt.figure(figsize=(8,4))
plt.plot(tau_x, tau_y, lw=5.0, c="k", ls='-', label="Ideal")
plt.plot(tau_drag[:,1], tau_drag[:,2], lw=5.0, c="b", label="Drag")
plt.plot(tau_x_eps, tau_y, lw=7.0, c="r", ls='--', label="Pseudo-drag")
plt.legend(fontsize=16)#, loc='upper right', bbox_to_anchor=(0.5, 0.5))
plt.xlabel("x")
plt.ylabel("y")
plt.xlim(-5, 130)
plt.ylim(0, 25) #np.max(tau_y))
plt.savefig("pseudo_drag_trajectory.pdf")
```
We see that the pseudo-drag trajectory (red) which is computed by adding $\varepsilon(t)=-1.11 t^2$ to $x_b$ is highly similar to a ball trajectory with drag (blue).
#### Multiplicative Noise
It is similarly easy to show that multiplicative noise can be compensated by the agent, too.
```python
eps_x = sp.Function("varepsilon")(t)
xb_e = xb*eps_x
tan_alpha_eq_e = yb/(a-xb_e)
eq_e = tan_alpha_eq_e - linear_rv_eq
agent_t_e = sp.solve(zero_yb0(eq_e), a)[0]
agent_t_e
sp.simplify(agent_t_e - agent_t)
```
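The cell outputs are omitted above; carrying out the last subtraction by hand under the same assumptions gives $\aref'(t) - \aref(t) = \xb(t)\,(\varepsilon(t) - 1)$, i.e. again a purely horizontal offset that the agent can compensate by adjusting its position.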
Note that these results only hold if the agent does not have any velocity or acceleration constraints which prevent it from executing the action required to compensate the disturbance.
## Lemma 4.1.4 (Deviating from Consistent $\vvarefcon$)
### Example
We have previously shown that for a given $a_0$ there exists an $\vvaref = \frac{\nu}{a_0} \sin \varphi$ which will result in the agent running towards the impact point with constant speed. But what if, given an initial position $a_0$, we use a different $\vvarefincon \neq \vvaref$ for control?
```python
# CONCRETE EXAMPLE
N = 100
V_ = 30.
theta_ = np.pi/4
yb0_ = 1e-15 # 0 is undefined!
D_ = 10.
eq_yb0 = sympy_sqrt_fix(eq.subs(yb0, yb0_))
agent_t = sp.solve(eq, a)[0]
R_ = concrete_case(sympy_sqrt_fix(R.subs(yb0, yb0_)), V_, theta_, yb0_)
T_ = concrete_case(sympy_sqrt_fix(T.subs(yb0, yb0_)), V_, theta_, yb0_)
a0_ = R_+D_ # 10m right of landing point
adot0_ = sp.simplify( (R_ - a0_ ) / T_)
rv_const_speed_eq = eq_yb0.subs(a, adot0_*t + a0)
rv_const_speed_eq = sp.simplify(sp.solve(rv_const_speed_eq, rv)[0])
rv_const_speed_ = concrete_case(rv_const_speed_eq, V_, theta_, yb0_).subs(a0, a0_).subs(t,0)
agent_tau_const_speed, t_range, dt = concrete_trajectory(agent_t.subs(rv, rv_const_speed_).subs(a0, a0_), V_, theta_, T_, yb0_)
```
We calculated the consistent reference $\vvarefcon$ as follows:
```python
rv_const_speed_
```
```python
print "R=%f" % R_
```
R=91.743119
We check the first and last steps of the agent's trajectory and see that the agent arrives at the impact point:
```python
agent_tau_const_speed[:5], agent_tau_const_speed[-5:,]
```
(array([101.743119266055, 101.642109165045, 101.541099064035,
101.440088963025, 101.339078862015], dtype=object),
array([92.1471596700954, 92.0461495690853, 91.9451394680753,
91.8441293670651, 91.7431192660550], dtype=object))
Now we investigate what happens if we alter $\vvarefcon$ and observe the agent's trajectory using $\vvarefincon = \vvarefcon + \delta$.
```python
N = 1000
delta_ = -0.02
rv_tilde_ = rv_const_speed_+delta_
agent_tau_const_speed_tilde, t_range, dt = concrete_trajectory(agent_t.subs(rv, rv_tilde_).subs(a0, a0_), V_, theta_, T_, yb0_, N)
```
Again we check the trajectory:
```python
agent_tau_const_speed_tilde[:5], agent_tau_const_speed_tilde[-5:],
```
(array([101.743119266055, 112.517462570583, 112.496646595328,
112.475830620073, 112.455014644818], dtype=object),
array([91.8263831670752, 91.8055671918202, 91.7847512165651,
91.7639352413101, 91.7431192660550], dtype=object))
Again, the agent reaches the ball - but it moves with very large acceleration in the opposite direction, i.e. <i>away</i> from the ball impact point - to position $\azeroincon \approx 112.5$. We now show that for starting position $\azeroincon$, $\vvarefincon$ would be the optimal reference velocity.
```python
# CONCRETE EXAMPLE
N = 1000
R_tilde = concrete_case(sympy_sqrt_fix(R.subs(yb0, yb0_)), V_, theta_, yb0_)
T_tilde = concrete_case(sympy_sqrt_fix(T.subs(yb0, yb0_)), V_, theta_, yb0_)
a0_tilde = agent_tau_const_speed_tilde[1]
adot0_tilde = sp.simplify( (R_tilde - a0_tilde ) / T_tilde)
rv_const_speed_tilde = concrete_case(rv_const_speed_eq, V_, theta_, yb0_).subs(a0, a0_tilde).subs(t,0)
rv_const_speed_tilde
```
```python
rv_tilde_
```
In conclusion, we can predict the behavior of an agent that compensates an inconsistent $\vvaref$. It will run to the corresponding starting position $\azeroincon$ as fast as possible. It also shows that setting $\vvaref$ to an arbitrary value such as suggested by (Marken, 2001) can lead to undesired behavior.
Our simulation experiments show that this behavior also occurs when imposing agent velocity and acceleration constraints (see below).
### Proof of Lemma 4.1.4 (Using Observed Value of $\dvva$)
Instead of setting $\vvaref$ to an arbitrary value, how far off the consistent $\vvarefcon$ is the agent if it observes $\dvva$ before starting to run and then uses this quantity as an estimate of $\vvarefcon$?
Let us assume the agent can perfectly observe $\vvarefincon = \dvva$ after a fraction $\delta$ of the trajectory (e.g. 1%). This is computed as follows:
```python
D = sp.symbols("D_0")
pc = sp.symbols("\delta")
#pc = 0.01
#pc = 0.025
T_pc = sympy_sqrt_fix(pc*T.subs(yb0,0)) # t=1% of trajectory
# simplify: assume yb0=0
eq_yb0 = sympy_sqrt_fix(eq.subs(yb0,0))
# real rv at impact point
#rv_expr = sp.solve(eq_yb0.subs(a, sympy_sqrt_fix(R.subs(yb0,0))), rv)[0]
# rv observed at a0 after 1% of trajectory
#rv_expr_pc = sp.simplify(sp.solve(eq_yb0.subs(a, a0).subs(t, T_pc), rv)[0])
rv_expr_pc = sp.simplify(sp.solve(eq_yb0.subs(a, zero_yb0(R)+D).subs(t, T_pc), rv)[0])
##print (sp.latex(rv_expr_pc))
rv_expr_pc
```
$ = \vvarefincon$
($D$ denotes the agent's distance to the impact point)
<!--
(Why is there an $a_0$ in the equation? Because rv_tilde of course depends on where the agent is standing when observing it - and we assume that the agent did not move yet, therefore there is a0.)
-->
Let's compute $\tilde{a}_0$, i.e. the position the agent implicitly assumes to be at when using this rv:
```python
# old a0
a0_expr= sp.solve(rv_opt - rv, a0)[0]
# a0_tilde: a0 when using rv_tilde (=rv_expr_pc) instead of rv
a0_tilde_expr = sp.simplify(a0_expr.subs(rv, rv_expr_pc))
##print (sp.latex(a0_tilde_expr))
a0_tilde_expr
```
$= \azeroincon$.
<!---
(Why is there still an $a_0$ in the equation? If rv_tilde depends on a0, also a0_tilde does!)
-->
Now let's look at the difference $\azeroincon - \azero$, i.e. how far the position that the agent implicitly assumes to be at is from the real $\azero$:
```python
a0_diff = sp.simplify(a0_tilde_expr - a0_expr)
a0_diff = sp.simplify(a0_diff.subs(rv, rv_opt))
a0_diff
```
$= \azeroincon - \azero$.
Let's substitute $a_0$ by the known position of the agent, namely $R+D$:
```python
a0_diff_D = sp.simplify(a0_diff.subs(a0, zero_yb0(R) + D))
a0_diff_D
```
We can further simplify that:
```python
a0_diff_D = a0_diff_D.subs(sp.sin(2.0*theta), sp.sin(2*theta)) # some sympy bug?
a0_diff_D
a0_diff_D2 = a0_diff_D
a0_diff_D = pc*D / (1.0-pc)
assert (sp.simplify(a0_diff_D - a0_diff_D2) == 0)
a0_diff_D
```
$= \azeroincon - \azero$.
This means that the offset that the agent will try to compensate by running does not depend on the initial angle or velocity of the ball. It only depends on how far the agent is from the impact point and how long it waits.
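As a quick numerical illustration (simple arithmetic, not taken from the notebook output): for $D = 50\,$m and $\delta = 0.01$, the implicitly assumed starting position is off by $\frac{0.01 \cdot 50}{1 - 0.01} \approx 0.51\,$m.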
#### Figure 4.3
```python
#pc_ = np.linspace(0,1, 50)
pc_ = 0.01
D_ = np.linspace(-50, 50, 5)
matplotlib.rcParams['font.size'] = 24
plt.figure(figsize=(10,6))
plt.plot( D_, [ a0_diff_D.subs(pc, 0.01).subs(D, D__).evalf() for D__ in D_], lw=3.0, label="$\delta=1\%$" )
plt.plot( D_, [ a0_diff_D.subs(pc, 0.05).subs(D, D__).evalf() for D__ in D_], lw=3.0, label="$\delta=5\%$" )
plt.plot( D_, [ a0_diff_D.subs(pc, 0.1).subs(D, D__).evalf() for D__ in D_], lw=3.0, label="$\delta=10\%$" )
plt.xlabel("$D_0$")
plt.ylabel(r"${a_0^{\sim}} - a_0$")
plt.xlim([-50,50])
plt.title("Consistent vs. inconsistent initial agent position")
plt.legend(loc=2, fontsize=24)
plt.tight_layout()
plt.savefig("tilde_a0_error.pdf")
```
Let us look at how this value scales with $\nu$ and $D_0 = a_0 - R$ with the throwing angle held fixed:
```python
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#%pylab tk
#%pylab inline
fig = plt.figure(figsize=(15,10))
ax = fig.add_subplot(111, projection='3d')
X = np.linspace(-50, 50, 20) # distance d
Y = np.linspace(5, 50, 20) # ball velocity V
t_ = pc
#theta_ = np.pi/4
theta_ = 3*np.pi/8
yb0_ = 0.0
pc_ = 0.01
DIFF = np.zeros( (len(X), len(Y)) )
ZERO = np.zeros( (len(X), len(Y)) )
for i,d_ in enumerate(X):
    for j,V_ in enumerate(Y):
        R_ = concrete_case(R, V_, theta_, yb0_)
        T_ = concrete_case(T, V_, theta_, yb0_)
        # cc = concrete_case(rv_diff, V_, theta_, yb0_).subs(t, t_)
        # cc = concrete_case(a0_diff, V_, theta_, yb0_).subs(t, t_)
        cc = concrete_case(a0_diff_D, V_, theta_, yb0_).subs(t, t_)
        cc = cc.subs(pc, pc_)
        #DIFF[i,j] = float(cc.subs(dist0,d_).subs(t,t_))
        # if np.abs(d_) >= R_:
        #     DIFF[i,j] = 1. #np.nan
        if True:
            #val = cc.subs(a0,R_+d_).evalf()
            val = cc.subs(D,d_).evalf()
            #DIFF[i,j] = np.abs(float(val)) if val != sp.zoo else 0.
            DIFF[i,j] = float(val) if val != sp.zoo else 0.
        # clamp diff
        #DIFF[i,j] = DIFF[i,j] if DIFF[i,j] <= 1 else 1
DIFF = DIFF.T
X,Y = np.meshgrid(X,Y)
# clamp diff
p = ax.plot_surface(X,Y, DIFF, rstride=1, cstride=1, cmap=plt.cm.coolwarm, linewidth=0.3, antialiased=False, alpha=0.9)
#cb = fig.colorbar(p, shrink=0.5)
ax.set_xlabel("$D_0$")
ax.set_ylabel(r"$\nu$")
ax.set_zlabel(r"$\tilde{a}_0 - a_0$")
#ax.set_zlim3d(0, 1)
ax.set_zlim3d(-0.5, 0.5)
p = ax.plot_surface(X,Y, ZERO, rstride=1, cstride=1, cmap=plt.cm.coolwarm, antialiased=False,
alpha=0.25, linewidth=0.0)
Z = DIFF
#if np.min(Z) != np.max(Z):
# cset = ax.contour(X, Y, Z, zdir='z', offset=np.min(Z), cmap=cm.coolwarm)
#if np.min(X) != np.max(X):
if True:
    #cset = ax.contour(X, Y, Z, zdir='x', offset=np.min(X), cmap=cm.coolwarm)
    #cset = ax.contour(X, Y, Z, zdir='x', offset=np.max(X), cmap=cm.coolwarm) # project to opposite side
    #if np.min(Y) != np.max(Y):
    #cset = ax.contour(X, Y, Z, zdir='y', offset=np.min(Y), cmap=cm.coolwarm)
    cset = ax.contour(X, Y, Z, zdir='y', offset=np.max(Y), cmap=cm.coolwarm) # project to opposite side
cb = fig.colorbar(p, shrink=0.75, aspect=20)
#ax.view_init(elev=15., azim=45.)
ax.view_init(elev=10., azim=-45.)
#ax.view_init(elev=1., azim=1.)
ax.xaxis._axinfo['label']['space_factor'] = 2.5
ax.yaxis._axinfo['label']['space_factor'] = 2.5
ax.zaxis._axinfo['label']['space_factor'] = 2.0
plt.tight_layout()
#plt.savefig("a0_tilde_diff.pdf" % pi_slug)
#plt.show()
```
<!--
We see that the difference between the observed and the "true" $rv$ moderately increases with the distance to the target and dramatically increases with low initial velocity $V$. We also see that negative distances $d$ affect $rv$ more than positive distances. Again, this is due to the fact that for negative $d$ the agent is closer to the ball's starting position.
To conclude, choosing arbitrary $rv$ at the beginning of the trajectory will usually generate good behavior. However, short ball trajectories with low initial velocity will be hard to catch.
-->
## Understanding McBeath's Illustration of Chapman's Strategy
This is a brief explanation of the "elevator" picture from (McBeath et al., 1995).
It is easy to see that the length of the segment $s_t$ computes as
$$s_t = (R+D_0) \theta(t), $$
assuming that the agent moves towards the impact point with constant velocity $\dot{a}_x(t) = -\frac{a_0 - R}{T}$.
```python
# Optimal agent motion (constant towards goal)
a_opt = sp.simplify(R + t/T * D)
a_opt
```
```python
ybt = zero_yb0(yb)
theta_aopt = tan_alpha_eq.subs(a, a_opt).simplify()
theta_aopt
#sp.sqrt((D**2)/4 + (R * theta))
```
```python
dist = a_opt - xb
dist
```
```python
dist2 = ((R+D*t/T) - (R*t/T)).simplify()
# sanity check
dist - dist2 == 0
```
True
```python
s = ((R+D)*theta_aopt).simplify()
s
```
```python
s.subs(t, T/2).simplify()
```
```python
```
*Source of the preceding notebook: notebook/proofs-chapman.ipynb from shoefer/ball_catching (MIT license).*
## Histograms of Oriented Gradients (HOG)
As we saw with the ORB algorithm, we can use keypoints in images to do keypoint-based matching to detect objects in images. These types of algorithms work well when you want to detect objects that have a lot of consistent internal features that are not affected by the background. For example, these algorithms work well for facial detection because faces have a lot of consistent internal features that aren't affected by the image background, such as the eyes, nose, and mouth. However, these types of algorithms don't work so well when attempting more general object recognition, say, for example, pedestrian detection in images. The reason is that people don't have consistent internal features, like faces do, because the body shape and style of every person is different (see Fig. 1). This means that every person is going to have a different set of internal features, and so we need something that can more generally describe a person.
<br>
<figure>
<figcaption style = "text-align:left; font-style:italic">Fig. 1. - Pedestrians.</figcaption>
</figure>
<br>
One option is to try to detect pedestrians by their contours instead. Detecting objects in images by their contours (boundaries) is very challenging because we have to deal with the difficulties brought about by the contrast between the background and the foreground. For example, suppose you wanted to detect a pedestrian in an image that is walking in front of a white building and she is wearing a white coat and black pants (see Fig. 2). We can see in Fig. 2, that since the background of the image is mostly white, the black pants are going to have a very high contrast, but the coat, since it is white as well, is going to have very low contrast. In this case, detecting the edges of pants is going to be easy but detecting the edges of the coat is going to be very difficult. This is where **HOG** comes in. HOG stands for **Histograms of Oriented Gradients** and it was first introduced by Navneet Dalal and Bill Triggs in 2005.
<br>
<figure>
<figcaption style = "text-align:left; font-style:italic">Fig. 2. - High and Low Contrast.</figcaption>
</figure>
<br>
The HOG algorithm works by creating histograms of the distribution of gradient orientations in an image and then normalizing them in a very special way. This special normalization is what makes HOG so effective at detecting the edges of objects even in cases where the contrast is very low. These normalized histograms are put together into a feature vector, known as the HOG descriptor, that can be used to train a machine learning algorithm, such as a Support Vector Machine (SVM), to detect objects in images based on their boundaries (edges). Due to its great success and reliability, HOG has become one of the most widely used algorithms in computer vision for object detection.
In this notebook, you will learn:
* How the HOG algorithm works
* How to use OpenCV to create a HOG descriptor
* How to visualize the HOG descriptor.
# The HOG Algorithm
As its name suggests, the HOG algorithm is based on creating histograms from the orientation of image gradients. The HOG algorithm is implemented in a series of steps:
1. Given the image of a particular object, set a detection window (region of interest) that covers the entire object in the image (see Fig. 3).
2. Calculate the magnitude and direction of the gradient for each individual pixel in the detection window.
3. Divide the detection window into connected *cells* of pixels, with all cells being of the same size (see Fig. 3). The size of the cells is a free parameter and it is usually chosen so as to match the scale of the features that want to be detected. For example, in a 64 x 128 pixel detection window, square cells 6 to 8 pixels wide are suitable for detecting human limbs.
4. Create a Histogram for each cell, by first grouping the gradient directions of all pixels in each cell into a particular number of orientation (angular) bins; and then adding up the gradient magnitudes of the gradients in each angular bin (see Fig. 3). The number of bins in the histogram is a free parameter and it is usually set to 9 angular bins.
5. Group adjacent cells into *blocks* (see Fig. 3). The number of cells in each block is a free parameter and all blocks must be of the same size. The distance between each block (known as the stride) is a free parameter but it is usually set to half the block size, in which case you will get overlapping blocks (*see video below*). The HOG algorithm has been shown empirically to work better with overlapping blocks.
6. Use the cells contained within each block to normalize the cell histograms in that block (see Fig. 3). If you have overlapping blocks this means that most cells will be normalized with respect to different blocks (*see video below*). Therefore, the same cell may have several different normalizations.
7. Collect all the normalized histograms from all the blocks into a single feature vector called the HOG descriptor.
8. Use the resulting HOG descriptors from many images of the same type of object to train a machine learning algorithm, such as an SVM, to detect that type of object in images. For example, you could use the HOG descriptors from many images of pedestrians to train an SVM to detect pedestrians in images. The training is done with both positive and negative examples of the object you want to detect in the image.
9. Once the SVM has been trained, a sliding window approach is used to try to detect and locate objects in images. Detecting an object in the image entails finding the part of the image that looks similar to the HOG pattern learned by the SVM.
<br>
<figure>
<figcaption style = "text-align:left; font-style:italic">Fig. 3. - HOG Diagram.</figcaption>
</figure>
<br>
<figure>
<figcaption style = "text-align:left; font-style:italic">Vid. 1. - HOG Animation.</figcaption>
</figure>
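To make steps 2–4 above concrete, the following is a minimal NumPy sketch of the per-cell histogram computation. It is only an illustration: it assumes the gradient components are already available and, unlike the full algorithm (and the OpenCV implementation used later in this notebook), it assigns each pixel's magnitude to a single bin instead of splitting it between the two nearest bins.
```python
import numpy as np

def cell_histogram(cell_gx, cell_gy, num_bins=9):
    # Unsigned gradients: fold all angles into [0, 180) degrees
    magnitude = np.hypot(cell_gx, cell_gy)
    angle = np.rad2deg(np.arctan2(cell_gy, cell_gx)) % 180.0
    bin_width = 180.0 / num_bins
    hist = np.zeros(num_bins)
    for m, a in zip(magnitude.ravel(), angle.ravel()):
        # Add the gradient magnitude to the angular bin its orientation falls into
        hist[int(a // bin_width) % num_bins] += m
    return hist

# Toy example: random gradient components for a single 8 x 8 pixel cell
np.random.seed(0)
gx = np.random.randn(8, 8)
gy = np.random.randn(8, 8)
print(cell_histogram(gx, gy))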
# Why The HOG Algorithm Works
As we learned above, HOG creates histograms by adding the magnitude of the gradients in particular orientations in localized portions of the image called *cells*. By doing this we guarantee that stronger gradients will contribute more to the magnitude of their respective angular bin, while the effects of weak and randomly oriented gradients resulting from noise are minimized. In this manner the histograms tell us the dominant gradient orientation of each cell.
### Dealing with contrast
Now, the magnitude of the dominant orientation can vary widely due to variations in local illumination and the contrast between the background and the foreground.
To account for the background-foreground contrast differences, the HOG algorithm tries to detect edges locally. In order to do this, it defines groups of cells, called **blocks**, and normalizes the histograms using this local group of cells. By normalizing locally, the HOG algorithm can detect the edges in each block very reliably; this is called **block normalization**.
In addition to using block normalization, the HOG algorithm also uses overlapping blocks to increase its performance. By using overlapping blocks, each cell contributes several independent components to the final HOG descriptor, where each component corresponds to a cell being normalized with respect to a different block. This may seem redundant but, it has been shown empirically that by normalizing each cell several times with respect to different local blocks, the performance of the HOG algorithm increases dramatically.
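To illustrate the block-normalization idea, here is a simplified sketch of the L2-Hys scheme that OpenCV uses by default (described in the parameter list further below). This is only an illustration of the normalization step, not OpenCV's internal code:
```python
import numpy as np

def l2_hys_normalize(block_vector, clip=0.2, eps=1e-6):
    # L2-normalize, clip each component, then L2-normalize again
    v = block_vector / np.sqrt(np.sum(block_vector ** 2) + eps ** 2)
    v = np.minimum(v, clip)
    return v / np.sqrt(np.sum(v ** 2) + eps ** 2)

# Example: a block of 2 x 2 cells with 9-bin histograms -> 36 values
np.random.seed(1)
block = np.random.rand(2 * 2 * 9)
print(l2_hys_normalize(block)[:5])
```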
### Loading Images and Importing Resources
The first step in building our HOG descriptor is to load the required packages into Python and to load our image.
We start by using OpenCV to load an image of a triangle tile. Since the `cv2.imread()` function loads images as BGR, we will convert our image to RGB so we can display it with the correct colors. As usual, we will convert our BGR image to gray scale for analysis.
```python
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Set the default figure size
plt.rcParams['figure.figsize'] = [17.0, 7.0]
# Load the image
image = cv2.imread('./images/triangle_tile.jpeg')
# Convert the original image to RGB
original_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Convert the original image to gray scale
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Print the shape of the original and gray scale images
print('The original image has shape: ', original_image.shape)
print('The gray scale image has shape: ', gray_image.shape)
# Display the images
plt.subplot(121)
plt.imshow(original_image)
plt.title('Original Image')
plt.subplot(122)
plt.imshow(gray_image, cmap='gray')
plt.title('Gray Scale Image')
plt.show()
```
The original image has shape: (250, 250, 3)
The gray scale image has shape: (250, 250)
# Creating The HOG Descriptor
We will be using OpenCV’s `HOGDescriptor` class to create the HOG descriptor. The parameters of the HOG descriptor are setup using the `HOGDescriptor()` function. The parameters of the `HOGDescriptor()` function and their default values are given below:
`cv2.HOGDescriptor(win_size = (64, 128),
block_size = (16, 16),
block_stride = (8, 8),
cell_size = (8, 8),
nbins = 9,
win_sigma = DEFAULT_WIN_SIGMA,
threshold_L2hys = 0.2,
gamma_correction = true,
nlevels = DEFAULT_NLEVELS)`
Parameters:
* **win_size** – *Size*
Size of detection window in pixels (*width, height*). Defines the region of interest. Must be an integer multiple of cell size.
* **block_size** – *Size*
Block size in pixels (*width, height*). Defines how many cells are in each block. Must be an integer multiple of cell size and it must be smaller than the detection window. The smaller the block the finer detail you will get.
* **block_stride** – *Size*
Block stride in pixels (*horizontal, vertical*). It must be an integer multiple of cell size. The `block_stride` defines the distance between adjacent blocks, for example, 8 pixels horizontally and 8 pixels vertically. Longer `block_strides` make the algorithm run faster (because fewer blocks are evaluated) but the algorithm may not perform as well.
* **cell_size** – *Size*
Cell size in pixels (*width, height*). Determines the size of your cell. The smaller the cell, the finer detail you will get.
* **nbins** – *int*
Number of bins for the histograms. Determines the number of angular bins used to make the histograms. With more bins you capture more gradient directions. HOG uses unsigned gradients, so the angular bins will have values between 0 and 180 degrees.
* **win_sigma** – *double*
Gaussian smoothing window parameter. The performance of the HOG algorithm can be improved by smoothing the pixels near the edges of the blocks by applying a Gaussian spatial window to each pixel before computing the histograms.
* **threshold_L2hys** – *double*
L2-Hys (Lowe-style clipped L2 norm) normalization method shrinkage. The L2-Hys method is used to normalize the blocks and it consists of an L2-norm followed by clipping and a renormalization. The clipping limits the maximum value of the descriptor vector for each block to have the value of the given threshold (0.2 by default). After the clipping the descriptor vector is renormalized as described in *IJCV*, 60(2):91-110, 2004.
* **gamma_correction** – *bool*
Flag to specify whether the gamma correction preprocessing is required or not. Performing gamma correction slightly increases the performance of the HOG algorithm.
* **nlevels** – *int*
Maximum number of detection window increases.
As we can see, the `cv2.HOGDescriptor()` function supports a wide range of parameters. The first few arguments (`block_size, block_stride, cell_size`, and `nbins`) are probably the ones you are most likely to change. The other parameters can be safely left at their default values and you will get good results.
In the code below, we will use the `cv2.HOGDescriptor()` function to set the cell size, block size, block stride, and the number of bins for the histograms of the HOG descriptor. We will then use the `.compute(image)` method to compute the HOG descriptor (feature vector) for the given `image`.
```python
# Specify the parameters for our HOG descriptor
# Cell Size in pixels (width, height). Must be smaller than the size of the detection window
# and must be chosen so that the resulting Block Size is smaller than the detection window.
cell_size = (6, 6)
# Number of cells per block in each direction (x, y). Must be chosen so that the resulting
# Block Size is smaller than the detection window
num_cells_per_block = (2, 2)
# Block Size in pixels (width, height). Must be an integer multiple of Cell Size.
# The Block Size must be smaller than the detection window
block_size = (num_cells_per_block[0] * cell_size[0],
num_cells_per_block[1] * cell_size[1])
# Calculate the number of cells that fit in our image in the x and y directions
x_cells = gray_image.shape[1] // cell_size[0]
y_cells = gray_image.shape[0] // cell_size[1]
# Horizontal distance between blocks in units of Cell Size. Must be an integer and it must
# be set such that (x_cells - num_cells_per_block[0]) / h_stride = integer.
h_stride = 1
# Vertical distance between blocks in units of Cell Size. Must be an integer and it must
# be set such that (y_cells - num_cells_per_block[1]) / v_stride = integer.
v_stride = 1
# Block Stride in pixels (horizantal, vertical). Must be an integer multiple of Cell Size
block_stride = (cell_size[0] * h_stride, cell_size[1] * v_stride)
# Number of gradient orientation bins
num_bins = 9
# Specify the size of the detection window (Region of Interest) in pixels (width, height).
# It must be an integer multiple of Cell Size and it must cover the entire image. Because
# the detection window must be an integer multiple of cell size, depending on the size of
# your cells, the resulting detection window might be slightly smaller than the image.
# This is perfectly ok.
win_size = (x_cells * cell_size[0] , y_cells * cell_size[1])
# Print the shape of the gray scale image for reference
print('\nThe gray scale image has shape: ', gray_image.shape)
print()
# Print the parameters of our HOG descriptor
print('HOG Descriptor Parameters:\n')
print('Window Size:', win_size)
print('Cell Size:', cell_size)
print('Block Size:', block_size)
print('Block Stride:', block_stride)
print('Number of Bins:', num_bins)
print()
# Set the parameters of the HOG descriptor using the variables defined above
hog = cv2.HOGDescriptor(win_size, block_size, block_stride, cell_size, num_bins)
# Compute the HOG Descriptor for the gray scale image
hog_descriptor = hog.compute(gray_image)
```
The gray scale image has shape: (250, 250)
HOG Descriptor Parameters:
Window Size: (246, 246)
Cell Size: (6, 6)
Block Size: (12, 12)
Block Stride: (6, 6)
Number of Bins: 9
# Number of Elements In The HOG Descriptor
The resulting HOG Descriptor (feature vector), contains the normalized histograms from all cells from all blocks in the detection window concatenated in one long vector. Therefore, the size of the HOG feature vector will be given by the total number of blocks in the detection window, multiplied by the number of cells per block, times the number of orientation bins:
<span class="mathquill">
\begin{equation}
\mbox{total_elements} = (\mbox{total_number_of_blocks})\mbox{ } \times \mbox{ } (\mbox{number_cells_per_block})\mbox{ } \times \mbox{ } (\mbox{number_of_bins})
\end{equation}
</span>
If we don't have overlapping blocks (*i.e.* the `block_stride` equals the `block_size`), the total number of blocks can be easily calculated by dividing the size of the detection window by the block size. However, in the general case we have to take into account the fact that we have overlapping blocks. To find the total number of blocks in the general case (*i.e.* for any `block_stride` and `block_size`), we can use the formula given below:
<span class="mathquill">
\begin{equation}
\mbox{Total}_i = \left( \frac{\mbox{block_size}_i}{\mbox{block_stride}_i} \right)\left( \frac{\mbox{window_size}_i}{\mbox{block_size}_i} \right) - \left [\left( \frac{\mbox{block_size}_i}{\mbox{block_stride}_i} \right) - 1 \right]; \mbox{ for } i = x,y
\end{equation}
</span>
Where <span class="mathquill">Total$_x$</span> is the total number of blocks along the width of the detection window, and <span class="mathquill">Total$_y$</span> is the total number of blocks along the height of the detection window. This formula for <span class="mathquill">Total$_x$</span> and <span class="mathquill">Total$_y$</span> takes into account the extra blocks that result from overlapping. After calculating <span class="mathquill">Total$_x$</span> and <span class="mathquill">Total$_y$</span>, we can get the total number of blocks in the detection window by multiplying <span class="mathquill">Total$_x$ $\times$ Total$_y$</span>. The above formula can be simplified considerably because the `block_size`, `block_stride`, and `window_size` are all defined in terms of the `cell_size`. By making all the appropriate substitutions and cancellations the above formula reduces to:
<span class="mathquill">
\begin{equation}
\mbox{Total}_i = \left(\frac{\mbox{cells}_i - \mbox{num_cells_per_block}_i}{N_i}\right) + 1\mbox{ }; \mbox{ for } i = x,y
\end{equation}
</span>
Where <span class="mathquill">cells$_x$</span> is the total number of cells along the width of the detection window, and <span class="mathquill">cells$_y$</span>, is the total number of cells along the height of the detection window. And <span class="mathquill">$N_x$</span> is the horizontal block stride in units of `cell_size` and <span class="mathquill">$N_y$</span> is the vertical block stride in units of `cell_size`.
Let's calculate what the number of elements for the HOG feature vector should be and check that it matches the shape of the HOG Descriptor calculated above.
```python
# Calculate the total number of blocks along the width of the detection window
tot_bx = np.uint32(((x_cells - num_cells_per_block[0]) / h_stride) + 1)
# Calculate the total number of blocks along the height of the detection window
tot_by = np.uint32(((y_cells - num_cells_per_block[1]) / v_stride) + 1)
# Calculate the total number of elements in the feature vector
tot_els = (tot_bx) * (tot_by) * num_cells_per_block[0] * num_cells_per_block[1] * num_bins
# Print the total number of elements the HOG feature vector should have
print('\nThe total number of elements in the HOG Feature Vector should be: ',
tot_bx, 'x',
tot_by, 'x',
num_cells_per_block[0], 'x',
num_cells_per_block[1], 'x',
num_bins, '=',
tot_els)
# Print the shape of the HOG Descriptor to see that it matches the above
print('\nThe HOG Descriptor has shape:', hog_descriptor.shape)
print()
```
The total number of elements in the HOG Feature Vector should be: 40 x 40 x 2 x 2 x 9 = 57600
The HOG Descriptor has shape: (57600, 1)
# Visualizing The HOG Descriptor
We can visualize the HOG Descriptor by plotting the histogram associated with each cell as a collection of vectors. To do this, we will plot each bin in the histogram as a single vector whose magnitude is given by the height of the bin and its orientation is given by the angular bin that its associated with. Since any given cell might have multiple histograms associated with it, due to the overlapping blocks, we will choose to average all the histograms for each cell to produce a single histogram for each cell.
OpenCV has no easy way to visualize the HOG Descriptor, so we have to do some manipulation first in order to visualize it. We will start by reshaping the HOG Descriptor in order to make our calculations easier. We will then compute the average histogram of each cell and finally we will convert the histogram bins into vectors. Once we have the vectors, we plot the corresponding vectors for each cell in an image.
The code below produces an interactive plot so that you can interact with the figure. The figure contains:
* the grayscale image,
* the HOG Descriptor (feature vector),
* a zoomed-in portion of the HOG Descriptor, and
* the histogram of the selected cell.
**You can click anywhere on the gray scale image or the HOG Descriptor image to select a particular cell**. Once you click on either image a *magenta* rectangle will appear showing the cell you selected. The Zoom Window will show you a zoomed in version of the HOG descriptor around the selected cell; and the histogram plot will show you the corresponding histogram for the selected cell. The interactive window also has buttons at the bottom that allow for other functionality, such as panning, and giving you the option to save the figure if desired. The home button returns the figure to its default value.
**NOTE**: If you are running this notebook in the Udacity workspace, there is around a 2 second lag in the interactive plot. This means that if you click in the image to zoom in, it will take about 2 seconds for the plot to refresh.
```python
%matplotlib notebook
import copy
import matplotlib.patches as patches
# Set the default figure size
plt.rcParams['figure.figsize'] = [9.8, 9]
# Reshape the feature vector to [blocks_y, blocks_x, num_cells_per_block_x, num_cells_per_block_y, num_bins].
# The blocks_x and blocks_y will be transposed so that the first index (blocks_y) refers to the row number
# and the second index to the column number. This will be useful later when we plot the feature vector, so
# that the feature vector indexing matches the image indexing.
hog_descriptor_reshaped = hog_descriptor.reshape(tot_bx,
tot_by,
num_cells_per_block[0],
num_cells_per_block[1],
num_bins).transpose((1, 0, 2, 3, 4))
# Print the shape of the feature vector for reference
print('The feature vector has shape:', hog_descriptor.shape)
# Print the reshaped feature vector
print('The reshaped feature vector has shape:', hog_descriptor_reshaped.shape)
# Create an array that will hold the average gradients for each cell
ave_grad = np.zeros((y_cells, x_cells, num_bins))
# Print the shape of the ave_grad array for reference
print('The average gradient array has shape: ', ave_grad.shape)
# Create an array that will count the number of histograms per cell
hist_counter = np.zeros((y_cells, x_cells, 1))
# Add up all the histograms for each cell and count the number of histograms per cell
for i in range (num_cells_per_block[0]):
    for j in range(num_cells_per_block[1]):
        ave_grad[i:tot_by + i,
                 j:tot_bx + j] += hog_descriptor_reshaped[:, :, i, j, :]
        hist_counter[i:tot_by + i,
                     j:tot_bx + j] += 1
# Calculate the average gradient for each cell
ave_grad /= hist_counter
# Calculate the total number of vectors we have in all the cells.
len_vecs = ave_grad.shape[0] * ave_grad.shape[1] * ave_grad.shape[2]
# Create an array with num_bins equally spaced angles between 0 and 180 degrees (in radians).
deg = np.linspace(0, np.pi, num_bins, endpoint = False)
# Each cell will have a histogram with num_bins. For each cell, plot each bin as a vector (with its magnitude
# equal to the height of the bin in the histogram, and its angle corresponding to the bin in the histogram).
# To do this, create rank 1 arrays that will hold the (x,y)-coordinate of all the vectors in all the cells in the
# image. Also, create the rank 1 arrays that will hold all the (U,V)-components of all the vectors in all the
# cells in the image. Create the arrays that will hold all the vector positons and components.
U = np.zeros((len_vecs))
V = np.zeros((len_vecs))
X = np.zeros((len_vecs))
Y = np.zeros((len_vecs))
# Set the counter to zero
counter = 0
# Use the cosine and sine functions to calculate the vector components (U,V) from their magnitudes. Remember the
# cosine and sine functions take angles in radians. Calculate the vector positions and magnitudes from the
# average gradient array
for i in range(ave_grad.shape[0]):
    for j in range(ave_grad.shape[1]):
        for k in range(ave_grad.shape[2]):
            U[counter] = ave_grad[i,j,k] * np.cos(deg[k])
            V[counter] = ave_grad[i,j,k] * np.sin(deg[k])
            X[counter] = (cell_size[0] / 2) + (cell_size[0] * i)
            Y[counter] = (cell_size[1] / 2) + (cell_size[1] * j)
            counter = counter + 1
# Create the bins in degrees to plot our histogram.
angle_axis = np.linspace(0, 180, num_bins, endpoint = False)
angle_axis += ((angle_axis[1] - angle_axis[0]) / 2)
# Create a figure with 4 subplots arranged in 2 x 2
fig, ((a,b),(c,d)) = plt.subplots(2,2)
# Set the title of each subplot
a.set(title = 'Gray Scale Image\n(Click to Zoom)')
b.set(title = 'HOG Descriptor\n(Click to Zoom)')
c.set(title = 'Zoom Window', xlim = (0, 18), ylim = (0, 18), autoscale_on = False)
d.set(title = 'Histogram of Gradients')
# Plot the gray scale image
a.imshow(gray_image, cmap = 'gray')
a.set_aspect(aspect = 1)
# Plot the feature vector (HOG Descriptor)
b.quiver(Y, X, U, V, color = 'white', headwidth = 0, headlength = 0, scale_units = 'inches', scale = 5)
b.invert_yaxis()
b.set_aspect(aspect = 1)
b.set_facecolor('black')
# Define function for interactive zoom
def onpress(event):
#Unless the left mouse button is pressed do nothing
if event.button != 1:
return
# Only accept clicks for subplots a and b
if event.inaxes in [a, b]:
# Get mouse click coordinates
x, y = event.xdata, event.ydata
# Select the cell closest to the mouse click coordinates
cell_num_x = np.uint32(x / cell_size[0])
cell_num_y = np.uint32(y / cell_size[1])
# Set the edge coordinates of the rectangle patch
edgex = x - (x % cell_size[0])
edgey = y - (y % cell_size[1])
# Create a rectangle patch that matches the cell selected above
rect = patches.Rectangle((edgex, edgey),
cell_size[0], cell_size[1],
linewidth = 1,
edgecolor = 'magenta',
facecolor='none')
# A single patch can only be used in a single plot. Create copies
# of the patch to use in the other subplots
rect2 = copy.copy(rect)
rect3 = copy.copy(rect)
# Update all subplots
a.clear()
a.set(title = 'Gray Scale Image\n(Click to Zoom)')
a.imshow(gray_image, cmap = 'gray')
a.set_aspect(aspect = 1)
a.add_patch(rect)
b.clear()
b.set(title = 'HOG Descriptor\n(Click to Zoom)')
b.quiver(Y, X, U, V, color = 'white', headwidth = 0, headlength = 0, scale_units = 'inches', scale = 5)
b.invert_yaxis()
b.set_aspect(aspect = 1)
b.set_facecolor('black')
b.add_patch(rect2)
c.clear()
c.set(title = 'Zoom Window')
c.quiver(Y, X, U, V, color = 'white', headwidth = 0, headlength = 0, scale_units = 'inches', scale = 1)
c.set_xlim(edgex - cell_size[0], edgex + (2 * cell_size[0]))
c.set_ylim(edgey - cell_size[1], edgey + (2 * cell_size[1]))
c.invert_yaxis()
c.set_aspect(aspect = 1)
c.set_facecolor('black')
c.add_patch(rect3)
d.clear()
d.set(title = 'Histogram of Gradients')
d.grid()
d.set_xlim(0, 180)
d.set_xticks(angle_axis)
d.set_xlabel('Angle')
d.bar(angle_axis,
ave_grad[cell_num_y, cell_num_x, :],
180 // num_bins,
align = 'center',
alpha = 0.5,
linewidth = 1.2,
edgecolor = 'k')
fig.canvas.draw()
# Create a connection between the figure and the mouse click
fig.canvas.mpl_connect('button_press_event', onpress)
plt.show()
```
The feature vector has shape: (57600, 1)
The reshaped feature vector has shape: (40, 40, 2, 2, 9)
The average gradient array has shape: (41, 41, 9)
# Understanding The Histograms
Let's take a look at a couple of snapshots of the above figure to see if the histograms for the selected cell make sense. Let's start looking at a cell that is inside a triangle and not near an edge:
<br>
<figure>
<figcaption style = "text-align:center; font-style:italic">Fig. 4. - Histograms Inside a Triangle.</figcaption>
</figure>
<br>
In this case, since the triangle is nearly all of the same color there shouldn't be any dominant gradient in the selected cell. As we can clearly see in the Zoom Window and the histogram, this is indeed the case. We have many gradients but none of them clearly dominates over the other.
Now let’s take a look at a cell that is near a horizontal edge:
<br>
<figure>
<figcaption style = "text-align:center; font-style:italic">Fig. 5. - Histograms Near a Horizontal Edge.</figcaption>
</figure>
<br>
Remember that edges are areas of an image where the intensity changes abruptly. In these cases, we will have a high intensity gradient in some particular direction. This is exactly what we see in the corresponding histogram and Zoom Window for the selected cell. In the Zoom Window, we can see that the dominant gradient is pointing up, almost at 90 degrees, since that’s the direction in which there is a sharp change in intensity. Therefore, we should expect to see the 90-degree bin in the histogram to dominate strongly over the others. This is in fact what we see.
Now let’s take a look at a cell that is near a vertical edge:
<br>
<figure>
<figcaption style = "text-align:center; font-style:italic">Fig. 6. - Histograms Near a Vertical Edge.</figcaption>
</figure>
<br>
In this case we expect the dominant gradient in the cell to be horizontal, close to 180 degrees, since that’s the direction in which there is a sharp change in intensity. Therefore, we should expect to see the 170-degree bin in the histogram to dominate strongly over the others. This is what we see in the histogram but we also see that there is another dominant gradient in the cell, namely the one in the 10-degree bin. The reason for this, is because the HOG algorithm is using unsigned gradients, which means 0 degrees and 180 degrees are considered the same. Therefore, when the histograms are being created, angles between 160 and 180 degrees, contribute proportionally to both the 10-degree bin and the 170-degree bin. This results in there being two dominant gradients in the cell near the vertical edge instead of just one.
To conclude let’s take a look at a cell that is near a diagonal edge.
<br>
<figure>
<figcaption style = "text-align:center; font-style:italic">Fig. 7. - Histograms Near a Diagonal Edge.</figcaption>
</figure>
<br>
To understand what we are seeing, let's first remember that gradients have an *x*-component and a *y*-component, just like vectors. Therefore, the resulting orientation of a gradient is going to be given by the vector sum of its components. For this reason, on vertical edges the gradients are horizontal, because they only have an x-component, as we saw in Figure 6. While on horizontal edges the gradients are vertical, because they only have a y-component, as we saw in Figure 5. Consequently, on diagonal edges, the gradients are also going to be diagonal because both the *x* and *y* components are non-zero. Since the diagonal edges in the image are close to 45 degrees, we should expect to see a dominant gradient orientation in the 50-degree bin. This is in fact what we see in the histogram but, just like in Figure 6, we see there are two dominant gradients instead of just one. The reason for this is that when the histograms are being created, angles that are near the boundaries of bins contribute proportionally to the adjacent bins. For example, a gradient with an angle of 40 degrees is right in the middle of the 30-degree and 50-degree bins. Therefore, the magnitude of the gradient is split evenly into the 30-degree and 50-degree bins. This results in there being two dominant gradients in the cell near the diagonal edge instead of just one.
Now that you know how HOG is implemented, in the workspace you will find a notebook named *Examples*. In there, you will be able set your own paramters for the HOG descriptor for various images. Have fun!
*Source of the preceding notebook: 1_4_Feature_Vectors/3_1. HOG.ipynb from mariabardon/nanodegree_computer_vision (MIT license).*
# Exploring Data with Python
A significant part of a data scientist's role is to explore, analyze, and visualize data. There's a wide range of tools and programming languages that they can use to do this, and one of the most popular approaches is to use Jupyter notebooks (like this one) and Python.
Python is a flexible programming language that is used in a wide range of scenarios; from web applications to device programming. It's extremely popular in the data science and machine learning community because of the many packages it supports for data analysis and visualization.
In this notebook, we'll explore some of these packages, and apply basic techniques to analyze data. This is not intended to be a comprehensive Python programming exercise; or even a deep dive into data analysis. Rather, it's intended as a crash course in some of the common ways in which data scientists can use Python to work with data.
> **Note**: If you've never used the Jupyter Notebooks environment before, there are a few things you should be aware of:
>
> - Notebooks are made up of *cells*. Some cells (like this one) contain *markdown* text, while others (like the one beneath this one) contain code.
> - The notebook is connected to a Python *kernel* (you can see which one at the top right of the page - if you're running this notebook in an Azure Machine Learning compute instance it should be connected to the **Python 3.6 - AzureML** kernel). If you stop the kernel or disconnect from the server (for example, by closing and reopening the notebook, or ending and resuming your session), the output from cells that have been run will still be displayed; but any variables or functions defined in those cells will have been lost - you must rerun the cells before running any subsequent cells that depend on them.
> - You can run each code cell by using the **► Run** button. The **◯** symbol next to the kernel name at the top right will briefly turn to **⚫** while the cell runs before turning back to **◯**.
> - The output from each code cell will be displayed immediately below the cell.
> - Even though the code cells can be run individually, some variables used in the code are global to the notebook. That means that you should run all of the code cells <u>**in order**</u>. There may be dependencies between code cells, so if you skip a cell, subsequent cells might not run correctly.
## Exploring data arrays with NumPy
Let's start by looking at some simple data.
Suppose a college takes a sample of student grades for a data science class.
Run the code in the cell below by clicking the **► Run** button to see the data.
```python
data = [50,50,47,97,49,3,53,42,26,74,82,62,37,15,70,27,36,35,48,52,63,64]
print(data)
```
[50, 50, 47, 97, 49, 3, 53, 42, 26, 74, 82, 62, 37, 15, 70, 27, 36, 35, 48, 52, 63, 64]
The data has been loaded into a Python **list** structure, which is a good data type for general data manipulation, but not optimized for numeric analysis. For that, we're going to use the **NumPy** package, which includes specific data types and functions for working with *Num*bers in *Py*thon.
Run the cell below to load the data into a NumPy **array**.
```python
import numpy as np
grades = np.array(data)
print(grades)
```
[50 50 47 97 49 3 53 42 26 74 82 62 37 15 70 27 36 35 48 52 63 64]
Just in case you're wondering about the differences between a **list** and a NumPy **array**, let's compare how these data types behave when we use them in an expression that multiplies them by 2.
```python
print (type(data),'x 2:', data * 2)
print('---')
print (type(grades),'x 2:', grades * 2)
```
<class 'list'> x 2: [50, 50, 47, 97, 49, 3, 53, 42, 26, 74, 82, 62, 37, 15, 70, 27, 36, 35, 48, 52, 63, 64, 50, 50, 47, 97, 49, 3, 53, 42, 26, 74, 82, 62, 37, 15, 70, 27, 36, 35, 48, 52, 63, 64]
---
<class 'numpy.ndarray'> x 2: [100 100 94 194 98 6 106 84 52 148 164 124 74 30 140 54 72 70
96 104 126 128]
Note that multiplying a list by 2 creates a new list of twice the length with the original sequence of list elements repeated. Multiplying a NumPy array, on the other hand, performs an element-wise calculation in which the array behaves like a *vector*, so we end up with an array of the same size in which each element has been multiplied by 2.
The key takeaway from this is that NumPy arrays are specifically designed to support mathematical operations on numeric data - which makes them more useful for data analysis than a generic list.
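As a quick supplementary sketch (not part of the original exercise), the same element-wise behavior applies to operations between two arrays of the same shape, and to comparison operators - both of which come in handy during data analysis:
```python
# Element-wise arithmetic between two arrays of the same shape
doubled = grades + grades       # equivalent to grades * 2
# Element-wise comparison produces a Boolean array with one value per grade
high_scores = grades >= 60
print(doubled[:5])
print(high_scores[:5])
```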
You might have spotted that the class type for the numpy array above is a **numpy.ndarray**. The **nd** indicates that this is a structure that can consist of multiple *dimensions* (it can have *n* dimensions). Our specific instance has a single dimension of student grades.
Run the cell below to view the **shape** of the array.
```python
grades.shape
```
(22,)
The shape confirms that this array has only one dimension, which contains 22 elements (there are 22 grades in the original list). You can access the individual elements in the array by their zero-based ordinal position. Let's get the first element (the one in position 0).
```python
grades[0]
```
50
Alright, now that you know your way around a NumPy array, it's time to perform some analysis of the grades data.
You can apply aggregations across the elements in the array, so let's find the simple average grade (in other words, the *mean* grade value).
```python
grades.mean()
```
49.18181818181818
So the mean grade is just around 50 - more or less in the middle of the possible range from 0 to 100.
Let's add a second set of data for the same students, this time recording the typical number of hours per week they devoted to studying.
```python
# Define an array of study hours
study_hours = [10.0,11.5,9.0,16.0,9.25,1.0,11.5,9.0,8.5,14.5,15.5,
13.75,9.0,8.0,15.5,8.0,9.0,6.0,10.0,12.0,12.5,12.0]
# Create a 2D array (an array of arrays)
student_data = np.array([study_hours, grades])
# display the array
student_data
```
array([[10. , 11.5 , 9. , 16. , 9.25, 1. , 11.5 , 9. , 8.5 ,
14.5 , 15.5 , 13.75, 9. , 8. , 15.5 , 8. , 9. , 6. ,
10. , 12. , 12.5 , 12. ],
[50. , 50. , 47. , 97. , 49. , 3. , 53. , 42. , 26. ,
74. , 82. , 62. , 37. , 15. , 70. , 27. , 36. , 35. ,
48. , 52. , 63. , 64. ]])
Now the data consists of a 2-dimensional array - an array of arrays. Let's look at its shape.
```python
# Show shape of 2D array
student_data.shape
```
(2, 22)
The **student_data** array contains two elements, each of which is an array containing 22 elements.
To navigate this structure, you need to specify the position of each element in the hierarchy. So to find the first value in the first array (which contains the study hours data), you can use the following code.
```python
# Show the first element of the first element
student_data[0][0]
```
10.0
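NumPy also supports a comma-separated indexing syntax for multidimensional arrays, which is usually preferred to chained indexing because it also allows slicing along each dimension. A small sketch using the array defined above:
```python
# Equivalent to student_data[0][0], using NumPy's [row, column] syntax
print(student_data[0, 0])
# Slice along a dimension - for example, the first three study-hours values
print(student_data[0, :3])
```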
Now you have a multidimensional array containing both the student's study time and grade information, which you can use to compare data. For example, how does the mean study time compare to the mean grade?
```python
# Get the mean value of each sub-array
avg_study = student_data[0].mean()
avg_grade = student_data[1].mean()
print('Average study hours: {:.2f}\nAverage grade: {:.2f}'.format(avg_study, avg_grade))
```
Average study hours: 10.52
Average grade: 49.18
## Exploring tabular data with Pandas
While NumPy provides a lot of the functionality you need to work with numbers (and specifically, arrays of numeric values), when you start to deal with two-dimensional tables of data, the **Pandas** package offers a more convenient structure to work with - the **DataFrame**.
Run the following cell to import the Pandas library and create a DataFrame with three columns. The first column is a list of student names, and the second and third columns are the NumPy arrays containing the study time and grade data.
```python
import pandas as pd
df_students = pd.DataFrame({'Name': ['Dan', 'Joann', 'Pedro', 'Rosie', 'Ethan', 'Vicky', 'Frederic', 'Jimmie',
'Rhonda', 'Giovanni', 'Francesca', 'Rajab', 'Naiyana', 'Kian', 'Jenny',
'Jakeem','Helena','Ismat','Anila','Skye','Daniel','Aisha'],
'StudyHours':student_data[0],
'Grade':student_data[1]})
df_students
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>Dan</td>
<td>10.00</td>
<td>50.0</td>
</tr>
<tr>
<th>1</th>
<td>Joann</td>
<td>11.50</td>
<td>50.0</td>
</tr>
<tr>
<th>2</th>
<td>Pedro</td>
<td>9.00</td>
<td>47.0</td>
</tr>
<tr>
<th>3</th>
<td>Rosie</td>
<td>16.00</td>
<td>97.0</td>
</tr>
<tr>
<th>4</th>
<td>Ethan</td>
<td>9.25</td>
<td>49.0</td>
</tr>
<tr>
<th>5</th>
<td>Vicky</td>
<td>1.00</td>
<td>3.0</td>
</tr>
<tr>
<th>6</th>
<td>Frederic</td>
<td>11.50</td>
<td>53.0</td>
</tr>
<tr>
<th>7</th>
<td>Jimmie</td>
<td>9.00</td>
<td>42.0</td>
</tr>
<tr>
<th>8</th>
<td>Rhonda</td>
<td>8.50</td>
<td>26.0</td>
</tr>
<tr>
<th>9</th>
<td>Giovanni</td>
<td>14.50</td>
<td>74.0</td>
</tr>
<tr>
<th>10</th>
<td>Francesca</td>
<td>15.50</td>
<td>82.0</td>
</tr>
<tr>
<th>11</th>
<td>Rajab</td>
<td>13.75</td>
<td>62.0</td>
</tr>
<tr>
<th>12</th>
<td>Naiyana</td>
<td>9.00</td>
<td>37.0</td>
</tr>
<tr>
<th>13</th>
<td>Kian</td>
<td>8.00</td>
<td>15.0</td>
</tr>
<tr>
<th>14</th>
<td>Jenny</td>
<td>15.50</td>
<td>70.0</td>
</tr>
<tr>
<th>15</th>
<td>Jakeem</td>
<td>8.00</td>
<td>27.0</td>
</tr>
<tr>
<th>16</th>
<td>Helena</td>
<td>9.00</td>
<td>36.0</td>
</tr>
<tr>
<th>17</th>
<td>Ismat</td>
<td>6.00</td>
<td>35.0</td>
</tr>
<tr>
<th>18</th>
<td>Anila</td>
<td>10.00</td>
<td>48.0</td>
</tr>
<tr>
<th>19</th>
<td>Skye</td>
<td>12.00</td>
<td>52.0</td>
</tr>
<tr>
<th>20</th>
<td>Daniel</td>
<td>12.50</td>
<td>63.0</td>
</tr>
<tr>
<th>21</th>
<td>Aisha</td>
<td>12.00</td>
<td>64.0</td>
</tr>
</tbody>
</table>
</div>
Note that in addition to the columns you specified, the DataFrame includes an *index* to uniquely identify each row. We could have specified the index explicitly, and assigned any kind of appropriate value (for example, an email address); but because we didn't specify an index, one has been created with a unique integer value for each row.
### Finding and filtering data in a DataFrame
You can use the DataFrame's **loc** method to retrieve data for a specific index value, like this.
```python
# Get the data for index value 5
df_students.loc[5]
```
Name Vicky
StudyHours 1.0
Grade 3.0
Name: 5, dtype: object
You can also get the data at a range of index values, like this:
```python
# Get the rows with index values from 0 to 5
df_students.loc[0:5]
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>Dan</td>
<td>10.00</td>
<td>50.0</td>
</tr>
<tr>
<th>1</th>
<td>Joann</td>
<td>11.50</td>
<td>50.0</td>
</tr>
<tr>
<th>2</th>
<td>Pedro</td>
<td>9.00</td>
<td>47.0</td>
</tr>
<tr>
<th>3</th>
<td>Rosie</td>
<td>16.00</td>
<td>97.0</td>
</tr>
<tr>
<th>4</th>
<td>Ethan</td>
<td>9.25</td>
<td>49.0</td>
</tr>
<tr>
<th>5</th>
<td>Vicky</td>
<td>1.00</td>
<td>3.0</td>
</tr>
</tbody>
</table>
</div>
In addition to being able to use the **loc** method to find rows based on the index, you can use the **iloc** method to find rows based on their ordinal position in the DataFrame (regardless of the index):
```python
# Get data in the first five rows
df_students.iloc[0:5]
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>Dan</td>
<td>10.00</td>
<td>50.0</td>
</tr>
<tr>
<th>1</th>
<td>Joann</td>
<td>11.50</td>
<td>50.0</td>
</tr>
<tr>
<th>2</th>
<td>Pedro</td>
<td>9.00</td>
<td>47.0</td>
</tr>
<tr>
<th>3</th>
<td>Rosie</td>
<td>16.00</td>
<td>97.0</td>
</tr>
<tr>
<th>4</th>
<td>Ethan</td>
<td>9.25</td>
<td>49.0</td>
</tr>
</tbody>
</table>
</div>
Look carefully at the `iloc[0:5]` results, and compare them to the `loc[0:5]` results you obtained previously. Can you spot the difference?
The **loc** method returned rows with index *label* in the list of values from *0* to *5* - which includes *0*, *1*, *2*, *3*, *4*, and *5* (six rows). However, the **iloc** method returns the rows in the *positions* included in the range 0 to 5, and since integer ranges don't include the upper-bound value, this includes positions *0*, *1*, *2*, *3*, and *4* (five rows).
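If you'd like to confirm that difference for yourself, a quick sketch like this (not part of the original exercise) makes it explicit by counting the rows each method returns:
```python
# loc slices by index label and includes both endpoints (6 rows);
# iloc slices by position and excludes the upper bound (5 rows).
print('loc[0:5] returns', len(df_students.loc[0:5]), 'rows')
print('iloc[0:5] returns', len(df_students.iloc[0:5]), 'rows')
```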
**iloc** identifies data values in a DataFrame by *position*, which extends beyond rows to columns. So for example, you can use it to find the values for the columns in positions 1 and 2 in row 0, like this:
```python
df_students.iloc[0,[1,2]]
```
StudyHours 10.0
Grade 50.0
Name: 0, dtype: object
Let's return to the **loc** method, and see how it works with columns. Remember that **loc** is used to locate data items based on index values rather than positions. In the absence of an explicit index column, the rows in our dataframe are indexed as integer values, but the columns are identified by name:
```python
df_students.loc[0,'Grade']
```
50.0
Here's another useful trick. You can use the **loc** method to find indexed rows based on a filtering expression that references named columns other than the index, like this:
```python
df_students.loc[df_students['Name']=='Aisha']
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>21</th>
<td>Aisha</td>
<td>12.0</td>
<td>64.0</td>
</tr>
</tbody>
</table>
</div>
Actually, you don't need to explicitly use the **loc** method to do this - you can simply apply a DataFrame filtering expression, like this:
```python
df_students[df_students['Name']=='Aisha']
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>21</th>
<td>Aisha</td>
<td>12.0</td>
<td>64.0</td>
</tr>
</tbody>
</table>
</div>
And for good measure, you can achieve the same results by using the DataFrame's **query** method, like this:
```python
df_students.query('Name=="Aisha"')
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>21</th>
<td>Aisha</td>
<td>12.0</td>
<td>64.0</td>
</tr>
</tbody>
</table>
</div>
The three previous examples underline an occasionally confusing truth about working with Pandas. Often, there are multiple ways to achieve the same results. Another example of this is the way you refer to a DataFrame column name. You can specify the column name as a named index value (as in the `df_students['Name']` examples we've seen so far), or you can use the column as a property of the DataFrame, like this:
```python
df_students[df_students.Name == 'Aisha']
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>21</th>
<td>Aisha</td>
<td>12.0</td>
<td>64.0</td>
</tr>
</tbody>
</table>
</div>
### Loading a DataFrame from a file
We constructed the DataFrame from some existing arrays. However, in many real-world scenarios, data is loaded from sources such as files. Let's replace the student grades DataFrame with the contents of a text file.
```python
df_students = pd.read_csv('data/grades.csv',delimiter=',',header='infer')
df_students.head()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>Dan</td>
<td>10.00</td>
<td>50.0</td>
</tr>
<tr>
<th>1</th>
<td>Joann</td>
<td>11.50</td>
<td>50.0</td>
</tr>
<tr>
<th>2</th>
<td>Pedro</td>
<td>9.00</td>
<td>47.0</td>
</tr>
<tr>
<th>3</th>
<td>Rosie</td>
<td>16.00</td>
<td>97.0</td>
</tr>
<tr>
<th>4</th>
<td>Ethan</td>
<td>9.25</td>
<td>49.0</td>
</tr>
</tbody>
</table>
</div>
The DataFrame's **read_csv** method is used to load data from text files. As you can see in the example code, you can specify options such as the column delimiter and which row (if any) contains column headers (in this case, the delimiter is a comma and the first row contains the column names - these are the default settings, so the parameters could have been omitted).
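For reference, here's a hedged sketch of a couple of variations (the **df_check** variable is just a throwaway name so the DataFrame loaded above isn't disturbed, and the extra **na_values** parameter isn't needed for this particular file):
```python
# Minimal equivalent call - a comma delimiter and an inferred header row are the defaults
df_check = pd.read_csv('data/grades.csv')
# For messier files, options such as na_values let you declare which strings
# should be treated as missing values (illustrative only for this dataset)
df_check = pd.read_csv('data/grades.csv', na_values=['NA', ''])
df_check.head()
```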
### Handling missing values
One of the most common issues data scientists need to deal with is incomplete or missing data. So how would we know that the DataFrame contains missing values? You can use the **isnull** method to identify which individual values are null, like this:
```python
df_students.isnull()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>1</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>2</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>3</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>4</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>5</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>6</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>7</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>8</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>9</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>10</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>11</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>12</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>13</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>14</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>15</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>16</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>17</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>18</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>19</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>20</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>21</th>
<td>False</td>
<td>False</td>
<td>False</td>
</tr>
<tr>
<th>22</th>
<td>False</td>
<td>False</td>
<td>True</td>
</tr>
<tr>
<th>23</th>
<td>False</td>
<td>True</td>
<td>True</td>
</tr>
</tbody>
</table>
</div>
Of course, with a larger DataFrame, it would be inefficient to review all of the rows and columns individually; so we can get the sum of missing values for each column, like this:
```python
df_students.isnull().sum()
```
Name 0
StudyHours 1
Grade 2
dtype: int64
So now we know that there's one missing **StudyHours** value, and two missing **Grade** values.
To see them in context, we can filter the dataframe to include only rows where any of the columns (axis 1 of the DataFrame) are null.
```python
df_students[df_students.isnull().any(axis=1)]
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>22</th>
<td>Bill</td>
<td>8.0</td>
<td>NaN</td>
</tr>
<tr>
<th>23</th>
<td>Ted</td>
<td>NaN</td>
<td>NaN</td>
</tr>
</tbody>
</table>
</div>
When the DataFrame is retrieved, the missing numeric values show up as **NaN** (*not a number*).
So now that we've found the null values, what can we do about them?
One common approach is to *impute* replacement values. For example, if the number of study hours is missing, we could just assume that the student studied for an average amount of time and replace the missing value with the mean study hours. To do this, we can use the **fillna** method, like this:
```python
df_students.StudyHours = df_students.StudyHours.fillna(df_students.StudyHours.mean())
df_students
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>Dan</td>
<td>10.000000</td>
<td>50.0</td>
</tr>
<tr>
<th>1</th>
<td>Joann</td>
<td>11.500000</td>
<td>50.0</td>
</tr>
<tr>
<th>2</th>
<td>Pedro</td>
<td>9.000000</td>
<td>47.0</td>
</tr>
<tr>
<th>3</th>
<td>Rosie</td>
<td>16.000000</td>
<td>97.0</td>
</tr>
<tr>
<th>4</th>
<td>Ethan</td>
<td>9.250000</td>
<td>49.0</td>
</tr>
<tr>
<th>5</th>
<td>Vicky</td>
<td>1.000000</td>
<td>3.0</td>
</tr>
<tr>
<th>6</th>
<td>Frederic</td>
<td>11.500000</td>
<td>53.0</td>
</tr>
<tr>
<th>7</th>
<td>Jimmie</td>
<td>9.000000</td>
<td>42.0</td>
</tr>
<tr>
<th>8</th>
<td>Rhonda</td>
<td>8.500000</td>
<td>26.0</td>
</tr>
<tr>
<th>9</th>
<td>Giovanni</td>
<td>14.500000</td>
<td>74.0</td>
</tr>
<tr>
<th>10</th>
<td>Francesca</td>
<td>15.500000</td>
<td>82.0</td>
</tr>
<tr>
<th>11</th>
<td>Rajab</td>
<td>13.750000</td>
<td>62.0</td>
</tr>
<tr>
<th>12</th>
<td>Naiyana</td>
<td>9.000000</td>
<td>37.0</td>
</tr>
<tr>
<th>13</th>
<td>Kian</td>
<td>8.000000</td>
<td>15.0</td>
</tr>
<tr>
<th>14</th>
<td>Jenny</td>
<td>15.500000</td>
<td>70.0</td>
</tr>
<tr>
<th>15</th>
<td>Jakeem</td>
<td>8.000000</td>
<td>27.0</td>
</tr>
<tr>
<th>16</th>
<td>Helena</td>
<td>9.000000</td>
<td>36.0</td>
</tr>
<tr>
<th>17</th>
<td>Ismat</td>
<td>6.000000</td>
<td>35.0</td>
</tr>
<tr>
<th>18</th>
<td>Anila</td>
<td>10.000000</td>
<td>48.0</td>
</tr>
<tr>
<th>19</th>
<td>Skye</td>
<td>12.000000</td>
<td>52.0</td>
</tr>
<tr>
<th>20</th>
<td>Daniel</td>
<td>12.500000</td>
<td>63.0</td>
</tr>
<tr>
<th>21</th>
<td>Aisha</td>
<td>12.000000</td>
<td>64.0</td>
</tr>
<tr>
<th>22</th>
<td>Bill</td>
<td>8.000000</td>
<td>NaN</td>
</tr>
<tr>
<th>23</th>
<td>Ted</td>
<td>10.413043</td>
<td>NaN</td>
</tr>
</tbody>
</table>
</div>
Alternatively, it might be important to ensure that you only use data you know to be absolutely correct; so you can drop rows or columns that contain null values by using the **dropna** method. In this case, we'll remove rows (axis 0 of the DataFrame) where any of the columns contain null values.
```python
df_students = df_students.dropna(axis=0, how='any')
df_students
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>Dan</td>
<td>10.00</td>
<td>50.0</td>
</tr>
<tr>
<th>1</th>
<td>Joann</td>
<td>11.50</td>
<td>50.0</td>
</tr>
<tr>
<th>2</th>
<td>Pedro</td>
<td>9.00</td>
<td>47.0</td>
</tr>
<tr>
<th>3</th>
<td>Rosie</td>
<td>16.00</td>
<td>97.0</td>
</tr>
<tr>
<th>4</th>
<td>Ethan</td>
<td>9.25</td>
<td>49.0</td>
</tr>
<tr>
<th>5</th>
<td>Vicky</td>
<td>1.00</td>
<td>3.0</td>
</tr>
<tr>
<th>6</th>
<td>Frederic</td>
<td>11.50</td>
<td>53.0</td>
</tr>
<tr>
<th>7</th>
<td>Jimmie</td>
<td>9.00</td>
<td>42.0</td>
</tr>
<tr>
<th>8</th>
<td>Rhonda</td>
<td>8.50</td>
<td>26.0</td>
</tr>
<tr>
<th>9</th>
<td>Giovanni</td>
<td>14.50</td>
<td>74.0</td>
</tr>
<tr>
<th>10</th>
<td>Francesca</td>
<td>15.50</td>
<td>82.0</td>
</tr>
<tr>
<th>11</th>
<td>Rajab</td>
<td>13.75</td>
<td>62.0</td>
</tr>
<tr>
<th>12</th>
<td>Naiyana</td>
<td>9.00</td>
<td>37.0</td>
</tr>
<tr>
<th>13</th>
<td>Kian</td>
<td>8.00</td>
<td>15.0</td>
</tr>
<tr>
<th>14</th>
<td>Jenny</td>
<td>15.50</td>
<td>70.0</td>
</tr>
<tr>
<th>15</th>
<td>Jakeem</td>
<td>8.00</td>
<td>27.0</td>
</tr>
<tr>
<th>16</th>
<td>Helena</td>
<td>9.00</td>
<td>36.0</td>
</tr>
<tr>
<th>17</th>
<td>Ismat</td>
<td>6.00</td>
<td>35.0</td>
</tr>
<tr>
<th>18</th>
<td>Anila</td>
<td>10.00</td>
<td>48.0</td>
</tr>
<tr>
<th>19</th>
<td>Skye</td>
<td>12.00</td>
<td>52.0</td>
</tr>
<tr>
<th>20</th>
<td>Daniel</td>
<td>12.50</td>
<td>63.0</td>
</tr>
<tr>
<th>21</th>
<td>Aisha</td>
<td>12.00</td>
<td>64.0</td>
</tr>
</tbody>
</table>
</div>
### Exploring data in the DataFrame
Now that we've cleaned up the missing values, we're ready to explore the data in the DataFrame. Let's start by comparing the mean study hours and grades.
```python
# Get the mean study hours using the column name as an index
mean_study = df_students['StudyHours'].mean()
# Get the mean grade using the column name as a property (just to make the point!)
mean_grade = df_students.Grade.mean()
# Print the mean study hours and mean grade
print('Average weekly study hours: {:.2f}\nAverage grade: {:.2f}'.format(mean_study, mean_grade))
```
Average weekly study hours: 10.52
Average grade: 49.18
OK, let's filter the DataFrame to find only the students who studied for more than the average amount of time.
```python
# Get students who studied for more than the mean number of hours
df_students[df_students.StudyHours > mean_study]
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>1</th>
<td>Joann</td>
<td>11.50</td>
<td>50.0</td>
</tr>
<tr>
<th>3</th>
<td>Rosie</td>
<td>16.00</td>
<td>97.0</td>
</tr>
<tr>
<th>6</th>
<td>Frederic</td>
<td>11.50</td>
<td>53.0</td>
</tr>
<tr>
<th>9</th>
<td>Giovanni</td>
<td>14.50</td>
<td>74.0</td>
</tr>
<tr>
<th>10</th>
<td>Francesca</td>
<td>15.50</td>
<td>82.0</td>
</tr>
<tr>
<th>11</th>
<td>Rajab</td>
<td>13.75</td>
<td>62.0</td>
</tr>
<tr>
<th>14</th>
<td>Jenny</td>
<td>15.50</td>
<td>70.0</td>
</tr>
<tr>
<th>19</th>
<td>Skye</td>
<td>12.00</td>
<td>52.0</td>
</tr>
<tr>
<th>20</th>
<td>Daniel</td>
<td>12.50</td>
<td>63.0</td>
</tr>
<tr>
<th>21</th>
<td>Aisha</td>
<td>12.00</td>
<td>64.0</td>
</tr>
</tbody>
</table>
</div>
Note that the filtered result is itself a DataFrame, so you can work with its columns just like any other DataFrame.
For example, let's find the average grade for students who undertook more than the average amount of study time.
```python
# What was their mean grade?
df_students[df_students.StudyHours > mean_study].Grade.mean()
```
66.7
Let's assume that the passing grade for the course is 60.
We can use that information to add a new column to the DataFrame, indicating whether or not each student passed.
First, we'll create a Pandas **Series** containing the pass/fail indicator (True or False), and then we'll concatenate that series as a new column (axis 1) in the DataFrame.
```python
passes = pd.Series(df_students['Grade'] >= 60)
df_students = pd.concat([df_students, passes.rename("Pass")], axis=1)
df_students
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
<th>Pass</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>Dan</td>
<td>10.00</td>
<td>50.0</td>
<td>False</td>
</tr>
<tr>
<th>1</th>
<td>Joann</td>
<td>11.50</td>
<td>50.0</td>
<td>False</td>
</tr>
<tr>
<th>2</th>
<td>Pedro</td>
<td>9.00</td>
<td>47.0</td>
<td>False</td>
</tr>
<tr>
<th>3</th>
<td>Rosie</td>
<td>16.00</td>
<td>97.0</td>
<td>True</td>
</tr>
<tr>
<th>4</th>
<td>Ethan</td>
<td>9.25</td>
<td>49.0</td>
<td>False</td>
</tr>
<tr>
<th>5</th>
<td>Vicky</td>
<td>1.00</td>
<td>3.0</td>
<td>False</td>
</tr>
<tr>
<th>6</th>
<td>Frederic</td>
<td>11.50</td>
<td>53.0</td>
<td>False</td>
</tr>
<tr>
<th>7</th>
<td>Jimmie</td>
<td>9.00</td>
<td>42.0</td>
<td>False</td>
</tr>
<tr>
<th>8</th>
<td>Rhonda</td>
<td>8.50</td>
<td>26.0</td>
<td>False</td>
</tr>
<tr>
<th>9</th>
<td>Giovanni</td>
<td>14.50</td>
<td>74.0</td>
<td>True</td>
</tr>
<tr>
<th>10</th>
<td>Francesca</td>
<td>15.50</td>
<td>82.0</td>
<td>True</td>
</tr>
<tr>
<th>11</th>
<td>Rajab</td>
<td>13.75</td>
<td>62.0</td>
<td>True</td>
</tr>
<tr>
<th>12</th>
<td>Naiyana</td>
<td>9.00</td>
<td>37.0</td>
<td>False</td>
</tr>
<tr>
<th>13</th>
<td>Kian</td>
<td>8.00</td>
<td>15.0</td>
<td>False</td>
</tr>
<tr>
<th>14</th>
<td>Jenny</td>
<td>15.50</td>
<td>70.0</td>
<td>True</td>
</tr>
<tr>
<th>15</th>
<td>Jakeem</td>
<td>8.00</td>
<td>27.0</td>
<td>False</td>
</tr>
<tr>
<th>16</th>
<td>Helena</td>
<td>9.00</td>
<td>36.0</td>
<td>False</td>
</tr>
<tr>
<th>17</th>
<td>Ismat</td>
<td>6.00</td>
<td>35.0</td>
<td>False</td>
</tr>
<tr>
<th>18</th>
<td>Anila</td>
<td>10.00</td>
<td>48.0</td>
<td>False</td>
</tr>
<tr>
<th>19</th>
<td>Skye</td>
<td>12.00</td>
<td>52.0</td>
<td>False</td>
</tr>
<tr>
<th>20</th>
<td>Daniel</td>
<td>12.50</td>
<td>63.0</td>
<td>True</td>
</tr>
<tr>
<th>21</th>
<td>Aisha</td>
<td>12.00</td>
<td>64.0</td>
<td>True</td>
</tr>
</tbody>
</table>
</div>
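As an aside, the same column can be added more concisely by assigning the Boolean expression directly to a new column name - a common Pandas idiom. Running this sketch simply recreates the **Pass** column that was added above:
```python
# Equivalent, more concise approach: assign the Boolean expression straight to a new column
df_students['Pass'] = df_students['Grade'] >= 60
df_students.head()
```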
DataFrames are designed for tabular data, and you can use them to perform many of the kinds of data analytics operations you can do in a relational database, such as grouping and aggregating tables of data.
For example, you can use the **groupby** method to group the student data into groups based on the **Pass** column you added previously, and count the number of names in each group - in other words, you can determine how many students passed and failed.
```python
print(df_students.groupby(df_students.Pass).Name.count())
```
Pass
False 15
True 7
Name: Name, dtype: int64
You can aggregate multiple fields in a group using any available aggregation function. For example, you can find the mean study time and grade for the groups of students who passed and failed the course.
```python
print(df_students.groupby(df_students.Pass)[['StudyHours', 'Grade']].mean())
```
StudyHours Grade
Pass
False 8.783333 38.000000
True 14.250000 73.142857
DataFrames are amazingly versatile, and make it easy to manipulate data. Many DataFrame operations return a new copy of the DataFrame; so if you want to modify a DataFrame but keep the existing variable, you need to assign the result of the operation to the existing variable. For example, the following code sorts the student data into descending order of Grade, and assigns the resulting sorted DataFrame to the original **df_students** variable.
```python
# Create a DataFrame with the data sorted by Grade (descending)
df_students = df_students.sort_values('Grade', ascending=False)
# Show the DataFrame
df_students
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
<th>Pass</th>
</tr>
</thead>
<tbody>
<tr>
<th>3</th>
<td>Rosie</td>
<td>16.00</td>
<td>97.0</td>
<td>True</td>
</tr>
<tr>
<th>10</th>
<td>Francesca</td>
<td>15.50</td>
<td>82.0</td>
<td>True</td>
</tr>
<tr>
<th>9</th>
<td>Giovanni</td>
<td>14.50</td>
<td>74.0</td>
<td>True</td>
</tr>
<tr>
<th>14</th>
<td>Jenny</td>
<td>15.50</td>
<td>70.0</td>
<td>True</td>
</tr>
<tr>
<th>21</th>
<td>Aisha</td>
<td>12.00</td>
<td>64.0</td>
<td>True</td>
</tr>
<tr>
<th>20</th>
<td>Daniel</td>
<td>12.50</td>
<td>63.0</td>
<td>True</td>
</tr>
<tr>
<th>11</th>
<td>Rajab</td>
<td>13.75</td>
<td>62.0</td>
<td>True</td>
</tr>
<tr>
<th>6</th>
<td>Frederic</td>
<td>11.50</td>
<td>53.0</td>
<td>False</td>
</tr>
<tr>
<th>19</th>
<td>Skye</td>
<td>12.00</td>
<td>52.0</td>
<td>False</td>
</tr>
<tr>
<th>1</th>
<td>Joann</td>
<td>11.50</td>
<td>50.0</td>
<td>False</td>
</tr>
<tr>
<th>0</th>
<td>Dan</td>
<td>10.00</td>
<td>50.0</td>
<td>False</td>
</tr>
<tr>
<th>4</th>
<td>Ethan</td>
<td>9.25</td>
<td>49.0</td>
<td>False</td>
</tr>
<tr>
<th>18</th>
<td>Anila</td>
<td>10.00</td>
<td>48.0</td>
<td>False</td>
</tr>
<tr>
<th>2</th>
<td>Pedro</td>
<td>9.00</td>
<td>47.0</td>
<td>False</td>
</tr>
<tr>
<th>7</th>
<td>Jimmie</td>
<td>9.00</td>
<td>42.0</td>
<td>False</td>
</tr>
<tr>
<th>12</th>
<td>Naiyana</td>
<td>9.00</td>
<td>37.0</td>
<td>False</td>
</tr>
<tr>
<th>16</th>
<td>Helena</td>
<td>9.00</td>
<td>36.0</td>
<td>False</td>
</tr>
<tr>
<th>17</th>
<td>Ismat</td>
<td>6.00</td>
<td>35.0</td>
<td>False</td>
</tr>
<tr>
<th>15</th>
<td>Jakeem</td>
<td>8.00</td>
<td>27.0</td>
<td>False</td>
</tr>
<tr>
<th>8</th>
<td>Rhonda</td>
<td>8.50</td>
<td>26.0</td>
<td>False</td>
</tr>
<tr>
<th>13</th>
<td>Kian</td>
<td>8.00</td>
<td>15.0</td>
<td>False</td>
</tr>
<tr>
<th>5</th>
<td>Vicky</td>
<td>1.00</td>
<td>3.0</td>
<td>False</td>
</tr>
</tbody>
</table>
</div>
## Visualizing data with Matplotlib
DataFrames provide a great way to explore and analyze tabular data, but sometimes a picture is worth a thousand rows and columns. The **Matplotlib** library provides the foundation for plotting data visualizations that can greatly enhance your ability to analyze the data.
Let's start with a simple bar chart that shows the grade of each student.
```python
# Ensure plots are displayed inline in the notebook
%matplotlib inline
from matplotlib import pyplot as plt
# Create a bar plot of name vs grade
plt.bar(x=df_students.Name, height=df_students.Grade)
# Display the plot
plt.show()
```
Well, that worked; but the chart could use some improvements to make it clearer what we're looking at.
Note that you used the **pyplot** class from Matplotlib to plot the chart. This class provides a whole bunch of ways to improve the visual elements of the plot. For example, the following code:
- Specifies the color of the bar chart.
- Adds a title to the chart (so we know what it represents)
- Adds labels to the X and Y axes (so we know which axis shows which data)
- Adds a grid (to make it easier to determine the values for the bars)
- Rotates the X markers (so we can read them)
```python
# Create a bar plot of name vs grade
plt.bar(x=df_students.Name, height=df_students.Grade, color='orange')
# Customize the chart
plt.title('Student Grades')
plt.xlabel('Student')
plt.ylabel('Grade')
plt.grid(color='#95a5a6', linestyle='--', linewidth=2, axis='y', alpha=0.7)
plt.xticks(rotation=90)
# Display the plot
plt.show()
```
A plot is technically contained within a **Figure**. In the previous examples, the figure was created implicitly for you; but you can create it explicitly. For example, the following code creates a figure with a specific size.
```python
# Create a Figure
fig = plt.figure(figsize=(8,3))
# Create a bar plot of name vs grade
plt.bar(x=df_students.Name, height=df_students.Grade, color='orange')
# Customize the chart
plt.title('Student Grades')
plt.xlabel('Student')
plt.ylabel('Grade')
plt.grid(color='#95a5a6', linestyle='--', linewidth=2, axis='y', alpha=0.7)
plt.xticks(rotation=90)
# Show the figure
plt.show()
```
A figure can contain multiple subplots, each on its own *axis*.
For example, the following code creates a figure with two subplots - one is a bar chart showing student grades, and the other is a pie chart comparing the number of passing grades to non-passing grades.
```python
# Create a figure for 2 subplots (1 row, 2 columns)
fig, ax = plt.subplots(1, 2, figsize = (10,4))
# Create a bar plot of name vs grade on the first axis
ax[0].bar(x=df_students.Name, height=df_students.Grade, color='orange')
ax[0].set_title('Grades')
ax[0].set_xticklabels(df_students.Name, rotation=90)
# Create a pie chart of pass counts on the second axis
pass_counts = df_students['Pass'].value_counts()
ax[1].pie(pass_counts, labels=pass_counts)
ax[1].set_title('Passing Grades')
ax[1].legend(pass_counts.keys().tolist())
# Add a title to the Figure
fig.suptitle('Student Data')
# Show the figure
fig.show()
```
Until now, you've used methods of the Matplotlib.pyplot object to plot charts. However, Matplotlib is so foundational to graphics in Python that many packages, including Pandas, provide methods that abstract the underlying Matplotlib functions and simplify plotting. For example, the DataFrame provides its own methods for plotting data, as shown in the following example to plot a bar chart of study hours.
```python
df_students.plot.bar(x='Name', y='StudyHours', color='Blue', figsize=(6,4))
```
## Getting started with statistical analysis
Now that you know how to use Python to manipulate and visualize data, you can start analyzing it.
A lot of data science is rooted in *statistics*, so we'll explore some basic statistical techniques.
> **Note**: This is not intended to teach you statistics - that's much too big a topic for this notebook. It will however introduce you to some statistical concepts and techniques that data scientists use as they explore data in preparation for machine learning modeling.
### Descriptive statistics and data distribution
When examining a *variable* (for example a sample of student grades), data scientists are particularly interested in its *distribution* (in other words, how all the different grade values are spread across the sample). The starting point for this exploration is often to visualize the data as a histogram, and see how frequently each value for the variable occurs.
```python
# Get the variable to examine
var_data = df_students['Grade']
# Create a Figure
fig = plt.figure(figsize=(10,4))
# Plot a histogram
plt.hist(var_data)
# Add titles and labels
plt.title('Data Distribution')
plt.xlabel('Value')
plt.ylabel('Frequency')
# Show the figure
fig.show()
```
The histogram for grades is a symmetric shape, where the most frequently occurring grades tend to be in the middle of the range (around 50), with fewer grades at the extreme ends of the scale.
#### Measures of central tendency
To understand the distribution better, we can examine so-called *measures of central tendency*; which is a fancy way of describing statistics that represent the "middle" of the data. The goal of this is to try to find a "typical" value. Common ways to define the middle of the data include:
- The *mean*: A simple average based on adding together all of the values in the sample set, and then dividing the total by the number of samples.
- The *median*: The value in the middle of the range of all of the sample values.
- The *mode*: The most commonly occurring value in the sample set<sup>\*</sup>.
Let's calculate these values, along with the minimum and maximum values for comparison, and show them on the histogram.
> <sup>\*</sup>Of course, in some sample sets, there may be a tie for the most common value - in which case the dataset is described as *bimodal* or even *multimodal*.
```python
# Get the variable to examine
var = df_students['Grade']
# Get statistics
min_val = var.min()
max_val = var.max()
mean_val = var.mean()
med_val = var.median()
mod_val = var.mode()[0]
print('Minimum:{:.2f}\nMean:{:.2f}\nMedian:{:.2f}\nMode:{:.2f}\nMaximum:{:.2f}\n'.format(min_val,
mean_val,
med_val,
mod_val,
max_val))
# Create a Figure
fig = plt.figure(figsize=(10,4))
# Plot a histogram
plt.hist(var)
# Add lines for the statistics
plt.axvline(x=min_val, color = 'gray', linestyle='dashed', linewidth = 2)
plt.axvline(x=mean_val, color = 'cyan', linestyle='dashed', linewidth = 2)
plt.axvline(x=med_val, color = 'red', linestyle='dashed', linewidth = 2)
plt.axvline(x=mod_val, color = 'yellow', linestyle='dashed', linewidth = 2)
plt.axvline(x=max_val, color = 'gray', linestyle='dashed', linewidth = 2)
# Add titles and labels
plt.title('Data Distribution')
plt.xlabel('Value')
plt.ylabel('Frequency')
# Show the figure
fig.show()
```
For the grade data, the mean, median, and mode all seem to be more or less in the middle of the minimum and maximum, at around 50.
Another way to visualize the distribution of a variable is to use a *box* plot (sometimes called a *box-and-whiskers* plot). Let's create one for the grade data.
```python
# Get the variable to examine
var = df_students['Grade']
# Create a Figure
fig = plt.figure(figsize=(10,4))
# Plot a histogram
plt.boxplot(var)
# Add titles and labels
plt.title('Data Distribution')
# Show the figure
fig.show()
```
The box plot shows the distribution of the grade values in a different format to the histogram. The *box* part of the plot shows where the inner two *quartiles* of the data reside - so in this case, half of the grades are between approximately 36 and 63. The *whiskers* extending from the box show the outer two quartiles; so the other half of the grades in this case are between 0 and 36 or 63 and 100. The line in the box indicates the *median* value.
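If you want to check those quartile boundaries numerically, the **quantile** method returns them directly (a quick sketch; the exact figures may differ slightly from the approximate values quoted above):
```python
# The 25th, 50th (median), and 75th percentiles of the grade data
df_students['Grade'].quantile([0.25, 0.5, 0.75])
```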
It's often useful to combine histograms and box plots, with the box plot's orientation changed to align it with the histogram (in some ways, it can be helpful to think of the histogram as a "front elevation" view of the distribution, and the box plot as a "plan" view of the distribution from above.)
```python
# Create a function that we can re-use
def show_distribution(var_data):
from matplotlib import pyplot as plt
# Get statistics
min_val = var_data.min()
max_val = var_data.max()
mean_val = var_data.mean()
med_val = var_data.median()
mod_val = var_data.mode()[0]
print('Minimum:{:.2f}\nMean:{:.2f}\nMedian:{:.2f}\nMode:{:.2f}\nMaximum:{:.2f}\n'.format(min_val,
mean_val,
med_val,
mod_val,
max_val))
# Create a figure for 2 subplots (2 rows, 1 column)
fig, ax = plt.subplots(2, 1, figsize = (10,4))
# Plot the histogram
ax[0].hist(var_data)
ax[0].set_ylabel('Frequency')
# Add lines for the mean, median, and mode
ax[0].axvline(x=min_val, color = 'gray', linestyle='dashed', linewidth = 2)
ax[0].axvline(x=mean_val, color = 'cyan', linestyle='dashed', linewidth = 2)
ax[0].axvline(x=med_val, color = 'red', linestyle='dashed', linewidth = 2)
ax[0].axvline(x=mod_val, color = 'yellow', linestyle='dashed', linewidth = 2)
ax[0].axvline(x=max_val, color = 'gray', linestyle='dashed', linewidth = 2)
# Plot the boxplot
ax[1].boxplot(var_data, vert=False)
ax[1].set_xlabel('Value')
# Add a title to the Figure
fig.suptitle('Data Distribution')
# Show the figure
fig.show()
# Get the variable to examine
col = df_students['Grade']
# Call the function
show_distribution(col)
```
All of the measures of central tendency are right in the middle of the data distribution, which is symmetric with values becoming progressively lower in both directions from the middle.
To explore this distribution in more detail, you need to understand that statistics is fundamentally about taking *samples* of data and using probability functions to extrapolate information about the full *population* of data. For example, the student data consists of 22 samples, and for each sample there is a grade value. You can think of each sample grade as a variable that's been randomly selected from the set of all grades awarded for this course. With enough of these random variables, you can calculate something called a *probability density function*, which estimates the distribution of grades for the full population.
The Pandas DataFrame class provides a helpful plot function to show this density.
```python
def show_density(var_data):
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(10,4))
# Plot density
var_data.plot.density()
# Add titles and labels
plt.title('Data Density')
# Show the mean, median, and mode
plt.axvline(x=var_data.mean(), color = 'cyan', linestyle='dashed', linewidth = 2)
plt.axvline(x=var_data.median(), color = 'red', linestyle='dashed', linewidth = 2)
plt.axvline(x=var_data.mode()[0], color = 'yellow', linestyle='dashed', linewidth = 2)
# Show the figure
plt.show()
# Get the density of Grade
col = df_students['Grade']
show_density(col)
```
As expected from the histogram of the sample, the density shows the characteristic "bell curve" of what statisticians call a *normal* distribution, with the mean and mode at the center and symmetric tails.
Now let's take a look at the distribution of the study hours data.
```python
# Get the variable to examine
col = df_students['StudyHours']
# Call the function
show_distribution(col)
```
The distribution of the study time data is significantly different from that of the grades.
Note that the lower whisker of the box plot only extends to around 6.0, indicating that the vast majority of the first quartile of the data is above this value. The minimum is marked with an **o**, indicating that it is statistically an *outlier* - a value that lies significantly outside the range of the rest of the distribution.
Outliers can occur for many reasons. Maybe a student meant to record "10" hours of study time, but entered "1" and missed the "0". Or maybe the student was abnormally lazy when it comes to studying! Either way, it's a statistical anomaly that doesn't represent a typical student. Let's see what the distribution looks like without it.
```python
# Get the variable to examine
col = df_students[df_students.StudyHours>1]['StudyHours']
# Call the function
show_distribution(col)
```
In this example, the dataset is small enough to clearly see that the value **1** is an outlier for the **StudyHours** column, so you can exclude it explicitly. In most real-world cases, it's easier to consider outliers as being values that fall below or above percentiles within which most of the data lie. For example, the following code uses the Pandas **quantile** function to exclude observations below the 0.01 quantile (the value above which 99% of the data reside).
```python
q01 = df_students.StudyHours.quantile(0.01)
# Get the variable to examine
col = df_students[df_students.StudyHours>q01]['StudyHours']
# Call the function
show_distribution(col)
```
> **Tip**: You can also eliminate outliers at the upper end of the distribution by defining a threshold at a high percentile value - for example, you could use the **quantile** function to find the 0.99 quantile, below which 99% of the data reside.
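As a hedged sketch, trimming both tails at once might look like the following (the 0.01 and 0.99 thresholds are illustrative choices rather than values prescribed by this exercise). The rest of this section continues to work with the distribution plotted above, with just the low outlier excluded:
```python
# Keep only StudyHours values between the 0.01 and 0.99 quantiles
low = df_students.StudyHours.quantile(0.01)
high = df_students.StudyHours.quantile(0.99)
trimmed = df_students[(df_students.StudyHours > low) & (df_students.StudyHours < high)]['StudyHours']
print('Keeping {} of {} rows (between {:.2f} and {:.2f} hours)'.format(len(trimmed), len(df_students), low, high))
```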
With the outliers removed, the box plot shows all data within the four quartiles. Note that the distribution is not symmetric like it is for the grade data though - there are some students with very high study times of around 16 hours, but the bulk of the data is between 7 and 13 hours. The few extremely high values pull the mean towards the higher end of the scale.
Let's look at the density for this distribution.
```python
# Get the density of StudyHours
show_density(col)
```
This kind of distribution is called *right skewed*. The mass of the data is on the left side of the distribution, creating a long tail to the right because of the values at the extreme high end, which pull the mean to the right.
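Pandas can also quantify this asymmetry. The **skew** method returns a skewness statistic that is positive for right-skewed data and roughly zero for symmetric data - a quick sketch comparing the two columns we've examined:
```python
# Positive skewness indicates a right-skewed distribution; values near zero indicate symmetry
print('Grade skewness:      {:.2f}'.format(df_students['Grade'].skew()))
print('StudyHours skewness: {:.2f}'.format(col.skew()))
```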
#### Measures of variance
So now we have a good idea where the middle of the grade and study hours data distributions are. However, there's another aspect of the distributions we should examine: how much variability is there in the data?
Typical statistics that measure variability in the data include:
- **Range**: The difference between the maximum and minimum. There's no built-in function for this, but it's easy to calculate using the **min** and **max** functions.
- **Variance**: The average of the squared difference from the mean. You can use the built-in **var** function to find this.
- **Standard Deviation**: The square root of the variance. You can use the built-in **std** function to find this.
```python
for col_name in ['Grade','StudyHours']:
col = df_students[col_name]
rng = col.max() - col.min()
var = col.var()
std = col.std()
print('\n{}:\n - Range: {:.2f}\n - Variance: {:.2f}\n - Std.Dev: {:.2f}'.format(col_name, rng, var, std))
```
Grade:
- Range: 94.00
- Variance: 472.54
- Std.Dev: 21.74
StudyHours:
- Range: 15.00
- Variance: 12.16
- Std.Dev: 3.49
Of these statistics, the standard deviation is generally the most useful. It provides a measure of variance in the data on the same scale as the data itself (so grade points for the Grade distribution and hours for the StudyHours distribution). The higher the standard deviation, the more variance there is when comparing values in the distribution to the distribution mean - in other words, the data is more spread out.
When working with a *normal* distribution, the standard deviation works with the particular characteristics of a normal distribution to provide even greater insight. Run the cell below to see the relationship between standard deviations and the data in the normal distribution.
```python
import scipy.stats as stats
# Get the Grade column
col = df_students['Grade']
# get the density
density = stats.gaussian_kde(col)
# Plot the density
col.plot.density()
# Get the mean and standard deviation
s = col.std()
m = col.mean()
# Annotate 1 stdev
x1 = [m-s, m+s]
y1 = density(x1)
plt.plot(x1,y1, color='magenta')
plt.annotate('1 std (68.26%)', (x1[1],y1[1]))
# Annotate 2 stdevs
x2 = [m-(s*2), m+(s*2)]
y2 = density(x2)
plt.plot(x2,y2, color='green')
plt.annotate('2 std (95.45%)', (x2[1],y2[1]))
# Annotate 3 stdevs
x3 = [m-(s*3), m+(s*3)]
y3 = density(x3)
plt.plot(x3,y3, color='orange')
plt.annotate('3 std (99.73%)', (x3[1],y3[1]))
# Show the location of the mean
plt.axvline(col.mean(), color='cyan', linestyle='dashed', linewidth=1)
plt.axis('off')
plt.show()
```
The horizontal lines show the percentage of data within 1, 2, and 3 standard deviations of the mean (plus or minus).
In any normal distribution:
- Approximately 68.26% of values fall within one standard deviation from the mean.
- Approximately 95.45% of values fall within two standard deviations from the mean.
- Approximately 99.73% of values fall within three standard deviations from the mean.
So, since we know that the mean grade is 49.18, the standard deviation is 21.74, and the distribution of grades is approximately normal, we can calculate that 68.26% of students should achieve a grade between 27.44 and 70.92.
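That interval can be computed directly from the statistics we've already calculated - a quick sketch (the results should match the approximate figures above):
```python
# Roughly 68.26% of grades should fall within one standard deviation of the mean
grade_mean = df_students['Grade'].mean()
grade_std = df_students['Grade'].std()
print('Expected range for ~68% of grades: {:.2f} to {:.2f}'.format(grade_mean - grade_std, grade_mean + grade_std))
```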
The descriptive statistics we've used to understand the distribution of the student data variables are the basis of statistical analysis; and because they're such an important part of exploring your data, there's a built-in **describe** method of the DataFrame object that returns the main descriptive statistics for all numeric columns.
```python
df_students.describe()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>StudyHours</th>
<th>Grade</th>
</tr>
</thead>
<tbody>
<tr>
<th>count</th>
<td>22.000000</td>
<td>22.000000</td>
</tr>
<tr>
<th>mean</th>
<td>10.522727</td>
<td>49.181818</td>
</tr>
<tr>
<th>std</th>
<td>3.487144</td>
<td>21.737912</td>
</tr>
<tr>
<th>min</th>
<td>1.000000</td>
<td>3.000000</td>
</tr>
<tr>
<th>25%</th>
<td>9.000000</td>
<td>36.250000</td>
</tr>
<tr>
<th>50%</th>
<td>10.000000</td>
<td>49.500000</td>
</tr>
<tr>
<th>75%</th>
<td>12.375000</td>
<td>62.750000</td>
</tr>
<tr>
<th>max</th>
<td>16.000000</td>
<td>97.000000</td>
</tr>
</tbody>
</table>
</div>
## Comparing data
Now that you know something about the statistical distribution of the data in your dataset, you're ready to examine your data to identify any apparent relationships between variables.
First of all, let's get rid of any rows that contain outliers so that we have a sample that is representative of a typical class of students. We identified that the StudyHours column contains some outliers with extremely low values, so we'll remove those rows.
```python
df_sample = df_students[df_students['StudyHours']>1]
df_sample
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>StudyHours</th>
<th>Grade</th>
<th>Pass</th>
</tr>
</thead>
<tbody>
<tr>
<th>3</th>
<td>Rosie</td>
<td>16.00</td>
<td>97.0</td>
<td>True</td>
</tr>
<tr>
<th>10</th>
<td>Francesca</td>
<td>15.50</td>
<td>82.0</td>
<td>True</td>
</tr>
<tr>
<th>9</th>
<td>Giovanni</td>
<td>14.50</td>
<td>74.0</td>
<td>True</td>
</tr>
<tr>
<th>14</th>
<td>Jenny</td>
<td>15.50</td>
<td>70.0</td>
<td>True</td>
</tr>
<tr>
<th>21</th>
<td>Aisha</td>
<td>12.00</td>
<td>64.0</td>
<td>True</td>
</tr>
<tr>
<th>20</th>
<td>Daniel</td>
<td>12.50</td>
<td>63.0</td>
<td>True</td>
</tr>
<tr>
<th>11</th>
<td>Rajab</td>
<td>13.75</td>
<td>62.0</td>
<td>True</td>
</tr>
<tr>
<th>6</th>
<td>Frederic</td>
<td>11.50</td>
<td>53.0</td>
<td>False</td>
</tr>
<tr>
<th>19</th>
<td>Skye</td>
<td>12.00</td>
<td>52.0</td>
<td>False</td>
</tr>
<tr>
<th>1</th>
<td>Joann</td>
<td>11.50</td>
<td>50.0</td>
<td>False</td>
</tr>
<tr>
<th>0</th>
<td>Dan</td>
<td>10.00</td>
<td>50.0</td>
<td>False</td>
</tr>
<tr>
<th>4</th>
<td>Ethan</td>
<td>9.25</td>
<td>49.0</td>
<td>False</td>
</tr>
<tr>
<th>18</th>
<td>Anila</td>
<td>10.00</td>
<td>48.0</td>
<td>False</td>
</tr>
<tr>
<th>2</th>
<td>Pedro</td>
<td>9.00</td>
<td>47.0</td>
<td>False</td>
</tr>
<tr>
<th>7</th>
<td>Jimmie</td>
<td>9.00</td>
<td>42.0</td>
<td>False</td>
</tr>
<tr>
<th>12</th>
<td>Naiyana</td>
<td>9.00</td>
<td>37.0</td>
<td>False</td>
</tr>
<tr>
<th>16</th>
<td>Helena</td>
<td>9.00</td>
<td>36.0</td>
<td>False</td>
</tr>
<tr>
<th>17</th>
<td>Ismat</td>
<td>6.00</td>
<td>35.0</td>
<td>False</td>
</tr>
<tr>
<th>15</th>
<td>Jakeem</td>
<td>8.00</td>
<td>27.0</td>
<td>False</td>
</tr>
<tr>
<th>8</th>
<td>Rhonda</td>
<td>8.50</td>
<td>26.0</td>
<td>False</td>
</tr>
<tr>
<th>13</th>
<td>Kian</td>
<td>8.00</td>
<td>15.0</td>
<td>False</td>
</tr>
</tbody>
</table>
</div>
### Comparing numeric and categorical variables
The data includes two *numeric* variables (**StudyHours** and **Grade**) and two *categorical* variables (**Name** and **Pass**). Let's start by comparing the numeric **StudyHours** column to the categorical **Pass** column to see if there's an apparent relationship between the number of hours studied and a passing grade.
To make this comparison, let's create box plots showing the distribution of StudyHours for each possible Pass value (true and false).
```python
df_sample.boxplot(column='StudyHours', by='Pass', figsize=(8,5))
```
Comparing the StudyHours distributions, it's immediately apparent (if not particularly surprising) that students who passed the course tended to study for more hours than students who didn't. So if you wanted to predict whether or not a student is likely to pass the course, the amount of time they spend studying may be a good predictive feature.
### Comparing numeric variables
Now let's compare two numeric variables. We'll start by creating a bar chart that shows both grade and study hours.
```python
# Create a bar plot of name vs grade and study hours
df_sample.plot(x='Name', y=['Grade','StudyHours'], kind='bar', figsize=(8,5))
```
The chart shows bars for both grade and study hours for each student; but it's not easy to compare because the values are on different scales. Grades are measured in grade points, and range from 3 to 97; while study time is measured in hours and ranges from 1 to 16.
A common technique when dealing with numeric data in different scales is to *normalize* the data so that the values retain their proportional distribution, but are measured on the same scale. To accomplish this, we'll use a technique called *MinMax* scaling that distributes the values proportionally on a scale of 0 to 1. You could write the code to apply this transformation; but the **Scikit-Learn** library provides a scaler to do it for you.
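For reference, here's a minimal sketch of what MinMax scaling does under the hood, using plain Pandas arithmetic - each value is offset by the column minimum and divided by the column range. The Scikit-Learn scaler used in the next cell performs the same calculation (with some extra conveniences), so this is purely illustrative:
```python
# Manual MinMax scaling: (x - min) / (max - min) maps each column onto a 0-1 scale
numeric_cols = df_sample[['Grade', 'StudyHours']]
manually_scaled = (numeric_cols - numeric_cols.min()) / (numeric_cols.max() - numeric_cols.min())
manually_scaled.head()
```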
```python
from sklearn.preprocessing import MinMaxScaler
# Get a scaler object
scaler = MinMaxScaler()
# Create a new dataframe for the scaled values
df_normalized = df_sample[['Name', 'Grade', 'StudyHours']].copy()
# Normalize the numeric columns
df_normalized[['Grade','StudyHours']] = scaler.fit_transform(df_normalized[['Grade','StudyHours']])
# Plot the normalized values
df_normalized.plot(x='Name', y=['Grade','StudyHours'], kind='bar', figsize=(8,5))
```
With the data normalized, it's easier to see an apparent relationship between grade and study time. It's not an exact match, but it definitely seems like students with higher grades tend to have studied more.
So there seems to be a correlation between study time and grade; and in fact, there's a statistical *correlation* measurement we can use to quantify the relationship between these columns.
```python
df_normalized.Grade.corr(df_normalized.StudyHours)
```
0.9117666413789675
The correlation statistic is a value between -1 and 1 that indicates the strength of a relationship. Values above 0 indicate a *positive* correlation (high values of one variable tend to coincide with high values of the other), while values below 0 indicate a *negative* correlation (high values of one variable tend to coincide with low values of the other). In this case, the correlation value is close to 1; showing a strongly positive correlation between study time and grade.
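When there are more than two numeric columns, it's often convenient to compute all of the pairwise correlations at once with the DataFrame's **corr** method - a quick sketch using the normalized columns from above (correlation is unaffected by MinMax scaling, so the unscaled columns would give the same result):
```python
# Pairwise correlation matrix for the numeric columns
df_normalized[['Grade', 'StudyHours']].corr()
```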
> **Note**: Data scientists often quote the maxim "*correlation* is not *causation*". In other words, as tempting as it might be, you shouldn't interpret the statistical correlation as explaining *why* one of the values is high. In the case of the student data, the statistics demonstrate that students with high grades tend to also have high amounts of study time; but this is not the same as proving that they achieved high grades *because* they studied a lot. The statistic could equally be used as evidence to support the nonsensical conclusion that the students studied a lot *because* their grades were going to be high.
Another way to visualize the apparent correlation between two numeric columns is to use a *scatter* plot.
```python
# Create a scatter plot
df_sample.plot.scatter(title='Study Time vs Grade', x='StudyHours', y='Grade')
```
Again, it looks like there's a discernible pattern in which the students who studied the most hours are also the students who got the highest grades.
We can see this more clearly by adding a *regression* line (or a *line of best fit*) to the plot that shows the general trend in the data. To do this, we'll use a statistical technique called *least squares regression*.
> **Warning - Math Ahead!**
>
> Cast your mind back to when you were learning how to solve linear equations in school, and recall that the *slope-intercept* form of a linear equation looks like this:
>
> \begin{equation}y = mx + b\end{equation}
>
> In this equation, *y* and *x* are the coordinate variables, *m* is the slope of the line, and *b* is the y-intercept (where the line goes through the Y-axis).
>
> In the case of our scatter plot for our student data, we already have our values for *x* (*StudyHours*) and *y* (*Grade*), so we just need to calculate the intercept and slope of the straight line that lies closest to those points. Then we can form a linear equation that calculates a new *y* value on that line for each of our *x* (*StudyHours*) values - to avoid confusion, we'll call this new *y* value *f(x)* (because it's the output from a linear equation ***f***unction based on *x*). The difference between the original *y* (*Grade*) value and the *f(x)* value is the *error* between our regression line and the actual *Grade* achieved by the student. Our goal is to calculate the slope and intercept for a line with the lowest overall error.
>
> Specifically, we define the overall error by taking the error for each point, squaring it, and adding all the squared errors together. The line of best fit is the line that gives us the lowest value for the sum of the squared errors - hence the name *least squares regression*.
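>
> In symbols (adding the explicit objective here for clarity), the fitting problem is
>
> \begin{equation}\min_{m, b} \sum_{i}\left(y_i - (m x_i + b)\right)^2\end{equation}
>
> whose closed-form solution is $m = \frac{\sum_i (x_i - \bar{x})(y_i - \bar{y})}{\sum_i (x_i - \bar{x})^2}$ and $b = \bar{y} - m\bar{x}$, where $\bar{x}$ and $\bar{y}$ are the sample means.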
Fortunately, you don't need to code the regression calculation yourself - the **SciPy** package includes a **stats** class that provides a **linregress** method to do the hard work for you. This returns (among other things) the coefficients you need for the slope equation - slope (*m*) and intercept (*b*) based on a given pair of variable samples you want to compare.
```python
from scipy import stats
# Work with a copy of the columns we need for the regression
df_regression = df_sample[['Grade', 'StudyHours']].copy()
# Get the regression slope and intercept
m, b, r, p, se = stats.linregress(df_regression['StudyHours'], df_regression['Grade'])
print('slope: {:.4f}\ny-intercept: {:.4f}'.format(m,b))
print('so...\n f(x) = {:.4f}x + {:.4f}'.format(m,b))
# Use the function (mx + b) to calculate f(x) for each x (StudyHours) value
df_regression['fx'] = (m * df_regression['StudyHours']) + b
# Calculate the error between f(x) and the actual y (Grade) value
df_regression['error'] = df_regression['fx'] - df_regression['Grade']
# Create a scatter plot of Grade vs StudyHours
df_regression.plot.scatter(x='StudyHours', y='Grade')
# Plot the regression line
plt.plot(df_regression['StudyHours'],df_regression['fx'], color='cyan')
# Display the plot
plt.show()
```
Note that this time, the code plotted two distinct things - the scatter plot of the sample study hours and grades is plotted as before, and then a line of best fit based on the least squares regression coefficients is plotted.
The slope and intercept coefficients calculated for the regression line are shown above the plot.
The line is based on the ***f*(x)** values calculated for each **StudyHours** value. Run the following cell to see a table that includes the following values:
- The **StudyHours** for each student.
- The **Grade** achieved by each student.
- The ***f(x)*** value calculated using the regression line coefficients.
- The *error* between the calculated ***f(x)*** value and the actual **Grade** value.
Some of the errors, particularly at the extreme ends, are quite large (up to over 17.5 grade points); but in general, the line is pretty close to the actual grades.
```python
# Show the original x,y values, the f(x) value, and the error
df_regression[['StudyHours', 'Grade', 'fx', 'error']]
```
| | StudyHours | Grade | fx | error |
|---|---|---|---|---|
| 3 | 16.00 | 97.0 | 83.098400 | -13.901600 |
| 10 | 15.50 | 82.0 | 79.941687 | -2.058313 |
| 9 | 14.50 | 74.0 | 73.628262 | -0.371738 |
| 14 | 15.50 | 70.0 | 79.941687 | 9.941687 |
| 21 | 12.00 | 64.0 | 57.844698 | -6.155302 |
| 20 | 12.50 | 63.0 | 61.001410 | -1.998590 |
| 11 | 13.75 | 62.0 | 68.893193 | 6.893193 |
| 6 | 11.50 | 53.0 | 54.687985 | 1.687985 |
| 19 | 12.00 | 52.0 | 57.844698 | 5.844698 |
| 1 | 11.50 | 50.0 | 54.687985 | 4.687985 |
| 0 | 10.00 | 50.0 | 45.217846 | -4.782154 |
| 4 | 9.25 | 49.0 | 40.482777 | -8.517223 |
| 18 | 10.00 | 48.0 | 45.217846 | -2.782154 |
| 2 | 9.00 | 47.0 | 38.904421 | -8.095579 |
| 7 | 9.00 | 42.0 | 38.904421 | -3.095579 |
| 12 | 9.00 | 37.0 | 38.904421 | 1.904421 |
| 16 | 9.00 | 36.0 | 38.904421 | 2.904421 |
| 17 | 6.00 | 35.0 | 19.964144 | -15.035856 |
| 15 | 8.00 | 27.0 | 32.590995 | 5.590995 |
| 8 | 8.50 | 26.0 | 35.747708 | 9.747708 |
| 13 | 8.00 | 15.0 | 32.590995 | 17.590995 |
### Using the regression coefficients for prediction
Now that you have the regression coefficients for the study time and grade relationship, you can use them in a function to estimate the expected grade for a given amount of study.
```python
# Define a function based on our regression coefficients
def f(x):
m = 6.3134
b = -17.9164
return m*x + b
study_time = 14
# Get f(x) for study time
prediction = f(study_time)
# Grade can't be less than 0 or more than 100
expected_grade = max(0,min(100,prediction))
#Print the estimated grade
print ('Studying for {} hours per week may result in a grade of {:.0f}'.format(study_time, expected_grade))
```
Studying for 14 hours per week may result in a grade of 70
So by applying statistics to sample data, you've determined a relationship between study time and grade; and encapsulated that relationship in a general function that can be used to predict a grade for a given amount of study time.
This technique is in fact the basic premise of machine learning. You can take a set of sample data that includes one or more *features* (in this case, the number of hours studied) and a known *label* value (in this case, the grade achieved) and use the sample data to derive a function that calculates predicted label values for any given set of features.
## Further Reading
To learn more about the Python packages you explored in this notebook, see the following documentation:
- [NumPy](https://numpy.org/doc/stable/)
- [Pandas](https://pandas.pydata.org/pandas-docs/stable/)
- [Matplotlib](https://matplotlib.org/contents.html)
## Challenge: Analyze Flight Data
If this notebook has inspired you to try exploring data for yourself, why not take on the challenge of a real-world dataset containing flight records from the US Department of Transportation? You'll find the challenge in the [/challenges/01 - Flights Challenge.ipynb](./challenges/01%20-%20Flights%20Challenge.ipynb) notebook!
> **Note**: The time to complete this optional challenge is not included in the estimated time for this exercise - you can spend as little or as much time on it as you like!
# Map, Filter, Reduce, and Groupby
This section demonstrates applications of higher-order functions.
```python
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
```
```python
def square(x):
return x ** 2
def iseven(n):
return n % 2 == 0
def add(x, y):
return x + y
def mul(x, y):
return x * y
def lesser(x, y):
if x < y:
return x
else:
return y
def greater(x, y):
if x > y:
return x
else:
return y
```
## Map
```python
# map works like this
list(map(square, data))
```
[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
```python
# In this way it's like numpy's broadcasting operators
import numpy as np
X = np.arange(1, 11)
X*2
```
array([ 2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
However, `map` is a pure Python function, so
* it is slow
* it can handle arbitrary Python functions, such as the Fibonacci function below
```python
def fib(i):
if i in (0, 1):
return i
else:
return fib(i - 1) + fib(i - 2)
list(map(fib, data))
```
[1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
```python
# Normally we would apply the function in an explicit loop like this
result = []
for item in data:
result.append(fib(item))
result
```
[1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
```python
# Looking at the loop above gives us a good pattern for how to define `map`
# We just abstract out the function `fib` into a user-supplied argument
# `map` is easy to define
def map(fn, sequence):
result = []
for item in sequence:
result.append(fn(item))
return result
```
A little-known fact: the methods of objects are perfectly valid functions too.
```python
map(str.upper, ['Alice', 'Bob', 'Charlie'])
```
['ALICE', 'BOB', 'CHARLIE']
The map operation is so important that it has its own syntax, the **list comprehension**:
```python
[fib(i) for i in data]
```
[1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
```python
[name.upper() for name in ['Alice', 'Bob', 'Charlie']]
```
['ALICE', 'BOB', 'CHARLIE']
## Filter
The higher-order function `filter` selects elements from a dataset using a predicate.
A predicate is a function that returns `True` or `False`. `filter` returns a new list containing only the elements for which the predicate is true.
```python
list(filter(iseven, data))
```
[2, 4, 6, 8, 10]
```python
from sympy import isprime # Only works if you have the sympy math library installed
list(filter(isprime, data))
```
[2, 3, 5, 7]
```python
def filter(predicate, sequence):
result = []
for item in sequence:
if predicate(item):
result.append(item)
return result
```
## Reduce
`reduce` is the little brother of `map` and `filter`. It is less popular and is often criticised as hard to understand.
Despite its reputation, `reduce` is quite powerful once you have written it yourself and understood how it works. More importantly, you will learn how to recognise reduction operations and how to pair them with binary operators. Reductions are very common in data analysis, especially when condensing a large dataset into a summary.
To illustrate `reduce`, we first implement two common reductions, `sum` and `min`. We phrase them in terms of the binary operators `add` and `lesser` to highlight their common structure. Spot the parts in which the two functions differ.
```python
def sum(sequence):
result = 0
for item in sequence:
# reult = result + item
result = add(result, item)
return result
```
```python
def min(sequence):
result = 99999999999999 # a really big number
for item in sequence:
# result = result if result < item else item
result = lesser(result, item)
return result
```
### Exercise
Now fill in the blanks below to complete the definition of `product`, a function that multiplies the elements of a sequence together.
```python
def product(sequence):
result = ?
for item in sequence:
result = ?(result, item)
return result
assert product([2, 3, 10]) == 60
```
### Exercise
Implement `reduce`.
Start by copying the pattern of the three functions above. The parts in which they differ become your inputs. Traditionally the arguments of `reduce` are ordered so that the examples below run correctly.
```python
def reduce(...):
...
```
```python
from functools import reduce
reduce(add, data, 0)
```
55
```python
reduce(mul, data, 1)
```
3628800
```python
reduce(lesser, data, 10000000)
```
1
```python
reduce(greater, data, -100000000)
```
10
## Lambda
This part of the course contains many small function definitions like this one
```
def add(x, y):
return x + y
```
These one-line function definitions can look a little silly. The `lambda` keyword lets us create small functions on the fly. The definition above can be written as follows
```
add = lambda x, y: x + y
```
The expression `lambda x, y: x + y` is a value, just like `3` or `'Alice'`. Like literal integers and strings, lambda expressions can be used on the spot, without ever being stored in a variable.
```python
reduce(add, data, 0)
```
55
```python
reduce(lambda x, y: x + y, data, 0) # Define `add` on the fly
```
55
We can also use `lambda` to quickly define functions as specialisations of more general ones. Below we define sum, minimum and maximum in one line each.
```python
sum = lambda data: reduce(add, data, 0)
min = lambda data: reduce(lesser, data, 99999999999)
max = lambda data: reduce(greater, data, -999999999999)
```
```python
sum(data)
```
55
As a homework exercise, build your own `product` using `lambda`, `reduce`, and `mul`.
```python
product = ...
assert product([2, 3, 10]) == 60
```
## Groupby
`groupby` can be seen as a more powerful version of `filter`. Instead of giving you one subset of the data, it splits the data into all of the relevant subsets.
```python
filter(iseven, data)
```
[2, 4, 6, 8, 10]
```python
from toolz import groupby
groupby(iseven, data)
```
{False: [1, 3, 5, 7, 9], True: [2, 4, 6, 8, 10]}
```python
groupby(isprime, data)
```
{False: [1, 4, 6, 8, 9, 10], True: [2, 3, 5, 7]}
However, the function passed to `groupby` does not have to be a strict predicate (a function returning `True` or `False`)
```python
groupby(lambda n: n % 3, data)
```
{0: [3, 6, 9], 1: [1, 4, 7, 10], 2: [2, 5, 8]}
```python
groupby(len, ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank'])
```
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
Perhaps surprisingly, `groupby` is not significantly more expensive than `filter` in the common case. It computes all of the groups in a single pass over the data.
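To see why a single pass is enough, here is a minimal sketch of how such a `groupby` can be implemented (an illustration only, not the actual `toolz` source):

```python
def groupby_sketch(key, sequence):
    # Single pass over the data: append each item to the list stored under its key
    groups = {}
    for item in sequence:
        groups.setdefault(key(item), []).append(item)
    return groups

# groupby_sketch(iseven, data)  ->  {False: [1, 3, 5, 7, 9], True: [2, 4, 6, 8, 10]}
```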
## Integrative example
Let's put these pieces together on a small dataset.
```python
likes = """Alice likes Chocolate
Bob likes Chocolate
Bob likes Apples
Charlie likes Apples
Alice likes Peanut Butter
Charlie likes Peanut Butter"""
```
```python
tuples = map(lambda s: s.split(' likes '), likes.split('\n'))
tuples
```
<map at 0xe5f2c88>
```python
groups = groupby(lambda x: x[0], tuples)
groups
```
{'Alice': [['Alice', 'Chocolate'], ['Alice', 'Peanut Butter']],
'Bob': [['Bob', 'Chocolate'], ['Bob', 'Apples']],
'Charlie': [['Charlie', 'Apples'], ['Charlie', 'Peanut Butter']]}
```python
from toolz import valmap, first, second
valmap(lambda L: list(map(second, L)), groups)
```
{'Alice': ['Chocolate', 'Peanut Butter'],
'Bob': ['Chocolate', 'Apples'],
'Charlie': ['Apples', 'Peanut Butter']}
```python
tuples = map(lambda s: s.split(' likes '), likes.split('\n'))
valmap(lambda L: list(map(first, L)), groupby(lambda x: x[1], tuples))
```
{'Apples': ['Bob', 'Charlie'],
'Chocolate': ['Alice', 'Bob'],
'Peanut Butter': ['Alice', 'Charlie']}
```python
tuples = list(map(lambda s: s.split(' likes '), likes.split('\n')))
# first, second extract elements from a tuple
# groupby groups the data
# compose composes functions
# valmap maps a function over the values of a dict
from toolz.curried import map, valmap, groupby, first, second, get, curry, compose, pipe
f = compose(valmap(first), groupby(second))
f(tuples)
```
{'Apples': ['Bob', 'Apples'],
'Chocolate': ['Alice', 'Chocolate'],
'Peanut Butter': ['Alice', 'Peanut Butter']}
```python
```
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse, fftpack
from math import factorial as fact
try:
plt.style.use("jupyter")
except OSError:
    print("Using default plotting style")
# L-p norm function
norm = lambda v, p=2 : (len(v)**(-p)*np.einsum('i->', np.abs(v)**p))**(1./p)
```
### Compact n$^{th}$-order derivative
The compact coefficients for the $n^{th}$ derivative $f^{(n)}$ of a function can be found by solving the system
$$
\begin{bmatrix}
\begin{matrix}
0 & 0 & 0
\end{matrix} & \begin{matrix}
1 & 1 & 1\\
\end{matrix}\\
Q^{(n)} & \begin{matrix}
h_{i-1} & 0 & h_{i+1}\\
h_{i-1}^2/2! & 0 & h_{i+1}^2/2!\\
h_{i-1}^3/3! & 0 & h_{i+1}^3/3!\\
h_{i-1}^4/4! & 0 & h_{i+1}^4/4!
\end{matrix}\\
\begin{matrix}
0 & 1 & 0
\end{matrix} & \begin{matrix}
0 & 0 & 0\\
\end{matrix}\\
\end{bmatrix}\begin{bmatrix}
L_{i-1} \\ L_{i} \\ L_{i+1} \\ -R_{i-1} \\ -R_{i} \\ -R_{i+1}\\
\end{bmatrix}=\begin{bmatrix}
0\\ 0\\ 0\\ 0\\ 0\\ 1\\,
\end{bmatrix}
$$
where $h_{i-1}=x_{i-1}-x_i$ and $h_{i+1} = x_{i+1}-x_i$. The sub-matrix $Q^{(n)}$ depends on the derivative required. For the first derivative, we have
$$
Q^{(1)} = \begin{bmatrix}
1 & 1 & 1\\
h_{i-1} & 0 & h_{i+1}\\
h_{i-1}^2/2! & 0 & h_{i+1}^2/2!\\
h_{i-1}^3/3! & 0 & h_{i+1}^3/3!\\
\end{bmatrix}
$$
and for the second derivative
$$
Q^{(2)} = \begin{bmatrix}
0 & 0 & 0\\
1 & 1 & 1\\
h_{i-1} & 0 & h_{i+1}\\
h_{i-1}^2/2! & 0 & h_{i+1}^2/2!\\
\end{bmatrix}.
$$
```python
def get_compact_coeffs(n, hi):
# assumes uniform grid
h_i = -hi
r = np.hstack((np.array([0 for i in range(5)]),1.))
L = np.array([[0, 0, 0, 1, 1, 1],
[0, 0, 0, h_i, 0, hi],
[0, 0, 0, h_i**2/fact(2), 0, hi**2/fact(2)],
[0, 0, 0, h_i**3/fact(3), 0, hi**3/fact(3)],
[0, 0, 0, h_i**4/fact(4), 0, hi**4/fact(4)],
[0, 1, 0, 0, 0, 0]])
insert = np.array([[1, 1, 1],
[h_i, 0, hi],
[h_i**2/fact(2), 0, hi**2/fact(2)],
[h_i**3/fact(3), 0, hi**3/fact(3)]])
L[n:5,:3] = insert[:-n+5,:]
vec = np.round(np.linalg.solve(L, r), 8)
return vec[:3], -vec[3:]
```
We can check that for a first derivative, we recover the standard Pade ($4^{th}$-order) [coefficients](https://github.com/marinlauber/my-numerical-recipes/blob/master/Compact-Schemes.ipynb), which are
$$
L = \left[\frac{1}{4}, 1, \frac{1}{4}\right], \qquad R = \left[-\frac{3}{4}, 0., \frac{3}{4}\right]
$$
```python
pade = np.array([1./4., 1., 1./4., -3./4., 0., 3./4.])
np.allclose(np.hstack(get_compact_coeffs(1, 1)), pade)
```
True
We can now write a function that, given a function $f$ sampled on a uniform grid with spacing $dx$, returns the $n^{th}$ derivative of that function. Because we solve for the compact coefficients at each point, we could in principle obtain compact schemes of the same accuracy on non-uniform grids. Here we will only focus on uniform grids.
```python
def derive_compact(n, f, dx):
# get coeffs
L, R = get_compact_coeffs(n, dx)
# temp array
sol = np.empty_like(f)
# compact scheme on interior points
sol[2:-2] = R[0]*f[1:-3] + R[1]*f[2:-2] + R[2]*f[3:-1]
# boundary points
sol[-2] = R[0]*f[-3] + R[1]*f[-2] + R[2]*f[-1]
sol[-1] = R[0]*f[-2] + R[1]*f[-1] + R[2]*f[-0]
sol[ 0] = R[0]*f[-1] + R[1]*f[ 0] + R[2]*f[ 1]
sol[ 1] = R[0]*f[ 0] + R[1]*f[ 1] + R[2]*f[ 2]
# build ugly matrix by hand
A = sparse.diags(L,[-1,0,1],shape=(len(f),len(f))).toarray()
    # periodic BCs
A[ 0,-1] = L[0]
A[-1, 0] = L[2]
return np.linalg.solve(A, sol)
```
We can then test the method on a known function with known first and second derivatives. For simplicity, we will use trigonometric functions, which have well-behaved derivatives of all orders.
$$
f(x) = \sin(x), \,\, x\in[0, 2\pi]
$$
such that
$$
\frac{d}{dx}f(x) = \cos(x), \quad \frac{d^2}{dx^2}f(x) = -\sin(x), \,\, x\in[0, 2\pi]
$$
```python
N = 128
x, dx = np.linspace(0, 2*np.pi, N, retstep=True, endpoint=False)
function = np.sin(x)
# first derivative
sol = derive_compact(1, function, dx)
print('First derivative L2 norm: ', norm(sol-np.cos(x)))
# second derivative
sol = derive_compact(2, function, dx)
print('Second derivative L2 norm: ', norm(sol+np.sin(x)))
```
First derivative L2 norm: 2.00356231982653e-09
Second derivative L2 norm: 1.5119843767976088e-09
### Poisson Equation With Compact Schemes
We aim to solve the following one-dimensionnal Poisson equation with Dirichlet boudnary conditions
$$
\begin{split}
-&\frac{d^2}{dx^2}u(x) = f(x), \quad a<x<b\\
&u(a) = 0, \quad u(b) = 0\\
\end{split}
$$
where $a, b\in\mathbb{R}$, $u(x)$ is the unknown function and $f(x)$ is some given source function. We discretize the left side of the Poisson equation ($u''_i$) using a compact finite difference scheme with fourth-order accuracy on a uniform grid with grid points $x_i = a+ih, h=(b-a)/M, i=0, 1, 2,..., M$ where $M$ is a positive integer.
$$
\frac{1}{10}u''_{i-1} + u''_i + \frac{1}{10}u''_{i+1} = \frac{6}{5}\frac{u_{i+1} - 2u_i + u_{i-1}}{h^2},
$$
or in a more common form,
$$
u''_{i-1} + 10u''_i + u''_{i+1} = \frac{12}{h^2}\left(u_{i+1} - 2u_i + u_{i-1}\right).
$$
This results in the following tri-diagonal system
$$
AU''= \frac{12}{h^2}BU,
$$
where $U'' = (u''_1,u''_2,...,u''_M)^\top$ and $U = (u_1,u_2,...,u_M)^\top\in \mathbb{R}^{M-1}$. The tri-diagonal matrix $A, B \in \mathbb{R}^{M-1\times M-1}$ are
$$
A = \begin{bmatrix}
10 & 1 & 0 &\dots & 0 & 0 \\
1 & 10 & 1 &\dots & 0 & 0 \\
0 & 1 & 10 &\dots & 0 & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\
0 & 0 & 0 & \dots & 10 & 1 \\
0 & 0 & 0 &\dots & 1 & 10 \\
\end{bmatrix}, \qquad B = \begin{bmatrix}
-2 & 1 & 0 &\dots & 0 & 0 \\
1 & -2 & 1 &\dots & 0 & 0 \\
0 & 1 & -2 &\dots & 0 & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\
0 & 0 & 0 & \dots & -2 & 1 \\
0 & 0 & 0 &\dots & 1 & -2 \\
\end{bmatrix}.
$$
In addition, we have $-u''(x_i)=f(x_i), \; i=1,2,...,M-1$, i.e. $-U''=F$. We can rewrite our system as
$$
-\frac{12}{h^2}BU = AF,
$$
To obtain the solution $U$, we simply need to solve the system.
```python
def build_AB(M, h):
A = sparse.diags([1.,10.,1.],[-1,0,1],shape=(M, M)).toarray()
B = sparse.diags([1.,-2.,1.],[-1,0,1],shape=(M, M)).toarray()
# dont forget BC, here homogeneous Dirichlet
B[ 0,:]=0.; B[ 0, 0]=1
B[-1,:]=0.; B[-1,-1]=1
return A, -12./h**2*B
```
In the first example, we consider the problem with homogeneous Dirichlet boundary conditions
$$
\begin{cases}
-u''(x) = \pi^2\sin(\pi x), & 0 < x <2,\\
u(0)=0, \quad u(2) = 0.
\end{cases}
$$
The exact solution is $u_e(x)=\sin(\pi x)$.
```python
def SolvePoissonCompact(f, h, M):
u0 = np.zeros_like(f)
A, B = build_AB(M, h)
sigma = np.matmul(A, f)
return np.linalg.solve(B, sigma)
```
```python
M = 64
x, h = np.linspace(0., 2., M, retstep=True, endpoint=True)
f = np.pi**2*np.sin(np.pi*x)
u_e = np.sin(np.pi*x)
u_num = SolvePoissonCompact(f, h, M)
print(norm(u_num-u_e))
plt.plot(x, u_e, '-s')
plt.plot(x, u_num,'-o')
plt.xlabel(r"$x$");plt.ylabel(r"$u$")
# plt.savefig("figure_1.png", dpi=300);
```
Now with non-zero Dirichlet boundary conditions
$$
\begin{cases}
-u''(x) = 12e^{-x^2}(-x^2+1/2), & -8 < x <8,\\
u(-8)=-8, \quad u(8) = 8.
\end{cases}
$$
The exact solution is $u_e(x)=3e^{-x^2}+x$. In the numerical computation, we use the change of variable $U(x)=u(x)-x$. Applying the numerical algorithm, we now have
$$
\begin{cases}
-U''(x) = 12e^{-x^2}(-x^2+1/2), & -8 < x <8,\\
U(-8)=-0, \quad U(8) = 0.
\end{cases}
$$
and the approximate numerical solution at a grid point is recovered as $u(x) = U(x)+x$.
```python
M = 64
x, h = np.linspace(-8., 8., M, retstep=True, endpoint=True)
f = 12.*np.exp(-x**2)*(-x**2 + 0.5)
u_e = 3.*np.exp(-x**2)+x
u_num = SolvePoissonCompact(f, h, M)
print(norm(u_num-u_e))
plt.plot(x, u_e, '-s')
plt.plot(x, u_num+x,'-o')
plt.xlabel(r"$x$");plt.ylabel(r"$u$");
# plt.savefig("figure_2.png", dpi=300);
```
### Using Fast Fourier Transforms to Solve the Poisson Equation
We actually do not need to invert the system described earlier to get the solution, [see](https://www.sciencedirect.com/science/article/pii/S0898122116300761). We can use the sine transform for $U\in\mathbb{R}^{M-1}$
$$
\begin{split}
u_j &= \sum_{k=1}^{M-1}\hat{u}_k\sin\left(\frac{jk\pi}{M}\right), \,\, j=1,2,...,M-1,\\
\hat{u_k} &= \frac{2}{M}\sum_{j=1}^{M-1}u_j\sin\left(\frac{ik\pi}{M}\right), \,\, j=1,2,...,M-1,
\end{split}
$$
from which we can approximate $u_{i+1}, u_{i-1}, u''_{i}, u''_{i+1}, u''_{i-1}$ as
$$
\begin{align}
u_{i+1}=\sum_{k=1}^{M-1}\hat{u}_k\sin\left(\frac{(i+1)k\pi}{M}\right),\qquad & u_{i-1} = \sum_{k=1}^{M-1}\hat{u}_k\sin\left(\frac{(i-1)k\pi}{M}\right)\\
u''_{i} =\sum_{k=1}^{M-1}\hat{u}''_k\sin\left(\frac{ik\pi}{M}\right),\qquad & u''_{i+1} =\sum_{k=1}^{M-1}\hat{u}''_k\sin\left(\frac{(i+1)k\pi}{M}\right)\\
u''_{i-1} =\sum_{k=1}^{M-1}\hat{u}''_k\sin\left(\frac{(i-1)k\pi}{M}\right). & \\
\end{align}
$$
Substituting into the compact discretization of the Poisson equation gives,
$$
\sum_{k=1}^{M-1}\hat{u}''_k\left\{ \frac{1}{10}\sin\left(\frac{(i-1)k\pi}{M}\right) + \sin\left(\frac{ik\pi}{M}\right) + \frac{1}{10}\sin\left(\frac{(i+1)k\pi}{M}\right) \right\} =\frac{6}{5h^2}\sum_{k=1}^{M-1}\hat{u}_k\left\{ \sin\left(\frac{(i-1)k\pi}{M}\right) +\sin\left(\frac{(i+1)k\pi}{M}\right) - 2\sin\left(\frac{ik\pi}{M}\right) \right\}
$$
or, after rearranging
$$
\hat{u}_k = -\hat{u}''_k\left(\frac{24\sin^2\left(\frac{k\pi}{2M}\right)}{h^2}\right)^{-1}\left(\cos\left(\frac{k\pi}{M}\right)+5\right), \,\, k\in 1,2,..,M-1.
$$
In addition, we obtain $-u''_i = f_i \,(i=1,2,...,M-1)$. By the inverse sine transform, we know that $-\hat{u}''_k=\hat{f}_k \, (k=1,2,...,M-1)$, which allows us to solve for $\hat{u}_k$
$$
\hat{u}_k = \hat{f}_k\left(\frac{24\sin^2\left(\frac{k\pi}{2M}\right)}{h^2}\right)^{-1}\left(\cos\left(\frac{k\pi}{M}\right)+5\right), \,\, k\in 1,2,..,M-1.
$$
> **_Note:_** We use a spectral method to solve the tri-diagonal system; this does not mean we solve it with spectral accuracy. Here the modified wavenumber makes the spectral solve exactly as accurate as the compact scheme.
```python
def SolvePoissonSine(f, h, M):
f_k = fftpack.dst(f, norm='ortho')
k = np.arange(1,M+1)
u_k = f_k*(24*np.sin(k*np.pi/(2*M))**2./h**2.)**(-1.)*(np.cos(np.pi*k/M)+5.)
return fftpack.idst(u_k, norm='ortho')
```
```python
M = 64
x, h = np.linspace(-8, 8, M, retstep=True, endpoint=True)
f = 12.*np.exp(-x**2)*(-x**2 + 0.5)
u_e = 3.*np.exp(-x**2)+x
u_num = SolvePoissonSine(f, h, M)
print(norm(u_num-u_e))
plt.plot(x, u_num + x, '-o')
plt.plot(x, u_e, 's')
plt.xlabel(r"$x$");plt.ylabel(r"$u$");
# plt.savefig("figure_3.png", dpi=300);
```
### Order of Accuracy
```python
L2_com = []
L2_Sine = []
Resolutions = 2.**np.arange(4,9)
for N in Resolutions:
x, h = np.linspace(0., 2., int(N), retstep=True, endpoint=True)
f = np.pi**2*np.sin(np.pi*x)
u_e = np.sin(np.pi*x)
u_num = SolvePoissonCompact(f, h, int(N))
error = norm(u_num-u_e)
L2_com.append(error)
u_num = SolvePoissonSine(f, h, int(N))
error = norm(u_num-u_e)
L2_Sine.append(error)
plt.loglog(Resolutions, np.array(L2_com), '--o', label='Compact Schemes')
plt.loglog(Resolutions, np.array(L2_Sine), ':s', label='Sine Transform')
plt.loglog(Resolutions, Resolutions**(-4), ':k', alpha=0.5, label=r"$4^{th}$-order")
plt.xlabel("Resolution (N)"); plt.ylabel(r"$L_2$-norm Error")
plt.legend();
# plt.savefig("figure_4.png", dpi=300);
```
# Trying to understand what metrics were used
```python
! pip install sympy
```
    Collecting sympy
      Using cached https://files.pythonhosted.org/packages/dd/f6/ed485ff22efdd7b371d0dbbf6d77ad61c3b3b7e0815a83c89cbb38ce35de/sympy-1.3.tar.gz
    Collecting mpmath>=0.19 (from sympy)
      Downloading https://files.pythonhosted.org/packages/ca/63/3384ebb3b51af9610086b23ea976e6d27d6d97bf140a76a365bd77a3eb32/mpmath-1.1.0.tar.gz (512kB)
    Building wheels for collected packages: sympy, mpmath
      Running setup.py bdist_wheel for sympy ... done
      Stored in directory: /home/group7/.cache/pip/wheels/6c/59/86/478e3c0f298368c119095cc5985dedac57c0e35a85c737f823
      Running setup.py bdist_wheel for mpmath ... done
      Stored in directory: /home/group7/.cache/pip/wheels/63/9d/8e/37c3f6506ed3f152733a699e92d8e0c9f5e5f01dea262f80ad
    Successfully built sympy mpmath
```python
from pathlib import Path
import pandas as pd
import sklearn.metrics
from sympy import symbols, nonlinsolve
```
```python
data ="""Persuasive 0.81 0.84 0.91
Audience 0.80 0.99 0.88
Agreement 0.69 0.85 0.76
Informative 0.76 0.74 0.75
Mean 0.74 0.78 0.75
Controversial 0.67 0.64 0.65
Disagreement 0.60 0.68 0.64
topic 0.62 0.67 0.61
Sentiment 0.44 0.46 0.43"""
```
```python
arr = data.split('\n')
```
```python
arr
```
['Persuasive 0.81 0.84 0.91',
'Audience 0.80 0.99 0.88',
'Agreement 0.69 0.85 0.76',
'Informative 0.76 0.74 0.75',
'Mean 0.74 0.78 0.75',
'Controversial 0.67 0.64 0.65',
'Disagreement 0.60 0.68 0.64',
'topic 0.62 0.67 0.61',
'Sentiment 0.44 0.46 0.43']
```python
pr_val, re_val, f1_val = {}, {}, {}
```
```python
for l in arr:
name, prec, reca, f1sc = l.split()
pr_val[name] = float(prec)
re_val[name] = float(reca)
f1_val[name] = float(f1sc)
```
```python
def f1(p, r):
return 2 * p * r * 1 / (p + r)
def prec(true_pos, false_pos):
return true_pos / (false_pos + true_pos)
def reca(true_pos, false_neg):
return true_pos / (true_pos + false_neg)
```
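As a quick sanity check (not part of the original analysis), the helpers above can be compared against `sklearn.metrics` on a toy confusion matrix; this is a hedged sketch assuming the usual binary-classification conventions:

```python
from sklearn.metrics import precision_score, recall_score, f1_score

# Toy labels: 3 true positives, 1 false positive, 2 false negatives, 4 true negatives
y_true = [1, 1, 1, 0, 1, 1, 0, 0, 0, 0]
y_pred = [1, 1, 1, 1, 0, 0, 0, 0, 0, 0]

p = precision_score(y_true, y_pred)        # 3 / (3 + 1) = 0.75
r = recall_score(y_true, y_pred)           # 3 / (3 + 2) = 0.6
print(p, prec(3, 1))                       # both 0.75
print(r, reca(3, 2))                       # both 0.6
print(f1_score(y_true, y_pred), f1(p, r))  # both 2*0.75*0.6/1.35 ≈ 0.667
```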
```python
# Persuasive
f1(0.81, 0.84) # 0.91 🛑
```
0.8247272727272728
```python
# Audience
f1(0.80, 0.99) # 0.88 ✔️
```
0.8849162011173185
```python
# Agreement w/ com
f1(0.69, 0.85) # 0.76 ✔️
```
0.7616883116883115
```python
# Informative
f1(0.76, 0.74) # 0.75 ✔️
```
0.7498666666666667
```python
# Mean
f1(0.74, 0.78) # 0.75 ✔️
```
0.7594736842105264
```python
# controversial
f1(0.67, 0.64) # 0.65 ✔️
```
0.6546564885496183
```python
# disagree
f1(0.60, 0.68) # 0.64 ✔️
```
0.6375000000000001
```python
# off-topic
f1(0.62, 0.67) # 0.61 🛑
```
0.644031007751938
```python
# sentiment
f1(0.44, 0.46) # 0.43 🛑
```
0.4497777777777778
```python
pth = Path('/mnt/data/group07/johannes/ynacc_proc/replicate/split/val.csv')
df = pd.read_csv(pth)
```
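The `solve` helper defined next tries to recover a confusion matrix from the published scores: writing $n_1$ and $n_0$ for the number of positive and negative examples in the validation split (the value counts of the label column), and $P$, $R$ for the reported precision and recall, it asks `nonlinsolve` for $tp, tn, fp, fn$ satisfying

$$
tp + fn = n_1, \qquad tn + fp = n_0, \qquad \frac{tp}{tp + fp} = P, \qquad \frac{tp}{tp + fn} = R,
$$

and it does this for both possible assignments of which label counts as the positive class.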
```python
def solve(name):
counts = df['cl' + name.lower()].value_counts()
# the reverse to ensure that there is only posibile solution
# for i in reversed([[0, 1], [1, 0]]):
all_res = []
for i in [[0, 1], [1, 0]]:
count_0 = counts[i[0]]
count_1 = counts[i[1]]
tp, tn, fp, fn = symbols('tp, tn, fp, fn', real=True)
eq1 = count_1 - tp - fn
eq2 = count_0 - tn - fp
eq3 = prec(tp, fp) - pr_val[name]
eq4 = reca(tp, fn) - re_val[name]
system = [eq1, eq2, eq3, eq4]
res = nonlinsolve(system, [tp, tn, fp, fn])
print(res)
# if len(res) > 0 and all(x > 0 for x in list(res)[0]):
all_res.append(list(list(res)[0]) + [i])
return all_res
```
```python
def kappa(name):
solutions = solve(name)
print(solutions)
for s in solutions:
tp, tn, fp, fn, i = s
y_true, y_pred = [], []
for x in range(round(tp)):
y_true.append(1)
y_pred.append(1)
for x in range(round(tn)):
y_true.append(0)
y_pred.append(0)
for x in range(round(fp)):
y_true.append(0)
y_pred.append(1)
for x in range(round(fn)):
y_true.append(1)
y_pred.append(0)
print(i)
print('kappa: ', sklearn.metrics.cohen_kappa_score(y_true, y_pred))
print('f1_mic: ', sklearn.metrics.f1_score(y_true, y_pred, average='micro'))
print('f1_mac: ',sklearn.metrics.f1_score(y_true, y_pred, average='macro'))
```
```python
for k in f1_val.keys():
print(k)
kappa(k)
print()
```
Persuasive
{(71.4, 481.251851851852, 16.7481481481481, 13.6)}
{(418.32, -13.1244444444444, 98.1244444444444, 79.68)}
[[71.4000000000000, 481.251851851852, 16.7481481481481, 13.6000000000000, [0, 1]], [418.320000000000, -13.1244444444444, 98.1244444444444, 79.6800000000000, [1, 0]]]
[0, 1]
kappa: 0.7896017415802279
f1_mic: 0.9468267581475128
f1_mac: 0.8947953594234788
[1, 0]
kappa: -0.17343597911689246
f1_mic: 0.7013422818791947
f1_mac: 0.41222879684418146
Audience
{(378.18, 104.455, 94.545, 3.82)}
{(197.01, 332.7475, 49.2525, 1.99)}
[[378.180000000000, 104.455000000000, 94.5450000000000, 3.82000000000000, [0, 1]], [197.010000000000, 332.747500000000, 49.2525000000000, 1.99000000000000, [1, 0]]]
[0, 1]
kappa: 0.5751386806319849
f1_mic: 0.8296041308089501
f1_mac: 0.7808674781416081
[1, 0]
kappa: 0.8155406288713061
f1_mic: 0.9122203098106713
f1_mac: 0.9071317756569979
Agreement
{(57.8, 489.031884057971, 25.968115942029, 10.2)}
{(437.75, -128.670289855072, 196.670289855072, 77.25)}
[[57.8000000000000, 489.031884057971, 25.9681159420290, 10.2000000000000, [0, 1]], [437.750000000000, -128.670289855072, 196.670289855072, 77.2500000000000, [1, 0]]]
[0, 1]
kappa: 0.7281065395377759
f1_mic: 0.9382504288164666
f1_mac: 0.8638274680784803
[1, 0]
kappa: -0.1841456752655537
f1_mic: 0.6151685393258427
f1_mac: 0.38086956521739135
Informative
{(71.04, 464.566315789474, 22.4336842105263, 24.96)}
{(360.38, -17.8042105263158, 113.804210526316, 126.62)}
[[71.0400000000000, 464.566315789474, 22.4336842105263, 24.9600000000000, [0, 1]], [360.380000000000, -17.8042105263158, 113.804210526316, 126.620000000000, [1, 0]]]
[0, 1]
kappa: 0.7032307675645233
f1_mic: 0.9193825042881647
f1_mac: 0.8516081515057974
[1, 0]
kappa: -0.249868404021228
f1_mic: 0.5990016638935108
f1_mac: 0.3746097814776275
Mean
{(94.38, 428.839459459459, 33.1605405405405, 26.62)}
{(360.36, -5.61297297297298, 126.612972972973, 101.64)}
[[94.3800000000000, 428.839459459459, 33.1605405405405, 26.6200000000000, [0, 1]], [360.360000000000, -5.61297297297298, 126.612972972973, 101.640000000000, [1, 0]]]
[0, 1]
kappa: 0.6927536231884058
f1_mic: 0.8970840480274442
f1_mac: 0.8463525195024246
[1, 0]
kappa: -0.23774696484450275
f1_mic: 0.6112054329371817
f1_mac: 0.37934668071654365
Controversial
{(120.32, 335.737910447761, 59.2620895522388, 67.68)}
{(252.8, 63.4865671641791, 124.513432835821, 142.2)}
[[120.320000000000, 335.737910447761, 59.2620895522388, 67.6800000000000, [0, 1]], [252.800000000000, 63.4865671641791, 124.513432835821, 142.200000000000, [1, 0]]]
[0, 1]
kappa: 0.4951417252500734
f1_mic: 0.7821612349914236
f1_mac: 0.7475011339105763
[1, 0]
kappa: -0.023822834930511405
f1_mic: 0.5420240137221269
f1_mac: 0.487601591894374
Disagreement
{(159.8, 241.466666666667, 106.533333333333, 75.2)}
{(236.64, 77.24, 157.76, 111.36)}
[[159.800000000000, 241.466666666667, 106.533333333333, 75.2000000000000, [0, 1]], [236.640000000000, 77.2400000000000, 157.760000000000, 111.360000000000, [1, 0]]]
[0, 1]
kappa: 0.36530363210030137
f1_mic: 0.6878216123499142
f1_mac: 0.6816769068305093
[1, 0]
kappa: 0.008985838773072685
f1_mic: 0.5385934819897084
f1_mac: 0.5010102167113708
topic
{(268.0, 18.741935483871, 164.258064516129, 132.0)}
{(122.61, 324.851935483871, 75.148064516129, 60.39)}
[[268.000000000000, 18.7419354838710, 164.258064516129, 132.000000000000, [0, 1]], [122.610000000000, 324.851935483871, 75.1480645161290, 60.3900000000000, [1, 0]]]
[0, 1]
kappa: -0.23743689765947673
f1_mic: 0.4922813036020583
f1_mac: 0.3790016121602948
[1, 0]
kappa: 0.47409040793825796
f1_mic: 0.7684391080617495
f1_mac: 0.7368473845227945
Sentiment
{(76.82, 209.229090909091, 97.7709090909091, 90.18)}
{(141.22, -12.7345454545454, 179.734545454545, 165.78)}
[[76.8200000000000, 209.229090909091, 97.7709090909091, 90.1800000000000, [0, 1]], [141.220000000000, -12.7345454545454, 179.734545454545, 165.780000000000, [1, 0]]]
[0, 1]
kappa: 0.14032684404483975
f1_mic: 0.6033755274261603
f1_mac: 0.5700306872792542
[1, 0]
kappa: -0.5495576686101047
f1_mic: 0.28952772073921973
f1_mac: 0.22452229299363058
```python
kappa('topic')
```
{(268.0, 18.741935483871, 164.258064516129, 132.0)}
kappa: -0.23743689765947673
f1_mic: 0.4922813036020583
f1_mac: 0.3790016121602948
```python
from sympy.solvers.solveset import nonlinsolve
from sympy.core.symbol import symbols
```
```python
# binary
tp, tn, fp, fn = symbols('tp, tn, fp, fn', real=True)
eq1 = 188 - tp - fn
eq2 = 395 - tn - fp
eq3 = prec(tp, fp) - 0.67
eq4 = reca(tp, fn) - 0.64
eq5 = f1(eq3, eq4) - 0.65
system = [eq1, eq2, eq3, eq4]
nonlinsolve(system, [tp, tn, fp, fn])
```
{(120.32, 215.417910447761, 179.582089552239, 67.68)}
```python
# micro
a, b, c, d = symbols('a, b, c, d', real=True)
eq1 = 188 - a - b
eq2 = 395 - c - d
eq3 = prec(a + d, b + c) - 0.67
eq4 = reca(a + d, b + c) - 0.64
# eq5 = f1(eq3, eq4) - 0.65
system = [eq1, eq2, eq3, eq4]
nonlinsolve(system, [a, b, c, d])
```
EmptySet()
```python
# macro
a, b, c, d = symbols('a, b, c, d', real=True)
eq1 = 188 - a - b
eq2 = 395 - c - d
eq3 = (prec(a, c) + prec(d, b)) / 2 - 0.67
eq4 = (reca(a, b) + reca(d, c)) / 2 - 0.64
eq5 = (f1(prec(a, c), reca(a, b)) + f1(prec(d, b), reca(d, c))) / 2 - 0.65
system = [eq1, eq2, eq3, eq4]
nonlinsolve(system, [a, b, c, d])
```
{(-0.67*d - 170.65*sqrt(-1.89242129226345e-5*d**2 + 0.005711568519704*d + 1.0) + 358.65, 0.67*d + 170.65*sqrt(-1.89242129226345e-5*d**2 + 0.005711568519704*d + 1.0) - 170.65, -d + 395, d), (-0.67*d + 170.65*sqrt(-1.89242129226345e-5*d**2 + 0.005711568519704*d + 1.0) + 358.65, 0.67*d - 170.65*sqrt(-1.89242129226345e-5*d**2 + 0.005711568519704*d + 1.0) - 170.65, -d + 395, d)}
```python
# weighted
a, b, c, d = symbols('a, b, c, d', real=True)
eq1 = 188 - a - b
eq2 = 395 - c - d
eq3 = ((a + b) * prec(a, c) + (c + d) * prec(d, b)) / (a + b + c + d) - 0.67
eq4 = ((a + b) * reca(a, b) + (c + d) * reca(d, c)) / (a + b + c + d) - 0.64
eq5 = ((a + b) * f1(prec(a, c), reca(a, b)) + (c + d) * f1(prec(d, b), reca(d, c))) / (a + b + c + d) - 0.65
system = [eq1, eq2, eq3, eq4]
nonlinsolve(system, [a, b, c, d])
```
{(-14.4493657422819, 202.449365742282, 7.43063425771811, 387.569365742282), (582.023911196827, -394.023911196827, 603.903911196827, -208.903911196827)}
```python
```
```
import sympy as sy
from dolfin import *
```
```
x = sy.symbols('x')
y = sy.symbols('y')
G = 10.
Ha = 0.01
nu = 1.
kappa = 1e4
```
```
sy.ccode
```
```
b = G/kappa*(sy.sinh(y*Ha)/sy.sinh(Ha)-y)
d = 1
p = -G*x - (kappa/2)*b**2
u = G/(nu*Ha*sy.tanh(Ha))*(1-sy.cosh(y*Ha)/sy.cosh(Ha))
v = sy.diff(x,y)
L1 = sy.diff(u,x,x)+sy.diff(u,y,y)
L2 = sy.diff(v,x,x)+sy.diff(v,y,y)
A1 = u*sy.diff(u,x)+v*sy.diff(u,y)
A2 = u*sy.diff(v,x)+v*sy.diff(v,y)
P1 = sy.diff(p,x)
P2 = sy.diff(p,y)
C1 = sy.diff(d,x,y) - sy.diff(b,y,y)
C2 = sy.diff(b,x,y) - sy.diff(d,x,x)
NS1 = -d*(sy.diff(d,x) - sy.diff(b,y))
NS2 = b*(sy.diff(d,x) - sy.diff(b,y))
M1 = sy.diff(u*d-v*b,y)
M2 = -sy.diff(u*d-v*b,x)
```
    -5000.0*(-0.001*y + 0.0999983333527776*sinh(0.01*y))*(0.00199996666705555*cosh(0.01*y) - 0.002)
```
P2(1,2)
```
```
P2.subs?
```
```
sy.evalf(P2.subs(y,pi))
```
```
v
```
0
```
u0 = Expression((sy.ccode(u),sy.ccode(v)))
p0 = Expression(sy.ccode(p))
Laplacian = Expression((sy.ccode(L1),sy.ccode(L2)))
Advection = Expression((sy.ccode(A1),sy.ccode(A2)))
gradPres = Expression((sy.ccode(P1),sy.ccode(P2)))
```
```
sy.diff(b,y)
```
G*(Ha*cosh(Ha*y)/sinh(Ha) - 1)/kappa
```
p = -G*x - (kappa/2)*b**2
p
```
-G**2*(-y + sinh(Ha*y)/sinh(Ha))**2/(2*kappa) - G*x
```
sy.diff(p,x)
```
-G
```
sy.diff(p,y)
```
-G**2*(-y + sinh(Ha*y)/sinh(Ha))*(2*Ha*cosh(Ha*y)/sinh(Ha) - 2)/(2*kappa)
```python
import matplotlib.pyplot as plt
import scipy.stats as st
import seaborn as sns
import pandas as pd
from scipy.stats import norm, uniform, expon
from scipy.integrate import quad
from sympy.solvers import solve
from sympy import Symbol
import numpy as np
from pandas import Series, DataFrame
```
```python
werte = np.array([0,10,11])
ew = np.sum(werte*1/3)
ew
```
7.0
```python
var_X = np.sum((werte-ew)**2*1/3)
var_X
```
24.666666666666664
```python
sd_X = np.sqrt(var_X)
```
```python
sim = np.random.choice(werte, size=10, replace = True)
sim
```
array([ 0, 11, 10, 11, 11, 11, 10, 0, 10, 10])
```python
plt.hist(sim, bins = range(0, 13, 1), edgecolor = "black")
```
```python
```
# Two Dimensional Fractal Integration
To generalize the concepts given in the previous section for FIFs to FISs, consider a triangular domain $\Omega$ in the plane with vertices $\tilde{P}_j = (\tilde{x}_j, \tilde{y}_j), \; j = 1, 2, 3$. Let $P = \{P_i = (x_i, y_i), \; i = 1, \ldots, N\}$ be given points in the plane scattered over $\Omega$ and containing the vertices $\tilde{P}_j, \; j = 1, 2, 3$. A triangulation $T(\Omega)$ of $\Omega$ over the points $P$ is given by the set
$$
\begin{equation}
T(\Omega) = \{ \Omega^i = \{P_j^i\} : P_j^i \in P, \; j = 1, 2, 3, \; i = 1, \ldots, K \}
\end{equation}
$$
Note that $T(\Omega)$ consists of non-degenerate triangles such that $\Omega^i \cap \Omega^j = \emptyset, \; i \neq j$ and $\Omega = \cup_{i = 1}^K \Omega^i$.
Consider that each $P_i = (x_i, y_i) \in P$ is associated with a value $z_i \in \mathbb{R}$. An interpolation function, also called an interpolant, corresponding to the data set $\{(x_i, y_i, z_i), \; i = 1, \ldots, N\}$ is a continuous function $f: \Omega \mapsto \mathbb{R}$ such that $f(x_i, y_i) = z_i$. The construction of the interpolation function $f$ has two parts:
1. Construction of an IFS $\mathcal{I} = \{\Omega \times \mathbb{R}; w_i, \; i = 1, \ldots, K \}$ whose attractor is the graph $G$ of the function $f$ satisfying the interpolation points, i.e. $f(x_i, y_i) = z_i, \; i = 1, \ldots, N$.
2. Construction of a contraction mapping $M: \mathcal{F} \mapsto \mathcal{F}$, where $\mathcal{F} =\{ \tilde{f} \in \mathcal{C}(\Omega) : \tilde{f}(\tilde{x}_j, \tilde{y}_j) = \tilde{z}_j \}$, such that the fixed point $f$ of the mapping $M$ satisfies the interpolation points, i.e. $f(x_i, y_i) = z_i, \; i = 1, \ldots, N$.
### Construction of the IFS $\mathcal{I}$
Addressing the first step, we put forward the following IFS $\mathcal{I} = \{ \Omega \times \mathbb{R}; w_i, \; i = 1, \ldots, K \}$ with affine transformations $w_i$,
$$
\begin{aligned}
w_i(x, y, z) &= (L_i(x, y), F_i(x, y, z))\\
&=\begin{bmatrix}
\alpha_1^i & \alpha_2^i & 0 \\
\alpha_3^i & \alpha_4^i & 0 \\
\alpha_5^i & \alpha_6^i & \alpha_7^i
\end{bmatrix}
\begin{bmatrix}
x \\ y \\ z
\end{bmatrix} + \begin{bmatrix}
\beta_1^i \\ \beta_2^i \\ \beta_3^i
\end{bmatrix}
\; i = 1, \ldots, K,
\end{aligned}
$$
where $L_i : \Omega \mapsto \Omega^i$
$$
L_i(x, y) = \begin{bmatrix}
\alpha_1^i & \alpha_2^i \\ \alpha_3^i & \alpha_4^i
\end{bmatrix} \begin{bmatrix}
x \\ y
\end{bmatrix} + \begin{bmatrix}
\beta_1^i \\ \beta_2^i
\end{bmatrix}
$$
maps the domain $\Omega$ onto the triangle $\Omega^i$ (so that $L_i(\tilde{P}_j) = P^i_j$, $j = 1, 2, 3$), while $F_i$ is contractive in the $z$ direction and satisfies the boundary conditions
$$
F_i(\tilde{x}_j, \tilde{y}_j, \tilde{z}_j) = z_j^i, \; j = 1, 2, 3.
$$
where $\alpha_7^i$ are arbitrary contractivity factors satisfying $|\alpha_7^i| < 1, \; i = 1, \ldots, K$, also called as vertical scaling factors.
Given the vertical scaling factors $\alpha_7^i, \; i = 1, \ldots, K$, the coefficients $\alpha_k^i, \; k = 1, \ldots, 6, \; i = 1, \ldots, K$ can be found from the boundary conditions above, which results in the following system of equations,
$$
\begin{aligned}
\alpha_1^i \tilde{x}_j + \alpha_2^i \tilde{y}_j + \beta_1^i &= x_j^i \\
\alpha_3^i \tilde{x}_j + \alpha_4^i \tilde{y}_j + \beta_2^i &= y_j^i \\
\alpha_5^i \tilde{x}_j + \alpha_6^i \tilde{y}_j + \beta_3^i &= z_j^i - \alpha_7^i \tilde{z}_j
\end{aligned}
$$
for $i = 1, \ldots, K, \; j = 1, 2, 3$. This system can be rewritten in block diagonal matrix equation system,
$$
\begin{bmatrix}
P & 0 & \ldots & 0 \\
0 & P & \ldots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \ldots & P \\
\end{bmatrix}
\begin{bmatrix}
r_1 \\ r_2 \\ r_3
\end{bmatrix} = \begin{bmatrix}
c_1 \\ c_2 \\ c_3
\end{bmatrix}
$$
where
$$
\begin{aligned}
P &=
\begin{bmatrix}
\tilde{x}_1 & \tilde{y}_1 & 1 \\
\tilde{x}_2 & \tilde{y}_2 & 1 \\
\tilde{x}_3 & \tilde{y}_3 & 1 \\
\end{bmatrix}\\
r_1 &= [r_1^1, \ldots, r_1^i, \ldots, r_1^K], \; r_1^i = [\alpha_1^i, \alpha_2^i, \beta_1^i] \\
r_2 &= [r_2^1, \ldots, r_2^i, \ldots, r_2^K], \; r_2^i = [\alpha_3^i, \alpha_4^i, \beta_2^i] \\
r_3 &= [r_3^1, \ldots, r_3^i, \ldots, r_3^K], \; r_3^i = [\alpha_5^i, \alpha_6^i, \beta_3^i] \\
c_1 &= [c_1^1, \ldots, c_1^i, \ldots, c_1^K], \; c_1^i = [x_1^i, x_2^i, x_3^i] \\
c_2 &= [c_2^1, \ldots, c_2^i, \ldots, c_2^K], \; c_2^i = [y_1^i, y_2^i, y_3^i] \\
c_3 &= [c_3^1, \ldots, c_3^i, \ldots, c_3^K], \; c_3^i = [z_1^i, z_2^i, z_3^i] - \alpha_7^i [\tilde{z}_1, \tilde{z}_2, \tilde{z}_3]
\end{aligned}
$$
We have an uncoupled system of equations,
$$
P r_j^i = c_j^i, \; j = 1, 2, 3, \; i = 1, \ldots, K
$$
Since the points $\{\tilde{P}_j, \; j = 1, 2, 3\}$ forms a non-degenerate triangular region $\Omega$, $P^{-1}$ exists and can be solved for the coefficients of the IFS $\mathcal{I}$ which gives,
$$
r_j^i = P^{-1} c_j^i, \; j = 1, 2, 3, \; i = 1, \ldots, K
$$
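As an illustration of this solve (a standalone NumPy sketch with assumed example vertices and scaling factor, separate from the Julia implementation further below), the coefficients of a single map can be obtained like this:

```python
import numpy as np

# Vertices of the outer domain triangle (assumed example values): (x~_j, y~_j) and z~_j
domain = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
domain_z = np.array([0.0, 0.0, 0.0])

# Vertices and z-values of one sub-triangle Omega^i (assumed example values)
tri = np.array([[0.0, 0.0], [0.5, 0.0], [0.0, 0.5]])
tri_z = np.array([0.1, 0.3, 0.2])
alpha7 = 0.25  # vertical scaling factor, |alpha7| < 1

# Rows of P are [x~_j, y~_j, 1]
P = np.column_stack([domain[:, 0], domain[:, 1], np.ones(3)])
r1 = np.linalg.solve(P, tri[:, 0])                   # alpha1, alpha2, beta1
r2 = np.linalg.solve(P, tri[:, 1])                   # alpha3, alpha4, beta2
r3 = np.linalg.solve(P, tri_z - alpha7 * domain_z)   # alpha5, alpha6, beta3
```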
Inspired by the reasoning given in Theorem 1, we propose the following iterative map to find the fractal interpolant.
Given the triangulation $T(\Omega)$ and the IFS $\mathcal{I}$ defined above, let $(\mathcal{F}, d)$ be a complete metric space such that $\mathcal{F} = \{ \tilde{f} \in \mathcal{C}(\Omega) : \tilde{f}(\tilde{x}_j, \tilde{y}_j) = \tilde{z}_j, \; j = 1, 2, 3 \}$ with the metric $d(\tilde{f}_1, \tilde{f}_2) = max \{ |\tilde{f}_1(x, y) - \tilde{f}_2(x, y)|\}, \; x, y \in \Omega$. Let $M : \mathcal{F} \mapsto \mathcal{F}$ be defined by
$$
(M\tilde{f})(x, y) = F_i(L_i^{-1}(x, y), \tilde{f}(L_i^{-1}(x, y))), \quad (x, y) \in \Omega^i, \; i = 1, \ldots, K
$$
Then, $M$ has a unique fixed point $f$ such that $f = \lim_{n \to \infty} M^n(\tilde{f})$ for any $\tilde{f} \in \mathcal{F}$, and this fixed point interpolates the given data set, i.e. $f(x_i, y_i) = z_i, \; i = 1, \ldots, N$.
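For clarity, here is a one-line reason why $M$ is a contraction: $F_i$ is affine in $z$ with coefficient $\alpha_7^i$, so for any $\tilde{f}_1, \tilde{f}_2 \in \mathcal{F}$,

$$
d(M\tilde{f}_1, M\tilde{f}_2) \leq \left( \max_{1 \leq i \leq K} |\alpha_7^i| \right) d(\tilde{f}_1, \tilde{f}_2),
$$

and the condition $|\alpha_7^i| < 1$ then guarantees a unique fixed point by the Banach fixed point theorem.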
```julia
# Two dimensional fractal integration.
using FractalTools
using Triangulation
using PyPlot
using Statistics
# Read mesh
datapath = joinpath(dirname(dirname(pathof(FractalTools))), "data/gmsh_data")
filename = "t3.msh"
domain = read_mesh(joinpath(datapath, filename))
scale_factor = 1 # Rescale the mesh.
for node in domain.nodes
node.x /= scale_factor
node.y /= scale_factor
end
domains = triangular_partition(domain)
regions = gmsh2matplotlibtri.(domains)
# Define function
func(x, y) = x.^2 .+ y.^2
number_of_regions = length(regions)
z = Vector{Vector}(undef, number_of_regions)
for k = 1 : number_of_regions
z[k] = func(regions[k].x, regions[k].y)
end
# Refine region to evaluate interpolated function
subdiv = 2
x, y = refine(gmsh2matplotlibtri(domain), subdiv, true)
# Compute initial and real values.
func0(xi, yi) = 0.
α = 0.001
interpolant, coeffs = fis(regions, z, α=α, func0=func0, num_iter=10, get_coeffs=true)
# Interpolation and error
real_values = func.(x, y)
interpolated_values = interpolant.(x, y)
absolute_error = abs.(interpolated_values - real_values)
relative_error = 100 * map(i -> abs(real_values[i]) <= 0.1 ? absolute_error[i] : absolute_error[i] / abs(real_values[i]),
1 : length(absolute_error));
```
```julia
nothing
```
```julia
# # Plot the results.
xd = xcoords(domain)
yd = ycoords(domain)
zd = func.(xd, yd)
minx, meanx, maxx = minimum(x), mean(x), maximum(x)
miny, meany, maxy = minimum(y), mean(y), maximum(y)
plot_trisurf(x, y, real_values)
coluors = ["black", "red", "blue", "green", "purple", "magenta"]
for (color, region) in zip(coluors, regions)
triplot(region, color=color)
plot(region.x, region.y, ".", color="orange")
end
xlabel("x", labelpad=10, fontsize=12)
ylabel("y", labelpad=10, fontsize=12)
zlabel("z", labelpad=10, fontsize=12)
xticks(range(minx, maxx, length=5))
yticks(range(miny, maxy, length=5))
zticks(range(minimum(real_values), maximum(real_values), length=5));
```
```julia
plot_trisurf(x, y, interpolated_values)
for (color, region) in zip(coluors, regions)
triplot(region, color=color)
plot(region.x, region.y, ".", color="orange")
end
xlabel("x", labelpad=10, fontsize=12)
ylabel("y", labelpad=10, fontsize=12)
zlabel("z", labelpad=10, fontsize=12)
xticks(range(minx, maxx, length=5))
yticks(range(miny, maxy, length=5))
zticks(range(minimum(interpolated_values), maximum(interpolated_values), length=5))
tight_layout()
```
```julia
plot_trisurf(x, y, absolute_error)
for (color, region) in zip(coluors, regions)
triplot(region, color=color)
plot(region.x, region.y, ".", color="orange")
end
xlabel("x", labelpad=10, fontsize=12)
ylabel("y", labelpad=10, fontsize=12)
zlabel("z", labelpad=10, fontsize=12)
xticks(range(minx, maxx, length=5))
yticks(range(miny, maxy, length=5))
zticks(range(minimum(absolute_error), maximum(absolute_error), length=5))
tight_layout()
```
```julia
plot_trisurf(x, y, relative_error)
for (color, region) in zip(coluors, regions)
triplot(region, color=color)
plot(region.x, region.y, ".", color="orange")
end
xlabel("x", labelpad=10, fontsize=12)
ylabel("y", labelpad=10, fontsize=12)
zlabel("z", labelpad=10, fontsize=12)
xticks(range(minx, maxx, length=5))
yticks(range(miny, maxy, length=5))
zticks(range(minimum(relative_error), maximum(relative_error), length=5))
tight_layout()
```
```julia
# NOTE: `relative_error_fltr` was not defined in the original notebook; a clipped
# ("filtered") relative error is assumed here so that the cell runs.
relative_error_fltr = min.(relative_error, 100.)
plot_trisurf(x, y, relative_error_fltr)
for (color, region) in zip(coluors, regions)
triplot(region, color=color)
plot(region.x, region.y, ".", color="orange")
end
xlabel("x", labelpad=10, fontsize=12)
ylabel("y", labelpad=10, fontsize=12)
zlabel("z", labelpad=10, fontsize=12)
xticks(range(minx, maxx, length=5))
yticks(range(miny, maxy, length=5))
zticks(range(minimum(relative_error_fltr), maximum(relative_error_fltr), length=5))
tight_layout()
```
```julia
fig, ax = subplots(1)
for region in regions
ax.triplot(region)
ax.plot(region.x, region.y, ".", color="orange")
end
ax.set_xlabel("x", fontsize=12)
ax.set_ylabel("y", fontsize=12)
tight_layout()
```
# Particle in one-dimensional potential well
## Period of oscillations in potential well
Dynamics of a particle of mass $m$ moving in one dimension $OX$ is described the Newton equation
$$m\ddot x =m\dot v = F(x) = -U'(x),$$
where $F(x)$ is the force acting on the particle and $U(x)$ is the potential energy of the particle. We assume that there are no dissipative forces. Therefore, as we know from the previous section, the total energy $E$ of the particle is conserved, i.e.,
\begin{equation}
\label{eq:cons_E1d}
\frac{mv^2}{2} + U(x) = E = const.
\end{equation}
If we treat the velocity $v=\frac{dx}{dt}$ as an independent variable, this is an implicit equation for the orbit (the phase curve) in the **phase space** $(x,v)$ of the particle with energy $E$. This equation can be rewritten in the form
$$\frac{m}{2}\left(\frac{dx}{dt}\right)^2+U(x)=E$$
It is a first-order differential equation and can be solved by the method of variable separation. The result reads
\begin{equation}
\label{eq:t_cons_E1d}
t=\sqrt{\frac{m}{2}} \; \int_{a}^{b}{\frac{dx}{\sqrt{E-U(x)}}}
\end{equation}
Here, $a$ is the initial position, $x(0)=a$, and $b$ is the final position, $x(t)=b$, of the particle. The time $t$ is the time needed to move the particle from the point $a$ to the point $b$, under the condition that $E \ge U(x)$ for all $x \in (a, b)$.
This is a useful formula which allows us to calculate the period of oscillations of the particle in a potential well.
In this section we will consider a class of potentials in the form
\begin{equation}
\label{eq:Uxn}
U(x)=A |x|^n,
\end{equation}
where $n$ is a positive real number.
These potentials are similar: they are bounded from below, have only one minimum at $x=0$ and tends to infinity when $x\to \pm \infty$. In such potential the particle motion is bounded and the particle oscillates between two symmetrical positions $x_0$ and $-x_0$ which in general depend on the total energy $E$ and are determined by the equation
$$U(\pm x_0) = E$$
Because of symmetry, the period $T$ of oscillations can be determined by the equation
\begin{equation}
\label{eq:T}
T=4 \; \sqrt{\frac{m}{2}} \; \int_{0}^{x_0}{\frac{dx}{\sqrt{E-U(x)}}}
\end{equation}
This class of potentials is simple, but the analysis of how $T$ depends on the index $n$ and the total energy $E$ is nevertheless very interesting and instructive.
We will use computer algebra and numerical methods to investigate properties of motion is such potential wells.
```python
load('cas_utils.sage')
```
```python
t = var('t')
m = var('m')
A = var('A')
assume(A > 0)
assume(m > 0)
y = function('y')(t)
de = m*diff(y,t,2) + 2*A*y == 0
showmath( desolve(de, y,ivar=t) )
```
It is an analytical solution of the Newton equation in the case when $n=2$ (a harmonic potential).
## Particle in potential $x^2$
For $n=2$ the system is a harmonic oscillator:
$$U(x)=A x^2.$$
```python
#reset()
var('m A x E')
forget()
assume(A > 0)
assume(E > 0)
assume(E,'real')
```
To obtain the integration limit $x_0$ in the formula for the period of oscillations, we must solve the equation:
$$U(x)=E$$
So for the $Ax^2$ potential, we have:
```python
U(A,x) = A*x^2
xextr = solve (U(A=A,x=x)==E,x)
showmath(xextr)
```
These formulas describe the values of the oscillator's extremal positions for a given energy. Let's put them into the formula for $T$:
```python
period = 2*sqrt(m/2)*integrate( 1/sqrt(E-U(A,x)),(x,x.subs(xextr[0]),x.subs(xextr[1])))
period = period.canonicalize_radical()
showmath(period)
```
We see that the period $T$ does not depend on the energy of the oscillator. This means it does not depend on the initial conditions (which determine the total energy), and in turn it does not depend on the distance between the points $-x_0$ and $x_0$. This seems unusual: the time to travel from $-1$ to $1$ and back is the same as the time to travel from $-10000$ to $10000$ and back. In the second case the distance is much, much longer, but the time is the same. This is an exceptional property, valid only for the harmonic potential!
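A quick numerical cross-check of this statement (a hedged sketch using SciPy rather than Sage, with $m = A = 1$ assumed): the period computed from the integral formula comes out the same for two very different energies.

```python
import numpy as np
from scipy.integrate import quad

m, A = 1.0, 1.0  # assumed values for the check

def period(E, n=2):
    # T = 4*sqrt(m/2) * integral_0^{x0} dx / sqrt(E - A*x^n),  with x0 = (E/A)^(1/n)
    x0 = (E / A)**(1.0 / n)
    val, _ = quad(lambda x: 1.0 / np.sqrt(E - A * x**n), 0.0, x0)
    return 4.0 * np.sqrt(m / 2.0) * val

print(period(1.0), period(10000.0))  # both ≈ 2*pi/sqrt(2*A/m) ≈ 4.4429 for n = 2
```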
## Particle in $|x|^n$ potential
If $n\neq2$, the general formula for the period can be written as:
$$T=4 \sqrt{\frac{m}{2}} \; \int_0^{x_0}\frac{dx}{\sqrt{E- A x^n}}$$
or in the equivalent form:
$$T=4 \sqrt{\frac{m}{2}}\frac{1}{\sqrt{E}}\int_0^{x_0}\frac{dx}{\sqrt{1-Ax^n/E}}$$
This integral can be transformed to a dimensionless form by substitution
$$\frac{A}{E}x^n=y^n.$$
It is in fact a linear relationship between $x$ and $y$:
$$\left(\frac{A}{E}\right)^{\frac{1}{n}}x=y.$$
Therefore, we can change the integration variable to $y$. To do this, we use SAGE to transform the expression under integral in the following way:
```python
var('dx dy A E x y')
var('n',domain='integer')
assume(n>=0)
assume(A>0)
assume(E>0)
ex1 = dx/sqrt(1-A/E*x^n)
showmath(ex1)
```
and we substitute:
```python
ex2 = ex1.subs({x:(E/A)^(1/n)*y,dx:dy*(E/A)^(1/n)})
showmath( ex2.canonicalize_radical().full_simplify() )
```
Let's take out the expression that depends on the parameters $A$ and $E$:
```python
expr2 = (ex2/dy*sqrt(-y^n + 1)).full_simplify()
showmath( expr2.canonicalize_radical() )
```
```python
prefactor = expr2*sqrt(m/2)*4*1/sqrt(E)
showmath( prefactor.canonicalize_radical() )
```
Finally, we obtain:
$$T=4 \sqrt{\frac{m}{2}}\frac{1}{A^{1/n}} E^{\frac{1}{n}-\frac{1}{2}}\int_0^{y_0}\frac{dy}{\sqrt{1- y^n}}$$
For $n=2$, dependence on $E$ disappears, as we already have seen in the previous case.
We still need to calculate the upper limit $y_0$ of integration. In the integral, the upper limit is the position at which the total energy equals the potential energy:
$$U(x)=E$$
In this case $$Ax^n=E.$$
By changing the variables we get:
```python
solve( (A*x^n == E).subs({x:(E/A)^(1/n)*y}), y)
```
That is, the integration limit is $y_0=1.$
Therefore the period of oscillations is given by the relation:
$$T=4 \sqrt{\frac{m}{2}}\frac{1}{A^{1/n}} E^{\frac{1}{n}-\frac{1}{2}}\int_0^{1}\frac{dy}{\sqrt{1- y^n}}$$
We note that only for $n=2$ is the period $T$ independent of $E$, i.e. of the initial conditions and hence of the amplitude. In all other cases it depends on the total energy $E$, and therefore on the initial conditions and on the distance between the turning points $-x_0$ and $x_0$.
The above formula shows how the time needed for one oscillation depends on $E$ and, in consequence, on the distance travelled: the higher the energy $E$, the longer the distance $4x_0$ covered during one oscillation.
The scaled integral can be expressed by the Euler beta function, see http://en.wikipedia.org/wiki/Beta_function.
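For completeness (this step is not spelled out in the notebook), the substitution $u=y^n$ brings the scaled integral to the standard beta-function form:
$$\int_0^1 \frac{dy}{\sqrt{1-y^n}} \;=\; \frac{1}{n}\int_0^1 u^{\frac{1}{n}-1}\,(1-u)^{-\frac{1}{2}}\,du \;=\; \frac{1}{n}\,B\!\left(\frac{1}{n},\tfrac{1}{2}\right),$$
which, by the symmetry of $B$, equals the expression `beta(1/2,1/a)/a` used below.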
We can calculate it:
```python
var('a')
assume(a,'integer')
assume(a>0)
print( assumptions() )
```
```python
integrate(1/sqrt(1-x^(a)),(x,0,1) )
```
We get a formula containing the beta function. It can be evaluated numerically for any values of the $a$ parameter.
```python
(beta(1/2,1/a)/a).subs({a:2}).n()
```
Let's examine this formula numerically. One can either use the $\beta$ function or estimate the integral numerically; the second approach allows us to explore any potential, not just $U(x)=Ax^n$.
```python
def beta2(a,b):
return gamma(a)*gamma(b)/gamma(a+b)
a_list = srange(0.02,5,0.1)
a_list2 = [1/4,1/3,1/2,1,2,3,4,5]
integr_list = [ integral_numerical( 1/sqrt(1-x^a_) ,0,1, algorithm='qng',rule=2)[0] \
for a_ in a_list ]
integr_list_analytical = [ beta2(1/2, 1/a_)/a_ for a_ in a_list2 ]
```
For these exponents we obtain simple analytical expressions:
```python
showmath( integr_list_analytical )
```
Now we can compare these analytical values with the numerical results, for example on a plot:
```python
plt_num = list_plot(list(zip(a_list, integr_list)), plotjoined=True)   # list(...) needed under Python 3
plt_anal = list_plot(list(zip(a_list2, integr_list_analytical)), color='red')
(plt_num + plt_anal).show(ymin=0,figsize=(6,2))
```
Having an analytical solution, you can examine the asymptotics for large $n$:
```python
var('x')
asympt = limit( beta2(1/2, 1/x)/x,x=oo )
asympt
```
```python
plt_asympt = plot(asympt,(x,0,5),linestyle='dashed',color='gray')
```
Let's add a few points for which the integral takes exact values
```python
l = list(zip(a_list2[:5], integr_list_analytical[:5]))  # list(...) so that l can be iterated more than once
showmath(l)
```
```python
def plot_point_labels(l):
p=[]
for x,y in l:
p.append( text( "$("+latex(x)+", "+latex(y)+")$" ,(x+0.1,y+0.2) , fontsize=14,horizontal_alignment='left',color='gray') )
p.append( point ( (x,y),size=75,color='red' ) )
return sum(p)
```
```python
some_points = plot_point_labels(l)
```
```python
plt_all = plt_num+plt_anal+plt_asympt+some_points
plt_all.show(figsize=(6,3),ymin=0,ymax=7)
```
## Numerical convergence
The integral
$$\int_0^1 \frac{dx}{\sqrt{1-x^n}}$$ appears to be divergent when evaluated naively for small $n$, e.g. $n=1/4$:
```python
showmath( numerical_integral( 1/sqrt(1-x^(0.25)) , 0, 1) )
```
However, choosing the right algorithm gives the correct result:
```python
a_ = 1/4. # exponent in integral
integral_numerical( 1/sqrt(1-abs(x)^a_) , 0, 1, algorithm='qags')
```
Let's check it against the exact formula:
```python
(beta(1/2,1/a)/a).subs({a:a_}).n()
```
Indeed, we see that careful numerical integration gives a finite result.
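A quick SciPy cross-check of the same integral (not part of the notebook); `quad` also copes with the integrable endpoint singularity at $x=1$:
```python
import numpy as np
from scipy.integrate import quad
from scipy.special import beta

n = 0.25
numeric, err = quad(lambda x: 1/np.sqrt(1 - x**n), 0, 1)
exact = beta(0.5, 1/n)/n
print(numeric, exact)   # both ~ 3.657
```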
## The dependence of period on energy for different $n$.
```python
var('E x n')
def draw_E(n,figsize=(6,2.5)):
p = []
p2 = []
p.append( plot(abs(x)^n,(x,-2,2),\
ymin=0,ymax=4,legend_label=r"$U(x)=|x|^{%s}$" % n ) )
p.append( plot( (x)^2,(x,-2,2),\
color='gray',legend_label=r"$U(x)=x^{2}$",\
axes_labels=[r"$x$",r"$U(x)$"] ))
p2.append( plot( 4/sqrt(2)*(beta(1/2, 1/n)/n)* E^(1/n-1/2),\
(E,0.00,3),ymin=0,ymax=7,axes_labels=[r"$E$",r"$T$"] ) )
p2.append( plot( 4/sqrt(2)*(beta(1/2, 1/2)/2),\
(E,0.00,3) ,color='gray' ) )
show( sum(p), figsize=figsize )
show( sum(p2), figsize=figsize )
```
```python
@interact
def _(n=slider([1/4,1/2,2/3,3/4,1,3/2,2,3])):
draw_E(n)
```
```python
import os
if 'PDF' in os.environ.keys():
draw_E(1/2)
draw_E(1)
draw_E(4)
```
We can plot the dependence of the period $T$ on the energy (i.e. on the amplitude), $T(E)$, for different values of $n$. In the figure below we see that for $n>2$ the oscillations become faster as the energy grows, while for $n<2$ they become slower with growing energy.
Another interesting observation is that for potentials with $n\gg1$ and $n\ll1$ the oscillations become arbitrarily slow and arbitrarily fast, respectively, as $E\to0$. For $n>1$ the potential well is *shallow* at the bottom and *steep* far from the minimum, and for $n<1$ the opposite is true.
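This can be checked by tabulating $T(E)$ from the beta-function formula for small $E$. The following sketch (not part of the original notebook) uses SciPy and assumes $m=1$, $A=1$ and two illustrative exponents:
```python
import numpy as np
from scipy.special import beta

def T_of_E(E, n, m=1.0):
    # T = 4*sqrt(m/2) * B(1/2, 1/n)/n * E^(1/n - 1/2), with A = 1
    return 4*np.sqrt(m/2) * beta(0.5, 1.0/n)/n * E**(1.0/n - 0.5)

for E in [1e-1, 1e-2, 1e-3]:
    # n = 4: period grows as E -> 0 (slow); n = 1/3: period shrinks (fast)
    print(E, T_of_E(E, 4), T_of_E(E, 1/3))
```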
```python
n_s = [1/3,2/3, 1, 2, 3]
plot( [4/sqrt(2)*(beta(1/2, 1/n_)/n_)* E^(1/n_-1/2) \
for n_ in n_s],\
(E,0.00,2),axes_labels=[r"$E$",r"$T$"],\
legend_label=['$n='+str(latex(n_))+'$' for n_ in n_s],\
ymax=7, figsize=(6,3) )
```
## Numerical integration of equations of motion
Here we will investigate the period $T$ numerically and compare it with the analytical formula.
First, let's have a closer look at how the potential and the force behave for different values of the index $n$:
```python
def plot_Uf(n_):
U(x) = abs(x)^(n_)
plt = plot( [U(x),-diff( U(x),x)],(x,-1,1),\
detect_poles='show',ymin=-3,ymax=3,
legend_label=[r"$U(x)$",'$f(x)$'])
plt.axes_labels([r"$x$",r"$U(x)=|x|^%s$"%latex(n_)])
show(plt,figsize=(6,3))
```
```python
@interact
def _(n_=slider(0.1,2,0.1)):
plot_Uf(n_)
```
```python
if 'PDF' in os.environ.keys():
plot_Uf(1)
plot_Uf(2)
plot_Uf(1/2)
```
We can see that for $n > 1$ both the force and the potential are continuous. For $n=1$ the force has a finite jump (a discontinuity) at $x=0$. Neither case poses a problem for numerical integration.
However, for $n<1$ the force diverges at $x=0$.
#### Problems with numerical integration
If the ODE integrator comes too close to the singularity at $x=0$, the numerical solution may blow up.
We can avoid this problem by softening the singularity of the potential, i.e. by adding a small number $\epsilon$:
$$ |x| \to |x| + \epsilon. $$
```python
var('x',domain='real')
var('v t')
eps = 1e-6
U(x) = (abs(x)+eps)^(1/2)
showmath( U.diff(x).expand().simplify() )
```
To make sure that Sage does not leave $x/|x|$ unsimplified, we can do:
```python
w0 = SR.wild(0)
w1 = SR.wild(1)
f = -U.diff(x).subs({w0*w1/abs(w1):w0*sign(w1)})
```
```python
showmath( f(x) )
```
```python
ode_pot = [v,f(x)]
t_lst = srange(0,10,0.01)
sol = desolve_odeint(ode_pot,[1,.0],t_lst,[x,v])
```
```python
p = line(zip(t_lst, sol[:,0])) + line(zip(t_lst, sol[:,1]), color='red')
p.axes_labels(['$t$','$x(t),v(t)$'])
p + plot(1,(x,0,10),linestyle='dashed',color='gray')
```
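For readers who prefer plain SciPy, here is an equivalent sketch of the same integration (not part of the original notebook); it assumes the same softened potential $U(x)=(|x|+\epsilon)^{1/2}$ with $\epsilon=10^{-6}$ and the initial condition $x(0)=1$, $v(0)=0$:
```python
import numpy as np
from scipy.integrate import solve_ivp

eps, n = 1e-6, 0.5

def rhs(t, state):
    x, v = state
    # force f = -dU/dx for U(x) = (|x| + eps)^n
    f = -n * (abs(x) + eps)**(n - 1) * np.sign(x)
    return [v, f]

t_eval = np.arange(0, 10, 0.01)
sol = solve_ivp(rhs, (0, 10), [1.0, 0.0], t_eval=t_eval, rtol=1e-8, atol=1e-10)
x_num, v_num = sol.y   # trajectories x(t) and v(t), analogous to sol[:,0], sol[:,1] above
```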
We can evaluate the period $T$ from the numerically obtained trajectory. For this purpose we locate the zero crossings of $x(t)$ by linear interpolation between neighbouring samples of the table returned by `desolve_odeint`:
```python
import numpy as np

def find_period(x, t):
    """Estimate the oscillation period from sampled x(t) via its zero crossings."""
    zero_list = []
    for i in range(1, len(x)):
        if x[i]*x[i-1] < 0:
            # linear interpolation of the zero crossing between t[i-1] and t[i]
            zero_list.append(-(t[i-1]*x[i] - t[i]*x[i-1])/(x[i-1] - x[i]))
    lnp = np.array(zero_list)
    # consecutive zero crossings are half a period apart
    return 2*((lnp - np.roll(lnp, 1))[1:]).mean()
```
```python
var('x1 x2 t1 t2 a b ')
showmath( (-b/a).subs( solve([a*t1+b==x1,a*t2+b==x2],[a,b], solution_dict=True)[0] ) )
```
We find the period of the trajectory numerically:
```python
T = find_period( sol[:,0],t_lst)
T
```
```python
assert abs(T-7.54250)<1e-4
```
Exact results for comparison:
```python
# for n=2 2*pi/sqrt(2)==(2*pi/sqrt(2)).n()
table( [["n","T"]]+[ [n_,((4/sqrt(2)*(beta(1/2, 1/n_)/n_)* E^(1/n_-1/2)).subs({E:1})).n()]
for n_ in [1/4,1/3,1/2,2/3,1,2,3,4,5] ] )
```
## Using the formula for the period to reproduce the trajectory of movement
We take $m=1$ and $A=1/2$ (so that $x_0=\sqrt{2E}$); then we can reproduce the trajectory by inverting the relation between time and position that underlies the formula for $T(E)$.
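For these specific values the inversion can even be done in closed form (a short check under the same assumptions $m=1$, $A=1/2$, $E=1$):
$$t(x_1)=\sqrt{\frac{m}{2}}\int_0^{x_1}\frac{dx}{\sqrt{E-\tfrac{1}{2}x^2}}=\arcsin\frac{x_1}{\sqrt{2}}\,,\qquad\text{hence}\qquad x(t)=\sqrt{2}\,\sin t ,$$
which is exactly the reference curve $\sqrt{2}\sin t$ used in the comparison plot below.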
```python
var('x')
U(A,x) = A*x^2
A = 1/2
E = 1
m = 1.
x1=0.1
showmath( solve(E-U(A,x), x) )
```
```python
t_lst = [ (sqrt(m/2.)*integrate( 1/sqrt(E-U(A,x)),(x,0,x1)).n(),x1) \
for x1 in srange(0,sqrt(2.)+1e-10,1e-2)]
```
```python
point(t_lst ,color='red')+\
plot(sqrt(2)*sin(x),(x,0,pi),figsize=(6,2))
```
Interestingly, if we know the dependence $T(E)$, then we can reconstruct the potential exactly!
\newpage
Source: `012-1d_potential_well.ipynb` from the repository `marcinofulus/Mechanics_with_SageMath` (Jupyter Notebook, MIT license).
# Random Signals and LTI-Systems
*This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [Sascha.Spors@uni-rostock.de](mailto:Sascha.Spors@uni-rostock.de).*
## Auto-Correlation Function
The auto-correlation function (ACF) $\varphi_{yy}[\kappa]$ of the output signal of an LTI system $y[k] = \mathcal{H} \{ x[k] \}$ is derived. It is assumed that the input signal is a wide-sense stationary (WSS) real-valued random process and that the LTI system has a real-valued impulse response $h[k] \in \mathbb{R}$.
Introducing the output relation $y[k] = h[k] * x[k]$ of an LTI system into the definition of the ACF and rearranging terms yields
\begin{equation}
\begin{split}
\varphi_{yy}[\kappa] &= E \{ y[k+\kappa] \cdot y[k] \} \\
&= E \left\{ \sum_{\mu = -\infty}^{\infty} h[\mu] \; x[k+\kappa-\mu] \cdot
\sum_{\nu = -\infty}^{\infty} h[\nu] \; x[k-\nu] \right\} \\
&= \sum_{\mu = -\infty}^{\infty} \sum_{\nu = -\infty}^{\infty} h[\mu] \, h[\nu] \; E \{ x[k+\kappa-\mu] \cdot x[k-\nu] \} \\
&= \sum_{\mu = -\infty}^{\infty} \sum_{\nu = -\infty}^{\infty} h[\mu] \, h[\nu] \; \varphi_{xx}[\kappa - \mu + \nu] \\
&= \underbrace{h[\kappa] * h[-\kappa]}_{\varphi_{hh}[\kappa]} * \varphi_{xx}[\kappa]
\end{split}
\end{equation}
where the ACF $\varphi_{hh}[\kappa]$ of the deterministic impulse response $h[k]$ is commonly termed the *filter ACF*. This is related to the [link between ACF and convolution](../random_signals/correlation_functions.ipynb#Definition). The relation above is known as the *Wiener-Lee theorem*. It states that the ACF of the output $\varphi_{yy}[\kappa]$ of an LTI system is given by the convolution of the input signal's ACF $\varphi_{xx}[\kappa]$ with the filter ACF $\varphi_{hh}[\kappa]$. For a system which just attenuates the input signal $y[k] = A \cdot x[k]$ with $A \in \mathbb{R}$, the ACF at the output is given as $\varphi_{yy}[\kappa] = A^2 \cdot \varphi_{xx}[\kappa]$.
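The Wiener-Lee theorem can be verified directly in NumPy: generate white noise, filter it with an arbitrary example impulse response, and compare the estimated output ACF with the filter ACF (a small sketch, not part of the original notebook):
```python
import numpy as np

np.random.seed(0)
x = np.random.normal(size=20000)          # white input with phi_xx[kappa] = delta[kappa]
h = np.array([1.0, 0.5, 0.25])            # arbitrary example impulse response
y = np.convolve(x, h, mode='full')

phi_hh = np.correlate(h, h, mode='full')            # filter ACF h[kappa] * h[-kappa]
acf_y = np.correlate(y, y, mode='full') / len(y)    # estimated output ACF
mid = len(acf_y) // 2                               # index of lag kappa = 0

print(acf_y[mid-2:mid+3])   # estimate for kappa = -2 ... 2
print(phi_hh)               # theory: [0.25, 0.625, 1.3125, 0.625, 0.25]
```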
### Example - System Response to White Noise
Let's assume that the wide-sense ergodic input signal $x[k]$ of an LTI system with impulse response $h[k] = \text{rect}_N[k]$ is normal distributed white noise. Introducing $\varphi_{xx}[\kappa] = N_0\, \delta[\kappa]$ and $h[k]$ into the Wiener-Lee theorem yields
\begin{equation}
\varphi_{yy}[\kappa] = N_0 \cdot \varphi_{hh}[\kappa] = N_0 \cdot (\text{rect}_N[\kappa] * \text{rect}_N[-\kappa])
\end{equation}
The example is evaluated numerically for $N_0 = 1$ and $N=5$
```python
import numpy as np
import matplotlib.pyplot as plt
L = 10000 # number of samples
K = 30 # limit for lags in ACF
# generate input signal (white Gaussian noise)
np.random.seed(2)
x = np.random.normal(size=L)
# compute system response
y = np.convolve(x, [1, 1, 1, 1, 1], mode='full')
# compute and truncate ACF
acf = 1/len(y) * np.correlate(y, y, mode='full')
acf = acf[len(y)-K-1:len(y)+K-1]
kappa = np.arange(-K, K)
# plot ACF
plt.figure(figsize=(10, 6))
plt.stem(kappa, acf, use_line_collection=True)
plt.title('Estimated ACF of output signal $y[k]$')
plt.ylabel(r'$\hat{\varphi}_{yy}[\kappa]$')
plt.xlabel(r'$\kappa$')
plt.axis([-K, K, 1.2*min(acf), 1.1*max(acf)])
plt.grid()
```
**Exercise**
* Derive the theoretic result for $\varphi_{yy}[\kappa]$ by calculating the filter-ACF $\varphi_{hh}[\kappa]$.
* Why is the estimated ACF $\hat{\varphi}_{yy}[\kappa]$ of the output signal not exactly equal to its theoretic result $\varphi_{yy}[\kappa]$?
* Change the number of samples `L` and rerun the example. What changes?
Solution: The filter-ACF is given by $\varphi_{hh}[\kappa] = \text{rect}_N[\kappa] * \text{rect}_N[-\kappa]$. The convolution of two rectangular signals $\text{rect}_N[\kappa]$ results in a triangular signal. Taking the time reversal into account yields
\begin{equation}
\varphi_{hh}[\kappa] = \begin{cases}
N - |\kappa| & \text{for } -N < \kappa \leq N \\
0 & \text{otherwise}
\end{cases}
\end{equation}
for any $N$. The estimated ACF $\hat{\varphi}_{yy}[\kappa]$ differs from its theoretical value due to the statistical uncertainties that arise when using random signals of finite length. Increasing the length `L` lowers these uncertainties.
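The triangular shape can be overlaid on the estimate from the example above (a sketch that assumes the estimation cell has been executed, so that `acf` and `kappa` are still defined):
```python
import numpy as np
import matplotlib.pyplot as plt

# theoretical ACF for N0 = 1, N = 5: phi_yy[kappa] = 5 - |kappa| for |kappa| < 5
phi_theory = np.maximum(5 - np.abs(kappa), 0)

plt.figure(figsize=(10, 4))
plt.stem(kappa, acf, use_line_collection=True, label='estimate')
plt.plot(kappa, phi_theory, 'C1o', label=r'theory $5-|\kappa|$')
plt.xlabel(r'$\kappa$')
plt.legend()
plt.grid()
```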
## Cross-Correlation Function
The cross-correlation functions (CCFs) $\varphi_{xy}[\kappa]$ and $\varphi_{yx}[\kappa]$ between the in- and output signal of an LTI system $y[k] = \mathcal{H} \{ x[k] \}$ are derived. As for the ACF it is assumed that the input signal originates from a wide-sense stationary real-valued random process and that the LTI system's impulse response is real-valued, i.e. $h[k] \in \mathbb{R}$.
Introducing the convolution into the definition of the CCF and rearranging the terms yields
\begin{equation}
\begin{split}
\varphi_{xy}[\kappa] &= E \{ x[k+\kappa] \cdot y[k] \} \\
&= E \left\{ x[k+\kappa] \cdot \sum_{\mu = -\infty}^{\infty} h[\mu] \; x[k-\mu] \right\} \\
&= \sum_{\mu = -\infty}^{\infty} h[\mu] \cdot E \{ x[k+\kappa] \cdot x[k-\mu] \} \\
&= h[-\kappa] * \varphi_{xx}[\kappa]
\end{split}
\end{equation}
The CCF $\varphi_{xy}[\kappa]$ between in- and output is given as the time-reversed impulse response of the system convolved with the ACF of the input signal.
The CCF between out- and input is yielded by taking the symmetry relations of the CCF and ACF into account
\begin{equation}
\varphi_{yx}[\kappa] = \varphi_{xy}[-\kappa] = h[\kappa] * \varphi_{xx}[\kappa]
\end{equation}
The CCF $\varphi_{yx}[\kappa]$ between out- and input is given as the impulse response of the system convolved with the ACF of the input signal.
For a system which just attenuates the input signal $y[k] = A \cdot x[k]$, the CCFs between input and output are given as $\varphi_{xy}[\kappa] = A \cdot \varphi_{xx}[\kappa]$ and $\varphi_{yx}[\kappa] = A \cdot \varphi_{xx}[\kappa]$.
## System Identification by Cross-Correlation
The process of determining the impulse response or transfer function of a system is referred to as *system identification*. The CCFs of an LTI system play an important role in the estimation of the impulse response $h[k]$ of an unknown system. This is illustrated in the following.
The basic idea is to use a specific measurement signal as input signal to the system. Let's assume that the unknown LTI system is excited by [white noise](../random_signals/white_noise.ipynb). The ACF of the wide-sense stationary input signal $x[k]$ is then given as $\varphi_{xx}[\kappa] = N_0 \cdot \delta[\kappa]$. According to the relation derived above, the CCF between out- and input for this special choice of the input signal becomes
\begin{equation}
\varphi_{yx}[\kappa] = h[\kappa] * N_0 \cdot \delta[\kappa] = N_0 \cdot h[\kappa]
\end{equation}
For white noise as input signal $x[k]$, the impulse response of an LTI system can be estimated by estimating the CCF between its out- and input signals. Using noise as measurement signal instead of a Dirac impulse is beneficial since its [crest factor](https://en.wikipedia.org/wiki/Crest_factor) is limited.
### Example
The application of the CCF to the identification of a system is demonstrated. The system is excited by wide-sense ergodic normal distributed white noise with $N_0 = 1$. The ACF of the in- and output, as well as the CCF between out- and input is estimated and plotted.
```python
import scipy.signal as sig
N = 10000 # number of samples for input signal
K = 50 # limit for lags in ACF
# generate input signal
# normally distributed (zero-mean, unit-variance) white noise
np.random.seed(5)
x = np.random.normal(size=N)
# impulse response of the system
h = np.concatenate((np.zeros(10), sig.triang(10), np.zeros(10)))
# output signal by convolution
y = np.convolve(h, x, mode='full')
# compute correlation functions
acfx = 1/len(x) * np.correlate(x, x, mode='full')
acfy = 1/len(y) * np.correlate(y, y, mode='full')
ccfyx = 1/len(y) * np.correlate(y, x, mode='full')
def plot_correlation_function(cf):
'''Plot correlation function.'''
cf = cf[N-K-1:N+K-1]
kappa = np.arange(-len(cf)//2, len(cf)//2)
plt.stem(kappa, cf, use_line_collection=True)
plt.xlabel(r'$\kappa$')
plt.axis([-K, K, -0.2, 1.1*max(cf)])
# plot ACFs and CCF
plt.rc('figure', figsize=(10, 3))
plt.figure()
plot_correlation_function(acfx)
plt.title('Estimated ACF of input signal')
plt.ylabel(r'$\hat{\varphi}_{xx}[\kappa]$')
plt.figure()
plot_correlation_function(acfy)
plt.title('Estimated ACF of output signal')
plt.ylabel(r'$\hat{\varphi}_{yy}[\kappa]$')
plt.figure()
plot_correlation_function(ccfyx)
plt.plot(np.arange(len(h)), h, 'g-')
plt.title('Estimated and true impulse response')
plt.ylabel(r'$\hat{h}[k]$, $h[k]$');
```
**Exercise**
* Why is the estimated CCF $\hat{\varphi}_{yx}[k]$ not exactly equal to the true impulse response $h[k]$ of the system?
* What changes if you change the number of samples `N` of the input signal?
Solution: The derived relations for system identification hold for the case of a wide-sense ergodic input signal of infinite duration. Since we can only numerically simulate signals of finite duration, the observed deviations are a result of the resulting statistical uncertainties. Increasing the length `N` of the input signal improves the estimate of the impulse response.
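To see this quantitatively, one can repeat the identification for increasing signal lengths and inspect the maximum deviation between the estimate and the true impulse response (a sketch with illustrative lengths; the FFT-based correlation keeps it fast):
```python
import numpy as np
import scipy.signal as sig

h = np.concatenate((np.zeros(10), sig.triang(10), np.zeros(10)))

for N in [1000, 10000, 100000]:
    np.random.seed(1)
    x = np.random.normal(size=N)
    y = np.convolve(h, x, mode='full')
    ccfyx = sig.correlate(y, x, mode='full', method='fft') / len(y)
    h_est = ccfyx[N-1:N-1+len(h)]        # lags 0 ... len(h)-1
    print(N, np.max(np.abs(h_est - h)))  # error shrinks roughly like 1/sqrt(N)
```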
**Copyright**
This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples*.
Source: `random_signals_LTI_systems/correlation_functions.ipynb` from the repository `TA1DB/digital-signal-processing-lecture` (Jupyter Notebook, MIT license).