file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
sevencow.py
|
'%s:%s:%s' % (self.access_key, token, info)
class Cow(object):
def __init__(self, access_key, secret_key):
self.access_key = access_key
self.secret_key = secret_key
self.upload_tokens = {}
self.stat = functools.partial(self._stat_rm_handler, 'stat')
self.delete = functools.partial(self._stat_rm_handler, 'delete')
self.copy = functools.partial(self._cp_mv_handler, 'copy')
self.move = functools.partial(self._cp_mv_handler, 'move')
def get_bucket(self, bucket):
"""对一个bucket的文件进行操作,
推荐使用此方法得到一个bucket对象,
然后对此bucket的操作就只用传递文件名即可
"""
return Bucket(self, bucket)
def generate_access_token(self, url, params=None):
uri = urlparse(url)
token = uri.path
if uri.query:
token = '%s?%s' % (token, uri.query)
token = '%s\n' % token
if params:
if isinstance(params, basestring):
token += params
else:
token += urlencode(params)
return '%s:%s' % (self.access_key, signing(self.secret_key, token))
def build_requests_headers(self, token):
return {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'QBox %s' % token
}
@requests_error_handler
def api_call(self, url, params=None):
token = self.generate_access_token(url, params=params)
if params:
res = requests.post(url, data=params, headers=self.build_requests_headers(token))
else:
res = requests.post(url, headers=self.build_requests_headers(token))
assert res.status_code == 200, res
return res.json() if res.text else ''
def list_buckets(self):
"""列出所有的buckets"""
url = '%s/buckets' % RS_HOST
return self.api_call(url)
def create_bucket(self, name):
"""不建议使用API建立bucket
测试发现API建立的bucket默认无法设置<bucket_name>.qiniudn.com的二级域名
请直接到web界面建立
"""
url = '%s/mkbucket/%s' % (RS_HOST, name)
return self.api_call(url)
def drop_bucket(self, bucket):
"""删除整个bucket"""
url = '%s/drop/%s' % (RS_HOST, bucket)
return self.api_call(url)
def list_files(self, bucket, marker=None, limit=None, prefix=None):
"""列出bucket中的文件"""
query = ['bucket=%s' % bucket]
if marker:
query.append('marker=%s' % marker)
if limit:
query.append('limit=%s' % limit)
if prefix:
query.append('prefix=%s' % prefix)
url = '%s/list?%s' % (RSF_HOST, '&'.join(query))
return self.api_call(url)
def generate_upload_token(self, scope, ttl=3600):
"""上传文件的uploadToken"""
if scope not in self.upload_tokens:
self.upload_tokens[scope] = UploadToken(self.access_key, self.secret_key, scope, ttl=ttl)
return self.upload_tokens[scope].token
@requests_error_handler
@expected_argument_type(2, (basestring, list, tuple))
def put(self, scope, filename, names=None):
"""上传文件
filename 如果是字符串,表示上传单个文件,
如果是list或者tuple,表示上传多个文件
names 是dict,key为filename, value为上传后的名字
如果不设置,默认为文件名
"""
url = '%s/upload' % UP_HOST
token = self.generate_upload_token(scope)
names = names or {}
def _uploaded_name(filename):
return names.get(filename, None) or os.path.basename(filename)
def _put(filename):
files = {
'file': (filename, open(filename, 'rb')),
}
action = '/rs-put/%s' % urlsafe_b64encode(
'%s:%s' % (scope, _uploaded_name(filename))
)
_type, _encoding = mimetypes.guess_type(filename)
if _type:
action += '/mimeType/%s' % urlsafe_b64encode(_type)
data = {
'auth': token,
'action': action,
}
res = requests.post(url, files=files, data=data)
assert res.status_code == 200, res
return res.json()
if isinstance(filename, basestring):
# 单个文件
return _put(filename)
# 多文件
return [_put(f) for f in filename]
@expected_argument_type(2, (list, tuple))
def _cp_mv_handler(self, action, args):
"""copy move方法
action: 'copy' or 'move'
args: [src_bucket, src_filename, des_bucket, des_filename]
or [(src_bucket, src_filename, des_bucket, des_filename), (), ...]
args 第一种形式就是对一个文件进行操作,第二种形式是多个文件批量操作
用户不用直接调用这个方法
"""
if isinstance(args[0], basestring):
return self._cp_mv_single(action, args)
if isinstance(args[0], (list, tuple)):
return self._cp_mv_batch(action, args)
@expected_argument_type(3, (basestring, list, tuple))
def _stat_rm_handler(self, action, bucket, filename):
"""stat delete方法
action: 'stat' or 'delete'
bucket: 哪个bucket
filenmae: 'aabb' or ['aabb', 'ccdd', ...]
filename 第一种形式就是对一个文件进行操作,第二种形式是多个文件批量操作
用户不用直接调用这个方法
"""
if isinstance(filename, basestring):
return self._stat_rm_single(action, bucket, filename)
if isinstance(filename, (list, tuple)):
return self._stat_rm_batch(action, bucket, filename)
def _cp_mv_single(self, action, args):
src_bucket, src_filename, des_bucket, des_filename = args
url = '%s/%s/%s/%s' % (
RS_HOST,
action,
urlsafe_b64encode('%s:%s' % (src_bucket, src_filename)),
urlsafe_b64encode('%s:%s' % (des_bucket, des_filename)),
)
return self.api_call(url)
def _cp_mv_batch(self, action, args):
url = '%s/batch' % RS_HOST
def _one_param(arg):
return 'op=/%s/%s/%s' % (
action,
urlsafe_b64encode('%s:%s' % (arg[0], arg[1])),
urlsafe_b64encode('%s:%s' % (arg[2], arg[3])),
)
param = '&'.join( map(_one_param, args) )
return self.api_call(url, param)
def _stat_rm_single(self, action, bucket, filename):
url = '%s/%s/%s' % (
RS_HOST, action, urlsafe_b64encode('%s:%s' % (bucket, filename))
)
return self.api_call(url)
def _stat_rm_batch(self, action, bucket, filenames):
url = '%s/batch' % RS_HOST
param = [
'op=/%s/%s' % (
action, urlsafe_b64encode('%s:%s' % (bucket, f))
) for f in filenames
]
param = '&'.join(param)
return self.api_call(url, param)
def transform_argument(func):
@functools.wraps(func)
def deco(self, *args, **kwargs):
filename = args[0] if len(args) == 1 else args
return func(self, filename, **kwargs)
return deco
class Bucket(object):
def __init__(self, cow, bucket):
self.cow = cow
self.bucket = bucket
@transform_argument
def put(self, *args, **kwargs):
names = kwargs.get('names', None)
if names and not isinstance(names, dict):
raise TypeError(
"names Type error, Expected dict, But got Type of {0}".format(type(names))
)
return self.cow.put(self.bucket, args[0], names=names)
@transform_argument
def stat(self, *args):
return self.cow.stat(self.b
|
ucket, args[0])
@transform_argument
def delete(self, *args):
return self.cow.delete(self.bucket, args[0])
@transform_argument
def copy(self, *args):
return self.cow.copy(self._build_cp_mv_args(args[0]))
@transform_argument
def move(self, *args):
return self.cow.move(self._build_cp_mv_args(args[0]))
def list_files(self, marker=None, limit=None, prefix=None):
return self.cow.list_files(self.bucket, marker=marker, limit=limit, prefix=prefix)
def _build_cp_mv_args(self, filename):
if isinstance(filename[0], basestring):
|
identifier_body
|
|
sevencow.py
|
.qbox.me'
UP_HOST = 'http://up.qbox.me'
RSF_HOST = 'http://rsf.qbox.me'
class CowException(Exception):
def __init__(self, url, status_code, reason, content):
self.url = url
self.status_code = status_code
self.reason = reason
self.content = content
Exception.__init__(self, '%s, %s' % (reason, content))
def signing(secret_key, data):
return urlsafe_b64encode(
hmac.new(secret_key, data, sha1).digest()
)
def requests_error_handler(func):
@functools.wraps(func)
def deco(*args, **kwargs):
try:
return func(*args, **kwargs)
except AssertionError as e:
req = e.args[0]
raise CowException(
req.url, req.status_code, req.reason, req.content
)
return deco
def expected_argument_type(pos, types):
def deco(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
if not isinstance(args[pos], types):
raise TypeError(
"{0} Type error, Expected {1}".format(args[pos], types)
)
return func(*args, **kwargs)
return wrap
return deco
class UploadToken(object):
def __init__(self, access_key, secret_key, scope, ttl=3600):
self.access_key = access_key
self.secret_key = secret_key
self.scope = scope
self.ttl = ttl
self._token = None
self.generated = int(time.time())
@property
def token(self):
if int(time.time()) - self.generated > self.ttl - 60:
# 还有一分钟也认为过期了, make new token
self._token = self._make_token()
if not self._token:
self._token = self._make_token()
return self._token
def _make_token(self):
self.generated = int(time.time())
info = {
'scope': self.scope,
'deadline': self.generated + self.ttl
}
info = urlsafe_b64encode(json.dumps(info))
token = signing(self.secret_key, info)
return '%s:%s:%s' % (self.access_key, token, info)
class Cow(object):
def __init__(self, access_key, secret_key):
self.access_key = access_key
self.secret_key = secret_key
self.upload_tokens = {}
self.stat = functools.partial(self._stat_rm_handler, 'stat')
self.delete = functools.partial(self._stat_rm_handler, 'delete')
self.copy = functools.partial(self._cp_mv_handler, 'copy')
self.move = functools.partial(self._cp_mv_handler, 'move')
def get_bucket(self, bucket):
"""对一个bucket的文件进行操作,
推荐使用此方法得到一个bucket对象,
然后对此bucket的操作就只用传递文件名即可
"""
return Bucket(self, bucket)
def generate_access_token(self, url, params=None):
uri = urlparse(url)
token = uri.path
if uri.query:
token = '%s?%s' % (token, uri.query)
token = '%s\n' % token
if params:
if isinstance(params, basestring):
token += params
else:
token += urlencode(params)
return '%s:%s' % (self.access_key, signing(self.secret_key, token))
def build_requests_headers(self, token):
return {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'QBox %s' % token
}
@requests_error_handler
def api_call(self, url, params=None):
token = self.generate_access_token(url, params=params)
if params:
res = requests.post(url, data=params, headers=self.build_requests_headers(token))
else:
res = requests.post(url, headers=self.build_requests_headers(token))
assert res.status_code == 200, res
return res.json() if res.text else ''
def list_buckets(self):
"""列出所有的buckets"""
url = '%s/buckets' % RS_HOST
return self.api_call(url)
def create_bucket(self, name):
"""不建议使用API建立bucket
测试发现API建立的bucket默认无法设置<bucket_name>.qiniudn.com的二级域名
请直接到web界面建立
"""
url = '%s/mkbucket/%s' % (RS_HOST, name)
return self.api_call(url)
def drop_bucket(self, bucket):
"""删除整个bucket"""
url = '%s/drop/%s' % (RS_HOST, bucket)
return self.api_call(url)
def list_files(self, bucket, marker=None, limit=None, prefix=None):
"""列出bucket中的文件"""
query = ['bucket=%s' % bucket]
if marker:
query.append('marker=%s' % marker)
if limit:
query.append('limit=%s' % limit)
if prefix:
query.append('prefix=%s' % prefix)
url = '%s/list?%s' % (RSF_HOST, '&'.join(query))
return self.api_call(url)
def generate_upload_token(self, scope, ttl=3600):
"""上传文件的uploadToken"""
if scope not in self.upload_tokens:
self.upload_tokens[scope] = UploadToken(self.access_key, self.secret_key, scope, ttl=ttl)
return self.upload_tokens[scope].token
@requests_error_handler
@expected_argument_type(2, (basestring, list, tuple))
def put(self, scope, filename, names=None):
"""上传文件
filename 如果是字符串,表示上传单个文件,
如果是list或者tuple,表示上传多个文件
names 是dict,key为filename, value为上传后的名字
如果不设置,默认为文件名
"""
u
|
= '%s/upload' % UP_HOST
token = self.generate_upload_token(scope)
names = names or {}
def _uploaded_name(filename):
return names.get(filename, None) or os.path.basename(filename)
def _put(filename):
files = {
'file': (filename, open(filename, 'rb')),
}
action = '/rs-put/%s' % urlsafe_b64encode(
'%s:%s' % (scope, _uploaded_name(filename))
)
_type, _encoding = mimetypes.guess_type(filename)
if _type:
action += '/mimeType/%s' % urlsafe_b64encode(_type)
data = {
'auth': token,
'action': action,
}
res = requests.post(url, files=files, data=data)
assert res.status_code == 200, res
return res.json()
if isinstance(filename, basestring):
# 单个文件
return _put(filename)
# 多文件
return [_put(f) for f in filename]
@expected_argument_type(2, (list, tuple))
def _cp_mv_handler(self, action, args):
"""copy move方法
action: 'copy' or 'move'
args: [src_bucket, src_filename, des_bucket, des_filename]
or [(src_bucket, src_filename, des_bucket, des_filename), (), ...]
args 第一种形式就是对一个文件进行操作,第二种形式是多个文件批量操作
用户不用直接调用这个方法
"""
if isinstance(args[0], basestring):
return self._cp_mv_single(action, args)
if isinstance(args[0], (list, tuple)):
return self._cp_mv_batch(action, args)
@expected_argument_type(3, (basestring, list, tuple))
def _stat_rm_handler(self, action, bucket, filename):
"""stat delete方法
action: 'stat' or 'delete'
bucket: 哪个bucket
filenmae: 'aabb' or ['aabb', 'ccdd', ...]
filename 第一种形式就是对一个文件进行操作,第二种形式是多个文件批量操作
用户不用直接调用这个方法
"""
if isinstance(filename, basestring):
return self._stat_rm_single(action, bucket, filename)
if isinstance(filename, (list, tuple)):
return self._stat_rm_batch(action, bucket, filename)
def _cp_mv_single(self, action, args):
src_bucket, src_filename, des_bucket, des_filename = args
url = '%s/%s/%s/%s' % (
RS_HOST,
action,
urlsafe_b64encode('%s:%s' % (src_bucket, src_filename)),
urlsafe_b64encode('%s:%s' % (des_bucket, des_filename)),
)
return self.api_call(url)
def _cp_mv_batch(self, action, args):
url = '%s/batch' % RS_HOST
def _one_param(arg):
return 'op=/%s/%s/%s' % (
action,
urlsafe_b64encode('%s:%s' % (arg[0], arg[1])),
urlsafe_b64encode('%s:%s' % (arg[2], arg[3])),
)
param = '&'.join( map(_one
|
rl
|
identifier_name
|
class_vertice_graph.py
|
.gare_name = None #nom de la gare
self.color = None #couleur de la gare
self.is_a_station= True # boolean True, si le noeud est veritablement une gare. False sinon
def get_lines_connected(self):
list_of_line = []
for edge in self._edges_list:
if edge.id not in list_of_line:
list_of_line.append(edge.id)
return list_of_line
@property
def edges_list(self):
""" Returns the list of neighbour. """
return self._edges_list
# We suppose that the index and the coordinates never change.
# The other properties can.
@edges_list.setter
def
|
(self, edges_list):
""" An element of edges_list is an edge """
for e in edges_list:
exceptions.check_pertinent_edge(self, e)
self._edges_list = edges_list
def neighbours_list(self, list_tuple, id=0):
self._edges_list.clear()
"""interface with old constructor , tuple=(neighbour_vertice,cost) is an element of list_tuple """
for tuple in list_tuple:
E = Edge(self, tuple[0], id, tuple[1])
self._edges_list.append(E)
def number_of_neighbours(self):
return len(self._edges_list)
def is_linked(self, other):
"""returns True if there is an edge between self and other"""
for edge in self._edges_list:
if other.index == edge.linked[1].index:
return True
return False
def push_edge(self, edge, coords_verif=False):
if coords_verif:
exceptions.check_pertinent_edge_coords_verif(self, edge)
else:
exceptions.check_pertinent_edge(self, edge)
self._edges_list.append(edge)
"""
def cost_between(self, other):
for edge in self._edges_list:
[vertice, vertice_voisin] = edge.linked
if vertice_voisin == other:
return edge.given_cost"""
def __repr__(self):
return f"Vertice {str(self.index)}"
def __lt__(self, other):
return self.priority < other.priority
class Edge:
def __init__(self, vertice1, vertice2, id, given_cost=0):
self.linked = [vertice1,vertice2]
self.id = id #identifiant de la liaison. ici id=nom de la ligne a laqualle appartient la liaison
self._given_cost = given_cost #cout de deplacement de la liason donne par l'utilisateur ou la base de donnee
#data_base
self.color=None #couleur de la liason
self.connection_with_displayable=None #indice de la liason developpee( trace reel) dans la table de connection connection_table_edge_and_diplayable_edge de la classe graph
self.index=None
def set_given_cost(self,cost):
self._given_cost=cost
#ne pas mettre @property ici, on veut une methode pas un attribut
def euclidian_cost(self):
return np.sqrt(self.square_euclidian_cost())
def square_euclidian_cost(self):
return np.dot(np.transpose(self.linked[0].coordinates-self.linked[1].coordinates),(self.linked[0].coordinates-self.linked[1].coordinates))
def customized_cost1(self):
V_metro = 25.1 / 3.6 #vitesse moyenne en km/h /3.6 -> vitesse moyenne en m/s
V_train = 49.6 / 3.6
V_tram = 18 / 3.6
V_pieton = 4 / 3.6
if self.id in ["A","B","C","D","E","H","I","J","K","L","M","N","P","R","U","TER","GL"]:
return self._given_cost/V_train
if self.id in [str(i) for i in range(1,15)]+["ORL","CDG","3b","7b"]:
return self._given_cost/V_metro
if self.id in ["T"+str(i) for i in range(1,12)]+["T3A","T3B","FUN"]:
return self._given_cost/V_tram
if self.id in ["RER Walk"]:
return self._given_cost/V_pieton
raise ValueError(" Dans customized_cost1 " +self.id+" non pris en compte dans le calcul de distance")
def __eq__(self,other):
"""2 edges are equal iff same cordinates and same id """
boul0 = self.linked[0].coordinates[0]==other.linked[0].coordinates[0] and self.linked[0].coordinates[1]==other.linked[0].coordinates[1]
boul1 = self.linked[1].coordinates[0]==other.linked[1].coordinates[0] and self.linked[1].coordinates[1]==other.linked[1].coordinates[1]
boulid = self.id==other.id
return boul0 and boul1 and boulid
def __ne__(self,other):
"""2 edges are not equal iff they are not equal :) """
return (self==other)==False
#ne pas mettre @property ici, on veut une methode pas un attribut
def given_cost(self):
return self._given_cost
def __repr__(self):
return f"Edge [{str(self.linked[0].index)}, {str(self.linked[1].index)}] !oriented!"
class Graph:
""" All the information of a graph are contained here. """
def __init__(self,list_of_vertices):
""" Entry : the list of vertices. """
self.list_of_vertices = list_of_vertices
self.number_of_vertices = len(list_of_vertices)
self.connection_table_edge_and_diplayable_edge=[]
self.list_of_edges=[]
self.number_of_disp_edges=0
self.number_of_edges=0
def push_diplayable_edge(self,bidim_array):
self.connection_table_edge_and_diplayable_edge.append(copy.deepcopy(bidim_array))
self.number_of_disp_edges+=1
def push_edge(self,e):
self.number_of_edges+=1
self.list_of_edges.append(e)
def push_edge_without_doublons(self, e):
if e not in self.list_of_edges:
self.number_of_edges+=1
self.list_of_edges.append(e)
def push_vertice(self,vertice):
self.list_of_vertices.append(vertice)
self.number_of_vertices += 1
def push_vertice_without_doublons(self, vertice):
bool,index = self.is_vertice_in_graph_based_on_xy_with_tolerance(vertice,10**(-8))
#bool,index = self.is_vertice_in_graph_based_on_xy(vertice)
if bool == False:
self.push_vertice(vertice)
else:
vertice.coordinates=self.list_of_vertices[index].coordinates
for edge in vertice.edges_list:
if edge not in self.list_of_vertices[index].edges_list:
self.list_of_vertices[index].push_edge(edge,True)
def is_vertice_in_graph_based_on_xy(self,vertice):
for i in range(self.number_of_vertices):
v = self.list_of_vertices[i]
if v.coordinates[0] == vertice.coordinates[0] and v.coordinates[1] == vertice.coordinates[1]:
return True,i
return False,None
def is_vertice_in_graph_based_on_xy_with_tolerance(self, vertice, epsilon):
for i in range(self.number_of_vertices):
v = self.list_of_vertices[i]
if ((v.coordinates[0] - vertice.coordinates[0])**2) + ((v.coordinates[1] - vertice.coordinates[1])**2) < epsilon:
return True, i
return False, None
def __getitem__(self, key):#implement instance[key]
if key >= 0 and key < self.number_of_vertices:
return self.list_of_vertices[key]
else :
raise IndexError
def laplace_matrix(self):
""" Returns the laplace matrix. """
n = self.number_of_vertices
laplace_matrix = np.zeros((n, n))
for i in range(n):
laplace_matrix[i][i] = 1
vertice = self.list_of_vertices[i]
for edge in vertice.edges_list:
laplace_matrix[i][edge.linked[1].index] = 1
return laplace_matrix
def A_matrix(self,type_cost=Edge.given_cost):
""" Returns the laplace matrix. """
n = self.number_of_vertices
A_matrix = np.zeros((n, n))
for i in range(n):
vertice = self.list_of_vertices[i]
for edge in vertice.edges_list:
cost = type_cost(edge)
A_matrix[i][edge.linked[1].index] = cost
A_matrix[edge.linked[1].index][i] = cost
return A_matrix
def pairs_of_vertices(self):
"""Returns the pairs of connected vertices.
Beware ! There might be non-connected vertices in the graph. """
pairs_of_vertices = []
for vertice in self.list_of_vertices:
for
|
edges_list
|
identifier_name
|
class_vertice_graph.py
|
self.gare_name = None #nom de la gare
self.color = None #couleur de la gare
self.is_a_station= True # boolean True, si le noeud est veritablement une gare. False sinon
def get_lines_connected(self):
list_of_line = []
for edge in self._edges_list:
if edge.id not in list_of_line:
list_of_line.append(edge.id)
return list_of_line
@property
def edges_list(self):
""" Returns the list of neighbour. """
return self._edges_list
# We suppose that the index and the coordinates never change.
# The other properties can.
@edges_list.setter
def edges_list(self, edges_list):
""" An element of edges_list is an edge """
for e in edges_list:
exceptions.check_pertinent_edge(self, e)
self._edges_list = edges_list
def neighbours_list(self, list_tuple, id=0):
self._edges_list.clear()
"""interface with old constructor , tuple=(neighbour_vertice,cost) is an element of list_tuple """
for tuple in list_tuple:
E = Edge(self, tuple[0], id, tuple[1])
self._edges_list.append(E)
def number_of_neighbours(self):
return len(self._edges_list)
def is_linked(self, other):
"""returns True if there is an edge between self and other"""
for edge in self._edges_list:
if other.index == edge.linked[1].index:
return True
return False
def push_edge(self, edge, coords_verif=False):
if coords_verif:
exceptions.check_pertinent_edge_coords_verif(self, edge)
else:
exceptions.check_pertinent_edge(self, edge)
self._edges_list.append(edge)
"""
def cost_between(self, other):
for edge in self._edges_list:
[vertice, vertice_voisin] = edge.linked
if vertice_voisin == other:
return edge.given_cost"""
def __repr__(self):
return f"Vertice {str(self.index)}"
def __lt__(self, other):
return self.priority < other.priority
class Edge:
def __init__(self, vertice1, vertice2, id, given_cost=0):
self.linked = [vertice1,vertice2]
self.id = id #identifiant de la liaison. ici id=nom de la ligne a laqualle appartient la liaison
self._given_cost = given_cost #cout de deplacement de la liason donne par l'utilisateur ou la base de donnee
#data_base
self.color=None #couleur de la liason
self.connection_with_displayable=None #indice de la liason developpee( trace reel) dans la table de connection connection_table_edge_and_diplayable_edge de la classe graph
self.index=None
def set_given_cost(self,cost):
self._given_cost=cost
#ne pas mettre @property ici, on veut une methode pas un attribut
def euclidian_cost(self):
return np.sqrt(self.square_euclidian_cost())
def square_euclidian_cost(self):
return np.dot(np.transpose(self.linked[0].coordinates-self.linked[1].coordinates),(self.linked[0].coordinates-self.linked[1].coordinates))
def customized_cost1(self):
V_metro = 25.1 / 3.6 #vitesse moyenne en km/h /3.6 -> vitesse moyenne en m/s
V_train = 49.6 / 3.6
V_tram = 18 / 3.6
V_pieton = 4 / 3.6
if self.id in ["A","B","C","D","E","H","I","J","K","L","M","N","P","R","U","TER","GL"]:
return self._given_cost/V_train
if self.id in [str(i) for i in range(1,15)]+["ORL","CDG","3b","7b"]:
return self._given_cost/V_metro
if self.id in ["T"+str(i) for i in range(1,12)]+["T3A","T3B","FUN"]:
return self._given_cost/V_tram
if self.id in ["RER Walk"]:
return self._given_cost/V_pieton
raise ValueError(" Dans customized_cost1 " +self.id+" non pris en compte dans le calcul de distance")
def __eq__(self,other):
"""2 edges are equal iff same cordinates and same id """
boul0 = self.linked[0].coordinates[0]==other.linked[0].coordinates[0] and self.linked[0].coordinates[1]==other.linked[0].coordinates[1]
boul1 = self.linked[1].coordinates[0]==other.linked[1].coordinates[0] and self.linked[1].coordinates[1]==other.linked[1].coordinates[1]
boulid = self.id==other.id
return boul0 and boul1 and boulid
def __ne__(self,other):
"""2 edges are not equal iff they are not equal :) """
return (self==other)==False
#ne pas mettre @property ici, on veut une methode pas un attribut
def given_cost(self):
return self._given_cost
def __repr__(self):
return f"Edge [{str(self.linked[0].index)}, {str(self.linked[1].index)}] !oriented!"
class Graph:
""" All the information of a graph are contained here. """
def __init__(self,list_of_vertices):
""" Entry : the list of vertices. """
self.list_of_vertices = list_of_vertices
|
self.number_of_edges=0
def push_diplayable_edge(self,bidim_array):
self.connection_table_edge_and_diplayable_edge.append(copy.deepcopy(bidim_array))
self.number_of_disp_edges+=1
def push_edge(self,e):
self.number_of_edges+=1
self.list_of_edges.append(e)
def push_edge_without_doublons(self, e):
if e not in self.list_of_edges:
self.number_of_edges+=1
self.list_of_edges.append(e)
def push_vertice(self,vertice):
self.list_of_vertices.append(vertice)
self.number_of_vertices += 1
def push_vertice_without_doublons(self, vertice):
bool,index = self.is_vertice_in_graph_based_on_xy_with_tolerance(vertice,10**(-8))
#bool,index = self.is_vertice_in_graph_based_on_xy(vertice)
if bool == False:
self.push_vertice(vertice)
else:
vertice.coordinates=self.list_of_vertices[index].coordinates
for edge in vertice.edges_list:
if edge not in self.list_of_vertices[index].edges_list:
self.list_of_vertices[index].push_edge(edge,True)
def is_vertice_in_graph_based_on_xy(self,vertice):
for i in range(self.number_of_vertices):
v = self.list_of_vertices[i]
if v.coordinates[0] == vertice.coordinates[0] and v.coordinates[1] == vertice.coordinates[1]:
return True,i
return False,None
def is_vertice_in_graph_based_on_xy_with_tolerance(self, vertice, epsilon):
for i in range(self.number_of_vertices):
v = self.list_of_vertices[i]
if ((v.coordinates[0] - vertice.coordinates[0])**2) + ((v.coordinates[1] - vertice.coordinates[1])**2) < epsilon:
return True, i
return False, None
def __getitem__(self, key):#implement instance[key]
if key >= 0 and key < self.number_of_vertices:
return self.list_of_vertices[key]
else :
raise IndexError
def laplace_matrix(self):
""" Returns the laplace matrix. """
n = self.number_of_vertices
laplace_matrix = np.zeros((n, n))
for i in range(n):
laplace_matrix[i][i] = 1
vertice = self.list_of_vertices[i]
for edge in vertice.edges_list:
laplace_matrix[i][edge.linked[1].index] = 1
return laplace_matrix
def A_matrix(self,type_cost=Edge.given_cost):
""" Returns the laplace matrix. """
n = self.number_of_vertices
A_matrix = np.zeros((n, n))
for i in range(n):
vertice = self.list_of_vertices[i]
for edge in vertice.edges_list:
cost = type_cost(edge)
A_matrix[i][edge.linked[1].index] = cost
A_matrix[edge.linked[1].index][i] = cost
return A_matrix
def pairs_of_vertices(self):
"""Returns the pairs of connected vertices.
Beware ! There might be non-connected vertices in the graph. """
pairs_of_vertices = []
for vertice in self.list_of_vertices:
for edge in
|
self.number_of_vertices = len(list_of_vertices)
self.connection_table_edge_and_diplayable_edge=[]
self.list_of_edges=[]
self.number_of_disp_edges=0
|
random_line_split
|
class_vertice_graph.py
|
(self):
list_of_line = []
for edge in self._edges_list:
if edge.id not in list_of_line:
list_of_line.append(edge.id)
return list_of_line
@property
def edges_list(self):
""" Returns the list of neighbour. """
return self._edges_list
# We suppose that the index and the coordinates never change.
# The other properties can.
@edges_list.setter
def edges_list(self, edges_list):
""" An element of edges_list is an edge """
for e in edges_list:
exceptions.check_pertinent_edge(self, e)
self._edges_list = edges_list
def neighbours_list(self, list_tuple, id=0):
self._edges_list.clear()
"""interface with old constructor , tuple=(neighbour_vertice,cost) is an element of list_tuple """
for tuple in list_tuple:
E = Edge(self, tuple[0], id, tuple[1])
self._edges_list.append(E)
def number_of_neighbours(self):
return len(self._edges_list)
def is_linked(self, other):
"""returns True if there is an edge between self and other"""
for edge in self._edges_list:
if other.index == edge.linked[1].index:
return True
return False
def push_edge(self, edge, coords_verif=False):
if coords_verif:
exceptions.check_pertinent_edge_coords_verif(self, edge)
else:
exceptions.check_pertinent_edge(self, edge)
self._edges_list.append(edge)
"""
def cost_between(self, other):
for edge in self._edges_list:
[vertice, vertice_voisin] = edge.linked
if vertice_voisin == other:
return edge.given_cost"""
def __repr__(self):
return f"Vertice {str(self.index)}"
def __lt__(self, other):
return self.priority < other.priority
class Edge:
def __init__(self, vertice1, vertice2, id, given_cost=0):
self.linked = [vertice1,vertice2]
self.id = id #identifiant de la liaison. ici id=nom de la ligne a laqualle appartient la liaison
self._given_cost = given_cost #cout de deplacement de la liason donne par l'utilisateur ou la base de donnee
#data_base
self.color=None #couleur de la liason
self.connection_with_displayable=None #indice de la liason developpee( trace reel) dans la table de connection connection_table_edge_and_diplayable_edge de la classe graph
self.index=None
def set_given_cost(self,cost):
self._given_cost=cost
#ne pas mettre @property ici, on veut une methode pas un attribut
def euclidian_cost(self):
return np.sqrt(self.square_euclidian_cost())
def square_euclidian_cost(self):
return np.dot(np.transpose(self.linked[0].coordinates-self.linked[1].coordinates),(self.linked[0].coordinates-self.linked[1].coordinates))
def customized_cost1(self):
V_metro = 25.1 / 3.6 #vitesse moyenne en km/h /3.6 -> vitesse moyenne en m/s
V_train = 49.6 / 3.6
V_tram = 18 / 3.6
V_pieton = 4 / 3.6
if self.id in ["A","B","C","D","E","H","I","J","K","L","M","N","P","R","U","TER","GL"]:
return self._given_cost/V_train
if self.id in [str(i) for i in range(1,15)]+["ORL","CDG","3b","7b"]:
return self._given_cost/V_metro
if self.id in ["T"+str(i) for i in range(1,12)]+["T3A","T3B","FUN"]:
return self._given_cost/V_tram
if self.id in ["RER Walk"]:
return self._given_cost/V_pieton
raise ValueError(" Dans customized_cost1 " +self.id+" non pris en compte dans le calcul de distance")
def __eq__(self,other):
"""2 edges are equal iff same cordinates and same id """
boul0 = self.linked[0].coordinates[0]==other.linked[0].coordinates[0] and self.linked[0].coordinates[1]==other.linked[0].coordinates[1]
boul1 = self.linked[1].coordinates[0]==other.linked[1].coordinates[0] and self.linked[1].coordinates[1]==other.linked[1].coordinates[1]
boulid = self.id==other.id
return boul0 and boul1 and boulid
def __ne__(self,other):
"""2 edges are not equal iff they are not equal :) """
return (self==other)==False
#ne pas mettre @property ici, on veut une methode pas un attribut
def given_cost(self):
return self._given_cost
def __repr__(self):
return f"Edge [{str(self.linked[0].index)}, {str(self.linked[1].index)}] !oriented!"
class Graph:
""" All the information of a graph are contained here. """
def __init__(self,list_of_vertices):
""" Entry : the list of vertices. """
self.list_of_vertices = list_of_vertices
self.number_of_vertices = len(list_of_vertices)
self.connection_table_edge_and_diplayable_edge=[]
self.list_of_edges=[]
self.number_of_disp_edges=0
self.number_of_edges=0
def push_diplayable_edge(self,bidim_array):
self.connection_table_edge_and_diplayable_edge.append(copy.deepcopy(bidim_array))
self.number_of_disp_edges+=1
def push_edge(self,e):
self.number_of_edges+=1
self.list_of_edges.append(e)
def push_edge_without_doublons(self, e):
if e not in self.list_of_edges:
self.number_of_edges+=1
self.list_of_edges.append(e)
def push_vertice(self,vertice):
self.list_of_vertices.append(vertice)
self.number_of_vertices += 1
def push_vertice_without_doublons(self, vertice):
bool,index = self.is_vertice_in_graph_based_on_xy_with_tolerance(vertice,10**(-8))
#bool,index = self.is_vertice_in_graph_based_on_xy(vertice)
if bool == False:
self.push_vertice(vertice)
else:
vertice.coordinates=self.list_of_vertices[index].coordinates
for edge in vertice.edges_list:
if edge not in self.list_of_vertices[index].edges_list:
self.list_of_vertices[index].push_edge(edge,True)
def is_vertice_in_graph_based_on_xy(self,vertice):
for i in range(self.number_of_vertices):
v = self.list_of_vertices[i]
if v.coordinates[0] == vertice.coordinates[0] and v.coordinates[1] == vertice.coordinates[1]:
return True,i
return False,None
def is_vertice_in_graph_based_on_xy_with_tolerance(self, vertice, epsilon):
for i in range(self.number_of_vertices):
v = self.list_of_vertices[i]
if ((v.coordinates[0] - vertice.coordinates[0])**2) + ((v.coordinates[1] - vertice.coordinates[1])**2) < epsilon:
return True, i
return False, None
def __getitem__(self, key):#implement instance[key]
if key >= 0 and key < self.number_of_vertices:
return self.list_of_vertices[key]
else :
raise IndexError
def laplace_matrix(self):
""" Returns the laplace matrix. """
n = self.number_of_vertices
laplace_matrix = np.zeros((n, n))
for i in range(n):
laplace_matrix[i][i] = 1
vertice = self.list_of_vertices[i]
for edge in vertice.edges_list:
laplace_matrix[i][edge.linked[1].index] = 1
return laplace_matrix
def A_matrix(self,type_cost=Edge.given_cost):
""" Returns the laplace matrix. """
n = self.number_of_vertices
A_matrix = np.zeros((n, n))
for i in range(n):
vertice = self.list_of_vertices[i]
for edge in vertice.edges_list:
cost = type_cost(edge)
A_matrix[i][edge.linked[1].index] = cost
A_matrix[edge.linked[1].index][i] = cost
return A_matrix
def pairs_of_vertices(self):
"""Returns the pairs of connected vertices.
Beware ! There might be non-connected vertices in the graph. """
pairs_of_vertices = []
for vertice in self.list_of_vertices:
for edge in vertice.edges_list:
if non_oriented:
if (vertice, edge.linked[1]) and (edge.linked[1], vertice) not in pairs_of_vertices:
|
pairs_of_vertices.append((vertice, edge.linked[1]))
|
conditional_block
|
|
class_vertice_graph.py
|
.gare_name = None #nom de la gare
self.color = None #couleur de la gare
self.is_a_station= True # boolean True, si le noeud est veritablement une gare. False sinon
def get_lines_connected(self):
list_of_line = []
for edge in self._edges_list:
if edge.id not in list_of_line:
list_of_line.append(edge.id)
return list_of_line
@property
def edges_list(self):
""" Returns the list of neighbour. """
return self._edges_list
# We suppose that the index and the coordinates never change.
# The other properties can.
@edges_list.setter
def edges_list(self, edges_list):
""" An element of edges_list is an edge """
for e in edges_list:
exceptions.check_pertinent_edge(self, e)
self._edges_list = edges_list
def neighbours_list(self, list_tuple, id=0):
self._edges_list.clear()
"""interface with old constructor , tuple=(neighbour_vertice,cost) is an element of list_tuple """
for tuple in list_tuple:
E = Edge(self, tuple[0], id, tuple[1])
self._edges_list.append(E)
def number_of_neighbours(self):
return len(self._edges_list)
def is_linked(self, other):
"""returns True if there is an edge between self and other"""
for edge in self._edges_list:
if other.index == edge.linked[1].index:
return True
return False
def push_edge(self, edge, coords_verif=False):
if coords_verif:
exceptions.check_pertinent_edge_coords_verif(self, edge)
else:
exceptions.check_pertinent_edge(self, edge)
self._edges_list.append(edge)
"""
def cost_between(self, other):
for edge in self._edges_list:
[vertice, vertice_voisin] = edge.linked
if vertice_voisin == other:
return edge.given_cost"""
def __repr__(self):
return f"Vertice {str(self.index)}"
def __lt__(self, other):
return self.priority < other.priority
class Edge:
def __init__(self, vertice1, vertice2, id, given_cost=0):
self.linked = [vertice1,vertice2]
self.id = id #identifiant de la liaison. ici id=nom de la ligne a laqualle appartient la liaison
self._given_cost = given_cost #cout de deplacement de la liason donne par l'utilisateur ou la base de donnee
#data_base
self.color=None #couleur de la liason
self.connection_with_displayable=None #indice de la liason developpee( trace reel) dans la table de connection connection_table_edge_and_diplayable_edge de la classe graph
self.index=None
def set_given_cost(self,cost):
self._given_cost=cost
#ne pas mettre @property ici, on veut une methode pas un attribut
def euclidian_cost(self):
return np.sqrt(self.square_euclidian_cost())
def square_euclidian_cost(self):
return np.dot(np.transpose(self.linked[0].coordinates-self.linked[1].coordinates),(self.linked[0].coordinates-self.linked[1].coordinates))
def customized_cost1(self):
V_metro = 25.1 / 3.6 #vitesse moyenne en km/h /3.6 -> vitesse moyenne en m/s
V_train = 49.6 / 3.6
V_tram = 18 / 3.6
V_pieton = 4 / 3.6
if self.id in ["A","B","C","D","E","H","I","J","K","L","M","N","P","R","U","TER","GL"]:
return self._given_cost/V_train
if self.id in [str(i) for i in range(1,15)]+["ORL","CDG","3b","7b"]:
return self._given_cost/V_metro
if self.id in ["T"+str(i) for i in range(1,12)]+["T3A","T3B","FUN"]:
return self._given_cost/V_tram
if self.id in ["RER Walk"]:
return self._given_cost/V_pieton
raise ValueError(" Dans customized_cost1 " +self.id+" non pris en compte dans le calcul de distance")
def __eq__(self,other):
"""2 edges are equal iff same cordinates and same id """
boul0 = self.linked[0].coordinates[0]==other.linked[0].coordinates[0] and self.linked[0].coordinates[1]==other.linked[0].coordinates[1]
boul1 = self.linked[1].coordinates[0]==other.linked[1].coordinates[0] and self.linked[1].coordinates[1]==other.linked[1].coordinates[1]
boulid = self.id==other.id
return boul0 and boul1 and boulid
def __ne__(self,other):
"""2 edges are not equal iff they are not equal :) """
return (self==other)==False
#ne pas mettre @property ici, on veut une methode pas un attribut
def given_cost(self):
return self._given_cost
def __repr__(self):
return f"Edge [{str(self.linked[0].index)}, {str(self.linked[1].index)}] !oriented!"
class Graph:
""" All the information of a graph are contained here. """
def __init__(self,list_of_vertices):
""" Entry : the list of vertices. """
self.list_of_vertices = list_of_vertices
self.number_of_vertices = len(list_of_vertices)
self.connection_table_edge_and_diplayable_edge=[]
self.list_of_edges=[]
self.number_of_disp_edges=0
self.number_of_edges=0
def push_diplayable_edge(self,bidim_array):
self.connection_table_edge_and_diplayable_edge.append(copy.deepcopy(bidim_array))
self.number_of_disp_edges+=1
def push_edge(self,e):
self.number_of_edges+=1
self.list_of_edges.append(e)
def push_edge_without_doublons(self, e):
if e not in self.list_of_edges:
self.number_of_edges+=1
self.list_of_edges.append(e)
def push_vertice(self,vertice):
self.list_of_vertices.append(vertice)
self.number_of_vertices += 1
def push_vertice_without_doublons(self, vertice):
bool,index = self.is_vertice_in_graph_based_on_xy_with_tolerance(vertice,10**(-8))
#bool,index = self.is_vertice_in_graph_based_on_xy(vertice)
if bool == False:
self.push_vertice(vertice)
else:
vertice.coordinates=self.list_of_vertices[index].coordinates
for edge in vertice.edges_list:
if edge not in self.list_of_vertices[index].edges_list:
self.list_of_vertices[index].push_edge(edge,True)
def is_vertice_in_graph_based_on_xy(self,vertice):
|
def is_vertice_in_graph_based_on_xy_with_tolerance(self, vertice, epsilon):
for i in range(self.number_of_vertices):
v = self.list_of_vertices[i]
if ((v.coordinates[0] - vertice.coordinates[0])**2) + ((v.coordinates[1] - vertice.coordinates[1])**2) < epsilon:
return True, i
return False, None
def __getitem__(self, key):#implement instance[key]
if key >= 0 and key < self.number_of_vertices:
return self.list_of_vertices[key]
else :
raise IndexError
def laplace_matrix(self):
""" Returns the laplace matrix. """
n = self.number_of_vertices
laplace_matrix = np.zeros((n, n))
for i in range(n):
laplace_matrix[i][i] = 1
vertice = self.list_of_vertices[i]
for edge in vertice.edges_list:
laplace_matrix[i][edge.linked[1].index] = 1
return laplace_matrix
def A_matrix(self,type_cost=Edge.given_cost):
""" Returns the laplace matrix. """
n = self.number_of_vertices
A_matrix = np.zeros((n, n))
for i in range(n):
vertice = self.list_of_vertices[i]
for edge in vertice.edges_list:
cost = type_cost(edge)
A_matrix[i][edge.linked[1].index] = cost
A_matrix[edge.linked[1].index][i] = cost
return A_matrix
def pairs_of_vertices(self):
"""Returns the pairs of connected vertices.
Beware ! There might be non-connected vertices in the graph. """
pairs_of_vertices = []
for vertice in self.list_of_vertices:
for edge
|
for i in range(self.number_of_vertices):
v = self.list_of_vertices[i]
if v.coordinates[0] == vertice.coordinates[0] and v.coordinates[1] == vertice.coordinates[1]:
return True,i
return False,None
|
identifier_body
|
build.rs
|
let out_dir = PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR not set"));
let kernel = PathBuf::from(match env::var("KERNEL") {
Ok(kernel) => kernel,
Err(_) => {
eprintln!(
"The KERNEL environment variable must be set for building the bootloader.\n\n\
Please use the `cargo builder` command for building."
);
process::exit(1);
}
});
let kernel_file_name = kernel
.file_name()
.expect("KERNEL has no valid file name")
.to_str()
.expect("kernel file name not valid utf8");
// check that the kernel file exists
assert!(
kernel.exists(),
"KERNEL does not exist: {}",
kernel.display()
);
// get access to llvm tools shipped in the llvm-tools-preview rustup component
let llvm_tools = match llvm_tools::LlvmTools::new() {
Ok(tools) => tools,
Err(llvm_tools::Error::NotFound) => {
eprintln!("Error: llvm-tools not found");
eprintln!("Maybe the rustup component `llvm-tools-preview` is missing?");
eprintln!(" Install it through: `rustup component add llvm-tools-preview`");
process::exit(1);
}
Err(err) => {
eprintln!("Failed to retrieve llvm-tools component: {:?}", err);
process::exit(1);
}
};
// check that kernel executable has code in it
let llvm_size = llvm_tools
.tool(&llvm_tools::exe("llvm-size"))
.expect("llvm-size not found in llvm-tools");
let mut cmd = Command::new(llvm_size);
cmd.arg(&kernel);
let output = cmd.output().expect("failed to run llvm-size");
let output_str = String::from_utf8_lossy(&output.stdout);
let second_line_opt = output_str.lines().skip(1).next();
let second_line = second_line_opt.expect(&format!(
"unexpected llvm-size line output:\n{}",
output_str
));
let text_size_opt = second_line.split_ascii_whitespace().next();
let text_size =
text_size_opt.expect(&format!("unexpected llvm-size output:\n{}", output_str));
if text_size == "0" {
panic!("Kernel executable has an empty text section. Perhaps the entry point was set incorrectly?\n\n\
Kernel executable at `{}`\n", kernel.display());
}
// strip debug symbols from kernel for faster loading
let stripped_kernel_file_name = format!("kernel_stripped-{}", kernel_file_name);
let stripped_kernel = out_dir.join(&stripped_kernel_file_name);
let objcopy = llvm_tools
.tool(&llvm_tools::exe("llvm-objcopy"))
.expect("llvm-objcopy not found in llvm-tools");
let mut cmd = Command::new(&objcopy);
cmd.arg("--strip-debug");
cmd.arg(&kernel);
cmd.arg(&stripped_kernel);
let exit_status = cmd
.status()
.expect("failed to run objcopy to strip debug symbols");
if !exit_status.success() {
eprintln!("Error: Stripping debug symbols failed");
process::exit(1);
}
if cfg!(feature = "uefi_bin") {
// write file for including kernel in binary
let file_path = out_dir.join("kernel_info.rs");
let mut file = File::create(file_path).expect("failed to create kernel_info.rs");
let kernel_size = fs::metadata(&stripped_kernel)
.expect("Failed to read file metadata of stripped kernel")
.len();
file.write_all(
format!(
"const KERNEL_SIZE: usize = {}; const KERNEL_BYTES: [u8; KERNEL_SIZE] = *include_bytes!(r\"{}\");",
kernel_size,
stripped_kernel.display(),
)
.as_bytes(),
)
.expect("write to kernel_info.rs failed");
}
if cfg!(feature = "bios_bin") {
// wrap the kernel executable as binary in a new ELF file
let stripped_kernel_file_name_replaced = stripped_kernel_file_name
.replace('-', "_")
.replace('.', "_");
let kernel_bin = out_dir.join(format!("kernel_bin-{}.o", kernel_file_name));
let kernel_archive = out_dir.join(format!("libkernel_bin-{}.a", kernel_file_name));
let mut cmd = Command::new(&objcopy);
cmd.arg("-I").arg("binary");
cmd.arg("-O").arg("elf64-x86-64");
cmd.arg("--binary-architecture=i386:x86-64");
cmd.arg("--rename-section").arg(".data=.kernel");
cmd.arg("--redefine-sym").arg(format!(
"_binary_{}_start=_kernel_start_addr",
stripped_kernel_file_name_replaced
));
cmd.arg("--redefine-sym").arg(format!(
"_binary_{}_end=_kernel_end_addr",
stripped_kernel_file_name_replaced
));
cmd.arg("--redefine-sym").arg(format!(
"_binary_{}_size=_kernel_size",
stripped_kernel_file_name_replaced
));
cmd.current_dir(&out_dir);
cmd.arg(&stripped_kernel_file_name);
cmd.arg(&kernel_bin);
let exit_status = cmd.status().expect("failed to run objcopy");
if !exit_status.success() {
eprintln!("Error: Running objcopy failed");
process::exit(1);
}
// create an archive for linking
let ar = llvm_tools
.tool(&llvm_tools::exe("llvm-ar"))
.unwrap_or_else(|| {
eprintln!("Failed to retrieve llvm-ar component");
eprint!("This component is available since nightly-2019-03-29,");
eprintln!("so try updating your toolchain if you're using an older nightly");
process::exit(1);
});
let mut cmd = Command::new(ar);
cmd.arg("crs");
cmd.arg(&kernel_archive);
cmd.arg(&kernel_bin);
let exit_status = cmd.status().expect("failed to run ar");
if !exit_status.success() {
eprintln!("Error: Running ar failed");
process::exit(1);
}
// pass link arguments to rustc
println!("cargo:rustc-link-search=native={}", out_dir.display());
println!(
"cargo:rustc-link-lib=static=kernel_bin-{}",
kernel_file_name
);
}
// Parse configuration from the kernel's Cargo.toml
let config = match env::var("KERNEL_MANIFEST") {
Err(env::VarError::NotPresent) => {
panic!("The KERNEL_MANIFEST environment variable must be set for building the bootloader.\n\n\
Please use `cargo builder` for building.");
}
Err(env::VarError::NotUnicode(_)) => {
panic!("The KERNEL_MANIFEST environment variable contains invalid unicode")
}
Ok(path)
if Path::new(&path).file_name().and_then(|s| s.to_str()) != Some("Cargo.toml") =>
{
let err = format!(
"The given `--kernel-manifest` path `{}` does not \
point to a `Cargo.toml`",
path,
);
quote! { compile_error!(#err) }
}
Ok(path) if !Path::new(&path).exists() => {
let err = format!(
"The given `--kernel-manifest` path `{}` does not exist.",
path
);
quote! {
compile_error!(#err)
}
}
Ok(path) => {
println!("cargo:rerun-if-changed={}", path);
let contents = fs::read_to_string(&path).expect(&format!(
"failed to read kernel manifest file (path: {})",
path
));
let manifest = contents
.parse::<Value>()
.expect("failed to parse kernel's Cargo.toml");
if manifest
.get("dependencies")
.and_then(|d| d.get("bootloader"))
.is_some()
{
// it seems to be the correct Cargo.toml
let config_table = manifest
.get("package")
.and_then(|table| table.get("metadata"))
.and_then(|table| table.get("bootloader"))
.cloned()
.unwrap_or_else(|| toml::Value::Table(toml::map::Map::new()));
config_table
.try_into::<ParsedConfig>()
.map(|c| quote! { #c })
.unwrap_or_else(|err| {
let err = format!(
"failed to parse bootloader config in {}:\n\n{}",
path,
err.to_string()
);
quote! {
compile_error!(#err)
}
})
} else {
let err = format!(
"no bootloader dependency in {}\n\n The \
`--kernel-manifest` path should point to the `Cargo.toml` \
of the kernel.",
path
);
quote! {
compile_error!(#err)
}
}
}
};
// Write config to file
let file_path = out_dir.join("bootloader_config.rs");
let mut file = File::create(file_path).expect("failed to create bootloader_config.rs");
file
|
{
panic!(
"The {} bootloader must be compiled for the `{}` target.",
firmware, expected_target,
);
}
|
conditional_block
|
|
build.rs
|
of stripped kernel")
.len();
file.write_all(
format!(
"const KERNEL_SIZE: usize = {}; const KERNEL_BYTES: [u8; KERNEL_SIZE] = *include_bytes!(r\"{}\");",
kernel_size,
stripped_kernel.display(),
)
.as_bytes(),
)
.expect("write to kernel_info.rs failed");
}
if cfg!(feature = "bios_bin") {
// wrap the kernel executable as binary in a new ELF file
let stripped_kernel_file_name_replaced = stripped_kernel_file_name
.replace('-', "_")
.replace('.', "_");
let kernel_bin = out_dir.join(format!("kernel_bin-{}.o", kernel_file_name));
let kernel_archive = out_dir.join(format!("libkernel_bin-{}.a", kernel_file_name));
let mut cmd = Command::new(&objcopy);
cmd.arg("-I").arg("binary");
cmd.arg("-O").arg("elf64-x86-64");
cmd.arg("--binary-architecture=i386:x86-64");
cmd.arg("--rename-section").arg(".data=.kernel");
cmd.arg("--redefine-sym").arg(format!(
"_binary_{}_start=_kernel_start_addr",
stripped_kernel_file_name_replaced
));
cmd.arg("--redefine-sym").arg(format!(
"_binary_{}_end=_kernel_end_addr",
stripped_kernel_file_name_replaced
));
cmd.arg("--redefine-sym").arg(format!(
"_binary_{}_size=_kernel_size",
stripped_kernel_file_name_replaced
));
cmd.current_dir(&out_dir);
cmd.arg(&stripped_kernel_file_name);
cmd.arg(&kernel_bin);
let exit_status = cmd.status().expect("failed to run objcopy");
if !exit_status.success() {
eprintln!("Error: Running objcopy failed");
process::exit(1);
}
// create an archive for linking
let ar = llvm_tools
.tool(&llvm_tools::exe("llvm-ar"))
.unwrap_or_else(|| {
eprintln!("Failed to retrieve llvm-ar component");
eprint!("This component is available since nightly-2019-03-29,");
eprintln!("so try updating your toolchain if you're using an older nightly");
process::exit(1);
});
let mut cmd = Command::new(ar);
cmd.arg("crs");
cmd.arg(&kernel_archive);
cmd.arg(&kernel_bin);
let exit_status = cmd.status().expect("failed to run ar");
if !exit_status.success() {
eprintln!("Error: Running ar failed");
process::exit(1);
}
// pass link arguments to rustc
println!("cargo:rustc-link-search=native={}", out_dir.display());
println!(
"cargo:rustc-link-lib=static=kernel_bin-{}",
kernel_file_name
);
}
// Parse configuration from the kernel's Cargo.toml
let config = match env::var("KERNEL_MANIFEST") {
Err(env::VarError::NotPresent) => {
panic!("The KERNEL_MANIFEST environment variable must be set for building the bootloader.\n\n\
Please use `cargo builder` for building.");
}
Err(env::VarError::NotUnicode(_)) => {
panic!("The KERNEL_MANIFEST environment variable contains invalid unicode")
}
Ok(path)
if Path::new(&path).file_name().and_then(|s| s.to_str()) != Some("Cargo.toml") =>
{
let err = format!(
"The given `--kernel-manifest` path `{}` does not \
point to a `Cargo.toml`",
path,
);
quote! { compile_error!(#err) }
}
Ok(path) if !Path::new(&path).exists() => {
let err = format!(
"The given `--kernel-manifest` path `{}` does not exist.",
path
);
quote! {
compile_error!(#err)
}
}
Ok(path) => {
println!("cargo:rerun-if-changed={}", path);
let contents = fs::read_to_string(&path).expect(&format!(
"failed to read kernel manifest file (path: {})",
path
));
let manifest = contents
.parse::<Value>()
.expect("failed to parse kernel's Cargo.toml");
if manifest
.get("dependencies")
.and_then(|d| d.get("bootloader"))
.is_some()
{
// it seems to be the correct Cargo.toml
let config_table = manifest
.get("package")
.and_then(|table| table.get("metadata"))
.and_then(|table| table.get("bootloader"))
.cloned()
.unwrap_or_else(|| toml::Value::Table(toml::map::Map::new()));
config_table
.try_into::<ParsedConfig>()
.map(|c| quote! { #c })
.unwrap_or_else(|err| {
let err = format!(
"failed to parse bootloader config in {}:\n\n{}",
path,
err.to_string()
);
quote! {
compile_error!(#err)
}
})
} else {
let err = format!(
"no bootloader dependency in {}\n\n The \
`--kernel-manifest` path should point to the `Cargo.toml` \
of the kernel.",
path
);
quote! {
compile_error!(#err)
}
}
}
};
// Write config to file
let file_path = out_dir.join("bootloader_config.rs");
let mut file = File::create(file_path).expect("failed to create bootloader_config.rs");
file.write_all(
quote::quote! {
mod parsed_config {
use crate::config::Config;
pub const CONFIG: Config = #config;
}
}
.to_string()
.as_bytes(),
)
.expect("write to bootloader_config.rs failed");
println!("cargo:rerun-if-env-changed=KERNEL");
println!("cargo:rerun-if-env-changed=KERNEL_MANIFEST");
println!("cargo:rerun-if-changed={}", kernel.display());
println!("cargo:rerun-if-changed=build.rs");
}
fn val_true() -> bool {
true
}
/// Must be always identical with the struct in `src/config.rs`
///
/// This copy is needed because we can't derive Deserialize in the `src/config.rs`
/// module itself, since cargo currently unifies dependencies (the `toml` crate enables
/// serde's standard feature). Also, it allows to separate the parsing special cases
/// such as `AlignedAddress` more cleanly.
#[derive(Debug, serde::Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
struct ParsedConfig {
#[serde(default)]
pub map_physical_memory: bool,
#[serde(default)]
pub map_page_table_recursively: bool,
#[serde(default = "val_true")]
pub map_framebuffer: bool,
pub kernel_stack_size: Option<AlignedAddress>,
pub physical_memory_offset: Option<AlignedAddress>,
pub recursive_index: Option<u16>,
pub kernel_stack_address: Option<AlignedAddress>,
pub boot_info_address: Option<AlignedAddress>,
pub framebuffer_address: Option<AlignedAddress>,
}
/// Convert to tokens suitable for initializing the `Config` struct.
impl quote::ToTokens for ParsedConfig {
fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
fn optional(value: Option<impl quote::ToTokens>) -> proc_macro2::TokenStream {
value.map(|v| quote!(Some(#v))).unwrap_or(quote!(None))
}
let map_physical_memory = self.map_physical_memory;
let map_page_table_recursively = self.map_page_table_recursively;
let map_framebuffer = self.map_framebuffer;
let kernel_stack_size = optional(self.kernel_stack_size);
let physical_memory_offset = optional(self.physical_memory_offset);
let recursive_index = optional(self.recursive_index);
let kernel_stack_address = optional(self.kernel_stack_address);
let boot_info_address = optional(self.boot_info_address);
let framebuffer_address = optional(self.framebuffer_address);
tokens.extend(quote! { Config {
map_physical_memory: #map_physical_memory,
map_page_table_recursively: #map_page_table_recursively,
map_framebuffer: #map_framebuffer,
kernel_stack_size: #kernel_stack_size,
physical_memory_offset: #physical_memory_offset,
recursive_index: #recursive_index,
kernel_stack_address: #kernel_stack_address,
boot_info_address: #boot_info_address,
framebuffer_address: #framebuffer_address,
}});
}
}
#[derive(Debug, Clone, Copy)]
struct AlignedAddress(u64);
impl quote::ToTokens for AlignedAddress {
fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
self.0.to_tokens(tokens);
}
}
impl<'de> serde::Deserialize<'de> for AlignedAddress {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
deserializer.deserialize_str(AlignedAddressVisitor)
}
}
/// Helper struct for implementing the `optional_version_deserialize` function.
struct
|
AlignedAddressVisitor
|
identifier_name
|
|
build.rs
|
kernel.display()
);
// get access to llvm tools shipped in the llvm-tools-preview rustup component
let llvm_tools = match llvm_tools::LlvmTools::new() {
Ok(tools) => tools,
Err(llvm_tools::Error::NotFound) => {
eprintln!("Error: llvm-tools not found");
eprintln!("Maybe the rustup component `llvm-tools-preview` is missing?");
eprintln!(" Install it through: `rustup component add llvm-tools-preview`");
process::exit(1);
}
Err(err) => {
eprintln!("Failed to retrieve llvm-tools component: {:?}", err);
process::exit(1);
}
};
// check that kernel executable has code in it
let llvm_size = llvm_tools
.tool(&llvm_tools::exe("llvm-size"))
.expect("llvm-size not found in llvm-tools");
let mut cmd = Command::new(llvm_size);
cmd.arg(&kernel);
let output = cmd.output().expect("failed to run llvm-size");
let output_str = String::from_utf8_lossy(&output.stdout);
let second_line_opt = output_str.lines().skip(1).next();
let second_line = second_line_opt.expect(&format!(
"unexpected llvm-size line output:\n{}",
output_str
));
let text_size_opt = second_line.split_ascii_whitespace().next();
let text_size =
text_size_opt.expect(&format!("unexpected llvm-size output:\n{}", output_str));
if text_size == "0" {
panic!("Kernel executable has an empty text section. Perhaps the entry point was set incorrectly?\n\n\
Kernel executable at `{}`\n", kernel.display());
}
// strip debug symbols from kernel for faster loading
let stripped_kernel_file_name = format!("kernel_stripped-{}", kernel_file_name);
let stripped_kernel = out_dir.join(&stripped_kernel_file_name);
let objcopy = llvm_tools
.tool(&llvm_tools::exe("llvm-objcopy"))
.expect("llvm-objcopy not found in llvm-tools");
let mut cmd = Command::new(&objcopy);
cmd.arg("--strip-debug");
cmd.arg(&kernel);
cmd.arg(&stripped_kernel);
let exit_status = cmd
.status()
.expect("failed to run objcopy to strip debug symbols");
if !exit_status.success() {
eprintln!("Error: Stripping debug symbols failed");
process::exit(1);
}
if cfg!(feature = "uefi_bin") {
// write file for including kernel in binary
let file_path = out_dir.join("kernel_info.rs");
let mut file = File::create(file_path).expect("failed to create kernel_info.rs");
let kernel_size = fs::metadata(&stripped_kernel)
.expect("Failed to read file metadata of stripped kernel")
.len();
file.write_all(
format!(
"const KERNEL_SIZE: usize = {}; const KERNEL_BYTES: [u8; KERNEL_SIZE] = *include_bytes!(r\"{}\");",
kernel_size,
stripped_kernel.display(),
)
.as_bytes(),
)
.expect("write to kernel_info.rs failed");
}
if cfg!(feature = "bios_bin") {
// wrap the kernel executable as binary in a new ELF file
let stripped_kernel_file_name_replaced = stripped_kernel_file_name
.replace('-', "_")
.replace('.', "_");
let kernel_bin = out_dir.join(format!("kernel_bin-{}.o", kernel_file_name));
let kernel_archive = out_dir.join(format!("libkernel_bin-{}.a", kernel_file_name));
let mut cmd = Command::new(&objcopy);
cmd.arg("-I").arg("binary");
cmd.arg("-O").arg("elf64-x86-64");
cmd.arg("--binary-architecture=i386:x86-64");
cmd.arg("--rename-section").arg(".data=.kernel");
cmd.arg("--redefine-sym").arg(format!(
"_binary_{}_start=_kernel_start_addr",
stripped_kernel_file_name_replaced
));
cmd.arg("--redefine-sym").arg(format!(
"_binary_{}_end=_kernel_end_addr",
stripped_kernel_file_name_replaced
));
cmd.arg("--redefine-sym").arg(format!(
"_binary_{}_size=_kernel_size",
stripped_kernel_file_name_replaced
));
cmd.current_dir(&out_dir);
cmd.arg(&stripped_kernel_file_name);
cmd.arg(&kernel_bin);
let exit_status = cmd.status().expect("failed to run objcopy");
if !exit_status.success() {
eprintln!("Error: Running objcopy failed");
process::exit(1);
}
// create an archive for linking
let ar = llvm_tools
.tool(&llvm_tools::exe("llvm-ar"))
.unwrap_or_else(|| {
eprintln!("Failed to retrieve llvm-ar component");
eprint!("This component is available since nightly-2019-03-29,");
eprintln!("so try updating your toolchain if you're using an older nightly");
process::exit(1);
});
let mut cmd = Command::new(ar);
cmd.arg("crs");
cmd.arg(&kernel_archive);
cmd.arg(&kernel_bin);
let exit_status = cmd.status().expect("failed to run ar");
if !exit_status.success() {
eprintln!("Error: Running ar failed");
process::exit(1);
}
// pass link arguments to rustc
println!("cargo:rustc-link-search=native={}", out_dir.display());
println!(
"cargo:rustc-link-lib=static=kernel_bin-{}",
|
// Parse configuration from the kernel's Cargo.toml
let config = match env::var("KERNEL_MANIFEST") {
Err(env::VarError::NotPresent) => {
panic!("The KERNEL_MANIFEST environment variable must be set for building the bootloader.\n\n\
Please use `cargo builder` for building.");
}
Err(env::VarError::NotUnicode(_)) => {
panic!("The KERNEL_MANIFEST environment variable contains invalid unicode")
}
Ok(path)
if Path::new(&path).file_name().and_then(|s| s.to_str()) != Some("Cargo.toml") =>
{
let err = format!(
"The given `--kernel-manifest` path `{}` does not \
point to a `Cargo.toml`",
path,
);
quote! { compile_error!(#err) }
}
Ok(path) if !Path::new(&path).exists() => {
let err = format!(
"The given `--kernel-manifest` path `{}` does not exist.",
path
);
quote! {
compile_error!(#err)
}
}
Ok(path) => {
println!("cargo:rerun-if-changed={}", path);
let contents = fs::read_to_string(&path).expect(&format!(
"failed to read kernel manifest file (path: {})",
path
));
let manifest = contents
.parse::<Value>()
.expect("failed to parse kernel's Cargo.toml");
if manifest
.get("dependencies")
.and_then(|d| d.get("bootloader"))
.is_some()
{
// it seems to be the correct Cargo.toml
let config_table = manifest
.get("package")
.and_then(|table| table.get("metadata"))
.and_then(|table| table.get("bootloader"))
.cloned()
.unwrap_or_else(|| toml::Value::Table(toml::map::Map::new()));
config_table
.try_into::<ParsedConfig>()
.map(|c| quote! { #c })
.unwrap_or_else(|err| {
let err = format!(
"failed to parse bootloader config in {}:\n\n{}",
path,
err.to_string()
);
quote! {
compile_error!(#err)
}
})
} else {
let err = format!(
"no bootloader dependency in {}\n\n The \
`--kernel-manifest` path should point to the `Cargo.toml` \
of the kernel.",
path
);
quote! {
compile_error!(#err)
}
}
}
};
// Write config to file
let file_path = out_dir.join("bootloader_config.rs");
let mut file = File::create(file_path).expect("failed to create bootloader_config.rs");
file.write_all(
quote::quote! {
mod parsed_config {
use crate::config::Config;
pub const CONFIG: Config = #config;
}
}
.to_string()
.as_bytes(),
)
.expect("write to bootloader_config.rs failed");
println!("cargo:rerun-if-env-changed=KERNEL");
println!("cargo:rerun-if-env-changed=KERNEL_MANIFEST");
println!("cargo:rerun-if-changed={}", kernel.display());
println!("cargo:rerun-if-changed=build.rs");
}
fn val_true() -> bool {
true
}
/// Must be always identical with the struct in `src/config.rs`
///
/// This copy is needed because we can't derive Deserialize in the `src/config.rs`
/// module itself, since cargo currently unifies dependencies (the `toml` crate enables
/// serde's
|
kernel_file_name
);
}
|
random_line_split
|
state.rs
|
new_min.as_()
+ (new_max.as_() - new_min.as_())
* ((value - old_min.as_()) / (old_max.as_() - old_min.as_()))
}
pub fn remap_minz<
T: 'static + Float + Copy + AsPrimitive<T>,
OX: AsPrimitive<T> + Copy,
NX: AsPrimitive<T> + Copy,
>(
value: T,
old_max: OX,
new_max: NX,
) -> T {
remap(value, T::zero(), old_max, T::zero(), new_max)
}
const MOUSE_SENSITIVITY: f64 = 0.01;
const MOVE_SPEED: f64 = 2.5;
const PLAYER_WALL_PADDING: f64 = 10.;
const WALL_ACTUAL_HEIGHT: f64 = 48.;
pub struct State {
pub(crate) position: Vec2D,
pub(crate) angle: f64,
pub(crate) fov: f64,
pub(crate) wall_colors: Vec<Color>,
pub(crate) map: Map,
pub(crate) keys: KeyStateHandler,
pub(crate) columns: Vec<(usize, u32)>,
pub(crate) resolution: usize,
pub(crate) projection_factor: f64,
pub(crate) radian_per_column: f64,
pub(crate) column_width: u32,
}
impl State {
pub fn new() -> Self {
let wall_colors = vec![
Color::RGB(128, 255, 0),
Color::RGB(0, 128, 255),
Color::RGB(255, 0, 128),
Color::RGB(0, 255, 0),
Color::RGB(0, 0, 255),
Color::WHITE,
];
// let map = Map::default();
let map = Map::load("./assets/maps/many_walls.json").unwrap();
let (w, h) = map.dims;
// let movement_vector = Line::new(origin, origin + geo::Point::new(delta_x, delta_y));
let position = Vec2D::new(w as f64 / 2., 50. + h as f64 / 2.);
let fov = 60.;
let projection_plane_distance =
((WINDOW_WIDTH / 2) as f64 / (fov.to_radians() / 2.).tan()) as f64;
let resolution = WINDOW_WIDTH as usize;
Self {
position,
angle: std::f64::consts::PI,
fov,
wall_colors,
map,
keys: KeyStateHandler::new(),
columns: Vec::with_capacity(resolution),
resolution,
projection_factor: projection_plane_distance * WALL_ACTUAL_HEIGHT,
radian_per_column: fov.to_radians() / resolution as f64,
column_width: WINDOW_WIDTH / resolution as u32,
}
}
fn get_color(&self, index: usize) -> Color
|
pub fn mouse_motion(&mut self, dx: i32) {
self.angle += MOUSE_SENSITIVITY * dx as f64;
}
fn calculate_collisions(&mut self) {
let mut current_angle = self.angle - (self.fov.to_radians() / 2.);
let end_angle = current_angle + (self.radian_per_column * self.resolution as f64);
self.columns.clear();
for _ in 0..self.resolution {
let mut ray = Vec2D::from_angle(current_angle);
ray.translate(&self.position);
let mut max_height = f64::NEG_INFINITY;
let mut wall_color_index = 0;
for wall in self.map.walls.iter() {
if let Some(intersection_vector) = ray.intersects(wall) {
let raw_distance = ray.dist(&intersection_vector);
let delta = current_angle - self.angle;
let corrected_distance = raw_distance * (delta.cos() as f64);
let projected_height = self.projection_factor / corrected_distance;
if projected_height > max_height {
max_height = projected_height;
wall_color_index = wall.color_index;
}
}
}
if max_height.is_infinite() {
self.columns.push((0, 0));
} else {
self.columns
.push((wall_color_index, max_height.round() as u32));
}
current_angle += self.radian_per_column;
}
}
fn update_camera(&mut self) {
let mut delta = Vec2D::Origin;
let par = Vec2D::from_angle(self.angle as f64);
let perp = Vec2D::from_angle(self.angle + (90f64).to_radians());
if self.keys.is_pressed(Keycode::W) {
delta += par;
}
if self.keys.is_pressed(Keycode::S) {
delta += -par;
}
if self.keys.is_pressed(Keycode::A) {
delta += -perp;
}
if self.keys.is_pressed(Keycode::D) {
delta += perp;
}
// Normalize delta so that the player doesn't move faster moving in a diagonal direction
if !delta.is_origin() {
delta = delta.normalize() * MOVE_SPEED;
}
self.position.add_x_y_raw(delta.x_y());
self.position.clamp(self.map.dims, PLAYER_WALL_PADDING);
}
pub fn update(&mut self) {
self.update_camera();
self.calculate_collisions();
}
pub fn draw_minimap(
&self,
canvas: &mut Canvas<Window>,
dims: (f64, f64),
) -> Result<(), String> {
let minimap_offset = (dims.0.max(dims.1) / 4.);
let minimap_base = Vec2D::new(minimap_offset, minimap_offset);
// Background
canvas.set_draw_color(Color::BLACK);
canvas.fill_rect(Rect::new(
minimap_offset as i32,
minimap_offset as i32,
dims.0 as u32,
dims.1 as u32,
))?;
canvas.set_draw_color(Color::WHITE);
// Player position
let position_mapped = self.position.remap(self.map.dims, dims) + minimap_base;
canvas.fill_rect(Rect::from_center(position_mapped, 8, 8))?;
// Player lines
let ray_scale = dims.0.max(dims.1) / 2.;
let half_fov = self.fov.to_radians() / 2.;
let forward_end = position_mapped + (Vec2D::from_angle(self.angle) * ray_scale);
let left_end = position_mapped + (Vec2D::from_angle(self.angle - half_fov) * ray_scale);
let right_end = position_mapped + (Vec2D::from_angle(self.angle + half_fov) * ray_scale);
canvas.draw_lines(&[
position_mapped.into(),
forward_end.into(),
position_mapped.into(),
left_end.into(),
position_mapped.into(),
right_end.into(),
] as &[Point])?;
// TODO: FOV lines
// Walls
for wall in self.map.walls.iter() {
canvas.set_draw_color(self.get_color(wall.color_index));
let start = wall.a.remap(self.map.dims, dims) + minimap_base;
let end = wall.b.remap(self.map.dims, dims) + minimap_base;
canvas.draw_line(start, end)?;
}
// let mut current_angle = self.angle + (self.fov.to_radians() / 2.);
// for _ in 0..self.resolution {
// let mut ray = self.position;
// ray.set_angle(current_angle);
// ray += self.position;
// let mut max_height = f64::NEG_INFINITY;
// let mut collisions: Vec<(bool, Vec2D)> = vec![];
// let mut collision = Vec2D::Origin;
// for wall in self.map.walls.iter() {
// if let Some(intersection_vector) = ray.intersects(wall) {
// let raw_distance = ray.dist(&intersection_vector);
// let delta = current_angle - self.angle;
// let corrected_distance = raw_distance * (delta.cos() as f64);
// let projected_height = self.projection_factor / corrected_distance;
// if projected_height > max_height {
// max_height = projected_height;
// collisions = collisions.into_iter().fold(vec![], |mut acc, cur| {
// acc.push((false, cur.1));
// acc
// });
// collisions.push((true, collision));
// collision = intersection_vector;
// } else {
// collisions.push((false, intersection_vector));
// }
// }
// }
// if !max_height.is_infinite() {
// canvas.set_draw_color(Color::RED);
// canvas.draw_rects(
// collisions
// .into_iter()
// .map(|(_, v)| {
// Rect::from_center(
// Point::new(
// remap
|
{
self.wall_colors.get(index).copied().unwrap_or(Color::WHITE)
}
|
identifier_body
|
state.rs
|
new_min.as_()
+ (new_max.as_() - new_min.as_())
* ((value - old_min.as_()) / (old_max.as_() - old_min.as_()))
}
pub fn remap_minz<
T: 'static + Float + Copy + AsPrimitive<T>,
OX: AsPrimitive<T> + Copy,
NX: AsPrimitive<T> + Copy,
>(
value: T,
old_max: OX,
new_max: NX,
) -> T {
remap(value, T::zero(), old_max, T::zero(), new_max)
}
const MOUSE_SENSITIVITY: f64 = 0.01;
const MOVE_SPEED: f64 = 2.5;
const PLAYER_WALL_PADDING: f64 = 10.;
const WALL_ACTUAL_HEIGHT: f64 = 48.;
pub struct State {
pub(crate) position: Vec2D,
pub(crate) angle: f64,
pub(crate) fov: f64,
pub(crate) wall_colors: Vec<Color>,
pub(crate) map: Map,
pub(crate) keys: KeyStateHandler,
pub(crate) columns: Vec<(usize, u32)>,
pub(crate) resolution: usize,
pub(crate) projection_factor: f64,
pub(crate) radian_per_column: f64,
pub(crate) column_width: u32,
}
impl State {
pub fn new() -> Self {
let wall_colors = vec![
Color::RGB(128, 255, 0),
Color::RGB(0, 128, 255),
Color::RGB(255, 0, 128),
Color::RGB(0, 255, 0),
Color::RGB(0, 0, 255),
Color::WHITE,
];
// let map = Map::default();
let map = Map::load("./assets/maps/many_walls.json").unwrap();
let (w, h) = map.dims;
// let movement_vector = Line::new(origin, origin + geo::Point::new(delta_x, delta_y));
let position = Vec2D::new(w as f64 / 2., 50. + h as f64 / 2.);
let fov = 60.;
let projection_plane_distance =
((WINDOW_WIDTH / 2) as f64 / (fov.to_radians() / 2.).tan()) as f64;
let resolution = WINDOW_WIDTH as usize;
Self {
position,
angle: std::f64::consts::PI,
fov,
wall_colors,
map,
keys: KeyStateHandler::new(),
columns: Vec::with_capacity(resolution),
resolution,
projection_factor: projection_plane_distance * WALL_ACTUAL_HEIGHT,
radian_per_column: fov.to_radians() / resolution as f64,
column_width: WINDOW_WIDTH / resolution as u32,
}
}
fn get_color(&self, index: usize) -> Color {
self.wall_colors.get(index).copied().unwrap_or(Color::WHITE)
}
pub fn mouse_motion(&mut self, dx: i32) {
self.angle += MOUSE_SENSITIVITY * dx as f64;
}
fn calculate_collisions(&mut self) {
let mut current_angle = self.angle - (self.fov.to_radians() / 2.);
let end_angle = current_angle + (self.radian_per_column * self.resolution as f64);
self.columns.clear();
for _ in 0..self.resolution {
let mut ray = Vec2D::from_angle(current_angle);
ray.translate(&self.position);
let mut max_height = f64::NEG_INFINITY;
let mut wall_color_index = 0;
for wall in self.map.walls.iter() {
if let Some(intersection_vector) = ray.intersects(wall) {
let raw_distance = ray.dist(&intersection_vector);
let delta = current_angle - self.angle;
let corrected_distance = raw_distance * (delta.cos() as f64);
let projected_height = self.projection_factor / corrected_distance;
if projected_height > max_height {
max_height = projected_height;
wall_color_index = wall.color_index;
}
}
}
if max_height.is_infinite() {
self.columns.push((0, 0));
} else {
self.columns
.push((wall_color_index, max_height.round() as u32));
}
current_angle += self.radian_per_column;
}
}
fn update_camera(&mut self) {
let mut delta = Vec2D::Origin;
let par = Vec2D::from_angle(self.angle as f64);
let perp = Vec2D::from_angle(self.angle + (90f64).to_radians());
if self.keys.is_pressed(Keycode::W) {
delta += par;
}
if self.keys.is_pressed(Keycode::S) {
delta += -par;
}
if self.keys.is_pressed(Keycode::A) {
delta += -perp;
}
if self.keys.is_pressed(Keycode::D) {
delta += perp;
}
// Normalize delta so that the player doesn't move faster moving in a diagonal direction
if !delta.is_origin() {
delta = delta.normalize() * MOVE_SPEED;
}
|
pub fn update(&mut self) {
self.update_camera();
self.calculate_collisions();
}
pub fn draw_minimap(
&self,
canvas: &mut Canvas<Window>,
dims: (f64, f64),
) -> Result<(), String> {
let minimap_offset = (dims.0.max(dims.1) / 4.);
let minimap_base = Vec2D::new(minimap_offset, minimap_offset);
// Background
canvas.set_draw_color(Color::BLACK);
canvas.fill_rect(Rect::new(
minimap_offset as i32,
minimap_offset as i32,
dims.0 as u32,
dims.1 as u32,
))?;
canvas.set_draw_color(Color::WHITE);
// Player position
let position_mapped = self.position.remap(self.map.dims, dims) + minimap_base;
canvas.fill_rect(Rect::from_center(position_mapped, 8, 8))?;
// Player lines
let ray_scale = dims.0.max(dims.1) / 2.;
let half_fov = self.fov.to_radians() / 2.;
let forward_end = position_mapped + (Vec2D::from_angle(self.angle) * ray_scale);
let left_end = position_mapped + (Vec2D::from_angle(self.angle - half_fov) * ray_scale);
let right_end = position_mapped + (Vec2D::from_angle(self.angle + half_fov) * ray_scale);
canvas.draw_lines(&[
position_mapped.into(),
forward_end.into(),
position_mapped.into(),
left_end.into(),
position_mapped.into(),
right_end.into(),
] as &[Point])?;
// TODO: FOV lines
// Walls
for wall in self.map.walls.iter() {
canvas.set_draw_color(self.get_color(wall.color_index));
let start = wall.a.remap(self.map.dims, dims) + minimap_base;
let end = wall.b.remap(self.map.dims, dims) + minimap_base;
canvas.draw_line(start, end)?;
}
// let mut current_angle = self.angle + (self.fov.to_radians() / 2.);
// for _ in 0..self.resolution {
// let mut ray = self.position;
// ray.set_angle(current_angle);
// ray += self.position;
// let mut max_height = f64::NEG_INFINITY;
// let mut collisions: Vec<(bool, Vec2D)> = vec![];
// let mut collision = Vec2D::Origin;
// for wall in self.map.walls.iter() {
// if let Some(intersection_vector) = ray.intersects(wall) {
// let raw_distance = ray.dist(&intersection_vector);
// let delta = current_angle - self.angle;
// let corrected_distance = raw_distance * (delta.cos() as f64);
// let projected_height = self.projection_factor / corrected_distance;
// if projected_height > max_height {
// max_height = projected_height;
// collisions = collisions.into_iter().fold(vec![], |mut acc, cur| {
// acc.push((false, cur.1));
// acc
// });
// collisions.push((true, collision));
// collision = intersection_vector;
// } else {
// collisions.push((false, intersection_vector));
// }
// }
// }
// if !max_height.is_infinite() {
// canvas.set_draw_color(Color::RED);
// canvas.draw_rects(
// collisions
// .into_iter()
// .map(|(_, v)| {
// Rect::from_center(
// Point::new(
// remap
|
self.position.add_x_y_raw(delta.x_y());
self.position.clamp(self.map.dims, PLAYER_WALL_PADDING);
}
|
random_line_split
|
state.rs
|
new_min.as_()
+ (new_max.as_() - new_min.as_())
* ((value - old_min.as_()) / (old_max.as_() - old_min.as_()))
}
pub fn remap_minz<
T: 'static + Float + Copy + AsPrimitive<T>,
OX: AsPrimitive<T> + Copy,
NX: AsPrimitive<T> + Copy,
>(
value: T,
old_max: OX,
new_max: NX,
) -> T {
remap(value, T::zero(), old_max, T::zero(), new_max)
}
const MOUSE_SENSITIVITY: f64 = 0.01;
const MOVE_SPEED: f64 = 2.5;
const PLAYER_WALL_PADDING: f64 = 10.;
const WALL_ACTUAL_HEIGHT: f64 = 48.;
pub struct State {
pub(crate) position: Vec2D,
pub(crate) angle: f64,
pub(crate) fov: f64,
pub(crate) wall_colors: Vec<Color>,
pub(crate) map: Map,
pub(crate) keys: KeyStateHandler,
pub(crate) columns: Vec<(usize, u32)>,
pub(crate) resolution: usize,
pub(crate) projection_factor: f64,
pub(crate) radian_per_column: f64,
pub(crate) column_width: u32,
}
impl State {
pub fn new() -> Self {
let wall_colors = vec![
Color::RGB(128, 255, 0),
Color::RGB(0, 128, 255),
Color::RGB(255, 0, 128),
Color::RGB(0, 255, 0),
Color::RGB(0, 0, 255),
Color::WHITE,
];
// let map = Map::default();
let map = Map::load("./assets/maps/many_walls.json").unwrap();
let (w, h) = map.dims;
// let movement_vector = Line::new(origin, origin + geo::Point::new(delta_x, delta_y));
let position = Vec2D::new(w as f64 / 2., 50. + h as f64 / 2.);
let fov = 60.;
let projection_plane_distance =
((WINDOW_WIDTH / 2) as f64 / (fov.to_radians() / 2.).tan()) as f64;
let resolution = WINDOW_WIDTH as usize;
Self {
position,
angle: std::f64::consts::PI,
fov,
wall_colors,
map,
keys: KeyStateHandler::new(),
columns: Vec::with_capacity(resolution),
resolution,
projection_factor: projection_plane_distance * WALL_ACTUAL_HEIGHT,
radian_per_column: fov.to_radians() / resolution as f64,
column_width: WINDOW_WIDTH / resolution as u32,
}
}
fn get_color(&self, index: usize) -> Color {
self.wall_colors.get(index).copied().unwrap_or(Color::WHITE)
}
pub fn mouse_motion(&mut self, dx: i32) {
self.angle += MOUSE_SENSITIVITY * dx as f64;
}
fn calculate_collisions(&mut self) {
let mut current_angle = self.angle - (self.fov.to_radians() / 2.);
let end_angle = current_angle + (self.radian_per_column * self.resolution as f64);
self.columns.clear();
for _ in 0..self.resolution {
let mut ray = Vec2D::from_angle(current_angle);
ray.translate(&self.position);
let mut max_height = f64::NEG_INFINITY;
let mut wall_color_index = 0;
for wall in self.map.walls.iter() {
if let Some(intersection_vector) = ray.intersects(wall)
|
}
if max_height.is_infinite() {
self.columns.push((0, 0));
} else {
self.columns
.push((wall_color_index, max_height.round() as u32));
}
current_angle += self.radian_per_column;
}
}
fn update_camera(&mut self) {
let mut delta = Vec2D::Origin;
let par = Vec2D::from_angle(self.angle as f64);
let perp = Vec2D::from_angle(self.angle + (90f64).to_radians());
if self.keys.is_pressed(Keycode::W) {
delta += par;
}
if self.keys.is_pressed(Keycode::S) {
delta += -par;
}
if self.keys.is_pressed(Keycode::A) {
delta += -perp;
}
if self.keys.is_pressed(Keycode::D) {
delta += perp;
}
// Normalize delta so that the player doesn't move faster moving in a diagonal direction
if !delta.is_origin() {
delta = delta.normalize() * MOVE_SPEED;
}
self.position.add_x_y_raw(delta.x_y());
self.position.clamp(self.map.dims, PLAYER_WALL_PADDING);
}
pub fn update(&mut self) {
self.update_camera();
self.calculate_collisions();
}
pub fn draw_minimap(
&self,
canvas: &mut Canvas<Window>,
dims: (f64, f64),
) -> Result<(), String> {
let minimap_offset = (dims.0.max(dims.1) / 4.);
let minimap_base = Vec2D::new(minimap_offset, minimap_offset);
// Background
canvas.set_draw_color(Color::BLACK);
canvas.fill_rect(Rect::new(
minimap_offset as i32,
minimap_offset as i32,
dims.0 as u32,
dims.1 as u32,
))?;
canvas.set_draw_color(Color::WHITE);
// Player position
let position_mapped = self.position.remap(self.map.dims, dims) + minimap_base;
canvas.fill_rect(Rect::from_center(position_mapped, 8, 8))?;
// Player lines
let ray_scale = dims.0.max(dims.1) / 2.;
let half_fov = self.fov.to_radians() / 2.;
let forward_end = position_mapped + (Vec2D::from_angle(self.angle) * ray_scale);
let left_end = position_mapped + (Vec2D::from_angle(self.angle - half_fov) * ray_scale);
let right_end = position_mapped + (Vec2D::from_angle(self.angle + half_fov) * ray_scale);
canvas.draw_lines(&[
position_mapped.into(),
forward_end.into(),
position_mapped.into(),
left_end.into(),
position_mapped.into(),
right_end.into(),
] as &[Point])?;
// TODO: FOV lines
// Walls
for wall in self.map.walls.iter() {
canvas.set_draw_color(self.get_color(wall.color_index));
let start = wall.a.remap(self.map.dims, dims) + minimap_base;
let end = wall.b.remap(self.map.dims, dims) + minimap_base;
canvas.draw_line(start, end)?;
}
// let mut current_angle = self.angle + (self.fov.to_radians() / 2.);
// for _ in 0..self.resolution {
// let mut ray = self.position;
// ray.set_angle(current_angle);
// ray += self.position;
// let mut max_height = f64::NEG_INFINITY;
// let mut collisions: Vec<(bool, Vec2D)> = vec![];
// let mut collision = Vec2D::Origin;
// for wall in self.map.walls.iter() {
// if let Some(intersection_vector) = ray.intersects(wall) {
// let raw_distance = ray.dist(&intersection_vector);
// let delta = current_angle - self.angle;
// let corrected_distance = raw_distance * (delta.cos() as f64);
// let projected_height = self.projection_factor / corrected_distance;
// if projected_height > max_height {
// max_height = projected_height;
// collisions = collisions.into_iter().fold(vec![], |mut acc, cur| {
// acc.push((false, cur.1));
// acc
// });
// collisions.push((true, collision));
// collision = intersection_vector;
// } else {
// collisions.push((false, intersection_vector));
// }
// }
// }
// if !max_height.is_infinite() {
// canvas.set_draw_color(Color::RED);
// canvas.draw_rects(
// collisions
// .into_iter()
// .map(|(_, v)| {
// Rect::from_center(
// Point::new(
// remap
|
{
let raw_distance = ray.dist(&intersection_vector);
let delta = current_angle - self.angle;
let corrected_distance = raw_distance * (delta.cos() as f64);
let projected_height = self.projection_factor / corrected_distance;
if projected_height > max_height {
max_height = projected_height;
wall_color_index = wall.color_index;
}
}
|
conditional_block
|
state.rs
|
),
Color::RGB(255, 0, 128),
Color::RGB(0, 255, 0),
Color::RGB(0, 0, 255),
Color::WHITE,
];
// let map = Map::default();
let map = Map::load("./assets/maps/many_walls.json").unwrap();
let (w, h) = map.dims;
// let movement_vector = Line::new(origin, origin + geo::Point::new(delta_x, delta_y));
let position = Vec2D::new(w as f64 / 2., 50. + h as f64 / 2.);
let fov = 60.;
let projection_plane_distance =
((WINDOW_WIDTH / 2) as f64 / (fov.to_radians() / 2.).tan()) as f64;
let resolution = WINDOW_WIDTH as usize;
Self {
position,
angle: std::f64::consts::PI,
fov,
wall_colors,
map,
keys: KeyStateHandler::new(),
columns: Vec::with_capacity(resolution),
resolution,
projection_factor: projection_plane_distance * WALL_ACTUAL_HEIGHT,
radian_per_column: fov.to_radians() / resolution as f64,
column_width: WINDOW_WIDTH / resolution as u32,
}
}
fn get_color(&self, index: usize) -> Color {
self.wall_colors.get(index).copied().unwrap_or(Color::WHITE)
}
pub fn mouse_motion(&mut self, dx: i32) {
self.angle += MOUSE_SENSITIVITY * dx as f64;
}
fn calculate_collisions(&mut self) {
let mut current_angle = self.angle - (self.fov.to_radians() / 2.);
let end_angle = current_angle + (self.radian_per_column * self.resolution as f64);
self.columns.clear();
for _ in 0..self.resolution {
let mut ray = Vec2D::from_angle(current_angle);
ray.translate(&self.position);
let mut max_height = f64::NEG_INFINITY;
let mut wall_color_index = 0;
for wall in self.map.walls.iter() {
if let Some(intersection_vector) = ray.intersects(wall) {
let raw_distance = ray.dist(&intersection_vector);
let delta = current_angle - self.angle;
let corrected_distance = raw_distance * (delta.cos() as f64);
let projected_height = self.projection_factor / corrected_distance;
if projected_height > max_height {
max_height = projected_height;
wall_color_index = wall.color_index;
}
}
}
if max_height.is_infinite() {
self.columns.push((0, 0));
} else {
self.columns
.push((wall_color_index, max_height.round() as u32));
}
current_angle += self.radian_per_column;
}
}
fn update_camera(&mut self) {
let mut delta = Vec2D::Origin;
let par = Vec2D::from_angle(self.angle as f64);
let perp = Vec2D::from_angle(self.angle + (90f64).to_radians());
if self.keys.is_pressed(Keycode::W) {
delta += par;
}
if self.keys.is_pressed(Keycode::S) {
delta += -par;
}
if self.keys.is_pressed(Keycode::A) {
delta += -perp;
}
if self.keys.is_pressed(Keycode::D) {
delta += perp;
}
// Normalize delta so that the player doesn't move faster moving in a diagonal direction
if !delta.is_origin() {
delta = delta.normalize() * MOVE_SPEED;
}
self.position.add_x_y_raw(delta.x_y());
self.position.clamp(self.map.dims, PLAYER_WALL_PADDING);
}
pub fn update(&mut self) {
self.update_camera();
self.calculate_collisions();
}
pub fn draw_minimap(
&self,
canvas: &mut Canvas<Window>,
dims: (f64, f64),
) -> Result<(), String> {
let minimap_offset = (dims.0.max(dims.1) / 4.);
let minimap_base = Vec2D::new(minimap_offset, minimap_offset);
// Background
canvas.set_draw_color(Color::BLACK);
canvas.fill_rect(Rect::new(
minimap_offset as i32,
minimap_offset as i32,
dims.0 as u32,
dims.1 as u32,
))?;
canvas.set_draw_color(Color::WHITE);
// Player position
let position_mapped = self.position.remap(self.map.dims, dims) + minimap_base;
canvas.fill_rect(Rect::from_center(position_mapped, 8, 8))?;
// Player lines
let ray_scale = dims.0.max(dims.1) / 2.;
let half_fov = self.fov.to_radians() / 2.;
let forward_end = position_mapped + (Vec2D::from_angle(self.angle) * ray_scale);
let left_end = position_mapped + (Vec2D::from_angle(self.angle - half_fov) * ray_scale);
let right_end = position_mapped + (Vec2D::from_angle(self.angle + half_fov) * ray_scale);
canvas.draw_lines(&[
position_mapped.into(),
forward_end.into(),
position_mapped.into(),
left_end.into(),
position_mapped.into(),
right_end.into(),
] as &[Point])?;
// TODO: FOV lines
// Walls
for wall in self.map.walls.iter() {
canvas.set_draw_color(self.get_color(wall.color_index));
let start = wall.a.remap(self.map.dims, dims) + minimap_base;
let end = wall.b.remap(self.map.dims, dims) + minimap_base;
canvas.draw_line(start, end)?;
}
// let mut current_angle = self.angle + (self.fov.to_radians() / 2.);
// for _ in 0..self.resolution {
// let mut ray = self.position;
// ray.set_angle(current_angle);
// ray += self.position;
// let mut max_height = f64::NEG_INFINITY;
// let mut collisions: Vec<(bool, Vec2D)> = vec![];
// let mut collision = Vec2D::Origin;
// for wall in self.map.walls.iter() {
// if let Some(intersection_vector) = ray.intersects(wall) {
// let raw_distance = ray.dist(&intersection_vector);
// let delta = current_angle - self.angle;
// let corrected_distance = raw_distance * (delta.cos() as f64);
// let projected_height = self.projection_factor / corrected_distance;
// if projected_height > max_height {
// max_height = projected_height;
// collisions = collisions.into_iter().fold(vec![], |mut acc, cur| {
// acc.push((false, cur.1));
// acc
// });
// collisions.push((true, collision));
// collision = intersection_vector;
// } else {
// collisions.push((false, intersection_vector));
// }
// }
// }
// if !max_height.is_infinite() {
// canvas.set_draw_color(Color::RED);
// canvas.draw_rects(
// collisions
// .into_iter()
// .map(|(_, v)| {
// Rect::from_center(
// Point::new(
// remap(v.x(), 0., self.map.dims.0, 0., dims.0).floor() as i32,
// remap(v.y(), 0., self.map.dims.1, 0., dims.1).floor() as i32,
// ) + minimap_base,
// 2,
// 2,
// )
// })
// .collect::<Vec<Rect>>()
// .as_slice(),
// );
// }
// current_angle -= self.radian_per_column;
// }
Ok(())
}
fn render_frame(&self, canvas: &mut Canvas<Window>) -> Result<(), String> {
let column_width_signed = self.column_width as i32;
// TODO: Draw background
let mut current_index = usize::MAX;
let mut current_color = Color::BLACK;
for (idx, (color_index, height)) in self.columns.iter().copied().enumerate() {
if color_index != current_index {
current_color = self.get_color(color_index);
current_index = color_index;
}
let dim_amt = remap(height as f64, 0, WINDOW_HEIGHT, 255, 0).floor() as u8;
canvas.set_draw_color(current_color.dim(dim_amt));
canvas.fill_rect(Rect::from_center(
Point::new(
idx as i32 * column_width_signed + (column_width_signed / 2),
WINDOW_HEIGHT as i32 / 2,
),
self.column_width,
height,
))?;
}
Ok(())
}
pub fn
|
draw
|
identifier_name
|
|
opdb.rs
|
row, &CfNameTypeCode::HaNodesInfo.get())?;
Ok(())
}
///
/// 编辑节点信息
pub fn edit(&mut self, info: &web::Json<EditInfo>) {
self.host = info.host.clone();
self.dbport = info.dbport.clone();
self.cluster_name = info.cluster_name.clone();
self.update_time = crate::timestamp();
}
///
/// 设置节点维护模式状态
pub fn maintain(&mut self, info: &web::Json<EditMainTain>) {
if info.maintain == "true".to_string() {
self.maintain = false;
}else {
self.maintain = true;
}
self.update_time = crate::timestamp();
}
///
/// 获取当前节点在db中保存的状态信息
pub fn get_state(&self, db: &web::Data<DbInfo>) -> Result<MysqlState, Box<dyn Error>> {
let kv = db.get(&self.host, &CfNameTypeCode::NodesState.get())?;
if kv.value.len() > 0 {
let state: MysqlState = serde_json::from_str(&kv.value)?;
return Ok(state);
}else {
//let err = format!("this host: {} no state data", &self.host);
//return Err(err.into());
let state = MysqlState::new();
return Ok(state);
}
}
pub fn get_role(&self, db: &web::Data<DbInfo>) -> Result<String, Box<dyn Error>> {
let state = self.get_state(db)?;
Ok(state.role)
}
}
///
///
///
///
/// 获取db中现有的cluster列表
#[derive(Serialize, Deserialize, Debug)]
pub struct NodeClusterList{
pub cluster_name_list: Vec<String>
}
impl NodeClusterList{
pub fn new() -> NodeClusterList{
NodeClusterList { cluster_name_list: vec![] }
}
pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> {
let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?;
for row in &result{
let value: HostInfoValue = serde_json::from_str(&row.value)?;
if self.is_exists(&value.cluster_name){continue;}
self.cluster_name_list.push(value.cluster_name.clone());
}
Ok(())
}
fn is_exists(&self, cluster_name: &String) -> bool {
for cl in &self.cluster_name_list {
if cl == cluster_name{
return true;
}
}
return false;
}
}
/// 获取route信息中现有的cluster列表
#[derive(Serialize, Deserialize, Debug)]
pub struct RouteClusterList{
pub cluster_name_list: Vec<String>
}
impl RouteClusterList{
pub fn new() -> RouteClusterList {
RouteClusterList{ cluster_name_list: vec![] }
}
pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{
let route_all = db.get_route_all()?;
for route in &route_all {
if !self.is_exists(&route.value.cluster_name){
self.cluster_name_list.push(route.value.cluster_name.clone());
}
}
Ok(())
}
fn is_exists(&self, cluster_name: &String) -> bool {
for cl in &self.cluster_name_list {
if cl == cluster_name {
return true;
}
}
return false;
}
}
///
///
///
///
/// node节点信息
#[derive(Deserialize, Serialize, Debug)]
pub struct NodeInfo{
pub cluster_name: String,
pub host: String,
pub dbport: usize,
pub online: bool, //是否在线, true、false
pub maintain: bool, //是否处于维护模式,true、false
pub role: String, //主从角色
pub master: String,
pub sql_thread: bool,
pub io_thread: bool,
pub seconds_behind: usize,
pub read_only: bool,
pub version: String,
pub executed_gtid_set: String,
pub innodb_flush_log_at_trx_commit: usize,
pub sync_binlog: usize,
pub server_id: usize,
pub event_scheduler: String,
pub sql_error: String
}
impl NodeInfo{
pub fn new(state: &MysqlState, node: &HostInfoValue) -> NodeInfo {
let mut ni = NodeInfo{
cluster_name: node.cluster_name.clone(),
host: node.host.clone(),
dbport: node.dbport.clone(),
online: node.online.clone(),
maintain: node.maintain.clone(),
role: state.role.clone(),
master: state.master.clone(),
sql_thread: state.sql_thread.clone(),
io_thread: state.io_thread.clone(),
seconds_behind: state.seconds_behind.clone(),
read_only: state.read_only.clone(),
version: state.version.clone(),
executed_gtid_set: state.executed_gtid_set.clone(),
innodb_flush_log_at_trx_commit: state.innodb_flush_log_at_trx_commit.clone(),
sync_binlog: state.sync_binlog.clone(),
server_id: state.server_id.clone(),
event_scheduler: state.event_scheduler.clone(),
sql_error: "".to_string()
};
if state.last_io_error.len() > 0{
ni.sql_error = state.last_io_error.clone();
}else if state.last_sql_error.len() > 0 {
ni.sql_error = state.last_sql_error.clone();
}
return ni;
}
}
///
///
///
///
/// 每个集群节点信息
#[derive(Deserialize, Serialize, Debug)]
pub struct ClusterNodeInfo{
pub cluster_name: String,
pub total: usize,
pub nodes_info: Vec<NodeInfo>
}
impl ClusterNodeInfo{
pub fn new(cluster_name: &String) -> ClusterNodeInfo{
ClusterNodeInfo{
cluster_name: cluster_name.clone(),
total: 0,
nodes_info: vec![]
}
}
pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{
let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?;
for row in &result{
let node: HostInfoValue = serde_json::from_str(&row.value)?;
if &node.cluster_name == &self.cluster_name{
let state = node.get_state(db)?;
let node_info = NodeInfo::new(&state, &node);
self.total += 1;
self.nodes_info.push(node_info);
}
}
Ok(())
}
///
/// 统计所有节点监控信息, 用于首页展示
///
/// 倒叙迭代获取每个节点最后一条数据, 如果每个节点都已获取最后一条数据就退出迭代
pub fn static_monitor(&self, db: &web::Data<DbInfo>, rsm: &mut ResponseMonitorStatic) -> Result<(), Box<dyn Error>> {
let cf_name = CfNameTypeCode::SystemData.get();
let mut tmp: Vec<String> = vec![];
//首先检查是否开启监控
for node in &self.nodes_info{
if !self.check_monitor_setting(db, &node.host){
tmp.push(node.host.clone());
}
}
if let Some(cf) = db.db.cf_handle(&cf_name){
let mut iter = db.db.raw_iterator_cf(cf)?;
iter.seek_to_last();
iter.prev();
'all: while iter.valid() {
if tmp.len() == self.nodes_info.len(){
break 'all;
}
if let Some(s) = iter.key(){
let key: String = from_utf8(&s.to_vec())?.parse()?;
if key.starts_with(&PrefixTypeCode::NodeMonitorData.prefix()){
'b: for n in &self.nodes_info{
if key.contains(n.host.as_str()){
for t in &tmp{
if t == &n.host{
break 'b;
}
}
if let Some(v) = iter.value(){
let v: MysqlMonitorStatus = serde_json::from_slice(&v)?;
rsm.update(&v);
tmp.push(n.host.clone());
}
break 'b;
}
}
}
}
//
//
iter.prev();
}
}
Ok(())
}
fn check_monitor_setting(&self, db: &web::Data<DbInfo>, host: &String) -> bool{
let a = db.prefix_get(&PrefixTypeCode::NodeMonitorSeting, host);
match a {
Ok(v) => {
if v.value.len() > 0{
let value: MonitorSetting = serde_json::from_str(&v.value).unwrap();
return value.monitor.clone();
}
}
Err(e) => {
info!("{}", e.to_string());
}
}
return false;
}
}
///
///
impl MysqlState{
pub fn save(&self, db: &web::Data<DbInfo>, key: &String) -> Result<(), Box<dyn Error>> {
let value = serde_json::to_string(&self)?;
let a = KeyValue{key: key.clone(), value };
db.put(&a, &CfNameTyp
|
eCode::NodesState.get())?;
Ok(())
}
}
///
///
/// slave behind 配置结构体
#[derive(Serialize, Deserialize, Debug)]
pub struct SlaveBehindSetting{
|
conditional_block
|
|
opdb.rs
|
ata: web::Data<DbInfo>, info: &web::Json<HostInfo>) -> Result<(), Box<dyn Error>> {
let check_unique = data.get(&info.host, &CfNameTypeCode::HaNodesInfo.get());
match check_unique {
Ok(v) => {
if v.value.len() > 0 {
let a = format!("this key: ({}) already exists in the database",&info.host);
return Err(a.into());
}
}
_ => {}
}
let v = HostInfoValue::new(info)?;
v.save(&data)?;
//初始化节点监控配置
let monitor_info = MonitorSetting::new(&info.host);
monitor_info.save(&data)?;
Ok(())
}
#[derive(Serialize, Deserialize, Debug)]
pub struct HaChangeLog {
pub key: String, //格式 host_timestamp host为宕机节点
pub cluster_name: String,
pub old_master_info: DownNodeCheck, //宕机节点信息
pub new_master_binlog_info: SlaveInfo, //如果宕机节点在切换之前未进行binlog追加将保存新master读取到的binlog信息,在宕机节点恢复时会进行判断回滚
pub recovery_info: RecoveryInfo, //宕机恢复同步所需的新master信息
pub recovery_status: bool, //是否已恢复
pub switch_status: bool, //切换状态
}
impl HaChangeLog {
pub fn new() -> HaChangeLog {
HaChangeLog{
key: "".to_string(),
cluster_name: "".to_string(),
old_master_info: DownNodeCheck { host: "".to_string(), dbport: 0 },
new_master_binlog_info: SlaveInfo {
host: "".to_string(),
dbport: 0,
slave_info: ReplicationState {
log_name: "".to_string(),
read_log_pos: 0,
exec_log_pos: 0
},
new_master: false
},
recovery_info: RecoveryInfo {
binlog: "".to_string(),
position: 0,
gtid: "".to_string(),
masterhost: "".to_string(),
masterport: 0,
read_binlog: "".to_string(),
read_position: 0
},
recovery_status: false,
switch_status: false
}
}
pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> {
let key = format!("{}_{}",self.key.clone(), crate::timestamp());
let value = serde_json::to_string(self)?;
let row = KeyValue{key, value};
db.put(&row, &CfNameTypeCode::HaChangeLog.get())?;
return Ok(());
}
pub fn update(&mut self, db: &web::Data<DbInfo>, row_key: String) -> Result<(), Box<dyn Error>> {
let value = serde_json::to_string(self)?;
let row = KeyValue{key: row_key, value};
db.put(&row, &CfNameTypeCode::HaChangeLog.get())?;
return Ok(());
}
}
///
///
///
/// 用户信息结构
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct UserInfo {
pub user_name: String,
pub password: String,
pub hook_id: String,
pub create_time: i64,
pub update_time: i64
}
impl UserInfo {
pub fn new(info: &PostUserInfo) -> UserInfo {
let create_time = crate::timestamp();
let update_time = crate::timestamp();
UserInfo{
user_name: info.user_name.clone(),
password: info.password.clone(),
hook_id: rand_string(),
create_time,
update_time
}
}
}
///
///
///
/// 节点基础信息, host做为key
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HostInfoValue {
pub host: String, //127.0.0.1:3306
pub dbport: usize, //default 3306
pub rtype: String, //db、route
pub cluster_name: String, //集群名称,route类型默认default
pub online: bool, //db是否在线, true、false
pub insert_time: i64,
pub update_time: i64,
pub maintain: bool, //是否处于维护模式,true、false
}
impl HostInfoValue {
pub fn new(info: &HostInfo) -> Result<HostInfoValue, Box<dyn Error>> {
let h = HostInfoValue{
host: info.host.clone(),
rtype: info.rtype.clone(),
dbport: info.dbport.clone(),
cluster_name: info.cluster_name.clone(),
online: false,
insert_time: crate::timestamp(),
update_time: crate::timestamp(),
maintain: false
};
Ok(h)
}
///
/// 写入db
pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> {
let value = serde_json::to_string(&self)?;
let row = KeyValue{key: self.host.clone(), value};
db.put(&row, &CfNameTypeCode::HaNodesInfo.get())?;
Ok(())
}
///
/// 编辑节点信息
pub fn edit(&mut self, info: &web::Json<EditInfo>) {
self.host = info.host.clone();
self.dbport = info.dbport.clone();
self.cluster_name = info.cluster_name.clone();
self.update_time = crate::timestamp();
}
///
/// 设置节点维护模式状态
pub fn maintain(&mut self, info: &web::Json<EditMainTain>) {
if info.maintain == "true".to_string() {
self.maintain = false;
}else {
self.maintain = true;
}
self.update_time = crate::timestamp();
}
///
/// 获取当前节点在db中保存的状态信息
pub fn get_state(&self, db: &web::Data<DbInfo>) -> Result<MysqlState, Box<dyn Error>> {
let kv = db.get(&self.host, &CfNameTypeCode::NodesState.get())?;
if kv.value.len() > 0 {
let state: MysqlState = serde_json::from_str(&kv.value)?;
return Ok(state);
}else {
//let err = format!("this host: {} no state data", &self.host);
//return Err(err.into());
let state = MysqlState::new();
return Ok(state);
}
}
pub fn get_role(&self, db: &web::Data<DbInfo>) -> Result<String, Box<dyn Error>> {
let state = self.get_state(db)?;
Ok(state.role)
}
}
///
///
///
///
/// 获取db中现有的cluster列表
#[derive(Serialize, Deserialize, Debug)]
pub struct NodeClusterList{
pub cluster_name_list: Vec<String>
}
impl NodeClusterList{
pub fn new() -> NodeClusterList{
NodeClusterList { cluster_name_list: vec![] }
}
pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> {
let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?;
for row in &result{
let value: HostInfoValue = serde_json::from_str(&row.value)?;
if self.is_exists(&value.cluster_name){continue;}
self.cluster_name_list.push(value.cluster_name.clone());
}
Ok(())
}
fn is_exists(&self, cluster_name: &String) -> bool {
for cl in &self.cluster_name_list {
if cl == cluster_name{
return true;
}
}
return false;
}
}
/// 获取route信息中现有的cluster列表
#[derive(Serialize, Deserialize, Debug)]
pub struct RouteClusterList{
pub cluster_name_list: Vec<String>
}
impl RouteClusterList{
pub fn new() -> RouteClusterList {
RouteClusterList{ cluster_name_list: vec![] }
}
pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{
let route_all = db.get_route_all()?;
for route in &route_all {
if !self.is_exists(&route.value.cluster_name){
self.cluster_name_list.push(route.value.cluster_name.clone());
}
}
Ok(())
}
fn is_exists(&self, cluster_name: &String) -> bool {
for cl in &self.cluster_name_list {
if cl == cluster_name {
return true;
}
}
return false;
}
}
///
///
///
///
/// node节点信息
#[derive(Deserialize, Serialize, Debug)]
pub struct NodeInfo{
pub cluster_name: String,
pub host: String,
pub dbport: usize,
pub online: bool, //是否在线, true、false
pub maintain: bool, //是否处于维护模式,true、false
pub role: String, //主从角色
pub master: String,
pub sql_thread: bool,
pub io_thread: bool,
pub seconds_behind: usize,
pub read_only: bool,
pub version: String,
pub executed_gtid_set: String,
pub innodb_flush_log_at_trx_commit:
|
sert_mysql_host_info(d
|
identifier_name
|
|
opdb.rs
|
之前未进行binlog追加将保存新master读取到的binlog信息,在宕机节点恢复时会进行判断回滚
pub recovery_info: RecoveryInfo, //宕机恢复同步所需的新master信息
pub recovery_status: bool, //是否已恢复
pub switch_status: bool, //切换状态
}
impl HaChangeLog {
pub fn new() -> HaChangeLog {
HaChangeLog{
key: "".to_string(),
cluster_name: "".to_string(),
old_master_info: DownNodeCheck { host: "".to_string(), dbport: 0 },
new_master_binlog_info: SlaveInfo {
host: "".to_string(),
dbport: 0,
slave_info: ReplicationState {
log_name: "".to_string(),
read_log_pos: 0,
exec_log_pos: 0
},
new_master: false
},
recovery_info: RecoveryInfo {
binlog: "".to_string(),
position: 0,
gtid: "".to_string(),
masterhost: "".to_string(),
masterport: 0,
read_binlog: "".to_string(),
read_position: 0
},
recovery_status: false,
switch_status: false
}
}
pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> {
let key = format!("{}_{}",self.key.clone(), crate::timestamp());
let value = serde_json::to_string(self)?;
let row = KeyValue{key, value};
db.put(&row, &CfNameTypeCode::HaChangeLog.get())?;
return Ok(());
}
pub fn update(&mut self, db: &web::Data<DbInfo>, row_key: String) -> Result<(), Box<dyn Error>> {
let value = serde_json::to_string(self)?;
let row = KeyValue{key: row_key, value};
db.put(&row, &CfNameTypeCode::HaChangeLog.get())?;
return Ok(());
}
}
///
///
///
/// 用户信息结构
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct UserInfo {
pub user_name: String,
pub password: String,
pub hook_id: String,
pub create_time: i64,
pub update_time: i64
}
impl UserInfo {
pub fn new(info: &PostUserInfo) -> UserInfo {
let create_time = crate::timestamp();
let update_time = crate::timestamp();
UserInfo{
user_name: info.user_name.clone(),
password: info.password.clone(),
hook_id: rand_string(),
create_time,
update_time
}
}
}
///
///
///
/// 节点基础信息, host做为key
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HostInfoValue {
pub host: String, //127.0.0.1:3306
pub dbport: usize, //default 3306
pub rtype: String, //db、route
pub cluster_name: String, //集群名称,route类型默认default
pub online: bool, //db是否在线, true、false
pub insert_time: i64,
pub update_time: i64,
pub maintain: bool, //是否处于维护模式,true、false
}
impl HostInfoValue {
pub fn new(info: &HostInfo) -> Result<HostInfoValue, Box<dyn Error>> {
let h = HostInfoValue{
host: info.host.clone(),
rtype: info.rtype.clone(),
dbport: info.dbport.clone(),
cluster_name: info.cluster_name.clone(),
online: false,
insert_time: crate::timestamp(),
update_time: crate::timestamp(),
maintain: false
};
Ok(h)
}
///
/// 写入db
pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> {
let value = serde_json::to_string(&self)?;
let row = KeyValue{key: self.host.clone(), value};
db.put(&row, &CfNameTypeCode::HaNodesInfo.get())?;
Ok(())
}
///
/// 编辑节点信息
pub fn edit(&mut self, info: &web::Json<EditInfo>) {
self.host = info.host.clone();
self.dbport = info.dbport.clone();
self.cluster_name = info.cluster_name.clone();
self.update_time = crate::timestamp();
}
///
/// 设置节点维护模式状态
pub fn maintain(&mut self, info: &web::Json<EditMainTain>) {
|
}
self.update_time = crate::timestamp();
}
///
/// 获取当前节点在db中保存的状态信息
pub fn get_state(&self, db: &web::Data<DbInfo>) -> Result<MysqlState, Box<dyn Error>> {
let kv = db.get(&self.host, &CfNameTypeCode::NodesState.get())?;
if kv.value.len() > 0 {
let state: MysqlState = serde_json::from_str(&kv.value)?;
return Ok(state);
}else {
//let err = format!("this host: {} no state data", &self.host);
//return Err(err.into());
let state = MysqlState::new();
return Ok(state);
}
}
pub fn get_role(&self, db: &web::Data<DbInfo>) -> Result<String, Box<dyn Error>> {
let state = self.get_state(db)?;
Ok(state.role)
}
}
///
///
///
///
/// 获取db中现有的cluster列表
#[derive(Serialize, Deserialize, Debug)]
pub struct NodeClusterList{
pub cluster_name_list: Vec<String>
}
impl NodeClusterList{
pub fn new() -> NodeClusterList{
NodeClusterList { cluster_name_list: vec![] }
}
pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> {
let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?;
for row in &result{
let value: HostInfoValue = serde_json::from_str(&row.value)?;
if self.is_exists(&value.cluster_name){continue;}
self.cluster_name_list.push(value.cluster_name.clone());
}
Ok(())
}
fn is_exists(&self, cluster_name: &String) -> bool {
for cl in &self.cluster_name_list {
if cl == cluster_name{
return true;
}
}
return false;
}
}
/// 获取route信息中现有的cluster列表
#[derive(Serialize, Deserialize, Debug)]
pub struct RouteClusterList{
pub cluster_name_list: Vec<String>
}
impl RouteClusterList{
pub fn new() -> RouteClusterList {
RouteClusterList{ cluster_name_list: vec![] }
}
pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{
let route_all = db.get_route_all()?;
for route in &route_all {
if !self.is_exists(&route.value.cluster_name){
self.cluster_name_list.push(route.value.cluster_name.clone());
}
}
Ok(())
}
fn is_exists(&self, cluster_name: &String) -> bool {
for cl in &self.cluster_name_list {
if cl == cluster_name {
return true;
}
}
return false;
}
}
///
///
///
///
/// node节点信息
#[derive(Deserialize, Serialize, Debug)]
pub struct NodeInfo{
pub cluster_name: String,
pub host: String,
pub dbport: usize,
pub online: bool, //是否在线, true、false
pub maintain: bool, //是否处于维护模式,true、false
pub role: String, //主从角色
pub master: String,
pub sql_thread: bool,
pub io_thread: bool,
pub seconds_behind: usize,
pub read_only: bool,
pub version: String,
pub executed_gtid_set: String,
pub innodb_flush_log_at_trx_commit: usize,
pub sync_binlog: usize,
pub server_id: usize,
pub event_scheduler: String,
pub sql_error: String
}
impl NodeInfo{
pub fn new(state: &MysqlState, node: &HostInfoValue) -> NodeInfo {
let mut ni = NodeInfo{
cluster_name: node.cluster_name.clone(),
host: node.host.clone(),
dbport: node.dbport.clone(),
online: node.online.clone(),
maintain: node.maintain.clone(),
role: state.role.clone(),
master: state.master.clone(),
sql_thread: state.sql_thread.clone(),
io_thread: state.io_thread.clone(),
seconds_behind: state.seconds_behind.clone(),
read_only: state.read_only.clone(),
version: state.version.clone(),
executed_gtid_set: state.executed_gtid_set.clone(),
innodb_flush_log_at_trx_commit: state.innodb_flush_log_at_trx_commit.clone(),
sync_binlog: state.sync_binlog.clone(),
server_id: state.server_id.clone(),
event_scheduler: state.event_scheduler.clone(),
sql_error: "".to
|
if info.maintain == "true".to_string() {
self.maintain = false;
}else {
self.maintain = true;
|
random_line_split
|
opdb.rs
|
未进行binlog追加将保存新master读取到的binlog信息,在宕机节点恢复时会进行判断回滚
pub recovery_info: RecoveryInfo, //宕机恢复同步所需的新master信息
pub recovery_status: bool, //是否已恢复
pub switch_status: bool, //切换状态
}
impl HaChangeLog {
pub fn new() -> HaChangeLog {
HaChangeLog{
key: "".to_string(),
cluster_name: "".to_string(),
old_master_info: DownNodeCheck { host: "".to_string(), dbport: 0 },
new_master_binlog_info: SlaveInfo {
host: "".to_string(),
dbport: 0,
slave_info: ReplicationState {
log_name: "".to_string(),
read_log_pos: 0,
exec_log_pos: 0
},
new_master: false
},
recovery_info: RecoveryInfo {
binlog: "".to_string(),
position: 0,
gtid: "".to_string(),
masterhost: "".to_string(),
masterport: 0,
read_binlog: "".to_string(),
read_position: 0
},
recovery_status: false,
switch_status: false
}
}
pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> {
let key = format!("{}_{}",self.key.clone(), crate::timestamp());
let value = serde_json::to_string(self)?;
let row = KeyValue{key, value};
db.put(&row, &CfNameTypeCode::HaChangeLog.get())?;
return Ok(());
}
pub fn update(&mut self, db: &web::Data<DbInfo>, row_key: String) -> Result<(), Box<dyn Error>> {
let value = serde_json::to_string(self)?;
let row = KeyValue{key: row_key, value};
db.put(&row, &CfNameTypeCode::HaChangeLog.get())?;
return Ok(());
}
}
///
///
///
/// 用户信息结构
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct UserInfo {
pub user_name: String,
pub password: String,
pub hook_id: String,
pub create_time: i64,
pub update_time: i64
}
impl UserInfo {
pub fn new(info: &PostUserInfo) -> UserInfo {
let create_time = crate::timestamp();
let update_time = crate::timestamp();
UserInfo{
user_name: info.user_name.clone(),
password: inf
|
ult 3306
pub rtype: String, //db、route
pub cluster_name: String, //集群名称,route类型默认default
pub online: bool, //db是否在线, true、false
pub insert_time: i64,
pub update_time: i64,
pub maintain: bool, //是否处于维护模式,true、false
}
impl HostInfoValue {
pub fn new(info: &HostInfo) -> Result<HostInfoValue, Box<dyn Error>> {
let h = HostInfoValue{
host: info.host.clone(),
rtype: info.rtype.clone(),
dbport: info.dbport.clone(),
cluster_name: info.cluster_name.clone(),
online: false,
insert_time: crate::timestamp(),
update_time: crate::timestamp(),
maintain: false
};
Ok(h)
}
///
/// 写入db
pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> {
let value = serde_json::to_string(&self)?;
let row = KeyValue{key: self.host.clone(), value};
db.put(&row, &CfNameTypeCode::HaNodesInfo.get())?;
Ok(())
}
///
/// 编辑节点信息
pub fn edit(&mut self, info: &web::Json<EditInfo>) {
self.host = info.host.clone();
self.dbport = info.dbport.clone();
self.cluster_name = info.cluster_name.clone();
self.update_time = crate::timestamp();
}
///
/// 设置节点维护模式状态
pub fn maintain(&mut self, info: &web::Json<EditMainTain>) {
if info.maintain == "true".to_string() {
self.maintain = false;
}else {
self.maintain = true;
}
self.update_time = crate::timestamp();
}
///
/// 获取当前节点在db中保存的状态信息
pub fn get_state(&self, db: &web::Data<DbInfo>) -> Result<MysqlState, Box<dyn Error>> {
let kv = db.get(&self.host, &CfNameTypeCode::NodesState.get())?;
if kv.value.len() > 0 {
let state: MysqlState = serde_json::from_str(&kv.value)?;
return Ok(state);
}else {
//let err = format!("this host: {} no state data", &self.host);
//return Err(err.into());
let state = MysqlState::new();
return Ok(state);
}
}
pub fn get_role(&self, db: &web::Data<DbInfo>) -> Result<String, Box<dyn Error>> {
let state = self.get_state(db)?;
Ok(state.role)
}
}
///
///
///
///
/// 获取db中现有的cluster列表
#[derive(Serialize, Deserialize, Debug)]
pub struct NodeClusterList{
pub cluster_name_list: Vec<String>
}
impl NodeClusterList{
pub fn new() -> NodeClusterList{
NodeClusterList { cluster_name_list: vec![] }
}
pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> {
let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?;
for row in &result{
let value: HostInfoValue = serde_json::from_str(&row.value)?;
if self.is_exists(&value.cluster_name){continue;}
self.cluster_name_list.push(value.cluster_name.clone());
}
Ok(())
}
fn is_exists(&self, cluster_name: &String) -> bool {
for cl in &self.cluster_name_list {
if cl == cluster_name{
return true;
}
}
return false;
}
}
/// 获取route信息中现有的cluster列表
#[derive(Serialize, Deserialize, Debug)]
pub struct RouteClusterList{
pub cluster_name_list: Vec<String>
}
impl RouteClusterList{
pub fn new() -> RouteClusterList {
RouteClusterList{ cluster_name_list: vec![] }
}
pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{
let route_all = db.get_route_all()?;
for route in &route_all {
if !self.is_exists(&route.value.cluster_name){
self.cluster_name_list.push(route.value.cluster_name.clone());
}
}
Ok(())
}
fn is_exists(&self, cluster_name: &String) -> bool {
for cl in &self.cluster_name_list {
if cl == cluster_name {
return true;
}
}
return false;
}
}
///
///
///
///
/// node节点信息
#[derive(Deserialize, Serialize, Debug)]
pub struct NodeInfo{
pub cluster_name: String,
pub host: String,
pub dbport: usize,
pub online: bool, //是否在线, true、false
pub maintain: bool, //是否处于维护模式,true、false
pub role: String, //主从角色
pub master: String,
pub sql_thread: bool,
pub io_thread: bool,
pub seconds_behind: usize,
pub read_only: bool,
pub version: String,
pub executed_gtid_set: String,
pub innodb_flush_log_at_trx_commit: usize,
pub sync_binlog: usize,
pub server_id: usize,
pub event_scheduler: String,
pub sql_error: String
}
impl NodeInfo{
pub fn new(state: &MysqlState, node: &HostInfoValue) -> NodeInfo {
let mut ni = NodeInfo{
cluster_name: node.cluster_name.clone(),
host: node.host.clone(),
dbport: node.dbport.clone(),
online: node.online.clone(),
maintain: node.maintain.clone(),
role: state.role.clone(),
master: state.master.clone(),
sql_thread: state.sql_thread.clone(),
io_thread: state.io_thread.clone(),
seconds_behind: state.seconds_behind.clone(),
read_only: state.read_only.clone(),
version: state.version.clone(),
executed_gtid_set: state.executed_gtid_set.clone(),
innodb_flush_log_at_trx_commit: state.innodb_flush_log_at_trx_commit.clone(),
sync_binlog: state.sync_binlog.clone(),
server_id: state.server_id.clone(),
event_scheduler: state.event_scheduler.clone(),
sql_error:
|
o.password.clone(),
hook_id: rand_string(),
create_time,
update_time
}
}
}
///
///
///
/// 节点基础信息, host做为key
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HostInfoValue {
pub host: String, //127.0.0.1:3306
pub dbport: usize, //defa
|
identifier_body
|
server_impl.go
|
},
writeRequest: &requestWrite{
ask: make(chan []byte),
connId: make(chan int),
response: make(chan error),
},
readList: list.New(),
writeList: list.New(),
flag: false,
// variables for window size
windowSize: params.WindowSize,
mapNeedSend: list.New(),
// variables for epoch
epochChan: make(chan int),
epochMillis: params.EpochMillis,
epochLimit: params.EpochLimit,
// close
deleteClient: make(chan int),
closeConnRequest: &requestCloseConn{
ask: make(chan int),
getError: make(chan error),
},
waitToWriteFinish: false,
writeFinished: make(chan int),
waitToAckFinish: false,
ackFinished: make(chan int),
closeRead: make(chan int, 1),
closeEpoch: make(chan int, 1),
closeEvent: make(chan int, 1),
// close conn
closeConn: make(chan int, 1),
}
// start server
addr, err := lspnet.ResolveUDPAddr("udp", "localhost:"+strconv.Itoa(port))
if err != nil {
return nil, err
}
conn, err := lspnet.ListenUDP("udp", addr)
if err != nil {
return nil, err
}
s.conn = conn
go s.readMessage()
go s.handleMessage()
go s.epochFire()
fmt.Println("new server")
return s, nil
}
func (s *server) epochFire() {
for {
select {
case <-s.closeEpoch:
return
default:
////fmt.Println("Server EpochFire!!!!!!!!!!!!!!!!!!!!!!!!!!")
<-time.After(time.Duration(s.epochMillis) * time.Millisecond)
s.epochChan <- 1
}
}
}
func (s *server) handleMessage() {
for {
select {
case <-s.writeFinished:
if s.writeList.Len() == 0 {
s.writeFinished <- 1
} else {
s.waitToWriteFinish = true
}
case <-s.ackFinished:
//fmt.Println("Server 192: receive ack finished")
s.checkAckFinished()
case msg := <-s.readFromClientChan:
////fmt.Println("Server 162: read from client: ", msg)
s.readFromClient(msg)
case msg := <-s.writeToClientChan:
////fmt.Println("Server 165: write to client: ", msg)
if s.clients[msg.ConnID] == nil {
continue
}
if msg.Type == MsgData {
s.clients[msg.ConnID].mapAck[msg.SeqNum] = false
s.clients[msg.ConnID].mapNeedAck[msg.SeqNum] = msg
// data type: need to consider order
if (msg.SeqNum - s.clients[msg.ConnID].nextSmallestAck) < s.windowSize {
s.writeToClient(msg)
} else {
s.writeList.PushBack(msg)
}
} else {
// ack/conn type: don't need to consider order
s.writeToClient(msg)
}
if s.waitToWriteFinish && s.writeList.Len() == 0 {
s.writeFinished <- 1
s.waitToWriteFinish = false
}
case <-s.readRequest.ask:
s.handleReadRequest()
case payload := <-s.writeRequest.ask:
connId := <-s.writeRequest.connId
var response = s.handleWriteRequest(connId, payload)
s.writeRequest.response <- response
case <-s.epochChan:
s.addEpochNum()
s.handleEpochEvent()
case id := <-s.deleteClient:
delete(s.clients, id)
case id := <-s.closeConnRequest.ask:
if s.clients[id] == nil {
s.closeConnRequest.getError <- errors.New("No client")
}
s.sendDeadMsg(id)
s.closeConnRequest.getError <- nil
}
}
}
func (s *server) checkAckFinished() {
for _, c := range s.clients {
if len(c.mapAck) != 0 {
s.waitToAckFinish = true
//fmt.Println("Server: need to wait ack finish")
return
}
}
s.ackFinished <- 1
}
func (s *server) handleWriteRequest(connID int, payload []byte) error
|
func (s *server) sendData(dataMsg *Message) {
s.writeToClientChan <- dataMsg
}
func (s *server) addEpochNum() {
for connId, c := range s.clients {
c.epochNum += 1
if c.epochNum >= s.epochLimit {
c.lostConn = true
s.sendDeadMsg(connId)
}
}
}
func (s *server) sendDeadMsg(connId int) {
dataMsg := NewData(connId, s.clients[connId].nextSN, nil, nil)
if s.flag {
s.readRequest.response <- dataMsg
s.flag = false
} else {
s.readList.PushBack(dataMsg)
}
}
func (s *server) readData(dataMsg *msgPackage) {
s.readFromClient(dataMsg)
}
func (s *server) handleEpochEvent() {
for connID, c := range s.clients {
/*
* if no data received from the client, then resend ack for connection requestRead
*/
if c.nextSmallestAck == 1 {
ack := NewAck(connID, 0)
go s.sendAck(ack)
}
/*
* sent but not acked, resend the data
*/
for key, received := range c.mapAck {
if !received {
msg := c.mapNeedAck[key]
fmt.Println("Server epoch: ", msg)
s.writeToClient(msg)
}
}
}
}
func (s *server) handleReadRequest() {
msg := s.getMessageFromReadList()
if msg != nil {
//////fmt.Println("101 no message")
/*elm := s.readList.Front()
msg := elm.Value.(*Message)*/
s.readRequest.response <- msg
} else {
s.flag = true
}
return
}
// find next msg that can be send
// return nil if no msg can be send
func (s *server) getMessageFromReadList() *Message {
for e := s.readList.Front(); e != nil; e = e.Next() {
msg := e.Value.(*Message)
if msg.SeqNum == s.clients[msg.ConnID].nextDataRead {
s.clients[msg.ConnID].nextDataRead += 1
s.readList.Remove(e)
return msg
}
}
return nil
}
func (s *server) writeToClient(msg *Message) error {
connId := msg.ConnID
addr := s.clients[connId].addr
mmsg, _ := json.Marshal(msg)
fmt.Println("364 Server write to client: ", msg)
_, err := s.conn.WriteToUDP(mmsg, addr)
if err != nil {
fmt.Println("364: ", err)
}
return err
}
func (s *server) readFromClient(msgPack *msgPackage) {
//////fmt.Println("129 read")
msg := msgPack.msg
addr := msgPack.addr
////fmt.Println("server read: ", msg)
if msg.Type == MsgConnect {
s.connectClient(msg, addr)
} else {
// set epoch number to 0
if s.clients[msg.ConnID] != nil {
s.clients[msg.ConnID].epochNum = 0
}
if msg.Type == MsgData {
if !s.waitToAckFinish {
//fmt.Println("Server read from client: ", msg)
s.receiveData(msg)
}
} else if msg.Type == MsgAck {
fmt.Println("389 Server: receive ack: ", msg)
s.receiveAck(msg)
}
if s.waitToAckFinish {
s.checkAckFinished()
}
}
return
}
func (s *server) alreadyHasClient(addr *lspnet.UDPAddr) bool {
////fmt.Println("server: in already has client")
for _, client := range s.clients {
if client.addr.String() == addr.String() {
return true
}
}
return false
}
func (s *server) receiveData(msg *Message) {
//fmt.Println("Server read: ", msg)
c := s.clients[msg.ConnID]
// check duplicated forst and within window size
/*if c.nextSmallestData+s.windowSize <= msg.SeqNum {
// check within window size, drop it if not
return
} else if c.mapReceived[msg.SeqNum] != nil {
// check duplicated outside of window size
return
}*/
|
{
//////fmt.Println(connID)
client := s.clients[connID]
dataMsg := NewData(connID, client.nextSN, payload, payload)
////fmt.Println("Server: 197 write ", dataMsg)
go s.sendData(dataMsg)
client.nextSN += 1
return nil
}
|
identifier_body
|
server_impl.go
|
: make(chan int),
epochMillis: params.EpochMillis,
epochLimit: params.EpochLimit,
// close
deleteClient: make(chan int),
closeConnRequest: &requestCloseConn{
ask: make(chan int),
getError: make(chan error),
},
waitToWriteFinish: false,
writeFinished: make(chan int),
waitToAckFinish: false,
ackFinished: make(chan int),
closeRead: make(chan int, 1),
closeEpoch: make(chan int, 1),
closeEvent: make(chan int, 1),
// close conn
closeConn: make(chan int, 1),
}
// start server
addr, err := lspnet.ResolveUDPAddr("udp", "localhost:"+strconv.Itoa(port))
if err != nil {
return nil, err
}
conn, err := lspnet.ListenUDP("udp", addr)
if err != nil {
return nil, err
}
s.conn = conn
go s.readMessage()
go s.handleMessage()
go s.epochFire()
fmt.Println("new server")
return s, nil
}
func (s *server) epochFire() {
for {
select {
case <-s.closeEpoch:
return
default:
////fmt.Println("Server EpochFire!!!!!!!!!!!!!!!!!!!!!!!!!!")
<-time.After(time.Duration(s.epochMillis) * time.Millisecond)
s.epochChan <- 1
}
}
}
func (s *server) handleMessage() {
for {
select {
case <-s.writeFinished:
if s.writeList.Len() == 0 {
s.writeFinished <- 1
} else {
s.waitToWriteFinish = true
}
case <-s.ackFinished:
//fmt.Println("Server 192: receive ack finished")
s.checkAckFinished()
case msg := <-s.readFromClientChan:
////fmt.Println("Server 162: read from client: ", msg)
s.readFromClient(msg)
case msg := <-s.writeToClientChan:
////fmt.Println("Server 165: write to client: ", msg)
if s.clients[msg.ConnID] == nil {
continue
}
if msg.Type == MsgData {
s.clients[msg.ConnID].mapAck[msg.SeqNum] = false
s.clients[msg.ConnID].mapNeedAck[msg.SeqNum] = msg
// data type: need to consider order
if (msg.SeqNum - s.clients[msg.ConnID].nextSmallestAck) < s.windowSize {
s.writeToClient(msg)
} else {
s.writeList.PushBack(msg)
}
} else {
// ack/conn type: don't need to consider order
s.writeToClient(msg)
}
if s.waitToWriteFinish && s.writeList.Len() == 0 {
s.writeFinished <- 1
s.waitToWriteFinish = false
}
case <-s.readRequest.ask:
s.handleReadRequest()
case payload := <-s.writeRequest.ask:
connId := <-s.writeRequest.connId
var response = s.handleWriteRequest(connId, payload)
s.writeRequest.response <- response
case <-s.epochChan:
s.addEpochNum()
s.handleEpochEvent()
case id := <-s.deleteClient:
delete(s.clients, id)
case id := <-s.closeConnRequest.ask:
if s.clients[id] == nil {
s.closeConnRequest.getError <- errors.New("No client")
}
s.sendDeadMsg(id)
s.closeConnRequest.getError <- nil
}
}
}
func (s *server) checkAckFinished() {
for _, c := range s.clients {
if len(c.mapAck) != 0 {
s.waitToAckFinish = true
//fmt.Println("Server: need to wait ack finish")
return
}
}
s.ackFinished <- 1
}
func (s *server) handleWriteRequest(connID int, payload []byte) error {
//////fmt.Println(connID)
client := s.clients[connID]
dataMsg := NewData(connID, client.nextSN, payload, payload)
////fmt.Println("Server: 197 write ", dataMsg)
go s.sendData(dataMsg)
client.nextSN += 1
return nil
}
func (s *server) sendData(dataMsg *Message) {
s.writeToClientChan <- dataMsg
}
func (s *server) addEpochNum() {
for connId, c := range s.clients {
c.epochNum += 1
if c.epochNum >= s.epochLimit {
c.lostConn = true
s.sendDeadMsg(connId)
}
}
}
func (s *server) sendDeadMsg(connId int) {
dataMsg := NewData(connId, s.clients[connId].nextSN, nil, nil)
if s.flag {
s.readRequest.response <- dataMsg
s.flag = false
} else {
s.readList.PushBack(dataMsg)
}
}
func (s *server) readData(dataMsg *msgPackage) {
s.readFromClient(dataMsg)
}
func (s *server) handleEpochEvent() {
for connID, c := range s.clients {
/*
* if no data received from the client, then resend ack for connection requestRead
*/
if c.nextSmallestAck == 1 {
ack := NewAck(connID, 0)
go s.sendAck(ack)
}
/*
* sent but not acked, resend the data
*/
for key, received := range c.mapAck {
if !received {
msg := c.mapNeedAck[key]
fmt.Println("Server epoch: ", msg)
s.writeToClient(msg)
}
}
}
}
func (s *server) handleReadRequest() {
msg := s.getMessageFromReadList()
if msg != nil {
//////fmt.Println("101 no message")
/*elm := s.readList.Front()
msg := elm.Value.(*Message)*/
s.readRequest.response <- msg
} else {
s.flag = true
}
return
}
// find next msg that can be send
// return nil if no msg can be send
func (s *server) getMessageFromReadList() *Message {
for e := s.readList.Front(); e != nil; e = e.Next() {
msg := e.Value.(*Message)
if msg.SeqNum == s.clients[msg.ConnID].nextDataRead {
s.clients[msg.ConnID].nextDataRead += 1
s.readList.Remove(e)
return msg
}
}
return nil
}
func (s *server) writeToClient(msg *Message) error {
connId := msg.ConnID
addr := s.clients[connId].addr
mmsg, _ := json.Marshal(msg)
fmt.Println("364 Server write to client: ", msg)
_, err := s.conn.WriteToUDP(mmsg, addr)
if err != nil {
fmt.Println("364: ", err)
}
return err
}
func (s *server) readFromClient(msgPack *msgPackage) {
//////fmt.Println("129 read")
msg := msgPack.msg
addr := msgPack.addr
////fmt.Println("server read: ", msg)
if msg.Type == MsgConnect {
s.connectClient(msg, addr)
} else {
// set epoch number to 0
if s.clients[msg.ConnID] != nil {
s.clients[msg.ConnID].epochNum = 0
}
if msg.Type == MsgData {
if !s.waitToAckFinish {
//fmt.Println("Server read from client: ", msg)
s.receiveData(msg)
}
} else if msg.Type == MsgAck {
fmt.Println("389 Server: receive ack: ", msg)
s.receiveAck(msg)
}
if s.waitToAckFinish {
s.checkAckFinished()
}
}
return
}
func (s *server) alreadyHasClient(addr *lspnet.UDPAddr) bool {
////fmt.Println("server: in already has client")
for _, client := range s.clients {
if client.addr.String() == addr.String() {
return true
}
}
return false
}
func (s *server) receiveData(msg *Message) {
//fmt.Println("Server read: ", msg)
c := s.clients[msg.ConnID]
// check duplicated forst and within window size
/*if c.nextSmallestData+s.windowSize <= msg.SeqNum {
// check within window size, drop it if not
return
} else if c.mapReceived[msg.SeqNum] != nil {
// check duplicated outside of window size
return
}*/
//fmt.Println("pass window size")
// here is no duplicated
// decide should we push it to the readlist or chan
if s.flag && (s.clients[msg.ConnID].nextDataRead == msg.SeqNum) {
s.readRequest.response <- msg
c.nextDataRead += 1
s.flag = false
} else if msg.SeqNum >= c.nextDataRead
|
{
s.readList.PushBack(msg)
}
|
conditional_block
|
|
server_impl.go
|
fmt.Println("Server epoch: ", msg)
s.writeToClient(msg)
}
}
}
}
func (s *server) handleReadRequest() {
msg := s.getMessageFromReadList()
if msg != nil {
//////fmt.Println("101 no message")
/*elm := s.readList.Front()
msg := elm.Value.(*Message)*/
s.readRequest.response <- msg
} else {
s.flag = true
}
return
}
// find next msg that can be send
// return nil if no msg can be send
func (s *server) getMessageFromReadList() *Message {
for e := s.readList.Front(); e != nil; e = e.Next() {
msg := e.Value.(*Message)
if msg.SeqNum == s.clients[msg.ConnID].nextDataRead {
s.clients[msg.ConnID].nextDataRead += 1
s.readList.Remove(e)
return msg
}
}
return nil
}
func (s *server) writeToClient(msg *Message) error {
connId := msg.ConnID
addr := s.clients[connId].addr
mmsg, _ := json.Marshal(msg)
fmt.Println("364 Server write to client: ", msg)
_, err := s.conn.WriteToUDP(mmsg, addr)
if err != nil {
fmt.Println("364: ", err)
}
return err
}
func (s *server) readFromClient(msgPack *msgPackage) {
//////fmt.Println("129 read")
msg := msgPack.msg
addr := msgPack.addr
////fmt.Println("server read: ", msg)
if msg.Type == MsgConnect {
s.connectClient(msg, addr)
} else {
// set epoch number to 0
if s.clients[msg.ConnID] != nil {
s.clients[msg.ConnID].epochNum = 0
}
if msg.Type == MsgData {
if !s.waitToAckFinish {
//fmt.Println("Server read from client: ", msg)
s.receiveData(msg)
}
} else if msg.Type == MsgAck {
fmt.Println("389 Server: receive ack: ", msg)
s.receiveAck(msg)
}
if s.waitToAckFinish {
s.checkAckFinished()
}
}
return
}
func (s *server) alreadyHasClient(addr *lspnet.UDPAddr) bool {
////fmt.Println("server: in already has client")
for _, client := range s.clients {
if client.addr.String() == addr.String() {
return true
}
}
return false
}
func (s *server) receiveData(msg *Message) {
//fmt.Println("Server read: ", msg)
c := s.clients[msg.ConnID]
// check duplicated forst and within window size
/*if c.nextSmallestData+s.windowSize <= msg.SeqNum {
// check within window size, drop it if not
return
} else if c.mapReceived[msg.SeqNum] != nil {
// check duplicated outside of window size
return
}*/
//fmt.Println("pass window size")
// here is no duplicated
// decide should we push it to the readlist or chan
if s.flag && (s.clients[msg.ConnID].nextDataRead == msg.SeqNum) {
s.readRequest.response <- msg
c.nextDataRead += 1
s.flag = false
} else if msg.SeqNum >= c.nextDataRead {
s.readList.PushBack(msg)
}
// create ack message
connID := msg.ConnID
seqNum := msg.SeqNum
ack := NewAck(connID, seqNum)
// check the receive variables in client
// delete the message in mapreceived
if msg.SeqNum == c.nextSmallestData {
c.nextSmallestData += 1
for c.mapReceived[c.nextSmallestData] != nil {
delete(c.mapReceived, c.nextSmallestData)
c.nextSmallestData += 1
}
} else {
c.mapReceived[msg.SeqNum] = msg
}
go s.sendAck(ack)
return
}
func (s *server) sendAck(ack *Message) {
s.writeToClientChan <- ack
}
func (s *server) receiveAck(msg *Message) {
//fmt.Println("server: receiveAck")
c := s.clients[msg.ConnID]
if msg.SeqNum >= c.nextSmallestAck {
c.mapAck[msg.SeqNum] = true
}
if msg.SeqNum == c.nextSmallestAck {
// reset the next smallest ack numebr
value, exist := c.mapAck[c.nextSmallestAck]
for exist && value {
delete(c.mapAck, c.nextSmallestAck)
c.nextSmallestAck += 1
value, exist = c.mapAck[c.nextSmallestAck]
}
var l = *list.New()
for element := s.writeList.Front(); element != nil; element = element.Next() {
message := element.Value.(*Message)
if message.ConnID == msg.ConnID {
if (message.SeqNum - c.nextSmallestAck) < s.windowSize {
s.writeToClient(message)
l.PushBack(element)
}
}
}
////fmt.Println("loop finish go out of write")
for element := l.Front(); element != nil; element = element.Next() {
s.writeList.Remove(element)
}
////fmt.Println("go out of delte dfadfasdf")
if s.waitToWriteFinish && s.writeList.Len() == 0 {
s.writeFinished <- 1
s.waitToWriteFinish = false
}
////fmt.Println("go asdfasdfe dfadfasdf")
}
return
}
func (s *server) connectClient(msg *Message, addr *lspnet.UDPAddr) {
////fmt.Println("Server: connect client ", msg)
// check duplication of connection
if s.alreadyHasClient(addr) {
////fmt.Println("already has client")
return
}
connID := s.nextConnectId
newClient := abstractClient{
addr: addr,
nextSN: 1,
nextDataRead: 1,
// server send msg to client
nextSmallestAck: 1,
mapAck: make(map[int]bool),
mapNeedAck: make(map[int]*Message),
// server receive msg from client
nextSmallestData: 1,
mapReceived: make(map[int]*Message),
// epoch
epochNum: 0,
lostConn: false,
// close
closeConn: false,
}
s.clients[connID] = &newClient
s.nextConnectId += 1
ack := NewAck(connID, 0)
////fmt.Println("Server: send ack to client")
go s.sendAck(ack)
return
}
func (s *server) readMessage() {
var b [1500]byte
//////fmt.Println("Read: read message")
for {
select {
case <-s.closeRead:
return
default:
var msg Message
// unmarshal
n, addr, err := s.conn.ReadFromUDP(b[0:])
err = json.Unmarshal(b[0:n], &msg)
if err == nil {
msgPck := &msgPackage{
msg: &msg,
addr: addr,
}
//fmt.Println("server read: ", &msg)
s.readFromClientChan <- msgPck
} else {
//fmt.Println(err)
return
}
}
}
}
func (s *server) Read() (int, []byte, error) {
s.readRequest.ask <- 1
msg := <-s.readRequest.response
////fmt.Println("Server called read: ", msg)
// check if the client still alive or not
if msg.Payload == nil {
////fmt.Println("client closed when read")
s.deleteClient <- msg.ConnID
//fmt.Println("read ")
return msg.ConnID, nil, errors.New("Client connection lost")
}
// TODO: client lost return connection id
return msg.ConnID, msg.Payload, nil
}
func (s *server) Write(connID int, payload []byte) error {
//fmt.Println("server called write")
/*client := s.clients[connID]
dataMsg := NewData(connID, client.nextSN, payload, payload)
////fmt.Println("Server: 197 write ", dataMsg)
s.writeToClientChan <- dataMsg
client.nextSN += 1*/
//////fmt.Println(s.clients[connID])
if s.clients[connID] == nil {
return errors.New("Client connection lost")
}
s.writeRequest.ask <- payload
s.writeRequest.connId <- connID
response := <-s.writeRequest.response
return response
}
func (s *server) CloseConn(connID int) error {
//fmt.Println("server closeconn!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
s.closeConnRequest.ask <- connID
err := <-s.closeConnRequest.getError
//fmt.Println("server closed finish!!!!!!!!!!!!!!!!!!!!!!")
return err
}
func (s *server)
|
Close
|
identifier_name
|
|
server_impl.go
|
),
},
writeRequest: &requestWrite{
ask: make(chan []byte),
connId: make(chan int),
response: make(chan error),
},
readList: list.New(),
writeList: list.New(),
flag: false,
// variables for window size
windowSize: params.WindowSize,
mapNeedSend: list.New(),
// variables for epoch
epochChan: make(chan int),
epochMillis: params.EpochMillis,
epochLimit: params.EpochLimit,
// close
deleteClient: make(chan int),
closeConnRequest: &requestCloseConn{
ask: make(chan int),
getError: make(chan error),
},
waitToWriteFinish: false,
writeFinished: make(chan int),
waitToAckFinish: false,
ackFinished: make(chan int),
closeRead: make(chan int, 1),
closeEpoch: make(chan int, 1),
closeEvent: make(chan int, 1),
// close conn
closeConn: make(chan int, 1),
}
// start server
addr, err := lspnet.ResolveUDPAddr("udp", "localhost:"+strconv.Itoa(port))
if err != nil {
return nil, err
}
conn, err := lspnet.ListenUDP("udp", addr)
if err != nil {
return nil, err
}
s.conn = conn
go s.readMessage()
go s.handleMessage()
go s.epochFire()
fmt.Println("new server")
return s, nil
}
func (s *server) epochFire() {
for {
select {
case <-s.closeEpoch:
return
default:
////fmt.Println("Server EpochFire!!!!!!!!!!!!!!!!!!!!!!!!!!")
<-time.After(time.Duration(s.epochMillis) * time.Millisecond)
s.epochChan <- 1
}
}
}
func (s *server) handleMessage() {
for {
select {
case <-s.writeFinished:
if s.writeList.Len() == 0 {
s.writeFinished <- 1
} else {
s.waitToWriteFinish = true
}
case <-s.ackFinished:
//fmt.Println("Server 192: receive ack finished")
s.checkAckFinished()
case msg := <-s.readFromClientChan:
////fmt.Println("Server 162: read from client: ", msg)
s.readFromClient(msg)
case msg := <-s.writeToClientChan:
////fmt.Println("Server 165: write to client: ", msg)
if s.clients[msg.ConnID] == nil {
continue
}
if msg.Type == MsgData {
s.clients[msg.ConnID].mapAck[msg.SeqNum] = false
s.clients[msg.ConnID].mapNeedAck[msg.SeqNum] = msg
// data type: need to consider order
if (msg.SeqNum - s.clients[msg.ConnID].nextSmallestAck) < s.windowSize {
s.writeToClient(msg)
} else {
s.writeList.PushBack(msg)
}
} else {
|
if s.waitToWriteFinish && s.writeList.Len() == 0 {
s.writeFinished <- 1
s.waitToWriteFinish = false
}
case <-s.readRequest.ask:
s.handleReadRequest()
case payload := <-s.writeRequest.ask:
connId := <-s.writeRequest.connId
var response = s.handleWriteRequest(connId, payload)
s.writeRequest.response <- response
case <-s.epochChan:
s.addEpochNum()
s.handleEpochEvent()
case id := <-s.deleteClient:
delete(s.clients, id)
case id := <-s.closeConnRequest.ask:
if s.clients[id] == nil {
s.closeConnRequest.getError <- errors.New("No client")
}
s.sendDeadMsg(id)
s.closeConnRequest.getError <- nil
}
}
}
func (s *server) checkAckFinished() {
for _, c := range s.clients {
if len(c.mapAck) != 0 {
s.waitToAckFinish = true
//fmt.Println("Server: need to wait ack finish")
return
}
}
s.ackFinished <- 1
}
func (s *server) handleWriteRequest(connID int, payload []byte) error {
//////fmt.Println(connID)
client := s.clients[connID]
dataMsg := NewData(connID, client.nextSN, payload, payload)
////fmt.Println("Server: 197 write ", dataMsg)
go s.sendData(dataMsg)
client.nextSN += 1
return nil
}
func (s *server) sendData(dataMsg *Message) {
s.writeToClientChan <- dataMsg
}
func (s *server) addEpochNum() {
for connId, c := range s.clients {
c.epochNum += 1
if c.epochNum >= s.epochLimit {
c.lostConn = true
s.sendDeadMsg(connId)
}
}
}
func (s *server) sendDeadMsg(connId int) {
dataMsg := NewData(connId, s.clients[connId].nextSN, nil, nil)
if s.flag {
s.readRequest.response <- dataMsg
s.flag = false
} else {
s.readList.PushBack(dataMsg)
}
}
func (s *server) readData(dataMsg *msgPackage) {
s.readFromClient(dataMsg)
}
func (s *server) handleEpochEvent() {
for connID, c := range s.clients {
/*
* if no data received from the client, then resend ack for connection requestRead
*/
if c.nextSmallestAck == 1 {
ack := NewAck(connID, 0)
go s.sendAck(ack)
}
/*
* sent but not acked, resend the data
*/
for key, received := range c.mapAck {
if !received {
msg := c.mapNeedAck[key]
fmt.Println("Server epoch: ", msg)
s.writeToClient(msg)
}
}
}
}
func (s *server) handleReadRequest() {
msg := s.getMessageFromReadList()
if msg != nil {
//////fmt.Println("101 no message")
/*elm := s.readList.Front()
msg := elm.Value.(*Message)*/
s.readRequest.response <- msg
} else {
s.flag = true
}
return
}
// find next msg that can be send
// return nil if no msg can be send
func (s *server) getMessageFromReadList() *Message {
for e := s.readList.Front(); e != nil; e = e.Next() {
msg := e.Value.(*Message)
if msg.SeqNum == s.clients[msg.ConnID].nextDataRead {
s.clients[msg.ConnID].nextDataRead += 1
s.readList.Remove(e)
return msg
}
}
return nil
}
func (s *server) writeToClient(msg *Message) error {
connId := msg.ConnID
addr := s.clients[connId].addr
mmsg, _ := json.Marshal(msg)
fmt.Println("364 Server write to client: ", msg)
_, err := s.conn.WriteToUDP(mmsg, addr)
if err != nil {
fmt.Println("364: ", err)
}
return err
}
func (s *server) readFromClient(msgPack *msgPackage) {
//////fmt.Println("129 read")
msg := msgPack.msg
addr := msgPack.addr
////fmt.Println("server read: ", msg)
if msg.Type == MsgConnect {
s.connectClient(msg, addr)
} else {
// set epoch number to 0
if s.clients[msg.ConnID] != nil {
s.clients[msg.ConnID].epochNum = 0
}
if msg.Type == MsgData {
if !s.waitToAckFinish {
//fmt.Println("Server read from client: ", msg)
s.receiveData(msg)
}
} else if msg.Type == MsgAck {
fmt.Println("389 Server: receive ack: ", msg)
s.receiveAck(msg)
}
if s.waitToAckFinish {
s.checkAckFinished()
}
}
return
}
func (s *server) alreadyHasClient(addr *lspnet.UDPAddr) bool {
////fmt.Println("server: in already has client")
for _, client := range s.clients {
if client.addr.String() == addr.String() {
return true
}
}
return false
}
func (s *server) receiveData(msg *Message) {
//fmt.Println("Server read: ", msg)
c := s.clients[msg.ConnID]
// check duplicated forst and within window size
/*if c.nextSmallestData+s.windowSize <= msg.SeqNum {
// check within window size, drop it if not
return
} else if c.mapReceived[msg.SeqNum] != nil {
// check duplicated outside of window size
return
}*/
//
|
// ack/conn type: don't need to consider order
s.writeToClient(msg)
}
|
random_line_split
|
simulation.py
|
ride data
DATETIME_FORMAT = '%Y-%m-%d %H:%M'
#Function constants for reporting beggining and ending of rides
RIDE_BEGUN = True
RIDE_FINISHED = False
# Stock issues report
SUCCESSFUL_REPORT = 0
EMPTY_STATION_ISSUE = 1
FULL_STATION_ISSUE = 2
class Simulation:
"""Runs the core of the simulation through time.
=== Attributes ===
all_rides:
A list of all the rides in this simulation.
Note that not all rides might be used, depending on the timeframe
when the simulation is run.
all_stations:
A dictionary containing all the stations in this simulation.
visualizer:
A helper class for visualizing the simulation.
active_rides:
All rides that are active during simulation
"""
all_stations: Dict[str, Station]
all_rides: List[Ride]
visualizer: Visualizer
active_rides: List[Ride]
def __init__(self, station_file: str, ride_file: str) -> None:
"""Initialize this simulation with the given configuration settings.
"""
self.all_stations = create_stations(station_file)
self.all_rides = create_rides(ride_file, self.all_stations)
self.visualizer = Visualizer()
self.active_rides = []
def run(self, start: datetime, end: datetime) -> None:
"""Run the simulation from <start> to <end>.
"""
step = timedelta(minutes=1) # Each iteration spans one minute of time
current_time = start
while current_time <= end:
self._update_active_rides(current_time)
# Method that draws all stations and bikes on the map
self.visualizer.render_drawables(list(self.all_stations.values())\
+ self.active_rides, current_time)
current_time += step
# Leave this code at the very bottom of this method.
# It will keep the visualization window open until you close
# it by pressing the 'X'.
while True:
if self.visualizer.handle_window_events():
return # Stop the simulation
def _update_active_rides(self, time: datetime) -> None:
"""Update this simulation's list of active rides for the given time.
REQUIRED IMPLEMENTATION NOTES:
- Loop through `self.all_rides` and compare each Ride's start and
end times with <time>.
If <time> is between the ride's start and end times (inclusive),
then add the ride to self.active_rides if it isn't already in
that list.
Otherwise, remove the ride from self.active_rides if it is in
that list.
- This means that if a ride started before the simulation's time
period but ends during or after the simulation's time period,
it should still be added to self.active_rides.
"""
for ride in self.all_rides:
# Check whether the current time is between start and end
if time > ride.start_time and time < ride.end_time:
# Check whether it is already in the active rides list
if not ride in self.active_rides:
# Reports that this ride has just begun
# this method will also lead to an update on station's bike
# number.
if ride.report_status(RIDE_BEGUN, time) \
== SUCCESSFUL_REPORT:
# Adds to the active rides list if no anomaly was
# detected
self.active_rides.append(ride)
else:
if ride in self.active_rides:
# Reports that this ride has finished
# this method will also lead to an update on station's bike
# number
ride.report_status(RIDE_FINISHED, time)
# If a finished or not yet beginned ride is in this
# list, removes it.
self.active_rides.remove(ride)
def calculate_statistics(self) -> Dict[str, Tuple[str, float]]:
|
'max_end': ('', -1),
'max_time_low_availability': ('', -1),
'max_time_low_unoccupied': ('', -1)
}
return {
'max_start': ('', -1),
'max_end': ('', -1),
'max_time_low_availability': ('', -1),
'max_time_low_unoccupied': ('', -1)
}
def _update_active_rides_fast(self, time: datetime) -> None:
"""Update this simulation's list of active rides for the given time.
REQUIRED IMPLEMENTATION NOTES:
- see Task 5 of the assignment handout
"""
pass
def create_stations(stations_file: str) -> Dict[str, 'Station']:
"""Return the stations described in the given JSON data file.
Each key in the returned dictionary is a station id,
and each value is the corresponding Station object.
Note that you need to call Station(...) to create these objects!
Precondition: stations_file matches the format specified in the
assignment handout.
This function should be called *before* _read_rides because the
rides CSV file refers to station ids.
"""
# Read in raw data using the json library.
with open(stations_file) as file:
raw_stations = json.load(file)
stations = {}
for s in raw_stations['stations']:
# Extract the relevant fields from the raw station JSON.
# s is a dictionary with the keys 'n', 's', 'la', 'lo', 'da', and 'ba'
# as described in the assignment handout.
# NOTE: all of the corresponding values are strings, and so you need
# to convert some of them to numbers explicitly using int() or float().
# Inputs for Station Class (with keys from dictionary) :
# -Lon/Lat (lo/la)
# -Capacity (da + ba)
# -Number of bikes (da)
# -Name (s)
station = Station((s['lo'], s['la']), s['da'] + s['ba'],\
s['da'], s['s'])
stations[s['n']] = station
return stations
def create_rides(rides_file: str,
stations: Dict[str, 'Station']) -> List['Ride']:
"""Return the rides described in the given CSV file.
Lookup the station ids contained in the rides file in <stations>
to access the corresponding Station objects.
Ignore any ride whose start or end station is not present in <stations>.
Precondition: rides_file matches the format specified in the
assignment handout.
"""
rides = []
with open(rides_file) as file:
for line in csv.reader(file):
# line is a list of strings, following the format described
# in the assignment handout.
#
# Convert between a string and a datetime object
# using the function datetime.strptime and the DATETIME_FORMAT
# constant we defined above. Example:
# >>> datetime.strptime('2017-06-01 8:00', DATETIME_FORMAT)
# datetime.datetime(2017, 6, 1, 8, 0)
# CSV Columns:
# -Start time [0]
# -Start station code [1]
# -End time [2]
# -End station code [3]
# -ignore the rest [4,5]
# Ride class inputs:
# -Start station (Station())
# -End station (Station())
# -Times (Tuple(start,end))
start_station_code = line[1]
end_station_code = line[3]
# First check whether the stations from the ride are present in the
# stations registry
if start_station_code and end_station_code in stations:
start_time = datetime.strptime(line[0], DATETIME_FORMAT)
end_time = datetime.strptime(line[2], DATETIME_FORMAT)
start_station = stations[start_station_code]
end_station = stations[end_station_code]
ride = Ride(start_station, end_station, (start_time, end_time))
rides.append(ride)
return rides
class Event:
"""An event in the bike share simulation.
Events are ordered by their timestamp.
"""
simulation: 'Simulation'
time: datetime
def __init__(self, simulation: 'Simulation', time: datetime) -> None:
"""Initialize a new event."""
self.simulation = simulation
self.time = time
def __lt__(self, other: 'Event') -> bool:
"""
|
"""Return a dictionary containing statistics for this simulation.
The returned dictionary has exactly four keys, corresponding
to the four statistics tracked for each station:
- 'max_start'
- 'max_end'
- 'max_time_low_availability'
- 'max_time_low_unoccupied'
The corresponding value of each key is a tuple of two elements,
where the first element is the name (NOT id) of the station that has
the maximum value of the quantity specified by that key,
and the second element is the value of that quantity.
For example, the value corresponding to key 'max_start' should be the
name of the station with the most number of rides started at that
station, and the number of rides that started at that station.
"""
tempDict = {
'max_start': ('', -1),
|
identifier_body
|
simulation.py
|
ride data
DATETIME_FORMAT = '%Y-%m-%d %H:%M'
#Function constants for reporting beggining and ending of rides
RIDE_BEGUN = True
RIDE_FINISHED = False
# Stock issues report
SUCCESSFUL_REPORT = 0
EMPTY_STATION_ISSUE = 1
FULL_STATION_ISSUE = 2
class
|
:
"""Runs the core of the simulation through time.
=== Attributes ===
all_rides:
A list of all the rides in this simulation.
Note that not all rides might be used, depending on the timeframe
when the simulation is run.
all_stations:
A dictionary containing all the stations in this simulation.
visualizer:
A helper class for visualizing the simulation.
active_rides:
All rides that are active during simulation
"""
all_stations: Dict[str, Station]
all_rides: List[Ride]
visualizer: Visualizer
active_rides: List[Ride]
def __init__(self, station_file: str, ride_file: str) -> None:
"""Initialize this simulation with the given configuration settings.
"""
self.all_stations = create_stations(station_file)
self.all_rides = create_rides(ride_file, self.all_stations)
self.visualizer = Visualizer()
self.active_rides = []
def run(self, start: datetime, end: datetime) -> None:
"""Run the simulation from <start> to <end>.
"""
step = timedelta(minutes=1) # Each iteration spans one minute of time
current_time = start
while current_time <= end:
self._update_active_rides(current_time)
# Method that draws all stations and bikes on the map
self.visualizer.render_drawables(list(self.all_stations.values())\
+ self.active_rides, current_time)
current_time += step
# Leave this code at the very bottom of this method.
# It will keep the visualization window open until you close
# it by pressing the 'X'.
while True:
if self.visualizer.handle_window_events():
return # Stop the simulation
def _update_active_rides(self, time: datetime) -> None:
"""Update this simulation's list of active rides for the given time.
REQUIRED IMPLEMENTATION NOTES:
- Loop through `self.all_rides` and compare each Ride's start and
end times with <time>.
If <time> is between the ride's start and end times (inclusive),
then add the ride to self.active_rides if it isn't already in
that list.
Otherwise, remove the ride from self.active_rides if it is in
that list.
- This means that if a ride started before the simulation's time
period but ends during or after the simulation's time period,
it should still be added to self.active_rides.
"""
for ride in self.all_rides:
# Check whether the current time is between start and end
if time > ride.start_time and time < ride.end_time:
# Check whether it is already in the active rides list
if not ride in self.active_rides:
# Reports that this ride has just begun
# this method will also lead to an update on station's bike
# number.
if ride.report_status(RIDE_BEGUN, time) \
== SUCCESSFUL_REPORT:
# Adds to the active rides list if no anomaly was
# detected
self.active_rides.append(ride)
else:
if ride in self.active_rides:
# Reports that this ride has finished
# this method will also lead to an update on station's bike
# number
ride.report_status(RIDE_FINISHED, time)
# If a finished or not yet beginned ride is in this
# list, removes it.
self.active_rides.remove(ride)
def calculate_statistics(self) -> Dict[str, Tuple[str, float]]:
"""Return a dictionary containing statistics for this simulation.
The returned dictionary has exactly four keys, corresponding
to the four statistics tracked for each station:
- 'max_start'
- 'max_end'
- 'max_time_low_availability'
- 'max_time_low_unoccupied'
The corresponding value of each key is a tuple of two elements,
where the first element is the name (NOT id) of the station that has
the maximum value of the quantity specified by that key,
and the second element is the value of that quantity.
For example, the value corresponding to key 'max_start' should be the
name of the station with the most number of rides started at that
station, and the number of rides that started at that station.
"""
tempDict = {
'max_start': ('', -1),
'max_end': ('', -1),
'max_time_low_availability': ('', -1),
'max_time_low_unoccupied': ('', -1)
}
return {
'max_start': ('', -1),
'max_end': ('', -1),
'max_time_low_availability': ('', -1),
'max_time_low_unoccupied': ('', -1)
}
def _update_active_rides_fast(self, time: datetime) -> None:
"""Update this simulation's list of active rides for the given time.
REQUIRED IMPLEMENTATION NOTES:
- see Task 5 of the assignment handout
"""
pass
def create_stations(stations_file: str) -> Dict[str, 'Station']:
"""Return the stations described in the given JSON data file.
Each key in the returned dictionary is a station id,
and each value is the corresponding Station object.
Note that you need to call Station(...) to create these objects!
Precondition: stations_file matches the format specified in the
assignment handout.
This function should be called *before* _read_rides because the
rides CSV file refers to station ids.
"""
# Read in raw data using the json library.
with open(stations_file) as file:
raw_stations = json.load(file)
stations = {}
for s in raw_stations['stations']:
# Extract the relevant fields from the raw station JSON.
# s is a dictionary with the keys 'n', 's', 'la', 'lo', 'da', and 'ba'
# as described in the assignment handout.
# NOTE: all of the corresponding values are strings, and so you need
# to convert some of them to numbers explicitly using int() or float().
# Inputs for Station Class (with keys from dictionary) :
# -Lon/Lat (lo/la)
# -Capacity (da + ba)
# -Number of bikes (da)
# -Name (s)
station = Station((s['lo'], s['la']), s['da'] + s['ba'],\
s['da'], s['s'])
stations[s['n']] = station
return stations
def create_rides(rides_file: str,
stations: Dict[str, 'Station']) -> List['Ride']:
"""Return the rides described in the given CSV file.
Lookup the station ids contained in the rides file in <stations>
to access the corresponding Station objects.
Ignore any ride whose start or end station is not present in <stations>.
Precondition: rides_file matches the format specified in the
assignment handout.
"""
rides = []
with open(rides_file) as file:
for line in csv.reader(file):
# line is a list of strings, following the format described
# in the assignment handout.
#
# Convert between a string and a datetime object
# using the function datetime.strptime and the DATETIME_FORMAT
# constant we defined above. Example:
# >>> datetime.strptime('2017-06-01 8:00', DATETIME_FORMAT)
# datetime.datetime(2017, 6, 1, 8, 0)
# CSV Columns:
# -Start time [0]
# -Start station code [1]
# -End time [2]
# -End station code [3]
# -ignore the rest [4,5]
# Ride class inputs:
# -Start station (Station())
# -End station (Station())
# -Times (Tuple(start,end))
start_station_code = line[1]
end_station_code = line[3]
# First check whether the stations from the ride are present in the
# stations registry
if start_station_code and end_station_code in stations:
start_time = datetime.strptime(line[0], DATETIME_FORMAT)
end_time = datetime.strptime(line[2], DATETIME_FORMAT)
start_station = stations[start_station_code]
end_station = stations[end_station_code]
ride = Ride(start_station, end_station, (start_time, end_time))
rides.append(ride)
return rides
class Event:
"""An event in the bike share simulation.
Events are ordered by their timestamp.
"""
simulation: 'Simulation'
time: datetime
def __init__(self, simulation: 'Simulation', time: datetime) -> None:
"""Initialize a new event."""
self.simulation = simulation
self.time = time
def __lt__(self, other: 'Event') -> bool:
|
Simulation
|
identifier_name
|
simulation.py
|
ride data
DATETIME_FORMAT = '%Y-%m-%d %H:%M'
#Function constants for reporting beggining and ending of rides
RIDE_BEGUN = True
RIDE_FINISHED = False
# Stock issues report
SUCCESSFUL_REPORT = 0
EMPTY_STATION_ISSUE = 1
FULL_STATION_ISSUE = 2
class Simulation:
"""Runs the core of the simulation through time.
=== Attributes ===
all_rides:
A list of all the rides in this simulation.
Note that not all rides might be used, depending on the timeframe
when the simulation is run.
all_stations:
A dictionary containing all the stations in this simulation.
visualizer:
A helper class for visualizing the simulation.
active_rides:
All rides that are active during simulation
"""
all_stations: Dict[str, Station]
all_rides: List[Ride]
visualizer: Visualizer
active_rides: List[Ride]
def __init__(self, station_file: str, ride_file: str) -> None:
"""Initialize this simulation with the given configuration settings.
"""
self.all_stations = create_stations(station_file)
self.all_rides = create_rides(ride_file, self.all_stations)
self.visualizer = Visualizer()
self.active_rides = []
def run(self, start: datetime, end: datetime) -> None:
"""Run the simulation from <start> to <end>.
"""
step = timedelta(minutes=1) # Each iteration spans one minute of time
current_time = start
while current_time <= end:
self._update_active_rides(current_time)
# Method that draws all stations and bikes on the map
self.visualizer.render_drawables(list(self.all_stations.values())\
+ self.active_rides, current_time)
current_time += step
# Leave this code at the very bottom of this method.
# It will keep the visualization window open until you close
# it by pressing the 'X'.
while True:
if self.visualizer.handle_window_events():
return # Stop the simulation
def _update_active_rides(self, time: datetime) -> None:
"""Update this simulation's list of active rides for the given time.
REQUIRED IMPLEMENTATION NOTES:
- Loop through `self.all_rides` and compare each Ride's start and
end times with <time>.
If <time> is between the ride's start and end times (inclusive),
then add the ride to self.active_rides if it isn't already in
that list.
Otherwise, remove the ride from self.active_rides if it is in
that list.
- This means that if a ride started before the simulation's time
period but ends during or after the simulation's time period,
it should still be added to self.active_rides.
"""
for ride in self.all_rides:
# Check whether the current time is between start and end
if time > ride.start_time and time < ride.end_time:
# Check whether it is already in the active rides list
if not ride in self.active_rides:
# Reports that this ride has just begun
# this method will also lead to an update on station's bike
# number.
if ride.report_status(RIDE_BEGUN, time) \
== SUCCESSFUL_REPORT:
# Adds to the active rides list if no anomaly was
# detected
self.active_rides.append(ride)
else:
if ride in self.active_rides:
# Reports that this ride has finished
# this method will also lead to an update on station's bike
# number
ride.report_status(RIDE_FINISHED, time)
# If a finished or not yet beginned ride is in this
# list, removes it.
self.active_rides.remove(ride)
def calculate_statistics(self) -> Dict[str, Tuple[str, float]]:
"""Return a dictionary containing statistics for this simulation.
The returned dictionary has exactly four keys, corresponding
to the four statistics tracked for each station:
- 'max_start'
- 'max_end'
- 'max_time_low_availability'
- 'max_time_low_unoccupied'
The corresponding value of each key is a tuple of two elements,
where the first element is the name (NOT id) of the station that has
the maximum value of the quantity specified by that key,
and the second element is the value of that quantity.
For example, the value corresponding to key 'max_start' should be the
name of the station with the most number of rides started at that
station, and the number of rides that started at that station.
"""
tempDict = {
'max_start': ('', -1),
'max_end': ('', -1),
'max_time_low_availability': ('', -1),
'max_time_low_unoccupied': ('', -1)
}
return {
|
'max_end': ('', -1),
'max_time_low_availability': ('', -1),
'max_time_low_unoccupied': ('', -1)
}
def _update_active_rides_fast(self, time: datetime) -> None:
"""Update this simulation's list of active rides for the given time.
REQUIRED IMPLEMENTATION NOTES:
- see Task 5 of the assignment handout
"""
pass
def create_stations(stations_file: str) -> Dict[str, 'Station']:
"""Return the stations described in the given JSON data file.
Each key in the returned dictionary is a station id,
and each value is the corresponding Station object.
Note that you need to call Station(...) to create these objects!
Precondition: stations_file matches the format specified in the
assignment handout.
This function should be called *before* _read_rides because the
rides CSV file refers to station ids.
"""
# Read in raw data using the json library.
with open(stations_file) as file:
raw_stations = json.load(file)
stations = {}
for s in raw_stations['stations']:
# Extract the relevant fields from the raw station JSON.
# s is a dictionary with the keys 'n', 's', 'la', 'lo', 'da', and 'ba'
# as described in the assignment handout.
# NOTE: all of the corresponding values are strings, and so you need
# to convert some of them to numbers explicitly using int() or float().
# Inputs for Station Class (with keys from dictionary) :
# -Lon/Lat (lo/la)
# -Capacity (da + ba)
# -Number of bikes (da)
# -Name (s)
station = Station((s['lo'], s['la']), s['da'] + s['ba'],\
s['da'], s['s'])
stations[s['n']] = station
return stations
def create_rides(rides_file: str,
stations: Dict[str, 'Station']) -> List['Ride']:
"""Return the rides described in the given CSV file.
Lookup the station ids contained in the rides file in <stations>
to access the corresponding Station objects.
Ignore any ride whose start or end station is not present in <stations>.
Precondition: rides_file matches the format specified in the
assignment handout.
"""
rides = []
with open(rides_file) as file:
for line in csv.reader(file):
# line is a list of strings, following the format described
# in the assignment handout.
#
# Convert between a string and a datetime object
# using the function datetime.strptime and the DATETIME_FORMAT
# constant we defined above. Example:
# >>> datetime.strptime('2017-06-01 8:00', DATETIME_FORMAT)
# datetime.datetime(2017, 6, 1, 8, 0)
# CSV Columns:
# -Start time [0]
# -Start station code [1]
# -End time [2]
# -End station code [3]
# -ignore the rest [4,5]
# Ride class inputs:
# -Start station (Station())
# -End station (Station())
# -Times (Tuple(start,end))
start_station_code = line[1]
end_station_code = line[3]
# First check whether the stations from the ride are present in the
# stations registry
if start_station_code and end_station_code in stations:
start_time = datetime.strptime(line[0], DATETIME_FORMAT)
end_time = datetime.strptime(line[2], DATETIME_FORMAT)
start_station = stations[start_station_code]
end_station = stations[end_station_code]
ride = Ride(start_station, end_station, (start_time, end_time))
rides.append(ride)
return rides
class Event:
"""An event in the bike share simulation.
Events are ordered by their timestamp.
"""
simulation: 'Simulation'
time: datetime
def __init__(self, simulation: 'Simulation', time: datetime) -> None:
"""Initialize a new event."""
self.simulation = simulation
self.time = time
def __lt__(self, other: 'Event') -> bool:
"""
|
'max_start': ('', -1),
|
random_line_split
|
simulation.py
|
ride data
DATETIME_FORMAT = '%Y-%m-%d %H:%M'
#Function constants for reporting beggining and ending of rides
RIDE_BEGUN = True
RIDE_FINISHED = False
# Stock issues report
SUCCESSFUL_REPORT = 0
EMPTY_STATION_ISSUE = 1
FULL_STATION_ISSUE = 2
class Simulation:
"""Runs the core of the simulation through time.
=== Attributes ===
all_rides:
A list of all the rides in this simulation.
Note that not all rides might be used, depending on the timeframe
when the simulation is run.
all_stations:
A dictionary containing all the stations in this simulation.
visualizer:
A helper class for visualizing the simulation.
active_rides:
All rides that are active during simulation
"""
all_stations: Dict[str, Station]
all_rides: List[Ride]
visualizer: Visualizer
active_rides: List[Ride]
def __init__(self, station_file: str, ride_file: str) -> None:
"""Initialize this simulation with the given configuration settings.
"""
self.all_stations = create_stations(station_file)
self.all_rides = create_rides(ride_file, self.all_stations)
self.visualizer = Visualizer()
self.active_rides = []
def run(self, start: datetime, end: datetime) -> None:
"""Run the simulation from <start> to <end>.
"""
step = timedelta(minutes=1) # Each iteration spans one minute of time
current_time = start
while current_time <= end:
self._update_active_rides(current_time)
# Method that draws all stations and bikes on the map
self.visualizer.render_drawables(list(self.all_stations.values())\
+ self.active_rides, current_time)
current_time += step
# Leave this code at the very bottom of this method.
# It will keep the visualization window open until you close
# it by pressing the 'X'.
while True:
|
def _update_active_rides(self, time: datetime) -> None:
"""Update this simulation's list of active rides for the given time.
REQUIRED IMPLEMENTATION NOTES:
- Loop through `self.all_rides` and compare each Ride's start and
end times with <time>.
If <time> is between the ride's start and end times (inclusive),
then add the ride to self.active_rides if it isn't already in
that list.
Otherwise, remove the ride from self.active_rides if it is in
that list.
- This means that if a ride started before the simulation's time
period but ends during or after the simulation's time period,
it should still be added to self.active_rides.
"""
for ride in self.all_rides:
# Check whether the current time is between start and end
if time > ride.start_time and time < ride.end_time:
# Check whether it is already in the active rides list
if not ride in self.active_rides:
# Reports that this ride has just begun
# this method will also lead to an update on station's bike
# number.
if ride.report_status(RIDE_BEGUN, time) \
== SUCCESSFUL_REPORT:
# Adds to the active rides list if no anomaly was
# detected
self.active_rides.append(ride)
else:
if ride in self.active_rides:
# Reports that this ride has finished
# this method will also lead to an update on station's bike
# number
ride.report_status(RIDE_FINISHED, time)
# If a finished or not yet beginned ride is in this
# list, removes it.
self.active_rides.remove(ride)
def calculate_statistics(self) -> Dict[str, Tuple[str, float]]:
"""Return a dictionary containing statistics for this simulation.
The returned dictionary has exactly four keys, corresponding
to the four statistics tracked for each station:
- 'max_start'
- 'max_end'
- 'max_time_low_availability'
- 'max_time_low_unoccupied'
The corresponding value of each key is a tuple of two elements,
where the first element is the name (NOT id) of the station that has
the maximum value of the quantity specified by that key,
and the second element is the value of that quantity.
For example, the value corresponding to key 'max_start' should be the
name of the station with the most number of rides started at that
station, and the number of rides that started at that station.
"""
tempDict = {
'max_start': ('', -1),
'max_end': ('', -1),
'max_time_low_availability': ('', -1),
'max_time_low_unoccupied': ('', -1)
}
return {
'max_start': ('', -1),
'max_end': ('', -1),
'max_time_low_availability': ('', -1),
'max_time_low_unoccupied': ('', -1)
}
def _update_active_rides_fast(self, time: datetime) -> None:
"""Update this simulation's list of active rides for the given time.
REQUIRED IMPLEMENTATION NOTES:
- see Task 5 of the assignment handout
"""
pass
def create_stations(stations_file: str) -> Dict[str, 'Station']:
"""Return the stations described in the given JSON data file.
Each key in the returned dictionary is a station id,
and each value is the corresponding Station object.
Note that you need to call Station(...) to create these objects!
Precondition: stations_file matches the format specified in the
assignment handout.
This function should be called *before* _read_rides because the
rides CSV file refers to station ids.
"""
# Read in raw data using the json library.
with open(stations_file) as file:
raw_stations = json.load(file)
stations = {}
for s in raw_stations['stations']:
# Extract the relevant fields from the raw station JSON.
# s is a dictionary with the keys 'n', 's', 'la', 'lo', 'da', and 'ba'
# as described in the assignment handout.
# NOTE: all of the corresponding values are strings, and so you need
# to convert some of them to numbers explicitly using int() or float().
# Inputs for Station Class (with keys from dictionary) :
# -Lon/Lat (lo/la)
# -Capacity (da + ba)
# -Number of bikes (da)
# -Name (s)
station = Station((s['lo'], s['la']), s['da'] + s['ba'],\
s['da'], s['s'])
stations[s['n']] = station
return stations
def create_rides(rides_file: str,
stations: Dict[str, 'Station']) -> List['Ride']:
"""Return the rides described in the given CSV file.
Lookup the station ids contained in the rides file in <stations>
to access the corresponding Station objects.
Ignore any ride whose start or end station is not present in <stations>.
Precondition: rides_file matches the format specified in the
assignment handout.
"""
rides = []
with open(rides_file) as file:
for line in csv.reader(file):
# line is a list of strings, following the format described
# in the assignment handout.
#
# Convert between a string and a datetime object
# using the function datetime.strptime and the DATETIME_FORMAT
# constant we defined above. Example:
# >>> datetime.strptime('2017-06-01 8:00', DATETIME_FORMAT)
# datetime.datetime(2017, 6, 1, 8, 0)
# CSV Columns:
# -Start time [0]
# -Start station code [1]
# -End time [2]
# -End station code [3]
# -ignore the rest [4,5]
# Ride class inputs:
# -Start station (Station())
# -End station (Station())
# -Times (Tuple(start,end))
start_station_code = line[1]
end_station_code = line[3]
# First check whether the stations from the ride are present in the
# stations registry
if start_station_code and end_station_code in stations:
start_time = datetime.strptime(line[0], DATETIME_FORMAT)
end_time = datetime.strptime(line[2], DATETIME_FORMAT)
start_station = stations[start_station_code]
end_station = stations[end_station_code]
ride = Ride(start_station, end_station, (start_time, end_time))
rides.append(ride)
return rides
class Event:
"""An event in the bike share simulation.
Events are ordered by their timestamp.
"""
simulation: 'Simulation'
time: datetime
def __init__(self, simulation: 'Simulation', time: datetime) -> None:
"""Initialize a new event."""
self.simulation = simulation
self.time = time
def __lt__(self, other: 'Event') -> bool:
"""
|
if self.visualizer.handle_window_events():
return # Stop the simulation
|
conditional_block
|
conn.rs
|
Sender, UnboundedReceiver, UnboundedSender};
use tokio_core::net::TcpStream;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::{read_exact, write_all};
use tokio_timer::{Timer, TimerError};
use core::core::hash::Hash;
use core::ser;
use msg::*;
use types::Error;
use rate_limit::*;
use util::LOGGER;
/// Handler to provide to the connection, will be called back anytime a message
/// is received. The provided sender can be use to immediately send back
/// another message.
pub trait Handler: Sync + Send {
/// Handle function to implement to process incoming messages. A sender to
/// reply immediately as well as the message header and its unparsed body
/// are provided.
fn handle(
&self,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
body: Vec<u8>,
) -> Result<Option<Hash>, ser::Error>;
}
impl<F> Handler for F
where
F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>)
-> Result<Option<Hash>, ser::Error>,
F: Sync + Send,
{
fn handle(
&self,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
body: Vec<u8>,
) -> Result<Option<Hash>, ser::Error> {
self(sender, header, body)
}
}
/// A higher level connection wrapping the TcpStream. Maintains the amount of
/// data transmitted and deals with the low-level task of sending and
/// receiving data, parsing message headers and timeouts.
#[allow(dead_code)]
pub struct Connection {
// Channel to push bytes to the remote peer
outbound_chan: UnboundedSender<Vec<u8>>,
// Close the connection with the remote peer
close_chan: Sender<()>,
// Bytes we've sent.
sent_bytes: Arc<Mutex<u64>>,
// Bytes we've received.
received_bytes: Arc<Mutex<u64>>,
// Counter for read errors.
error_count: Mutex<u64>,
}
impl Connection {
/// Start listening on the provided connection and wraps it. Does not hang
/// the current thread, instead just returns a future and the Connection
/// itself.
pub fn listen<F>(
conn: TcpStream,
handler: F,
) -> (Connection, Box<Future<Item = (), Error = Error>>)
where
F: Handler + 'static,
{
let (reader, writer) = conn.split();
// Set Max Read to 12 Mb/s
let reader = ThrottledReader::new(reader, 12_000_000);
// Set Max Write to 12 Mb/s
let writer = ThrottledWriter::new(writer, 12_000_000);
// prepare the channel that will transmit data to the connection writer
let (tx, rx) = futures::sync::mpsc::unbounded();
// same for closing the connection
let (close_tx, close_rx) = futures::sync::mpsc::channel(1);
let close_conn = close_rx
.for_each(|_| Ok(()))
.map_err(|_| Error::ConnectionClose);
let me = Connection {
outbound_chan: tx.clone(),
close_chan: close_tx,
sent_bytes: Arc::new(Mutex::new(0)),
received_bytes: Arc::new(Mutex::new(0)),
error_count: Mutex::new(0),
};
// setup the reading future, getting messages from the peer and processing them
let read_msg = me.read_msg(tx, reader, handler).map(|_| ());
// setting the writing future, getting messages from our system and sending
// them out
let write_msg = me.write_msg(rx, writer).map(|_| ());
// select between our different futures and return them
let fut = Box::new(
close_conn
.select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e))
.map(|_| ())
.map_err(|(e, _)| e),
);
(me, fut)
}
/// Prepares the future that gets message data produced by our system and
/// sends it to the peer connection
fn write_msg<W>(
&self,
rx: UnboundedReceiver<Vec<u8>>,
writer: W,
) -> Box<Future<Item = W, Error = Error>>
where
W: AsyncWrite + 'static,
{
let sent_bytes = self.sent_bytes.clone();
let send_data = rx
.map_err(|_| Error::ConnectionClose)
.map(move |data| {
// add the count of bytes sent
let mut sent_bytes = sent_bytes.lock().unwrap();
*sent_bytes += data.len() as u64;
data
})
// write the data and make sure the future returns the right types
.fold(writer, |writer, data| {
write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, _)| writer)
});
Box::new(send_data)
}
/// Prepares the future reading from the peer connection, parsing each
/// message and forwarding them appropriately based on their type
fn read_msg<F, R>(
&self,
sender: UnboundedSender<Vec<u8>>,
reader: R,
handler: F,
) -> Box<Future<Item = R, Error = Error>>
where
F: Handler + 'static,
R: AsyncRead + 'static,
{
// infinite iterator stream so we repeat the message reading logic until the
// peer is stopped
let iter = stream::iter_ok(iter::repeat(()).map(Ok::<(), Error>));
// setup the reading future, getting messages from the peer and processing them
let recv_bytes = self.received_bytes.clone();
let handler = Arc::new(handler);
let read_msg = iter.fold(reader, move |reader, _| {
let recv_bytes = recv_bytes.clone();
let handler = handler.clone();
let sender_inner = sender.clone();
// first read the message header
read_exact(reader, vec![0u8; HEADER_LEN as usize])
.from_err()
.and_then(move |(reader, buf)| {
let header = try!(ser::deserialize::<MsgHeader>(&mut &buf[..]));
Ok((reader, header))
})
.and_then(move |(reader, header)| {
// now that we have a size, proceed with the body
read_exact(reader, vec![0u8; header.msg_len as usize])
.map(|(reader, buf)| (reader, header, buf))
.from_err()
})
.and_then(move |(reader, header, buf)| {
// add the count of bytes received
let mut recv_bytes = recv_bytes.lock().unwrap();
*recv_bytes += header.serialized_len() + header.msg_len;
// and handle the different message types
let msg_type = header.msg_type;
if let Err(e) = handler.handle(sender_inner.clone(), header, buf) {
debug!(LOGGER, "Invalid {:?} message: {}", msg_type, e);
return Err(Error::Serialization(e));
}
Ok(reader)
})
});
Box::new(read_msg)
}
/// Utility function to send any Writeable. Handles adding the header and
/// serialization.
pub fn
|
<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> {
let mut body_data = vec![];
try!(ser::serialize(&mut body_data, body));
let mut data = vec![];
try!(ser::serialize(
&mut data,
&MsgHeader::new(t, body_data.len() as u64),
));
data.append(&mut body_data);
self.outbound_chan
.unbounded_send(data)
.map_err(|_| Error::ConnectionClose)
}
/// Bytes sent and received by this peer to the remote peer.
pub fn transmitted_bytes(&self) -> (u64, u64) {
let sent = *self.sent_bytes.lock().unwrap();
let recv = *self.received_bytes.lock().unwrap();
(sent, recv)
}
}
/// Connection wrapper that handles a request/response oriented interaction with
/// a timeout.
pub struct TimeoutConnection {
underlying: Connection,
expected_responses: Arc<Mutex<Vec<(Type, Option<Hash>, Instant)>>>,
}
impl TimeoutConnection {
/// Same as Connection
pub fn listen<F>(
conn: TcpStream,
handler: F,
) -> (TimeoutConnection, Box<Future<Item = (), Error = Error>>)
where
F: Handler + 'static,
{
let expects = Arc::new(Mutex::new(vec![]));
// Decorates the handler to remove the "subscription" from the expected
// responses. We got our replies, so no timeout should occur.
let exp = expects.clone();
let (conn, fut) = Connection::listen(conn, move |sender, header: MsgHeader, data| {
let msg_type = header.msg_type;
let recv_h = try!(handler.handle(sender
|
send_msg
|
identifier_name
|
conn.rs
|
Sender, UnboundedReceiver, UnboundedSender};
use tokio_core::net::TcpStream;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::{read_exact, write_all};
use tokio_timer::{Timer, TimerError};
use core::core::hash::Hash;
use core::ser;
use msg::*;
use types::Error;
use rate_limit::*;
use util::LOGGER;
/// Handler to provide to the connection, will be called back anytime a message
/// is received. The provided sender can be use to immediately send back
/// another message.
pub trait Handler: Sync + Send {
/// Handle function to implement to process incoming messages. A sender to
/// reply immediately as well as the message header and its unparsed body
/// are provided.
fn handle(
&self,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
body: Vec<u8>,
) -> Result<Option<Hash>, ser::Error>;
}
impl<F> Handler for F
where
F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>)
-> Result<Option<Hash>, ser::Error>,
F: Sync + Send,
{
fn handle(
&self,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
body: Vec<u8>,
) -> Result<Option<Hash>, ser::Error> {
self(sender, header, body)
}
}
/// A higher level connection wrapping the TcpStream. Maintains the amount of
/// data transmitted and deals with the low-level task of sending and
/// receiving data, parsing message headers and timeouts.
#[allow(dead_code)]
pub struct Connection {
// Channel to push bytes to the remote peer
outbound_chan: UnboundedSender<Vec<u8>>,
// Close the connection with the remote peer
close_chan: Sender<()>,
// Bytes we've sent.
sent_bytes: Arc<Mutex<u64>>,
// Bytes we've received.
received_bytes: Arc<Mutex<u64>>,
// Counter for read errors.
error_count: Mutex<u64>,
}
impl Connection {
/// Start listening on the provided connection and wraps it. Does not hang
/// the current thread, instead just returns a future and the Connection
/// itself.
pub fn listen<F>(
conn: TcpStream,
handler: F,
) -> (Connection, Box<Future<Item = (), Error = Error>>)
where
F: Handler + 'static,
{
let (reader, writer) = conn.split();
// Set Max Read to 12 Mb/s
let reader = ThrottledReader::new(reader, 12_000_000);
// Set Max Write to 12 Mb/s
let writer = ThrottledWriter::new(writer, 12_000_000);
// prepare the channel that will transmit data to the connection writer
let (tx, rx) = futures::sync::mpsc::unbounded();
// same for closing the connection
let (close_tx, close_rx) = futures::sync::mpsc::channel(1);
let close_conn = close_rx
.for_each(|_| Ok(()))
.map_err(|_| Error::ConnectionClose);
let me = Connection {
outbound_chan: tx.clone(),
close_chan: close_tx,
sent_bytes: Arc::new(Mutex::new(0)),
received_bytes: Arc::new(Mutex::new(0)),
error_count: Mutex::new(0),
};
// setup the reading future, getting messages from the peer and processing them
let read_msg = me.read_msg(tx, reader, handler).map(|_| ());
// setting the writing future, getting messages from our system and sending
// them out
let write_msg = me.write_msg(rx, writer).map(|_| ());
// select between our different futures and return them
let fut = Box::new(
close_conn
.select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e))
.map(|_| ())
.map_err(|(e, _)| e),
);
(me, fut)
}
/// Prepares the future that gets message data produced by our system and
/// sends it to the peer connection
fn write_msg<W>(
&self,
rx: UnboundedReceiver<Vec<u8>>,
writer: W,
) -> Box<Future<Item = W, Error = Error>>
where
W: AsyncWrite + 'static,
{
let sent_bytes = self.sent_bytes.clone();
let send_data = rx
.map_err(|_| Error::ConnectionClose)
.map(move |data| {
// add the count of bytes sent
let mut sent_bytes = sent_bytes.lock().unwrap();
*sent_bytes += data.len() as u64;
data
})
// write the data and make sure the future returns the right types
.fold(writer, |writer, data| {
write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, _)| writer)
});
Box::new(send_data)
}
/// Prepares the future reading from the peer connection, parsing each
/// message and forwarding them appropriately based on their type
fn read_msg<F, R>(
&self,
sender: UnboundedSender<Vec<u8>>,
reader: R,
handler: F,
) -> Box<Future<Item = R, Error = Error>>
where
F: Handler + 'static,
R: AsyncRead + 'static,
{
// infinite iterator stream so we repeat the message reading logic until the
// peer is stopped
let iter = stream::iter_ok(iter::repeat(()).map(Ok::<(), Error>));
// setup the reading future, getting messages from the peer and processing them
let recv_bytes = self.received_bytes.clone();
let handler = Arc::new(handler);
let read_msg = iter.fold(reader, move |reader, _| {
let recv_bytes = recv_bytes.clone();
let handler = handler.clone();
let sender_inner = sender.clone();
// first read the message header
read_exact(reader, vec![0u8; HEADER_LEN as usize])
.from_err()
.and_then(move |(reader, buf)| {
let header = try!(ser::deserialize::<MsgHeader>(&mut &buf[..]));
Ok((reader, header))
})
.and_then(move |(reader, header)| {
// now that we have a size, proceed with the body
read_exact(reader, vec![0u8; header.msg_len as usize])
.map(|(reader, buf)| (reader, header, buf))
.from_err()
})
.and_then(move |(reader, header, buf)| {
// add the count of bytes received
let mut recv_bytes = recv_bytes.lock().unwrap();
*recv_bytes += header.serialized_len() + header.msg_len;
// and handle the different message types
let msg_type = header.msg_type;
if let Err(e) = handler.handle(sender_inner.clone(), header, buf) {
debug!(LOGGER, "Invalid {:?} message: {}", msg_type, e);
return Err(Error::Serialization(e));
}
Ok(reader)
})
});
Box::new(read_msg)
}
/// Utility function to send any Writeable. Handles adding the header and
/// serialization.
pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> {
let mut body_data = vec![];
try!(ser::serialize(&mut body_data, body));
let mut data = vec![];
try!(ser::serialize(
&mut data,
&MsgHeader::new(t, body_data.len() as u64),
));
data.append(&mut body_data);
self.outbound_chan
.unbounded_send(data)
.map_err(|_| Error::ConnectionClose)
}
/// Bytes sent and received by this peer to the remote peer.
pub fn transmitted_bytes(&self) -> (u64, u64) {
let sent = *self.sent_bytes.lock().unwrap();
let recv = *self.received_bytes.lock().unwrap();
(sent, recv)
}
}
/// Connection wrapper that handles a request/response oriented interaction with
/// a timeout.
pub struct TimeoutConnection {
underlying: Connection,
expected_responses: Arc<Mutex<Vec<(Type, Option<Hash>, Instant)>>>,
}
impl TimeoutConnection {
/// Same as Connection
pub fn listen<F>(
conn: TcpStream,
handler: F,
) -> (TimeoutConnection, Box<Future<Item = (), Error = Error>>)
where
F: Handler + 'static,
{
let expects = Arc::new(Mutex::new(vec![]));
|
let (conn, fut) = Connection::listen(conn, move |sender, header: MsgHeader, data| {
let msg_type = header.msg_type;
let recv_h = try!(handler.handle(sender,
|
// Decorates the handler to remove the "subscription" from the expected
// responses. We got our replies, so no timeout should occur.
let exp = expects.clone();
|
random_line_split
|
conn.rs
|
, UnboundedReceiver, UnboundedSender};
use tokio_core::net::TcpStream;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::{read_exact, write_all};
use tokio_timer::{Timer, TimerError};
use core::core::hash::Hash;
use core::ser;
use msg::*;
use types::Error;
use rate_limit::*;
use util::LOGGER;
/// Handler to provide to the connection, will be called back anytime a message
/// is received. The provided sender can be use to immediately send back
/// another message.
pub trait Handler: Sync + Send {
/// Handle function to implement to process incoming messages. A sender to
/// reply immediately as well as the message header and its unparsed body
/// are provided.
fn handle(
&self,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
body: Vec<u8>,
) -> Result<Option<Hash>, ser::Error>;
}
impl<F> Handler for F
where
F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>)
-> Result<Option<Hash>, ser::Error>,
F: Sync + Send,
{
fn handle(
&self,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
body: Vec<u8>,
) -> Result<Option<Hash>, ser::Error>
|
}
/// A higher level connection wrapping the TcpStream. Maintains the amount of
/// data transmitted and deals with the low-level task of sending and
/// receiving data, parsing message headers and timeouts.
#[allow(dead_code)]
pub struct Connection {
// Channel to push bytes to the remote peer
outbound_chan: UnboundedSender<Vec<u8>>,
// Close the connection with the remote peer
close_chan: Sender<()>,
// Bytes we've sent.
sent_bytes: Arc<Mutex<u64>>,
// Bytes we've received.
received_bytes: Arc<Mutex<u64>>,
// Counter for read errors.
error_count: Mutex<u64>,
}
impl Connection {
/// Start listening on the provided connection and wraps it. Does not hang
/// the current thread, instead just returns a future and the Connection
/// itself.
pub fn listen<F>(
conn: TcpStream,
handler: F,
) -> (Connection, Box<Future<Item = (), Error = Error>>)
where
F: Handler + 'static,
{
let (reader, writer) = conn.split();
// Set Max Read to 12 Mb/s
let reader = ThrottledReader::new(reader, 12_000_000);
// Set Max Write to 12 Mb/s
let writer = ThrottledWriter::new(writer, 12_000_000);
// prepare the channel that will transmit data to the connection writer
let (tx, rx) = futures::sync::mpsc::unbounded();
// same for closing the connection
let (close_tx, close_rx) = futures::sync::mpsc::channel(1);
let close_conn = close_rx
.for_each(|_| Ok(()))
.map_err(|_| Error::ConnectionClose);
let me = Connection {
outbound_chan: tx.clone(),
close_chan: close_tx,
sent_bytes: Arc::new(Mutex::new(0)),
received_bytes: Arc::new(Mutex::new(0)),
error_count: Mutex::new(0),
};
// setup the reading future, getting messages from the peer and processing them
let read_msg = me.read_msg(tx, reader, handler).map(|_| ());
// setting the writing future, getting messages from our system and sending
// them out
let write_msg = me.write_msg(rx, writer).map(|_| ());
// select between our different futures and return them
let fut = Box::new(
close_conn
.select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e))
.map(|_| ())
.map_err(|(e, _)| e),
);
(me, fut)
}
/// Prepares the future that gets message data produced by our system and
/// sends it to the peer connection
fn write_msg<W>(
&self,
rx: UnboundedReceiver<Vec<u8>>,
writer: W,
) -> Box<Future<Item = W, Error = Error>>
where
W: AsyncWrite + 'static,
{
let sent_bytes = self.sent_bytes.clone();
let send_data = rx
.map_err(|_| Error::ConnectionClose)
.map(move |data| {
// add the count of bytes sent
let mut sent_bytes = sent_bytes.lock().unwrap();
*sent_bytes += data.len() as u64;
data
})
// write the data and make sure the future returns the right types
.fold(writer, |writer, data| {
write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, _)| writer)
});
Box::new(send_data)
}
/// Prepares the future reading from the peer connection, parsing each
/// message and forwarding them appropriately based on their type
fn read_msg<F, R>(
&self,
sender: UnboundedSender<Vec<u8>>,
reader: R,
handler: F,
) -> Box<Future<Item = R, Error = Error>>
where
F: Handler + 'static,
R: AsyncRead + 'static,
{
// infinite iterator stream so we repeat the message reading logic until the
// peer is stopped
let iter = stream::iter_ok(iter::repeat(()).map(Ok::<(), Error>));
// setup the reading future, getting messages from the peer and processing them
let recv_bytes = self.received_bytes.clone();
let handler = Arc::new(handler);
let read_msg = iter.fold(reader, move |reader, _| {
let recv_bytes = recv_bytes.clone();
let handler = handler.clone();
let sender_inner = sender.clone();
// first read the message header
read_exact(reader, vec![0u8; HEADER_LEN as usize])
.from_err()
.and_then(move |(reader, buf)| {
let header = try!(ser::deserialize::<MsgHeader>(&mut &buf[..]));
Ok((reader, header))
})
.and_then(move |(reader, header)| {
// now that we have a size, proceed with the body
read_exact(reader, vec![0u8; header.msg_len as usize])
.map(|(reader, buf)| (reader, header, buf))
.from_err()
})
.and_then(move |(reader, header, buf)| {
// add the count of bytes received
let mut recv_bytes = recv_bytes.lock().unwrap();
*recv_bytes += header.serialized_len() + header.msg_len;
// and handle the different message types
let msg_type = header.msg_type;
if let Err(e) = handler.handle(sender_inner.clone(), header, buf) {
debug!(LOGGER, "Invalid {:?} message: {}", msg_type, e);
return Err(Error::Serialization(e));
}
Ok(reader)
})
});
Box::new(read_msg)
}
/// Utility function to send any Writeable. Handles adding the header and
/// serialization.
pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> {
let mut body_data = vec![];
try!(ser::serialize(&mut body_data, body));
let mut data = vec![];
try!(ser::serialize(
&mut data,
&MsgHeader::new(t, body_data.len() as u64),
));
data.append(&mut body_data);
self.outbound_chan
.unbounded_send(data)
.map_err(|_| Error::ConnectionClose)
}
/// Bytes sent and received by this peer to the remote peer.
pub fn transmitted_bytes(&self) -> (u64, u64) {
let sent = *self.sent_bytes.lock().unwrap();
let recv = *self.received_bytes.lock().unwrap();
(sent, recv)
}
}
/// Connection wrapper that handles a request/response oriented interaction with
/// a timeout.
pub struct TimeoutConnection {
underlying: Connection,
expected_responses: Arc<Mutex<Vec<(Type, Option<Hash>, Instant)>>>,
}
impl TimeoutConnection {
/// Same as Connection
pub fn listen<F>(
conn: TcpStream,
handler: F,
) -> (TimeoutConnection, Box<Future<Item = (), Error = Error>>)
where
F: Handler + 'static,
{
let expects = Arc::new(Mutex::new(vec![]));
// Decorates the handler to remove the "subscription" from the expected
// responses. We got our replies, so no timeout should occur.
let exp = expects.clone();
let (conn, fut) = Connection::listen(conn, move |sender, header: MsgHeader, data| {
let msg_type = header.msg_type;
let recv_h = try!(handler.handle(sender
|
{
self(sender, header, body)
}
|
identifier_body
|
conn.rs
|
, UnboundedReceiver, UnboundedSender};
use tokio_core::net::TcpStream;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::{read_exact, write_all};
use tokio_timer::{Timer, TimerError};
use core::core::hash::Hash;
use core::ser;
use msg::*;
use types::Error;
use rate_limit::*;
use util::LOGGER;
/// Handler to provide to the connection, will be called back anytime a message
/// is received. The provided sender can be use to immediately send back
/// another message.
pub trait Handler: Sync + Send {
/// Handle function to implement to process incoming messages. A sender to
/// reply immediately as well as the message header and its unparsed body
/// are provided.
fn handle(
&self,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
body: Vec<u8>,
) -> Result<Option<Hash>, ser::Error>;
}
impl<F> Handler for F
where
F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>)
-> Result<Option<Hash>, ser::Error>,
F: Sync + Send,
{
fn handle(
&self,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
body: Vec<u8>,
) -> Result<Option<Hash>, ser::Error> {
self(sender, header, body)
}
}
/// A higher level connection wrapping the TcpStream. Maintains the amount of
/// data transmitted and deals with the low-level task of sending and
/// receiving data, parsing message headers and timeouts.
#[allow(dead_code)]
pub struct Connection {
// Channel to push bytes to the remote peer
outbound_chan: UnboundedSender<Vec<u8>>,
// Close the connection with the remote peer
close_chan: Sender<()>,
// Bytes we've sent.
sent_bytes: Arc<Mutex<u64>>,
// Bytes we've received.
received_bytes: Arc<Mutex<u64>>,
// Counter for read errors.
error_count: Mutex<u64>,
}
impl Connection {
/// Start listening on the provided connection and wraps it. Does not hang
/// the current thread, instead just returns a future and the Connection
/// itself.
pub fn listen<F>(
conn: TcpStream,
handler: F,
) -> (Connection, Box<Future<Item = (), Error = Error>>)
where
F: Handler + 'static,
{
let (reader, writer) = conn.split();
// Set Max Read to 12 Mb/s
let reader = ThrottledReader::new(reader, 12_000_000);
// Set Max Write to 12 Mb/s
let writer = ThrottledWriter::new(writer, 12_000_000);
// prepare the channel that will transmit data to the connection writer
let (tx, rx) = futures::sync::mpsc::unbounded();
// same for closing the connection
let (close_tx, close_rx) = futures::sync::mpsc::channel(1);
let close_conn = close_rx
.for_each(|_| Ok(()))
.map_err(|_| Error::ConnectionClose);
let me = Connection {
outbound_chan: tx.clone(),
close_chan: close_tx,
sent_bytes: Arc::new(Mutex::new(0)),
received_bytes: Arc::new(Mutex::new(0)),
error_count: Mutex::new(0),
};
// setup the reading future, getting messages from the peer and processing them
let read_msg = me.read_msg(tx, reader, handler).map(|_| ());
// setting the writing future, getting messages from our system and sending
// them out
let write_msg = me.write_msg(rx, writer).map(|_| ());
// select between our different futures and return them
let fut = Box::new(
close_conn
.select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e))
.map(|_| ())
.map_err(|(e, _)| e),
);
(me, fut)
}
/// Prepares the future that gets message data produced by our system and
/// sends it to the peer connection
fn write_msg<W>(
&self,
rx: UnboundedReceiver<Vec<u8>>,
writer: W,
) -> Box<Future<Item = W, Error = Error>>
where
W: AsyncWrite + 'static,
{
let sent_bytes = self.sent_bytes.clone();
let send_data = rx
.map_err(|_| Error::ConnectionClose)
.map(move |data| {
// add the count of bytes sent
let mut sent_bytes = sent_bytes.lock().unwrap();
*sent_bytes += data.len() as u64;
data
})
// write the data and make sure the future returns the right types
.fold(writer, |writer, data| {
write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, _)| writer)
});
Box::new(send_data)
}
/// Prepares the future reading from the peer connection, parsing each
/// message and forwarding them appropriately based on their type
fn read_msg<F, R>(
&self,
sender: UnboundedSender<Vec<u8>>,
reader: R,
handler: F,
) -> Box<Future<Item = R, Error = Error>>
where
F: Handler + 'static,
R: AsyncRead + 'static,
{
// infinite iterator stream so we repeat the message reading logic until the
// peer is stopped
let iter = stream::iter_ok(iter::repeat(()).map(Ok::<(), Error>));
// setup the reading future, getting messages from the peer and processing them
let recv_bytes = self.received_bytes.clone();
let handler = Arc::new(handler);
let read_msg = iter.fold(reader, move |reader, _| {
let recv_bytes = recv_bytes.clone();
let handler = handler.clone();
let sender_inner = sender.clone();
// first read the message header
read_exact(reader, vec![0u8; HEADER_LEN as usize])
.from_err()
.and_then(move |(reader, buf)| {
let header = try!(ser::deserialize::<MsgHeader>(&mut &buf[..]));
Ok((reader, header))
})
.and_then(move |(reader, header)| {
// now that we have a size, proceed with the body
read_exact(reader, vec![0u8; header.msg_len as usize])
.map(|(reader, buf)| (reader, header, buf))
.from_err()
})
.and_then(move |(reader, header, buf)| {
// add the count of bytes received
let mut recv_bytes = recv_bytes.lock().unwrap();
*recv_bytes += header.serialized_len() + header.msg_len;
// and handle the different message types
let msg_type = header.msg_type;
if let Err(e) = handler.handle(sender_inner.clone(), header, buf)
|
Ok(reader)
})
});
Box::new(read_msg)
}
/// Utility function to send any Writeable. Handles adding the header and
/// serialization.
pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> {
let mut body_data = vec![];
try!(ser::serialize(&mut body_data, body));
let mut data = vec![];
try!(ser::serialize(
&mut data,
&MsgHeader::new(t, body_data.len() as u64),
));
data.append(&mut body_data);
self.outbound_chan
.unbounded_send(data)
.map_err(|_| Error::ConnectionClose)
}
/// Bytes sent and received by this peer to the remote peer.
pub fn transmitted_bytes(&self) -> (u64, u64) {
let sent = *self.sent_bytes.lock().unwrap();
let recv = *self.received_bytes.lock().unwrap();
(sent, recv)
}
}
/// Connection wrapper that handles a request/response oriented interaction with
/// a timeout.
pub struct TimeoutConnection {
underlying: Connection,
expected_responses: Arc<Mutex<Vec<(Type, Option<Hash>, Instant)>>>,
}
impl TimeoutConnection {
/// Same as Connection
pub fn listen<F>(
conn: TcpStream,
handler: F,
) -> (TimeoutConnection, Box<Future<Item = (), Error = Error>>)
where
F: Handler + 'static,
{
let expects = Arc::new(Mutex::new(vec![]));
// Decorates the handler to remove the "subscription" from the expected
// responses. We got our replies, so no timeout should occur.
let exp = expects.clone();
let (conn, fut) = Connection::listen(conn, move |sender, header: MsgHeader, data| {
let msg_type = header.msg_type;
let recv_h = try!(handler.handle(sender
|
{
debug!(LOGGER, "Invalid {:?} message: {}", msg_type, e);
return Err(Error::Serialization(e));
}
|
conditional_block
|
dpkg_install.go
|
for sync
}
func (p *PackageInfo) Name() string {
// Extract the package name from its section in /var/lib/dpkg/status
return p.Paragraph.Value("Package")
}
func (p *PackageInfo) Version() string {
// Extract the package version from its section in /var/lib/dpkg/status
return p.Paragraph.Value("Version")
}
// isConffile determines if a file must be processed as a conffile.
func (p *PackageInfo) isConffile(path string) bool {
for _, conffile := range p.Conffiles {
if path == conffile {
return true
}
}
return false
}
// InfoPath returns the path of a file under /var/lib/dpkg/info/.
// Ex: "list" => /var/lib/dpkg/info/hello.list
func (p *PackageInfo)
|
(filename string) string {
return filepath.Join("/var/lib/dpkg", p.Name()+"."+filename)
}
// We now add a method to change the package status
// and make sure the section in the status file is updated too.
// This method will be used several times at the different steps
// of the installation process.
func (p *PackageInfo) SetStatus(new string) {
p.Status = new
p.StatusDirty = true
// Override in DEB 822 document used to write the status file
old := p.Paragraph.Values["Status"]
parts := strings.Split(old, " ")
newStatus := fmt.Sprintf("%s %s %s", parts[0], parts[1], new)
p.Paragraph.Values["Status"] = newStatus
}
// Now, we are ready to read the database directory to initialize the structs.
func loadDatabase() (*Database, error) {
// Load the status file
f, _ := os.Open("/var/lib/dpkg/status")
parser, _ := deb822.NewParser(f)
status, _ := parser.Parse()
// Read the info directory
var packages []*PackageInfo
for _, statusParagraph := range status.Paragraphs {
statusField := statusParagraph.Value("Status") // install ok installed
statusValues := strings.Split(statusField, " ")
pkg := PackageInfo{
Paragraph: statusParagraph,
MaintainerScripts: make(map[string]string),
Status: statusValues[2],
StatusDirty: false,
}
// Read the configuration files
pkg.Files, _ = ReadLines(pkg.InfoPath("list"))
pkg.Conffiles, _ = ReadLines(pkg.InfoPath("conffiles"))
// Read the maintainer scripts
maintainerScripts := []string{"preinst", "postinst", "prerm", "postrm"}
for _, script := range maintainerScripts {
scriptPath := pkg.InfoPath(script)
if _, err := os.Stat(scriptPath); !os.IsNotExist(err) {
content, err := os.ReadFile(scriptPath)
if err != nil {
return nil, err
}
pkg.MaintainerScripts[script] = string(content)
}
}
packages = append(packages, &pkg)
}
// We have read everything that interest us and are ready
// to populate the Database struct.
return &Database{
Status: status,
Packages: packages,
}, nil
}
// Now we are ready to process an archive to install.
func processArchive(db *Database, archivePath string) error {
// Read the Debian archive file
f, err := os.Open(archivePath)
if err != nil {
return err
}
defer f.Close()
reader := ar.NewReader(f)
// Skip debian-binary
reader.Next()
// control.tar
reader.Next()
var bufControl bytes.Buffer
io.Copy(&bufControl, reader)
pkg, err := parseControl(db, bufControl)
if err != nil {
return err
}
// Add the new package in the database
db.Packages = append(db.Packages, pkg)
db.Sync()
// data.tar
reader.Next()
var bufData bytes.Buffer
io.Copy(&bufData, reader)
fmt.Printf("Preparing to unpack %s ...\n", filepath.Base(archivePath))
if err := pkg.Unpack(bufData); err != nil {
return err
}
if err := pkg.Configure(); err != nil {
return err
}
db.Sync()
return nil
}
// parseControl processes the control.tar archive.
func parseControl(db *Database, buf bytes.Buffer) (*PackageInfo, error) {
// The control.tar archive contains the most important files
// we need to install the package.
// We need to extract metadata from the control file, determine
// if the package contains conffiles and maintainer scripts.
pkg := PackageInfo{
MaintainerScripts: make(map[string]string),
Status: "not-installed",
StatusDirty: true,
}
tr := tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
break // End of archive
}
if err != nil {
return nil, err
}
// Read the file content
var buf bytes.Buffer
if _, err := io.Copy(&buf, tr); err != nil {
return nil, err
}
switch filepath.Base(hdr.Name) {
case "control":
parser, _ := deb822.NewParser(strings.NewReader(buf.String()))
document, _ := parser.Parse()
controlParagraph := document.Paragraphs[0]
// Copy control fields and add the Status field in second position
pkg.Paragraph = deb822.Paragraph{
Values: make(map[string]string),
}
// Make sure the field "Package' comes first, then "Status",
// then remaining fields.
pkg.Paragraph.Order = append(
pkg.Paragraph.Order, "Package", "Status")
pkg.Paragraph.Values["Package"] = controlParagraph.Value("Package")
pkg.Paragraph.Values["Status"] = "install ok non-installed"
for _, field := range controlParagraph.Order {
if field == "Package" {
continue
}
pkg.Paragraph.Order = append(pkg.Paragraph.Order, field)
pkg.Paragraph.Values[field] = controlParagraph.Value(field)
}
case "conffiles":
pkg.Conffiles = SplitLines(buf.String())
case "prerm":
fallthrough
case "preinst":
fallthrough
case "postinst":
fallthrough
case "postrm":
pkg.MaintainerScripts[filepath.Base(hdr.Name)] = buf.String()
}
}
return &pkg, nil
}
// Unpack processes the data.tar archive.
func (p *PackageInfo) Unpack(buf bytes.Buffer) error {
// The unpacking process consists in extracting all files
// in data.tar to their final destination, except for conffiles,
// which are copied with a special extension that will be removed
// in the configure step.
if err := p.runMaintainerScript("preinst"); err != nil {
return err
}
fmt.Printf("Unpacking %s (%s) ...\n", p.Name(), p.Version())
tr := tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
break // End of archive
}
if err != nil {
return err
}
var buf bytes.Buffer
if _, err := io.Copy(&buf, tr); err != nil {
return err
}
switch hdr.Typeflag {
case tar.TypeReg:
dest := hdr.Name
if strings.HasPrefix(dest, "./") {
// ./usr/bin/hello => /usr/bin/hello
dest = dest[1:]
}
if !strings.HasPrefix(dest, "/") {
// usr/bin/hello => /usr/bin/hello
dest = "/" + dest
}
tmpdest := dest
if p.isConffile(tmpdest) {
// Extract using the extension .dpkg-new
tmpdest += ".dpkg-new"
}
if err := os.MkdirAll(filepath.Dir(tmpdest), 0755); err != nil {
log.Fatalf("Failed to unpack directory %s: %v", tmpdest, err)
}
content := buf.Bytes()
if err := os.WriteFile(tmpdest, content, 0755); err != nil {
log.Fatalf("Failed to unpack file %s: %v", tmpdest, err)
}
p.Files = append(p.Files, dest)
}
}
p.SetStatus("unpacked")
p.Sync()
return nil
}
// Configure processes the conffiles.
func (p *PackageInfo) Configure() error {
// The configure process consists in renaming the conffiles
// unpacked at the previous step.
//
// We ignore some implementation concerns like checking if a conffile
// has been updated using the last known checksum.
fmt.Printf("Setting up %s (%s) ...\n", p.Name(), p.Version())
// Rename conffiles
for _, conffile := range p.Conffiles {
os.Rename(conffile+".dpkg-new", conffile)
}
p.SetStatus("half-configured")
p.Sync()
// Run maintainer script
if err := p.runMaintainerScript
|
InfoPath
|
identifier_name
|
dpkg_install.go
|
"prerm", "postrm"}
for _, script := range maintainerScripts {
scriptPath := pkg.InfoPath(script)
if _, err := os.Stat(scriptPath); !os.IsNotExist(err) {
content, err := os.ReadFile(scriptPath)
if err != nil {
return nil, err
}
pkg.MaintainerScripts[script] = string(content)
}
}
packages = append(packages, &pkg)
}
// We have read everything that interest us and are ready
// to populate the Database struct.
return &Database{
Status: status,
Packages: packages,
}, nil
}
// Now we are ready to process an archive to install.
func processArchive(db *Database, archivePath string) error {
// Read the Debian archive file
f, err := os.Open(archivePath)
if err != nil {
return err
}
defer f.Close()
reader := ar.NewReader(f)
// Skip debian-binary
reader.Next()
// control.tar
reader.Next()
var bufControl bytes.Buffer
io.Copy(&bufControl, reader)
pkg, err := parseControl(db, bufControl)
if err != nil {
return err
}
// Add the new package in the database
db.Packages = append(db.Packages, pkg)
db.Sync()
// data.tar
reader.Next()
var bufData bytes.Buffer
io.Copy(&bufData, reader)
fmt.Printf("Preparing to unpack %s ...\n", filepath.Base(archivePath))
if err := pkg.Unpack(bufData); err != nil {
return err
}
if err := pkg.Configure(); err != nil {
return err
}
db.Sync()
return nil
}
// parseControl processes the control.tar archive.
func parseControl(db *Database, buf bytes.Buffer) (*PackageInfo, error) {
// The control.tar archive contains the most important files
// we need to install the package.
// We need to extract metadata from the control file, determine
// if the package contains conffiles and maintainer scripts.
pkg := PackageInfo{
MaintainerScripts: make(map[string]string),
Status: "not-installed",
StatusDirty: true,
}
tr := tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
break // End of archive
}
if err != nil {
return nil, err
}
// Read the file content
var buf bytes.Buffer
if _, err := io.Copy(&buf, tr); err != nil {
return nil, err
}
switch filepath.Base(hdr.Name) {
case "control":
parser, _ := deb822.NewParser(strings.NewReader(buf.String()))
document, _ := parser.Parse()
controlParagraph := document.Paragraphs[0]
// Copy control fields and add the Status field in second position
pkg.Paragraph = deb822.Paragraph{
Values: make(map[string]string),
}
// Make sure the field "Package' comes first, then "Status",
// then remaining fields.
pkg.Paragraph.Order = append(
pkg.Paragraph.Order, "Package", "Status")
pkg.Paragraph.Values["Package"] = controlParagraph.Value("Package")
pkg.Paragraph.Values["Status"] = "install ok non-installed"
for _, field := range controlParagraph.Order {
if field == "Package" {
continue
}
pkg.Paragraph.Order = append(pkg.Paragraph.Order, field)
pkg.Paragraph.Values[field] = controlParagraph.Value(field)
}
case "conffiles":
pkg.Conffiles = SplitLines(buf.String())
case "prerm":
fallthrough
case "preinst":
fallthrough
case "postinst":
fallthrough
case "postrm":
pkg.MaintainerScripts[filepath.Base(hdr.Name)] = buf.String()
}
}
return &pkg, nil
}
// Unpack processes the data.tar archive.
func (p *PackageInfo) Unpack(buf bytes.Buffer) error {
// The unpacking process consists in extracting all files
// in data.tar to their final destination, except for conffiles,
// which are copied with a special extension that will be removed
// in the configure step.
if err := p.runMaintainerScript("preinst"); err != nil {
return err
}
fmt.Printf("Unpacking %s (%s) ...\n", p.Name(), p.Version())
tr := tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
break // End of archive
}
if err != nil {
return err
}
var buf bytes.Buffer
if _, err := io.Copy(&buf, tr); err != nil {
return err
}
switch hdr.Typeflag {
case tar.TypeReg:
dest := hdr.Name
if strings.HasPrefix(dest, "./") {
// ./usr/bin/hello => /usr/bin/hello
dest = dest[1:]
}
if !strings.HasPrefix(dest, "/") {
// usr/bin/hello => /usr/bin/hello
dest = "/" + dest
}
tmpdest := dest
if p.isConffile(tmpdest) {
// Extract using the extension .dpkg-new
tmpdest += ".dpkg-new"
}
if err := os.MkdirAll(filepath.Dir(tmpdest), 0755); err != nil {
log.Fatalf("Failed to unpack directory %s: %v", tmpdest, err)
}
content := buf.Bytes()
if err := os.WriteFile(tmpdest, content, 0755); err != nil {
log.Fatalf("Failed to unpack file %s: %v", tmpdest, err)
}
p.Files = append(p.Files, dest)
}
}
p.SetStatus("unpacked")
p.Sync()
return nil
}
// Configure processes the conffiles.
func (p *PackageInfo) Configure() error {
// The configure process consists in renaming the conffiles
// unpacked at the previous step.
//
// We ignore some implementation concerns like checking if a conffile
// has been updated using the last known checksum.
fmt.Printf("Setting up %s (%s) ...\n", p.Name(), p.Version())
// Rename conffiles
for _, conffile := range p.Conffiles {
os.Rename(conffile+".dpkg-new", conffile)
}
p.SetStatus("half-configured")
p.Sync()
// Run maintainer script
if err := p.runMaintainerScript("postinst"); err != nil {
return err
}
p.SetStatus("installed")
p.Sync()
return nil
}
func (p *PackageInfo) runMaintainerScript(name string) error {
// The control.tar file can contains scripts to be run at
// specific moments. This function uses the standard Go library
// to run the `sh` command with a maintainer scrpit as an argument.
if _, ok := p.MaintainerScripts[name]; !ok {
// Nothing to run
return nil
}
out, err := exec.Command("/bin/sh", p.InfoPath(name)).Output()
if err != nil {
return err
}
fmt.Print(string(out))
return nil
}
// We have covered the different steps of the installation process.
// We still need to write the code to sync the database.
func (d *Database) Sync() error {
newStatus := deb822.Document{
Paragraphs: []deb822.Paragraph{},
}
// Sync the /var/lib/dpkg/info directory
for _, pkg := range d.Packages {
newStatus.Paragraphs = append(newStatus.Paragraphs, pkg.Paragraph)
if pkg.StatusDirty {
if err := pkg.Sync(); err != nil {
return err
}
}
}
// Make a new version of /var/lib/dpkg/status
os.Rename("/var/lib/dpkg/status", "/var/lib/dpkg/status-old")
formatter := deb822.NewFormatter()
formatter.SetFoldedFields("Description")
formatter.SetMultilineFields("Conffiles")
if err := os.WriteFile("/var/lib/dpkg/status",
[]byte(formatter.Format(newStatus)), 0644); err != nil {
return err
}
return nil
}
func (p *PackageInfo) Sync() error
|
{
// This function synchronizes the files under /var/lib/dpkg/info
// for a single package.
// Write <package>.list
if err := os.WriteFile(p.InfoPath("list"),
[]byte(MergeLines(p.Files)), 0644); err != nil {
return err
}
// Write <package>.conffiles
if err := os.WriteFile(p.InfoPath("conffiles"),
[]byte(MergeLines(p.Conffiles)), 0644); err != nil {
return err
}
// Write <package>.{preinst,prerm,postinst,postrm}
for name, content := range p.MaintainerScripts {
err := os.WriteFile(p.InfoPath(name), []byte(content), 0755)
if err != nil {
|
identifier_body
|
|
dpkg_install.go
|
for sync
}
func (p *PackageInfo) Name() string {
// Extract the package name from its section in /var/lib/dpkg/status
return p.Paragraph.Value("Package")
}
func (p *PackageInfo) Version() string {
// Extract the package version from its section in /var/lib/dpkg/status
return p.Paragraph.Value("Version")
}
// isConffile determines if a file must be processed as a conffile.
func (p *PackageInfo) isConffile(path string) bool {
for _, conffile := range p.Conffiles {
if path == conffile {
return true
}
}
return false
}
// InfoPath returns the path of a file under /var/lib/dpkg/info/.
// Ex: "list" => /var/lib/dpkg/info/hello.list
func (p *PackageInfo) InfoPath(filename string) string {
return filepath.Join("/var/lib/dpkg", p.Name()+"."+filename)
}
// We now add a method to change the package status
// and make sure the section in the status file is updated too.
// This method will be used several times at the different steps
// of the installation process.
func (p *PackageInfo) SetStatus(new string) {
p.Status = new
p.StatusDirty = true
// Override in DEB 822 document used to write the status file
old := p.Paragraph.Values["Status"]
parts := strings.Split(old, " ")
newStatus := fmt.Sprintf("%s %s %s", parts[0], parts[1], new)
p.Paragraph.Values["Status"] = newStatus
}
// Now, we are ready to read the database directory to initialize the structs.
func loadDatabase() (*Database, error) {
// Load the status file
f, _ := os.Open("/var/lib/dpkg/status")
parser, _ := deb822.NewParser(f)
status, _ := parser.Parse()
// Read the info directory
var packages []*PackageInfo
for _, statusParagraph := range status.Paragraphs {
statusField := statusParagraph.Value("Status") // install ok installed
statusValues := strings.Split(statusField, " ")
pkg := PackageInfo{
Paragraph: statusParagraph,
MaintainerScripts: make(map[string]string),
Status: statusValues[2],
StatusDirty: false,
}
// Read the configuration files
pkg.Files, _ = ReadLines(pkg.InfoPath("list"))
pkg.Conffiles, _ = ReadLines(pkg.InfoPath("conffiles"))
// Read the maintainer scripts
maintainerScripts := []string{"preinst", "postinst", "prerm", "postrm"}
for _, script := range maintainerScripts {
scriptPath := pkg.InfoPath(script)
if _, err := os.Stat(scriptPath); !os.IsNotExist(err) {
content, err := os.ReadFile(scriptPath)
if err != nil {
return nil, err
}
pkg.MaintainerScripts[script] = string(content)
}
}
packages = append(packages, &pkg)
}
// We have read everything that interest us and are ready
// to populate the Database struct.
return &Database{
Status: status,
Packages: packages,
}, nil
}
// Now we are ready to process an archive to install.
func processArchive(db *Database, archivePath string) error {
// Read the Debian archive file
f, err := os.Open(archivePath)
if err != nil {
return err
}
defer f.Close()
reader := ar.NewReader(f)
// Skip debian-binary
reader.Next()
|
var bufControl bytes.Buffer
io.Copy(&bufControl, reader)
pkg, err := parseControl(db, bufControl)
if err != nil {
return err
}
// Add the new package in the database
db.Packages = append(db.Packages, pkg)
db.Sync()
// data.tar
reader.Next()
var bufData bytes.Buffer
io.Copy(&bufData, reader)
fmt.Printf("Preparing to unpack %s ...\n", filepath.Base(archivePath))
if err := pkg.Unpack(bufData); err != nil {
return err
}
if err := pkg.Configure(); err != nil {
return err
}
db.Sync()
return nil
}
// parseControl processes the control.tar archive.
func parseControl(db *Database, buf bytes.Buffer) (*PackageInfo, error) {
// The control.tar archive contains the most important files
// we need to install the package.
// We need to extract metadata from the control file, determine
// if the package contains conffiles and maintainer scripts.
pkg := PackageInfo{
MaintainerScripts: make(map[string]string),
Status: "not-installed",
StatusDirty: true,
}
tr := tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
break // End of archive
}
if err != nil {
return nil, err
}
// Read the file content
var buf bytes.Buffer
if _, err := io.Copy(&buf, tr); err != nil {
return nil, err
}
switch filepath.Base(hdr.Name) {
case "control":
parser, _ := deb822.NewParser(strings.NewReader(buf.String()))
document, _ := parser.Parse()
controlParagraph := document.Paragraphs[0]
// Copy control fields and add the Status field in second position
pkg.Paragraph = deb822.Paragraph{
Values: make(map[string]string),
}
// Make sure the field "Package' comes first, then "Status",
// then remaining fields.
pkg.Paragraph.Order = append(
pkg.Paragraph.Order, "Package", "Status")
pkg.Paragraph.Values["Package"] = controlParagraph.Value("Package")
pkg.Paragraph.Values["Status"] = "install ok non-installed"
for _, field := range controlParagraph.Order {
if field == "Package" {
continue
}
pkg.Paragraph.Order = append(pkg.Paragraph.Order, field)
pkg.Paragraph.Values[field] = controlParagraph.Value(field)
}
case "conffiles":
pkg.Conffiles = SplitLines(buf.String())
case "prerm":
fallthrough
case "preinst":
fallthrough
case "postinst":
fallthrough
case "postrm":
pkg.MaintainerScripts[filepath.Base(hdr.Name)] = buf.String()
}
}
return &pkg, nil
}
// Unpack processes the data.tar archive.
func (p *PackageInfo) Unpack(buf bytes.Buffer) error {
// The unpacking process consists in extracting all files
// in data.tar to their final destination, except for conffiles,
// which are copied with a special extension that will be removed
// in the configure step.
if err := p.runMaintainerScript("preinst"); err != nil {
return err
}
fmt.Printf("Unpacking %s (%s) ...\n", p.Name(), p.Version())
tr := tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
break // End of archive
}
if err != nil {
return err
}
var buf bytes.Buffer
if _, err := io.Copy(&buf, tr); err != nil {
return err
}
switch hdr.Typeflag {
case tar.TypeReg:
dest := hdr.Name
if strings.HasPrefix(dest, "./") {
// ./usr/bin/hello => /usr/bin/hello
dest = dest[1:]
}
if !strings.HasPrefix(dest, "/") {
// usr/bin/hello => /usr/bin/hello
dest = "/" + dest
}
tmpdest := dest
if p.isConffile(tmpdest) {
// Extract using the extension .dpkg-new
tmpdest += ".dpkg-new"
}
if err := os.MkdirAll(filepath.Dir(tmpdest), 0755); err != nil {
log.Fatalf("Failed to unpack directory %s: %v", tmpdest, err)
}
content := buf.Bytes()
if err := os.WriteFile(tmpdest, content, 0755); err != nil {
log.Fatalf("Failed to unpack file %s: %v", tmpdest, err)
}
p.Files = append(p.Files, dest)
}
}
p.SetStatus("unpacked")
p.Sync()
return nil
}
// Configure processes the conffiles.
func (p *PackageInfo) Configure() error {
// The configure process consists in renaming the conffiles
// unpacked at the previous step.
//
// We ignore some implementation concerns like checking if a conffile
// has been updated using the last known checksum.
fmt.Printf("Setting up %s (%s) ...\n", p.Name(), p.Version())
// Rename conffiles
for _, conffile := range p.Conffiles {
os.Rename(conffile+".dpkg-new", conffile)
}
p.SetStatus("half-configured")
p.Sync()
// Run maintainer script
if err := p.runMaintainerScript
|
// control.tar
reader.Next()
|
random_line_split
|
dpkg_install.go
|
for sync
}
func (p *PackageInfo) Name() string {
// Extract the package name from its section in /var/lib/dpkg/status
return p.Paragraph.Value("Package")
}
func (p *PackageInfo) Version() string {
// Extract the package version from its section in /var/lib/dpkg/status
return p.Paragraph.Value("Version")
}
// isConffile determines if a file must be processed as a conffile.
func (p *PackageInfo) isConffile(path string) bool {
for _, conffile := range p.Conffiles {
if path == conffile {
return true
}
}
return false
}
// InfoPath returns the path of a file under /var/lib/dpkg/info/.
// Ex: "list" => /var/lib/dpkg/info/hello.list
func (p *PackageInfo) InfoPath(filename string) string {
return filepath.Join("/var/lib/dpkg", p.Name()+"."+filename)
}
// We now add a method to change the package status
// and make sure the section in the status file is updated too.
// This method will be used several times at the different steps
// of the installation process.
func (p *PackageInfo) SetStatus(new string) {
p.Status = new
p.StatusDirty = true
// Override in DEB 822 document used to write the status file
old := p.Paragraph.Values["Status"]
parts := strings.Split(old, " ")
newStatus := fmt.Sprintf("%s %s %s", parts[0], parts[1], new)
p.Paragraph.Values["Status"] = newStatus
}
// Now, we are ready to read the database directory to initialize the structs.
func loadDatabase() (*Database, error) {
// Load the status file
f, _ := os.Open("/var/lib/dpkg/status")
parser, _ := deb822.NewParser(f)
status, _ := parser.Parse()
// Read the info directory
var packages []*PackageInfo
for _, statusParagraph := range status.Paragraphs {
statusField := statusParagraph.Value("Status") // install ok installed
statusValues := strings.Split(statusField, " ")
pkg := PackageInfo{
Paragraph: statusParagraph,
MaintainerScripts: make(map[string]string),
Status: statusValues[2],
StatusDirty: false,
}
// Read the configuration files
pkg.Files, _ = ReadLines(pkg.InfoPath("list"))
pkg.Conffiles, _ = ReadLines(pkg.InfoPath("conffiles"))
// Read the maintainer scripts
maintainerScripts := []string{"preinst", "postinst", "prerm", "postrm"}
for _, script := range maintainerScripts {
scriptPath := pkg.InfoPath(script)
if _, err := os.Stat(scriptPath); !os.IsNotExist(err) {
content, err := os.ReadFile(scriptPath)
if err != nil {
return nil, err
}
pkg.MaintainerScripts[script] = string(content)
}
}
packages = append(packages, &pkg)
}
// We have read everything that interest us and are ready
// to populate the Database struct.
return &Database{
Status: status,
Packages: packages,
}, nil
}
// Now we are ready to process an archive to install.
func processArchive(db *Database, archivePath string) error {
// Read the Debian archive file
f, err := os.Open(archivePath)
if err != nil {
return err
}
defer f.Close()
reader := ar.NewReader(f)
// Skip debian-binary
reader.Next()
// control.tar
reader.Next()
var bufControl bytes.Buffer
io.Copy(&bufControl, reader)
pkg, err := parseControl(db, bufControl)
if err != nil
|
// Add the new package in the database
db.Packages = append(db.Packages, pkg)
db.Sync()
// data.tar
reader.Next()
var bufData bytes.Buffer
io.Copy(&bufData, reader)
fmt.Printf("Preparing to unpack %s ...\n", filepath.Base(archivePath))
if err := pkg.Unpack(bufData); err != nil {
return err
}
if err := pkg.Configure(); err != nil {
return err
}
db.Sync()
return nil
}
// parseControl processes the control.tar archive.
func parseControl(db *Database, buf bytes.Buffer) (*PackageInfo, error) {
// The control.tar archive contains the most important files
// we need to install the package.
// We need to extract metadata from the control file, determine
// if the package contains conffiles and maintainer scripts.
pkg := PackageInfo{
MaintainerScripts: make(map[string]string),
Status: "not-installed",
StatusDirty: true,
}
tr := tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
break // End of archive
}
if err != nil {
return nil, err
}
// Read the file content
var buf bytes.Buffer
if _, err := io.Copy(&buf, tr); err != nil {
return nil, err
}
switch filepath.Base(hdr.Name) {
case "control":
parser, _ := deb822.NewParser(strings.NewReader(buf.String()))
document, _ := parser.Parse()
controlParagraph := document.Paragraphs[0]
// Copy control fields and add the Status field in second position
pkg.Paragraph = deb822.Paragraph{
Values: make(map[string]string),
}
// Make sure the field "Package' comes first, then "Status",
// then remaining fields.
pkg.Paragraph.Order = append(
pkg.Paragraph.Order, "Package", "Status")
pkg.Paragraph.Values["Package"] = controlParagraph.Value("Package")
pkg.Paragraph.Values["Status"] = "install ok non-installed"
for _, field := range controlParagraph.Order {
if field == "Package" {
continue
}
pkg.Paragraph.Order = append(pkg.Paragraph.Order, field)
pkg.Paragraph.Values[field] = controlParagraph.Value(field)
}
case "conffiles":
pkg.Conffiles = SplitLines(buf.String())
case "prerm":
fallthrough
case "preinst":
fallthrough
case "postinst":
fallthrough
case "postrm":
pkg.MaintainerScripts[filepath.Base(hdr.Name)] = buf.String()
}
}
return &pkg, nil
}
// Unpack processes the data.tar archive.
func (p *PackageInfo) Unpack(buf bytes.Buffer) error {
// The unpacking process consists in extracting all files
// in data.tar to their final destination, except for conffiles,
// which are copied with a special extension that will be removed
// in the configure step.
if err := p.runMaintainerScript("preinst"); err != nil {
return err
}
fmt.Printf("Unpacking %s (%s) ...\n", p.Name(), p.Version())
tr := tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
break // End of archive
}
if err != nil {
return err
}
var buf bytes.Buffer
if _, err := io.Copy(&buf, tr); err != nil {
return err
}
switch hdr.Typeflag {
case tar.TypeReg:
dest := hdr.Name
if strings.HasPrefix(dest, "./") {
// ./usr/bin/hello => /usr/bin/hello
dest = dest[1:]
}
if !strings.HasPrefix(dest, "/") {
// usr/bin/hello => /usr/bin/hello
dest = "/" + dest
}
tmpdest := dest
if p.isConffile(tmpdest) {
// Extract using the extension .dpkg-new
tmpdest += ".dpkg-new"
}
if err := os.MkdirAll(filepath.Dir(tmpdest), 0755); err != nil {
log.Fatalf("Failed to unpack directory %s: %v", tmpdest, err)
}
content := buf.Bytes()
if err := os.WriteFile(tmpdest, content, 0755); err != nil {
log.Fatalf("Failed to unpack file %s: %v", tmpdest, err)
}
p.Files = append(p.Files, dest)
}
}
p.SetStatus("unpacked")
p.Sync()
return nil
}
// Configure processes the conffiles.
func (p *PackageInfo) Configure() error {
// The configure process consists in renaming the conffiles
// unpacked at the previous step.
//
// We ignore some implementation concerns like checking if a conffile
// has been updated using the last known checksum.
fmt.Printf("Setting up %s (%s) ...\n", p.Name(), p.Version())
// Rename conffiles
for _, conffile := range p.Conffiles {
os.Rename(conffile+".dpkg-new", conffile)
}
p.SetStatus("half-configured")
p.Sync()
// Run maintainer script
if err := p.runMaintainer
|
{
return err
}
|
conditional_block
|
clarans.py
|
list of points (objects), each point should be represented by list or tuple.
@param[in] number_clusters (uint): Amount of clusters that should be allocated.
@param[in] numlocal (uint): The number of local minima obtained (amount of iterations for solving the problem).
@param[in] maxneighbor (uint): The maximum number of neighbors examined.
"""
self.__pointer_data = data
self.__numlocal = numlocal
self.__maxneighbor = maxneighbor
self.__number_clusters = number_clusters
self.__clusters = []
self.__current = []
self.__belong = []
self.__optimal_medoids = []
self.__optimal_estimation = float('inf')
self.__verify_arguments()
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__pointer_data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__pointer_data))
if self.__number_clusters <= 0:
raise ValueError("Amount of cluster (current value: '%d') for allocation should be greater than 0." %
self.__number_clusters)
if self.__numlocal < 0:
raise ValueError("Local minima (current value: '%d') should be greater or equal to 0." % self.__numlocal)
if self.__maxneighbor < 0:
raise ValueError("Maximum number of neighbors (current value: '%d') should be greater or "
"equal to 0." % self.__maxneighbor)
def process(self, plotting=False):
"""!
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@return (clarans) Returns itself (CLARANS instance).
@see get_clusters()
@see get_medoids()
"""
random.seed()
# loop for a numlocal number of times
for _ in range(0, self.__numlocal):
print("numlocal: ", _)
# set (current) random medoids
self.__current = random.sample(range(0, len(self.__pointer_data)), self.__number_clusters)
# update clusters in line with random allocated medoids
self.__update_clusters(self.__current)
# optimize configuration
self.__optimize_configuration()
# obtain cost of current cluster configuration and compare it with the best obtained
estimation = self.__calculate_estimation()
if estimation < self.__optimal_estimation:
print("Better configuration found with medoids: {0} and cost: {1}".format(self.__current[:], estimation))
self.__optimal_medoids = self.__current[:]
self.__optimal_estimation = estimation
if plotting == True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(self.__pointer_data,
dict(zip(self.__optimal_medoids,self.__clusters)))
else:
print("Configuration found does not improve current best one because its cost is {0}".format(estimation))
if plotting == True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(self.__pointer_data,
dict(zip(self.__optimal_medoids,self.__clusters)))
self.__update_clusters(self.__optimal_medoids)
return self
def get_clusters(self):
"""!
@brief Returns allocated clusters by the algorithm.
@remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty list is returned.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
@see get_medoids()
"""
return self.__clusters
def get_medoids(self):
"""!
@brief Returns list of medoids of allocated clusters.
@see process()
@see get_clusters()
"""
return self.__optimal_medoids
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __update_clusters(self, medoids):
"""!
@brief Forms cluster in line with specified medoids by calculation distance from each point to medoids.
"""
self.__belong = [0] * len(self.__pointer_data)
self.__clusters = [[] for i in range(len(medoids))]
for index_point in range(len(self.__pointer_data)):
index_optim = -1
dist_optim = 0.0
for index in range(len(medoids)):
dist = euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[medoids[index]])
if (dist < dist_optim) or (index is 0):
index_optim = index
dist_optim = dist
self.__clusters[index_optim].append(index_point)
self.__belong[index_point] = index_optim
# If cluster is not able to capture object it should be removed
self.__clusters = [cluster for cluster in self.__clusters if len(cluster) > 0]
def __optimize_configuration(self):
"""!
@brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules.
"""
index_neighbor = 0
counter = 0
while (index_neighbor < self.__maxneighbor):
# get random current medoid that is to be replaced
current_medoid_index = self.__current[random.randint(0, self.__number_clusters - 1)]
current_medoid_cluster_index = self.__belong[current_medoid_index]
# get new candidate to be medoid
candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)
while candidate_medoid_index in self.__current:
candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)
candidate_cost = 0.0
for point_index in range(0, len(self.__pointer_data)):
if point_index not in self.__current:
# get non-medoid point and its medoid
point_cluster_index = self.__belong[point_index]
point_medoid_index = self.__current[point_cluster_index]
# get other medoid that is nearest to the point (except current and candidate)
other_medoid_index = self.__find_another_nearest_medoid(point_index, current_medoid_index)
other_medoid_cluster_index = self.__belong[other_medoid_index]
# for optimization calculate all required distances
# from the point to current medoid
distance_current = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[current_medoid_index])
# from the point to candidate median
distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[candidate_medoid_index])
# from the point to nearest (own) medoid
distance_nearest = float('inf')
if ( (point_medoid_index != candidate_medoid_index) and (point_medoid_index != current_medoid_cluster_index) ):
distance_nearest = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[point_medoid_index])
# apply rules for cost calculation
if (point_cluster_index == current_medoid_cluster_index):
# case 1:
if (distance_candidate >= distance_nearest):
candidate_cost += distance_nearest - distance_current
# case 2:
else:
candidate_cost += distance_candidate - distance_current
elif (point_cluster_index == other_medoid_cluster_index):
# case 3 ('nearest medoid' is the representative object of that cluster and object is more similar to 'nearest' than to 'candidate'):
if (distance_candidate > distance_nearest):
pass;
# case 4:
else:
candidate_cost += distance_candidate - distance_nearest
if (candidate_cost < 0):
counter+=1
# set candidate that has won
self.__current[current_medoid_cluster_index] = candidate_medoid_index
# recalculate clusters
self.__update_clusters(self.__current)
# reset iterations and starts investigation from the begining
index_neighbor = 0
else:
index_neighbor += 1
print("Medoid set changed {0} times".format(counter))
def __find_another_nearest_medoid(self, point_index, current_medoid_index):
|
"""!
@brief Finds the another nearest medoid for the specified point that is different from the specified medoid.
@param[in] point_index: index of point in dataspace for that searching of medoid in current list of medoids is perfomed.
@param[in] current_medoid_index: index of medoid that shouldn't be considered as a nearest.
@return (uint) index of the another nearest medoid for the point.
"""
other_medoid_index = -1
other_distance_nearest = float('inf')
for index_medoid in self.__current:
if (index_medoid != current_medoid_index):
other_distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[current_medoid_index])
if other_distance_candidate < other_distance_nearest:
other_distance_nearest = other_distance_candidate
other_medoid_index = index_medoid
return other_medoid_index
|
identifier_body
|
|
clarans.py
|
s=500, color="red", marker="X", edgecolor="black")
xmin, xmax, ymin, ymax = plt.axis()
xwidth = xmax - xmin
ywidth = ymax - ymin
xw1 = xwidth*0.01
yw1 = ywidth*0.01
xw2 = xwidth*0.005
yw2 = ywidth*0.01
xw3 = xwidth*0.01
yw3 = ywidth*0.01
for i, txt in enumerate(range(len(data))):
if len(str(txt))==2:
ax.annotate(txt, (np.array(data)[:,0][i]-xw1, np.array(data)[:,1][i]-yw1), fontsize=12, size=12)
elif len(str(txt))==1:
ax.annotate(txt, (np.array(data)[:,0][i]-xw2, np.array(data)[:,1][i]-yw2), fontsize=12, size=12)
else:
ax.annotate(txt, (np.array(data)[:,0][i]-xw3, np.array(data)[:,1][i]-yw3), fontsize=9, size=9)
if equal_axis_scale == True:
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
class clarans:
"""!
@brief Class represents clustering algorithm CLARANS (a method for clustering objects for spatial data mining).
"""
def __init__(self, data, number_clusters, numlocal, maxneighbor):
"""!
@brief Constructor of clustering algorithm CLARANS.
@details The higher the value of maxneighbor, the closer is CLARANS to K-Medoids, and the longer is each search of a local minima.
@param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple.
@param[in] number_clusters (uint): Amount of clusters that should be allocated.
@param[in] numlocal (uint): The number of local minima obtained (amount of iterations for solving the problem).
@param[in] maxneighbor (uint): The maximum number of neighbors examined.
"""
self.__pointer_data = data
self.__numlocal = numlocal
self.__maxneighbor = maxneighbor
self.__number_clusters = number_clusters
self.__clusters = []
self.__current = []
self.__belong = []
self.__optimal_medoids = []
self.__optimal_estimation = float('inf')
self.__verify_arguments()
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__pointer_data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__pointer_data))
if self.__number_clusters <= 0:
raise ValueError("Amount of cluster (current value: '%d') for allocation should be greater than 0." %
self.__number_clusters)
if self.__numlocal < 0:
raise ValueError("Local minima (current value: '%d') should be greater or equal to 0." % self.__numlocal)
if self.__maxneighbor < 0:
raise ValueError("Maximum number of neighbors (current value: '%d') should be greater or "
"equal to 0." % self.__maxneighbor)
def process(self, plotting=False):
"""!
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@return (clarans) Returns itself (CLARANS instance).
@see get_clusters()
@see get_medoids()
"""
random.seed()
# loop for a numlocal number of times
for _ in range(0, self.__numlocal):
print("numlocal: ", _)
# set (current) random medoids
self.__current = random.sample(range(0, len(self.__pointer_data)), self.__number_clusters)
# update clusters in line with random allocated medoids
self.__update_clusters(self.__current)
# optimize configuration
self.__optimize_configuration()
# obtain cost of current cluster configuration and compare it with the best obtained
estimation = self.__calculate_estimation()
if estimation < self.__optimal_estimation:
print("Better configuration found with medoids: {0} and cost: {1}".format(self.__current[:], estimation))
self.__optimal_medoids = self.__current[:]
self.__optimal_estimation = estimation
if plotting == True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(self.__pointer_data,
dict(zip(self.__optimal_medoids,self.__clusters)))
else:
print("Configuration found does not improve current best one because its cost is {0}".format(estimation))
if plotting == True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(self.__pointer_data,
dict(zip(self.__optimal_medoids,self.__clusters)))
self.__update_clusters(self.__optimal_medoids)
return self
def get_clusters(self):
"""!
@brief Returns allocated clusters by the algorithm.
@remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty list is returned.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
@see get_medoids()
"""
return self.__clusters
def get_medoids(self):
"""!
@brief Returns list of medoids of allocated clusters.
@see process()
@see get_clusters()
"""
return self.__optimal_medoids
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __update_clusters(self, medoids):
"""!
@brief Forms cluster in line with specified medoids by calculation distance from each point to medoids.
"""
self.__belong = [0] * len(self.__pointer_data)
self.__clusters = [[] for i in range(len(medoids))]
for index_point in range(len(self.__pointer_data)):
index_optim = -1
dist_optim = 0.0
for index in range(len(medoids)):
dist = euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[medoids[index]])
if (dist < dist_optim) or (index is 0):
index_optim = index
dist_optim = dist
self.__clusters[index_optim].append(index_point)
self.__belong[index_point] = index_optim
# If cluster is not able to capture object it should be removed
self.__clusters = [cluster for cluster in self.__clusters if len(cluster) > 0]
def __optimize_configuration(self):
"""!
@brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules.
"""
index_neighbor = 0
counter = 0
while (index_neighbor < self.__maxneighbor):
# get random current medoid that is to be replaced
current_medoid_index = self.__current[random.randint(0, self.__number_clusters - 1)]
current_medoid_cluster_index = self.__belong[current_medoid_index]
# get new candidate to be medoid
candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)
while candidate_medoid_index in self.__current:
candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)
candidate_cost = 0.0
for point_index in range(0, len(self.__pointer_data)):
if point_index not in self.__current:
# get non-medoid point and its medoid
point_cluster_index = self.__belong[point_index]
point_medoid_index = self.__current[point_cluster_index]
# get other medoid that is nearest to the point (except current and candidate)
other_medoid_index = self.__find_another_nearest_medoid(point_index, current_medoid_index)
other_medoid_cluster_index = self.__belong[other_medoid_index]
# for optimization calculate all required distances
# from the point to current medoid
distance_current = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[current_medoid_index])
# from the point to candidate median
distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[candidate_medoid_index])
# from the point to nearest (own) medoid
distance_nearest = float('inf')
if ( (point_medoid_index != candidate_medoid_index) and (point_medoid_index != current_medoid_cluster_index) ):
distance_nearest = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[point_medoid_index])
# apply rules for cost calculation
if (point_cluster_index == current_medoid_cluster_index):
# case 1:
if (distance_candidate >= distance_nearest):
|
candidate_cost += distance_nearest - distance_current
|
conditional_block
|
|
clarans.py
|
:"seagreen", 1:'beige', 2:'yellow', 3:'grey',
4:'pink', 5:'turquoise', 6:'orange', 7:'purple', 8:'yellowgreen', 9:'olive', 10:'brown',
11:'tan', 12: 'plum', 13:'rosybrown', 14:'lightblue', 15:"khaki", 16:"gainsboro", 17:"peachpuff"}
for i,el in enumerate(list(cl.values())):
plt.scatter(np.array(data)[el,0], np.array(data)[el,1], s=300, color=colors[i%17], edgecolor="black")
for i,el in enumerate(list(cl.keys())):
plt.scatter(np.array(data)[el,0], np.array(data)[el,1], s=500, color="red", marker="X", edgecolor="black")
xmin, xmax, ymin, ymax = plt.axis()
xwidth = xmax - xmin
ywidth = ymax - ymin
xw1 = xwidth*0.01
yw1 = ywidth*0.01
xw2 = xwidth*0.005
yw2 = ywidth*0.01
xw3 = xwidth*0.01
yw3 = ywidth*0.01
for i, txt in enumerate(range(len(data))):
if len(str(txt))==2:
ax.annotate(txt, (np.array(data)[:,0][i]-xw1, np.array(data)[:,1][i]-yw1), fontsize=12, size=12)
elif len(str(txt))==1:
ax.annotate(txt, (np.array(data)[:,0][i]-xw2, np.array(data)[:,1][i]-yw2), fontsize=12, size=12)
else:
ax.annotate(txt, (np.array(data)[:,0][i]-xw3, np.array(data)[:,1][i]-yw3), fontsize=9, size=9)
if equal_axis_scale == True:
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
class clarans:
"""!
@brief Class represents clustering algorithm CLARANS (a method for clustering objects for spatial data mining).
"""
def __init__(self, data, number_clusters, numlocal, maxneighbor):
"""!
@brief Constructor of clustering algorithm CLARANS.
@details The higher the value of maxneighbor, the closer is CLARANS to K-Medoids, and the longer is each search of a local minima.
@param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple.
@param[in] number_clusters (uint): Amount of clusters that should be allocated.
@param[in] numlocal (uint): The number of local minima obtained (amount of iterations for solving the problem).
@param[in] maxneighbor (uint): The maximum number of neighbors examined.
"""
self.__pointer_data = data
self.__numlocal = numlocal
self.__maxneighbor = maxneighbor
self.__number_clusters = number_clusters
self.__clusters = []
self.__current = []
self.__belong = []
self.__optimal_medoids = []
self.__optimal_estimation = float('inf')
self.__verify_arguments()
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__pointer_data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__pointer_data))
if self.__number_clusters <= 0:
raise ValueError("Amount of cluster (current value: '%d') for allocation should be greater than 0." %
self.__number_clusters)
if self.__numlocal < 0:
raise ValueError("Local minima (current value: '%d') should be greater or equal to 0." % self.__numlocal)
if self.__maxneighbor < 0:
raise ValueError("Maximum number of neighbors (current value: '%d') should be greater or "
"equal to 0." % self.__maxneighbor)
def process(self, plotting=False):
"""!
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@return (clarans) Returns itself (CLARANS instance).
@see get_clusters()
@see get_medoids()
"""
random.seed()
# loop for a numlocal number of times
for _ in range(0, self.__numlocal):
print("numlocal: ", _)
# set (current) random medoids
self.__current = random.sample(range(0, len(self.__pointer_data)), self.__number_clusters)
# update clusters in line with random allocated medoids
self.__update_clusters(self.__current)
# optimize configuration
self.__optimize_configuration()
# obtain cost of current cluster configuration and compare it with the best obtained
estimation = self.__calculate_estimation()
if estimation < self.__optimal_estimation:
print("Better configuration found with medoids: {0} and cost: {1}".format(self.__current[:], estimation))
self.__optimal_medoids = self.__current[:]
self.__optimal_estimation = estimation
if plotting == True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(self.__pointer_data,
dict(zip(self.__optimal_medoids,self.__clusters)))
else:
print("Configuration found does not improve current best one because its cost is {0}".format(estimation))
if plotting == True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(self.__pointer_data,
dict(zip(self.__optimal_medoids,self.__clusters)))
self.__update_clusters(self.__optimal_medoids)
return self
def
|
(self):
"""!
@brief Returns allocated clusters by the algorithm.
@remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty list is returned.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
@see get_medoids()
"""
return self.__clusters
def get_medoids(self):
"""!
@brief Returns list of medoids of allocated clusters.
@see process()
@see get_clusters()
"""
return self.__optimal_medoids
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __update_clusters(self, medoids):
"""!
@brief Forms cluster in line with specified medoids by calculation distance from each point to medoids.
"""
self.__belong = [0] * len(self.__pointer_data)
self.__clusters = [[] for i in range(len(medoids))]
for index_point in range(len(self.__pointer_data)):
index_optim = -1
dist_optim = 0.0
for index in range(len(medoids)):
dist = euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[medoids[index]])
if (dist < dist_optim) or (index is 0):
index_optim = index
dist_optim = dist
self.__clusters[index_optim].append(index_point)
self.__belong[index_point] = index_optim
# If cluster is not able to capture object it should be removed
self.__clusters = [cluster for cluster in self.__clusters if len(cluster) > 0]
def __optimize_configuration(self):
"""!
@brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules.
"""
index_neighbor = 0
counter = 0
while (index_neighbor < self.__maxneighbor):
# get random current medoid that is to be replaced
current_medoid_index = self.__current[random.randint(0, self.__number_clusters - 1)]
current_medoid_cluster_index = self.__belong[current_medoid_index]
# get new candidate to be medoid
candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)
while candidate_medoid_index in self.__current:
candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)
candidate_cost = 0.0
for point_index in range(0, len(self.__pointer_data)):
if point_index not in self.__current:
# get non-medoid point and its medoid
point_cluster_index = self.__belong[point_index]
point_medoid_index = self.__current[point_cluster_index]
# get other medoid that is nearest to the point (except current and candidate)
other_medoid_index = self.__find_another_nearest_medoid(point_index, current_medoid_index)
other_medoid_cluster_index = self.__belong[other_medoid_index]
# for optimization calculate all
|
get_clusters
|
identifier_name
|
clarans.py
|
:"seagreen", 1:'beige', 2:'yellow', 3:'grey',
4:'pink', 5:'turquoise', 6:'orange', 7:'purple', 8:'yellowgreen', 9:'olive', 10:'brown',
11:'tan', 12: 'plum', 13:'rosybrown', 14:'lightblue', 15:"khaki", 16:"gainsboro", 17:"peachpuff"}
for i,el in enumerate(list(cl.values())):
plt.scatter(np.array(data)[el,0], np.array(data)[el,1], s=300, color=colors[i%17], edgecolor="black")
for i,el in enumerate(list(cl.keys())):
plt.scatter(np.array(data)[el,0], np.array(data)[el,1], s=500, color="red", marker="X", edgecolor="black")
xmin, xmax, ymin, ymax = plt.axis()
xwidth = xmax - xmin
ywidth = ymax - ymin
xw1 = xwidth*0.01
yw1 = ywidth*0.01
xw2 = xwidth*0.005
yw2 = ywidth*0.01
xw3 = xwidth*0.01
yw3 = ywidth*0.01
for i, txt in enumerate(range(len(data))):
if len(str(txt))==2:
ax.annotate(txt, (np.array(data)[:,0][i]-xw1, np.array(data)[:,1][i]-yw1), fontsize=12, size=12)
elif len(str(txt))==1:
ax.annotate(txt, (np.array(data)[:,0][i]-xw2, np.array(data)[:,1][i]-yw2), fontsize=12, size=12)
else:
ax.annotate(txt, (np.array(data)[:,0][i]-xw3, np.array(data)[:,1][i]-yw3), fontsize=9, size=9)
if equal_axis_scale == True:
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
class clarans:
"""!
@brief Class represents clustering algorithm CLARANS (a method for clustering objects for spatial data mining).
"""
def __init__(self, data, number_clusters, numlocal, maxneighbor):
"""!
@brief Constructor of clustering algorithm CLARANS.
@details The higher the value of maxneighbor, the closer is CLARANS to K-Medoids, and the longer is each search of a local minima.
@param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple.
@param[in] number_clusters (uint): Amount of clusters that should be allocated.
@param[in] numlocal (uint): The number of local minima obtained (amount of iterations for solving the problem).
@param[in] maxneighbor (uint): The maximum number of neighbors examined.
"""
self.__pointer_data = data
self.__numlocal = numlocal
self.__maxneighbor = maxneighbor
self.__number_clusters = number_clusters
self.__clusters = []
self.__current = []
self.__belong = []
self.__optimal_medoids = []
self.__optimal_estimation = float('inf')
self.__verify_arguments()
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__pointer_data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__pointer_data))
if self.__number_clusters <= 0:
raise ValueError("Amount of cluster (current value: '%d') for allocation should be greater than 0." %
self.__number_clusters)
if self.__numlocal < 0:
raise ValueError("Local minima (current value: '%d') should be greater or equal to 0." % self.__numlocal)
if self.__maxneighbor < 0:
raise ValueError("Maximum number of neighbors (current value: '%d') should be greater or "
"equal to 0." % self.__maxneighbor)
|
@see get_clusters()
@see get_medoids()
"""
random.seed()
# loop for a numlocal number of times
for _ in range(0, self.__numlocal):
print("numlocal: ", _)
# set (current) random medoids
self.__current = random.sample(range(0, len(self.__pointer_data)), self.__number_clusters)
# update clusters in line with random allocated medoids
self.__update_clusters(self.__current)
# optimize configuration
self.__optimize_configuration()
# obtain cost of current cluster configuration and compare it with the best obtained
estimation = self.__calculate_estimation()
if estimation < self.__optimal_estimation:
print("Better configuration found with medoids: {0} and cost: {1}".format(self.__current[:], estimation))
self.__optimal_medoids = self.__current[:]
self.__optimal_estimation = estimation
if plotting == True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(self.__pointer_data,
dict(zip(self.__optimal_medoids,self.__clusters)))
else:
print("Configuration found does not improve current best one because its cost is {0}".format(estimation))
if plotting == True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(self.__pointer_data,
dict(zip(self.__optimal_medoids,self.__clusters)))
self.__update_clusters(self.__optimal_medoids)
return self
def get_clusters(self):
"""!
@brief Returns allocated clusters by the algorithm.
@remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty list is returned.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
@see get_medoids()
"""
return self.__clusters
def get_medoids(self):
"""!
@brief Returns list of medoids of allocated clusters.
@see process()
@see get_clusters()
"""
return self.__optimal_medoids
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __update_clusters(self, medoids):
"""!
@brief Forms cluster in line with specified medoids by calculation distance from each point to medoids.
"""
self.__belong = [0] * len(self.__pointer_data)
self.__clusters = [[] for i in range(len(medoids))]
for index_point in range(len(self.__pointer_data)):
index_optim = -1
dist_optim = 0.0
for index in range(len(medoids)):
dist = euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[medoids[index]])
if (dist < dist_optim) or (index is 0):
index_optim = index
dist_optim = dist
self.__clusters[index_optim].append(index_point)
self.__belong[index_point] = index_optim
# If cluster is not able to capture object it should be removed
self.__clusters = [cluster for cluster in self.__clusters if len(cluster) > 0]
def __optimize_configuration(self):
"""!
@brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules.
"""
index_neighbor = 0
counter = 0
while (index_neighbor < self.__maxneighbor):
# get random current medoid that is to be replaced
current_medoid_index = self.__current[random.randint(0, self.__number_clusters - 1)]
current_medoid_cluster_index = self.__belong[current_medoid_index]
# get new candidate to be medoid
candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)
while candidate_medoid_index in self.__current:
candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)
candidate_cost = 0.0
for point_index in range(0, len(self.__pointer_data)):
if point_index not in self.__current:
# get non-medoid point and its medoid
point_cluster_index = self.__belong[point_index]
point_medoid_index = self.__current[point_cluster_index]
# get other medoid that is nearest to the point (except current and candidate)
other_medoid_index = self.__find_another_nearest_medoid(point_index, current_medoid_index)
other_medoid_cluster_index = self.__belong[other_medoid_index]
# for optimization calculate
|
def process(self, plotting=False):
"""!
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@return (clarans) Returns itself (CLARANS instance).
|
random_line_split
|
client.rs
|
}
struct GrpcResponseHandlerTyped<Req : Send + 'static, Resp : Send + 'static> {
method: Arc<MethodDescriptor<Req, Resp>>,
complete: tokio_core::channel::Sender<ResultOrEof<Resp, GrpcError>>,
remaining_response: Vec<u8>,
}
impl<Req : Send + 'static, Resp : Send + 'static> GrpcResponseHandlerTrait for GrpcResponseHandlerTyped<Req, Resp> {
}
impl<Req : Send + 'static, Resp : Send + 'static> HttpClientResponseHandler for GrpcResponseHandlerTyped<Req, Resp> {
fn headers(&mut self, headers: Vec<StaticHeader>) -> bool {
println!("client: received headers");
if slice_get_header(&headers, ":status") != Some("200") {
if let Some(message) = slice_get_header(&headers, HEADER_GRPC_MESSAGE) {
self.complete.send(ResultOrEof::Error(GrpcError::GrpcMessage(GrpcMessageError { grpc_message: message.to_owned() }))).unwrap();
} else {
self.complete.send(ResultOrEof::Error(GrpcError::Other("not 200"))).unwrap();
}
false
} else {
true
}
}
fn data_frame(&mut self, chunk: Vec<u8>) -> bool {
self.remaining_response.extend(&chunk);
loop {
let len = match parse_grpc_frame(&self.remaining_response) {
Err(e) => {
self.complete.send(ResultOrEof::Error(e)).unwrap();
return false;
}
Ok(None) => break,
Ok(Some((message, len))) => {
let resp = self.method.resp_marshaller.read(&message);
self.complete.send(From::from(resp)).ok();
len
}
};
self.remaining_response.drain(..len);
}
true
}
fn trailers(&mut self, headers: Vec<StaticHeader>) -> bool {
let _status_200 = slice_get_header(&headers, ":status") == Some("200");
let grpc_status_0 = slice_get_header(&headers, HEADER_GRPC_STATUS) == Some("0");
if /* status_200 && */ grpc_status_0 {
true
} else {
if let Some(message) = slice_get_header(&headers, HEADER_GRPC_MESSAGE) {
self.complete.send(ResultOrEof::Error(GrpcError::GrpcMessage(GrpcMessageError { grpc_message: message.to_owned() }))).unwrap();
} else {
self.complete.send(ResultOrEof::Error(GrpcError::Other("not xxx"))).unwrap();
}
false
}
}
fn end(&mut self) {
self.complete.send(ResultOrEof::Eof).unwrap();
}
}
struct GrpcResponseHandler {
tr: Box<GrpcResponseHandlerTrait>,
}
impl HttpClientResponseHandler for GrpcResponseHandler {
fn headers(&mut self, headers: Vec<StaticHeader>) -> bool {
self.tr.headers(headers)
|
fn data_frame(&mut self, chunk: Vec<u8>) -> bool {
self.tr.data_frame(chunk)
}
fn trailers(&mut self, headers: Vec<StaticHeader>) -> bool {
self.tr.trailers(headers)
}
fn end(&mut self) {
self.tr.end()
}
}
// Data sent from event loop to GrpcClient
struct LoopToClient {
// used only once to send shutdown signal
shutdown_tx: tokio_core::channel::Sender<()>,
loop_handle: reactor::Remote,
http_conn: Arc<HttpClientConnectionAsync<GrpcResponseHandler>>,
}
fn _assert_loop_to_client() {
assert_send::<reactor::Remote>();
assert_send::<HttpClientConnectionAsync<GrpcResponseHandler>>();
assert_send::<HttpClientConnectionAsync<GrpcResponseHandler>>();
assert_sync::<HttpClientConnectionAsync<GrpcResponseHandler>>();
assert_send::<Arc<HttpClientConnectionAsync<GrpcResponseHandler>>>();
assert_send::<tokio_core::channel::Sender<()>>();
assert_send::<LoopToClient>();
}
/// gRPC client implementation.
/// Used by generated code.
pub struct GrpcClient {
loop_to_client: LoopToClient,
thread_join_handle: Option<thread::JoinHandle<()>>,
host: String,
http_scheme: HttpScheme,
}
impl GrpcClient {
/// Create a client connected to specified host and port.
pub fn new(host: &str, port: u16) -> GrpcResult<GrpcClient> {
// TODO: sync
// TODO: try connect to all addrs
let socket_addr = try!((host, port).to_socket_addrs()).next().unwrap();
// We need some data back from event loop.
// This channel is used to exchange that data
let (get_from_loop_tx, get_from_loop_rx) = mpsc::channel();
// Start event loop.
let join_handle = thread::spawn(move || {
run_client_event_loop(socket_addr, get_from_loop_tx);
});
// Get back call channel and shutdown channel.
let loop_to_client = try!(get_from_loop_rx.recv()
.map_err(|_| GrpcError::Other("get response from loop")));
Ok(GrpcClient {
loop_to_client: loop_to_client,
thread_join_handle: Some(join_handle),
host: host.to_owned(),
http_scheme: HttpScheme::Http,
})
}
pub fn new_resp_channel<Resp : Send + 'static>(&self)
-> futures::Oneshot<(tokio_core::channel::Sender<ResultOrEof<Resp, GrpcError>>, GrpcStreamSend<Resp>)>
{
let (one_sender, one_receiver) = futures::oneshot();
self.loop_to_client.loop_handle.spawn(move |handle| {
let (sender, receiver) = tokio_core::channel::channel(&handle).unwrap();
let receiver: GrpcStreamSend<ResultOrEof<Resp, GrpcError>> = Box::new(receiver.map_err(GrpcError::from));
let receiver: GrpcStreamSend<Resp> = Box::new(stream_with_eof_and_error(receiver));
one_sender.complete((sender, receiver));
futures::finished(())
});
one_receiver
}
pub fn call_impl<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcStreamSend<Resp>
{
let host = self.host.clone();
let http_scheme = self.http_scheme.clone();
let http_conn = self.loop_to_client.http_conn.clone();
// A channel to send response back to caller
let future = self.new_resp_channel().map_err(GrpcError::from).and_then(move |(complete, receiver)| {
let headers = vec![
Header::new(":method", "POST"),
Header::new(":path", method.name.clone()),
Header::new(":authority", host.clone()),
Header::new(":scheme", http_scheme.as_bytes()),
];
let request_frames = {
let method = method.clone();
req
.and_then(move |req| {
let grpc_frame = try!(method.req_marshaller.write(&req));
Ok(write_grpc_frame_to_vec(&grpc_frame))
})
.map_err(|e| HttpError::Other(Box::new(e)))
};
let start_request = http_conn.start_request(
headers,
Box::new(request_frames),
GrpcResponseHandler {
tr: Box::new(GrpcResponseHandlerTyped {
method: method.clone(),
complete: complete,
remaining_response: Vec::new(),
}),
}
).map_err(GrpcError::from);
let receiver: GrpcStreamSend<Resp> = receiver;
start_request.map(move |()| receiver)
});
let s: GrpcStreamSend<Resp> = future_flatten_to_stream(future);
s
}
pub fn call_unary<Req : Send + 'static, Resp : Send + 'static>(&self, req: Req, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcFutureSend<Resp>
{
stream_single_send(self.call_impl(Box::new(stream_once_send(req)), method))
}
pub fn call_server_streaming<Req : Send + 'static, Resp : Send + 'static>(&self, req: Req, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcStreamSend<Resp>
{
self.call_impl(stream_once_send(req).boxed(), method)
}
pub fn call_client_streaming<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcFutureSend<Resp>
{
stream_single_send(self.call_impl(req, method))
}
pub fn call_bidi<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcStreamSend<Resp>
{
self.call_impl(req, method)
}
}
// We shutdown client in destructor.
impl Drop for GrpcClient {
fn drop(&mut self) {
// ignore error because even loop may be already dead
self.loop_to_client.shutdown_tx.send(()
|
}
|
random_line_split
|
client.rs
|
}
struct GrpcResponseHandlerTyped<Req : Send + 'static, Resp : Send + 'static> {
method: Arc<MethodDescriptor<Req, Resp>>,
complete: tokio_core::channel::Sender<ResultOrEof<Resp, GrpcError>>,
remaining_response: Vec<u8>,
}
impl<Req : Send + 'static, Resp : Send + 'static> GrpcResponseHandlerTrait for GrpcResponseHandlerTyped<Req, Resp> {
}
impl<Req : Send + 'static, Resp : Send + 'static> HttpClientResponseHandler for GrpcResponseHandlerTyped<Req, Resp> {
fn headers(&mut self, headers: Vec<StaticHeader>) -> bool {
println!("client: received headers");
if slice_get_header(&headers, ":status") != Some("200") {
if let Some(message) = slice_get_header(&headers, HEADER_GRPC_MESSAGE) {
self.complete.send(ResultOrEof::Error(GrpcError::GrpcMessage(GrpcMessageError { grpc_message: message.to_owned() }))).unwrap();
} else {
self.complete.send(ResultOrEof::Error(GrpcError::Other("not 200"))).unwrap();
}
false
} else {
true
}
}
fn data_frame(&mut self, chunk: Vec<u8>) -> bool {
self.remaining_response.extend(&chunk);
loop {
let len = match parse_grpc_frame(&self.remaining_response) {
Err(e) => {
self.complete.send(ResultOrEof::Error(e)).unwrap();
return false;
}
Ok(None) => break,
Ok(Some((message, len))) => {
let resp = self.method.resp_marshaller.read(&message);
self.complete.send(From::from(resp)).ok();
len
}
};
self.remaining_response.drain(..len);
}
true
}
fn trailers(&mut self, headers: Vec<StaticHeader>) -> bool {
let _status_200 = slice_get_header(&headers, ":status") == Some("200");
let grpc_status_0 = slice_get_header(&headers, HEADER_GRPC_STATUS) == Some("0");
if /* status_200 && */ grpc_status_0 {
true
} else {
if let Some(message) = slice_get_header(&headers, HEADER_GRPC_MESSAGE) {
self.complete.send(ResultOrEof::Error(GrpcError::GrpcMessage(GrpcMessageError { grpc_message: message.to_owned() }))).unwrap();
} else {
self.complete.send(ResultOrEof::Error(GrpcError::Other("not xxx"))).unwrap();
}
false
}
}
fn end(&mut self) {
self.complete.send(ResultOrEof::Eof).unwrap();
}
}
struct GrpcResponseHandler {
tr: Box<GrpcResponseHandlerTrait>,
}
impl HttpClientResponseHandler for GrpcResponseHandler {
fn headers(&mut self, headers: Vec<StaticHeader>) -> bool {
self.tr.headers(headers)
}
fn data_frame(&mut self, chunk: Vec<u8>) -> bool {
self.tr.data_frame(chunk)
}
fn trailers(&mut self, headers: Vec<StaticHeader>) -> bool {
self.tr.trailers(headers)
}
fn end(&mut self) {
self.tr.end()
}
}
// Data sent from event loop to GrpcClient
struct LoopToClient {
// used only once to send shutdown signal
shutdown_tx: tokio_core::channel::Sender<()>,
loop_handle: reactor::Remote,
http_conn: Arc<HttpClientConnectionAsync<GrpcResponseHandler>>,
}
fn _assert_loop_to_client() {
assert_send::<reactor::Remote>();
assert_send::<HttpClientConnectionAsync<GrpcResponseHandler>>();
assert_send::<HttpClientConnectionAsync<GrpcResponseHandler>>();
assert_sync::<HttpClientConnectionAsync<GrpcResponseHandler>>();
assert_send::<Arc<HttpClientConnectionAsync<GrpcResponseHandler>>>();
assert_send::<tokio_core::channel::Sender<()>>();
assert_send::<LoopToClient>();
}
/// gRPC client implementation.
/// Used by generated code.
pub struct GrpcClient {
loop_to_client: LoopToClient,
thread_join_handle: Option<thread::JoinHandle<()>>,
host: String,
http_scheme: HttpScheme,
}
impl GrpcClient {
/// Create a client connected to specified host and port.
pub fn new(host: &str, port: u16) -> GrpcResult<GrpcClient> {
// TODO: sync
// TODO: try connect to all addrs
let socket_addr = try!((host, port).to_socket_addrs()).next().unwrap();
// We need some data back from event loop.
// This channel is used to exchange that data
let (get_from_loop_tx, get_from_loop_rx) = mpsc::channel();
// Start event loop.
let join_handle = thread::spawn(move || {
run_client_event_loop(socket_addr, get_from_loop_tx);
});
// Get back call channel and shutdown channel.
let loop_to_client = try!(get_from_loop_rx.recv()
.map_err(|_| GrpcError::Other("get response from loop")));
Ok(GrpcClient {
loop_to_client: loop_to_client,
thread_join_handle: Some(join_handle),
host: host.to_owned(),
http_scheme: HttpScheme::Http,
})
}
pub fn new_resp_channel<Resp : Send + 'static>(&self)
-> futures::Oneshot<(tokio_core::channel::Sender<ResultOrEof<Resp, GrpcError>>, GrpcStreamSend<Resp>)>
{
let (one_sender, one_receiver) = futures::oneshot();
self.loop_to_client.loop_handle.spawn(move |handle| {
let (sender, receiver) = tokio_core::channel::channel(&handle).unwrap();
let receiver: GrpcStreamSend<ResultOrEof<Resp, GrpcError>> = Box::new(receiver.map_err(GrpcError::from));
let receiver: GrpcStreamSend<Resp> = Box::new(stream_with_eof_and_error(receiver));
one_sender.complete((sender, receiver));
futures::finished(())
});
one_receiver
}
pub fn call_impl<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcStreamSend<Resp>
{
let host = self.host.clone();
let http_scheme = self.http_scheme.clone();
let http_conn = self.loop_to_client.http_conn.clone();
// A channel to send response back to caller
let future = self.new_resp_channel().map_err(GrpcError::from).and_then(move |(complete, receiver)| {
let headers = vec![
Header::new(":method", "POST"),
Header::new(":path", method.name.clone()),
Header::new(":authority", host.clone()),
Header::new(":scheme", http_scheme.as_bytes()),
];
let request_frames = {
let method = method.clone();
req
.and_then(move |req| {
let grpc_frame = try!(method.req_marshaller.write(&req));
Ok(write_grpc_frame_to_vec(&grpc_frame))
})
.map_err(|e| HttpError::Other(Box::new(e)))
};
let start_request = http_conn.start_request(
headers,
Box::new(request_frames),
GrpcResponseHandler {
tr: Box::new(GrpcResponseHandlerTyped {
method: method.clone(),
complete: complete,
remaining_response: Vec::new(),
}),
}
).map_err(GrpcError::from);
let receiver: GrpcStreamSend<Resp> = receiver;
start_request.map(move |()| receiver)
});
let s: GrpcStreamSend<Resp> = future_flatten_to_stream(future);
s
}
pub fn call_unary<Req : Send + 'static, Resp : Send + 'static>(&self, req: Req, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcFutureSend<Resp>
{
stream_single_send(self.call_impl(Box::new(stream_once_send(req)), method))
}
pub fn call_server_streaming<Req : Send + 'static, Resp : Send + 'static>(&self, req: Req, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcStreamSend<Resp>
{
self.call_impl(stream_once_send(req).boxed(), method)
}
pub fn
|
<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcFutureSend<Resp>
{
stream_single_send(self.call_impl(req, method))
}
pub fn call_bidi<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcStreamSend<Resp>
{
self.call_impl(req, method)
}
}
// We shutdown client in destructor.
impl Drop for GrpcClient {
fn drop(&mut self) {
// ignore error because even loop may be already dead
self.loop_to_client.shutdown_tx.send(()
|
call_client_streaming
|
identifier_name
|
client.rs
|
io_core::channel::Sender<ResultOrEof<Resp, GrpcError>>,
remaining_response: Vec<u8>,
}
impl<Req : Send + 'static, Resp : Send + 'static> GrpcResponseHandlerTrait for GrpcResponseHandlerTyped<Req, Resp> {
}
impl<Req : Send + 'static, Resp : Send + 'static> HttpClientResponseHandler for GrpcResponseHandlerTyped<Req, Resp> {
fn headers(&mut self, headers: Vec<StaticHeader>) -> bool {
println!("client: received headers");
if slice_get_header(&headers, ":status") != Some("200") {
if let Some(message) = slice_get_header(&headers, HEADER_GRPC_MESSAGE) {
self.complete.send(ResultOrEof::Error(GrpcError::GrpcMessage(GrpcMessageError { grpc_message: message.to_owned() }))).unwrap();
} else {
self.complete.send(ResultOrEof::Error(GrpcError::Other("not 200"))).unwrap();
}
false
} else {
true
}
}
fn data_frame(&mut self, chunk: Vec<u8>) -> bool {
self.remaining_response.extend(&chunk);
loop {
let len = match parse_grpc_frame(&self.remaining_response) {
Err(e) => {
self.complete.send(ResultOrEof::Error(e)).unwrap();
return false;
}
Ok(None) => break,
Ok(Some((message, len))) => {
let resp = self.method.resp_marshaller.read(&message);
self.complete.send(From::from(resp)).ok();
len
}
};
self.remaining_response.drain(..len);
}
true
}
fn trailers(&mut self, headers: Vec<StaticHeader>) -> bool {
let _status_200 = slice_get_header(&headers, ":status") == Some("200");
let grpc_status_0 = slice_get_header(&headers, HEADER_GRPC_STATUS) == Some("0");
if /* status_200 && */ grpc_status_0 {
true
} else {
if let Some(message) = slice_get_header(&headers, HEADER_GRPC_MESSAGE) {
self.complete.send(ResultOrEof::Error(GrpcError::GrpcMessage(GrpcMessageError { grpc_message: message.to_owned() }))).unwrap();
} else {
self.complete.send(ResultOrEof::Error(GrpcError::Other("not xxx"))).unwrap();
}
false
}
}
fn end(&mut self) {
self.complete.send(ResultOrEof::Eof).unwrap();
}
}
struct GrpcResponseHandler {
tr: Box<GrpcResponseHandlerTrait>,
}
impl HttpClientResponseHandler for GrpcResponseHandler {
fn headers(&mut self, headers: Vec<StaticHeader>) -> bool {
self.tr.headers(headers)
}
fn data_frame(&mut self, chunk: Vec<u8>) -> bool {
self.tr.data_frame(chunk)
}
fn trailers(&mut self, headers: Vec<StaticHeader>) -> bool {
self.tr.trailers(headers)
}
fn end(&mut self) {
self.tr.end()
}
}
// Data sent from event loop to GrpcClient
struct LoopToClient {
// used only once to send shutdown signal
shutdown_tx: tokio_core::channel::Sender<()>,
loop_handle: reactor::Remote,
http_conn: Arc<HttpClientConnectionAsync<GrpcResponseHandler>>,
}
fn _assert_loop_to_client() {
assert_send::<reactor::Remote>();
assert_send::<HttpClientConnectionAsync<GrpcResponseHandler>>();
assert_send::<HttpClientConnectionAsync<GrpcResponseHandler>>();
assert_sync::<HttpClientConnectionAsync<GrpcResponseHandler>>();
assert_send::<Arc<HttpClientConnectionAsync<GrpcResponseHandler>>>();
assert_send::<tokio_core::channel::Sender<()>>();
assert_send::<LoopToClient>();
}
/// gRPC client implementation.
/// Used by generated code.
pub struct GrpcClient {
loop_to_client: LoopToClient,
thread_join_handle: Option<thread::JoinHandle<()>>,
host: String,
http_scheme: HttpScheme,
}
impl GrpcClient {
/// Create a client connected to specified host and port.
pub fn new(host: &str, port: u16) -> GrpcResult<GrpcClient> {
// TODO: sync
// TODO: try connect to all addrs
let socket_addr = try!((host, port).to_socket_addrs()).next().unwrap();
// We need some data back from event loop.
// This channel is used to exchange that data
let (get_from_loop_tx, get_from_loop_rx) = mpsc::channel();
// Start event loop.
let join_handle = thread::spawn(move || {
run_client_event_loop(socket_addr, get_from_loop_tx);
});
// Get back call channel and shutdown channel.
let loop_to_client = try!(get_from_loop_rx.recv()
.map_err(|_| GrpcError::Other("get response from loop")));
Ok(GrpcClient {
loop_to_client: loop_to_client,
thread_join_handle: Some(join_handle),
host: host.to_owned(),
http_scheme: HttpScheme::Http,
})
}
pub fn new_resp_channel<Resp : Send + 'static>(&self)
-> futures::Oneshot<(tokio_core::channel::Sender<ResultOrEof<Resp, GrpcError>>, GrpcStreamSend<Resp>)>
{
let (one_sender, one_receiver) = futures::oneshot();
self.loop_to_client.loop_handle.spawn(move |handle| {
let (sender, receiver) = tokio_core::channel::channel(&handle).unwrap();
let receiver: GrpcStreamSend<ResultOrEof<Resp, GrpcError>> = Box::new(receiver.map_err(GrpcError::from));
let receiver: GrpcStreamSend<Resp> = Box::new(stream_with_eof_and_error(receiver));
one_sender.complete((sender, receiver));
futures::finished(())
});
one_receiver
}
pub fn call_impl<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcStreamSend<Resp>
{
let host = self.host.clone();
let http_scheme = self.http_scheme.clone();
let http_conn = self.loop_to_client.http_conn.clone();
// A channel to send response back to caller
let future = self.new_resp_channel().map_err(GrpcError::from).and_then(move |(complete, receiver)| {
let headers = vec![
Header::new(":method", "POST"),
Header::new(":path", method.name.clone()),
Header::new(":authority", host.clone()),
Header::new(":scheme", http_scheme.as_bytes()),
];
let request_frames = {
let method = method.clone();
req
.and_then(move |req| {
let grpc_frame = try!(method.req_marshaller.write(&req));
Ok(write_grpc_frame_to_vec(&grpc_frame))
})
.map_err(|e| HttpError::Other(Box::new(e)))
};
let start_request = http_conn.start_request(
headers,
Box::new(request_frames),
GrpcResponseHandler {
tr: Box::new(GrpcResponseHandlerTyped {
method: method.clone(),
complete: complete,
remaining_response: Vec::new(),
}),
}
).map_err(GrpcError::from);
let receiver: GrpcStreamSend<Resp> = receiver;
start_request.map(move |()| receiver)
});
let s: GrpcStreamSend<Resp> = future_flatten_to_stream(future);
s
}
pub fn call_unary<Req : Send + 'static, Resp : Send + 'static>(&self, req: Req, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcFutureSend<Resp>
{
stream_single_send(self.call_impl(Box::new(stream_once_send(req)), method))
}
pub fn call_server_streaming<Req : Send + 'static, Resp : Send + 'static>(&self, req: Req, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcStreamSend<Resp>
{
self.call_impl(stream_once_send(req).boxed(), method)
}
pub fn call_client_streaming<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcFutureSend<Resp>
{
stream_single_send(self.call_impl(req, method))
}
pub fn call_bidi<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>)
-> GrpcStreamSend<Resp>
{
self.call_impl(req, method)
}
}
// We shutdown client in destructor.
impl Drop for GrpcClient {
fn drop(&mut self)
|
{
// ignore error because even loop may be already dead
self.loop_to_client.shutdown_tx.send(()).ok();
// do not ignore errors because we own event loop thread
self.thread_join_handle.take().expect("handle.take")
.join().expect("join thread");
}
|
identifier_body
|
|
fixed.rs
|
}
/// Encodes a value of a particular fixed width type into bytes according to the rules
/// described on [`super::RowConverter`]
pub trait FixedLengthEncoding: Copy {
const ENCODED_LEN: usize = 1 + std::mem::size_of::<Self::Encoded>();
type Encoded: Sized + Copy + FromSlice + AsRef<[u8]> + AsMut<[u8]>;
fn encode(self) -> Self::Encoded;
fn decode(encoded: Self::Encoded) -> Self;
}
impl FixedLengthEncoding for bool {
type Encoded = [u8; 1];
fn encode(self) -> [u8; 1] {
[self as u8]
}
fn decode(encoded: Self::Encoded) -> Self {
encoded[0] != 0
}
}
macro_rules! encode_signed {
($n:expr, $t:ty) => {
impl FixedLengthEncoding for $t {
type Encoded = [u8; $n];
fn encode(self) -> [u8; $n] {
let mut b = self.to_be_bytes();
// Toggle top "sign" bit to ensure consistent sort order
b[0] ^= 0x80;
b
}
fn decode(mut encoded: Self::Encoded) -> Self {
// Toggle top "sign" bit
encoded[0] ^= 0x80;
Self::from_be_bytes(encoded)
}
}
};
}
encode_signed!(1, i8);
encode_signed!(2, i16);
encode_signed!(4, i32);
encode_signed!(8, i64);
encode_signed!(16, i128);
encode_signed!(32, i256);
macro_rules! encode_unsigned {
($n:expr, $t:ty) => {
impl FixedLengthEncoding for $t {
type Encoded = [u8; $n];
fn encode(self) -> [u8; $n] {
self.to_be_bytes()
}
fn decode(encoded: Self::Encoded) -> Self {
Self::from_be_bytes(encoded)
}
}
};
}
encode_unsigned!(1, u8);
encode_unsigned!(2, u16);
encode_unsigned!(4, u32);
encode_unsigned!(8, u64);
impl FixedLengthEncoding for f16 {
type Encoded = [u8; 2];
fn encode(self) -> [u8; 2] {
// https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260
let s = self.to_bits() as i16;
let val = s ^ (((s >> 15) as u16) >> 1) as i16;
val.encode()
}
fn decode(encoded: Self::Encoded) -> Self {
let bits = i16::decode(encoded);
let val = bits ^ (((bits >> 15) as u16) >> 1) as i16;
Self::from_bits(val as u16)
}
}
impl FixedLengthEncoding for f32 {
type Encoded = [u8; 4];
fn encode(self) -> [u8; 4] {
// https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260
let s = self.to_bits() as i32;
let val = s ^ (((s >> 31) as u32) >> 1) as i32;
val.encode()
}
fn decode(encoded: Self::Encoded) -> Self {
let bits = i32::decode(encoded);
let val = bits ^ (((bits >> 31) as u32) >> 1) as i32;
Self::from_bits(val as u32)
}
}
impl FixedLengthEncoding for f64 {
type Encoded = [u8; 8];
fn encode(self) -> [u8; 8] {
// https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260
let s = self.to_bits() as i64;
let val = s ^ (((s >> 63) as u64) >> 1) as i64;
val.encode()
}
fn decode(encoded: Self::Encoded) -> Self {
let bits = i64::decode(encoded);
let val = bits ^ (((bits >> 63) as u64) >> 1) as i64;
Self::from_bits(val as u64)
}
}
/// Returns the total encoded length (including null byte) for a value of type `T::Native`
pub const fn
|
<T>(_col: &PrimitiveArray<T>) -> usize
where
T: ArrowPrimitiveType,
T::Native: FixedLengthEncoding,
{
T::Native::ENCODED_LEN
}
/// Fixed width types are encoded as
///
/// - 1 byte `0` if null or `1` if valid
/// - bytes of [`FixedLengthEncoding`]
pub fn encode<T: FixedLengthEncoding, I: IntoIterator<Item = Option<T>>>(
data: &mut [u8],
offsets: &mut [usize],
i: I,
opts: SortOptions,
) {
for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(i) {
let end_offset = *offset + T::ENCODED_LEN;
if let Some(val) = maybe_val {
let to_write = &mut data[*offset..end_offset];
to_write[0] = 1;
let mut encoded = val.encode();
if opts.descending {
// Flip bits to reverse order
encoded.as_mut().iter_mut().for_each(|v| *v = !*v)
}
to_write[1..].copy_from_slice(encoded.as_ref())
} else {
data[*offset] = null_sentinel(opts);
}
*offset = end_offset;
}
}
pub fn encode_fixed_size_binary(
data: &mut [u8],
offsets: &mut [usize],
array: &FixedSizeBinaryArray,
opts: SortOptions,
) {
let len = array.value_length() as usize;
for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(array.iter()) {
let end_offset = *offset + len + 1;
if let Some(val) = maybe_val {
let to_write = &mut data[*offset..end_offset];
to_write[0] = 1;
to_write[1..].copy_from_slice(&val[..len]);
if opts.descending {
// Flip bits to reverse order
to_write[1..1 + len].iter_mut().for_each(|v| *v = !*v)
}
} else {
data[*offset] = null_sentinel(opts);
}
*offset = end_offset;
}
}
/// Splits `len` bytes from `src`
#[inline]
fn split_off<'a>(src: &mut &'a [u8], len: usize) -> &'a [u8] {
let v = &src[..len];
*src = &src[len..];
v
}
/// Decodes a `BooleanArray` from rows
pub fn decode_bool(rows: &mut [&[u8]], options: SortOptions) -> BooleanArray {
let true_val = match options.descending {
true => !1,
false => 1,
};
let len = rows.len();
let mut null_count = 0;
let mut nulls = MutableBuffer::new(bit_util::ceil(len, 64) * 8);
let mut values = MutableBuffer::new(bit_util::ceil(len, 64) * 8);
let chunks = len / 64;
let remainder = len % 64;
for chunk in 0..chunks {
let mut null_packed = 0;
let mut values_packed = 0;
for bit_idx in 0..64 {
let i = split_off(&mut rows[bit_idx + chunk * 64], 2);
let (null, value) = (i[0] == 1, i[1] == true_val);
null_count += !null as usize;
null_packed |= (null as u64) << bit_idx;
values_packed |= (value as u64) << bit_idx;
}
nulls.push(null_packed);
values.push(values_packed);
}
if remainder != 0 {
let mut null_packed = 0;
let mut values_packed = 0;
for bit_idx in 0..remainder {
let i = split_off(&mut rows[bit_idx + chunks
|
encoded_len
|
identifier_name
|
fixed.rs
|
}
/// Encodes a value of a particular fixed width type into bytes according to the rules
/// described on [`super::RowConverter`]
pub trait FixedLengthEncoding: Copy {
const ENCODED_LEN: usize = 1 + std::mem::size_of::<Self::Encoded>();
type Encoded: Sized + Copy + FromSlice + AsRef<[u8]> + AsMut<[u8]>;
fn encode(self) -> Self::Encoded;
fn decode(encoded: Self::Encoded) -> Self;
}
impl FixedLengthEncoding for bool {
type Encoded = [u8; 1];
fn encode(self) -> [u8; 1] {
[self as u8]
}
fn decode(encoded: Self::Encoded) -> Self {
encoded[0] != 0
}
}
macro_rules! encode_signed {
($n:expr, $t:ty) => {
impl FixedLengthEncoding for $t {
type Encoded = [u8; $n];
fn encode(self) -> [u8; $n] {
let mut b = self.to_be_bytes();
// Toggle top "sign" bit to ensure consistent sort order
b[0] ^= 0x80;
b
}
fn decode(mut encoded: Self::Encoded) -> Self {
// Toggle top "sign" bit
encoded[0] ^= 0x80;
Self::from_be_bytes(encoded)
}
}
};
}
encode_signed!(1, i8);
encode_signed!(2, i16);
encode_signed!(4, i32);
encode_signed!(8, i64);
encode_signed!(16, i128);
encode_signed!(32, i256);
macro_rules! encode_unsigned {
($n:expr, $t:ty) => {
impl FixedLengthEncoding for $t {
type Encoded = [u8; $n];
fn encode(self) -> [u8; $n] {
self.to_be_bytes()
}
fn decode(encoded: Self::Encoded) -> Self {
Self::from_be_bytes(encoded)
}
}
};
}
encode_unsigned!(1, u8);
encode_unsigned!(2, u16);
encode_unsigned!(4, u32);
encode_unsigned!(8, u64);
impl FixedLengthEncoding for f16 {
type Encoded = [u8; 2];
fn encode(self) -> [u8; 2] {
// https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260
let s = self.to_bits() as i16;
let val = s ^ (((s >> 15) as u16) >> 1) as i16;
val.encode()
}
fn decode(encoded: Self::Encoded) -> Self {
let bits = i16::decode(encoded);
let val = bits ^ (((bits >> 15) as u16) >> 1) as i16;
Self::from_bits(val as u16)
}
}
impl FixedLengthEncoding for f32 {
type Encoded = [u8; 4];
fn encode(self) -> [u8; 4] {
// https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260
let s = self.to_bits() as i32;
let val = s ^ (((s >> 31) as u32) >> 1) as i32;
val.encode()
}
fn decode(encoded: Self::Encoded) -> Self {
let bits = i32::decode(encoded);
let val = bits ^ (((bits >> 31) as u32) >> 1) as i32;
Self::from_bits(val as u32)
}
}
|
fn encode(self) -> [u8; 8] {
// https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260
let s = self.to_bits() as i64;
let val = s ^ (((s >> 63) as u64) >> 1) as i64;
val.encode()
}
fn decode(encoded: Self::Encoded) -> Self {
let bits = i64::decode(encoded);
let val = bits ^ (((bits >> 63) as u64) >> 1) as i64;
Self::from_bits(val as u64)
}
}
/// Returns the total encoded length (including null byte) for a value of type `T::Native`
pub const fn encoded_len<T>(_col: &PrimitiveArray<T>) -> usize
where
T: ArrowPrimitiveType,
T::Native: FixedLengthEncoding,
{
T::Native::ENCODED_LEN
}
/// Fixed width types are encoded as
///
/// - 1 byte `0` if null or `1` if valid
/// - bytes of [`FixedLengthEncoding`]
pub fn encode<T: FixedLengthEncoding, I: IntoIterator<Item = Option<T>>>(
data: &mut [u8],
offsets: &mut [usize],
i: I,
opts: SortOptions,
) {
for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(i) {
let end_offset = *offset + T::ENCODED_LEN;
if let Some(val) = maybe_val {
let to_write = &mut data[*offset..end_offset];
to_write[0] = 1;
let mut encoded = val.encode();
if opts.descending {
// Flip bits to reverse order
encoded.as_mut().iter_mut().for_each(|v| *v = !*v)
}
to_write[1..].copy_from_slice(encoded.as_ref())
} else {
data[*offset] = null_sentinel(opts);
}
*offset = end_offset;
}
}
pub fn encode_fixed_size_binary(
data: &mut [u8],
offsets: &mut [usize],
array: &FixedSizeBinaryArray,
opts: SortOptions,
) {
let len = array.value_length() as usize;
for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(array.iter()) {
let end_offset = *offset + len + 1;
if let Some(val) = maybe_val {
let to_write = &mut data[*offset..end_offset];
to_write[0] = 1;
to_write[1..].copy_from_slice(&val[..len]);
if opts.descending {
// Flip bits to reverse order
to_write[1..1 + len].iter_mut().for_each(|v| *v = !*v)
}
} else {
data[*offset] = null_sentinel(opts);
}
*offset = end_offset;
}
}
/// Splits `len` bytes from `src`
#[inline]
fn split_off<'a>(src: &mut &'a [u8], len: usize) -> &'a [u8] {
let v = &src[..len];
*src = &src[len..];
v
}
/// Decodes a `BooleanArray` from rows
pub fn decode_bool(rows: &mut [&[u8]], options: SortOptions) -> BooleanArray {
let true_val = match options.descending {
true => !1,
false => 1,
};
let len = rows.len();
let mut null_count = 0;
let mut nulls = MutableBuffer::new(bit_util::ceil(len, 64) * 8);
let mut values = MutableBuffer::new(bit_util::ceil(len, 64) * 8);
let chunks = len / 64;
let remainder = len % 64;
for chunk in 0..chunks {
let mut null_packed = 0;
let mut values_packed = 0;
for bit_idx in 0..64 {
let i = split_off(&mut rows[bit_idx + chunk * 64], 2);
let (null, value) = (i[0] == 1, i[1] == true_val);
null_count += !null as usize;
null_packed |= (null as u64) << bit_idx;
values_packed |= (value as u64) << bit_idx;
}
nulls.push(null_packed);
values.push(values_packed);
}
if remainder != 0 {
let mut null_packed = 0;
let mut values_packed = 0;
for bit_idx in 0..remainder {
let i = split_off(&mut rows[bit_idx + chunks
|
impl FixedLengthEncoding for f64 {
type Encoded = [u8; 8];
|
random_line_split
|
fixed.rs
|
}
/// Encodes a value of a particular fixed width type into bytes according to the rules
/// described on [`super::RowConverter`]
pub trait FixedLengthEncoding: Copy {
const ENCODED_LEN: usize = 1 + std::mem::size_of::<Self::Encoded>();
type Encoded: Sized + Copy + FromSlice + AsRef<[u8]> + AsMut<[u8]>;
fn encode(self) -> Self::Encoded;
fn decode(encoded: Self::Encoded) -> Self;
}
impl FixedLengthEncoding for bool {
type Encoded = [u8; 1];
fn encode(self) -> [u8; 1] {
[self as u8]
}
fn decode(encoded: Self::Encoded) -> Self {
encoded[0] != 0
}
}
macro_rules! encode_signed {
($n:expr, $t:ty) => {
impl FixedLengthEncoding for $t {
type Encoded = [u8; $n];
fn encode(self) -> [u8; $n] {
let mut b = self.to_be_bytes();
// Toggle top "sign" bit to ensure consistent sort order
b[0] ^= 0x80;
b
}
fn decode(mut encoded: Self::Encoded) -> Self {
// Toggle top "sign" bit
encoded[0] ^= 0x80;
Self::from_be_bytes(encoded)
}
}
};
}
encode_signed!(1, i8);
encode_signed!(2, i16);
encode_signed!(4, i32);
encode_signed!(8, i64);
encode_signed!(16, i128);
encode_signed!(32, i256);
macro_rules! encode_unsigned {
($n:expr, $t:ty) => {
impl FixedLengthEncoding for $t {
type Encoded = [u8; $n];
fn encode(self) -> [u8; $n] {
self.to_be_bytes()
}
fn decode(encoded: Self::Encoded) -> Self {
Self::from_be_bytes(encoded)
}
}
};
}
encode_unsigned!(1, u8);
encode_unsigned!(2, u16);
encode_unsigned!(4, u32);
encode_unsigned!(8, u64);
impl FixedLengthEncoding for f16 {
type Encoded = [u8; 2];
fn encode(self) -> [u8; 2] {
// https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260
let s = self.to_bits() as i16;
let val = s ^ (((s >> 15) as u16) >> 1) as i16;
val.encode()
}
fn decode(encoded: Self::Encoded) -> Self {
let bits = i16::decode(encoded);
let val = bits ^ (((bits >> 15) as u16) >> 1) as i16;
Self::from_bits(val as u16)
}
}
impl FixedLengthEncoding for f32 {
type Encoded = [u8; 4];
fn encode(self) -> [u8; 4] {
// https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260
let s = self.to_bits() as i32;
let val = s ^ (((s >> 31) as u32) >> 1) as i32;
val.encode()
}
fn decode(encoded: Self::Encoded) -> Self {
let bits = i32::decode(encoded);
let val = bits ^ (((bits >> 31) as u32) >> 1) as i32;
Self::from_bits(val as u32)
}
}
impl FixedLengthEncoding for f64 {
type Encoded = [u8; 8];
fn encode(self) -> [u8; 8] {
// https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260
let s = self.to_bits() as i64;
let val = s ^ (((s >> 63) as u64) >> 1) as i64;
val.encode()
}
fn decode(encoded: Self::Encoded) -> Self {
let bits = i64::decode(encoded);
let val = bits ^ (((bits >> 63) as u64) >> 1) as i64;
Self::from_bits(val as u64)
}
}
/// Returns the total encoded length (including null byte) for a value of type `T::Native`
pub const fn encoded_len<T>(_col: &PrimitiveArray<T>) -> usize
where
T: ArrowPrimitiveType,
T::Native: FixedLengthEncoding,
{
T::Native::ENCODED_LEN
}
/// Fixed width types are encoded as
///
/// - 1 byte `0` if null or `1` if valid
/// - bytes of [`FixedLengthEncoding`]
pub fn encode<T: FixedLengthEncoding, I: IntoIterator<Item = Option<T>>>(
data: &mut [u8],
offsets: &mut [usize],
i: I,
opts: SortOptions,
) {
for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(i) {
let end_offset = *offset + T::ENCODED_LEN;
if let Some(val) = maybe_val {
let to_write = &mut data[*offset..end_offset];
to_write[0] = 1;
let mut encoded = val.encode();
if opts.descending
|
to_write[1..].copy_from_slice(encoded.as_ref())
} else {
data[*offset] = null_sentinel(opts);
}
*offset = end_offset;
}
}
pub fn encode_fixed_size_binary(
data: &mut [u8],
offsets: &mut [usize],
array: &FixedSizeBinaryArray,
opts: SortOptions,
) {
let len = array.value_length() as usize;
for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(array.iter()) {
let end_offset = *offset + len + 1;
if let Some(val) = maybe_val {
let to_write = &mut data[*offset..end_offset];
to_write[0] = 1;
to_write[1..].copy_from_slice(&val[..len]);
if opts.descending {
// Flip bits to reverse order
to_write[1..1 + len].iter_mut().for_each(|v| *v = !*v)
}
} else {
data[*offset] = null_sentinel(opts);
}
*offset = end_offset;
}
}
/// Splits `len` bytes from `src`
#[inline]
fn split_off<'a>(src: &mut &'a [u8], len: usize) -> &'a [u8] {
let v = &src[..len];
*src = &src[len..];
v
}
/// Decodes a `BooleanArray` from rows
pub fn decode_bool(rows: &mut [&[u8]], options: SortOptions) -> BooleanArray {
let true_val = match options.descending {
true => !1,
false => 1,
};
let len = rows.len();
let mut null_count = 0;
let mut nulls = MutableBuffer::new(bit_util::ceil(len, 64) * 8);
let mut values = MutableBuffer::new(bit_util::ceil(len, 64) * 8);
let chunks = len / 64;
let remainder = len % 64;
for chunk in 0..chunks {
let mut null_packed = 0;
let mut values_packed = 0;
for bit_idx in 0..64 {
let i = split_off(&mut rows[bit_idx + chunk * 64], 2);
let (null, value) = (i[0] == 1, i[1] == true_val);
null_count += !null as usize;
null_packed |= (null as u64) << bit_idx;
values_packed |= (value as u64) << bit_idx;
}
nulls.push(null_packed);
values.push(values_packed);
}
if remainder != 0 {
let mut null_packed = 0;
let mut values_packed = 0;
for bit_idx in 0..remainder {
let i = split_off(&mut rows[bit_idx +
|
{
// Flip bits to reverse order
encoded.as_mut().iter_mut().for_each(|v| *v = !*v)
}
|
conditional_block
|
fixed.rs
|
}
/// Encodes a value of a particular fixed width type into bytes according to the rules
/// described on [`super::RowConverter`]
pub trait FixedLengthEncoding: Copy {
const ENCODED_LEN: usize = 1 + std::mem::size_of::<Self::Encoded>();
type Encoded: Sized + Copy + FromSlice + AsRef<[u8]> + AsMut<[u8]>;
fn encode(self) -> Self::Encoded;
fn decode(encoded: Self::Encoded) -> Self;
}
impl FixedLengthEncoding for bool {
type Encoded = [u8; 1];
fn encode(self) -> [u8; 1]
|
fn decode(encoded: Self::Encoded) -> Self {
encoded[0] != 0
}
}
macro_rules! encode_signed {
($n:expr, $t:ty) => {
impl FixedLengthEncoding for $t {
type Encoded = [u8; $n];
fn encode(self) -> [u8; $n] {
let mut b = self.to_be_bytes();
// Toggle top "sign" bit to ensure consistent sort order
b[0] ^= 0x80;
b
}
fn decode(mut encoded: Self::Encoded) -> Self {
// Toggle top "sign" bit
encoded[0] ^= 0x80;
Self::from_be_bytes(encoded)
}
}
};
}
encode_signed!(1, i8);
encode_signed!(2, i16);
encode_signed!(4, i32);
encode_signed!(8, i64);
encode_signed!(16, i128);
encode_signed!(32, i256);
macro_rules! encode_unsigned {
($n:expr, $t:ty) => {
impl FixedLengthEncoding for $t {
type Encoded = [u8; $n];
fn encode(self) -> [u8; $n] {
self.to_be_bytes()
}
fn decode(encoded: Self::Encoded) -> Self {
Self::from_be_bytes(encoded)
}
}
};
}
encode_unsigned!(1, u8);
encode_unsigned!(2, u16);
encode_unsigned!(4, u32);
encode_unsigned!(8, u64);
impl FixedLengthEncoding for f16 {
type Encoded = [u8; 2];
fn encode(self) -> [u8; 2] {
// https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260
let s = self.to_bits() as i16;
let val = s ^ (((s >> 15) as u16) >> 1) as i16;
val.encode()
}
fn decode(encoded: Self::Encoded) -> Self {
let bits = i16::decode(encoded);
let val = bits ^ (((bits >> 15) as u16) >> 1) as i16;
Self::from_bits(val as u16)
}
}
impl FixedLengthEncoding for f32 {
type Encoded = [u8; 4];
fn encode(self) -> [u8; 4] {
// https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260
let s = self.to_bits() as i32;
let val = s ^ (((s >> 31) as u32) >> 1) as i32;
val.encode()
}
fn decode(encoded: Self::Encoded) -> Self {
let bits = i32::decode(encoded);
let val = bits ^ (((bits >> 31) as u32) >> 1) as i32;
Self::from_bits(val as u32)
}
}
impl FixedLengthEncoding for f64 {
type Encoded = [u8; 8];
fn encode(self) -> [u8; 8] {
// https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260
let s = self.to_bits() as i64;
let val = s ^ (((s >> 63) as u64) >> 1) as i64;
val.encode()
}
fn decode(encoded: Self::Encoded) -> Self {
let bits = i64::decode(encoded);
let val = bits ^ (((bits >> 63) as u64) >> 1) as i64;
Self::from_bits(val as u64)
}
}
/// Returns the total encoded length (including null byte) for a value of type `T::Native`
pub const fn encoded_len<T>(_col: &PrimitiveArray<T>) -> usize
where
T: ArrowPrimitiveType,
T::Native: FixedLengthEncoding,
{
T::Native::ENCODED_LEN
}
/// Fixed width types are encoded as
///
/// - 1 byte `0` if null or `1` if valid
/// - bytes of [`FixedLengthEncoding`]
pub fn encode<T: FixedLengthEncoding, I: IntoIterator<Item = Option<T>>>(
data: &mut [u8],
offsets: &mut [usize],
i: I,
opts: SortOptions,
) {
for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(i) {
let end_offset = *offset + T::ENCODED_LEN;
if let Some(val) = maybe_val {
let to_write = &mut data[*offset..end_offset];
to_write[0] = 1;
let mut encoded = val.encode();
if opts.descending {
// Flip bits to reverse order
encoded.as_mut().iter_mut().for_each(|v| *v = !*v)
}
to_write[1..].copy_from_slice(encoded.as_ref())
} else {
data[*offset] = null_sentinel(opts);
}
*offset = end_offset;
}
}
pub fn encode_fixed_size_binary(
data: &mut [u8],
offsets: &mut [usize],
array: &FixedSizeBinaryArray,
opts: SortOptions,
) {
let len = array.value_length() as usize;
for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(array.iter()) {
let end_offset = *offset + len + 1;
if let Some(val) = maybe_val {
let to_write = &mut data[*offset..end_offset];
to_write[0] = 1;
to_write[1..].copy_from_slice(&val[..len]);
if opts.descending {
// Flip bits to reverse order
to_write[1..1 + len].iter_mut().for_each(|v| *v = !*v)
}
} else {
data[*offset] = null_sentinel(opts);
}
*offset = end_offset;
}
}
/// Splits `len` bytes from `src`
#[inline]
fn split_off<'a>(src: &mut &'a [u8], len: usize) -> &'a [u8] {
let v = &src[..len];
*src = &src[len..];
v
}
/// Decodes a `BooleanArray` from rows
pub fn decode_bool(rows: &mut [&[u8]], options: SortOptions) -> BooleanArray {
let true_val = match options.descending {
true => !1,
false => 1,
};
let len = rows.len();
let mut null_count = 0;
let mut nulls = MutableBuffer::new(bit_util::ceil(len, 64) * 8);
let mut values = MutableBuffer::new(bit_util::ceil(len, 64) * 8);
let chunks = len / 64;
let remainder = len % 64;
for chunk in 0..chunks {
let mut null_packed = 0;
let mut values_packed = 0;
for bit_idx in 0..64 {
let i = split_off(&mut rows[bit_idx + chunk * 64], 2);
let (null, value) = (i[0] == 1, i[1] == true_val);
null_count += !null as usize;
null_packed |= (null as u64) << bit_idx;
values_packed |= (value as u64) << bit_idx;
}
nulls.push(null_packed);
values.push(values_packed);
}
if remainder != 0 {
let mut null_packed = 0;
let mut values_packed = 0;
for bit_idx in 0..remainder {
let i = split_off(&mut rows[bit_idx +
|
{
[self as u8]
}
|
identifier_body
|
pop.py
|
(self, province, pop_job, population):
"""
Creates a new Pop.
manager (Historia)
province (SecondaryDivision)
culture (Culture)
religion (Religion)
language (Language)
job (Job)
"""
self.bankrupt_times = 0
self.home = province
self.location = province
self.id = unique_id('po')
self.population = population
self.population_yesterday = 0
self.pop_job = pop_job
# ECONOMY
self.money = pop_job.start_money
self.money_yesterday = 0
self.bankrupt = False
# set inventory and ideal amounts
self.inventory = Inventory(pop_job.inventory_size)
self.give_start_inventory()
self.update_ideal_inventory()
# a dictionary of Goods to PriceRanges
# represents the price range the agent considers valid for each Good
self.price_belief = {}
# a dictionary of Goods to price list
# represents the prices of the good that the Pop has observed
# during the time they have been trading
self.observed_trading_range = {}
self.successful_trades = 0
self.failed_trades = 0
# make some fake initial data
for good in Good.all():
avg_price = self.market.avg_historial_price(good, 15)
# fake trades
self.observed_trading_range[good] = [
avg_price * 0.5,
avg_price * 1.5
]
# generate fake price belief
self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price * 1.5)
# Merchant logic
self.trade_location = None # the province this Pop is traveling to
self.trade_good = None # what good we're trading in right now
self.trade_amount = 0 # amount of trade_good we should be trading
self.trading_days = 0 # number of days waiting to trade
# Generic Pop properties
@property
def social_class(self):
return self.pop_job.social_class
@property
def market(self):
"Get the market instance"
return self.location.market
@property
def profit(self):
"Determine today's profit"
return self.money - self.money_yesterday
@property
def total_trades(self):
"Total number of trades this Pop participated in"
return self.successful_trades + self.failed_trades
@property
def trade_success(self):
"Percent of trades that were successful"
if self.total_trades == 0:
return 0
return (self.successful_trades / self.total_trades) * 100
@property
def is_away(self):
"Is this Pop away from it's home?"
return self.home is not self.location
# Merchant specific logic
def go_to_province(self, province):
"Moves the Pop to another Province"
self.location = province
def decide_trade_plan(self):
"""
Decide what good to trade in and how much.
Look for the most in demand good, or the most expensive good at the home Province
Find a province near home province where its the cheapest and there's inventory
"""
self.trade_amount = 5
most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)
most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda i: i[1], reverse=True)
# if we already had a trade good, refresh ideal inventory
if self.trade_good:
self.update_ideal_inventory()
if DEBUG: print("Finding a Good to trade:")
for good, demand in most_demanded_goods:
if demand > 0:
# find nearby provinces where this has inventory and the price is lower
price_at_home = self.home.market.mean_price(good)
if DEBUG: print("Good: {}, Demand: {}, Price: ${}".format(good.title, demand, price_at_home))
neighboring_markets = [p.market for p in self.location.owned_neighbors]
neighboring_markets = [m for m in neighboring_markets if m.supply_for(good) > self.trade_amount]
neighboring_markets.sort(key=lambda m: m.supply_for(good), reverse=True)
if len(neighboring_markets) > 0:
# we found places where this good is cheaper and in inventory
target = neighboring_markets[0].location
price_at_target = target.market.mean_price(good)
# only trade with prices where we can make money
if price_at_home > price_at_target:
offset = 0
if good is Good.bread:
offset = 1
self.inventory.set_ideal(good, self.trade_amount + offset)
self.trade_location = target
if DEBUG:
print("\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}".format(
self.trade_location.name,
self.trade_location.market.supply_for(good),
self.trade_location.market.mean_price(good),
price_at_home)
)
self.trade_good = good
return
else:
if DEBUG: print("\tPrice is higher at target (home: ${} target: ${})".format(price_at_home, price_at_target))
else:
if DEBUG: print("\tNo markets selling {} found".format(good))
# Generic economic logic
def update_ideal_inventory(self):
"Update ideal inventory"
# reset so that the Pop can sell the inventory it doesn't need
for good in Good.all():
self.inventory.set_ideal(good, 0)
# update ideal inventory for new Job
for item in self.pop_job.ideal_inventory:
self.inventory.set_ideal(item['good'], item['amount'])
def give_start_inventory(self):
"Give the Pop the inventory it needs to do its job"
for item in self.pop_job.start_inventory:
self.inventory.add(item['good'], item['amount'])
def change_population(self, trade_success):
"Change the population based off the trade"
self.population_yesterday = self.population
if trade_success:
self.population += round(self.population * 0.01)
else:
self.population -= round(self.population * 0.002)
def handle_bankruptcy(self, pop_job):
"Change job, create money out of thin air, update ideal inventory"
# TODO: stop creating money out of thin air
self.pop_job = pop_job
self.bankrupt_times += 1
self.money = 2
self.update_ideal_inventory()
self.give_start_inventory()
def perform_logic(self):
"Depending on PopJob, perform logic (including production)"
logic = self.pop_job.logic(self)
logic.perform()
def create_buy_order(self, good, limit):
"Create a buy order for a given Good at a determined quantity"
bid_price = self.determine_price_of(good)
ideal = self.determine_buy_quantity(good)
# can't buy more than limit
quantity_to_buy = limit if ideal > limit else ideal
if quantity_to_buy > 0:
return Order(self, OrderType.buy_order, quantity_to_buy, bid_price, good)
return False
def create_sell_order(self, good, limit):
"Create a sell order for a given Good at a determined quantity"
sell_price = self.determine_price_of(good)
ideal = self.determine_sell_quantity(good)
# can't buy more than limit
quantity_to_sell = limit if ideal < limit else ideal
if quantity_to_sell > 0:
return Order(self, OrderType.sell_order, quantity_to_sell, sell_price, good)
return False
def price_belief_for(self, good):
"Gets the price belief this agent has for a particular Good"
if good in self.price_belief:
return self.price_belief[good]
def determine_price_of(self, good):
"Determine the price of a particular good"
return self.price_belief_for(good).random()
def trading_range_extremes(self, good):
"Gets the lowest and highst price of a Good this agent has seen"
trading_range = self.observed_trading_range[good]
return PriceRange(min(trading_range), max(trading_range))
def determine_sell_quantity(self, good):
"Determine how much inventory goods to sell based on market conditions"
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = position_in_range(mean, trading_range.low, trading_range.high)
amount_to_sell = round(favoribility * self.inventory.surplus(good))
if amount_to_sell < 1:
amount_to_sell = 1
return amount_to_sell
def determine_buy_quantity(self, good):
"Determine how much goods to buy based on market conditions"
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = 1 - position_in_range(mean, trading_range.low, trading_range.high)
amount_to_buy = round(favoribility * self.inventory.shortage(good))
if amount_to_buy < 1:
amount_to_buy = 1
return amount
|
__init__
|
identifier_name
|
|
pop.py
|
self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price * 1.5)
# Merchant logic
self.trade_location = None # the province this Pop is traveling to
self.trade_good = None # what good we're trading in right now
self.trade_amount = 0 # amount of trade_good we should be trading
self.trading_days = 0 # number of days waiting to trade
# Generic Pop properties
@property
def social_class(self):
return self.pop_job.social_class
@property
def market(self):
"Get the market instance"
return self.location.market
@property
def profit(self):
"Determine today's profit"
return self.money - self.money_yesterday
@property
def total_trades(self):
"Total number of trades this Pop participated in"
return self.successful_trades + self.failed_trades
@property
def trade_success(self):
"Percent of trades that were successful"
if self.total_trades == 0:
return 0
return (self.successful_trades / self.total_trades) * 100
@property
def is_away(self):
"Is this Pop away from it's home?"
return self.home is not self.location
# Merchant specific logic
def go_to_province(self, province):
"Moves the Pop to another Province"
self.location = province
def decide_trade_plan(self):
"""
Decide what good to trade in and how much.
Look for the most in demand good, or the most expensive good at the home Province
Find a province near home province where its the cheapest and there's inventory
"""
self.trade_amount = 5
most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)
most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda i: i[1], reverse=True)
# if we already had a trade good, refresh ideal inventory
if self.trade_good:
self.update_ideal_inventory()
if DEBUG: print("Finding a Good to trade:")
for good, demand in most_demanded_goods:
if demand > 0:
# find nearby provinces where this has inventory and the price is lower
price_at_home = self.home.market.mean_price(good)
if DEBUG: print("Good: {}, Demand: {}, Price: ${}".format(good.title, demand, price_at_home))
neighboring_markets = [p.market for p in self.location.owned_neighbors]
neighboring_markets = [m for m in neighboring_markets if m.supply_for(good) > self.trade_amount]
neighboring_markets.sort(key=lambda m: m.supply_for(good), reverse=True)
if len(neighboring_markets) > 0:
# we found places where this good is cheaper and in inventory
target = neighboring_markets[0].location
price_at_target = target.market.mean_price(good)
# only trade with prices where we can make money
if price_at_home > price_at_target:
offset = 0
if good is Good.bread:
offset = 1
self.inventory.set_ideal(good, self.trade_amount + offset)
self.trade_location = target
if DEBUG:
print("\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}".format(
self.trade_location.name,
self.trade_location.market.supply_for(good),
self.trade_location.market.mean_price(good),
price_at_home)
)
self.trade_good = good
return
else:
if DEBUG: print("\tPrice is higher at target (home: ${} target: ${})".format(price_at_home, price_at_target))
else:
if DEBUG: print("\tNo markets selling {} found".format(good))
# Generic economic logic
def update_ideal_inventory(self):
"Update ideal inventory"
# reset so that the Pop can sell the inventory it doesn't need
for good in Good.all():
self.inventory.set_ideal(good, 0)
# update ideal inventory for new Job
for item in self.pop_job.ideal_inventory:
self.inventory.set_ideal(item['good'], item['amount'])
def give_start_inventory(self):
"Give the Pop the inventory it needs to do its job"
for item in self.pop_job.start_inventory:
self.inventory.add(item['good'], item['amount'])
def change_population(self, trade_success):
"Change the population based off the trade"
self.population_yesterday = self.population
if trade_success:
self.population += round(self.population * 0.01)
else:
self.population -= round(self.population * 0.002)
def handle_bankruptcy(self, pop_job):
"Change job, create money out of thin air, update ideal inventory"
# TODO: stop creating money out of thin air
self.pop_job = pop_job
self.bankrupt_times += 1
self.money = 2
self.update_ideal_inventory()
self.give_start_inventory()
def perform_logic(self):
"Depending on PopJob, perform logic (including production)"
logic = self.pop_job.logic(self)
logic.perform()
def create_buy_order(self, good, limit):
"Create a buy order for a given Good at a determined quantity"
bid_price = self.determine_price_of(good)
ideal = self.determine_buy_quantity(good)
# can't buy more than limit
quantity_to_buy = limit if ideal > limit else ideal
if quantity_to_buy > 0:
return Order(self, OrderType.buy_order, quantity_to_buy, bid_price, good)
return False
def create_sell_order(self, good, limit):
"Create a sell order for a given Good at a determined quantity"
sell_price = self.determine_price_of(good)
ideal = self.determine_sell_quantity(good)
# can't buy more than limit
quantity_to_sell = limit if ideal < limit else ideal
if quantity_to_sell > 0:
return Order(self, OrderType.sell_order, quantity_to_sell, sell_price, good)
return False
def price_belief_for(self, good):
"Gets the price belief this agent has for a particular Good"
if good in self.price_belief:
return self.price_belief[good]
def determine_price_of(self, good):
"Determine the price of a particular good"
return self.price_belief_for(good).random()
def trading_range_extremes(self, good):
"Gets the lowest and highst price of a Good this agent has seen"
trading_range = self.observed_trading_range[good]
return PriceRange(min(trading_range), max(trading_range))
def determine_sell_quantity(self, good):
"Determine how much inventory goods to sell based on market conditions"
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = position_in_range(mean, trading_range.low, trading_range.high)
amount_to_sell = round(favoribility * self.inventory.surplus(good))
if amount_to_sell < 1:
amount_to_sell = 1
return amount_to_sell
def determine_buy_quantity(self, good):
|
def generate_orders(self, good):
"""
If the Pop needs a Good to perform production, buy it
If the Pop has surplus Resources, sell them
"""
surplus = self.inventory.surplus(good)
if surplus >= 1: # sell inventory
# the original only old one item here
sell_amount = surplus
order = self.create_sell_order(good, surplus)
if order:
# print('{} sells {} {}'.format(self.pop_job.title, sell_amount, good.name))
self.market.sell(order)
else: # buy more
shortage = self.inventory.shortage(good)
free_space = self.inventory.empty_space
if shortage > 0:
if shortage <= free_space:
# enough space for ideal order
limit = shortage
else:
# not enough space for ideal order
limit = math.floor(free_space / shortage)
if limit > 0:
order = self.create_buy_order(good, limit)
if order:
# print('{} buys {} {}'.format(self.pop_job.title, limit, good.name))
self.market.buy(order)
# else:
# print("{} has no shortage of {} (has shortage: {})".format(self.pop_job.title, good.title, shortage))
def update_price_model(self, good, order_type, is_successful, clearing_price=0):
"""
Update the Pop's price model for the given resource
good (Good) The Good which was orderd
order_type (OrderType) Which kind of Order this was
is_successful (bool) whether or not the
|
"Determine how much goods to buy based on market conditions"
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = 1 - position_in_range(mean, trading_range.low, trading_range.high)
amount_to_buy = round(favoribility * self.inventory.shortage(good))
if amount_to_buy < 1:
amount_to_buy = 1
return amount_to_buy
|
identifier_body
|
pop.py
|
.price_belief[good] = PriceRange(avg_price * 0.5, avg_price * 1.5)
# Merchant logic
self.trade_location = None # the province this Pop is traveling to
self.trade_good = None # what good we're trading in right now
self.trade_amount = 0 # amount of trade_good we should be trading
self.trading_days = 0 # number of days waiting to trade
# Generic Pop properties
@property
def social_class(self):
return self.pop_job.social_class
@property
def market(self):
"Get the market instance"
return self.location.market
@property
def profit(self):
"Determine today's profit"
return self.money - self.money_yesterday
@property
def total_trades(self):
"Total number of trades this Pop participated in"
return self.successful_trades + self.failed_trades
@property
def trade_success(self):
"Percent of trades that were successful"
if self.total_trades == 0:
return 0
return (self.successful_trades / self.total_trades) * 100
@property
def is_away(self):
"Is this Pop away from it's home?"
return self.home is not self.location
# Merchant specific logic
def go_to_province(self, province):
"Moves the Pop to another Province"
self.location = province
def decide_trade_plan(self):
"""
Decide what good to trade in and how much.
Look for the most in demand good, or the most expensive good at the home Province
Find a province near home province where its the cheapest and there's inventory
"""
self.trade_amount = 5
most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)
most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda i: i[1], reverse=True)
# if we already had a trade good, refresh ideal inventory
if self.trade_good:
self.update_ideal_inventory()
if DEBUG: print("Finding a Good to trade:")
for good, demand in most_demanded_goods:
if demand > 0:
# find nearby provinces where this has inventory and the price is lower
price_at_home = self.home.market.mean_price(good)
if DEBUG:
|
neighboring_markets = [p.market for p in self.location.owned_neighbors]
neighboring_markets = [m for m in neighboring_markets if m.supply_for(good) > self.trade_amount]
neighboring_markets.sort(key=lambda m: m.supply_for(good), reverse=True)
if len(neighboring_markets) > 0:
# we found places where this good is cheaper and in inventory
target = neighboring_markets[0].location
price_at_target = target.market.mean_price(good)
# only trade with prices where we can make money
if price_at_home > price_at_target:
offset = 0
if good is Good.bread:
offset = 1
self.inventory.set_ideal(good, self.trade_amount + offset)
self.trade_location = target
if DEBUG:
print("\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}".format(
self.trade_location.name,
self.trade_location.market.supply_for(good),
self.trade_location.market.mean_price(good),
price_at_home)
)
self.trade_good = good
return
else:
if DEBUG: print("\tPrice is higher at target (home: ${} target: ${})".format(price_at_home, price_at_target))
else:
if DEBUG: print("\tNo markets selling {} found".format(good))
# Generic economic logic
def update_ideal_inventory(self):
"Update ideal inventory"
# reset so that the Pop can sell the inventory it doesn't need
for good in Good.all():
self.inventory.set_ideal(good, 0)
# update ideal inventory for new Job
for item in self.pop_job.ideal_inventory:
self.inventory.set_ideal(item['good'], item['amount'])
def give_start_inventory(self):
"Give the Pop the inventory it needs to do its job"
for item in self.pop_job.start_inventory:
self.inventory.add(item['good'], item['amount'])
def change_population(self, trade_success):
"Change the population based off the trade"
self.population_yesterday = self.population
if trade_success:
self.population += round(self.population * 0.01)
else:
self.population -= round(self.population * 0.002)
def handle_bankruptcy(self, pop_job):
"Change job, create money out of thin air, update ideal inventory"
# TODO: stop creating money out of thin air
self.pop_job = pop_job
self.bankrupt_times += 1
self.money = 2
self.update_ideal_inventory()
self.give_start_inventory()
def perform_logic(self):
"Depending on PopJob, perform logic (including production)"
logic = self.pop_job.logic(self)
logic.perform()
def create_buy_order(self, good, limit):
"Create a buy order for a given Good at a determined quantity"
bid_price = self.determine_price_of(good)
ideal = self.determine_buy_quantity(good)
# can't buy more than limit
quantity_to_buy = limit if ideal > limit else ideal
if quantity_to_buy > 0:
return Order(self, OrderType.buy_order, quantity_to_buy, bid_price, good)
return False
def create_sell_order(self, good, limit):
"Create a sell order for a given Good at a determined quantity"
sell_price = self.determine_price_of(good)
ideal = self.determine_sell_quantity(good)
# can't buy more than limit
quantity_to_sell = limit if ideal < limit else ideal
if quantity_to_sell > 0:
return Order(self, OrderType.sell_order, quantity_to_sell, sell_price, good)
return False
def price_belief_for(self, good):
"Gets the price belief this agent has for a particular Good"
if good in self.price_belief:
return self.price_belief[good]
def determine_price_of(self, good):
"Determine the price of a particular good"
return self.price_belief_for(good).random()
def trading_range_extremes(self, good):
"Gets the lowest and highst price of a Good this agent has seen"
trading_range = self.observed_trading_range[good]
return PriceRange(min(trading_range), max(trading_range))
def determine_sell_quantity(self, good):
"Determine how much inventory goods to sell based on market conditions"
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = position_in_range(mean, trading_range.low, trading_range.high)
amount_to_sell = round(favoribility * self.inventory.surplus(good))
if amount_to_sell < 1:
amount_to_sell = 1
return amount_to_sell
def determine_buy_quantity(self, good):
"Determine how much goods to buy based on market conditions"
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = 1 - position_in_range(mean, trading_range.low, trading_range.high)
amount_to_buy = round(favoribility * self.inventory.shortage(good))
if amount_to_buy < 1:
amount_to_buy = 1
return amount_to_buy
def generate_orders(self, good):
"""
If the Pop needs a Good to perform production, buy it
If the Pop has surplus Resources, sell them
"""
surplus = self.inventory.surplus(good)
if surplus >= 1: # sell inventory
# the original only old one item here
sell_amount = surplus
order = self.create_sell_order(good, surplus)
if order:
# print('{} sells {} {}'.format(self.pop_job.title, sell_amount, good.name))
self.market.sell(order)
else: # buy more
shortage = self.inventory.shortage(good)
free_space = self.inventory.empty_space
if shortage > 0:
if shortage <= free_space:
# enough space for ideal order
limit = shortage
else:
# not enough space for ideal order
limit = math.floor(free_space / shortage)
if limit > 0:
order = self.create_buy_order(good, limit)
if order:
# print('{} buys {} {}'.format(self.pop_job.title, limit, good.name))
self.market.buy(order)
# else:
# print("{} has no shortage of {} (has shortage: {})".format(self.pop_job.title, good.title, shortage))
def update_price_model(self, good, order_type, is_successful, clearing_price=0):
"""
Update the Pop's price model for the given resource
good (Good) The Good which was orderd
order_type (OrderType) Which kind of Order this was
is_successful (bool) whether or
|
print("Good: {}, Demand: {}, Price: ${}".format(good.title, demand, price_at_home))
|
conditional_block
|
pop.py
|
self.price_belief = {}
# a dictionary of Goods to price list
# represents the prices of the good that the Pop has observed
# during the time they have been trading
self.observed_trading_range = {}
self.successful_trades = 0
self.failed_trades = 0
# make some fake initial data
for good in Good.all():
avg_price = self.market.avg_historial_price(good, 15)
# fake trades
self.observed_trading_range[good] = [
avg_price * 0.5,
avg_price * 1.5
]
# generate fake price belief
self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price * 1.5)
# Merchant logic
self.trade_location = None # the province this Pop is traveling to
self.trade_good = None # what good we're trading in right now
self.trade_amount = 0 # amount of trade_good we should be trading
self.trading_days = 0 # number of days waiting to trade
# Generic Pop properties
@property
def social_class(self):
return self.pop_job.social_class
@property
def market(self):
"Get the market instance"
return self.location.market
@property
def profit(self):
"Determine today's profit"
return self.money - self.money_yesterday
@property
def total_trades(self):
"Total number of trades this Pop participated in"
return self.successful_trades + self.failed_trades
@property
def trade_success(self):
"Percent of trades that were successful"
if self.total_trades == 0:
return 0
return (self.successful_trades / self.total_trades) * 100
@property
def is_away(self):
"Is this Pop away from it's home?"
return self.home is not self.location
# Merchant specific logic
def go_to_province(self, province):
"Moves the Pop to another Province"
self.location = province
def decide_trade_plan(self):
"""
Decide what good to trade in and how much.
Look for the most in demand good, or the most expensive good at the home Province
Find a province near home province where its the cheapest and there's inventory
"""
self.trade_amount = 5
most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)
most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda i: i[1], reverse=True)
# if we already had a trade good, refresh ideal inventory
if self.trade_good:
self.update_ideal_inventory()
if DEBUG: print("Finding a Good to trade:")
for good, demand in most_demanded_goods:
if demand > 0:
# find nearby provinces where this has inventory and the price is lower
price_at_home = self.home.market.mean_price(good)
if DEBUG: print("Good: {}, Demand: {}, Price: ${}".format(good.title, demand, price_at_home))
neighboring_markets = [p.market for p in self.location.owned_neighbors]
neighboring_markets = [m for m in neighboring_markets if m.supply_for(good) > self.trade_amount]
neighboring_markets.sort(key=lambda m: m.supply_for(good), reverse=True)
if len(neighboring_markets) > 0:
# we found places where this good is cheaper and in inventory
target = neighboring_markets[0].location
price_at_target = target.market.mean_price(good)
# only trade with prices where we can make money
if price_at_home > price_at_target:
offset = 0
if good is Good.bread:
offset = 1
self.inventory.set_ideal(good, self.trade_amount + offset)
self.trade_location = target
if DEBUG:
print("\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}".format(
self.trade_location.name,
self.trade_location.market.supply_for(good),
self.trade_location.market.mean_price(good),
price_at_home)
)
self.trade_good = good
return
else:
if DEBUG: print("\tPrice is higher at target (home: ${} target: ${})".format(price_at_home, price_at_target))
else:
if DEBUG: print("\tNo markets selling {} found".format(good))
# Generic economic logic
def update_ideal_inventory(self):
"Update ideal inventory"
# reset so that the Pop can sell the inventory it doesn't need
for good in Good.all():
self.inventory.set_ideal(good, 0)
# update ideal inventory for new Job
for item in self.pop_job.ideal_inventory:
self.inventory.set_ideal(item['good'], item['amount'])
def give_start_inventory(self):
"Give the Pop the inventory it needs to do its job"
for item in self.pop_job.start_inventory:
self.inventory.add(item['good'], item['amount'])
def change_population(self, trade_success):
"Change the population based off the trade"
self.population_yesterday = self.population
if trade_success:
self.population += round(self.population * 0.01)
else:
self.population -= round(self.population * 0.002)
def handle_bankruptcy(self, pop_job):
"Change job, create money out of thin air, update ideal inventory"
# TODO: stop creating money out of thin air
self.pop_job = pop_job
self.bankrupt_times += 1
self.money = 2
self.update_ideal_inventory()
self.give_start_inventory()
def perform_logic(self):
"Depending on PopJob, perform logic (including production)"
logic = self.pop_job.logic(self)
logic.perform()
def create_buy_order(self, good, limit):
"Create a buy order for a given Good at a determined quantity"
bid_price = self.determine_price_of(good)
ideal = self.determine_buy_quantity(good)
# can't buy more than limit
quantity_to_buy = limit if ideal > limit else ideal
if quantity_to_buy > 0:
return Order(self, OrderType.buy_order, quantity_to_buy, bid_price, good)
return False
def create_sell_order(self, good, limit):
"Create a sell order for a given Good at a determined quantity"
sell_price = self.determine_price_of(good)
ideal = self.determine_sell_quantity(good)
# can't buy more than limit
quantity_to_sell = limit if ideal < limit else ideal
if quantity_to_sell > 0:
return Order(self, OrderType.sell_order, quantity_to_sell, sell_price, good)
return False
def price_belief_for(self, good):
"Gets the price belief this agent has for a particular Good"
if good in self.price_belief:
return self.price_belief[good]
def determine_price_of(self, good):
"Determine the price of a particular good"
return self.price_belief_for(good).random()
def trading_range_extremes(self, good):
"Gets the lowest and highst price of a Good this agent has seen"
trading_range = self.observed_trading_range[good]
return PriceRange(min(trading_range), max(trading_range))
def determine_sell_quantity(self, good):
"Determine how much inventory goods to sell based on market conditions"
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = position_in_range(mean, trading_range.low, trading_range.high)
amount_to_sell = round(favoribility * self.inventory.surplus(good))
if amount_to_sell < 1:
amount_to_sell = 1
return amount_to_sell
def determine_buy_quantity(self, good):
"Determine how much goods to buy based on market conditions"
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = 1 - position_in_range(mean, trading_range.low, trading_range.high)
amount_to_buy = round(favoribility * self.inventory.shortage(good))
if amount_to_buy < 1:
amount_to_buy = 1
return amount_to_buy
def generate_orders(self, good):
"""
If the Pop needs a Good to perform production, buy it
If the Pop has surplus Resources, sell them
"""
surplus = self.inventory.surplus(good)
if surplus >= 1: # sell inventory
# the original only old one item here
sell_amount = surplus
order = self.create_sell_order(good, surplus)
if order:
# print('{} sells {} {}'.format(self.pop_job.title, sell_amount, good.name))
self.market.sell(order)
else: # buy more
shortage = self.inventory.shortage(good)
free_space = self.inventory.empty_space
if shortage > 0:
if shortage <= free_space:
# enough space for ideal order
limit = shortage
else:
# not enough space for ideal order
limit
|
# represents the price range the agent considers valid for each Good
|
random_line_split
|
|
server.go
|
reflector server pointer.
func NewServer(underlying store.BlobStore, outer store.BlobStore) *Server {
return &Server{
Timeout: DefaultTimeout,
underlyingStore: underlying,
outerStore: outer,
grp: stop.New(),
}
}
// Shutdown shuts down the reflector server gracefully.
func (s *Server) Shutdown() {
log.Println("shutting down reflector server...")
s.grp.StopAndWait()
log.Println("reflector server stopped")
}
// Start starts the server to handle connections.
func (s *Server) Start(address string) error {
l, err := net.Listen(network, address)
if err != nil {
return errors.Err(err)
}
log.Println("reflector listening on " + address)
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Dec()
<-s.grp.Ch()
err := l.Close()
if err != nil {
log.Error(errors.Prefix("closing listener", err))
}
s.grp.Done()
}()
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "start").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "start").Dec()
s.listenAndServe(l)
s.grp.Done()
}()
if s.EnableBlocklist {
if b, ok := s.underlyingStore.(store.Blocklister); ok {
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Dec()
s.enableBlocklist(b)
s.grp.Done()
}()
} else {
//s.Shutdown()
return errors.Err("blocklist is enabled but blob store does not support blocklisting")
}
}
return nil
}
func (s *Server) listenAndServe(listener net.Listener) {
for {
conn, err := listener.Accept()
if err != nil {
if s.quitting() {
return
}
log.Error(err)
} else {
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Dec()
s.handleConn(conn)
s.grp.Done()
}()
}
}
}
func (s *Server) handleConn(conn net.Conn) {
// all this stuff is to close the connections correctly when we're shutting down the server
connNeedsClosing := make(chan struct{})
defer func() {
close(connNeedsClosing)
}()
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Dec()
defer s.grp.Done()
select {
case <-connNeedsClosing:
case <-s.grp.Ch():
}
err := conn.Close()
if err != nil {
log.Error(errors.Prefix("closing peer conn", err))
}
}()
err := s.doHandshake(conn)
if err != nil {
if errors.Is(err, io.EOF) || s.quitting() {
return
}
err := s.doError(conn, err)
if err != nil {
log.Error(errors.Prefix("sending handshake error", err))
}
return
}
for {
err = s.receiveBlob(conn)
if err != nil {
if errors.Is(err, io.EOF) || s.quitting() {
return
}
err := s.doError(conn, err)
if err != nil {
log.Error(errors.Prefix("sending blob receive error", err))
}
return
}
}
}
func (s *Server) doError(conn net.Conn, err error) error {
if err == nil {
return nil
}
shouldLog := metrics.TrackError(metrics.DirectionUpload, err)
if shouldLog {
log.Errorln(errors.FullTrace(err))
}
if e2, ok := err.(*json.SyntaxError); ok {
log.Errorf("syntax error at byte offset %d", e2.Offset)
}
//resp, err := json.Marshal(errorResponse{Error: err.Error()})
//if err != nil {
// return err
//}
//return s.write(conn, resp)
return nil
}
func (s *Server) receiveBlob(conn net.Conn) error {
blobSize, blobHash, isSdBlob, err := s.readBlobRequest(conn)
if err != nil {
return err
}
var wantsBlob bool
if bl, ok := s.underlyingStore.(store.Blocklister); ok {
wantsBlob, err = bl.Wants(blobHash)
if err != nil {
return err
}
} else {
blobExists, err := s.underlyingStore.Has(blobHash)
if err != nil {
return err
}
wantsBlob = !blobExists
}
var neededBlobs []string
if isSdBlob && !wantsBlob {
if nbc, ok := s.underlyingStore.(neededBlobChecker); ok {
neededBlobs, err = nbc.MissingBlobsForKnownStream(blobHash)
if err != nil {
return err
}
} else {
// if we can't check for blobs in a stream, we have to say that the sd blob is
// missing. if we say we have the sd blob, they wont try to send any content blobs
wantsBlob = true
}
}
err = s.sendBlobResponse(conn, wantsBlob, isSdBlob, neededBlobs)
if err != nil {
return err
}
if !wantsBlob {
return nil
}
blob, err := s.readRawBlob(conn, blobSize)
if err != nil {
sendErr := s.sendTransferResponse(conn, false, isSdBlob)
if sendErr != nil {
return sendErr
}
return errors.Prefix("error reading blob "+blobHash[:8], err)
}
receivedBlobHash := BlobHash(blob)
if blobHash != receivedBlobHash {
sendErr := s.sendTransferResponse(conn, false, isSdBlob)
if sendErr != nil {
return sendErr
}
return errors.Err("hash of received blob data does not match hash from send request")
// this can also happen if the blob size is wrong, because the server will read the wrong number of bytes from the stream
}
log.Debugln("Got blob " + blobHash[:8])
if isSdBlob {
err = s.outerStore.PutSD(blobHash, blob)
} else {
err = s.outerStore.Put(blobHash, blob)
}
if err != nil {
return err
}
metrics.MtrInBytesReflector.Add(float64(len(blob)))
metrics.BlobUploadCount.Inc()
if isSdBlob {
metrics.SDBlobUploadCount.Inc()
}
return s.sendTransferResponse(conn, true, isSdBlob)
}
func (s *Server) doHandshake(conn net.Conn) error {
var handshake handshakeRequestResponse
err := s.read(conn, &handshake)
if err != nil
|
else if handshake.Version == nil {
return errors.Err("handshake is missing protocol version")
} else if *handshake.Version != protocolVersion1 && *handshake.Version != protocolVersion2 {
return errors.Err("protocol version not supported")
}
resp, err := json.Marshal(handshakeRequestResponse{Version: handshake.Version})
if err != nil {
return err
}
return s.write(conn, resp)
}
func (s *Server) readBlobRequest(conn net.Conn) (int, string, bool, error) {
var sendRequest sendBlobRequest
err := s.read(conn, &sendRequest)
if err != nil {
return 0, "", false, err
}
var blobHash string
var blobSize int
isSdBlob := sendRequest.SdBlobHash != ""
if isSdBlob {
blobSize = sendRequest.SdBlobSize
blobHash = sendRequest.SdBlobHash
} else {
blobSize = sendRequest.BlobSize
blobHash = sendRequest.BlobHash
}
if blobHash == "" {
return blobSize, blobHash, isSdBlob, errors.Err("blob hash is empty")
}
if blobSize > maxBlobSize {
return blobSize, blobHash, isSdBlob, errors.Err(ErrBlobTooBig)
}
if blobSize == 0 {
return blobSize, blobHash, isSdBlob, errors.Err("0-byte blob received")
}
return blobSize, blobHash, isSdBlob, nil
}
func (s *Server) sendBlobResponse(conn net.Conn, shouldSendBlob, isSdBlob bool, neededBlobs []string) error {
var response []byte
var err error
if isSdBlob {
response, err = json.Marshal(sendSdBlobResponse{SendSdBlob: shouldSendBlob, NeededB
|
{
return err
}
|
conditional_block
|
server.go
|
reflector server pointer.
func NewServer(underlying store.BlobStore, outer store.BlobStore) *Server {
return &Server{
Timeout: DefaultTimeout,
underlyingStore: underlying,
outerStore: outer,
grp: stop.New(),
}
}
// Shutdown shuts down the reflector server gracefully.
func (s *Server) Shutdown() {
log.Println("shutting down reflector server...")
s.grp.StopAndWait()
log.Println("reflector server stopped")
}
// Start starts the server to handle connections.
func (s *Server) Start(address string) error {
l, err := net.Listen(network, address)
if err != nil {
return errors.Err(err)
}
log.Println("reflector listening on " + address)
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Dec()
<-s.grp.Ch()
err := l.Close()
if err != nil {
log.Error(errors.Prefix("closing listener", err))
}
s.grp.Done()
}()
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "start").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "start").Dec()
s.listenAndServe(l)
s.grp.Done()
}()
if s.EnableBlocklist {
if b, ok := s.underlyingStore.(store.Blocklister); ok {
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Dec()
s.enableBlocklist(b)
s.grp.Done()
}()
} else {
//s.Shutdown()
return errors.Err("blocklist is enabled but blob store does not support blocklisting")
}
}
return nil
}
func (s *Server) listenAndServe(listener net.Listener)
|
func (s *Server) handleConn(conn net.Conn) {
// all this stuff is to close the connections correctly when we're shutting down the server
connNeedsClosing := make(chan struct{})
defer func() {
close(connNeedsClosing)
}()
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Dec()
defer s.grp.Done()
select {
case <-connNeedsClosing:
case <-s.grp.Ch():
}
err := conn.Close()
if err != nil {
log.Error(errors.Prefix("closing peer conn", err))
}
}()
err := s.doHandshake(conn)
if err != nil {
if errors.Is(err, io.EOF) || s.quitting() {
return
}
err := s.doError(conn, err)
if err != nil {
log.Error(errors.Prefix("sending handshake error", err))
}
return
}
for {
err = s.receiveBlob(conn)
if err != nil {
if errors.Is(err, io.EOF) || s.quitting() {
return
}
err := s.doError(conn, err)
if err != nil {
log.Error(errors.Prefix("sending blob receive error", err))
}
return
}
}
}
func (s *Server) doError(conn net.Conn, err error) error {
if err == nil {
return nil
}
shouldLog := metrics.TrackError(metrics.DirectionUpload, err)
if shouldLog {
log.Errorln(errors.FullTrace(err))
}
if e2, ok := err.(*json.SyntaxError); ok {
log.Errorf("syntax error at byte offset %d", e2.Offset)
}
//resp, err := json.Marshal(errorResponse{Error: err.Error()})
//if err != nil {
// return err
//}
//return s.write(conn, resp)
return nil
}
func (s *Server) receiveBlob(conn net.Conn) error {
blobSize, blobHash, isSdBlob, err := s.readBlobRequest(conn)
if err != nil {
return err
}
var wantsBlob bool
if bl, ok := s.underlyingStore.(store.Blocklister); ok {
wantsBlob, err = bl.Wants(blobHash)
if err != nil {
return err
}
} else {
blobExists, err := s.underlyingStore.Has(blobHash)
if err != nil {
return err
}
wantsBlob = !blobExists
}
var neededBlobs []string
if isSdBlob && !wantsBlob {
if nbc, ok := s.underlyingStore.(neededBlobChecker); ok {
neededBlobs, err = nbc.MissingBlobsForKnownStream(blobHash)
if err != nil {
return err
}
} else {
// if we can't check for blobs in a stream, we have to say that the sd blob is
// missing. if we say we have the sd blob, they wont try to send any content blobs
wantsBlob = true
}
}
err = s.sendBlobResponse(conn, wantsBlob, isSdBlob, neededBlobs)
if err != nil {
return err
}
if !wantsBlob {
return nil
}
blob, err := s.readRawBlob(conn, blobSize)
if err != nil {
sendErr := s.sendTransferResponse(conn, false, isSdBlob)
if sendErr != nil {
return sendErr
}
return errors.Prefix("error reading blob "+blobHash[:8], err)
}
receivedBlobHash := BlobHash(blob)
if blobHash != receivedBlobHash {
sendErr := s.sendTransferResponse(conn, false, isSdBlob)
if sendErr != nil {
return sendErr
}
return errors.Err("hash of received blob data does not match hash from send request")
// this can also happen if the blob size is wrong, because the server will read the wrong number of bytes from the stream
}
log.Debugln("Got blob " + blobHash[:8])
if isSdBlob {
err = s.outerStore.PutSD(blobHash, blob)
} else {
err = s.outerStore.Put(blobHash, blob)
}
if err != nil {
return err
}
metrics.MtrInBytesReflector.Add(float64(len(blob)))
metrics.BlobUploadCount.Inc()
if isSdBlob {
metrics.SDBlobUploadCount.Inc()
}
return s.sendTransferResponse(conn, true, isSdBlob)
}
func (s *Server) doHandshake(conn net.Conn) error {
var handshake handshakeRequestResponse
err := s.read(conn, &handshake)
if err != nil {
return err
} else if handshake.Version == nil {
return errors.Err("handshake is missing protocol version")
} else if *handshake.Version != protocolVersion1 && *handshake.Version != protocolVersion2 {
return errors.Err("protocol version not supported")
}
resp, err := json.Marshal(handshakeRequestResponse{Version: handshake.Version})
if err != nil {
return err
}
return s.write(conn, resp)
}
func (s *Server) readBlobRequest(conn net.Conn) (int, string, bool, error) {
var sendRequest sendBlobRequest
err := s.read(conn, &sendRequest)
if err != nil {
return 0, "", false, err
}
var blobHash string
var blobSize int
isSdBlob := sendRequest.SdBlobHash != ""
if isSdBlob {
blobSize = sendRequest.SdBlobSize
blobHash = sendRequest.SdBlobHash
} else {
blobSize = sendRequest.BlobSize
blobHash = sendRequest.BlobHash
}
if blobHash == "" {
return blobSize, blobHash, isSdBlob, errors.Err("blob hash is empty")
}
if blobSize > maxBlobSize {
return blobSize, blobHash, isSdBlob, errors.Err(ErrBlobTooBig)
}
if blobSize == 0 {
return blobSize, blobHash, isSdBlob, errors.Err("0-byte blob received")
}
return blobSize, blobHash, isSdBlob, nil
}
func (s *Server) sendBlobResponse(conn net.Conn, shouldSendBlob, isSdBlob bool, neededBlobs []string) error {
var response []byte
var err error
if isSdBlob {
response, err = json.Marshal(sendSdBlobResponse{SendSdBlob: shouldSendBlob, Needed
|
{
for {
conn, err := listener.Accept()
if err != nil {
if s.quitting() {
return
}
log.Error(err)
} else {
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Dec()
s.handleConn(conn)
s.grp.Done()
}()
}
}
}
|
identifier_body
|
server.go
|
reflector", "start").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "start").Dec()
s.listenAndServe(l)
s.grp.Done()
}()
if s.EnableBlocklist {
if b, ok := s.underlyingStore.(store.Blocklister); ok {
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Dec()
s.enableBlocklist(b)
s.grp.Done()
}()
} else {
//s.Shutdown()
return errors.Err("blocklist is enabled but blob store does not support blocklisting")
}
}
return nil
}
func (s *Server) listenAndServe(listener net.Listener) {
for {
conn, err := listener.Accept()
if err != nil {
if s.quitting() {
return
}
log.Error(err)
} else {
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Dec()
s.handleConn(conn)
s.grp.Done()
}()
}
}
}
func (s *Server) handleConn(conn net.Conn) {
// all this stuff is to close the connections correctly when we're shutting down the server
connNeedsClosing := make(chan struct{})
defer func() {
close(connNeedsClosing)
}()
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Dec()
defer s.grp.Done()
select {
case <-connNeedsClosing:
case <-s.grp.Ch():
}
err := conn.Close()
if err != nil {
log.Error(errors.Prefix("closing peer conn", err))
}
}()
err := s.doHandshake(conn)
if err != nil {
if errors.Is(err, io.EOF) || s.quitting() {
return
}
err := s.doError(conn, err)
if err != nil {
log.Error(errors.Prefix("sending handshake error", err))
}
return
}
for {
err = s.receiveBlob(conn)
if err != nil {
if errors.Is(err, io.EOF) || s.quitting() {
return
}
err := s.doError(conn, err)
if err != nil {
log.Error(errors.Prefix("sending blob receive error", err))
}
return
}
}
}
func (s *Server) doError(conn net.Conn, err error) error {
if err == nil {
return nil
}
shouldLog := metrics.TrackError(metrics.DirectionUpload, err)
if shouldLog {
log.Errorln(errors.FullTrace(err))
}
if e2, ok := err.(*json.SyntaxError); ok {
log.Errorf("syntax error at byte offset %d", e2.Offset)
}
//resp, err := json.Marshal(errorResponse{Error: err.Error()})
//if err != nil {
// return err
//}
//return s.write(conn, resp)
return nil
}
func (s *Server) receiveBlob(conn net.Conn) error {
blobSize, blobHash, isSdBlob, err := s.readBlobRequest(conn)
if err != nil {
return err
}
var wantsBlob bool
if bl, ok := s.underlyingStore.(store.Blocklister); ok {
wantsBlob, err = bl.Wants(blobHash)
if err != nil {
return err
}
} else {
blobExists, err := s.underlyingStore.Has(blobHash)
if err != nil {
return err
}
wantsBlob = !blobExists
}
var neededBlobs []string
if isSdBlob && !wantsBlob {
if nbc, ok := s.underlyingStore.(neededBlobChecker); ok {
neededBlobs, err = nbc.MissingBlobsForKnownStream(blobHash)
if err != nil {
return err
}
} else {
// if we can't check for blobs in a stream, we have to say that the sd blob is
// missing. if we say we have the sd blob, they wont try to send any content blobs
wantsBlob = true
}
}
err = s.sendBlobResponse(conn, wantsBlob, isSdBlob, neededBlobs)
if err != nil {
return err
}
if !wantsBlob {
return nil
}
blob, err := s.readRawBlob(conn, blobSize)
if err != nil {
sendErr := s.sendTransferResponse(conn, false, isSdBlob)
if sendErr != nil {
return sendErr
}
return errors.Prefix("error reading blob "+blobHash[:8], err)
}
receivedBlobHash := BlobHash(blob)
if blobHash != receivedBlobHash {
sendErr := s.sendTransferResponse(conn, false, isSdBlob)
if sendErr != nil {
return sendErr
}
return errors.Err("hash of received blob data does not match hash from send request")
// this can also happen if the blob size is wrong, because the server will read the wrong number of bytes from the stream
}
log.Debugln("Got blob " + blobHash[:8])
if isSdBlob {
err = s.outerStore.PutSD(blobHash, blob)
} else {
err = s.outerStore.Put(blobHash, blob)
}
if err != nil {
return err
}
metrics.MtrInBytesReflector.Add(float64(len(blob)))
metrics.BlobUploadCount.Inc()
if isSdBlob {
metrics.SDBlobUploadCount.Inc()
}
return s.sendTransferResponse(conn, true, isSdBlob)
}
func (s *Server) doHandshake(conn net.Conn) error {
var handshake handshakeRequestResponse
err := s.read(conn, &handshake)
if err != nil {
return err
} else if handshake.Version == nil {
return errors.Err("handshake is missing protocol version")
} else if *handshake.Version != protocolVersion1 && *handshake.Version != protocolVersion2 {
return errors.Err("protocol version not supported")
}
resp, err := json.Marshal(handshakeRequestResponse{Version: handshake.Version})
if err != nil {
return err
}
return s.write(conn, resp)
}
func (s *Server) readBlobRequest(conn net.Conn) (int, string, bool, error) {
var sendRequest sendBlobRequest
err := s.read(conn, &sendRequest)
if err != nil {
return 0, "", false, err
}
var blobHash string
var blobSize int
isSdBlob := sendRequest.SdBlobHash != ""
if isSdBlob {
blobSize = sendRequest.SdBlobSize
blobHash = sendRequest.SdBlobHash
} else {
blobSize = sendRequest.BlobSize
blobHash = sendRequest.BlobHash
}
if blobHash == "" {
return blobSize, blobHash, isSdBlob, errors.Err("blob hash is empty")
}
if blobSize > maxBlobSize {
return blobSize, blobHash, isSdBlob, errors.Err(ErrBlobTooBig)
}
if blobSize == 0 {
return blobSize, blobHash, isSdBlob, errors.Err("0-byte blob received")
}
return blobSize, blobHash, isSdBlob, nil
}
func (s *Server) sendBlobResponse(conn net.Conn, shouldSendBlob, isSdBlob bool, neededBlobs []string) error {
var response []byte
var err error
if isSdBlob {
response, err = json.Marshal(sendSdBlobResponse{SendSdBlob: shouldSendBlob, NeededBlobs: neededBlobs})
} else {
response, err = json.Marshal(sendBlobResponse{SendBlob: shouldSendBlob})
}
if err != nil {
return err
}
return s.write(conn, response)
}
func (s *Server) sendTransferResponse(conn net.Conn, receivedBlob, isSdBlob bool) error {
var response []byte
var err error
if isSdBlob {
response, err = json.Marshal(sdBlobTransferResponse{ReceivedSdBlob: receivedBlob})
} else {
response, err = json.Marshal(blobTransferResponse{ReceivedBlob: receivedBlob})
}
if err != nil {
return err
}
return s.write(conn, response)
}
func (s *Server) read(conn net.Conn, v interface{}) error {
err := conn.SetReadDeadline(time.Now().Add(s.Timeout))
if err != nil {
return errors.Err(err)
}
dec := json.NewDecoder(conn)
err = dec.Decode(v)
if err != nil {
data, _ := io.ReadAll(dec.Buffered())
if len(data) > 0 {
return errors.Err("%s. Data: %s", err.Error(), hex.EncodeToString(data))
}
return errors.Err(err)
}
return nil
|
}
|
random_line_split
|
|
server.go
|
reflector server pointer.
func NewServer(underlying store.BlobStore, outer store.BlobStore) *Server {
return &Server{
Timeout: DefaultTimeout,
underlyingStore: underlying,
outerStore: outer,
grp: stop.New(),
}
}
// Shutdown shuts down the reflector server gracefully.
func (s *Server) Shutdown() {
log.Println("shutting down reflector server...")
s.grp.StopAndWait()
log.Println("reflector server stopped")
}
// Start starts the server to handle connections.
func (s *Server) Start(address string) error {
l, err := net.Listen(network, address)
if err != nil {
return errors.Err(err)
}
log.Println("reflector listening on " + address)
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Dec()
<-s.grp.Ch()
err := l.Close()
if err != nil {
log.Error(errors.Prefix("closing listener", err))
}
s.grp.Done()
}()
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "start").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "start").Dec()
s.listenAndServe(l)
s.grp.Done()
}()
if s.EnableBlocklist {
if b, ok := s.underlyingStore.(store.Blocklister); ok {
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Dec()
s.enableBlocklist(b)
s.grp.Done()
}()
} else {
//s.Shutdown()
return errors.Err("blocklist is enabled but blob store does not support blocklisting")
}
}
return nil
}
func (s *Server) listenAndServe(listener net.Listener) {
for {
conn, err := listener.Accept()
if err != nil {
if s.quitting() {
return
}
log.Error(err)
} else {
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Dec()
s.handleConn(conn)
s.grp.Done()
}()
}
}
}
func (s *Server) handleConn(conn net.Conn) {
// all this stuff is to close the connections correctly when we're shutting down the server
connNeedsClosing := make(chan struct{})
defer func() {
close(connNeedsClosing)
}()
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Dec()
defer s.grp.Done()
select {
case <-connNeedsClosing:
case <-s.grp.Ch():
}
err := conn.Close()
if err != nil {
log.Error(errors.Prefix("closing peer conn", err))
}
}()
err := s.doHandshake(conn)
if err != nil {
if errors.Is(err, io.EOF) || s.quitting() {
return
}
err := s.doError(conn, err)
if err != nil {
log.Error(errors.Prefix("sending handshake error", err))
}
return
}
for {
err = s.receiveBlob(conn)
if err != nil {
if errors.Is(err, io.EOF) || s.quitting() {
return
}
err := s.doError(conn, err)
if err != nil {
log.Error(errors.Prefix("sending blob receive error", err))
}
return
}
}
}
func (s *Server) doError(conn net.Conn, err error) error {
if err == nil {
return nil
}
shouldLog := metrics.TrackError(metrics.DirectionUpload, err)
if shouldLog {
log.Errorln(errors.FullTrace(err))
}
if e2, ok := err.(*json.SyntaxError); ok {
log.Errorf("syntax error at byte offset %d", e2.Offset)
}
//resp, err := json.Marshal(errorResponse{Error: err.Error()})
//if err != nil {
// return err
//}
//return s.write(conn, resp)
return nil
}
func (s *Server) receiveBlob(conn net.Conn) error {
blobSize, blobHash, isSdBlob, err := s.readBlobRequest(conn)
if err != nil {
return err
}
var wantsBlob bool
if bl, ok := s.underlyingStore.(store.Blocklister); ok {
wantsBlob, err = bl.Wants(blobHash)
if err != nil {
return err
}
} else {
blobExists, err := s.underlyingStore.Has(blobHash)
if err != nil {
return err
}
wantsBlob = !blobExists
}
var neededBlobs []string
if isSdBlob && !wantsBlob {
if nbc, ok := s.underlyingStore.(neededBlobChecker); ok {
neededBlobs, err = nbc.MissingBlobsForKnownStream(blobHash)
if err != nil {
return err
}
} else {
// if we can't check for blobs in a stream, we have to say that the sd blob is
// missing. if we say we have the sd blob, they wont try to send any content blobs
wantsBlob = true
}
}
err = s.sendBlobResponse(conn, wantsBlob, isSdBlob, neededBlobs)
if err != nil {
return err
}
if !wantsBlob {
return nil
}
blob, err := s.readRawBlob(conn, blobSize)
if err != nil {
sendErr := s.sendTransferResponse(conn, false, isSdBlob)
if sendErr != nil {
return sendErr
}
return errors.Prefix("error reading blob "+blobHash[:8], err)
}
receivedBlobHash := BlobHash(blob)
if blobHash != receivedBlobHash {
sendErr := s.sendTransferResponse(conn, false, isSdBlob)
if sendErr != nil {
return sendErr
}
return errors.Err("hash of received blob data does not match hash from send request")
// this can also happen if the blob size is wrong, because the server will read the wrong number of bytes from the stream
}
log.Debugln("Got blob " + blobHash[:8])
if isSdBlob {
err = s.outerStore.PutSD(blobHash, blob)
} else {
err = s.outerStore.Put(blobHash, blob)
}
if err != nil {
return err
}
metrics.MtrInBytesReflector.Add(float64(len(blob)))
metrics.BlobUploadCount.Inc()
if isSdBlob {
metrics.SDBlobUploadCount.Inc()
}
return s.sendTransferResponse(conn, true, isSdBlob)
}
func (s *Server)
|
(conn net.Conn) error {
var handshake handshakeRequestResponse
err := s.read(conn, &handshake)
if err != nil {
return err
} else if handshake.Version == nil {
return errors.Err("handshake is missing protocol version")
} else if *handshake.Version != protocolVersion1 && *handshake.Version != protocolVersion2 {
return errors.Err("protocol version not supported")
}
resp, err := json.Marshal(handshakeRequestResponse{Version: handshake.Version})
if err != nil {
return err
}
return s.write(conn, resp)
}
func (s *Server) readBlobRequest(conn net.Conn) (int, string, bool, error) {
var sendRequest sendBlobRequest
err := s.read(conn, &sendRequest)
if err != nil {
return 0, "", false, err
}
var blobHash string
var blobSize int
isSdBlob := sendRequest.SdBlobHash != ""
if isSdBlob {
blobSize = sendRequest.SdBlobSize
blobHash = sendRequest.SdBlobHash
} else {
blobSize = sendRequest.BlobSize
blobHash = sendRequest.BlobHash
}
if blobHash == "" {
return blobSize, blobHash, isSdBlob, errors.Err("blob hash is empty")
}
if blobSize > maxBlobSize {
return blobSize, blobHash, isSdBlob, errors.Err(ErrBlobTooBig)
}
if blobSize == 0 {
return blobSize, blobHash, isSdBlob, errors.Err("0-byte blob received")
}
return blobSize, blobHash, isSdBlob, nil
}
func (s *Server) sendBlobResponse(conn net.Conn, shouldSendBlob, isSdBlob bool, neededBlobs []string) error {
var response []byte
var err error
if isSdBlob {
response, err = json.Marshal(sendSdBlobResponse{SendSdBlob: shouldSendBlob, NeededB
|
doHandshake
|
identifier_name
|
mod.rs
|
DurationVisitor;
impl<'de> de::Visitor<'de> for DurationVisitor {
type Value = Duration;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("Duration as u64")
}
fn visit_u64<E>(self, v: u64) -> std::result::Result<Self::Value, E>
where
E: de::Error,
{
Ok(Duration::from_secs(v))
}
}
deserializer.deserialize_u64(DurationVisitor)
}
impl RawUserTransaction {
/// Create a new `RawUserTransaction` with a payload.
///
/// It can be either to publish a module, to execute a script, or to issue a writeset
/// transaction.
pub fn new(
sender: AccountAddress,
sequence_number: u64,
payload: TransactionPayload,
max_gas_amount: u64,
gas_unit_price: u64,
expiration_time: Duration,
) -> Self {
RawUserTransaction {
sender,
sequence_number,
payload,
max_gas_amount,
gas_unit_price,
expiration_time,
}
}
/// Create a new `RawUserTransaction` with a script.
///
/// A script transaction contains only code to execute. No publishing is allowed in scripts.
pub fn
|
(
sender: AccountAddress,
sequence_number: u64,
script: Script,
max_gas_amount: u64,
gas_unit_price: u64,
expiration_time: Duration,
) -> Self {
RawUserTransaction {
sender,
sequence_number,
payload: TransactionPayload::Script(script),
max_gas_amount,
gas_unit_price,
expiration_time,
}
}
/// Create a new `RawUserTransaction` with a module to publish.
///
/// A module transaction is the only way to publish code. Only one module per transaction
/// can be published.
pub fn new_module(
sender: AccountAddress,
sequence_number: u64,
module: Module,
max_gas_amount: u64,
gas_unit_price: u64,
expiration_time: Duration,
) -> Self {
RawUserTransaction {
sender,
sequence_number,
payload: TransactionPayload::Module(module),
max_gas_amount,
gas_unit_price,
expiration_time,
}
}
/// Signs the given `RawUserTransaction`. Note that this consumes the `RawUserTransaction` and turns it
/// into a `SignatureCheckedTransaction`.
///
/// For a transaction that has just been signed, its signature is expected to be valid.
pub fn sign(
self,
private_key: &Ed25519PrivateKey,
public_key: Ed25519PublicKey,
) -> Result<SignatureCheckedTransaction> {
let signature = private_key.sign_message(&self.crypto_hash());
Ok(SignatureCheckedTransaction(SignedUserTransaction::new(
self, public_key, signature,
)))
}
pub fn into_payload(self) -> TransactionPayload {
self.payload
}
/// Return the sender of this transaction.
pub fn sender(&self) -> AccountAddress {
self.sender
}
pub fn mock() -> Self {
Self::mock_by_sender(AccountAddress::random())
}
pub fn mock_by_sender(sender: AccountAddress) -> Self {
Self::new(
sender,
0,
TransactionPayload::Script(Script::new(vec![], vec![], vec![])),
0,
0,
Duration::new(0, 0),
)
}
pub fn mock_from(compiled_script: Vec<u8>) -> Self {
Self::new(
AccountAddress::default(),
0,
TransactionPayload::Script(Script::new(compiled_script, vec![stc_type_tag()], vec![])),
600,
0,
Duration::new(0, 0),
)
}
}
#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)]
pub enum TransactionPayload {
/// A transaction that executes code.
Script(Script),
/// A transaction that publishes code.
Module(Module),
/// A transaction that publish or update module code by a package.
Package(UpgradePackage),
}
/// A transaction that has been signed.
///
/// A `SignedUserTransaction` is a single transaction that can be atomically executed. Clients submit
/// these to validator nodes, and the validator and executor submits these to the VM.
///
/// **IMPORTANT:** The signature of a `SignedUserTransaction` is not guaranteed to be verified. For a
/// transaction whose signature is statically guaranteed to be verified, see
/// [`SignatureCheckedTransaction`].
#[derive(Clone, Eq, PartialEq, Hash, Serialize, Deserialize, CryptoHasher, CryptoHash)]
pub struct SignedUserTransaction {
/// The raw transaction
raw_txn: RawUserTransaction,
/// Public key and signature to authenticate
authenticator: TransactionAuthenticator,
}
/// A transaction for which the signature has been verified. Created by
/// [`SignedUserTransaction::check_signature`] and [`RawUserTransaction::sign`].
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct SignatureCheckedTransaction(SignedUserTransaction);
impl SignatureCheckedTransaction {
/// Returns the `SignedUserTransaction` within.
pub fn into_inner(self) -> SignedUserTransaction {
self.0
}
/// Returns the `RawUserTransaction` within.
pub fn into_raw_transaction(self) -> RawUserTransaction {
self.0.into_raw_transaction()
}
}
impl Deref for SignatureCheckedTransaction {
type Target = SignedUserTransaction;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Debug for SignedUserTransaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"SignedTransaction {{ \n \
{{ raw_txn: {:#?}, \n \
authenticator: {:#?}, \n \
}} \n \
}}",
self.raw_txn, self.authenticator
)
}
}
impl SignedUserTransaction {
pub fn new(
raw_txn: RawUserTransaction,
public_key: Ed25519PublicKey,
signature: Ed25519Signature,
) -> SignedUserTransaction {
let authenticator = TransactionAuthenticator::ed25519(public_key, signature);
SignedUserTransaction {
raw_txn,
authenticator,
}
}
pub fn multi_ed25519(
raw_txn: RawUserTransaction,
public_key: MultiEd25519PublicKey,
signature: MultiEd25519Signature,
) -> SignedUserTransaction {
let authenticator = TransactionAuthenticator::multi_ed25519(public_key, signature);
SignedUserTransaction {
raw_txn,
authenticator,
}
}
pub fn authenticator(&self) -> TransactionAuthenticator {
self.authenticator.clone()
}
pub fn raw_txn(&self) -> &RawUserTransaction {
&self.raw_txn
}
pub fn sender(&self) -> AccountAddress {
self.raw_txn.sender
}
pub fn into_raw_transaction(self) -> RawUserTransaction {
self.raw_txn
}
pub fn sequence_number(&self) -> u64 {
self.raw_txn.sequence_number
}
pub fn payload(&self) -> &TransactionPayload {
&self.raw_txn.payload
}
pub fn max_gas_amount(&self) -> u64 {
self.raw_txn.max_gas_amount
}
pub fn gas_unit_price(&self) -> u64 {
self.raw_txn.gas_unit_price
}
pub fn expiration_time(&self) -> Duration {
self.raw_txn.expiration_time
}
pub fn raw_txn_bytes_len(&self) -> usize {
scs::to_bytes(&self.raw_txn)
.expect("Unable to serialize RawUserTransaction")
.len()
}
/// Checks that the signature of given transaction. Returns `Ok(SignatureCheckedTransaction)` if
/// the signature is valid.
pub fn check_signature(self) -> Result<SignatureCheckedTransaction> {
self.authenticator
.verify_signature(&self.raw_txn.crypto_hash())?;
Ok(SignatureCheckedTransaction(self))
}
//TODO
pub fn mock() -> Self {
let mut gen = KeyGen::from_os_rng();
let (private_key, public_key) = gen.generate_keypair();
let raw_txn = RawUserTransaction::mock();
raw_txn.sign(&private_key, public_key).unwrap().into_inner()
}
pub fn mock_from(compiled_script: Vec<u8>) -> Self {
let mut gen = KeyGen::from_os_rng();
let (private_key, public_key) = gen.generate_keypair();
let raw_txn = RawUserTransaction::mock_from(compiled_script);
raw_txn.sign(&private_key, public_key).unwrap().into_inner()
}
}
/// The status of executing a transaction. The VM decides whether or not we should `Keep` the
/// transaction output or `Discard` it based upon the execution of the transaction. We wrap these
/// decisions around a `VMStatus` that provides more detail on the final execution state of the VM.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum TransactionStatus {
/// Discard the transaction output
Disc
|
new_script
|
identifier_name
|
mod.rs
|
expiration_time: Duration,
}
// TODO(#1307)
fn serialize_duration<S>(d: &Duration, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
serializer.serialize_u64(d.as_secs())
}
fn deserialize_duration<'de, D>(deserializer: D) -> std::result::Result<Duration, D::Error>
where
D: de::Deserializer<'de>,
{
struct DurationVisitor;
impl<'de> de::Visitor<'de> for DurationVisitor {
type Value = Duration;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("Duration as u64")
}
fn visit_u64<E>(self, v: u64) -> std::result::Result<Self::Value, E>
where
E: de::Error,
{
Ok(Duration::from_secs(v))
}
}
deserializer.deserialize_u64(DurationVisitor)
}
impl RawUserTransaction {
/// Create a new `RawUserTransaction` with a payload.
///
/// It can be either to publish a module, to execute a script, or to issue a writeset
/// transaction.
pub fn new(
sender: AccountAddress,
sequence_number: u64,
payload: TransactionPayload,
max_gas_amount: u64,
gas_unit_price: u64,
expiration_time: Duration,
) -> Self {
RawUserTransaction {
sender,
sequence_number,
payload,
max_gas_amount,
gas_unit_price,
expiration_time,
}
}
/// Create a new `RawUserTransaction` with a script.
///
/// A script transaction contains only code to execute. No publishing is allowed in scripts.
pub fn new_script(
sender: AccountAddress,
sequence_number: u64,
script: Script,
max_gas_amount: u64,
gas_unit_price: u64,
expiration_time: Duration,
) -> Self {
RawUserTransaction {
sender,
sequence_number,
payload: TransactionPayload::Script(script),
max_gas_amount,
gas_unit_price,
expiration_time,
}
}
/// Create a new `RawUserTransaction` with a module to publish.
///
/// A module transaction is the only way to publish code. Only one module per transaction
/// can be published.
pub fn new_module(
sender: AccountAddress,
sequence_number: u64,
module: Module,
max_gas_amount: u64,
gas_unit_price: u64,
expiration_time: Duration,
) -> Self {
RawUserTransaction {
sender,
sequence_number,
payload: TransactionPayload::Module(module),
max_gas_amount,
gas_unit_price,
expiration_time,
}
}
/// Signs the given `RawUserTransaction`. Note that this consumes the `RawUserTransaction` and turns it
/// into a `SignatureCheckedTransaction`.
///
/// For a transaction that has just been signed, its signature is expected to be valid.
pub fn sign(
self,
private_key: &Ed25519PrivateKey,
public_key: Ed25519PublicKey,
) -> Result<SignatureCheckedTransaction> {
let signature = private_key.sign_message(&self.crypto_hash());
Ok(SignatureCheckedTransaction(SignedUserTransaction::new(
self, public_key, signature,
)))
}
pub fn into_payload(self) -> TransactionPayload {
self.payload
}
/// Return the sender of this transaction.
pub fn sender(&self) -> AccountAddress {
self.sender
}
pub fn mock() -> Self {
Self::mock_by_sender(AccountAddress::random())
}
pub fn mock_by_sender(sender: AccountAddress) -> Self {
Self::new(
sender,
0,
TransactionPayload::Script(Script::new(vec![], vec![], vec![])),
0,
0,
Duration::new(0, 0),
)
}
pub fn mock_from(compiled_script: Vec<u8>) -> Self {
Self::new(
AccountAddress::default(),
0,
TransactionPayload::Script(Script::new(compiled_script, vec![stc_type_tag()], vec![])),
600,
0,
Duration::new(0, 0),
)
}
}
#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)]
pub enum TransactionPayload {
/// A transaction that executes code.
Script(Script),
/// A transaction that publishes code.
Module(Module),
/// A transaction that publish or update module code by a package.
Package(UpgradePackage),
}
/// A transaction that has been signed.
///
/// A `SignedUserTransaction` is a single transaction that can be atomically executed. Clients submit
/// these to validator nodes, and the validator and executor submits these to the VM.
///
/// **IMPORTANT:** The signature of a `SignedUserTransaction` is not guaranteed to be verified. For a
/// transaction whose signature is statically guaranteed to be verified, see
/// [`SignatureCheckedTransaction`].
#[derive(Clone, Eq, PartialEq, Hash, Serialize, Deserialize, CryptoHasher, CryptoHash)]
pub struct SignedUserTransaction {
/// The raw transaction
raw_txn: RawUserTransaction,
/// Public key and signature to authenticate
authenticator: TransactionAuthenticator,
}
/// A transaction for which the signature has been verified. Created by
/// [`SignedUserTransaction::check_signature`] and [`RawUserTransaction::sign`].
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct SignatureCheckedTransaction(SignedUserTransaction);
impl SignatureCheckedTransaction {
/// Returns the `SignedUserTransaction` within.
pub fn into_inner(self) -> SignedUserTransaction {
self.0
}
/// Returns the `RawUserTransaction` within.
pub fn into_raw_transaction(self) -> RawUserTransaction {
self.0.into_raw_transaction()
}
}
impl Deref for SignatureCheckedTransaction {
type Target = SignedUserTransaction;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Debug for SignedUserTransaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"SignedTransaction {{ \n \
{{ raw_txn: {:#?}, \n \
authenticator: {:#?}, \n \
}} \n \
}}",
self.raw_txn, self.authenticator
)
}
}
impl SignedUserTransaction {
pub fn new(
raw_txn: RawUserTransaction,
public_key: Ed25519PublicKey,
signature: Ed25519Signature,
) -> SignedUserTransaction {
let authenticator = TransactionAuthenticator::ed25519(public_key, signature);
SignedUserTransaction {
raw_txn,
authenticator,
}
}
pub fn multi_ed25519(
raw_txn: RawUserTransaction,
public_key: MultiEd25519PublicKey,
signature: MultiEd25519Signature,
) -> SignedUserTransaction {
let authenticator = TransactionAuthenticator::multi_ed25519(public_key, signature);
SignedUserTransaction {
raw_txn,
authenticator,
}
}
pub fn authenticator(&self) -> TransactionAuthenticator {
self.authenticator.clone()
}
pub fn raw_txn(&self) -> &RawUserTransaction {
&self.raw_txn
}
pub fn sender(&self) -> AccountAddress {
self.raw_txn.sender
}
pub fn into_raw_transaction(self) -> RawUserTransaction {
self.raw_txn
}
pub fn sequence_number(&self) -> u64 {
self.raw_txn.sequence_number
}
pub fn payload(&self) -> &TransactionPayload {
&self.raw_txn.payload
}
pub fn max_gas_amount(&self) -> u64 {
self.raw_txn.max_gas_amount
}
pub fn gas_unit_price(&self) -> u64 {
self.raw_txn.gas_unit_price
}
pub fn expiration_time(&self) -> Duration {
self.raw_txn.expiration_time
}
pub fn raw_txn_bytes_len(&self) -> usize {
scs::to_bytes(&self.raw_txn)
.expect("Unable to serialize RawUserTransaction")
.len()
}
/// Checks that the signature of given transaction. Returns `Ok(SignatureCheckedTransaction)` if
/// the signature is valid.
pub fn check_signature(self) -> Result<SignatureCheckedTransaction> {
self.authenticator
.verify_signature(&self.raw_txn.crypto_hash())?;
Ok(SignatureCheckedTransaction(self))
}
//TODO
pub fn mock() -> Self {
let mut gen = KeyGen::from_os_rng();
let (private_key, public_key) = gen.generate_keypair();
let raw_txn = RawUserTransaction::mock();
raw_txn.sign(&private_key, public_key).unwrap().into_inner()
}
pub fn mock_from(compiled_script: Vec<u8>) ->
|
// A transaction that doesn't expire is represented by a very large value like
// u64::max_value().
#[serde(serialize_with = "serialize_duration")]
#[serde(deserialize_with = "deserialize_duration")]
|
random_line_split
|
|
mod.rs
|
Visitor;
impl<'de> de::Visitor<'de> for DurationVisitor {
type Value = Duration;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("Duration as u64")
}
fn visit_u64<E>(self, v: u64) -> std::result::Result<Self::Value, E>
where
E: de::Error,
{
Ok(Duration::from_secs(v))
}
}
deserializer.deserialize_u64(DurationVisitor)
}
impl RawUserTransaction {
/// Create a new `RawUserTransaction` with a payload.
///
/// It can be either to publish a module, to execute a script, or to issue a writeset
/// transaction.
pub fn new(
sender: AccountAddress,
sequence_number: u64,
payload: TransactionPayload,
max_gas_amount: u64,
gas_unit_price: u64,
expiration_time: Duration,
) -> Self {
RawUserTransaction {
sender,
sequence_number,
payload,
max_gas_amount,
gas_unit_price,
expiration_time,
}
}
/// Create a new `RawUserTransaction` with a script.
///
/// A script transaction contains only code to execute. No publishing is allowed in scripts.
pub fn new_script(
sender: AccountAddress,
sequence_number: u64,
script: Script,
max_gas_amount: u64,
gas_unit_price: u64,
expiration_time: Duration,
) -> Self {
RawUserTransaction {
sender,
sequence_number,
payload: TransactionPayload::Script(script),
max_gas_amount,
gas_unit_price,
expiration_time,
}
}
/// Create a new `RawUserTransaction` with a module to publish.
///
/// A module transaction is the only way to publish code. Only one module per transaction
/// can be published.
pub fn new_module(
sender: AccountAddress,
sequence_number: u64,
module: Module,
max_gas_amount: u64,
gas_unit_price: u64,
expiration_time: Duration,
) -> Self {
RawUserTransaction {
sender,
sequence_number,
payload: TransactionPayload::Module(module),
max_gas_amount,
gas_unit_price,
expiration_time,
}
}
/// Signs the given `RawUserTransaction`. Note that this consumes the `RawUserTransaction` and turns it
/// into a `SignatureCheckedTransaction`.
///
/// For a transaction that has just been signed, its signature is expected to be valid.
pub fn sign(
self,
private_key: &Ed25519PrivateKey,
public_key: Ed25519PublicKey,
) -> Result<SignatureCheckedTransaction> {
let signature = private_key.sign_message(&self.crypto_hash());
Ok(SignatureCheckedTransaction(SignedUserTransaction::new(
self, public_key, signature,
)))
}
pub fn into_payload(self) -> TransactionPayload {
self.payload
}
/// Return the sender of this transaction.
pub fn sender(&self) -> AccountAddress {
self.sender
}
pub fn mock() -> Self {
Self::mock_by_sender(AccountAddress::random())
}
pub fn mock_by_sender(sender: AccountAddress) -> Self {
Self::new(
sender,
0,
TransactionPayload::Script(Script::new(vec![], vec![], vec![])),
0,
0,
Duration::new(0, 0),
)
}
pub fn mock_from(compiled_script: Vec<u8>) -> Self {
Self::new(
AccountAddress::default(),
0,
TransactionPayload::Script(Script::new(compiled_script, vec![stc_type_tag()], vec![])),
600,
0,
Duration::new(0, 0),
)
}
}
#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)]
pub enum TransactionPayload {
/// A transaction that executes code.
Script(Script),
/// A transaction that publishes code.
Module(Module),
/// A transaction that publish or update module code by a package.
Package(UpgradePackage),
}
/// A transaction that has been signed.
///
/// A `SignedUserTransaction` is a single transaction that can be atomically executed. Clients submit
/// these to validator nodes, and the validator and executor submits these to the VM.
///
/// **IMPORTANT:** The signature of a `SignedUserTransaction` is not guaranteed to be verified. For a
/// transaction whose signature is statically guaranteed to be verified, see
/// [`SignatureCheckedTransaction`].
#[derive(Clone, Eq, PartialEq, Hash, Serialize, Deserialize, CryptoHasher, CryptoHash)]
pub struct SignedUserTransaction {
/// The raw transaction
raw_txn: RawUserTransaction,
/// Public key and signature to authenticate
authenticator: TransactionAuthenticator,
}
/// A transaction for which the signature has been verified. Created by
/// [`SignedUserTransaction::check_signature`] and [`RawUserTransaction::sign`].
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct SignatureCheckedTransaction(SignedUserTransaction);
impl SignatureCheckedTransaction {
/// Returns the `SignedUserTransaction` within.
pub fn into_inner(self) -> SignedUserTransaction {
self.0
}
/// Returns the `RawUserTransaction` within.
pub fn into_raw_transaction(self) -> RawUserTransaction {
self.0.into_raw_transaction()
}
}
impl Deref for SignatureCheckedTransaction {
type Target = SignedUserTransaction;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Debug for SignedUserTransaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"SignedTransaction {{ \n \
{{ raw_txn: {:#?}, \n \
authenticator: {:#?}, \n \
}} \n \
}}",
self.raw_txn, self.authenticator
)
}
}
impl SignedUserTransaction {
pub fn new(
raw_txn: RawUserTransaction,
public_key: Ed25519PublicKey,
signature: Ed25519Signature,
) -> SignedUserTransaction {
let authenticator = TransactionAuthenticator::ed25519(public_key, signature);
SignedUserTransaction {
raw_txn,
authenticator,
}
}
pub fn multi_ed25519(
raw_txn: RawUserTransaction,
public_key: MultiEd25519PublicKey,
signature: MultiEd25519Signature,
) -> SignedUserTransaction {
let authenticator = TransactionAuthenticator::multi_ed25519(public_key, signature);
SignedUserTransaction {
raw_txn,
authenticator,
}
}
pub fn authenticator(&self) -> TransactionAuthenticator {
self.authenticator.clone()
}
pub fn raw_txn(&self) -> &RawUserTransaction
|
pub fn sender(&self) -> AccountAddress {
self.raw_txn.sender
}
pub fn into_raw_transaction(self) -> RawUserTransaction {
self.raw_txn
}
pub fn sequence_number(&self) -> u64 {
self.raw_txn.sequence_number
}
pub fn payload(&self) -> &TransactionPayload {
&self.raw_txn.payload
}
pub fn max_gas_amount(&self) -> u64 {
self.raw_txn.max_gas_amount
}
pub fn gas_unit_price(&self) -> u64 {
self.raw_txn.gas_unit_price
}
pub fn expiration_time(&self) -> Duration {
self.raw_txn.expiration_time
}
pub fn raw_txn_bytes_len(&self) -> usize {
scs::to_bytes(&self.raw_txn)
.expect("Unable to serialize RawUserTransaction")
.len()
}
/// Checks that the signature of given transaction. Returns `Ok(SignatureCheckedTransaction)` if
/// the signature is valid.
pub fn check_signature(self) -> Result<SignatureCheckedTransaction> {
self.authenticator
.verify_signature(&self.raw_txn.crypto_hash())?;
Ok(SignatureCheckedTransaction(self))
}
//TODO
pub fn mock() -> Self {
let mut gen = KeyGen::from_os_rng();
let (private_key, public_key) = gen.generate_keypair();
let raw_txn = RawUserTransaction::mock();
raw_txn.sign(&private_key, public_key).unwrap().into_inner()
}
pub fn mock_from(compiled_script: Vec<u8>) -> Self {
let mut gen = KeyGen::from_os_rng();
let (private_key, public_key) = gen.generate_keypair();
let raw_txn = RawUserTransaction::mock_from(compiled_script);
raw_txn.sign(&private_key, public_key).unwrap().into_inner()
}
}
/// The status of executing a transaction. The VM decides whether or not we should `Keep` the
/// transaction output or `Discard` it based upon the execution of the transaction. We wrap these
/// decisions around a `VMStatus` that provides more detail on the final execution state of the VM.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum TransactionStatus {
/// Discard the transaction output
Disc
|
{
&self.raw_txn
}
|
identifier_body
|
sorting.go
|
.sorted.topics = map[string][]sortedTriggerEntry{}
rs.sorted.thats = map[string][]sortedTriggerEntry{}
rs.say("Sorting triggers...")
// If there are no topics, give an error.
if len(rs.topics) == 0 {
return errors.New("SortReplies: no topics were found; did you load any RiveScript code?")
}
// Loop through all the topics.
for topic := range rs.topics {
rs.say("Analyzing topic %s", topic)
// Collect a list of all the triggers we're going to worry about. If this
// topic inherits another topic, we need to recursively add those to the
// list as well.
allTriggers := rs.getTopicTriggers(topic, false)
// Sort these triggers.
rs.sorted.topics[topic] = rs.sortTriggerSet(allTriggers, true)
// Get all of the %Previous triggers for this topic.
thatTriggers := rs.getTopicTriggers(topic, true)
// And sort them, too.
rs.sorted.thats[topic] = rs.sortTriggerSet(thatTriggers, false)
}
// Sort the substitution lists.
rs.sorted.sub = sortList(rs.sub)
rs.sorted.person = sortList(rs.person)
// Did we sort anything at all?
if len(rs.sorted.topics) == 0 && len(rs.sorted.thats) == 0 {
return errors.New("SortReplies: ended up with empty trigger lists; did you load any RiveScript code?")
}
return nil
}
/*
sortTriggerSet sorts a group of triggers in an optimal sorting order.
This function has two use cases:
1. Create a sort buffer for "normal" (matchable) triggers, which are triggers
that are NOT accompanied by a %Previous tag.
2. Create a sort buffer for triggers that had %Previous tags.
Use the `excludePrevious` parameter to control which one is being done. This
function will return a list of sortedTriggerEntry items, and it's intended to
have no duplicate trigger patterns (unless the source RiveScript code explicitly
uses the same duplicate pattern twice, which is a user error).
*/
func (rs *RiveScript) sortTriggerSet(triggers []sortedTriggerEntry, excludePrevious bool) []sortedTriggerEntry {
// Create a priority map, of priority numbers -> their triggers.
prior := map[int][]sortedTriggerEntry{}
// Go through and bucket each trigger by weight (priority).
for _, trig := range triggers {
if excludePrevious && trig.pointer.previous != "" {
continue
}
// Check the trigger text for any {weight} tags, default being 0
match := reWeight.FindStringSubmatch(trig.trigger)
weight := 0
if len(match) > 0 {
weight, _ = strconv.Atoi(match[1])
}
// First trigger of this priority? Initialize the weight map.
if _, ok := prior[weight]; !ok {
prior[weight] = []sortedTriggerEntry{}
}
prior[weight] = append(prior[weight], trig)
}
// Keep a running list of sorted triggers for this topic.
running := []sortedTriggerEntry{}
// Sort the priorities with the highest number first.
var sortedPriorities []int
for k := range prior {
sortedPriorities = append(sortedPriorities, k)
}
sort.Sort(sort.Reverse(sort.IntSlice(sortedPriorities)))
// Go through each priority set.
for _, p := range sortedPriorities {
rs.say("Sorting triggers with priority %d", p)
// So, some of these triggers may include an {inherits} tag, if they
// came from a topic which inherits another topic. Lower inherits values
// mean higher priority on the stack. Triggers that have NO inherits
// value at all (which will default to -1), will be moved to the END of
// the stack at the end (have the highest number/lowest priority).
inherits := -1 // -1 means no {inherits} tag
highestInherits := -1 // Highest number seen so far
// Loop through and categorize these triggers.
track := map[int]*sortTrack{}
track[inherits] = initSortTrack()
// Loop through all the triggers.
for _, trig := range prior[p] {
pattern := trig.trigger
rs.say("Looking at trigger: %s", pattern)
// See if the trigger has an {inherits} tag.
match := reInherits.FindStringSubmatch(pattern)
if len(match) > 0 {
inherits, _ = strconv.Atoi(match[1])
if inherits > highestInherits {
highestInherits = inherits
}
rs.say("Trigger belongs to a topic that inherits other topics. "+
"Level=%d", inherits)
pattern = reInherits.ReplaceAllString(pattern, "")
} else {
inherits = -1
}
// If this is the first time we've seen this inheritance level,
// initialize its sort track structure.
if _, ok := track[inherits]; !ok {
track[inherits] = initSortTrack()
}
// Start inspecting the trigger's contents.
if strings.Contains(pattern, "_") {
// Alphabetic wildcard included.
cnt := wordCount(pattern, false)
rs.say("Has a _ wildcard with %d words", cnt)
if cnt > 0 {
if _, ok := track[inherits].alpha[cnt]; !ok {
track[inherits].alpha[cnt] = []sortedTriggerEntry{}
}
track[inherits].alpha[cnt] = append(track[inherits].alpha[cnt], trig)
} else {
track[inherits].under = append(track[inherits].under, trig)
}
} else if strings.Contains(pattern, "#") {
// Numeric wildcard included.
cnt := wordCount(pattern, false)
rs.say("Has a # wildcard with %d words", cnt)
if cnt > 0 {
if _, ok := track[inherits].number[cnt]; !ok {
track[inherits].number[cnt] = []sortedTriggerEntry{}
}
track[inherits].number[cnt] = append(track[inherits].number[cnt], trig)
} else {
track[inherits].pound = append(track[inherits].pound, trig)
}
} else if strings.Contains(pattern, "*") {
// Wildcard included.
cnt := wordCount(pattern, false)
rs.say("Has a * wildcard with %d words", cnt)
if cnt > 0 {
if _, ok := track[inherits].wild[cnt]; !ok {
track[inherits].wild[cnt] = []sortedTriggerEntry{}
}
track[inherits].wild[cnt] = append(track[inherits].wild[cnt], trig)
} else {
track[inherits].star = append(track[inherits].star, trig)
}
} else if strings.Contains(pattern, "[") {
// Optionals included.
cnt := wordCount(pattern, false)
rs.say("Has optionals with %d words", cnt)
if _, ok := track[inherits].option[cnt]; !ok {
track[inherits].option[cnt] = []sortedTriggerEntry{}
}
track[inherits].option[cnt] = append(track[inherits].option[cnt], trig)
} else {
// Totally atomic.
cnt := wordCount(pattern, false)
rs.say("Totally atomic trigger with %d words", cnt)
if _, ok := track[inherits].atomic[cnt]; !ok {
track[inherits].atomic[cnt] = []sortedTriggerEntry{}
}
track[inherits].atomic[cnt] = append(track[inherits].atomic[cnt], trig)
}
}
// Move the no-{inherits} triggers to the bottom of the stack.
track[highestInherits+1] = track[-1]
delete(track, -1)
// Sort the track from the lowest to the highest.
var trackSorted []int
for k := range track {
trackSorted = append(trackSorted, k)
}
sort.Ints(trackSorted)
// Go through each priority level from greatest to smallest.
for _, ip := range trackSorted {
rs.say("ip=%d", ip)
// Sort each of the main kinds of triggers by their word counts.
running = sortByWords(running, track[ip].atomic)
running = sortByWords(running, track[ip].option)
running = sortByWords(running, track[ip].alpha)
running = sortByWords(running, track[ip].number)
running = sortByWords(running, track[ip].wild)
// Add the single wildcard triggers, sorted by length.
running = sortByLength(running, track[ip].under)
running = sortByLength(running, track[ip].pound)
running = sortByLength(running, track[ip].star)
}
}
return running
}
// sortList sorts lists (like substitutions) from a string:string map.
func
|
(dict map[string]string) []string {
output := []string{}
// Track by number of words.
track := map[int][]string{}
// Loop through each item
|
sortList
|
identifier_name
|
sorting.go
|
]*sortTrack{}
track[inherits] = initSortTrack()
// Loop through all the triggers.
for _, trig := range prior[p] {
pattern := trig.trigger
rs.say("Looking at trigger: %s", pattern)
// See if the trigger has an {inherits} tag.
match := reInherits.FindStringSubmatch(pattern)
if len(match) > 0 {
inherits, _ = strconv.Atoi(match[1])
if inherits > highestInherits {
highestInherits = inherits
}
rs.say("Trigger belongs to a topic that inherits other topics. "+
"Level=%d", inherits)
pattern = reInherits.ReplaceAllString(pattern, "")
} else {
inherits = -1
}
// If this is the first time we've seen this inheritance level,
// initialize its sort track structure.
if _, ok := track[inherits]; !ok {
track[inherits] = initSortTrack()
}
// Start inspecting the trigger's contents.
if strings.Contains(pattern, "_") {
// Alphabetic wildcard included.
cnt := wordCount(pattern, false)
rs.say("Has a _ wildcard with %d words", cnt)
if cnt > 0 {
if _, ok := track[inherits].alpha[cnt]; !ok {
track[inherits].alpha[cnt] = []sortedTriggerEntry{}
}
track[inherits].alpha[cnt] = append(track[inherits].alpha[cnt], trig)
} else {
track[inherits].under = append(track[inherits].under, trig)
}
} else if strings.Contains(pattern, "#") {
// Numeric wildcard included.
cnt := wordCount(pattern, false)
rs.say("Has a # wildcard with %d words", cnt)
if cnt > 0 {
if _, ok := track[inherits].number[cnt]; !ok {
track[inherits].number[cnt] = []sortedTriggerEntry{}
}
track[inherits].number[cnt] = append(track[inherits].number[cnt], trig)
} else {
track[inherits].pound = append(track[inherits].pound, trig)
}
} else if strings.Contains(pattern, "*") {
// Wildcard included.
cnt := wordCount(pattern, false)
rs.say("Has a * wildcard with %d words", cnt)
if cnt > 0 {
if _, ok := track[inherits].wild[cnt]; !ok {
track[inherits].wild[cnt] = []sortedTriggerEntry{}
}
track[inherits].wild[cnt] = append(track[inherits].wild[cnt], trig)
} else {
track[inherits].star = append(track[inherits].star, trig)
}
} else if strings.Contains(pattern, "[") {
// Optionals included.
cnt := wordCount(pattern, false)
rs.say("Has optionals with %d words", cnt)
if _, ok := track[inherits].option[cnt]; !ok {
track[inherits].option[cnt] = []sortedTriggerEntry{}
}
track[inherits].option[cnt] = append(track[inherits].option[cnt], trig)
} else {
// Totally atomic.
cnt := wordCount(pattern, false)
rs.say("Totally atomic trigger with %d words", cnt)
if _, ok := track[inherits].atomic[cnt]; !ok {
track[inherits].atomic[cnt] = []sortedTriggerEntry{}
}
track[inherits].atomic[cnt] = append(track[inherits].atomic[cnt], trig)
}
}
// Move the no-{inherits} triggers to the bottom of the stack.
track[highestInherits+1] = track[-1]
delete(track, -1)
// Sort the track from the lowest to the highest.
var trackSorted []int
for k := range track {
trackSorted = append(trackSorted, k)
}
sort.Ints(trackSorted)
// Go through each priority level from greatest to smallest.
for _, ip := range trackSorted {
rs.say("ip=%d", ip)
// Sort each of the main kinds of triggers by their word counts.
running = sortByWords(running, track[ip].atomic)
running = sortByWords(running, track[ip].option)
running = sortByWords(running, track[ip].alpha)
running = sortByWords(running, track[ip].number)
running = sortByWords(running, track[ip].wild)
// Add the single wildcard triggers, sorted by length.
running = sortByLength(running, track[ip].under)
running = sortByLength(running, track[ip].pound)
running = sortByLength(running, track[ip].star)
}
}
return running
}
// sortList sorts lists (like substitutions) from a string:string map.
func sortList(dict map[string]string) []string {
output := []string{}
// Track by number of words.
track := map[int][]string{}
// Loop through each item.
for item := range dict {
cnt := wordCount(item, true)
if _, ok := track[cnt]; !ok {
track[cnt] = []string{}
}
track[cnt] = append(track[cnt], item)
}
// Sort them by word count, descending.
sortedCounts := []int{}
for cnt := range track {
sortedCounts = append(sortedCounts, cnt)
}
sort.Sort(sort.Reverse(sort.IntSlice(sortedCounts)))
for _, cnt := range sortedCounts {
// Sort the strings of this word-count by their lengths.
sortedLengths := track[cnt]
sort.Sort(sort.Reverse(byLength(sortedLengths)))
output = append(output, sortedLengths...)
}
return output
}
/*
sortByWords sorts a set of triggers by word count and overall length.
This is a helper function for sorting the `atomic`, `option`, `alpha`, `number`
and `wild` attributes of the sortTrack and adding them to the running sort
buffer in that specific order. Since attribute lookup by reflection is expensive
in Go, this function is given the relevant sort buffer directly, and the current
running sort buffer to add the results to.
The `triggers` parameter is a map between word counts and the triggers that
fit that number of words.
*/
func sortByWords(running []sortedTriggerEntry, triggers map[int][]sortedTriggerEntry) []sortedTriggerEntry {
// Sort the triggers by their word counts from greatest to smallest.
var sortedWords []int
for wc := range triggers {
sortedWords = append(sortedWords, wc)
}
sort.Sort(sort.Reverse(sort.IntSlice(sortedWords)))
for _, wc := range sortedWords {
// Triggers with equal word lengths should be sorted by overall trigger length.
var sortedPatterns []string
patternMap := map[string][]sortedTriggerEntry{}
for _, trig := range triggers[wc] {
sortedPatterns = append(sortedPatterns, trig.trigger)
if _, ok := patternMap[trig.trigger]; !ok {
patternMap[trig.trigger] = []sortedTriggerEntry{}
}
patternMap[trig.trigger] = append(patternMap[trig.trigger], trig)
}
sort.Sort(sort.Reverse(byLength(sortedPatterns)))
// Add the triggers to the running triggers bucket.
for _, pattern := range sortedPatterns {
running = append(running, patternMap[pattern]...)
}
}
return running
}
/*
sortByLength sorts a set of triggers purely by character length.
This is like `sortByWords`, but it's intended for triggers that consist solely
of wildcard-like symbols with no real words. For example a trigger of `* * *`
qualifies for this, and it has no words, so we sort by length so it gets a
higher priority than simply `*`.
*/
func sortByLength(running []sortedTriggerEntry, triggers []sortedTriggerEntry) []sortedTriggerEntry {
var sortedPatterns []string
patternMap := map[string][]sortedTriggerEntry{}
for _, trig := range triggers {
sortedPatterns = append(sortedPatterns, trig.trigger)
if _, ok := patternMap[trig.trigger]; !ok {
patternMap[trig.trigger] = []sortedTriggerEntry{}
}
patternMap[trig.trigger] = append(patternMap[trig.trigger], trig)
}
sort.Sort(sort.Reverse(byLength(sortedPatterns)))
// Only loop through unique patterns.
patternSet := map[string]bool{}
// Add them to the running triggers bucket.
for _, pattern := range sortedPatterns {
if _, ok := patternSet[pattern]; ok {
continue
}
patternSet[pattern] = true
running = append(running, patternMap[pattern]...)
}
return running
}
// initSortTrack initializes a new, empty sortTrack object.
func initSortTrack() *sortTrack
|
{
return &sortTrack{
atomic: map[int][]sortedTriggerEntry{},
option: map[int][]sortedTriggerEntry{},
alpha: map[int][]sortedTriggerEntry{},
number: map[int][]sortedTriggerEntry{},
wild: map[int][]sortedTriggerEntry{},
pound: []sortedTriggerEntry{},
under: []sortedTriggerEntry{},
star: []sortedTriggerEntry{},
}
}
|
identifier_body
|
|
sorting.go
|
.sorted.topics = map[string][]sortedTriggerEntry{}
rs.sorted.thats = map[string][]sortedTriggerEntry{}
rs.say("Sorting triggers...")
// If there are no topics, give an error.
if len(rs.topics) == 0 {
return errors.New("SortReplies: no topics were found; did you load any RiveScript code?")
}
// Loop through all the topics.
for topic := range rs.topics {
rs.say("Analyzing topic %s", topic)
// Collect a list of all the triggers we're going to worry about. If this
// topic inherits another topic, we need to recursively add those to the
// list as well.
allTriggers := rs.getTopicTriggers(topic, false)
// Sort these triggers.
rs.sorted.topics[topic] = rs.sortTriggerSet(allTriggers, true)
// Get all of the %Previous triggers for this topic.
thatTriggers := rs.getTopicTriggers(topic, true)
// And sort them, too.
rs.sorted.thats[topic] = rs.sortTriggerSet(thatTriggers, false)
}
// Sort the substitution lists.
rs.sorted.sub = sortList(rs.sub)
rs.sorted.person = sortList(rs.person)
// Did we sort anything at all?
if len(rs.sorted.topics) == 0 && len(rs.sorted.thats) == 0 {
return errors.New("SortReplies: ended up with empty trigger lists; did you load any RiveScript code?")
}
return nil
}
/*
sortTriggerSet sorts a group of triggers in an optimal sorting order.
This function has two use cases:
1. Create a sort buffer for "normal" (matchable) triggers, which are triggers
that are NOT accompanied by a %Previous tag.
2. Create a sort buffer for triggers that had %Previous tags.
Use the `excludePrevious` parameter to control which one is being done. This
function will return a list of sortedTriggerEntry items, and it's intended to
have no duplicate trigger patterns (unless the source RiveScript code explicitly
uses the same duplicate pattern twice, which is a user error).
*/
func (rs *RiveScript) sortTriggerSet(triggers []sortedTriggerEntry, excludePrevious bool) []sortedTriggerEntry {
// Create a priority map, of priority numbers -> their triggers.
prior := map[int][]sortedTriggerEntry{}
// Go through and bucket each trigger by weight (priority).
for _, trig := range triggers {
if excludePrevious && trig.pointer.previous != "" {
continue
}
// Check the trigger text for any {weight} tags, default being 0
match := reWeight.FindStringSubmatch(trig.trigger)
weight := 0
if len(match) > 0 {
weight, _ = strconv.Atoi(match[1])
}
// First trigger of this priority? Initialize the weight map.
if _, ok := prior[weight]; !ok {
prior[weight] = []sortedTriggerEntry{}
}
prior[weight] = append(prior[weight], trig)
}
// Keep a running list of sorted triggers for this topic.
running := []sortedTriggerEntry{}
// Sort the priorities with the highest number first.
var sortedPriorities []int
for k := range prior {
sortedPriorities = append(sortedPriorities, k)
}
sort.Sort(sort.Reverse(sort.IntSlice(sortedPriorities)))
// Go through each priority set.
for _, p := range sortedPriorities {
rs.say("Sorting triggers with priority %d", p)
// So, some of these triggers may include an {inherits} tag, if they
// came from a topic which inherits another topic. Lower inherits values
// mean higher priority on the stack. Triggers that have NO inherits
// value at all (which will default to -1), will be moved to the END of
// the stack at the end (have the highest number/lowest priority).
inherits := -1 // -1 means no {inherits} tag
highestInherits := -1 // Highest number seen so far
// Loop through and categorize these triggers.
track := map[int]*sortTrack{}
track[inherits] = initSortTrack()
// Loop through all the triggers.
for _, trig := range prior[p] {
pattern := trig.trigger
rs.say("Looking at trigger: %s", pattern)
// See if the trigger has an {inherits} tag.
match := reInherits.FindStringSubmatch(pattern)
if len(match) > 0 {
inherits, _ = strconv.Atoi(match[1])
if inherits > highestInherits {
highestInherits = inherits
}
rs.say("Trigger belongs to a topic that inherits other topics. "+
"Level=%d", inherits)
pattern = reInherits.ReplaceAllString(pattern, "")
} else {
inherits = -1
}
// If this is the first time we've seen this inheritance level,
// initialize its sort track structure.
if _, ok := track[inherits]; !ok {
track[inherits] = initSortTrack()
}
// Start inspecting the trigger's contents.
if strings.Contains(pattern, "_") {
// Alphabetic wildcard included.
cnt := wordCount(pattern, false)
rs.say("Has a _ wildcard with %d words", cnt)
if cnt > 0 {
if _, ok := track[inherits].alpha[cnt]; !ok {
track[inherits].alpha[cnt] = []sortedTriggerEntry{}
}
track[inherits].alpha[cnt] = append(track[inherits].alpha[cnt], trig)
} else {
track[inherits].under = append(track[inherits].under, trig)
}
} else if strings.Contains(pattern, "#") {
// Numeric wildcard included.
cnt := wordCount(pattern, false)
rs.say("Has a # wildcard with %d words", cnt)
if cnt > 0 {
if _, ok := track[inherits].number[cnt]; !ok {
track[inherits].number[cnt] = []sortedTriggerEntry{}
}
track[inherits].number[cnt] = append(track[inherits].number[cnt], trig)
} else {
track[inherits].pound = append(track[inherits].pound, trig)
}
} else if strings.Contains(pattern, "*") {
// Wildcard included.
cnt := wordCount(pattern, false)
rs.say("Has a * wildcard with %d words", cnt)
if cnt > 0 {
if _, ok := track[inherits].wild[cnt]; !ok {
track[inherits].wild[cnt] = []sortedTriggerEntry{}
}
track[inherits].wild[cnt] = append(track[inherits].wild[cnt], trig)
} else {
track[inherits].star = append(track[inherits].star, trig)
}
} else if strings.Contains(pattern, "[") {
// Optionals included.
cnt := wordCount(pattern, false)
rs.say("Has optionals with %d words", cnt)
if _, ok := track[inherits].option[cnt]; !ok {
track[inherits].option[cnt] = []sortedTriggerEntry{}
}
track[inherits].option[cnt] = append(track[inherits].option[cnt], trig)
} else {
// Totally atomic.
cnt := wordCount(pattern, false)
|
track[inherits].atomic[cnt] = append(track[inherits].atomic[cnt], trig)
}
}
// Move the no-{inherits} triggers to the bottom of the stack.
track[highestInherits+1] = track[-1]
delete(track, -1)
// Sort the track from the lowest to the highest.
var trackSorted []int
for k := range track {
trackSorted = append(trackSorted, k)
}
sort.Ints(trackSorted)
// Go through each priority level from greatest to smallest.
for _, ip := range trackSorted {
rs.say("ip=%d", ip)
// Sort each of the main kinds of triggers by their word counts.
running = sortByWords(running, track[ip].atomic)
running = sortByWords(running, track[ip].option)
running = sortByWords(running, track[ip].alpha)
running = sortByWords(running, track[ip].number)
running = sortByWords(running, track[ip].wild)
// Add the single wildcard triggers, sorted by length.
running = sortByLength(running, track[ip].under)
running = sortByLength(running, track[ip].pound)
running = sortByLength(running, track[ip].star)
}
}
return running
}
// sortList sorts lists (like substitutions) from a string:string map.
func sortList(dict map[string]string) []string {
output := []string{}
// Track by number of words.
track := map[int][]string{}
// Loop through each item.
|
rs.say("Totally atomic trigger with %d words", cnt)
if _, ok := track[inherits].atomic[cnt]; !ok {
track[inherits].atomic[cnt] = []sortedTriggerEntry{}
}
|
random_line_split
|
sorting.go
|
p)
// So, some of these triggers may include an {inherits} tag, if they
// came from a topic which inherits another topic. Lower inherits values
// mean higher priority on the stack. Triggers that have NO inherits
// value at all (which will default to -1), will be moved to the END of
// the stack at the end (have the highest number/lowest priority).
inherits := -1 // -1 means no {inherits} tag
highestInherits := -1 // Highest number seen so far
// Loop through and categorize these triggers.
track := map[int]*sortTrack{}
track[inherits] = initSortTrack()
// Loop through all the triggers.
for _, trig := range prior[p] {
pattern := trig.trigger
rs.say("Looking at trigger: %s", pattern)
// See if the trigger has an {inherits} tag.
match := reInherits.FindStringSubmatch(pattern)
if len(match) > 0 {
inherits, _ = strconv.Atoi(match[1])
if inherits > highestInherits {
highestInherits = inherits
}
rs.say("Trigger belongs to a topic that inherits other topics. "+
"Level=%d", inherits)
pattern = reInherits.ReplaceAllString(pattern, "")
} else {
inherits = -1
}
// If this is the first time we've seen this inheritance level,
// initialize its sort track structure.
if _, ok := track[inherits]; !ok {
track[inherits] = initSortTrack()
}
// Start inspecting the trigger's contents.
if strings.Contains(pattern, "_") {
// Alphabetic wildcard included.
cnt := wordCount(pattern, false)
rs.say("Has a _ wildcard with %d words", cnt)
if cnt > 0 {
if _, ok := track[inherits].alpha[cnt]; !ok {
track[inherits].alpha[cnt] = []sortedTriggerEntry{}
}
track[inherits].alpha[cnt] = append(track[inherits].alpha[cnt], trig)
} else {
track[inherits].under = append(track[inherits].under, trig)
}
} else if strings.Contains(pattern, "#") {
// Numeric wildcard included.
cnt := wordCount(pattern, false)
rs.say("Has a # wildcard with %d words", cnt)
if cnt > 0 {
if _, ok := track[inherits].number[cnt]; !ok {
track[inherits].number[cnt] = []sortedTriggerEntry{}
}
track[inherits].number[cnt] = append(track[inherits].number[cnt], trig)
} else {
track[inherits].pound = append(track[inherits].pound, trig)
}
} else if strings.Contains(pattern, "*") {
// Wildcard included.
cnt := wordCount(pattern, false)
rs.say("Has a * wildcard with %d words", cnt)
if cnt > 0 {
if _, ok := track[inherits].wild[cnt]; !ok {
track[inherits].wild[cnt] = []sortedTriggerEntry{}
}
track[inherits].wild[cnt] = append(track[inherits].wild[cnt], trig)
} else {
track[inherits].star = append(track[inherits].star, trig)
}
} else if strings.Contains(pattern, "[") {
// Optionals included.
cnt := wordCount(pattern, false)
rs.say("Has optionals with %d words", cnt)
if _, ok := track[inherits].option[cnt]; !ok {
track[inherits].option[cnt] = []sortedTriggerEntry{}
}
track[inherits].option[cnt] = append(track[inherits].option[cnt], trig)
} else {
// Totally atomic.
cnt := wordCount(pattern, false)
rs.say("Totally atomic trigger with %d words", cnt)
if _, ok := track[inherits].atomic[cnt]; !ok {
track[inherits].atomic[cnt] = []sortedTriggerEntry{}
}
track[inherits].atomic[cnt] = append(track[inherits].atomic[cnt], trig)
}
}
// Move the no-{inherits} triggers to the bottom of the stack.
track[highestInherits+1] = track[-1]
delete(track, -1)
// Sort the track from the lowest to the highest.
var trackSorted []int
for k := range track {
trackSorted = append(trackSorted, k)
}
sort.Ints(trackSorted)
// Go through each priority level from greatest to smallest.
for _, ip := range trackSorted {
rs.say("ip=%d", ip)
// Sort each of the main kinds of triggers by their word counts.
running = sortByWords(running, track[ip].atomic)
running = sortByWords(running, track[ip].option)
running = sortByWords(running, track[ip].alpha)
running = sortByWords(running, track[ip].number)
running = sortByWords(running, track[ip].wild)
// Add the single wildcard triggers, sorted by length.
running = sortByLength(running, track[ip].under)
running = sortByLength(running, track[ip].pound)
running = sortByLength(running, track[ip].star)
}
}
return running
}
// sortList sorts lists (like substitutions) from a string:string map.
func sortList(dict map[string]string) []string {
output := []string{}
// Track by number of words.
track := map[int][]string{}
// Loop through each item.
for item := range dict {
cnt := wordCount(item, true)
if _, ok := track[cnt]; !ok {
track[cnt] = []string{}
}
track[cnt] = append(track[cnt], item)
}
// Sort them by word count, descending.
sortedCounts := []int{}
for cnt := range track {
sortedCounts = append(sortedCounts, cnt)
}
sort.Sort(sort.Reverse(sort.IntSlice(sortedCounts)))
for _, cnt := range sortedCounts {
// Sort the strings of this word-count by their lengths.
sortedLengths := track[cnt]
sort.Sort(sort.Reverse(byLength(sortedLengths)))
output = append(output, sortedLengths...)
}
return output
}
/*
sortByWords sorts a set of triggers by word count and overall length.
This is a helper function for sorting the `atomic`, `option`, `alpha`, `number`
and `wild` attributes of the sortTrack and adding them to the running sort
buffer in that specific order. Since attribute lookup by reflection is expensive
in Go, this function is given the relevant sort buffer directly, and the current
running sort buffer to add the results to.
The `triggers` parameter is a map between word counts and the triggers that
fit that number of words.
*/
func sortByWords(running []sortedTriggerEntry, triggers map[int][]sortedTriggerEntry) []sortedTriggerEntry {
// Sort the triggers by their word counts from greatest to smallest.
var sortedWords []int
for wc := range triggers {
sortedWords = append(sortedWords, wc)
}
sort.Sort(sort.Reverse(sort.IntSlice(sortedWords)))
for _, wc := range sortedWords {
// Triggers with equal word lengths should be sorted by overall trigger length.
var sortedPatterns []string
patternMap := map[string][]sortedTriggerEntry{}
for _, trig := range triggers[wc] {
sortedPatterns = append(sortedPatterns, trig.trigger)
if _, ok := patternMap[trig.trigger]; !ok {
patternMap[trig.trigger] = []sortedTriggerEntry{}
}
patternMap[trig.trigger] = append(patternMap[trig.trigger], trig)
}
sort.Sort(sort.Reverse(byLength(sortedPatterns)))
// Add the triggers to the running triggers bucket.
for _, pattern := range sortedPatterns {
running = append(running, patternMap[pattern]...)
}
}
return running
}
/*
sortByLength sorts a set of triggers purely by character length.
This is like `sortByWords`, but it's intended for triggers that consist solely
of wildcard-like symbols with no real words. For example a trigger of `* * *`
qualifies for this, and it has no words, so we sort by length so it gets a
higher priority than simply `*`.
*/
func sortByLength(running []sortedTriggerEntry, triggers []sortedTriggerEntry) []sortedTriggerEntry {
var sortedPatterns []string
patternMap := map[string][]sortedTriggerEntry{}
for _, trig := range triggers {
sortedPatterns = append(sortedPatterns, trig.trigger)
if _, ok := patternMap[trig.trigger]; !ok {
patternMap[trig.trigger] = []sortedTriggerEntry{}
}
patternMap[trig.trigger] = append(patternMap[trig.trigger], trig)
}
sort.Sort(sort.Reverse(byLength(sortedPatterns)))
// Only loop through unique patterns.
patternSet := map[string]bool{}
// Add them to the running triggers bucket.
for _, pattern := range sortedPatterns {
if _, ok := patternSet[pattern]; ok
|
{
continue
}
|
conditional_block
|
|
app.js
|
s*");
nameOne = re.exec(nameOne)[0];
nameTwo = re.exec(nameTwo)[0];
P1NAME = nameOne;
P2NAME = nameTwo;
coupleName = this.nameMash (nameOne, nameTwo);
COMBONAME = coupleName;
document.getElementById("player-1").innerHTML = (nameOne + "'s Score is:");
document.getElementById("player-2").innerHTML = (nameTwo + "'s Score is:");
document.getElementById("couple-name").innerHTML = ("Couple Name: " + coupleName);
}
function contactEmailInfo () { //grabs the contact details and transforms them into a combination name and stores everything to local storage
var coupleName, name, email;
var atRegex = /%40/g;
nameTemp = $('#form').serialize();
chopped = nameTemp.split("&");
nameOne = chopped[2].split("=")[1];
nameTwo = chopped[3].split("=")[1];
P1EMAIL = nameOne.replace(atRegex, '@');
P2EMAIL = nameTwo.replace(atRegex,'@');
document.getElementById("email-message").innerHTML = ("Thanks! We will be getting back to you sometime before the next burn!");
}
function datesTimes () {//grabs the time... no network or persistant time... Need a GPS module for the Raspberry PI
var cleanDate;
cleanDate = new Date().toLocaleString()
localStorage.setItem("timestamp", cleanDate);
}
function dataLoader(){//pulls the data from a local JSON file
$.getJSON("http://localhost:8000/Love-Staring-Machine/result.json", function(data) {
VALUE1 = data["player1"];
VALUE2 = data["player2"];
});
}
function finalMessage () {//Delivers the final message in the form of a modal overlay based on compadability of scores
//derive a compatability index based on distance of final scores
var p1Score = SCORE[0];
var p2Score = SCORE[1];
var higherScore = p1Score > p2Score ? p1Score : p2Score;
var distance = Math.abs(p1Score - p2Score);
var relDistance = distance / higherScore;
var message = [
"Hell No!!!",
"Y'all Fuck",
"True Love!"
];
var messenger;
if (relDistance > .1) {
messenger = 0;
}
else if (relDistance > .05) {
messenger = 1;
}
else {
messenger = 2;
}
//render the modal overlay
document.getElementById("timer-readout").innerHTML = (message[messenger]);
}
function finalData () {
//Stpres all he data as an object
var finalObject = {};
var totalFinalObjects = JSON.parse(localStorage.getItem("finalobjects"));
var participants = localStorage.getItem("totalparticipants");
var p1Name = P1NAME ? P1NAME : null;
p2Name = P2NAME ? P2NAME : null;
var coupleName = COMBONAME ? COMBONAME : null;
p1Email = P1EMAIL ? P1EMAIL : null;
p2Email = P2EMAIL ? P2EMAIL : null;
//grabs from localstorage the entire
totalFinalObjects[participants] = {
'p1': {
'n': p1Name,
'e': p1Email,
'p': SCORE[0],
's': p1data
},
'p2' : {
'n': p2Name,
'e': p1Email,
'p': SCORE[1],
's': p2data
},
'n': COMBONAME
};
localStorage.setItem("finalobjects", JSON.stringify(totalFinalObjects));
}
function highScore () {//checks if any of the present SCOREs are higher than the all time high SCORE
var tempSCORE = parseInt(localStorage.getItem("highScore"));
if (SCORE[0] > tempSCORE) {
localStorage.setItem("highScore", SCORE[0]);
}
if (SCORE[1] > tempSCORE) {
localStorage.setItem("highScore", SCORE[1]);
}
}
function initialization () {//initializes the persistant dashboard metrics for the first time
//add all of the local storage variables
//localStorage.setItem("highScore", 0);
//localStorage.setItem("totalparticipants", 0);
//localStorage.setItem("average", 0);
//localStorage.setItem("cumulativescores", 0)
localStorage.setItem("finalobjects", "{}");
}
function myTimer() {//Simple, non-accurate clock funtion
var secs = "" + SECONDS;
var min = MINUTES;
if ( MINUTES >= 0){
SECONDS --;
if (SECONDS < 9) {
secs = "0" + secs;
}
if (SECONDS === 0){
SECONDS = 59;
secs = "00";//TODO: fix this where the at 1:00 it displays :00
MINUTES --;
}
if (MINUTES > 0){
document.getElementById("timer-readout").innerHTML = (min + ":" + secs);
}
else {
document.getElementById("timer-readout").innerHTML = (":" + secs);
}
}
else {
makePointsFlag = false;
storeScoresForAverages();
finalMessage();
finalData();
clearTimeout(loop);
clearTimeout(slice);
clearTimeout(countdown);
clearTimeout(points);
clearTimeout(leader);
}
}
function nameMash (p1,p2) {//celebrity name generator splits on the vowels or if all else fails just smash them together
if (p1.length > 5 || p2.length > 5) {//TODO: Clean up this logic and make it more compact
//use the p1 and p2 arguments and loop through to grab the regex vowels
var re = /[aeiou]/gi;
var p1Array = p1.match(re);
var p2Array = p2.match(re);
//if there are more than 2 split on the second
if (p1Array.length >= 2 && p2Array.length >= 2) {
var i = 0
while ((match = re.exec(p1)) != null) {
i ++;
if (i == p1Array.length){
var player1 = p1.substring(0,match.index + 1);
}
}
var z = 0
while ((match = re.exec(p2)) != null) {
z ++;
if (z == p2Array.length){
var player2 = p2.substring(match.index - 2, p2.length);
}
}
return player1 + player2;
}
else {
var i = 0
while ((match = re.exec(p1)) != null) {
i ++;
if (i == 1){
var player1 = p1.substring(0,match.index + 1);
}
}
var z = 0
while ((match = re.exec(p2)) != null) {
z ++;
if (z == 1){
var player2 = p2.substring(match.index - 1, p2.length);
}
}
return player1 + player2;
}
}
else {
if (p1.length > 2 && p2.length > 2){
return p1.substring(0,1).toUpperCase() + p1.substring(1,Math.floor(p1.length/2)).toLowerCase() + p2.substring(Math.floor(p2.length/2),p2.length).toLowerCase();
}
else {
return p1.substring(0,1).toUpperCase() + p1.substring(1, p1.length).toLowerCase() + p2.substring(2,p2.length).toLowerCase();
}
}
}
function onRefreshInitialization () {//initializes the persistant dashboard metrics
document.getElementById('p1').focus();
$('#form')[0].reset();
}
function overlay() {
el = document.getElementById("overlay");
el.style.visibility = (el.style.visibility == "visible") ? "hidden" : "visible";
}
function pointGenerator () {//this function computes the points
//TODO: Make a funtion to check directional trends and integrate the result into this function
if (makePointsFlag) {
var trendValue1,
trendValue2;
trendOfReadings();
if (p1trend === '0'){
trendValue1 = 9;
}
else if (p1trend === '+'){
trendValue1 = 30;
}
else if (p1trend === '-'){
trendValue1 = 5;
}
else {
trendValue1 = 1;
}
if (p2trend === '0'){
trendValue2 = 9;
}
else if (p2trend === '+'){
trendValue2 = 30;
}
else if (p2trend === '-')
|
else {
trendValue2 = 1;
}
trendValue1 ? SCORE[0] += Math.round(trendValue1) : null;
trendValue2 ? SCORE[1] += Math.round(trendValue2) : null;
scoreRender();
}
}
function scoreRender () {
|
{
trendValue2 = 5;
}
|
conditional_block
|
app.js
|
s*");
nameOne = re.exec(nameOne)[0];
nameTwo = re.exec(nameTwo)[0];
P1NAME = nameOne;
P2NAME = nameTwo;
coupleName = this.nameMash (nameOne, nameTwo);
COMBONAME = coupleName;
document.getElementById("player-1").innerHTML = (nameOne + "'s Score is:");
document.getElementById("player-2").innerHTML = (nameTwo + "'s Score is:");
document.getElementById("couple-name").innerHTML = ("Couple Name: " + coupleName);
}
function contactEmailInfo () { //grabs the contact details and transforms them into a combination name and stores everything to local storage
var coupleName, name, email;
var atRegex = /%40/g;
nameTemp = $('#form').serialize();
chopped = nameTemp.split("&");
nameOne = chopped[2].split("=")[1];
nameTwo = chopped[3].split("=")[1];
P1EMAIL = nameOne.replace(atRegex, '@');
P2EMAIL = nameTwo.replace(atRegex,'@');
document.getElementById("email-message").innerHTML = ("Thanks! We will be getting back to you sometime before the next burn!");
}
function datesTimes () {//grabs the time... no network or persistant time... Need a GPS module for the Raspberry PI
var cleanDate;
cleanDate = new Date().toLocaleString()
localStorage.setItem("timestamp", cleanDate);
}
function dataLoader(){//pulls the data from a local JSON file
$.getJSON("http://localhost:8000/Love-Staring-Machine/result.json", function(data) {
VALUE1 = data["player1"];
VALUE2 = data["player2"];
});
}
function finalMessage () {//Delivers the final message in the form of a modal overlay based on compadability of scores
//derive a compatability index based on distance of final scores
var p1Score = SCORE[0];
var p2Score = SCORE[1];
var higherScore = p1Score > p2Score ? p1Score : p2Score;
var distance = Math.abs(p1Score - p2Score);
var relDistance = distance / higherScore;
var message = [
"Hell No!!!",
"Y'all Fuck",
"True Love!"
];
var messenger;
if (relDistance > .1) {
messenger = 0;
}
else if (relDistance > .05) {
messenger = 1;
}
else {
messenger = 2;
}
//render the modal overlay
document.getElementById("timer-readout").innerHTML = (message[messenger]);
}
function finalData () {
//Stpres all he data as an object
var finalObject = {};
var totalFinalObjects = JSON.parse(localStorage.getItem("finalobjects"));
var participants = localStorage.getItem("totalparticipants");
var p1Name = P1NAME ? P1NAME : null;
p2Name = P2NAME ? P2NAME : null;
var coupleName = COMBONAME ? COMBONAME : null;
p1Email = P1EMAIL ? P1EMAIL : null;
p2Email = P2EMAIL ? P2EMAIL : null;
//grabs from localstorage the entire
totalFinalObjects[participants] = {
'p1': {
'n': p1Name,
'e': p1Email,
'p': SCORE[0],
's': p1data
},
'p2' : {
'n': p2Name,
'e': p1Email,
'p': SCORE[1],
's': p2data
},
'n': COMBONAME
};
localStorage.setItem("finalobjects", JSON.stringify(totalFinalObjects));
}
function highScore ()
|
function initialization () {//initializes the persistant dashboard metrics for the first time
//add all of the local storage variables
//localStorage.setItem("highScore", 0);
//localStorage.setItem("totalparticipants", 0);
//localStorage.setItem("average", 0);
//localStorage.setItem("cumulativescores", 0)
localStorage.setItem("finalobjects", "{}");
}
function myTimer() {//Simple, non-accurate clock funtion
var secs = "" + SECONDS;
var min = MINUTES;
if ( MINUTES >= 0){
SECONDS --;
if (SECONDS < 9) {
secs = "0" + secs;
}
if (SECONDS === 0){
SECONDS = 59;
secs = "00";//TODO: fix this where the at 1:00 it displays :00
MINUTES --;
}
if (MINUTES > 0){
document.getElementById("timer-readout").innerHTML = (min + ":" + secs);
}
else {
document.getElementById("timer-readout").innerHTML = (":" + secs);
}
}
else {
makePointsFlag = false;
storeScoresForAverages();
finalMessage();
finalData();
clearTimeout(loop);
clearTimeout(slice);
clearTimeout(countdown);
clearTimeout(points);
clearTimeout(leader);
}
}
function nameMash (p1,p2) {//celebrity name generator splits on the vowels or if all else fails just smash them together
if (p1.length > 5 || p2.length > 5) {//TODO: Clean up this logic and make it more compact
//use the p1 and p2 arguments and loop through to grab the regex vowels
var re = /[aeiou]/gi;
var p1Array = p1.match(re);
var p2Array = p2.match(re);
//if there are more than 2 split on the second
if (p1Array.length >= 2 && p2Array.length >= 2) {
var i = 0
while ((match = re.exec(p1)) != null) {
i ++;
if (i == p1Array.length){
var player1 = p1.substring(0,match.index + 1);
}
}
var z = 0
while ((match = re.exec(p2)) != null) {
z ++;
if (z == p2Array.length){
var player2 = p2.substring(match.index - 2, p2.length);
}
}
return player1 + player2;
}
else {
var i = 0
while ((match = re.exec(p1)) != null) {
i ++;
if (i == 1){
var player1 = p1.substring(0,match.index + 1);
}
}
var z = 0
while ((match = re.exec(p2)) != null) {
z ++;
if (z == 1){
var player2 = p2.substring(match.index - 1, p2.length);
}
}
return player1 + player2;
}
}
else {
if (p1.length > 2 && p2.length > 2){
return p1.substring(0,1).toUpperCase() + p1.substring(1,Math.floor(p1.length/2)).toLowerCase() + p2.substring(Math.floor(p2.length/2),p2.length).toLowerCase();
}
else {
return p1.substring(0,1).toUpperCase() + p1.substring(1, p1.length).toLowerCase() + p2.substring(2,p2.length).toLowerCase();
}
}
}
function onRefreshInitialization () {//initializes the persistant dashboard metrics
document.getElementById('p1').focus();
$('#form')[0].reset();
}
function overlay() {
el = document.getElementById("overlay");
el.style.visibility = (el.style.visibility == "visible") ? "hidden" : "visible";
}
function pointGenerator () {//this function computes the points
//TODO: Make a funtion to check directional trends and integrate the result into this function
if (makePointsFlag) {
var trendValue1,
trendValue2;
trendOfReadings();
if (p1trend === '0'){
trendValue1 = 9;
}
else if (p1trend === '+'){
trendValue1 = 30;
}
else if (p1trend === '-'){
trendValue1 = 5;
}
else {
trendValue1 = 1;
}
if (p2trend === '0'){
trendValue2 = 9;
}
else if (p2trend === '+'){
trendValue2 = 30;
}
else if (p2trend === '-'){
trendValue2 = 5;
}
else {
trendValue2 = 1;
}
trendValue1 ? SCORE[0] += Math.round(trendValue1) : null;
trendValue2 ? SCORE[1] += Math.round(trendValue2) : null;
scoreRender();
}
}
function scoreRender () {
|
{//checks if any of the present SCOREs are higher than the all time high SCORE
var tempSCORE = parseInt(localStorage.getItem("highScore"));
if (SCORE[0] > tempSCORE) {
localStorage.setItem("highScore", SCORE[0]);
}
if (SCORE[1] > tempSCORE) {
localStorage.setItem("highScore", SCORE[1]);
}
}
|
identifier_body
|
app.js
|
s*");
nameOne = re.exec(nameOne)[0];
nameTwo = re.exec(nameTwo)[0];
P1NAME = nameOne;
P2NAME = nameTwo;
coupleName = this.nameMash (nameOne, nameTwo);
COMBONAME = coupleName;
document.getElementById("player-1").innerHTML = (nameOne + "'s Score is:");
document.getElementById("player-2").innerHTML = (nameTwo + "'s Score is:");
document.getElementById("couple-name").innerHTML = ("Couple Name: " + coupleName);
}
function contactEmailInfo () { //grabs the contact details and transforms them into a combination name and stores everything to local storage
var coupleName, name, email;
var atRegex = /%40/g;
nameTemp = $('#form').serialize();
chopped = nameTemp.split("&");
nameOne = chopped[2].split("=")[1];
nameTwo = chopped[3].split("=")[1];
P1EMAIL = nameOne.replace(atRegex, '@');
P2EMAIL = nameTwo.replace(atRegex,'@');
document.getElementById("email-message").innerHTML = ("Thanks! We will be getting back to you sometime before the next burn!");
}
function datesTimes () {//grabs the time... no network or persistant time... Need a GPS module for the Raspberry PI
var cleanDate;
cleanDate = new Date().toLocaleString()
localStorage.setItem("timestamp", cleanDate);
}
function dataLoader(){//pulls the data from a local JSON file
$.getJSON("http://localhost:8000/Love-Staring-Machine/result.json", function(data) {
VALUE1 = data["player1"];
VALUE2 = data["player2"];
});
}
function finalMessage () {//Delivers the final message in the form of a modal overlay based on compadability of scores
//derive a compatability index based on distance of final scores
var p1Score = SCORE[0];
var p2Score = SCORE[1];
var higherScore = p1Score > p2Score ? p1Score : p2Score;
var distance = Math.abs(p1Score - p2Score);
var relDistance = distance / higherScore;
var message = [
"Hell No!!!",
"Y'all Fuck",
"True Love!"
];
var messenger;
if (relDistance > .1) {
messenger = 0;
}
else if (relDistance > .05) {
messenger = 1;
}
else {
messenger = 2;
}
//render the modal overlay
document.getElementById("timer-readout").innerHTML = (message[messenger]);
}
function finalData () {
//Stpres all he data as an object
var finalObject = {};
var totalFinalObjects = JSON.parse(localStorage.getItem("finalobjects"));
var participants = localStorage.getItem("totalparticipants");
var p1Name = P1NAME ? P1NAME : null;
p2Name = P2NAME ? P2NAME : null;
var coupleName = COMBONAME ? COMBONAME : null;
p1Email = P1EMAIL ? P1EMAIL : null;
p2Email = P2EMAIL ? P2EMAIL : null;
//grabs from localstorage the entire
totalFinalObjects[participants] = {
'p1': {
'n': p1Name,
'e': p1Email,
'p': SCORE[0],
's': p1data
},
'p2' : {
'n': p2Name,
'e': p1Email,
'p': SCORE[1],
's': p2data
},
'n': COMBONAME
|
function highScore () {//checks if any of the present SCOREs are higher than the all time high SCORE
var tempSCORE = parseInt(localStorage.getItem("highScore"));
if (SCORE[0] > tempSCORE) {
localStorage.setItem("highScore", SCORE[0]);
}
if (SCORE[1] > tempSCORE) {
localStorage.setItem("highScore", SCORE[1]);
}
}
function initialization () {//initializes the persistant dashboard metrics for the first time
//add all of the local storage variables
//localStorage.setItem("highScore", 0);
//localStorage.setItem("totalparticipants", 0);
//localStorage.setItem("average", 0);
//localStorage.setItem("cumulativescores", 0)
localStorage.setItem("finalobjects", "{}");
}
function myTimer() {//Simple, non-accurate clock funtion
var secs = "" + SECONDS;
var min = MINUTES;
if ( MINUTES >= 0){
SECONDS --;
if (SECONDS < 9) {
secs = "0" + secs;
}
if (SECONDS === 0){
SECONDS = 59;
secs = "00";//TODO: fix this where the at 1:00 it displays :00
MINUTES --;
}
if (MINUTES > 0){
document.getElementById("timer-readout").innerHTML = (min + ":" + secs);
}
else {
document.getElementById("timer-readout").innerHTML = (":" + secs);
}
}
else {
makePointsFlag = false;
storeScoresForAverages();
finalMessage();
finalData();
clearTimeout(loop);
clearTimeout(slice);
clearTimeout(countdown);
clearTimeout(points);
clearTimeout(leader);
}
}
function nameMash (p1,p2) {//celebrity name generator splits on the vowels or if all else fails just smash them together
if (p1.length > 5 || p2.length > 5) {//TODO: Clean up this logic and make it more compact
//use the p1 and p2 arguments and loop through to grab the regex vowels
var re = /[aeiou]/gi;
var p1Array = p1.match(re);
var p2Array = p2.match(re);
//if there are more than 2 split on the second
if (p1Array.length >= 2 && p2Array.length >= 2) {
var i = 0
while ((match = re.exec(p1)) != null) {
i ++;
if (i == p1Array.length){
var player1 = p1.substring(0,match.index + 1);
}
}
var z = 0
while ((match = re.exec(p2)) != null) {
z ++;
if (z == p2Array.length){
var player2 = p2.substring(match.index - 2, p2.length);
}
}
return player1 + player2;
}
else {
var i = 0
while ((match = re.exec(p1)) != null) {
i ++;
if (i == 1){
var player1 = p1.substring(0,match.index + 1);
}
}
var z = 0
while ((match = re.exec(p2)) != null) {
z ++;
if (z == 1){
var player2 = p2.substring(match.index - 1, p2.length);
}
}
return player1 + player2;
}
}
else {
if (p1.length > 2 && p2.length > 2){
return p1.substring(0,1).toUpperCase() + p1.substring(1,Math.floor(p1.length/2)).toLowerCase() + p2.substring(Math.floor(p2.length/2),p2.length).toLowerCase();
}
else {
return p1.substring(0,1).toUpperCase() + p1.substring(1, p1.length).toLowerCase() + p2.substring(2,p2.length).toLowerCase();
}
}
}
function onRefreshInitialization () {//initializes the persistant dashboard metrics
document.getElementById('p1').focus();
$('#form')[0].reset();
}
function overlay() {
el = document.getElementById("overlay");
el.style.visibility = (el.style.visibility == "visible") ? "hidden" : "visible";
}
function pointGenerator () {//this function computes the points
//TODO: Make a funtion to check directional trends and integrate the result into this function
if (makePointsFlag) {
var trendValue1,
trendValue2;
trendOfReadings();
if (p1trend === '0'){
trendValue1 = 9;
}
else if (p1trend === '+'){
trendValue1 = 30;
}
else if (p1trend === '-'){
trendValue1 = 5;
}
else {
trendValue1 = 1;
}
if (p2trend === '0'){
trendValue2 = 9;
}
else if (p2trend === '+'){
trendValue2 = 30;
}
else if (p2trend === '-'){
trendValue2 = 5;
}
else {
trendValue2 = 1;
}
trendValue1 ? SCORE[0] += Math.round(trendValue1) : null;
trendValue2 ? SCORE[1] += Math.round(trendValue2) : null;
scoreRender();
}
}
function scoreRender () {
|
};
localStorage.setItem("finalobjects", JSON.stringify(totalFinalObjects));
}
|
random_line_split
|
app.js
|
s*");
nameOne = re.exec(nameOne)[0];
nameTwo = re.exec(nameTwo)[0];
P1NAME = nameOne;
P2NAME = nameTwo;
coupleName = this.nameMash (nameOne, nameTwo);
COMBONAME = coupleName;
document.getElementById("player-1").innerHTML = (nameOne + "'s Score is:");
document.getElementById("player-2").innerHTML = (nameTwo + "'s Score is:");
document.getElementById("couple-name").innerHTML = ("Couple Name: " + coupleName);
}
function contactEmailInfo () { //grabs the contact details and transforms them into a combination name and stores everything to local storage
var coupleName, name, email;
var atRegex = /%40/g;
nameTemp = $('#form').serialize();
chopped = nameTemp.split("&");
nameOne = chopped[2].split("=")[1];
nameTwo = chopped[3].split("=")[1];
P1EMAIL = nameOne.replace(atRegex, '@');
P2EMAIL = nameTwo.replace(atRegex,'@');
document.getElementById("email-message").innerHTML = ("Thanks! We will be getting back to you sometime before the next burn!");
}
function datesTimes () {//grabs the time... no network or persistant time... Need a GPS module for the Raspberry PI
var cleanDate;
cleanDate = new Date().toLocaleString()
localStorage.setItem("timestamp", cleanDate);
}
function dataLoader(){//pulls the data from a local JSON file
$.getJSON("http://localhost:8000/Love-Staring-Machine/result.json", function(data) {
VALUE1 = data["player1"];
VALUE2 = data["player2"];
});
}
function finalMessage () {//Delivers the final message in the form of a modal overlay based on compadability of scores
//derive a compatability index based on distance of final scores
var p1Score = SCORE[0];
var p2Score = SCORE[1];
var higherScore = p1Score > p2Score ? p1Score : p2Score;
var distance = Math.abs(p1Score - p2Score);
var relDistance = distance / higherScore;
var message = [
"Hell No!!!",
"Y'all Fuck",
"True Love!"
];
var messenger;
if (relDistance > .1) {
messenger = 0;
}
else if (relDistance > .05) {
messenger = 1;
}
else {
messenger = 2;
}
//render the modal overlay
document.getElementById("timer-readout").innerHTML = (message[messenger]);
}
function finalData () {
//Stpres all he data as an object
var finalObject = {};
var totalFinalObjects = JSON.parse(localStorage.getItem("finalobjects"));
var participants = localStorage.getItem("totalparticipants");
var p1Name = P1NAME ? P1NAME : null;
p2Name = P2NAME ? P2NAME : null;
var coupleName = COMBONAME ? COMBONAME : null;
p1Email = P1EMAIL ? P1EMAIL : null;
p2Email = P2EMAIL ? P2EMAIL : null;
//grabs from localstorage the entire
totalFinalObjects[participants] = {
'p1': {
'n': p1Name,
'e': p1Email,
'p': SCORE[0],
's': p1data
},
'p2' : {
'n': p2Name,
'e': p1Email,
'p': SCORE[1],
's': p2data
},
'n': COMBONAME
};
localStorage.setItem("finalobjects", JSON.stringify(totalFinalObjects));
}
function highScore () {//checks if any of the present SCOREs are higher than the all time high SCORE
var tempSCORE = parseInt(localStorage.getItem("highScore"));
if (SCORE[0] > tempSCORE) {
localStorage.setItem("highScore", SCORE[0]);
}
if (SCORE[1] > tempSCORE) {
localStorage.setItem("highScore", SCORE[1]);
}
}
function initialization () {//initializes the persistant dashboard metrics for the first time
//add all of the local storage variables
//localStorage.setItem("highScore", 0);
//localStorage.setItem("totalparticipants", 0);
//localStorage.setItem("average", 0);
//localStorage.setItem("cumulativescores", 0)
localStorage.setItem("finalobjects", "{}");
}
function myTimer() {//Simple, non-accurate clock funtion
var secs = "" + SECONDS;
var min = MINUTES;
if ( MINUTES >= 0){
SECONDS --;
if (SECONDS < 9) {
secs = "0" + secs;
}
if (SECONDS === 0){
SECONDS = 59;
secs = "00";//TODO: fix this where the at 1:00 it displays :00
MINUTES --;
}
if (MINUTES > 0){
document.getElementById("timer-readout").innerHTML = (min + ":" + secs);
}
else {
document.getElementById("timer-readout").innerHTML = (":" + secs);
}
}
else {
makePointsFlag = false;
storeScoresForAverages();
finalMessage();
finalData();
clearTimeout(loop);
clearTimeout(slice);
clearTimeout(countdown);
clearTimeout(points);
clearTimeout(leader);
}
}
function nameMash (p1,p2) {//celebrity name generator splits on the vowels or if all else fails just smash them together
if (p1.length > 5 || p2.length > 5) {//TODO: Clean up this logic and make it more compact
//use the p1 and p2 arguments and loop through to grab the regex vowels
var re = /[aeiou]/gi;
var p1Array = p1.match(re);
var p2Array = p2.match(re);
//if there are more than 2 split on the second
if (p1Array.length >= 2 && p2Array.length >= 2) {
var i = 0
while ((match = re.exec(p1)) != null) {
i ++;
if (i == p1Array.length){
var player1 = p1.substring(0,match.index + 1);
}
}
var z = 0
while ((match = re.exec(p2)) != null) {
z ++;
if (z == p2Array.length){
var player2 = p2.substring(match.index - 2, p2.length);
}
}
return player1 + player2;
}
else {
var i = 0
while ((match = re.exec(p1)) != null) {
i ++;
if (i == 1){
var player1 = p1.substring(0,match.index + 1);
}
}
var z = 0
while ((match = re.exec(p2)) != null) {
z ++;
if (z == 1){
var player2 = p2.substring(match.index - 1, p2.length);
}
}
return player1 + player2;
}
}
else {
if (p1.length > 2 && p2.length > 2){
return p1.substring(0,1).toUpperCase() + p1.substring(1,Math.floor(p1.length/2)).toLowerCase() + p2.substring(Math.floor(p2.length/2),p2.length).toLowerCase();
}
else {
return p1.substring(0,1).toUpperCase() + p1.substring(1, p1.length).toLowerCase() + p2.substring(2,p2.length).toLowerCase();
}
}
}
function onRefreshInitialization () {//initializes the persistant dashboard metrics
document.getElementById('p1').focus();
$('#form')[0].reset();
}
function
|
() {
el = document.getElementById("overlay");
el.style.visibility = (el.style.visibility == "visible") ? "hidden" : "visible";
}
function pointGenerator () {//this function computes the points
//TODO: Make a funtion to check directional trends and integrate the result into this function
if (makePointsFlag) {
var trendValue1,
trendValue2;
trendOfReadings();
if (p1trend === '0'){
trendValue1 = 9;
}
else if (p1trend === '+'){
trendValue1 = 30;
}
else if (p1trend === '-'){
trendValue1 = 5;
}
else {
trendValue1 = 1;
}
if (p2trend === '0'){
trendValue2 = 9;
}
else if (p2trend === '+'){
trendValue2 = 30;
}
else if (p2trend === '-'){
trendValue2 = 5;
}
else {
trendValue2 = 1;
}
trendValue1 ? SCORE[0] += Math.round(trendValue1) : null;
trendValue2 ? SCORE[1] += Math.round(trendValue2) : null;
scoreRender();
}
}
function scoreRender () {
|
overlay
|
identifier_name
|
XYRepresentation.py
|
if not distances:
distances.append(dist)
neighbors.append(pt)
else:
counter=0
while counter<len(distances) and dist>distances[counter]:
counter+=1
distances.insert(counter,dist)
neighbors.insert(counter,pt)
return neighbors
def can_connect(self,a,q):
return not self.mp.collide(a,q)
def connect(self,a,q):
|
def sample(self):
samp=self.mp.sample()
self.addVertex(samp)
return samp
class Map():
points=[] #PRM points
edges=[] #PRM edges
lines=[]
buff_lines=[]
obstacles=[]
buff=0
vis_graph=None
visibility=False
def __init__(self,lx=0,hx=0,ly=0,hy=0):
self.lx=lx
self.hx=hx
self.ly=ly
self.hy=hy
"""Returns a pt that is collision free"""
def sample(self):
x=None
y=None
while x==None or self.collide((x,y)):
xwidth=self.hx-self.lx
ywidth=self.hy-self.ly
x=xwidth*rd.random()+self.lx
y=ywidth*rd.random()+self.ly
return (x,y)
def collide(self,point1,point2=None):
if point2==None:
point=shapely.Point(point1[0],point1[1])
for obs in self.obstacles:
if point.within(obs[1]) and not point.touches(obs[1]):
return True
else:
path =shapely.LineString([point1,point2])
if self.buff>0:
path=path.buffer(self.buff)
for obs in self.obstacles:
if obs[1].intersects(path) and not path.touches(obs[1]):
return True
return False
def clear(self):
self.lines=[]
self.buff_lines=[]
self.obstacles=[]
"""vertices are of format [(a,b),(c,d)]"""
def add_Poly(self,vertices=[]):
if self.visibility:
init_poly=shapely.Polygon(vertices)
buff_poly=init_poly.buffer(self.buff,cap_style=2,join_style =2)
new_vert=list(buff_poly.exterior.coords)
self.obstacles.append((mplpath.Path(vertices),init_poly,new_vert,mplpath.Path(new_vert),buff_poly))
else:
self.obstacles.append((mplpath.Path(vertices),shapely.Polygon(vertices),vertices))
def display(self):
"""start grid"""
fig, ax = plt.subplots()
"""Add Obstacles"""
if self.buff>0 and self.visibility:
for poly in self.obstacles:
ax.add_patch(patches.PathPatch(poly[3], facecolor='orange',alpha=0.5))
for poly in self.obstacles:
ax.add_patch(patches.PathPatch(poly[0], facecolor='purple'))
"""Set boundaries"""
ax.set_xlim([self.lx,self.hx])
ax.set_ylim([self.ly,self.hy])
"""add edges"""
c=[]
for edge in self.edges:
c.append((1,0,0,0.5))
# vis_lines.append([(edge[0][0],edge[0][1]),(edge[1][0],edge[1][1])])
ax.add_collection(mc.LineCollection(self.edges,colors=c,linewidths = 1.0))
"""Set size of plot"""
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 12
fig_size[1] = 9
plt.rcParams["figure.figsize"] = fig_size
"""draw points"""
for pt in self.points:
plt.plot(pt[0],pt[1],marker='o', markersize=5, color="black")
"""draw lines to the plot"""
if not self.visibility and self.buff>0:
path=shapely.LineString(self.buff_lines)
poly=path.buffer(self.buff)
poly_points=list(poly.exterior.coords)
ax.add_patch(patches.PathPatch(mplpath.Path(poly_points), facecolor='yellow',alpha=0.5))
lc = mc.LineCollection(self.lines,linewidths = 2.5)
ax.add_collection(lc)
"""Set up Axis """
ax.minorticks_on()
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(1))
ax.xaxis.set_minor_locator(plt.MultipleLocator(0.25))
ax.yaxis.set_minor_locator(plt.MultipleLocator(0.25))
ax.grid(which='major', linestyle='-', linewidth='0.5', color='black')
ax.grid(which='minor', linestyle='-', linewidth='0.2', color='black')
plt.show()
def average(intlist):
total=0
for val in intlist:
total+=val
if len(intlist)>0:
return total/float(len(intlist))
else:
return -1
def sd(intlist):
avg= average(intlist)
variance=0
for val in intlist:
variance+=np.square(val-avg)
if len(intlist)>0:
variance= variance/float(len(intlist))
return np.sqrt(variance)
else:
return -1
def stats():
mp = Map(-10,10,-10,10)
mp.add_Poly([(-1,-1), (-8,-2), (-5,2), (-3,2), (-4,0)])
mp.add_Poly([(9,5), (4,1), (5,-2), (2,-4), (1,2)])
mp.add_Poly([(0,-3) ,(0,-4) ,(1,-5) ,(-7,-5) ,(-7,-4) ])
mp.add_Poly([(7,6), (0,4) ,(-8,7) ,(0,6), (4,8)])
mp.add_Poly([(-9,-9), (8,-5) ,(9,-8)])
starts=[]
goals=[]
for i in range(25):
starts.append(mp.sample())
goals.append(mp.sample())
prmcc_maps=[]
prmk_maps=[]
prmstar_maps=[]
conn=[]
for i in range(10):
g1=Graph(mp)
g2=Graph(mp)
g3=Graph(mp)
conn.append(PRM.prm_cc(g1,25*i+25))
PRM.prm_k(g2,25*i+25,5)
PRM.prm_star(g3,25*i+25)
prmcc_maps.append(g1)
prmk_maps.append(g2)
prmstar_maps.append(g3)
print i
cc_stats=[]
k_stats=[]
star_stats=[]
c=0
for prm_map in prmcc_maps:
raw_dat=[]
for i in range(len(starts)):
temp=prm_map.copy()
PRM.prm_cc(temp,0,starts[i],goals[i],copy.deepcopy(conn[c]))
ag=interpret(temp.vertices,temp.edges)
end = ag.nodes.pop()
beg = ag.nodes.pop()
if find_path(beg,end):
raw_dat.append(end.g)
cc_stats.append(average(raw_dat))
c+=1
print c
c=0
for prm_map in prmk_maps:
raw_dat=[]
for i in range(len(starts)):
temp=prm_map.copy()
PRM.prm_k(temp,0,5,starts[i],goals[i])
ag=interpret(temp.vertices,temp.edges)
end = ag.nodes.pop()
beg = ag.nodes.pop()
if find_path(beg,end):
raw_dat.append(end.g)
k_stats.append(average(raw_dat))
c+=1
print c
c=0
for prm_map in prmstar_maps:
raw_dat=[]
for i in range(len(starts)):
temp=prm_map.copy()
PRM.prm_star(temp,0,starts[i],goals[i])
ag=interpret(temp.vertices,temp.edges)
end = ag.nodes.pop()
beg = ag.nodes.pop()
if find_path(beg,end):
raw_dat.append(end.g)
star_stats.append(average(raw_dat))
c+=1
print c
print cc_stats
print k_stats
print star_stats
def test():
# mp = Map(-5,5,-5,5)
mp = Map(-7, 8, -7, 8)
start=(-6,7)
goal=(6,-6)
"""Add obstacles"""
# mp.add_Poly([(-1,-1),(-1,1),(1,1),(1,-1)])
mp.add_Poly([(-1,-1), (-6,-2), (-5,2), (-3,2), (-4,0)])
mp.add_Poly([(6,5), (4,1), (5,-2), (2,-4), (1,2)])
mp.add_Poly([(0,-3) ,(0,-4
|
if self.can_connect(a,q):
if (a,q) not in self.edges and (q,a) not in self.edges:
self.edges.append((a,q))
|
identifier_body
|
XYRepresentation.py
|
if not distances:
distances.append(dist)
neighbors.append(pt)
else:
counter=0
while counter<len(distances) and dist>distances[counter]:
counter+=1
distances.insert(counter,dist)
neighbors.insert(counter,pt)
return neighbors
def can_connect(self,a,q):
return not self.mp.collide(a,q)
def connect(self,a,q):
if self.can_connect(a,q):
if (a,q) not in self.edges and (q,a) not in self.edges:
self.edges.append((a,q))
def sample(self):
samp=self.mp.sample()
self.addVertex(samp)
return samp
class Map():
points=[] #PRM points
edges=[] #PRM edges
lines=[]
buff_lines=[]
obstacles=[]
buff=0
vis_graph=None
visibility=False
def __init__(self,lx=0,hx=0,ly=0,hy=0):
self.lx=lx
self.hx=hx
self.ly=ly
self.hy=hy
"""Returns a pt that is collision free"""
def sample(self):
x=None
y=None
while x==None or self.collide((x,y)):
xwidth=self.hx-self.lx
ywidth=self.hy-self.ly
x=xwidth*rd.random()+self.lx
y=ywidth*rd.random()+self.ly
return (x,y)
def collide(self,point1,point2=None):
if point2==None:
point=shapely.Point(point1[0],point1[1])
for obs in self.obstacles:
if point.within(obs[1]) and not point.touches(obs[1]):
return True
else:
path =shapely.LineString([point1,point2])
if self.buff>0:
path=path.buffer(self.buff)
for obs in self.obstacles:
|
return False
def clear(self):
self.lines=[]
self.buff_lines=[]
self.obstacles=[]
"""vertices are of format [(a,b),(c,d)]"""
def add_Poly(self,vertices=[]):
if self.visibility:
init_poly=shapely.Polygon(vertices)
buff_poly=init_poly.buffer(self.buff,cap_style=2,join_style =2)
new_vert=list(buff_poly.exterior.coords)
self.obstacles.append((mplpath.Path(vertices),init_poly,new_vert,mplpath.Path(new_vert),buff_poly))
else:
self.obstacles.append((mplpath.Path(vertices),shapely.Polygon(vertices),vertices))
def display(self):
"""start grid"""
fig, ax = plt.subplots()
"""Add Obstacles"""
if self.buff>0 and self.visibility:
for poly in self.obstacles:
ax.add_patch(patches.PathPatch(poly[3], facecolor='orange',alpha=0.5))
for poly in self.obstacles:
ax.add_patch(patches.PathPatch(poly[0], facecolor='purple'))
"""Set boundaries"""
ax.set_xlim([self.lx,self.hx])
ax.set_ylim([self.ly,self.hy])
"""add edges"""
c=[]
for edge in self.edges:
c.append((1,0,0,0.5))
# vis_lines.append([(edge[0][0],edge[0][1]),(edge[1][0],edge[1][1])])
ax.add_collection(mc.LineCollection(self.edges,colors=c,linewidths = 1.0))
"""Set size of plot"""
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 12
fig_size[1] = 9
plt.rcParams["figure.figsize"] = fig_size
"""draw points"""
for pt in self.points:
plt.plot(pt[0],pt[1],marker='o', markersize=5, color="black")
"""draw lines to the plot"""
if not self.visibility and self.buff>0:
path=shapely.LineString(self.buff_lines)
poly=path.buffer(self.buff)
poly_points=list(poly.exterior.coords)
ax.add_patch(patches.PathPatch(mplpath.Path(poly_points), facecolor='yellow',alpha=0.5))
lc = mc.LineCollection(self.lines,linewidths = 2.5)
ax.add_collection(lc)
"""Set up Axis """
ax.minorticks_on()
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(1))
ax.xaxis.set_minor_locator(plt.MultipleLocator(0.25))
ax.yaxis.set_minor_locator(plt.MultipleLocator(0.25))
ax.grid(which='major', linestyle='-', linewidth='0.5', color='black')
ax.grid(which='minor', linestyle='-', linewidth='0.2', color='black')
plt.show()
def average(intlist):
total=0
for val in intlist:
total+=val
if len(intlist)>0:
return total/float(len(intlist))
else:
return -1
def sd(intlist):
avg= average(intlist)
variance=0
for val in intlist:
variance+=np.square(val-avg)
if len(intlist)>0:
variance= variance/float(len(intlist))
return np.sqrt(variance)
else:
return -1
def stats():
mp = Map(-10,10,-10,10)
mp.add_Poly([(-1,-1), (-8,-2), (-5,2), (-3,2), (-4,0)])
mp.add_Poly([(9,5), (4,1), (5,-2), (2,-4), (1,2)])
mp.add_Poly([(0,-3) ,(0,-4) ,(1,-5) ,(-7,-5) ,(-7,-4) ])
mp.add_Poly([(7,6), (0,4) ,(-8,7) ,(0,6), (4,8)])
mp.add_Poly([(-9,-9), (8,-5) ,(9,-8)])
starts=[]
goals=[]
for i in range(25):
starts.append(mp.sample())
goals.append(mp.sample())
prmcc_maps=[]
prmk_maps=[]
prmstar_maps=[]
conn=[]
for i in range(10):
g1=Graph(mp)
g2=Graph(mp)
g3=Graph(mp)
conn.append(PRM.prm_cc(g1,25*i+25))
PRM.prm_k(g2,25*i+25,5)
PRM.prm_star(g3,25*i+25)
prmcc_maps.append(g1)
prmk_maps.append(g2)
prmstar_maps.append(g3)
print i
cc_stats=[]
k_stats=[]
star_stats=[]
c=0
for prm_map in prmcc_maps:
raw_dat=[]
for i in range(len(starts)):
temp=prm_map.copy()
PRM.prm_cc(temp,0,starts[i],goals[i],copy.deepcopy(conn[c]))
ag=interpret(temp.vertices,temp.edges)
end = ag.nodes.pop()
beg = ag.nodes.pop()
if find_path(beg,end):
raw_dat.append(end.g)
cc_stats.append(average(raw_dat))
c+=1
print c
c=0
for prm_map in prmk_maps:
raw_dat=[]
for i in range(len(starts)):
temp=prm_map.copy()
PRM.prm_k(temp,0,5,starts[i],goals[i])
ag=interpret(temp.vertices,temp.edges)
end = ag.nodes.pop()
beg = ag.nodes.pop()
if find_path(beg,end):
raw_dat.append(end.g)
k_stats.append(average(raw_dat))
c+=1
print c
c=0
for prm_map in prmstar_maps:
raw_dat=[]
for i in range(len(starts)):
temp=prm_map.copy()
PRM.prm_star(temp,0,starts[i],goals[i])
ag=interpret(temp.vertices,temp.edges)
end = ag.nodes.pop()
beg = ag.nodes.pop()
if find_path(beg,end):
raw_dat.append(end.g)
star_stats.append(average(raw_dat))
c+=1
print c
print cc_stats
print k_stats
print star_stats
def test():
# mp = Map(-5,5,-5,5)
mp = Map(-7, 8, -7, 8)
start=(-6,7)
goal=(6,-6)
"""Add obstacles"""
# mp.add_Poly([(-1,-1),(-1,1),(1,1),(1,-1)])
mp.add_Poly([(-1,-1), (-6,-2), (-5,2), (-3,2), (-4,0)])
mp.add_Poly([(6,5), (4,1), (5,-2), (2,-4), (1,2)])
mp.add_Poly([(0,-3) ,(0,-4)
|
if obs[1].intersects(path) and not path.touches(obs[1]):
return True
|
conditional_block
|
XYRepresentation.py
|
if not distances:
distances.append(dist)
neighbors.append(pt)
else:
counter=0
while counter<len(distances) and dist>distances[counter]:
counter+=1
distances.insert(counter,dist)
neighbors.insert(counter,pt)
return neighbors
def can_connect(self,a,q):
return not self.mp.collide(a,q)
def
|
(self,a,q):
if self.can_connect(a,q):
if (a,q) not in self.edges and (q,a) not in self.edges:
self.edges.append((a,q))
def sample(self):
samp=self.mp.sample()
self.addVertex(samp)
return samp
class Map():
points=[] #PRM points
edges=[] #PRM edges
lines=[]
buff_lines=[]
obstacles=[]
buff=0
vis_graph=None
visibility=False
def __init__(self,lx=0,hx=0,ly=0,hy=0):
self.lx=lx
self.hx=hx
self.ly=ly
self.hy=hy
"""Returns a pt that is collision free"""
def sample(self):
x=None
y=None
while x==None or self.collide((x,y)):
xwidth=self.hx-self.lx
ywidth=self.hy-self.ly
x=xwidth*rd.random()+self.lx
y=ywidth*rd.random()+self.ly
return (x,y)
def collide(self,point1,point2=None):
if point2==None:
point=shapely.Point(point1[0],point1[1])
for obs in self.obstacles:
if point.within(obs[1]) and not point.touches(obs[1]):
return True
else:
path =shapely.LineString([point1,point2])
if self.buff>0:
path=path.buffer(self.buff)
for obs in self.obstacles:
if obs[1].intersects(path) and not path.touches(obs[1]):
return True
return False
def clear(self):
self.lines=[]
self.buff_lines=[]
self.obstacles=[]
"""vertices are of format [(a,b),(c,d)]"""
def add_Poly(self,vertices=[]):
if self.visibility:
init_poly=shapely.Polygon(vertices)
buff_poly=init_poly.buffer(self.buff,cap_style=2,join_style =2)
new_vert=list(buff_poly.exterior.coords)
self.obstacles.append((mplpath.Path(vertices),init_poly,new_vert,mplpath.Path(new_vert),buff_poly))
else:
self.obstacles.append((mplpath.Path(vertices),shapely.Polygon(vertices),vertices))
def display(self):
"""start grid"""
fig, ax = plt.subplots()
"""Add Obstacles"""
if self.buff>0 and self.visibility:
for poly in self.obstacles:
ax.add_patch(patches.PathPatch(poly[3], facecolor='orange',alpha=0.5))
for poly in self.obstacles:
ax.add_patch(patches.PathPatch(poly[0], facecolor='purple'))
"""Set boundaries"""
ax.set_xlim([self.lx,self.hx])
ax.set_ylim([self.ly,self.hy])
"""add edges"""
c=[]
for edge in self.edges:
c.append((1,0,0,0.5))
# vis_lines.append([(edge[0][0],edge[0][1]),(edge[1][0],edge[1][1])])
ax.add_collection(mc.LineCollection(self.edges,colors=c,linewidths = 1.0))
"""Set size of plot"""
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 12
fig_size[1] = 9
plt.rcParams["figure.figsize"] = fig_size
"""draw points"""
for pt in self.points:
plt.plot(pt[0],pt[1],marker='o', markersize=5, color="black")
"""draw lines to the plot"""
if not self.visibility and self.buff>0:
path=shapely.LineString(self.buff_lines)
poly=path.buffer(self.buff)
poly_points=list(poly.exterior.coords)
ax.add_patch(patches.PathPatch(mplpath.Path(poly_points), facecolor='yellow',alpha=0.5))
lc = mc.LineCollection(self.lines,linewidths = 2.5)
ax.add_collection(lc)
"""Set up Axis """
ax.minorticks_on()
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(1))
ax.xaxis.set_minor_locator(plt.MultipleLocator(0.25))
ax.yaxis.set_minor_locator(plt.MultipleLocator(0.25))
ax.grid(which='major', linestyle='-', linewidth='0.5', color='black')
ax.grid(which='minor', linestyle='-', linewidth='0.2', color='black')
plt.show()
def average(intlist):
total=0
for val in intlist:
total+=val
if len(intlist)>0:
return total/float(len(intlist))
else:
return -1
def sd(intlist):
avg= average(intlist)
variance=0
for val in intlist:
variance+=np.square(val-avg)
if len(intlist)>0:
variance= variance/float(len(intlist))
return np.sqrt(variance)
else:
return -1
def stats():
mp = Map(-10,10,-10,10)
mp.add_Poly([(-1,-1), (-8,-2), (-5,2), (-3,2), (-4,0)])
mp.add_Poly([(9,5), (4,1), (5,-2), (2,-4), (1,2)])
mp.add_Poly([(0,-3) ,(0,-4) ,(1,-5) ,(-7,-5) ,(-7,-4) ])
mp.add_Poly([(7,6), (0,4) ,(-8,7) ,(0,6), (4,8)])
mp.add_Poly([(-9,-9), (8,-5) ,(9,-8)])
starts=[]
goals=[]
for i in range(25):
starts.append(mp.sample())
goals.append(mp.sample())
prmcc_maps=[]
prmk_maps=[]
prmstar_maps=[]
conn=[]
for i in range(10):
g1=Graph(mp)
g2=Graph(mp)
g3=Graph(mp)
conn.append(PRM.prm_cc(g1,25*i+25))
PRM.prm_k(g2,25*i+25,5)
PRM.prm_star(g3,25*i+25)
prmcc_maps.append(g1)
prmk_maps.append(g2)
prmstar_maps.append(g3)
print i
cc_stats=[]
k_stats=[]
star_stats=[]
c=0
for prm_map in prmcc_maps:
raw_dat=[]
for i in range(len(starts)):
temp=prm_map.copy()
PRM.prm_cc(temp,0,starts[i],goals[i],copy.deepcopy(conn[c]))
ag=interpret(temp.vertices,temp.edges)
end = ag.nodes.pop()
beg = ag.nodes.pop()
if find_path(beg,end):
raw_dat.append(end.g)
cc_stats.append(average(raw_dat))
c+=1
print c
c=0
for prm_map in prmk_maps:
raw_dat=[]
for i in range(len(starts)):
temp=prm_map.copy()
PRM.prm_k(temp,0,5,starts[i],goals[i])
ag=interpret(temp.vertices,temp.edges)
end = ag.nodes.pop()
beg = ag.nodes.pop()
if find_path(beg,end):
raw_dat.append(end.g)
k_stats.append(average(raw_dat))
c+=1
print c
c=0
for prm_map in prmstar_maps:
raw_dat=[]
for i in range(len(starts)):
temp=prm_map.copy()
PRM.prm_star(temp,0,starts[i],goals[i])
ag=interpret(temp.vertices,temp.edges)
end = ag.nodes.pop()
beg = ag.nodes.pop()
if find_path(beg,end):
raw_dat.append(end.g)
star_stats.append(average(raw_dat))
c+=1
print c
print cc_stats
print k_stats
print star_stats
def test():
# mp = Map(-5,5,-5,5)
mp = Map(-7, 8, -7, 8)
start=(-6,7)
goal=(6,-6)
"""Add obstacles"""
# mp.add_Poly([(-1,-1),(-1,1),(1,1),(1,-1)])
mp.add_Poly([(-1,-1), (-6,-2), (-5,2), (-3,2), (-4,0)])
mp.add_Poly([(6,5), (4,1), (5,-2), (2,-4), (1,2)])
mp.add_Poly([(0,-3) ,(0,-4
|
connect
|
identifier_name
|
XYRepresentation.py
|
ide(a,q)
def connect(self,a,q):
if self.can_connect(a,q):
if (a,q) not in self.edges and (q,a) not in self.edges:
self.edges.append((a,q))
def sample(self):
samp=self.mp.sample()
self.addVertex(samp)
return samp
class Map():
points=[] #PRM points
edges=[] #PRM edges
lines=[]
buff_lines=[]
obstacles=[]
buff=0
vis_graph=None
visibility=False
def __init__(self,lx=0,hx=0,ly=0,hy=0):
self.lx=lx
self.hx=hx
self.ly=ly
self.hy=hy
"""Returns a pt that is collision free"""
def sample(self):
x=None
y=None
while x==None or self.collide((x,y)):
xwidth=self.hx-self.lx
ywidth=self.hy-self.ly
x=xwidth*rd.random()+self.lx
y=ywidth*rd.random()+self.ly
return (x,y)
def collide(self,point1,point2=None):
if point2==None:
point=shapely.Point(point1[0],point1[1])
for obs in self.obstacles:
if point.within(obs[1]) and not point.touches(obs[1]):
return True
else:
path =shapely.LineString([point1,point2])
if self.buff>0:
path=path.buffer(self.buff)
for obs in self.obstacles:
if obs[1].intersects(path) and not path.touches(obs[1]):
return True
return False
def clear(self):
self.lines=[]
self.buff_lines=[]
self.obstacles=[]
"""vertices are of format [(a,b),(c,d)]"""
def add_Poly(self,vertices=[]):
if self.visibility:
init_poly=shapely.Polygon(vertices)
buff_poly=init_poly.buffer(self.buff,cap_style=2,join_style =2)
new_vert=list(buff_poly.exterior.coords)
self.obstacles.append((mplpath.Path(vertices),init_poly,new_vert,mplpath.Path(new_vert),buff_poly))
else:
self.obstacles.append((mplpath.Path(vertices),shapely.Polygon(vertices),vertices))
def display(self):
"""start grid"""
fig, ax = plt.subplots()
"""Add Obstacles"""
if self.buff>0 and self.visibility:
for poly in self.obstacles:
ax.add_patch(patches.PathPatch(poly[3], facecolor='orange',alpha=0.5))
for poly in self.obstacles:
ax.add_patch(patches.PathPatch(poly[0], facecolor='purple'))
"""Set boundaries"""
ax.set_xlim([self.lx,self.hx])
ax.set_ylim([self.ly,self.hy])
"""add edges"""
c=[]
for edge in self.edges:
c.append((1,0,0,0.5))
# vis_lines.append([(edge[0][0],edge[0][1]),(edge[1][0],edge[1][1])])
ax.add_collection(mc.LineCollection(self.edges,colors=c,linewidths = 1.0))
"""Set size of plot"""
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 12
fig_size[1] = 9
plt.rcParams["figure.figsize"] = fig_size
"""draw points"""
for pt in self.points:
plt.plot(pt[0],pt[1],marker='o', markersize=5, color="black")
"""draw lines to the plot"""
if not self.visibility and self.buff>0:
path=shapely.LineString(self.buff_lines)
poly=path.buffer(self.buff)
poly_points=list(poly.exterior.coords)
ax.add_patch(patches.PathPatch(mplpath.Path(poly_points), facecolor='yellow',alpha=0.5))
lc = mc.LineCollection(self.lines,linewidths = 2.5)
ax.add_collection(lc)
"""Set up Axis """
ax.minorticks_on()
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(1))
ax.xaxis.set_minor_locator(plt.MultipleLocator(0.25))
ax.yaxis.set_minor_locator(plt.MultipleLocator(0.25))
ax.grid(which='major', linestyle='-', linewidth='0.5', color='black')
ax.grid(which='minor', linestyle='-', linewidth='0.2', color='black')
plt.show()
def average(intlist):
total=0
for val in intlist:
total+=val
if len(intlist)>0:
return total/float(len(intlist))
else:
return -1
def sd(intlist):
avg= average(intlist)
variance=0
for val in intlist:
variance+=np.square(val-avg)
if len(intlist)>0:
variance= variance/float(len(intlist))
return np.sqrt(variance)
else:
return -1
def stats():
mp = Map(-10,10,-10,10)
mp.add_Poly([(-1,-1), (-8,-2), (-5,2), (-3,2), (-4,0)])
mp.add_Poly([(9,5), (4,1), (5,-2), (2,-4), (1,2)])
mp.add_Poly([(0,-3) ,(0,-4) ,(1,-5) ,(-7,-5) ,(-7,-4) ])
mp.add_Poly([(7,6), (0,4) ,(-8,7) ,(0,6), (4,8)])
mp.add_Poly([(-9,-9), (8,-5) ,(9,-8)])
starts=[]
goals=[]
for i in range(25):
starts.append(mp.sample())
goals.append(mp.sample())
prmcc_maps=[]
prmk_maps=[]
prmstar_maps=[]
conn=[]
for i in range(10):
g1=Graph(mp)
g2=Graph(mp)
g3=Graph(mp)
conn.append(PRM.prm_cc(g1,25*i+25))
PRM.prm_k(g2,25*i+25,5)
PRM.prm_star(g3,25*i+25)
prmcc_maps.append(g1)
prmk_maps.append(g2)
prmstar_maps.append(g3)
print i
cc_stats=[]
k_stats=[]
star_stats=[]
c=0
for prm_map in prmcc_maps:
raw_dat=[]
for i in range(len(starts)):
temp=prm_map.copy()
PRM.prm_cc(temp,0,starts[i],goals[i],copy.deepcopy(conn[c]))
ag=interpret(temp.vertices,temp.edges)
end = ag.nodes.pop()
beg = ag.nodes.pop()
if find_path(beg,end):
raw_dat.append(end.g)
cc_stats.append(average(raw_dat))
c+=1
print c
c=0
for prm_map in prmk_maps:
raw_dat=[]
for i in range(len(starts)):
temp=prm_map.copy()
PRM.prm_k(temp,0,5,starts[i],goals[i])
ag=interpret(temp.vertices,temp.edges)
end = ag.nodes.pop()
beg = ag.nodes.pop()
if find_path(beg,end):
raw_dat.append(end.g)
k_stats.append(average(raw_dat))
c+=1
print c
c=0
for prm_map in prmstar_maps:
raw_dat=[]
for i in range(len(starts)):
temp=prm_map.copy()
PRM.prm_star(temp,0,starts[i],goals[i])
ag=interpret(temp.vertices,temp.edges)
end = ag.nodes.pop()
beg = ag.nodes.pop()
if find_path(beg,end):
raw_dat.append(end.g)
star_stats.append(average(raw_dat))
c+=1
print c
print cc_stats
print k_stats
print star_stats
def test():
# mp = Map(-5,5,-5,5)
mp = Map(-7, 8, -7, 8)
start=(-6,7)
goal=(6,-6)
"""Add obstacles"""
# mp.add_Poly([(-1,-1),(-1,1),(1,1),(1,-1)])
|
mp.add_Poly([(-1,-1), (-6,-2), (-5,2), (-3,2), (-4,0)])
mp.add_Poly([(6,5), (4,1), (5,-2), (2,-4), (1,2)])
mp.add_Poly([(0,-3) ,(0,-4) ,(1,-5) ,(-5,-5) ,(-5,-4) ])
mp.add_Poly([(6,6), (0,4) ,(-5,6) ,(0,6), (4,7)])
# mp.add_Poly([(-2,0),(-2,-1),(2,-1),(2,0)])
|
random_line_split
|
|
loadout-builder-reducer.ts
|
-values';
import { DestinyClass } from 'bungie-api-ts/destiny2';
import _ from 'lodash';
import { useReducer } from 'react';
import { isLoadoutBuilderItem } from '../loadout/item-utils';
import {
lockedModsFromLoadoutParameters,
statFiltersFromLoadoutParamaters,
statOrderFromLoadoutParameters,
} from './loadout-params';
import { ArmorSet, ArmorStatHashes, ExcludedItems, PinnedItems, StatFilters } from './types';
export interface LoadoutBuilderState {
statOrder: ArmorStatHashes[]; // stat hashes, including disabled stats
upgradeSpendTier: UpgradeSpendTier;
lockItemEnergyType: boolean;
pinnedItems: PinnedItems;
excludedItems: ExcludedItems;
lockedMods: PluggableInventoryItemDefinition[];
lockedExoticHash?: number;
selectedStoreId?: string;
statFilters: Readonly<StatFilters>;
modPicker: {
open: boolean;
initialQuery?: string;
};
compareSet?: ArmorSet;
}
function warnMissingClass(classType: DestinyClass, defs: D2ManifestDefinitions) {
const missingClassName = Object.values(defs.Class).find((c) => c.classType === classType)!
.displayProperties.name;
|
body: t('LoadoutBuilder.MissingClassDescription'),
});
}
const lbStateInit = ({
stores,
preloadedLoadout,
initialLoadoutParameters,
classType,
defs,
}: {
stores: DimStore[];
preloadedLoadout?: Loadout;
initialLoadoutParameters: LoadoutParameters;
classType: DestinyClass | undefined;
defs: D2ManifestDefinitions;
}): LoadoutBuilderState => {
const pinnedItems: PinnedItems = {};
const matchingClass =
classType !== undefined ? stores.find((store) => store.classType === classType) : undefined;
if (classType !== undefined && !matchingClass) {
warnMissingClass(classType, defs);
// Take out the exotic
initialLoadoutParameters = { ...initialLoadoutParameters, exoticArmorHash: undefined };
}
let selectedStoreId = (matchingClass ?? getCurrentStore(stores)!).id;
let loadoutParams = initialLoadoutParameters;
if (stores.length && preloadedLoadout) {
const loadoutStore = stores.find((store) => store.classType === preloadedLoadout.classType);
if (!loadoutStore) {
warnMissingClass(preloadedLoadout.classType, defs);
} else {
selectedStoreId = loadoutStore.id;
// TODO: instead of locking items, show the loadout fixed at the top to compare against and leave all items free
for (const loadoutItem of preloadedLoadout.items) {
if (loadoutItem.equipped) {
const item = getItemAcrossStores(stores, loadoutItem);
if (item && isLoadoutBuilderItem(item)) {
pinnedItems[item.bucket.hash] = item;
}
}
}
// Load all parameters from the loadout if we can
if (preloadedLoadout.parameters) {
loadoutParams = { ...defaultLoadoutParameters, ...preloadedLoadout.parameters };
}
}
}
const statOrder = statOrderFromLoadoutParameters(loadoutParams);
const statFilters = statFiltersFromLoadoutParamaters(loadoutParams);
const lockedMods = lockedModsFromLoadoutParameters(loadoutParams, defs);
const lockItemEnergyType = Boolean(loadoutParams?.lockItemEnergyType);
// We need to handle the deprecated case
const upgradeSpendTier =
loadoutParams.upgradeSpendTier === UpgradeSpendTier.AscendantShardsLockEnergyType
? UpgradeSpendTier.Nothing
: loadoutParams.upgradeSpendTier!;
const lockedExoticHash = loadoutParams.exoticArmorHash;
return {
lockItemEnergyType,
upgradeSpendTier,
statOrder,
pinnedItems,
excludedItems: [],
statFilters,
lockedMods,
lockedExoticHash,
selectedStoreId,
modPicker: {
open: false,
},
};
};
export type LoadoutBuilderAction =
| { type: 'changeCharacter'; storeId: string }
| { type: 'statFiltersChanged'; statFilters: LoadoutBuilderState['statFilters'] }
| { type: 'sortOrderChanged'; sortOrder: LoadoutBuilderState['statOrder'] }
| {
type: 'lockItemEnergyTypeChanged';
lockItemEnergyType: LoadoutBuilderState['lockItemEnergyType'];
}
| { type: 'upgradeSpendTierChanged'; upgradeSpendTier: LoadoutBuilderState['upgradeSpendTier'] }
| { type: 'pinItem'; item: DimItem }
| { type: 'setPinnedItems'; items: DimItem[] }
| { type: 'unpinItem'; item: DimItem }
| { type: 'excludeItem'; item: DimItem }
| { type: 'unexcludeItem'; item: DimItem }
| {
type: 'lockedModsChanged';
lockedMods: PluggableInventoryItemDefinition[];
}
| { type: 'removeLockedMod'; mod: PluggableInventoryItemDefinition }
| { type: 'addGeneralMods'; mods: PluggableInventoryItemDefinition[] }
| { type: 'lockExotic'; lockedExoticHash: number }
| { type: 'removeLockedExotic' }
| { type: 'openModPicker'; initialQuery?: string }
| { type: 'closeModPicker' }
| { type: 'openCompareDrawer'; set: ArmorSet }
| { type: 'closeCompareDrawer' };
// TODO: Move more logic inside the reducer
function lbStateReducer(
state: LoadoutBuilderState,
action: LoadoutBuilderAction
): LoadoutBuilderState {
switch (action.type) {
case 'changeCharacter':
return {
...state,
selectedStoreId: action.storeId,
pinnedItems: {},
excludedItems: {},
lockedExoticHash: undefined,
};
case 'statFiltersChanged':
return { ...state, statFilters: action.statFilters };
case 'pinItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
return {
...state,
// Remove any previously locked item in that bucket and add this one
pinnedItems: {
...state.pinnedItems,
[bucketHash]: item,
},
// Locking an item clears excluded items in this bucket
excludedItems: {
...state.excludedItems,
[bucketHash]: undefined,
},
};
}
case 'setPinnedItems': {
const { items } = action;
return {
...state,
pinnedItems: _.keyBy(items, (i) => i.bucket.hash),
excludedItems: {},
};
}
case 'unpinItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
return {
...state,
pinnedItems: {
...state.pinnedItems,
[bucketHash]: undefined,
},
};
}
case 'excludeItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
if (state.excludedItems[bucketHash]?.some((i) => i.id === item.id)) {
return state; // item's already there
}
const existingExcluded = state.excludedItems[bucketHash] ?? [];
return {
...state,
excludedItems: {
...state.excludedItems,
[bucketHash]: [...existingExcluded, item],
},
};
}
case 'unexcludeItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
const newExcluded = (state.excludedItems[bucketHash] ?? []).filter((i) => i.id !== item.id);
return {
...state,
excludedItems: {
...state.excludedItems,
[bucketHash]: newExcluded.length > 0 ? newExcluded : undefined,
},
};
}
case 'lockedModsChanged': {
return {
...state,
lockedMods: action.lockedMods,
};
}
case 'sortOrderChanged': {
return {
...state,
statOrder: action.sortOrder,
};
}
case 'lockItemEnergyTypeChanged': {
return {
...state,
lockItemEnergyType: action.lockItemEnergyType,
};
}
case 'upgradeSpendTierChanged': {
return {
...state,
upgradeSpendTier: action.upgradeSpendTier,
};
}
case 'addGeneralMods': {
let currentGeneralModsCount = state.lockedMods.filter(
(mod) => mod.plug.plugCategoryHash === armor2PlugCategoryHashesByName.general
).length;
const newMods = [...state.lockedMods];
const failures: string[] = [];
for (const mod of action.mods) {
if (currentGeneralModsCount < 5) {
newMods.push(mod);
currentGeneralModsCount++;
} else {
failures.push(mod.displayProperties.name);
}
}
if (failures.length) {
showNotification
|
showNotification({
type: 'error',
title: t('LoadoutBuilder.MissingClass', { className: missingClassName }),
|
random_line_split
|
loadout-builder-reducer.ts
|
-values';
import { DestinyClass } from 'bungie-api-ts/destiny2';
import _ from 'lodash';
import { useReducer } from 'react';
import { isLoadoutBuilderItem } from '../loadout/item-utils';
import {
lockedModsFromLoadoutParameters,
statFiltersFromLoadoutParamaters,
statOrderFromLoadoutParameters,
} from './loadout-params';
import { ArmorSet, ArmorStatHashes, ExcludedItems, PinnedItems, StatFilters } from './types';
export interface LoadoutBuilderState {
statOrder: ArmorStatHashes[]; // stat hashes, including disabled stats
upgradeSpendTier: UpgradeSpendTier;
lockItemEnergyType: boolean;
pinnedItems: PinnedItems;
excludedItems: ExcludedItems;
lockedMods: PluggableInventoryItemDefinition[];
lockedExoticHash?: number;
selectedStoreId?: string;
statFilters: Readonly<StatFilters>;
modPicker: {
open: boolean;
initialQuery?: string;
};
compareSet?: ArmorSet;
}
function
|
(classType: DestinyClass, defs: D2ManifestDefinitions) {
const missingClassName = Object.values(defs.Class).find((c) => c.classType === classType)!
.displayProperties.name;
showNotification({
type: 'error',
title: t('LoadoutBuilder.MissingClass', { className: missingClassName }),
body: t('LoadoutBuilder.MissingClassDescription'),
});
}
const lbStateInit = ({
stores,
preloadedLoadout,
initialLoadoutParameters,
classType,
defs,
}: {
stores: DimStore[];
preloadedLoadout?: Loadout;
initialLoadoutParameters: LoadoutParameters;
classType: DestinyClass | undefined;
defs: D2ManifestDefinitions;
}): LoadoutBuilderState => {
const pinnedItems: PinnedItems = {};
const matchingClass =
classType !== undefined ? stores.find((store) => store.classType === classType) : undefined;
if (classType !== undefined && !matchingClass) {
warnMissingClass(classType, defs);
// Take out the exotic
initialLoadoutParameters = { ...initialLoadoutParameters, exoticArmorHash: undefined };
}
let selectedStoreId = (matchingClass ?? getCurrentStore(stores)!).id;
let loadoutParams = initialLoadoutParameters;
if (stores.length && preloadedLoadout) {
const loadoutStore = stores.find((store) => store.classType === preloadedLoadout.classType);
if (!loadoutStore) {
warnMissingClass(preloadedLoadout.classType, defs);
} else {
selectedStoreId = loadoutStore.id;
// TODO: instead of locking items, show the loadout fixed at the top to compare against and leave all items free
for (const loadoutItem of preloadedLoadout.items) {
if (loadoutItem.equipped) {
const item = getItemAcrossStores(stores, loadoutItem);
if (item && isLoadoutBuilderItem(item)) {
pinnedItems[item.bucket.hash] = item;
}
}
}
// Load all parameters from the loadout if we can
if (preloadedLoadout.parameters) {
loadoutParams = { ...defaultLoadoutParameters, ...preloadedLoadout.parameters };
}
}
}
const statOrder = statOrderFromLoadoutParameters(loadoutParams);
const statFilters = statFiltersFromLoadoutParamaters(loadoutParams);
const lockedMods = lockedModsFromLoadoutParameters(loadoutParams, defs);
const lockItemEnergyType = Boolean(loadoutParams?.lockItemEnergyType);
// We need to handle the deprecated case
const upgradeSpendTier =
loadoutParams.upgradeSpendTier === UpgradeSpendTier.AscendantShardsLockEnergyType
? UpgradeSpendTier.Nothing
: loadoutParams.upgradeSpendTier!;
const lockedExoticHash = loadoutParams.exoticArmorHash;
return {
lockItemEnergyType,
upgradeSpendTier,
statOrder,
pinnedItems,
excludedItems: [],
statFilters,
lockedMods,
lockedExoticHash,
selectedStoreId,
modPicker: {
open: false,
},
};
};
export type LoadoutBuilderAction =
| { type: 'changeCharacter'; storeId: string }
| { type: 'statFiltersChanged'; statFilters: LoadoutBuilderState['statFilters'] }
| { type: 'sortOrderChanged'; sortOrder: LoadoutBuilderState['statOrder'] }
| {
type: 'lockItemEnergyTypeChanged';
lockItemEnergyType: LoadoutBuilderState['lockItemEnergyType'];
}
| { type: 'upgradeSpendTierChanged'; upgradeSpendTier: LoadoutBuilderState['upgradeSpendTier'] }
| { type: 'pinItem'; item: DimItem }
| { type: 'setPinnedItems'; items: DimItem[] }
| { type: 'unpinItem'; item: DimItem }
| { type: 'excludeItem'; item: DimItem }
| { type: 'unexcludeItem'; item: DimItem }
| {
type: 'lockedModsChanged';
lockedMods: PluggableInventoryItemDefinition[];
}
| { type: 'removeLockedMod'; mod: PluggableInventoryItemDefinition }
| { type: 'addGeneralMods'; mods: PluggableInventoryItemDefinition[] }
| { type: 'lockExotic'; lockedExoticHash: number }
| { type: 'removeLockedExotic' }
| { type: 'openModPicker'; initialQuery?: string }
| { type: 'closeModPicker' }
| { type: 'openCompareDrawer'; set: ArmorSet }
| { type: 'closeCompareDrawer' };
// TODO: Move more logic inside the reducer
function lbStateReducer(
state: LoadoutBuilderState,
action: LoadoutBuilderAction
): LoadoutBuilderState {
switch (action.type) {
case 'changeCharacter':
return {
...state,
selectedStoreId: action.storeId,
pinnedItems: {},
excludedItems: {},
lockedExoticHash: undefined,
};
case 'statFiltersChanged':
return { ...state, statFilters: action.statFilters };
case 'pinItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
return {
...state,
// Remove any previously locked item in that bucket and add this one
pinnedItems: {
...state.pinnedItems,
[bucketHash]: item,
},
// Locking an item clears excluded items in this bucket
excludedItems: {
...state.excludedItems,
[bucketHash]: undefined,
},
};
}
case 'setPinnedItems': {
const { items } = action;
return {
...state,
pinnedItems: _.keyBy(items, (i) => i.bucket.hash),
excludedItems: {},
};
}
case 'unpinItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
return {
...state,
pinnedItems: {
...state.pinnedItems,
[bucketHash]: undefined,
},
};
}
case 'excludeItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
if (state.excludedItems[bucketHash]?.some((i) => i.id === item.id)) {
return state; // item's already there
}
const existingExcluded = state.excludedItems[bucketHash] ?? [];
return {
...state,
excludedItems: {
...state.excludedItems,
[bucketHash]: [...existingExcluded, item],
},
};
}
case 'unexcludeItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
const newExcluded = (state.excludedItems[bucketHash] ?? []).filter((i) => i.id !== item.id);
return {
...state,
excludedItems: {
...state.excludedItems,
[bucketHash]: newExcluded.length > 0 ? newExcluded : undefined,
},
};
}
case 'lockedModsChanged': {
return {
...state,
lockedMods: action.lockedMods,
};
}
case 'sortOrderChanged': {
return {
...state,
statOrder: action.sortOrder,
};
}
case 'lockItemEnergyTypeChanged': {
return {
...state,
lockItemEnergyType: action.lockItemEnergyType,
};
}
case 'upgradeSpendTierChanged': {
return {
...state,
upgradeSpendTier: action.upgradeSpendTier,
};
}
case 'addGeneralMods': {
let currentGeneralModsCount = state.lockedMods.filter(
(mod) => mod.plug.plugCategoryHash === armor2PlugCategoryHashesByName.general
).length;
const newMods = [...state.lockedMods];
const failures: string[] = [];
for (const mod of action.mods) {
if (currentGeneralModsCount < 5) {
newMods.push(mod);
currentGeneralModsCount++;
} else {
failures.push(mod.displayProperties.name);
}
}
if (failures.length) {
showNotification
|
warnMissingClass
|
identifier_name
|
loadout-builder-reducer.ts
|
-values';
import { DestinyClass } from 'bungie-api-ts/destiny2';
import _ from 'lodash';
import { useReducer } from 'react';
import { isLoadoutBuilderItem } from '../loadout/item-utils';
import {
lockedModsFromLoadoutParameters,
statFiltersFromLoadoutParamaters,
statOrderFromLoadoutParameters,
} from './loadout-params';
import { ArmorSet, ArmorStatHashes, ExcludedItems, PinnedItems, StatFilters } from './types';
export interface LoadoutBuilderState {
statOrder: ArmorStatHashes[]; // stat hashes, including disabled stats
upgradeSpendTier: UpgradeSpendTier;
lockItemEnergyType: boolean;
pinnedItems: PinnedItems;
excludedItems: ExcludedItems;
lockedMods: PluggableInventoryItemDefinition[];
lockedExoticHash?: number;
selectedStoreId?: string;
statFilters: Readonly<StatFilters>;
modPicker: {
open: boolean;
initialQuery?: string;
};
compareSet?: ArmorSet;
}
function warnMissingClass(classType: DestinyClass, defs: D2ManifestDefinitions) {
const missingClassName = Object.values(defs.Class).find((c) => c.classType === classType)!
.displayProperties.name;
showNotification({
type: 'error',
title: t('LoadoutBuilder.MissingClass', { className: missingClassName }),
body: t('LoadoutBuilder.MissingClassDescription'),
});
}
const lbStateInit = ({
stores,
preloadedLoadout,
initialLoadoutParameters,
classType,
defs,
}: {
stores: DimStore[];
preloadedLoadout?: Loadout;
initialLoadoutParameters: LoadoutParameters;
classType: DestinyClass | undefined;
defs: D2ManifestDefinitions;
}): LoadoutBuilderState => {
const pinnedItems: PinnedItems = {};
const matchingClass =
classType !== undefined ? stores.find((store) => store.classType === classType) : undefined;
if (classType !== undefined && !matchingClass) {
warnMissingClass(classType, defs);
// Take out the exotic
initialLoadoutParameters = { ...initialLoadoutParameters, exoticArmorHash: undefined };
}
let selectedStoreId = (matchingClass ?? getCurrentStore(stores)!).id;
let loadoutParams = initialLoadoutParameters;
if (stores.length && preloadedLoadout) {
const loadoutStore = stores.find((store) => store.classType === preloadedLoadout.classType);
if (!loadoutStore) {
warnMissingClass(preloadedLoadout.classType, defs);
} else {
selectedStoreId = loadoutStore.id;
// TODO: instead of locking items, show the loadout fixed at the top to compare against and leave all items free
for (const loadoutItem of preloadedLoadout.items) {
if (loadoutItem.equipped) {
const item = getItemAcrossStores(stores, loadoutItem);
if (item && isLoadoutBuilderItem(item)) {
pinnedItems[item.bucket.hash] = item;
}
}
}
// Load all parameters from the loadout if we can
if (preloadedLoadout.parameters) {
loadoutParams = { ...defaultLoadoutParameters, ...preloadedLoadout.parameters };
}
}
}
const statOrder = statOrderFromLoadoutParameters(loadoutParams);
const statFilters = statFiltersFromLoadoutParamaters(loadoutParams);
const lockedMods = lockedModsFromLoadoutParameters(loadoutParams, defs);
const lockItemEnergyType = Boolean(loadoutParams?.lockItemEnergyType);
// We need to handle the deprecated case
const upgradeSpendTier =
loadoutParams.upgradeSpendTier === UpgradeSpendTier.AscendantShardsLockEnergyType
? UpgradeSpendTier.Nothing
: loadoutParams.upgradeSpendTier!;
const lockedExoticHash = loadoutParams.exoticArmorHash;
return {
lockItemEnergyType,
upgradeSpendTier,
statOrder,
pinnedItems,
excludedItems: [],
statFilters,
lockedMods,
lockedExoticHash,
selectedStoreId,
modPicker: {
open: false,
},
};
};
export type LoadoutBuilderAction =
| { type: 'changeCharacter'; storeId: string }
| { type: 'statFiltersChanged'; statFilters: LoadoutBuilderState['statFilters'] }
| { type: 'sortOrderChanged'; sortOrder: LoadoutBuilderState['statOrder'] }
| {
type: 'lockItemEnergyTypeChanged';
lockItemEnergyType: LoadoutBuilderState['lockItemEnergyType'];
}
| { type: 'upgradeSpendTierChanged'; upgradeSpendTier: LoadoutBuilderState['upgradeSpendTier'] }
| { type: 'pinItem'; item: DimItem }
| { type: 'setPinnedItems'; items: DimItem[] }
| { type: 'unpinItem'; item: DimItem }
| { type: 'excludeItem'; item: DimItem }
| { type: 'unexcludeItem'; item: DimItem }
| {
type: 'lockedModsChanged';
lockedMods: PluggableInventoryItemDefinition[];
}
| { type: 'removeLockedMod'; mod: PluggableInventoryItemDefinition }
| { type: 'addGeneralMods'; mods: PluggableInventoryItemDefinition[] }
| { type: 'lockExotic'; lockedExoticHash: number }
| { type: 'removeLockedExotic' }
| { type: 'openModPicker'; initialQuery?: string }
| { type: 'closeModPicker' }
| { type: 'openCompareDrawer'; set: ArmorSet }
| { type: 'closeCompareDrawer' };
// TODO: Move more logic inside the reducer
function lbStateReducer(
state: LoadoutBuilderState,
action: LoadoutBuilderAction
): LoadoutBuilderState
|
[bucketHash]: item,
},
// Locking an item clears excluded items in this bucket
excludedItems: {
...state.excludedItems,
[bucketHash]: undefined,
},
};
}
case 'setPinnedItems': {
const { items } = action;
return {
...state,
pinnedItems: _.keyBy(items, (i) => i.bucket.hash),
excludedItems: {},
};
}
case 'unpinItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
return {
...state,
pinnedItems: {
...state.pinnedItems,
[bucketHash]: undefined,
},
};
}
case 'excludeItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
if (state.excludedItems[bucketHash]?.some((i) => i.id === item.id)) {
return state; // item's already there
}
const existingExcluded = state.excludedItems[bucketHash] ?? [];
return {
...state,
excludedItems: {
...state.excludedItems,
[bucketHash]: [...existingExcluded, item],
},
};
}
case 'unexcludeItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
const newExcluded = (state.excludedItems[bucketHash] ?? []).filter((i) => i.id !== item.id);
return {
...state,
excludedItems: {
...state.excludedItems,
[bucketHash]: newExcluded.length > 0 ? newExcluded : undefined,
},
};
}
case 'lockedModsChanged': {
return {
...state,
lockedMods: action.lockedMods,
};
}
case 'sortOrderChanged': {
return {
...state,
statOrder: action.sortOrder,
};
}
case 'lockItemEnergyTypeChanged': {
return {
...state,
lockItemEnergyType: action.lockItemEnergyType,
};
}
case 'upgradeSpendTierChanged': {
return {
...state,
upgradeSpendTier: action.upgradeSpendTier,
};
}
case 'addGeneralMods': {
let currentGeneralModsCount = state.lockedMods.filter(
(mod) => mod.plug.plugCategoryHash === armor2PlugCategoryHashesByName.general
).length;
const newMods = [...state.lockedMods];
const failures: string[] = [];
for (const mod of action.mods) {
if (currentGeneralModsCount < 5) {
newMods.push(mod);
currentGeneralModsCount++;
} else {
failures.push(mod.displayProperties.name);
}
}
if (failures.length) {
showNotification
|
{
switch (action.type) {
case 'changeCharacter':
return {
...state,
selectedStoreId: action.storeId,
pinnedItems: {},
excludedItems: {},
lockedExoticHash: undefined,
};
case 'statFiltersChanged':
return { ...state, statFilters: action.statFilters };
case 'pinItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
return {
...state,
// Remove any previously locked item in that bucket and add this one
pinnedItems: {
...state.pinnedItems,
|
identifier_body
|
loadout-builder-reducer.ts
|
-values';
import { DestinyClass } from 'bungie-api-ts/destiny2';
import _ from 'lodash';
import { useReducer } from 'react';
import { isLoadoutBuilderItem } from '../loadout/item-utils';
import {
lockedModsFromLoadoutParameters,
statFiltersFromLoadoutParamaters,
statOrderFromLoadoutParameters,
} from './loadout-params';
import { ArmorSet, ArmorStatHashes, ExcludedItems, PinnedItems, StatFilters } from './types';
export interface LoadoutBuilderState {
statOrder: ArmorStatHashes[]; // stat hashes, including disabled stats
upgradeSpendTier: UpgradeSpendTier;
lockItemEnergyType: boolean;
pinnedItems: PinnedItems;
excludedItems: ExcludedItems;
lockedMods: PluggableInventoryItemDefinition[];
lockedExoticHash?: number;
selectedStoreId?: string;
statFilters: Readonly<StatFilters>;
modPicker: {
open: boolean;
initialQuery?: string;
};
compareSet?: ArmorSet;
}
function warnMissingClass(classType: DestinyClass, defs: D2ManifestDefinitions) {
const missingClassName = Object.values(defs.Class).find((c) => c.classType === classType)!
.displayProperties.name;
showNotification({
type: 'error',
title: t('LoadoutBuilder.MissingClass', { className: missingClassName }),
body: t('LoadoutBuilder.MissingClassDescription'),
});
}
const lbStateInit = ({
stores,
preloadedLoadout,
initialLoadoutParameters,
classType,
defs,
}: {
stores: DimStore[];
preloadedLoadout?: Loadout;
initialLoadoutParameters: LoadoutParameters;
classType: DestinyClass | undefined;
defs: D2ManifestDefinitions;
}): LoadoutBuilderState => {
const pinnedItems: PinnedItems = {};
const matchingClass =
classType !== undefined ? stores.find((store) => store.classType === classType) : undefined;
if (classType !== undefined && !matchingClass) {
warnMissingClass(classType, defs);
// Take out the exotic
initialLoadoutParameters = { ...initialLoadoutParameters, exoticArmorHash: undefined };
}
let selectedStoreId = (matchingClass ?? getCurrentStore(stores)!).id;
let loadoutParams = initialLoadoutParameters;
if (stores.length && preloadedLoadout)
|
}
}
const statOrder = statOrderFromLoadoutParameters(loadoutParams);
const statFilters = statFiltersFromLoadoutParamaters(loadoutParams);
const lockedMods = lockedModsFromLoadoutParameters(loadoutParams, defs);
const lockItemEnergyType = Boolean(loadoutParams?.lockItemEnergyType);
// We need to handle the deprecated case
const upgradeSpendTier =
loadoutParams.upgradeSpendTier === UpgradeSpendTier.AscendantShardsLockEnergyType
? UpgradeSpendTier.Nothing
: loadoutParams.upgradeSpendTier!;
const lockedExoticHash = loadoutParams.exoticArmorHash;
return {
lockItemEnergyType,
upgradeSpendTier,
statOrder,
pinnedItems,
excludedItems: [],
statFilters,
lockedMods,
lockedExoticHash,
selectedStoreId,
modPicker: {
open: false,
},
};
};
export type LoadoutBuilderAction =
| { type: 'changeCharacter'; storeId: string }
| { type: 'statFiltersChanged'; statFilters: LoadoutBuilderState['statFilters'] }
| { type: 'sortOrderChanged'; sortOrder: LoadoutBuilderState['statOrder'] }
| {
type: 'lockItemEnergyTypeChanged';
lockItemEnergyType: LoadoutBuilderState['lockItemEnergyType'];
}
| { type: 'upgradeSpendTierChanged'; upgradeSpendTier: LoadoutBuilderState['upgradeSpendTier'] }
| { type: 'pinItem'; item: DimItem }
| { type: 'setPinnedItems'; items: DimItem[] }
| { type: 'unpinItem'; item: DimItem }
| { type: 'excludeItem'; item: DimItem }
| { type: 'unexcludeItem'; item: DimItem }
| {
type: 'lockedModsChanged';
lockedMods: PluggableInventoryItemDefinition[];
}
| { type: 'removeLockedMod'; mod: PluggableInventoryItemDefinition }
| { type: 'addGeneralMods'; mods: PluggableInventoryItemDefinition[] }
| { type: 'lockExotic'; lockedExoticHash: number }
| { type: 'removeLockedExotic' }
| { type: 'openModPicker'; initialQuery?: string }
| { type: 'closeModPicker' }
| { type: 'openCompareDrawer'; set: ArmorSet }
| { type: 'closeCompareDrawer' };
// TODO: Move more logic inside the reducer
function lbStateReducer(
state: LoadoutBuilderState,
action: LoadoutBuilderAction
): LoadoutBuilderState {
switch (action.type) {
case 'changeCharacter':
return {
...state,
selectedStoreId: action.storeId,
pinnedItems: {},
excludedItems: {},
lockedExoticHash: undefined,
};
case 'statFiltersChanged':
return { ...state, statFilters: action.statFilters };
case 'pinItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
return {
...state,
// Remove any previously locked item in that bucket and add this one
pinnedItems: {
...state.pinnedItems,
[bucketHash]: item,
},
// Locking an item clears excluded items in this bucket
excludedItems: {
...state.excludedItems,
[bucketHash]: undefined,
},
};
}
case 'setPinnedItems': {
const { items } = action;
return {
...state,
pinnedItems: _.keyBy(items, (i) => i.bucket.hash),
excludedItems: {},
};
}
case 'unpinItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
return {
...state,
pinnedItems: {
...state.pinnedItems,
[bucketHash]: undefined,
},
};
}
case 'excludeItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
if (state.excludedItems[bucketHash]?.some((i) => i.id === item.id)) {
return state; // item's already there
}
const existingExcluded = state.excludedItems[bucketHash] ?? [];
return {
...state,
excludedItems: {
...state.excludedItems,
[bucketHash]: [...existingExcluded, item],
},
};
}
case 'unexcludeItem': {
const { item } = action;
const bucketHash = item.bucket.hash;
const newExcluded = (state.excludedItems[bucketHash] ?? []).filter((i) => i.id !== item.id);
return {
...state,
excludedItems: {
...state.excludedItems,
[bucketHash]: newExcluded.length > 0 ? newExcluded : undefined,
},
};
}
case 'lockedModsChanged': {
return {
...state,
lockedMods: action.lockedMods,
};
}
case 'sortOrderChanged': {
return {
...state,
statOrder: action.sortOrder,
};
}
case 'lockItemEnergyTypeChanged': {
return {
...state,
lockItemEnergyType: action.lockItemEnergyType,
};
}
case 'upgradeSpendTierChanged': {
return {
...state,
upgradeSpendTier: action.upgradeSpendTier,
};
}
case 'addGeneralMods': {
let currentGeneralModsCount = state.lockedMods.filter(
(mod) => mod.plug.plugCategoryHash === armor2PlugCategoryHashesByName.general
).length;
const newMods = [...state.lockedMods];
const failures: string[] = [];
for (const mod of action.mods) {
if (currentGeneralModsCount < 5) {
newMods.push(mod);
currentGeneralModsCount++;
} else {
failures.push(mod.displayProperties.name);
}
}
if (failures.length) {
showNotification
|
{
const loadoutStore = stores.find((store) => store.classType === preloadedLoadout.classType);
if (!loadoutStore) {
warnMissingClass(preloadedLoadout.classType, defs);
} else {
selectedStoreId = loadoutStore.id;
// TODO: instead of locking items, show the loadout fixed at the top to compare against and leave all items free
for (const loadoutItem of preloadedLoadout.items) {
if (loadoutItem.equipped) {
const item = getItemAcrossStores(stores, loadoutItem);
if (item && isLoadoutBuilderItem(item)) {
pinnedItems[item.bucket.hash] = item;
}
}
}
// Load all parameters from the loadout if we can
if (preloadedLoadout.parameters) {
loadoutParams = { ...defaultLoadoutParameters, ...preloadedLoadout.parameters };
}
|
conditional_block
|
config.rs
|
_SECONDS: usize = 60;
/// This struct contains the configuration of the agent.
#[derive(Clone)]
pub struct Config {
/// the latching interval for stats
interval: u64,
/// sample rate for counters in Hz
sample_rate: f64,
/// the sampler timeout
sampler_timeout: Duration,
/// maximum consecutive sampler timeouts
max_sampler_timeouts: usize,
/// the listen address for the stats port
listen: SocketAddr,
/// the logging level
loglevel: Level,
/// memcache instance to instrument
memcache: Option<SocketAddr>,
/// flags for enabled statistics subsystems
flags: Flags,
/// the number of cores on the host
cores: usize,
/// an optional file to log stats to
stats_log: Option<String>,
/// flag to indicate Mesos sidecar mode
sidecar: bool,
}
#[derive(Clone)]
/// `Flags` is a simple wrapper for a doubly-keyed `HashSet`
pub struct Flags {
data: HashMap<String, HashSet<String>>,
}
impl Flags {
/// Creates a new empty set of `Flags`
pub fn new() -> Self {
Self {
data: HashMap::new(),
}
}
/// Insert a `pkey`+`lkey` into the set
pub fn insert(&mut self, pkey: &str, lkey: &str) {
let mut entry = self.data.remove(pkey).unwrap_or_default();
entry.insert(lkey.to_owned());
self.data.insert(pkey.to_owned(), entry);
}
/// True if the set contains `pkey`+`lkey`
pub fn contains(&self, pkey: &str, lkey: &str) -> bool {
if let Some(entry) = self.data.get(pkey) {
entry.get(lkey).is_some()
} else {
false
}
}
/// True if the set contains the `pkey`
pub fn
|
(&self, pkey: &str) -> bool {
self.data.get(pkey).is_some()
}
/// Remove a `pkey`+`lkey`
pub fn remove(&mut self, pkey: &str, lkey: &str) {
if let Some(entry) = self.data.get_mut(pkey) {
entry.remove(lkey);
}
}
/// Remove the `pkey` and all `lkey`s under it
pub fn remove_pkey(&mut self, pkey: &str) {
self.data.remove(pkey);
}
}
impl Config {
/// parse command line options and return `Config`
pub fn new() -> Config {
let matches = App::new(NAME)
.version(VERSION)
.author("Brian Martin <bmartin@twitter.com>")
.about("high-resolution systems performance telemetry agent")
.arg(
Arg::with_name("listen")
.short("l")
.long("listen")
.required(true)
.takes_value(true)
.value_name("IP:PORT")
.help("Sets the listen address for metrics"),
)
.arg(
Arg::with_name("verbose")
.short("v")
.long("verbose")
.multiple(true)
.help("Increase verbosity by one level. Can be used more than once"),
)
.arg(
Arg::with_name("interval")
.long("interval")
.value_name("Seconds")
.help("Integration window duration and stats endpoint refresh time")
.takes_value(true),
)
.arg(
Arg::with_name("sample-rate")
.long("sample-rate")
.value_name("Hertz")
.help("Sets the sampling frequency for the counters")
.takes_value(true),
)
.arg(
Arg::with_name("sampler-timeout")
.long("sampler-timeout")
.value_name("MS")
.help("Sets the timeout for per-sampler execution")
.takes_value(true),
)
.arg(
Arg::with_name("max-sampler-timeouts")
.long("max-sampler-timeouts")
.value_name("MS")
.help("Sets the maximum number of consecutive sampler timeouts")
.takes_value(true),
)
.arg(
Arg::with_name("cpu")
.long("cpu")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.help("Enable statistics from CPU subsystem"),
)
.arg(
Arg::with_name("disk")
.long("disk")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.help("Enable statistics from Disk subsystem"),
)
.arg(
Arg::with_name("ebpf")
.long("ebpf")
.takes_value(true)
.multiple(true)
.possible_value("all")
.possible_value("block")
.possible_value("ext4")
.possible_value("scheduler")
.possible_value("xfs")
.help("Enable statistics from eBPF"),
)
.arg(
Arg::with_name("network")
.long("network")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.help("Enable statistics from Network subsystem"),
)
.arg(
Arg::with_name("perf")
.long("perf")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.possible_value("per-cgroup")
.help("Enable statistics from Perf Events subsystem"),
)
.arg(
Arg::with_name("memcache")
.long("memcache")
.required(false)
.takes_value(true)
.value_name("IP:PORT")
.help("Connect to the given memcache server and produce stats"),
)
.arg(
Arg::with_name("stats-log")
.long("stats-log")
.required(false)
.takes_value(true)
.value_name("LOG FILE")
.help("Enable logging of stats to file"),
)
.arg(
Arg::with_name("sidecar")
.long("sidecar")
.required(false)
.help("Enables Mesos sidecar mode, instrumenting the container"),
)
.get_matches();
let listen = matches
.value_of("listen")
.unwrap()
.parse()
.unwrap_or_else(|_| {
println!("ERROR: listen address is malformed");
process::exit(1);
});
let memcache = if let Some(sock) = matches.value_of("memcache") {
let socket = sock.parse().unwrap_or_else(|_| {
println!("ERROR: memcache address is malformed");
process::exit(1);
});
Some(socket)
} else {
None
};
let sample_rate =
parse_float_arg(&matches, "sample-rate").unwrap_or(DEFAULT_SAMPLE_RATE_HZ);
let sampler_timeout = Duration::from_millis(
parse_numeric_arg(&matches, "sampler-timeout")
.unwrap_or(DEFAULT_SAMPLER_TIMEOUT_MILLISECONDS) as u64,
);
let max_sampler_timeouts = parse_numeric_arg(&matches, "max-sampler-timeouts")
.unwrap_or(DEFAULT_MAX_SAMPLER_TIMEOUTS);
let interval = parse_numeric_arg(&matches, "interval").unwrap_or(DEFAULT_INTERVAL_SECONDS)
as u64
* SECOND;
let cores = hardware_threads().unwrap_or(1);
let mut stats_enabled = Flags::new();
for subsystem in &["cpu", "disk", "ebpf", "network", "perf"] {
if let Some(values) = matches.values_of(subsystem) {
let flags: Vec<&str> = values.collect();
for flag in flags {
stats_enabled.insert(subsystem, flag);
}
}
}
let loglevel = match matches.occurrences_of("verbose") {
0 => Level::Info,
1 => Level::Debug,
_ => Level::Trace,
};
let stats_log = matches
.value_of("stats-log")
.map(std::string::ToString::to_string);
let sidecar = matches.is_present("sidecar");
Config {
cores,
flags: stats_enabled,
sample_rate,
sampler_timeout,
max_sampler_timeouts,
interval,
listen,
loglevel,
memcache,
stats_log,
sidecar,
}
}
/// what interval should the stats library latch on
pub fn interval(&self) -> u64 {
self.interval
}
/// what frequency the stats should be sampled on
pub fn sample_rate(&self) -> f64 {
self.sample_rate
}
/// the timeout for sampler execution
pub fn sampler_timeout(&self) -> Duration {
self.sampler_timeout
}
/// maximum consecutive sampler timeouts
pub fn max_sampler_timeouts(&self) -> usize {
self.max_sampler_timeouts
}
/// get listen address
pub fn listen(&self) -> SocketAddr {
self.listen
}
/// get log level
pub fn loglevel(&self) -> Level {
self.loglevel
}
/// how many cores on the host?
pub fn cores(&self) -> usize {
self.cores
}
pub fn memcache(&self) -> Option<SocketAddr> {
self.memcache
}
/// is a flag enabled for a subsystem?
pub
|
contains_pkey
|
identifier_name
|
config.rs
|
: usize = 60;
/// This struct contains the configuration of the agent.
#[derive(Clone)]
pub struct Config {
/// the latching interval for stats
interval: u64,
/// sample rate for counters in Hz
sample_rate: f64,
/// the sampler timeout
sampler_timeout: Duration,
/// maximum consecutive sampler timeouts
max_sampler_timeouts: usize,
/// the listen address for the stats port
listen: SocketAddr,
/// the logging level
loglevel: Level,
/// memcache instance to instrument
memcache: Option<SocketAddr>,
/// flags for enabled statistics subsystems
flags: Flags,
/// the number of cores on the host
cores: usize,
/// an optional file to log stats to
stats_log: Option<String>,
/// flag to indicate Mesos sidecar mode
sidecar: bool,
}
#[derive(Clone)]
/// `Flags` is a simple wrapper for a doubly-keyed `HashSet`
pub struct Flags {
data: HashMap<String, HashSet<String>>,
}
impl Flags {
/// Creates a new empty set of `Flags`
pub fn new() -> Self {
Self {
data: HashMap::new(),
}
}
/// Insert a `pkey`+`lkey` into the set
pub fn insert(&mut self, pkey: &str, lkey: &str) {
let mut entry = self.data.remove(pkey).unwrap_or_default();
entry.insert(lkey.to_owned());
self.data.insert(pkey.to_owned(), entry);
}
/// True if the set contains `pkey`+`lkey`
pub fn contains(&self, pkey: &str, lkey: &str) -> bool {
if let Some(entry) = self.data.get(pkey) {
entry.get(lkey).is_some()
} else {
false
}
}
/// True if the set contains the `pkey`
pub fn contains_pkey(&self, pkey: &str) -> bool {
self.data.get(pkey).is_some()
}
/// Remove a `pkey`+`lkey`
pub fn remove(&mut self, pkey: &str, lkey: &str) {
if let Some(entry) = self.data.get_mut(pkey) {
entry.remove(lkey);
}
}
/// Remove the `pkey` and all `lkey`s under it
pub fn remove_pkey(&mut self, pkey: &str) {
self.data.remove(pkey);
}
}
impl Config {
/// parse command line options and return `Config`
pub fn new() -> Config {
let matches = App::new(NAME)
.version(VERSION)
.author("Brian Martin <bmartin@twitter.com>")
.about("high-resolution systems performance telemetry agent")
.arg(
Arg::with_name("listen")
.short("l")
.long("listen")
.required(true)
.takes_value(true)
.value_name("IP:PORT")
.help("Sets the listen address for metrics"),
)
.arg(
Arg::with_name("verbose")
.short("v")
.long("verbose")
.multiple(true)
.help("Increase verbosity by one level. Can be used more than once"),
)
.arg(
Arg::with_name("interval")
.long("interval")
.value_name("Seconds")
.help("Integration window duration and stats endpoint refresh time")
.takes_value(true),
)
.arg(
Arg::with_name("sample-rate")
.long("sample-rate")
.value_name("Hertz")
.help("Sets the sampling frequency for the counters")
.takes_value(true),
)
.arg(
Arg::with_name("sampler-timeout")
.long("sampler-timeout")
.value_name("MS")
.help("Sets the timeout for per-sampler execution")
.takes_value(true),
)
.arg(
Arg::with_name("max-sampler-timeouts")
.long("max-sampler-timeouts")
.value_name("MS")
.help("Sets the maximum number of consecutive sampler timeouts")
.takes_value(true),
)
.arg(
Arg::with_name("cpu")
.long("cpu")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.help("Enable statistics from CPU subsystem"),
)
.arg(
Arg::with_name("disk")
.long("disk")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.help("Enable statistics from Disk subsystem"),
)
.arg(
Arg::with_name("ebpf")
.long("ebpf")
.takes_value(true)
.multiple(true)
.possible_value("all")
.possible_value("block")
.possible_value("ext4")
.possible_value("scheduler")
.possible_value("xfs")
.help("Enable statistics from eBPF"),
)
.arg(
Arg::with_name("network")
.long("network")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.help("Enable statistics from Network subsystem"),
)
.arg(
Arg::with_name("perf")
.long("perf")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.possible_value("per-cgroup")
.help("Enable statistics from Perf Events subsystem"),
)
.arg(
Arg::with_name("memcache")
.long("memcache")
.required(false)
.takes_value(true)
.value_name("IP:PORT")
.help("Connect to the given memcache server and produce stats"),
)
.arg(
Arg::with_name("stats-log")
.long("stats-log")
.required(false)
.takes_value(true)
.value_name("LOG FILE")
.help("Enable logging of stats to file"),
)
.arg(
Arg::with_name("sidecar")
.long("sidecar")
.required(false)
.help("Enables Mesos sidecar mode, instrumenting the container"),
)
.get_matches();
let listen = matches
.value_of("listen")
.unwrap()
.parse()
.unwrap_or_else(|_| {
println!("ERROR: listen address is malformed");
process::exit(1);
});
let memcache = if let Some(sock) = matches.value_of("memcache") {
let socket = sock.parse().unwrap_or_else(|_| {
println!("ERROR: memcache address is malformed");
process::exit(1);
});
Some(socket)
} else {
None
};
let sample_rate =
parse_float_arg(&matches, "sample-rate").unwrap_or(DEFAULT_SAMPLE_RATE_HZ);
let sampler_timeout = Duration::from_millis(
parse_numeric_arg(&matches, "sampler-timeout")
.unwrap_or(DEFAULT_SAMPLER_TIMEOUT_MILLISECONDS) as u64,
);
let max_sampler_timeouts = parse_numeric_arg(&matches, "max-sampler-timeouts")
.unwrap_or(DEFAULT_MAX_SAMPLER_TIMEOUTS);
let interval = parse_numeric_arg(&matches, "interval").unwrap_or(DEFAULT_INTERVAL_SECONDS)
as u64
* SECOND;
let cores = hardware_threads().unwrap_or(1);
let mut stats_enabled = Flags::new();
for subsystem in &["cpu", "disk", "ebpf", "network", "perf"] {
if let Some(values) = matches.values_of(subsystem) {
let flags: Vec<&str> = values.collect();
for flag in flags {
stats_enabled.insert(subsystem, flag);
}
}
}
let loglevel = match matches.occurrences_of("verbose") {
0 => Level::Info,
1 => Level::Debug,
_ => Level::Trace,
};
let stats_log = matches
.value_of("stats-log")
.map(std::string::ToString::to_string);
let sidecar = matches.is_present("sidecar");
Config {
cores,
flags: stats_enabled,
sample_rate,
sampler_timeout,
max_sampler_timeouts,
interval,
listen,
loglevel,
memcache,
stats_log,
sidecar,
}
}
/// what interval should the stats library latch on
pub fn interval(&self) -> u64 {
self.interval
}
/// what frequency the stats should be sampled on
pub fn sample_rate(&self) -> f64
|
/// the timeout for sampler execution
pub fn sampler_timeout(&self) -> Duration {
self.sampler_timeout
}
/// maximum consecutive sampler timeouts
pub fn max_sampler_timeouts(&self) -> usize {
self.max_sampler_timeouts
}
/// get listen address
pub fn listen(&self) -> SocketAddr {
self.listen
}
/// get log level
pub fn loglevel(&self) -> Level {
self.loglevel
}
/// how many cores on the host?
pub fn cores(&self) -> usize {
self.cores
}
pub fn memcache(&self) -> Option<SocketAddr> {
self.memcache
}
/// is a flag enabled for a subsystem?
pub
|
{
self.sample_rate
}
|
identifier_body
|
config.rs
|
_SECONDS: usize = 60;
/// This struct contains the configuration of the agent.
#[derive(Clone)]
pub struct Config {
/// the latching interval for stats
interval: u64,
/// sample rate for counters in Hz
sample_rate: f64,
/// the sampler timeout
sampler_timeout: Duration,
/// maximum consecutive sampler timeouts
max_sampler_timeouts: usize,
/// the listen address for the stats port
listen: SocketAddr,
/// the logging level
loglevel: Level,
/// memcache instance to instrument
memcache: Option<SocketAddr>,
/// flags for enabled statistics subsystems
flags: Flags,
/// the number of cores on the host
cores: usize,
/// an optional file to log stats to
stats_log: Option<String>,
/// flag to indicate Mesos sidecar mode
sidecar: bool,
}
#[derive(Clone)]
/// `Flags` is a simple wrapper for a doubly-keyed `HashSet`
pub struct Flags {
data: HashMap<String, HashSet<String>>,
}
impl Flags {
/// Creates a new empty set of `Flags`
pub fn new() -> Self {
Self {
data: HashMap::new(),
}
}
/// Insert a `pkey`+`lkey` into the set
pub fn insert(&mut self, pkey: &str, lkey: &str) {
let mut entry = self.data.remove(pkey).unwrap_or_default();
entry.insert(lkey.to_owned());
self.data.insert(pkey.to_owned(), entry);
}
/// True if the set contains `pkey`+`lkey`
pub fn contains(&self, pkey: &str, lkey: &str) -> bool {
if let Some(entry) = self.data.get(pkey) {
entry.get(lkey).is_some()
} else {
false
}
}
/// True if the set contains the `pkey`
pub fn contains_pkey(&self, pkey: &str) -> bool {
self.data.get(pkey).is_some()
}
/// Remove a `pkey`+`lkey`
pub fn remove(&mut self, pkey: &str, lkey: &str) {
if let Some(entry) = self.data.get_mut(pkey) {
entry.remove(lkey);
}
}
/// Remove the `pkey` and all `lkey`s under it
pub fn remove_pkey(&mut self, pkey: &str) {
self.data.remove(pkey);
}
}
impl Config {
/// parse command line options and return `Config`
pub fn new() -> Config {
let matches = App::new(NAME)
.version(VERSION)
.author("Brian Martin <bmartin@twitter.com>")
.about("high-resolution systems performance telemetry agent")
.arg(
Arg::with_name("listen")
.short("l")
.long("listen")
.required(true)
.takes_value(true)
.value_name("IP:PORT")
.help("Sets the listen address for metrics"),
)
.arg(
Arg::with_name("verbose")
.short("v")
.long("verbose")
.multiple(true)
.help("Increase verbosity by one level. Can be used more than once"),
)
.arg(
Arg::with_name("interval")
.long("interval")
.value_name("Seconds")
.help("Integration window duration and stats endpoint refresh time")
.takes_value(true),
)
.arg(
Arg::with_name("sample-rate")
.long("sample-rate")
.value_name("Hertz")
.help("Sets the sampling frequency for the counters")
.takes_value(true),
)
.arg(
Arg::with_name("sampler-timeout")
.long("sampler-timeout")
.value_name("MS")
.help("Sets the timeout for per-sampler execution")
.takes_value(true),
)
.arg(
Arg::with_name("max-sampler-timeouts")
.long("max-sampler-timeouts")
.value_name("MS")
.help("Sets the maximum number of consecutive sampler timeouts")
.takes_value(true),
)
.arg(
Arg::with_name("cpu")
.long("cpu")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.help("Enable statistics from CPU subsystem"),
)
.arg(
Arg::with_name("disk")
.long("disk")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.help("Enable statistics from Disk subsystem"),
)
.arg(
Arg::with_name("ebpf")
.long("ebpf")
.takes_value(true)
.multiple(true)
.possible_value("all")
.possible_value("block")
.possible_value("ext4")
.possible_value("scheduler")
.possible_value("xfs")
.help("Enable statistics from eBPF"),
)
.arg(
Arg::with_name("network")
.long("network")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.help("Enable statistics from Network subsystem"),
)
.arg(
Arg::with_name("perf")
.long("perf")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.possible_value("per-cgroup")
.help("Enable statistics from Perf Events subsystem"),
)
.arg(
Arg::with_name("memcache")
.long("memcache")
.required(false)
.takes_value(true)
.value_name("IP:PORT")
.help("Connect to the given memcache server and produce stats"),
)
.arg(
Arg::with_name("stats-log")
.long("stats-log")
.required(false)
.takes_value(true)
.value_name("LOG FILE")
.help("Enable logging of stats to file"),
)
.arg(
Arg::with_name("sidecar")
.long("sidecar")
.required(false)
.help("Enables Mesos sidecar mode, instrumenting the container"),
)
.get_matches();
let listen = matches
.value_of("listen")
.unwrap()
.parse()
.unwrap_or_else(|_| {
println!("ERROR: listen address is malformed");
process::exit(1);
});
let memcache = if let Some(sock) = matches.value_of("memcache")
|
else {
None
};
let sample_rate =
parse_float_arg(&matches, "sample-rate").unwrap_or(DEFAULT_SAMPLE_RATE_HZ);
let sampler_timeout = Duration::from_millis(
parse_numeric_arg(&matches, "sampler-timeout")
.unwrap_or(DEFAULT_SAMPLER_TIMEOUT_MILLISECONDS) as u64,
);
let max_sampler_timeouts = parse_numeric_arg(&matches, "max-sampler-timeouts")
.unwrap_or(DEFAULT_MAX_SAMPLER_TIMEOUTS);
let interval = parse_numeric_arg(&matches, "interval").unwrap_or(DEFAULT_INTERVAL_SECONDS)
as u64
* SECOND;
let cores = hardware_threads().unwrap_or(1);
let mut stats_enabled = Flags::new();
for subsystem in &["cpu", "disk", "ebpf", "network", "perf"] {
if let Some(values) = matches.values_of(subsystem) {
let flags: Vec<&str> = values.collect();
for flag in flags {
stats_enabled.insert(subsystem, flag);
}
}
}
let loglevel = match matches.occurrences_of("verbose") {
0 => Level::Info,
1 => Level::Debug,
_ => Level::Trace,
};
let stats_log = matches
.value_of("stats-log")
.map(std::string::ToString::to_string);
let sidecar = matches.is_present("sidecar");
Config {
cores,
flags: stats_enabled,
sample_rate,
sampler_timeout,
max_sampler_timeouts,
interval,
listen,
loglevel,
memcache,
stats_log,
sidecar,
}
}
/// what interval should the stats library latch on
pub fn interval(&self) -> u64 {
self.interval
}
/// what frequency the stats should be sampled on
pub fn sample_rate(&self) -> f64 {
self.sample_rate
}
/// the timeout for sampler execution
pub fn sampler_timeout(&self) -> Duration {
self.sampler_timeout
}
/// maximum consecutive sampler timeouts
pub fn max_sampler_timeouts(&self) -> usize {
self.max_sampler_timeouts
}
/// get listen address
pub fn listen(&self) -> SocketAddr {
self.listen
}
/// get log level
pub fn loglevel(&self) -> Level {
self.loglevel
}
/// how many cores on the host?
pub fn cores(&self) -> usize {
self.cores
}
pub fn memcache(&self) -> Option<SocketAddr> {
self.memcache
}
/// is a flag enabled for a subsystem?
pub
|
{
let socket = sock.parse().unwrap_or_else(|_| {
println!("ERROR: memcache address is malformed");
process::exit(1);
});
Some(socket)
}
|
conditional_block
|
config.rs
|
`
pub fn new() -> Self {
Self {
data: HashMap::new(),
}
}
/// Insert a `pkey`+`lkey` into the set
pub fn insert(&mut self, pkey: &str, lkey: &str) {
let mut entry = self.data.remove(pkey).unwrap_or_default();
entry.insert(lkey.to_owned());
self.data.insert(pkey.to_owned(), entry);
}
/// True if the set contains `pkey`+`lkey`
pub fn contains(&self, pkey: &str, lkey: &str) -> bool {
if let Some(entry) = self.data.get(pkey) {
entry.get(lkey).is_some()
} else {
false
}
}
/// True if the set contains the `pkey`
pub fn contains_pkey(&self, pkey: &str) -> bool {
self.data.get(pkey).is_some()
}
/// Remove a `pkey`+`lkey`
pub fn remove(&mut self, pkey: &str, lkey: &str) {
if let Some(entry) = self.data.get_mut(pkey) {
entry.remove(lkey);
}
}
/// Remove the `pkey` and all `lkey`s under it
pub fn remove_pkey(&mut self, pkey: &str) {
self.data.remove(pkey);
}
}
impl Config {
/// parse command line options and return `Config`
pub fn new() -> Config {
let matches = App::new(NAME)
.version(VERSION)
.author("Brian Martin <bmartin@twitter.com>")
.about("high-resolution systems performance telemetry agent")
.arg(
Arg::with_name("listen")
.short("l")
.long("listen")
.required(true)
.takes_value(true)
.value_name("IP:PORT")
.help("Sets the listen address for metrics"),
)
.arg(
Arg::with_name("verbose")
.short("v")
.long("verbose")
.multiple(true)
.help("Increase verbosity by one level. Can be used more than once"),
)
.arg(
Arg::with_name("interval")
.long("interval")
.value_name("Seconds")
.help("Integration window duration and stats endpoint refresh time")
.takes_value(true),
)
.arg(
Arg::with_name("sample-rate")
.long("sample-rate")
.value_name("Hertz")
.help("Sets the sampling frequency for the counters")
.takes_value(true),
)
.arg(
Arg::with_name("sampler-timeout")
.long("sampler-timeout")
.value_name("MS")
.help("Sets the timeout for per-sampler execution")
.takes_value(true),
)
.arg(
Arg::with_name("max-sampler-timeouts")
.long("max-sampler-timeouts")
.value_name("MS")
.help("Sets the maximum number of consecutive sampler timeouts")
.takes_value(true),
)
.arg(
Arg::with_name("cpu")
.long("cpu")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.help("Enable statistics from CPU subsystem"),
)
.arg(
Arg::with_name("disk")
.long("disk")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.help("Enable statistics from Disk subsystem"),
)
.arg(
Arg::with_name("ebpf")
.long("ebpf")
.takes_value(true)
.multiple(true)
.possible_value("all")
.possible_value("block")
.possible_value("ext4")
.possible_value("scheduler")
.possible_value("xfs")
.help("Enable statistics from eBPF"),
)
.arg(
Arg::with_name("network")
.long("network")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.help("Enable statistics from Network subsystem"),
)
.arg(
Arg::with_name("perf")
.long("perf")
.takes_value(true)
.multiple(true)
.possible_value("totals")
.possible_value("per-cgroup")
.help("Enable statistics from Perf Events subsystem"),
)
.arg(
Arg::with_name("memcache")
.long("memcache")
.required(false)
.takes_value(true)
.value_name("IP:PORT")
.help("Connect to the given memcache server and produce stats"),
)
.arg(
Arg::with_name("stats-log")
.long("stats-log")
.required(false)
.takes_value(true)
.value_name("LOG FILE")
.help("Enable logging of stats to file"),
)
.arg(
Arg::with_name("sidecar")
.long("sidecar")
.required(false)
.help("Enables Mesos sidecar mode, instrumenting the container"),
)
.get_matches();
let listen = matches
.value_of("listen")
.unwrap()
.parse()
.unwrap_or_else(|_| {
println!("ERROR: listen address is malformed");
process::exit(1);
});
let memcache = if let Some(sock) = matches.value_of("memcache") {
let socket = sock.parse().unwrap_or_else(|_| {
println!("ERROR: memcache address is malformed");
process::exit(1);
});
Some(socket)
} else {
None
};
let sample_rate =
parse_float_arg(&matches, "sample-rate").unwrap_or(DEFAULT_SAMPLE_RATE_HZ);
let sampler_timeout = Duration::from_millis(
parse_numeric_arg(&matches, "sampler-timeout")
.unwrap_or(DEFAULT_SAMPLER_TIMEOUT_MILLISECONDS) as u64,
);
let max_sampler_timeouts = parse_numeric_arg(&matches, "max-sampler-timeouts")
.unwrap_or(DEFAULT_MAX_SAMPLER_TIMEOUTS);
let interval = parse_numeric_arg(&matches, "interval").unwrap_or(DEFAULT_INTERVAL_SECONDS)
as u64
* SECOND;
let cores = hardware_threads().unwrap_or(1);
let mut stats_enabled = Flags::new();
for subsystem in &["cpu", "disk", "ebpf", "network", "perf"] {
if let Some(values) = matches.values_of(subsystem) {
let flags: Vec<&str> = values.collect();
for flag in flags {
stats_enabled.insert(subsystem, flag);
}
}
}
let loglevel = match matches.occurrences_of("verbose") {
0 => Level::Info,
1 => Level::Debug,
_ => Level::Trace,
};
let stats_log = matches
.value_of("stats-log")
.map(std::string::ToString::to_string);
let sidecar = matches.is_present("sidecar");
Config {
cores,
flags: stats_enabled,
sample_rate,
sampler_timeout,
max_sampler_timeouts,
interval,
listen,
loglevel,
memcache,
stats_log,
sidecar,
}
}
/// what interval should the stats library latch on
pub fn interval(&self) -> u64 {
self.interval
}
/// what frequency the stats should be sampled on
pub fn sample_rate(&self) -> f64 {
self.sample_rate
}
/// the timeout for sampler execution
pub fn sampler_timeout(&self) -> Duration {
self.sampler_timeout
}
/// maximum consecutive sampler timeouts
pub fn max_sampler_timeouts(&self) -> usize {
self.max_sampler_timeouts
}
/// get listen address
pub fn listen(&self) -> SocketAddr {
self.listen
}
/// get log level
pub fn loglevel(&self) -> Level {
self.loglevel
}
/// how many cores on the host?
pub fn cores(&self) -> usize {
self.cores
}
pub fn memcache(&self) -> Option<SocketAddr> {
self.memcache
}
/// is a flag enabled for a subsystem?
pub fn is_enabled(&self, subsystem: &str, flag: &str) -> bool {
self.flags.contains(subsystem, flag)
}
pub fn is_subsystem_enabled(&self, subsystem: &str) -> bool {
self.flags.contains_pkey(subsystem)
}
pub fn stats_log(&self) -> Option<String> {
self.stats_log.clone()
}
}
/// a helper function to parse a numeric argument by name from `ArgMatches`
fn parse_numeric_arg(matches: &ArgMatches, key: &str) -> Option<usize> {
matches.value_of(key).map(|f| {
f.parse().unwrap_or_else(|_| {
println!("ERROR: could not parse {}", key);
process::exit(1);
})
})
}
/// a helper function to parse a floating point argument by name from `ArgMatches`
fn parse_float_arg(matches: &ArgMatches, key: &str) -> Option<f64> {
matches.value_of(key).map(|f| {
f.parse().unwrap_or_else(|_| {
println!("ERROR: could not parse {}", key);
process::exit(1);
|
})
})
|
random_line_split
|
|
tag.rs
|
32 = tag!(b"Glat");
/// `Gloc`
pub const GLOC: u32 = tag!(b"Gloc");
/// `glyf`
pub const GLYF: u32 = tag!(b"glyf");
/// `GPOS`
pub const GPOS: u32 = tag!(b"GPOS");
/// `grek`
pub const GREK: u32 = tag!(b"grek");
/// `GSUB`
pub const GSUB: u32 = tag!(b"GSUB");
/// `gujr`
pub const GUJR: u32 = tag!(b"gujr");
/// `gur2`
pub const GUR2: u32 = tag!(b"gur2");
/// `guru`
pub const GURU: u32 = tag!(b"guru");
/// `gvar`
pub const GVAR: u32 = tag!(b"gvar");
/// `half`
pub const HALF: u32 = tag!(b"half");
/// `haln`
pub const HALN: u32 = tag!(b"haln");
/// `hdmx`
pub const HDMX: u32 = tag!(b"hdmx");
/// `head`
pub const HEAD: u32 = tag!(b"head");
/// `hhea`
pub const HHEA: u32 = tag!(b"hhea");
/// `hlig`
pub const HLIG: u32 = tag!(b"hlig");
/// `hmtx`
pub const HMTX: u32 = tag!(b"hmtx");
/// `hsty`
pub const HSTY: u32 = tag!(b"hsty");
/// `init`
pub const INIT: u32 = tag!(b"init");
/// `isol`
pub const ISOL: u32 = tag!(b"isol");
/// `jpg `
pub const JPG: u32 = tag!(b"jpg ");
/// `JSTF`
pub const JSTF: u32 = tag!(b"JSTF");
/// `just`
pub const JUST: u32 = tag!(b"just");
/// `kern`
pub const KERN: u32 = tag!(b"kern");
/// `khmr`
pub const KHMR: u32 = tag!(b"khmr");
/// `knd2`
pub const KND2: u32 = tag!(b"knd2");
/// `knda`
pub const KNDA: u32 = tag!(b"knda");
/// `lao `
pub const LAO: u32 = tag!(b"lao ");
/// `latn`
pub const LATN: u32 = tag!(b"latn");
/// `lcar`
pub const LCAR: u32 = tag!(b"lcar");
/// `liga`
pub const LIGA: u32 = tag!(b"liga");
/// `lnum`
pub const LNUM: u32 = tag!(b"lnum");
/// `loca`
pub const LOCA: u32 = tag!(b"loca");
/// `locl`
pub const LOCL: u32 = tag!(b"locl");
/// `LTSH`
pub const LTSH: u32 = tag!(b"LTSH");
/// `mark`
pub const MARK: u32 = tag!(b"mark");
/// `MATH`
pub const MATH: u32 = tag!(b"MATH");
/// `maxp`
pub const MAXP: u32 = tag!(b"maxp");
/// `med2`
pub const MED2: u32 = tag!(b"med2");
/// `medi`
pub const MEDI: u32 = tag!(b"medi");
/// `mkmk`
pub const MKMK: u32 = tag!(b"mkmk");
/// `mlm2`
pub const MLM2: u32 = tag!(b"mlm2");
/// `mlym`
pub const MLYM: u32 = tag!(b"mlym");
/// `mort`
pub const MORT: u32 = tag!(b"mort");
/// `morx`
pub const MORX: u32 = tag!(b"morx");
/// `mset`
pub const MSET: u32 = tag!(b"mset");
/// `name`
pub const NAME: u32 = tag!(b"name");
/// `nukt`
pub const NUKT: u32 = tag!(b"nukt");
/// `onum`
pub const ONUM: u32 = tag!(b"onum");
/// `opbd`
pub const OPBD: u32 = tag!(b"opbd");
/// `ordn`
pub const ORDN: u32 = tag!(b"ordn");
/// `ory2`
pub const ORY2: u32 = tag!(b"ory2");
/// `orya`
pub const ORYA: u32 = tag!(b"orya");
/// `OS/2`
pub const OS_2: u32 = tag!(b"OS/2");
/// `OTTO`
pub const OTTO: u32 = tag!(b"OTTO");
/// `PCLT`
pub const PCLT: u32 = tag!(b"PCLT");
/// `pnum`
pub const PNUM: u32 = tag!(b"pnum");
/// `png `
pub const PNG: u32 = tag!(b"png ");
/// `post`
pub const POST: u32 = tag!(b"post");
/// `pref`
pub const PREF: u32 = tag!(b"pref");
/// `prep`
pub const PREP: u32 = tag!(b"prep");
/// `pres`
pub const PRES: u32 = tag!(b"pres");
/// `prop`
pub const PROP: u32 = tag!(b"prop");
/// `pstf`
pub const PSTF: u32 = tag!(b"pstf");
/// `psts`
pub const PSTS: u32 = tag!(b"psts");
/// `rclt`
pub const RCLT: u32 = tag!(b"rclt");
/// `rkrf`
pub const RKRF: u32 = tag!(b"rkrf");
/// `rlig`
pub const RLIG: u32 = tag!(b"rlig");
/// `rphf`
pub const RPHF: u32 = tag!(b"rphf");
/// `sbix`
pub const SBIX: u32 = tag!(b"sbix");
/// `Silf`
pub const SILF: u32 = tag!(b"Silf");
/// `Sill`
pub const SILL: u32 = tag!(b"Sill");
/// `sinh`
pub const SINH: u32 = tag!(b"sinh");
/// `smcp`
pub const SMCP: u32 = tag!(b"smcp");
/// `SND `
pub const SND: u32 = tag!(b"SND ");
/// `SVG `
pub const SVG: u32 = tag!(b"SVG ");
/// `syrc`
pub const SYRC: u32 = tag!(b"syrc");
/// `taml`
pub const TAML: u32 = tag!(b"taml");
/// `tel2`
pub const TEL2: u32 = tag!(b"tel2");
/// `telu`
pub const TELU: u32 = tag!(b"telu");
/// `thai`
pub const THAI: u32 = tag!(b"thai");
/// `tiff`
pub const TIFF: u32 = tag!(b"tiff");
/// `tml2`
pub const TML2: u32 = tag!(b"tml2");
/// `tnum`
pub const TNUM: u32 = tag!(b"tnum");
/// `trak`
pub const TRAK: u32 = tag!(b"trak");
/// `ttcf`
pub const TTCF: u32 = tag!(b"ttcf");
/// `URD `
pub const URD: u32 = tag!(b"URD ");
/// `vatu`
pub const VATU: u32 = tag!(b"vatu");
/// `VDMX`
pub const VDMX: u32 = tag!(b"VDMX");
/// `vert`
pub const VERT: u32 = tag!(b"vert");
/// `vhea`
pub const VHEA: u32 = tag!(b"vhea");
/// `vmtx`
pub const VMTX: u32 = tag!(b"vmtx");
/// `VORG`
pub const VORG: u32 = tag!(b"VORG");
/// `vrt2`
pub const VRT2: u32 = tag!(b"vrt2");
/// `Zapf`
pub const ZAPF: u32 = tag!(b"Zapf");
/// `zero`
pub const ZERO: u32 = tag!(b"zero");
#[cfg(test)]
mod tests {
use super::*;
mod from_string {
use super::*;
#[test]
fn test_four_chars()
|
{
let tag = from_string("beng").expect("invalid tag");
assert_eq!(tag, 1650814567);
}
|
identifier_body
|
|
tag.rs
|
(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let tag = self.0;
let bytes = tag.to_be_bytes();
if bytes.iter().all(|c| c.is_ascii() && !c.is_ascii_control()) {
let s = str::from_utf8(&bytes).unwrap(); // unwrap safe due to above check
s.fmt(f)
} else {
write!(f, "0x{:08x}", tag)
}
}
}
impl fmt::Debug for DisplayTag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self, f)
}
}
/// `abvf`
pub const ABVF: u32 = tag!(b"abvf");
/// `abvm`
pub const ABVM: u32 = tag!(b"abvm");
/// `abvs`
pub const ABVS: u32 = tag!(b"abvs");
/// `acnt`
pub const ACNT: u32 = tag!(b"acnt");
/// `afrc`
pub const AFRC: u32 = tag!(b"afrc");
/// `akhn`
pub const AKHN: u32 = tag!(b"akhn");
/// `arab`
pub const ARAB: u32 = tag!(b"arab");
/// `avar`
pub const AVAR: u32 = tag!(b"avar");
/// `BASE`
pub const BASE: u32 = tag!(b"BASE");
/// `bdat`
pub const BDAT: u32 = tag!(b"bdat");
/// `beng`
pub const BENG: u32 = tag!(b"beng");
/// `bloc`
pub const BLOC: u32 = tag!(b"bloc");
/// `blwf`
pub const BLWF: u32 = tag!(b"blwf");
/// `blwm`
pub const BLWM: u32 = tag!(b"blwm");
/// `blws`
pub const BLWS: u32 = tag!(b"blws");
/// `bng2`
pub const BNG2: u32 = tag!(b"bng2");
/// `bsln`
pub const BSLN: u32 = tag!(b"bsln");
/// `c2sc`
pub const C2SC: u32 = tag!(b"c2sc");
/// `calt`
pub const CALT: u32 = tag!(b"calt");
/// `CBDT`
pub const CBDT: u32 = tag!(b"CBDT");
/// `CBLC`
pub const CBLC: u32 = tag!(b"CBLC");
/// `ccmp`
pub const CCMP: u32 = tag!(b"ccmp");
/// `cfar`
pub const CFAR: u32 = tag!(b"cfar");
/// `CFF `
pub const CFF: u32 = tag!(b"CFF ");
/// `cjct`
pub const CJCT: u32 = tag!(b"cjct");
/// `clig`
pub const CLIG: u32 = tag!(b"clig");
/// `cmap`
pub const CMAP: u32 = tag!(b"cmap");
/// `COLR`
pub const COLR: u32 = tag!(b"COLR");
/// `CPAL`
pub const CPAL: u32 = tag!(b"CPAL");
/// `curs`
pub const CURS: u32 = tag!(b"curs");
/// `cvar`
pub const CVAR: u32 = tag!(b"cvar");
/// `cvt `
pub const CVT: u32 = tag!(b"cvt ");
/// `cyrl`
pub const CYRL: u32 = tag!(b"cyrl");
/// `dev2`
pub const DEV2: u32 = tag!(b"dev2");
/// `deva`
pub const DEVA: u32 = tag!(b"deva");
/// `DFLT`
pub const DFLT: u32 = tag!(b"DFLT");
/// `dist`
pub const DIST: u32 = tag!(b"dist");
/// `dlig`
pub const DLIG: u32 = tag!(b"dlig");
/// `dupe`
pub const DUPE: u32 = tag!(b"dupe");
/// `EBDT`
pub const EBDT: u32 = tag!(b"EBDT");
/// `EBLC`
pub const EBLC: u32 = tag!(b"EBLC");
/// `EBSC`
pub const EBSC: u32 = tag!(b"EBSC");
/// `FAR `
pub const FAR: u32 = tag!(b"FAR ");
/// `fdsc`
pub const FDSC: u32 = tag!(b"fdsc");
/// `Feat`
pub const FEAT2: u32 = tag!(b"Feat");
/// `feat`
pub const FEAT: u32 = tag!(b"feat");
/// `fin2`
pub const FIN2: u32 = tag!(b"fin2");
/// `fin3`
pub const FIN3: u32 = tag!(b"fin3");
/// `fina`
pub const FINA: u32 = tag!(b"fina");
/// `fmtx`
pub const FMTX: u32 = tag!(b"fmtx");
/// `fpgm`
pub const FPGM: u32 = tag!(b"fpgm");
/// `frac`
pub const FRAC: u32 = tag!(b"frac");
/// `fvar`
pub const FVAR: u32 = tag!(b"fvar");
/// `gasp`
pub const GASP: u32 = tag!(b"gasp");
/// `GDEF`
pub const GDEF: u32 = tag!(b"GDEF");
/// `gjr2`
pub const GJR2: u32 = tag!(b"gjr2");
/// `Glat`
pub const GLAT: u32 = tag!(b"Glat");
/// `Gloc`
pub const GLOC: u32 = tag!(b"Gloc");
/// `glyf`
pub const GLYF: u32 = tag!(b"glyf");
/// `GPOS`
pub const GPOS: u32 = tag!(b"GPOS");
/// `grek`
pub const GREK: u32 = tag!(b"grek");
/// `GSUB`
pub const GSUB: u32 = tag!(b"GSUB");
/// `gujr`
pub const GUJR: u32 = tag!(b"gujr");
/// `gur2`
pub const GUR2: u32 = tag!(b"gur2");
/// `guru`
pub const GURU: u32 = tag!(b"guru");
/// `gvar`
pub const GVAR: u32 = tag!(b"gvar");
/// `half`
pub const HALF: u32 = tag!(b"half");
/// `haln`
pub const HALN: u32 = tag!(b"haln");
/// `hdmx`
pub const HDMX: u32 = tag!(b"hdmx");
/// `head`
pub const HEAD: u32 = tag!(b"head");
/// `hhea`
pub const HHEA: u32 = tag!(b"hhea");
/// `hlig`
pub const HLIG: u32 = tag!(b"hlig");
/// `hmtx`
pub const HMTX: u32 = tag!(b"hmtx");
/// `hsty`
pub const HSTY: u32 = tag!(b"hsty");
/// `init`
pub const INIT: u32 = tag!(b"init");
/// `isol`
pub const ISOL: u32 = tag!(b"isol");
/// `jpg `
pub const JPG: u32 = tag!(b"jpg ");
/// `JSTF`
pub const JSTF: u32 = tag!(b"JSTF");
/// `just`
pub const JUST: u32 = tag!(b"just");
/// `kern`
pub const KERN: u32 = tag!(b"kern");
/// `khmr`
pub const KHMR: u32 = tag!(b"khmr");
/// `knd2`
pub const KND2: u32 = tag!(b"knd2");
/// `knda`
pub const KNDA: u32 = tag!(b"knda");
/// `lao `
pub const LAO: u32 = tag!(b"lao ");
/// `latn`
pub const LATN: u32 = tag!(b"latn");
/// `lcar`
pub const LCAR: u32 = tag!(b"lcar");
/// `liga`
pub const LIGA: u32 = tag!(b"liga");
/// `lnum`
pub const LNUM: u32 = tag!(b"lnum");
/// `loca`
pub const LOCA: u32 = tag!(b"loca");
/// `locl`
pub const LOCL: u32 = tag!(b"locl");
/// `LTSH`
pub const LTSH: u32 = tag!(b"LTSH");
/// `mark`
pub const MARK: u32 = tag!(b"
|
fmt
|
identifier_name
|
|
tag.rs
|
fmt::Display::fmt(self, f)
}
}
/// `abvf`
pub const ABVF: u32 = tag!(b"abvf");
/// `abvm`
pub const ABVM: u32 = tag!(b"abvm");
/// `abvs`
pub const ABVS: u32 = tag!(b"abvs");
/// `acnt`
pub const ACNT: u32 = tag!(b"acnt");
/// `afrc`
pub const AFRC: u32 = tag!(b"afrc");
/// `akhn`
pub const AKHN: u32 = tag!(b"akhn");
/// `arab`
pub const ARAB: u32 = tag!(b"arab");
/// `avar`
pub const AVAR: u32 = tag!(b"avar");
/// `BASE`
pub const BASE: u32 = tag!(b"BASE");
/// `bdat`
pub const BDAT: u32 = tag!(b"bdat");
/// `beng`
pub const BENG: u32 = tag!(b"beng");
/// `bloc`
pub const BLOC: u32 = tag!(b"bloc");
/// `blwf`
pub const BLWF: u32 = tag!(b"blwf");
/// `blwm`
pub const BLWM: u32 = tag!(b"blwm");
/// `blws`
pub const BLWS: u32 = tag!(b"blws");
/// `bng2`
pub const BNG2: u32 = tag!(b"bng2");
/// `bsln`
pub const BSLN: u32 = tag!(b"bsln");
/// `c2sc`
pub const C2SC: u32 = tag!(b"c2sc");
/// `calt`
pub const CALT: u32 = tag!(b"calt");
/// `CBDT`
pub const CBDT: u32 = tag!(b"CBDT");
/// `CBLC`
pub const CBLC: u32 = tag!(b"CBLC");
/// `ccmp`
pub const CCMP: u32 = tag!(b"ccmp");
/// `cfar`
pub const CFAR: u32 = tag!(b"cfar");
/// `CFF `
pub const CFF: u32 = tag!(b"CFF ");
/// `cjct`
pub const CJCT: u32 = tag!(b"cjct");
/// `clig`
pub const CLIG: u32 = tag!(b"clig");
/// `cmap`
pub const CMAP: u32 = tag!(b"cmap");
/// `COLR`
pub const COLR: u32 = tag!(b"COLR");
/// `CPAL`
pub const CPAL: u32 = tag!(b"CPAL");
/// `curs`
pub const CURS: u32 = tag!(b"curs");
/// `cvar`
pub const CVAR: u32 = tag!(b"cvar");
/// `cvt `
pub const CVT: u32 = tag!(b"cvt ");
/// `cyrl`
pub const CYRL: u32 = tag!(b"cyrl");
/// `dev2`
pub const DEV2: u32 = tag!(b"dev2");
/// `deva`
pub const DEVA: u32 = tag!(b"deva");
/// `DFLT`
pub const DFLT: u32 = tag!(b"DFLT");
/// `dist`
pub const DIST: u32 = tag!(b"dist");
/// `dlig`
pub const DLIG: u32 = tag!(b"dlig");
/// `dupe`
pub const DUPE: u32 = tag!(b"dupe");
/// `EBDT`
pub const EBDT: u32 = tag!(b"EBDT");
/// `EBLC`
pub const EBLC: u32 = tag!(b"EBLC");
/// `EBSC`
pub const EBSC: u32 = tag!(b"EBSC");
/// `FAR `
pub const FAR: u32 = tag!(b"FAR ");
/// `fdsc`
pub const FDSC: u32 = tag!(b"fdsc");
/// `Feat`
pub const FEAT2: u32 = tag!(b"Feat");
/// `feat`
pub const FEAT: u32 = tag!(b"feat");
/// `fin2`
pub const FIN2: u32 = tag!(b"fin2");
/// `fin3`
pub const FIN3: u32 = tag!(b"fin3");
/// `fina`
pub const FINA: u32 = tag!(b"fina");
/// `fmtx`
pub const FMTX: u32 = tag!(b"fmtx");
/// `fpgm`
pub const FPGM: u32 = tag!(b"fpgm");
/// `frac`
pub const FRAC: u32 = tag!(b"frac");
/// `fvar`
pub const FVAR: u32 = tag!(b"fvar");
/// `gasp`
pub const GASP: u32 = tag!(b"gasp");
/// `GDEF`
pub const GDEF: u32 = tag!(b"GDEF");
/// `gjr2`
pub const GJR2: u32 = tag!(b"gjr2");
/// `Glat`
pub const GLAT: u32 = tag!(b"Glat");
/// `Gloc`
pub const GLOC: u32 = tag!(b"Gloc");
/// `glyf`
pub const GLYF: u32 = tag!(b"glyf");
/// `GPOS`
pub const GPOS: u32 = tag!(b"GPOS");
/// `grek`
pub const GREK: u32 = tag!(b"grek");
/// `GSUB`
pub const GSUB: u32 = tag!(b"GSUB");
/// `gujr`
pub const GUJR: u32 = tag!(b"gujr");
/// `gur2`
pub const GUR2: u32 = tag!(b"gur2");
/// `guru`
pub const GURU: u32 = tag!(b"guru");
/// `gvar`
pub const GVAR: u32 = tag!(b"gvar");
/// `half`
pub const HALF: u32 = tag!(b"half");
/// `haln`
pub const HALN: u32 = tag!(b"haln");
/// `hdmx`
pub const HDMX: u32 = tag!(b"hdmx");
/// `head`
pub const HEAD: u32 = tag!(b"head");
/// `hhea`
pub const HHEA: u32 = tag!(b"hhea");
/// `hlig`
pub const HLIG: u32 = tag!(b"hlig");
/// `hmtx`
pub const HMTX: u32 = tag!(b"hmtx");
/// `hsty`
pub const HSTY: u32 = tag!(b"hsty");
/// `init`
pub const INIT: u32 = tag!(b"init");
/// `isol`
pub const ISOL: u32 = tag!(b"isol");
/// `jpg `
pub const JPG: u32 = tag!(b"jpg ");
/// `JSTF`
pub const JSTF: u32 = tag!(b"JSTF");
/// `just`
pub const JUST: u32 = tag!(b"just");
/// `kern`
pub const KERN: u32 = tag!(b"kern");
/// `khmr`
pub const KHMR: u32 = tag!(b"khmr");
/// `knd2`
pub const KND2: u32 = tag!(b"knd2");
/// `knda`
pub const KNDA: u32 = tag!(b"knda");
/// `lao `
pub const LAO: u32 = tag!(b"lao ");
/// `latn`
pub const LATN: u32 = tag!(b"latn");
/// `lcar`
pub const LCAR: u32 = tag!(b"lcar");
/// `liga`
pub const LIGA: u32 = tag!(b"liga");
/// `lnum`
pub const LNUM: u32 = tag!(b"lnum");
/// `loca`
pub const LOCA: u32 = tag!(b"loca");
/// `locl`
pub const LOCL: u32 = tag!(b"locl");
/// `LTSH`
pub const LTSH: u32 = tag!(b"LTSH");
/// `mark`
pub const MARK: u32 = tag!(b"mark");
/// `MATH`
pub const MATH: u32 = tag!(b"MATH");
/// `maxp`
pub const MAXP: u32 = tag!(b"maxp");
/// `med2`
pub const MED2: u32 = tag!(b"med2");
/// `medi`
|
pub const MKMK: u32 = tag!(b"mkmk");
/// `mlm2`
pub const MLM2: u32 = tag!(b"mlm2");
/// `mlym`
pub const M
|
pub const MEDI: u32 = tag!(b"medi");
/// `mkmk`
|
random_line_split
|
tag.rs
|
tag = (tag << 8) | (c as u32);
count += 1;
}
while count < 4 {
tag = (tag << 8) | (' ' as u32);
count += 1;
}
Ok(tag)
}
impl fmt::Display for DisplayTag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let tag = self.0;
let bytes = tag.to_be_bytes();
if bytes.iter().all(|c| c.is_ascii() && !c.is_ascii_control()) {
let s = str::from_utf8(&bytes).unwrap(); // unwrap safe due to above check
s.fmt(f)
} else {
write!(f, "0x{:08x}", tag)
}
}
}
impl fmt::Debug for DisplayTag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self, f)
}
}
/// `abvf`
pub const ABVF: u32 = tag!(b"abvf");
/// `abvm`
pub const ABVM: u32 = tag!(b"abvm");
/// `abvs`
pub const ABVS: u32 = tag!(b"abvs");
/// `acnt`
pub const ACNT: u32 = tag!(b"acnt");
/// `afrc`
pub const AFRC: u32 = tag!(b"afrc");
/// `akhn`
pub const AKHN: u32 = tag!(b"akhn");
/// `arab`
pub const ARAB: u32 = tag!(b"arab");
/// `avar`
pub const AVAR: u32 = tag!(b"avar");
/// `BASE`
pub const BASE: u32 = tag!(b"BASE");
/// `bdat`
pub const BDAT: u32 = tag!(b"bdat");
/// `beng`
pub const BENG: u32 = tag!(b"beng");
/// `bloc`
pub const BLOC: u32 = tag!(b"bloc");
/// `blwf`
pub const BLWF: u32 = tag!(b"blwf");
/// `blwm`
pub const BLWM: u32 = tag!(b"blwm");
/// `blws`
pub const BLWS: u32 = tag!(b"blws");
/// `bng2`
pub const BNG2: u32 = tag!(b"bng2");
/// `bsln`
pub const BSLN: u32 = tag!(b"bsln");
/// `c2sc`
pub const C2SC: u32 = tag!(b"c2sc");
/// `calt`
pub const CALT: u32 = tag!(b"calt");
/// `CBDT`
pub const CBDT: u32 = tag!(b"CBDT");
/// `CBLC`
pub const CBLC: u32 = tag!(b"CBLC");
/// `ccmp`
pub const CCMP: u32 = tag!(b"ccmp");
/// `cfar`
pub const CFAR: u32 = tag!(b"cfar");
/// `CFF `
pub const CFF: u32 = tag!(b"CFF ");
/// `cjct`
pub const CJCT: u32 = tag!(b"cjct");
/// `clig`
pub const CLIG: u32 = tag!(b"clig");
/// `cmap`
pub const CMAP: u32 = tag!(b"cmap");
/// `COLR`
pub const COLR: u32 = tag!(b"COLR");
/// `CPAL`
pub const CPAL: u32 = tag!(b"CPAL");
/// `curs`
pub const CURS: u32 = tag!(b"curs");
/// `cvar`
pub const CVAR: u32 = tag!(b"cvar");
/// `cvt `
pub const CVT: u32 = tag!(b"cvt ");
/// `cyrl`
pub const CYRL: u32 = tag!(b"cyrl");
/// `dev2`
pub const DEV2: u32 = tag!(b"dev2");
/// `deva`
pub const DEVA: u32 = tag!(b"deva");
/// `DFLT`
pub const DFLT: u32 = tag!(b"DFLT");
/// `dist`
pub const DIST: u32 = tag!(b"dist");
/// `dlig`
pub const DLIG: u32 = tag!(b"dlig");
/// `dupe`
pub const DUPE: u32 = tag!(b"dupe");
/// `EBDT`
pub const EBDT: u32 = tag!(b"EBDT");
/// `EBLC`
pub const EBLC: u32 = tag!(b"EBLC");
/// `EBSC`
pub const EBSC: u32 = tag!(b"EBSC");
/// `FAR `
pub const FAR: u32 = tag!(b"FAR ");
/// `fdsc`
pub const FDSC: u32 = tag!(b"fdsc");
/// `Feat`
pub const FEAT2: u32 = tag!(b"Feat");
/// `feat`
pub const FEAT: u32 = tag!(b"feat");
/// `fin2`
pub const FIN2: u32 = tag!(b"fin2");
/// `fin3`
pub const FIN3: u32 = tag!(b"fin3");
/// `fina`
pub const FINA: u32 = tag!(b"fina");
/// `fmtx`
pub const FMTX: u32 = tag!(b"fmtx");
/// `fpgm`
pub const FPGM: u32 = tag!(b"fpgm");
/// `frac`
pub const FRAC: u32 = tag!(b"frac");
/// `fvar`
pub const FVAR: u32 = tag!(b"fvar");
/// `gasp`
pub const GASP: u32 = tag!(b"gasp");
/// `GDEF`
pub const GDEF: u32 = tag!(b"GDEF");
/// `gjr2`
pub const GJR2: u32 = tag!(b"gjr2");
/// `Glat`
pub const GLAT: u32 = tag!(b"Glat");
/// `Gloc`
pub const GLOC: u32 = tag!(b"Gloc");
/// `glyf`
pub const GLYF: u32 = tag!(b"glyf");
/// `GPOS`
pub const GPOS: u32 = tag!(b"GPOS");
/// `grek`
pub const GREK: u32 = tag!(b"grek");
/// `GSUB`
pub const GSUB: u32 = tag!(b"GSUB");
/// `gujr`
pub const GUJR: u32 = tag!(b"gujr");
/// `gur2`
pub const GUR2: u32 = tag!(b"gur2");
/// `guru`
pub const GURU: u32 = tag!(b"guru");
/// `gvar`
pub const GVAR: u32 = tag!(b"gvar");
/// `half`
pub const HALF: u32 = tag!(b"half");
/// `haln`
pub const HALN: u32 = tag!(b"haln");
/// `hdmx`
pub const HDMX: u32 = tag!(b"hdmx");
/// `head`
pub const HEAD: u32 = tag!(b"head");
/// `hhea`
pub const HHEA: u32 = tag!(b"hhea");
/// `hlig`
pub const HLIG: u32 = tag!(b"hlig");
/// `hmtx`
pub const HMTX: u32 = tag!(b"hmtx");
/// `hsty`
pub const HSTY: u32 = tag!(b"hsty");
/// `init`
pub const INIT: u32 = tag!(b"init");
/// `isol`
pub const ISOL: u32 = tag!(b"isol");
/// `jpg `
pub const JPG: u32 = tag!(b"jpg ");
/// `JSTF`
pub const JSTF: u32 = tag!(b"JSTF");
/// `just`
pub const JUST: u32 = tag!(b"just");
/// `kern`
pub const KERN: u32 = tag!(b"kern");
/// `khmr`
pub const KHMR: u32 = tag!(b"khmr");
/// `knd2`
pub const KND2: u32 = tag!(b"knd2");
/// `knda`
pub const KNDA: u32 = tag!(b"knda");
/// `lao `
pub const LAO: u32 = tag!(b"lao ");
/// `latn`
pub const LATN: u32 = tag!(b"latn");
/// `lcar`
pub const LCAR: u32 = tag!(b"lcar");
/// `liga`
pub const LIGA: u32 = tag!(b"liga");
/// `lnum`
pub const LNUM: u32 = tag
|
{
return Err(ParseError::BadValue);
}
|
conditional_block
|
|
lib.rs
|
alter the User-Agent, Referer
* or Cookie headers that it will send and then call ``.call()`` to make the
* request, or you can call ``.post_body()`` to send the HTML yourself, if it
* is not publicly available to the wider Internet.
*
* Getting data out of the result
* ------------------------------
*
* At present, the successful return value of a request is simply a JSON object,
* a tree map. This *will* make it moderately difficult to work with, but if
* you're determined, it's possible. You'll end up with results like these:
*
* // First of all, you must, of course, have a response to work on.
* let mut response: TreeMap<~str, Json>
* = diffbot::call(..., "article", ...).unwrap();
*
* // Get the title of the article
* let title = match response.pop(&~"title").unwrap() {
* json::String(s) => s,
* _ => unreachable!(),
* };
*
* // Get the URL of each image
* let image_urls: ~[Url] = match response.pop(&~"images").unwrap() {
* json::List(images) => images.move_iter().map(|image| match image {
* json::Object(~mut o) => {
* match o.pop(&~"url").unwrap() {
* json::String(ref s) => from_str(s),
* _ => unreachable!(),
* }
* },
* _ => unreachable!(),
* }),
* _ => unreachable!(),
* }.collect();
*
* (Yep, I'll freely admit that these are clumsier than they might be in another
* language, which might allow something like this:
*
* let response = ...;
*
* let title = response.title;
* let image_urls = [from_str(image.url) for image in response.images];
*
* In time we may get strongly typed interfaces which would be much nicer, but
* for now, you'd need to do that yourself. It can be done with the tools in
* ``extra::serialize``, by the way.)
*/
#[crate_id = "diffbot#1.0"];
#[crate_type = "dylib"];
#[crate_type = "rlib"];
#[doc(html_logo_url = "diffy-d.png",
html_favicon_url = "http://www.diffbot.com/favicon.ico")];
extern mod extra = "extra#0.10-pre";
extern mod http = "http#0.1-pre";
use std::io::net::tcp::TcpStream;
use extra::json;
use extra::url::Url;
use http::client::RequestWriter;
use http::method::{Get, Post};
use http::headers::content_type::MediaType;
/// A convenience type which simply keeps track of a developer token and version
/// number.
///
/// There is no necessity to use this type; you can call ``call()`` directly
/// should you so desire.
#[deriving(Eq, Clone)]
pub struct Diffbot {
/// The developer's token
token: ~str,
/// The API version number
version: uint,
}
// Basic methods
impl Diffbot {
/// Construct a new ``Diffbot`` instance from the passed parameters.
pub fn new(token: ~str, version: uint) -> Diffbot {
Diffbot {
token: token,
version: version,
}
}
/// Make a call to any Diffbot API with the stored token and API version.
///
/// See the ``call()`` function for an explanation of the parameters.
pub fn
|
(&self, url: &Url, api: &str, fields: &[&str])
-> Result<json::Object, Error> {
call(url, self.token, api, fields, self.version)
}
/// Prepare a request to any Diffbot API with the stored token and API version.
///
/// See the ``call()`` function for an explanation of the parameters.
pub fn prepare_request(&self, url: &Url, api: &str, fields: &[&str])
-> Request {
prepare_request(url, self.token, api, fields, self.version)
}
}
/// An in-progress Diffbot API call.
pub struct Request {
priv request: RequestWriter<TcpStream>,
}
impl Request {
/// Set the value for Diffbot to send as the ``User-Agent`` header when
/// making your request.
pub fn user_agent(&mut self, user_agent: ~str) {
self.request.headers.extensions.insert(~"X-Forwarded-User-Agent",
user_agent);
}
/// Set the value for Diffbot to send as the ``Referer`` header when
/// making your request.
pub fn referer(&mut self, referer: ~str) {
self.request.headers.extensions.insert(~"X-Forwarded-Referer",
referer);
}
/// Set the value for Diffbot to send as the ``Cookie`` header when
/// making your request.
pub fn cookie(&mut self, cookie: ~str) {
self.request.headers.extensions.insert(~"X-Forwarded-Cookie",
cookie);
}
/// Set Diffbot's timeout, in milliseconds. The default is five seconds.
pub fn timeout(&mut self, milliseconds: u64) {
self.request.url.query.push((~"timeout", milliseconds.to_str()));
}
/// Execute the request and get the results.
pub fn call(self) -> Result<json::Object, Error> {
let mut response = match self.request.read_response() {
Ok(response) => response,
Err(_request) => return Err(IoError), // Request failed
};
let json = match json::from_reader(&mut response as &mut Reader) {
Ok(json) => json,
Err(error) => return Err(JsonError(error)), // It... wasn't JSON!?
};
// Now let's see if this is an API error or not.
// API errors are of the form {"error":"Invalid API.","errorCode":500}
match json {
json::Object(~mut o) => {
match o.pop(&~"errorCode") {
Some(json::Number(num)) => {
let num = num as uint;
let msg = match o.pop(&~"error")
.expect("JSON had errorCode but not error") {
json::String(s) => s,
uh_oh => fail!("error was {} instead of a string", uh_oh.to_str()),
};
Err(ApiError(msg, num))
},
Some(uh_oh) => fail!("errorCode was {} instead of a number", uh_oh.to_str()),
None => Ok(o),
}
},
// All API responses must be objects.
// If it's not, there's something screwy going on.
_ => fail!("API return value wasn't a JSON object"),
}
}
/// Execute the request as a POST request, sending it through with the given
/// text/html entity body.
///
/// This has the effect that Diffbot will skip requesting the URL and will
/// instead take the passed body as the HTML it is to check. This is mainly
/// useful for non-public websites.
pub fn post_body(mut self, body: &[u8]) -> Result<json::Object, Error> {
self.request.method = Post;
self.request.headers.content_type = Some(MediaType(~"text", ~"html", ~[]));
self.request.headers.content_length = Some(body.len());
// Calling write_headers is an extra and unnecessary safety guard which
// will cause the task to fail if the request has already started to be
// sent (which would render the three statements above ineffectual)
self.request.write_headers();
self.request.write(body);
self.call()
}
}
/// Error code: "unauthorized token"
pub static UNAUTHORIZED_TOKEN: uint = 401;
/// Error code: "requested page not found"
pub static REQUESTED_PAGE_NOT_FOUND: uint = 404;
/// Error code: "your token has exceeded the allowed number of calls, or has
/// otherwise been throttled for API abuse."
pub static TOKEN_EXCEEDED_OR_THROTTLED: uint = 429;
/// Error code: "error processing the page. Specific information will be
/// returned in the JSON response."
pub static ERROR_PROCESSING: uint = 500;
/// Something went wrong with the Diffbot API call.
#[deriving(Eq)]
pub enum Error {
/// An error code returned by the Diffbot API, with message and code.
/// Refer to http://www.diffbot.com/dev/docs/error/ for an explanation of
/// the error codes.
///
/// When comparing the error code, you should use these constants:
///
/// - ``UNAUTHORIZED_TOKEN``: "unauthorized token"
/// - ``REQUESTED_PAGE_NOT_FOUND``: "requested page not found"
/// - ``TOKEN_EXCEEDED_OR_THROTTLED``: "your token has exceeded the allowed
/// number of calls, or has otherwise been throttled for API abuse."
/// - ``ERROR_PROCESSING``: "error processing the page. Specific information
/// will be returned in the JSON response."
ApiError(~str, uint),
/// The JSON was not valid. This is one of those ones that *should* never
/// happen; you know...
///
/// Actually, I can percieve that it
|
call
|
identifier_name
|
lib.rs
|
alter the User-Agent, Referer
* or Cookie headers that it will send and then call ``.call()`` to make the
* request, or you can call ``.post_body()`` to send the HTML yourself, if it
* is not publicly available to the wider Internet.
*
* Getting data out of the result
* ------------------------------
*
* At present, the successful return value of a request is simply a JSON object,
* a tree map. This *will* make it moderately difficult to work with, but if
* you're determined, it's possible. You'll end up with results like these:
*
* // First of all, you must, of course, have a response to work on.
* let mut response: TreeMap<~str, Json>
* = diffbot::call(..., "article", ...).unwrap();
*
* // Get the title of the article
* let title = match response.pop(&~"title").unwrap() {
* json::String(s) => s,
* _ => unreachable!(),
* };
*
* // Get the URL of each image
* let image_urls: ~[Url] = match response.pop(&~"images").unwrap() {
* json::List(images) => images.move_iter().map(|image| match image {
* json::Object(~mut o) => {
* match o.pop(&~"url").unwrap() {
* json::String(ref s) => from_str(s),
* _ => unreachable!(),
* }
* },
* _ => unreachable!(),
* }),
* _ => unreachable!(),
* }.collect();
*
* (Yep, I'll freely admit that these are clumsier than they might be in another
* language, which might allow something like this:
*
* let response = ...;
*
* let title = response.title;
* let image_urls = [from_str(image.url) for image in response.images];
*
* In time we may get strongly typed interfaces which would be much nicer, but
* for now, you'd need to do that yourself. It can be done with the tools in
* ``extra::serialize``, by the way.)
*/
#[crate_id = "diffbot#1.0"];
#[crate_type = "dylib"];
#[crate_type = "rlib"];
#[doc(html_logo_url = "diffy-d.png",
html_favicon_url = "http://www.diffbot.com/favicon.ico")];
extern mod extra = "extra#0.10-pre";
extern mod http = "http#0.1-pre";
use std::io::net::tcp::TcpStream;
use extra::json;
use extra::url::Url;
use http::client::RequestWriter;
use http::method::{Get, Post};
use http::headers::content_type::MediaType;
/// A convenience type which simply keeps track of a developer token and version
/// number.
///
/// There is no necessity to use this type; you can call ``call()`` directly
/// should you so desire.
#[deriving(Eq, Clone)]
pub struct Diffbot {
/// The developer's token
token: ~str,
/// The API version number
version: uint,
}
// Basic methods
impl Diffbot {
/// Construct a new ``Diffbot`` instance from the passed parameters.
pub fn new(token: ~str, version: uint) -> Diffbot {
Diffbot {
token: token,
version: version,
}
}
/// Make a call to any Diffbot API with the stored token and API version.
///
/// See the ``call()`` function for an explanation of the parameters.
pub fn call(&self, url: &Url, api: &str, fields: &[&str])
-> Result<json::Object, Error> {
call(url, self.token, api, fields, self.version)
}
/// Prepare a request to any Diffbot API with the stored token and API version.
///
/// See the ``call()`` function for an explanation of the parameters.
pub fn prepare_request(&self, url: &Url, api: &str, fields: &[&str])
-> Request {
prepare_request(url, self.token, api, fields, self.version)
}
}
/// An in-progress Diffbot API call.
pub struct Request {
priv request: RequestWriter<TcpStream>,
}
impl Request {
/// Set the value for Diffbot to send as the ``User-Agent`` header when
/// making your request.
pub fn user_agent(&mut self, user_agent: ~str) {
self.request.headers.extensions.insert(~"X-Forwarded-User-Agent",
user_agent);
}
/// Set the value for Diffbot to send as the ``Referer`` header when
/// making your request.
pub fn referer(&mut self, referer: ~str) {
self.request.headers.extensions.insert(~"X-Forwarded-Referer",
referer);
}
/// Set the value for Diffbot to send as the ``Cookie`` header when
/// making your request.
pub fn cookie(&mut self, cookie: ~str) {
self.request.headers.extensions.insert(~"X-Forwarded-Cookie",
cookie);
}
/// Set Diffbot's timeout, in milliseconds. The default is five seconds.
pub fn timeout(&mut self, milliseconds: u64) {
self.request.url.query.push((~"timeout", milliseconds.to_str()));
}
/// Execute the request and get the results.
pub fn call(self) -> Result<json::Object, Error> {
let mut response = match self.request.read_response() {
Ok(response) => response,
Err(_request) => return Err(IoError), // Request failed
};
let json = match json::from_reader(&mut response as &mut Reader) {
Ok(json) => json,
Err(error) => return Err(JsonError(error)), // It... wasn't JSON!?
};
// Now let's see if this is an API error or not.
// API errors are of the form {"error":"Invalid API.","errorCode":500}
|
let num = num as uint;
let msg = match o.pop(&~"error")
.expect("JSON had errorCode but not error") {
json::String(s) => s,
uh_oh => fail!("error was {} instead of a string", uh_oh.to_str()),
};
Err(ApiError(msg, num))
},
Some(uh_oh) => fail!("errorCode was {} instead of a number", uh_oh.to_str()),
None => Ok(o),
}
},
// All API responses must be objects.
// If it's not, there's something screwy going on.
_ => fail!("API return value wasn't a JSON object"),
}
}
/// Execute the request as a POST request, sending it through with the given
/// text/html entity body.
///
/// This has the effect that Diffbot will skip requesting the URL and will
/// instead take the passed body as the HTML it is to check. This is mainly
/// useful for non-public websites.
pub fn post_body(mut self, body: &[u8]) -> Result<json::Object, Error> {
self.request.method = Post;
self.request.headers.content_type = Some(MediaType(~"text", ~"html", ~[]));
self.request.headers.content_length = Some(body.len());
// Calling write_headers is an extra and unnecessary safety guard which
// will cause the task to fail if the request has already started to be
// sent (which would render the three statements above ineffectual)
self.request.write_headers();
self.request.write(body);
self.call()
}
}
/// Error code: "unauthorized token"
pub static UNAUTHORIZED_TOKEN: uint = 401;
/// Error code: "requested page not found"
pub static REQUESTED_PAGE_NOT_FOUND: uint = 404;
/// Error code: "your token has exceeded the allowed number of calls, or has
/// otherwise been throttled for API abuse."
pub static TOKEN_EXCEEDED_OR_THROTTLED: uint = 429;
/// Error code: "error processing the page. Specific information will be
/// returned in the JSON response."
pub static ERROR_PROCESSING: uint = 500;
/// Something went wrong with the Diffbot API call.
#[deriving(Eq)]
pub enum Error {
/// An error code returned by the Diffbot API, with message and code.
/// Refer to http://www.diffbot.com/dev/docs/error/ for an explanation of
/// the error codes.
///
/// When comparing the error code, you should use these constants:
///
/// - ``UNAUTHORIZED_TOKEN``: "unauthorized token"
/// - ``REQUESTED_PAGE_NOT_FOUND``: "requested page not found"
/// - ``TOKEN_EXCEEDED_OR_THROTTLED``: "your token has exceeded the allowed
/// number of calls, or has otherwise been throttled for API abuse."
/// - ``ERROR_PROCESSING``: "error processing the page. Specific information
/// will be returned in the JSON response."
ApiError(~str, uint),
/// The JSON was not valid. This is one of those ones that *should* never
/// happen; you know...
///
/// Actually, I can percieve that it might
|
match json {
json::Object(~mut o) => {
match o.pop(&~"errorCode") {
Some(json::Number(num)) => {
|
random_line_split
|
lib.rs
|
alter the User-Agent, Referer
* or Cookie headers that it will send and then call ``.call()`` to make the
* request, or you can call ``.post_body()`` to send the HTML yourself, if it
* is not publicly available to the wider Internet.
*
* Getting data out of the result
* ------------------------------
*
* At present, the successful return value of a request is simply a JSON object,
* a tree map. This *will* make it moderately difficult to work with, but if
* you're determined, it's possible. You'll end up with results like these:
*
* // First of all, you must, of course, have a response to work on.
* let mut response: TreeMap<~str, Json>
* = diffbot::call(..., "article", ...).unwrap();
*
* // Get the title of the article
* let title = match response.pop(&~"title").unwrap() {
* json::String(s) => s,
* _ => unreachable!(),
* };
*
* // Get the URL of each image
* let image_urls: ~[Url] = match response.pop(&~"images").unwrap() {
* json::List(images) => images.move_iter().map(|image| match image {
* json::Object(~mut o) => {
* match o.pop(&~"url").unwrap() {
* json::String(ref s) => from_str(s),
* _ => unreachable!(),
* }
* },
* _ => unreachable!(),
* }),
* _ => unreachable!(),
* }.collect();
*
* (Yep, I'll freely admit that these are clumsier than they might be in another
* language, which might allow something like this:
*
* let response = ...;
*
* let title = response.title;
* let image_urls = [from_str(image.url) for image in response.images];
*
* In time we may get strongly typed interfaces which would be much nicer, but
* for now, you'd need to do that yourself. It can be done with the tools in
* ``extra::serialize``, by the way.)
*/
#[crate_id = "diffbot#1.0"];
#[crate_type = "dylib"];
#[crate_type = "rlib"];
#[doc(html_logo_url = "diffy-d.png",
html_favicon_url = "http://www.diffbot.com/favicon.ico")];
extern mod extra = "extra#0.10-pre";
extern mod http = "http#0.1-pre";
use std::io::net::tcp::TcpStream;
use extra::json;
use extra::url::Url;
use http::client::RequestWriter;
use http::method::{Get, Post};
use http::headers::content_type::MediaType;
/// A convenience type which simply keeps track of a developer token and version
/// number.
///
/// There is no necessity to use this type; you can call ``call()`` directly
/// should you so desire.
#[deriving(Eq, Clone)]
pub struct Diffbot {
/// The developer's token
token: ~str,
/// The API version number
version: uint,
}
// Basic methods
impl Diffbot {
/// Construct a new ``Diffbot`` instance from the passed parameters.
pub fn new(token: ~str, version: uint) -> Diffbot {
Diffbot {
token: token,
version: version,
}
}
/// Make a call to any Diffbot API with the stored token and API version.
///
/// See the ``call()`` function for an explanation of the parameters.
pub fn call(&self, url: &Url, api: &str, fields: &[&str])
-> Result<json::Object, Error> {
call(url, self.token, api, fields, self.version)
}
/// Prepare a request to any Diffbot API with the stored token and API version.
///
/// See the ``call()`` function for an explanation of the parameters.
pub fn prepare_request(&self, url: &Url, api: &str, fields: &[&str])
-> Request
|
}
/// An in-progress Diffbot API call.
pub struct Request {
priv request: RequestWriter<TcpStream>,
}
impl Request {
/// Set the value for Diffbot to send as the ``User-Agent`` header when
/// making your request.
pub fn user_agent(&mut self, user_agent: ~str) {
self.request.headers.extensions.insert(~"X-Forwarded-User-Agent",
user_agent);
}
/// Set the value for Diffbot to send as the ``Referer`` header when
/// making your request.
pub fn referer(&mut self, referer: ~str) {
self.request.headers.extensions.insert(~"X-Forwarded-Referer",
referer);
}
/// Set the value for Diffbot to send as the ``Cookie`` header when
/// making your request.
pub fn cookie(&mut self, cookie: ~str) {
self.request.headers.extensions.insert(~"X-Forwarded-Cookie",
cookie);
}
/// Set Diffbot's timeout, in milliseconds. The default is five seconds.
pub fn timeout(&mut self, milliseconds: u64) {
self.request.url.query.push((~"timeout", milliseconds.to_str()));
}
/// Execute the request and get the results.
pub fn call(self) -> Result<json::Object, Error> {
let mut response = match self.request.read_response() {
Ok(response) => response,
Err(_request) => return Err(IoError), // Request failed
};
let json = match json::from_reader(&mut response as &mut Reader) {
Ok(json) => json,
Err(error) => return Err(JsonError(error)), // It... wasn't JSON!?
};
// Now let's see if this is an API error or not.
// API errors are of the form {"error":"Invalid API.","errorCode":500}
match json {
json::Object(~mut o) => {
match o.pop(&~"errorCode") {
Some(json::Number(num)) => {
let num = num as uint;
let msg = match o.pop(&~"error")
.expect("JSON had errorCode but not error") {
json::String(s) => s,
uh_oh => fail!("error was {} instead of a string", uh_oh.to_str()),
};
Err(ApiError(msg, num))
},
Some(uh_oh) => fail!("errorCode was {} instead of a number", uh_oh.to_str()),
None => Ok(o),
}
},
// All API responses must be objects.
// If it's not, there's something screwy going on.
_ => fail!("API return value wasn't a JSON object"),
}
}
/// Execute the request as a POST request, sending it through with the given
/// text/html entity body.
///
/// This has the effect that Diffbot will skip requesting the URL and will
/// instead take the passed body as the HTML it is to check. This is mainly
/// useful for non-public websites.
pub fn post_body(mut self, body: &[u8]) -> Result<json::Object, Error> {
self.request.method = Post;
self.request.headers.content_type = Some(MediaType(~"text", ~"html", ~[]));
self.request.headers.content_length = Some(body.len());
// Calling write_headers is an extra and unnecessary safety guard which
// will cause the task to fail if the request has already started to be
// sent (which would render the three statements above ineffectual)
self.request.write_headers();
self.request.write(body);
self.call()
}
}
/// Error code: "unauthorized token"
pub static UNAUTHORIZED_TOKEN: uint = 401;
/// Error code: "requested page not found"
pub static REQUESTED_PAGE_NOT_FOUND: uint = 404;
/// Error code: "your token has exceeded the allowed number of calls, or has
/// otherwise been throttled for API abuse."
pub static TOKEN_EXCEEDED_OR_THROTTLED: uint = 429;
/// Error code: "error processing the page. Specific information will be
/// returned in the JSON response."
pub static ERROR_PROCESSING: uint = 500;
/// Something went wrong with the Diffbot API call.
#[deriving(Eq)]
pub enum Error {
/// An error code returned by the Diffbot API, with message and code.
/// Refer to http://www.diffbot.com/dev/docs/error/ for an explanation of
/// the error codes.
///
/// When comparing the error code, you should use these constants:
///
/// - ``UNAUTHORIZED_TOKEN``: "unauthorized token"
/// - ``REQUESTED_PAGE_NOT_FOUND``: "requested page not found"
/// - ``TOKEN_EXCEEDED_OR_THROTTLED``: "your token has exceeded the allowed
/// number of calls, or has otherwise been throttled for API abuse."
/// - ``ERROR_PROCESSING``: "error processing the page. Specific information
/// will be returned in the JSON response."
ApiError(~str, uint),
/// The JSON was not valid. This is one of those ones that *should* never
/// happen; you know...
///
/// Actually, I can percieve that
|
{
prepare_request(url, self.token, api, fields, self.version)
}
|
identifier_body
|
main.rs
|
// length: usize
// }
//
// impl<T> ImageDataIterator<T> {
// fn from_dynamic_image(img: &DynamicImage) -> ImageDataIterator<T> {
// let dimensions = img.dimensions();
//
// ImageDataIterator {
// originalIterator: img.to_rgba().pixels(),
// length: ( dimensions.0 * dimensions.1 ) as usize
// }
// }
// }
//
// impl<'a, T> Iterator for ImageDataIterator<'a, T> {
// type Item = [u8; 4];
// fn next(&mut self) -> Option<[u8; 4]> {
// return match self.originalIterator.next() {
// Some(pixel) => {
// let rgba = pixel.2;
// let data: [u8; 4] = [ rgba[0], rgba[1], rgba[2], rgba[3] ];
// return Some(data);
// },
// None => None
// }
// }
// }
//
// impl<'a, T> ExactSizeIterator for ImageDataIterator<'a, T> {
// fn len(&self) -> usize {
// return self.length;
// }
// }
fn
|
() {
let img = match image::open("./media/autumn.png") {
Ok(image) => image,
Err(err) => panic!("{:?}", err)
};
{ // stdout image info
println!("color {:?}", img.color());
println!("dimensions {:?}", img.dimensions());
// println!("first pixel {:?}", img.pixels().next().unwrap());
// println!("first pixel {:?}", img.pixels().next().map(|item| item.2).unwrap());
}
let instance = {
let inst_exts = vulkano_win::required_extensions();
Instance::new(None, &inst_exts, None).expect("failed to create instance")
};
//TODO: list devices, choose based on user input
for p in PhysicalDevice::enumerate(&instance) {
print!("{}", p.name());
println!(", driver version: {}", p.driver_version());
}
let physical = PhysicalDevice::enumerate(&instance)
.next()
.expect("no device available");
let queue_family = physical
.queue_families()
.find(|&q| q.supports_graphics())
.expect("couldn't find a graphical queue family");
let (device, mut queues) = {
let unraw_dev_exts = vulkano::device::DeviceExtensions {
khr_swapchain: true,
.. vulkano::device::DeviceExtensions::none()
};
let mut all_dev_exts = RawDeviceExtensions::from(&unraw_dev_exts);
all_dev_exts.insert(std::ffi::CString::new("VK_KHR_storage_buffer_storage_class").unwrap());
Device::new(
physical,
&Features::none(),
all_dev_exts,
[(queue_family, 0.5)].iter().cloned(),
)
.expect("failed to create device")
};
let queue = queues.next().unwrap();
// let particles = init_particles_buffer();
// let particles_buffer =
// CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), particles)
// .expect("failed to create buffer");
// let shader = cs::Shader::load(device.clone()).expect("failed to create shader module");
// let compute_pipeline = Arc::new(
// ComputePipeline::new(device.clone(), &shader.main_entry_point(), &())
// .expect("failed to create compute pipeline"),
// );
// let set = Arc::new(
// PersistentDescriptorSet::start(compute_pipeline.clone(), 0)
// .add_buffer(particles_buffer.clone())
// .unwrap()
// .build()
// .unwrap(),
// );
// let command_buffer = AutoCommandBufferBuilder::new(device.clone(), queue.family())
// .unwrap()
// .dispatch([PARTICLE_COUNT as u32 / 32, 1, 1], compute_pipeline.clone(), set.clone(), ())
// .unwrap()
// .build()
// .unwrap();
let mut events_loop = EventsLoop::new();
let surface = WindowBuilder::new().build_vk_surface(&events_loop, instance.clone()).unwrap();
let window = surface.window();
let (mut swapchain, images) = {
let caps = surface.capabilities(physical)
.expect("failed to get surface capabilities");
let usage = caps.supported_usage_flags;
let alpha = caps.supported_composite_alpha.iter().next().unwrap();
let format = caps.supported_formats[0].0;
caps.supported_formats.iter().for_each(|sth| println!("{:?}", sth));
let initial_dimensions = if let Some(dimensions) = window.get_inner_size() {
let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
[dimensions.0, dimensions.1]
} else {
return;
};
Swapchain::new(device.clone(), surface.clone(),
caps.min_image_count, format, initial_dimensions, 1, usage, &queue,
SurfaceTransform::Identity, alpha, PresentMode::Fifo, true, None)
.expect("failed to create swapchain")
};
#[derive(Default, Debug, Clone)]
struct Vertex {
position: [f32; 2]
}
let vertex_buffer = {
vulkano::impl_vertex!(Vertex, position);
CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), [
Vertex { position: [-0.5, -0.5] },
Vertex { position: [ 0.5, -0.5] },
Vertex { position: [-0.5, 0.5] },
Vertex { position: [ 0.5, 0.5] },
Vertex { position: [-0.5, 0.5] },
Vertex { position: [ 0.5, -0.5] },
].iter().cloned()).unwrap()
};
// texture
let img_dim = img.dimensions();
let (autumn_texture, autumn_texture_future) = match ImmutableImage::from_iter(
img.as_rgba8().unwrap().pixels().map(|rgba| {
let bytes : [u8; 4] = [rgba[0], rgba[1], rgba[2], rgba[3]];
bytes
}),
Dimensions::Dim2d { width: img_dim.0, height: img_dim.1 },
Format::R8G8B8A8Unorm,
queue.clone()
) {
Ok(i) => i,
Err(err) => panic!("{:?}", err)
};
let sampler = Sampler::new(device.clone(), Filter::Linear, Filter::Linear,
MipmapMode::Nearest, SamplerAddressMode::Repeat, SamplerAddressMode::Repeat,
SamplerAddressMode::Repeat, 0.0, 1.0, 0.0, 0.0).unwrap();
mod square_vs {
vulkano_shaders::shader!{
ty: "vertex",
path: "./src/shader/square.vs.glsl"
}
}
mod square_fs {
vulkano_shaders::shader!{
ty: "fragment",
path: "./src/shader/square.fs.glsl"
}
}
let square_vs = square_vs::Shader::load(device.clone()).unwrap();
let square_fs = square_fs::Shader::load(device.clone()).unwrap();
let render_pass = Arc::new(vulkano::single_pass_renderpass!(
device.clone(),
attachments: {
color: {
load: Clear,
store: Store,
format: swapchain.format(),
samples: 1,
}
},
pass: {
color: [color],
depth_stencil: {}
}
).unwrap());
let pipeline = Arc::new(GraphicsPipeline::start()
.vertex_input_single_buffer::<Vertex>()
.vertex_shader(square_vs.main_entry_point(), ())
.triangle_list()
.viewports_dynamic_scissors_irrelevant(1)
.fragment_shader(square_fs.main_entry_point(), ())
.blend_alpha_blending()
.render_pass(Subpass::from(render_pass.clone(), 0).unwrap())
.build(device.clone())
.unwrap());
let set = Arc::new(PersistentDescriptorSet::start(pipeline.clone(), 0)
.add_sampled_image(autumn_texture.clone(), sampler.clone()).unwrap()
.build().unwrap()
);
let mut dynamic_state = DynamicState {
line_width: None,
viewports: None,
scissors: None,
compare_mask: None,
write_mask: None,
reference: None
};
let mut framebuffers = window_size_dependent_setup(&images, render_pass.clone(), &mut dynamic_state);
let mut recreate_swapchain = false;
let mut previous_frame_end = Box::new(sync::now(device.clone()).join(autumn_texture_future)) as Box<dyn GpuFuture>;
let t0 = time::SystemTime::
|
main
|
identifier_name
|
main.rs
|
{
// Some(pixel) => {
// let rgba = pixel.2;
// let data: [u8; 4] = [ rgba[0], rgba[1], rgba[2], rgba[3] ];
// return Some(data);
// },
// None => None
// }
// }
// }
//
// impl<'a, T> ExactSizeIterator for ImageDataIterator<'a, T> {
// fn len(&self) -> usize {
// return self.length;
// }
// }
fn main() {
let img = match image::open("./media/autumn.png") {
Ok(image) => image,
Err(err) => panic!("{:?}", err)
};
{ // stdout image info
println!("color {:?}", img.color());
println!("dimensions {:?}", img.dimensions());
// println!("first pixel {:?}", img.pixels().next().unwrap());
// println!("first pixel {:?}", img.pixels().next().map(|item| item.2).unwrap());
}
let instance = {
let inst_exts = vulkano_win::required_extensions();
Instance::new(None, &inst_exts, None).expect("failed to create instance")
};
//TODO: list devices, choose based on user input
for p in PhysicalDevice::enumerate(&instance) {
print!("{}", p.name());
println!(", driver version: {}", p.driver_version());
}
let physical = PhysicalDevice::enumerate(&instance)
.next()
.expect("no device available");
let queue_family = physical
.queue_families()
.find(|&q| q.supports_graphics())
.expect("couldn't find a graphical queue family");
let (device, mut queues) = {
let unraw_dev_exts = vulkano::device::DeviceExtensions {
khr_swapchain: true,
.. vulkano::device::DeviceExtensions::none()
};
let mut all_dev_exts = RawDeviceExtensions::from(&unraw_dev_exts);
all_dev_exts.insert(std::ffi::CString::new("VK_KHR_storage_buffer_storage_class").unwrap());
Device::new(
physical,
&Features::none(),
all_dev_exts,
[(queue_family, 0.5)].iter().cloned(),
)
.expect("failed to create device")
};
let queue = queues.next().unwrap();
// let particles = init_particles_buffer();
// let particles_buffer =
// CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), particles)
// .expect("failed to create buffer");
// let shader = cs::Shader::load(device.clone()).expect("failed to create shader module");
// let compute_pipeline = Arc::new(
// ComputePipeline::new(device.clone(), &shader.main_entry_point(), &())
// .expect("failed to create compute pipeline"),
// );
// let set = Arc::new(
// PersistentDescriptorSet::start(compute_pipeline.clone(), 0)
// .add_buffer(particles_buffer.clone())
// .unwrap()
// .build()
// .unwrap(),
// );
// let command_buffer = AutoCommandBufferBuilder::new(device.clone(), queue.family())
// .unwrap()
// .dispatch([PARTICLE_COUNT as u32 / 32, 1, 1], compute_pipeline.clone(), set.clone(), ())
// .unwrap()
// .build()
// .unwrap();
let mut events_loop = EventsLoop::new();
let surface = WindowBuilder::new().build_vk_surface(&events_loop, instance.clone()).unwrap();
let window = surface.window();
let (mut swapchain, images) = {
let caps = surface.capabilities(physical)
.expect("failed to get surface capabilities");
let usage = caps.supported_usage_flags;
let alpha = caps.supported_composite_alpha.iter().next().unwrap();
let format = caps.supported_formats[0].0;
caps.supported_formats.iter().for_each(|sth| println!("{:?}", sth));
let initial_dimensions = if let Some(dimensions) = window.get_inner_size() {
let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
[dimensions.0, dimensions.1]
} else {
return;
};
Swapchain::new(device.clone(), surface.clone(),
caps.min_image_count, format, initial_dimensions, 1, usage, &queue,
SurfaceTransform::Identity, alpha, PresentMode::Fifo, true, None)
.expect("failed to create swapchain")
};
#[derive(Default, Debug, Clone)]
struct Vertex {
position: [f32; 2]
}
let vertex_buffer = {
vulkano::impl_vertex!(Vertex, position);
CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), [
Vertex { position: [-0.5, -0.5] },
Vertex { position: [ 0.5, -0.5] },
Vertex { position: [-0.5, 0.5] },
Vertex { position: [ 0.5, 0.5] },
Vertex { position: [-0.5, 0.5] },
Vertex { position: [ 0.5, -0.5] },
].iter().cloned()).unwrap()
};
// texture
let img_dim = img.dimensions();
let (autumn_texture, autumn_texture_future) = match ImmutableImage::from_iter(
img.as_rgba8().unwrap().pixels().map(|rgba| {
let bytes : [u8; 4] = [rgba[0], rgba[1], rgba[2], rgba[3]];
bytes
}),
Dimensions::Dim2d { width: img_dim.0, height: img_dim.1 },
Format::R8G8B8A8Unorm,
queue.clone()
) {
Ok(i) => i,
Err(err) => panic!("{:?}", err)
};
let sampler = Sampler::new(device.clone(), Filter::Linear, Filter::Linear,
MipmapMode::Nearest, SamplerAddressMode::Repeat, SamplerAddressMode::Repeat,
SamplerAddressMode::Repeat, 0.0, 1.0, 0.0, 0.0).unwrap();
mod square_vs {
vulkano_shaders::shader!{
ty: "vertex",
path: "./src/shader/square.vs.glsl"
}
}
mod square_fs {
vulkano_shaders::shader!{
ty: "fragment",
path: "./src/shader/square.fs.glsl"
}
}
let square_vs = square_vs::Shader::load(device.clone()).unwrap();
let square_fs = square_fs::Shader::load(device.clone()).unwrap();
let render_pass = Arc::new(vulkano::single_pass_renderpass!(
device.clone(),
attachments: {
color: {
load: Clear,
store: Store,
format: swapchain.format(),
samples: 1,
}
},
pass: {
color: [color],
depth_stencil: {}
}
).unwrap());
let pipeline = Arc::new(GraphicsPipeline::start()
.vertex_input_single_buffer::<Vertex>()
.vertex_shader(square_vs.main_entry_point(), ())
.triangle_list()
.viewports_dynamic_scissors_irrelevant(1)
.fragment_shader(square_fs.main_entry_point(), ())
.blend_alpha_blending()
.render_pass(Subpass::from(render_pass.clone(), 0).unwrap())
.build(device.clone())
.unwrap());
let set = Arc::new(PersistentDescriptorSet::start(pipeline.clone(), 0)
.add_sampled_image(autumn_texture.clone(), sampler.clone()).unwrap()
.build().unwrap()
);
let mut dynamic_state = DynamicState {
line_width: None,
viewports: None,
scissors: None,
compare_mask: None,
write_mask: None,
reference: None
};
let mut framebuffers = window_size_dependent_setup(&images, render_pass.clone(), &mut dynamic_state);
let mut recreate_swapchain = false;
let mut previous_frame_end = Box::new(sync::now(device.clone()).join(autumn_texture_future)) as Box<dyn GpuFuture>;
let t0 = time::SystemTime::now();
let mut now = t0;
let mut then;
loop {
previous_frame_end.cleanup_finished();
if recreate_swapchain {
let dimensions = if let Some(dimensions) = window.get_inner_size() {
let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
[dimensions.0, dimensions.1]
} else {
return ;
};
let (new_swapchain, new_images) = match swapchain.recreate_with_dimension(dimensions) {
Ok(r) => r,
Err(SwapchainCreationError::UnsupportedDimensions) => continue,
|
Err(err) => panic!("{:?}", err)
};
|
random_line_split
|
|
main.rs
|
// length: usize
// }
//
// impl<T> ImageDataIterator<T> {
// fn from_dynamic_image(img: &DynamicImage) -> ImageDataIterator<T> {
// let dimensions = img.dimensions();
//
// ImageDataIterator {
// originalIterator: img.to_rgba().pixels(),
// length: ( dimensions.0 * dimensions.1 ) as usize
// }
// }
// }
//
// impl<'a, T> Iterator for ImageDataIterator<'a, T> {
// type Item = [u8; 4];
// fn next(&mut self) -> Option<[u8; 4]> {
// return match self.originalIterator.next() {
// Some(pixel) => {
// let rgba = pixel.2;
// let data: [u8; 4] = [ rgba[0], rgba[1], rgba[2], rgba[3] ];
// return Some(data);
// },
// None => None
// }
// }
// }
//
// impl<'a, T> ExactSizeIterator for ImageDataIterator<'a, T> {
// fn len(&self) -> usize {
// return self.length;
// }
// }
fn main()
|
//TODO: list devices, choose based on user input
for p in PhysicalDevice::enumerate(&instance) {
print!("{}", p.name());
println!(", driver version: {}", p.driver_version());
}
let physical = PhysicalDevice::enumerate(&instance)
.next()
.expect("no device available");
let queue_family = physical
.queue_families()
.find(|&q| q.supports_graphics())
.expect("couldn't find a graphical queue family");
let (device, mut queues) = {
let unraw_dev_exts = vulkano::device::DeviceExtensions {
khr_swapchain: true,
.. vulkano::device::DeviceExtensions::none()
};
let mut all_dev_exts = RawDeviceExtensions::from(&unraw_dev_exts);
all_dev_exts.insert(std::ffi::CString::new("VK_KHR_storage_buffer_storage_class").unwrap());
Device::new(
physical,
&Features::none(),
all_dev_exts,
[(queue_family, 0.5)].iter().cloned(),
)
.expect("failed to create device")
};
let queue = queues.next().unwrap();
// let particles = init_particles_buffer();
// let particles_buffer =
// CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), particles)
// .expect("failed to create buffer");
// let shader = cs::Shader::load(device.clone()).expect("failed to create shader module");
// let compute_pipeline = Arc::new(
// ComputePipeline::new(device.clone(), &shader.main_entry_point(), &())
// .expect("failed to create compute pipeline"),
// );
// let set = Arc::new(
// PersistentDescriptorSet::start(compute_pipeline.clone(), 0)
// .add_buffer(particles_buffer.clone())
// .unwrap()
// .build()
// .unwrap(),
// );
// let command_buffer = AutoCommandBufferBuilder::new(device.clone(), queue.family())
// .unwrap()
// .dispatch([PARTICLE_COUNT as u32 / 32, 1, 1], compute_pipeline.clone(), set.clone(), ())
// .unwrap()
// .build()
// .unwrap();
let mut events_loop = EventsLoop::new();
let surface = WindowBuilder::new().build_vk_surface(&events_loop, instance.clone()).unwrap();
let window = surface.window();
let (mut swapchain, images) = {
let caps = surface.capabilities(physical)
.expect("failed to get surface capabilities");
let usage = caps.supported_usage_flags;
let alpha = caps.supported_composite_alpha.iter().next().unwrap();
let format = caps.supported_formats[0].0;
caps.supported_formats.iter().for_each(|sth| println!("{:?}", sth));
let initial_dimensions = if let Some(dimensions) = window.get_inner_size() {
let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
[dimensions.0, dimensions.1]
} else {
return;
};
Swapchain::new(device.clone(), surface.clone(),
caps.min_image_count, format, initial_dimensions, 1, usage, &queue,
SurfaceTransform::Identity, alpha, PresentMode::Fifo, true, None)
.expect("failed to create swapchain")
};
#[derive(Default, Debug, Clone)]
struct Vertex {
position: [f32; 2]
}
let vertex_buffer = {
vulkano::impl_vertex!(Vertex, position);
CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), [
Vertex { position: [-0.5, -0.5] },
Vertex { position: [ 0.5, -0.5] },
Vertex { position: [-0.5, 0.5] },
Vertex { position: [ 0.5, 0.5] },
Vertex { position: [-0.5, 0.5] },
Vertex { position: [ 0.5, -0.5] },
].iter().cloned()).unwrap()
};
// texture
let img_dim = img.dimensions();
let (autumn_texture, autumn_texture_future) = match ImmutableImage::from_iter(
img.as_rgba8().unwrap().pixels().map(|rgba| {
let bytes : [u8; 4] = [rgba[0], rgba[1], rgba[2], rgba[3]];
bytes
}),
Dimensions::Dim2d { width: img_dim.0, height: img_dim.1 },
Format::R8G8B8A8Unorm,
queue.clone()
) {
Ok(i) => i,
Err(err) => panic!("{:?}", err)
};
let sampler = Sampler::new(device.clone(), Filter::Linear, Filter::Linear,
MipmapMode::Nearest, SamplerAddressMode::Repeat, SamplerAddressMode::Repeat,
SamplerAddressMode::Repeat, 0.0, 1.0, 0.0, 0.0).unwrap();
mod square_vs {
vulkano_shaders::shader!{
ty: "vertex",
path: "./src/shader/square.vs.glsl"
}
}
mod square_fs {
vulkano_shaders::shader!{
ty: "fragment",
path: "./src/shader/square.fs.glsl"
}
}
let square_vs = square_vs::Shader::load(device.clone()).unwrap();
let square_fs = square_fs::Shader::load(device.clone()).unwrap();
let render_pass = Arc::new(vulkano::single_pass_renderpass!(
device.clone(),
attachments: {
color: {
load: Clear,
store: Store,
format: swapchain.format(),
samples: 1,
}
},
pass: {
color: [color],
depth_stencil: {}
}
).unwrap());
let pipeline = Arc::new(GraphicsPipeline::start()
.vertex_input_single_buffer::<Vertex>()
.vertex_shader(square_vs.main_entry_point(), ())
.triangle_list()
.viewports_dynamic_scissors_irrelevant(1)
.fragment_shader(square_fs.main_entry_point(), ())
.blend_alpha_blending()
.render_pass(Subpass::from(render_pass.clone(), 0).unwrap())
.build(device.clone())
.unwrap());
let set = Arc::new(PersistentDescriptorSet::start(pipeline.clone(), 0)
.add_sampled_image(autumn_texture.clone(), sampler.clone()).unwrap()
.build().unwrap()
);
let mut dynamic_state = DynamicState {
line_width: None,
viewports: None,
scissors: None,
compare_mask: None,
write_mask: None,
reference: None
};
let mut framebuffers = window_size_dependent_setup(&images, render_pass.clone(), &mut dynamic_state);
let mut recreate_swapchain = false;
let mut previous_frame_end = Box::new(sync::now(device.clone()).join(autumn_texture_future)) as Box<dyn GpuFuture>;
let t0 = time::SystemTime
|
{
let img = match image::open("./media/autumn.png") {
Ok(image) => image,
Err(err) => panic!("{:?}", err)
};
{ // stdout image info
println!("color {:?}", img.color());
println!("dimensions {:?}", img.dimensions());
// println!("first pixel {:?}", img.pixels().next().unwrap());
// println!("first pixel {:?}", img.pixels().next().map(|item| item.2).unwrap());
}
let instance = {
let inst_exts = vulkano_win::required_extensions();
Instance::new(None, &inst_exts, None).expect("failed to create instance")
};
|
identifier_body
|
main.rs
|
// }
// }
//
// impl<'a, T> Iterator for ImageDataIterator<'a, T> {
// type Item = [u8; 4];
// fn next(&mut self) -> Option<[u8; 4]> {
// return match self.originalIterator.next() {
// Some(pixel) => {
// let rgba = pixel.2;
// let data: [u8; 4] = [ rgba[0], rgba[1], rgba[2], rgba[3] ];
// return Some(data);
// },
// None => None
// }
// }
// }
//
// impl<'a, T> ExactSizeIterator for ImageDataIterator<'a, T> {
// fn len(&self) -> usize {
// return self.length;
// }
// }
fn main() {
let img = match image::open("./media/autumn.png") {
Ok(image) => image,
Err(err) => panic!("{:?}", err)
};
{ // stdout image info
println!("color {:?}", img.color());
println!("dimensions {:?}", img.dimensions());
// println!("first pixel {:?}", img.pixels().next().unwrap());
// println!("first pixel {:?}", img.pixels().next().map(|item| item.2).unwrap());
}
let instance = {
let inst_exts = vulkano_win::required_extensions();
Instance::new(None, &inst_exts, None).expect("failed to create instance")
};
//TODO: list devices, choose based on user input
for p in PhysicalDevice::enumerate(&instance) {
print!("{}", p.name());
println!(", driver version: {}", p.driver_version());
}
let physical = PhysicalDevice::enumerate(&instance)
.next()
.expect("no device available");
let queue_family = physical
.queue_families()
.find(|&q| q.supports_graphics())
.expect("couldn't find a graphical queue family");
let (device, mut queues) = {
let unraw_dev_exts = vulkano::device::DeviceExtensions {
khr_swapchain: true,
.. vulkano::device::DeviceExtensions::none()
};
let mut all_dev_exts = RawDeviceExtensions::from(&unraw_dev_exts);
all_dev_exts.insert(std::ffi::CString::new("VK_KHR_storage_buffer_storage_class").unwrap());
Device::new(
physical,
&Features::none(),
all_dev_exts,
[(queue_family, 0.5)].iter().cloned(),
)
.expect("failed to create device")
};
let queue = queues.next().unwrap();
// let particles = init_particles_buffer();
// let particles_buffer =
// CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), particles)
// .expect("failed to create buffer");
// let shader = cs::Shader::load(device.clone()).expect("failed to create shader module");
// let compute_pipeline = Arc::new(
// ComputePipeline::new(device.clone(), &shader.main_entry_point(), &())
// .expect("failed to create compute pipeline"),
// );
// let set = Arc::new(
// PersistentDescriptorSet::start(compute_pipeline.clone(), 0)
// .add_buffer(particles_buffer.clone())
// .unwrap()
// .build()
// .unwrap(),
// );
// let command_buffer = AutoCommandBufferBuilder::new(device.clone(), queue.family())
// .unwrap()
// .dispatch([PARTICLE_COUNT as u32 / 32, 1, 1], compute_pipeline.clone(), set.clone(), ())
// .unwrap()
// .build()
// .unwrap();
let mut events_loop = EventsLoop::new();
let surface = WindowBuilder::new().build_vk_surface(&events_loop, instance.clone()).unwrap();
let window = surface.window();
let (mut swapchain, images) = {
let caps = surface.capabilities(physical)
.expect("failed to get surface capabilities");
let usage = caps.supported_usage_flags;
let alpha = caps.supported_composite_alpha.iter().next().unwrap();
let format = caps.supported_formats[0].0;
caps.supported_formats.iter().for_each(|sth| println!("{:?}", sth));
let initial_dimensions = if let Some(dimensions) = window.get_inner_size() {
let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
[dimensions.0, dimensions.1]
} else {
return;
};
Swapchain::new(device.clone(), surface.clone(),
caps.min_image_count, format, initial_dimensions, 1, usage, &queue,
SurfaceTransform::Identity, alpha, PresentMode::Fifo, true, None)
.expect("failed to create swapchain")
};
#[derive(Default, Debug, Clone)]
struct Vertex {
position: [f32; 2]
}
let vertex_buffer = {
vulkano::impl_vertex!(Vertex, position);
CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), [
Vertex { position: [-0.5, -0.5] },
Vertex { position: [ 0.5, -0.5] },
Vertex { position: [-0.5, 0.5] },
Vertex { position: [ 0.5, 0.5] },
Vertex { position: [-0.5, 0.5] },
Vertex { position: [ 0.5, -0.5] },
].iter().cloned()).unwrap()
};
// texture
let img_dim = img.dimensions();
let (autumn_texture, autumn_texture_future) = match ImmutableImage::from_iter(
img.as_rgba8().unwrap().pixels().map(|rgba| {
let bytes : [u8; 4] = [rgba[0], rgba[1], rgba[2], rgba[3]];
bytes
}),
Dimensions::Dim2d { width: img_dim.0, height: img_dim.1 },
Format::R8G8B8A8Unorm,
queue.clone()
) {
Ok(i) => i,
Err(err) => panic!("{:?}", err)
};
let sampler = Sampler::new(device.clone(), Filter::Linear, Filter::Linear,
MipmapMode::Nearest, SamplerAddressMode::Repeat, SamplerAddressMode::Repeat,
SamplerAddressMode::Repeat, 0.0, 1.0, 0.0, 0.0).unwrap();
mod square_vs {
vulkano_shaders::shader!{
ty: "vertex",
path: "./src/shader/square.vs.glsl"
}
}
mod square_fs {
vulkano_shaders::shader!{
ty: "fragment",
path: "./src/shader/square.fs.glsl"
}
}
let square_vs = square_vs::Shader::load(device.clone()).unwrap();
let square_fs = square_fs::Shader::load(device.clone()).unwrap();
let render_pass = Arc::new(vulkano::single_pass_renderpass!(
device.clone(),
attachments: {
color: {
load: Clear,
store: Store,
format: swapchain.format(),
samples: 1,
}
},
pass: {
color: [color],
depth_stencil: {}
}
).unwrap());
let pipeline = Arc::new(GraphicsPipeline::start()
.vertex_input_single_buffer::<Vertex>()
.vertex_shader(square_vs.main_entry_point(), ())
.triangle_list()
.viewports_dynamic_scissors_irrelevant(1)
.fragment_shader(square_fs.main_entry_point(), ())
.blend_alpha_blending()
.render_pass(Subpass::from(render_pass.clone(), 0).unwrap())
.build(device.clone())
.unwrap());
let set = Arc::new(PersistentDescriptorSet::start(pipeline.clone(), 0)
.add_sampled_image(autumn_texture.clone(), sampler.clone()).unwrap()
.build().unwrap()
);
let mut dynamic_state = DynamicState {
line_width: None,
viewports: None,
scissors: None,
compare_mask: None,
write_mask: None,
reference: None
};
let mut framebuffers = window_size_dependent_setup(&images, render_pass.clone(), &mut dynamic_state);
let mut recreate_swapchain = false;
let mut previous_frame_end = Box::new(sync::now(device.clone()).join(autumn_texture_future)) as Box<dyn GpuFuture>;
let t0 = time::SystemTime::now();
let mut now = t0;
let mut then;
loop {
previous_frame_end.cleanup_finished();
if recreate_swapchain {
let dimensions = if let Some(dimensions) = window.get_inner_size()
|
{
let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
[dimensions.0, dimensions.1]
}
|
conditional_block
|
|
body.rs
|
} else if eccentricity == 1.0 {
return OrbitType::Parabolic;
} else {
return OrbitType::Hyperbolic;
}
}
}
#[derive(Debug)]
pub struct Body {
pub position: Vector3<f64>,
pub velocity: Vector3<f64>,
pub orbit_type: OrbitType,
}
/* Adds methods to Body struct */
impl Body {
pub fn new(position: Vector3<f64>, velocity: Vector3<f64>) -> Body {
// h and e are used for determining what kind of orbit the body is currently in
let h = position.cross(&velocity);
let e = ((velocity.cross(&h) / SOLARGM) - position.normalize()).norm();
Body {
position: position,
velocity: velocity,
orbit_type: OrbitType::new(e),
}
}
pub fn radial_velocity(&self) -> Vector3<f64> {
(self.velocity.dot(&self.position) / self.position.norm_squared()) * self.position
}
pub fn tangential_velocity(&self) -> Vector3<f64> {
self.omega().cross(&self.position)
}
pub fn true_anomaly(&self) -> f64 {
let e_vec = self.eccentricity_vector();
let posit = self.position.normalize();
let val = e_vec.dot(&posit) / (e_vec.norm() * posit.norm());
if posit.dot(&self.velocity.normalize()) < 0.0 {
return 2.0 * PI - val.acos();
} else {
return val.acos();
}
}
/* points from focus to perigee if I'm not mistaken */
pub fn eccentricity_vector(&self) -> Vector3<f64> {
let veloc = self.velocity;
let posit = self.position;
let h = self.angular_momentum();
(veloc.cross(&h) / SOLARGM) - posit.normalize()
}
pub fn angular_momentum(&self) -> Vector3<f64> {
self.position.cross(&self.velocity)
}
pub fn total_energy(&self) -> f64 {
let posit = self.position.norm();
let veloc = self.velocity.norm();
0.5 * veloc.powi(2) - (SOLARGM / posit)
}
pub fn omega(&self) -> Vector3<f64> {
self.angular_momentum() / self.position.norm_squared()
}
pub fn frame_rotation_rate(&self) -> f64 {
self.omega().norm()
}
pub fn position_at_angle(&self, angle: f64) -> Vector3<f64> {
let e = self.eccentricity();
let numer = self.angular_momentum().norm_squared() / SOLARGM;
let denom = 1_f64 + (e * (angle).cos());
let radius = numer / denom;
Vector3::new(radius, 0.0, 0.0)
}
pub fn velocity_at_angle(&self, angle: f64) -> Vector3<f64> {
let p = self.orbital_parameter();
let e = self.eccentricity();
let h = self.angular_momentum().norm_squared();
Vector3::new(
(h / p) * e * angle.sin(),
(h / p) * (1_f64 + e * angle.cos()),
0.0,
)
}
pub fn
|
(&self, angle: f64) -> (Vector3<f64>, Vector3<f64>) {
let r = self.position_at_angle(angle);
let v = self.velocity_at_angle(angle);
let tht = angle - self.true_anomaly();
let trans = Matrix3::from_rows(&[
Vector3::new(tht.cos(), -tht.sin(), 0.0).transpose(),
Vector3::new(tht.sin(), tht.cos(), 0.0).transpose(),
Vector3::new(0.0, 0.0, 1.0).transpose(),
]);
(trans * r, trans * v)
}
// Angle to other body, keep getting the wrong thing anyway, tried everything
pub fn angle_to(&self, other: &Body) -> f64 {
(self.position.dot(&other.position) / (self.position.norm() * other.position.norm())).acos()
}
/* Return a transformation matrix constructed from body's orbit in inertial frame */
pub fn make_frame(&self) -> Matrix3<f64> {
let e_r = self.position.normalize();
let e_h = self.angular_momentum().normalize();
let e_tht = e_h.cross(&e_r);
Matrix3::from_rows(&[e_r.transpose(), e_tht.transpose(), e_h.transpose()])
}
pub fn semi_major_axis(&self) -> f64 {
let ang_moment = self.angular_momentum().norm();
let e = self.eccentricity();
ang_moment.powi(2) / (SOLARGM * (1_f64 - e.powi(2)))
}
pub fn orbital_period(&self) -> f64 {
2_f64 * PI * (self.semi_major_axis().powi(3) / SOLARGM).sqrt()
}
pub fn orbital_parameter(&self) -> f64 {
let e = self.eccentricity();
self.semi_major_axis() * (1.0 - e.powi(2))
}
pub fn eccentric_anomaly(&self) -> f64 {
let e = self.eccentricity();
let theta = self.true_anomaly();
2.0 * ((theta / 2.0).tan() / ((1.0 + e) / (1.0 - e)).sqrt()).atan()
}
pub fn time_since_periapsis(&self) -> f64 {
let t_anom = self.true_anomaly();
let e_anom = self.true_to_eccentric(t_anom);
let a = self.semi_major_axis();
let e = self.eccentricity();
(a.powi(3) / SOLARGM).sqrt() * (e_anom - e * e_anom.sin())
}
pub fn eccentricity(&self) -> f64 {
self.eccentricity_vector().norm()
}
pub fn inclination(&self) -> f64 {
let h = self.angular_momentum();
(h[2] / h.norm()).acos() // h[2] is the z component of the vector
}
pub fn ascending_node(&self) -> Vector3<f64> {
let k = Vector3::new(0.0, 0.0, 1.0);
k.cross(&self.angular_momentum())
}
pub fn argument_of_periapsis(&self) -> f64 {
let n = self.ascending_node();
let e = self.eccentricity_vector();
let omega = (n.dot(&e) / (n.norm() * e.norm())).acos();
if e[2] < 0.0 {
PI2 - omega
} else {
omega
}
}
pub fn argument_of_ascending_node(&self) -> f64 {
let n = self.ascending_node();
let n_x = n[0];
let n_y = n[1];
if n_y >= 0.0 {
(n_x / n.norm()).acos()
} else {
PI2 - (n_x / n.norm()).acos()
}
}
pub fn true_to_eccentric(&self, t_anom: f64) -> f64 {
let a = self.semi_major_axis();
let e = self.eccentricity();
let b = a * (1.0 - e.powi(2)).sqrt();
let p = self.orbital_parameter();
let r = p / (1.0 + e * t_anom.cos());
let c = (a * e + r * t_anom.cos()) / a;
let s = (r / b) * t_anom.sin();
return s.atan2(c);
}
pub fn true_anomaly_at_time(&self, time: f64) -> f64 {
let t_peri = self.time_since_periapsis();
let m_anom = self.mean_anomaly((time * DAYTOSEC) + t_peri);
let angle = self.eccentric_from_mean(m_anom);
return PI2 - self.eccentric_to_true_anomaly(angle);
}
/// The eccentric anomaly at a certain time
pub fn eccentric_from_mean(&self, m_anom: f64) -> f64 {
match self.kepler(m_anom) {
Ok(num) => num,
Err(e) => {
eprintln!("{}: {}\n", "Invalid Orbit".red(), e);
return std::f64::NAN;
}
}
}
/// Return the eccentric anomaly using the appropriate Kepler equation
pub fn kepler(&self, m_anom: f64) -> Result<f64, &str> {
let e = self.eccentricity();
match &self.orbit_type {
OrbitType::Elliptic => Ok(elliptic_kepler(m_anom, e)),
|
position_and_velocity
|
identifier_name
|
body.rs
|
} else if eccentricity == 1.0 {
return OrbitType::Parabolic;
} else {
return OrbitType::Hyperbolic;
}
}
}
#[derive(Debug)]
pub struct Body {
pub position: Vector3<f64>,
pub velocity: Vector3<f64>,
pub orbit_type: OrbitType,
}
/* Adds methods to Body struct */
impl Body {
pub fn new(position: Vector3<f64>, velocity: Vector3<f64>) -> Body {
// h and e are used for determining what kind of orbit the body is currently in
let h = position.cross(&velocity);
let e = ((velocity.cross(&h) / SOLARGM) - position.normalize()).norm();
Body {
position: position,
velocity: velocity,
orbit_type: OrbitType::new(e),
}
}
pub fn radial_velocity(&self) -> Vector3<f64> {
(self.velocity.dot(&self.position) / self.position.norm_squared()) * self.position
}
pub fn tangential_velocity(&self) -> Vector3<f64> {
self.omega().cross(&self.position)
}
pub fn true_anomaly(&self) -> f64 {
let e_vec = self.eccentricity_vector();
let posit = self.position.normalize();
let val = e_vec.dot(&posit) / (e_vec.norm() * posit.norm());
if posit.dot(&self.velocity.normalize()) < 0.0 {
return 2.0 * PI - val.acos();
} else
|
}
/* points from focus to perigee if I'm not mistaken */
pub fn eccentricity_vector(&self) -> Vector3<f64> {
let veloc = self.velocity;
let posit = self.position;
let h = self.angular_momentum();
(veloc.cross(&h) / SOLARGM) - posit.normalize()
}
pub fn angular_momentum(&self) -> Vector3<f64> {
self.position.cross(&self.velocity)
}
pub fn total_energy(&self) -> f64 {
let posit = self.position.norm();
let veloc = self.velocity.norm();
0.5 * veloc.powi(2) - (SOLARGM / posit)
}
pub fn omega(&self) -> Vector3<f64> {
self.angular_momentum() / self.position.norm_squared()
}
pub fn frame_rotation_rate(&self) -> f64 {
self.omega().norm()
}
pub fn position_at_angle(&self, angle: f64) -> Vector3<f64> {
let e = self.eccentricity();
let numer = self.angular_momentum().norm_squared() / SOLARGM;
let denom = 1_f64 + (e * (angle).cos());
let radius = numer / denom;
Vector3::new(radius, 0.0, 0.0)
}
pub fn velocity_at_angle(&self, angle: f64) -> Vector3<f64> {
let p = self.orbital_parameter();
let e = self.eccentricity();
let h = self.angular_momentum().norm_squared();
Vector3::new(
(h / p) * e * angle.sin(),
(h / p) * (1_f64 + e * angle.cos()),
0.0,
)
}
pub fn position_and_velocity(&self, angle: f64) -> (Vector3<f64>, Vector3<f64>) {
let r = self.position_at_angle(angle);
let v = self.velocity_at_angle(angle);
let tht = angle - self.true_anomaly();
let trans = Matrix3::from_rows(&[
Vector3::new(tht.cos(), -tht.sin(), 0.0).transpose(),
Vector3::new(tht.sin(), tht.cos(), 0.0).transpose(),
Vector3::new(0.0, 0.0, 1.0).transpose(),
]);
(trans * r, trans * v)
}
// Angle to other body, keep getting the wrong thing anyway, tried everything
pub fn angle_to(&self, other: &Body) -> f64 {
(self.position.dot(&other.position) / (self.position.norm() * other.position.norm())).acos()
}
/* Return a transformation matrix constructed from body's orbit in inertial frame */
pub fn make_frame(&self) -> Matrix3<f64> {
let e_r = self.position.normalize();
let e_h = self.angular_momentum().normalize();
let e_tht = e_h.cross(&e_r);
Matrix3::from_rows(&[e_r.transpose(), e_tht.transpose(), e_h.transpose()])
}
pub fn semi_major_axis(&self) -> f64 {
let ang_moment = self.angular_momentum().norm();
let e = self.eccentricity();
ang_moment.powi(2) / (SOLARGM * (1_f64 - e.powi(2)))
}
pub fn orbital_period(&self) -> f64 {
2_f64 * PI * (self.semi_major_axis().powi(3) / SOLARGM).sqrt()
}
pub fn orbital_parameter(&self) -> f64 {
let e = self.eccentricity();
self.semi_major_axis() * (1.0 - e.powi(2))
}
pub fn eccentric_anomaly(&self) -> f64 {
let e = self.eccentricity();
let theta = self.true_anomaly();
2.0 * ((theta / 2.0).tan() / ((1.0 + e) / (1.0 - e)).sqrt()).atan()
}
pub fn time_since_periapsis(&self) -> f64 {
let t_anom = self.true_anomaly();
let e_anom = self.true_to_eccentric(t_anom);
let a = self.semi_major_axis();
let e = self.eccentricity();
(a.powi(3) / SOLARGM).sqrt() * (e_anom - e * e_anom.sin())
}
pub fn eccentricity(&self) -> f64 {
self.eccentricity_vector().norm()
}
pub fn inclination(&self) -> f64 {
let h = self.angular_momentum();
(h[2] / h.norm()).acos() // h[2] is the z component of the vector
}
pub fn ascending_node(&self) -> Vector3<f64> {
let k = Vector3::new(0.0, 0.0, 1.0);
k.cross(&self.angular_momentum())
}
pub fn argument_of_periapsis(&self) -> f64 {
let n = self.ascending_node();
let e = self.eccentricity_vector();
let omega = (n.dot(&e) / (n.norm() * e.norm())).acos();
if e[2] < 0.0 {
PI2 - omega
} else {
omega
}
}
pub fn argument_of_ascending_node(&self) -> f64 {
let n = self.ascending_node();
let n_x = n[0];
let n_y = n[1];
if n_y >= 0.0 {
(n_x / n.norm()).acos()
} else {
PI2 - (n_x / n.norm()).acos()
}
}
pub fn true_to_eccentric(&self, t_anom: f64) -> f64 {
let a = self.semi_major_axis();
let e = self.eccentricity();
let b = a * (1.0 - e.powi(2)).sqrt();
let p = self.orbital_parameter();
let r = p / (1.0 + e * t_anom.cos());
let c = (a * e + r * t_anom.cos()) / a;
let s = (r / b) * t_anom.sin();
return s.atan2(c);
}
pub fn true_anomaly_at_time(&self, time: f64) -> f64 {
let t_peri = self.time_since_periapsis();
let m_anom = self.mean_anomaly((time * DAYTOSEC) + t_peri);
let angle = self.eccentric_from_mean(m_anom);
return PI2 - self.eccentric_to_true_anomaly(angle);
}
/// The eccentric anomaly at a certain time
pub fn eccentric_from_mean(&self, m_anom: f64) -> f64 {
match self.kepler(m_anom) {
Ok(num) => num,
Err(e) => {
eprintln!("{}: {}\n", "Invalid Orbit".red(), e);
return std::f64::NAN;
}
}
}
/// Return the eccentric anomaly using the appropriate Kepler equation
pub fn kepler(&self, m_anom: f64) -> Result<f64, &str> {
let e = self.eccentricity();
match &self.orbit_type {
OrbitType::Elliptic => Ok(elliptic_kepler(m_anom, e
|
{
return val.acos();
}
|
conditional_block
|
body.rs
|
} else if eccentricity == 1.0 {
return OrbitType::Parabolic;
} else {
return OrbitType::Hyperbolic;
}
}
}
#[derive(Debug)]
pub struct Body {
pub position: Vector3<f64>,
|
impl Body {
pub fn new(position: Vector3<f64>, velocity: Vector3<f64>) -> Body {
// h and e are used for determining what kind of orbit the body is currently in
let h = position.cross(&velocity);
let e = ((velocity.cross(&h) / SOLARGM) - position.normalize()).norm();
Body {
position: position,
velocity: velocity,
orbit_type: OrbitType::new(e),
}
}
pub fn radial_velocity(&self) -> Vector3<f64> {
(self.velocity.dot(&self.position) / self.position.norm_squared()) * self.position
}
pub fn tangential_velocity(&self) -> Vector3<f64> {
self.omega().cross(&self.position)
}
pub fn true_anomaly(&self) -> f64 {
let e_vec = self.eccentricity_vector();
let posit = self.position.normalize();
let val = e_vec.dot(&posit) / (e_vec.norm() * posit.norm());
if posit.dot(&self.velocity.normalize()) < 0.0 {
return 2.0 * PI - val.acos();
} else {
return val.acos();
}
}
/* points from focus to perigee if I'm not mistaken */
pub fn eccentricity_vector(&self) -> Vector3<f64> {
let veloc = self.velocity;
let posit = self.position;
let h = self.angular_momentum();
(veloc.cross(&h) / SOLARGM) - posit.normalize()
}
pub fn angular_momentum(&self) -> Vector3<f64> {
self.position.cross(&self.velocity)
}
pub fn total_energy(&self) -> f64 {
let posit = self.position.norm();
let veloc = self.velocity.norm();
0.5 * veloc.powi(2) - (SOLARGM / posit)
}
pub fn omega(&self) -> Vector3<f64> {
self.angular_momentum() / self.position.norm_squared()
}
pub fn frame_rotation_rate(&self) -> f64 {
self.omega().norm()
}
pub fn position_at_angle(&self, angle: f64) -> Vector3<f64> {
let e = self.eccentricity();
let numer = self.angular_momentum().norm_squared() / SOLARGM;
let denom = 1_f64 + (e * (angle).cos());
let radius = numer / denom;
Vector3::new(radius, 0.0, 0.0)
}
pub fn velocity_at_angle(&self, angle: f64) -> Vector3<f64> {
let p = self.orbital_parameter();
let e = self.eccentricity();
let h = self.angular_momentum().norm_squared();
Vector3::new(
(h / p) * e * angle.sin(),
(h / p) * (1_f64 + e * angle.cos()),
0.0,
)
}
pub fn position_and_velocity(&self, angle: f64) -> (Vector3<f64>, Vector3<f64>) {
let r = self.position_at_angle(angle);
let v = self.velocity_at_angle(angle);
let tht = angle - self.true_anomaly();
let trans = Matrix3::from_rows(&[
Vector3::new(tht.cos(), -tht.sin(), 0.0).transpose(),
Vector3::new(tht.sin(), tht.cos(), 0.0).transpose(),
Vector3::new(0.0, 0.0, 1.0).transpose(),
]);
(trans * r, trans * v)
}
// Angle to other body, keep getting the wrong thing anyway, tried everything
pub fn angle_to(&self, other: &Body) -> f64 {
(self.position.dot(&other.position) / (self.position.norm() * other.position.norm())).acos()
}
/* Return a transformation matrix constructed from body's orbit in inertial frame */
pub fn make_frame(&self) -> Matrix3<f64> {
let e_r = self.position.normalize();
let e_h = self.angular_momentum().normalize();
let e_tht = e_h.cross(&e_r);
Matrix3::from_rows(&[e_r.transpose(), e_tht.transpose(), e_h.transpose()])
}
pub fn semi_major_axis(&self) -> f64 {
let ang_moment = self.angular_momentum().norm();
let e = self.eccentricity();
ang_moment.powi(2) / (SOLARGM * (1_f64 - e.powi(2)))
}
pub fn orbital_period(&self) -> f64 {
2_f64 * PI * (self.semi_major_axis().powi(3) / SOLARGM).sqrt()
}
pub fn orbital_parameter(&self) -> f64 {
let e = self.eccentricity();
self.semi_major_axis() * (1.0 - e.powi(2))
}
pub fn eccentric_anomaly(&self) -> f64 {
let e = self.eccentricity();
let theta = self.true_anomaly();
2.0 * ((theta / 2.0).tan() / ((1.0 + e) / (1.0 - e)).sqrt()).atan()
}
pub fn time_since_periapsis(&self) -> f64 {
let t_anom = self.true_anomaly();
let e_anom = self.true_to_eccentric(t_anom);
let a = self.semi_major_axis();
let e = self.eccentricity();
(a.powi(3) / SOLARGM).sqrt() * (e_anom - e * e_anom.sin())
}
pub fn eccentricity(&self) -> f64 {
self.eccentricity_vector().norm()
}
pub fn inclination(&self) -> f64 {
let h = self.angular_momentum();
(h[2] / h.norm()).acos() // h[2] is the z component of the vector
}
pub fn ascending_node(&self) -> Vector3<f64> {
let k = Vector3::new(0.0, 0.0, 1.0);
k.cross(&self.angular_momentum())
}
pub fn argument_of_periapsis(&self) -> f64 {
let n = self.ascending_node();
let e = self.eccentricity_vector();
let omega = (n.dot(&e) / (n.norm() * e.norm())).acos();
if e[2] < 0.0 {
PI2 - omega
} else {
omega
}
}
pub fn argument_of_ascending_node(&self) -> f64 {
let n = self.ascending_node();
let n_x = n[0];
let n_y = n[1];
if n_y >= 0.0 {
(n_x / n.norm()).acos()
} else {
PI2 - (n_x / n.norm()).acos()
}
}
pub fn true_to_eccentric(&self, t_anom: f64) -> f64 {
let a = self.semi_major_axis();
let e = self.eccentricity();
let b = a * (1.0 - e.powi(2)).sqrt();
let p = self.orbital_parameter();
let r = p / (1.0 + e * t_anom.cos());
let c = (a * e + r * t_anom.cos()) / a;
let s = (r / b) * t_anom.sin();
return s.atan2(c);
}
pub fn true_anomaly_at_time(&self, time: f64) -> f64 {
let t_peri = self.time_since_periapsis();
let m_anom = self.mean_anomaly((time * DAYTOSEC) + t_peri);
let angle = self.eccentric_from_mean(m_anom);
return PI2 - self.eccentric_to_true_anomaly(angle);
}
/// The eccentric anomaly at a certain time
pub fn eccentric_from_mean(&self, m_anom: f64) -> f64 {
match self.kepler(m_anom) {
Ok(num) => num,
Err(e) => {
eprintln!("{}: {}\n", "Invalid Orbit".red(), e);
return std::f64::NAN;
}
}
}
/// Return the eccentric anomaly using the appropriate Kepler equation
pub fn kepler(&self, m_anom: f64) -> Result<f64, &str> {
let e = self.eccentricity();
match &self.orbit_type {
OrbitType::Elliptic => Ok(elliptic_kepler(m_anom, e)),
|
pub velocity: Vector3<f64>,
pub orbit_type: OrbitType,
}
/* Adds methods to Body struct */
|
random_line_split
|
body.rs
|
} else if eccentricity == 1.0 {
return OrbitType::Parabolic;
} else {
return OrbitType::Hyperbolic;
}
}
}
#[derive(Debug)]
pub struct Body {
pub position: Vector3<f64>,
pub velocity: Vector3<f64>,
pub orbit_type: OrbitType,
}
/* Adds methods to Body struct */
impl Body {
pub fn new(position: Vector3<f64>, velocity: Vector3<f64>) -> Body {
// h and e are used for determining what kind of orbit the body is currently in
let h = position.cross(&velocity);
let e = ((velocity.cross(&h) / SOLARGM) - position.normalize()).norm();
Body {
position: position,
velocity: velocity,
orbit_type: OrbitType::new(e),
}
}
pub fn radial_velocity(&self) -> Vector3<f64> {
(self.velocity.dot(&self.position) / self.position.norm_squared()) * self.position
}
pub fn tangential_velocity(&self) -> Vector3<f64>
|
pub fn true_anomaly(&self) -> f64 {
let e_vec = self.eccentricity_vector();
let posit = self.position.normalize();
let val = e_vec.dot(&posit) / (e_vec.norm() * posit.norm());
if posit.dot(&self.velocity.normalize()) < 0.0 {
return 2.0 * PI - val.acos();
} else {
return val.acos();
}
}
/* points from focus to perigee if I'm not mistaken */
pub fn eccentricity_vector(&self) -> Vector3<f64> {
let veloc = self.velocity;
let posit = self.position;
let h = self.angular_momentum();
(veloc.cross(&h) / SOLARGM) - posit.normalize()
}
pub fn angular_momentum(&self) -> Vector3<f64> {
self.position.cross(&self.velocity)
}
pub fn total_energy(&self) -> f64 {
let posit = self.position.norm();
let veloc = self.velocity.norm();
0.5 * veloc.powi(2) - (SOLARGM / posit)
}
pub fn omega(&self) -> Vector3<f64> {
self.angular_momentum() / self.position.norm_squared()
}
pub fn frame_rotation_rate(&self) -> f64 {
self.omega().norm()
}
pub fn position_at_angle(&self, angle: f64) -> Vector3<f64> {
let e = self.eccentricity();
let numer = self.angular_momentum().norm_squared() / SOLARGM;
let denom = 1_f64 + (e * (angle).cos());
let radius = numer / denom;
Vector3::new(radius, 0.0, 0.0)
}
pub fn velocity_at_angle(&self, angle: f64) -> Vector3<f64> {
let p = self.orbital_parameter();
let e = self.eccentricity();
let h = self.angular_momentum().norm_squared();
Vector3::new(
(h / p) * e * angle.sin(),
(h / p) * (1_f64 + e * angle.cos()),
0.0,
)
}
pub fn position_and_velocity(&self, angle: f64) -> (Vector3<f64>, Vector3<f64>) {
let r = self.position_at_angle(angle);
let v = self.velocity_at_angle(angle);
let tht = angle - self.true_anomaly();
let trans = Matrix3::from_rows(&[
Vector3::new(tht.cos(), -tht.sin(), 0.0).transpose(),
Vector3::new(tht.sin(), tht.cos(), 0.0).transpose(),
Vector3::new(0.0, 0.0, 1.0).transpose(),
]);
(trans * r, trans * v)
}
// Angle to other body, keep getting the wrong thing anyway, tried everything
pub fn angle_to(&self, other: &Body) -> f64 {
(self.position.dot(&other.position) / (self.position.norm() * other.position.norm())).acos()
}
/* Return a transformation matrix constructed from body's orbit in inertial frame */
pub fn make_frame(&self) -> Matrix3<f64> {
let e_r = self.position.normalize();
let e_h = self.angular_momentum().normalize();
let e_tht = e_h.cross(&e_r);
Matrix3::from_rows(&[e_r.transpose(), e_tht.transpose(), e_h.transpose()])
}
pub fn semi_major_axis(&self) -> f64 {
let ang_moment = self.angular_momentum().norm();
let e = self.eccentricity();
ang_moment.powi(2) / (SOLARGM * (1_f64 - e.powi(2)))
}
pub fn orbital_period(&self) -> f64 {
2_f64 * PI * (self.semi_major_axis().powi(3) / SOLARGM).sqrt()
}
pub fn orbital_parameter(&self) -> f64 {
let e = self.eccentricity();
self.semi_major_axis() * (1.0 - e.powi(2))
}
pub fn eccentric_anomaly(&self) -> f64 {
let e = self.eccentricity();
let theta = self.true_anomaly();
2.0 * ((theta / 2.0).tan() / ((1.0 + e) / (1.0 - e)).sqrt()).atan()
}
pub fn time_since_periapsis(&self) -> f64 {
let t_anom = self.true_anomaly();
let e_anom = self.true_to_eccentric(t_anom);
let a = self.semi_major_axis();
let e = self.eccentricity();
(a.powi(3) / SOLARGM).sqrt() * (e_anom - e * e_anom.sin())
}
pub fn eccentricity(&self) -> f64 {
self.eccentricity_vector().norm()
}
pub fn inclination(&self) -> f64 {
let h = self.angular_momentum();
(h[2] / h.norm()).acos() // h[2] is the z component of the vector
}
pub fn ascending_node(&self) -> Vector3<f64> {
let k = Vector3::new(0.0, 0.0, 1.0);
k.cross(&self.angular_momentum())
}
pub fn argument_of_periapsis(&self) -> f64 {
let n = self.ascending_node();
let e = self.eccentricity_vector();
let omega = (n.dot(&e) / (n.norm() * e.norm())).acos();
if e[2] < 0.0 {
PI2 - omega
} else {
omega
}
}
pub fn argument_of_ascending_node(&self) -> f64 {
let n = self.ascending_node();
let n_x = n[0];
let n_y = n[1];
if n_y >= 0.0 {
(n_x / n.norm()).acos()
} else {
PI2 - (n_x / n.norm()).acos()
}
}
pub fn true_to_eccentric(&self, t_anom: f64) -> f64 {
let a = self.semi_major_axis();
let e = self.eccentricity();
let b = a * (1.0 - e.powi(2)).sqrt();
let p = self.orbital_parameter();
let r = p / (1.0 + e * t_anom.cos());
let c = (a * e + r * t_anom.cos()) / a;
let s = (r / b) * t_anom.sin();
return s.atan2(c);
}
pub fn true_anomaly_at_time(&self, time: f64) -> f64 {
let t_peri = self.time_since_periapsis();
let m_anom = self.mean_anomaly((time * DAYTOSEC) + t_peri);
let angle = self.eccentric_from_mean(m_anom);
return PI2 - self.eccentric_to_true_anomaly(angle);
}
/// The eccentric anomaly at a certain time
pub fn eccentric_from_mean(&self, m_anom: f64) -> f64 {
match self.kepler(m_anom) {
Ok(num) => num,
Err(e) => {
eprintln!("{}: {}\n", "Invalid Orbit".red(), e);
return std::f64::NAN;
}
}
}
/// Return the eccentric anomaly using the appropriate Kepler equation
pub fn kepler(&self, m_anom: f64) -> Result<f64, &str> {
let e = self.eccentricity();
match &self.orbit_type {
OrbitType::Elliptic => Ok(elliptic_kepler(m_anom, e
|
{
self.omega().cross(&self.position)
}
|
identifier_body
|
thermald.py
|
12)
dat.thermal.mem = read_tz(2)
dat.thermal.gpu = read_tz(16)
dat.thermal.bat = read_tz(29)
return dat
LEON = False
def setup_eon_fan():
global LEON
os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
bus = SMBus(7, force=True)
try:
bus.write_byte_data(0x21, 0x10, 0xf) # mask all interrupts
bus.write_byte_data(0x21, 0x03, 0x1) # set drive current and global interrupt disable
bus.write_byte_data(0x21, 0x02, 0x2) # needed?
bus.write_byte_data(0x21, 0x04, 0x4) # manual override source
except IOError:
print("LEON detected")
#os.system("echo 1 > /sys/devices/soc/6a00000.ssusb/power_supply/usb/usb_otg")
LEON = True
bus.close()
last_eon_fan_val = None
def set_eon_fan(val):
|
last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
# max fan speed only allowed if battery is hot
_BAT_TEMP_THERSHOLD = 45.
def handle_fan(max_cpu_temp, bat_temp, fan_speed):
new_speed_h = next(speed for speed, temp_h in zip(_FAN_SPEEDS, _TEMP_THRS_H) if temp_h > max_cpu_temp)
new_speed_l = next(speed for speed, temp_l in zip(_FAN_SPEEDS, _TEMP_THRS_L) if temp_l > max_cpu_temp)
if new_speed_h > fan_speed:
# update speed if using the high thresholds results in fan speed increment
fan_speed = new_speed_h
elif new_speed_l < fan_speed:
# update speed if using the low thresholds results in fan speed decrement
fan_speed = new_speed_l
if bat_temp < _BAT_TEMP_THERSHOLD:
# no max fan speed unless battery is hot
fan_speed = min(fan_speed, _FAN_SPEEDS[-2])
set_eon_fan(fan_speed//16384)
return fan_speed
def thermald_thread():
setup_eon_fan()
# prevent LEECO from undervoltage
BATT_PERC_OFF = 10 if LEON else 3
# now loop
thermal_sock = messaging.pub_sock(service_list['thermal'].port)
health_sock = messaging.sub_sock(service_list['health'].port)
location_sock = messaging.sub_sock(service_list['gpsLocation'].port)
fan_speed = 0
count = 0
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
thermal_status_prev = ThermalStatus.green
usb_power = True
usb_power_prev = True
health_sock.RCVTIMEO = int(1000 * 2 * DT_TRML) # 2x the expected health frequency
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
health_prev = None
current_connectivity_alert = None
params = Params()
while 1:
health = messaging.recv_sock(health_sock, wait=True)
location = messaging.recv_sock(location_sock)
location = location.gpsLocation if location else None
msg = read_thermal()
# clear car params when panda gets disconnected
if health is None and health_prev is not None:
params.panda_disconnect()
health_prev = health
if health is not None:
usb_power = health.health.usbPowerMode != log.HealthData.UsbPowerMode.client
# loggerd is gated based on free space
avail = get_available_percent() / 100.0
# thermal message now also includes free space
msg.thermal.freeSpace = avail
with open("/sys/class/power_supply/battery/capacity") as f:
msg.thermal.batteryPercent = int(f.read())
with open("/sys/class/power_supply/battery/status") as f:
msg.thermal.batteryStatus = f.read().strip()
with open("/sys/class/power_supply/battery/current_now") as f:
msg.thermal.batteryCurrent = int(f.read())
with open("/sys/class/power_supply/battery/voltage_now") as f:
msg.thermal.batteryVoltage = int(f.read())
with open("/sys/class/power_supply/usb/present") as f:
msg.thermal.usbOnline = bool(int(f.read()))
current_filter.update(msg.thermal.batteryCurrent / 1e6)
# TODO: add car battery voltage check
max_cpu_temp = max(msg.thermal.cpu0, msg.thermal.cpu1,
msg.thermal.cpu2, msg.thermal.cpu3) / 10.0
max_comp_temp = max(max_cpu_temp, msg.thermal.mem / 10., msg.thermal.gpu / 10.)
bat_temp = msg.thermal.bat/1000.
fan_speed = handle_fan(max_cpu_temp, bat_temp, fan_speed)
msg.thermal.fanSpeed = fan_speed
# thermal logic with hysterisis
if max_cpu_temp > 107. or bat_temp >= 63.:
# onroad not allowed
thermal_status = ThermalStatus.danger
elif max_comp_temp > 92.5 or bat_temp > 60.: # CPU throttling starts around ~90C
# hysteresis between onroad not allowed and engage not allowed
thermal_status = clip(thermal_status, ThermalStatus.red, ThermalStatus.danger)
elif max_cpu_temp > 87.5:
# hysteresis between engage not allowed and uploader not allowed
thermal_status = clip(thermal_status, ThermalStatus.yellow, ThermalStatus.red)
elif max_cpu_temp > 80.0:
# uploader not allowed
thermal_status = ThermalStatus.yellow
elif max_cpu_temp > 75.0:
# hysteresis between uploader not allowed and all good
thermal_status = clip(thermal_status, ThermalStatus.green, ThermalStatus.yellow)
else:
# all good
thermal_status = ThermalStatus.green
# **** starting logic ****
# Check for last update time and display alerts if needed
now = datetime.datetime.now()
try:
last_update = datetime.datetime.fromisoformat(params.get("LastUpdateTime", encoding='utf8'))
except (TypeError, ValueError):
last_update = now
dt = now - last_update
if dt.days > DAYS_NO_CONNECTIVITY_MAX:
if current_connectivity_alert != "expired":
current_connectivity_alert = "expired"
params.delete("Offroad_ConnectivityNeededPrompt")
params.put("Offroad_ConnectivityNeeded", json.dumps(OFFROAD_ALERTS["Offroad_ConnectivityNeeded"]))
elif dt.days > DAYS_NO_CONNECTIVITY_PROMPT:
remaining_time = str(DAYS_NO_CONNECTIVITY_MAX - dt.days)
if current_connectivity_alert != "prompt" + remaining_time:
current_connectivity_alert = "prompt" + remaining_time
alert_connectivity_prompt = copy.copy(OFFROAD_ALERTS["Offroad_ConnectivityNeededPrompt"])
alert_connectivity_prompt["text"] += remaining_time + " days."
params.delete("Offroad_ConnectivityNeeded")
params.put
|
global LEON, last_eon_fan_val
if last_eon_fan_val is None or last_eon_fan_val != val:
bus = SMBus(7, force=True)
if LEON:
try:
i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
bus.write_i2c_block_data(0x3d, 0, [i])
except IOError:
# tusb320
if val == 0:
bus.write_i2c_block_data(0x67, 0xa, [0])
else:
bus.write_i2c_block_data(0x67, 0xa, [0x20])
bus.write_i2c_block_data(0x67, 0x8, [(val-1)<<6])
else:
bus.write_byte_data(0x21, 0x04, 0x2)
bus.write_byte_data(0x21, 0x03, (val*2)+1)
bus.write_byte_data(0x21, 0x04, 0x4)
bus.close()
|
identifier_body
|
thermald.py
|
2 > /sys/module/dwc3_msm/parameters/otg_switch")
bus = SMBus(7, force=True)
try:
bus.write_byte_data(0x21, 0x10, 0xf) # mask all interrupts
bus.write_byte_data(0x21, 0x03, 0x1) # set drive current and global interrupt disable
bus.write_byte_data(0x21, 0x02, 0x2) # needed?
bus.write_byte_data(0x21, 0x04, 0x4) # manual override source
except IOError:
print("LEON detected")
#os.system("echo 1 > /sys/devices/soc/6a00000.ssusb/power_supply/usb/usb_otg")
LEON = True
bus.close()
last_eon_fan_val = None
def set_eon_fan(val):
global LEON, last_eon_fan_val
if last_eon_fan_val is None or last_eon_fan_val != val:
bus = SMBus(7, force=True)
if LEON:
try:
i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
bus.write_i2c_block_data(0x3d, 0, [i])
except IOError:
# tusb320
if val == 0:
bus.write_i2c_block_data(0x67, 0xa, [0])
else:
bus.write_i2c_block_data(0x67, 0xa, [0x20])
bus.write_i2c_block_data(0x67, 0x8, [(val-1)<<6])
else:
bus.write_byte_data(0x21, 0x04, 0x2)
bus.write_byte_data(0x21, 0x03, (val*2)+1)
bus.write_byte_data(0x21, 0x04, 0x4)
bus.close()
last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
# max fan speed only allowed if battery is hot
_BAT_TEMP_THERSHOLD = 45.
def handle_fan(max_cpu_temp, bat_temp, fan_speed):
new_speed_h = next(speed for speed, temp_h in zip(_FAN_SPEEDS, _TEMP_THRS_H) if temp_h > max_cpu_temp)
new_speed_l = next(speed for speed, temp_l in zip(_FAN_SPEEDS, _TEMP_THRS_L) if temp_l > max_cpu_temp)
if new_speed_h > fan_speed:
# update speed if using the high thresholds results in fan speed increment
fan_speed = new_speed_h
elif new_speed_l < fan_speed:
# update speed if using the low thresholds results in fan speed decrement
fan_speed = new_speed_l
if bat_temp < _BAT_TEMP_THERSHOLD:
# no max fan speed unless battery is hot
fan_speed = min(fan_speed, _FAN_SPEEDS[-2])
set_eon_fan(fan_speed//16384)
return fan_speed
def thermald_thread():
setup_eon_fan()
# prevent LEECO from undervoltage
BATT_PERC_OFF = 10 if LEON else 3
# now loop
thermal_sock = messaging.pub_sock(service_list['thermal'].port)
health_sock = messaging.sub_sock(service_list['health'].port)
location_sock = messaging.sub_sock(service_list['gpsLocation'].port)
fan_speed = 0
count = 0
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
thermal_status_prev = ThermalStatus.green
usb_power = True
usb_power_prev = True
health_sock.RCVTIMEO = int(1000 * 2 * DT_TRML) # 2x the expected health frequency
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
health_prev = None
current_connectivity_alert = None
params = Params()
while 1:
health = messaging.recv_sock(health_sock, wait=True)
location = messaging.recv_sock(location_sock)
location = location.gpsLocation if location else None
msg = read_thermal()
# clear car params when panda gets disconnected
if health is None and health_prev is not None:
params.panda_disconnect()
health_prev = health
if health is not None:
usb_power = health.health.usbPowerMode != log.HealthData.UsbPowerMode.client
# loggerd is gated based on free space
avail = get_available_percent() / 100.0
# thermal message now also includes free space
msg.thermal.freeSpace = avail
with open("/sys/class/power_supply/battery/capacity") as f:
msg.thermal.batteryPercent = int(f.read())
with open("/sys/class/power_supply/battery/status") as f:
msg.thermal.batteryStatus = f.read().strip()
with open("/sys/class/power_supply/battery/current_now") as f:
msg.thermal.batteryCurrent = int(f.read())
with open("/sys/class/power_supply/battery/voltage_now") as f:
msg.thermal.batteryVoltage = int(f.read())
with open("/sys/class/power_supply/usb/present") as f:
msg.thermal.usbOnline = bool(int(f.read()))
current_filter.update(msg.thermal.batteryCurrent / 1e6)
# TODO: add car battery voltage check
max_cpu_temp = max(msg.thermal.cpu0, msg.thermal.cpu1,
msg.thermal.cpu2, msg.thermal.cpu3) / 10.0
max_comp_temp = max(max_cpu_temp, msg.thermal.mem / 10., msg.thermal.gpu / 10.)
bat_temp = msg.thermal.bat/1000.
fan_speed = handle_fan(max_cpu_temp, bat_temp, fan_speed)
msg.thermal.fanSpeed = fan_speed
# thermal logic with hysterisis
if max_cpu_temp > 107. or bat_temp >= 63.:
# onroad not allowed
thermal_status = ThermalStatus.danger
elif max_comp_temp > 92.5 or bat_temp > 60.: # CPU throttling starts around ~90C
# hysteresis between onroad not allowed and engage not allowed
thermal_status = clip(thermal_status, ThermalStatus.red, ThermalStatus.danger)
elif max_cpu_temp > 87.5:
# hysteresis between engage not allowed and uploader not allowed
thermal_status = clip(thermal_status, ThermalStatus.yellow, ThermalStatus.red)
elif max_cpu_temp > 80.0:
# uploader not allowed
thermal_status = ThermalStatus.yellow
elif max_cpu_temp > 75.0:
# hysteresis between uploader not allowed and all good
thermal_status = clip(thermal_status, ThermalStatus.green, ThermalStatus.yellow)
else:
# all good
thermal_status = ThermalStatus.green
# **** starting logic ****
# Check for last update time and display alerts if needed
now = datetime.datetime.now()
try:
last_update = datetime.datetime.fromisoformat(params.get("LastUpdateTime", encoding='utf8'))
except (TypeError, ValueError):
last_update = now
dt = now - last_update
if dt.days > DAYS_NO_CONNECTIVITY_MAX:
if current_connectivity_alert != "expired":
current_connectivity_alert = "expired"
params.delete("Offroad_ConnectivityNeededPrompt")
params.put("Offroad_ConnectivityNeeded", json.dumps(OFFROAD_ALERTS["Offroad_ConnectivityNeeded"]))
elif dt.days > DAYS_NO_CONNECTIVITY_PROMPT:
remaining_time = str(DAYS_NO_CONNECTIVITY_MAX - dt.days)
if current_connectivity_alert != "prompt" + remaining_time:
current_connectivity_alert = "prompt" + remaining_time
alert_connectivity_prompt = copy.copy(OFFROAD_ALERTS["Offroad_ConnectivityNeededPrompt"])
alert_connectivity_prompt["text"] += remaining_time + " days."
params.delete("Offroad_ConnectivityNeeded")
params.put("Offroad_ConnectivityNeededPrompt", json.dumps(alert_connectivity_prompt))
elif current_connectivity_alert is not None:
current_connectivity_alert = None
params.delete("Offroad_ConnectivityNeeded")
|
params.delete("Offroad_ConnectivityNeededPrompt")
# start constellation of processes when the car starts
|
random_line_split
|
|
thermald.py
|
2)
dat.thermal.mem = read_tz(2)
dat.thermal.gpu = read_tz(16)
dat.thermal.bat = read_tz(29)
return dat
LEON = False
def setup_eon_fan():
global LEON
os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
bus = SMBus(7, force=True)
try:
bus.write_byte_data(0x21, 0x10, 0xf) # mask all interrupts
bus.write_byte_data(0x21, 0x03, 0x1) # set drive current and global interrupt disable
bus.write_byte_data(0x21, 0x02, 0x2) # needed?
bus.write_byte_data(0x21, 0x04, 0x4) # manual override source
except IOError:
print("LEON detected")
#os.system("echo 1 > /sys/devices/soc/6a00000.ssusb/power_supply/usb/usb_otg")
LEON = True
bus.close()
last_eon_fan_val = None
def
|
(val):
global LEON, last_eon_fan_val
if last_eon_fan_val is None or last_eon_fan_val != val:
bus = SMBus(7, force=True)
if LEON:
try:
i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
bus.write_i2c_block_data(0x3d, 0, [i])
except IOError:
# tusb320
if val == 0:
bus.write_i2c_block_data(0x67, 0xa, [0])
else:
bus.write_i2c_block_data(0x67, 0xa, [0x20])
bus.write_i2c_block_data(0x67, 0x8, [(val-1)<<6])
else:
bus.write_byte_data(0x21, 0x04, 0x2)
bus.write_byte_data(0x21, 0x03, (val*2)+1)
bus.write_byte_data(0x21, 0x04, 0x4)
bus.close()
last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
# max fan speed only allowed if battery is hot
_BAT_TEMP_THERSHOLD = 45.
def handle_fan(max_cpu_temp, bat_temp, fan_speed):
new_speed_h = next(speed for speed, temp_h in zip(_FAN_SPEEDS, _TEMP_THRS_H) if temp_h > max_cpu_temp)
new_speed_l = next(speed for speed, temp_l in zip(_FAN_SPEEDS, _TEMP_THRS_L) if temp_l > max_cpu_temp)
if new_speed_h > fan_speed:
# update speed if using the high thresholds results in fan speed increment
fan_speed = new_speed_h
elif new_speed_l < fan_speed:
# update speed if using the low thresholds results in fan speed decrement
fan_speed = new_speed_l
if bat_temp < _BAT_TEMP_THERSHOLD:
# no max fan speed unless battery is hot
fan_speed = min(fan_speed, _FAN_SPEEDS[-2])
set_eon_fan(fan_speed//16384)
return fan_speed
def thermald_thread():
setup_eon_fan()
# prevent LEECO from undervoltage
BATT_PERC_OFF = 10 if LEON else 3
# now loop
thermal_sock = messaging.pub_sock(service_list['thermal'].port)
health_sock = messaging.sub_sock(service_list['health'].port)
location_sock = messaging.sub_sock(service_list['gpsLocation'].port)
fan_speed = 0
count = 0
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
thermal_status_prev = ThermalStatus.green
usb_power = True
usb_power_prev = True
health_sock.RCVTIMEO = int(1000 * 2 * DT_TRML) # 2x the expected health frequency
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
health_prev = None
current_connectivity_alert = None
params = Params()
while 1:
health = messaging.recv_sock(health_sock, wait=True)
location = messaging.recv_sock(location_sock)
location = location.gpsLocation if location else None
msg = read_thermal()
# clear car params when panda gets disconnected
if health is None and health_prev is not None:
params.panda_disconnect()
health_prev = health
if health is not None:
usb_power = health.health.usbPowerMode != log.HealthData.UsbPowerMode.client
# loggerd is gated based on free space
avail = get_available_percent() / 100.0
# thermal message now also includes free space
msg.thermal.freeSpace = avail
with open("/sys/class/power_supply/battery/capacity") as f:
msg.thermal.batteryPercent = int(f.read())
with open("/sys/class/power_supply/battery/status") as f:
msg.thermal.batteryStatus = f.read().strip()
with open("/sys/class/power_supply/battery/current_now") as f:
msg.thermal.batteryCurrent = int(f.read())
with open("/sys/class/power_supply/battery/voltage_now") as f:
msg.thermal.batteryVoltage = int(f.read())
with open("/sys/class/power_supply/usb/present") as f:
msg.thermal.usbOnline = bool(int(f.read()))
current_filter.update(msg.thermal.batteryCurrent / 1e6)
# TODO: add car battery voltage check
max_cpu_temp = max(msg.thermal.cpu0, msg.thermal.cpu1,
msg.thermal.cpu2, msg.thermal.cpu3) / 10.0
max_comp_temp = max(max_cpu_temp, msg.thermal.mem / 10., msg.thermal.gpu / 10.)
bat_temp = msg.thermal.bat/1000.
fan_speed = handle_fan(max_cpu_temp, bat_temp, fan_speed)
msg.thermal.fanSpeed = fan_speed
# thermal logic with hysterisis
if max_cpu_temp > 107. or bat_temp >= 63.:
# onroad not allowed
thermal_status = ThermalStatus.danger
elif max_comp_temp > 92.5 or bat_temp > 60.: # CPU throttling starts around ~90C
# hysteresis between onroad not allowed and engage not allowed
thermal_status = clip(thermal_status, ThermalStatus.red, ThermalStatus.danger)
elif max_cpu_temp > 87.5:
# hysteresis between engage not allowed and uploader not allowed
thermal_status = clip(thermal_status, ThermalStatus.yellow, ThermalStatus.red)
elif max_cpu_temp > 80.0:
# uploader not allowed
thermal_status = ThermalStatus.yellow
elif max_cpu_temp > 75.0:
# hysteresis between uploader not allowed and all good
thermal_status = clip(thermal_status, ThermalStatus.green, ThermalStatus.yellow)
else:
# all good
thermal_status = ThermalStatus.green
# **** starting logic ****
# Check for last update time and display alerts if needed
now = datetime.datetime.now()
try:
last_update = datetime.datetime.fromisoformat(params.get("LastUpdateTime", encoding='utf8'))
except (TypeError, ValueError):
last_update = now
dt = now - last_update
if dt.days > DAYS_NO_CONNECTIVITY_MAX:
if current_connectivity_alert != "expired":
current_connectivity_alert = "expired"
params.delete("Offroad_ConnectivityNeededPrompt")
params.put("Offroad_ConnectivityNeeded", json.dumps(OFFROAD_ALERTS["Offroad_ConnectivityNeeded"]))
elif dt.days > DAYS_NO_CONNECTIVITY_PROMPT:
remaining_time = str(DAYS_NO_CONNECTIVITY_MAX - dt.days)
if current_connectivity_alert != "prompt" + remaining_time:
current_connectivity_alert = "prompt" + remaining_time
alert_connectivity_prompt = copy.copy(OFFROAD_ALERTS["Offroad_ConnectivityNeededPrompt"])
alert_connectivity_prompt["text"] += remaining_time + " days."
params.delete("Offroad_ConnectivityNeeded")
params.put
|
set_eon_fan
|
identifier_name
|
thermald.py
|
12)
dat.thermal.mem = read_tz(2)
dat.thermal.gpu = read_tz(16)
dat.thermal.bat = read_tz(29)
return dat
LEON = False
def setup_eon_fan():
global LEON
os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
bus = SMBus(7, force=True)
try:
bus.write_byte_data(0x21, 0x10, 0xf) # mask all interrupts
bus.write_byte_data(0x21, 0x03, 0x1) # set drive current and global interrupt disable
bus.write_byte_data(0x21, 0x02, 0x2) # needed?
bus.write_byte_data(0x21, 0x04, 0x4) # manual override source
except IOError:
print("LEON detected")
#os.system("echo 1 > /sys/devices/soc/6a00000.ssusb/power_supply/usb/usb_otg")
LEON = True
bus.close()
last_eon_fan_val = None
def set_eon_fan(val):
global LEON, last_eon_fan_val
if last_eon_fan_val is None or last_eon_fan_val != val:
bus = SMBus(7, force=True)
if LEON:
try:
i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
bus.write_i2c_block_data(0x3d, 0, [i])
except IOError:
# tusb320
if val == 0:
bus.write_i2c_block_data(0x67, 0xa, [0])
else:
bus.write_i2c_block_data(0x67, 0xa, [0x20])
bus.write_i2c_block_data(0x67, 0x8, [(val-1)<<6])
else:
bus.write_byte_data(0x21, 0x04, 0x2)
bus.write_byte_data(0x21, 0x03, (val*2)+1)
bus.write_byte_data(0x21, 0x04, 0x4)
bus.close()
last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
# max fan speed only allowed if battery is hot
_BAT_TEMP_THERSHOLD = 45.
def handle_fan(max_cpu_temp, bat_temp, fan_speed):
new_speed_h = next(speed for speed, temp_h in zip(_FAN_SPEEDS, _TEMP_THRS_H) if temp_h > max_cpu_temp)
new_speed_l = next(speed for speed, temp_l in zip(_FAN_SPEEDS, _TEMP_THRS_L) if temp_l > max_cpu_temp)
if new_speed_h > fan_speed:
# update speed if using the high thresholds results in fan speed increment
fan_speed = new_speed_h
elif new_speed_l < fan_speed:
# update speed if using the low thresholds results in fan speed decrement
fan_speed = new_speed_l
if bat_temp < _BAT_TEMP_THERSHOLD:
# no max fan speed unless battery is hot
fan_speed = min(fan_speed, _FAN_SPEEDS[-2])
set_eon_fan(fan_speed//16384)
return fan_speed
def thermald_thread():
setup_eon_fan()
# prevent LEECO from undervoltage
BATT_PERC_OFF = 10 if LEON else 3
# now loop
thermal_sock = messaging.pub_sock(service_list['thermal'].port)
health_sock = messaging.sub_sock(service_list['health'].port)
location_sock = messaging.sub_sock(service_list['gpsLocation'].port)
fan_speed = 0
count = 0
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
thermal_status_prev = ThermalStatus.green
usb_power = True
usb_power_prev = True
health_sock.RCVTIMEO = int(1000 * 2 * DT_TRML) # 2x the expected health frequency
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
health_prev = None
current_connectivity_alert = None
params = Params()
while 1:
|
with open("/sys/class/power_supply/battery/status") as f:
msg.thermal.batteryStatus = f.read().strip()
with open("/sys/class/power_supply/battery/current_now") as f:
msg.thermal.batteryCurrent = int(f.read())
with open("/sys/class/power_supply/battery/voltage_now") as f:
msg.thermal.batteryVoltage = int(f.read())
with open("/sys/class/power_supply/usb/present") as f:
msg.thermal.usbOnline = bool(int(f.read()))
current_filter.update(msg.thermal.batteryCurrent / 1e6)
# TODO: add car battery voltage check
max_cpu_temp = max(msg.thermal.cpu0, msg.thermal.cpu1,
msg.thermal.cpu2, msg.thermal.cpu3) / 10.0
max_comp_temp = max(max_cpu_temp, msg.thermal.mem / 10., msg.thermal.gpu / 10.)
bat_temp = msg.thermal.bat/1000.
fan_speed = handle_fan(max_cpu_temp, bat_temp, fan_speed)
msg.thermal.fanSpeed = fan_speed
# thermal logic with hysterisis
if max_cpu_temp > 107. or bat_temp >= 63.:
# onroad not allowed
thermal_status = ThermalStatus.danger
elif max_comp_temp > 92.5 or bat_temp > 60.: # CPU throttling starts around ~90C
# hysteresis between onroad not allowed and engage not allowed
thermal_status = clip(thermal_status, ThermalStatus.red, ThermalStatus.danger)
elif max_cpu_temp > 87.5:
# hysteresis between engage not allowed and uploader not allowed
thermal_status = clip(thermal_status, ThermalStatus.yellow, ThermalStatus.red)
elif max_cpu_temp > 80.0:
# uploader not allowed
thermal_status = ThermalStatus.yellow
elif max_cpu_temp > 75.0:
# hysteresis between uploader not allowed and all good
thermal_status = clip(thermal_status, ThermalStatus.green, ThermalStatus.yellow)
else:
# all good
thermal_status = ThermalStatus.green
# **** starting logic ****
# Check for last update time and display alerts if needed
now = datetime.datetime.now()
try:
last_update = datetime.datetime.fromisoformat(params.get("LastUpdateTime", encoding='utf8'))
except (TypeError, ValueError):
last_update = now
dt = now - last_update
if dt.days > DAYS_NO_CONNECTIVITY_MAX:
if current_connectivity_alert != "expired":
current_connectivity_alert = "expired"
params.delete("Offroad_ConnectivityNeededPrompt")
params.put("Offroad_ConnectivityNeeded", json.dumps(OFFROAD_ALERTS["Offroad_ConnectivityNeeded"]))
elif dt.days > DAYS_NO_CONNECTIVITY_PROMPT:
remaining_time = str(DAYS_NO_CONNECTIVITY_MAX - dt.days)
if current_connectivity_alert != "prompt" + remaining_time:
current_connectivity_alert = "prompt" + remaining_time
alert_connectivity_prompt = copy.copy(OFFROAD_ALERTS["Offroad_ConnectivityNeededPrompt"])
alert_connectivity_prompt["text"] += remaining_time + " days."
params.delete("Offroad_ConnectivityNeeded")
params.put
|
health = messaging.recv_sock(health_sock, wait=True)
location = messaging.recv_sock(location_sock)
location = location.gpsLocation if location else None
msg = read_thermal()
# clear car params when panda gets disconnected
if health is None and health_prev is not None:
params.panda_disconnect()
health_prev = health
if health is not None:
usb_power = health.health.usbPowerMode != log.HealthData.UsbPowerMode.client
# loggerd is gated based on free space
avail = get_available_percent() / 100.0
# thermal message now also includes free space
msg.thermal.freeSpace = avail
with open("/sys/class/power_supply/battery/capacity") as f:
msg.thermal.batteryPercent = int(f.read())
|
conditional_block
|
falcon.py
|
salt)
s0 = sub_zq(hashed, mul_zq(s1, self.h))
s0 = [(coef + (q >> 1)) % q - (q >> 1) for coef in s0]
# Check that the (s0, s1) is short
norm_sign = sum(coef ** 2 for coef in s0)
norm_sign += sum(coef ** 2 for coef in s1)
if norm_sign > self.signature_bound:
print("Squared norm of signature is too large:", norm_sign)
return False
# If all checks are passed, accept
return True
class SecretKey:
"""
This class contains methods for performing
secret key operations (and also public key operations) in Falcon.
One can:
- initialize a secret key for:
- n = 128, 256, 512, 1024,
- phi = x ** n + 1,
- q = 12 * 1024 + 1
- find a preimage t of a point c (both in ( Z[x] mod (Phi,q) )**2 ) such that t*B0 = c
- hash a message to a point of Z[x] mod (Phi,q)
- sign a message
- verify the signature of a message
"""
def __init__(self, n, polys=None):
"""Initialize a secret key."""
# Public parameters
self.n = n
self.sigma = Params[n]["sigma"]
self.sigmin = Params[n]["sigmin"]
self.signature_bound = Params[n]["sig_bound"]
self.sig_bytelen = Params[n]["sig_bytelen"]
# Compute NTRU polynomials f, g, F, G verifying fG - gF = q mod Phi
if polys is None:
self.f, self.g, self.F, self.G = ntru_gen(n)
else:
[f, g, F, G] = polys
assert all((len(poly) == n) for poly in [f, g, F, G])
self.f = f[:]
self.g = g[:]
self.F = F[:]
self.G = G[:]
# From f, g, F, G, compute the basis B0 of a NTRU lattice
# as well as its Gram matrix and their fft's.
B0 = [[self.g, neg(self.f)], [self.G, neg(self.F)]]
G0 = gram(B0)
self.B0_fft = [[fft(elt) for elt in row] for row in B0]
G0_fft = [[fft(elt) for elt in row] for row in G0]
self.T_fft = ffldl_fft(G0_fft)
# Normalize Falcon tree
normalize_tree(self.T_fft, self.sigma)
# The public key is a polynomial such that h*f = g mod (Phi,q)
self.h = div_zq(self.g, self.f)
def __repr__(self, verbose=False):
"""Print the object in readable form."""
rep = "Private key for n = {n}:\n\n".format(n=self.n)
rep += "f = {f}\n\n".format(f=self.f)
rep += "g = {g}\n\n".format(g=self.g)
rep += "F = {F}\n\n".format(F=self.F)
rep += "G = {G}\n\n".format(G=self.G)
if verbose:
rep += "\nFFT tree\n"
rep += print_tree(self.T_fft, pref="")
return rep
def hash_to_point(self, message, salt):
"""
Hash a message to a point in Z[x] mod(Phi, q).
Inspired by the Parse function from NewHope.
"""
n = self.n
if q > (1 << 16):
raise ValueError("The modulus is too large")
k = (1 << 16) // q
# Create a SHAKE object and hash the salt and message.
shake = SHAKE256.new()
shake.update(salt)
shake.update(message)
# Output pseudorandom bytes and map them to coefficients.
hashed = [0 for i in range(n)]
i = 0
j = 0
while i < n:
# Takes 2 bytes, transform them in a 16 bits integer
twobytes = shake.read(2)
elt = (twobytes[0] << 8) + twobytes[1] # This breaks in Python 2.x
# Implicit rejection sampling
if elt < k * q:
hashed[i] = elt % q
i += 1
j += 1
return hashed
def sample_preimage(self, point, seed=None):
"""
Sample a short vector s such that s[0] + s[1] * h = point.
"""
[[a, b], [c, d]] = self.B0_fft
# We compute a vector t_fft such that:
# (fft(point), fft(0)) * B0_fft = t_fft
# Because fft(0) = 0 and the inverse of B has a very specific form,
# we can do several optimizations.
'''
print("---------Inside sample_preimage----------")
print("point: ", point)
'''
point_fft = fft(point)
t0_fft = [(point_fft[i] * d[i]) / q for i in range(self.n)]
t1_fft = [(-point_fft[i] * b[i]) / q for i in range(self.n)]
t_fft = [t0_fft, t1_fft]
# We now compute v such that:
# v = z * B0 for an integral vector z
# v is close to (point, 0)
if seed is None:
# If no seed is defined, use urandom as the pseudo-random source.
z_fft = ffsampling_fft(t_fft, self.T_fft, self.sigmin, urandom)
else:
# If a seed is defined, initialize a ChaCha20 PRG
# that is used to generate pseudo-randomness.
chacha_prng = ChaCha20(seed)
z_fft = ffsampling_fft(t_fft, self.T_fft, self.sigmin,
chacha_prng.randombytes)
v0_fft = add_fft(mul_fft(z_fft[0], a), mul_fft(z_fft[1], c))
v1_fft = add_fft(mul_fft(z_fft[0], b), mul_fft(z_fft[1], d))
v0 = [int(round(elt)) for elt in ifft(v0_fft)]
v1 = [int(round(elt)) for elt in ifft(v1_fft)]
# The difference s = (point, 0) - v is such that:
# s is short
# s[0] + s[1] * h = point
s = [sub(point, v0), neg(v1)]
return s
def sign(self, message, randombytes=urandom):
"""
Sign a message. The message MUST be a byte string or byte array.
Optionally, one can select the source of (pseudo-)randomness used
(default: urandom).
"""
int_header = 0x30 + logn[self.n]
header = int_header.to_bytes(1, "little")
salt = randombytes(SALT_LEN)
hashed = self.hash_to_point(message, salt)
# We repeat the signing procedure until we find a signature that is
# short enough (both the Euclidean norm and the bytelength)
'''
print("---------Inside sign----------")
'''
while(1):
if (randombytes == urandom):
s = self.sample_preimage(hashed)
'''
print("s: ", s)
'''
else:
seed = randombytes(SEED_LEN)
s = self.sample_preimage(hashed, seed=seed)
norm_sign = sum(coef ** 2 for coef in s[0])
norm_sign += sum(coef ** 2 for coef in s[1])
# Check the Euclidean norm
if norm_sign <= self.signature_bound:
enc_s = compress(s[1], self.sig_bytelen - HEAD_LEN - SALT_LEN)
# Check that the encoding is valid (sometimes it fails)
if (enc_s is not False):
return header + salt + enc_s
'''
else:
print("-------------INVALID encoding---------------")
else:
print("-------------NOT within signature bound---------------")
'''
def verify(self, message, signature):
"""
Verify a signature.
"""
# Unpack the salt and the short polynomial s1
salt = signature[HEAD_LEN:HEAD_LEN + SALT_LEN]
enc_s = signature[HEAD_LEN + SALT_LEN:]
s1 = decompress(enc_s, self.sig_bytelen - HEAD_LEN - SALT_LEN, self.n)
# Check that the encoding is valid
if (s1 is False):
print("Invalid encoding")
return False
# Compute s0 and normalize its coefficients in (-q/2, q/2]
hashed = self.hash_to_point(message, salt)
|
random_line_split
|
||
falcon.py
|
if elt < k * q:
hashed[i] = elt % q
i += 1
j += 1
return hashed
def verify(self, message, signature):
"""
Verify a signature.
"""
# Unpack the salt and the short polynomial s1
salt = signature[HEAD_LEN:HEAD_LEN + SALT_LEN]
enc_s = signature[HEAD_LEN + SALT_LEN:]
s1 = decompress(enc_s, self.sig_bytelen - HEAD_LEN - SALT_LEN, self.n)
# Check that the encoding is valid
if (s1 is False):
print("Invalid encoding")
return False
# Compute s0 and normalize its coefficients in (-q/2, q/2]
hashed = self.hash_to_point(message, salt)
s0 = sub_zq(hashed, mul_zq(s1, self.h))
s0 = [(coef + (q >> 1)) % q - (q >> 1) for coef in s0]
# Check that the (s0, s1) is short
norm_sign = sum(coef ** 2 for coef in s0)
norm_sign += sum(coef ** 2 for coef in s1)
if norm_sign > self.signature_bound:
print("Squared norm of signature is too large:", norm_sign)
return False
# If all checks are passed, accept
return True
class SecretKey:
"""
This class contains methods for performing
secret key operations (and also public key operations) in Falcon.
One can:
- initialize a secret key for:
- n = 128, 256, 512, 1024,
- phi = x ** n + 1,
- q = 12 * 1024 + 1
- find a preimage t of a point c (both in ( Z[x] mod (Phi,q) )**2 ) such that t*B0 = c
- hash a message to a point of Z[x] mod (Phi,q)
- sign a message
- verify the signature of a message
"""
def __init__(self, n, polys=None):
"""Initialize a secret key."""
# Public parameters
self.n = n
self.sigma = Params[n]["sigma"]
self.sigmin = Params[n]["sigmin"]
self.signature_bound = Params[n]["sig_bound"]
self.sig_bytelen = Params[n]["sig_bytelen"]
# Compute NTRU polynomials f, g, F, G verifying fG - gF = q mod Phi
if polys is None:
self.f, self.g, self.F, self.G = ntru_gen(n)
else:
[f, g, F, G] = polys
assert all((len(poly) == n) for poly in [f, g, F, G])
self.f = f[:]
self.g = g[:]
self.F = F[:]
self.G = G[:]
# From f, g, F, G, compute the basis B0 of a NTRU lattice
# as well as its Gram matrix and their fft's.
B0 = [[self.g, neg(self.f)], [self.G, neg(self.F)]]
G0 = gram(B0)
self.B0_fft = [[fft(elt) for elt in row] for row in B0]
G0_fft = [[fft(elt) for elt in row] for row in G0]
self.T_fft = ffldl_fft(G0_fft)
# Normalize Falcon tree
normalize_tree(self.T_fft, self.sigma)
# The public key is a polynomial such that h*f = g mod (Phi,q)
self.h = div_zq(self.g, self.f)
def __repr__(self, verbose=False):
"""Print the object in readable form."""
rep = "Private key for n = {n}:\n\n".format(n=self.n)
rep += "f = {f}\n\n".format(f=self.f)
rep += "g = {g}\n\n".format(g=self.g)
rep += "F = {F}\n\n".format(F=self.F)
rep += "G = {G}\n\n".format(G=self.G)
if verbose:
rep += "\nFFT tree\n"
rep += print_tree(self.T_fft, pref="")
return rep
def hash_to_point(self, message, salt):
"""
Hash a message to a point in Z[x] mod(Phi, q).
Inspired by the Parse function from NewHope.
"""
n = self.n
if q > (1 << 16):
raise ValueError("The modulus is too large")
k = (1 << 16) // q
# Create a SHAKE object and hash the salt and message.
shake = SHAKE256.new()
shake.update(salt)
shake.update(message)
# Output pseudorandom bytes and map them to coefficients.
hashed = [0 for i in range(n)]
i = 0
j = 0
while i < n:
# Takes 2 bytes, transform them in a 16 bits integer
twobytes = shake.read(2)
elt = (twobytes[0] << 8) + twobytes[1] # This breaks in Python 2.x
# Implicit rejection sampling
if elt < k * q:
hashed[i] = elt % q
i += 1
j += 1
return hashed
def sample_preimage(self, point, seed=None):
"""
Sample a short vector s such that s[0] + s[1] * h = point.
"""
[[a, b], [c, d]] = self.B0_fft
# We compute a vector t_fft such that:
# (fft(point), fft(0)) * B0_fft = t_fft
# Because fft(0) = 0 and the inverse of B has a very specific form,
# we can do several optimizations.
'''
print("---------Inside sample_preimage----------")
print("point: ", point)
'''
point_fft = fft(point)
t0_fft = [(point_fft[i] * d[i]) / q for i in range(self.n)]
t1_fft = [(-point_fft[i] * b[i]) / q for i in range(self.n)]
t_fft = [t0_fft, t1_fft]
# We now compute v such that:
# v = z * B0 for an integral vector z
# v is close to (point, 0)
if seed is None:
# If no seed is defined, use urandom as the pseudo-random source.
z_fft = ffsampling_fft(t_fft, self.T_fft, self.sigmin, urandom)
else:
# If a seed is defined, initialize a ChaCha20 PRG
# that is used to generate pseudo-randomness.
chacha_prng = ChaCha20(seed)
z_fft = ffsampling_fft(t_fft, self.T_fft, self.sigmin,
chacha_prng.randombytes)
v0_fft = add_fft(mul_fft(z_fft[0], a), mul_fft(z_fft[1], c))
v1_fft = add_fft(mul_fft(z_fft[0], b), mul_fft(z_fft[1], d))
v0 = [int(round(elt)) for elt in ifft(v0_fft)]
v1 = [int(round(elt)) for elt in ifft(v1_fft)]
# The difference s = (point, 0) - v is such that:
# s is short
# s[0] + s[1] * h = point
s = [sub(point, v0), neg(v1)]
return s
def sign(self, message, randombytes=urandom):
"""
Sign a message. The message MUST be a byte string or byte array.
Optionally, one can select the source of (pseudo-)randomness used
(default: urandom).
"""
int_header = 0x30 + logn[self.n]
header = int_header.to_bytes(1, "little")
salt = randombytes(SALT_LEN)
hashed = self.hash_to_point(message, salt)
# We repeat the signing procedure until we find a signature that is
# short enough (both the Euclidean norm and the bytelength)
'''
print("---------Inside sign----------")
'''
while(1):
if (randombytes == urandom):
s = self.sample_preimage(hashed)
'''
print("s: ", s)
'''
else:
seed = randombytes(SEED_LEN)
s = self.sample_preimage(hashed, seed=seed)
norm_sign = sum(coef ** 2 for coef in s[0])
norm_sign += sum(coef ** 2 for coef in s[1])
# Check the Euclidean norm
if norm_sign <= self.signature_bound:
enc_s = compress(s[1], self.sig_bytelen - HEAD_LEN - SALT_LEN)
# Check that the encoding is valid (sometimes it fails)
if (enc_s is not False):
|
return header + salt + enc_s
|
conditional_block
|
|
falcon.py
|
"n": 16,
"sigma": 151.78340713845503,
"sigmin": 1.170254078853483,
"sig_bound": 892039,
"sig_bytelen": 63,
},
# FalconParam(32, 8)
32: {
"n": 32,
"sigma": 154.6747794602761,
"sigmin": 1.1925466358390344,
"sig_bound": 1852696,
"sig_bytelen": 82,
},
# FalconParam(64, 16)
64: {
"n": 64,
"sigma": 157.51308555044122,
"sigmin": 1.2144300507766141,
"sig_bound": 3842630,
"sig_bytelen": 122,
},
# FalconParam(128, 32)
128: {
"n": 128,
"sigma": 160.30114421975344,
"sigmin": 1.235926056771981,
"sig_bound": 7959734,
"sig_bytelen": 200,
},
# FalconParam(256, 64)
256: {
"n": 256,
"sigma": 163.04153322607107,
"sigmin": 1.2570545284063217,
"sig_bound": 16468416,
"sig_bytelen": 356,
},
# FalconParam(512, 128)
512: {
"n": 512,
"sigma": 165.7366171829776,
"sigmin": 1.2778336969128337,
"sig_bound": 34034726,
"sig_bytelen": 666,
},
# FalconParam(1024, 256)
1024: {
"n": 1024,
"sigma": 168.38857144654395,
"sigmin": 1.298280334344292,
"sig_bound": 70265242,
"sig_bytelen": 1280,
},
}
def print_tree(tree, pref=""):
"""
Display a LDL tree in a readable form.
Args:
T: a LDL tree
Format: coefficient or fft
"""
leaf = "|_____> "
top = "|_______"
son1 = "| "
son2 = " "
width = len(top)
a = ""
if len(tree) == 3:
if (pref == ""):
a += pref + str(tree[0]) + "\n"
else:
a += pref[:-width] + top + str(tree[0]) + "\n"
a += print_tree(tree[1], pref + son1)
a += print_tree(tree[2], pref + son2)
return a
else:
return (pref[:-width] + leaf + str(tree) + "\n")
def normalize_tree(tree, sigma):
"""
Normalize leaves of a LDL tree (from values ||b_i||**2 to sigma/||b_i||).
Args:
T: a LDL tree
sigma: a standard deviation
Format: coefficient or fft
"""
if len(tree) == 3:
normalize_tree(tree[1], sigma)
normalize_tree(tree[2], sigma)
else:
tree[0] = sigma / sqrt(tree[0].real)
tree[1] = 0
class PublicKey:
"""
This class contains methods for performing public key operations in Falcon.
"""
def __init__(self, sk=None, n=None, h=None):
"""Initialize a public key."""
if sk:
self.n = sk.n
self.h = sk.h
elif n and h:
self.n = n
self.h = h
else:
raise Exception("Public Key construction failed: insufficient/wrong arguments")
self.signature_bound = Params[self.n]["sig_bound"]
self.sig_bytelen = Params[self.n]["sig_bytelen"]
def __repr__(self):
"""Print the object in readable form."""
rep = "Public for n = {n}:\n\n".format(n=self.n)
rep += "h = {h}\n".format(h=self.h)
return rep
def hash_to_point(self, message, salt):
"""
Hash a message to a point in Z[x] mod(Phi, q).
Inspired by the Parse function from NewHope.
"""
n = self.n
if q > (1 << 16):
raise ValueError("The modulus is too large")
k = (1 << 16) // q
# Create a SHAKE object and hash the salt and message.
shake = SHAKE256.new()
shake.update(salt)
shake.update(message)
# Output pseudorandom bytes and map them to coefficients.
hashed = [0 for i in range(n)]
i = 0
j = 0
while i < n:
# Takes 2 bytes, transform them in a 16 bits integer
twobytes = shake.read(2)
elt = (twobytes[0] << 8) + twobytes[1] # This breaks in Python 2.x
# Implicit rejection sampling
if elt < k * q:
hashed[i] = elt % q
i += 1
j += 1
return hashed
def
|
(self, message, signature):
"""
Verify a signature.
"""
# Unpack the salt and the short polynomial s1
salt = signature[HEAD_LEN:HEAD_LEN + SALT_LEN]
enc_s = signature[HEAD_LEN + SALT_LEN:]
s1 = decompress(enc_s, self.sig_bytelen - HEAD_LEN - SALT_LEN, self.n)
# Check that the encoding is valid
if (s1 is False):
print("Invalid encoding")
return False
# Compute s0 and normalize its coefficients in (-q/2, q/2]
hashed = self.hash_to_point(message, salt)
s0 = sub_zq(hashed, mul_zq(s1, self.h))
s0 = [(coef + (q >> 1)) % q - (q >> 1) for coef in s0]
# Check that the (s0, s1) is short
norm_sign = sum(coef ** 2 for coef in s0)
norm_sign += sum(coef ** 2 for coef in s1)
if norm_sign > self.signature_bound:
print("Squared norm of signature is too large:", norm_sign)
return False
# If all checks are passed, accept
return True
class SecretKey:
"""
This class contains methods for performing
secret key operations (and also public key operations) in Falcon.
One can:
- initialize a secret key for:
- n = 128, 256, 512, 1024,
- phi = x ** n + 1,
- q = 12 * 1024 + 1
- find a preimage t of a point c (both in ( Z[x] mod (Phi,q) )**2 ) such that t*B0 = c
- hash a message to a point of Z[x] mod (Phi,q)
- sign a message
- verify the signature of a message
"""
def __init__(self, n, polys=None):
"""Initialize a secret key."""
# Public parameters
self.n = n
self.sigma = Params[n]["sigma"]
self.sigmin = Params[n]["sigmin"]
self.signature_bound = Params[n]["sig_bound"]
self.sig_bytelen = Params[n]["sig_bytelen"]
# Compute NTRU polynomials f, g, F, G verifying fG - gF = q mod Phi
if polys is None:
self.f, self.g, self.F, self.G = ntru_gen(n)
else:
[f, g, F, G] = polys
assert all((len(poly) == n) for poly in [
|
verify
|
identifier_name
|
falcon.py
|
"n": 16,
"sigma": 151.78340713845503,
"sigmin": 1.170254078853483,
"sig_bound": 892039,
"sig_bytelen": 63,
},
# FalconParam(32, 8)
32: {
"n": 32,
"sigma": 154.6747794602761,
"sigmin": 1.1925466358390344,
"sig_bound": 1852696,
"sig_bytelen": 82,
},
# FalconParam(64, 16)
64: {
"n": 64,
"sigma": 157.51308555044122,
"sigmin": 1.2144300507766141,
"sig_bound": 3842630,
"sig_bytelen": 122,
},
# FalconParam(128, 32)
128: {
"n": 128,
"sigma": 160.30114421975344,
"sigmin": 1.235926056771981,
"sig_bound": 7959734,
"sig_bytelen": 200,
},
# FalconParam(256, 64)
256: {
"n": 256,
"sigma": 163.04153322607107,
"sigmin": 1.2570545284063217,
"sig_bound": 16468416,
"sig_bytelen": 356,
},
# FalconParam(512, 128)
512: {
"n": 512,
"sigma": 165.7366171829776,
"sigmin": 1.2778336969128337,
"sig_bound": 34034726,
"sig_bytelen": 666,
},
# FalconParam(1024, 256)
1024: {
"n": 1024,
"sigma": 168.38857144654395,
"sigmin": 1.298280334344292,
"sig_bound": 70265242,
"sig_bytelen": 1280,
},
}
def print_tree(tree, pref=""):
"""
Display a LDL tree in a readable form.
Args:
T: a LDL tree
Format: coefficient or fft
"""
leaf = "|_____> "
top = "|_______"
son1 = "| "
son2 = " "
width = len(top)
a = ""
if len(tree) == 3:
if (pref == ""):
a += pref + str(tree[0]) + "\n"
else:
a += pref[:-width] + top + str(tree[0]) + "\n"
a += print_tree(tree[1], pref + son1)
a += print_tree(tree[2], pref + son2)
return a
else:
return (pref[:-width] + leaf + str(tree) + "\n")
def normalize_tree(tree, sigma):
"""
Normalize leaves of a LDL tree (from values ||b_i||**2 to sigma/||b_i||).
Args:
T: a LDL tree
sigma: a standard deviation
Format: coefficient or fft
"""
if len(tree) == 3:
normalize_tree(tree[1], sigma)
normalize_tree(tree[2], sigma)
else:
tree[0] = sigma / sqrt(tree[0].real)
tree[1] = 0
class PublicKey:
|
rep = "Public for n = {n}:\n\n".format(n=self.n)
rep += "h = {h}\n".format(h=self.h)
return rep
def hash_to_point(self, message, salt):
"""
Hash a message to a point in Z[x] mod(Phi, q).
Inspired by the Parse function from NewHope.
"""
n = self.n
if q > (1 << 16):
raise ValueError("The modulus is too large")
k = (1 << 16) // q
# Create a SHAKE object and hash the salt and message.
shake = SHAKE256.new()
shake.update(salt)
shake.update(message)
# Output pseudorandom bytes and map them to coefficients.
hashed = [0 for i in range(n)]
i = 0
j = 0
while i < n:
# Takes 2 bytes, transform them in a 16 bits integer
twobytes = shake.read(2)
elt = (twobytes[0] << 8) + twobytes[1] # This breaks in Python 2.x
# Implicit rejection sampling
if elt < k * q:
hashed[i] = elt % q
i += 1
j += 1
return hashed
def verify(self, message, signature):
"""
Verify a signature.
"""
# Unpack the salt and the short polynomial s1
salt = signature[HEAD_LEN:HEAD_LEN + SALT_LEN]
enc_s = signature[HEAD_LEN + SALT_LEN:]
s1 = decompress(enc_s, self.sig_bytelen - HEAD_LEN - SALT_LEN, self.n)
# Check that the encoding is valid
if (s1 is False):
print("Invalid encoding")
return False
# Compute s0 and normalize its coefficients in (-q/2, q/2]
hashed = self.hash_to_point(message, salt)
s0 = sub_zq(hashed, mul_zq(s1, self.h))
s0 = [(coef + (q >> 1)) % q - (q >> 1) for coef in s0]
# Check that the (s0, s1) is short
norm_sign = sum(coef ** 2 for coef in s0)
norm_sign += sum(coef ** 2 for coef in s1)
if norm_sign > self.signature_bound:
print("Squared norm of signature is too large:", norm_sign)
return False
# If all checks are passed, accept
return True
class SecretKey:
"""
This class contains methods for performing
secret key operations (and also public key operations) in Falcon.
One can:
- initialize a secret key for:
- n = 128, 256, 512, 1024,
- phi = x ** n + 1,
- q = 12 * 1024 + 1
- find a preimage t of a point c (both in ( Z[x] mod (Phi,q) )**2 ) such that t*B0 = c
- hash a message to a point of Z[x] mod (Phi,q)
- sign a message
- verify the signature of a message
"""
def __init__(self, n, polys=None):
"""Initialize a secret key."""
# Public parameters
self.n = n
self.sigma = Params[n]["sigma"]
self.sigmin = Params[n]["sigmin"]
self.signature_bound = Params[n]["sig_bound"]
self.sig_bytelen = Params[n]["sig_bytelen"]
# Compute NTRU polynomials f, g, F, G verifying fG - gF = q mod Phi
if polys is None:
self.f, self.g, self.F, self.G = ntru_gen(n)
else:
[f, g, F, G] = polys
assert all((len(poly) == n) for poly in [
|
"""
This class contains methods for performing public key operations in Falcon.
"""
def __init__(self, sk=None, n=None, h=None):
"""Initialize a public key."""
if sk:
self.n = sk.n
self.h = sk.h
elif n and h:
self.n = n
self.h = h
else:
raise Exception("Public Key construction failed: insufficient/wrong arguments")
self.signature_bound = Params[self.n]["sig_bound"]
self.sig_bytelen = Params[self.n]["sig_bytelen"]
def __repr__(self):
"""Print the object in readable form."""
|
identifier_body
|
cluster_feeder.go
|
State: m.ClusterState,
specClient: spec.NewSpecClient(m.PodLister),
selectorFetcher: m.SelectorFetcher,
memorySaveMode: m.MemorySaveMode,
controllerFetcher: m.ControllerFetcher,
recommenderName: m.RecommenderName,
}
}
// WatchEvictionEventsWithRetries watches new Events with reason=Evicted and passes them to the observer.
func WatchEvictionEventsWithRetries(kubeClient kube_client.Interface, observer oom.Observer, namespace string) {
go func() {
options := metav1.ListOptions{
FieldSelector: "reason=Evicted",
}
watchEvictionEventsOnce := func() {
watchInterface, err := kubeClient.CoreV1().Events(namespace).Watch(context.TODO(), options)
if err != nil {
klog.Errorf("Cannot initialize watching events. Reason %v", err)
return
}
watchEvictionEvents(watchInterface.ResultChan(), observer)
}
for {
watchEvictionEventsOnce()
// Wait between attempts, retrying too often breaks API server.
waitTime := wait.Jitter(evictionWatchRetryWait, evictionWatchJitterFactor)
klog.V(1).Infof("An attempt to watch eviction events finished. Waiting %v before the next one.", waitTime)
time.Sleep(waitTime)
}
}()
}
func watchEvictionEvents(evictedEventChan <-chan watch.Event, observer oom.Observer) {
for {
evictedEvent, ok := <-evictedEventChan
if !ok {
klog.V(3).Infof("Eviction event chan closed")
return
}
if evictedEvent.Type == watch.Added {
evictedEvent, ok := evictedEvent.Object.(*apiv1.Event)
if !ok {
continue
}
observer.OnEvent(evictedEvent)
}
}
}
// Creates clients watching pods: PodLister (listing only not terminated pods).
func newPodClients(kubeClient kube_client.Interface, resourceEventHandler cache.ResourceEventHandler, namespace string) v1lister.PodLister {
// We are interested in pods which are Running or Unknown (in case the pod is
// running but there are some transient errors we don't want to delete it from
// our model).
// We don't want to watch Pending pods because they didn't generate any usage
// yet.
// Succeeded and Failed failed pods don't generate any usage anymore but we
// don't necessarily want to immediately delete them.
selector := fields.ParseSelectorOrDie("status.phase!=" + string(apiv1.PodPending))
podListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "pods", namespace, selector)
indexer, controller := cache.NewIndexerInformer(
podListWatch,
&apiv1.Pod{},
time.Hour,
resourceEventHandler,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
podLister := v1lister.NewPodLister(indexer)
stopCh := make(chan struct{})
go controller.Run(stopCh)
return podLister
}
// NewPodListerAndOOMObserver creates pair of pod lister and OOM observer.
func NewPodListerAndOOMObserver(kubeClient kube_client.Interface, namespace string) (v1lister.PodLister, oom.Observer) {
oomObserver := oom.NewObserver()
podLister := newPodClients(kubeClient, oomObserver, namespace)
WatchEvictionEventsWithRetries(kubeClient, oomObserver, namespace)
return podLister, oomObserver
}
type clusterStateFeeder struct {
coreClient corev1.CoreV1Interface
specClient spec.SpecClient
metricsClient metrics.MetricsClient
oomChan <-chan oom.OomInfo
vpaCheckpointClient vpa_api.VerticalPodAutoscalerCheckpointsGetter
vpaLister vpa_lister.VerticalPodAutoscalerLister
clusterState *model.ClusterState
selectorFetcher target.VpaTargetSelectorFetcher
memorySaveMode bool
controllerFetcher controllerfetcher.ControllerFetcher
recommenderName string
}
func (feeder *clusterStateFeeder) InitFromHistoryProvider(historyProvider history.HistoryProvider) {
klog.V(3).Info("Initializing VPA from history provider")
clusterHistory, err := historyProvider.GetClusterHistory()
if err != nil {
klog.Errorf("Cannot get cluster history: %v", err)
}
for podID, podHistory := range clusterHistory {
klog.V(4).Infof("Adding pod %v with labels %v", podID, podHistory.LastLabels)
feeder.clusterState.AddOrUpdatePod(podID, podHistory.LastLabels, apiv1.PodUnknown)
for containerName, sampleList := range podHistory.Samples {
containerID := model.ContainerID{
PodID: podID,
ContainerName: containerName,
}
if err = feeder.clusterState.AddOrUpdateContainer(containerID, nil); err != nil {
klog.Warningf("Failed to add container %+v. Reason: %+v", containerID, err)
}
klog.V(4).Infof("Adding %d samples for container %v", len(sampleList), containerID)
for _, sample := range sampleList {
if err := feeder.clusterState.AddSample(
&model.ContainerUsageSampleWithKey{
ContainerUsageSample: sample,
Container: containerID,
}); err != nil {
klog.Warningf("Error adding metric sample for container %v: %v", containerID, err)
}
}
}
}
}
func (feeder *clusterStateFeeder) setVpaCheckpoint(checkpoint *vpa_types.VerticalPodAutoscalerCheckpoint) error {
vpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}
vpa, exists := feeder.clusterState.Vpas[vpaID]
if !exists {
return fmt.Errorf("cannot load checkpoint to missing VPA object %+v", vpaID)
}
cs := model.NewAggregateContainerState()
err := cs.LoadFromCheckpoint(&checkpoint.Status)
if err != nil {
return fmt.Errorf("cannot load checkpoint for VPA %+v. Reason: %v", vpa.ID, err)
}
vpa.ContainersInitialAggregateState[checkpoint.Spec.ContainerName] = cs
return nil
}
func (feeder *clusterStateFeeder) InitFromCheckpoints() {
klog.V(3).Info("Initializing VPA from checkpoints")
feeder.LoadVPAs()
namespaces := make(map[string]bool)
for _, v := range feeder.clusterState.Vpas {
namespaces[v.ID.Namespace] = true
}
for namespace := range namespaces {
klog.V(3).Infof("Fetching checkpoints from namespace %s", namespace)
checkpointList, err := feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil
|
for _, checkpoint := range checkpointList.Items {
klog.V(3).Infof("Loading VPA %s/%s checkpoint for %s", checkpoint.ObjectMeta.Namespace, checkpoint.Spec.VPAObjectName, checkpoint.Spec.ContainerName)
err = feeder.setVpaCheckpoint(&checkpoint)
if err != nil {
klog.Errorf("Error while loading checkpoint. Reason: %+v", err)
}
}
}
}
func (feeder *clusterStateFeeder) GarbageCollectCheckpoints() {
klog.V(3).Info("Starting garbage collection of checkpoints")
feeder.LoadVPAs()
namespaceList, err := feeder.coreClient.Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
klog.Errorf("Cannot list namespaces. Reason: %+v", err)
return
}
for _, namespaceItem := range namespaceList.Items {
namespace := namespaceItem.Name
checkpointList, err := feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
klog.Errorf("Cannot list VPA checkpoints from namespace %v. Reason: %+v", namespace, err)
}
for _, checkpoint := range checkpointList.Items {
vpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}
_, exists := feeder.clusterState.Vpas[vpaID]
if !exists {
err = feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).Delete(context.TODO(), checkpoint.Name, metav1.DeleteOptions{})
if err == nil {
klog.V(3).Infof("Orphaned VPA checkpoint cleanup - deleting %v/%v.", namespace, checkpoint.Name)
} else {
klog.Errorf("Cannot delete VPA checkpoint %v/%v. Reason: %+v", namespace, checkpoint.Name, err)
}
}
}
}
}
func implicitDefaultRecommender(selectors []*vpa_types.VerticalPodAutoscalerRecommenderSelector) bool {
return len(selectors) == 0
}
func selectsRecommender(selectors []*vpa_types.VerticalPodAutoscalerRecommenderSelector, name *
|
{
klog.Errorf("Cannot list VPA checkpoints from namespace %v. Reason: %+v", namespace, err)
}
|
conditional_block
|
cluster_feeder.go
|
clusterState: m.ClusterState,
specClient: spec.NewSpecClient(m.PodLister),
selectorFetcher: m.SelectorFetcher,
memorySaveMode: m.MemorySaveMode,
controllerFetcher: m.ControllerFetcher,
recommenderName: m.RecommenderName,
}
}
// WatchEvictionEventsWithRetries watches new Events with reason=Evicted and passes them to the observer.
func WatchEvictionEventsWithRetries(kubeClient kube_client.Interface, observer oom.Observer, namespace string) {
go func() {
options := metav1.ListOptions{
FieldSelector: "reason=Evicted",
}
watchEvictionEventsOnce := func() {
watchInterface, err := kubeClient.CoreV1().Events(namespace).Watch(context.TODO(), options)
if err != nil {
klog.Errorf("Cannot initialize watching events. Reason %v", err)
return
}
watchEvictionEvents(watchInterface.ResultChan(), observer)
}
for {
watchEvictionEventsOnce()
// Wait between attempts, retrying too often breaks API server.
waitTime := wait.Jitter(evictionWatchRetryWait, evictionWatchJitterFactor)
klog.V(1).Infof("An attempt to watch eviction events finished. Waiting %v before the next one.", waitTime)
time.Sleep(waitTime)
}
}()
}
func watchEvictionEvents(evictedEventChan <-chan watch.Event, observer oom.Observer) {
for {
evictedEvent, ok := <-evictedEventChan
if !ok {
klog.V(3).Infof("Eviction event chan closed")
return
}
if evictedEvent.Type == watch.Added {
evictedEvent, ok := evictedEvent.Object.(*apiv1.Event)
if !ok {
continue
}
observer.OnEvent(evictedEvent)
}
}
}
// Creates clients watching pods: PodLister (listing only not terminated pods).
func newPodClients(kubeClient kube_client.Interface, resourceEventHandler cache.ResourceEventHandler, namespace string) v1lister.PodLister {
// We are interested in pods which are Running or Unknown (in case the pod is
// running but there are some transient errors we don't want to delete it from
// our model).
// We don't want to watch Pending pods because they didn't generate any usage
// yet.
// Succeeded and Failed failed pods don't generate any usage anymore but we
// don't necessarily want to immediately delete them.
selector := fields.ParseSelectorOrDie("status.phase!=" + string(apiv1.PodPending))
podListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "pods", namespace, selector)
indexer, controller := cache.NewIndexerInformer(
podListWatch,
&apiv1.Pod{},
time.Hour,
resourceEventHandler,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
podLister := v1lister.NewPodLister(indexer)
stopCh := make(chan struct{})
go controller.Run(stopCh)
return podLister
}
// NewPodListerAndOOMObserver creates pair of pod lister and OOM observer.
func NewPodListerAndOOMObserver(kubeClient kube_client.Interface, namespace string) (v1lister.PodLister, oom.Observer) {
oomObserver := oom.NewObserver()
podLister := newPodClients(kubeClient, oomObserver, namespace)
WatchEvictionEventsWithRetries(kubeClient, oomObserver, namespace)
return podLister, oomObserver
}
type clusterStateFeeder struct {
coreClient corev1.CoreV1Interface
specClient spec.SpecClient
metricsClient metrics.MetricsClient
oomChan <-chan oom.OomInfo
vpaCheckpointClient vpa_api.VerticalPodAutoscalerCheckpointsGetter
vpaLister vpa_lister.VerticalPodAutoscalerLister
clusterState *model.ClusterState
selectorFetcher target.VpaTargetSelectorFetcher
memorySaveMode bool
controllerFetcher controllerfetcher.ControllerFetcher
recommenderName string
}
func (feeder *clusterStateFeeder) InitFromHistoryProvider(historyProvider history.HistoryProvider) {
klog.V(3).Info("Initializing VPA from history provider")
clusterHistory, err := historyProvider.GetClusterHistory()
if err != nil {
klog.Errorf("Cannot get cluster history: %v", err)
}
for podID, podHistory := range clusterHistory {
klog.V(4).Infof("Adding pod %v with labels %v", podID, podHistory.LastLabels)
feeder.clusterState.AddOrUpdatePod(podID, podHistory.LastLabels, apiv1.PodUnknown)
for containerName, sampleList := range podHistory.Samples {
containerID := model.ContainerID{
PodID: podID,
ContainerName: containerName,
}
if err = feeder.clusterState.AddOrUpdateContainer(containerID, nil); err != nil {
klog.Warningf("Failed to add container %+v. Reason: %+v", containerID, err)
}
klog.V(4).Infof("Adding %d samples for container %v", len(sampleList), containerID)
for _, sample := range sampleList {
if err := feeder.clusterState.AddSample(
&model.ContainerUsageSampleWithKey{
ContainerUsageSample: sample,
Container: containerID,
}); err != nil {
klog.Warningf("Error adding metric sample for container %v: %v", containerID, err)
}
}
}
}
}
func (feeder *clusterStateFeeder) setVpaCheckpoint(checkpoint *vpa_types.VerticalPodAutoscalerCheckpoint) error {
vpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}
vpa, exists := feeder.clusterState.Vpas[vpaID]
if !exists {
return fmt.Errorf("cannot load checkpoint to missing VPA object %+v", vpaID)
}
cs := model.NewAggregateContainerState()
err := cs.LoadFromCheckpoint(&checkpoint.Status)
if err != nil {
return fmt.Errorf("cannot load checkpoint for VPA %+v. Reason: %v", vpa.ID, err)
}
vpa.ContainersInitialAggregateState[checkpoint.Spec.ContainerName] = cs
return nil
}
func (feeder *clusterStateFeeder) InitFromCheckpoints()
|
klog.Errorf("Error while loading checkpoint. Reason: %+v", err)
}
}
}
}
func (feeder *clusterStateFeeder) GarbageCollectCheckpoints() {
klog.V(3).Info("Starting garbage collection of checkpoints")
feeder.LoadVPAs()
namespaceList, err := feeder.coreClient.Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
klog.Errorf("Cannot list namespaces. Reason: %+v", err)
return
}
for _, namespaceItem := range namespaceList.Items {
namespace := namespaceItem.Name
checkpointList, err := feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
klog.Errorf("Cannot list VPA checkpoints from namespace %v. Reason: %+v", namespace, err)
}
for _, checkpoint := range checkpointList.Items {
vpaID := model.VpaID{Namespace: checkpoint.Namespace, VpaName: checkpoint.Spec.VPAObjectName}
_, exists := feeder.clusterState.Vpas[vpaID]
if !exists {
err = feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).Delete(context.TODO(), checkpoint.Name, metav1.DeleteOptions{})
if err == nil {
klog.V(3).Infof("Orphaned VPA checkpoint cleanup - deleting %v/%v.", namespace, checkpoint.Name)
} else {
klog.Errorf("Cannot delete VPA checkpoint %v/%v. Reason: %+v", namespace, checkpoint.Name, err)
}
}
}
}
}
func implicitDefaultRecommender(selectors []*vpa_types.VerticalPodAutoscalerRecommenderSelector) bool {
return len(selectors) == 0
}
func selectsRecommender(selectors []*vpa_types.VerticalPodAutoscalerRecommenderSelector, name *
|
{
klog.V(3).Info("Initializing VPA from checkpoints")
feeder.LoadVPAs()
namespaces := make(map[string]bool)
for _, v := range feeder.clusterState.Vpas {
namespaces[v.ID.Namespace] = true
}
for namespace := range namespaces {
klog.V(3).Infof("Fetching checkpoints from namespace %s", namespace)
checkpointList, err := feeder.vpaCheckpointClient.VerticalPodAutoscalerCheckpoints(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
klog.Errorf("Cannot list VPA checkpoints from namespace %v. Reason: %+v", namespace, err)
}
for _, checkpoint := range checkpointList.Items {
klog.V(3).Infof("Loading VPA %s/%s checkpoint for %s", checkpoint.ObjectMeta.Namespace, checkpoint.Spec.VPAObjectName, checkpoint.Spec.ContainerName)
err = feeder.setVpaCheckpoint(&checkpoint)
if err != nil {
|
identifier_body
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.