Dataset columns (one row per fill-in-the-middle example):

    file_name   large_string   lengths 4 to 140
    prefix      large_string   lengths 0 to 39k
    suffix      large_string   lengths 0 to 36.1k
    middle      large_string   lengths 0 to 29.4k
    fim_type    large_string   4 classes
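Each row stores one fill-in-the-middle (FIM) example: concatenating prefix + middle + suffix reconstructs the original source file, and fim_type records how the middle span was chosen (the four classes seen in the rows below are random_line_split, identifier_name, conditional_block, and identifier_body). The sketch below shows one way such a row can be consumed; the sentinel tokens are illustrative assumptions, not stored in the dataset:

# Minimal sketch: reassemble a FIM record and render a PSM-style training
# string. The sentinel tokens are illustrative assumptions; the dataset
# itself stores only the raw prefix/suffix/middle strings.
def reassemble(record):
    """Reconstruct the original file from a FIM record."""
    return record["prefix"] + record["middle"] + record["suffix"]

def to_psm_string(record,
                  pre_tok="<|fim_prefix|>",
                  suf_tok="<|fim_suffix|>",
                  mid_tok="<|fim_middle|>"):
    """Render the record in prefix-suffix-middle order for training."""
    return (pre_tok + record["prefix"] +
            suf_tok + record["suffix"] +
            mid_tok + record["middle"])

record = {
    "file_name": "utils.py",
    "prefix": "def update",
    "middle": "(self, other):\n",
    "suffix": "    pass\n",
    "fim_type": "identifier_name",
}
assert reassemble(record) == "def update(self, other):\n    pass\n"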
file_name: utils.py
fim_type: random_line_split

prefix:

#! /usr/bin/env python

############################################################################
##  utils.py
##
##  Part of the DendroPy library for phylogenetic computing.
##
##  Copyright 2008 Jeet Sukumaran and Mark T. Holder.
##
##  This program is free software; you can redistribute it and/or modify
##  it under the terms of the GNU General Public License as published by
##  the Free Software Foundation; either version 3 of the License, or
##  (at your option) any later version.
##
##  This program is distributed in the hope that it will be useful,
##  but WITHOUT ANY WARRANTY; without even the implied warranty of
##  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##  GNU General Public License for more details.
##
##  You should have received a copy of the GNU General Public License along
##  with this program. If not, see <http://www.gnu.org/licenses/>.
##
############################################################################

"""
This module contains various utility functions and methods.
"""

import os
import fnmatch

class RecastingIterator(object):
    """
    Given an iterator I_X that returns objects of type X {x1, x2, x3, ...
    etc.}, and a function F(X) that takes objects of type X as an argument
    and returns objects of type Y, F(X) = Y, this class will act as an
    iterator that returns objects of type Y, I_Y, given an iterator on X.
    The 'function' given can be a class if the class's constructor takes
    a single argument of type X.
    """
    def __init__(self, source_iter, casting_func=None, filter_func=None):
        """
        `source_iter` is an iterator. `casting_func` is a function that
        takes objects returned by `source_iter` and returns other objects.
        `filter_func` is what will be applied to the SOURCE object to
        decide if it will be returned.
        """
        self.source_iter = iter(source_iter)
        self.casting_func = casting_func
        self.filter_func = filter_func

    def __iter__(self):
        "Returns self."
        return self

    def next(self):
        """
        Gets next item from the underlying iterator, and if filter_func
        returns True on it, applies casting_func to it and returns it.
        """
        while True:
            source_next = self.source_iter.next()
            if self.filter_func is None or self.filter_func(source_next):
                if self.casting_func is not None:
                    return self.casting_func(source_next)
                else:
                    return source_next

class OrderedCaselessDict(dict):
    """
    Inherits from dict. Maintains two sets of keys: the first set consists
    of the keys belonging to dict, which actually accesses the container
    items; these are always cast to lower() on the way in, ensuring that
    lookups are case-insensitive. The second set of keys is maintained
    locally in a list, preserving the order in which keys were added.
    This second set is not cast to lower(), so client code can always
    recover the original 'canonical' casing of the keys.

    ONLY TAKES STRING KEYS!
    """

    def __init__(self, other=None):
        """
        Creates the local set of keys, and then initializes self with
        arguments, if any, by using the superclass methods, keeping
        the ordered keys in sync.
        """
        super(OrderedCaselessDict, self).__init__()
        self.__ordered_keys = []
        if other is not None:
            if isinstance(other, dict):
                for key, val in other.items():
                    if key.lower() not in self:
                        self.__ordered_keys.append(str(key))
                    super(OrderedCaselessDict, self).__setitem__(key.lower(), val)
            else:
                for key, val in other:
                    if key.lower() not in self:
                        self.__ordered_keys.append(str(key))
                    super(OrderedCaselessDict, self).__setitem__(key.lower(), val)

    def copy(self):
        "Returns a shallow copy of self."
        return self.__class__(self)

    def iterkeys(self):
        "Returns an iterator over self's ordered keys."
        return iter(self.__ordered_keys)

    def itervalues(self):
        "Returns an iterator over self's values."
        for key in self.iterkeys():
            yield self[key.lower()]

    def iteritems(self):
        "Returns an iterator over self's (key, value) pairs."
        for key in self.iterkeys():
            yield (key, self[key.lower()])

    def items(self):
        "Returns key, value pairs in key-order."
        return [(key, self[key]) for key in self.iterkeys()]

    def values(self):
        "Returns a list of values in key order."
        return [v for v in self.itervalues()]

    def __iter__(self):
        "Returns an iterator over self's ordered keys."
        return self.iterkeys()

    def __repr__(self):
        "Returns a representation of self."
        return "%s([%s])" % (self.__class__.__name__,
                             ', '.join(["(%r, %r)" % item for item in self.iteritems()]))

    def __str__(self):
        "Returns a string representation of self."
        return "{%s}" % (', '.join(["(%r, %r)" % item for item in self.iteritems()]))

middle:

    def __getitem__(self, key):
        "Gets an item using a case-insensitive key."
        return super(OrderedCaselessDict, self).__getitem__(key.lower())

    def __setitem__(self, key, value):

suffix:

        "Sets an item using a case-insensitive key."
        if key.lower() not in self:
            self.__ordered_keys.append(str(key))
        super(OrderedCaselessDict, self).__setitem__(key.lower(), value)

    def __delitem__(self, key):
        "Remove item with specified key."
        del(self.__ordered_keys[self.index(key)])
        super(OrderedCaselessDict, self).__delitem__(key.lower())

    def __contains__(self, key):
        "Returns true if has key, regardless of case."
        return super(OrderedCaselessDict, self).__contains__(key.lower())

    def pop(self, key, alt_val=None):
        "a.pop(k[, x]): a[k] if k in a, else x (and remove k)"
        if key.lower() in self:
            val = self[key]
            self.__delitem__(key.lower())
            return val
        else:
            return alt_val

    def popitem(self):
        "a.popitem(): removes and returns the last (key, value) pair"
        key = self.__ordered_keys[-1]
        item = (key, self[key.lower()])
        self.__delitem__(key)
        return item

    def caseless_keys(self):
        "Returns a copy of the ordered list of keys, lower-cased."
        return [k.lower() for k in self.__ordered_keys]

    def index(self, key):
        """
        Return the index of (caseless) key.
        Raise KeyError if not found.
        """
        count = 0
        for k in self.__ordered_keys:
            if k.lower() == key.lower():
                return count
            count = count + 1
        raise KeyError(key)

    def keys(self):
        "Returns a copy of the ordered list of keys."
        return list(self.__ordered_keys)

    def clear(self):
        "Deletes all items from the dictionary."
        self.__ordered_keys = []
        super(OrderedCaselessDict, self).clear()

    def has_key(self, key):
        "Returns true if has key, regardless of case."
        return key.lower() in self

    def get(self, key, def_val=None):
        "Gets an item by its key, returning default if key not present."
        return super(OrderedCaselessDict, self).get(key.lower(), def_val)

    def setdefault(self, key, def_val=None):
        "Sets the default value to return if key not present."
        return super(OrderedCaselessDict, self).setdefault(key.lower(), def_val)

    def update(self, other):
        """
        Updates (and overwrites) key/value pairs:
            k = { 'a':'A', 'b':'B', 'c':'C'}
            q = { 'c':'C', 'd':'D', 'f':'F'}
            k.update(q)
            -> {'a': 'A', 'c': 'C', 'b': 'B', 'd': 'D', 'f': 'F'}
        """
        for key, val in other.items():
            if key.lower() not in self:
                self.__ordered_keys.append(str(key))
            super(OrderedCaselessDict, self).__setitem__(key.lower(), val)

    def fromkeys(self, iterable, value=None):
        "Creates a new dictionary with keys from seq and values set to value."
        ocd = OrderedCaselessDict()
        for key in iterable:
            if key.lower() not in ocd:
                ocd[key] = value
        return ocd

class NormalizedBitmaskDict(dict):
    """
    Keys, {K_i}, are longs. `mask` must be provided before elements can be
    added to or removed from the dictionary. All keys are normalized such
    that the least-significant bit is '0': if a key's least-significant
    bit is '0', it is added as-is; otherwise it is complemented by
    XOR'ing it with `mask` (i.e., (~key) & mask).
    """

    def normalize(key, mask):
        if key & 1:
            return (~key) & mask
        return key
    normalize = staticmethod(normalize)

    def __init__(self, other=None, mask=None):
        "Assigns mask, and then populates from `other`, if given."
        dict.__init__(self)
        self.mask = mask
        if other is not None:
            if isinstance(other, NormalizedBitmaskDict):
                self.mask = other.mask
            if isinstance(other, dict):
                for key, val in other.items():
                    self[key] = val

    def normalize_key(self, key):
        return NormalizedBitmaskDict.normalize(key, self.mask)

    def __setitem__(self, key, value):
        "Sets item with normalized key."
        dict.__setitem__(self, self.normalize_key(key), value)

    def __getitem__(self, key):
        "Gets an item by its normalized key."
        key = self.normalize_key(key)
        return dict.__getitem__(self, key)

    def __delitem__(self, key):
        "Remove item with normalized key."
        key = self.normalize_key(key)
        dict.__delitem__(self, key)

    def __contains__(self, key):
        "Returns true if has normalized key."
        key = self.normalize_key(key)
        return dict.__contains__(self, key)

    def pop(self, key, alt_val=None):
        "a.pop(k[, x]): a[k] if k in a, else x (and remove k)"
        key = self.normalize_key(key)
        return dict.pop(self, key, alt_val)

    def get(self, key, def_val=None):
        "Gets an item by its key, returning default if key not present."
        key = self.normalize_key(key)
        return dict.get(self, key, def_val)

    def setdefault(self, key, def_val=None):
        "Sets the default value to return if key not present."
        return dict.setdefault(self, self.normalize_key(key), def_val)

    def update(self, other):
        """
        Updates (and overwrites) key/value pairs, normalizing incoming keys:
            k.update(q)
        """
        for key, val in other.items():
            dict.__setitem__(self, self.normalize_key(key), val)

    def fromkeys(self, iterable, value=None):
        "Creates a new dictionary with keys from seq and values set to value."
        raise NotImplementedError

def pretty_print_timedelta(timedelta):
    # Assumes the timedelta is under one day: str() of a longer timedelta
    # carries a "N days," prefix that this split does not handle.
    hours, mins, secs = str(timedelta).split(":")
    return "%s hour(s), %s minute(s), %s second(s)" % (hours, mins, secs)

def glob_match(pathname, pattern, respect_case=False, complement=False):
    if respect_case:
        matched = fnmatch.fnmatchcase(pathname, pattern)
    else:
        matched = fnmatch.fnmatch(pathname.lower(), pattern.lower())
    if complement:
        return not matched
    return matched

def find_files(top, recursive=True,
               filename_filter=None,
               dirname_filter=None,
               excludes=None,
               complement=False,
               respect_case=False,
               expand_vars=True,
               include_hidden=True):
    if expand_vars:
        top = os.path.abspath(os.path.expandvars(os.path.expanduser(top)))
    if excludes is None:
        excludes = []
    filepaths = []
    if os.path.exists(top):
        for fpath in os.listdir(top):
            abspath = os.path.abspath(os.path.join(top, fpath))
            if os.path.isfile(abspath):
                if (include_hidden or not fpath.startswith('.')) \
                        and (not filename_filter
                             or glob_match(fpath, filename_filter, respect_case, complement)):
                    to_exclude = False
                    for e in excludes:
                        if glob_match(fpath, e, respect_case):
                            to_exclude = True
                    if not to_exclude:
                        filepaths.append(abspath)
            elif os.path.isdir(abspath):
                if recursive:
                    if (include_hidden or not fpath.startswith('.')) \
                            and (not dirname_filter
                                 or glob_match(fpath, dirname_filter, respect_case, complement)):
                        filepaths.extend(find_files(abspath,
                                                    recursive=recursive,
                                                    filename_filter=filename_filter,
                                                    dirname_filter=dirname_filter,
                                                    excludes=excludes,
                                                    complement=complement,
                                                    respect_case=respect_case,
                                                    expand_vars=False))
    filepaths.sort()
    return filepaths

def sample_mean_var_ml(x):
    """Returns the sample mean and variance of x using the ML estimator of the
    sample variance.
    """
    n = len(x)
    assert n > 0
    if n == 1:
        return x[0], 0
    s = 0.0
    ss = 0.0
    for i in x:
        s += i
        ss += i*i
    mu = s/n
    var = (ss/n) - mu*mu
    return mu, var

def sample_mean_var_unbiased(x):
    """Returns the sample mean and variance of x using the common unbiased
    estimator of the sample variance.
    """
    n = len(x)
    assert n > 0
    if n == 1:
        return x[0], float('Inf')
    mean, v = sample_mean_var_ml(x)
    var = v*n/(n-1)
    return mean, var

fim_type: random_line_split
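The OrderedCaselessDict defined in this record stores values under lower-cased keys while keeping the original casing in a side list. A short behavior sketch, assuming the class above is in scope (the file is Python 2 era code, but this fragment runs under Python 2 and 3 alike):

# Behavior sketch for OrderedCaselessDict, assuming the class above is
# importable.
d = OrderedCaselessDict()
d['Alpha'] = 1
d['BETA'] = 2

assert d['alpha'] == 1                 # lookup is case-insensitive
assert d['beta'] == 2
assert 'ALPHA' in d
assert d.keys() == ['Alpha', 'BETA']   # original casing and order preserved

d['alpha'] = 10                        # overwrites the same caseless slot
assert d['Alpha'] == 10
assert d.keys() == ['Alpha', 'BETA']   # no duplicate key appended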
file_name: utils.py
fim_type: identifier_name

middle:

update

prefix / suffix: the same utils.py source shown in the first record, split at the name of OrderedCaselessDict's update method: the prefix ends immediately after "def " and the suffix resumes at "(self, other):", so only the identifier "update" is left to fill in.
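The identifier to be filled in here names OrderedCaselessDict.update, whose docstring carries a small k.update(q) example. A runnable variant of it, assuming the class from the first record is in scope (the 'C2' value is changed from the docstring example to make the overwrite visible):

# Runnable variant of the docstring example for OrderedCaselessDict.update,
# assuming the class from the first record is in scope.
k = OrderedCaselessDict({'a': 'A', 'b': 'B', 'c': 'C'})
q = {'c': 'C2', 'd': 'D', 'f': 'F'}
k.update(q)
assert k['d'] == 'D' and k['c'] == 'C2'          # new keys added, 'c' overwritten
assert sorted(k.keys()) == ['a', 'b', 'c', 'd', 'f']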
file_name: utils.py
fim_type: conditional_block

middle:

return False

prefix / suffix: the same utils.py source shown in the first record, split inside glob_match: the prefix ends after the "if complement:" test in the case-insensitive branch of the original nested implementation, and the suffix resumes at the matching "else:".
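The conditional block to be filled in belongs to glob_match, which reduces to "does the glob match, inverted when complement is set". A few expected outcomes, assuming the function from the first record is in scope:

# Expected glob_match outcomes, assuming the function from the first record.
assert glob_match('Notes.TXT', '*.txt') is True                     # caseless by default
assert glob_match('Notes.TXT', '*.txt', respect_case=True) is False
assert glob_match('notes.txt', '*.txt', complement=True) is False   # match, inverted
assert glob_match('image.png', '*.txt', complement=True) is True    # no match, inverted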
file_name: utils.py
fim_type: identifier_body

middle:

        "Returns an iterator over self's ordered keys."
        return self.iterkeys()

prefix / suffix: the same utils.py source shown in the first record, split at the body of OrderedCaselessDict.__iter__: the prefix ends at "def __iter__(self):" and the suffix resumes at "def __repr__(self):".
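Beyond the dict helpers, the last two functions in the shared utils.py source differ only in the variance denominator: n for the ML (population) estimator, n - 1 for the unbiased one. A worked check, assuming those functions are in scope:

# Worked check of the two variance estimators, assuming the functions
# from the first record. For x = [2, 4, 4, 4, 5, 5, 7, 9]:
#   mean = 40/8 = 5
#   ML (population) variance = sum((xi - 5)^2)/8 = 32/8 = 4
#   unbiased variance        = 32/7 ~ 4.571
x = [2, 4, 4, 4, 5, 5, 7, 9]
mu, var_ml = sample_mean_var_ml(x)
assert mu == 5.0 and var_ml == 4.0
mu2, var_ub = sample_mean_var_unbiased(x)
assert mu2 == 5.0 and abs(var_ub - 32.0/7.0) < 1e-12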
index.js
/** * index scirpt for Zhangshuo Guoranhao */ define('index', ['common'], function(require, exports, module){ var common = require('common'); // 首页 命名空间 var pub = {}; pub.openId = common.openId.getItem(); pub.accountRelatived = common.accountRelatived.getItem(); pub.dfd = $.Deferred();//主要用于商品接口的延时对象 pub.dfdDefault = $.Deferred();//主要用与选择门店弹框的延时对象 pub.accountRelative = $.Deferred();//主要用户账户关联的延时对象 pub.storeInfo = null; // 门店信息 pub.firmId = undefined; // 门店ID pub.paramListInit = function(){ pub.logined = common.isLogin(); // 已经登录 pub.logined && common.autoLogin(); // 执行自动登录 }; // 接口处理命名空间 pub.apiHandle = {}; pub.apiHandle.init = function(){ var me = this; //先判断是否为扫码进入-判断依据为链接上的参数 if (common.watmId.getItem()) { //将用户的默认门店改为当前门店,用户无感知 pub.firmId = common.watmId.getItem(); me.firm_default.init(); }else{ //使用用户的默认门店 if( common.storeInfo.getKey() ){ pub.storeInfo = common.getStoreInfo(); pub.firmId = pub.storeInfo.id; // 取存储的ID common.temp1.getKey() ? me.firm_default.apiData( pub.storeInfo, true ) : me.firm_default.init(); }else{ pub.dfdDefault.done(function(){ common.setMyTimeout(function(){ common.dialog.init().show('请选择门店',function(){ },function(){ common.jumpLinkPlain('html/store.html'); }); },300); }); me.firm_default.init(); } } if( common.timestamp.getKey() ){ var json = common.JSONparse( common.timestamp.getItem('timestamp') ); if( json.timestamp > Date.now() ){ me.main_page_goods.apiData( json.con ); }else{ common.timestamp.removeItem(); pub.dfd.done(function(){ me.main_page_goods.init(); }); } }else{ pub.dfd.done(function(){ me.main_page_goods.init(); }); } }; // 默认门店 pub.apiHandle.firm_default = { init : function(){ common.ajaxPost({ method : 'firm_default', firmId : pub.firmId },function(d){ switch( +d.statusCode ){ case 100000 : pub.apiHandle.firm_default.apiData( d.data ); break; case 100400 : common.clearData(); break; case 100901 : common.dialog.init({ btns : [{ klass : 'dialog-confirm-btn', txt : '确认'}] }).show('门店信息不存在,请重新选择门店', function(){common.jumpLinkPlain('html/store.html')}); break; } }); }, apiData : function( data, symbol ){ common.temp1.setItem('temp1'); var d = data, pNode = $('.index_tit p'), spanNode = pNode.eq(0).find("span"); spanNode.eq(0).html( d.cityName ).next().html( d.countyName ); pNode.eq(1).html( d.firmName ); if( symbol != true ){ // 存储不存在 pub.storeInfo = data; pub.firmId = d.id; common.storeInfo.setItem( common.JSONStr( data ) ); } data.type == 5 && $('.index_center_wrap > dl').css('opacity',0.5); if(!pub.watmId){ pub.dfdDefault.resolve(); } pub.dfd.resolve(); pub.apiHandle.custom_activity_firm_list.init(); } }; pub.apiHandle.custom_activity_firm_list = { init : function(){ common.ajaxPost({ method : 'custom_activity_firm_list', firmId : pub.firmId },function( d ){ if( d.statusCode == "100000" && d.data.length != 0 ){ $('#shop-active-data-box').html( $('#shop-active-data').tmpl( d.data ) ); var swiper = new Swiper('.shop-active-container', { slidesPerView: 4, spaceBetween: 30, onTap: function( swiper,e ){ common.first_data.removeItem(); common.two_data.removeItem(); common.seckill.removeItem(); common.jumpLinkPlain( swiper.clickedSlide.getAttribute('h5Url') ); } }); }else{ $('.shop-active-container').css('padding','6px'); } }); } }; // 首页商品列表 pub.apiHandle.main_page_goods = { init : function(){ var me = this; common.ajaxPost({ method : 'main_page_goods', firmId : pub.firmId, websiteNode : pub.storeInfo.websiteNode },function( d ){ switch( +d.statusCode ){ case 100000 : me.apiData( d ); break; case 100400 : common.clearData(); break; case 100901 : 
common.dialog.init().show('当前门店信息不存在,请重新选择门店',function(){},function(){common.jumpLinkPlain('html/store.html')}); break; } }); }, apiDataDeal : function( data ){ var html = '', goodsInfo = ''; for(var i in data) { goodsInfo = data[i].goodsInfo; var arr = []; goodsInfo.isHot == 1 && arr.push('isHot'); goodsInfo.isNew == 1 && arr.push('isNew'); goodsInfo.isRecommend == 1 && arr.push('isRecommend'); goodsInfo.isSale == 1 && arr.push('isSale'); html += '<dl data="' + goodsInfo.id + '" goods-box="goods-box">' html += ' <dt>' html += ' <img ' + ( i < 6 ? 'src' : 'data-src' ) + '=' + goodsInfo.goodsLogo + ' class="fadeIn"/>' html += '<div class="box">' for(var k = 0; k < arr.length; k++){ html += '<span class="goodSatce ' + arr[k] + '"></span>' } html += '</div>' html += ' </dt>' html += ' <dd>' html += ' <p>' + goodsInfo.goodsName + '</p>' html += ' <p class="clearfloat">' html += ' <span class="float_left">'+ goodsInfo.specInfo + '</span>' html += ' <span class="float_right">¥'+ goodsInfo.nowPrice + '</span>' html += ' </p>' html += ' </dd>' html += '</dl>' } $(".index_inner").html( html ); }, apiData : function( d ){ var json = {}, me = this, data = d.data; if( !common.timestamp.getKey() && data.adInfoList.length != 0 && data.mainPageGoodsDetails.length != 0 ){ json.timestamp = Date.now() + 3 * 60 * 1000; json.con = d; common.timestamp.setItem( common.JSONStr( json ) ); } data.adInfoList.length != 0 && common.bannerShow( data.adInfoList, '.index_banner', function( d ){ var html = '', i = 0, link = null; for ( i in d ){ html += '<div class="swiper-slide"><a href="' + ( d[i].linkU
dex_inner").empty(); data.mainPageGoodsDetails.length != 0 && me.apiDataDeal( data.mainPageGoodsDetails ); common.isWeiXin() && (function(){ common.weixin.config( location.href.split('#')[0] ); common.weixin.share( d.data.customShare ); }()); } }; pub.get_weixin_code = function(){ common.ajaxPost({ method: 'get_weixin_code', weixinCode : pub.weixinCode },function( d ){ if( d.statusCode == '100000' && d.data.fromWX == 1 ){ pub.openId = d.data.openId; //pub.openId = 'oLC2v0vzHkVYsoMKzSec5w78rfSs' common.openId.setItem( pub.openId ); // 存opendId //pub.accountRelative.resolve(); // 触发账户关联接口 //获取openId后台判断如果注册的情况下就返回用户信息 没有注册的情况下后台默认注册 pub.apiHandle.scan_qrcode_login.init() }else{ common.prompt( d.statusStr ); } }); }; //微信自动登录 pub.apiHandle.scan_qrcode_login = { init:function(){ common.ajaxPost({ method: 'scan_qrcode_login', openId : pub.openId },function( d ){ if( d.statusCode == '100000'){ pub.apiHandle.scan_qrcode_login.apiData(d); }else{ common.prompt( d.statusStr ); } }); }, apiData:function(d){ var infor = d.data.cuserInfo, user_data = { cuserInfoid : infor.id, firmId : infor.firmId, faceImg : infor.faceImg, petName : infor.petName, realName : infor.realName, idCard : infor.idcard, mobile : infor.mobile, sex : infor.sex }; common.user_data.setItem( common.JSONStr(user_data) ); localStorage.setItem('tokenId',d.data.tokenId) common.secretKey.setItem( d.data.secretKey ); //登陆完成后再进行门店初始化 pub.apiHandle.init(); }, }; //换肤 pub.apiHandle.change_app_theme = { init:function(){ if (common.huanfu.getItem() && common.huanfu.getItem() != 1) { $(".index_header,.index_inner,.footer").addClass("skin"+sessionStorage.getItem("huanfu")) } } } // 事件处理初始化 pub.eventHandle = { init : function(){ //底部跳转问题 common.footerNav(function( i ){ if( i == 1 ){ common.first_data.removeItem(); common.two_data.removeItem(); } common.jumpLinkPlain( ['../index.html','./html/moregoods.html','./html/cart.html','./html/my.html'][i] ); }); //点击跳转详情页面 $('.index_inner').on('click', "dl[goods-box]", function() { common.jumpLinkPlain( "html/goodsDetails.html?goodsId=" + $(this).attr("data") ); }); // $(".index_center_wrap").on('click', "dl", function() { // if( pub.storeInfo.type != 5 ){ // common.first_data.removeItem(); // common.two_data.removeItem(); // var i = $(this).attr("data"); // var pathNames = ["html/moregoods.html?type=TAO_CAN","html/moregoods.html?type=JU_HUI","html/seckill.html","html/pre.html"]; // i == "3" && common.seckill.setItem('1'); // common.jumpLinkPlain( pathNames[ i - 1 ] ); // } // }); $(".index_center_wrap").on('click', ".index_center_center", function() { common.jumpLinkPlain("html/month_service.html"); }); common.jumpLinkSpecial( '.index_rigth','html/search.html' ); //跳转搜索页面 common.jumpLinkSpecial( '.index_tit','html/store.html' ); // 跳转到门店 // $('#shop-active-data-box').on('li div','click',function(){ // console.log( $(this).attr('h5Url')); // common.jumpLinkPlain( $(this).attr('h5Url') ); // }); } }; // 模块初始化 pub.init = function(){ //pub.paramListInit(); // 参数初始化 pub.logined = common.isLogin(); // 判断是否登录 pub.isWeiXin = common.isWeiXin(); pub.watmId = common.getUrlParam("firmId");//获取机器编码id if (pub.watmId && pub.watmId != 'grhao') { common.watmId.setItem(pub.watmId); } //pub.domain = 'http://weixin.grhao.com'; pub.domain = common.WXDOMAIN; pub.appid = common.WXAPPID; if (pub.isWeiXin) { pub.weixinCode = common.getUrlParam('code'); // 获取url参数 pub.state = common.getUrlParam("state");//获取url机器编码参数 if (pub.logined) { common.autoLogin(); // 执行自动登录 /*if (common.watmId.getItem()) { //将用户的默认门店改为当前门店,用户无感知 }else{ 
//使用用户的默认门店 }*/ pub.apiHandle.init(); // 模块初始化接口数据处理 }else{ var state = 'state='+(pub.watmId ? pub.watmId : 'grhao'); //测试掌烁科技wxe92e098badc60fab正式果然好商城wxf9dd00234aa6e921 /*!pub.openId ? (function(){ pub.weixinCode ? pub.get_weixin_code() : common.jumpLinkPlain("https://open.weixin.qq.com/connect/oauth2/authorize?appid="+pub.appid+"&redirect_uri=" + pub.domain + "/index.html&response_type=code&scope=snsapi_userinfo&"+ state+ "&connect_redirect=1#wechat_redirect"); }()) : (function(){ //!pub.accountRelatived && pub.logined && pub.accountRelative.resolve(); // 关联 //使用openId去进行登录操作 pub.apiHandle.scan_qrcode_login.init() }());*/ pub.weixinCode ? pub.get_weixin_code() : common.jumpLinkPlain("https://open.weixin.qq.com/connect/oauth2/authorize?appid="+pub.appid+"&redirect_uri=" + pub.domain + "/index.html&response_type=code&scope=snsapi_userinfo&"+ state+ "&connect_redirect=1#wechat_redirect"); } }else{ if (pub.logined) { common.autoLogin(); // 执行自动登录 pub.apiHandle.init(); // 模块初始化接口数据处理 }else{ //存储watmId;用于账号登录之后的门店。 pub.apiHandle.init(); } } /* 用户退出账号后,不能再使用 openId 登录,意味着只要使用 tokenId 登录即可 */ // 账户绑定逻辑处理 /* pub.isWeiXin && (function(){ !pub.openId ? (function(){ pub.weixinCode ? pub.get_weixin_code() : common.jumpLinkPlain("https://open.weixin.qq.com/connect/oauth2/authorize?appid=wxf9dd00234aa6e921&redirect_uri=" + pub.domain + "/test0319/&response_type=code&scope=snsapi_userinfo&"+ state+ "&connect_redirect=1#wechat_redirect"); }()) : (function(){ !pub.accountRelatived && pub.logined && pub.accountRelative.resolve(); // 关联 }()); }()); pub.accountRelative.done( pub.apiHandle.weixin_binding.init ); // 账户绑定接口 */ // 微信授权处理 // !pub.openId && common.isWeiXin() && pub.weixinCode && pub.get_weixin_code(); common.lazyload(); // 懒加载 pub.eventHandle.init(); // 模块初始化事件处理 $('.footer_item[data-content]','#foot').attr('data-content', common.getTotal() ); if (!common.huanfu.getKey()) { common.change_app_theme(); common.defHuanfu.done(function(){ pub.apiHandle.change_app_theme.init(); }) }else{ pub.apiHandle.change_app_theme.init(); } }; common.refresh(); // require.async('https://hm.baidu.com/hm.js?2a10c871d8aa53992101d3d66a7812ae'); // 百度统计 module.exports = pub; });
rl ? d[i].linkUrl : 'javascript:void(0)' ) + '"><img src="' + d[i].adLogo + '" /></a></div>' } return html; }); $(".in
conditional_block
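Each block above is one fill-in-the-middle example: a file name, a prefix, a suffix, and a held-out middle, followed by a label such as conditional_block or random_line_split describing how the middle was chosen. Reassembling the original file is simply prefix + middle + suffix. A minimal Rust sketch of that reassembly (the FimRow struct and reconstruct function are illustrative names, not part of any dataset tooling; the field names follow the column layout this dump appears to use):

```rust
/// One fill-in-the-middle example (hypothetical struct mirroring the
/// file_name/prefix/suffix/middle/fim_type columns of this dump).
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String, // e.g. "conditional_block", "random_line_split"
}

impl FimRow {
    /// The original file is always prefix + middle + suffix.
    fn reconstruct(&self) -> String {
        let mut out = String::with_capacity(
            self.prefix.len() + self.middle.len() + self.suffix.len(),
        );
        out.push_str(&self.prefix);
        out.push_str(&self.middle);
        out.push_str(&self.suffix);
        out
    }
}

fn main() {
    let row = FimRow {
        file_name: "index.js".into(),
        prefix: "var a = ".into(),
        middle: "1".into(),
        suffix: ";\n".into(),
        fim_type: "random_line_split".into(),
    };
    assert_eq!(row.reconstruct(), "var a = 1;\n");
    println!("{} ({})", row.file_name, row.fim_type);
}
```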
index.js
/**
 * index script for Zhangshuo Guoranhao
 */
define('index', ['common'], function(require, exports, module){
    var common = require('common');
    // index page namespace
    var pub = {};
    pub.openId = common.openId.getItem();
    pub.accountRelatived = common.accountRelatived.getItem();
    pub.dfd = $.Deferred(); // deferred object, mainly for the goods API
    pub.dfdDefault = $.Deferred(); // deferred object, mainly for the store-picker dialog
    pub.accountRelative = $.Deferred(); // deferred object, mainly for account linking
    pub.storeInfo = null; // store info
    pub.firmId = undefined; // store ID
    pub.paramListInit = function(){
        pub.logined = common.isLogin(); // already logged in
        pub.logined && common.autoLogin(); // perform auto login
    };
    // API-handling namespace
    pub.apiHandle = {};
    pub.apiHandle.init = function(){
        var me = this;
        // first check whether entry came from a QR scan, based on the URL parameter
        if (common.watmId.getItem()) {
            // silently switch the user's default store to the current one
            pub.firmId = common.watmId.getItem();
            me.firm_default.init();
        }else{
            // use the user's default store
            if( common.storeInfo.getKey() ){
                pub.storeInfo = common.getStoreInfo();
                pub.firmId = pub.storeInfo.id; // take the stored ID
                common.temp1.getKey() ? me.firm_default.apiData( pub.storeInfo, true ) : me.firm_default.init();
            }else{
                pub.dfdDefault.done(function(){
                    common.setMyTimeout(function(){
                        common.dialog.init().show('请选择门店',function(){
                        },function(){
                            common.jumpLinkPlain('html/store.html');
                        });
                    },300);
                });
                me.firm_default.init();
            }
        }
        if( common.timestamp.getKey() ){
            var json = common.JSONparse( common.timestamp.getItem('timestamp') );
            if( json.timestamp > Date.now() ){
                me.main_page_goods.apiData( json.con );
            }else{
                common.timestamp.removeItem();
                pub.dfd.done(function(){ me.main_page_goods.init(); });
            }
        }else{
            pub.dfd.done(function(){ me.main_page_goods.init(); });
        }
    };
    // default store
    pub.apiHandle.firm_default = {
        init : function(){
            common.ajaxPost({
                method : 'firm_default',
                firmId : pub.firmId
            },function(d){
                switch( +d.statusCode ){
                    case 100000 :
                        pub.apiHandle.firm_default.apiData( d.data );
                        break;
                    case 100400 :
                        common.clearData();
                        break;
                    case 100901 :
                        common.dialog.init({ btns : [{ klass : 'dialog-confirm-btn', txt : '确认'}] }).show('门店信息不存在,请重新选择门店', function(){common.jumpLinkPlain('html/store.html')});
                        break;
                }
            });
        },
        apiData : function( data, symbol ){
            common.temp1.setItem('temp1');
            var d = data,
                pNode = $('.index_tit p'),
                spanNode = pNode.eq(0).find("span");
            spanNode.eq(0).html( d.cityName ).next().html( d.countyName );
            pNode.eq(1).html( d.firmName );
            if( symbol != true ){ // nothing stored yet
                pub.storeInfo = data;
                pub.firmId = d.id;
                common.storeInfo.setItem( common.JSONStr( data ) );
            }
            data.type == 5 && $('.index_center_wrap > dl').css('opacity',0.5);
            if(!pub.watmId){
                pub.dfdDefault.resolve();
            }
            pub.dfd.resolve();
            pub.apiHandle.custom_activity_firm_list.init();
        }
    };
    pub.apiHandle.custom_activity_firm_list = {
        init : function(){
            common.ajaxPost({
                method : 'custom_activity_firm_list',
                firmId : pub.firmId
            },function( d ){
                if( d.statusCode == "100000" && d.data.length != 0 ){
                    $('#shop-active-data-box').html( $('#shop-active-data').tmpl( d.data ) );
                    var swiper = new Swiper('.shop-active-container', {
                        slidesPerView: 4,
                        spaceBetween: 30,
                        onTap: function( swiper,e ){
                            common.first_data.removeItem();
                            common.two_data.removeItem();
                            common.seckill.removeItem();
                            common.jumpLinkPlain( swiper.clickedSlide.getAttribute('h5Url') );
                        }
                    });
                }else{
                    $('.shop-active-container').css('padding','6px');
                }
            });
        }
    };
    // home-page goods list
    pub.apiHandle.main_page_goods = {
        init : function(){
            var me = this;
            common.ajaxPost({
                method : 'main_page_goods',
                firmId : pub.firmId,
                websiteNode : pub.storeInfo.websiteNode
            },function( d ){
                switch( +d.statusCode ){
                    case 100000 :
                        me.apiData( d );
                        break;
                    case 100400 :
                        common.clearData();
                        break;
                    case 100901 :
                        common.dialog.init().show('当前门店信息不存在,请重新选择门店',function(){},function(){common.jumpLinkPlain('html/store.html')});
                        break;
                }
            });
        },
        apiDataDeal : function( data ){
            var html = '', goodsInfo = '';
            for(var i in data) {
                goodsInfo = data[i].goodsInfo;
                var arr = [];
                goodsInfo.isHot == 1 && arr.push('isHot');
                goodsInfo.isNew == 1 && arr.push('isNew');
                goodsInfo.isRecommend == 1 && arr.push('isRecommend');
                goodsInfo.isSale == 1 && arr.push('isSale');
                html += '<dl data="' + goodsInfo.id + '" goods-box="goods-box">';
                html += ' <dt>';
                html += ' <img ' + ( i < 6 ? 'src' : 'data-src' ) + '=' + goodsInfo.goodsLogo + ' class="fadeIn"/>';
                html += '<div class="box">';
                for(var k = 0; k < arr.length; k++){
                    html += '<span class="goodSatce ' + arr[k] + '"></span>';
                }
                html += '</div>';
                html += ' </dt>';
                html += ' <dd>';
                html += ' <p>' + goodsInfo.goodsName + '</p>';
                html += ' <p class="clearfloat">';
                html += ' <span class="float_left">'+ goodsInfo.specInfo + '</span>';
                html += ' <span class="float_right">¥'+ goodsInfo.nowPrice + '</span>';
                html += ' </p>';
                html += ' </dd>';
                html += '</dl>';
            }
            $(".index_inner").html( html );
        },
        apiData : function( d ){
            var json = {}, me = this, data = d.data;
            if( !common.timestamp.getKey() && data.adInfoList.length != 0 && data.mainPageGoodsDetails.length != 0 ){
                json.timestamp = Date.now() + 3 * 60 * 1000;
                json.con = d;
                common.timestamp.setItem( common.JSONStr( json ) );
            }
            data.adInfoList.length != 0 && common.bannerShow( data.adInfoList, '.index_banner', function( d ){
                var html = '', i = 0, link = null;
                for ( i in d ){
                    html += '<div class="swiper-slide"><a href="' + ( d[i].linkUrl ? d[i].linkUrl : 'javascript:void(0)' ) + '"><img src="' + d[i].adLogo + '" /></a></div>';
                }
                return html;
            });
            $(".index_inner").empty();
            data.mainPageGoodsDetails.length != 0 && me.apiDataDeal( data.mainPageGoodsDetails );
            common.isWeiXin() && (function(){
                common.weixin.config( location.href.split('#')[0] );
                common.weixin.share( d.data.customShare );
            }());
        }
    };
    pub.get_weixin_code = function(){
        common.ajaxPost({
            method: 'get_weixin_code',
            weixinCode : pub.weixinCode
        },function( d ){
            if( d.statusCode == '100000' && d.data.fromWX == 1 ){
                pub.openId = d.data.openId;
                //pub.openId = 'oLC2v0vzHkVYsoMKzSec5w78rfSs'
                common.openId.setItem( pub.openId ); // store the openId
                //pub.accountRelative.resolve(); // trigger the account-linking API
                // with the openId the backend returns user info if already registered, otherwise it registers the user by default
                pub.apiHandle.scan_qrcode_login.init();
            }else{
                common.prompt( d.statusStr );
            }
        });
    };
    // WeChat auto login
    pub.apiHandle.scan_qrcode_login = {
        init:function(){
            common.ajaxPost({
                method: 'scan_qrcode_login',
                openId : pub.openId
            },function( d ){
                if( d.statusCode == '100000'){
                    pub.apiHandle.scan_qrcode_login.apiData(d);
                }else{
                    common.prompt( d.statusStr );
                }
            });
        },
        apiData:function(d){
            var infor = d.data.cuserInfo,
                user_data = {
                    cuserInfoid : infor.id,
                    firmId : infor.firmId,
                    faceImg : infor.faceImg,
                    petName : infor.petName,
                    realName : infor.realName,
                    idCard : infor.idcard,
                    mobile : infor.mobile,
                    sex : infor.sex
                };
            common.user_data.setItem( common.JSONStr(user_data) );
            localStorage.setItem('tokenId',d.data.tokenId);
            common.secretKey.setItem( d.data.secretKey );
            // initialize the store only after login completes
            pub.apiHandle.init();
        },
    };
    // theme switching
    pub.apiHandle.change_app_theme = {
        init:function(){
            if (common.huanfu.getItem() && common.huanfu.getItem() != 1) {
                $(".index_header,.index_inner,.footer").addClass("skin"+sessionStorage.getItem("huanfu"));
            }
        }
    };
    // event-handling init
    pub.eventHandle = {
        init : function(){
            // footer navigation
            common.footerNav(function( i ){
                if( i == 1 ){
                    common.first_data.removeItem();
                    common.two_data.removeItem();
                }
                common.jumpLinkPlain( ['../index.html','./html/moregoods.html','./html/cart.html','./html/my.html'][i] );
            });
            // click through to the goods-detail page
            $('.index_inner').on('click', "dl[goods-box]", function() {
                common.jumpLinkPlain( "html/goodsDetails.html?goodsId=" + $(this).attr("data") );
            });
            // $(".index_center_wrap").on('click', "dl", function() {
            //     if( pub.storeInfo.type != 5 ){
            //         common.first_data.removeItem();
            //         common.two_data.removeItem();
            //         var i = $(this).attr("data");
            //         var pathNames = ["html/moregoods.html?type=TAO_CAN","html/moregoods.html?type=JU_HUI","html/seckill.html","html/pre.html"];
            //         i == "3" && common.seckill.setItem('1');
            //         common.jumpLinkPlain( pathNames[ i - 1 ] );
            //     }
            // });
            $(".index_center_wrap").on('click', ".index_center_center", function() {
                common.jumpLinkPlain("html/month_service.html");
            });
            common.jumpLinkSpecial( '.index_rigth','html/search.html' ); // go to the search page
            common.jumpLinkSpecial( '.index_tit','html/store.html' ); // go to the store page
            // $('#shop-active-data-box').on('li div','click',function(){
            //     console.log( $(this).attr('h5Url'));
            //     common.jumpLinkPlain( $(this).attr('h5Url') );
            // });
        }
    };
    // module init
    pub.init = function(){
        //pub.paramListInit(); // parameter init
        pub.logined = common.isLogin(); // check login state
        pub.isWeiXin = common.isWeiXin();
        pub.watmId = common.getUrlParam("firmId"); // get the machine-code id
        if (pub.watmId && pub.watmId != 'grhao') {
            common.watmId.setItem(pub.watmId);
        }
        //pub.domain = 'http://weixin.grhao.com';
        pub.domain = common.WXDOMAIN;
        pub.appid = common.WXAPPID;
        if (pub.isWeiXin) {
            pub.weixinCode = common.getUrlParam('code'); // read the url parameter
            pub.state = common.getUrlParam("state"); // read the machine-code url parameter
            if (pub.logined) {
                common.autoLogin(); // perform auto login
                /*if (common.watmId.getItem()) {
                    // silently switch the user's default store to the current one
                }else{
                    // use the user's default store
                }*/
                pub.apiHandle.init(); // module-init API data handling
            }else{
                var state = 'state='+(pub.watmId ? pub.watmId : 'grhao');
                // test appid (Zhangshuo Tech): wxe92e098badc60fab; production appid (Guoranhao Mall): wxf9dd00234aa6e921
                /*!pub.openId ? (function(){
                    pub.weixinCode ? pub.get_weixin_code() : common.jumpLinkPlain("https://open.weixin.qq.com/connect/oauth2/authorize?appid="+pub.appid+"&redirect_uri=" + pub.domain + "/index.html&response_type=code&scope=snsapi_userinfo&"+ state+ "&connect_redirect=1#wechat_redirect");
                }()) : (function(){
                    //!pub.accountRelatived && pub.logined && pub.accountRelative.resolve(); // link account
                    // log in with the openId
                    pub.apiHandle.scan_qrcode_login.init()
                }());*/
                pub.weixinCode ? pub.get_weixin_code() : common.jumpLinkPlain("https://open.weixin.qq.com/connect/oauth2/authorize?appid="+pub.appid+"&redirect_uri=" + pub.domain + "/index.html&response_type=code&scope=snsapi_userinfo&"+ state+ "&connect_redirect=1#wechat_redirect");
            }
        }else{
            if (pub.logined) {
                common.autoLogin(); // perform auto login
                pub.apiHandle.init(); // module-init API data handling
            }else{
                // store watmId: used as the store after account login.
                pub.apiHandle.init();
            }
        }
        /* after the user logs out, openId login is no longer allowed, which means only tokenId login should be used */
        // account-binding logic
        /* pub.isWeiXin && (function(){
            !pub.openId ? (function(){
                pub.weixinCode ? pub.get_weixin_code() : common.jumpLinkPlain("https://open.weixin.qq.com/connect/oauth2/authorize?appid=wxf9dd00234aa6e921&redirect_uri=" + pub.domain + "/test0319/&response_type=code&scope=snsapi_userinfo&"+ state+ "&connect_redirect=1#wechat_redirect");
            }()) : (function(){
        pub.accountRelative.done( pub.apiHandle.weixin_binding.init ); // account-binding API */
        // WeChat authorization handling
        // !pub.openId && common.isWeiXin() && pub.weixinCode && pub.get_weixin_code();
        common.lazyload(); // lazy loading
        pub.eventHandle.init(); // module-init event handling
        $('.footer_item[data-content]','#foot').attr('data-content', common.getTotal() );
        if (!common.huanfu.getKey()) {
            common.change_app_theme();
            common.defHuanfu.done(function(){
                pub.apiHandle.change_app_theme.init();
            })
        }else{
            pub.apiHandle.change_app_theme.init();
        }
    };
    common.refresh();
    // require.async('https://hm.baidu.com/hm.js?2a10c871d8aa53992101d3d66a7812ae'); // Baidu analytics
    module.exports = pub;
});
!pub.accountRelatived && pub.logined && pub.accountRelative.resolve(); // link account }()); }());
random_line_split
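In the index.js example above, main_page_goods.apiData stores the whole goods response together with an expiry stamp of Date.now() + 3 * 60 * 1000, and apiHandle.init replays the cached payload until that stamp passes, clearing it afterwards. A minimal sketch of the same expiry-stamp cache in Rust (the TtlCache type and its method names are illustrative, using only std::time):

```rust
use std::time::{Duration, Instant};

/// One cached payload plus the instant it stops being valid — the same
/// shape as the { timestamp, con } object the page keeps in storage.
struct TtlCache<T> {
    slot: Option<(Instant, T)>,
    ttl: Duration,
}

impl<T> TtlCache<T> {
    fn new(ttl: Duration) -> Self {
        TtlCache { slot: None, ttl }
    }

    /// Store a payload stamped with now + ttl.
    fn put(&mut self, value: T) {
        self.slot = Some((Instant::now() + self.ttl, value));
    }

    /// Yield the payload only while the stamp lies in the future;
    /// otherwise clear it, like timestamp.removeItem() in the page.
    fn get(&mut self) -> Option<&T> {
        let expired = match &self.slot {
            Some((deadline, _)) => *deadline <= Instant::now(),
            None => true,
        };
        if expired {
            self.slot = None;
            return None;
        }
        self.slot.as_ref().map(|(_, v)| v)
    }
}

fn main() {
    let mut cache = TtlCache::new(Duration::from_secs(3 * 60)); // three minutes
    cache.put("goods payload");
    assert_eq!(cache.get(), Some(&"goods payload"));
}
```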
main.rs
extern crate regex; pub mod dict; pub mod dictionary; pub mod idx; pub mod ifo; pub mod reformat; pub mod result; pub mod syn; //pub mod web; use self::regex::bytes::Regex; use std::cmp::Ordering; use std::io::prelude::*; use std::iter::Iterator; use std::mem; use std::net::TcpListener; use std::net::TcpStream; use std::{env, fs, path, str}; //use self::regex::Error; /// StarDict contains all dictionary found within the specified file system directory. pub struct StarDict { directories: Vec<dictionary::Dictionary>, } /// An iterator that merges several underlying iterators. try to dedup one duplicated /// word from each iterator. pub struct WordMergeIter<T: Iterator<Item = Vec<u8>>> { wordit: Vec<T>, cur: Vec<Option<Vec<u8>>>, } impl<'a, T: Iterator<Item = Vec<u8>>> Iterator for WordMergeIter<T> { type Item = Vec<u8>; fn next(&mut self) -> Option<Self::Item> { let l = self.cur.len(); if l == 0 { return None; } let mut x = 0usize; let mut i = 1usize; while i < l { x = match (&self.cur[x], &self.cur[i]) { (None, _) => i, (_, None) => x, (Some(a), Some(b)) => match idx::Idx::dict_cmp(&a, &b, false) { Ordering::Greater => i, Ordering::Equal => { self.cur[i] = self.wordit[i].next(); x } _ => x, }, }; i += 1; } mem::replace(&mut self.cur[x], self.wordit[x].next()) } } impl StarDict { /// Create a StarDict struct from a system path. in the path, /// there should be some directories. each directory contains /// the dict files, like .ifo, .idx, .dict, etc. /// The dictionary will be sorted by its directory name. pub fn new(root: &path::Path) -> Result<StarDict, result::DictError> { let mut sort_dirs = Vec::new(); let mut items = Vec::new(); if root.is_dir() { for it in fs::read_dir(root)? { //println!("push direc: {:?}", it); let it = it?.path(); if it.is_dir() { sort_dirs.push(it.into_boxed_path()); } } } sort_dirs.sort(); for it in sort_dirs.iter() { match dictionary::Dictionary::new(&**it, root) { Ok(d) => { items.push(d); } Err(e) => { eprintln!("ignore reason: {:?}", e); } } } Ok(StarDict { directories: items }) } /// Get the Ifo struct, which is parsed from the .ifo file. pub fn info(&self) -> Vec<&ifo::Ifo> { let mut items = Vec::with_capacity(self.directories.len()); for it in &self.directories { items.push(&it.ifo); } items } /// List the following neighbor words of `word`, from `off`. /// If `off` is a negative number, list from before `-off`. pub fn neighbors(&self, word: &[u8], off: i32) -> WordMergeIter<dictionary::DictNeighborIter> { let mut wordit = Vec::with_capacity(2 * self.directories.len()); let mut cur = Vec::with_capacity(2 * self.directories.len()); for d in self.directories.iter() { let mut x = d.neighbors(word, off); let mut s = d.neighbors_syn(word, off); cur.push(x.next()); cur.push(s.next()); wordit.push(x); wordit.push(s); } WordMergeIter { wordit, cur } } /// Search from all dictionaries. using the specified regular expression. /// to match the beginning of a word, use `^`, the ending of a word, use `$`. pub fn search<'a>(&'a self, reg: &'a Regex) -> WordMergeIter<dictionary::IdxIter> { let mut wordit = Vec::with_capacity(2 * self.directories.len()); let mut cur = Vec::with_capacity(2 * self.directories.len()); for d in self.directories.iter() { //println!("in for {}", d.ifo.name.as_str()); let mut x = d.search_regex(reg); let mut s = d.search_syn(reg); //println!("created inner iter"); cur.push(x.next()); cur.push(s.next()); //println!("created 1st value"); wordit.push(x); wordit.push(s); } WordMergeIter { wordit, cur } } /// Lookup the word. 
Find in the Idx case-sensitively, if not found then try to do /// case-insensitive search. Also find all case-insensitive matching words in Syn. pub fn lookup(&self, word: &[u8]) -> Result<Vec<dictionary::LookupResult>, result::DictError> { let mut ret: Vec<dictionary::LookupResult> = Vec::with_capacity(self.directories.len()); for d in self.directories.iter() { if let Ok(x) = d.lookup(word) { ret.extend(x); } } Ok(ret) } } struct StardictUrl { path: [u8; 4usize], word: Vec<u8>, offset: i32, // args for offset and length, may use BTreeMap, but it cost too much. length: usize, } impl StardictUrl { fn new() -> StardictUrl { StardictUrl { path: [0; 4], word: Vec::with_capacity(16), offset: 0, length: 0, } } fn byte_to_u8(b: u8) -> u8 { match b { b'0'..=b'9' => b - b'0', b'A'..=b'F' => b - (b'A' - 10), b'a'..=b'f' => b - (b'a' - 10), _ => b, } } fn add_path(&mut self, c: u8, idx: usize) { if idx < self.path.len() { self.path[idx] = c; } } fn add_byte(&mut self, c: u8) { self.word.push(c); } fn add_arg_offset(&mut self, c: i32) { self.offset = self.offset * 10 + c; } fn add_arg_length(&mut self, c: usize) { self.length = self.length * 10 + c; } } fn main() { let mut host = String::from("0.0.0.0:8888"); //let mut host = String::from("[::]:8888"); let mut dictdir = String::from("/usr/share/stardict/dic"); let dict; { let mut _daemon = false; let mut pendarg = 0u8; for arg in env::args().skip(1) { //parse options. println!("cmd args: {}", &arg); let a = arg.as_bytes(); match pendarg { b'h' => { host.clear(); host.push_str(&arg); pendarg = 0; } b'd' => { _daemon = true; pendarg = 0; } b'r' => { dictdir.clear(); dictdir.push_str(&arg); pendarg = 0; } 0 => (), _ => { println!("parameter: [-d] [-h host:port] [-r dict-root-dir]"); return; } } if a[0] == b'-' { pendarg = a[1]; } } //println!("get arg host={}, daemon={}", host, daemon); //if daemon { //} dict = StarDict::new(&path::PathBuf::from(&dictdir)).unwrap(); } println!("dict size={}", dict.directories.len()); //for d in dict.info().iter() { // println!("dict: wordcount:{} {}", d.word_count, d.name); //} //webs let listener = TcpListener::bind(&host).expect("Bind Socket failed!"); //let pool = web::ThreadPool::new(4); let cr = { let mut fmtp = path::PathBuf::from(&dictdir); fmtp.push("rformat.conf"); reformat::ContentReformat::from_config_file(&fmtp) }; for stream in listener.incoming() { let stream = stream.expect("accept TCP failed!"); //pool.execute( if let Err(_) = handle_connection(stream, &dict, &cr, &dictdir) { println!("communication failed!"); } //); } println!("Shutting down."); } fn handle_connection( mut stream: TcpStream, dict: &StarDict, cr: &reformat::ContentReformat, dictdir: &str, ) -> std::io::Result<()> { //stream.set_nonblocking(false)?; //stream.set_nodelay(false)?; let mut buffer = vec![0u8; 512]; { let mut sz = 0usize; while let Ok(bn) = stream.read(&mut buffer[sz..]) { sz += bn; if bn == 0 || sz <= 4 || sz > 4096 { stream.write(b"HTTP/1.0 417 Expectation Failed\r\n\r\nFail")?; return Ok(()); } if buffer[sz - 4] == b'\r' && buffer[sz - 3] == b'\n' && buffer[sz - 2] == b'\r' && buffer[sz - 1] == b'\n' { buffer.resize(sz, 0); break; } if sz >= buffer.len() { buffer.resize(buffer.len() + 512, 0); } } } let get = b"GET /"; //("HTTP/1.0 200 OK\r\nConnection: close\r\n", "index.html"); let mut content: Vec<u8> = Vec::new(); let mut surl = StardictUrl::new(); if buffer.starts_with(get) { let mut state = 0i16; //>=0 path, -1 w, -2 p0w, -3 p1w, -4 argKey, -5 argVal let mut w = 0u8; buffer[5..] 
.iter() .take_while(|c| **c != b' ') .for_each(|c| { if state < 0 { if *c == b'%' { state = -2; } else if *c == b'?' { // parse args. state = -4; } else { if state == -2 { w = StardictUrl::byte_to_u8(*c) << 4; state = -3; } else if state == -3 { w |= StardictUrl::byte_to_u8(*c); surl.add_byte(w); state = -1; } else if state == -4 { if *c == b'=' { state = -5; } else { w = *c; } } else if state == -5 { match *c { b'&' => { state = -4; } b'-' => { if w == b'o' { w = b'O'; } else { state = -32768; } } b'0'..=b'9' => { let v: i32 = (*c - b'0') as i32; if w == b'o' { surl.add_arg_offset(v); } else if w == b'O' { // negative offset surl.add_arg_offset(-v); } else if w == b'l' { // length surl.add_arg_length(v as usize); } } _ => { state = -32768; } } } else { surl.add_byte(*c); } } } else if *c == b'/' { state = -1; } else { surl.add_path(*c, state as usize); state += 1; } }); //println!("get from url path={}, word={}, off={}, len={}", str::from_utf8(&surl.path).unwrap(), str::from_utf8(&surl.word).unwrap(), surl.offset, surl.length); if surl.length == 0 { surl.length = 10; } if surl.word.len() > 0 { if surl.path[0] == b'W' { //word lookup match dict.lookup(&surl.word) { Ok(x) => { content.extend(b"<ol>"); for (i, e) in x.iter().enumerate() { content.extend(b"<li><a href='#word_"); content.extend(i.to_string().as_bytes()); content.extend(b"'>"); content.extend(&e.word); content.extend(b" : "); content.extend(e.dictionary.name.as_bytes()); content.extend(b"</a></li>"); } content.extend(b"</ol>\n"); for (i, e) in x.iter().enumerate() { content.extend(b"<div id='word_"); content.extend(i.to_string().as_bytes()); content.extend(b"' class='res_word'>"); content.extend(e.dictionary.name.as_bytes()); content.extend(b" ("); content.extend(&e.word); content.extend(b") </div><div class='res_definition'>".iter()); for (a, b) in e .dictionary .same_type_sequence .as_bytes() .iter() .zip(e.result.split(|c| *c == 0)) { content.extend(&cr.replace_all( *a, e.dictionary.dict_path.as_bytes(), b, )); } content.extend(b"</div>\n"); } } Err(e) => println!("err: {:?}", e), } } else if surl.path[0] == b'n' { //neighbor words reference for s in dict.neighbors(&surl.word, surl.offset).take(surl.length) { content.extend(s); content.extend(b"\n"); } } else if surl.path[0] == b's' { //search with regex match str::from_utf8(&surl.word) { Ok(x) => match Regex::new(x) { Ok(v) => { content.extend(b"/~/:<ol>"); dict.search(&v).take(surl.length).for_each(|e| { content.extend(b"<li><a>"); content.extend(e); content.extend(b"</a></li>\n"); }); content.extend(b"</ol>"); } Err(e) => println!("err: {:?}", e), }, Err(e) => println!("err: {:?}", e), } } else if surl.path[0] == b'r'
else if surl.path[0] == b'w' { content.extend(HOME_PAGE.as_bytes()); } } else { content.extend(HOME_PAGE.as_bytes()); } } fn map_by_file(f: &[u8]) -> &'static [u8] { if let Some(s) = f.rsplit(|c| *c == b'.').next() { match s { b"js" => return b"application/javascript", b"css" => return b"text/css", b"jpg" => return b"image/jpeg", b"png" => return b"image/png", _ => (), } } b"text/html" } if content.len() > 0 { //let mut cg = 0; //content.iter_mut().for_each(|x|{ *x = if cg % 10 == 0 {b'\n'} else {b'a'}; cg = cg + 1;}); stream.write(b"HTTP/1.0 200 OK\r\nContent-Type: ")?; if surl.path[0] == b'n' { stream.write(b"text/plain")?; } else if surl.path[0] == b'r' { stream.write(map_by_file(&surl.word))?; } else { stream.write(b"text/html")?; } stream.write(b"\r\nContent-Length: ")?; stream.write(content.len().to_string().as_bytes())?; stream.write(b"\r\nConnection: close\r\n\r\n")?; //stream.write(b"\r\n\r\n")?; /* for blk in content.chunks(1024) { stream.write(blk)?; } */ stream.write(&content)?; } else { stream.write(b"HTTP/1.0 404 NOT FOUND\r\n\r\nnot found")?; } stream.flush()?; //stream.shutdown(std::net::Shutdown::Both)?; Ok(()) } const HOME_PAGE: &'static str = r"<html><head> <meta http-equiv='Content-Type' content='text/html; charset=UTF-8' /> <title>Star Dictionary</title> <style> .res_definition{ table-layout: fixed; border-left: thin dashed black; border-right: thin dashed black; padding: 5px; } .res_word{ table-layout: fixed; border: thin solid black; padding: 5px; } .numi{ width:5em; } span{ color:green; } a{ color:blue; text-decoration:underline; cursor:pointer; } blockquote{ margin:0em 0em 0em 1em; padding:0em 0em 0em 0em; } </style> <link href='/r/rhtm/jquery-ui.css' rel='stylesheet'> <script src='/r/rhtm/jquery.js'></script> <script src='/r/rhtm/jquery-ui.js'></script> <script src='/r/rhtm/autohint.js'></script> </head><body> <form id='qwFORM' action='/' method='GET'> <input id='qwt' type='text' name='w' class='ui-autocomplete-input' placeholder='input word' required='required' value=''/>/<input id='chkreg' type='checkbox'/>/ <input type='submit' value='='/> &nbsp;<input type='button' id='backwardbtn' value='<'/> <input type='button' id='forwardbtn' value='>'/> (<input type='number' class='numi' id='hint_offset' value='0' disabled/>, <input type='number' class='numi' id='result_length' value='10'/>) </form><hr/> <div id='dict_content'></div></body></html>";
{ //html js css page etc. if let Ok(fname) = str::from_utf8(&surl.word) { let mut pfile = path::PathBuf::from(dictdir); pfile.push(fname); if let Ok(mut f) = fs::File::open(pfile) { if f.read_to_end(&mut content).is_err() { content.clear(); } } } }
conditional_block
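WordMergeIter::next in the main.rs example above is a k-way merge scan: it keeps one pending head per underlying iterator, returns the smallest head under idx::Idx::dict_cmp, and advances any iterator whose head compares equal so a duplicate word surfaces only once. A self-contained sketch of the same scan, with plain byte-wise Ord standing in for dict_cmp (that substitution is an assumption; the real comparison is dictionary-order aware):

```rust
use std::cmp::Ordering;

/// Merge several ascending streams of words, yielding each word once.
/// Stand-in for WordMergeIter, with Ord on Vec<u8> replacing dict_cmp.
struct MergeDedup<I: Iterator<Item = Vec<u8>>> {
    iters: Vec<I>,
    heads: Vec<Option<Vec<u8>>>,
}

impl<I: Iterator<Item = Vec<u8>>> MergeDedup<I> {
    fn new(mut iters: Vec<I>) -> Self {
        // Pull one head from every stream up front, as the constructor
        // of the original does with cur.push(x.next()).
        let heads = iters.iter_mut().map(|it| it.next()).collect();
        MergeDedup { iters, heads }
    }
}

impl<I: Iterator<Item = Vec<u8>>> Iterator for MergeDedup<I> {
    type Item = Vec<u8>;

    fn next(&mut self) -> Option<Vec<u8>> {
        // Index of the smallest live head; advance equal heads in place.
        let mut best: Option<usize> = None;
        for i in 0..self.heads.len() {
            let Some(b) = &self.heads[i] else { continue };
            match best {
                None => best = Some(i),
                Some(x) => match self.heads[x].as_ref().unwrap().cmp(b) {
                    Ordering::Greater => best = Some(i),
                    // Same word from another dictionary: skip past it.
                    Ordering::Equal => self.heads[i] = self.iters[i].next(),
                    Ordering::Less => {}
                },
            }
        }
        let x = best?;
        std::mem::replace(&mut self.heads[x], self.iters[x].next())
    }
}

fn main() {
    let a = vec![b"apple".to_vec(), b"cat".to_vec()];
    let b = vec![b"apple".to_vec(), b"bee".to_vec()];
    let merged: Vec<_> = MergeDedup::new(vec![a.into_iter(), b.into_iter()]).collect();
    assert_eq!(merged, vec![b"apple".to_vec(), b"bee".to_vec(), b"cat".to_vec()]);
}
```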
main.rs
extern crate regex; pub mod dict; pub mod dictionary; pub mod idx; pub mod ifo; pub mod reformat; pub mod result; pub mod syn; //pub mod web; use self::regex::bytes::Regex; use std::cmp::Ordering; use std::io::prelude::*; use std::iter::Iterator; use std::mem; use std::net::TcpListener; use std::net::TcpStream; use std::{env, fs, path, str}; //use self::regex::Error; /// StarDict contains all dictionary found within the specified file system directory. pub struct StarDict { directories: Vec<dictionary::Dictionary>, } /// An iterator that merges several underlying iterators. try to dedup one duplicated /// word from each iterator. pub struct WordMergeIter<T: Iterator<Item = Vec<u8>>> { wordit: Vec<T>, cur: Vec<Option<Vec<u8>>>, } impl<'a, T: Iterator<Item = Vec<u8>>> Iterator for WordMergeIter<T> { type Item = Vec<u8>; fn next(&mut self) -> Option<Self::Item> { let l = self.cur.len(); if l == 0 { return None; } let mut x = 0usize; let mut i = 1usize; while i < l { x = match (&self.cur[x], &self.cur[i]) { (None, _) => i, (_, None) => x, (Some(a), Some(b)) => match idx::Idx::dict_cmp(&a, &b, false) { Ordering::Greater => i, Ordering::Equal => { self.cur[i] = self.wordit[i].next(); x } _ => x, }, }; i += 1; } mem::replace(&mut self.cur[x], self.wordit[x].next()) } } impl StarDict { /// Create a StarDict struct from a system path. in the path, /// there should be some directories. each directory contains /// the dict files, like .ifo, .idx, .dict, etc. /// The dictionary will be sorted by its directory name. pub fn new(root: &path::Path) -> Result<StarDict, result::DictError> { let mut sort_dirs = Vec::new(); let mut items = Vec::new(); if root.is_dir() { for it in fs::read_dir(root)? { //println!("push direc: {:?}", it); let it = it?.path(); if it.is_dir() { sort_dirs.push(it.into_boxed_path()); } } } sort_dirs.sort(); for it in sort_dirs.iter() { match dictionary::Dictionary::new(&**it, root) { Ok(d) => { items.push(d); } Err(e) => { eprintln!("ignore reason: {:?}", e); } } } Ok(StarDict { directories: items }) } /// Get the Ifo struct, which is parsed from the .ifo file. pub fn info(&self) -> Vec<&ifo::Ifo> { let mut items = Vec::with_capacity(self.directories.len()); for it in &self.directories { items.push(&it.ifo); } items } /// List the following neighbor words of `word`, from `off`. /// If `off` is a negative number, list from before `-off`. pub fn neighbors(&self, word: &[u8], off: i32) -> WordMergeIter<dictionary::DictNeighborIter>
/// Search from all dictionaries. using the specified regular expression. /// to match the beginning of a word, use `^`, the ending of a word, use `$`. pub fn search<'a>(&'a self, reg: &'a Regex) -> WordMergeIter<dictionary::IdxIter> { let mut wordit = Vec::with_capacity(2 * self.directories.len()); let mut cur = Vec::with_capacity(2 * self.directories.len()); for d in self.directories.iter() { //println!("in for {}", d.ifo.name.as_str()); let mut x = d.search_regex(reg); let mut s = d.search_syn(reg); //println!("created inner iter"); cur.push(x.next()); cur.push(s.next()); //println!("created 1st value"); wordit.push(x); wordit.push(s); } WordMergeIter { wordit, cur } } /// Lookup the word. Find in the Idx case-sensitively, if not found then try to do /// case-insensitive search. Also find all case-insensitive matching words in Syn. pub fn lookup(&self, word: &[u8]) -> Result<Vec<dictionary::LookupResult>, result::DictError> { let mut ret: Vec<dictionary::LookupResult> = Vec::with_capacity(self.directories.len()); for d in self.directories.iter() { if let Ok(x) = d.lookup(word) { ret.extend(x); } } Ok(ret) } } struct StardictUrl { path: [u8; 4usize], word: Vec<u8>, offset: i32, // args for offset and length, may use BTreeMap, but it cost too much. length: usize, } impl StardictUrl { fn new() -> StardictUrl { StardictUrl { path: [0; 4], word: Vec::with_capacity(16), offset: 0, length: 0, } } fn byte_to_u8(b: u8) -> u8 { match b { b'0'..=b'9' => b - b'0', b'A'..=b'F' => b - (b'A' - 10), b'a'..=b'f' => b - (b'a' - 10), _ => b, } } fn add_path(&mut self, c: u8, idx: usize) { if idx < self.path.len() { self.path[idx] = c; } } fn add_byte(&mut self, c: u8) { self.word.push(c); } fn add_arg_offset(&mut self, c: i32) { self.offset = self.offset * 10 + c; } fn add_arg_length(&mut self, c: usize) { self.length = self.length * 10 + c; } } fn main() { let mut host = String::from("0.0.0.0:8888"); //let mut host = String::from("[::]:8888"); let mut dictdir = String::from("/usr/share/stardict/dic"); let dict; { let mut _daemon = false; let mut pendarg = 0u8; for arg in env::args().skip(1) { //parse options. 
println!("cmd args: {}", &arg); let a = arg.as_bytes(); match pendarg { b'h' => { host.clear(); host.push_str(&arg); pendarg = 0; } b'd' => { _daemon = true; pendarg = 0; } b'r' => { dictdir.clear(); dictdir.push_str(&arg); pendarg = 0; } 0 => (), _ => { println!("parameter: [-d] [-h host:port] [-r dict-root-dir]"); return; } } if a[0] == b'-' { pendarg = a[1]; } } //println!("get arg host={}, daemon={}", host, daemon); //if daemon { //} dict = StarDict::new(&path::PathBuf::from(&dictdir)).unwrap(); } println!("dict size={}", dict.directories.len()); //for d in dict.info().iter() { // println!("dict: wordcount:{} {}", d.word_count, d.name); //} //webs let listener = TcpListener::bind(&host).expect("Bind Socket failed!"); //let pool = web::ThreadPool::new(4); let cr = { let mut fmtp = path::PathBuf::from(&dictdir); fmtp.push("rformat.conf"); reformat::ContentReformat::from_config_file(&fmtp) }; for stream in listener.incoming() { let stream = stream.expect("accept TCP failed!"); //pool.execute( if let Err(_) = handle_connection(stream, &dict, &cr, &dictdir) { println!("communication failed!"); } //); } println!("Shutting down."); } fn handle_connection( mut stream: TcpStream, dict: &StarDict, cr: &reformat::ContentReformat, dictdir: &str, ) -> std::io::Result<()> { //stream.set_nonblocking(false)?; //stream.set_nodelay(false)?; let mut buffer = vec![0u8; 512]; { let mut sz = 0usize; while let Ok(bn) = stream.read(&mut buffer[sz..]) { sz += bn; if bn == 0 || sz <= 4 || sz > 4096 { stream.write(b"HTTP/1.0 417 Expectation Failed\r\n\r\nFail")?; return Ok(()); } if buffer[sz - 4] == b'\r' && buffer[sz - 3] == b'\n' && buffer[sz - 2] == b'\r' && buffer[sz - 1] == b'\n' { buffer.resize(sz, 0); break; } if sz >= buffer.len() { buffer.resize(buffer.len() + 512, 0); } } } let get = b"GET /"; //("HTTP/1.0 200 OK\r\nConnection: close\r\n", "index.html"); let mut content: Vec<u8> = Vec::new(); let mut surl = StardictUrl::new(); if buffer.starts_with(get) { let mut state = 0i16; //>=0 path, -1 w, -2 p0w, -3 p1w, -4 argKey, -5 argVal let mut w = 0u8; buffer[5..] .iter() .take_while(|c| **c != b' ') .for_each(|c| { if state < 0 { if *c == b'%' { state = -2; } else if *c == b'?' { // parse args. 
state = -4; } else { if state == -2 { w = StardictUrl::byte_to_u8(*c) << 4; state = -3; } else if state == -3 { w |= StardictUrl::byte_to_u8(*c); surl.add_byte(w); state = -1; } else if state == -4 { if *c == b'=' { state = -5; } else { w = *c; } } else if state == -5 { match *c { b'&' => { state = -4; } b'-' => { if w == b'o' { w = b'O'; } else { state = -32768; } } b'0'..=b'9' => { let v: i32 = (*c - b'0') as i32; if w == b'o' { surl.add_arg_offset(v); } else if w == b'O' { // negative offset surl.add_arg_offset(-v); } else if w == b'l' { // length surl.add_arg_length(v as usize); } } _ => { state = -32768; } } } else { surl.add_byte(*c); } } } else if *c == b'/' { state = -1; } else { surl.add_path(*c, state as usize); state += 1; } }); //println!("get from url path={}, word={}, off={}, len={}", str::from_utf8(&surl.path).unwrap(), str::from_utf8(&surl.word).unwrap(), surl.offset, surl.length); if surl.length == 0 { surl.length = 10; } if surl.word.len() > 0 { if surl.path[0] == b'W' { //word lookup match dict.lookup(&surl.word) { Ok(x) => { content.extend(b"<ol>"); for (i, e) in x.iter().enumerate() { content.extend(b"<li><a href='#word_"); content.extend(i.to_string().as_bytes()); content.extend(b"'>"); content.extend(&e.word); content.extend(b" : "); content.extend(e.dictionary.name.as_bytes()); content.extend(b"</a></li>"); } content.extend(b"</ol>\n"); for (i, e) in x.iter().enumerate() { content.extend(b"<div id='word_"); content.extend(i.to_string().as_bytes()); content.extend(b"' class='res_word'>"); content.extend(e.dictionary.name.as_bytes()); content.extend(b" ("); content.extend(&e.word); content.extend(b") </div><div class='res_definition'>".iter()); for (a, b) in e .dictionary .same_type_sequence .as_bytes() .iter() .zip(e.result.split(|c| *c == 0)) { content.extend(&cr.replace_all( *a, e.dictionary.dict_path.as_bytes(), b, )); } content.extend(b"</div>\n"); } } Err(e) => println!("err: {:?}", e), } } else if surl.path[0] == b'n' { //neighbor words reference for s in dict.neighbors(&surl.word, surl.offset).take(surl.length) { content.extend(s); content.extend(b"\n"); } } else if surl.path[0] == b's' { //search with regex match str::from_utf8(&surl.word) { Ok(x) => match Regex::new(x) { Ok(v) => { content.extend(b"/~/:<ol>"); dict.search(&v).take(surl.length).for_each(|e| { content.extend(b"<li><a>"); content.extend(e); content.extend(b"</a></li>\n"); }); content.extend(b"</ol>"); } Err(e) => println!("err: {:?}", e), }, Err(e) => println!("err: {:?}", e), } } else if surl.path[0] == b'r' { //html js css page etc. 
if let Ok(fname) = str::from_utf8(&surl.word) { let mut pfile = path::PathBuf::from(dictdir); pfile.push(fname); if let Ok(mut f) = fs::File::open(pfile) { if f.read_to_end(&mut content).is_err() { content.clear(); } } } } else if surl.path[0] == b'w' { content.extend(HOME_PAGE.as_bytes()); } } else { content.extend(HOME_PAGE.as_bytes()); } } fn map_by_file(f: &[u8]) -> &'static [u8] { if let Some(s) = f.rsplit(|c| *c == b'.').next() { match s { b"js" => return b"application/javascript", b"css" => return b"text/css", b"jpg" => return b"image/jpeg", b"png" => return b"image/png", _ => (), } } b"text/html" } if content.len() > 0 { //let mut cg = 0; //content.iter_mut().for_each(|x|{ *x = if cg % 10 == 0 {b'\n'} else {b'a'}; cg = cg + 1;}); stream.write(b"HTTP/1.0 200 OK\r\nContent-Type: ")?; if surl.path[0] == b'n' { stream.write(b"text/plain")?; } else if surl.path[0] == b'r' { stream.write(map_by_file(&surl.word))?; } else { stream.write(b"text/html")?; } stream.write(b"\r\nContent-Length: ")?; stream.write(content.len().to_string().as_bytes())?; stream.write(b"\r\nConnection: close\r\n\r\n")?; //stream.write(b"\r\n\r\n")?; /* for blk in content.chunks(1024) { stream.write(blk)?; } */ stream.write(&content)?; } else { stream.write(b"HTTP/1.0 404 NOT FOUND\r\n\r\nnot found")?; } stream.flush()?; //stream.shutdown(std::net::Shutdown::Both)?; Ok(()) } const HOME_PAGE: &'static str = r"<html><head> <meta http-equiv='Content-Type' content='text/html; charset=UTF-8' /> <title>Star Dictionary</title> <style> .res_definition{ table-layout: fixed; border-left: thin dashed black; border-right: thin dashed black; padding: 5px; } .res_word{ table-layout: fixed; border: thin solid black; padding: 5px; } .numi{ width:5em; } span{ color:green; } a{ color:blue; text-decoration:underline; cursor:pointer; } blockquote{ margin:0em 0em 0em 1em; padding:0em 0em 0em 0em; } </style> <link href='/r/rhtm/jquery-ui.css' rel='stylesheet'> <script src='/r/rhtm/jquery.js'></script> <script src='/r/rhtm/jquery-ui.js'></script> <script src='/r/rhtm/autohint.js'></script> </head><body> <form id='qwFORM' action='/' method='GET'> <input id='qwt' type='text' name='w' class='ui-autocomplete-input' placeholder='input word' required='required' value=''/>/<input id='chkreg' type='checkbox'/>/ <input type='submit' value='='/> &nbsp;<input type='button' id='backwardbtn' value='<'/> <input type='button' id='forwardbtn' value='>'/> (<input type='number' class='numi' id='hint_offset' value='0' disabled/>, <input type='number' class='numi' id='result_length' value='10'/>) </form><hr/> <div id='dict_content'></div></body></html>";
{ let mut wordit = Vec::with_capacity(2 * self.directories.len()); let mut cur = Vec::with_capacity(2 * self.directories.len()); for d in self.directories.iter() { let mut x = d.neighbors(word, off); let mut s = d.neighbors_syn(word, off); cur.push(x.next()); cur.push(s.next()); wordit.push(x); wordit.push(s); } WordMergeIter { wordit, cur } }
identifier_body
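The for_each over buffer[5..] in the example above is a hand-written state machine: negative states distinguish the word itself, a %XX escape (two calls to byte_to_u8, high nibble first), and ?key=value arguments. A self-contained sketch of just the percent-decoding step (percent_decode is an illustrative name, not a function in the source):

```rust
/// Decode %XX escapes in a URL segment one byte at a time, mirroring
/// the -1 / -2 / -3 states of the parser above.
fn percent_decode(input: &[u8]) -> Vec<u8> {
    // Same permissive hex conversion as StardictUrl::byte_to_u8.
    fn hex(b: u8) -> u8 {
        match b {
            b'0'..=b'9' => b - b'0',
            b'A'..=b'F' => b - (b'A' - 10),
            b'a'..=b'f' => b - (b'a' - 10),
            _ => b,
        }
    }
    let mut out = Vec::with_capacity(input.len());
    let mut bytes = input.iter().copied();
    while let Some(c) = bytes.next() {
        if c == b'%' {
            // State -2 then -3: two hex digits combine into one byte.
            let hi = bytes.next().map(hex).unwrap_or(0);
            let lo = bytes.next().map(hex).unwrap_or(0);
            out.push((hi << 4) | lo);
        } else {
            out.push(c);
        }
    }
    out
}

fn main() {
    assert_eq!(percent_decode(b"caf%C3%A9"), "café".as_bytes());
}
```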
main.rs
extern crate regex; pub mod dict; pub mod dictionary; pub mod idx; pub mod ifo; pub mod reformat; pub mod result; pub mod syn; //pub mod web; use self::regex::bytes::Regex; use std::cmp::Ordering; use std::io::prelude::*; use std::iter::Iterator; use std::mem; use std::net::TcpListener; use std::net::TcpStream; use std::{env, fs, path, str}; //use self::regex::Error; /// StarDict contains all dictionaries found within the specified file system directory. pub struct StarDict { directories: Vec<dictionary::Dictionary>, } /// An iterator that merges several underlying iterators, deduplicating any /// word that appears in more than one of them. pub struct
<T: Iterator<Item = Vec<u8>>> { wordit: Vec<T>, cur: Vec<Option<Vec<u8>>>, } impl<'a, T: Iterator<Item = Vec<u8>>> Iterator for WordMergeIter<T> { type Item = Vec<u8>; fn next(&mut self) -> Option<Self::Item> { let l = self.cur.len(); if l == 0 { return None; } let mut x = 0usize; let mut i = 1usize; while i < l { x = match (&self.cur[x], &self.cur[i]) { (None, _) => i, (_, None) => x, (Some(a), Some(b)) => match idx::Idx::dict_cmp(&a, &b, false) { Ordering::Greater => i, Ordering::Equal => { self.cur[i] = self.wordit[i].next(); x } _ => x, }, }; i += 1; } mem::replace(&mut self.cur[x], self.wordit[x].next()) } } impl StarDict { /// Create a StarDict struct from a system path. in the path, /// there should be some directories. each directory contains /// the dict files, like .ifo, .idx, .dict, etc. /// The dictionary will be sorted by its directory name. pub fn new(root: &path::Path) -> Result<StarDict, result::DictError> { let mut sort_dirs = Vec::new(); let mut items = Vec::new(); if root.is_dir() { for it in fs::read_dir(root)? { //println!("push direc: {:?}", it); let it = it?.path(); if it.is_dir() { sort_dirs.push(it.into_boxed_path()); } } } sort_dirs.sort(); for it in sort_dirs.iter() { match dictionary::Dictionary::new(&**it, root) { Ok(d) => { items.push(d); } Err(e) => { eprintln!("ignore reason: {:?}", e); } } } Ok(StarDict { directories: items }) } /// Get the Ifo struct, which is parsed from the .ifo file. pub fn info(&self) -> Vec<&ifo::Ifo> { let mut items = Vec::with_capacity(self.directories.len()); for it in &self.directories { items.push(&it.ifo); } items } /// List the following neighbor words of `word`, from `off`. /// If `off` is a negative number, list from before `-off`. pub fn neighbors(&self, word: &[u8], off: i32) -> WordMergeIter<dictionary::DictNeighborIter> { let mut wordit = Vec::with_capacity(2 * self.directories.len()); let mut cur = Vec::with_capacity(2 * self.directories.len()); for d in self.directories.iter() { let mut x = d.neighbors(word, off); let mut s = d.neighbors_syn(word, off); cur.push(x.next()); cur.push(s.next()); wordit.push(x); wordit.push(s); } WordMergeIter { wordit, cur } } /// Search from all dictionaries. using the specified regular expression. /// to match the beginning of a word, use `^`, the ending of a word, use `$`. pub fn search<'a>(&'a self, reg: &'a Regex) -> WordMergeIter<dictionary::IdxIter> { let mut wordit = Vec::with_capacity(2 * self.directories.len()); let mut cur = Vec::with_capacity(2 * self.directories.len()); for d in self.directories.iter() { //println!("in for {}", d.ifo.name.as_str()); let mut x = d.search_regex(reg); let mut s = d.search_syn(reg); //println!("created inner iter"); cur.push(x.next()); cur.push(s.next()); //println!("created 1st value"); wordit.push(x); wordit.push(s); } WordMergeIter { wordit, cur } } /// Lookup the word. Find in the Idx case-sensitively, if not found then try to do /// case-insensitive search. Also find all case-insensitive matching words in Syn. pub fn lookup(&self, word: &[u8]) -> Result<Vec<dictionary::LookupResult>, result::DictError> { let mut ret: Vec<dictionary::LookupResult> = Vec::with_capacity(self.directories.len()); for d in self.directories.iter() { if let Ok(x) = d.lookup(word) { ret.extend(x); } } Ok(ret) } } struct StardictUrl { path: [u8; 4usize], word: Vec<u8>, offset: i32, // args for offset and length, may use BTreeMap, but it cost too much. 
length: usize, } impl StardictUrl { fn new() -> StardictUrl { StardictUrl { path: [0; 4], word: Vec::with_capacity(16), offset: 0, length: 0, } } fn byte_to_u8(b: u8) -> u8 { match b { b'0'..=b'9' => b - b'0', b'A'..=b'F' => b - (b'A' - 10), b'a'..=b'f' => b - (b'a' - 10), _ => b, } } fn add_path(&mut self, c: u8, idx: usize) { if idx < self.path.len() { self.path[idx] = c; } } fn add_byte(&mut self, c: u8) { self.word.push(c); } fn add_arg_offset(&mut self, c: i32) { self.offset = self.offset * 10 + c; } fn add_arg_length(&mut self, c: usize) { self.length = self.length * 10 + c; } } fn main() { let mut host = String::from("0.0.0.0:8888"); //let mut host = String::from("[::]:8888"); let mut dictdir = String::from("/usr/share/stardict/dic"); let dict; { let mut _daemon = false; let mut pendarg = 0u8; for arg in env::args().skip(1) { //parse options. println!("cmd args: {}", &arg); let a = arg.as_bytes(); match pendarg { b'h' => { host.clear(); host.push_str(&arg); pendarg = 0; } b'd' => { _daemon = true; pendarg = 0; } b'r' => { dictdir.clear(); dictdir.push_str(&arg); pendarg = 0; } 0 => (), _ => { println!("parameter: [-d] [-h host:port] [-r dict-root-dir]"); return; } } if a[0] == b'-' { pendarg = a[1]; } } //println!("get arg host={}, daemon={}", host, daemon); //if daemon { //} dict = StarDict::new(&path::PathBuf::from(&dictdir)).unwrap(); } println!("dict size={}", dict.directories.len()); //for d in dict.info().iter() { // println!("dict: wordcount:{} {}", d.word_count, d.name); //} //webs let listener = TcpListener::bind(&host).expect("Bind Socket failed!"); //let pool = web::ThreadPool::new(4); let cr = { let mut fmtp = path::PathBuf::from(&dictdir); fmtp.push("rformat.conf"); reformat::ContentReformat::from_config_file(&fmtp) }; for stream in listener.incoming() { let stream = stream.expect("accept TCP failed!"); //pool.execute( if let Err(_) = handle_connection(stream, &dict, &cr, &dictdir) { println!("communication failed!"); } //); } println!("Shutting down."); } fn handle_connection( mut stream: TcpStream, dict: &StarDict, cr: &reformat::ContentReformat, dictdir: &str, ) -> std::io::Result<()> { //stream.set_nonblocking(false)?; //stream.set_nodelay(false)?; let mut buffer = vec![0u8; 512]; { let mut sz = 0usize; while let Ok(bn) = stream.read(&mut buffer[sz..]) { sz += bn; if bn == 0 || sz <= 4 || sz > 4096 { stream.write(b"HTTP/1.0 417 Expectation Failed\r\n\r\nFail")?; return Ok(()); } if buffer[sz - 4] == b'\r' && buffer[sz - 3] == b'\n' && buffer[sz - 2] == b'\r' && buffer[sz - 1] == b'\n' { buffer.resize(sz, 0); break; } if sz >= buffer.len() { buffer.resize(buffer.len() + 512, 0); } } } let get = b"GET /"; //("HTTP/1.0 200 OK\r\nConnection: close\r\n", "index.html"); let mut content: Vec<u8> = Vec::new(); let mut surl = StardictUrl::new(); if buffer.starts_with(get) { let mut state = 0i16; //>=0 path, -1 w, -2 p0w, -3 p1w, -4 argKey, -5 argVal let mut w = 0u8; buffer[5..] .iter() .take_while(|c| **c != b' ') .for_each(|c| { if state < 0 { if *c == b'%' { state = -2; } else if *c == b'?' { // parse args. 
state = -4; } else { if state == -2 { w = StardictUrl::byte_to_u8(*c) << 4; state = -3; } else if state == -3 { w |= StardictUrl::byte_to_u8(*c); surl.add_byte(w); state = -1; } else if state == -4 { if *c == b'=' { state = -5; } else { w = *c; } } else if state == -5 { match *c { b'&' => { state = -4; } b'-' => { if w == b'o' { w = b'O'; } else { state = -32768; } } b'0'..=b'9' => { let v: i32 = (*c - b'0') as i32; if w == b'o' { surl.add_arg_offset(v); } else if w == b'O' { // negative offset surl.add_arg_offset(-v); } else if w == b'l' { // length surl.add_arg_length(v as usize); } } _ => { state = -32768; } } } else { surl.add_byte(*c); } } } else if *c == b'/' { state = -1; } else { surl.add_path(*c, state as usize); state += 1; } }); //println!("get from url path={}, word={}, off={}, len={}", str::from_utf8(&surl.path).unwrap(), str::from_utf8(&surl.word).unwrap(), surl.offset, surl.length); if surl.length == 0 { surl.length = 10; } if surl.word.len() > 0 { if surl.path[0] == b'W' { //word lookup match dict.lookup(&surl.word) { Ok(x) => { content.extend(b"<ol>"); for (i, e) in x.iter().enumerate() { content.extend(b"<li><a href='#word_"); content.extend(i.to_string().as_bytes()); content.extend(b"'>"); content.extend(&e.word); content.extend(b" : "); content.extend(e.dictionary.name.as_bytes()); content.extend(b"</a></li>"); } content.extend(b"</ol>\n"); for (i, e) in x.iter().enumerate() { content.extend(b"<div id='word_"); content.extend(i.to_string().as_bytes()); content.extend(b"' class='res_word'>"); content.extend(e.dictionary.name.as_bytes()); content.extend(b" ("); content.extend(&e.word); content.extend(b") </div><div class='res_definition'>".iter()); for (a, b) in e .dictionary .same_type_sequence .as_bytes() .iter() .zip(e.result.split(|c| *c == 0)) { content.extend(&cr.replace_all( *a, e.dictionary.dict_path.as_bytes(), b, )); } content.extend(b"</div>\n"); } } Err(e) => println!("err: {:?}", e), } } else if surl.path[0] == b'n' { //neighbor words reference for s in dict.neighbors(&surl.word, surl.offset).take(surl.length) { content.extend(s); content.extend(b"\n"); } } else if surl.path[0] == b's' { //search with regex match str::from_utf8(&surl.word) { Ok(x) => match Regex::new(x) { Ok(v) => { content.extend(b"/~/:<ol>"); dict.search(&v).take(surl.length).for_each(|e| { content.extend(b"<li><a>"); content.extend(e); content.extend(b"</a></li>\n"); }); content.extend(b"</ol>"); } Err(e) => println!("err: {:?}", e), }, Err(e) => println!("err: {:?}", e), } } else if surl.path[0] == b'r' { //html js css page etc. 
if let Ok(fname) = str::from_utf8(&surl.word) { let mut pfile = path::PathBuf::from(dictdir); pfile.push(fname); if let Ok(mut f) = fs::File::open(pfile) { if f.read_to_end(&mut content).is_err() { content.clear(); } } } } else if surl.path[0] == b'w' { content.extend(HOME_PAGE.as_bytes()); } } else { content.extend(HOME_PAGE.as_bytes()); } } fn map_by_file(f: &[u8]) -> &'static [u8] { if let Some(s) = f.rsplit(|c| *c == b'.').next() { match s { b"js" => return b"application/javascript", b"css" => return b"text/css", b"jpg" => return b"image/jpeg", b"png" => return b"image/png", _ => (), } } b"text/html" } if content.len() > 0 { //let mut cg = 0; //content.iter_mut().for_each(|x|{ *x = if cg % 10 == 0 {b'\n'} else {b'a'}; cg = cg + 1;}); stream.write(b"HTTP/1.0 200 OK\r\nContent-Type: ")?; if surl.path[0] == b'n' { stream.write(b"text/plain")?; } else if surl.path[0] == b'r' { stream.write(map_by_file(&surl.word))?; } else { stream.write(b"text/html")?; } stream.write(b"\r\nContent-Length: ")?; stream.write(content.len().to_string().as_bytes())?; stream.write(b"\r\nConnection: close\r\n\r\n")?; //stream.write(b"\r\n\r\n")?; /* for blk in content.chunks(1024) { stream.write(blk)?; } */ stream.write(&content)?; } else { stream.write(b"HTTP/1.0 404 NOT FOUND\r\n\r\nnot found")?; } stream.flush()?; //stream.shutdown(std::net::Shutdown::Both)?; Ok(()) } const HOME_PAGE: &'static str = r"<html><head> <meta http-equiv='Content-Type' content='text/html; charset=UTF-8' /> <title>Star Dictionary</title> <style> .res_definition{ table-layout: fixed; border-left: thin dashed black; border-right: thin dashed black; padding: 5px; } .res_word{ table-layout: fixed; border: thin solid black; padding: 5px; } .numi{ width:5em; } span{ color:green; } a{ color:blue; text-decoration:underline; cursor:pointer; } blockquote{ margin:0em 0em 0em 1em; padding:0em 0em 0em 0em; } </style> <link href='/r/rhtm/jquery-ui.css' rel='stylesheet'> <script src='/r/rhtm/jquery.js'></script> <script src='/r/rhtm/jquery-ui.js'></script> <script src='/r/rhtm/autohint.js'></script> </head><body> <form id='qwFORM' action='/' method='GET'> <input id='qwt' type='text' name='w' class='ui-autocomplete-input' placeholder='input word' required='required' value=''/>/<input id='chkreg' type='checkbox'/>/ <input type='submit' value='='/> &nbsp;<input type='button' id='backwardbtn' value='<'/> <input type='button' id='forwardbtn' value='>'/> (<input type='number' class='numi' id='hint_offset' value='0' disabled/>, <input type='number' class='numi' id='result_length' value='10'/>) </form><hr/> <div id='dict_content'></div></body></html>";
WordMergeIter
identifier_name
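handle_connection in the examples above frames its reply by hand: a status line, a Content-Type chosen by map_by_file, an exact Content-Length, then Connection: close so the client simply reads to end-of-stream. A minimal sketch of that framing, generic over std::io::Write so it can run without a socket (that generalization, and write_all in place of the source's write, are my choices — write_all is the variant that guarantees the whole buffer is sent):

```rust
use std::io::{self, Write};

/// Write a complete HTTP/1.0 response: headers sized to the body,
/// then the body itself, as handle_connection does on a TcpStream.
fn write_response(mut w: impl Write, content_type: &[u8], body: &[u8]) -> io::Result<()> {
    w.write_all(b"HTTP/1.0 200 OK\r\nContent-Type: ")?;
    w.write_all(content_type)?;
    w.write_all(b"\r\nContent-Length: ")?;
    w.write_all(body.len().to_string().as_bytes())?;
    w.write_all(b"\r\nConnection: close\r\n\r\n")?;
    w.write_all(body)?;
    w.flush()
}

fn main() -> io::Result<()> {
    // A Vec<u8> implements Write, so the framing is testable in memory.
    let mut buf = Vec::new();
    write_response(&mut buf, b"text/plain", b"hello")?;
    assert!(buf.starts_with(b"HTTP/1.0 200 OK"));
    assert!(buf.ends_with(b"\r\n\r\nhello"));
    Ok(())
}
```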
main.rs
extern crate regex; pub mod dict; pub mod dictionary; pub mod idx; pub mod ifo; pub mod reformat; pub mod result; pub mod syn; //pub mod web; use self::regex::bytes::Regex; use std::cmp::Ordering; use std::io::prelude::*; use std::iter::Iterator; use std::mem; use std::net::TcpListener; use std::net::TcpStream; use std::{env, fs, path, str}; //use self::regex::Error; /// StarDict contains all dictionary found within the specified file system directory. pub struct StarDict { directories: Vec<dictionary::Dictionary>, } /// An iterator that merges several underlying iterators. try to dedup one duplicated /// word from each iterator. pub struct WordMergeIter<T: Iterator<Item = Vec<u8>>> { wordit: Vec<T>, cur: Vec<Option<Vec<u8>>>, } impl<'a, T: Iterator<Item = Vec<u8>>> Iterator for WordMergeIter<T> { type Item = Vec<u8>; fn next(&mut self) -> Option<Self::Item> { let l = self.cur.len(); if l == 0 { return None; } let mut x = 0usize; let mut i = 1usize; while i < l { x = match (&self.cur[x], &self.cur[i]) { (None, _) => i, (_, None) => x, (Some(a), Some(b)) => match idx::Idx::dict_cmp(&a, &b, false) { Ordering::Greater => i, Ordering::Equal => { self.cur[i] = self.wordit[i].next(); x } _ => x, }, }; i += 1; } mem::replace(&mut self.cur[x], self.wordit[x].next()) } } impl StarDict { /// Create a StarDict struct from a system path. in the path, /// there should be some directories. each directory contains /// the dict files, like .ifo, .idx, .dict, etc. /// The dictionary will be sorted by its directory name. pub fn new(root: &path::Path) -> Result<StarDict, result::DictError> { let mut sort_dirs = Vec::new(); let mut items = Vec::new(); if root.is_dir() { for it in fs::read_dir(root)? { //println!("push direc: {:?}", it); let it = it?.path(); if it.is_dir() { sort_dirs.push(it.into_boxed_path()); } } } sort_dirs.sort(); for it in sort_dirs.iter() { match dictionary::Dictionary::new(&**it, root) { Ok(d) => { items.push(d); } Err(e) => { eprintln!("ignore reason: {:?}", e); } } } Ok(StarDict { directories: items }) } /// Get the Ifo struct, which is parsed from the .ifo file. pub fn info(&self) -> Vec<&ifo::Ifo> { let mut items = Vec::with_capacity(self.directories.len()); for it in &self.directories { items.push(&it.ifo); } items } /// List the following neighbor words of `word`, from `off`. /// If `off` is a negative number, list from before `-off`. pub fn neighbors(&self, word: &[u8], off: i32) -> WordMergeIter<dictionary::DictNeighborIter> { let mut wordit = Vec::with_capacity(2 * self.directories.len()); let mut cur = Vec::with_capacity(2 * self.directories.len()); for d in self.directories.iter() { let mut x = d.neighbors(word, off); let mut s = d.neighbors_syn(word, off); cur.push(x.next()); cur.push(s.next()); wordit.push(x); wordit.push(s); } WordMergeIter { wordit, cur } } /// Search from all dictionaries. using the specified regular expression. /// to match the beginning of a word, use `^`, the ending of a word, use `$`. pub fn search<'a>(&'a self, reg: &'a Regex) -> WordMergeIter<dictionary::IdxIter> { let mut wordit = Vec::with_capacity(2 * self.directories.len()); let mut cur = Vec::with_capacity(2 * self.directories.len()); for d in self.directories.iter() { //println!("in for {}", d.ifo.name.as_str()); let mut x = d.search_regex(reg); let mut s = d.search_syn(reg); //println!("created inner iter"); cur.push(x.next()); cur.push(s.next()); //println!("created 1st value"); wordit.push(x); wordit.push(s); } WordMergeIter { wordit, cur } } /// Lookup the word. 
Find in the Idx case-sensitively, if not found then try to do /// case-insensitive search. Also find all case-insensitive matching words in Syn. pub fn lookup(&self, word: &[u8]) -> Result<Vec<dictionary::LookupResult>, result::DictError> { let mut ret: Vec<dictionary::LookupResult> = Vec::with_capacity(self.directories.len()); for d in self.directories.iter() { if let Ok(x) = d.lookup(word) { ret.extend(x); } } Ok(ret) } } struct StardictUrl { path: [u8; 4usize], word: Vec<u8>, offset: i32, // args for offset and length, may use BTreeMap, but it cost too much. length: usize, } impl StardictUrl { fn new() -> StardictUrl { StardictUrl { path: [0; 4], word: Vec::with_capacity(16), offset: 0, length: 0, } } fn byte_to_u8(b: u8) -> u8 { match b { b'0'..=b'9' => b - b'0', b'A'..=b'F' => b - (b'A' - 10), b'a'..=b'f' => b - (b'a' - 10), _ => b, } } fn add_path(&mut self, c: u8, idx: usize) { if idx < self.path.len() { self.path[idx] = c; } } fn add_byte(&mut self, c: u8) { self.word.push(c); } fn add_arg_offset(&mut self, c: i32) { self.offset = self.offset * 10 + c; } fn add_arg_length(&mut self, c: usize) { self.length = self.length * 10 + c; } } fn main() { let mut host = String::from("0.0.0.0:8888"); //let mut host = String::from("[::]:8888"); let mut dictdir = String::from("/usr/share/stardict/dic"); let dict; { let mut _daemon = false; let mut pendarg = 0u8; for arg in env::args().skip(1) { //parse options. println!("cmd args: {}", &arg); let a = arg.as_bytes(); match pendarg { b'h' => { host.clear(); host.push_str(&arg); pendarg = 0; } b'd' => { _daemon = true; pendarg = 0; } b'r' => { dictdir.clear(); dictdir.push_str(&arg); pendarg = 0; } 0 => (), _ => { println!("parameter: [-d] [-h host:port] [-r dict-root-dir]"); return; } } if a[0] == b'-' { pendarg = a[1]; } } //println!("get arg host={}, daemon={}", host, daemon); //if daemon { //} dict = StarDict::new(&path::PathBuf::from(&dictdir)).unwrap(); } println!("dict size={}", dict.directories.len()); //for d in dict.info().iter() { // println!("dict: wordcount:{} {}", d.word_count, d.name); //} //webs let listener = TcpListener::bind(&host).expect("Bind Socket failed!"); //let pool = web::ThreadPool::new(4); let cr = { let mut fmtp = path::PathBuf::from(&dictdir); fmtp.push("rformat.conf"); reformat::ContentReformat::from_config_file(&fmtp) }; for stream in listener.incoming() { let stream = stream.expect("accept TCP failed!"); //pool.execute( if let Err(_) = handle_connection(stream, &dict, &cr, &dictdir) { println!("communication failed!"); } //); } println!("Shutting down."); } fn handle_connection( mut stream: TcpStream, dict: &StarDict, cr: &reformat::ContentReformat, dictdir: &str, ) -> std::io::Result<()> { //stream.set_nonblocking(false)?; //stream.set_nodelay(false)?; let mut buffer = vec![0u8; 512]; { let mut sz = 0usize; while let Ok(bn) = stream.read(&mut buffer[sz..]) { sz += bn; if bn == 0 || sz <= 4 || sz > 4096 { stream.write(b"HTTP/1.0 417 Expectation Failed\r\n\r\nFail")?; return Ok(()); } if buffer[sz - 4] == b'\r' && buffer[sz - 3] == b'\n' && buffer[sz - 2] == b'\r' && buffer[sz - 1] == b'\n' { buffer.resize(sz, 0); break; } if sz >= buffer.len() { buffer.resize(buffer.len() + 512, 0); } } } let get = b"GET /"; //("HTTP/1.0 200 OK\r\nConnection: close\r\n", "index.html"); let mut content: Vec<u8> = Vec::new(); let mut surl = StardictUrl::new(); if buffer.starts_with(get) { let mut state = 0i16; //>=0 path, -1 w, -2 p0w, -3 p1w, -4 argKey, -5 argVal let mut w = 0u8; buffer[5..] 
.iter() .take_while(|c| **c != b' ') .for_each(|c| { if state < 0 { if *c == b'%' { state = -2; } else if *c == b'?' { // parse args. state = -4; } else { if state == -2 { w = StardictUrl::byte_to_u8(*c) << 4; state = -3; } else if state == -3 { w |= StardictUrl::byte_to_u8(*c); surl.add_byte(w); state = -1; } else if state == -4 { if *c == b'=' { state = -5; } else { w = *c; } } else if state == -5 { match *c { b'&' => { state = -4; } b'-' => { if w == b'o' { w = b'O'; } else { state = -32768; } } b'0'..=b'9' => { let v: i32 = (*c - b'0') as i32; if w == b'o' { surl.add_arg_offset(v); } else if w == b'O' { // negative offset surl.add_arg_offset(-v); } else if w == b'l' { // length surl.add_arg_length(v as usize); } } _ => { state = -32768; } } } else { surl.add_byte(*c); } } } else if *c == b'/' { state = -1; } else { surl.add_path(*c, state as usize); state += 1; } }); //println!("get from url path={}, word={}, off={}, len={}", str::from_utf8(&surl.path).unwrap(), str::from_utf8(&surl.word).unwrap(), surl.offset, surl.length); if surl.length == 0 { surl.length = 10; } if surl.word.len() > 0 { if surl.path[0] == b'W' { //word lookup match dict.lookup(&surl.word) { Ok(x) => { content.extend(b"<ol>"); for (i, e) in x.iter().enumerate() { content.extend(b"<li><a href='#word_"); content.extend(i.to_string().as_bytes()); content.extend(b"'>"); content.extend(&e.word); content.extend(b" : "); content.extend(e.dictionary.name.as_bytes()); content.extend(b"</a></li>"); } content.extend(b"</ol>\n"); for (i, e) in x.iter().enumerate() { content.extend(b"<div id='word_"); content.extend(i.to_string().as_bytes()); content.extend(b"' class='res_word'>"); content.extend(e.dictionary.name.as_bytes()); content.extend(b" ("); content.extend(&e.word); content.extend(b") </div><div class='res_definition'>".iter()); for (a, b) in e .dictionary .same_type_sequence .as_bytes() .iter() .zip(e.result.split(|c| *c == 0)) { content.extend(&cr.replace_all( *a, e.dictionary.dict_path.as_bytes(), b, )); } content.extend(b"</div>\n"); } } Err(e) => println!("err: {:?}", e), } } else if surl.path[0] == b'n' { //neighbor words reference for s in dict.neighbors(&surl.word, surl.offset).take(surl.length) { content.extend(s); content.extend(b"\n"); } } else if surl.path[0] == b's' { //search with regex match str::from_utf8(&surl.word) { Ok(x) => match Regex::new(x) { Ok(v) => { content.extend(b"/~/:<ol>"); dict.search(&v).take(surl.length).for_each(|e| { content.extend(b"<li><a>"); content.extend(e); content.extend(b"</a></li>\n"); }); content.extend(b"</ol>"); } Err(e) => println!("err: {:?}", e), }, Err(e) => println!("err: {:?}", e), } } else if surl.path[0] == b'r' { //html js css page etc. if let Ok(fname) = str::from_utf8(&surl.word) { let mut pfile = path::PathBuf::from(dictdir); pfile.push(fname); if let Ok(mut f) = fs::File::open(pfile) { if f.read_to_end(&mut content).is_err() { content.clear(); } } } } else if surl.path[0] == b'w' { content.extend(HOME_PAGE.as_bytes()); } } else { content.extend(HOME_PAGE.as_bytes()); } } fn map_by_file(f: &[u8]) -> &'static [u8] { if let Some(s) = f.rsplit(|c| *c == b'.').next() { match s { b"js" => return b"application/javascript", b"css" => return b"text/css", b"jpg" => return b"image/jpeg", b"png" => return b"image/png", _ => (), } } b"text/html" } if content.len() > 0 { //let mut cg = 0; //content.iter_mut().for_each(|x|{ *x = if cg % 10 == 0 {b'\n'} else {b'a'}; cg = cg + 1;}); stream.write(b"HTTP/1.0 200 OK\r\nContent-Type: ")?;
if surl.path[0] == b'n' { stream.write(b"text/plain")?; } else if surl.path[0] == b'r' { stream.write(map_by_file(&surl.word))?; } else { stream.write(b"text/html")?; } stream.write(b"\r\nContent-Length: ")?; stream.write(content.len().to_string().as_bytes())?; stream.write(b"\r\nConnection: close\r\n\r\n")?; //stream.write(b"\r\n\r\n")?; /* for blk in content.chunks(1024) { stream.write(blk)?; } */ stream.write(&content)?; } else { stream.write(b"HTTP/1.0 404 NOT FOUND\r\n\r\nnot found")?; } stream.flush()?; //stream.shutdown(std::net::Shutdown::Both)?; Ok(()) } const HOME_PAGE: &'static str = r"<html><head> <meta http-equiv='Content-Type' content='text/html; charset=UTF-8' /> <title>Star Dictionary</title> <style> .res_definition{ table-layout: fixed; border-left: thin dashed black; border-right: thin dashed black; padding: 5px; } .res_word{ table-layout: fixed; border: thin solid black; padding: 5px; } .numi{ width:5em; } span{ color:green; } a{ color:blue; text-decoration:underline; cursor:pointer; } blockquote{ margin:0em 0em 0em 1em; padding:0em 0em 0em 0em; } </style> <link href='/r/rhtm/jquery-ui.css' rel='stylesheet'> <script src='/r/rhtm/jquery.js'></script> <script src='/r/rhtm/jquery-ui.js'></script> <script src='/r/rhtm/autohint.js'></script> </head><body> <form id='qwFORM' action='/' method='GET'> <input id='qwt' type='text' name='w' class='ui-autocomplete-input' placeholder='input word' required='required' value=''/>/<input id='chkreg' type='checkbox'/>/ <input type='submit' value='='/> &nbsp;<input type='button' id='backwardbtn' value='<'/> <input type='button' id='forwardbtn' value='>'/> (<input type='number' class='numi' id='hint_offset' value='0' disabled/>, <input type='number' class='numi' id='result_length' value='10'/>) </form><hr/> <div id='dict_content'></div></body></html>";
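The server above percent-decodes the word portion of the URL by hand: StardictUrl::byte_to_u8 maps an ASCII hex digit to its 4-bit value, and parser states -2/-3 combine two such nibbles into one byte. Below is a minimal Python sketch of the same decoding idea; the names hex_nibble and percent_decode are illustrative only and are not part of the Rust sources.

def hex_nibble(b: int) -> int:
    # Mirror byte_to_u8: map an ASCII hex digit to its 4-bit value,
    # passing any other byte through unchanged.
    if 0x30 <= b <= 0x39:        # '0'..'9'
        return b - 0x30
    if 0x41 <= b <= 0x46:        # 'A'..'F'
        return b - (0x41 - 10)
    if 0x61 <= b <= 0x66:        # 'a'..'f'
        return b - (0x61 - 10)
    return b

def percent_decode(raw: bytes) -> bytes:
    # Combine the two nibbles after each '%' into a single byte,
    # like the -2/-3 states of the URL parser above (high nibble first).
    out = bytearray()
    i = 0
    while i < len(raw):
        if raw[i] == 0x25 and i + 2 < len(raw):   # '%'
            out.append((hex_nibble(raw[i + 1]) << 4) | hex_nibble(raw[i + 2]))
            i += 3
        else:
            out.append(raw[i])
            i += 1
    return bytes(out)

assert percent_decode(b"caf%C3%A9") == "café".encode("utf-8")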
jsontest.js
//assuming this comes from an ajax call var info = [{ "Event": "Pals - The Irish at Gallipoli", "Start Date": "1st April 2015", "Finish Date": "30th April 2015", "Time": "During Museum opening hours", "Location": "Decorative Arts & History", "Description": "As part of the Museum's WW1 centenary programme, award-winning innovators ANU Productions present an immersive World War I experience, based on the events surrounding the campaign at Gallipoli in Turkey and inspired by the previously untold stories of the 7th Battalion of the Royal Irish Fusiliers and the everyday lives of the Irish people who were affected by the Great War. The project is a partnership between ANU Productions, the National Museum of Ireland and the Department of Arts, Heritage and Gaeltacht and is supported by the National Archives of Ireland. Performances will run Wednesday - Saturday: 11.00am, 12.00pm, 2.00pm, 3.00pm and 4.00pm Sunday - 2.00pm, 3.00pm and 4.00pm. Tickets: 5 euro.", "Booking Information": "For More Information and Tickets: www.pals-theirishatgallipoli.com", "Format": "Performance", "Audience": "Adults" },{ "Event": "The Big Barracks Egg Hunt", "Start Date": "5th April 2015", "Finish Date": "5th April 2015", "Time": "3-4.30pm", "Location": "Decorative Arts & History", "Description": " Can you follow the trail and test your knowledge to find the golden egg? Complete the trail to receive an egg-cellent edible reward!", "Booking Information": "No booking required.", "Format": "Family Event", "Audience": "Families" },{ "Event": "The Easter Rising: Irish Volunteer Drills", "Start Date": "18th April 2015", "Finish Date": "18th April 2015", "Time": "2-4pm", "Location": "Decorative Arts & History", "Description": " Come along and see re-enactors practice drills as the Irish Volunteers did between 1913 and 1916. In the period before the 1916 Easter Rising the Irish Volunteers organised openly and trained in the use of rifles and military skills. See the uniforms and weapons used by the Volunteers and how they were taught to use them.", "Booking Information": "Clarke Square, no booking required.", "Format": "Historical Re-enactment", "Audience": "All ages" },{
"Event": "Seminar: Method and Form", "Start Date": "25th April 2015", "Finish Date": "25th April 2015", "Time": "2-4.pm", "Location": "Decorative Arts & History", "Description": "Marking Year of Irish Design 2015, and in partnership with the Dublin Decorative & Fine Arts Society, 'Method and Form' is an afternoon of talks exploring how past and present come together in contemporary Irish applied arts. Speakers include makers Zelouf and Bell, curator Dr Jennifer Goff (NMI) and Dr Audrey Whitty, Keeper of Art & Industry (NMI).", "Booking Information": "Go to www.museum.ie for a full programme.Booking essential. Tickets 5 euro.", "Format": "Historical Re-enactment", "Audience": "All ages" },{ "Event": "70th Anniversary of the end of World War Two", "Start Date": "6th June 2015", "Finish Date": "6th June 2015", "Time": "2-4.30pm", "Location": "Decorative Arts & History", "Description": "Come along and see uniforms and equipment used by the Allied (American, British, French, Soviet) and Axis (German, Italian) Forces during the Second World War. This is an interactive way of learning about this conflict which ended 70 years ago. Presented by historical re-enactors, The WW2 Club.", "Booking Information": "Clarke Square, no booking required.", "Format": "Historical Re-enactment", "Audience": "All ages" },{ "Event": "Storytelling - Viking Stories of Gods, Heroes, Baddies and Brutes!", "Start Date": "18th April 2015", "Finish Date": "18th April 2015", "Time": "2.30-3.30pm", "Location": "Archaeology", "Description": "Join Niall de Búrca, live, for mighty tales of Gods, heroes, baddies and brutes. Niall has performed to audiences all over the world in places as diverse as Tehran, Los Angeles, Buenos Aires and Bucharest and his motto is: Níor bhris focal maith fiacal riamh!...A good word never broke a tooth!", "Booking Information": "This event will take place in the Ceramics Room, First Floor. No booking required, places will be allocated on a first come basis. Not wheelchair accessible.", "Format": "Family Event", "Audience": "Ages 6+" },{ "Event": "Bealtaine Workshop - Irish Leather Craft", "Start Date": "7th May 2015", "Finish Date": "8th May 2015", "Time": "11am-3pm", "Location": "Archaeology", "Description": "To celebrate Bealtaine in the Year of Irish Design 2015, the National Museum is delighted to welcome designer and artist Róisín Gartland who will facilitate a two-day workshop. Participants will work with expert leather-worker Róisín to create their own leather object based on ancient examples of Irish leather design.", "Booking Information": "Booking required, please phone 01 6486339 or email educationarch@museum.ie. Located in the Learning Resource Room, first floor. Not wheelchair accessible.", "Format": "Workshop", "Audience": "Adults" },{ "Event": "National Drawing Day 2015 with Kenneth Donfield, National College of Art & Design (NCAD)", "Start Date": "16th May 2015", "Finish Date": "16th May 2015", "Time": "1am-4pm", "Location": "Archaeology", "Description": "Observe and sketch the Museum's collections and beautiful 19th century building with guidance from artist and tutor in Drawing and Painting at NCAD, Kenneth Donfield. Beginners and experienced artists are welcome.", "Booking Information": "Booking required, please phone 01 6486339 or email educationarch@museum.ie. Located on the ground floor and first floor. 
Partially wheelchair accessible.", "Format": "Workshop", "Audience": "Adults" },{ "Event": "Lunch-time Talk: You Talking to Me?", "Start Date": "6th April 2015", "Finish Date": "6th April 2015", "Time": "1am-1.45pm", "Location": "Natural History", "Description": "Whether it is finding a mate or deterring a predator, animals have evolved a variety of methods of communicating, from the familiar visual or auditory to the unusual seismic or thermal to the downright bizarre such as dance! Join zoologist Donal Vaughan to discover more about the world of animal communication.", "Booking Information": "Booking required, email educationarch@museum.ie or call 01 648 6332.", "Format": "Talk", "Audience": "Adults" },{ "Event": "Native Species Weekend at Dublin Zoo", "Start Date": "23rd April 2015", "Finish Date": "24th April 2015", "Time": "See www.dublinzoo.ie", "Location": "Natural History", "Description": "Join us at our stand at Dublin Zoo as we celebrate Native Species Weekend.", "Booking Information": "For information on entry fees and times, please see the Dublin Zoo website www.dublinzoo.ie.", "Format": "Family Event", "Audience": "Families" },{ "Event": "Preserving the Peace", "Start Date": "1st April 2015", "Finish Date": "30th April 2015", "Time": "During Opening Hours", "Location": "Country Life", "Description": "From gallows, shillelaghs and manacles to the hangman's rope! This exhibition commemorates 200 years of law and order through objects relating to policing, crime, prisons and punishment, highlighting the sometimes uncomfortable reality of Ireland's official past.", "Booking Information": "n/a", "Format": "Temporary Exhibition", "Audience": "All Ages" },{ "Event": "Kids Museum: Treasure Hunt", "Start Date": " 10th April 2015", "Finish Date": "10th April 2015", "Time": "2pm-4pm", "Location": "Country Life", "Description": "Join us for an exciting adventure around the grounds of Turlough Park. Work together to see if you can find all the clues and figure them out. Complete the hunt to get the treasure! ", "Booking Information": "Booking required. Weather appropriate clothing recommended.", "Format": "Family Event", "Audience": "Families" },{ "Event": "Preserving the Peace", "Start Date": "1st May 2015", "Finish Date": "24th May 2015", "Time": "During Opening Hours", "Location": "Country Life", "Description": "From gallows, shillelaghs and manacles to the hangman's rope! This exhibition commemorates 200 years of law and order through objects relating to policing, crime, prisons and punishment, highlighting the sometimes uncomfortable reality of Ireland's official past.", "Booking Information": "n/a", "Format": "Temporary Exhibition", "Audience": "All Ages" },{ "Event": "Bealtaine: May Flowers", "Start Date": "1st May 2015", "Finish Date": "1st May 2015", "Time": "2pm-3:30pm", "Location": "Country Life", "Description": "Find out how flowers were used to welcome the Summer long ago. Learn about traditional May Day customs with curator Clodagh Doyle. Create your own paper flowers to make a May posie with Aoife O'Toole.", "Booking Information": "Booking required", "Format": "Workshop", "Audience": "Adults" },{ "Event": "Féile na Tuaithe 2015: Free Family Festival", "Start Date": "23rd May 2015", "Finish Date": "24th May 2015", "Time": "12pm-5pm", "Location": "Country Life", "Description": "Enjoy a great family day out, perfect for all ages! 
Stop by our new Family Area for weaving, drumming, mosaic-making, puppet shows, storytelling, facepainting, and look at donkeys, goats and birds of prey. Visit our Craft and Food Villages and the Entertainment Stage for the best sites, sounds, and tastes in Traditional and Contemporary Ireland, complete with demonstrators and performers of international acclaim. Stroll through the Museum galleries and grounds on scarecrow and fairy trails. There is so much to do that you will need to return both days! ", "Booking Information": "See www.museum.ie for Saturday and Sunday's full listings.", "Format": "Open Day", "Audience": "Families" }]; var json = '{"glossary":{"title":"example glossary","GlossDiv":{"title":"S","GlossList":{"GlossEntry":{"ID":"SGML","SortAs":"SGML","GlossTerm":"Standard Generalized Markup Language","Acronym":"SGML","Abbrev":"ISO 8879:1986","GlossDef":{"para":"A meta-markup language, used to create markup languages such as DocBook.","ID":"44","str":"SGML","GlossSeeAlso":["GML","XML"]},"GlossSee":"markup"}}}}}'; function getValues(obj, key) { var objects = []; for (var i in obj) { if (!obj.hasOwnProperty(i)) continue; if (typeof obj[i] == 'object') { objects = objects.concat(getValues(obj[i], key)); } else if (i == key) { objects.push(obj[i]); } } return objects; } var js = JSON.parse(json); console.log(getValues(js,'ID')); $(document).on("pageinit", "#info-page", function () { var li = ""; $.each(info, function (i,Event) { li += '<li><a href="#" id="' + i + '" class="info-go">' + Event.Event + '</a></li>'; }); $("#prof-list").append(li).promise().done(function () { $(this).on("click", ".info-go", function (e) { e.preventDefault(); $("#details-page").data("info", info[this.id]); $.mobile.changePage("#details-page"); }); $(this).listview("refresh"); }); }); $(document).on("pagebeforeshow", "#details-page", function () { var info = $(this).data("info"); var info_view = ""; for (var key in info) { info_view += '<div class="ui-grid-a"><div class="ui-block-a"><div class="ui-bar field" style="font-weight : bold; text-align: left;">' + key + '</div></div><div class="ui-block-b"><div class="ui-bar value" style="width : 80%">' + info[key] + '</div></div></div>'; } $(this).find("[data-role=content]").html(info_view); });
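For comparison, the recursive getValues walk above translates almost line-for-line into Python. This sketch is not part of the original file; get_values is an illustrative name. It mirrors the same logic: descend into nested objects and arrays, and collect leaf values stored under a given key.

import json

def get_values(obj, key):
    # Depth-first walk: recurse into dicts/lists, collect leaf values under `key`.
    found = []
    pairs = obj.items() if isinstance(obj, dict) else enumerate(obj) if isinstance(obj, list) else ()
    for k, v in pairs:
        if isinstance(v, (dict, list)):
            found.extend(get_values(v, key))   # containers: keep descending
        elif k == key:
            found.append(v)                    # leaf stored under the key
    return found

doc = json.loads('{"GlossEntry": {"ID": "SGML", "GlossDef": {"ID": "44", "para": "..."}}}')
print(get_values(doc, "ID"))  # ['SGML', '44']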
Python_SQL_Project_CodeBase-DA.py
import argparse as agp import getpass import os from myTools import MSSQL_DBConnector as mssql from myTools import DBConnector as dbc import myTools.ContentObfuscation as ce try: import pandas as pd except ImportError: raise ImportError("pandas is required - install it with 'pip install pandas'") def printSplashScreen(): print("*************************************************************************************************") print("\t THIS SCRIPT EXTRACTS SURVEY DATA FROM THE SAMPLE SEEN IN SQL CLASS") print("\t IT REPLICATES THE BEHAVIOUR OF A STORED PROCEDURE & TRIGGER IN A PROGRAMMATIC WAY") print("\t COMMAND LINE OPTIONS ARE:") print("\t\t -h or --help: print the help content on the console") print("*************************************************************************************************\n\n") def processCLIArguments()-> dict: retParametersDictionary:dict = None dbpassword:str = '' obfuscator: ce.ContentObfuscation = ce.ContentObfuscation() try: argParser:agp.ArgumentParser = agp.ArgumentParser(add_help=True) argParser.add_argument("-n", "--DSN", dest="dsn", action='store', default= None, help="Sets the SQL Server DSN descriptor file - Takes precedence over all access parameters", type=str) argParser.add_argument("-s", "--DBServer", dest="dbserver", action='store', help="Sets the SQL Server DB Server Address", type=str) argParser.add_argument("-d", "--DBName", dest="dbname", action='store', help="Sets the SQL Server DB Name", type=str) argParser.add_argument("-u", "--DBUser", dest="dbusername", action='store', help="Sets the SQL Server DB Username", type=str) argParser.add_argument("-p", "--DBUserPassword", dest="dbuserpassword", action='store', help="Sets the SQL Server DB User Password", type=str) argParser.add_argument("-t", "--UseTrustedConnection", dest="trustedmode", action='store', default=False, help="Sets the SQL Server connection in Trusted Connection mode (default = False)", type=bool) argParser.add_argument("-v", "--TargetViewName", dest="viewname", action='store', default="dbo.vw_AllSurveyData", help="Sets the SQL Server target view name (this can be SCHEMA.VIEWNAME) - (default = dbo.vw_AllSurveyData)", type=str) argParser.add_argument("-f", "--SerialisedPersistenceFilePath", dest="persistencefilepath", action='store', default=os.getcwd() + os.sep + "lastKnownSurveyStructure.pkl", help="Sets the Persistence File Path (default = script current directory given by os.getcwd())", type=str) argParser.add_argument("-r", "--ResultsFilePath", dest="resultsfilepath", action='store', default=os.getcwd() + os.sep + "results.csv", help="Sets the Results (CSV) File Path (default = script current directory given by os.getcwd())", type=str) argParsingResults:agp.Namespace = argParser.parse_args() if(argParsingResults.dbserver is None): raise Exception("You must provide a SQL Server Address using the -s or --DBServer CLI argument") if(argParsingResults.dbname is None): raise Exception("You must provide a target database name using the -d or --DBName CLI argument") if(argParsingResults.trustedmode == False): if(argParsingResults.dbusername is None): raise Exception("You must provide a DB user account name using the -u or --DBUser CLI argument (or use TrustedConnection Mode)") #Password should not be provided directly in cleartext on the CLI if(argParsingResults.dbuserpassword is None): dbpassword = obfuscator.obfuscate(getpass.getpass('Please type the DB user password (no echo): ')) else: dbpassword = obfuscator.obfuscate(argParsingResults.dbuserpassword) else:
if(argParsingResults.dbusername is not None): raise Exception("You are using the TrustedConnection Mode: do not also provide a DB user name") retParametersDictionary = { "dsn" : argParsingResults.dsn, "dbserver" : argParsingResults.dbserver, "dbname" : argParsingResults.dbname, "dbusername" : argParsingResults.dbusername, "dbuserpassword" : dbpassword, "trustedmode" : argParsingResults.trustedmode, "viewname" : argParsingResults.viewname, "persistencefilepath": argParsingResults.persistencefilepath, "resultsfilepath" : argParsingResults.resultsfilepath } except Exception as e: print("Command Line arguments processing error: " + str(e)) return retParametersDictionary def getSurveyStructure(connector: mssql.MSSQL_DBConnector) -> pd.DataFrame: surveyStructResults = None if(connector is not None): if(connector.IsConnected == True): strQuerySurveyStruct = """ SELECT SurveyId, QuestionId FROM SurveyStructure ORDER BY SurveyId, QuestionId """ try: surveyStructResults:pd.DataFrame = connector.ExecuteQuery_withRS(strQuerySurveyStruct) except Exception as excp: raise Exception('GetSurveyStructure(): Cannot execute query').with_traceback(excp.__traceback__) else: raise Exception("GetSurveyStructure(): no Database connection") else: raise Exception("GetSurveyStructure(): Database connection object is None") return surveyStructResults def doesPersistenceFileExist(persistenceFilePath: str)-> bool: success = True try: file = open(persistenceFilePath) file.close() except FileNotFoundError: success = False return success def isPersistenceFileDirectoryWritable(persistenceFilePath: str)-> bool: success = True if(os.access(os.path.dirname(persistenceFilePath), os.W_OK) == False): success = False return success
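Despite the .pkl default file name and the "pickled" wording in some messages, the persistence layer in this script round-trips the survey structure through CSV and relies on pandas.DataFrame.equals for change detection. A small self-contained sketch of that mechanism (the file name lastKnownSurveyStructure.csv is hypothetical):

import pandas as pd

# Save the structure, reload it, and compare - the same round-trip the
# surrounding persistence helpers perform.
structure = pd.DataFrame({"SurveyId": [1, 1, 2], "QuestionId": [1, 2, 1]})
structure.to_csv("lastKnownSurveyStructure.csv", index=False, header=True)

reloaded = pd.read_csv("lastKnownSurveyStructure.csv")
print(structure.equals(reloaded))  # True only if values AND dtypes match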
def compareDBSurveyStructureToPersistenceFile(surveyStructResults:pd.DataFrame, persistenceFilePath: str) -> bool: same_file = False try: persistenceFileDF = pd.read_csv(persistenceFilePath) if(surveyStructResults.equals(persistenceFileDF) == True): same_file = True except Exception as excp: raise Exception("compareDBSurveyStructureToPersistenceFile(): Couldn't read the persistence file").with_traceback(excp.__traceback__) return same_file def getAllSurveyDataQuery(connector: dbc.DBConnector) -> str: #IN THIS FUNCTION YOU MUST STRICTLY CONVERT THE CODE OF getAllSurveyData written in T-SQL, available in Survey_Sample_A19 and seen in class # The Python version must return the string containing the dynamic query (as we cannot use sp_executesql in Python!) strQueryTemplateForAnswerColumn: str = """COALESCE( ( SELECT a.Answer_Value FROM Answer as a WHERE a.UserId = u.UserId AND a.SurveyId = <SURVEY_ID> AND a.QuestionId = <QUESTION_ID> ), -1) AS ANS_Q<QUESTION_ID> """ strQueryTemplateForNullColumn: str = ' NULL AS ANS_Q<QUESTION_ID> ' strQueryTemplateOuterUnionQuery: str = """ SELECT UserId , <SURVEY_ID> as SurveyId , <DYNAMIC_QUESTION_ANSWERS> FROM [User] as u WHERE EXISTS ( SELECT * FROM Answer as a WHERE u.UserId = a.UserId AND a.SurveyId = <SURVEY_ID> ) """ strFinalQuery: str = '' #MAIN LOOP, OVER ALL THE SURVEYS # FOR EACH SURVEY, IN currentSurveyId, WE NEED TO CONSTRUCT THE ANSWER COLUMN QUERIES #inner loop, over the questions of the survey # Cursors are replaced by a query retrieved in a pandas df surveyQuery:str = 'SELECT SurveyId FROM Survey ORDER BY SurveyId' surveyQueryDF:pd.DataFrame = connector.ExecuteQuery_withRS(surveyQuery) # LOOP over surveyId and use Pandas DataFrame.iterrows() to iterate over the data frame for n,data in surveyQueryDF.iterrows(): currentSurveyId = data['SurveyId'] strCurrentUnionQueryBlock: str = '' currentQuestionCursorStr:str = """SELECT * FROM ( SELECT SurveyId, QuestionId, 1 as InSurvey FROM SurveyStructure WHERE SurveyId = %s UNION SELECT %s as SurveyId, Q.QuestionId, 0 as InSurvey FROM Question as Q WHERE NOT EXISTS(SELECT * FROM SurveyStructure as S WHERE S.SurveyId = %s AND S.QuestionId = Q.QuestionId )) as t ORDER BY QuestionId; """ % (currentSurveyId,currentSurveyId,currentSurveyId) currentQuestionCursorDF:pd.DataFrame = connector.ExecuteQuery_withRS(currentQuestionCursorStr) strColumnsQueryPart:str = '' for m,currentQData in currentQuestionCursorDF.iterrows(): currentInSurvey = currentQData['InSurvey'] currentQuestionID = currentQData['QuestionId'] if currentInSurvey == 0 : strColumnsQueryPart = strColumnsQueryPart + strQueryTemplateForNullColumn.replace('<QUESTION_ID>',str(currentQuestionID)) else : strColumnsQueryPart = strColumnsQueryPart + strQueryTemplateForAnswerColumn.replace('<QUESTION_ID>',str(currentQuestionID)) if m != len(currentQuestionCursorDF.index) - 1 : strColumnsQueryPart = strColumnsQueryPart + ', ' strCurrentUnionQueryBlock = strCurrentUnionQueryBlock + strQueryTemplateOuterUnionQuery.replace('<DYNAMIC_QUESTION_ANSWERS>',str(strColumnsQueryPart)) strCurrentUnionQueryBlock = strCurrentUnionQueryBlock.replace('<SURVEY_ID>', str(currentSurveyId)) strFinalQuery = strFinalQuery + strCurrentUnionQueryBlock if n != len(surveyQueryDF.index)-1 : strFinalQuery = strFinalQuery + ' UNION ' return strFinalQuery def refreshViewInDB(connector: dbc.DBConnector, baseViewQuery:str, viewName:str)->None: if(connector.IsConnected == True): refreshViewQuery = "CREATE OR ALTER VIEW " + viewName + " AS " + baseViewQuery try: cursor = connector._dbConduit.cursor() cursor.execute(refreshViewQuery) connector._dbConduit.commit() except Exception as excp: raise excp else: raise Exception('Cannot refresh view, connector object not connected to DB') def surveyResultsToDF(connector: dbc.DBConnector, viewName:str)->pd.DataFrame: results:pd.DataFrame = None if(connector.IsConnected == True): selectAllSurveyDataQuery = "SELECT * FROM " + viewName + " ORDER BY UserId, SurveyId" try: results = connector.ExecuteQuery_withRS(selectAllSurveyDataQuery) except Exception as excp: raise excp else: raise Exception('Cannot fetch survey results, connector object not connected to DB') return results def main(): cliArguments:dict = None printSplashScreen() try: cliArguments = processCLIArguments() except Exception: print("Exiting") return if(cliArguments is not None): #if you are using the Visual Studio Solution, you can set the command line parameters within VS (it's done in this example) #For setting your own values in VS, open the VS Project Properties (Menu "Project", bottom choice), tab "Debug", textbox "Script arguments" #If you are trying this script outside VS, you must provide command line parameters yourself, e.g. on Windows #python.exe Python_SQL_Project_Sample_Solution --DBServer <YOUR_MSSQL> -d <DBName> -t True #See the processCLIArguments() function for accepted parameters try: connector = mssql.MSSQL_DBConnector(DSN = cliArguments["dsn"], dbserver = cliArguments["dbserver"], dbname = cliArguments["dbname"], dbusername = cliArguments["dbusername"], dbpassword = cliArguments["dbuserpassword"], trustedmode = cliArguments["trustedmode"], viewname = cliArguments["viewname"]) connector.Open() surveyStructureDF:pd.DataFrame = getSurveyStructure(connector) if(doesPersistenceFileExist(cliArguments["persistencefilepath"]) == False): if(isPersistenceFileDirectoryWritable(cliArguments["persistencefilepath"]) == True): #save the survey structure in the path given by persistencefilepath #NOTE: one row is deliberately dropped before saving, which forces a structure mismatch (and a view refresh) on the next run df_savedSurveyStructure = surveyStructureDF.drop(surveyStructureDF.index[3]) df_savedSurveyStructure.to_csv(cliArguments['persistencefilepath'], index=False, header=True) print("\nINFO - Content of the SurveyStructure table saved in " + cliArguments["persistencefilepath"] + "\n") #refresh the view using the function written for this purpose refreshViewInDB(connector, getAllSurveyDataQuery(connector), cliArguments['viewname']) else: #Compare the existing saved SurveyStructure file with surveyStructureDF; refresh the view only when they differ, then export the results if compareDBSurveyStructureToPersistenceFile(surveyStructureDF, cliArguments["persistencefilepath"]) : print("SurveyStructure is unchanged, no view refresh needed") else: print('SurveyStructure differs from the saved one, refreshing the view') surveyStructureDF.to_csv(cliArguments['persistencefilepath'], index=False, header=True) print("\nINFO - Content of the SurveyStructure table saved in " + cliArguments['persistencefilepath'] ) refreshViewInDB(connector, getAllSurveyDataQuery(connector), cliArguments['viewname']) surveyResultsDF = surveyResultsToDF(connector,cliArguments['viewname']) surveyResultsDF.to_csv(cliArguments["resultsfilepath"], index=False, header=True) print("\nDONE - Results exported in " + cliArguments["resultsfilepath"] + "\n") connector.Close() except Exception as excp: print(excp) else: print("Inconsistency: CLI argument dictionary is None. Exiting") return if __name__ == '__main__': main()
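To make the dynamic-SQL assembly in getAllSurveyDataQuery easier to follow: for every survey it emits one SELECT block over [User], with one column per known question (a correlated COALESCE sub-select when the question belongs to the survey, NULL otherwise), and joins the blocks with UNION. Below is a condensed sketch of that pattern; build_query and the toy inputs stand in for the Survey/SurveyStructure tables and are not part of the project code.

ANSWER_COL = ("COALESCE((SELECT a.Answer_Value FROM Answer AS a "
              "WHERE a.UserId = u.UserId AND a.SurveyId = {sid} "
              "AND a.QuestionId = {qid}), -1) AS ANS_Q{qid}")
NULL_COL = "NULL AS ANS_Q{qid}"
BLOCK = ("SELECT UserId, {sid} AS SurveyId, {cols} FROM [User] AS u "
         "WHERE EXISTS (SELECT * FROM Answer AS a "
         "WHERE u.UserId = a.UserId AND a.SurveyId = {sid})")

def build_query(survey_ids, question_ids, structure):
    # structure: set of (survey_id, question_id) pairs present in SurveyStructure.
    blocks = []
    for sid in survey_ids:
        cols = ", ".join(
            (ANSWER_COL if (sid, qid) in structure else NULL_COL).format(sid=sid, qid=qid)
            for qid in question_ids
        )
        blocks.append(BLOCK.format(sid=sid, cols=cols))
    return " UNION ".join(blocks)

# Toy example: two surveys over three questions.
print(build_query([1, 2], [1, 2, 3], {(1, 1), (1, 2), (2, 3)}))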
Python_SQL_Project_CodeBase-DA.py
import argparse as agp import getpass import os from myTools import MSSQL_DBConnector as mssql from myTools import DBConnector as dbc import myTools.ContentObfuscation as ce try: import pandas as pd except: mi.installModule("pandas") import pandas as pd def printSplashScreen(): print("*************************************************************************************************") print("\t THIS SCRIPT ALLOWS TO EXTRACT SURVEY DATA FROM THE SAMPLE SEEN IN SQL CLASS") print("\t IT REPLICATES THE BEHAVIOUR OF A STORED PROCEDURE & TRIGGER IN A PROGRAMMATIC WAY") print("\t COMMAND LINE OPTIONS ARE:") print("\t\t -h or --help: print the help content on the console") print("*************************************************************************************************\n\n") def processCLIArguments()-> dict: retParametersDictionary:dict = None dbpassword:str = '' obfuscator: ce.ContentObfuscation = ce.ContentObfuscation() try: argParser:agp.ArgumentParser = agp.ArgumentParser(add_help=True) argParser.add_argument("-n", "--DSN", dest="dsn", \ action='store', default= None, help="Sets the SQL Server DSN descriptor file - Take precedence over all access parameters", type=str) argParser.add_argument("-s", "--DBServer", dest="dbserver", \ action='store', help="Sets the SQL Server DB Server Address", type=str) argParser.add_argument("-d", "--DBName", dest="dbname", \ action='store', help="Sets the SQL Server DB Name", type=str) argParser.add_argument("-u", "--DBUser", dest="dbusername", \ action='store', help="Sets the SQL Server DB Username", type=str) argParser.add_argument("-p", "--DBUserPassword", dest="dbuserpassword", \ action='store', help="Sets the SQL Server DB User Password", type=str) argParser.add_argument("-t", "--UseTrustedConnection", dest="trustedmode", \ action='store', default=False, \ help="Sets the SQL Server connection in Trusted Connection mode (default = False)", type=bool) argParser.add_argument("-v", "--TargetViewName", dest="viewname", \ action='store', default="dbo.vw_AllSurveyData", \ help="Sets the SQL Server target view name (this can be SCHEMA.VIEWNAME) - (default = dbo.vw_AllSurveyData)", type=str) argParser.add_argument("-f", "--SerialisedPersistenceFilePath", dest="persistencefilepath", \ action='store', default=os.getcwd() + os.sep + "lastKnownSurveyStructure.pkl", \ help="Sets the Persistence File Path (default = script current directory given by os.getcwd())", type=str) argParser.add_argument("-r", "--ResultsFilePath", dest="resultsfilepath", \ action='store', default=os.getcwd() + os.sep + "results.csv", \ help="Sets the Results (CSV) File Path (default = script current directory given by os.getcwd())", type=str) argParsingResults:agp.Namespace = argParser.parse_args() if(argParsingResults.dbserver is None): raise Exception("You must provide a SQL Server Address using the -s or --DBServer CLI argument") if(argParsingResults.dbname is None): raise Exception("You must provide a target database name using the -d or --DBName CLI argument") if(argParsingResults.trustedmode == False): if(argParsingResults.dbusername is None): raise Exception("You must provide a DB user account name using the -u or --DBUser CLI argument (or use TrustedConnection Mode)") #Password should not be provided directly in cleartext on the CLI if(argParsingResults.dbuserpassword is None): dbpassword = obfuscator.obfuscate(getpass.getpass('Please type the DB user password (no echo): ')) else: dbpassword = obfuscator.obfuscate(argParsingResults.dbuserpassword) else: 
if(argParsingResults.dbusername is not None): raise Exception("You are using the TrustedConnection Mode yet providing DB user name: this will be disregarded in TrustedConnection") retParametersDictionary = { "dsn" : argParsingResults.dsn, "dbserver" : argParsingResults.dbserver, "dbname" : argParsingResults.dbname, "dbusername" : argParsingResults.dbusername, "dbuserpassword" : dbpassword, "trustedmode" : argParsingResults.trustedmode, "viewname" : argParsingResults.viewname, "persistencefilepath": argParsingResults.persistencefilepath, "resultsfilepath" : argParsingResults.resultsfilepath } except Exception as e: print("Command Line arguments processing error: " + str(e)) return retParametersDictionary def getSurveyStructure(connector: mssql.MSSQL_DBConnector) -> pd.DataFrame: surveyStructResults = None if(connector is not None): if(connector.IsConnected == True): strQuerySurveyStruct = """ SELECT SurveyId, QuestionId FROM SurveyStructure ORDER BY SurveyId, QuestionId """ try: surveyStructResults:pd.DataFrame = connector.ExecuteQuery_withRS(strQuerySurveyStruct) except Exception as excp: raise Exception('GetSurveyStructure(): Cannot execute query').with_traceback(excp.__traceback__) else: raise Exception("GetSurveyStructure(): no Database connection").with_traceback(excp.__traceback__) else: raise Exception("GetSurveyStructure(): Database connection object is None").with_traceback(excp.__traceback__) return surveyStructResults def doesPersistenceFileExist(persistenceFilePath: str)-> bool: success = True try: file = open(persistenceFilePath) file.close() except FileNotFoundError: success = False return success def isPersistenceFileDirectoryWritable(persistenceFilePath: str)-> bool: success = True if(os.access(os.path.dirname(persistenceFilePath), os.W_OK) == False): success = False return success def compareDBSurveyStructureToPersistenceFile(surveyStructResults:pd.DataFrame, persistenceFilePath: str) -> bool: same_file = False try: unpickled_persistanceFileDF = pd.read_csv(persistenceFilePath) if(surveyStructResults.equals(unpickled_persistanceFileDF) == True): same_file = True except Exception as excp: raise Exception("compareDBSurveyStructureToPersistenceFile(): Couldn't read (unpickle) the persistence file").with_traceback(excp.__traceback__) return same_file def getAllSurveyDataQuery(connector: dbc.DBConnector) -> str: #IN THIS FUNCTION YOU MUST STRICTLY CONVERT THE CODE OF getAllSurveyData written in T-SQL, available in Survey_Sample_A19 and seen in class # Below is the beginning of the conversion # The Python version must return the string containing the dynamic query (as we cannot use sp_executesql in Python!) 
strQueryTemplateForAnswerColumn: str = """COALESCE( ( SELECT a.Answer_Value FROM Answer as a WHERE a.UserId = u.UserId AND a.SurveyId = <SURVEY_ID> AND a.QuestionId = <QUESTION_ID> ), -1) AS ANS_Q<QUESTION_ID> """ strQueryTemplateForNullColumnn: str = ' NULL AS ANS_Q<QUESTION_ID> ' strQueryTemplateOuterUnionQuery: str = """ SELECT UserId , <SURVEY_ID> as SurveyId , <DYNAMIC_QUESTION_ANSWERS> FROM [User] as u WHERE EXISTS ( \ SELECT * FROM Answer as a WHERE u.UserId = a.UserId AND a.SurveyId = <SURVEY_ID> ) """ # strCurrentUnionQueryBlock: str = '' strFinalQuery: str = '' #MAIN LOOP, OVER ALL THE SURVEYS # FOR EACH SURVEY, IN currentSurveyId, WE NEED TO CONSTRUCT THE ANSWER COLUMN QUERIES #inner loop, over the questions of the survey # Cursors are replaced by a query retrived in a pandas df surveyQuery:str = 'SELECT SurveyId FROM Survey ORDER BY SurveyId' surveyQueryDF:pd.DataFrame = connector.ExecuteQuery_withRS(surveyQuery) #CARRY ON THE CONVERSION # LOOP over surveyId and use Pandas DataFrame.iterrows() to iterate over data frame for n,data in surveyQueryDF.iterrows(): currentSurveyId = data['SurveyId'] print(currentSurveyId) strCurrentUnionQueryBlock: str = '' currentQuestionCursorStr:str = """SELECT * FROM ( SELECT SurveyId, QuestionId, 1 as InSurvey FROM SurveyStructure WHERE SurveyId = %s UNION SELECT %s as SurveyId,Q.QuestionId,0 as InSurvey FROM Question as Q WHERE NOT EXISTS(SELECT *FROM SurveyStructure as S WHERE S.SurveyId = %s AND S.QuestionId = Q.QuestionId )) as t ORDER BY QuestionId; """ % (currentSurveyId,currentSurveyId,currentSurveyId) currentQuestionCursorDF:pd.DataFrame = connector.ExecuteQuery_withRS(currentQuestionCursorStr) strColumnsQueryPart:str=''; for m,currentQData in currentQuestionCursorDF.iterrows(): currentInSurvey = currentQData['InSurvey'] currentSurveyIdInQuestion = currentQData['SurveyId'] currentQuestionID = currentQData['QuestionId'] if currentInSurvey == 0 : strColumnsQueryPart= strColumnsQueryPart + strQueryTemplateForNullColumnn.replace('<QUESTION_ID>',str(currentQuestionID)) else : strColumnsQueryPart= strColumnsQueryPart + strQueryTemplateForAnswerColumn.replace('<QUESTION_ID>',str(currentQuestionID)) if m != len(currentQuestionCursorDF.index) - 1 : strColumnsQueryPart = strColumnsQueryPart + ', ' strCurrentUnionQueryBlock = strCurrentUnionQueryBlock + strQueryTemplateOuterUnionQuery.replace('<DYNAMIC_QUESTION_ANSWERS>',str(strColumnsQueryPart)) strCurrentUnionQueryBlock = strCurrentUnionQueryBlock.replace('<SURVEY_ID>', str(currentSurveyId)) strFinalQuery = strFinalQuery + strCurrentUnionQueryBlock if n != len(surveyQueryDF.index)-1 : strFinalQuery = strFinalQuery + ' UNION ' return strFinalQuery def refreshViewInDB(connector: dbc.DBConnector, baseViewQuery:str, viewName:str)->None: if(connector.IsConnected == True): refreshViewQuery = "CREATE OR ALTER VIEW " + viewName + " AS " + baseViewQuery try: cursor = connector._dbConduit.cursor() cursor.execute(refreshViewQuery) connector._dbConduit.commit() except Exception as excp: raise excp else: raise Exception('Cannot refresh view, connector object not connected to DB') def surveyResultsToDF(connector: dbc.DBConnector, viewName:str)->pd.DataFrame: results:pd.DataFrame = None if(connector.IsConnected == True): selectAllSurveyDataQuery = "SELECT * FROM " + viewName + " ORDER BY UserId, SurveyId" try: results = connector.ExecuteQuery_withRS(selectAllSurveyDataQuery) except Exception as excp: raise excp else: raise Exception('Cannot refresh view, connector object not connected to DB') return results def 
main():
    cliArguments: dict = None
    printSplashScreen()
    try:
        cliArguments = processCLIArguments()
    except Exception as excp:
        print("Exiting")
        return
    if(cliArguments is not None):
        # if you are using the Visual Studio Solution, you can set the command line parameters within VS (it's done in this example)
        # For setting your own values in VS, please make sure to open the VS Project Properties (menu "Project", bottom choice), tab "Debug", textbox "Script arguments"
        # If you are trying this script outside VS, you must provide command line parameters yourself, e.g. on Windows:
        # python.exe Python_SQL_Project_Sample_Solution --DBServer <YOUR_MSSQL> -d <DBName> -t True
        # See the processCLIArguments() function for accepted parameters
        try:
            connector = mssql.MSSQL_DBConnector(DSN = cliArguments["dsn"], dbserver = cliArguments["dbserver"], \
                dbname = cliArguments["dbname"], dbusername = cliArguments["dbusername"], \
                dbpassword = cliArguments["dbuserpassword"], trustedmode = cliArguments["trustedmode"], \
                viewname = cliArguments["viewname"])
            connector.Open()
            surveyStructureDF: pd.DataFrame = getSurveyStructure(connector)
            if(doesPersistenceFileExist(cliArguments["persistencefilepath"]) == False):
                if(isPersistenceFileDirectoryWritable(cliArguments["persistencefilepath"]) == True):
                    # save the dataframe in the path given by persistencefilepath
                    # TODO
                    # NOTE: one row is dropped before saving, so the stored structure will differ from the DB on the next run
                    df_savedSurveyStructure = surveyStructureDF.drop(surveyStructureDF.index[3])
                    df_savedSurveyStructure.to_csv(cliArguments['persistencefilepath'], index=False, header=True)
                    print("\nINFO - Content of SurveyStructure table saved in " + cliArguments["persistencefilepath"] + "\n")
                    # refresh the view using the function written for this purpose
                # Compare the existing saved SurveyStructure file with surveyStructureDF
                # What do you need to do if the dataframe and the saved file are different?
                # TODO
                # get your survey results from the view in a dataframe and save them to a CSV file in the path given by resultsfilepath
                # TODO
                if compareDBSurveyStructureToPersistenceFile(surveyStructureDF, cliArguments["persistencefilepath"]):
                    print("New SurveyStructure is the same as the saved one, nothing to do")
                else:
                    print('SurveyStructure differs from the saved one, need to refresh the view')
                    surveyStructureDF.to_csv(cliArguments['persistencefilepath'], index=False, header=True)
                    print("\nINFO - Content of SurveyStructure table saved in " + cliArguments['persistencefilepath'])
                    refreshViewInDB(connector, getAllSurveyDataQuery(connector), cliArguments['viewname'])
            surveyResultsDF = surveyResultsToDF(connector, cliArguments['viewname'])
            surveyResultsDF.to_csv(cliArguments["resultsfilepath"], index=False, header=True)
            print("\nDONE - Results exported in " + cliArguments["resultsfilepath"] + "\n")
            connector.Close()
        except Exception as excp:
            print(excp)
    else:
        print("Inconsistency: CLI argument dictionary is None. Exiting")
    return


if __name__ == '__main__':
    main()
                    # TODO
                    refreshViewInDB(connector, getAllSurveyDataQuery(connector), cliArguments['viewname'])
            else:
random_line_split
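getAllSurveyDataQuery above assembles the pivot purely with string replacement, so the column logic can be exercised without a live connection. Below is a minimal, DB-free sketch of the same assembly, assuming integer ids; build_all_survey_data_query and its input shape are hypothetical names, not part of the original script:

from typing import Dict, List, Tuple

ANSWER_COL = ("COALESCE((SELECT a.Answer_Value FROM Answer as a "
              "WHERE a.UserId = u.UserId AND a.SurveyId = {sid} "
              "AND a.QuestionId = {qid}), -1) AS ANS_Q{qid}")
NULL_COL = "NULL AS ANS_Q{qid}"

def build_all_survey_data_query(structure: Dict[int, List[Tuple[int, bool]]]) -> str:
    """structure maps SurveyId -> [(QuestionId, question_is_in_survey), ...]."""
    blocks = []
    for sid, questions in sorted(structure.items()):
        # One COALESCE column per question in the survey, NULL for the rest.
        cols = ", ".join(
            (ANSWER_COL if in_survey else NULL_COL).format(sid=sid, qid=qid)
            for qid, in_survey in sorted(questions)
        )
        blocks.append(
            f"SELECT UserId, {sid} as SurveyId, {cols} FROM [User] as u "
            f"WHERE EXISTS (SELECT * FROM Answer as a "
            f"WHERE u.UserId = a.UserId AND a.SurveyId = {sid})"
        )
    return " UNION ".join(blocks)

# Example: survey 1 uses questions 1 and 2; question 3 exists but is not in the survey.
print(build_all_survey_data_query({1: [(1, True), (2, True), (3, False)]}))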
Python_SQL_Project_CodeBase-DA.py
import argparse as agp
import getpass
import os
from myTools import MSSQL_DBConnector as mssql
from myTools import DBConnector as dbc
import myTools.ContentObfuscation as ce

try:
    import pandas as pd
except ImportError:
    # NOTE: 'mi' (a module-installer helper) is not imported above; it is assumed
    # to be provided by the myTools package in the original project.
    mi.installModule("pandas")
    import pandas as pd


def printSplashScreen():
    print("*************************************************************************************************")
    print("\t THIS SCRIPT ALLOWS YOU TO EXTRACT SURVEY DATA FROM THE SAMPLE SEEN IN SQL CLASS")
    print("\t IT REPLICATES THE BEHAVIOUR OF A STORED PROCEDURE & TRIGGER IN A PROGRAMMATIC WAY")
    print("\t COMMAND LINE OPTIONS ARE:")
    print("\t\t -h or --help: print the help content on the console")
    print("*************************************************************************************************\n\n")


def processCLIArguments() -> dict:
    retParametersDictionary: dict = None
    dbpassword: str = ''
    obfuscator: ce.ContentObfuscation = ce.ContentObfuscation()
    try:
        argParser: agp.ArgumentParser = agp.ArgumentParser(add_help=True)
        argParser.add_argument("-n", "--DSN", dest="dsn", \
            action='store', default=None, help="Sets the SQL Server DSN descriptor file - Takes precedence over all access parameters", type=str)
        argParser.add_argument("-s", "--DBServer", dest="dbserver", \
            action='store', help="Sets the SQL Server DB Server Address", type=str)
        argParser.add_argument("-d", "--DBName", dest="dbname", \
            action='store', help="Sets the SQL Server DB Name", type=str)
        argParser.add_argument("-u", "--DBUser", dest="dbusername", \
            action='store', help="Sets the SQL Server DB Username", type=str)
        argParser.add_argument("-p", "--DBUserPassword", dest="dbuserpassword", \
            action='store', help="Sets the SQL Server DB User Password", type=str)
        # CAVEAT: type=bool treats any non-empty string (including "False") as True
        argParser.add_argument("-t", "--UseTrustedConnection", dest="trustedmode", \
            action='store', default=False, \
            help="Sets the SQL Server connection in Trusted Connection mode (default = False)", type=bool)
        argParser.add_argument("-v", "--TargetViewName", dest="viewname", \
            action='store', default="dbo.vw_AllSurveyData", \
            help="Sets the SQL Server target view name (this can be SCHEMA.VIEWNAME) - (default = dbo.vw_AllSurveyData)", type=str)
        # NOTE: despite the .pkl default name, the script persists the structure as CSV
        argParser.add_argument("-f", "--SerialisedPersistenceFilePath", dest="persistencefilepath", \
            action='store', default=os.getcwd() + os.sep + "lastKnownSurveyStructure.pkl", \
            help="Sets the Persistence File Path (default = script current directory given by os.getcwd())", type=str)
        argParser.add_argument("-r", "--ResultsFilePath", dest="resultsfilepath", \
            action='store', default=os.getcwd() + os.sep + "results.csv", \
            help="Sets the Results (CSV) File Path (default = script current directory given by os.getcwd())", type=str)

        argParsingResults: agp.Namespace = argParser.parse_args()
        if(argParsingResults.dbserver is None):
            raise Exception("You must provide a SQL Server Address using the -s or --DBServer CLI argument")
        if(argParsingResults.dbname is None):
            raise Exception("You must provide a target database name using the -d or --DBName CLI argument")
        if(argParsingResults.trustedmode == False):
            if(argParsingResults.dbusername is None):
                raise Exception("You must provide a DB user account name using the -u or --DBUser CLI argument (or use TrustedConnection Mode)")
            # Password should not be provided directly in cleartext on the CLI
            if(argParsingResults.dbuserpassword is None):
                dbpassword = obfuscator.obfuscate(getpass.getpass('Please type the DB user password (no echo): '))
            else:
                dbpassword = obfuscator.obfuscate(argParsingResults.dbuserpassword)
        else:
            if(argParsingResults.dbusername is not None):
                raise Exception("You are using the TrustedConnection Mode yet providing a DB user name: it will be disregarded in TrustedConnection")

        retParametersDictionary = {
            "dsn": argParsingResults.dsn,
            "dbserver": argParsingResults.dbserver,
            "dbname": argParsingResults.dbname,
            "dbusername": argParsingResults.dbusername,
            "dbuserpassword": dbpassword,
            "trustedmode": argParsingResults.trustedmode,
            "viewname": argParsingResults.viewname,
            "persistencefilepath": argParsingResults.persistencefilepath,
            "resultsfilepath": argParsingResults.resultsfilepath
        }
    except Exception as e:
        print("Command Line arguments processing error: " + str(e))
    return retParametersDictionary


def getSurveyStructure(connector: mssql.MSSQL_DBConnector) -> pd.DataFrame:
    surveyStructResults = None
    if(connector is not None):
        if(connector.IsConnected == True):
            strQuerySurveyStruct = """
                SELECT SurveyId, QuestionId
                FROM SurveyStructure
                ORDER BY SurveyId, QuestionId
            """
            try:
                surveyStructResults: pd.DataFrame = connector.ExecuteQuery_withRS(strQuerySurveyStruct)
            except Exception as excp:
                raise Exception('GetSurveyStructure(): Cannot execute query').with_traceback(excp.__traceback__)
        else:
            # No exception context exists on this branch, so raise plainly
            raise Exception("GetSurveyStructure(): no Database connection")
    else:
        raise Exception("GetSurveyStructure(): Database connection object is None")
    return surveyStructResults


def doesPersistenceFileExist(persistenceFilePath: str) -> bool:
    success = True
    try:
        file = open(persistenceFilePath)
        file.close()
    except FileNotFoundError:
        success = False
    return success


def isPersistenceFileDirectoryWritable(persistenceFilePath: str) -> bool:
    success = True
    if(os.access(os.path.dirname(persistenceFilePath), os.W_OK) == False):
        success = False
    return success


def compareDBSurveyStructureToPersistenceFile(surveyStructResults: pd.DataFrame, persistenceFilePath: str) -> bool:
    same_file = False
    try:
        unpickled_persistenceFileDF = pd.read_csv(persistenceFilePath)
        if(surveyStructResults.equals(unpickled_persistenceFileDF) == True):
            same_file = True
    except Exception as excp:
        raise Exception("compareDBSurveyStructureToPersistenceFile(): Couldn't read the persistence file").with_traceback(excp.__traceback__)
    return same_file


def getAllSurveyDataQuery(connector: dbc.DBConnector) -> str:
    # IN THIS FUNCTION YOU MUST STRICTLY CONVERT THE CODE OF getAllSurveyData written in T-SQL, available in Survey_Sample_A19 and seen in class
    # Below is the beginning of the conversion
    # The Python version must return the string containing the dynamic query (as we cannot use sp_executesql in Python!)
    strQueryTemplateForAnswerColumn: str = """COALESCE(
        (
            SELECT a.Answer_Value
            FROM Answer as a
            WHERE a.UserId = u.UserId
              AND a.SurveyId = <SURVEY_ID>
              AND a.QuestionId = <QUESTION_ID>
        ), -1) AS ANS_Q<QUESTION_ID> """

    strQueryTemplateForNullColumn: str = ' NULL AS ANS_Q<QUESTION_ID> '

    strQueryTemplateOuterUnionQuery: str = """
        SELECT UserId
             , <SURVEY_ID> as SurveyId
             , <DYNAMIC_QUESTION_ANSWERS>
        FROM [User] as u
        WHERE EXISTS
        (
            SELECT *
            FROM Answer as a
            WHERE u.UserId = a.UserId
              AND a.SurveyId = <SURVEY_ID>
        )
    """

    strCurrentUnionQueryBlock: str = ''
    strFinalQuery: str = ''

    # MAIN LOOP, OVER ALL THE SURVEYS
    # FOR EACH SURVEY, IN currentSurveyId, WE NEED TO CONSTRUCT THE ANSWER COLUMN QUERIES
    # inner loop, over the questions of the survey
    # Cursors are replaced by a query retrieved in a pandas df
    surveyQuery: str = 'SELECT SurveyId FROM Survey ORDER BY SurveyId'
    surveyQueryDF: pd.DataFrame = connector.ExecuteQuery_withRS(surveyQuery)

    # CARRY ON THE CONVERSION
    # LOOP over surveyId and use pandas DataFrame.iterrows() to iterate over the data frame
    for n, data in surveyQueryDF.iterrows():
        currentSurveyId = data['SurveyId']
        print(currentSurveyId)
        strCurrentUnionQueryBlock = ''
        currentQuestionCursorStr: str = """SELECT *
            FROM
            (
                SELECT SurveyId, QuestionId, 1 as InSurvey
                FROM SurveyStructure
                WHERE SurveyId = %s
                UNION
                SELECT %s as SurveyId, Q.QuestionId, 0 as InSurvey
                FROM Question as Q
                WHERE NOT EXISTS
                (
                    SELECT *
                    FROM SurveyStructure as S
                    WHERE S.SurveyId = %s AND S.QuestionId = Q.QuestionId
                )
            ) as t
            ORDER BY QuestionId;
        """ % (currentSurveyId, currentSurveyId, currentSurveyId)
        currentQuestionCursorDF: pd.DataFrame = connector.ExecuteQuery_withRS(currentQuestionCursorStr)
        strColumnsQueryPart: str = ''
        for m, currentQData in currentQuestionCursorDF.iterrows():
            currentInSurvey = currentQData['InSurvey']
            currentSurveyIdInQuestion = currentQData['SurveyId']
            currentQuestionID = currentQData['QuestionId']
            if currentInSurvey == 0:
                strColumnsQueryPart = strColumnsQueryPart + strQueryTemplateForNullColumn.replace('<QUESTION_ID>', str(currentQuestionID))
            else:
                strColumnsQueryPart = strColumnsQueryPart + strQueryTemplateForAnswerColumn.replace('<QUESTION_ID>', str(currentQuestionID))
            if m != len(currentQuestionCursorDF.index) - 1:
                strColumnsQueryPart = strColumnsQueryPart + ', '
        strCurrentUnionQueryBlock = strCurrentUnionQueryBlock + strQueryTemplateOuterUnionQuery.replace('<DYNAMIC_QUESTION_ANSWERS>', str(strColumnsQueryPart))
        strCurrentUnionQueryBlock = strCurrentUnionQueryBlock.replace('<SURVEY_ID>', str(currentSurveyId))
        strFinalQuery = strFinalQuery + strCurrentUnionQueryBlock
        if n != len(surveyQueryDF.index) - 1:
            strFinalQuery = strFinalQuery + ' UNION '

    return strFinalQuery


def refreshViewInDB(connector: dbc.DBConnector, baseViewQuery: str, viewName: str) -> None:
    if(connector.IsConnected == True):
        refreshViewQuery = "CREATE OR ALTER VIEW " + viewName + " AS " + baseViewQuery
        try:
            cursor = connector._dbConduit.cursor()
            cursor.execute(refreshViewQuery)
            connector._dbConduit.commit()
        except Exception as excp:
            raise excp
    else:
        raise Exception('Cannot refresh view, connector object not connected to DB')


def surveyResultsToDF(connector: dbc.DBConnector, viewName: str) -> pd.DataFrame:
    results: pd.DataFrame = None
    if(connector.IsConnected == True):
        selectAllSurveyDataQuery = "SELECT * FROM " + viewName + " ORDER BY UserId, SurveyId"
        try:
            results = connector.ExecuteQuery_withRS(selectAllSurveyDataQuery)
        except Exception as excp:
            raise excp
    else:
        raise Exception('Cannot retrieve survey results, connector object not connected to DB')
    return results


def
main():
    cliArguments: dict = None
    printSplashScreen()
    try:
        cliArguments = processCLIArguments()
    except Exception as excp:
        print("Exiting")
        return
    if(cliArguments is not None):
        # if you are using the Visual Studio Solution, you can set the command line parameters within VS (it's done in this example)
        # For setting your own values in VS, please make sure to open the VS Project Properties (menu "Project", bottom choice), tab "Debug", textbox "Script arguments"
        # If you are trying this script outside VS, you must provide command line parameters yourself, e.g. on Windows:
        # python.exe Python_SQL_Project_Sample_Solution --DBServer <YOUR_MSSQL> -d <DBName> -t True
        # See the processCLIArguments() function for accepted parameters
    else:
        print("Inconsistency: CLI argument dictionary is None. Exiting")
    return


if __name__ == '__main__':
    main()
        try:
            connector = mssql.MSSQL_DBConnector(DSN = cliArguments["dsn"], dbserver = cliArguments["dbserver"], \
                dbname = cliArguments["dbname"], dbusername = cliArguments["dbusername"], \
                dbpassword = cliArguments["dbuserpassword"], trustedmode = cliArguments["trustedmode"], \
                viewname = cliArguments["viewname"])
            connector.Open()
            surveyStructureDF: pd.DataFrame = getSurveyStructure(connector)
            if(doesPersistenceFileExist(cliArguments["persistencefilepath"]) == False):
                if(isPersistenceFileDirectoryWritable(cliArguments["persistencefilepath"]) == True):
                    # save the dataframe in the path given by persistencefilepath
                    # TODO
                    # NOTE: one row is dropped before saving, so the stored structure will differ from the DB on the next run
                    df_savedSurveyStructure = surveyStructureDF.drop(surveyStructureDF.index[3])
                    df_savedSurveyStructure.to_csv(cliArguments['persistencefilepath'], index=False, header=True)
                    print("\nINFO - Content of SurveyStructure table saved in " + cliArguments["persistencefilepath"] + "\n")
                    # refresh the view using the function written for this purpose
                    # TODO
                    refreshViewInDB(connector, getAllSurveyDataQuery(connector), cliArguments['viewname'])
            else:
                # Compare the existing saved SurveyStructure file with surveyStructureDF
                # What do you need to do if the dataframe and the saved file are different?
                # TODO
                # get your survey results from the view in a dataframe and save them to a CSV file in the path given by resultsfilepath
                # TODO
                if compareDBSurveyStructureToPersistenceFile(surveyStructureDF, cliArguments["persistencefilepath"]):
                    print("New SurveyStructure is the same as the saved one, nothing to do")
                else:
                    print('SurveyStructure differs from the saved one, need to refresh the view')
                    surveyStructureDF.to_csv(cliArguments['persistencefilepath'], index=False, header=True)
                    print("\nINFO - Content of SurveyStructure table saved in " + cliArguments['persistencefilepath'])
                    refreshViewInDB(connector, getAllSurveyDataQuery(connector), cliArguments['viewname'])
            surveyResultsDF = surveyResultsToDF(connector, cliArguments['viewname'])
            surveyResultsDF.to_csv(cliArguments["resultsfilepath"], index=False, header=True)
            print("\nDONE - Results exported in " + cliArguments["resultsfilepath"] + "\n")
            connector.Close()
        except Exception as excp:
            print(excp)
conditional_block
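The conditional block above implements the cache-then-compare flow: persist the structure on first run, otherwise compare and refresh the view only on change. Below is a compact sketch of that decision under the same CSV convention; sync_survey_structure and the refresh_view callable are hypothetical stand-ins for the script's functions, not part of the original code:

import os
import pandas as pd

def sync_survey_structure(df: pd.DataFrame, path: str, refresh_view) -> bool:
    """Persist `df` and refresh the view unless the saved copy already matches."""
    if os.path.exists(path) and df.equals(pd.read_csv(path)):
        return False                               # unchanged: nothing to do
    df.to_csv(path, index=False, header=True)      # first run or structure changed
    refresh_view()                                 # rebuild the survey-data view
    return True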
Python_SQL_Project_CodeBase-DA.py
import argparse as agp
import getpass
import os
from myTools import MSSQL_DBConnector as mssql
from myTools import DBConnector as dbc
import myTools.ContentObfuscation as ce

try:
    import pandas as pd
except ImportError:
    # NOTE: 'mi' (a module-installer helper) is not imported above; it is assumed
    # to be provided by the myTools package in the original project.
    mi.installModule("pandas")
    import pandas as pd


def printSplashScreen():
    print("*************************************************************************************************")
    print("\t THIS SCRIPT ALLOWS YOU TO EXTRACT SURVEY DATA FROM THE SAMPLE SEEN IN SQL CLASS")
    print("\t IT REPLICATES THE BEHAVIOUR OF A STORED PROCEDURE & TRIGGER IN A PROGRAMMATIC WAY")
    print("\t COMMAND LINE OPTIONS ARE:")
    print("\t\t -h or --help: print the help content on the console")
    print("*************************************************************************************************\n\n")


def processCLIArguments() -> dict:
    retParametersDictionary: dict = None
    dbpassword: str = ''
    obfuscator: ce.ContentObfuscation = ce.ContentObfuscation()
    try:
        argParser: agp.ArgumentParser = agp.ArgumentParser(add_help=True)
        argParser.add_argument("-n", "--DSN", dest="dsn", \
            action='store', default=None, help="Sets the SQL Server DSN descriptor file - Takes precedence over all access parameters", type=str)
        argParser.add_argument("-s", "--DBServer", dest="dbserver", \
            action='store', help="Sets the SQL Server DB Server Address", type=str)
        argParser.add_argument("-d", "--DBName", dest="dbname", \
            action='store', help="Sets the SQL Server DB Name", type=str)
        argParser.add_argument("-u", "--DBUser", dest="dbusername", \
            action='store', help="Sets the SQL Server DB Username", type=str)
        argParser.add_argument("-p", "--DBUserPassword", dest="dbuserpassword", \
            action='store', help="Sets the SQL Server DB User Password", type=str)
        # CAVEAT: type=bool treats any non-empty string (including "False") as True
        argParser.add_argument("-t", "--UseTrustedConnection", dest="trustedmode", \
            action='store', default=False, \
            help="Sets the SQL Server connection in Trusted Connection mode (default = False)", type=bool)
        argParser.add_argument("-v", "--TargetViewName", dest="viewname", \
            action='store', default="dbo.vw_AllSurveyData", \
            help="Sets the SQL Server target view name (this can be SCHEMA.VIEWNAME) - (default = dbo.vw_AllSurveyData)", type=str)
        # NOTE: despite the .pkl default name, the script persists the structure as CSV
        argParser.add_argument("-f", "--SerialisedPersistenceFilePath", dest="persistencefilepath", \
            action='store', default=os.getcwd() + os.sep + "lastKnownSurveyStructure.pkl", \
            help="Sets the Persistence File Path (default = script current directory given by os.getcwd())", type=str)
        argParser.add_argument("-r", "--ResultsFilePath", dest="resultsfilepath", \
            action='store', default=os.getcwd() + os.sep + "results.csv", \
            help="Sets the Results (CSV) File Path (default = script current directory given by os.getcwd())", type=str)

        argParsingResults: agp.Namespace = argParser.parse_args()
        if(argParsingResults.dbserver is None):
            raise Exception("You must provide a SQL Server Address using the -s or --DBServer CLI argument")
        if(argParsingResults.dbname is None):
            raise Exception("You must provide a target database name using the -d or --DBName CLI argument")
        if(argParsingResults.trustedmode == False):
            if(argParsingResults.dbusername is None):
                raise Exception("You must provide a DB user account name using the -u or --DBUser CLI argument (or use TrustedConnection Mode)")
            # Password should not be provided directly in cleartext on the CLI
            if(argParsingResults.dbuserpassword is None):
                dbpassword = obfuscator.obfuscate(getpass.getpass('Please type the DB user password (no echo): '))
            else:
                dbpassword = obfuscator.obfuscate(argParsingResults.dbuserpassword)
        else:
            if(argParsingResults.dbusername is not None):
                raise Exception("You are using the TrustedConnection Mode yet providing a DB user name: it will be disregarded in TrustedConnection")

        retParametersDictionary = {
            "dsn": argParsingResults.dsn,
            "dbserver": argParsingResults.dbserver,
            "dbname": argParsingResults.dbname,
            "dbusername": argParsingResults.dbusername,
            "dbuserpassword": dbpassword,
            "trustedmode": argParsingResults.trustedmode,
            "viewname": argParsingResults.viewname,
            "persistencefilepath": argParsingResults.persistencefilepath,
            "resultsfilepath": argParsingResults.resultsfilepath
        }
    except Exception as e:
        print("Command Line arguments processing error: " + str(e))
    return retParametersDictionary


def getSurveyStructure(connector: mssql.MSSQL_DBConnector) -> pd.DataFrame:
    surveyStructResults = None
    if(connector is not None):
        if(connector.IsConnected == True):
            strQuerySurveyStruct = """
                SELECT SurveyId, QuestionId
                FROM SurveyStructure
                ORDER BY SurveyId, QuestionId
            """
            try:
                surveyStructResults: pd.DataFrame = connector.ExecuteQuery_withRS(strQuerySurveyStruct)
            except Exception as excp:
                raise Exception('GetSurveyStructure(): Cannot execute query').with_traceback(excp.__traceback__)
        else:
            # No exception context exists on this branch, so raise plainly
            raise Exception("GetSurveyStructure(): no Database connection")
    else:
        raise Exception("GetSurveyStructure(): Database connection object is None")
    return surveyStructResults


def doesPersistenceFileExist(persistenceFilePath: str) -> bool:
    success = True
    try:
        file = open(persistenceFilePath)
        file.close()
    except FileNotFoundError:
        success = False
    return success


def
(persistenceFilePath: str) -> bool:
    success = True
    if(os.access(os.path.dirname(persistenceFilePath), os.W_OK) == False):
        success = False
    return success


def compareDBSurveyStructureToPersistenceFile(surveyStructResults: pd.DataFrame, persistenceFilePath: str) -> bool:
    same_file = False
    try:
        unpickled_persistenceFileDF = pd.read_csv(persistenceFilePath)
        if(surveyStructResults.equals(unpickled_persistenceFileDF) == True):
            same_file = True
    except Exception as excp:
        raise Exception("compareDBSurveyStructureToPersistenceFile(): Couldn't read the persistence file").with_traceback(excp.__traceback__)
    return same_file


def getAllSurveyDataQuery(connector: dbc.DBConnector) -> str:
    # IN THIS FUNCTION YOU MUST STRICTLY CONVERT THE CODE OF getAllSurveyData written in T-SQL, available in Survey_Sample_A19 and seen in class
    # Below is the beginning of the conversion
    # The Python version must return the string containing the dynamic query (as we cannot use sp_executesql in Python!)
    strQueryTemplateForAnswerColumn: str = """COALESCE(
        (
            SELECT a.Answer_Value
            FROM Answer as a
            WHERE a.UserId = u.UserId
              AND a.SurveyId = <SURVEY_ID>
              AND a.QuestionId = <QUESTION_ID>
        ), -1) AS ANS_Q<QUESTION_ID> """

    strQueryTemplateForNullColumn: str = ' NULL AS ANS_Q<QUESTION_ID> '

    strQueryTemplateOuterUnionQuery: str = """
        SELECT UserId
             , <SURVEY_ID> as SurveyId
             , <DYNAMIC_QUESTION_ANSWERS>
        FROM [User] as u
        WHERE EXISTS
        (
            SELECT *
            FROM Answer as a
            WHERE u.UserId = a.UserId
              AND a.SurveyId = <SURVEY_ID>
        )
    """

    strCurrentUnionQueryBlock: str = ''
    strFinalQuery: str = ''

    # MAIN LOOP, OVER ALL THE SURVEYS
    # FOR EACH SURVEY, IN currentSurveyId, WE NEED TO CONSTRUCT THE ANSWER COLUMN QUERIES
    # inner loop, over the questions of the survey
    # Cursors are replaced by a query retrieved in a pandas df
    surveyQuery: str = 'SELECT SurveyId FROM Survey ORDER BY SurveyId'
    surveyQueryDF: pd.DataFrame = connector.ExecuteQuery_withRS(surveyQuery)

    # CARRY ON THE CONVERSION
    # LOOP over surveyId and use pandas DataFrame.iterrows() to iterate over the data frame
    for n, data in surveyQueryDF.iterrows():
        currentSurveyId = data['SurveyId']
        print(currentSurveyId)
        strCurrentUnionQueryBlock = ''
        currentQuestionCursorStr: str = """SELECT *
            FROM
            (
                SELECT SurveyId, QuestionId, 1 as InSurvey
                FROM SurveyStructure
                WHERE SurveyId = %s
                UNION
                SELECT %s as SurveyId, Q.QuestionId, 0 as InSurvey
                FROM Question as Q
                WHERE NOT EXISTS
                (
                    SELECT *
                    FROM SurveyStructure as S
                    WHERE S.SurveyId = %s AND S.QuestionId = Q.QuestionId
                )
            ) as t
            ORDER BY QuestionId;
        """ % (currentSurveyId, currentSurveyId, currentSurveyId)
        currentQuestionCursorDF: pd.DataFrame = connector.ExecuteQuery_withRS(currentQuestionCursorStr)
        strColumnsQueryPart: str = ''
        for m, currentQData in currentQuestionCursorDF.iterrows():
            currentInSurvey = currentQData['InSurvey']
            currentSurveyIdInQuestion = currentQData['SurveyId']
            currentQuestionID = currentQData['QuestionId']
            if currentInSurvey == 0:
                strColumnsQueryPart = strColumnsQueryPart + strQueryTemplateForNullColumn.replace('<QUESTION_ID>', str(currentQuestionID))
            else:
                strColumnsQueryPart = strColumnsQueryPart + strQueryTemplateForAnswerColumn.replace('<QUESTION_ID>', str(currentQuestionID))
            if m != len(currentQuestionCursorDF.index) - 1:
                strColumnsQueryPart = strColumnsQueryPart + ', '
        strCurrentUnionQueryBlock = strCurrentUnionQueryBlock + strQueryTemplateOuterUnionQuery.replace('<DYNAMIC_QUESTION_ANSWERS>', str(strColumnsQueryPart))
        strCurrentUnionQueryBlock = strCurrentUnionQueryBlock.replace('<SURVEY_ID>', str(currentSurveyId))
        strFinalQuery = strFinalQuery +
strCurrentUnionQueryBlock
        if n != len(surveyQueryDF.index) - 1:
            strFinalQuery = strFinalQuery + ' UNION '

    return strFinalQuery


def refreshViewInDB(connector: dbc.DBConnector, baseViewQuery: str, viewName: str) -> None:
    if(connector.IsConnected == True):
        refreshViewQuery = "CREATE OR ALTER VIEW " + viewName + " AS " + baseViewQuery
        try:
            cursor = connector._dbConduit.cursor()
            cursor.execute(refreshViewQuery)
            connector._dbConduit.commit()
        except Exception as excp:
            raise excp
    else:
        raise Exception('Cannot refresh view, connector object not connected to DB')


def surveyResultsToDF(connector: dbc.DBConnector, viewName: str) -> pd.DataFrame:
    results: pd.DataFrame = None
    if(connector.IsConnected == True):
        selectAllSurveyDataQuery = "SELECT * FROM " + viewName + " ORDER BY UserId, SurveyId"
        try:
            results = connector.ExecuteQuery_withRS(selectAllSurveyDataQuery)
        except Exception as excp:
            raise excp
    else:
        raise Exception('Cannot retrieve survey results, connector object not connected to DB')
    return results


def main():
    cliArguments: dict = None
    printSplashScreen()
    try:
        cliArguments = processCLIArguments()
    except Exception as excp:
        print("Exiting")
        return
    if(cliArguments is not None):
        # if you are using the Visual Studio Solution, you can set the command line parameters within VS (it's done in this example)
        # For setting your own values in VS, please make sure to open the VS Project Properties (menu "Project", bottom choice), tab "Debug", textbox "Script arguments"
        # If you are trying this script outside VS, you must provide command line parameters yourself, e.g. on Windows:
        # python.exe Python_SQL_Project_Sample_Solution --DBServer <YOUR_MSSQL> -d <DBName> -t True
        # See the processCLIArguments() function for accepted parameters
        try:
            connector = mssql.MSSQL_DBConnector(DSN = cliArguments["dsn"], dbserver = cliArguments["dbserver"], \
                dbname = cliArguments["dbname"], dbusername = cliArguments["dbusername"], \
                dbpassword = cliArguments["dbuserpassword"], trustedmode = cliArguments["trustedmode"], \
                viewname = cliArguments["viewname"])
            connector.Open()
            surveyStructureDF: pd.DataFrame = getSurveyStructure(connector)
            if(doesPersistenceFileExist(cliArguments["persistencefilepath"]) == False):
                if(isPersistenceFileDirectoryWritable(cliArguments["persistencefilepath"]) == True):
                    # save the dataframe in the path given by persistencefilepath
                    # TODO
                    # NOTE: one row is dropped before saving, so the stored structure will differ from the DB on the next run
                    df_savedSurveyStructure = surveyStructureDF.drop(surveyStructureDF.index[3])
                    df_savedSurveyStructure.to_csv(cliArguments['persistencefilepath'], index=False, header=True)
                    print("\nINFO - Content of SurveyStructure table saved in " + cliArguments["persistencefilepath"] + "\n")
                    # refresh the view using the function written for this purpose
                    # TODO
                    refreshViewInDB(connector, getAllSurveyDataQuery(connector), cliArguments['viewname'])
            else:
                # Compare the existing saved SurveyStructure file with surveyStructureDF
                # What do you need to do if the dataframe and the saved file are different?
                # TODO
                # get your survey results from the view in a dataframe and save them to a CSV file in the path given by resultsfilepath
                # TODO
                if compareDBSurveyStructureToPersistenceFile(surveyStructureDF, cliArguments["persistencefilepath"]):
                    print("New SurveyStructure is the same as the saved one, nothing to do")
                else:
                    print('SurveyStructure differs from the saved one, need to refresh the view')
                    surveyStructureDF.to_csv(cliArguments['persistencefilepath'], index=False, header=True)
                    print("\nINFO - Content of SurveyStructure table saved in " + cliArguments['persistencefilepath'])
                    refreshViewInDB(connector, getAllSurveyDataQuery(connector), cliArguments['viewname'])
            surveyResultsDF = surveyResultsToDF(connector, cliArguments['viewname'])
            surveyResultsDF.to_csv(cliArguments["resultsfilepath"], index=False, header=True)
            print("\nDONE - Results exported in " + cliArguments["resultsfilepath"] + "\n")
            connector.Close()
        except Exception as excp:
            print(excp)
    else:
        print("Inconsistency: CLI argument dictionary is None. Exiting")
    return


if __name__ == '__main__':
    main()
isPersistenceFileDirectoryWritable
identifier_name
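isPersistenceFileDirectoryWritable relies on os.access, which can disagree with effective permissions (for example on network shares or under ACLs). An EAFP alternative, shown only as a sketch with an illustrative function name, is to attempt an actual write in the target directory:

import os
import tempfile

def is_dir_writable(path: str) -> bool:
    # Try to create a scratch file beside `path` instead of asking os.access.
    try:
        with tempfile.TemporaryFile(dir=os.path.dirname(path) or "."):
            return True
    except OSError:
        return False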
versioned_object_tracker.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cluster import ( "fmt" "sort" "strconv" "sync" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/testing" ) // ObjectTracker keeps track of objects. It is intended to be used to // fake calls to a server by returning objects based on their kind, // namespace and name. type ObjectTracker interface { // Add adds an object to the tracker. If object being added // is a list, its items are added separately. Add(obj runtime.Object) error // Get retrieves the object by its kind, namespace and name. Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) // Create adds an object to the tracker in the specified namespace. Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error // Update updates an existing object in the tracker in the specified namespace. Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error // List retrieves all objects of a given kind in the given // namespace. Only non-List kinds are accepted. List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) // Delete deletes an existing object from the tracker. If object // didn't exist in the tracker prior to deletion, Delete returns // no error. Delete(gvr schema.GroupVersionResource, ns, name string) error // Watch watches objects from the tracker. Watch returns a channel // which will push added / modified / deleted object. Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) GetResourceVersion() uint64 } // ObjectScheme abstracts the implementation of common operations on objects. type ObjectScheme interface { runtime.ObjectCreater runtime.ObjectTyper } type tracker struct { scheme ObjectScheme decoder runtime.Decoder lock sync.RWMutex objects map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object // The value type of watchers is a map of which the key is either a namespace or // all/non namespace aka "" and its value is list of fake watchers. // Manipulations on resources will broadcast the notification events into the // watchers' channel. Note that too many unhandled events (currently 100, // see apimachinery/pkg/watch.DefaultChanSize) will cause a panic. watchers map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher intResourceVersion uint64 } var _ ObjectTracker = &tracker{} // NewObjectTracker returns an ObjectTracker that can be used to keep track // of objects for the fake clientset. Mostly useful for unit tests. 
func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder, rv uint64) ObjectTracker { return &tracker{ scheme: scheme, decoder: decoder, objects: make(map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object), watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), intResourceVersion: rv, } } func (t *tracker) GetResourceVersion() uint64 { return t.intResourceVersion } func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error)
func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { t.lock.Lock() defer t.lock.Unlock() fakewatcher := watch.NewRaceFreeFake() if _, exists := t.watchers[gvr]; !exists { t.watchers[gvr] = make(map[string][]*watch.RaceFreeFakeWatcher) } t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher) return fakewatcher, nil } func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { errNotFound := errors.NewNotFound(gvr.GroupResource(), name) t.lock.RLock() defer t.lock.RUnlock() objs, ok := t.objects[gvr] if !ok { return nil, errNotFound } matchingObj, ok := objs[types.NamespacedName{Namespace: ns, Name: name}] if !ok { return nil, errNotFound } // Only one object should match in the tracker if it works // correctly, as Add/Update methods enforce kind/namespace/name // uniqueness. obj := matchingObj.DeepCopyObject() if status, ok := obj.(*metav1.Status); ok { if status.Status != metav1.StatusSuccess { return nil, &errors.StatusError{ErrStatus: *status} } } return obj, nil } func (t *tracker) Add(obj runtime.Object) error { if meta.IsListType(obj) { return t.addList(obj, false) } objMeta, err := meta.Accessor(obj) if err != nil { //return err return fmt.Errorf("Getting accessor : %s", err.Error()) } gvks, _, err := t.scheme.ObjectKinds(obj) if err != nil { //return err return fmt.Errorf("Getting gvks : %s", err.Error()) } if partial, ok := obj.(*metav1.PartialObjectMetadata); ok && len(partial.TypeMeta.APIVersion) > 0 { gvks = []schema.GroupVersionKind{partial.TypeMeta.GroupVersionKind()} } if len(gvks) == 0 { return fmt.Errorf("no registered kinds for %v", obj) } for _, gvk := range gvks { // NOTE: UnsafeGuessKindToResource is a heuristic and default match. The // actual registration in apiserver can specify arbitrary route for a // gvk. If a test uses such objects, it cannot preset the tracker with // objects via Add(). Instead, it should trigger the Create() function // of the tracker, where an arbitrary gvr can be specified. gvr, _ := meta.UnsafeGuessKindToResource(gvk) // Resource doesn't have the concept of "__internal" version, just set it to "". if gvr.Version == runtime.APIVersionInternal { gvr.Version = "" } err := t.add(gvr, obj, objMeta.GetNamespace(), false) if err != nil { //return err return fmt.Errorf("Adding object : %s", err.Error()) } } return nil } func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { return t.add(gvr, obj, ns, false) } func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { return t.add(gvr, obj, ns, true) } func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher { watches := []*watch.RaceFreeFakeWatcher{} if t.watchers[gvr] != nil { if w := t.watchers[gvr][ns]; w != nil { watches = append(watches, w...) } if ns != metav1.NamespaceAll { if w := t.watchers[gvr][metav1.NamespaceAll]; w != nil { watches = append(watches, w...) } } } return watches } func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error { t.lock.Lock() defer t.lock.Unlock() gr := gvr.GroupResource() // To avoid the object from being accidentally modified by caller // after it's been added to the tracker, we always store the deep // copy. obj = obj.DeepCopyObject() newMeta, err := meta.Accessor(obj) if err != nil { return err } // Propagate namespace to the new object if hasn't already been set. 
if len(newMeta.GetNamespace()) == 0 { newMeta.SetNamespace(ns) } if ns != newMeta.GetNamespace() { msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) return errors.NewBadRequest(msg) } // Increment ResourceVersion t.intResourceVersion++ newMeta.SetResourceVersion(strconv.FormatUint(t.intResourceVersion, 10)) _, ok := t.objects[gvr] if !ok { t.objects[gvr] = make(map[types.NamespacedName]runtime.Object) } namespacedName := types.NamespacedName{Namespace: newMeta.GetNamespace(), Name: newMeta.GetName()} if _, ok = t.objects[gvr][namespacedName]; ok { if replaceExisting { for _, w := range t.getWatches(gvr, ns) { w.Modify(obj.DeepCopyObject()) } t.objects[gvr][namespacedName] = obj return nil } return errors.NewAlreadyExists(gr, newMeta.GetName()) } if replaceExisting { // Tried to update but no matching object was found. return errors.NewNotFound(gr, newMeta.GetName()) } t.objects[gvr][namespacedName] = obj for _, w := range t.getWatches(gvr, ns) { w.Add(obj.DeepCopyObject()) } return nil } func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { list, err := meta.ExtractList(obj) if err != nil { return err } errs := runtime.DecodeList(list, t.decoder) if len(errs) > 0 { return errs[0] } for _, obj := range list { if err := t.Add(obj); err != nil { return err } } return nil } func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { t.lock.Lock() defer t.lock.Unlock() objs, ok := t.objects[gvr] if !ok { return errors.NewNotFound(gvr.GroupResource(), name) } namespacedName := types.NamespacedName{Namespace: ns, Name: name} obj, ok := objs[namespacedName] if !ok { return errors.NewNotFound(gvr.GroupResource(), name) } // Increment ResourceVersion newMeta, err := meta.Accessor(obj) if err != nil { return err } t.intResourceVersion++ newMeta.SetResourceVersion(strconv.FormatUint(t.intResourceVersion, 10)) delete(objs, namespacedName) for _, w := range t.getWatches(gvr, ns) { w.Delete(obj.DeepCopyObject()) } return nil } // filterByNamespace returns all objects in the collection that // match provided namespace. Empty namespace matches // non-namespaced objects. func filterByNamespace(objs map[types.NamespacedName]runtime.Object, ns string) ([]runtime.Object, error) { res := []runtime.Object{} for _, obj := range objs { acc, err := meta.Accessor(obj) if err != nil { return nil, err } if ns != "" && acc.GetNamespace() != ns { continue } res = append(res, obj) } // Sort res to get deterministic order. sort.Slice(res, func(i, j int) bool { acc1, _ := meta.Accessor(res[i]) acc2, _ := meta.Accessor(res[j]) if acc1.GetNamespace() != acc2.GetNamespace() { return acc1.GetNamespace() < acc2.GetNamespace() } return acc1.GetName() < acc2.GetName() }) return res, nil } // DefaultWatchReactor returns default watch action func DefaultWatchReactor(watchInterface watch.Interface, err error) testing.WatchReactionFunc { return func(action testing.Action) (bool, watch.Interface, error) { return true, watchInterface, err } }
{ // Heuristic for list kind: original kind + List suffix. Might // not always be true but this tracker has a pretty limited // understanding of the actual API model. listGVK := gvk listGVK.Kind = listGVK.Kind + "List" // GVK does have the concept of "internal version". The scheme recognizes // the runtime.APIVersionInternal, but not the empty string. if listGVK.Version == "" { listGVK.Version = runtime.APIVersionInternal } list, err := t.scheme.New(listGVK) if err != nil { return nil, err } if !meta.IsListType(list) { return nil, fmt.Errorf("%q is not a list type", listGVK.Kind) } t.lock.RLock() defer t.lock.RUnlock() objs, ok := t.objects[gvr] if !ok { return list, nil } matchingObjs, err := filterByNamespace(objs, ns) if err != nil { return nil, err } if err := meta.SetList(list, matchingObjs); err != nil { return nil, err } return list.DeepCopyObject(), nil }
identifier_body
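The Go tracker above stamps every Create/Update/Delete with a monotonically increasing intResourceVersion, mimicking the apiserver's resourceVersion semantics. Keeping with this document's Python examples, here is a toy equivalent of that bookkeeping; the class and method names are illustrative only, not part of the Go package:

from threading import Lock

class VersionedStore:
    """In-memory store that versions each mutation, like the Go tracker."""
    def __init__(self, rv: int = 0):
        self._rv = rv                    # mirrors intResourceVersion
        self._objects = {}               # (namespace, name) -> payload dict
        self._lock = Lock()

    def add(self, ns: str, name: str, obj: dict) -> dict:
        with self._lock:
            self._rv += 1
            stored = {**obj, "resourceVersion": str(self._rv)}  # store a copy
            self._objects[(ns, name)] = stored
            return dict(stored)          # hand back a copy, never the original

    def resource_version(self) -> int:
        return self._rv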
versioned_object_tracker.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cluster import ( "fmt" "sort" "strconv" "sync" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/testing" ) // ObjectTracker keeps track of objects. It is intended to be used to // fake calls to a server by returning objects based on their kind, // namespace and name. type ObjectTracker interface { // Add adds an object to the tracker. If object being added // is a list, its items are added separately. Add(obj runtime.Object) error // Get retrieves the object by its kind, namespace and name. Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) // Create adds an object to the tracker in the specified namespace. Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error // Update updates an existing object in the tracker in the specified namespace. Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error // List retrieves all objects of a given kind in the given // namespace. Only non-List kinds are accepted. List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) // Delete deletes an existing object from the tracker. If object // didn't exist in the tracker prior to deletion, Delete returns // no error. Delete(gvr schema.GroupVersionResource, ns, name string) error // Watch watches objects from the tracker. Watch returns a channel // which will push added / modified / deleted object. Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) GetResourceVersion() uint64 } // ObjectScheme abstracts the implementation of common operations on objects. type ObjectScheme interface { runtime.ObjectCreater runtime.ObjectTyper } type tracker struct { scheme ObjectScheme decoder runtime.Decoder lock sync.RWMutex objects map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object // The value type of watchers is a map of which the key is either a namespace or // all/non namespace aka "" and its value is list of fake watchers. // Manipulations on resources will broadcast the notification events into the // watchers' channel. Note that too many unhandled events (currently 100, // see apimachinery/pkg/watch.DefaultChanSize) will cause a panic. watchers map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher intResourceVersion uint64 } var _ ObjectTracker = &tracker{} // NewObjectTracker returns an ObjectTracker that can be used to keep track // of objects for the fake clientset. Mostly useful for unit tests. 
func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder, rv uint64) ObjectTracker { return &tracker{ scheme: scheme, decoder: decoder, objects: make(map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object), watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), intResourceVersion: rv, } } func (t *tracker) GetResourceVersion() uint64 { return t.intResourceVersion } func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { // Heuristic for list kind: original kind + List suffix. Might // not always be true but this tracker has a pretty limited // understanding of the actual API model. listGVK := gvk listGVK.Kind = listGVK.Kind + "List" // GVK does have the concept of "internal version". The scheme recognizes // the runtime.APIVersionInternal, but not the empty string. if listGVK.Version == "" { listGVK.Version = runtime.APIVersionInternal } list, err := t.scheme.New(listGVK) if err != nil { return nil, err } if !meta.IsListType(list) { return nil, fmt.Errorf("%q is not a list type", listGVK.Kind) } t.lock.RLock() defer t.lock.RUnlock() objs, ok := t.objects[gvr] if !ok { return list, nil } matchingObjs, err := filterByNamespace(objs, ns) if err != nil { return nil, err
} if err := meta.SetList(list, matchingObjs); err != nil { return nil, err } return list.DeepCopyObject(), nil } func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { t.lock.Lock() defer t.lock.Unlock() fakewatcher := watch.NewRaceFreeFake() if _, exists := t.watchers[gvr]; !exists { t.watchers[gvr] = make(map[string][]*watch.RaceFreeFakeWatcher) } t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher) return fakewatcher, nil } func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { errNotFound := errors.NewNotFound(gvr.GroupResource(), name) t.lock.RLock() defer t.lock.RUnlock() objs, ok := t.objects[gvr] if !ok { return nil, errNotFound } matchingObj, ok := objs[types.NamespacedName{Namespace: ns, Name: name}] if !ok { return nil, errNotFound } // Only one object should match in the tracker if it works // correctly, as Add/Update methods enforce kind/namespace/name // uniqueness. obj := matchingObj.DeepCopyObject() if status, ok := obj.(*metav1.Status); ok { if status.Status != metav1.StatusSuccess { return nil, &errors.StatusError{ErrStatus: *status} } } return obj, nil } func (t *tracker) Add(obj runtime.Object) error { if meta.IsListType(obj) { return t.addList(obj, false) } objMeta, err := meta.Accessor(obj) if err != nil { //return err return fmt.Errorf("Getting accessor : %s", err.Error()) } gvks, _, err := t.scheme.ObjectKinds(obj) if err != nil { //return err return fmt.Errorf("Getting gvks : %s", err.Error()) } if partial, ok := obj.(*metav1.PartialObjectMetadata); ok && len(partial.TypeMeta.APIVersion) > 0 { gvks = []schema.GroupVersionKind{partial.TypeMeta.GroupVersionKind()} } if len(gvks) == 0 { return fmt.Errorf("no registered kinds for %v", obj) } for _, gvk := range gvks { // NOTE: UnsafeGuessKindToResource is a heuristic and default match. The // actual registration in apiserver can specify arbitrary route for a // gvk. If a test uses such objects, it cannot preset the tracker with // objects via Add(). Instead, it should trigger the Create() function // of the tracker, where an arbitrary gvr can be specified. gvr, _ := meta.UnsafeGuessKindToResource(gvk) // Resource doesn't have the concept of "__internal" version, just set it to "". if gvr.Version == runtime.APIVersionInternal { gvr.Version = "" } err := t.add(gvr, obj, objMeta.GetNamespace(), false) if err != nil { //return err return fmt.Errorf("Adding object : %s", err.Error()) } } return nil } func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { return t.add(gvr, obj, ns, false) } func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { return t.add(gvr, obj, ns, true) } func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher { watches := []*watch.RaceFreeFakeWatcher{} if t.watchers[gvr] != nil { if w := t.watchers[gvr][ns]; w != nil { watches = append(watches, w...) } if ns != metav1.NamespaceAll { if w := t.watchers[gvr][metav1.NamespaceAll]; w != nil { watches = append(watches, w...) } } } return watches } func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error { t.lock.Lock() defer t.lock.Unlock() gr := gvr.GroupResource() // To avoid the object from being accidentally modified by caller // after it's been added to the tracker, we always store the deep // copy. 
obj = obj.DeepCopyObject() newMeta, err := meta.Accessor(obj) if err != nil { return err } // Propagate namespace to the new object if hasn't already been set. if len(newMeta.GetNamespace()) == 0 { newMeta.SetNamespace(ns) } if ns != newMeta.GetNamespace() { msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) return errors.NewBadRequest(msg) } // Increment ResourceVersion t.intResourceVersion++ newMeta.SetResourceVersion(strconv.FormatUint(t.intResourceVersion, 10)) _, ok := t.objects[gvr] if !ok { t.objects[gvr] = make(map[types.NamespacedName]runtime.Object) } namespacedName := types.NamespacedName{Namespace: newMeta.GetNamespace(), Name: newMeta.GetName()} if _, ok = t.objects[gvr][namespacedName]; ok { if replaceExisting { for _, w := range t.getWatches(gvr, ns) { w.Modify(obj.DeepCopyObject()) } t.objects[gvr][namespacedName] = obj return nil } return errors.NewAlreadyExists(gr, newMeta.GetName()) } if replaceExisting { // Tried to update but no matching object was found. return errors.NewNotFound(gr, newMeta.GetName()) } t.objects[gvr][namespacedName] = obj for _, w := range t.getWatches(gvr, ns) { w.Add(obj.DeepCopyObject()) } return nil } func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { list, err := meta.ExtractList(obj) if err != nil { return err } errs := runtime.DecodeList(list, t.decoder) if len(errs) > 0 { return errs[0] } for _, obj := range list { if err := t.Add(obj); err != nil { return err } } return nil } func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { t.lock.Lock() defer t.lock.Unlock() objs, ok := t.objects[gvr] if !ok { return errors.NewNotFound(gvr.GroupResource(), name) } namespacedName := types.NamespacedName{Namespace: ns, Name: name} obj, ok := objs[namespacedName] if !ok { return errors.NewNotFound(gvr.GroupResource(), name) } // Increment ResourceVersion newMeta, err := meta.Accessor(obj) if err != nil { return err } t.intResourceVersion++ newMeta.SetResourceVersion(strconv.FormatUint(t.intResourceVersion, 10)) delete(objs, namespacedName) for _, w := range t.getWatches(gvr, ns) { w.Delete(obj.DeepCopyObject()) } return nil } // filterByNamespace returns all objects in the collection that // match provided namespace. Empty namespace matches // non-namespaced objects. func filterByNamespace(objs map[types.NamespacedName]runtime.Object, ns string) ([]runtime.Object, error) { res := []runtime.Object{} for _, obj := range objs { acc, err := meta.Accessor(obj) if err != nil { return nil, err } if ns != "" && acc.GetNamespace() != ns { continue } res = append(res, obj) } // Sort res to get deterministic order. sort.Slice(res, func(i, j int) bool { acc1, _ := meta.Accessor(res[i]) acc2, _ := meta.Accessor(res[j]) if acc1.GetNamespace() != acc2.GetNamespace() { return acc1.GetNamespace() < acc2.GetNamespace() } return acc1.GetName() < acc2.GetName() }) return res, nil } // DefaultWatchReactor returns default watch action func DefaultWatchReactor(watchInterface watch.Interface, err error) testing.WatchReactionFunc { return func(action testing.Action) (bool, watch.Interface, error) { return true, watchInterface, err } }
random_line_split
versioned_object_tracker.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cluster import ( "fmt" "sort" "strconv" "sync" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/testing" ) // ObjectTracker keeps track of objects. It is intended to be used to // fake calls to a server by returning objects based on their kind, // namespace and name. type ObjectTracker interface { // Add adds an object to the tracker. If object being added // is a list, its items are added separately. Add(obj runtime.Object) error // Get retrieves the object by its kind, namespace and name. Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) // Create adds an object to the tracker in the specified namespace. Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error // Update updates an existing object in the tracker in the specified namespace. Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error // List retrieves all objects of a given kind in the given // namespace. Only non-List kinds are accepted. List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) // Delete deletes an existing object from the tracker. If object // didn't exist in the tracker prior to deletion, Delete returns // no error. Delete(gvr schema.GroupVersionResource, ns, name string) error // Watch watches objects from the tracker. Watch returns a channel // which will push added / modified / deleted object. Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) GetResourceVersion() uint64 } // ObjectScheme abstracts the implementation of common operations on objects. type ObjectScheme interface { runtime.ObjectCreater runtime.ObjectTyper } type tracker struct { scheme ObjectScheme decoder runtime.Decoder lock sync.RWMutex objects map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object // The value type of watchers is a map of which the key is either a namespace or // all/non namespace aka "" and its value is list of fake watchers. // Manipulations on resources will broadcast the notification events into the // watchers' channel. Note that too many unhandled events (currently 100, // see apimachinery/pkg/watch.DefaultChanSize) will cause a panic. watchers map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher intResourceVersion uint64 } var _ ObjectTracker = &tracker{} // NewObjectTracker returns an ObjectTracker that can be used to keep track // of objects for the fake clientset. Mostly useful for unit tests. 
func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder, rv uint64) ObjectTracker { return &tracker{ scheme: scheme, decoder: decoder, objects: make(map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object), watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), intResourceVersion: rv, } } func (t *tracker) GetResourceVersion() uint64 { return t.intResourceVersion } func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { // Heuristic for list kind: original kind + List suffix. Might // not always be true but this tracker has a pretty limited // understanding of the actual API model. listGVK := gvk listGVK.Kind = listGVK.Kind + "List" // GVK does have the concept of "internal version". The scheme recognizes // the runtime.APIVersionInternal, but not the empty string. if listGVK.Version == "" { listGVK.Version = runtime.APIVersionInternal } list, err := t.scheme.New(listGVK) if err != nil { return nil, err } if !meta.IsListType(list) { return nil, fmt.Errorf("%q is not a list type", listGVK.Kind) } t.lock.RLock() defer t.lock.RUnlock() objs, ok := t.objects[gvr] if !ok { return list, nil } matchingObjs, err := filterByNamespace(objs, ns) if err != nil { return nil, err } if err := meta.SetList(list, matchingObjs); err != nil { return nil, err } return list.DeepCopyObject(), nil } func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { t.lock.Lock() defer t.lock.Unlock() fakewatcher := watch.NewRaceFreeFake() if _, exists := t.watchers[gvr]; !exists { t.watchers[gvr] = make(map[string][]*watch.RaceFreeFakeWatcher) } t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher) return fakewatcher, nil } func (t *tracker)
(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { errNotFound := errors.NewNotFound(gvr.GroupResource(), name) t.lock.RLock() defer t.lock.RUnlock() objs, ok := t.objects[gvr] if !ok { return nil, errNotFound } matchingObj, ok := objs[types.NamespacedName{Namespace: ns, Name: name}] if !ok { return nil, errNotFound } // Only one object should match in the tracker if it works // correctly, as Add/Update methods enforce kind/namespace/name // uniqueness. obj := matchingObj.DeepCopyObject() if status, ok := obj.(*metav1.Status); ok { if status.Status != metav1.StatusSuccess { return nil, &errors.StatusError{ErrStatus: *status} } } return obj, nil } func (t *tracker) Add(obj runtime.Object) error { if meta.IsListType(obj) { return t.addList(obj, false) } objMeta, err := meta.Accessor(obj) if err != nil { //return err return fmt.Errorf("Getting accessor : %s", err.Error()) } gvks, _, err := t.scheme.ObjectKinds(obj) if err != nil { //return err return fmt.Errorf("Getting gvks : %s", err.Error()) } if partial, ok := obj.(*metav1.PartialObjectMetadata); ok && len(partial.TypeMeta.APIVersion) > 0 { gvks = []schema.GroupVersionKind{partial.TypeMeta.GroupVersionKind()} } if len(gvks) == 0 { return fmt.Errorf("no registered kinds for %v", obj) } for _, gvk := range gvks { // NOTE: UnsafeGuessKindToResource is a heuristic and default match. The // actual registration in apiserver can specify arbitrary route for a // gvk. If a test uses such objects, it cannot preset the tracker with // objects via Add(). Instead, it should trigger the Create() function // of the tracker, where an arbitrary gvr can be specified. gvr, _ := meta.UnsafeGuessKindToResource(gvk) // Resource doesn't have the concept of "__internal" version, just set it to "". if gvr.Version == runtime.APIVersionInternal { gvr.Version = "" } err := t.add(gvr, obj, objMeta.GetNamespace(), false) if err != nil { //return err return fmt.Errorf("Adding object : %s", err.Error()) } } return nil } func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { return t.add(gvr, obj, ns, false) } func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { return t.add(gvr, obj, ns, true) } func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher { watches := []*watch.RaceFreeFakeWatcher{} if t.watchers[gvr] != nil { if w := t.watchers[gvr][ns]; w != nil { watches = append(watches, w...) } if ns != metav1.NamespaceAll { if w := t.watchers[gvr][metav1.NamespaceAll]; w != nil { watches = append(watches, w...) } } } return watches } func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error { t.lock.Lock() defer t.lock.Unlock() gr := gvr.GroupResource() // To avoid the object from being accidentally modified by caller // after it's been added to the tracker, we always store the deep // copy. obj = obj.DeepCopyObject() newMeta, err := meta.Accessor(obj) if err != nil { return err } // Propagate namespace to the new object if hasn't already been set. 
if len(newMeta.GetNamespace()) == 0 { newMeta.SetNamespace(ns) } if ns != newMeta.GetNamespace() { msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) return errors.NewBadRequest(msg) } // Increment ResourceVersion t.intResourceVersion++ newMeta.SetResourceVersion(strconv.FormatUint(t.intResourceVersion, 10)) _, ok := t.objects[gvr] if !ok { t.objects[gvr] = make(map[types.NamespacedName]runtime.Object) } namespacedName := types.NamespacedName{Namespace: newMeta.GetNamespace(), Name: newMeta.GetName()} if _, ok = t.objects[gvr][namespacedName]; ok { if replaceExisting { for _, w := range t.getWatches(gvr, ns) { w.Modify(obj.DeepCopyObject()) } t.objects[gvr][namespacedName] = obj return nil } return errors.NewAlreadyExists(gr, newMeta.GetName()) } if replaceExisting { // Tried to update but no matching object was found. return errors.NewNotFound(gr, newMeta.GetName()) } t.objects[gvr][namespacedName] = obj for _, w := range t.getWatches(gvr, ns) { w.Add(obj.DeepCopyObject()) } return nil } func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { list, err := meta.ExtractList(obj) if err != nil { return err } errs := runtime.DecodeList(list, t.decoder) if len(errs) > 0 { return errs[0] } for _, obj := range list { if err := t.Add(obj); err != nil { return err } } return nil } func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { t.lock.Lock() defer t.lock.Unlock() objs, ok := t.objects[gvr] if !ok { return errors.NewNotFound(gvr.GroupResource(), name) } namespacedName := types.NamespacedName{Namespace: ns, Name: name} obj, ok := objs[namespacedName] if !ok { return errors.NewNotFound(gvr.GroupResource(), name) } // Increment ResourceVersion newMeta, err := meta.Accessor(obj) if err != nil { return err } t.intResourceVersion++ newMeta.SetResourceVersion(strconv.FormatUint(t.intResourceVersion, 10)) delete(objs, namespacedName) for _, w := range t.getWatches(gvr, ns) { w.Delete(obj.DeepCopyObject()) } return nil } // filterByNamespace returns all objects in the collection that // match the provided namespace. An empty namespace matches // non-namespaced objects. func filterByNamespace(objs map[types.NamespacedName]runtime.Object, ns string) ([]runtime.Object, error) { res := []runtime.Object{} for _, obj := range objs { acc, err := meta.Accessor(obj) if err != nil { return nil, err } if ns != "" && acc.GetNamespace() != ns { continue } res = append(res, obj) } // Sort res to get deterministic order. sort.Slice(res, func(i, j int) bool { acc1, _ := meta.Accessor(res[i]) acc2, _ := meta.Accessor(res[j]) if acc1.GetNamespace() != acc2.GetNamespace() { return acc1.GetNamespace() < acc2.GetNamespace() } return acc1.GetName() < acc2.GetName() }) return res, nil } // DefaultWatchReactor returns a watch reaction function that always responds with the provided watch.Interface and error. func DefaultWatchReactor(watchInterface watch.Interface, err error) testing.WatchReactionFunc { return func(action testing.Action) (bool, watch.Interface, error) { return true, watchInterface, err } }
Get
identifier_name
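The record above exercises the tracker's Get path. For orientation, here is a minimal, hypothetical Go sketch of how this package's tracker could be driven from a test, assuming the stock client-go scheme and codecs; the Pod name, namespace, and extra imports are illustrative, not part of the record:

// Hypothetical usage sketch inside package cluster. Assumes additional
// imports: "fmt", corev1 "k8s.io/api/core/v1", and
// "k8s.io/client-go/kubernetes/scheme" (whose *runtime.Scheme satisfies
// this file's ObjectScheme interface).
func exampleTrackerGet() {
	tr := NewObjectTracker(scheme.Scheme, scheme.Codecs.UniversalDecoder(), 0)
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "web"}}
	// Add routes the object to a GVR guessed from its kind ("Pod" -> "pods").
	if err := tr.Add(pod); err != nil {
		panic(err)
	}
	gvr := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	obj, err := tr.Get(gvr, "ns1", "web") // returns a deep copy, or a NotFound error
	fmt.Println(obj != nil, err, tr.GetResourceVersion())
}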
versioned_object_tracker.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cluster import ( "fmt" "sort" "strconv" "sync" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/testing" ) // ObjectTracker keeps track of objects. It is intended to be used to // fake calls to a server by returning objects based on their kind, // namespace and name. type ObjectTracker interface { // Add adds an object to the tracker. If the object being added // is a list, its items are added separately. Add(obj runtime.Object) error // Get retrieves the object by its kind, namespace and name. Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) // Create adds an object to the tracker in the specified namespace. Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error // Update updates an existing object in the tracker in the specified namespace. Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error // List retrieves all objects of a given kind in the given // namespace. Only non-List kinds are accepted. List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) // Delete deletes an existing object from the tracker. If the object // didn't exist in the tracker prior to deletion, Delete returns // no error. Delete(gvr schema.GroupVersionResource, ns, name string) error // Watch watches objects from the tracker. Watch returns a channel // which will push added / modified / deleted objects. Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) GetResourceVersion() uint64 } // ObjectScheme abstracts the implementation of common operations on objects. type ObjectScheme interface { runtime.ObjectCreater runtime.ObjectTyper } type tracker struct { scheme ObjectScheme decoder runtime.Decoder lock sync.RWMutex objects map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object // The value type of watchers is a map of which the key is either a namespace or // all/non namespace aka "" and its value is a list of fake watchers. // Manipulations on resources will broadcast the notification events into the // watchers' channel. Note that too many unhandled events (currently 100, // see apimachinery/pkg/watch.DefaultChanSize) will cause a panic. watchers map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher intResourceVersion uint64 } var _ ObjectTracker = &tracker{} // NewObjectTracker returns an ObjectTracker that can be used to keep track // of objects for the fake clientset. Mostly useful for unit tests. 
func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder, rv uint64) ObjectTracker { return &tracker{ scheme: scheme, decoder: decoder, objects: make(map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object), watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), intResourceVersion: rv, } } func (t *tracker) GetResourceVersion() uint64 { return t.intResourceVersion } func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { // Heuristic for list kind: original kind + List suffix. Might // not always be true but this tracker has a pretty limited // understanding of the actual API model. listGVK := gvk listGVK.Kind = listGVK.Kind + "List" // GVK does have the concept of "internal version". The scheme recognizes // the runtime.APIVersionInternal, but not the empty string. if listGVK.Version == "" { listGVK.Version = runtime.APIVersionInternal } list, err := t.scheme.New(listGVK) if err != nil { return nil, err } if !meta.IsListType(list) { return nil, fmt.Errorf("%q is not a list type", listGVK.Kind) } t.lock.RLock() defer t.lock.RUnlock() objs, ok := t.objects[gvr] if !ok { return list, nil } matchingObjs, err := filterByNamespace(objs, ns) if err != nil { return nil, err } if err := meta.SetList(list, matchingObjs); err != nil { return nil, err } return list.DeepCopyObject(), nil } func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { t.lock.Lock() defer t.lock.Unlock() fakewatcher := watch.NewRaceFreeFake() if _, exists := t.watchers[gvr]; !exists { t.watchers[gvr] = make(map[string][]*watch.RaceFreeFakeWatcher) } t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher) return fakewatcher, nil } func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { errNotFound := errors.NewNotFound(gvr.GroupResource(), name) t.lock.RLock() defer t.lock.RUnlock() objs, ok := t.objects[gvr] if !ok { return nil, errNotFound } matchingObj, ok := objs[types.NamespacedName{Namespace: ns, Name: name}] if !ok { return nil, errNotFound } // Only one object should match in the tracker if it works // correctly, as Add/Update methods enforce kind/namespace/name // uniqueness. obj := matchingObj.DeepCopyObject() if status, ok := obj.(*metav1.Status); ok { if status.Status != metav1.StatusSuccess { return nil, &errors.StatusError{ErrStatus: *status} } } return obj, nil } func (t *tracker) Add(obj runtime.Object) error { if meta.IsListType(obj) { return t.addList(obj, false) } objMeta, err := meta.Accessor(obj) if err != nil { return fmt.Errorf("getting accessor: %w", err) } gvks, _, err := t.scheme.ObjectKinds(obj) if err != nil { return fmt.Errorf("getting gvks: %w", err) } if partial, ok := obj.(*metav1.PartialObjectMetadata); ok && len(partial.TypeMeta.APIVersion) > 0 { gvks = []schema.GroupVersionKind{partial.TypeMeta.GroupVersionKind()} } if len(gvks) == 0 { return fmt.Errorf("no registered kinds for %v", obj) } for _, gvk := range gvks { // NOTE: UnsafeGuessKindToResource is a heuristic and default match. The // actual registration in apiserver can specify arbitrary route for a // gvk. If a test uses such objects, it cannot preset the tracker with // objects via Add(). Instead, it should trigger the Create() function // of the tracker, where an arbitrary gvr can be specified. 
gvr, _ := meta.UnsafeGuessKindToResource(gvk) // Resource doesn't have the concept of "__internal" version, just set it to "". if gvr.Version == runtime.APIVersionInternal { gvr.Version = "" } err := t.add(gvr, obj, objMeta.GetNamespace(), false) if err != nil { return fmt.Errorf("adding object: %w", err) } } return nil } func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { return t.add(gvr, obj, ns, false) } func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { return t.add(gvr, obj, ns, true) } func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher { watches := []*watch.RaceFreeFakeWatcher{} if t.watchers[gvr] != nil { if w := t.watchers[gvr][ns]; w != nil { watches = append(watches, w...) } if ns != metav1.NamespaceAll { if w := t.watchers[gvr][metav1.NamespaceAll]; w != nil { watches = append(watches, w...) } } } return watches } func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error { t.lock.Lock() defer t.lock.Unlock() gr := gvr.GroupResource() // To prevent the object from being accidentally modified by the caller // after it's been added to the tracker, we always store a deep // copy. obj = obj.DeepCopyObject() newMeta, err := meta.Accessor(obj) if err != nil { return err } // Propagate the namespace to the new object if it hasn't already been set. if len(newMeta.GetNamespace()) == 0 { newMeta.SetNamespace(ns) } if ns != newMeta.GetNamespace() { msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) return errors.NewBadRequest(msg) } // Increment ResourceVersion t.intResourceVersion++ newMeta.SetResourceVersion(strconv.FormatUint(t.intResourceVersion, 10)) _, ok := t.objects[gvr] if !ok { t.objects[gvr] = make(map[types.NamespacedName]runtime.Object) } namespacedName := types.NamespacedName{Namespace: newMeta.GetNamespace(), Name: newMeta.GetName()} if _, ok = t.objects[gvr][namespacedName]; ok { if replaceExisting { for _, w := range t.getWatches(gvr, ns) { w.Modify(obj.DeepCopyObject()) } t.objects[gvr][namespacedName] = obj return nil } return errors.NewAlreadyExists(gr, newMeta.GetName()) } if replaceExisting { // Tried to update but no matching object was found. return errors.NewNotFound(gr, newMeta.GetName()) } t.objects[gvr][namespacedName] = obj for _, w := range t.getWatches(gvr, ns) { w.Add(obj.DeepCopyObject()) } return nil } func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { list, err := meta.ExtractList(obj) if err != nil { return err } errs := runtime.DecodeList(list, t.decoder) if len(errs) > 0
for _, obj := range list { if err := t.Add(obj); err != nil { return err } } return nil } func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { t.lock.Lock() defer t.lock.Unlock() objs, ok := t.objects[gvr] if !ok { return errors.NewNotFound(gvr.GroupResource(), name) } namespacedName := types.NamespacedName{Namespace: ns, Name: name} obj, ok := objs[namespacedName] if !ok { return errors.NewNotFound(gvr.GroupResource(), name) } // Increment ResourceVersion newMeta, err := meta.Accessor(obj) if err != nil { return err } t.intResourceVersion++ newMeta.SetResourceVersion(strconv.FormatUint(t.intResourceVersion, 10)) delete(objs, namespacedName) for _, w := range t.getWatches(gvr, ns) { w.Delete(obj.DeepCopyObject()) } return nil } // filterByNamespace returns all objects in the collection that // match the provided namespace. An empty namespace matches // non-namespaced objects. func filterByNamespace(objs map[types.NamespacedName]runtime.Object, ns string) ([]runtime.Object, error) { res := []runtime.Object{} for _, obj := range objs { acc, err := meta.Accessor(obj) if err != nil { return nil, err } if ns != "" && acc.GetNamespace() != ns { continue } res = append(res, obj) } // Sort res to get deterministic order. sort.Slice(res, func(i, j int) bool { acc1, _ := meta.Accessor(res[i]) acc2, _ := meta.Accessor(res[j]) if acc1.GetNamespace() != acc2.GetNamespace() { return acc1.GetNamespace() < acc2.GetNamespace() } return acc1.GetName() < acc2.GetName() }) return res, nil } // DefaultWatchReactor returns a watch reaction function that always responds with the provided watch.Interface and error. func DefaultWatchReactor(watchInterface watch.Interface, err error) testing.WatchReactionFunc { return func(action testing.Action) (bool, watch.Interface, error) { return true, watchInterface, err } }
{ return errs[0] }
conditional_block
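Both tracker records turn on how add and Delete fan events out through getWatches. A small, hypothetical Go sketch of that behaviour, reusing the in-package setup assumed above (the GVR and object names are invented for illustration):

// Hypothetical watch fan-out sketch (package cluster); assumes the same
// extra imports as the earlier sketch plus corev1.ConfigMap.
func exampleTrackerWatch(tr ObjectTracker) {
	gvr := schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}
	allNs, _ := tr.Watch(gvr, metav1.NamespaceAll) // sees every namespace
	oneNs, _ := tr.Watch(gvr, "ns1")               // sees only ns1
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "ns2", Name: "cfg"}}
	_ = tr.Create(gvr, cm, "ns2")
	ev := <-allNs.ResultChan() // a watch.Added event carrying a deep copy of cm
	_ = ev
	// oneNs receives nothing for ns2. Per the tracker struct's comment,
	// letting more than watch.DefaultChanSize events pile up on a watcher
	// panics, so tests should keep draining ResultChan.
	_ = oneNs
}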
lib.rs
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. mod additional_cpp_generator; mod byvalue_checker; mod byvalue_scanner; mod conversion; mod function_wrapper; mod known_types; mod parse; mod rust_pretty_printer; mod type_database; mod typedef_analyzer; mod types; #[cfg(any(test, feature = "build"))] mod builder; #[cfg(test)] mod integration_tests; use conversion::BridgeConverter; use proc_macro2::TokenStream as TokenStream2; use std::{fmt::Display, path::PathBuf}; use type_database::TypeDatabase; use quote::ToTokens; use syn::{ parse::{Parse, ParseStream, Result as ParseResult}, Token, }; use syn::{parse_quote, ItemMod, Macro}; use additional_cpp_generator::AdditionalCppGenerator; use itertools::join; use known_types::KNOWN_TYPES; use log::{info, warn}; use types::TypeName; /// We use a forked version of bindgen - for now. /// We hope to unfork. use autocxx_bindgen as bindgen; #[cfg(any(test, feature = "build"))] pub use builder::{build, expect_build, BuilderError, BuilderResult, BuilderSuccess}; pub use parse::{parse_file, parse_token_stream, ParseError, ParsedFile}; pub use cxx_gen::HEADER; /// Re-export cxx such that clients can use the same version as /// us. This doesn't enable clients to avoid depending on the cxx /// crate too, unfortunately, since generated cxx::bridge code /// refers explicitly to ::cxx. See /// https://github.com/google/autocxx/issues/36 pub use cxx; pub struct CppFilePair { pub header: Vec<u8>, pub implementation: Vec<u8>, pub header_name: String, } pub struct GeneratedCpp(pub Vec<CppFilePair>); /// Errors which may occur in generating bindings for these C++ /// functions. #[derive(Debug)] pub enum Error { /// Any error reported by bindgen, generating the C++ bindings. /// Any C++ parsing errors, etc. would be reported this way. Bindgen(()), /// Any problem parsing the Rust file. Parsing(syn::Error), /// No C++ include directory was provided. NoAutoCxxInc, /// The include directories specified were incorrect. CouldNotCanoncalizeIncludeDir(PathBuf), /// Some error occurred in converting the bindgen-style /// bindings to safe cxx bindings. Conversion(conversion::ConvertError), /// No 'generate' or 'generate_pod' was specified. /// It might be that in future we can simply let things work /// without any allowlist, in which case bindgen should generate /// bindings for everything. That just seems very unlikely to work /// in the common case right now. NoGenerationRequested, } impl Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Error::Bindgen(_) => write!(f, "Bindgen was unable to generate the initial .rs bindings for this file. This may indicate a parsing problem with the C++ headers.")?, Error::Parsing(err) => write!(f, "The Rust file could not be parsed: {}", err)?, Error::NoAutoCxxInc => write!(f, "No C++ include directory was provided. 
Consider setting AUTOCXX_INC.")?, Error::CouldNotCanoncalizeIncludeDir(pb) => write!(f, "One of the C++ include directories provided ({}) did not appear to exist or could otherwise not be made into a canonical path.", pb.to_string_lossy())?, Error::Conversion(err) => write!(f, "autocxx could not generate the requested bindings. {}", err)?, Error::NoGenerationRequested => write!(f, "No 'generate' or 'generate_pod' directives were found, so we would not generate any Rust bindings despite the inclusion of C++ headers.")?, } Ok(()) } } pub type Result<T, E = Error> = std::result::Result<T, E>; pub enum CppInclusion { Define(String), Header(String), } #[allow(clippy::large_enum_variant)] // because this is only used once enum State { NotGenerated, ParseOnly, NothingGenerated, Generated(ItemMod, AdditionalCppGenerator), } /// Core of the autocxx engine. See `generate` for most details /// on how this works. /// /// TODO - consider whether this 'engine' crate should actually be a /// directory of source symlinked from all the other sub-crates, so that /// we avoid exposing an external interface from this code. pub struct IncludeCpp { inclusions: Vec<CppInclusion>, type_database: TypeDatabase, preconfigured_inc_dirs: Option<std::ffi::OsString>, exclude_utilities: bool, state: State, } impl Parse for IncludeCpp { fn parse(input: ParseStream) -> ParseResult<Self> { Self::new_from_parse_stream(input) } } impl IncludeCpp { fn new_from_parse_stream(input: ParseStream) -> syn::Result<Self> { // Takes as inputs: // 1. List of headers to include // 2. List of #defines to include // 3. Allowlist let mut inclusions = Vec::new(); let mut parse_only = false; let mut exclude_utilities = false; let mut type_database = TypeDatabase::new(); let mut unsafe_found = false; while !input.is_empty() { if input.parse::<Option<syn::Token![#]>>()?.is_some() { let ident: syn::Ident = input.parse()?; if ident != "include" { return Err(syn::Error::new(ident.span(), "expected include")); } let hdr: syn::LitStr = input.parse()?; inclusions.push(CppInclusion::Header(hdr.value())); } else if input.parse::<Option<Token![unsafe]>>()?.is_some() { unsafe_found = true; } else { let ident: syn::Ident = input.parse()?; input.parse::<Option<syn::Token![!]>>()?; if ident == "generate" || ident == "generate_pod" { let args; syn::parenthesized!(args in input); let generate: syn::LitStr = args.parse()?; type_database.add_to_allowlist(generate.value()); if ident == "generate_pod" { type_database .note_pod_request(TypeName::new_from_user_input(&generate.value())); } } else if ident == "nested_type" { let args; syn::parenthesized!(args in input); let nested: syn::LitStr = args.parse()?; args.parse::<syn::Token![,]>()?; let nested_in: syn::LitStr = args.parse()?; type_database.note_nested_type( TypeName::new_from_user_input(&nested.value()), TypeName::new_from_user_input(&nested_in.value()), ); } else if ident == "block" { let args; syn::parenthesized!(args in input); let generate: syn::LitStr = args.parse()?; type_database.add_to_blocklist(generate.value()); } else if ident == "parse_only" { parse_only = true; } else if ident == "exclude_utilities" { exclude_utilities = true; } else { return Err(syn::Error::new( ident.span(), "expected generate, generate_pod, nested_type or exclude_utilities", )); } } if input.is_empty() { break; } } if !exclude_utilities { type_database.add_to_allowlist("make_string".to_string()); } if !unsafe_found { return Err(syn::Error::new( input.span(), "the unsafe keyword must be specified within each include_cpp! 
block", )); } Ok(IncludeCpp { inclusions, preconfigured_inc_dirs: None, exclude_utilities, type_database, state: if parse_only { State::ParseOnly } else { State::NotGenerated }, }) } pub fn new_from_syn(mac: Macro) -> Result<Self> { mac.parse_body::<IncludeCpp>().map_err(Error::Parsing) } pub fn set_include_dirs<P: AsRef<std::ffi::OsStr>>(&mut self, include_dirs: P) { self.preconfigured_inc_dirs = Some(include_dirs.as_ref().into()); } fn build_header(&self) -> String { join( self.inclusions.iter().map(|incl| match incl { CppInclusion::Define(symbol) => format!("#define {}\n", symbol), CppInclusion::Header(path) => format!("#include \"{}\"\n", path), }), "", ) } fn determine_incdirs(&self) -> Result<Vec<PathBuf>> { let inc_dirs = match &self.preconfigured_inc_dirs { Some(d) => d.clone(), None => std::env::var_os("AUTOCXX_INC").ok_or(Error::NoAutoCxxInc)?, };
// instead of requiring callers always to set AUTOCXX_INC. // On Windows, the canonical path begins with a UNC prefix that cannot be passed to // the MSVC compiler, so dunce::canonicalize() is used instead of std::fs::canonicalize() // See: // * https://github.com/dtolnay/cxx/pull/41 // * https://github.com/alexcrichton/cc-rs/issues/169 inc_dirs .map(|p| dunce::canonicalize(&p).map_err(|_| Error::CouldNotCanoncalizeIncludeDir(p))) .collect() } fn make_bindgen_builder(&self) -> Result<bindgen::Builder> { // TODO support different C++ versions let mut builder = bindgen::builder() .clang_args(&["-x", "c++", "-std=c++14"]) .derive_copy(false) .derive_debug(false) .default_enum_style(bindgen::EnumVariation::Rust { non_exhaustive: false, }) .enable_cxx_namespaces() .disable_nested_struct_naming() .generate_inline_functions(true) .layout_tests(false); // TODO revisit later for item in known_types::get_initial_blocklist() { builder = builder.blacklist_item(item); } for inc_dir in self.determine_incdirs()? { // TODO work with OsStrs here to avoid the .display() builder = builder.clang_arg(format!("-I{}", inc_dir.display())); } // 3. Passes allowlist and other options to the bindgen::Builder equivalent // to --output-style=cxx --allowlist=<as passed in> for a in self.type_database.allowlist() { // TODO - allowlist type/functions/separately builder = builder .whitelist_type(a) .whitelist_function(a) .whitelist_var(a); } Ok(builder) } fn inject_header_into_bindgen(&self, mut builder: bindgen::Builder) -> bindgen::Builder { let full_header = self.build_header(); let full_header = format!("{}\n\n{}", KNOWN_TYPES.get_prelude(), full_header,); builder = builder.header_contents("example.hpp", &full_header); builder } /// Generate the Rust bindings. Call `generate` first. pub fn generate_rs(&self) -> TokenStream2 { match &self.state { State::NotGenerated => panic!("Call generate() first"), State::Generated(itemmod, _) => itemmod.to_token_stream(), State::NothingGenerated | State::ParseOnly => TokenStream2::new(), } } fn parse_bindings(&self, bindings: bindgen::Bindings) -> Result<ItemMod> { // This bindings object is actually a TokenStream internally and we're wasting // effort converting to and from string. We could enhance the bindgen API // in future. let bindings = bindings.to_string(); // Manually add the mod ffi {} so that we can ask syn to parse // into a single construct. let bindings = format!("mod bindgen {{ {} }}", bindings); info!("Bindings: {}", bindings); syn::parse_str::<ItemMod>(&bindings).map_err(Error::Parsing) } fn generate_include_list(&self) -> Vec<String> { let mut include_list = Vec::new(); for incl in &self.inclusions { match incl { CppInclusion::Header(ref hdr) => { include_list.push(hdr.clone()); } CppInclusion::Define(_) => warn!("Currently no way to define! within cxx"), } } include_list } /// Actually examine the headers to find out what needs generating. /// Most errors occur at this stage as we fail to interpret the C++ /// headers properly. /// /// The basic idea is this. We will run `bindgen` which will spit /// out a ton of Rust code corresponding to all the types and functions /// defined in C++. We'll then post-process that bindgen output /// into a form suitable for ingestion by `cxx`. /// (It's the `bridge_converter` mod which does that.) /// Along the way, the `bridge_converter` might tell us of additional /// C++ code which we should generate, e.g. wrappers to move things /// into and out of `UniquePtr`s. 
pub fn generate(&mut self) -> Result<()> { // If we are in parse only mode, do nothing. This is used for // doc tests to ensure the parsing is valid, but we can't expect // valid C++ header files or linkers to allow a complete build. match self.state { State::ParseOnly => return Ok(()), State::NotGenerated => {} State::Generated(_, _) | State::NothingGenerated => panic!("Only call generate once"), } if self.type_database.allowlist_is_empty() { return Err(Error::NoGenerationRequested); } let builder = self.make_bindgen_builder()?; let bindings = self .inject_header_into_bindgen(builder) .generate() .map_err(Error::Bindgen)?; let bindings = self.parse_bindings(bindings)?; let include_list = self.generate_include_list(); let mut converter = BridgeConverter::new(&include_list, &self.type_database); let conversion = converter .convert(bindings, self.exclude_utilities) .map_err(Error::Conversion)?; let mut additional_cpp_generator = AdditionalCppGenerator::new(self.build_header()); additional_cpp_generator.add_needs(conversion.additional_cpp_needs, &self.type_database); let mut items = conversion.items; let mut new_bindings: ItemMod = parse_quote! { #[allow(non_snake_case)] #[allow(dead_code)] #[allow(non_upper_case_globals)] #[allow(non_camel_case_types)] mod ffi { } }; new_bindings.content.as_mut().unwrap().1.append(&mut items); info!( "New bindings:\n{}", rust_pretty_printer::pretty_print(&new_bindings.to_token_stream()) ); self.state = State::Generated(new_bindings, additional_cpp_generator); Ok(()) } /// Generate C++-side bindings for these APIs. Call `generate` first. pub fn generate_h_and_cxx(&self) -> Result<GeneratedCpp, cxx_gen::Error> { let mut files = Vec::new(); match &self.state { State::ParseOnly => panic!("Cannot generate C++ in parse-only mode"), State::NotGenerated => panic!("Call generate() first"), State::NothingGenerated => {} State::Generated(itemmod, additional_cpp_generator) => { let rs = itemmod.into_token_stream(); let opt = cxx_gen::Opt::default(); let cxx_generated = cxx_gen::generate_header_and_cc(rs, &opt)?; files.push(CppFilePair { header: cxx_generated.header, header_name: "cxxgen.h".to_string(), implementation: cxx_generated.implementation, }); match additional_cpp_generator.generate() { None => {} Some(additional_cpp) => { // TODO should probably replace pragma once below with traditional include guards. let declarations = format!("#pragma once\n{}", additional_cpp.declarations); files.push(CppFilePair { header: declarations.as_bytes().to_vec(), header_name: "autocxxgen.h".to_string(), implementation: additional_cpp.definitions.as_bytes().to_vec(), }); info!("Additional C++ decls:\n{}", declarations); info!("Additional C++ defs:\n{}", additional_cpp.definitions); } } } }; Ok(GeneratedCpp(files)) } /// Get the configured include directories. pub fn include_dirs(&self) -> Result<Vec<PathBuf>> { self.determine_incdirs() } }
let inc_dirs = std::env::split_paths(&inc_dirs); // TODO consider if we can or should look up the include path automatically
random_line_split
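The split in this record lands on determine_incdirs, whose comments justify dunce::canonicalize over std::fs::canonicalize. The same pattern in isolation, as a sketch; the function name is invented, and it assumes the dunce crate the file already depends on:

// Hypothetical standalone version of the canonicalisation step.
use std::path::PathBuf;

fn canonical_inc_dirs(raw: &std::ffi::OsStr) -> std::io::Result<Vec<PathBuf>> {
    // On Windows, std::fs::canonicalize returns \\?\C:\...-style UNC paths,
    // which MSVC's -I flag rejects; dunce::canonicalize strips the prefix
    // where that is safe and behaves like fs::canonicalize elsewhere.
    std::env::split_paths(raw).map(dunce::canonicalize).collect()
}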
lib.rs
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. mod additional_cpp_generator; mod byvalue_checker; mod byvalue_scanner; mod conversion; mod function_wrapper; mod known_types; mod parse; mod rust_pretty_printer; mod type_database; mod typedef_analyzer; mod types; #[cfg(any(test, feature = "build"))] mod builder; #[cfg(test)] mod integration_tests; use conversion::BridgeConverter; use proc_macro2::TokenStream as TokenStream2; use std::{fmt::Display, path::PathBuf}; use type_database::TypeDatabase; use quote::ToTokens; use syn::{ parse::{Parse, ParseStream, Result as ParseResult}, Token, }; use syn::{parse_quote, ItemMod, Macro}; use additional_cpp_generator::AdditionalCppGenerator; use itertools::join; use known_types::KNOWN_TYPES; use log::{info, warn}; use types::TypeName; /// We use a forked version of bindgen - for now. /// We hope to unfork. use autocxx_bindgen as bindgen; #[cfg(any(test, feature = "build"))] pub use builder::{build, expect_build, BuilderError, BuilderResult, BuilderSuccess}; pub use parse::{parse_file, parse_token_stream, ParseError, ParsedFile}; pub use cxx_gen::HEADER; /// Re-export cxx such that clients can use the same version as /// us. This doesn't enable clients to avoid depending on the cxx /// crate too, unfortunately, since generated cxx::bridge code /// refers explicitly to ::cxx. See /// https://github.com/google/autocxx/issues/36 pub use cxx; pub struct CppFilePair { pub header: Vec<u8>, pub implementation: Vec<u8>, pub header_name: String, } pub struct GeneratedCpp(pub Vec<CppFilePair>); /// Errors which may occur in generating bindings for these C++ /// functions. #[derive(Debug)] pub enum Error { /// Any error reported by bindgen, generating the C++ bindings. /// Any C++ parsing errors, etc. would be reported this way. Bindgen(()), /// Any problem parsing the Rust file. Parsing(syn::Error), /// No C++ include directory was provided. NoAutoCxxInc, /// The include directories specified were incorrect. CouldNotCanoncalizeIncludeDir(PathBuf), /// Some error occurred in converting the bindgen-style /// bindings to safe cxx bindings. Conversion(conversion::ConvertError), /// No 'generate' or 'generate_pod' was specified. /// It might be that in future we can simply let things work /// without any allowlist, in which case bindgen should generate /// bindings for everything. That just seems very unlikely to work /// in the common case right now. NoGenerationRequested, } impl Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Error::Bindgen(_) => write!(f, "Bindgen was unable to generate the initial .rs bindings for this file. This may indicate a parsing problem with the C++ headers.")?, Error::Parsing(err) => write!(f, "The Rust file could not be parsed: {}", err)?, Error::NoAutoCxxInc => write!(f, "No C++ include directory was provided. 
Consider setting AUTOCXX_INC.")?, Error::CouldNotCanoncalizeIncludeDir(pb) => write!(f, "One of the C++ include directories provided ({}) did not appear to exist or could otherwise not be made into a canonical path.", pb.to_string_lossy())?, Error::Conversion(err) => write!(f, "autocxx could not generate the requested bindings. {}", err)?, Error::NoGenerationRequested => write!(f, "No 'generate' or 'generate_pod' directives were found, so we would not generate any Rust bindings despite the inclusion of C++ headers.")?, } Ok(()) } } pub type Result<T, E = Error> = std::result::Result<T, E>; pub enum CppInclusion { Define(String), Header(String), } #[allow(clippy::large_enum_variant)] // because this is only used once enum State { NotGenerated, ParseOnly, NothingGenerated, Generated(ItemMod, AdditionalCppGenerator), } /// Core of the autocxx engine. See `generate` for most details /// on how this works. /// /// TODO - consider whether this 'engine' crate should actually be a /// directory of source symlinked from all the other sub-crates, so that /// we avoid exposing an external interface from this code. pub struct IncludeCpp { inclusions: Vec<CppInclusion>, type_database: TypeDatabase, preconfigured_inc_dirs: Option<std::ffi::OsString>, exclude_utilities: bool, state: State, } impl Parse for IncludeCpp { fn parse(input: ParseStream) -> ParseResult<Self> { Self::new_from_parse_stream(input) } } impl IncludeCpp { fn new_from_parse_stream(input: ParseStream) -> syn::Result<Self> { // Takes as inputs: // 1. List of headers to include // 2. List of #defines to include // 3. Allowlist let mut inclusions = Vec::new(); let mut parse_only = false; let mut exclude_utilities = false; let mut type_database = TypeDatabase::new(); let mut unsafe_found = false; while !input.is_empty() { if input.parse::<Option<syn::Token![#]>>()?.is_some() { let ident: syn::Ident = input.parse()?; if ident != "include" { return Err(syn::Error::new(ident.span(), "expected include")); } let hdr: syn::LitStr = input.parse()?; inclusions.push(CppInclusion::Header(hdr.value())); } else if input.parse::<Option<Token![unsafe]>>()?.is_some() { unsafe_found = true; } else { let ident: syn::Ident = input.parse()?; input.parse::<Option<syn::Token![!]>>()?; if ident == "generate" || ident == "generate_pod" { let args; syn::parenthesized!(args in input); let generate: syn::LitStr = args.parse()?; type_database.add_to_allowlist(generate.value()); if ident == "generate_pod" { type_database .note_pod_request(TypeName::new_from_user_input(&generate.value())); } } else if ident == "nested_type" { let args; syn::parenthesized!(args in input); let nested: syn::LitStr = args.parse()?; args.parse::<syn::Token![,]>()?; let nested_in: syn::LitStr = args.parse()?; type_database.note_nested_type( TypeName::new_from_user_input(&nested.value()), TypeName::new_from_user_input(&nested_in.value()), ); } else if ident == "block" { let args; syn::parenthesized!(args in input); let generate: syn::LitStr = args.parse()?; type_database.add_to_blocklist(generate.value()); } else if ident == "parse_only" { parse_only = true; } else if ident == "exclude_utilities" { exclude_utilities = true; } else { return Err(syn::Error::new( ident.span(), "expected generate, generate_pod, nested_type or exclude_utilities", )); } } if input.is_empty() { break; } } if !exclude_utilities { type_database.add_to_allowlist("make_string".to_string()); } if !unsafe_found { return Err(syn::Error::new( input.span(), "the unsafe keyword must be specified within each include_cpp! 
block", )); } Ok(IncludeCpp { inclusions, preconfigured_inc_dirs: None, exclude_utilities, type_database, state: if parse_only { State::ParseOnly } else { State::NotGenerated }, }) } pub fn new_from_syn(mac: Macro) -> Result<Self> { mac.parse_body::<IncludeCpp>().map_err(Error::Parsing) } pub fn set_include_dirs<P: AsRef<std::ffi::OsStr>>(&mut self, include_dirs: P) { self.preconfigured_inc_dirs = Some(include_dirs.as_ref().into()); } fn build_header(&self) -> String { join( self.inclusions.iter().map(|incl| match incl { CppInclusion::Define(symbol) => format!("#define {}\n", symbol), CppInclusion::Header(path) => format!("#include \"{}\"\n", path), }), "", ) } fn determine_incdirs(&self) -> Result<Vec<PathBuf>> { let inc_dirs = match &self.preconfigured_inc_dirs { Some(d) => d.clone(), None => std::env::var_os("AUTOCXX_INC").ok_or(Error::NoAutoCxxInc)?, }; let inc_dirs = std::env::split_paths(&inc_dirs); // TODO consider if we can or should look up the include path automatically // instead of requiring callers always to set AUTOCXX_INC. // On Windows, the canonical path begins with a UNC prefix that cannot be passed to // the MSVC compiler, so dunce::canonicalize() is used instead of std::fs::canonicalize() // See: // * https://github.com/dtolnay/cxx/pull/41 // * https://github.com/alexcrichton/cc-rs/issues/169 inc_dirs .map(|p| dunce::canonicalize(&p).map_err(|_| Error::CouldNotCanoncalizeIncludeDir(p))) .collect() } fn make_bindgen_builder(&self) -> Result<bindgen::Builder> { // TODO support different C++ versions let mut builder = bindgen::builder() .clang_args(&["-x", "c++", "-std=c++14"]) .derive_copy(false) .derive_debug(false) .default_enum_style(bindgen::EnumVariation::Rust { non_exhaustive: false, }) .enable_cxx_namespaces() .disable_nested_struct_naming() .generate_inline_functions(true) .layout_tests(false); // TODO revisit later for item in known_types::get_initial_blocklist() { builder = builder.blacklist_item(item); } for inc_dir in self.determine_incdirs()? { // TODO work with OsStrs here to avoid the .display() builder = builder.clang_arg(format!("-I{}", inc_dir.display())); } // 3. Passes allowlist and other options to the bindgen::Builder equivalent // to --output-style=cxx --allowlist=<as passed in> for a in self.type_database.allowlist() { // TODO - allowlist type/functions/separately builder = builder .whitelist_type(a) .whitelist_function(a) .whitelist_var(a); } Ok(builder) } fn inject_header_into_bindgen(&self, mut builder: bindgen::Builder) -> bindgen::Builder
/// Generate the Rust bindings. Call `generate` first. pub fn generate_rs(&self) -> TokenStream2 { match &self.state { State::NotGenerated => panic!("Call generate() first"), State::Generated(itemmod, _) => itemmod.to_token_stream(), State::NothingGenerated | State::ParseOnly => TokenStream2::new(), } } fn parse_bindings(&self, bindings: bindgen::Bindings) -> Result<ItemMod> { // This bindings object is actually a TokenStream internally and we're wasting // effort converting to and from string. We could enhance the bindgen API // in future. let bindings = bindings.to_string(); // Manually add the mod ffi {} so that we can ask syn to parse // into a single construct. let bindings = format!("mod bindgen {{ {} }}", bindings); info!("Bindings: {}", bindings); syn::parse_str::<ItemMod>(&bindings).map_err(Error::Parsing) } fn generate_include_list(&self) -> Vec<String> { let mut include_list = Vec::new(); for incl in &self.inclusions { match incl { CppInclusion::Header(ref hdr) => { include_list.push(hdr.clone()); } CppInclusion::Define(_) => warn!("Currently no way to define! within cxx"), } } include_list } /// Actually examine the headers to find out what needs generating. /// Most errors occur at this stage as we fail to interpret the C++ /// headers properly. /// /// The basic idea is this. We will run `bindgen` which will spit /// out a ton of Rust code corresponding to all the types and functions /// defined in C++. We'll then post-process that bindgen output /// into a form suitable for ingestion by `cxx`. /// (It's the `bridge_converter` mod which does that.) /// Along the way, the `bridge_converter` might tell us of additional /// C++ code which we should generate, e.g. wrappers to move things /// into and out of `UniquePtr`s. pub fn generate(&mut self) -> Result<()> { // If we are in parse only mode, do nothing. This is used for // doc tests to ensure the parsing is valid, but we can't expect // valid C++ header files or linkers to allow a complete build. match self.state { State::ParseOnly => return Ok(()), State::NotGenerated => {} State::Generated(_, _) | State::NothingGenerated => panic!("Only call generate once"), } if self.type_database.allowlist_is_empty() { return Err(Error::NoGenerationRequested); } let builder = self.make_bindgen_builder()?; let bindings = self .inject_header_into_bindgen(builder) .generate() .map_err(Error::Bindgen)?; let bindings = self.parse_bindings(bindings)?; let include_list = self.generate_include_list(); let mut converter = BridgeConverter::new(&include_list, &self.type_database); let conversion = converter .convert(bindings, self.exclude_utilities) .map_err(Error::Conversion)?; let mut additional_cpp_generator = AdditionalCppGenerator::new(self.build_header()); additional_cpp_generator.add_needs(conversion.additional_cpp_needs, &self.type_database); let mut items = conversion.items; let mut new_bindings: ItemMod = parse_quote! { #[allow(non_snake_case)] #[allow(dead_code)] #[allow(non_upper_case_globals)] #[allow(non_camel_case_types)] mod ffi { } }; new_bindings.content.as_mut().unwrap().1.append(&mut items); info!( "New bindings:\n{}", rust_pretty_printer::pretty_print(&new_bindings.to_token_stream()) ); self.state = State::Generated(new_bindings, additional_cpp_generator); Ok(()) } /// Generate C++-side bindings for these APIs. Call `generate` first. 
pub fn generate_h_and_cxx(&self) -> Result<GeneratedCpp, cxx_gen::Error> { let mut files = Vec::new(); match &self.state { State::ParseOnly => panic!("Cannot generate C++ in parse-only mode"), State::NotGenerated => panic!("Call generate() first"), State::NothingGenerated => {} State::Generated(itemmod, additional_cpp_generator) => { let rs = itemmod.into_token_stream(); let opt = cxx_gen::Opt::default(); let cxx_generated = cxx_gen::generate_header_and_cc(rs, &opt)?; files.push(CppFilePair { header: cxx_generated.header, header_name: "cxxgen.h".to_string(), implementation: cxx_generated.implementation, }); match additional_cpp_generator.generate() { None => {} Some(additional_cpp) => { // TODO should probably replace pragma once below with traditional include guards. let declarations = format!("#pragma once\n{}", additional_cpp.declarations); files.push(CppFilePair { header: declarations.as_bytes().to_vec(), header_name: "autocxxgen.h".to_string(), implementation: additional_cpp.definitions.as_bytes().to_vec(), }); info!("Additional C++ decls:\n{}", declarations); info!("Additional C++ defs:\n{}", additional_cpp.definitions); } } } }; Ok(GeneratedCpp(files)) } /// Get the configured include directories. pub fn include_dirs(&self) -> Result<Vec<PathBuf>> { self.determine_incdirs() } }
{ let full_header = self.build_header(); let full_header = format!("{}\n\n{}", KNOWN_TYPES.get_prelude(), full_header,); builder = builder.header_contents("example.hpp", &full_header); builder }
identifier_body
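This record's hole was the body of inject_header_into_bindgen, but its prefix also spells out, in new_from_parse_stream, exactly which directives include_cpp! accepts. Read off that parser, a well-formed invocation would look like the following sketch; the header and type names are made up:

// Hypothetical invocation matching the grammar new_from_parse_stream accepts.
include_cpp! {
    #include "demo.h"
    unsafe                          // satisfies the unsafe_found check
    generate!("DemoClass")          // adds DemoClass to the allowlist
    generate_pod!("DemoPod")        // allowlist entry plus a POD request
    block!("PrivateHelper")         // adds to the blocklist
    nested_type!("Inner", "Outer")  // records Inner as nested in Outer
}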
lib.rs
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. mod additional_cpp_generator; mod byvalue_checker; mod byvalue_scanner; mod conversion; mod function_wrapper; mod known_types; mod parse; mod rust_pretty_printer; mod type_database; mod typedef_analyzer; mod types; #[cfg(any(test, feature = "build"))] mod builder; #[cfg(test)] mod integration_tests; use conversion::BridgeConverter; use proc_macro2::TokenStream as TokenStream2; use std::{fmt::Display, path::PathBuf}; use type_database::TypeDatabase; use quote::ToTokens; use syn::{ parse::{Parse, ParseStream, Result as ParseResult}, Token, }; use syn::{parse_quote, ItemMod, Macro}; use additional_cpp_generator::AdditionalCppGenerator; use itertools::join; use known_types::KNOWN_TYPES; use log::{info, warn}; use types::TypeName; /// We use a forked version of bindgen - for now. /// We hope to unfork. use autocxx_bindgen as bindgen; #[cfg(any(test, feature = "build"))] pub use builder::{build, expect_build, BuilderError, BuilderResult, BuilderSuccess}; pub use parse::{parse_file, parse_token_stream, ParseError, ParsedFile}; pub use cxx_gen::HEADER; /// Re-export cxx such that clients can use the same version as /// us. This doesn't enable clients to avoid depending on the cxx /// crate too, unfortunately, since generated cxx::bridge code /// refers explicitly to ::cxx. See /// https://github.com/google/autocxx/issues/36 pub use cxx; pub struct CppFilePair { pub header: Vec<u8>, pub implementation: Vec<u8>, pub header_name: String, } pub struct GeneratedCpp(pub Vec<CppFilePair>); /// Errors which may occur in generating bindings for these C++ /// functions. #[derive(Debug)] pub enum Error { /// Any error reported by bindgen, generating the C++ bindings. /// Any C++ parsing errors, etc. would be reported this way. Bindgen(()), /// Any problem parsing the Rust file. Parsing(syn::Error), /// No C++ include directory was provided. NoAutoCxxInc, /// The include directories specified were incorrect. CouldNotCanoncalizeIncludeDir(PathBuf), /// Some error occurred in converting the bindgen-style /// bindings to safe cxx bindings. Conversion(conversion::ConvertError), /// No 'generate' or 'generate_pod' was specified. /// It might be that in future we can simply let things work /// without any allowlist, in which case bindgen should generate /// bindings for everything. That just seems very unlikely to work /// in the common case right now. NoGenerationRequested, } impl Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Error::Bindgen(_) => write!(f, "Bindgen was unable to generate the initial .rs bindings for this file. This may indicate a parsing problem with the C++ headers.")?, Error::Parsing(err) => write!(f, "The Rust file could not be parsed: {}", err)?, Error::NoAutoCxxInc => write!(f, "No C++ include directory was provided. 
Consider setting AUTOCXX_INC.")?, Error::CouldNotCanoncalizeIncludeDir(pb) => write!(f, "One of the C++ include directories provided ({}) did not appear to exist or could otherwise not be made into a canonical path.", pb.to_string_lossy())?, Error::Conversion(err) => write!(f, "autocxx could not generate the requested bindings. {}", err)?, Error::NoGenerationRequested => write!(f, "No 'generate' or 'generate_pod' directives were found, so we would not generate any Rust bindings despite the inclusion of C++ headers.")?, } Ok(()) } } pub type Result<T, E = Error> = std::result::Result<T, E>; pub enum CppInclusion { Define(String), Header(String), } #[allow(clippy::large_enum_variant)] // because this is only used once enum State { NotGenerated, ParseOnly, NothingGenerated, Generated(ItemMod, AdditionalCppGenerator), } /// Core of the autocxx engine. See `generate` for most details /// on how this works. /// /// TODO - consider whether this 'engine' crate should actually be a /// directory of source symlinked from all the other sub-crates, so that /// we avoid exposing an external interface from this code. pub struct IncludeCpp { inclusions: Vec<CppInclusion>, type_database: TypeDatabase, preconfigured_inc_dirs: Option<std::ffi::OsString>, exclude_utilities: bool, state: State, } impl Parse for IncludeCpp { fn parse(input: ParseStream) -> ParseResult<Self> { Self::new_from_parse_stream(input) } } impl IncludeCpp { fn new_from_parse_stream(input: ParseStream) -> syn::Result<Self> { // Takes as inputs: // 1. List of headers to include // 2. List of #defines to include // 3. Allowlist let mut inclusions = Vec::new(); let mut parse_only = false; let mut exclude_utilities = false; let mut type_database = TypeDatabase::new(); let mut unsafe_found = false; while !input.is_empty() { if input.parse::<Option<syn::Token![#]>>()?.is_some() { let ident: syn::Ident = input.parse()?; if ident != "include" { return Err(syn::Error::new(ident.span(), "expected include")); } let hdr: syn::LitStr = input.parse()?; inclusions.push(CppInclusion::Header(hdr.value())); } else if input.parse::<Option<Token![unsafe]>>()?.is_some() { unsafe_found = true; } else { let ident: syn::Ident = input.parse()?; input.parse::<Option<syn::Token![!]>>()?; if ident == "generate" || ident == "generate_pod" { let args; syn::parenthesized!(args in input); let generate: syn::LitStr = args.parse()?; type_database.add_to_allowlist(generate.value()); if ident == "generate_pod" { type_database .note_pod_request(TypeName::new_from_user_input(&generate.value())); } } else if ident == "nested_type" { let args; syn::parenthesized!(args in input); let nested: syn::LitStr = args.parse()?; args.parse::<syn::Token![,]>()?; let nested_in: syn::LitStr = args.parse()?; type_database.note_nested_type( TypeName::new_from_user_input(&nested.value()), TypeName::new_from_user_input(&nested_in.value()), ); } else if ident == "block" { let args; syn::parenthesized!(args in input); let generate: syn::LitStr = args.parse()?; type_database.add_to_blocklist(generate.value()); } else if ident == "parse_only" { parse_only = true; } else if ident == "exclude_utilities" { exclude_utilities = true; } else { return Err(syn::Error::new( ident.span(), "expected generate, generate_pod, nested_type or exclude_utilities", )); } } if input.is_empty() { break; } } if !exclude_utilities { type_database.add_to_allowlist("make_string".to_string()); } if !unsafe_found { return Err(syn::Error::new( input.span(), "the unsafe keyword must be specified within each include_cpp! 
block", )); } Ok(IncludeCpp { inclusions, preconfigured_inc_dirs: None, exclude_utilities, type_database, state: if parse_only { State::ParseOnly } else { State::NotGenerated }, }) } pub fn new_from_syn(mac: Macro) -> Result<Self> { mac.parse_body::<IncludeCpp>().map_err(Error::Parsing) } pub fn set_include_dirs<P: AsRef<std::ffi::OsStr>>(&mut self, include_dirs: P) { self.preconfigured_inc_dirs = Some(include_dirs.as_ref().into()); } fn build_header(&self) -> String { join( self.inclusions.iter().map(|incl| match incl { CppInclusion::Define(symbol) => format!("#define {}\n", symbol), CppInclusion::Header(path) => format!("#include \"{}\"\n", path), }), "", ) } fn determine_incdirs(&self) -> Result<Vec<PathBuf>> { let inc_dirs = match &self.preconfigured_inc_dirs { Some(d) => d.clone(), None => std::env::var_os("AUTOCXX_INC").ok_or(Error::NoAutoCxxInc)?, }; let inc_dirs = std::env::split_paths(&inc_dirs); // TODO consider if we can or should look up the include path automatically // instead of requiring callers always to set AUTOCXX_INC. // On Windows, the canonical path begins with a UNC prefix that cannot be passed to // the MSVC compiler, so dunce::canonicalize() is used instead of std::fs::canonicalize() // See: // * https://github.com/dtolnay/cxx/pull/41 // * https://github.com/alexcrichton/cc-rs/issues/169 inc_dirs .map(|p| dunce::canonicalize(&p).map_err(|_| Error::CouldNotCanoncalizeIncludeDir(p))) .collect() } fn make_bindgen_builder(&self) -> Result<bindgen::Builder> { // TODO support different C++ versions let mut builder = bindgen::builder() .clang_args(&["-x", "c++", "-std=c++14"]) .derive_copy(false) .derive_debug(false) .default_enum_style(bindgen::EnumVariation::Rust { non_exhaustive: false, }) .enable_cxx_namespaces() .disable_nested_struct_naming() .generate_inline_functions(true) .layout_tests(false); // TODO revisit later for item in known_types::get_initial_blocklist() { builder = builder.blacklist_item(item); } for inc_dir in self.determine_incdirs()? { // TODO work with OsStrs here to avoid the .display() builder = builder.clang_arg(format!("-I{}", inc_dir.display())); } // 3. Passes allowlist and other options to the bindgen::Builder equivalent // to --output-style=cxx --allowlist=<as passed in> for a in self.type_database.allowlist() { // TODO - allowlist type/functions/separately builder = builder .whitelist_type(a) .whitelist_function(a) .whitelist_var(a); } Ok(builder) } fn inject_header_into_bindgen(&self, mut builder: bindgen::Builder) -> bindgen::Builder { let full_header = self.build_header(); let full_header = format!("{}\n\n{}", KNOWN_TYPES.get_prelude(), full_header,); builder = builder.header_contents("example.hpp", &full_header); builder } /// Generate the Rust bindings. Call `generate` first. pub fn generate_rs(&self) -> TokenStream2 { match &self.state { State::NotGenerated => panic!("Call generate() first"), State::Generated(itemmod, _) => itemmod.to_token_stream(), State::NothingGenerated | State::ParseOnly => TokenStream2::new(), } } fn parse_bindings(&self, bindings: bindgen::Bindings) -> Result<ItemMod> { // This bindings object is actually a TokenStream internally and we're wasting // effort converting to and from string. We could enhance the bindgen API // in future. let bindings = bindings.to_string(); // Manually add the mod ffi {} so that we can ask syn to parse // into a single construct. 
let bindings = format!("mod bindgen {{ {} }}", bindings); info!("Bindings: {}", bindings); syn::parse_str::<ItemMod>(&bindings).map_err(Error::Parsing) } fn generate_include_list(&self) -> Vec<String> { let mut include_list = Vec::new(); for incl in &self.inclusions { match incl { CppInclusion::Header(ref hdr) => { include_list.push(hdr.clone()); } CppInclusion::Define(_) => warn!("Currently no way to define! within cxx"), } } include_list } /// Actually examine the headers to find out what needs generating. /// Most errors occur at this stage as we fail to interpret the C++ /// headers properly. /// /// The basic idea is this. We will run `bindgen` which will spit /// out a ton of Rust code corresponding to all the types and functions /// defined in C++. We'll then post-process that bindgen output /// into a form suitable for ingestion by `cxx`. /// (It's the `bridge_converter` mod which does that.) /// Along the way, the `bridge_converter` might tell us of additional /// C++ code which we should generate, e.g. wrappers to move things /// into and out of `UniquePtr`s. pub fn generate(&mut self) -> Result<()> { // If we are in parse only mode, do nothing. This is used for // doc tests to ensure the parsing is valid, but we can't expect // valid C++ header files or linkers to allow a complete build. match self.state { State::ParseOnly => return Ok(()), State::NotGenerated => {} State::Generated(_, _) | State::NothingGenerated => panic!("Only call generate once"), } if self.type_database.allowlist_is_empty() { return Err(Error::NoGenerationRequested); } let builder = self.make_bindgen_builder()?; let bindings = self .inject_header_into_bindgen(builder) .generate() .map_err(Error::Bindgen)?; let bindings = self.parse_bindings(bindings)?; let include_list = self.generate_include_list(); let mut converter = BridgeConverter::new(&include_list, &self.type_database); let conversion = converter .convert(bindings, self.exclude_utilities) .map_err(Error::Conversion)?; let mut additional_cpp_generator = AdditionalCppGenerator::new(self.build_header()); additional_cpp_generator.add_needs(conversion.additional_cpp_needs, &self.type_database); let mut items = conversion.items; let mut new_bindings: ItemMod = parse_quote! { #[allow(non_snake_case)] #[allow(dead_code)] #[allow(non_upper_case_globals)] #[allow(non_camel_case_types)] mod ffi { } }; new_bindings.content.as_mut().unwrap().1.append(&mut items); info!( "New bindings:\n{}", rust_pretty_printer::pretty_print(&new_bindings.to_token_stream()) ); self.state = State::Generated(new_bindings, additional_cpp_generator); Ok(()) } /// Generate C++-side bindings for these APIs. Call `generate` first. pub fn generate_h_and_cxx(&self) -> Result<GeneratedCpp, cxx_gen::Error> { let mut files = Vec::new(); match &self.state { State::ParseOnly => panic!("Cannot generate C++ in parse-only mode"), State::NotGenerated => panic!("Call generate() first"), State::NothingGenerated => {} State::Generated(itemmod, additional_cpp_generator) => { let rs = itemmod.into_token_stream(); let opt = cxx_gen::Opt::default(); let cxx_generated = cxx_gen::generate_header_and_cc(rs, &opt)?; files.push(CppFilePair { header: cxx_generated.header, header_name: "cxxgen.h".to_string(), implementation: cxx_generated.implementation, }); match additional_cpp_generator.generate() { None => {} Some(additional_cpp) => { // TODO should probably replace pragma once below with traditional include guards. 
let declarations = format!("#pragma once\n{}", additional_cpp.declarations); files.push(CppFilePair { header: declarations.as_bytes().to_vec(), header_name: "autocxxgen.h".to_string(), implementation: additional_cpp.definitions.as_bytes().to_vec(), }); info!("Additional C++ decls:\n{}", declarations); info!("Additional C++ defs:\n{}", additional_cpp.definitions); } } } }; Ok(GeneratedCpp(files)) } /// Get the configured include directories. pub fn
(&self) -> Result<Vec<PathBuf>> { self.determine_incdirs() } }
include_dirs
identifier_name
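The `parse_bindings` step in the record above wraps bindgen's output in `mod bindgen { ... }` purely so that `syn` can parse the whole fragment as a single construct. A minimal Python analogue of that wrap-then-parse trick, using the standard `ast` module in place of `syn` (the `_bindgen` wrapper name is made up for illustration):

import ast

def parse_generated(source):
    # Wrap the generated statements in a single function so the fragment
    # parses as one construct, as parse_bindings() does with `mod bindgen`.
    wrapped = "def _bindgen():\n" + "\n".join(
        "    " + line for line in source.splitlines())
    return ast.parse(wrapped)

tree = parse_generated("x = 1\ny = x + 1")
print(ast.dump(tree.body[0]))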
hwacha.py
import numpy as np import ctypes as ct import ast from ctree.jit import LazySpecializedFunction, ConcreteSpecializedFunction from ctree.transformations import PyBasicConversions from ctree.transforms.declaration_filler import DeclarationFiller from ctree.c.nodes import CFile import ctree.c.nodes as C from ctree.nodes import Project from ctree.types import get_ctype from ctree.templates.nodes import StringTemplate def get_nd_pointer(arg): return np.ctypeslib.ndpointer(arg.dtype, arg.ndim, arg.shape) class HwachaFN(ConcreteSpecializedFunction): def finalize(self, entry_point_name, project_node, entry_typesig): self._c_function = self._compile(entry_point_name, project_node, entry_typesig) return self def __call__(self, *args): return self._c_function(*args) class MapTransformer(ast.NodeTransformer): def __init__(self, loopvar, param_dict, retval_name): self.loopvar = loopvar self.param_dict = param_dict self.retval_name = retval_name def visit_SymbolRef(self, node): if node.name in self.param_dict: return C.ArrayRef(node, C.SymbolRef(self.loopvar)) return node def visit_Return(self, node): node.value = self.visit(node.value) return C.Assign(C.ArrayRef(C.SymbolRef(self.retval_name), C.SymbolRef(self.loopvar)), node.value) hwacha_configure_block = """ size_t vector_length; __asm__ volatile ( "vsetcfg 16, 1\\n" "vsetvl %0, %1\\n" : "=r"(vector_length) : "r"({SIZE}) ); """ bounds_check = """ if ({SIZE} == {loopvar}) continue; """ class ScalarFinder(ast.NodeVisitor): def __init__(self, scalars): self.scalars = scalars def visit_Constant(self, node):
def get_scalars_in_body(node): scalars = set() visitor = ScalarFinder(scalars) for stmt in node.body: visitor.visit(stmt) return scalars number_dict = { "1": "one", "2": "two", "3": "three", "4": "four", "5": "five", "6": "six", "7": "seven", "8": "eight", "9": "nine", "0": "zero", ".": "dot" } def scalar_init(scalar): name = "".join(number_dict[digit] for digit in str(scalar)) return StringTemplate(""" union {{ float f; uint32_t i; }} {name}; {name}.f = {scalar}f; """.format(name=name, scalar=scalar)) obtained_vector_length = """ size_t obtained_vector_length; __asm__ volatile( "vsetvl %0, %1\\n" : "=r"(obtained_vector_length) : "r"({SIZE} - {loopvar}) ); assert(obtained_vector_length <= {SIZE}); """ class ArrayRefFinder(ast.NodeVisitor): def __init__(self, refs): self.refs = refs def visit_BinaryOp(self, node): if isinstance(node.op, C.Op.ArrayRef): self.refs.append(node) else: self.visit(node.left) self.visit(node.right) def get_array_references_in_body(node): refs = [] finder = ArrayRefFinder(refs) for stmt in node.body: finder.visit(stmt) return refs class HwachaASMTranslator(ast.NodeTransformer): def __init__(self, scalars, ref_register_map, body, type_map): self.scalars = scalars self.ref_register_map = ref_register_map self.body = body self.curr_register = -1 self.reg_map = {} self.type_map = type_map def get_next_register(self): self.curr_register += 1 return "vv{}".format(self.curr_register) def visit_SymbolRef(self, node): if node.name in self.reg_map: return self.reg_map[node.name] return node def visit_Cast(self, node): reg = self.get_next_register() value = self.visit(node.value) if isinstance(node.type, ct.c_float): self.body.append(" vfcvt.s.w {0}, {1}\\n".format(reg, value)) self.type_map[reg] = ct.c_float return reg else: raise NotImplementedError() def visit_Constant(self, node): self.type_map[node.value] = get_ctype(node.value) return self.scalars[node.value] def visit_FunctionCall(self, node): if node.func.name == 'max': arg1 = self.visit(node.args[0]) arg2 = self.visit(node.args[1]) reg = self.get_next_register() print(node) print(arg1) if self.type_map[arg1] == ct.c_float or \ self.type_map[arg2] == ct.c_float: self.body.append(" vfmax.s {0}, {1}, {2}\\n".format( reg, arg1, arg2 )) self.type_map[reg] = ct.c_float return reg elif node.func.name == 'min': arg1 = self.visit(node.args[0]) arg2 = self.visit(node.args[1]) reg = self.get_next_register() if self.type_map[arg1] == ct.c_float or \ self.type_map[arg2] == ct.c_float: self.body.append(" vfmin.s {0}, {1}, {2}\\n".format( reg, arg1, arg2 )) self.type_map[reg] = ct.c_float return reg raise NotImplementedError() def visit_BinaryOp(self, node): if isinstance(node.op, C.Op.ArrayRef): reg = self.get_next_register() self.body.append(" vlwu {0}, {1}\\n".format( reg, self.ref_register_map[str(node)][1])) return reg if isinstance(node.op, C.Op.Assign): node.right = self.visit(node.right) if isinstance(node.left, C.SymbolRef): self.reg_map[node.left.name] = node.right return elif isinstance(node.left, C.BinaryOp) and \ isinstance(node.left.op, C.Op.ArrayRef): if self.type_map[node.left.left.name] != self.type_map[node.right]: reg = self.get_next_register() self.body.append(" vfcvt.w.s {0}, {1}\\n".format(reg, node.right)) self.body.append(" vsw {0}, {1}\\n".format(reg, self.ref_register_map[str(node.left)][1])) return node.left = self.visit(node.left) node.right = self.visit(node.right) reg = self.get_next_register() if isinstance(node.op, C.Op.Sub): self.body.append(" vsub {0}, {1}, {2}\\n".format( reg, node.left, 
node.right)) elif isinstance(node.op, C.Op.Div): if self.type_map[node.left] == ct.c_float or \ self.type_map[node.right] == ct.c_float: self.body.append(" vfdiv.s {0}, {1}, {2}\\n".format( reg, node.left, node.right)) self.type_map[reg] = ct.c_float else: raise NotImplementedError() elif isinstance(node.op, C.Op.Mul): if self.type_map[node.left] == ct.c_float or \ self.type_map[node.right] == ct.c_float: self.body.append(" vfmul.s {0}, {1}, {2}\\n".format( reg, node.left, node.right)) self.type_map[reg] = ct.c_float else: raise NotImplementedError() return reg def get_asm_body(node, scalars, refs, type_map): body = """ __asm__ volatile ( ".align 3\\n" "__hwacha_body:\\n" """ asm_body = [] translator = HwachaASMTranslator(scalars, refs, asm_body, type_map) for s in node.body: translator.visit(s) for s in asm_body: body += "\"" + s + "\"\n" body += "\" vstop\\n\"\n" body += " );" return StringTemplate(body) class HwachaVectorize(ast.NodeTransformer): def __init__(self, type_map, defns): self.type_map = type_map self.defns = defns def visit_For(self, node): if node.pragma == "ivdep": block = [] loopvar = node.incr.arg size = node.test.right scalars = get_scalars_in_body(node) refs = get_array_references_in_body(node) ref_register_map = {} scalar_register_map = {} for index, ref in enumerate(refs): ref_register_map[str(ref)] = (ref, "va{}".format(index)) for index, scalar in enumerate(scalars): reg = "vs{}".format(index) scalar_register_map[scalar] = reg self.type_map[reg] = get_ctype(scalar) body = [] block.append(StringTemplate(hwacha_configure_block.format(SIZE=size))) node.incr = C.AddAssign(loopvar, C.SymbolRef("vector_length")) self.defns.append(get_asm_body(node, scalar_register_map, ref_register_map, self.type_map)) block.append(node) body.append(StringTemplate(bounds_check.format(SIZE=size, loopvar=loopvar))) for scalar in scalars: body.append(scalar_init(scalar)) body.append(StringTemplate(obtained_vector_length.format(SIZE=size, loopvar=loopvar))) block1 = "" block2 = "" index = 0 for _, info in ref_register_map.items(): ref, register = info block1 += "\t \"vmsa {0}, %{1}\\n\"\n".format(register, index) block2 += "\"r\"({0} + {1}),\n".format( ref.left.name, ref.right.name) index += 1 for scalar, register in scalar_register_map.items(): block1 += "\t \"vmss {0}, %{1}\\n\"\n".format(register, index) block2 += "\"r\"({0}.i),\n".format( "".join(number_dict[digit] for digit in str(scalar))) index += 1 block1 += "\"fence\\n\"\n" block1 += "\"vf 0(%{0})\\n\"\n".format(index) block2 += "\"r\" (&__hwacha_body)" body.append(StringTemplate( """ __asm__ volatile( {block1} : : {block2} : "memory" ); """.format(block1=block1, block2=block2))) node.body = body block.append( StringTemplate(""" __asm__ volatile( "fence\\n" ); """)) return block class HwachaTranslator(LazySpecializedFunction): def args_to_subconfig(self, args): return tuple(get_nd_pointer(arg) for arg in args) def transform(self, py_ast, program_cfg): arg_cfg, tune_cfg = program_cfg tree = PyBasicConversions().visit(py_ast) param_dict = {} tree.body[0].params.append(C.SymbolRef("retval", arg_cfg[0]())) # Annotate arguments for param, type in zip(tree.body[0].params, arg_cfg): param.type = type() param_dict[param.name] = type._dtype_ length = np.prod(arg_cfg[0]._shape_) transformer = MapTransformer("i", param_dict, "retval") body = list(map(transformer.visit, tree.body[0].defn)) tree.body[0].defn = [C.For( C.Assign(C.SymbolRef("i", ct.c_int()), C.Constant(0)), C.Lt(C.SymbolRef("i"), C.Constant(length)), C.PostInc(C.SymbolRef("i")), 
body=body, pragma="ivdep" )] tree = DeclarationFiller().visit(tree) defns = [] tree = HwachaVectorize(param_dict, defns).visit(tree) file_body = [ StringTemplate("#include <stdlib.h>"), StringTemplate("#include <stdint.h>"), StringTemplate("#include <assert.h>"), StringTemplate("extern \"C\" void __hwacha_body(void);"), ] file_body.extend(defns) file_body.append(tree) return [CFile("generated", file_body)] def finalize(self, transform_result, program_config): generated = transform_result[0] print(generated) proj = Project([generated]) entry_type = ct.CFUNCTYPE(None, *program_config[0]) return HwachaFN().finalize("apply", proj, entry_type) def hwacha_map(fn, *args): mapfn = HwachaTranslator.from_function(fn, "map") retval = np.empty_like(args[0]) args += (retval, ) mapfn(*args) return retval CALIBRATE_COLD = 0x7000 CALIBRATE_HOT = 0xA000 SIZE = (208 * 156) # Generate a dummy calibration table, just so there's something # to execute. cold = np.full(SIZE, CALIBRATE_COLD, np.int32) hot = np.full(SIZE, CALIBRATE_HOT, np.int32) # Generate a dummy input image, again just so there's something # to execute. raw = np.empty(SIZE, np.int32) for i in range(SIZE): scale = (CALIBRATE_HOT - CALIBRATE_COLD) percent = (i % 120) - 10 raw[i] = scale * (percent / 100.0) + CALIBRATE_COLD raw[i] = CALIBRATE_COLD + (i % (int)(scale - 2)) + 1 def gold(cold, hot, raw, flat): for i in range(208 * 156): _max = hot[i] _min = cold[i] offset = raw[i] - _min scale = _max - _min foffset = float(offset) fscale = float(scale) scaled = foffset / fscale scaled = min(1.0, scaled) scaled = max(0.0, scaled) flat[i] = 255 * scaled def test_map(cold, hot, raw): _max = hot _min = cold offset = raw - _min scale = _max - _min foffset = float(offset) fscale = float(scale) scaled = foffset / fscale scaled = min(1.0, scaled) scaled = max(0.0, scaled) return 255.0 * scaled flat_gold = np.empty_like(raw) gold(cold, hot, raw, flat_gold) flat_test = hwacha_map(test_map, cold, hot, raw) np.testing.assert_array_equal(flat_gold, flat_test)
self.scalars.add(node.value)
identifier_body
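The middle of this record is the one-line body of `visit_Constant`, so here is the same collector run against Python's built-in `ast` module instead of ctree's: a self-contained sketch of what `get_scalars_in_body` recovers (the kernel string is an arbitrary example):

import ast

class ConstantFinder(ast.NodeVisitor):
    # Same shape as ScalarFinder above: record every literal constant.
    def __init__(self, scalars):
        self.scalars = scalars

    def visit_Constant(self, node):
        self.scalars.add(node.value)

scalars = set()
ConstantFinder(scalars).visit(ast.parse("y = min(1.0, max(0.0, x / 255.0))"))
print(scalars)  # {0.0, 1.0, 255.0} in some order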
hwacha.py
import numpy as np import ctypes as ct import ast from ctree.jit import LazySpecializedFunction, ConcreteSpecializedFunction from ctree.transformations import PyBasicConversions from ctree.transforms.declaration_filler import DeclarationFiller from ctree.c.nodes import CFile import ctree.c.nodes as C from ctree.nodes import Project from ctree.types import get_ctype from ctree.templates.nodes import StringTemplate def get_nd_pointer(arg): return np.ctypeslib.ndpointer(arg.dtype, arg.ndim, arg.shape) class HwachaFN(ConcreteSpecializedFunction): def finalize(self, entry_point_name, project_node, entry_typesig): self._c_function = self._compile(entry_point_name, project_node, entry_typesig) return self def __call__(self, *args): return self._c_function(*args) class MapTransformer(ast.NodeTransformer): def __init__(self, loopvar, param_dict, retval_name): self.loopvar = loopvar self.param_dict = param_dict self.retval_name = retval_name def visit_SymbolRef(self, node): if node.name in self.param_dict: return C.ArrayRef(node, C.SymbolRef(self.loopvar)) return node def visit_Return(self, node): node.value = self.visit(node.value) return C.Assign(C.ArrayRef(C.SymbolRef(self.retval_name), C.SymbolRef(self.loopvar)), node.value) hwacha_configure_block = """ size_t vector_length; __asm__ volatile ( "vsetcfg 16, 1\\n" "vsetvl %0, %1\\n" : "=r"(vector_length) : "r"({SIZE}) ); """ bounds_check = """ if ({SIZE} == {loopvar}) continue; """ class ScalarFinder(ast.NodeVisitor): def __init__(self, scalars): self.scalars = scalars def visit_Constant(self, node): self.scalars.add(node.value) def get_scalars_in_body(node): scalars = set() visitor = ScalarFinder(scalars) for stmt in node.body: visitor.visit(stmt) return scalars number_dict = { "1": "one", "2": "two", "3": "three", "4": "four", "5": "five", "6": "six", "7": "seven", "8": "eight", "9": "nine", "0": "zero", ".": "dot" } def scalar_init(scalar): name = "".join(number_dict[digit] for digit in str(scalar)) return StringTemplate(""" union {{ float f; uint32_t i; }} {name}; {name}.f = {scalar}f; """.format(name=name, scalar=scalar)) obtained_vector_length = """ size_t obtained_vector_length; __asm__ volatile( "vsetvl %0, %1\\n" : "=r"(obtained_vector_length) : "r"({SIZE} - {loopvar}) ); assert(obtained_vector_length <= {SIZE}); """ class ArrayRefFinder(ast.NodeVisitor): def __init__(self, refs): self.refs = refs def visit_BinaryOp(self, node): if isinstance(node.op, C.Op.ArrayRef): self.refs.append(node) else: self.visit(node.left) self.visit(node.right) def get_array_references_in_body(node): refs = [] finder = ArrayRefFinder(refs) for stmt in node.body: finder.visit(stmt) return refs class HwachaASMTranslator(ast.NodeTransformer): def __init__(self, scalars, ref_register_map, body, type_map): self.scalars = scalars self.ref_register_map = ref_register_map self.body = body self.curr_register = -1 self.reg_map = {} self.type_map = type_map def get_next_register(self): self.curr_register += 1 return "vv{}".format(self.curr_register) def visit_SymbolRef(self, node): if node.name in self.reg_map: return self.reg_map[node.name] return node def visit_Cast(self, node): reg = self.get_next_register() value = self.visit(node.value) if isinstance(node.type, ct.c_float): self.body.append(" vfcvt.s.w {0}, {1}\\n".format(reg, value)) self.type_map[reg] = ct.c_float return reg else: raise NotImplementedError() def visit_Constant(self, node): self.type_map[node.value] = get_ctype(node.value) return self.scalars[node.value] def visit_FunctionCall(self, node): if 
node.func.name == 'max': arg1 = self.visit(node.args[0]) arg2 = self.visit(node.args[1]) reg = self.get_next_register() print(node) print(arg1) if self.type_map[arg1] == ct.c_float or \ self.type_map[arg2] == ct.c_float: self.body.append(" vfmax.s {0}, {1}, {2}\\n".format( reg, arg1, arg2 )) self.type_map[reg] = ct.c_float return reg elif node.func.name == 'min': arg1 = self.visit(node.args[0]) arg2 = self.visit(node.args[1]) reg = self.get_next_register() if self.type_map[arg1] == ct.c_float or \ self.type_map[arg2] == ct.c_float: self.body.append(" vfmin.s {0}, {1}, {2}\\n".format( reg, arg1, arg2 )) self.type_map[reg] = ct.c_float return reg raise NotImplementedError() def visit_BinaryOp(self, node): if isinstance(node.op, C.Op.ArrayRef): reg = self.get_next_register() self.body.append(" vlwu {0}, {1}\\n".format( reg, self.ref_register_map[str(node)][1])) return reg if isinstance(node.op, C.Op.Assign): node.right = self.visit(node.right) if isinstance(node.left, C.SymbolRef): self.reg_map[node.left.name] = node.right return elif isinstance(node.left, C.BinaryOp) and \ isinstance(node.left.op, C.Op.ArrayRef): if self.type_map[node.left.left.name] != self.type_map[node.right]: reg = self.get_next_register() self.body.append(" vfcvt.w.s {0}, {1}\\n".format(reg, node.right)) self.body.append(" vsw {0}, {1}\\n".format(reg, self.ref_register_map[str(node.left)][1])) return node.left = self.visit(node.left) node.right = self.visit(node.right) reg = self.get_next_register() if isinstance(node.op, C.Op.Sub): self.body.append(" vsub {0}, {1}, {2}\\n".format( reg, node.left, node.right)) elif isinstance(node.op, C.Op.Div): if self.type_map[node.left] == ct.c_float or \ self.type_map[node.right] == ct.c_float: self.body.append(" vfdiv.s {0}, {1}, {2}\\n".format( reg, node.left, node.right)) self.type_map[reg] = ct.c_float else: raise NotImplementedError() elif isinstance(node.op, C.Op.Mul): if self.type_map[node.left] == ct.c_float or \ self.type_map[node.right] == ct.c_float: self.body.append(" vfmul.s {0}, {1}, {2}\\n".format( reg, node.left, node.right)) self.type_map[reg] = ct.c_float else: raise NotImplementedError() return reg def get_asm_body(node, scalars, refs, type_map): body = """ __asm__ volatile ( ".align 3\\n" "__hwacha_body:\\n" """ asm_body = [] translator = HwachaASMTranslator(scalars, refs, asm_body, type_map) for s in node.body: translator.visit(s) for s in asm_body: body += "\"" + s + "\"\n" body += "\" vstop\\n\"\n" body += " );" return StringTemplate(body) class
(ast.NodeTransformer): def __init__(self, type_map, defns): self.type_map = type_map self.defns = defns def visit_For(self, node): if node.pragma == "ivdep": block = [] loopvar = node.incr.arg size = node.test.right scalars = get_scalars_in_body(node) refs = get_array_references_in_body(node) ref_register_map = {} scalar_register_map = {} for index, ref in enumerate(refs): ref_register_map[str(ref)] = (ref, "va{}".format(index)) for index, scalar in enumerate(scalars): reg = "vs{}".format(index) scalar_register_map[scalar] = reg self.type_map[reg] = get_ctype(scalar) body = [] block.append(StringTemplate(hwacha_configure_block.format(SIZE=size))) node.incr = C.AddAssign(loopvar, C.SymbolRef("vector_length")) self.defns.append(get_asm_body(node, scalar_register_map, ref_register_map, self.type_map)) block.append(node) body.append(StringTemplate(bounds_check.format(SIZE=size, loopvar=loopvar))) for scalar in scalars: body.append(scalar_init(scalar)) body.append(StringTemplate(obtained_vector_length.format(SIZE=size, loopvar=loopvar))) block1 = "" block2 = "" index = 0 for _, info in ref_register_map.items(): ref, register = info block1 += "\t \"vmsa {0}, %{1}\\n\"\n".format(register, index) block2 += "\"r\"({0} + {1}),\n".format( ref.left.name, ref.right.name) index += 1 for scalar, register in scalar_register_map.items(): block1 += "\t \"vmss {0}, %{1}\\n\"\n".format(register, index) block2 += "\"r\"({0}.i),\n".format( "".join(number_dict[digit] for digit in str(scalar))) index += 1 block1 += "\"fence\\n\"\n" block1 += "\"vf 0(%{0})\\n\"\n".format(index) block2 += "\"r\" (&__hwacha_body)" body.append(StringTemplate( """ __asm__ volatile( {block1} : : {block2} : "memory" ); """.format(block1=block1, block2=block2))) node.body = body block.append( StringTemplate(""" __asm__ volatile( "fence\\n" ); """)) return block class HwachaTranslator(LazySpecializedFunction): def args_to_subconfig(self, args): return tuple(get_nd_pointer(arg) for arg in args) def transform(self, py_ast, program_cfg): arg_cfg, tune_cfg = program_cfg tree = PyBasicConversions().visit(py_ast) param_dict = {} tree.body[0].params.append(C.SymbolRef("retval", arg_cfg[0]())) # Annotate arguments for param, type in zip(tree.body[0].params, arg_cfg): param.type = type() param_dict[param.name] = type._dtype_ length = np.prod(arg_cfg[0]._shape_) transformer = MapTransformer("i", param_dict, "retval") body = list(map(transformer.visit, tree.body[0].defn)) tree.body[0].defn = [C.For( C.Assign(C.SymbolRef("i", ct.c_int()), C.Constant(0)), C.Lt(C.SymbolRef("i"), C.Constant(length)), C.PostInc(C.SymbolRef("i")), body=body, pragma="ivdep" )] tree = DeclarationFiller().visit(tree) defns = [] tree = HwachaVectorize(param_dict, defns).visit(tree) file_body = [ StringTemplate("#include <stdlib.h>"), StringTemplate("#include <stdint.h>"), StringTemplate("#include <assert.h>"), StringTemplate("extern \"C\" void __hwacha_body(void);"), ] file_body.extend(defns) file_body.append(tree) return [CFile("generated", file_body)] def finalize(self, transform_result, program_config): generated = transform_result[0] print(generated) proj = Project([generated]) entry_type = ct.CFUNCTYPE(None, *program_config[0]) return HwachaFN().finalize("apply", proj, entry_type) def hwacha_map(fn, *args): mapfn = HwachaTranslator.from_function(fn, "map") retval = np.empty_like(args[0]) args += (retval, ) mapfn(*args) return retval CALIBRATE_COLD = 0x7000 CALIBRATE_HOT = 0xA000 SIZE = (208 * 156) # Generate a dummy calibration table, just so there's something # to 
execute. cold = np.full(SIZE, CALIBRATE_COLD, np.int32) hot = np.full(SIZE, CALIBRATE_HOT, np.int32) # Generate a dummy input image, again just so there's something # to execute. raw = np.empty(SIZE, np.int32) for i in range(SIZE): scale = (CALIBRATE_HOT - CALIBRATE_COLD) percent = (i % 120) - 10 raw[i] = scale * (percent / 100.0) + CALIBRATE_COLD raw[i] = CALIBRATE_COLD + (i % (int)(scale - 2)) + 1 def gold(cold, hot, raw, flat): for i in range(208 * 156): _max = hot[i] _min = cold[i] offset = raw[i] - _min scale = _max - _min foffset = float(offset) fscale = float(scale) scaled = foffset / fscale scaled = min(1.0, scaled) scaled = max(0.0, scaled) flat[i] = 255 * scaled def test_map(cold, hot, raw): _max = hot _min = cold offset = raw - _min scale = _max - _min foffset = float(offset) fscale = float(scale) scaled = foffset / fscale scaled = min(1.0, scaled) scaled = max(0.0, scaled) return 255.0 * scaled flat_gold = np.empty_like(raw) gold(cold, hot, raw, flat_gold) flat_test = hwacha_map(test_map, cold, hot, raw) np.testing.assert_array_equal(flat_gold, flat_test)
HwachaVectorize
identifier_name
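`HwachaVectorize.visit_For` in this record strip-mines the loop: `vsetvl` grants a vector length, the loop increment becomes `+= vector_length`, and the final trip gets a shorter obtained length. A pure-Python sketch of that control flow only (the trip structure, not the Hwacha semantics):

def strip_mine(n, max_vl, body):
    # Each trip covers up to max_vl elements; the last trip is shorter,
    # mirroring what vsetvl returns when fewer than max_vl remain.
    i = 0
    while i < n:
        vl = min(max_vl, n - i)
        body(i, vl)       # the vectorized body runs over [i, i + vl)
        i += vl           # loop increment rewritten to += vector_length

strip_mine(10, 4, lambda i, vl: print("process [{}, {})".format(i, i + vl)))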
hwacha.py
import numpy as np import ctypes as ct import ast from ctree.jit import LazySpecializedFunction, ConcreteSpecializedFunction from ctree.transformations import PyBasicConversions from ctree.transforms.declaration_filler import DeclarationFiller from ctree.c.nodes import CFile import ctree.c.nodes as C from ctree.nodes import Project from ctree.types import get_ctype from ctree.templates.nodes import StringTemplate def get_nd_pointer(arg): return np.ctypeslib.ndpointer(arg.dtype, arg.ndim, arg.shape) class HwachaFN(ConcreteSpecializedFunction): def finalize(self, entry_point_name, project_node, entry_typesig): self._c_function = self._compile(entry_point_name, project_node, entry_typesig) return self def __call__(self, *args): return self._c_function(*args) class MapTransformer(ast.NodeTransformer): def __init__(self, loopvar, param_dict, retval_name): self.loopvar = loopvar self.param_dict = param_dict self.retval_name = retval_name def visit_SymbolRef(self, node): if node.name in self.param_dict: return C.ArrayRef(node, C.SymbolRef(self.loopvar)) return node def visit_Return(self, node): node.value = self.visit(node.value) return C.Assign(C.ArrayRef(C.SymbolRef(self.retval_name), C.SymbolRef(self.loopvar)), node.value) hwacha_configure_block = """ size_t vector_length; __asm__ volatile ( "vsetcfg 16, 1\\n" "vsetvl %0, %1\\n" : "=r"(vector_length) : "r"({SIZE}) ); """ bounds_check = """ if ({SIZE} == {loopvar}) continue;
class ScalarFinder(ast.NodeVisitor): def __init__(self, scalars): self.scalars = scalars def visit_Constant(self, node): self.scalars.add(node.value) def get_scalars_in_body(node): scalars = set() visitor = ScalarFinder(scalars) for stmt in node.body: visitor.visit(stmt) return scalars number_dict = { "1": "one", "2": "two", "3": "three", "4": "four", "5": "five", "6": "six", "7": "seven", "8": "eight", "9": "nine", "0": "zero", ".": "dot" } def scalar_init(scalar): name = "".join(number_dict[digit] for digit in str(scalar)) return StringTemplate(""" union {{ float f; uint32_t i; }} {name}; {name}.f = {scalar}f; """.format(name=name, scalar=scalar)) obtained_vector_length = """ size_t obtained_vector_length; __asm__ volatile( "vsetvl %0, %1\\n" : "=r"(obtained_vector_length) : "r"({SIZE} - {loopvar}) ); assert(obtained_vector_length <= {SIZE}); """ class ArrayRefFinder(ast.NodeVisitor): def __init__(self, refs): self.refs = refs def visit_BinaryOp(self, node): if isinstance(node.op, C.Op.ArrayRef): self.refs.append(node) else: self.visit(node.left) self.visit(node.right) def get_array_references_in_body(node): refs = [] finder = ArrayRefFinder(refs) for stmt in node.body: finder.visit(stmt) return refs class HwachaASMTranslator(ast.NodeTransformer): def __init__(self, scalars, ref_register_map, body, type_map): self.scalars = scalars self.ref_register_map = ref_register_map self.body = body self.curr_register = -1 self.reg_map = {} self.type_map = type_map def get_next_register(self): self.curr_register += 1 return "vv{}".format(self.curr_register) def visit_SymbolRef(self, node): if node.name in self.reg_map: return self.reg_map[node.name] return node def visit_Cast(self, node): reg = self.get_next_register() value = self.visit(node.value) if isinstance(node.type, ct.c_float): self.body.append(" vfcvt.s.w {0}, {1}\\n".format(reg, value)) self.type_map[reg] = ct.c_float return reg else: raise NotImplementedError() def visit_Constant(self, node): self.type_map[node.value] = get_ctype(node.value) return self.scalars[node.value] def visit_FunctionCall(self, node): if node.func.name == 'max': arg1 = self.visit(node.args[0]) arg2 = self.visit(node.args[1]) reg = self.get_next_register() print(node) print(arg1) if self.type_map[arg1] == ct.c_float or \ self.type_map[arg2] == ct.c_float: self.body.append(" vfmax.s {0}, {1}, {2}\\n".format( reg, arg1, arg2 )) self.type_map[reg] = ct.c_float return reg elif node.func.name == 'min': arg1 = self.visit(node.args[0]) arg2 = self.visit(node.args[1]) reg = self.get_next_register() if self.type_map[arg1] == ct.c_float or \ self.type_map[arg2] == ct.c_float: self.body.append(" vfmin.s {0}, {1}, {2}\\n".format( reg, arg1, arg2 )) self.type_map[reg] = ct.c_float return reg raise NotImplementedError() def visit_BinaryOp(self, node): if isinstance(node.op, C.Op.ArrayRef): reg = self.get_next_register() self.body.append(" vlwu {0}, {1}\\n".format( reg, self.ref_register_map[str(node)][1])) return reg if isinstance(node.op, C.Op.Assign): node.right = self.visit(node.right) if isinstance(node.left, C.SymbolRef): self.reg_map[node.left.name] = node.right return elif isinstance(node.left, C.BinaryOp) and \ isinstance(node.left.op, C.Op.ArrayRef): if self.type_map[node.left.left.name] != self.type_map[node.right]: reg = self.get_next_register() self.body.append(" vfcvt.w.s {0}, {1}\\n".format(reg, node.right)) self.body.append(" vsw {0}, {1}\\n".format(reg, self.ref_register_map[str(node.left)][1])) return node.left = self.visit(node.left) node.right = 
self.visit(node.right) reg = self.get_next_register() if isinstance(node.op, C.Op.Sub): self.body.append(" vsub {0}, {1}, {2}\\n".format( reg, node.left, node.right)) elif isinstance(node.op, C.Op.Div): if self.type_map[node.left] == ct.c_float or \ self.type_map[node.right] == ct.c_float: self.body.append(" vfdiv.s {0}, {1}, {2}\\n".format( reg, node.left, node.right)) self.type_map[reg] = ct.c_float else: raise NotImplementedError() elif isinstance(node.op, C.Op.Mul): if self.type_map[node.left] == ct.c_float or \ self.type_map[node.right] == ct.c_float: self.body.append(" vfmul.s {0}, {1}, {2}\\n".format( reg, node.left, node.right)) self.type_map[reg] = ct.c_float else: raise NotImplementedError() return reg def get_asm_body(node, scalars, refs, type_map): body = """ __asm__ volatile ( ".align 3\\n" "__hwacha_body:\\n" """ asm_body = [] translator = HwachaASMTranslator(scalars, refs, asm_body, type_map) for s in node.body: translator.visit(s) for s in asm_body: body += "\"" + s + "\"\n" body += "\" vstop\\n\"\n" body += " );" return StringTemplate(body) class HwachaVectorize(ast.NodeTransformer): def __init__(self, type_map, defns): self.type_map = type_map self.defns = defns def visit_For(self, node): if node.pragma == "ivdep": block = [] loopvar = node.incr.arg size = node.test.right scalars = get_scalars_in_body(node) refs = get_array_references_in_body(node) ref_register_map = {} scalar_register_map = {} for index, ref in enumerate(refs): ref_register_map[str(ref)] = (ref, "va{}".format(index)) for index, scalar in enumerate(scalars): reg = "vs{}".format(index) scalar_register_map[scalar] = reg self.type_map[reg] = get_ctype(scalar) body = [] block.append(StringTemplate(hwacha_configure_block.format(SIZE=size))) node.incr = C.AddAssign(loopvar, C.SymbolRef("vector_length")) self.defns.append(get_asm_body(node, scalar_register_map, ref_register_map, self.type_map)) block.append(node) body.append(StringTemplate(bounds_check.format(SIZE=size, loopvar=loopvar))) for scalar in scalars: body.append(scalar_init(scalar)) body.append(StringTemplate(obtained_vector_length.format(SIZE=size, loopvar=loopvar))) block1 = "" block2 = "" index = 0 for _, info in ref_register_map.items(): ref, register = info block1 += "\t \"vmsa {0}, %{1}\\n\"\n".format(register, index) block2 += "\"r\"({0} + {1}),\n".format( ref.left.name, ref.right.name) index += 1 for scalar, register in scalar_register_map.items(): block1 += "\t \"vmss {0}, %{1}\\n\"\n".format(register, index) block2 += "\"r\"({0}.i),\n".format( "".join(number_dict[digit] for digit in str(scalar))) index += 1 block1 += "\"fence\\n\"\n" block1 += "\"vf 0(%{0})\\n\"\n".format(index) block2 += "\"r\" (&__hwacha_body)" body.append(StringTemplate( """ __asm__ volatile( {block1} : : {block2} : "memory" ); """.format(block1=block1, block2=block2))) node.body = body block.append( StringTemplate(""" __asm__ volatile( "fence\\n" ); """)) return block class HwachaTranslator(LazySpecializedFunction): def args_to_subconfig(self, args): return tuple(get_nd_pointer(arg) for arg in args) def transform(self, py_ast, program_cfg): arg_cfg, tune_cfg = program_cfg tree = PyBasicConversions().visit(py_ast) param_dict = {} tree.body[0].params.append(C.SymbolRef("retval", arg_cfg[0]())) # Annotate arguments for param, type in zip(tree.body[0].params, arg_cfg): param.type = type() param_dict[param.name] = type._dtype_ length = np.prod(arg_cfg[0]._shape_) transformer = MapTransformer("i", param_dict, "retval") body = list(map(transformer.visit, tree.body[0].defn)) 
tree.body[0].defn = [C.For( C.Assign(C.SymbolRef("i", ct.c_int()), C.Constant(0)), C.Lt(C.SymbolRef("i"), C.Constant(length)), C.PostInc(C.SymbolRef("i")), body=body, pragma="ivdep" )] tree = DeclarationFiller().visit(tree) defns = [] tree = HwachaVectorize(param_dict, defns).visit(tree) file_body = [ StringTemplate("#include <stdlib.h>"), StringTemplate("#include <stdint.h>"), StringTemplate("#include <assert.h>"), StringTemplate("extern \"C\" void __hwacha_body(void);"), ] file_body.extend(defns) file_body.append(tree) return [CFile("generated", file_body)] def finalize(self, transform_result, program_config): generated = transform_result[0] print(generated) proj = Project([generated]) entry_type = ct.CFUNCTYPE(None, *program_config[0]) return HwachaFN().finalize("apply", proj, entry_type) def hwacha_map(fn, *args): mapfn = HwachaTranslator.from_function(fn, "map") retval = np.empty_like(args[0]) args += (retval, ) mapfn(*args) return retval CALIBRATE_COLD = 0x7000 CALIBRATE_HOT = 0xA000 SIZE = (208 * 156) # Generate a dummy calibration table, just so there's something # to execute. cold = np.full(SIZE, CALIBRATE_COLD, np.int32) hot = np.full(SIZE, CALIBRATE_HOT, np.int32) # Generate a dummy input image, again just so there's something # to execute. raw = np.empty(SIZE, np.int32) for i in range(SIZE): scale = (CALIBRATE_HOT - CALIBRATE_COLD) percent = (i % 120) - 10 raw[i] = scale * (percent / 100.0) + CALIBRATE_COLD raw[i] = CALIBRATE_COLD + (i % (int)(scale - 2)) + 1 def gold(cold, hot, raw, flat): for i in range(208 * 156): _max = hot[i] _min = cold[i] offset = raw[i] - _min scale = _max - _min foffset = float(offset) fscale = float(scale) scaled = foffset / fscale scaled = min(1.0, scaled) scaled = max(0.0, scaled) flat[i] = 255 * scaled def test_map(cold, hot, raw): _max = hot _min = cold offset = raw - _min scale = _max - _min foffset = float(offset) fscale = float(scale) scaled = foffset / fscale scaled = min(1.0, scaled) scaled = max(0.0, scaled) return 255.0 * scaled flat_gold = np.empty_like(raw) gold(cold, hot, raw, flat_gold) flat_test = hwacha_map(test_map, cold, hot, raw) np.testing.assert_array_equal(flat_gold, flat_test)
"""
random_line_split
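Two small helpers in this record reward a closer look: `scalar_init` mangles a float literal into a C identifier via `number_dict`, and its `union { float f; uint32_t i; }` exists only to reinterpret the float's bits for `vmss`. A runnable sketch of both, with `struct` standing in for the union:

import struct

number_dict = {"1": "one", "2": "two", "3": "three", "4": "four",
               "5": "five", "6": "six", "7": "seven", "8": "eight",
               "9": "nine", "0": "zero", ".": "dot"}

def scalar_name(scalar):
    # "1.0" -> "onedotzero", the identifier scalar_init() generates
    return "".join(number_dict[d] for d in str(scalar))

def float_bits(f):
    # What the C union accomplishes: the raw IEEE-754 bit pattern
    return struct.unpack("<I", struct.pack("<f", f))[0]

print(scalar_name(1.0), hex(float_bits(1.0)))  # onedotzero 0x3f800000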
hwacha.py
import numpy as np import ctypes as ct import ast from ctree.jit import LazySpecializedFunction, ConcreteSpecializedFunction from ctree.transformations import PyBasicConversions from ctree.transforms.declaration_filler import DeclarationFiller from ctree.c.nodes import CFile import ctree.c.nodes as C from ctree.nodes import Project from ctree.types import get_ctype from ctree.templates.nodes import StringTemplate def get_nd_pointer(arg): return np.ctypeslib.ndpointer(arg.dtype, arg.ndim, arg.shape) class HwachaFN(ConcreteSpecializedFunction): def finalize(self, entry_point_name, project_node, entry_typesig): self._c_function = self._compile(entry_point_name, project_node, entry_typesig) return self def __call__(self, *args): return self._c_function(*args) class MapTransformer(ast.NodeTransformer): def __init__(self, loopvar, param_dict, retval_name): self.loopvar = loopvar self.param_dict = param_dict self.retval_name = retval_name def visit_SymbolRef(self, node): if node.name in self.param_dict: return C.ArrayRef(node, C.SymbolRef(self.loopvar)) return node def visit_Return(self, node): node.value = self.visit(node.value) return C.Assign(C.ArrayRef(C.SymbolRef(self.retval_name), C.SymbolRef(self.loopvar)), node.value) hwacha_configure_block = """ size_t vector_length; __asm__ volatile ( "vsetcfg 16, 1\\n" "vsetvl %0, %1\\n" : "=r"(vector_length) : "r"({SIZE}) ); """ bounds_check = """ if ({SIZE} == {loopvar}) continue; """ class ScalarFinder(ast.NodeVisitor): def __init__(self, scalars): self.scalars = scalars def visit_Constant(self, node): self.scalars.add(node.value) def get_scalars_in_body(node): scalars = set() visitor = ScalarFinder(scalars) for stmt in node.body: visitor.visit(stmt) return scalars number_dict = { "1": "one", "2": "two", "3": "three", "4": "four", "5": "five", "6": "six", "7": "seven", "8": "eight", "9": "nine", "0": "zero", ".": "dot" } def scalar_init(scalar): name = "".join(number_dict[digit] for digit in str(scalar)) return StringTemplate(""" union {{ float f; uint32_t i; }} {name}; {name}.f = {scalar}f; """.format(name=name, scalar=scalar)) obtained_vector_length = """ size_t obtained_vector_length; __asm__ volatile( "vsetvl %0, %1\\n" : "=r"(obtained_vector_length) : "r"({SIZE} - {loopvar}) ); assert(obtained_vector_length <= {SIZE}); """ class ArrayRefFinder(ast.NodeVisitor): def __init__(self, refs): self.refs = refs def visit_BinaryOp(self, node): if isinstance(node.op, C.Op.ArrayRef): self.refs.append(node) else: self.visit(node.left) self.visit(node.right) def get_array_references_in_body(node): refs = [] finder = ArrayRefFinder(refs) for stmt in node.body: finder.visit(stmt) return refs class HwachaASMTranslator(ast.NodeTransformer): def __init__(self, scalars, ref_register_map, body, type_map): self.scalars = scalars self.ref_register_map = ref_register_map self.body = body self.curr_register = -1 self.reg_map = {} self.type_map = type_map def get_next_register(self): self.curr_register += 1 return "vv{}".format(self.curr_register) def visit_SymbolRef(self, node): if node.name in self.reg_map: return self.reg_map[node.name] return node def visit_Cast(self, node): reg = self.get_next_register() value = self.visit(node.value) if isinstance(node.type, ct.c_float): self.body.append(" vfcvt.s.w {0}, {1}\\n".format(reg, value)) self.type_map[reg] = ct.c_float return reg else: raise NotImplementedError() def visit_Constant(self, node): self.type_map[node.value] = get_ctype(node.value) return self.scalars[node.value] def visit_FunctionCall(self, node): if 
node.func.name == 'max': arg1 = self.visit(node.args[0]) arg2 = self.visit(node.args[1]) reg = self.get_next_register() print(node) print(arg1) if self.type_map[arg1] == ct.c_float or \ self.type_map[arg2] == ct.c_float: self.body.append(" vfmax.s {0}, {1}, {2}\\n".format( reg, arg1, arg2 )) self.type_map[reg] = ct.c_float return reg elif node.func.name == 'min': arg1 = self.visit(node.args[0]) arg2 = self.visit(node.args[1]) reg = self.get_next_register() if self.type_map[arg1] == ct.c_float or \ self.type_map[arg2] == ct.c_float: self.body.append(" vfmin.s {0}, {1}, {2}\\n".format( reg, arg1, arg2 )) self.type_map[reg] = ct.c_float return reg raise NotImplementedError() def visit_BinaryOp(self, node): if isinstance(node.op, C.Op.ArrayRef): reg = self.get_next_register() self.body.append(" vlwu {0}, {1}\\n".format( reg, self.ref_register_map[str(node)][1])) return reg if isinstance(node.op, C.Op.Assign): node.right = self.visit(node.right) if isinstance(node.left, C.SymbolRef): self.reg_map[node.left.name] = node.right return elif isinstance(node.left, C.BinaryOp) and \ isinstance(node.left.op, C.Op.ArrayRef): if self.type_map[node.left.left.name] != self.type_map[node.right]: reg = self.get_next_register() self.body.append(" vfcvt.w.s {0}, {1}\\n".format(reg, node.right)) self.body.append(" vsw {0}, {1}\\n".format(reg, self.ref_register_map[str(node.left)][1])) return node.left = self.visit(node.left) node.right = self.visit(node.right) reg = self.get_next_register() if isinstance(node.op, C.Op.Sub): self.body.append(" vsub {0}, {1}, {2}\\n".format( reg, node.left, node.right)) elif isinstance(node.op, C.Op.Div): if self.type_map[node.left] == ct.c_float or \ self.type_map[node.right] == ct.c_float: self.body.append(" vfdiv.s {0}, {1}, {2}\\n".format( reg, node.left, node.right)) self.type_map[reg] = ct.c_float else: raise NotImplementedError() elif isinstance(node.op, C.Op.Mul): if self.type_map[node.left] == ct.c_float or \ self.type_map[node.right] == ct.c_float: self.body.append(" vfmul.s {0}, {1}, {2}\\n".format( reg, node.left, node.right)) self.type_map[reg] = ct.c_float else: raise NotImplementedError() return reg def get_asm_body(node, scalars, refs, type_map): body = """ __asm__ volatile ( ".align 3\\n" "__hwacha_body:\\n" """ asm_body = [] translator = HwachaASMTranslator(scalars, refs, asm_body, type_map) for s in node.body: translator.visit(s) for s in asm_body: body += "\"" + s + "\"\n" body += "\" vstop\\n\"\n" body += " );" return StringTemplate(body) class HwachaVectorize(ast.NodeTransformer): def __init__(self, type_map, defns): self.type_map = type_map self.defns = defns def visit_For(self, node): if node.pragma == "ivdep": block = [] loopvar = node.incr.arg size = node.test.right scalars = get_scalars_in_body(node) refs = get_array_references_in_body(node) ref_register_map = {} scalar_register_map = {} for index, ref in enumerate(refs):
for index, scalar in enumerate(scalars): reg = "vs{}".format(index) scalar_register_map[scalar] = reg self.type_map[reg] = get_ctype(scalar) body = [] block.append(StringTemplate(hwacha_configure_block.format(SIZE=size))) node.incr = C.AddAssign(loopvar, C.SymbolRef("vector_length")) self.defns.append(get_asm_body(node, scalar_register_map, ref_register_map, self.type_map)) block.append(node) body.append(StringTemplate(bounds_check.format(SIZE=size, loopvar=loopvar))) for scalar in scalars: body.append(scalar_init(scalar)) body.append(StringTemplate(obtained_vector_length.format(SIZE=size, loopvar=loopvar))) block1 = "" block2 = "" index = 0 for _, info in ref_register_map.items(): ref, register = info block1 += "\t \"vmsa {0}, %{1}\\n\"\n".format(register, index) block2 += "\"r\"({0} + {1}),\n".format( ref.left.name, ref.right.name) index += 1 for scalar, register in scalar_register_map.items(): block1 += "\t \"vmss {0}, %{1}\\n\"\n".format(register, index) block2 += "\"r\"({0}.i),\n".format( "".join(number_dict[digit] for digit in str(scalar))) index += 1 block1 += "\"fence\\n\"\n" block1 += "\"vf 0(%{0})\\n\"\n".format(index) block2 += "\"r\" (&__hwacha_body)" body.append(StringTemplate( """ __asm__ volatile( {block1} : : {block2} : "memory" ); """.format(block1=block1, block2=block2))) node.body = body block.append( StringTemplate(""" __asm__ volatile( "fence\\n" ); """)) return block class HwachaTranslator(LazySpecializedFunction): def args_to_subconfig(self, args): return tuple(get_nd_pointer(arg) for arg in args) def transform(self, py_ast, program_cfg): arg_cfg, tune_cfg = program_cfg tree = PyBasicConversions().visit(py_ast) param_dict = {} tree.body[0].params.append(C.SymbolRef("retval", arg_cfg[0]())) # Annotate arguments for param, type in zip(tree.body[0].params, arg_cfg): param.type = type() param_dict[param.name] = type._dtype_ length = np.prod(arg_cfg[0]._shape_) transformer = MapTransformer("i", param_dict, "retval") body = list(map(transformer.visit, tree.body[0].defn)) tree.body[0].defn = [C.For( C.Assign(C.SymbolRef("i", ct.c_int()), C.Constant(0)), C.Lt(C.SymbolRef("i"), C.Constant(length)), C.PostInc(C.SymbolRef("i")), body=body, pragma="ivdep" )] tree = DeclarationFiller().visit(tree) defns = [] tree = HwachaVectorize(param_dict, defns).visit(tree) file_body = [ StringTemplate("#include <stdlib.h>"), StringTemplate("#include <stdint.h>"), StringTemplate("#include <assert.h>"), StringTemplate("extern \"C\" void __hwacha_body(void);"), ] file_body.extend(defns) file_body.append(tree) return [CFile("generated", file_body)] def finalize(self, transform_result, program_config): generated = transform_result[0] print(generated) proj = Project([generated]) entry_type = ct.CFUNCTYPE(None, *program_config[0]) return HwachaFN().finalize("apply", proj, entry_type) def hwacha_map(fn, *args): mapfn = HwachaTranslator.from_function(fn, "map") retval = np.empty_like(args[0]) args += (retval, ) mapfn(*args) return retval CALIBRATE_COLD = 0x7000 CALIBRATE_HOT = 0xA000 SIZE = (208 * 156) # Generate a dummy calibration table, just so there's something # to execute. cold = np.full(SIZE, CALIBRATE_COLD, np.int32) hot = np.full(SIZE, CALIBRATE_HOT, np.int32) # Generate a dummy input image, again just so there's something # to execute. 
raw = np.empty(SIZE, np.int32) for i in range(SIZE): scale = (CALIBRATE_HOT - CALIBRATE_COLD) percent = (i % 120) - 10 raw[i] = scale * (percent / 100.0) + CALIBRATE_COLD raw[i] = CALIBRATE_COLD + (i % (int)(scale - 2)) + 1 def gold(cold, hot, raw, flat): for i in range(208 * 156): _max = hot[i] _min = cold[i] offset = raw[i] - _min scale = _max - _min foffset = float(offset) fscale = float(scale) scaled = foffset / fscale scaled = min(1.0, scaled) scaled = max(0.0, scaled) flat[i] = 255 * scaled def test_map(cold, hot, raw): _max = hot _min = cold offset = raw - _min scale = _max - _min foffset = float(offset) fscale = float(scale) scaled = foffset / fscale scaled = min(1.0, scaled) scaled = max(0.0, scaled) return 255.0 * scaled flat_gold = np.empty_like(raw) gold(cold, hot, raw, flat_gold) flat_test = hwacha_map(test_map, cold, hot, raw) np.testing.assert_array_equal(flat_gold, flat_test)
ref_register_map[str(ref)] = (ref, "va{}".format(index))
conditional_block
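The middle of this record is the body of the first `for` in `visit_For`, which hands out address registers. A sketch of that allocation scheme with toy inputs, matching the `va{n}`/`vs{n}` naming used above (the example refs and scalars are invented):

def assign_registers(refs, scalars):
    # Array references get address registers va0, va1, ... keyed by
    # their printed form; scalar constants get vs0, vs1, ...
    ref_map = {str(r): (r, "va{}".format(i)) for i, r in enumerate(refs)}
    scalar_map = {s: "vs{}".format(i) for i, s in enumerate(scalars)}
    return ref_map, scalar_map

ref_map, scalar_map = assign_registers(["raw[i]", "flat[i]"], [0.0, 1.0, 255.0])
print(ref_map)     # {'raw[i]': ('raw[i]', 'va0'), 'flat[i]': ('flat[i]', 'va1')}
print(scalar_map)  # {0.0: 'vs0', 1.0: 'vs1', 255.0: 'vs2'}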
lib.rs
//! `hull` is Theseus's shell for basic interactive systems operations. //! //! Just as the hull is the outermost layer or "shell" of a boat or ship, //! this crate `hull` is the shell of the "Ship of Theseus" (this OS). //! //! Functionally, this is similar to bash, zsh, fish, etc. //! //! This shell will eventually supersede the shell located at //! `applications/shell`. //! //! Terminology used in this file using `sleep 1 | sleep 2 & sleep 3` as an //! example: //! - A line is an entire line of user input i.e. `sleep 1 | sleep 2 & sleep 3`. //! - A task is a subset of a line used to spawn an individual task i.e. `sleep //! 1`, `sleep 2`, and `sleep 3`. //! - A job is a list of piped tasks i.e. `sleep 1 | sleep 2`, and `sleep 3`. //! - A command is the first word in a task i.e. `sleep`. //! - The arguments are any subsequent words in a task i.e. `1`, `2`, and `3`. #![cfg_attr(not(test), no_std)] #![feature(extend_one, let_chains)] extern crate alloc; mod builtin; mod error; mod job; mod parse; mod wrapper; use crate::{ job::{JobPart, State}, parse::{ParsedJob, ParsedLine, ParsedTask}, }; use alloc::{borrow::ToOwned, format, string::String, sync::Arc, vec::Vec}; use app_io::{println, IoStreams}; use core::fmt::Write; use hashbrown::HashMap; use job::Job; use log::{error, warn}; use noline::{builder::EditorBuilder, sync::embedded::IO as Io}; use path::Path; use stdio::Stdio; use sync_block::Mutex; use task::{ExitValue, KillReason}; use tty::{Event, LineDiscipline}; pub use crate::error::{Error, Result}; pub fn main(_: Vec<String>) -> isize { let mut shell = Shell { discipline: app_io::line_discipline().expect("no line discipline"), jobs: Arc::new(Mutex::new(HashMap::new())), stop_order: Vec::new(), history: Vec::new(), }; let result = shell.run(); shell.set_app_discipline(); if let Err(e) = result { println!("{e:?}"); -1 } else { 0 } } pub struct Shell { discipline: Arc<LineDiscipline>, // TODO: Could use a vec-based data structure like Vec<Option<JoinableTaskRef>> // Adding a job would iterate over the vec trying to find a None and if it can't, push to the // end. Removing a job would replace the job with None. jobs: Arc<Mutex<HashMap<usize, Job>>>, stop_order: Vec<usize>, history: Vec<String>, } impl Shell { /// Configures the line discipline for use by the shell. fn set_shell_discipline(&self) { self.discipline.set_raw(); } /// Configures the line discipline for use by applications. fn set_app_discipline(&self) -> AppDisciplineGuard { self.discipline.set_sane(); AppDisciplineGuard { discipline: self.discipline.clone(), } } fn run(&mut self) -> Result<()> { self.set_shell_discipline(); let wrapper = wrapper::Wrapper { stdin: app_io::stdin().expect("no stdin"), stdout: app_io::stdout().expect("no stdout"), }; let mut io = Io::new(wrapper); let mut editor = EditorBuilder::new_unbounded() .with_unbounded_history() .build_sync(&mut io) .expect("couldn't instantiate line editor"); loop { editor.dedup_history(); if let Ok(line) = editor.readline("> ", &mut io) { match self.execute_line(line) { Ok(()) => {} Err(Error::ExitRequested) => return Ok(()), Err(e) => return Err(e), }; } else { write!(io, "failed to read line").expect("failed to write output"); } } } fn execute_line(&mut self, line: &str) -> Result<()> { let parsed_line = ParsedLine::from(line); if parsed_line.is_empty() { return Ok(()); } // TODO: Use line editor history.
self.history.push(line.to_owned()); for (job_str, job) in parsed_line.background { if let Err(error) = self.execute_cmd(job, job_str, false) { error.print()?; } } if let Some((job_str, job)) = parsed_line.foreground { let app_discipline_guard = self.set_app_discipline(); match self.execute_cmd(job, job_str, true) { Ok(Some(foreground_id)) => { if let Err(error) = self.wait_on_job(foreground_id) { error.print()?; } } Ok(None) => {} Err(error) => error.print()?, } drop(app_discipline_guard); } Ok(()) } /// Executes a command. fn execute_cmd( &mut self, parsed_job: ParsedJob, job_str: &str, current: bool, ) -> Result<Option<usize>> { let shell_streams = app_io::streams().unwrap(); let stderr = shell_streams.stderr; let mut previous_output = shell_streams.stdin; let mut iter = parsed_job.into_iter().peekable(); let mut task = iter.next(); let mut jobs = self.jobs.lock(); let mut job_id = 1; let mut temp_job = Job { string: job_str.to_owned(), parts: Vec::new(), current, }; loop { match jobs.try_insert(job_id, temp_job) { Ok(_) => break, Err(e) => { temp_job = e.value; } } job_id += 1; } drop(jobs); while let Some(ParsedTask { command, args }) = task { if iter.peek().is_none() { if let Some(result) = self.execute_builtin(command, &args) { self.jobs.lock().remove(&job_id); return result.map(|_| None); } else { let streams = IoStreams { // TODO: Technically clone not needed. stdin: previous_output.clone(), stdout: shell_streams.stdout.clone(), stderr: stderr.clone(), discipline: shell_streams.discipline, }; let part = self.resolve_external(command, args, streams, job_id)?; self.jobs.lock().get_mut(&job_id).unwrap().parts.push(part); return Ok(Some(job_id)); } } // TODO: Piped builtin commands. let pipe = Stdio::new(); let streams = IoStreams { stdin: previous_output.clone(), stdout: Arc::new(pipe.get_writer()), stderr: stderr.clone(), discipline: None, }; let part = self.resolve_external(command, args, streams, job_id)?; self.jobs.lock().get_mut(&job_id).unwrap().parts.push(part); previous_output = Arc::new(pipe.get_reader()); task = iter.next(); } unreachable!("called execute_cmd with empty command"); } fn wait_on_job(&mut self, num: usize) -> Result<()> { let jobs = self.jobs.lock(); let Some(job) = jobs.get(&num) else { return Ok(()) }; if !job.current { warn!("asked to wait on non-current job"); return Ok(()); } drop(jobs); self.discipline.clear_events(); let event_receiver = self.discipline.event_receiver(); loop { // TODO: Use async futures::select! loop? 
if let Ok(event) = event_receiver.try_receive() { match event { Event::CtrlC => { if let Some(mut job) = self.jobs.lock().remove(&num) { job.kill()?; } else { error!("tried to kill a job that doesn't exist"); } return Err(Error::Command(130)); } Event::CtrlD => error!("received ctrl+d event"), Event::CtrlZ => error!("received ctrl+z event"), } } else { let mut jobs = self.jobs.lock(); if let Some(job) = jobs.get_mut(&num) && let Some(exit_value) = job.exit_value() { jobs.remove(&num); return match exit_value { 0 => Ok(()), _ => Err(Error::Command(exit_value)), }; } } scheduler::schedule(); } } fn execute_builtin(&mut self, cmd: &str, args: &[&str]) -> Option<Result<()>> { Some(match cmd { "" => Ok(()), "alias" => self.alias(args), "bg" => self.bg(args), "cd" => self.cd(args), "exec" => self.exec(args), "exit" => self.exit(args), "export" => self.export(args), "fc" => self.fc(args), "fg" => self.fg(args), "getopts" => self.getopts(args), "hash" => self.hash(args), "history" => { self.history(args); Ok(()) } "jobs" => self.jobs(args), "set" => self.set(args), "unalias" => self.unalias(args), "unset" => self.unset(args), "wait" => self.wait(args), _ => return None, }) } fn resolve_external( &self, cmd: &str, args: Vec<&str>, streams: IoStreams, job_id: usize, ) -> Result<JobPart>
} struct AppDisciplineGuard { discipline: Arc<LineDiscipline>, } impl Drop for AppDisciplineGuard { fn drop(&mut self) { self.discipline.set_raw(); } } #[cfg(test)] mod tests { use super::*; use alloc::vec; #[test] fn test_split_pipes() { assert_eq!( split_pipes("a b c |d e f|g | h | i j"), vec![ ("a", vec!["b", "c"]), ("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![]), ("i", vec!["j"]) ] ); } #[test] fn test_parse_line() { assert_eq!( parse_line("a b|c &d e f|g | h & i j | k"), ParsedLine { background: vec![ vec![("a", vec!["b"]), ("c", vec![])], vec![("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![])], ], foreground: Some(vec![("i", vec!["j"]), ("k", vec![])]), } ); assert_eq!( parse_line("a b|c &d e f|g | h & i j | k& "), ParsedLine { background: vec![ vec![("a", vec!["b"]), ("c", vec![])], vec![("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![])], vec![("i", vec!["j"]), ("k", vec![])] ], foreground: None, } ); } }
{ let namespace_dir = task::get_my_current_task() .map(|t| t.get_namespace().dir().clone()) .expect("couldn't get namespace dir"); let crate_name = format!("{cmd}-"); let mut matching_files = namespace_dir .get_files_starting_with(&crate_name) .into_iter(); let app_path = match matching_files.next() { Some(f) => Path::new(f.lock().get_absolute_path()), None => return Err(Error::CommandNotFound(cmd.to_owned())), }; if matching_files.next().is_some() { println!("multiple matching files found, running: {app_path}"); } let task = spawn::new_application_task_builder(app_path, None) .map_err(Error::SpawnFailed)? .argument(args.into_iter().map(ToOwned::to_owned).collect::<Vec<_>>()) .block() .spawn() .unwrap(); let task_ref = task.clone(); let id = task.id; // TODO: Double arc :( app_io::insert_child_streams(id, streams); task.unblock().map_err(Error::UnblockFailed)?; // Spawn watchdog task. spawn::new_task_builder( move |_| { let task_ref = task.clone(); let exit_value = match task.join().unwrap() { ExitValue::Completed(status) => { match status.downcast_ref::<isize>() { Some(num) => *num, // FIXME: Document/decide on a number for when app doesn't // return isize. None => 210, } } ExitValue::Killed(reason) => match reason { // FIXME: Document/decide on a number. This is used by bash. KillReason::Requested => 130, KillReason::Panic(_) => 1, KillReason::Exception(num) => num.into(), }, }; let mut jobs = self.jobs.lock(); if let Some(mut job) = jobs.remove(&job_id) { for part in job.parts.iter_mut() { if part.task == task_ref { part.state = State::Done(exit_value); break; } } if job.current { jobs.insert(job_id, job); } } }, (), ) .spawn() .map_err(Error::SpawnFailed)?; Ok(JobPart { state: State::Running, task: task_ref, }) }
identifier_body
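`resolve_external` in the middle of this record locates a command by filename prefix: files named `<cmd>-...` in the namespace directory, erroring when none match and warning (then taking the first) when several do. A rough Python analogue under those assumptions; the `namespace_dir` argument is hypothetical, and Theseus's real lookup goes through `get_files_starting_with`:

from pathlib import Path

def resolve_command(namespace_dir, cmd):
    # Find files whose name starts with "<cmd>-"; fail if none match,
    # take the first (with a warning) if several do.
    matches = sorted(Path(namespace_dir).glob(cmd + "-*"))
    if not matches:
        raise FileNotFoundError("command not found: " + cmd)
    if len(matches) > 1:
        print("multiple matching files found, running: {}".format(matches[0]))
    return matches[0]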
lib.rs
//! `hull` is Theseus's shell for basic interactive systems operations. //! //! Just as the hull is the outermost layer or "shell" of a boat or ship, //! this crate `hull` is the shell of the "Ship of Theseus" (this OS). //! //! Functionally, this is similar to bash, zsh, fish, etc. //! //! This shell will eventually supersede the shell located at //! `applications/shell`. //! //! Terminology used in this file using `sleep 1 | sleep 2 & sleep 3` as an //! example: //! - A line is an entire line of user input i.e. `sleep 1 | sleep 2 & sleep 3`. //! - A task is a subset of a line used to spawn an individual task i.e. `sleep //! 1`, `sleep 2`, and `sleep 3`. //! - A job is a list of piped tasks i.e. `sleep 1 | sleep 2`, and `sleep 3`. //! - A command is the first word in a task i.e. `sleep`. //! - The arguments are any subsequent words in a task i.e. `1`, `2`, and `3`. #![cfg_attr(not(test), no_std)] #![feature(extend_one, let_chains)] extern crate alloc; mod builtin; mod error; mod job; mod parse; mod wrapper; use crate::{ job::{JobPart, State}, parse::{ParsedJob, ParsedLine, ParsedTask}, }; use alloc::{borrow::ToOwned, format, string::String, sync::Arc, vec::Vec}; use app_io::{println, IoStreams}; use core::fmt::Write; use hashbrown::HashMap; use job::Job; use log::{error, warn}; use noline::{builder::EditorBuilder, sync::embedded::IO as Io}; use path::Path; use stdio::Stdio; use sync_block::Mutex; use task::{ExitValue, KillReason}; use tty::{Event, LineDiscipline}; pub use crate::error::{Error, Result}; pub fn main(_: Vec<String>) -> isize { let mut shell = Shell { discipline: app_io::line_discipline().expect("no line discipline"), jobs: Arc::new(Mutex::new(HashMap::new())), stop_order: Vec::new(), history: Vec::new(), }; let result = shell.run(); shell.set_app_discipline(); if let Err(e) = result { println!("{e:?}"); -1 } else { 0 } } pub struct Shell { discipline: Arc<LineDiscipline>, // TODO: Could use a vec-based data structure like Vec<Option<JoinableTaskRef>> // Adding a job would iterate over the vec trying to find a None and if it can't, push to the // end. Removing a job would replace the job with None. jobs: Arc<Mutex<HashMap<usize, Job>>>, stop_order: Vec<usize>, history: Vec<String>, } impl Shell { /// Configures the line discipline for use by the shell. fn set_shell_discipline(&self) { self.discipline.set_raw(); } /// Configures the line discipline for use by applications. fn set_app_discipline(&self) -> AppDisciplineGuard { self.discipline.set_sane(); AppDisciplineGuard { discipline: self.discipline.clone(), } } fn run(&mut self) -> Result<()> { self.set_shell_discipline(); let wrapper = wrapper::Wrapper { stdin: app_io::stdin().expect("no stdin"), stdout: app_io::stdout().expect("no stdout"), }; let mut io = Io::new(wrapper); let mut editor = EditorBuilder::new_unbounded() .with_unbounded_history() .build_sync(&mut io) .expect("couldn't instantiate line editor"); loop { editor.dedup_history(); if let Ok(line) = editor.readline("> ", &mut io) { match self.execute_line(line) { Ok(()) => {} Err(Error::ExitRequested) => return Ok(()), Err(e) => return Err(e), }; } else { write!(io, "failed to read line").expect("failed to write output"); } } } fn execute_line(&mut self, line: &str) -> Result<()> { let parsed_line = ParsedLine::from(line); if parsed_line.is_empty() { return Ok(()); } // TODO: Use line editor history.
self.history.push(line.to_owned()); for (job_str, job) in parsed_line.background { if let Err(error) = self.execute_cmd(job, job_str, false) { error.print()?; } } if let Some((job_str, job)) = parsed_line.foreground { let app_discipline_guard = self.set_app_discipline(); match self.execute_cmd(job, job_str, true) { Ok(Some(foreground_id)) => { if let Err(error) = self.wait_on_job(foreground_id) { error.print()?; } } Ok(None) => {} Err(error) => error.print()?, } drop(app_discipline_guard); } Ok(()) } /// Executes a command. fn execute_cmd( &mut self, parsed_job: ParsedJob, job_str: &str, current: bool, ) -> Result<Option<usize>> { let shell_streams = app_io::streams().unwrap(); let stderr = shell_streams.stderr; let mut previous_output = shell_streams.stdin; let mut iter = parsed_job.into_iter().peekable(); let mut task = iter.next(); let mut jobs = self.jobs.lock(); let mut job_id = 1; let mut temp_job = Job { string: job_str.to_owned(), parts: Vec::new(), current, }; loop { match jobs.try_insert(job_id, temp_job) { Ok(_) => break, Err(e) => { temp_job = e.value; } } job_id += 1;
while let Some(ParsedTask { command, args }) = task { if iter.peek().is_none() { if let Some(result) = self.execute_builtin(command, &args) { self.jobs.lock().remove(&job_id); return result.map(|_| None); } else { let streams = IoStreams { // TODO: Technically clone not needed. stdin: previous_output.clone(), stdout: shell_streams.stdout.clone(), stderr: stderr.clone(), discipline: shell_streams.discipline, }; let part = self.resolve_external(command, args, streams, job_id)?; self.jobs.lock().get_mut(&job_id).unwrap().parts.push(part); return Ok(Some(job_id)); } } // TODO: Piped builtin commands. let pipe = Stdio::new(); let streams = IoStreams { stdin: previous_output.clone(), stdout: Arc::new(pipe.get_writer()), stderr: stderr.clone(), discipline: None, }; let part = self.resolve_external(command, args, streams, job_id)?; self.jobs.lock().get_mut(&job_id).unwrap().parts.push(part); previous_output = Arc::new(pipe.get_reader()); task = iter.next(); } unreachable!("called execute_cmd with empty command"); } fn wait_on_job(&mut self, num: usize) -> Result<()> { let jobs = self.jobs.lock(); let Some(job) = jobs.get(&num) else { return Ok(()) }; if !job.current { warn!("asked to wait on non-current job"); return Ok(()); } drop(jobs); self.discipline.clear_events(); let event_receiver = self.discipline.event_receiver(); loop { // TODO: Use async futures::select! loop? if let Ok(event) = event_receiver.try_receive() { match event { Event::CtrlC => { if let Some(mut job) = self.jobs.lock().remove(&num) { job.kill()?; } else { error!("tried to kill a job that doesn't exist"); } return Err(Error::Command(130)); } Event::CtrlD => error!("received ctrl+d event"), Event::CtrlZ => error!("received ctrl+z event"), } } else { let mut jobs = self.jobs.lock(); if let Some(job) = jobs.get_mut(&num) && let Some(exit_value) = job.exit_value() { jobs.remove(&num); return match exit_value { 0 => Ok(()), _ => Err(Error::Command(exit_value)), }; } } scheduler::schedule(); } } fn execute_builtin(&mut self, cmd: &str, args: &[&str]) -> Option<Result<()>> { Some(match cmd { "" => Ok(()), "alias" => self.alias(args), "bg" => self.bg(args), "cd" => self.cd(args), "exec" => self.exec(args), "exit" => self.exit(args), "export" => self.export(args), "fc" => self.fc(args), "fg" => self.fg(args), "getopts" => self.getopts(args), "hash" => self.hash(args), "history" => { self.history(args); Ok(()) } "jobs" => self.jobs(args), "set" => self.set(args), "unalias" => self.unalias(args), "unset" => self.unset(args), "wait" => self.wait(args), _ => return None, }) } fn resolve_external( &self, cmd: &str, args: Vec<&str>, streams: IoStreams, job_id: usize, ) -> Result<JobPart> { let namespace_dir = task::get_my_current_task() .map(|t| t.get_namespace().dir().clone()) .expect("couldn't get namespace dir"); let crate_name = format!("{cmd}-"); let mut matching_files = namespace_dir .get_files_starting_with(&crate_name) .into_iter(); let app_path = match matching_files.next() { Some(f) => Path::new(f.lock().get_absolute_path()), None => return Err(Error::CommandNotFound(cmd.to_owned())), }; if matching_files.next().is_some() { println!("multiple matching files found, running: {app_path}"); } let task = spawn::new_application_task_builder(app_path, None) .map_err(Error::SpawnFailed)? 
.argument(args.into_iter().map(ToOwned::to_owned).collect::<Vec<_>>()) .block() .spawn() .unwrap(); let task_ref = task.clone(); let id = task.id; // TODO: Double arc :( app_io::insert_child_streams(id, streams); task.unblock().map_err(Error::UnblockFailed)?; // Spawn watchdog task. spawn::new_task_builder( move |_| { let task_ref = task.clone(); let exit_value = match task.join().unwrap() { ExitValue::Completed(status) => { match status.downcast_ref::<isize>() { Some(num) => *num, // FIXME: Document/decide on a number for when app doesn't // return isize. None => 210, } } ExitValue::Killed(reason) => match reason { // FIXME: Document/decide on a number. This is used by bash. KillReason::Requested => 130, KillReason::Panic(_) => 1, KillReason::Exception(num) => num.into(), }, }; let mut jobs = self.jobs.lock(); if let Some(mut job) = jobs.remove(&job_id) { for part in job.parts.iter_mut() { if part.task == task_ref { part.state = State::Done(exit_value); break; } } if job.current { jobs.insert(job_id, job); } } }, (), ) .spawn() .map_err(Error::SpawnFailed)?; Ok(JobPart { state: State::Running, task: task_ref, }) } } struct AppDisciplineGuard { discipline: Arc<LineDiscipline>, } impl Drop for AppDisciplineGuard { fn drop(&mut self) { self.discipline.set_raw(); } } #[cfg(test)] mod tests { use super::*; use alloc::vec; #[test] fn test_split_pipes() { assert_eq!( split_pipes("a b c |d e f|g | h | i j"), vec![ ("a", vec!["b", "c"]), ("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![]), ("i", vec!["j"]) ] ); } #[test] fn test_parse_line() { assert_eq!( parse_line("a b|c &d e f|g | h & i j | k"), ParsedLine { background: vec![ vec![("a", vec!["b"]), ("c", vec![])], vec![("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![])], ], foreground: Some(vec![("i", vec!["j"]), ("k", vec![])]), } ); assert_eq!( parse_line("a b|c &d e f|g | h & i j | k& "), ParsedLine { background: vec![ vec![("a", vec!["b"]), ("c", vec![])], vec![("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![])], vec![("i", vec!["j"]), ("k", vec![])] ], foreground: None, } ); } }
}
drop(jobs);
random_line_split
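The crate docs above fix a small vocabulary: a line splits on `&` into jobs, a job splits on `|` into tasks, and a task splits into a command plus arguments. Because this row cuts the file mid-parser, here is a minimal Python sketch of that decomposition under those stated rules; parse_line and parse_task are illustrative names, not the crate's actual parse-module API.

def parse_task(task_str):
    """Split a task into (command, args): 'sleep 1' -> ('sleep', ['1'])."""
    words = task_str.split()
    return (words[0], words[1:]) if words else ('', [])

def parse_line(line):
    """Split a line on '&' into jobs; the last chunk, if non-empty, is the
    foreground job. Each job is then split on '|' into piped tasks."""
    chunks = [c.strip() for c in line.split('&')]
    last = chunks.pop()
    return {
        'background': [[parse_task(t) for t in c.split('|')]
                       for c in chunks if c],
        'foreground': [parse_task(t) for t in last.split('|')] if last else None,
    }

print(parse_line('sleep 1 | sleep 2 & sleep 3'))
# {'background': [[('sleep', ['1']), ('sleep', ['2'])]],
#  'foreground': [('sleep', ['3'])]}

Note that a trailing '&' leaves the final chunk empty, so the foreground comes out as None, matching the second case in test_parse_line above.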
lib.rs
//! `hull` is Theseus's shell for basic interactive systems operations. //! //! Just as the hull is the outermost layer or "shell" of a boat or ship, //! this crate `hull` is the shell of the "Ship of Theseus" (this OS). //! //! Functionally, this is similar to bash, zsh, fish, etc. //! //! This shell will eventually supercede the shell located at //! `applications/shell`. //! //! Terminology used in this file using `sleep 1 | sleep 2 & sleep 3` as an //! example: //! - A line is an entire line of user input i.e. `sleep 1 | sleep 2 & sleep 3`. //! - A task is a subset of a line used to spawn an individual task i.e. `sleep //! 1`, `sleep 2`, and `sleep 3`. //! - A job is a list of piped tasks i.e. `sleep 1 | sleep 2`, and `sleep 3`. //! - A command is the first word in a task i.e. `sleep`. //! - The arguments are any subsequent words in a task i.e. `1`, `2`, and `3`. #![cfg_attr(not(test), no_std)] #![feature(extend_one, let_chains)] extern crate alloc; mod builtin; mod error; mod job; mod parse; mod wrapper; use crate::{ job::{JobPart, State}, parse::{ParsedJob, ParsedLine, ParsedTask}, }; use alloc::{borrow::ToOwned, format, string::String, sync::Arc, vec::Vec}; use app_io::{println, IoStreams}; use core::fmt::Write; use hashbrown::HashMap; use job::Job; use log::{error, warn}; use noline::{builder::EditorBuilder, sync::embedded::IO as Io}; use path::Path; use stdio::Stdio; use sync_block::Mutex; use task::{ExitValue, KillReason}; use tty::{Event, LineDiscipline}; pub use crate::error::{Error, Result}; pub fn main(_: Vec<String>) -> isize { let mut shell = Shell { discipline: app_io::line_discipline().expect("no line discipline"), jobs: Arc::new(Mutex::new(HashMap::new())), stop_order: Vec::new(), history: Vec::new(), }; let result = shell.run(); shell.set_app_discipline(); if let Err(e) = result { println!("{e:?}"); -1 } else { 0 } } pub struct Shell { discipline: Arc<LineDiscipline>, // TODO: Could use a vec-based data structure like Vec<Option<JoinableTaskRef> // Adding a job would iterate over the vec trying to find a None and if it can't, push to the // end. Removing a job would replace the job with None. jobs: Arc<Mutex<HashMap<usize, Job>>>, stop_order: Vec<usize>, history: Vec<String>, } impl Shell { /// Configures the line discipline for use by the shell. fn set_shell_discipline(&self) { self.discipline.set_raw(); } /// Configures the line discipline for use by applications. fn set_app_discipline(&self) -> AppDisciplineGuard { self.discipline.set_sane(); AppDisciplineGuard { discipline: self.discipline.clone(), } } fn run(&mut self) -> Result<()> { self.set_shell_discipline(); let wrapper = wrapper::Wrapper { stdin: app_io::stdin().expect("no stdin"), stdout: app_io::stdout().expect("no stdout"), }; let mut io = Io::new(wrapper); let mut editor = EditorBuilder::new_unbounded() .with_unbounded_history() .build_sync(&mut io) .expect("couldn't instantiate line editor"); loop { editor.dedup_history(); if let Ok(line) = editor.readline("> ", &mut io) { match self.execute_line(line) { Ok(()) => {} Err(Error::ExitRequested) => return Ok(()), Err(e) => return Err(e), }; } else { write!(io, "failed to read line").expect("failed to write output"); } } } fn
(&mut self, line: &str) -> Result<()> { let parsed_line = ParsedLine::from(line); if parsed_line.is_empty() { return Ok(()); } // TODO: Use line editor history. self.history.push(line.to_owned()); for (job_str, job) in parsed_line.background { if let Err(error) = self.execute_cmd(job, job_str, false) { error.print()?; } } if let Some((job_str, job)) = parsed_line.foreground { let app_discipline_guard = self.set_app_discipline(); match self.execute_cmd(job, job_str, true) { Ok(Some(foreground_id)) => { if let Err(error) = self.wait_on_job(foreground_id) { error.print()?; } } Ok(None) => {} Err(error) => error.print()?, } drop(app_discipline_guard); } Ok(()) } /// Executes a command. fn execute_cmd( &mut self, parsed_job: ParsedJob, job_str: &str, current: bool, ) -> Result<Option<usize>> { let shell_streams = app_io::streams().unwrap(); let stderr = shell_streams.stderr; let mut previous_output = shell_streams.stdin; let mut iter = parsed_job.into_iter().peekable(); let mut task = iter.next(); let mut jobs = self.jobs.lock(); let mut job_id = 1; let mut temp_job = Job { string: job_str.to_owned(), parts: Vec::new(), current, }; loop { match jobs.try_insert(job_id, temp_job) { Ok(_) => break, Err(e) => { temp_job = e.value; } } job_id += 1; } drop(jobs); while let Some(ParsedTask { command, args }) = task { if iter.peek().is_none() { if let Some(result) = self.execute_builtin(command, &args) { self.jobs.lock().remove(&job_id); return result.map(|_| None); } else { let streams = IoStreams { // TODO: Technically clone not needed. stdin: previous_output.clone(), stdout: shell_streams.stdout.clone(), stderr: stderr.clone(), discipline: shell_streams.discipline, }; let part = self.resolve_external(command, args, streams, job_id)?; self.jobs.lock().get_mut(&job_id).unwrap().parts.push(part); return Ok(Some(job_id)); } } // TODO: Piped builtin commands. let pipe = Stdio::new(); let streams = IoStreams { stdin: previous_output.clone(), stdout: Arc::new(pipe.get_writer()), stderr: stderr.clone(), discipline: None, }; let part = self.resolve_external(command, args, streams, job_id)?; self.jobs.lock().get_mut(&job_id).unwrap().parts.push(part); previous_output = Arc::new(pipe.get_reader()); task = iter.next(); } unreachable!("called execute_cmd with empty command"); } fn wait_on_job(&mut self, num: usize) -> Result<()> { let jobs = self.jobs.lock(); let Some(job) = jobs.get(&num) else { return Ok(()) }; if !job.current { warn!("asked to wait on non-current job"); return Ok(()); } drop(jobs); self.discipline.clear_events(); let event_receiver = self.discipline.event_receiver(); loop { // TODO: Use async futures::select! loop? 
if let Ok(event) = event_receiver.try_receive() { match event { Event::CtrlC => { if let Some(mut job) = self.jobs.lock().remove(&num) { job.kill()?; } else { error!("tried to kill a job that doesn't exist"); } return Err(Error::Command(130)); } Event::CtrlD => error!("received ctrl+d event"), Event::CtrlZ => error!("received ctrl+z event"), } } else { let mut jobs = self.jobs.lock(); if let Some(job) = jobs.get_mut(&num) && let Some(exit_value) = job.exit_value() { jobs.remove(&num); return match exit_value { 0 => Ok(()), _ => Err(Error::Command(exit_value)), }; } } scheduler::schedule(); } } fn execute_builtin(&mut self, cmd: &str, args: &[&str]) -> Option<Result<()>> { Some(match cmd { "" => Ok(()), "alias" => self.alias(args), "bg" => self.bg(args), "cd" => self.cd(args), "exec" => self.exec(args), "exit" => self.exit(args), "export" => self.export(args), "fc" => self.fc(args), "fg" => self.fg(args), "getopts" => self.getopts(args), "hash" => self.hash(args), "history" => { self.history(args); Ok(()) } "jobs" => self.jobs(args), "set" => self.set(args), "unalias" => self.unalias(args), "unset" => self.unset(args), "wait" => self.wait(args), _ => return None, }) } fn resolve_external( &self, cmd: &str, args: Vec<&str>, streams: IoStreams, job_id: usize, ) -> Result<JobPart> { let namespace_dir = task::get_my_current_task() .map(|t| t.get_namespace().dir().clone()) .expect("couldn't get namespace dir"); let crate_name = format!("{cmd}-"); let mut matching_files = namespace_dir .get_files_starting_with(&crate_name) .into_iter(); let app_path = match matching_files.next() { Some(f) => Path::new(f.lock().get_absolute_path()), None => return Err(Error::CommandNotFound(cmd.to_owned())), }; if matching_files.next().is_some() { println!("multiple matching files found, running: {app_path}"); } let task = spawn::new_application_task_builder(app_path, None) .map_err(Error::SpawnFailed)? .argument(args.into_iter().map(ToOwned::to_owned).collect::<Vec<_>>()) .block() .spawn() .unwrap(); let task_ref = task.clone(); let id = task.id; // TODO: Double arc :( app_io::insert_child_streams(id, streams); task.unblock().map_err(Error::UnblockFailed)?; // Spawn watchdog task. spawn::new_task_builder( move |_| { let task_ref = task.clone(); let exit_value = match task.join().unwrap() { ExitValue::Completed(status) => { match status.downcast_ref::<isize>() { Some(num) => *num, // FIXME: Document/decide on a number for when app doesn't // return isize. None => 210, } } ExitValue::Killed(reason) => match reason { // FIXME: Document/decide on a number. This is used by bash. 
KillReason::Requested => 130, KillReason::Panic(_) => 1, KillReason::Exception(num) => num.into(), }, }; let mut jobs = self.jobs.lock(); if let Some(mut job) = jobs.remove(&job_id) { for part in job.parts.iter_mut() { if part.task == task_ref { part.state = State::Done(exit_value); break; } } if job.current { jobs.insert(job_id, job); } } }, (), ) .spawn() .map_err(Error::SpawnFailed)?; Ok(JobPart { state: State::Running, task: task_ref, }) } } struct AppDisciplineGuard { discipline: Arc<LineDiscipline>, } impl Drop for AppDisciplineGuard { fn drop(&mut self) { self.discipline.set_raw(); } } #[cfg(test)] mod tests { use super::*; use alloc::vec; #[test] fn test_split_pipes() { assert_eq!( split_pipes("a b c |d e f|g | h | i j"), vec![ ("a", vec!["b", "c"]), ("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![]), ("i", vec!["j"]) ] ); } #[test] fn test_parse_line() { assert_eq!( parse_line("a b|c &d e f|g | h & i j | k"), ParsedLine { background: vec![ vec![("a", vec!["b"]), ("c", vec![])], vec![("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![])], ], foreground: Some(vec![("i", vec!["j"]), ("k", vec![])]), } ); assert_eq!( parse_line("a b|c &d e f|g | h & i j | k& "), ParsedLine { background: vec![ vec![("a", vec!["b"]), ("c", vec![])], vec![("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![])], vec![("i", vec!["j"]), ("k", vec![])] ], foreground: None, } ); } }
execute_line
identifier_name
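In execute_cmd, job ids are allocated by probing the jobs map from 1 upward until try_insert succeeds, so ids freed by finished jobs are recycled for new ones. A rough Python analogue of that loop, with a plain dict standing in for the Mutex<HashMap<usize, Job>> (the string payloads are just examples):

def insert_job(jobs, job):
    """Insert `job` under the smallest unused positive integer id."""
    job_id = 1
    while job_id in jobs:  # stands in for try_insert failing on occupied keys
        job_id += 1
    jobs[job_id] = job
    return job_id

jobs = {1: 'sleep 10', 3: 'cat | wc'}
print(insert_job(jobs, 'sleep 1 | sleep 2'))  # 2 -- the first gap is reused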
leaflet.js
import * as d3 from 'd3'; var L = require('leaflet'); import * as topojson from 'topojson'; /** * Leaflet Chart * Creates a map using Leaflet.js */ export default function (config, helper) { var Leaflet = Object.create(helper); Leaflet.init = function (config) { const vm = this; vm._config = config ? config : {}; vm._scales = {}; vm._axes = {}; vm._data = []; vm._scales.color = d3 .scaleQuantize() .range(['#fee5d9', '#fcae91', '#fb6a4a', '#de2d26', '#a50f15']); }; Leaflet.id = function (col) { var vm = this; vm._config.id = col; return vm; }; Leaflet.fill = function (col) { var vm = this; vm._config.fill = col; return vm; }; Leaflet.opacity = function (value) { var vm = this; vm._config.opacity = value; return vm; }; Leaflet.colors = function (colors) { var vm = this; vm._config.colors = colors; if (colors && Array.isArray(colors)) { vm._scales.color.range(colors); } else if (typeof colors === 'function') { vm._scales.color = colors; } return vm; }; Leaflet.colorLegend = function (legendTitle) { var vm = this; vm._config.legendTitle = legendTitle; return vm; }; // ------------------------------- // Triggered by chart.js; Leaflet.data = function (data) { var vm = this; vm._topojson = data[1] ? data[1] : false; //Topojson data = data[0]; //User data if (vm._config.data.filter) { data = data.filter(vm._config.data.filter); } vm._data = data; //vm._quantiles = vm._setQuantile(data); vm._minMax = d3.extent(data, function (d) { return +d[vm._config.fill]; }); vm._scales.color.domain(vm._minMax); var objects = vm._config.map.topojson.objects; vm._nodes = []; if (Array.isArray(objects)) { for (let idx = 0; idx < objects.length; idx++) { const obj = objects[idx]; vm._topojson.objects[obj].geometries.forEach(function (geom) { geom.id = vm._config.map.topojson.parser(geom); var found = vm._data.filter((o) => o[vm._config.id] == geom.id)[0]; if (found) { geom.properties[vm._config.fill] = found[vm._config.fill]; } vm._nodes.push(geom); }); } } else if (objects) { vm._topojson.objects[objects].geometries.forEach(function (geom) { geom.id = vm._config.map.topojson.parser(geom); var found = vm._data.filter((o) => o[vm._config.id] == geom.id)[0]; if (found) { geom.properties[vm._config.fill] = found[vm._config.fill]; } vm._nodes.push(geom); }); } // vm._config.map.min = vm._minMax[0]; // vm._config.map.max = vm._minMax[1]; return vm; }; Leaflet.scales = function () { var vm = this; return vm; }; Leaflet.drawColorLegend = function () { var vm = this; var range = vm._scales.color.range().length; var step = (vm._minMax[1] - vm._minMax[0]) / (range - 1); var domain = vm._config.colors; var quantilePosition = d3 .scaleBand() .rangeRound([vm._config.size.height * 0.8, 0]) .domain(domain); //Add gradient legend //defaults to right position var legend = d3 .select('#' + vm._config.bindTo) .append('svg') .attr('width', 120) .attr('height', vm._config.size.height) .style('z-index', 401) .style('position', 'absolute') .style('top', '4px') .style('right', '2px') .append('g') .attr('class', 'legend quantized') .attr('transform', 'translate(50,25)'); // legend background legend .append('rect') .attr('x', -50) .attr('y', -35) .attr('width', 100) .attr('height', vm._config.size.height - 10) .attr('rx', 10) .attr('ry', 10) .attr('class', 'legend-background') .attr('fill', 'rgba(255,255,255,0.6)'); // legend title legend .append('text') .attr('x', 0) .attr('y', -12) .attr('class', 'legend-title') .attr('text-anchor', 'middle') .text(vm._config.legendTitle); var quantiles = legend .selectAll('.quantile') 
.data(vm._config.colors) .enter() .append('g') .attr('class', 'quantile') .attr('transform', function (d) { return 'translate(-20, ' + quantilePosition(d) + ')'; }); // Rect quantiles .append('rect') .attr('x', -10) .attr('y', 0) .attr('width', 18) .attr('height', quantilePosition.bandwidth()) .attr('fill', function (d) { return d; }); //top text is the max value quantiles .append('text') .attr('x', 17) .attr('y', 5) .attr('class', 'top-label') .attr('text-anchor', 'left') .text(function (d) { let max = vm._scales.color.invertExtent(d)[1]; if (vm._config.legendTitle === 'Porcentaje' && max > 100) { max = 100; } if (vm._config.map.formatLegend) { return vm._config.map.formatLegend(max); } else { return vm.utils.format()(max); } }); //bottom text is the min value quantiles .append('text') .attr('x', 17) .attr('y', vm._config.size.height / 5 - 11) .attr('class', 'bottom-label') .attr('text-anchor', 'left') .text(function (d, i) { if (i === 0) { let min = vm._scales.color.invertExtent(d)[0]; if (vm._config.map.formatLegend) { return vm._config.map.formatLegend(min); } else { return vm.utils.format()(min); } } else { return ''; } }); }; Leaflet.draw = function () { var vm = this; var urlTopojson = vm._config.map.topojson.url; var objects = vm._config.map.topojson.objects; //'states' var tran = vm._config.map.topojson.translate; //var tran = [2580, 700]; var scale = vm._config.map.topojson.scale; //1300 var parser = vm._config.map.topojson.parser; var id = vm._config.map.topojson.id; L.TopoJSON = L.GeoJSON.extend({ addData: function (jsonData) { var geojson, key; if (jsonData.type === 'Topology') { if (objects) { if (Array.isArray(objects)) { for (let idx = 0; idx < objects.length; idx++) { const obj = objects[idx]; geojson = topojson.feature(jsonData, jsonData.objects[obj]); L.GeoJSON.prototype.addData.call(this, geojson); } } else { geojson = topojson.feature(jsonData, jsonData.objects[objects]); L.GeoJSON.prototype.addData.call(this, geojson); } } else { for (key in jsonData.objects) { geojson = topojson.feature(jsonData, jsonData.objects[key]); L.GeoJSON.prototype.addData.call(this, geojson); } } } else { L.GeoJSON.prototype.addData.call(this, jsonData); } }, }); var LatLng = { lat: 25.5629994, lon: -100.6405644, }; if ( vm._config.map.topojson.center && vm._config.map.topojson.center.length === 2 ) { LatLng.lat = vm._config.map.topojson.center[0]; LatLng.lon = vm._config.map.topojson.center[1]; } var bounds = new L.LatLngBounds( new L.LatLng(LatLng.lat + 5, LatLng.lon - 5), new L.LatLng(LatLng.lat - 5, LatLng.lon + 5) ); vm._map = new L.Map(vm._config.bindTo, { center: bounds.getCenter(), zoom: vm._config.map.topojson.zoom || 7, maxZoom: vm._config.map.topojson.maxZoom || 10, minZoom: vm._config.map.topojson.minZoom || 3, maxBounds: bounds, maxBoundsViscosity: 1.0, }); var mapTiles = L.tileLayer( 'http://b.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png', { attribution: '&copy; <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a>', } ); var topoLayer = new L.TopoJSON(); mapTiles.addTo(vm._map); addTopoData(vm._topojson); // vm._map.on('zoomend', function() { // d3.selectAll('.dbox-label').remove(); // Object.values(vm._map._layers) // .filter(obj => obj.feature) // .forEach(function(layer) { // vm.drawLabel(layer); // }); // }); function addTopoData(topoData) { topoLayer.addData(topoData); topoLayer.addTo(vm._map); topoLayer.eachLayer(handleLayer); } var tip = vm.utils.d3.tip().html( vm._config.tip ? 
vm._config.tip.bind(this) : function (d) { let html = '<div class="d3-tip" style="z-index: 99999;"><span>' + (d.feature.properties.NOM_ENT || d.feature.properties.NOM_MUN) + '</span><br/><span>' + vm.utils.format()(d.feature.properties[vm._config.fill]) + '</span></div>'; return html; } ); d3.select('#' + vm._config.bindTo) .select('svg.leaflet-zoom-animated') .call(tip); /** * Set each layer * @param {obj} layer */ function handleLayer(layer)
function enterLayer(layer) { tip.show(layer, d3.select(layer._path).node()); } function leaveLayer(layer) { tip.hide(layer); } /** * Draw Legend */ if (typeof vm._config.legend === 'function') { vm._config.legend.call(this, vm._nodes); } Leaflet.drawColorLegend(); return vm; }; /** * Add labels for each path (layer) to display value */ Leaflet.drawLabel = function (layer) { const vm = this; const props = layer.feature.properties; const path = d3.select(layer._path).node(); const bbox = path.getBBox(); var svg = d3 .select('#' + vm._config.bindTo) .select('svg.leaflet-zoom-animated'); if (props[vm._config.fill] !== undefined) { svg .append('text') .attr('class', 'dbox-label') .attr('x', bbox.x + bbox.width / 2) .attr('y', bbox.y + d3.min([bbox.height / 2, 30])) .attr('text-anchor', 'middle') .text( `${ props[vm._config.poly_name] ? props[vm._config.poly_name] + ': ' : '' } ${vm.utils.format()(props[vm._config.fill])}` ); } return vm; }; Leaflet.init(config); return Leaflet; }
{ var value = layer.feature.properties[vm._config.fill]; if (!value) { // Remove polygons without data /** @todo validate what to do with NA's */ d3.select(layer._path).remove(); } else { var fillColor = vm._scales.color(value); layer.setStyle({ fillColor: fillColor, fillOpacity: vm._config.opacity || 0.7, color: '#555', weight: 1, opacity: 0.5, }); vm.drawLabel(layer); layer.on({ mouseover: function () { enterLayer(layer); }, mouseout: function () { leaveLayer(layer); }, }); } }
identifier_body
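drawColorLegend sizes each swatch from the color range and labels it with scaleQuantize's invertExtent. A quantize scale slices the continuous [min, max] domain into as many equal-width bands as the range has colors; the Python sketch below mimics the two d3 calls used in the legend (quantize and invert_extent are hand-rolled stand-ins, not a d3 binding):

colors = ['#fee5d9', '#fcae91', '#fb6a4a', '#de2d26', '#a50f15']

def quantize(value, domain, rng):
    """Map a value in [lo, hi] onto one of len(rng) equal-width bands."""
    lo, hi = domain
    step = (hi - lo) / len(rng)
    i = min(int((value - lo) / step), len(rng) - 1)
    return rng[i]

def invert_extent(color, domain, rng):
    """Value band [lo, hi) mapping to `color` -- what the legend labels show."""
    lo, hi = domain
    step = (hi - lo) / len(rng)
    i = rng.index(color)
    return (lo + i * step, lo + (i + 1) * step)

print(quantize(42.0, (0.0, 100.0), colors))            # '#fb6a4a'
print(invert_extent('#fb6a4a', (0.0, 100.0), colors))  # (40.0, 60.0)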
leaflet.js
import * as d3 from 'd3'; var L = require('leaflet'); import * as topojson from 'topojson'; /** * Leaflet Chart * Creates a map using Leaflet.js */ export default function (config, helper) { var Leaflet = Object.create(helper); Leaflet.init = function (config) { const vm = this; vm._config = config ? config : {}; vm._scales = {}; vm._axes = {}; vm._data = []; vm._scales.color = d3 .scaleQuantize() .range(['#fee5d9', '#fcae91', '#fb6a4a', '#de2d26', '#a50f15']); }; Leaflet.id = function (col) { var vm = this; vm._config.id = col; return vm; }; Leaflet.fill = function (col) { var vm = this; vm._config.fill = col; return vm; }; Leaflet.opacity = function (value) { var vm = this; vm._config.opacity = value; return vm; }; Leaflet.colors = function (colors) { var vm = this; vm._config.colors = colors; if (colors && Array.isArray(colors)) { vm._scales.color.range(colors); } else if (typeof colors === 'function') { vm._scales.color = colors; } return vm; }; Leaflet.colorLegend = function (legendTitle) { var vm = this; vm._config.legendTitle = legendTitle; return vm; }; // ------------------------------- // Triggered by chart.js; Leaflet.data = function (data) { var vm = this; vm._topojson = data[1] ? data[1] : false; //Topojson data = data[0]; //User data if (vm._config.data.filter) { data = data.filter(vm._config.data.filter); } vm._data = data; //vm._quantiles = vm._setQuantile(data); vm._minMax = d3.extent(data, function (d) { return +d[vm._config.fill]; }); vm._scales.color.domain(vm._minMax); var objects = vm._config.map.topojson.objects; vm._nodes = []; if (Array.isArray(objects)) { for (let idx = 0; idx < objects.length; idx++) { const obj = objects[idx]; vm._topojson.objects[obj].geometries.forEach(function (geom) { geom.id = vm._config.map.topojson.parser(geom); var found = vm._data.filter((o) => o[vm._config.id] == geom.id)[0]; if (found) { geom.properties[vm._config.fill] = found[vm._config.fill]; } vm._nodes.push(geom); }); } } else if (objects) { vm._topojson.objects[objects].geometries.forEach(function (geom) { geom.id = vm._config.map.topojson.parser(geom); var found = vm._data.filter((o) => o[vm._config.id] == geom.id)[0]; if (found) { geom.properties[vm._config.fill] = found[vm._config.fill]; } vm._nodes.push(geom); }); } // vm._config.map.min = vm._minMax[0]; // vm._config.map.max = vm._minMax[1]; return vm; }; Leaflet.scales = function () { var vm = this; return vm; }; Leaflet.drawColorLegend = function () { var vm = this; var range = vm._scales.color.range().length; var step = (vm._minMax[1] - vm._minMax[0]) / (range - 1); var domain = vm._config.colors; var quantilePosition = d3 .scaleBand() .rangeRound([vm._config.size.height * 0.8, 0]) .domain(domain); //Add gradient legend //defaults to right position var legend = d3 .select('#' + vm._config.bindTo) .append('svg') .attr('width', 120) .attr('height', vm._config.size.height) .style('z-index', 401) .style('position', 'absolute') .style('top', '4px') .style('right', '2px') .append('g') .attr('class', 'legend quantized') .attr('transform', 'translate(50,25)'); // legend background legend .append('rect') .attr('x', -50) .attr('y', -35) .attr('width', 100) .attr('height', vm._config.size.height - 10) .attr('rx', 10) .attr('ry', 10) .attr('class', 'legend-background') .attr('fill', 'rgba(255,255,255,0.6)'); // legend title legend .append('text') .attr('x', 0) .attr('y', -12) .attr('class', 'legend-title') .attr('text-anchor', 'middle') .text(vm._config.legendTitle); var quantiles = legend .selectAll('.quantile') 
.data(vm._config.colors) .enter() .append('g') .attr('class', 'quantile') .attr('transform', function (d) { return 'translate(-20, ' + quantilePosition(d) + ')'; }); // Rect quantiles .append('rect') .attr('x', -10) .attr('y', 0) .attr('width', 18) .attr('height', quantilePosition.bandwidth()) .attr('fill', function (d) { return d; }); //top text is the max value quantiles .append('text') .attr('x', 17) .attr('y', 5) .attr('class', 'top-label') .attr('text-anchor', 'left') .text(function (d) { let max = vm._scales.color.invertExtent(d)[1]; if (vm._config.legendTitle === 'Porcentaje' && max > 100) { max = 100; } if (vm._config.map.formatLegend) { return vm._config.map.formatLegend(max); } else { return vm.utils.format()(max); } }); //bottom text is the min value quantiles .append('text') .attr('x', 17) .attr('y', vm._config.size.height / 5 - 11) .attr('class', 'bottom-label') .attr('text-anchor', 'left') .text(function (d, i) { if (i === 0) { let min = vm._scales.color.invertExtent(d)[0]; if (vm._config.map.formatLegend) { return vm._config.map.formatLegend(min); } else { return vm.utils.format()(min); } } else { return ''; } }); }; Leaflet.draw = function () { var vm = this; var urlTopojson = vm._config.map.topojson.url; var objects = vm._config.map.topojson.objects; //'states' var tran = vm._config.map.topojson.translate; //var tran = [2580, 700]; var scale = vm._config.map.topojson.scale; //1300 var parser = vm._config.map.topojson.parser; var id = vm._config.map.topojson.id; L.TopoJSON = L.GeoJSON.extend({ addData: function (jsonData) { var geojson, key; if (jsonData.type === 'Topology') { if (objects) { if (Array.isArray(objects)) { for (let idx = 0; idx < objects.length; idx++) { const obj = objects[idx]; geojson = topojson.feature(jsonData, jsonData.objects[obj]); L.GeoJSON.prototype.addData.call(this, geojson); } } else { geojson = topojson.feature(jsonData, jsonData.objects[objects]); L.GeoJSON.prototype.addData.call(this, geojson); } } else { for (key in jsonData.objects) { geojson = topojson.feature(jsonData, jsonData.objects[key]); L.GeoJSON.prototype.addData.call(this, geojson); } } } else { L.GeoJSON.prototype.addData.call(this, jsonData); } }, }); var LatLng = { lat: 25.5629994, lon: -100.6405644, }; if ( vm._config.map.topojson.center && vm._config.map.topojson.center.length === 2 ) { LatLng.lat = vm._config.map.topojson.center[0]; LatLng.lon = vm._config.map.topojson.center[1]; } var bounds = new L.LatLngBounds( new L.LatLng(LatLng.lat + 5, LatLng.lon - 5), new L.LatLng(LatLng.lat - 5, LatLng.lon + 5) ); vm._map = new L.Map(vm._config.bindTo, { center: bounds.getCenter(), zoom: vm._config.map.topojson.zoom || 7, maxZoom: vm._config.map.topojson.maxZoom || 10, minZoom: vm._config.map.topojson.minZoom || 3, maxBounds: bounds, maxBoundsViscosity: 1.0, }); var mapTiles = L.tileLayer( 'http://b.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png', { attribution: '&copy; <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a>', } ); var topoLayer = new L.TopoJSON(); mapTiles.addTo(vm._map); addTopoData(vm._topojson); // vm._map.on('zoomend', function() { // d3.selectAll('.dbox-label').remove(); // Object.values(vm._map._layers) // .filter(obj => obj.feature) // .forEach(function(layer) { // vm.drawLabel(layer); // }); // }); function addTopoData(topoData) { topoLayer.addData(topoData); topoLayer.addTo(vm._map); topoLayer.eachLayer(handleLayer); } var tip = vm.utils.d3.tip().html( vm._config.tip ? 
vm._config.tip.bind(this) : function (d) { let html = '<div class="d3-tip" style="z-index: 99999;"><span>' + (d.feature.properties.NOM_ENT || d.feature.properties.NOM_MUN) + '</span><br/><span>' + vm.utils.format()(d.feature.properties[vm._config.fill]) + '</span></div>'; return html; } ); d3.select('#' + vm._config.bindTo) .select('svg.leaflet-zoom-animated') .call(tip); /** * Set each layer * @param {obj} layer */ function
(layer) { var value = layer.feature.properties[vm._config.fill]; if (!value) { // Remove polygons without data /** @todo validate what to do with NA's */ d3.select(layer._path).remove(); } else { var fillColor = vm._scales.color(value); layer.setStyle({ fillColor: fillColor, fillOpacity: vm._config.opacity || 0.7, color: '#555', weight: 1, opacity: 0.5, }); vm.drawLabel(layer); layer.on({ mouseover: function () { enterLayer(layer); }, mouseout: function () { leaveLayer(layer); }, }); } } function enterLayer(layer) { tip.show(layer, d3.select(layer._path).node()); } function leaveLayer(layer) { tip.hide(layer); } /** * Draw Legend */ if (typeof vm._config.legend === 'function') { vm._config.legend.call(this, vm._nodes); } Leaflet.drawColorLegend(); return vm; }; /** * Add labels for each path (layer) to display value */ Leaflet.drawLabel = function (layer) { const vm = this; const props = layer.feature.properties; const path = d3.select(layer._path).node(); const bbox = path.getBBox(); var svg = d3 .select('#' + vm._config.bindTo) .select('svg.leaflet-zoom-animated'); if (props[vm._config.fill] !== undefined) { svg .append('text') .attr('class', 'dbox-label') .attr('x', bbox.x + bbox.width / 2) .attr('y', bbox.y + d3.min([bbox.height / 2, 30])) .attr('text-anchor', 'middle') .text( `${ props[vm._config.poly_name] ? props[vm._config.poly_name] + ': ' : '' } ${vm.utils.format()(props[vm._config.fill])}` ); } return vm; }; Leaflet.init(config); return Leaflet; }
handleLayer
identifier_name
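handleLayer removes any layer whose fill value is falsy, which throws away legitimate zero values alongside real NAs -- exactly what the /** @todo */ in its body flags. A short Python illustration of the difference between the falsy test and an explicit missing-value test (the feature dicts are invented for the example):

features = [
    {'id': 'A', 'value': 12.5},
    {'id': 'B', 'value': 0.0},  # real observation, but falsy
    {'id': 'C'},                # genuinely missing
]

dropped_falsy = [f['id'] for f in features if not f.get('value')]
dropped_none = [f['id'] for f in features if f.get('value') is None]
print(dropped_falsy)  # ['B', 'C'] -- the current check removes the zero too
print(dropped_none)   # ['C']      -- only the truly missing polygon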
leaflet.js
import * as d3 from 'd3'; var L = require('leaflet'); import * as topojson from 'topojson'; /** * Leaflet Chart * Creates a map using Leaflet.js */ export default function (config, helper) { var Leaflet = Object.create(helper); Leaflet.init = function (config) { const vm = this; vm._config = config ? config : {}; vm._scales = {}; vm._axes = {}; vm._data = []; vm._scales.color = d3 .scaleQuantize() .range(['#fee5d9', '#fcae91', '#fb6a4a', '#de2d26', '#a50f15']); }; Leaflet.id = function (col) { var vm = this; vm._config.id = col; return vm; }; Leaflet.fill = function (col) { var vm = this; vm._config.fill = col; return vm; }; Leaflet.opacity = function (value) { var vm = this; vm._config.opacity = value; return vm; }; Leaflet.colors = function (colors) { var vm = this; vm._config.colors = colors; if (colors && Array.isArray(colors)) { vm._scales.color.range(colors); } else if (typeof colors === 'function') { vm._scales.color = colors; } return vm; };
return vm; }; // ------------------------------- // Triggered by chart.js; Leaflet.data = function (data) { var vm = this; vm._topojson = data[1] ? data[1] : false; //Topojson data = data[0]; //User data if (vm._config.data.filter) { data = data.filter(vm._config.data.filter); } vm._data = data; //vm._quantiles = vm._setQuantile(data); vm._minMax = d3.extent(data, function (d) { return +d[vm._config.fill]; }); vm._scales.color.domain(vm._minMax); var objects = vm._config.map.topojson.objects; vm._nodes = []; if (Array.isArray(objects)) { for (let idx = 0; idx < objects.length; idx++) { const obj = objects[idx]; vm._topojson.objects[obj].geometries.forEach(function (geom) { geom.id = vm._config.map.topojson.parser(geom); var found = vm._data.filter((o) => o[vm._config.id] == geom.id)[0]; if (found) { geom.properties[vm._config.fill] = found[vm._config.fill]; } vm._nodes.push(geom); }); } } else if (objects) { vm._topojson.objects[objects].geometries.forEach(function (geom) { geom.id = vm._config.map.topojson.parser(geom); var found = vm._data.filter((o) => o[vm._config.id] == geom.id)[0]; if (found) { geom.properties[vm._config.fill] = found[vm._config.fill]; } vm._nodes.push(geom); }); } // vm._config.map.min = vm._minMax[0]; // vm._config.map.max = vm._minMax[1]; return vm; }; Leaflet.scales = function () { var vm = this; return vm; }; Leaflet.drawColorLegend = function () { var vm = this; var range = vm._scales.color.range().length; var step = (vm._minMax[1] - vm._minMax[0]) / (range - 1); var domain = vm._config.colors; var quantilePosition = d3 .scaleBand() .rangeRound([vm._config.size.height * 0.8, 0]) .domain(domain); //Add gradient legend //defaults to right position var legend = d3 .select('#' + vm._config.bindTo) .append('svg') .attr('width', 120) .attr('height', vm._config.size.height) .style('z-index', 401) .style('position', 'absolute') .style('top', '4px') .style('right', '2px') .append('g') .attr('class', 'legend quantized') .attr('transform', 'translate(50,25)'); // legend background legend .append('rect') .attr('x', -50) .attr('y', -35) .attr('width', 100) .attr('height', vm._config.size.height - 10) .attr('rx', 10) .attr('ry', 10) .attr('class', 'legend-background') .attr('fill', 'rgba(255,255,255,0.6)'); // legend title legend .append('text') .attr('x', 0) .attr('y', -12) .attr('class', 'legend-title') .attr('text-anchor', 'middle') .text(vm._config.legendTitle); var quantiles = legend .selectAll('.quantile') .data(vm._config.colors) .enter() .append('g') .attr('class', 'quantile') .attr('transform', function (d) { return 'translate(-20, ' + quantilePosition(d) + ')'; }); // Rect quantiles .append('rect') .attr('x', -10) .attr('y', 0) .attr('width', 18) .attr('height', quantilePosition.bandwidth()) .attr('fill', function (d) { return d; }); //top text is the max value quantiles .append('text') .attr('x', 17) .attr('y', 5) .attr('class', 'top-label') .attr('text-anchor', 'left') .text(function (d) { let max = vm._scales.color.invertExtent(d)[1]; if (vm._config.legendTitle === 'Porcentaje' && max > 100) { max = 100; } if (vm._config.map.formatLegend) { return vm._config.map.formatLegend(max); } else { return vm.utils.format()(max); } }); //bottom text is the min value quantiles .append('text') .attr('x', 17) .attr('y', vm._config.size.height / 5 - 11) .attr('class', 'bottom-label') .attr('text-anchor', 'left') .text(function (d, i) { if (i === 0) { let min = vm._scales.color.invertExtent(d)[0]; if (vm._config.map.formatLegend) { return vm._config.map.formatLegend(min); } 
else { return vm.utils.format()(min); } } else { return ''; } }); }; Leaflet.draw = function () { var vm = this; var urlTopojson = vm._config.map.topojson.url; var objects = vm._config.map.topojson.objects; //'states' var tran = vm._config.map.topojson.translate; //var tran = [2580, 700]; var scale = vm._config.map.topojson.scale; //1300 var parser = vm._config.map.topojson.parser; var id = vm._config.map.topojson.id; L.TopoJSON = L.GeoJSON.extend({ addData: function (jsonData) { var geojson, key; if (jsonData.type === 'Topology') { if (objects) { if (Array.isArray(objects)) { for (let idx = 0; idx < objects.length; idx++) { const obj = objects[idx]; geojson = topojson.feature(jsonData, jsonData.objects[obj]); L.GeoJSON.prototype.addData.call(this, geojson); } } else { geojson = topojson.feature(jsonData, jsonData.objects[objects]); L.GeoJSON.prototype.addData.call(this, geojson); } } else { for (key in jsonData.objects) { geojson = topojson.feature(jsonData, jsonData.objects[key]); L.GeoJSON.prototype.addData.call(this, geojson); } } } else { L.GeoJSON.prototype.addData.call(this, jsonData); } }, }); var LatLng = { lat: 25.5629994, lon: -100.6405644, }; if ( vm._config.map.topojson.center && vm._config.map.topojson.center.length === 2 ) { LatLng.lat = vm._config.map.topojson.center[0]; LatLng.lon = vm._config.map.topojson.center[1]; } var bounds = new L.LatLngBounds( new L.LatLng(LatLng.lat + 5, LatLng.lon - 5), new L.LatLng(LatLng.lat - 5, LatLng.lon + 5) ); vm._map = new L.Map(vm._config.bindTo, { center: bounds.getCenter(), zoom: vm._config.map.topojson.zoom || 7, maxZoom: vm._config.map.topojson.maxZoom || 10, minZoom: vm._config.map.topojson.minZoom || 3, maxBounds: bounds, maxBoundsViscosity: 1.0, }); var mapTiles = L.tileLayer( 'http://b.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png', { attribution: '&copy; <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a>', } ); var topoLayer = new L.TopoJSON(); mapTiles.addTo(vm._map); addTopoData(vm._topojson); // vm._map.on('zoomend', function() { // d3.selectAll('.dbox-label').remove(); // Object.values(vm._map._layers) // .filter(obj => obj.feature) // .forEach(function(layer) { // vm.drawLabel(layer); // }); // }); function addTopoData(topoData) { topoLayer.addData(topoData); topoLayer.addTo(vm._map); topoLayer.eachLayer(handleLayer); } var tip = vm.utils.d3.tip().html( vm._config.tip ? 
vm._config.tip.bind(this) : function (d) { let html = '<div class="d3-tip" style="z-index: 99999;"><span>' + (d.feature.properties.NOM_ENT || d.feature.properties.NOM_MUN) + '</span><br/><span>' + vm.utils.format()(d.feature.properties[vm._config.fill]) + '</span></div>'; return html; } ); d3.select('#' + vm._config.bindTo) .select('svg.leaflet-zoom-animated') .call(tip); /** * Set each layer * @param {obj} layer */ function handleLayer(layer) { var value = layer.feature.properties[vm._config.fill]; if (!value) { // Remove polygons without data /** @todo validate what to do with NA's */ d3.select(layer._path).remove(); } else { var fillColor = vm._scales.color(value); layer.setStyle({ fillColor: fillColor, fillOpacity: vm._config.opacity || 0.7, color: '#555', weight: 1, opacity: 0.5, }); vm.drawLabel(layer); layer.on({ mouseover: function () { enterLayer(layer); }, mouseout: function () { leaveLayer(layer); }, }); } } function enterLayer(layer) { tip.show(layer, d3.select(layer._path).node()); } function leaveLayer(layer) { tip.hide(layer); } /** * Draw Legend */ if (typeof vm._config.legend === 'function') { vm._config.legend.call(this, vm._nodes); } Leaflet.drawColorLegend(); return vm; }; /** * Add labels for each path (layer) to display value */ Leaflet.drawLabel = function (layer) { const vm = this; const props = layer.feature.properties; const path = d3.select(layer._path).node(); const bbox = path.getBBox(); var svg = d3 .select('#' + vm._config.bindTo) .select('svg.leaflet-zoom-animated'); if (props[vm._config.fill] !== undefined) { svg .append('text') .attr('class', 'dbox-label') .attr('x', bbox.x + bbox.width / 2) .attr('y', bbox.y + d3.min([bbox.height / 2, 30])) .attr('text-anchor', 'middle') .text( `${ props[vm._config.poly_name] ? props[vm._config.poly_name] + ': ' : '' } ${vm.utils.format()(props[vm._config.fill])}` ); } return vm; }; Leaflet.init(config); return Leaflet; }
Leaflet.colorLegend = function (legendTitle) {
  var vm = this;
  vm._config.legendTitle = legendTitle;
random_line_split
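Leaflet.data joins the user rows onto the TopoJSON geometries by id and copies the fill column into each geometry's properties. The original scans the whole data array once per geometry via filter()[0]; the Python sketch below performs the same join after building an index first, which is a single pass over the rows (the 'state' and 'cases' field names are invented for the example):

rows = [{'state': 'MX-NLE', 'cases': 120}, {'state': 'MX-COA', 'cases': 45}]
geometries = [{'id': 'MX-NLE', 'properties': {}},
              {'id': 'MX-COA', 'properties': {}},
              {'id': 'MX-TAM', 'properties': {}}]  # no matching row

by_id = {r['state']: r for r in rows}  # O(1) lookups instead of filter()[0]
for geom in geometries:
    row = by_id.get(geom['id'])
    if row:
        geom['properties']['cases'] = row['cases']

print(geometries[0])  # {'id': 'MX-NLE', 'properties': {'cases': 120}}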
leaflet.js
import * as d3 from 'd3'; var L = require('leaflet'); import * as topojson from 'topojson'; /** * Leaflet Chart * Creates a map using Leaflet.js */ export default function (config, helper) { var Leaflet = Object.create(helper); Leaflet.init = function (config) { const vm = this; vm._config = config ? config : {}; vm._scales = {}; vm._axes = {}; vm._data = []; vm._scales.color = d3 .scaleQuantize() .range(['#fee5d9', '#fcae91', '#fb6a4a', '#de2d26', '#a50f15']); }; Leaflet.id = function (col) { var vm = this; vm._config.id = col; return vm; }; Leaflet.fill = function (col) { var vm = this; vm._config.fill = col; return vm; }; Leaflet.opacity = function (value) { var vm = this; vm._config.opacity = value; return vm; }; Leaflet.colors = function (colors) { var vm = this; vm._config.colors = colors; if (colors && Array.isArray(colors)) { vm._scales.color.range(colors); } else if (typeof colors === 'function') { vm._scales.color = colors; } return vm; }; Leaflet.colorLegend = function (legendTitle) { var vm = this; vm._config.legendTitle = legendTitle; return vm; }; // ------------------------------- // Triggered by chart.js; Leaflet.data = function (data) { var vm = this; vm._topojson = data[1] ? data[1] : false; //Topojson data = data[0]; //User data if (vm._config.data.filter) { data = data.filter(vm._config.data.filter); } vm._data = data; //vm._quantiles = vm._setQuantile(data); vm._minMax = d3.extent(data, function (d) { return +d[vm._config.fill]; }); vm._scales.color.domain(vm._minMax); var objects = vm._config.map.topojson.objects; vm._nodes = []; if (Array.isArray(objects)) { for (let idx = 0; idx < objects.length; idx++) { const obj = objects[idx]; vm._topojson.objects[obj].geometries.forEach(function (geom) { geom.id = vm._config.map.topojson.parser(geom); var found = vm._data.filter((o) => o[vm._config.id] == geom.id)[0]; if (found) { geom.properties[vm._config.fill] = found[vm._config.fill]; } vm._nodes.push(geom); }); } } else if (objects) { vm._topojson.objects[objects].geometries.forEach(function (geom) { geom.id = vm._config.map.topojson.parser(geom); var found = vm._data.filter((o) => o[vm._config.id] == geom.id)[0]; if (found) { geom.properties[vm._config.fill] = found[vm._config.fill]; } vm._nodes.push(geom); }); } // vm._config.map.min = vm._minMax[0]; // vm._config.map.max = vm._minMax[1]; return vm; }; Leaflet.scales = function () { var vm = this; return vm; }; Leaflet.drawColorLegend = function () { var vm = this; var range = vm._scales.color.range().length; var step = (vm._minMax[1] - vm._minMax[0]) / (range - 1); var domain = vm._config.colors; var quantilePosition = d3 .scaleBand() .rangeRound([vm._config.size.height * 0.8, 0]) .domain(domain); //Add gradient legend //defaults to right position var legend = d3 .select('#' + vm._config.bindTo) .append('svg') .attr('width', 120) .attr('height', vm._config.size.height) .style('z-index', 401) .style('position', 'absolute') .style('top', '4px') .style('right', '2px') .append('g') .attr('class', 'legend quantized') .attr('transform', 'translate(50,25)'); // legend background legend .append('rect') .attr('x', -50) .attr('y', -35) .attr('width', 100) .attr('height', vm._config.size.height - 10) .attr('rx', 10) .attr('ry', 10) .attr('class', 'legend-background') .attr('fill', 'rgba(255,255,255,0.6)'); // legend title legend .append('text') .attr('x', 0) .attr('y', -12) .attr('class', 'legend-title') .attr('text-anchor', 'middle') .text(vm._config.legendTitle); var quantiles = legend .selectAll('.quantile') 
.data(vm._config.colors) .enter() .append('g') .attr('class', 'quantile') .attr('transform', function (d) { return 'translate(-20, ' + quantilePosition(d) + ')'; }); // Rect quantiles .append('rect') .attr('x', -10) .attr('y', 0) .attr('width', 18) .attr('height', quantilePosition.bandwidth()) .attr('fill', function (d) { return d; }); //top text is the max value quantiles .append('text') .attr('x', 17) .attr('y', 5) .attr('class', 'top-label') .attr('text-anchor', 'left') .text(function (d) { let max = vm._scales.color.invertExtent(d)[1]; if (vm._config.legendTitle === 'Porcentaje' && max > 100) { max = 100; } if (vm._config.map.formatLegend) { return vm._config.map.formatLegend(max); } else { return vm.utils.format()(max); } }); //bottom text is the min value quantiles .append('text') .attr('x', 17) .attr('y', vm._config.size.height / 5 - 11) .attr('class', 'bottom-label') .attr('text-anchor', 'left') .text(function (d, i) { if (i === 0) { let min = vm._scales.color.invertExtent(d)[0]; if (vm._config.map.formatLegend) { return vm._config.map.formatLegend(min); } else { return vm.utils.format()(min); } } else { return ''; } }); }; Leaflet.draw = function () { var vm = this; var urlTopojson = vm._config.map.topojson.url; var objects = vm._config.map.topojson.objects; //'states' var tran = vm._config.map.topojson.translate; //var tran = [2580, 700]; var scale = vm._config.map.topojson.scale; //1300 var parser = vm._config.map.topojson.parser; var id = vm._config.map.topojson.id; L.TopoJSON = L.GeoJSON.extend({ addData: function (jsonData) { var geojson, key; if (jsonData.type === 'Topology') { if (objects) { if (Array.isArray(objects)) { for (let idx = 0; idx < objects.length; idx++) { const obj = objects[idx]; geojson = topojson.feature(jsonData, jsonData.objects[obj]); L.GeoJSON.prototype.addData.call(this, geojson); } } else { geojson = topojson.feature(jsonData, jsonData.objects[objects]); L.GeoJSON.prototype.addData.call(this, geojson); } } else { for (key in jsonData.objects) { geojson = topojson.feature(jsonData, jsonData.objects[key]); L.GeoJSON.prototype.addData.call(this, geojson); } } } else
}, }); var LatLng = { lat: 25.5629994, lon: -100.6405644, }; if ( vm._config.map.topojson.center && vm._config.map.topojson.center.length === 2 ) { LatLng.lat = vm._config.map.topojson.center[0]; LatLng.lon = vm._config.map.topojson.center[1]; } var bounds = new L.LatLngBounds( new L.LatLng(LatLng.lat + 5, LatLng.lon - 5), new L.LatLng(LatLng.lat - 5, LatLng.lon + 5) ); vm._map = new L.Map(vm._config.bindTo, { center: bounds.getCenter(), zoom: vm._config.map.topojson.zoom || 7, maxZoom: vm._config.map.topojson.maxZoom || 10, minZoom: vm._config.map.topojson.minZoom || 3, maxBounds: bounds, maxBoundsViscosity: 1.0, }); var mapTiles = L.tileLayer( 'http://b.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png', { attribution: '&copy; <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a>', } ); var topoLayer = new L.TopoJSON(); mapTiles.addTo(vm._map); addTopoData(vm._topojson); // vm._map.on('zoomend', function() { // d3.selectAll('.dbox-label').remove(); // Object.values(vm._map._layers) // .filter(obj => obj.feature) // .forEach(function(layer) { // vm.drawLabel(layer); // }); // }); function addTopoData(topoData) { topoLayer.addData(topoData); topoLayer.addTo(vm._map); topoLayer.eachLayer(handleLayer); } var tip = vm.utils.d3.tip().html( vm._config.tip ? vm._config.tip.bind(this) : function (d) { let html = '<div class="d3-tip" style="z-index: 99999;"><span>' + (d.feature.properties.NOM_ENT || d.feature.properties.NOM_MUN) + '</span><br/><span>' + vm.utils.format()(d.feature.properties[vm._config.fill]) + '</span></div>'; return html; } ); d3.select('#' + vm._config.bindTo) .select('svg.leaflet-zoom-animated') .call(tip); /** * Set each layer * @param {obj} layer */ function handleLayer(layer) { var value = layer.feature.properties[vm._config.fill]; if (!value) { // Remove polygons without data /** @todo validate what to do with NA's */ d3.select(layer._path).remove(); } else { var fillColor = vm._scales.color(value); layer.setStyle({ fillColor: fillColor, fillOpacity: vm._config.opacity || 0.7, color: '#555', weight: 1, opacity: 0.5, }); vm.drawLabel(layer); layer.on({ mouseover: function () { enterLayer(layer); }, mouseout: function () { leaveLayer(layer); }, }); } } function enterLayer(layer) { tip.show(layer, d3.select(layer._path).node()); } function leaveLayer(layer) { tip.hide(layer); } /** * Draw Legend */ if (typeof vm._config.legend === 'function') { vm._config.legend.call(this, vm._nodes); } Leaflet.drawColorLegend(); return vm; }; /** * Add labels for each path (layer) to display value */ Leaflet.drawLabel = function (layer) { const vm = this; const props = layer.feature.properties; const path = d3.select(layer._path).node(); const bbox = path.getBBox(); var svg = d3 .select('#' + vm._config.bindTo) .select('svg.leaflet-zoom-animated'); if (props[vm._config.fill] !== undefined) { svg .append('text') .attr('class', 'dbox-label') .attr('x', bbox.x + bbox.width / 2) .attr('y', bbox.y + d3.min([bbox.height / 2, 30])) .attr('text-anchor', 'middle') .text( `${ props[vm._config.poly_name] ? props[vm._config.poly_name] + ': ' : '' } ${vm.utils.format()(props[vm._config.fill])}` ); } return vm; }; Leaflet.init(config); return Leaflet; }
{
  L.GeoJSON.prototype.addData.call(this, jsonData);
}
conditional_block
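The conditional block completed in this row is addData's fallback: input that is not a Topology is forwarded to L.GeoJSON unchanged. The full dispatch, sketched in Python with a stub standing in for topojson.feature:

def add_data(layer, json_data, objects=None, feature=lambda topo, obj: obj):
    """Mirror of L.TopoJSON.addData: plain GeoJSON is added as-is; a
    Topology has each requested object converted to GeoJSON first."""
    if json_data.get('type') != 'Topology':
        layer.append(json_data)
        return
    names = (objects if isinstance(objects, list)
             else [objects] if objects
             else list(json_data['objects']))
    for name in names:
        layer.append(feature(json_data, json_data['objects'][name]))

layer = []
add_data(layer, {'type': 'Topology', 'objects': {'states': 's', 'lakes': 'l'}})
print(layer)  # ['s', 'l'] -- with no `objects` given, every object is converted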
adv_YAN.py
import random import os import numpy as np import torch import torch.utils.data as data_utils import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from sklearn.svm import SVC from torch.nn import GRU, Embedding, Linear from util import Embedder from tools import balance from tqdm import tqdm from DANN import DANN import argparse from defense import initialize_defense import time parser = argparse.ArgumentParser(description='Medical Attack') parser.add_argument("-p", type=int, default= 5555, help = 'the comm port the client will use') parser.add_argument("-c", action='store_true', help = 'whether to use cached model') parser.add_argument("-t", action='store_true', help = "to switch between training or testing") # parser.add_argument("--save_p", type=str, default="default", help = 'the place to store the model') parser.add_argument("-a", type=str, default='bert', help = 'targeted architecture') parser.add_argument("-d", type=str, default='none', help = 'the type of defense to do') parser.add_argument("--clf", type = str, default='SVM', help = 'the type of attack model to use') parser.add_argument("-v", action='store_true', help = 'whether to be wordy') parser.add_argument("-f", type=str, default='atk', help = 'to specify the functional') parser.add_argument("--ext_size", type=int, default=2000, help = 'the size of the ext corpus') parser.add_argument("--tgt_size", type=int, default=2000, help = 'the size of the target set which is visible to the adversary') parser.add_argument("--dataset", type = str, default = "medical", help = 'dataset') ARGS = parser.parse_args() ARCH = ARGS.a CLS = ARGS.clf CLS_NUM = 10 VERBOSE = ARGS.v #SVM parameter SVM_KERNEL = 'linear' # DANN parameter MAXITER = 1000 BATCH_SIZE = 128 LAMDA = 1.0 FUNCTION = ARGS.f # DANN_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/DANN_CPT/' # DANN_CACHED = False # MLP parameter EPOCH = 1000 HIDDEN_DIM = 80 BATCH_SIZE = 128 LEARNING_RATE = 0.001 PRINT_FREQ = 100 K = 5 DATASET = ARGS.dataset NO_BALANCE = False # CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/MLP_CPT/' # CPT_PATH = 'data/part_fake_5/MLP_CPT/' if(not ARGS.t): DANN_CPT_PATH = 'data/part_fake_5/DANN_CPT/' DANN_CACHED = False CPT_PATH = 'data/part/MLP_CPT/' CACHED = False else: # toggle it to use Yan's pretrained model DANN_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/DANN_CPT/' CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/MLP_CPT/' DANN_CACHED = True CACHED = True DELTA_TABLE = { "bert": 81.82, 'gpt' : 73.19, 'gpt2': 110.2, 'xl': 17.09, 'xlnet': 601.5, 'xlm': 219.4, 'roberta': 4.15, 'ernie': 28.20 } DEVICE = torch.device('cuda:0') # os.environ["CUDA_VISIBLE_DEVICES"] = "1" if(DATASET == 'medical'): # LOCAL = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_4/' if(not NO_BALANCE): DS_LOCAL = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_5/' else: DS_LOCAL = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_6/' DS_PATH = DS_LOCAL + '{}.{}' DS_EMB_PATH = DS_LOCAL + '{}.{}' # DS_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/train' + '.{}.{}' # DS_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/EMB/{}/train'.format(ARCH) + '.{}.{}' TARGET_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.test.txt' TARGET_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.test.x' # TARGET_EMB_PATH = 'data/medical.test.x' TRAIN_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.train.txt' TRAIN_EMB_PATH 
= '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.train.x' # TRAIN_EMB_PATH = 'data/medical.train.x' if(not NO_BALANCE): cls_names = ["leg", "hand", "spine", "chest", "ankle", "head", "hip", "arm", "face", "shoulder"] else: cls_names = ["sack", "paltry", "settle", "lethal", "flagrant"] # cls_names = ['Hong Kong','London','Toronto','Paris','Rome'] else: cls_names = ['Hong Kong','London','Toronto','Paris','Rome','Sydney','Dubai','Bangkok','Singapore','Frankfurt'] # cls_names = ["sack", "paltry", "settle", "lethal", "flagrant"] TARGET_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/valid.txt' TARGET_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/valid' TRAIN_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/test.txt' TRAIN_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/test' if(not NO_BALANCE): DS_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/train' + '.{}.{}' DS_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/EMB/{}/train'.format(ARCH) + '.{}.{}' else: DS_PATH = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_6/' + '{}.{}' DS_EMB_PATH = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_6/'+'{}.{}' DANN_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/DANN_CPT/' DANN_O_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/DANN_Without_Valid_CPT/' MLP_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/MLP_CPT/' MLP_O_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/MLP_Without_Valid_CPT/' UTIL_MODEL_PATH = 'data/part/MLP_CPT/' P_TABLE = { "bert-base": 5049, "bert": 5001, "gpt": 768, "gpt2": 768, "xl": 1024, "xlnet": 5002, "xlm": 5004, "roberta":5003, "ernie": 5005 } p = ARGS.p EMB_DIM_TABLE = { "bert-base": 768, # "bert": 768, "bert": 1024, "gpt": 768, "gpt2": 768, "xl": 1024, "xlnet": 768, "xlm": 1024, "roberta": 768, "ernie":768, "gpt-2-medium": 1024, "gpt-2-large": 1280 } HIDDEN = 100 # EMB_DIM_TABLE[ARCH] embedder = Embedder(p) embedding = embedder.embedding # export the functional port def data_embedding(): f = open(TARGET_PATH, 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, TARGET_EMB_PATH, ARCH) f = open(TRAIN_PATH, 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, TRAIN_EMB_PATH, ARCH) for key in cls_names: for i in [0, 1]: f = open(DS_PATH.format(key, i) + '.txt', 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, DS_EMB_PATH.format(key, i), ARCH) def DANNA(key, size=2000): X, Y = [], [] for i in [0, 1]: # while my training data is from gpt f = open(DS_PATH.format(key, i) + '.txt', 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, DS_EMB_PATH.format(key, i), ARCH) embs = embs[np.random.choice(len(embs), min(size, len(embs)), replace=False), :] X.append(embs) Y.extend([i] * embs.shape[0]) X = np.concatenate(X, axis=0) Y = np.array(Y) train_embs = np.load(TRAIN_EMB_PATH + '.' + ARCH + '.npy') # load validation set (let us load gpt2) raw_valid, X_valid = list(open(TARGET_PATH, 'r')), np.load(TARGET_EMB_PATH + '.' 
+ ARCH + '.npy') # the potato case is somehow necessary, because it is the case where all the answers should be negative if (key != 'potato'): raw_valid, X_valid = balance(key, raw_valid, X_valid) print(len(raw_valid)) Y_valid = np.array([(key in x) for x in raw_valid]) # learn a transfer clf = DANN(input_size=EMB_DIM_TABLE[ARCH], maxiter=4000, verbose=VERBOSE, name=key, batch_size=BATCH_SIZE, lambda_adapt=LAMDA, hidden_layer_size=HIDDEN) acc = clf.fit(X, Y, X_adapt=train_embs, X_valid=X_valid, Y_valid=Y_valid) return acc class NonLinearClassifier(nn.Module): def __init__(self, key, embedding_size, hidden_size, cls_num=2, device=DEVICE): super(NonLinearClassifier, self).__init__() self.fc1 = Linear(embedding_size, hidden_size) self.fc2 = Linear(hidden_size, cls_num) self.device = device self.criterion = nn.CrossEntropyLoss() self.key = key def forward(self, x): x = torch.sigmoid(self.fc1(x)) x = self.fc2(x) # , dim=0) return x def predict(self, x): x = torch.FloatTensor(x) # print(x) outputs = self(x.cuda()) # print(outputs) _, preds = torch.max(outputs, 1) # print(preds) return preds.cpu().numpy() def predict_topk(self, x, k=5): with torch.no_grad(): probs = self(x) _, topk = torch.topk(probs, k) return topk.cpu().numpy() def loss(self, x, y):
def _evaluate(self, x, y): preds = self.predict(x) # y = y.numpy() return np.mean(preds == y) def _evaluate_topk(self, x, y, k = K): # y = y.numpy() with torch.no_grad(): probs = self(x) _, topk = torch.topk(probs, k) topk = topk.cpu().numpy() acc = [int(y[i] in topk[i, :]) for i in range(len(y))] return np.mean(acc) def fit(self, X, Y, test_X = None, test_Y = None, epoch_num=EPOCH): # 2000, 4000 y_cpu = Y.copy() # self.cuda() X = torch.FloatTensor(X) Y = torch.LongTensor(Y) if(FUNCTION == 'util'): test_X = torch.FloatTensor(test_X) test_Y = torch.LongTensor(test_Y) model_path = CPT_PATH+ "{}_cracker_{}.cpt".format(self.key, ARCH) if (CACHED and os.path.exists(model_path)): print("Loading Model from {} ...".format(model_path)) self.load_state_dict(torch.load(model_path)) # X = X.cuda() # Y = torch.LongTensor(Y) preds = self.predict(X) correct = np.sum(preds == y_cpu) correct = correct / len(y_cpu) # print("Source Domain batch Acc.: {:.4f}".format(correct)) return ds = data_utils.TensorDataset(X, Y) train_loader = data_utils.DataLoader(ds, batch_size=BATCH_SIZE, shuffle=True, pin_memory=True) counter = 0 best_acc = 0.0 if(FUNCTION == 'util'): test_ds = data_utils.TensorDataset(test_X, test_Y) test_loader = data_utils.DataLoader(test_ds, batch_size = BATCH_SIZE, shuffle = True) for epoch in tqdm(range(epoch_num)): running_loss = 0.0 criterion = nn.CrossEntropyLoss() # optimizer = optim.SGD(self.parameters(), lr=0.1, momentum = 0.9) optimizer = optim.Adam(self.parameters(), lr=LEARNING_RATE, weight_decay=1e-5) for i, data in enumerate(train_loader): # get the inputs; data is a list of [inputs, labels] inputs, labels = data inputs, labels = inputs.cuda(), labels.cuda() # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = self(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics running_loss += loss.item() counter += 1 if ((epoch+1) % 1000 == 0): print('Epoch %d loss: %.5f Count: %d' % (epoch + 1, running_loss, counter)) running_loss = 0.0 counter = 0 preds = self.predict(X) correct = np.sum(preds == y_cpu) print(np.histogram(preds, bins = 2)) print(np.histogram(y_cpu, bins = 2)) correct = correct / len(y_cpu) # print("Source Domain batch Acc.: {:.4f}".format(correct)) if(FUNCTION == 'util'): top1 = util_early_stopping_evaluate(self, test_loader) else: top1 = early_stopping_evaluate(self, self.key) print("Early stopping Acc.: {:4f}".format(top1)) if (top1 >= best_acc): best_acc = top1 # torch.save(self.state_dict(),CPT_PATH + "{}_cracker_{}.cpt".format(self.key, ARCH)) print("Save Model {:.4f}".format(top1)) torch.save(self.state_dict(),CPT_PATH + "medical_functional_{}.cpt".format(ARCH)) print("save_path: {}".format(CPT_PATH + "medical_functional_{}.cpt".format(ARCH))) print("Early stopping set Infer {} Best acc top1. 
{:.4f}".format(self.key, best_acc)) def compute_utility(): print("Evaluate {} Utility".format(ARCH)) sents = [x[:-1].split('\t') for x in open(TRAIN_PATH, 'r') if x[:-1] != ''] sents_train = [x[0] for x in sents] # print(sents[0]) train_y = np.array([int(s[1]) for s in sents]) sents = [x[:-1].split('\t') for x in open(TARGET_PATH, 'r') if x[:-1] != ''] sents_test = [x[0] for x in sents] test_y = np.array([int(s[1]) for s in sents]) print(len(train_y)) print(len(test_y)) print(sents_train[0]) print(sents_test[0]) train_x = embedding(sents_train, TRAIN_EMB_PATH, ARCH) test_x = embedding(sents_test, TARGET_EMB_PATH, ARCH) if(CLS == 'MLP'): clf = NonLinearClassifier('', EMB_DIM_TABLE[ARCH], HIDDEN_DIM, cls_num = 10) clf.cuda() clf.fit(train_x, train_y, test_x, test_y) # assume the existence of the model acc = clf._evaluate(test_x, test_y) print("Acc. {:.4f}".format(acc)) return def util_early_stopping_evaluate(clf, dataloader): count = 0 correct = 0 for x, y in dataloader: correct += np.sum(y.numpy() == clf.predict(x)) count += x.shape[0] return correct / (count * 1.0) def early_stopping_evaluate(clf, key): # load the early stopping set f = open(TARGET_PATH, 'r') target_f = [x[:-1] for x in f if x[:-1] != ''] f.close() target_embs = embedding(target_f, TARGET_EMB_PATH, ARCH) if(not NO_BALANCE): target_f, target_embs = balance(key, target_f, target_embs) else: target_f = target_f[:1000] target_embs = target_embs[:1000, :] results = np.zeros((2, 2)) count = 0 for i, sent in enumerate(list(target_f)): pred_ = clf.predict([target_embs[i]])[0] truth_ = int(key in sent) results[pred_][truth_] += 1 count += 1 results /= (count * 1.0) acc = results[0][0] + results[1][1] # print("early_stopping set Inference {} Acc: {:.3f}".format(key, results[0][0] + results[1][1])) return acc def ATTACK(key, use_dp=False, defense=None, verbose=VERBOSE, size = 2000): # (X, Y) is from external corpus. # X are sentence embeddings. Y are labels. # To prepare an external corpus, we substitute the food keywords in Yelp dataset to body keywords. ## GET THE TRAINING DATA, NO NEED TO DEFEND X, Y = [], [] mean_direction = [] for i in [0, 1]: # while my training data is from gpt f = open(DS_PATH.format(key, i) + '.txt', 'r') sents = [x[:-1] for x in f if x[:-1] != ''] print(DS_EMB_PATH.format(key, i)) embs = embedding(sents, DS_EMB_PATH.format(key, i), ARCH) embs = embs[np.random.choice(len(embs), min(size, len(embs)), replace=False), :] mean_direction.append(np.mean(embs, axis = 0)) X.append(embs) Y.extend([i] * embs.shape[0]) X = np.concatenate(X, axis=0) Y = np.array(Y) trans_D = mean_direction[1] - mean_direction[0] # print(trans_D) # (Target_sents, Target_X) is from target domain. # Target_X are sentence embeddings. Target_sents are original sentences. 
f = open(TRAIN_PATH, 'r') Target_sents = [x[:-1] for x in f if x[:-1] != ''] f.close() # trunk DEFEND Target_X = embedding(Target_sents, TRAIN_EMB_PATH, ARCH) rand_idx = np.random.permutation(Target_X.shape[0]) shuffled_target_X = Target_X[rand_idx, :] if(not NO_BALANCE): Target_sents, Target_X = balance(key, Target_sents, Target_X) else: Target_X = Target_X[rand_idx, :] Target_sents = [Target_sents[i] for i in rand_idx] Target_X = Target_X[:1000, :] Target_sents = Target_sents[:1000] # print(Target_sents[0]) Target_Y = np.array([int(key in x) for x in Target_sents]) sents = [x.split('\t') for x in Target_sents if x[:-1] != ''] # print(sents) # print(sents[0]) if(DATASET == 'medical'): target_util_y = np.array([int(s[1]) for s in sents]) else: target_util_y = np.array([0 for s in sents]) # print("Balanced: {}".format(np.mean(Target_Y))) # now the target Y here is the sensitive label if(use_dp): protected_target_X = defense(Target_X, Target_Y) # print(Target_X[0, :]) # print(torch.sum(protected_target_X[0, :])) # (X_valid, Y_valid) is from valid set. # SVM: This is regarded as shadow corpus of Target domain. # DANN or MLP: This is used to early stop. # X_valid are sentence embeddings. Y_valid are labels. raw_valid, X_valid = list(open(TARGET_PATH, 'r')), np.load(TARGET_EMB_PATH + '.' + ARCH + '.npy') if(not NO_BALANCE): raw_valid, X_valid = balance(key, raw_valid, X_valid) Y_valid = np.array([int(key in x) for x in raw_valid]) # load the utility model if(DATASET == 'medical'): util_clf = NonLinearClassifier(key, EMB_DIM_TABLE[ARCH], HIDDEN_DIM, cls_num = 10) # util_clf.load_state_dict(torch.load(UTIL_MODEL_PATH + "medical_functional_{}.cpt".format(ARCH))) util_clf.cuda() preds = util_clf.predict(Target_X) util_acc = np.mean(preds == target_util_y) # print("Util Acc. {:.4f}".format(acc)) if(use_dp): protected_target_X = torch.FloatTensor(protected_target_X) preds = util_clf.predict(protected_target_X) protected_util_acc = np.mean(preds == target_util_y) if(VERBOSE): print("TRAINING SET SIZE: {}".format(len(Y))) print("EMBEDDINGS FROM TARGET DOMAIN: {}".format(len(Target_Y))) print("TEST SET SIZE: {}".format(len(Y_valid))) # learn a transfer print("TESTING MODEL: {}".format(CLS)) acc, protected_acc = 0.0, 0.0 util_acc, protected_util_acc = 0.0, 0.0 if CLS == 'MLP': print("Histogram of the Target Y: {}".format(np.histogram(Target_Y))) clf = NonLinearClassifier(key, EMB_DIM_TABLE[ARCH], HIDDEN_DIM) clf.cuda() clf.fit(X, Y) # assume the existence of the model acc = clf._evaluate(Target_X, Target_Y) if(use_dp): protected_target_X = torch.FloatTensor(protected_target_X) protected_acc = clf._evaluate(protected_target_X, Target_Y) elif CLS == 'SVM': # for discussion REVERSE = False # shadow clf = SVC(kernel='{}'.format(SVM_KERNEL), gamma='scale', verbose=VERBOSE, max_iter = 5000) # print(X_valid) # print(Y_valid) if(REVERSE): clf.fit(Target_X, Target_Y) else: start = time.time() clf.fit(X_valid, Y_valid) end = time.time() print("Time: {}".format(end - start)) # if(defense): # the common approach if(REVERSE): preds = clf.predict(X_valid) acc = np.mean(preds == Y_valid) else: preds = clf.predict(Target_X) acc = np.mean(preds == Target_Y) # print(acc) if(use_dp): preds = clf.predict(protected_target_X) protected_acc = np.mean(preds == Target_Y) elif CLS == 'DANN': # I have no idea whether the 1000 is. 
DANN_CPT_PATHs = DANN_CPT_PATH + "{}_cracker_{}.cpt".format(key, ARCH) clf = DANN(input_size=EMB_DIM_TABLE[ARCH], maxiter=MAXITER, verbose=VERBOSE, name=key, batch_size=BATCH_SIZE, lambda_adapt=LAMDA, hidden_layer_size=HIDDEN, cached = DANN_CACHED, cpt_path = DANN_CPT_PATHs) # clf.cuda() # set the size of the Target_X trans_D = 0.5 * trans_D concated_Target_X = np.concatenate([Target_X - trans_D, Target_X + trans_D], axis = 0) clf.fit(X, Y, X_adapt=concated_Target_X, X_valid=Target_X - trans_D, Y_valid=Target_Y) Target_X = torch.FloatTensor(Target_X-trans_D) acc = clf.validate(Target_X, Target_Y) # print(acc) if(use_dp): protected_target_X = torch.FloatTensor(protected_target_X).cuda() protected_acc = clf.validate(protected_target_X, Target_Y) # print("Target Domain Inference {} Acc: {:.3f}".format(key, acc)) # return acc elif CLS == 'MLP_SHADOW': clf = NonLinearClassifier(key, EMB_DIM_TABLE[ARCH], HIDDEN_DIM) clf.cuda() clf.fit(X_valid, Y_valid) acc = clf._evaluate(Target_X, Target_Y) else: clf = None print('wrong cls\' name') return acc, protected_acc, util_acc, protected_util_acc # # predict on Target_X # acc = clf._evaluate(Target_X, Target_Y) # # results = np.zeros((2, 2)) # # count = 0 # # for i, sent in enumerate(list(Target_sents)): # # pred_ = int(clf.predict([Target_X[i]])[0]) # # truth_ = int(key in sent) # # results[pred_][truth_] += 1 # # count += 1 # # results /= (count * 1.0) # # acc = results[0][0] + results[1][1] # print("Target Domain Inference {} Acc: {:.3f} Protected: {:.4f}".format(key, acc, protected_acc)) # return acc if __name__ == '__main__': DELTA_TABLE = { "bert": 81.82, 'gpt' : 73.19, 'gpt2': 110.2, 'xl': 17.09, 'xlnet': 601.5, 'xlm': 219.4, 'roberta': 4.15, 'ernie': 28.20 } print(FUNCTION) if(FUNCTION == 'atk'): # DS_prepare() # EX_DS_prepare() # init a defense to test Source_Acc_sum = 0 Target_Acc_sum = 0 Target_Acc_list = [] # data_embedding() _def = initialize_defense('rounding', decimals = 0) protected_avg_acc = 0.0 for key in cls_names: TA, protected_acc, _, _ = ATTACK(key, use_dp = False, defense = _def, size = ARGS.ext_size) Target_Acc_sum += TA protected_avg_acc += protected_acc Target_Acc_list.append([key, TA, protected_acc]) print('INFER {} ACC: {:.4f} Protected Acc.: {:.4f}'.format(key,TA, protected_acc)) print('Keyword Attacker {} on {} Embeddings'.format(CLS, ARCH)) for KT in Target_Acc_list: print('INFER {} ACC: {:.4f} Protected Acc.: {:.4f}'.format(KT[0], KT[1], KT[2])) print('Target_Acc_Top1_Average: {:.4f} Protected Target_Acc_Average: {:.4f}'.format(Target_Acc_sum / len(cls_names), protected_avg_acc / len(cls_names))) elif(FUNCTION == 'util'): compute_utility() elif(FUNCTION == 'def'): DEFENSE = 'rounding' print('Keyword Attacker {} /Defense {} on {} Embeddings'.format(CLS, DEFENSE, ARCH)) defenses = [] if(DEFENSE == 'rounding'): for i in range(10): defenses.append((i, "rounding to {} decimals".format(i), initialize_defense('rounding', decimals = i))) elif(DEFENSE == 'dp'): eps_list = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000] for eps in eps_list: defenses.append((eps, "laplace with eps {}".format(eps), initialize_defense("dp", delta = DELTA_TABLE[ARCH], eps = eps))) else: eps_list = [0.001, 0.005, 0.01, 0.1, 0.5, 1.0] for eps in eps_list: defenses.append((eps, "minmax with eps {}".format(eps), initialize_defense("minmax", cls_num = 10, eps = eps))) RESULTS = list() for defense in defenses: param, descript, _def = defense Source_Acc_sum = 0 Target_Acc_sum = 0 Target_Acc_list = [] print("Evaluate {} 
with Defense {}".format(ARCH, descript)) # data_embedding() # _def = initialize_defense('rounding', decimals = 0) protected_avg_acc = 0.0 for key in cls_names: TA, protected_acc, util, protected_util = ATTACK(key, use_dp = True, defense = _def) Target_Acc_sum += TA protected_avg_acc += protected_acc Target_Acc_list.append([key, TA, protected_acc, util, protected_util]) # for KT in Target_Acc_list: # print('INFER {} ACC: {:.4f} Protected Acc.: {:.4f} Util: {:.4f} Protected Util: {:.4f}'.format(KT[0], KT[1], KT[2], KT[3], KT[4])) RESULTS.append([(param, Target_Acc_list)]) print("ARCH: {} \n RESULTS: {}".format(ARCH, RESULTS)) elif(FUNCTION == 'prepare'): data_embedding()
        x = self(x)
        _loss = self.criterion(x, y)
        return _loss
identifier_body
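The `identifier_body` target above completes `NonLinearClassifier.loss`: a forward pass followed by the `nn.CrossEntropyLoss` criterion built in `__init__`. A minimal usage sketch, assuming the imports, constants, and class definition from this file and a CUDA device as used throughout; the batch below is hypothetical:

```python
# Hypothetical smoke test for the completed loss(); shapes and values are made up.
clf = NonLinearClassifier('leg', embedding_size=1024, hidden_size=80).cuda()
inputs = torch.randn(4, 1024).cuda()        # 4 fake bert-large embeddings
labels = torch.randint(0, 2, (4,)).cuda()   # binary keyword labels
optimizer = optim.Adam(clf.parameters(), lr=LEARNING_RATE, weight_decay=1e-5)

optimizer.zero_grad()
loss = clf.loss(inputs, labels)             # forward + CrossEntropyLoss
loss.backward()
optimizer.step()
```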
adv_YAN.py
import random import os import numpy as np import torch import torch.utils.data as data_utils import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from sklearn.svm import SVC from torch.nn import GRU, Embedding, Linear from util import Embedder from tools import balance from tqdm import tqdm from DANN import DANN import argparse from defense import initialize_defense import time parser = argparse.ArgumentParser(description='Medical Attack') parser.add_argument("-p", type=int, default= 5555, help = 'the comm port the client will use') parser.add_argument("-c", action='store_true', help = 'whether to use cached model') parser.add_argument("-t", action='store_true', help = "to switch between training or testing") # parser.add_argument("--save_p", type=str, default="default", help = 'the place to store the model') parser.add_argument("-a", type=str, default='bert', help = 'targeted architecture') parser.add_argument("-d", type=str, default='none', help = 'the type of defense to do') parser.add_argument("--clf", type = str, default='SVM', help = 'the type of attack model to use') parser.add_argument("-v", action='store_true', help = 'whether to be wordy') parser.add_argument("-f", type=str, default='atk', help = 'to specify the functional') parser.add_argument("--ext_size", type=int, default=2000, help = 'the size of the ext corpus') parser.add_argument("--tgt_size", type=int, default=2000, help = 'the size of the target set which is visible to the adversary') parser.add_argument("--dataset", type = str, default = "medical", help = 'dataset') ARGS = parser.parse_args() ARCH = ARGS.a CLS = ARGS.clf CLS_NUM = 10 VERBOSE = ARGS.v #SVM parameter SVM_KERNEL = 'linear' # DANN parameter MAXITER = 1000 BATCH_SIZE = 128 LAMDA = 1.0 FUNCTION = ARGS.f # DANN_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/DANN_CPT/' # DANN_CACHED = False # MLP parameter EPOCH = 1000 HIDDEN_DIM = 80 BATCH_SIZE = 128 LEARNING_RATE = 0.001 PRINT_FREQ = 100 K = 5 DATASET = ARGS.dataset NO_BALANCE = False # CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/MLP_CPT/' # CPT_PATH = 'data/part_fake_5/MLP_CPT/' if(not ARGS.t): DANN_CPT_PATH = 'data/part_fake_5/DANN_CPT/' DANN_CACHED = False CPT_PATH = 'data/part/MLP_CPT/' CACHED = False else: # toggle it to use Yan's pretrained model DANN_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/DANN_CPT/' CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/MLP_CPT/' DANN_CACHED = True CACHED = True DELTA_TABLE = { "bert": 81.82, 'gpt' : 73.19, 'gpt2': 110.2, 'xl': 17.09, 'xlnet': 601.5, 'xlm': 219.4, 'roberta': 4.15, 'ernie': 28.20 } DEVICE = torch.device('cuda:0') # os.environ["CUDA_VISIBLE_DEVICES"] = "1" if(DATASET == 'medical'): # LOCAL = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_4/' if(not NO_BALANCE): DS_LOCAL = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_5/' else: DS_LOCAL = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_6/' DS_PATH = DS_LOCAL + '{}.{}' DS_EMB_PATH = DS_LOCAL + '{}.{}' # DS_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/train' + '.{}.{}' # DS_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/EMB/{}/train'.format(ARCH) + '.{}.{}' TARGET_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.test.txt' TARGET_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.test.x' # TARGET_EMB_PATH = 'data/medical.test.x' TRAIN_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.train.txt' TRAIN_EMB_PATH 
= '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.train.x' # TRAIN_EMB_PATH = 'data/medical.train.x' if(not NO_BALANCE): cls_names = ["leg", "hand", "spine", "chest", "ankle", "head", "hip", "arm", "face", "shoulder"] else: cls_names = ["sack", "paltry", "settle", "lethal", "flagrant"] # cls_names = ['Hong Kong','London','Toronto','Paris','Rome'] else: cls_names = ['Hong Kong','London','Toronto','Paris','Rome','Sydney','Dubai','Bangkok','Singapore','Frankfurt'] # cls_names = ["sack", "paltry", "settle", "lethal", "flagrant"] TARGET_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/valid.txt' TARGET_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/valid' TRAIN_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/test.txt' TRAIN_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/test' if(not NO_BALANCE): DS_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/train' + '.{}.{}' DS_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/EMB/{}/train'.format(ARCH) + '.{}.{}' else: DS_PATH = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_6/' + '{}.{}' DS_EMB_PATH = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_6/'+'{}.{}' DANN_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/DANN_CPT/' DANN_O_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/DANN_Without_Valid_CPT/' MLP_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/MLP_CPT/' MLP_O_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/MLP_Without_Valid_CPT/' UTIL_MODEL_PATH = 'data/part/MLP_CPT/' P_TABLE = { "bert-base": 5049, "bert": 5001, "gpt": 768, "gpt2": 768, "xl": 1024, "xlnet": 5002, "xlm": 5004, "roberta":5003, "ernie": 5005 } p = ARGS.p EMB_DIM_TABLE = { "bert-base": 768, # "bert": 768, "bert": 1024, "gpt": 768, "gpt2": 768, "xl": 1024, "xlnet": 768, "xlm": 1024, "roberta": 768, "ernie":768, "gpt-2-medium": 1024, "gpt-2-large": 1280 } HIDDEN = 100 # EMB_DIM_TABLE[ARCH] embedder = Embedder(p) embedding = embedder.embedding # export the functional port def data_embedding(): f = open(TARGET_PATH, 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, TARGET_EMB_PATH, ARCH) f = open(TRAIN_PATH, 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, TRAIN_EMB_PATH, ARCH) for key in cls_names: for i in [0, 1]: f = open(DS_PATH.format(key, i) + '.txt', 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, DS_EMB_PATH.format(key, i), ARCH) def DANNA(key, size=2000): X, Y = [], [] for i in [0, 1]: # while my training data is from gpt f = open(DS_PATH.format(key, i) + '.txt', 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, DS_EMB_PATH.format(key, i), ARCH) embs = embs[np.random.choice(len(embs), min(size, len(embs)), replace=False), :] X.append(embs) Y.extend([i] * embs.shape[0]) X = np.concatenate(X, axis=0) Y = np.array(Y) train_embs = np.load(TRAIN_EMB_PATH + '.' + ARCH + '.npy') # load validation set (let us load gpt2) raw_valid, X_valid = list(open(TARGET_PATH, 'r')), np.load(TARGET_EMB_PATH + '.' 
+ ARCH + '.npy') # the potato case is somehow necessary, because it is the case where all the answers should be negative if (key != 'potato'): raw_valid, X_valid = balance(key, raw_valid, X_valid) print(len(raw_valid)) Y_valid = np.array([(key in x) for x in raw_valid]) # learn a transfer clf = DANN(input_size=EMB_DIM_TABLE[ARCH], maxiter=4000, verbose=VERBOSE, name=key, batch_size=BATCH_SIZE, lambda_adapt=LAMDA, hidden_layer_size=HIDDEN) acc = clf.fit(X, Y, X_adapt=train_embs, X_valid=X_valid, Y_valid=Y_valid) return acc class NonLinearClassifier(nn.Module): def __init__(self, key, embedding_size, hidden_size, cls_num=2, device=DEVICE): super(NonLinearClassifier, self).__init__() self.fc1 = Linear(embedding_size, hidden_size) self.fc2 = Linear(hidden_size, cls_num) self.device = device self.criterion = nn.CrossEntropyLoss() self.key = key def forward(self, x): x = torch.sigmoid(self.fc1(x)) x = self.fc2(x) # , dim=0) return x def predict(self, x): x = torch.FloatTensor(x) # print(x) outputs = self(x.cuda()) # print(outputs) _, preds = torch.max(outputs, 1) # print(preds) return preds.cpu().numpy() def predict_topk(self, x, k=5): with torch.no_grad(): probs = self(x) _, topk = torch.topk(probs, k) return topk.cpu().numpy() def loss(self, x, y): x = self(x) _loss = self.criterion(x, y) return _loss def _evaluate(self, x, y): preds = self.predict(x) # y = y.numpy() return np.mean(preds == y) def
(self, x, y, k = K): # y = y.numpy() with torch.no_grad(): probs = self(x) _, topk = torch.topk(probs, k) topk = topk.cpu().numpy() acc = [int(y[i] in topk[i, :]) for i in range(len(y))] return np.mean(acc) def fit(self, X, Y, test_X = None, test_Y = None, epoch_num=EPOCH): # 2000, 4000 y_cpu = Y.copy() # self.cuda() X = torch.FloatTensor(X) Y = torch.LongTensor(Y) if(FUNCTION == 'util'): test_X = torch.FloatTensor(test_X) test_Y = torch.LongTensor(test_Y) model_path = CPT_PATH+ "{}_cracker_{}.cpt".format(self.key, ARCH) if (CACHED and os.path.exists(model_path)): print("Loading Model from {} ...".format(model_path)) self.load_state_dict(torch.load(model_path)) # X = X.cuda() # Y = torch.LongTensor(Y) preds = self.predict(X) correct = np.sum(preds == y_cpu) correct = correct / len(y_cpu) # print("Source Domain batch Acc.: {:.4f}".format(correct)) return ds = data_utils.TensorDataset(X, Y) train_loader = data_utils.DataLoader(ds, batch_size=BATCH_SIZE, shuffle=True, pin_memory=True) counter = 0 best_acc = 0.0 if(FUNCTION == 'util'): test_ds = data_utils.TensorDataset(test_X, test_Y) test_loader = data_utils.DataLoader(test_ds, batch_size = BATCH_SIZE, shuffle = True) for epoch in tqdm(range(epoch_num)): running_loss = 0.0 criterion = nn.CrossEntropyLoss() # optimizer = optim.SGD(self.parameters(), lr=0.1, momentum = 0.9) optimizer = optim.Adam(self.parameters(), lr=LEARNING_RATE, weight_decay=1e-5) for i, data in enumerate(train_loader): # get the inputs; data is a list of [inputs, labels] inputs, labels = data inputs, labels = inputs.cuda(), labels.cuda() # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = self(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics running_loss += loss.item() counter += 1 if ((epoch+1) % 1000 == 0): print('Epoch %d loss: %.5f Count: %d' % (epoch + 1, running_loss, counter)) running_loss = 0.0 counter = 0 preds = self.predict(X) correct = np.sum(preds == y_cpu) print(np.histogram(preds, bins = 2)) print(np.histogram(y_cpu, bins = 2)) correct = correct / len(y_cpu) # print("Source Domain batch Acc.: {:.4f}".format(correct)) if(FUNCTION == 'util'): top1 = util_early_stopping_evaluate(self, test_loader) else: top1 = early_stopping_evaluate(self, self.key) print("Early stopping Acc.: {:4f}".format(top1)) if (top1 >= best_acc): best_acc = top1 # torch.save(self.state_dict(),CPT_PATH + "{}_cracker_{}.cpt".format(self.key, ARCH)) print("Save Model {:.4f}".format(top1)) torch.save(self.state_dict(),CPT_PATH + "medical_functional_{}.cpt".format(ARCH)) print("save_path: {}".format(CPT_PATH + "medical_functional_{}.cpt".format(ARCH))) print("Early stopping set Infer {} Best acc top1. 
{:.4f}".format(self.key, best_acc)) def compute_utility(): print("Evaluate {} Utility".format(ARCH)) sents = [x[:-1].split('\t') for x in open(TRAIN_PATH, 'r') if x[:-1] != ''] sents_train = [x[0] for x in sents] # print(sents[0]) train_y = np.array([int(s[1]) for s in sents]) sents = [x[:-1].split('\t') for x in open(TARGET_PATH, 'r') if x[:-1] != ''] sents_test = [x[0] for x in sents] test_y = np.array([int(s[1]) for s in sents]) print(len(train_y)) print(len(test_y)) print(sents_train[0]) print(sents_test[0]) train_x = embedding(sents_train, TRAIN_EMB_PATH, ARCH) test_x = embedding(sents_test, TARGET_EMB_PATH, ARCH) if(CLS == 'MLP'): clf = NonLinearClassifier('', EMB_DIM_TABLE[ARCH], HIDDEN_DIM, cls_num = 10) clf.cuda() clf.fit(train_x, train_y, test_x, test_y) # assume the existence of the model acc = clf._evaluate(test_x, test_y) print("Acc. {:.4f}".format(acc)) return def util_early_stopping_evaluate(clf, dataloader): count = 0 correct = 0 for x, y in dataloader: correct += np.sum(y.numpy() == clf.predict(x)) count += x.shape[0] return correct / (count * 1.0) def early_stopping_evaluate(clf, key): # load the early stopping set f = open(TARGET_PATH, 'r') target_f = [x[:-1] for x in f if x[:-1] != ''] f.close() target_embs = embedding(target_f, TARGET_EMB_PATH, ARCH) if(not NO_BALANCE): target_f, target_embs = balance(key, target_f, target_embs) else: target_f = target_f[:1000] target_embs = target_embs[:1000, :] results = np.zeros((2, 2)) count = 0 for i, sent in enumerate(list(target_f)): pred_ = clf.predict([target_embs[i]])[0] truth_ = int(key in sent) results[pred_][truth_] += 1 count += 1 results /= (count * 1.0) acc = results[0][0] + results[1][1] # print("early_stopping set Inference {} Acc: {:.3f}".format(key, results[0][0] + results[1][1])) return acc def ATTACK(key, use_dp=False, defense=None, verbose=VERBOSE, size = 2000): # (X, Y) is from external corpus. # X are sentence embeddings. Y are labels. # To prepare an external corpus, we substitute the food keywords in Yelp dataset to body keywords. ## GET THE TRAINING DATA, NO NEED TO DEFEND X, Y = [], [] mean_direction = [] for i in [0, 1]: # while my training data is from gpt f = open(DS_PATH.format(key, i) + '.txt', 'r') sents = [x[:-1] for x in f if x[:-1] != ''] print(DS_EMB_PATH.format(key, i)) embs = embedding(sents, DS_EMB_PATH.format(key, i), ARCH) embs = embs[np.random.choice(len(embs), min(size, len(embs)), replace=False), :] mean_direction.append(np.mean(embs, axis = 0)) X.append(embs) Y.extend([i] * embs.shape[0]) X = np.concatenate(X, axis=0) Y = np.array(Y) trans_D = mean_direction[1] - mean_direction[0] # print(trans_D) # (Target_sents, Target_X) is from target domain. # Target_X are sentence embeddings. Target_sents are original sentences. 
f = open(TRAIN_PATH, 'r') Target_sents = [x[:-1] for x in f if x[:-1] != ''] f.close() # trunk DEFEND Target_X = embedding(Target_sents, TRAIN_EMB_PATH, ARCH) rand_idx = np.random.permutation(Target_X.shape[0]) shuffled_target_X = Target_X[rand_idx, :] if(not NO_BALANCE): Target_sents, Target_X = balance(key, Target_sents, Target_X) else: Target_X = Target_X[rand_idx, :] Target_sents = [Target_sents[i] for i in rand_idx] Target_X = Target_X[:1000, :] Target_sents = Target_sents[:1000] # print(Target_sents[0]) Target_Y = np.array([int(key in x) for x in Target_sents]) sents = [x.split('\t') for x in Target_sents if x[:-1] != ''] # print(sents) # print(sents[0]) if(DATASET == 'medical'): target_util_y = np.array([int(s[1]) for s in sents]) else: target_util_y = np.array([0 for s in sents]) # print("Balanced: {}".format(np.mean(Target_Y))) # now the target Y here is the sensitive label if(use_dp): protected_target_X = defense(Target_X, Target_Y) # print(Target_X[0, :]) # print(torch.sum(protected_target_X[0, :])) # (X_valid, Y_valid) is from valid set. # SVM: This is regarded as shadow corpus of Target domain. # DANN or MLP: This is used to early stop. # X_valid are sentence embeddings. Y_valid are labels. raw_valid, X_valid = list(open(TARGET_PATH, 'r')), np.load(TARGET_EMB_PATH + '.' + ARCH + '.npy') if(not NO_BALANCE): raw_valid, X_valid = balance(key, raw_valid, X_valid) Y_valid = np.array([int(key in x) for x in raw_valid]) # load the utility model if(DATASET == 'medical'): util_clf = NonLinearClassifier(key, EMB_DIM_TABLE[ARCH], HIDDEN_DIM, cls_num = 10) # util_clf.load_state_dict(torch.load(UTIL_MODEL_PATH + "medical_functional_{}.cpt".format(ARCH))) util_clf.cuda() preds = util_clf.predict(Target_X) util_acc = np.mean(preds == target_util_y) # print("Util Acc. {:.4f}".format(acc)) if(use_dp): protected_target_X = torch.FloatTensor(protected_target_X) preds = util_clf.predict(protected_target_X) protected_util_acc = np.mean(preds == target_util_y) if(VERBOSE): print("TRAINING SET SIZE: {}".format(len(Y))) print("EMBEDDINGS FROM TARGET DOMAIN: {}".format(len(Target_Y))) print("TEST SET SIZE: {}".format(len(Y_valid))) # learn a transfer print("TESTING MODEL: {}".format(CLS)) acc, protected_acc = 0.0, 0.0 util_acc, protected_util_acc = 0.0, 0.0 if CLS == 'MLP': print("Histogram of the Target Y: {}".format(np.histogram(Target_Y))) clf = NonLinearClassifier(key, EMB_DIM_TABLE[ARCH], HIDDEN_DIM) clf.cuda() clf.fit(X, Y) # assume the existence of the model acc = clf._evaluate(Target_X, Target_Y) if(use_dp): protected_target_X = torch.FloatTensor(protected_target_X) protected_acc = clf._evaluate(protected_target_X, Target_Y) elif CLS == 'SVM': # for discussion REVERSE = False # shadow clf = SVC(kernel='{}'.format(SVM_KERNEL), gamma='scale', verbose=VERBOSE, max_iter = 5000) # print(X_valid) # print(Y_valid) if(REVERSE): clf.fit(Target_X, Target_Y) else: start = time.time() clf.fit(X_valid, Y_valid) end = time.time() print("Time: {}".format(end - start)) # if(defense): # the common approach if(REVERSE): preds = clf.predict(X_valid) acc = np.mean(preds == Y_valid) else: preds = clf.predict(Target_X) acc = np.mean(preds == Target_Y) # print(acc) if(use_dp): preds = clf.predict(protected_target_X) protected_acc = np.mean(preds == Target_Y) elif CLS == 'DANN': # I have no idea whether the 1000 is. 
DANN_CPT_PATHs = DANN_CPT_PATH + "{}_cracker_{}.cpt".format(key, ARCH) clf = DANN(input_size=EMB_DIM_TABLE[ARCH], maxiter=MAXITER, verbose=VERBOSE, name=key, batch_size=BATCH_SIZE, lambda_adapt=LAMDA, hidden_layer_size=HIDDEN, cached = DANN_CACHED, cpt_path = DANN_CPT_PATHs) # clf.cuda() # set the size of the Target_X trans_D = 0.5 * trans_D concated_Target_X = np.concatenate([Target_X - trans_D, Target_X + trans_D], axis = 0) clf.fit(X, Y, X_adapt=concated_Target_X, X_valid=Target_X - trans_D, Y_valid=Target_Y) Target_X = torch.FloatTensor(Target_X-trans_D) acc = clf.validate(Target_X, Target_Y) # print(acc) if(use_dp): protected_target_X = torch.FloatTensor(protected_target_X).cuda() protected_acc = clf.validate(protected_target_X, Target_Y) # print("Target Domain Inference {} Acc: {:.3f}".format(key, acc)) # return acc elif CLS == 'MLP_SHADOW': clf = NonLinearClassifier(key, EMB_DIM_TABLE[ARCH], HIDDEN_DIM) clf.cuda() clf.fit(X_valid, Y_valid) acc = clf._evaluate(Target_X, Target_Y) else: clf = None print('wrong cls\' name') return acc, protected_acc, util_acc, protected_util_acc # # predict on Target_X # acc = clf._evaluate(Target_X, Target_Y) # # results = np.zeros((2, 2)) # # count = 0 # # for i, sent in enumerate(list(Target_sents)): # # pred_ = int(clf.predict([Target_X[i]])[0]) # # truth_ = int(key in sent) # # results[pred_][truth_] += 1 # # count += 1 # # results /= (count * 1.0) # # acc = results[0][0] + results[1][1] # print("Target Domain Inference {} Acc: {:.3f} Protected: {:.4f}".format(key, acc, protected_acc)) # return acc if __name__ == '__main__': DELTA_TABLE = { "bert": 81.82, 'gpt' : 73.19, 'gpt2': 110.2, 'xl': 17.09, 'xlnet': 601.5, 'xlm': 219.4, 'roberta': 4.15, 'ernie': 28.20 } print(FUNCTION) if(FUNCTION == 'atk'): # DS_prepare() # EX_DS_prepare() # init a defense to test Source_Acc_sum = 0 Target_Acc_sum = 0 Target_Acc_list = [] # data_embedding() _def = initialize_defense('rounding', decimals = 0) protected_avg_acc = 0.0 for key in cls_names: TA, protected_acc, _, _ = ATTACK(key, use_dp = False, defense = _def, size = ARGS.ext_size) Target_Acc_sum += TA protected_avg_acc += protected_acc Target_Acc_list.append([key, TA, protected_acc]) print('INFER {} ACC: {:.4f} Protected Acc.: {:.4f}'.format(key,TA, protected_acc)) print('Keyword Attacker {} on {} Embeddings'.format(CLS, ARCH)) for KT in Target_Acc_list: print('INFER {} ACC: {:.4f} Protected Acc.: {:.4f}'.format(KT[0], KT[1], KT[2])) print('Target_Acc_Top1_Average: {:.4f} Protected Target_Acc_Average: {:.4f}'.format(Target_Acc_sum / len(cls_names), protected_avg_acc / len(cls_names))) elif(FUNCTION == 'util'): compute_utility() elif(FUNCTION == 'def'): DEFENSE = 'rounding' print('Keyword Attacker {} /Defense {} on {} Embeddings'.format(CLS, DEFENSE, ARCH)) defenses = [] if(DEFENSE == 'rounding'): for i in range(10): defenses.append((i, "rounding to {} decimals".format(i), initialize_defense('rounding', decimals = i))) elif(DEFENSE == 'dp'): eps_list = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000] for eps in eps_list: defenses.append((eps, "laplace with eps {}".format(eps), initialize_defense("dp", delta = DELTA_TABLE[ARCH], eps = eps))) else: eps_list = [0.001, 0.005, 0.01, 0.1, 0.5, 1.0] for eps in eps_list: defenses.append((eps, "minmax with eps {}".format(eps), initialize_defense("minmax", cls_num = 10, eps = eps))) RESULTS = list() for defense in defenses: param, descript, _def = defense Source_Acc_sum = 0 Target_Acc_sum = 0 Target_Acc_list = [] print("Evaluate {} 
with Defense {}".format(ARCH, descript)) # data_embedding() # _def = initialize_defense('rounding', decimals = 0) protected_avg_acc = 0.0 for key in cls_names: TA, protected_acc, util, protected_util = ATTACK(key, use_dp = True, defense = _def) Target_Acc_sum += TA protected_avg_acc += protected_acc Target_Acc_list.append([key, TA, protected_acc, util, protected_util]) # for KT in Target_Acc_list: # print('INFER {} ACC: {:.4f} Protected Acc.: {:.4f} Util: {:.4f} Protected Util: {:.4f}'.format(KT[0], KT[1], KT[2], KT[3], KT[4])) RESULTS.append([(param, Target_Acc_list)]) print("ARCH: {} \n RESULTS: {}".format(ARCH, RESULTS)) elif(FUNCTION == 'prepare'): data_embedding()
_evaluate_topk
identifier_name
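The `identifier_name` target above is `_evaluate_topk`, whose body (visible in the suffix) counts a sample as correct when the true label appears among the k highest logits. A self-contained sketch of that semantics with hypothetical tensors:

```python
import numpy as np
import torch

logits = torch.tensor([[0.1, 0.7, 0.2],
                       [0.6, 0.3, 0.1]])
_, top2 = torch.topk(logits, 2)   # row-wise indices: [1, 2] and [0, 1]
y = np.array([2, 2])
acc = np.mean([int(y[i] in top2.numpy()[i, :]) for i in range(len(y))])
# acc == 0.5: class 2 is in the top-2 of the first row but not the second
```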
adv_YAN.py
import random import os import numpy as np import torch import torch.utils.data as data_utils import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from sklearn.svm import SVC from torch.nn import GRU, Embedding, Linear from util import Embedder from tools import balance from tqdm import tqdm from DANN import DANN import argparse from defense import initialize_defense import time parser = argparse.ArgumentParser(description='Medical Attack') parser.add_argument("-p", type=int, default= 5555, help = 'the comm port the client will use') parser.add_argument("-c", action='store_true', help = 'whether to use cached model') parser.add_argument("-t", action='store_true', help = "to switch between training or testing") # parser.add_argument("--save_p", type=str, default="default", help = 'the place to store the model') parser.add_argument("-a", type=str, default='bert', help = 'targeted architecture') parser.add_argument("-d", type=str, default='none', help = 'the type of defense to do') parser.add_argument("--clf", type = str, default='SVM', help = 'the type of attack model to use') parser.add_argument("-v", action='store_true', help = 'whether to be wordy') parser.add_argument("-f", type=str, default='atk', help = 'to specify the functional') parser.add_argument("--ext_size", type=int, default=2000, help = 'the size of the ext corpus') parser.add_argument("--tgt_size", type=int, default=2000, help = 'the size of the target set which is visible to the adversary') parser.add_argument("--dataset", type = str, default = "medical", help = 'dataset') ARGS = parser.parse_args() ARCH = ARGS.a CLS = ARGS.clf CLS_NUM = 10 VERBOSE = ARGS.v #SVM parameter SVM_KERNEL = 'linear' # DANN parameter MAXITER = 1000 BATCH_SIZE = 128 LAMDA = 1.0 FUNCTION = ARGS.f # DANN_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/DANN_CPT/' # DANN_CACHED = False # MLP parameter EPOCH = 1000 HIDDEN_DIM = 80 BATCH_SIZE = 128 LEARNING_RATE = 0.001 PRINT_FREQ = 100 K = 5 DATASET = ARGS.dataset NO_BALANCE = False # CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/MLP_CPT/' # CPT_PATH = 'data/part_fake_5/MLP_CPT/' if(not ARGS.t): DANN_CPT_PATH = 'data/part_fake_5/DANN_CPT/' DANN_CACHED = False CPT_PATH = 'data/part/MLP_CPT/' CACHED = False else: # toggle it to use Yan's pretrained model DANN_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/DANN_CPT/' CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/MLP_CPT/' DANN_CACHED = True CACHED = True DELTA_TABLE = { "bert": 81.82, 'gpt' : 73.19, 'gpt2': 110.2, 'xl': 17.09, 'xlnet': 601.5, 'xlm': 219.4, 'roberta': 4.15, 'ernie': 28.20 } DEVICE = torch.device('cuda:0') # os.environ["CUDA_VISIBLE_DEVICES"] = "1" if(DATASET == 'medical'): # LOCAL = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_4/' if(not NO_BALANCE): DS_LOCAL = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_5/' else: DS_LOCAL = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_6/' DS_PATH = DS_LOCAL + '{}.{}' DS_EMB_PATH = DS_LOCAL + '{}.{}' # DS_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/train' + '.{}.{}' # DS_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/EMB/{}/train'.format(ARCH) + '.{}.{}' TARGET_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.test.txt' TARGET_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.test.x' # TARGET_EMB_PATH = 'data/medical.test.x' TRAIN_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.train.txt' TRAIN_EMB_PATH 
= '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.train.x' # TRAIN_EMB_PATH = 'data/medical.train.x' if(not NO_BALANCE): cls_names = ["leg", "hand", "spine", "chest", "ankle", "head", "hip", "arm", "face", "shoulder"] else: cls_names = ["sack", "paltry", "settle", "lethal", "flagrant"] # cls_names = ['Hong Kong','London','Toronto','Paris','Rome'] else: cls_names = ['Hong Kong','London','Toronto','Paris','Rome','Sydney','Dubai','Bangkok','Singapore','Frankfurt'] # cls_names = ["sack", "paltry", "settle", "lethal", "flagrant"] TARGET_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/valid.txt' TARGET_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/valid' TRAIN_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/test.txt' TRAIN_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/test' if(not NO_BALANCE): DS_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/train' + '.{}.{}' DS_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/EMB/{}/train'.format(ARCH) + '.{}.{}' else: DS_PATH = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_6/' + '{}.{}' DS_EMB_PATH = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_6/'+'{}.{}' DANN_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/DANN_CPT/' DANN_O_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/DANN_Without_Valid_CPT/' MLP_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/MLP_CPT/' MLP_O_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/MLP_Without_Valid_CPT/' UTIL_MODEL_PATH = 'data/part/MLP_CPT/' P_TABLE = { "bert-base": 5049, "bert": 5001, "gpt": 768, "gpt2": 768, "xl": 1024, "xlnet": 5002, "xlm": 5004, "roberta":5003, "ernie": 5005 } p = ARGS.p EMB_DIM_TABLE = { "bert-base": 768, # "bert": 768, "bert": 1024, "gpt": 768, "gpt2": 768, "xl": 1024, "xlnet": 768, "xlm": 1024, "roberta": 768, "ernie":768, "gpt-2-medium": 1024, "gpt-2-large": 1280 } HIDDEN = 100 # EMB_DIM_TABLE[ARCH] embedder = Embedder(p) embedding = embedder.embedding # export the functional port def data_embedding(): f = open(TARGET_PATH, 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, TARGET_EMB_PATH, ARCH) f = open(TRAIN_PATH, 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, TRAIN_EMB_PATH, ARCH) for key in cls_names: for i in [0, 1]: f = open(DS_PATH.format(key, i) + '.txt', 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, DS_EMB_PATH.format(key, i), ARCH) def DANNA(key, size=2000): X, Y = [], [] for i in [0, 1]: # while my training data is from gpt f = open(DS_PATH.format(key, i) + '.txt', 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, DS_EMB_PATH.format(key, i), ARCH) embs = embs[np.random.choice(len(embs), min(size, len(embs)), replace=False), :] X.append(embs) Y.extend([i] * embs.shape[0]) X = np.concatenate(X, axis=0) Y = np.array(Y) train_embs = np.load(TRAIN_EMB_PATH + '.' + ARCH + '.npy') # load validation set (let us load gpt2) raw_valid, X_valid = list(open(TARGET_PATH, 'r')), np.load(TARGET_EMB_PATH + '.' 
+ ARCH + '.npy') # the potato case is somehow necessary, because it is the case where all the answers should be negative if (key != 'potato'): raw_valid, X_valid = balance(key, raw_valid, X_valid) print(len(raw_valid)) Y_valid = np.array([(key in x) for x in raw_valid]) # learn a transfer clf = DANN(input_size=EMB_DIM_TABLE[ARCH], maxiter=4000, verbose=VERBOSE, name=key, batch_size=BATCH_SIZE, lambda_adapt=LAMDA, hidden_layer_size=HIDDEN) acc = clf.fit(X, Y, X_adapt=train_embs, X_valid=X_valid, Y_valid=Y_valid) return acc class NonLinearClassifier(nn.Module): def __init__(self, key, embedding_size, hidden_size, cls_num=2, device=DEVICE): super(NonLinearClassifier, self).__init__() self.fc1 = Linear(embedding_size, hidden_size) self.fc2 = Linear(hidden_size, cls_num) self.device = device self.criterion = nn.CrossEntropyLoss() self.key = key def forward(self, x): x = torch.sigmoid(self.fc1(x)) x = self.fc2(x) # , dim=0) return x def predict(self, x): x = torch.FloatTensor(x) # print(x) outputs = self(x.cuda()) # print(outputs) _, preds = torch.max(outputs, 1) # print(preds) return preds.cpu().numpy() def predict_topk(self, x, k=5): with torch.no_grad(): probs = self(x) _, topk = torch.topk(probs, k) return topk.cpu().numpy() def loss(self, x, y): x = self(x) _loss = self.criterion(x, y) return _loss def _evaluate(self, x, y): preds = self.predict(x) # y = y.numpy() return np.mean(preds == y) def _evaluate_topk(self, x, y, k = K): # y = y.numpy() with torch.no_grad(): probs = self(x) _, topk = torch.topk(probs, k) topk = topk.cpu().numpy() acc = [int(y[i] in topk[i, :]) for i in range(len(y))] return np.mean(acc) def fit(self, X, Y, test_X = None, test_Y = None, epoch_num=EPOCH): # 2000, 4000 y_cpu = Y.copy() # self.cuda() X = torch.FloatTensor(X) Y = torch.LongTensor(Y) if(FUNCTION == 'util'): test_X = torch.FloatTensor(test_X) test_Y = torch.LongTensor(test_Y) model_path = CPT_PATH+ "{}_cracker_{}.cpt".format(self.key, ARCH) if (CACHED and os.path.exists(model_path)): print("Loading Model from {} ...".format(model_path)) self.load_state_dict(torch.load(model_path)) # X = X.cuda() # Y = torch.LongTensor(Y) preds = self.predict(X) correct = np.sum(preds == y_cpu) correct = correct / len(y_cpu) # print("Source Domain batch Acc.: {:.4f}".format(correct)) return ds = data_utils.TensorDataset(X, Y) train_loader = data_utils.DataLoader(ds, batch_size=BATCH_SIZE, shuffle=True, pin_memory=True) counter = 0 best_acc = 0.0 if(FUNCTION == 'util'): test_ds = data_utils.TensorDataset(test_X, test_Y) test_loader = data_utils.DataLoader(test_ds, batch_size = BATCH_SIZE, shuffle = True) for epoch in tqdm(range(epoch_num)): running_loss = 0.0 criterion = nn.CrossEntropyLoss() # optimizer = optim.SGD(self.parameters(), lr=0.1, momentum = 0.9) optimizer = optim.Adam(self.parameters(), lr=LEARNING_RATE, weight_decay=1e-5) for i, data in enumerate(train_loader): # get the inputs; data is a list of [inputs, labels] inputs, labels = data inputs, labels = inputs.cuda(), labels.cuda() # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = self(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics running_loss += loss.item() counter += 1 if ((epoch+1) % 1000 == 0): print('Epoch %d loss: %.5f Count: %d' % (epoch + 1, running_loss, counter)) running_loss = 0.0 counter = 0 preds = self.predict(X) correct = np.sum(preds == y_cpu) print(np.histogram(preds, bins = 2)) print(np.histogram(y_cpu, bins = 2)) correct = correct / len(y_cpu) # 
print("Source Domain batch Acc.: {:.4f}".format(correct)) if(FUNCTION == 'util'): top1 = util_early_stopping_evaluate(self, test_loader) else: top1 = early_stopping_evaluate(self, self.key) print("Early stopping Acc.: {:4f}".format(top1)) if (top1 >= best_acc): best_acc = top1 # torch.save(self.state_dict(),CPT_PATH + "{}_cracker_{}.cpt".format(self.key, ARCH)) print("Save Model {:.4f}".format(top1)) torch.save(self.state_dict(),CPT_PATH + "medical_functional_{}.cpt".format(ARCH)) print("save_path: {}".format(CPT_PATH + "medical_functional_{}.cpt".format(ARCH))) print("Early stopping set Infer {} Best acc top1. {:.4f}".format(self.key, best_acc)) def compute_utility(): print("Evaluate {} Utility".format(ARCH)) sents = [x[:-1].split('\t') for x in open(TRAIN_PATH, 'r') if x[:-1] != ''] sents_train = [x[0] for x in sents] # print(sents[0]) train_y = np.array([int(s[1]) for s in sents]) sents = [x[:-1].split('\t') for x in open(TARGET_PATH, 'r') if x[:-1] != ''] sents_test = [x[0] for x in sents] test_y = np.array([int(s[1]) for s in sents]) print(len(train_y)) print(len(test_y)) print(sents_train[0]) print(sents_test[0]) train_x = embedding(sents_train, TRAIN_EMB_PATH, ARCH) test_x = embedding(sents_test, TARGET_EMB_PATH, ARCH) if(CLS == 'MLP'): clf = NonLinearClassifier('', EMB_DIM_TABLE[ARCH], HIDDEN_DIM, cls_num = 10) clf.cuda() clf.fit(train_x, train_y, test_x, test_y) # assume the existence of the model acc = clf._evaluate(test_x, test_y) print("Acc. {:.4f}".format(acc)) return def util_early_stopping_evaluate(clf, dataloader): count = 0 correct = 0 for x, y in dataloader: correct += np.sum(y.numpy() == clf.predict(x)) count += x.shape[0] return correct / (count * 1.0) def early_stopping_evaluate(clf, key): # load the early stopping set f = open(TARGET_PATH, 'r') target_f = [x[:-1] for x in f if x[:-1] != ''] f.close() target_embs = embedding(target_f, TARGET_EMB_PATH, ARCH) if(not NO_BALANCE): target_f, target_embs = balance(key, target_f, target_embs) else: target_f = target_f[:1000] target_embs = target_embs[:1000, :] results = np.zeros((2, 2)) count = 0 for i, sent in enumerate(list(target_f)): pred_ = clf.predict([target_embs[i]])[0] truth_ = int(key in sent) results[pred_][truth_] += 1 count += 1 results /= (count * 1.0) acc = results[0][0] + results[1][1] # print("early_stopping set Inference {} Acc: {:.3f}".format(key, results[0][0] + results[1][1])) return acc def ATTACK(key, use_dp=False, defense=None, verbose=VERBOSE, size = 2000): # (X, Y) is from external corpus. # X are sentence embeddings. Y are labels. # To prepare an external corpus, we substitute the food keywords in Yelp dataset to body keywords. ## GET THE TRAINING DATA, NO NEED TO DEFEND X, Y = [], [] mean_direction = [] for i in [0, 1]: # while my training data is from gpt f = open(DS_PATH.format(key, i) + '.txt', 'r') sents = [x[:-1] for x in f if x[:-1] != ''] print(DS_EMB_PATH.format(key, i)) embs = embedding(sents, DS_EMB_PATH.format(key, i), ARCH) embs = embs[np.random.choice(len(embs), min(size, len(embs)), replace=False), :] mean_direction.append(np.mean(embs, axis = 0)) X.append(embs) Y.extend([i] * embs.shape[0]) X = np.concatenate(X, axis=0) Y = np.array(Y) trans_D = mean_direction[1] - mean_direction[0] # print(trans_D) # (Target_sents, Target_X) is from target domain. # Target_X are sentence embeddings. Target_sents are original sentences. 
f = open(TRAIN_PATH, 'r') Target_sents = [x[:-1] for x in f if x[:-1] != ''] f.close() # trunk DEFEND Target_X = embedding(Target_sents, TRAIN_EMB_PATH, ARCH) rand_idx = np.random.permutation(Target_X.shape[0]) shuffled_target_X = Target_X[rand_idx, :] if(not NO_BALANCE): Target_sents, Target_X = balance(key, Target_sents, Target_X) else: Target_X = Target_X[rand_idx, :] Target_sents = [Target_sents[i] for i in rand_idx] Target_X = Target_X[:1000, :] Target_sents = Target_sents[:1000] # print(Target_sents[0]) Target_Y = np.array([int(key in x) for x in Target_sents]) sents = [x.split('\t') for x in Target_sents if x[:-1] != ''] # print(sents) # print(sents[0]) if(DATASET == 'medical'): target_util_y = np.array([int(s[1]) for s in sents]) else: target_util_y = np.array([0 for s in sents]) # print("Balanced: {}".format(np.mean(Target_Y))) # now the target Y here is the sensitive label if(use_dp): protected_target_X = defense(Target_X, Target_Y) # print(Target_X[0, :]) # print(torch.sum(protected_target_X[0, :])) # (X_valid, Y_valid) is from valid set. # SVM: This is regarded as shadow corpus of Target domain. # DANN or MLP: This is used to early stop. # X_valid are sentence embeddings. Y_valid are labels. raw_valid, X_valid = list(open(TARGET_PATH, 'r')), np.load(TARGET_EMB_PATH + '.' + ARCH + '.npy') if(not NO_BALANCE): raw_valid, X_valid = balance(key, raw_valid, X_valid) Y_valid = np.array([int(key in x) for x in raw_valid]) # load the utility model if(DATASET == 'medical'): util_clf = NonLinearClassifier(key, EMB_DIM_TABLE[ARCH], HIDDEN_DIM, cls_num = 10) # util_clf.load_state_dict(torch.load(UTIL_MODEL_PATH + "medical_functional_{}.cpt".format(ARCH)))
if(use_dp): protected_target_X = torch.FloatTensor(protected_target_X) preds = util_clf.predict(protected_target_X) protected_util_acc = np.mean(preds == target_util_y) if(VERBOSE): print("TRAINING SET SIZE: {}".format(len(Y))) print("EMBEDDINGS FROM TARGET DOMAIN: {}".format(len(Target_Y))) print("TEST SET SIZE: {}".format(len(Y_valid))) # learn a transfer print("TESTING MODEL: {}".format(CLS)) acc, protected_acc = 0.0, 0.0 util_acc, protected_util_acc = 0.0, 0.0 if CLS == 'MLP': print("Histogram of the Target Y: {}".format(np.histogram(Target_Y))) clf = NonLinearClassifier(key, EMB_DIM_TABLE[ARCH], HIDDEN_DIM) clf.cuda() clf.fit(X, Y) # assume the existence of the model acc = clf._evaluate(Target_X, Target_Y) if(use_dp): protected_target_X = torch.FloatTensor(protected_target_X) protected_acc = clf._evaluate(protected_target_X, Target_Y) elif CLS == 'SVM': # for discussion REVERSE = False # shadow clf = SVC(kernel='{}'.format(SVM_KERNEL), gamma='scale', verbose=VERBOSE, max_iter = 5000) # print(X_valid) # print(Y_valid) if(REVERSE): clf.fit(Target_X, Target_Y) else: start = time.time() clf.fit(X_valid, Y_valid) end = time.time() print("Time: {}".format(end - start)) # if(defense): # the common approach if(REVERSE): preds = clf.predict(X_valid) acc = np.mean(preds == Y_valid) else: preds = clf.predict(Target_X) acc = np.mean(preds == Target_Y) # print(acc) if(use_dp): preds = clf.predict(protected_target_X) protected_acc = np.mean(preds == Target_Y) elif CLS == 'DANN': # I have no idea whether the 1000 is. DANN_CPT_PATHs = DANN_CPT_PATH + "{}_cracker_{}.cpt".format(key, ARCH) clf = DANN(input_size=EMB_DIM_TABLE[ARCH], maxiter=MAXITER, verbose=VERBOSE, name=key, batch_size=BATCH_SIZE, lambda_adapt=LAMDA, hidden_layer_size=HIDDEN, cached = DANN_CACHED, cpt_path = DANN_CPT_PATHs) # clf.cuda() # set the size of the Target_X trans_D = 0.5 * trans_D concated_Target_X = np.concatenate([Target_X - trans_D, Target_X + trans_D], axis = 0) clf.fit(X, Y, X_adapt=concated_Target_X, X_valid=Target_X - trans_D, Y_valid=Target_Y) Target_X = torch.FloatTensor(Target_X-trans_D) acc = clf.validate(Target_X, Target_Y) # print(acc) if(use_dp): protected_target_X = torch.FloatTensor(protected_target_X).cuda() protected_acc = clf.validate(protected_target_X, Target_Y) # print("Target Domain Inference {} Acc: {:.3f}".format(key, acc)) # return acc elif CLS == 'MLP_SHADOW': clf = NonLinearClassifier(key, EMB_DIM_TABLE[ARCH], HIDDEN_DIM) clf.cuda() clf.fit(X_valid, Y_valid) acc = clf._evaluate(Target_X, Target_Y) else: clf = None print('wrong cls\' name') return acc, protected_acc, util_acc, protected_util_acc # # predict on Target_X # acc = clf._evaluate(Target_X, Target_Y) # # results = np.zeros((2, 2)) # # count = 0 # # for i, sent in enumerate(list(Target_sents)): # # pred_ = int(clf.predict([Target_X[i]])[0]) # # truth_ = int(key in sent) # # results[pred_][truth_] += 1 # # count += 1 # # results /= (count * 1.0) # # acc = results[0][0] + results[1][1] # print("Target Domain Inference {} Acc: {:.3f} Protected: {:.4f}".format(key, acc, protected_acc)) # return acc if __name__ == '__main__': DELTA_TABLE = { "bert": 81.82, 'gpt' : 73.19, 'gpt2': 110.2, 'xl': 17.09, 'xlnet': 601.5, 'xlm': 219.4, 'roberta': 4.15, 'ernie': 28.20 } print(FUNCTION) if(FUNCTION == 'atk'): # DS_prepare() # EX_DS_prepare() # init a defense to test Source_Acc_sum = 0 Target_Acc_sum = 0 Target_Acc_list = [] # data_embedding() _def = initialize_defense('rounding', decimals = 0) protected_avg_acc = 0.0 for key in cls_names: TA, 
protected_acc, _, _ = ATTACK(key, use_dp = False, defense = _def, size = ARGS.ext_size) Target_Acc_sum += TA protected_avg_acc += protected_acc Target_Acc_list.append([key, TA, protected_acc]) print('INFER {} ACC: {:.4f} Protected Acc.: {:.4f}'.format(key,TA, protected_acc)) print('Keyword Attacker {} on {} Embeddings'.format(CLS, ARCH)) for KT in Target_Acc_list: print('INFER {} ACC: {:.4f} Protected Acc.: {:.4f}'.format(KT[0], KT[1], KT[2])) print('Target_Acc_Top1_Average: {:.4f} Protected Target_Acc_Average: {:.4f}'.format(Target_Acc_sum / len(cls_names), protected_avg_acc / len(cls_names))) elif(FUNCTION == 'util'): compute_utility() elif(FUNCTION == 'def'): DEFENSE = 'rounding' print('Keyword Attacker {} /Defense {} on {} Embeddings'.format(CLS, DEFENSE, ARCH)) defenses = [] if(DEFENSE == 'rounding'): for i in range(10): defenses.append((i, "rounding to {} decimals".format(i), initialize_defense('rounding', decimals = i))) elif(DEFENSE == 'dp'): eps_list = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000] for eps in eps_list: defenses.append((eps, "laplace with eps {}".format(eps), initialize_defense("dp", delta = DELTA_TABLE[ARCH], eps = eps))) else: eps_list = [0.001, 0.005, 0.01, 0.1, 0.5, 1.0] for eps in eps_list: defenses.append((eps, "minmax with eps {}".format(eps), initialize_defense("minmax", cls_num = 10, eps = eps))) RESULTS = list() for defense in defenses: param, descript, _def = defense Source_Acc_sum = 0 Target_Acc_sum = 0 Target_Acc_list = [] print("Evaluate {} with Defense {}".format(ARCH, descript)) # data_embedding() # _def = initialize_defense('rounding', decimals = 0) protected_avg_acc = 0.0 for key in cls_names: TA, protected_acc, util, protected_util = ATTACK(key, use_dp = True, defense = _def) Target_Acc_sum += TA protected_avg_acc += protected_acc Target_Acc_list.append([key, TA, protected_acc, util, protected_util]) # for KT in Target_Acc_list: # print('INFER {} ACC: {:.4f} Protected Acc.: {:.4f} Util: {:.4f} Protected Util: {:.4f}'.format(KT[0], KT[1], KT[2], KT[3], KT[4])) RESULTS.append([(param, Target_Acc_list)]) print("ARCH: {} \n RESULTS: {}".format(ARCH, RESULTS)) elif(FUNCTION == 'prepare'): data_embedding()
util_clf.cuda() preds = util_clf.predict(Target_X) util_acc = np.mean(preds == target_util_y) # print("Util Acc. {:.4f}".format(acc))
random_line_split
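For context on the row above: the `initialize_defense("dp", delta = DELTA_TABLE[ARCH], eps = eps)` sweep applies Laplace noise to the embeddings before the attack is re-run. A minimal Python sketch of such a Laplace-mechanism defense follows; the function name and signature are illustrative assumptions, since the actual `defense` module is not included in this file:

import numpy as np

def laplace_defense(embeddings, delta, eps):
    # Standard Laplace mechanism: noise scale = sensitivity / epsilon.
    # Here `delta` plays the role of the per-architecture sensitivity
    # (cf. DELTA_TABLE). Illustrative sketch only, not the real module.
    scale = delta / eps
    return embeddings + np.random.laplace(0.0, scale, size=embeddings.shape)

Smaller `eps` values in the sweep give a larger noise scale, which is why the script evaluates both attack accuracy and utility accuracy at each point.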
adv_YAN.py
import random import os import numpy as np import torch import torch.utils.data as data_utils import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from sklearn.svm import SVC from torch.nn import GRU, Embedding, Linear from util import Embedder from tools import balance from tqdm import tqdm from DANN import DANN import argparse from defense import initialize_defense import time parser = argparse.ArgumentParser(description='Medical Attack') parser.add_argument("-p", type=int, default= 5555, help = 'the comm port the client will use') parser.add_argument("-c", action='store_true', help = 'whether to use cached model') parser.add_argument("-t", action='store_true', help = "to switch between training or testing") # parser.add_argument("--save_p", type=str, default="default", help = 'the place to store the model') parser.add_argument("-a", type=str, default='bert', help = 'targeted architecture') parser.add_argument("-d", type=str, default='none', help = 'the type of defense to do') parser.add_argument("--clf", type = str, default='SVM', help = 'the type of attack model to use') parser.add_argument("-v", action='store_true', help = 'whether to be wordy') parser.add_argument("-f", type=str, default='atk', help = 'to specify the functional') parser.add_argument("--ext_size", type=int, default=2000, help = 'the size of the ext corpus') parser.add_argument("--tgt_size", type=int, default=2000, help = 'the size of the target set which is visible to the adversary') parser.add_argument("--dataset", type = str, default = "medical", help = 'dataset') ARGS = parser.parse_args() ARCH = ARGS.a CLS = ARGS.clf CLS_NUM = 10 VERBOSE = ARGS.v #SVM parameter SVM_KERNEL = 'linear' # DANN parameter MAXITER = 1000 BATCH_SIZE = 128 LAMDA = 1.0 FUNCTION = ARGS.f # DANN_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/DANN_CPT/' # DANN_CACHED = False # MLP parameter EPOCH = 1000 HIDDEN_DIM = 80 BATCH_SIZE = 128 LEARNING_RATE = 0.001 PRINT_FREQ = 100 K = 5 DATASET = ARGS.dataset NO_BALANCE = False # CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/MLP_CPT/' # CPT_PATH = 'data/part_fake_5/MLP_CPT/' if(not ARGS.t): DANN_CPT_PATH = 'data/part_fake_5/DANN_CPT/' DANN_CACHED = False CPT_PATH = 'data/part/MLP_CPT/' CACHED = False else: # toggle it to use Yan's pretrained model DANN_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/DANN_CPT/' CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_5/MLP_CPT/' DANN_CACHED = True CACHED = True DELTA_TABLE = { "bert": 81.82, 'gpt' : 73.19, 'gpt2': 110.2, 'xl': 17.09, 'xlnet': 601.5, 'xlm': 219.4, 'roberta': 4.15, 'ernie': 28.20 } DEVICE = torch.device('cuda:0') # os.environ["CUDA_VISIBLE_DEVICES"] = "1" if(DATASET == 'medical'): # LOCAL = '/DATACENTER/data/yyf/Py/bert_privacy/data/part_fake_4/' if(not NO_BALANCE): DS_LOCAL = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_5/' else: DS_LOCAL = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_6/' DS_PATH = DS_LOCAL + '{}.{}' DS_EMB_PATH = DS_LOCAL + '{}.{}' # DS_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/train' + '.{}.{}' # DS_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/EMB/{}/train'.format(ARCH) + '.{}.{}' TARGET_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.test.txt' TARGET_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.test.x' # TARGET_EMB_PATH = 'data/medical.test.x' TRAIN_PATH = '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.train.txt' TRAIN_EMB_PATH 
= '/DATACENTER/data/yyf/Py/bert_privacy/data/medical.train.x' # TRAIN_EMB_PATH = 'data/medical.train.x' if(not NO_BALANCE): cls_names = ["leg", "hand", "spine", "chest", "ankle", "head", "hip", "arm", "face", "shoulder"] else: cls_names = ["sack", "paltry", "settle", "lethal", "flagrant"] # cls_names = ['Hong Kong','London','Toronto','Paris','Rome'] else: cls_names = ['Hong Kong','London','Toronto','Paris','Rome','Sydney','Dubai','Bangkok','Singapore','Frankfurt'] # cls_names = ["sack", "paltry", "settle", "lethal", "flagrant"] TARGET_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/valid.txt' TARGET_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/valid' TRAIN_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/test.txt' TRAIN_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/Target/test' if(not NO_BALANCE): DS_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/train' + '.{}.{}' DS_EMB_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/EX_part/EMB/{}/train'.format(ARCH) + '.{}.{}' else: DS_PATH = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_6/' + '{}.{}' DS_EMB_PATH = '/DATACENTER/data/pxd/bert_privacy/data/part_fake_6/'+'{}.{}' DANN_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/DANN_CPT/' DANN_O_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/DANN_Without_Valid_CPT/' MLP_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/MLP_CPT/' MLP_O_CPT_PATH = '/DATACENTER/data/yyf/Py/bert_privacy_Yan/data/Airline/MLP_Without_Valid_CPT/' UTIL_MODEL_PATH = 'data/part/MLP_CPT/' P_TABLE = { "bert-base": 5049, "bert": 5001, "gpt": 768, "gpt2": 768, "xl": 1024, "xlnet": 5002, "xlm": 5004, "roberta":5003, "ernie": 5005 } p = ARGS.p EMB_DIM_TABLE = { "bert-base": 768, # "bert": 768, "bert": 1024, "gpt": 768, "gpt2": 768, "xl": 1024, "xlnet": 768, "xlm": 1024, "roberta": 768, "ernie":768, "gpt-2-medium": 1024, "gpt-2-large": 1280 } HIDDEN = 100 # EMB_DIM_TABLE[ARCH] embedder = Embedder(p) embedding = embedder.embedding # export the functional port def data_embedding(): f = open(TARGET_PATH, 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, TARGET_EMB_PATH, ARCH) f = open(TRAIN_PATH, 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, TRAIN_EMB_PATH, ARCH) for key in cls_names: for i in [0, 1]: f = open(DS_PATH.format(key, i) + '.txt', 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, DS_EMB_PATH.format(key, i), ARCH) def DANNA(key, size=2000): X, Y = [], [] for i in [0, 1]: # while my training data is from gpt f = open(DS_PATH.format(key, i) + '.txt', 'r') sents = [x[:-1] for x in f if x[:-1] != ''] embs = embedding(sents, DS_EMB_PATH.format(key, i), ARCH) embs = embs[np.random.choice(len(embs), min(size, len(embs)), replace=False), :] X.append(embs) Y.extend([i] * embs.shape[0]) X = np.concatenate(X, axis=0) Y = np.array(Y) train_embs = np.load(TRAIN_EMB_PATH + '.' + ARCH + '.npy') # load validation set (let us load gpt2) raw_valid, X_valid = list(open(TARGET_PATH, 'r')), np.load(TARGET_EMB_PATH + '.' 
+ ARCH + '.npy') # the potato case is somehow necessary, because it is the case where all the answers should be negative if (key != 'potato'): raw_valid, X_valid = balance(key, raw_valid, X_valid) print(len(raw_valid)) Y_valid = np.array([(key in x) for x in raw_valid]) # learn a transfer clf = DANN(input_size=EMB_DIM_TABLE[ARCH], maxiter=4000, verbose=VERBOSE, name=key, batch_size=BATCH_SIZE, lambda_adapt=LAMDA, hidden_layer_size=HIDDEN) acc = clf.fit(X, Y, X_adapt=train_embs, X_valid=X_valid, Y_valid=Y_valid) return acc class NonLinearClassifier(nn.Module): def __init__(self, key, embedding_size, hidden_size, cls_num=2, device=DEVICE): super(NonLinearClassifier, self).__init__() self.fc1 = Linear(embedding_size, hidden_size) self.fc2 = Linear(hidden_size, cls_num) self.device = device self.criterion = nn.CrossEntropyLoss() self.key = key def forward(self, x): x = torch.sigmoid(self.fc1(x)) x = self.fc2(x) # , dim=0) return x def predict(self, x): x = torch.FloatTensor(x) # print(x) outputs = self(x.cuda()) # print(outputs) _, preds = torch.max(outputs, 1) # print(preds) return preds.cpu().numpy() def predict_topk(self, x, k=5): with torch.no_grad(): probs = self(x) _, topk = torch.topk(probs, k) return topk.cpu().numpy() def loss(self, x, y): x = self(x) _loss = self.criterion(x, y) return _loss def _evaluate(self, x, y): preds = self.predict(x) # y = y.numpy() return np.mean(preds == y) def _evaluate_topk(self, x, y, k = K): # y = y.numpy() with torch.no_grad(): probs = self(x) _, topk = torch.topk(probs, k) topk = topk.cpu().numpy() acc = [int(y[i] in topk[i, :]) for i in range(len(y))] return np.mean(acc) def fit(self, X, Y, test_X = None, test_Y = None, epoch_num=EPOCH): # 2000, 4000 y_cpu = Y.copy() # self.cuda() X = torch.FloatTensor(X) Y = torch.LongTensor(Y) if(FUNCTION == 'util'): test_X = torch.FloatTensor(test_X) test_Y = torch.LongTensor(test_Y) model_path = CPT_PATH+ "{}_cracker_{}.cpt".format(self.key, ARCH) if (CACHED and os.path.exists(model_path)): print("Loading Model from {} ...".format(model_path)) self.load_state_dict(torch.load(model_path)) # X = X.cuda() # Y = torch.LongTensor(Y) preds = self.predict(X) correct = np.sum(preds == y_cpu) correct = correct / len(y_cpu) # print("Source Domain batch Acc.: {:.4f}".format(correct)) return ds = data_utils.TensorDataset(X, Y) train_loader = data_utils.DataLoader(ds, batch_size=BATCH_SIZE, shuffle=True, pin_memory=True) counter = 0 best_acc = 0.0 if(FUNCTION == 'util'): test_ds = data_utils.TensorDataset(test_X, test_Y) test_loader = data_utils.DataLoader(test_ds, batch_size = BATCH_SIZE, shuffle = True) for epoch in tqdm(range(epoch_num)): running_loss = 0.0 criterion = nn.CrossEntropyLoss() # optimizer = optim.SGD(self.parameters(), lr=0.1, momentum = 0.9) optimizer = optim.Adam(self.parameters(), lr=LEARNING_RATE, weight_decay=1e-5) for i, data in enumerate(train_loader): # get the inputs; data is a list of [inputs, labels] inputs, labels = data inputs, labels = inputs.cuda(), labels.cuda() # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = self(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics running_loss += loss.item() counter += 1 if ((epoch+1) % 1000 == 0): print('Epoch %d loss: %.5f Count: %d' % (epoch + 1, running_loss, counter)) running_loss = 0.0 counter = 0 preds = self.predict(X) correct = np.sum(preds == y_cpu) print(np.histogram(preds, bins = 2)) print(np.histogram(y_cpu, bins = 2)) correct = correct / len(y_cpu) # 
print("Source Domain batch Acc.: {:.4f}".format(correct)) if(FUNCTION == 'util'): top1 = util_early_stopping_evaluate(self, test_loader) else: top1 = early_stopping_evaluate(self, self.key) print("Early stopping Acc.: {:4f}".format(top1)) if (top1 >= best_acc): best_acc = top1 # torch.save(self.state_dict(),CPT_PATH + "{}_cracker_{}.cpt".format(self.key, ARCH)) print("Save Model {:.4f}".format(top1)) torch.save(self.state_dict(),CPT_PATH + "medical_functional_{}.cpt".format(ARCH)) print("save_path: {}".format(CPT_PATH + "medical_functional_{}.cpt".format(ARCH))) print("Early stopping set Infer {} Best acc top1. {:.4f}".format(self.key, best_acc)) def compute_utility(): print("Evaluate {} Utility".format(ARCH)) sents = [x[:-1].split('\t') for x in open(TRAIN_PATH, 'r') if x[:-1] != ''] sents_train = [x[0] for x in sents] # print(sents[0]) train_y = np.array([int(s[1]) for s in sents]) sents = [x[:-1].split('\t') for x in open(TARGET_PATH, 'r') if x[:-1] != ''] sents_test = [x[0] for x in sents] test_y = np.array([int(s[1]) for s in sents]) print(len(train_y)) print(len(test_y)) print(sents_train[0]) print(sents_test[0]) train_x = embedding(sents_train, TRAIN_EMB_PATH, ARCH) test_x = embedding(sents_test, TARGET_EMB_PATH, ARCH) if(CLS == 'MLP'): clf = NonLinearClassifier('', EMB_DIM_TABLE[ARCH], HIDDEN_DIM, cls_num = 10) clf.cuda() clf.fit(train_x, train_y, test_x, test_y) # assume the existence of the model acc = clf._evaluate(test_x, test_y) print("Acc. {:.4f}".format(acc)) return def util_early_stopping_evaluate(clf, dataloader): count = 0 correct = 0 for x, y in dataloader: correct += np.sum(y.numpy() == clf.predict(x)) count += x.shape[0] return correct / (count * 1.0) def early_stopping_evaluate(clf, key): # load the early stopping set f = open(TARGET_PATH, 'r') target_f = [x[:-1] for x in f if x[:-1] != ''] f.close() target_embs = embedding(target_f, TARGET_EMB_PATH, ARCH) if(not NO_BALANCE): target_f, target_embs = balance(key, target_f, target_embs) else: target_f = target_f[:1000] target_embs = target_embs[:1000, :] results = np.zeros((2, 2)) count = 0 for i, sent in enumerate(list(target_f)): pred_ = clf.predict([target_embs[i]])[0] truth_ = int(key in sent) results[pred_][truth_] += 1 count += 1 results /= (count * 1.0) acc = results[0][0] + results[1][1] # print("early_stopping set Inference {} Acc: {:.3f}".format(key, results[0][0] + results[1][1])) return acc def ATTACK(key, use_dp=False, defense=None, verbose=VERBOSE, size = 2000): # (X, Y) is from external corpus. # X are sentence embeddings. Y are labels. # To prepare an external corpus, we substitute the food keywords in Yelp dataset to body keywords. ## GET THE TRAINING DATA, NO NEED TO DEFEND X, Y = [], [] mean_direction = [] for i in [0, 1]: # while my training data is from gpt f = open(DS_PATH.format(key, i) + '.txt', 'r') sents = [x[:-1] for x in f if x[:-1] != ''] print(DS_EMB_PATH.format(key, i)) embs = embedding(sents, DS_EMB_PATH.format(key, i), ARCH) embs = embs[np.random.choice(len(embs), min(size, len(embs)), replace=False), :] mean_direction.append(np.mean(embs, axis = 0)) X.append(embs) Y.extend([i] * embs.shape[0]) X = np.concatenate(X, axis=0) Y = np.array(Y) trans_D = mean_direction[1] - mean_direction[0] # print(trans_D) # (Target_sents, Target_X) is from target domain. # Target_X are sentence embeddings. Target_sents are original sentences. 
f = open(TRAIN_PATH, 'r') Target_sents = [x[:-1] for x in f if x[:-1] != ''] f.close() # trunk DEFEND Target_X = embedding(Target_sents, TRAIN_EMB_PATH, ARCH) rand_idx = np.random.permutation(Target_X.shape[0]) shuffled_target_X = Target_X[rand_idx, :] if(not NO_BALANCE): Target_sents, Target_X = balance(key, Target_sents, Target_X) else: Target_X = Target_X[rand_idx, :] Target_sents = [Target_sents[i] for i in rand_idx] Target_X = Target_X[:1000, :] Target_sents = Target_sents[:1000] # print(Target_sents[0]) Target_Y = np.array([int(key in x) for x in Target_sents]) sents = [x.split('\t') for x in Target_sents if x[:-1] != ''] # print(sents) # print(sents[0]) if(DATASET == 'medical'): target_util_y = np.array([int(s[1]) for s in sents]) else: target_util_y = np.array([0 for s in sents]) # print("Balanced: {}".format(np.mean(Target_Y))) # now the target Y here is the sensitive label if(use_dp): protected_target_X = defense(Target_X, Target_Y) # print(Target_X[0, :]) # print(torch.sum(protected_target_X[0, :])) # (X_valid, Y_valid) is from valid set. # SVM: This is regarded as shadow corpus of Target domain. # DANN or MLP: This is used to early stop. # X_valid are sentence embeddings. Y_valid are labels. raw_valid, X_valid = list(open(TARGET_PATH, 'r')), np.load(TARGET_EMB_PATH + '.' + ARCH + '.npy') if(not NO_BALANCE): raw_valid, X_valid = balance(key, raw_valid, X_valid) Y_valid = np.array([int(key in x) for x in raw_valid]) # load the utility model if(DATASET == 'medical'): util_clf = NonLinearClassifier(key, EMB_DIM_TABLE[ARCH], HIDDEN_DIM, cls_num = 10) # util_clf.load_state_dict(torch.load(UTIL_MODEL_PATH + "medical_functional_{}.cpt".format(ARCH))) util_clf.cuda() preds = util_clf.predict(Target_X) util_acc = np.mean(preds == target_util_y) # print("Util Acc. {:.4f}".format(acc)) if(use_dp): protected_target_X = torch.FloatTensor(protected_target_X) preds = util_clf.predict(protected_target_X) protected_util_acc = np.mean(preds == target_util_y) if(VERBOSE):
acc, protected_acc = 0.0, 0.0 util_acc, protected_util_acc = 0.0, 0.0 if CLS == 'MLP': print("Histogram of the Target Y: {}".format(np.histogram(Target_Y))) clf = NonLinearClassifier(key, EMB_DIM_TABLE[ARCH], HIDDEN_DIM) clf.cuda() clf.fit(X, Y) # assume the existence of the model acc = clf._evaluate(Target_X, Target_Y) if(use_dp): protected_target_X = torch.FloatTensor(protected_target_X) protected_acc = clf._evaluate(protected_target_X, Target_Y) elif CLS == 'SVM': # for discussion REVERSE = False # shadow clf = SVC(kernel='{}'.format(SVM_KERNEL), gamma='scale', verbose=VERBOSE, max_iter = 5000) # print(X_valid) # print(Y_valid) if(REVERSE): clf.fit(Target_X, Target_Y) else: start = time.time() clf.fit(X_valid, Y_valid) end = time.time() print("Time: {}".format(end - start)) # if(defense): # the common approach if(REVERSE): preds = clf.predict(X_valid) acc = np.mean(preds == Y_valid) else: preds = clf.predict(Target_X) acc = np.mean(preds == Target_Y) # print(acc) if(use_dp): preds = clf.predict(protected_target_X) protected_acc = np.mean(preds == Target_Y) elif CLS == 'DANN': # Note: it is unclear what the 1000 (MAXITER) cap refers to here. DANN_CPT_PATHs = DANN_CPT_PATH + "{}_cracker_{}.cpt".format(key, ARCH) clf = DANN(input_size=EMB_DIM_TABLE[ARCH], maxiter=MAXITER, verbose=VERBOSE, name=key, batch_size=BATCH_SIZE, lambda_adapt=LAMDA, hidden_layer_size=HIDDEN, cached = DANN_CACHED, cpt_path = DANN_CPT_PATHs) # clf.cuda() # set the size of the Target_X trans_D = 0.5 * trans_D concated_Target_X = np.concatenate([Target_X - trans_D, Target_X + trans_D], axis = 0) clf.fit(X, Y, X_adapt=concated_Target_X, X_valid=Target_X - trans_D, Y_valid=Target_Y) Target_X = torch.FloatTensor(Target_X-trans_D) acc = clf.validate(Target_X, Target_Y) # print(acc) if(use_dp): protected_target_X = torch.FloatTensor(protected_target_X).cuda() protected_acc = clf.validate(protected_target_X, Target_Y) # print("Target Domain Inference {} Acc: {:.3f}".format(key, acc)) # return acc elif CLS == 'MLP_SHADOW': clf = NonLinearClassifier(key, EMB_DIM_TABLE[ARCH], HIDDEN_DIM) clf.cuda() clf.fit(X_valid, Y_valid) acc = clf._evaluate(Target_X, Target_Y) else: clf = None print('unknown classifier name: {}'.format(CLS)) return acc, protected_acc, util_acc, protected_util_acc # # predict on Target_X # acc = clf._evaluate(Target_X, Target_Y) # # results = np.zeros((2, 2)) # # count = 0 # # for i, sent in enumerate(list(Target_sents)): # # pred_ = int(clf.predict([Target_X[i]])[0]) # # truth_ = int(key in sent) # # results[pred_][truth_] += 1 # # count += 1 # # results /= (count * 1.0) # # acc = results[0][0] + results[1][1] # print("Target Domain Inference {} Acc: {:.3f} Protected: {:.4f}".format(key, acc, protected_acc)) # return acc if __name__ == '__main__': DELTA_TABLE = { "bert": 81.82, 'gpt' : 73.19, 'gpt2': 110.2, 'xl': 17.09, 'xlnet': 601.5, 'xlm': 219.4, 'roberta': 4.15, 'ernie': 28.20 } print(FUNCTION) if(FUNCTION == 'atk'): # DS_prepare() # EX_DS_prepare() # init a defense to test Source_Acc_sum = 0 Target_Acc_sum = 0 Target_Acc_list = [] # data_embedding() _def = initialize_defense('rounding', decimals = 0) protected_avg_acc = 0.0 for key in cls_names: TA, protected_acc, _, _ = ATTACK(key, use_dp = False, defense = _def, size = ARGS.ext_size) Target_Acc_sum += TA protected_avg_acc += protected_acc Target_Acc_list.append([key, TA, protected_acc]) print('INFER {} ACC: {:.4f} Protected Acc.: {:.4f}'.format(key,TA, protected_acc)) print('Keyword Attacker {} on {} Embeddings'.format(CLS, ARCH)) for KT in Target_Acc_list: print('INFER {} ACC: {:.4f} Protected Acc.:
{:.4f}'.format(KT[0], KT[1], KT[2])) print('Target_Acc_Top1_Average: {:.4f} Protected Target_Acc_Average: {:.4f}'.format(Target_Acc_sum / len(cls_names), protected_avg_acc / len(cls_names))) elif(FUNCTION == 'util'): compute_utility() elif(FUNCTION == 'def'): DEFENSE = 'rounding' print('Keyword Attacker {} /Defense {} on {} Embeddings'.format(CLS, DEFENSE, ARCH)) defenses = [] if(DEFENSE == 'rounding'): for i in range(10): defenses.append((i, "rounding to {} decimals".format(i), initialize_defense('rounding', decimals = i))) elif(DEFENSE == 'dp'): eps_list = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000] for eps in eps_list: defenses.append((eps, "laplace with eps {}".format(eps), initialize_defense("dp", delta = DELTA_TABLE[ARCH], eps = eps))) else: eps_list = [0.001, 0.005, 0.01, 0.1, 0.5, 1.0] for eps in eps_list: defenses.append((eps, "minmax with eps {}".format(eps), initialize_defense("minmax", cls_num = 10, eps = eps))) RESULTS = list() for defense in defenses: param, descript, _def = defense Source_Acc_sum = 0 Target_Acc_sum = 0 Target_Acc_list = [] print("Evaluate {} with Defense {}".format(ARCH, descript)) # data_embedding() # _def = initialize_defense('rounding', decimals = 0) protected_avg_acc = 0.0 for key in cls_names: TA, protected_acc, util, protected_util = ATTACK(key, use_dp = True, defense = _def) Target_Acc_sum += TA protected_avg_acc += protected_acc Target_Acc_list.append([key, TA, protected_acc, util, protected_util]) # for KT in Target_Acc_list: # print('INFER {} ACC: {:.4f} Protected Acc.: {:.4f} Util: {:.4f} Protected Util: {:.4f}'.format(KT[0], KT[1], KT[2], KT[3], KT[4])) RESULTS.append([(param, Target_Acc_list)]) print("ARCH: {} \n RESULTS: {}".format(ARCH, RESULTS)) elif(FUNCTION == 'prepare'): data_embedding()
print("TRAINING SET SIZE: {}".format(len(Y))) print("EMBEDDINGS FROM TARGET DOMAIN: {}".format(len(Target_Y))) print("TEST SET SIZE: {}".format(len(Y_valid))) # learn a transfer print("TESTING MODEL: {}".format(CLS))
conditional_block
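The `def` branch in the row above also sweeps a rounding defense over 0 to 9 decimals. A minimal sketch of that quantization idea, assuming it reduces to `np.round` (the real `initialize_defense('rounding', decimals = i)` implementation is not shown in this file):

import numpy as np

def rounding_defense(embeddings, decimals):
    # Quantize each embedding coordinate; decimals=0 keeps only integers.
    # Illustrative sketch only -- the actual defense module may differ.
    return np.round(embeddings, decimals=decimals)

print(rounding_defense(np.array([[0.1234, -1.5678]]), decimals=1))
# -> [[ 0.1 -1.6]]

Fewer retained decimals destroy more information in the embedding, trading attack accuracy against downstream utility, which is exactly what the sweep measures.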
models.rs
/// How does our data look? // Main logs contain when the bot started to run and the total log amount // Keyword logs contain individual keywords with their own logs use serde::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::RwLock; type Account = String; type KeywordId = u64; type KeywordStats = HashMap<KeywordId, KeywordStatistics>; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct Statistics { pub main_stats: MainStats, pub keyword_stats: KeywordStats, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct KeywordStatistics { pub stats: KeywordStat, pub keyword_logs: Vec<Log>, } impl Statistics { pub fn new(account: Account) -> Self { Statistics { main_stats: MainStats::new(account), keyword_stats: HashMap::new(), } } } pub type Db = Arc<RwLock<HashMap<Account, Statistics>>>; pub fn blank_db() -> Db { Arc::new(RwLock::new(HashMap::new())) } // MainStats is the top-level statistics // It contains inner individual keyword statistics // However, every log related to a keyword goes into keyword_db #[derive(Debug, Deserialize, Serialize, Clone)] pub struct MainStats { // Name of Account pub account_name: String, // Total error counts. It is keyword errors + other errors pub error_counts: u64, // Total number of logs. It is main logs + all logs from keywords pub log_counts: u64, // Currently unused pub running: bool, // Total API calls made by the other bot. pub no_api_calls: u64, // API calls used for this bot pub no_internal_api_calls: u64, // When was the other bot started? pub started_at: String, // When the bot was last updated. This field must be updated whenever new logs or keyword logs come in pub last_updated_at: String, // Logs are cleared out and only the top 100 logs are kept if program memory goes beyond // 1G pub logs: Vec<Log>, } impl MainStats { pub fn new(account_name: Account) -> Self { MainStats { account_name, error_counts: 0, running: false, no_api_calls: 0, log_counts: 0, no_internal_api_calls: 0, started_at: crate::helpers::current_time_string(), last_updated_at: crate::helpers::current_time_string(), logs: Vec::new(), } } } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct UpdateStat { pub error_counts: Option<u64>, pub running: Option<bool>, // How many API calls were made since last updated pub no_of_api_call_diff: Option<u64>, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct Log { pub r#type: String, pub time: String, pub message: String, pub meta: Option<Value>, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct KeywordStat { pub id: u64, pub last_updated_at: String, pub error_counts: u64, pub log_counts: u64, pub name: Option<String>, pub keyword: Option<String>, pub placement: Option<u64>, pub running: Option<bool>, pub ads_running: Option<bool>, pub ads_position: Option<u64>, pub current_price: Option<f64>, pub is_max_price_reached: Option<bool>, pub is_min_price_reached: Option<bool>, pub max_expense_reached: Option<bool>, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct UpdateKeywordStat { pub id: u64, pub name: Option<String>, pub current_price: Option<f64>, pub keyword: Option<String>, pub placement: Option<u64>, pub running: Option<bool>, pub error_counts: Option<u64>, pub ads_running: Option<bool>, pub ads_position: Option<u64>, pub logs: Option<Vec<Log>>, pub is_max_price_reached: Option<bool>, pub is_min_price_reached: Option<bool>, pub max_expense_reached: Option<bool>, } impl KeywordStatistics { pub fn update(stats: &mut Statistics, input:
&UpdateKeywordStat) { let main_stats = &mut stats.main_stats; main_stats.last_updated_at = crate::helpers::current_time_string(); let keyword_stats = &mut stats.keyword_stats; if let Some(ks) = keyword_stats.get_mut(&input.id) { ks.stats.last_updated_at = crate::helpers::current_time_string(); if let Some(ru) = input.running { ks.stats.running = Some(ru); } // Todo use if let some feeling lazy atm. if input.ads_running.is_some() { ks.stats.ads_running = input.ads_running } if input.is_max_price_reached.is_some() { ks.stats.is_max_price_reached = input.is_max_price_reached } if input.is_min_price_reached.is_some() { ks.stats.is_min_price_reached = input.is_min_price_reached } if input.ads_position.is_some() { ks.stats.ads_position = input.ads_position } if input.max_expense_reached.is_some() { ks.stats.max_expense_reached = input.max_expense_reached } if let Some(cp) = input.current_price { ks.stats.current_price = Some(cp); } } else { let keyword_statistics = KeywordStatistics { stats: KeywordStat { id: input.id, error_counts: 0, log_counts: 0, name: input.name.to_owned(), keyword: input.keyword.to_owned(), placement: input.placement, last_updated_at: crate::helpers::current_time_string(), running: input.running, ads_running: input.ads_running, ads_position: input.ads_position, current_price: input.current_price, is_max_price_reached: None, is_min_price_reached: None, max_expense_reached: None, }, keyword_logs: Vec::with_capacity(1000), }; keyword_stats.insert(input.id, keyword_statistics); } } pub fn add_logs(stats: &mut Statistics, id: KeywordId, input: Log) { let main_stats = &mut stats.main_stats; let keyword_stats = &mut stats.keyword_stats; if let Some(ks) = keyword_stats.get_mut(&id) { main_stats.last_updated_at = crate::helpers::current_time_string(); if input.r#type == "error" { main_stats.error_counts += 1; ks.stats.error_counts += 1; } main_stats.log_counts += 1; ks.stats.log_counts += 1; ks.keyword_logs.push(input); } } } #[derive(Debug, Deserialize, Serialize, Clone)] struct BackupStatistics { stats: MainStats, keyword: HashMap<KeywordId, Vec<Log>>, } // // We might want to reanalyze previous record for that we are providing ability to // // use old database. 
// pub async fn load_old_database() -> Option<Statistics> { // let aa = std::env::var("JSON_FILE_PATH"); // if aa.is_ok() { // let ff = std::fs::File::open(aa.unwrap()).unwrap(); // let json: BackupStatistics = serde_json::from_reader(ff).unwrap(); // let stats = json.stats; // let keyword = json.keyword; // let account = stats.account_name.to_owned(); // let mut stats_hm = HashMap::new(); // stats_hm.insert(account.clone(), stats); // let mut keywords_hm = HashMap::new(); // keywords_hm.insert(account, keyword); // let arc_stats = Arc::new(Mutex::new(stats_hm)); // let arc_keywords = Arc::new(Mutex::new(keywords_hm)); // return Some(Statistics { // stats: arc_stats, // keyword_stats: arc_keywords, // }); // } // None // } pub async fn clear_database_periodically(db: Db) { loop { println!("Waiting 6 hours to clear DB!"); use tokio::time::Duration; tokio::time::delay_for(Duration::from_secs(6 * 60 * 60)).await; println!("Clearing Old Records!"); // As the database keeps growing we must keep memory usage in check // For this we will check how much memory the process is using // if it's greater than zero we will clear it let mut lock = db.write().await; let vv = lock.values_mut(); for statistics in vv { clear_db(statistics, 100).await } } } pub async fn clear_db(statistics: &mut Statistics, count: usize) { // use std::borrow::Cow; // #[derive(Debug, Deserialize, Serialize, Clone)] // struct Backup<'a> { // stats: Cow<'a, Statistics>, // }; // { // let content = serde_json::to_string_pretty(&Backup { // stats: Cow::Borrowed(&*statistics), // }) // .unwrap(); // let path = crate::helpers::sanitize( // &("".to_owned() // + &statistics.main_stats.account_name // + "_" // + &crate::helpers::current_time_string()), // ) + ".json"; // let mut new_file = File::create(path).unwrap(); // new_file.write_all(&content.into_bytes()).unwrap(); // }
// println!("Backup done"); let mut no_of_main_log_cleared = 0; { if count == 0 { let ms = &mut statistics.main_stats; ms.error_counts = 0; ms.log_counts = 0; ms.no_api_calls = 0; ms.no_internal_api_calls = 0; } let main_logs_len = statistics.main_stats.logs.len(); if main_logs_len > count { // [1,2,3,4,5,6,7] to keep 2 elem drain 0..(7-2) statistics.main_stats.logs.drain(0..(main_logs_len - count)); no_of_main_log_cleared += main_logs_len - count; } } println!("Main Lang Cleared"); let mut no_of_keyword_drained = 0; { let keyword_stats_hashmap = statistics.keyword_stats.values_mut(); for kstat in keyword_stats_hashmap { if count == 0 { let ss = &mut kstat.stats; ss.error_counts = 0; ss.log_counts = 0; ss.last_updated_at = crate::helpers::current_time_string(); } let log_len = kstat.keyword_logs.len(); if log_len > count { kstat.keyword_logs.drain(0..(log_len - count)); no_of_keyword_drained += log_len - count; } } } println!( "Keyword Static Cleared \n No of log cleared {} \n No of mail log cleared {}", no_of_keyword_drained, no_of_main_log_cleared ); }
random_line_split
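The `clear_db` routine in the models.rs row above keeps only the newest `count` entries per log by draining the front of each vector with `logs.drain(0..(len - count))`. The same keep-last-N trimming, sketched in Python for clarity (purely illustrative, not part of the Rust codebase):

def trim_logs(logs, count):
    # Drop the oldest entries so at most `count` remain, mirroring
    # Rust's logs.drain(0..(len - count)) in clear_db.
    cleared = max(0, len(logs) - count)
    del logs[:cleared]
    return cleared

logs = [1, 2, 3, 4, 5, 6, 7]
assert trim_logs(logs, 2) == 5 and logs == [6, 7]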
models.rs
/// How does our data look? // Main logs contain when the bot started to run and the total log amount // Keyword logs contain individual keywords with their own logs use serde::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::RwLock; type Account = String; type KeywordId = u64; type KeywordStats = HashMap<KeywordId, KeywordStatistics>; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct Statistics { pub main_stats: MainStats, pub keyword_stats: KeywordStats, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct KeywordStatistics { pub stats: KeywordStat, pub keyword_logs: Vec<Log>, } impl Statistics { pub fn new(account: Account) -> Self { Statistics { main_stats: MainStats::new(account), keyword_stats: HashMap::new(), } } } pub type Db = Arc<RwLock<HashMap<Account, Statistics>>>; pub fn blank_db() -> Db { Arc::new(RwLock::new(HashMap::new())) } // MainStats is the top-level statistics // It contains inner individual keyword statistics // However, every log related to a keyword goes into keyword_db #[derive(Debug, Deserialize, Serialize, Clone)] pub struct MainStats { // Name of Account pub account_name: String, // Total error counts. It is keyword errors + other errors pub error_counts: u64, // Total number of logs. It is main logs + all logs from keywords pub log_counts: u64, // Currently unused pub running: bool, // Total API calls made by the other bot. pub no_api_calls: u64, // API calls used for this bot pub no_internal_api_calls: u64, // When was the other bot started? pub started_at: String, // When the bot was last updated. This field must be updated whenever new logs or keyword logs come in pub last_updated_at: String, // Logs are cleared out and only the top 100 logs are kept if program memory goes beyond // 1G pub logs: Vec<Log>, } impl MainStats { pub fn new(account_name: Account) -> Self { MainStats { account_name, error_counts: 0, running: false, no_api_calls: 0, log_counts: 0, no_internal_api_calls: 0, started_at: crate::helpers::current_time_string(), last_updated_at: crate::helpers::current_time_string(), logs: Vec::new(), } } } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct UpdateStat { pub error_counts: Option<u64>, pub running: Option<bool>, // How many API calls were made since last updated pub no_of_api_call_diff: Option<u64>, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct Log { pub r#type: String, pub time: String, pub message: String, pub meta: Option<Value>, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct KeywordStat { pub id: u64, pub last_updated_at: String, pub error_counts: u64, pub log_counts: u64, pub name: Option<String>, pub keyword: Option<String>, pub placement: Option<u64>, pub running: Option<bool>, pub ads_running: Option<bool>, pub ads_position: Option<u64>, pub current_price: Option<f64>, pub is_max_price_reached: Option<bool>, pub is_min_price_reached: Option<bool>, pub max_expense_reached: Option<bool>, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct UpdateKeywordStat { pub id: u64, pub name: Option<String>, pub current_price: Option<f64>, pub keyword: Option<String>, pub placement: Option<u64>, pub running: Option<bool>, pub error_counts: Option<u64>, pub ads_running: Option<bool>, pub ads_position: Option<u64>, pub logs: Option<Vec<Log>>, pub is_max_price_reached: Option<bool>, pub is_min_price_reached: Option<bool>, pub max_expense_reached: Option<bool>, } impl KeywordStatistics { pub fn update(stats: &mut Statistics, input:
&UpdateKeywordStat) { let main_stats = &mut stats.main_stats; main_stats.last_updated_at = crate::helpers::current_time_string(); let keyword_stats = &mut stats.keyword_stats; if let Some(ks) = keyword_stats.get_mut(&input.id) { ks.stats.last_updated_at = crate::helpers::current_time_string(); if let Some(ru) = input.running { ks.stats.running = Some(ru); } // Todo use if let some feeling lazy atm. if input.ads_running.is_some() { ks.stats.ads_running = input.ads_running } if input.is_max_price_reached.is_some() { ks.stats.is_max_price_reached = input.is_max_price_reached } if input.is_min_price_reached.is_some() { ks.stats.is_min_price_reached = input.is_min_price_reached } if input.ads_position.is_some() { ks.stats.ads_position = input.ads_position } if input.max_expense_reached.is_some() { ks.stats.max_expense_reached = input.max_expense_reached } if let Some(cp) = input.current_price { ks.stats.current_price = Some(cp); } } else { let keyword_statistics = KeywordStatistics { stats: KeywordStat { id: input.id, error_counts: 0, log_counts: 0, name: input.name.to_owned(), keyword: input.keyword.to_owned(), placement: input.placement, last_updated_at: crate::helpers::current_time_string(), running: input.running, ads_running: input.ads_running, ads_position: input.ads_position, current_price: input.current_price, is_max_price_reached: None, is_min_price_reached: None, max_expense_reached: None, }, keyword_logs: Vec::with_capacity(1000), }; keyword_stats.insert(input.id, keyword_statistics); } } pub fn add_logs(stats: &mut Statistics, id: KeywordId, input: Log) { let main_stats = &mut stats.main_stats; let keyword_stats = &mut stats.keyword_stats; if let Some(ks) = keyword_stats.get_mut(&id)
} } #[derive(Debug, Deserialize, Serialize, Clone)] struct BackupStatistics { stats: MainStats, keyword: HashMap<KeywordId, Vec<Log>>, } // // We might want to reanalyze previous record for that we are providing ability to // // use old database. // pub async fn load_old_database() -> Option<Statistics> { // let aa = std::env::var("JSON_FILE_PATH"); // if aa.is_ok() { // let ff = std::fs::File::open(aa.unwrap()).unwrap(); // let json: BackupStatistics = serde_json::from_reader(ff).unwrap(); // let stats = json.stats; // let keyword = json.keyword; // let account = stats.account_name.to_owned(); // let mut stats_hm = HashMap::new(); // stats_hm.insert(account.clone(), stats); // let mut keywords_hm = HashMap::new(); // keywords_hm.insert(account, keyword); // let arc_stats = Arc::new(Mutex::new(stats_hm)); // let arc_keywords = Arc::new(Mutex::new(keywords_hm)); // return Some(Statistics { // stats: arc_stats, // keyword_stats: arc_keywords, // }); // } // None // } pub async fn clear_database_periodically(db: Db) { loop { println!("Waiting 6 hours to clear DB!"); use tokio::time::Duration; tokio::time::delay_for(Duration::from_secs(6 * 60 * 60)).await; println!("Clearing Old Records!"); // As the database keeps growing we must keep memory usage in check // For this we will check how much memory the process is using // if it's greater than zero we will clear it let mut lock = db.write().await; let vv = lock.values_mut(); for statistics in vv { clear_db(statistics, 100).await } } } pub async fn clear_db(statistics: &mut Statistics, count: usize) { // use std::borrow::Cow; // #[derive(Debug, Deserialize, Serialize, Clone)] // struct Backup<'a> { // stats: Cow<'a, Statistics>, // }; // { // let content = serde_json::to_string_pretty(&Backup { // stats: Cow::Borrowed(&*statistics), // }) // .unwrap(); // let path = crate::helpers::sanitize( // &("".to_owned() // + &statistics.main_stats.account_name // + "_" // + &crate::helpers::current_time_string()), // ) + ".json"; // let mut new_file = File::create(path).unwrap(); // new_file.write_all(&content.into_bytes()).unwrap(); // } // println!("Backup done"); let mut no_of_main_log_cleared = 0; { if count == 0 { let ms = &mut statistics.main_stats; ms.error_counts = 0; ms.log_counts = 0; ms.no_api_calls = 0; ms.no_internal_api_calls = 0; } let main_logs_len = statistics.main_stats.logs.len(); if main_logs_len > count { // [1,2,3,4,5,6,7] to keep 2 elem drain 0..(7-2) statistics.main_stats.logs.drain(0..(main_logs_len - count)); no_of_main_log_cleared += main_logs_len - count; } } println!("Main Logs Cleared"); let mut no_of_keyword_drained = 0; { let keyword_stats_hashmap = statistics.keyword_stats.values_mut(); for kstat in keyword_stats_hashmap { if count == 0 { let ss = &mut kstat.stats; ss.error_counts = 0; ss.log_counts = 0; ss.last_updated_at = crate::helpers::current_time_string(); } let log_len = kstat.keyword_logs.len(); if log_len > count { kstat.keyword_logs.drain(0..(log_len - count)); no_of_keyword_drained += log_len - count; } } } println!( "Keyword Stats Cleared \n No of log cleared {} \n No of main log cleared {}", no_of_keyword_drained, no_of_main_log_cleared ); }
{ main_stats.last_updated_at = crate::helpers::current_time_string(); if input.r#type == "error" { main_stats.error_counts += 1; ks.stats.error_counts += 1; } main_stats.log_counts += 1; ks.stats.log_counts += 1; ks.keyword_logs.push(input); }
conditional_block
models.rs
/// How does our data look? // Main logs contain when the bot started to run and the total log amount // Keyword logs contain individual keywords with their own logs use serde::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::RwLock; type Account = String; type KeywordId = u64; type KeywordStats = HashMap<KeywordId, KeywordStatistics>; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct Statistics { pub main_stats: MainStats, pub keyword_stats: KeywordStats, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct KeywordStatistics { pub stats: KeywordStat, pub keyword_logs: Vec<Log>, } impl Statistics { pub fn new(account: Account) -> Self { Statistics { main_stats: MainStats::new(account), keyword_stats: HashMap::new(), } } } pub type Db = Arc<RwLock<HashMap<Account, Statistics>>>; pub fn blank_db() -> Db { Arc::new(RwLock::new(HashMap::new())) } // MainStats is the top-level statistics // It contains inner individual keyword statistics // However, every log related to a keyword goes into keyword_db #[derive(Debug, Deserialize, Serialize, Clone)] pub struct MainStats { // Name of Account pub account_name: String, // Total error counts. It is keyword errors + other errors pub error_counts: u64, // Total number of logs. It is main logs + all logs from keywords pub log_counts: u64, // Currently unused pub running: bool, // Total API calls made by the other bot. pub no_api_calls: u64, // API calls used for this bot pub no_internal_api_calls: u64, // When was the other bot started? pub started_at: String, // When the bot was last updated. This field must be updated whenever new logs or keyword logs come in pub last_updated_at: String, // Logs are cleared out and only the top 100 logs are kept if program memory goes beyond // 1G pub logs: Vec<Log>, } impl MainStats { pub fn new(account_name: Account) -> Self { MainStats { account_name, error_counts: 0, running: false, no_api_calls: 0, log_counts: 0, no_internal_api_calls: 0, started_at: crate::helpers::current_time_string(), last_updated_at: crate::helpers::current_time_string(), logs: Vec::new(), } } } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct UpdateStat { pub error_counts: Option<u64>, pub running: Option<bool>, // How many API calls were made since last updated pub no_of_api_call_diff: Option<u64>, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct Log { pub r#type: String, pub time: String, pub message: String, pub meta: Option<Value>, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct
{ pub id: u64, pub last_updated_at: String, pub error_counts: u64, pub log_counts: u64, pub name: Option<String>, pub keyword: Option<String>, pub placement: Option<u64>, pub running: Option<bool>, pub ads_running: Option<bool>, pub ads_position: Option<u64>, pub current_price: Option<f64>, pub is_max_price_reached: Option<bool>, pub is_min_price_reached: Option<bool>, pub max_expense_reached: Option<bool>, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct UpdateKeywordStat { pub id: u64, pub name: Option<String>, pub current_price: Option<f64>, pub keyword: Option<String>, pub placement: Option<u64>, pub running: Option<bool>, pub error_counts: Option<u64>, pub ads_running: Option<bool>, pub ads_position: Option<u64>, pub logs: Option<Vec<Log>>, pub is_max_price_reached: Option<bool>, pub is_min_price_reached: Option<bool>, pub max_expense_reached: Option<bool>, } impl KeywordStatistics { pub fn update(stats: &mut Statistics, input: &UpdateKeywordStat) { let main_stats = &mut stats.main_stats; main_stats.last_updated_at = crate::helpers::current_time_string(); let keyword_stats = &mut stats.keyword_stats; if let Some(ks) = keyword_stats.get_mut(&input.id) { ks.stats.last_updated_at = crate::helpers::current_time_string(); if let Some(ru) = input.running { ks.stats.running = Some(ru); } // Todo use if let some feeling lazy atm. if input.ads_running.is_some() { ks.stats.ads_running = input.ads_running } if input.is_max_price_reached.is_some() { ks.stats.is_max_price_reached = input.is_max_price_reached } if input.is_min_price_reached.is_some() { ks.stats.is_min_price_reached = input.is_min_price_reached } if input.ads_position.is_some() { ks.stats.ads_position = input.ads_position } if input.max_expense_reached.is_some() { ks.stats.max_expense_reached = input.max_expense_reached } if let Some(cp) = input.current_price { ks.stats.current_price = Some(cp); } } else { let keyword_statistics = KeywordStatistics { stats: KeywordStat { id: input.id, error_counts: 0, log_counts: 0, name: input.name.to_owned(), keyword: input.keyword.to_owned(), placement: input.placement, last_updated_at: crate::helpers::current_time_string(), running: input.running, ads_running: input.ads_running, ads_position: input.ads_position, current_price: input.current_price, is_max_price_reached: None, is_min_price_reached: None, max_expense_reached: None, }, keyword_logs: Vec::with_capacity(1000), }; keyword_stats.insert(input.id, keyword_statistics); } } pub fn add_logs(stats: &mut Statistics, id: KeywordId, input: Log) { let main_stats = &mut stats.main_stats; let keyword_stats = &mut stats.keyword_stats; if let Some(ks) = keyword_stats.get_mut(&id) { main_stats.last_updated_at = crate::helpers::current_time_string(); if input.r#type == "error" { main_stats.error_counts += 1; ks.stats.error_counts += 1; } main_stats.log_counts += 1; ks.stats.log_counts += 1; ks.keyword_logs.push(input); } } } #[derive(Debug, Deserialize, Serialize, Clone)] struct BackupStatistics { stats: MainStats, keyword: HashMap<KeywordId, Vec<Log>>, } // // We might want to reanalyze previous record for that we are providing ability to // // use old database. 
// pub async fn load_old_database() -> Option<Statistics> { // let aa = std::env::var("JSON_FILE_PATH"); // if aa.is_ok() { // let ff = std::fs::File::open(aa.unwrap()).unwrap(); // let json: BackupStatistics = serde_json::from_reader(ff).unwrap(); // let stats = json.stats; // let keyword = json.keyword; // let account = stats.account_name.to_owned(); // let mut stats_hm = HashMap::new(); // stats_hm.insert(account.clone(), stats); // let mut keywords_hm = HashMap::new(); // keywords_hm.insert(account, keyword); // let arc_stats = Arc::new(Mutex::new(stats_hm)); // let arc_keywords = Arc::new(Mutex::new(keywords_hm)); // return Some(Statistics { // stats: arc_stats, // keyword_stats: arc_keywords, // }); // } // None // } pub async fn clear_database_periodically(db: Db) { loop { println!("Waiting 6 hours to clear DB!"); use tokio::time::Duration; tokio::time::delay_for(Duration::from_secs(6 * 60 * 60)).await; println!("Clearing Old Records!"); // As the database keeps growing we must keep memory usage in check // For this we will check how much memory the process is using // if it's greater than zero we will clear it let mut lock = db.write().await; let vv = lock.values_mut(); for statistics in vv { clear_db(statistics, 100).await } } } pub async fn clear_db(statistics: &mut Statistics, count: usize) { // use std::borrow::Cow; // #[derive(Debug, Deserialize, Serialize, Clone)] // struct Backup<'a> { // stats: Cow<'a, Statistics>, // }; // { // let content = serde_json::to_string_pretty(&Backup { // stats: Cow::Borrowed(&*statistics), // }) // .unwrap(); // let path = crate::helpers::sanitize( // &("".to_owned() // + &statistics.main_stats.account_name // + "_" // + &crate::helpers::current_time_string()), // ) + ".json"; // let mut new_file = File::create(path).unwrap(); // new_file.write_all(&content.into_bytes()).unwrap(); // } // println!("Backup done"); let mut no_of_main_log_cleared = 0; { if count == 0 { let ms = &mut statistics.main_stats; ms.error_counts = 0; ms.log_counts = 0; ms.no_api_calls = 0; ms.no_internal_api_calls = 0; } let main_logs_len = statistics.main_stats.logs.len(); if main_logs_len > count { // [1,2,3,4,5,6,7] to keep 2 elem drain 0..(7-2) statistics.main_stats.logs.drain(0..(main_logs_len - count)); no_of_main_log_cleared += main_logs_len - count; } } println!("Main Logs Cleared"); let mut no_of_keyword_drained = 0; { let keyword_stats_hashmap = statistics.keyword_stats.values_mut(); for kstat in keyword_stats_hashmap { if count == 0 { let ss = &mut kstat.stats; ss.error_counts = 0; ss.log_counts = 0; ss.last_updated_at = crate::helpers::current_time_string(); } let log_len = kstat.keyword_logs.len(); if log_len > count { kstat.keyword_logs.drain(0..(log_len - count)); no_of_keyword_drained += log_len - count; } } } println!( "Keyword Stats Cleared \n No of log cleared {} \n No of main log cleared {}", no_of_keyword_drained, no_of_main_log_cleared ); }
KeywordStat
identifier_name
tsviz.py
#!/usr/bin/python3 # # tsviz # # a command-line utility to help visualize TypeScript class-dependencies and # graphs. # from argparse import ArgumentParser import re import os debug_output = False solution_path = "." allow_loose_module_match = False module_import_declaration = re.compile("import .* from [\"'](.*)[\"'];.*") module_require_declaration = re.compile(".*require\([\"'](.*)[\"']\).*") extension = ".ts" def debug(txt): global debug_output if debug_output: print(txt) def get_unix_path(file): return file.replace("\\", "/") def get_directory(file): unix_file = get_unix_path(file) return os.path.split(unix_file)[0] def set_working_basedir(root_dir): global solution_path solution_path = get_directory(get_unix_path(root_dir)) debug("Base-solution dir set to {0}".format(solution_path)) class Module(object): def __init__(self, filename): self.name = self.get_name_from_filename(filename) self.filename = os.path.abspath(filename) self.dependant_module_names = [] # dependant modules, as declared in file. # not subject to transitive dependency-elimination. self.declared_dependant_modules = [] # dependant modules as visualized in the graph, based on self.declared_dependant_modules. # subject to transitive dependency-elimination. self.dependant_modules = [] self.missing_module_names = [] self.has_missing_modules = False self.is_missing_module = False self.highlight = False self.highlighted_dependents = False self.has_circular_dependencies = False self.circular_dependencies = [] def get_name_from_filename(self, filename): if filename.find("/") == -1: return filename elif len(solution_path) == 0: return filename elif solution_path == ".": return filename else: return filename[len(solution_path)+1::] def get_friendly_id(self): return self.name.replace(".", "_").replace("-", "_").replace("/", "_") def add_dependency(self, module_name): global extension if module_name.find("/") == -1 or module_name.endswith(".json"): # node module. no need to adjust debug("Info: resolved npm-module or JSON data-file {0}.".format(module_name)) elif not module_name.endswith(extension): module_name += extension filename = module_name if filename not in self.dependant_module_names: # print("{0}: Adding to dependency: {1}".format(self.name, filename)) self.dependant_module_names.append(filename) def get_module_references(self, lines): imports = [] for line in lines: if line.startswith("import "): imports.append(line) if line.find("require("): imports.append(line) return imports
if match: module = match.groups()[0] full_module_path = self.get_module_path(module) result.append(full_module_path) match = module_require_declaration.match(item) if match: module = match.groups()[0] full_module_path = self.get_module_path(module) result.append(full_module_path) return result def get_module_path(self, module): if module.find("/") != -1: return os.path.abspath(os.path.join(os.path.dirname(self.filename), module)) else: return module def get_declared_module_dependencies(self): lines = get_lines_from_file(self.filename) import_lines = self.get_module_references(lines) imports = self.get_module_imports(import_lines) return imports def apply_declared_module_dependencies(self): imports = self.get_declared_module_dependencies() for item in imports: self.add_dependency(item) def resolve_modules_from_names(self, modules): global allow_loose_module_match for name in self.dependant_module_names: module = get_module_by_filename(name, modules) if module is None and allow_loose_module_match: module = get_module_by_loose_name(name, modules) # check if we still haven't matched up! if module is None: print("ERROR! Failed to resolve dependency {0} in module {1}!".format(name, self.name)) # track missing deps consistently missing_module_id = name.replace("-", "") module = Module(missing_module_id) module.is_missing_module = True modules.append(module) if module.is_missing_module: self.has_missing_modules = True self.missing_module_names.append(module.name) self.dependant_modules.append(module) self.declared_dependant_modules = self.dependant_modules def remove_transitive_dependencies(self): # if A depends on B & C, and # B also depends on C, then # A has a transitive dependency on C through B. # This is a dependency which can be eliminated to clean up the graph. # clone list to have separate object to work on project_deps = self.dependant_modules[:] # investigate each direct sub-dependency as its own tree for dep in self.dependant_modules: # calculate all dependencies for this one tree nested_deps = dep.get_nested_dependencies() # check if any of those are direct dependencies for nested_dep in nested_deps: # if so, remove them if nested_dep in project_deps: debug("--Project {0}-- Removed transitive dependency: {1} (via {2})".format(self.name, nested_dep.name, dep.name)) project_deps.remove(nested_dep) eliminated_deps = len(self.dependant_modules) - len(project_deps) if eliminated_deps != 0: debug("--Project {0}-- Eliminated {1} transitive dependencies. Was {2}. Reduced to {3}".format(self.name, eliminated_deps, len(self.dependant_modules), len(project_deps))) self.dependant_modules = project_deps def get_nested_dependencies(self): total_deps = [] self.add_nested_dependencies_to(total_deps) return total_deps def add_nested_dependencies_to(self, all_deps): for dep in self.dependant_modules: if dep not in all_deps: all_deps.append(dep) dep.add_nested_dependencies_to(all_deps) def has_highlighted_dependencies(self): allDeps = self.get_nested_dependencies() for dep in allDeps: if dep.highlight: return True return False def has_declared_highlighted_dependencies(self): declaredDeps = self.declared_dependant_modules for dep in declaredDeps: if dep.highlight: return True return False def detect_circular_dependencies(self): all_nested_deps = self.get_nested_dependencies() for dep in all_nested_deps: for subdep in dep.declared_dependant_modules: if subdep == self: print("WARNING: Circular dependency detected! 
Module {0} and {1} depend on each other!".format(self.name, dep.name)) self.has_circular_dependencies = True self.circular_dependencies.append(dep) def get_module_by_filename(filename, modules): for module in modules: if module.filename == filename: return module return None def get_module_by_loose_name(name, modules): basename = os.path.basename(name).lower() for module in modules: if os.path.basename(module.filename).lower() == basename: return module return None def get_lines_from_file(file): with open(file, 'r', encoding="utf-8") as f: contents = f.read() # detect byte order marker. messes up first line in file. # this first line is often an import! bytes = contents.encode('utf-8') #print(bytes[0:3]) if bytes[0:2] == b'\xef\xff': print("BOM detected!") contents = contents[2:] if bytes[0:2] == b'\xef\xbb': #print("BOM (3-byte) detected!") contents = contents[1:] lines = contents.split("\n") # print(lines[0]) return lines def sort_modules(modules): modules.sort(key=lambda x: x.name) def get_tsfiles_in_dir(root_dir): global extension from fnmatch import fnmatch results = [] for path, subdirs, files in os.walk(root_dir): for name in files: if fnmatch(name, "*" + extension): results.append(os.path.join(path, name)) # fallback to JS if no typescript if results == []: extension = ".js" for path, subdirs, files in os.walk(root_dir): for name in files: if fnmatch(name, "*" + extension): results.append(os.path.join(path, name)) return results def get_modules(tsfiles): modules = [] for tsfile in tsfiles: modules.append(Module(tsfile)) return modules def process_modules(modules): # all projects & dependencies should now be known. let's analyze them for module in modules: module.resolve_modules_from_names(modules) # once all modules have resolved their dependencies, we can try to # detect circular dependencies! 
for module in modules: module.detect_circular_dependencies() # format results in alphabetical order sort_modules(modules) for module in modules: sort_modules(module.dependant_modules) def remove_transitive_dependencies(projects): for project in projects: project.remove_transitive_dependencies() def filter_modules(rx, projects): result = [] for project in projects: if not rx.match(str.lower(project.filename)): result.append(project) else: debug("Info: Excluding project {0}.".format(project.name)) return result def highlight_modules(rx, projects): for project in projects: if rx.match(str.lower(project.name)): debug("Highlighting project {0}".format(project.name)) project.highlight = True for project in projects: if project.highlight: deps = project.get_nested_dependencies() for dep in deps: dep.highlighted_dependents = True def render_dot_file(projects, highlight_all=False, highlight_children=False): lines = [] lines.append("digraph {") lines.append(" rankdir=\"LR\"") lines.append("") lines.append(" # apply theme") lines.append(" bgcolor=\"#222222\"") lines.append("") lines.append(" // defaults for edges and nodes can be specified") lines.append(" node [ color=\"#ffffff\" fontcolor=\"#ffffff\" ]") lines.append(" edge [ color=\"#ffffff\" ]") lines.append("") lines.append(" # module declarations") # define projects # create nodes like this # A [ label="First Node" shape="circle" ] for project in projects: id = project.get_friendly_id() styling = "" if project.highlight or project.highlighted_dependents: styling = " fillcolor=\"#30c2c2\" style=filled color=\"#000000\" fontcolor=\"#000000\"" elif project.is_missing_module: styling = " fillcolor=\"#f22430\" style=filled color=\"#000000\" fontcolor=\"#000000\"" elif project.has_missing_modules: styling = " fillcolor=\"#616118\" style=filled color=\"#000000\" fontcolor=\"#000000\"" elif project.has_circular_dependencies: styling = " fillcolor=\"#ff0000\" style=filled color=\"#000000\" fontcolor=\"#cccc00\"" lines.append(" {0} [ label=\"{1}\" {2} ]".format(id, project.name, styling)) # apply dependencies lines.append("") lines.append(" # project dependencies") for project in projects: proj1_id = project.get_friendly_id() for proj2 in project.dependant_modules: if proj2 is None: print("WARNING: Unable to resolve dependency with ID {0} for project {1}".format(id, project.name)) else: proj2_id = proj2.get_friendly_id() styling = "" if proj2.highlight or ((project.highlight or project.highlighted_dependents) and proj2.highlighted_dependents) or proj2.has_declared_highlighted_dependencies() or (highlight_all and proj2.has_highlighted_dependencies()): styling = " [color=\"#30c2c2\"]" elif proj2.is_missing_module or (project.has_missing_modules and proj2.has_missing_modules): styling = " [color=\"#f22430\"]" elif project.has_circular_dependencies and proj2.has_circular_dependencies: styling = " [color=\"#ff0000\"]" lines.append(" {0} -> {1}{2}".format(proj1_id, proj2_id, styling)) lines.append("") lines.append("}") return "\n".join(lines) def process(root_dir, dot_file, exclude, highlight, highlight_all, highlight_children, keep_deps): set_working_basedir(root_dir) module_files = get_tsfiles_in_dir(root_dir) modules = get_modules(module_files) if exclude: debug("Excluding projects...") excluder = re.compile(str.lower(exclude)) modules = filter_modules(excluder, modules) # pull in dependencies declared in TS-files. # requires real files, so cannot be used in test! 
for module in modules: module.apply_declared_module_dependencies() process_modules(modules) if not keep_deps: debug("Removing redundant dependencies...") remove_transitive_dependencies(modules) if highlight: debug("Highlighting projects...") highlighter = re.compile(str.lower(highlight)) highlight_modules(highlighter, modules) txt = render_dot_file(modules, highlight_all, highlight_children) with open(dot_file, 'w') as f: f.write(txt) print("Wrote output-file '{0}'.".format(dot_file)) def main(): global debug_output, allow_loose_module_match p = ArgumentParser() p.add_argument("--input", "-i", help="The root directory to analyze.") p.add_argument("--output", "-o", help="The file to write to.") p.add_argument("--loose", "-l", action="store_true", help="Allow loose matching of modules (may be required with path-aliases!)") p.add_argument("--keep-declared-deps", "-k", action="store_true", help="Don't remove redundant transitive dependencies in post-processing.") p.add_argument("--verbose", "-v", action="store_true", help="Enable verbose output") p.add_argument("--exclude", "-e", help="Filter modules matching this expression from the graph") p.add_argument("--highlight", help="Highlight modules matching this expression in the graph") p.add_argument("--highlight-all", action="store_true", help="Highlight all paths leading to a highlighted project") p.add_argument("--highlight-children", action="store_true", help="Highlight all child-dependencies of highlighted project") args = p.parse_args() debug_output = args.verbose allow_loose_module_match = args.loose process(args.input, args.output, args.exclude, args.highlight, args.highlight_all, args.highlight_children, args.keep_declared_deps) # don't run from unit-tests if __name__ == "__main__": main()
def get_module_imports(self, imports): result = [] for item in imports: match = module_import_declaration.match(item)
random_line_split
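As an aside: the two regular expressions at the top of tsviz.py carry all of the import detection, so they are easy to sanity-check in isolation. Below is a minimal sketch, assuming only the standard library; the sample lines are invented for illustration and are not taken from any real project.

import re

# Same patterns as in tsviz.py, written as raw strings so the escaped
# parenthesis does not trigger an invalid-escape warning.
module_import_declaration = re.compile(r"import .* from [\"'](.*)[\"'];.*")
module_require_declaration = re.compile(r".*require\([\"'](.*)[\"']\).*")

# Hypothetical input lines.
lines = [
    "import { Foo } from './foo/bar';",
    "const baz = require('./baz');",
    "const x = 1;",
]

for line in lines:
    match = module_import_declaration.match(line) or module_require_declaration.match(line)
    if match:
        # Prints './foo/bar' and then './baz'; the plain statement is skipped.
        print(match.groups()[0])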
tsviz.py
#!/usr/bin/python3 # # tsviz # # a command-line utility to help visualize TypeScript class-dependencies and # graphs. # from argparse import ArgumentParser import re import os debug_output = False solution_path = "." allow_loose_module_match = False module_import_declaration = re.compile("import .* from [\"'](.*)[\"'];.*") module_require_declaration = re.compile(".*require\([\"'](.*)[\"']\).*") extension = ".ts" def debug(txt): global debug_output if debug_output: print(txt) def get_unix_path(file): return file.replace("\\", "/") def get_directory(file): unix_file = get_unix_path(file) return os.path.split(unix_file)[0] def set_working_basedir(root_dir): global solution_path solution_path = get_directory(get_unix_path(root_dir)) debug("Base-solution dir set to {0}".format(solution_path)) class Module(object): def __init__(self, filename): self.name = self.get_name_from_filename(filename) self.filename = os.path.abspath(filename) self.dependant_module_names = [] # dependant modules, as declared in file. # not subject to transitive dependency-elimination. self.declared_dependant_modules = [] # dependant modules as visualized in the graph, based on self.declared_dependant_modules. # subject to transitive dependency-elimination. self.dependant_modules = [] self.missing_module_names = [] self.has_missing_modules = False self.is_missing_module = False self.highlight = False self.highlighted_dependents = False self.has_circular_dependencies = False self.circular_dependencies = [] def get_name_from_filename(self, filename): if filename.find("/") == -1: return filename elif len(solution_path) == 0: return filename elif solution_path == ".": return filename else: return filename[len(solution_path)+1::] def get_friendly_id(self): return self.name.replace(".", "_").replace("-", "_").replace("/", "_") def add_dependency(self, module_name): global extension if module_name.find("/") == -1 or module_name.endswith(".json"): # node module. 
no need to adjust debug("Info: resolved npm-module or JSON data-file {0}.".format(module_name)) elif not module_name.endswith(extension): module_name += extension filename = module_name if filename not in self.dependant_module_names: # print("{0}: Adding to dependency: {1}".format(self.name, filename)) self.dependant_module_names.append(filename) def get_module_references(self, lines): imports = [] for line in lines: if line.startswith("import "): imports.append(line) if line.find("require(") != -1: imports.append(line) return imports def get_module_imports(self, imports): result = [] for item in imports: match = module_import_declaration.match(item) if match: module = match.groups()[0] full_module_path = self.get_module_path(module) result.append(full_module_path) match = module_require_declaration.match(item) if match: module = match.groups()[0] full_module_path = self.get_module_path(module) result.append(full_module_path) return result def get_module_path(self, module): if module.find("/") != -1: return os.path.abspath(os.path.join(os.path.dirname(self.filename), module)) else: return module def get_declared_module_dependencies(self): lines = get_lines_from_file(self.filename) import_lines = self.get_module_references(lines) imports = self.get_module_imports(import_lines) return imports def apply_declared_module_dependencies(self): imports = self.get_declared_module_dependencies() for item in imports: self.add_dependency(item) def resolve_modules_from_names(self, modules): global allow_loose_module_match for name in self.dependant_module_names: module = get_module_by_filename(name, modules) if module is None and allow_loose_module_match: module = get_module_by_loose_name(name, modules) # check if we still haven't matched up! if module is None: print("ERROR! Failed to resolve dependency {0} in module {1}!".format(name, self.name)) # track missing deps consistently missing_module_id = name.replace("-", "") module = Module(missing_module_id) module.is_missing_module = True modules.append(module) if module.is_missing_module: self.has_missing_modules = True self.missing_module_names.append(module.name) self.dependant_modules.append(module) self.declared_dependant_modules = self.dependant_modules def remove_transitive_dependencies(self): # if A depends on B & C, and # B also depends on C, then # A has a transitive dependency on C through B. # This is a dependency which can be eliminated to clean up the graph. # clone list to have separate object to work on project_deps = self.dependant_modules[:] # investigate each direct sub-dependency as its own tree for dep in self.dependant_modules: # calculate all dependencies for this one tree nested_deps = dep.get_nested_dependencies() # check if any of those are direct dependencies for nested_dep in nested_deps: # if so, remove them if nested_dep in project_deps: debug("--Project {0}-- Removed transitive dependency: {1} (via {2})".format(self.name, nested_dep.name, dep.name)) project_deps.remove(nested_dep) eliminated_deps = len(self.dependant_modules) - len(project_deps) if eliminated_deps != 0: debug("--Project {0}-- Eliminated {1} transitive dependencies. Was {2}. 
Reduced to {3}".format(self.name, eliminated_deps, len(self.dependant_modules), len(project_deps))) self.dependant_modules = project_deps def get_nested_dependencies(self): total_deps = [] self.add_nested_dependencies_to(total_deps) return total_deps def add_nested_dependencies_to(self, all_deps): for dep in self.dependant_modules: if dep not in all_deps: all_deps.append(dep) dep.add_nested_dependencies_to(all_deps) def has_highlighted_dependencies(self): allDeps = self.get_nested_dependencies() for dep in allDeps: if dep.highlight: return True return False def has_declared_highlighted_dependencies(self): declaredDeps = self.declared_dependant_modules for dep in declaredDeps: if dep.highlight: return True return False def detect_circular_dependencies(self): all_nested_deps = self.get_nested_dependencies() for dep in all_nested_deps: for subdep in dep.declared_dependant_modules: if subdep == self: print("WARNING: Circular dependency detected! Modules {0} and {1} depend on each other!".format(self.name, dep.name)) self.has_circular_dependencies = True self.circular_dependencies.append(dep) def get_module_by_filename(filename, modules): for module in modules: if module.filename == filename: return module return None def get_module_by_loose_name(name, modules): basename = os.path.basename(name).lower() for module in modules: if os.path.basename(module.filename).lower() == basename: return module return None def get_lines_from_file(file): with open(file, 'r', encoding="utf-8") as f: contents = f.read() # detect a byte order mark: it survives UTF-8 decoding as U+FEFF and messes up the first line in the file. # this first line is often an import! if contents.startswith('\ufeff'): print("BOM detected!") contents = contents[1:] lines = contents.split("\n") # print(lines[0]) return lines def sort_modules(modules): modules.sort(key=lambda x: x.name) def get_tsfiles_in_dir(root_dir): global extension from fnmatch import fnmatch results = [] for path, subdirs, files in os.walk(root_dir): for name in files: if fnmatch(name, "*" + extension): results.append(os.path.join(path, name)) # fall back to JS if no TypeScript files were found if results == []: extension = ".js" for path, subdirs, files in os.walk(root_dir): for name in files: if fnmatch(name, "*" + extension): results.append(os.path.join(path, name)) return results def get_modules(tsfiles): modules = [] for tsfile in tsfiles: modules.append(Module(tsfile)) return modules def process_modules(modules): # all projects & dependencies should now be known. let's analyze them for module in modules: module.resolve_modules_from_names(modules) # once all modules have resolved their dependencies, we can try to # detect circular dependencies! 
for module in modules: module.detect_circular_dependencies() # format results in alphabetical order sort_modules(modules) for module in modules: sort_modules(module.dependant_modules) def remove_transitive_dependencies(projects): for project in projects: project.remove_transitive_dependencies() def filter_modules(rx, projects): result = [] for project in projects: if not rx.match(str.lower(project.filename)): result.append(project) else: debug("Info: Excluding project {0}.".format(project.name)) return result def highlight_modules(rx, projects): for project in projects: if rx.match(str.lower(project.name)): debug("Highlighting project {0}".format(project.name)) project.highlight = True for project in projects: if project.highlight: deps = project.get_nested_dependencies() for dep in deps:
def render_dot_file(projects, highlight_all=False, highlight_children=False): lines = [] lines.append("digraph {") lines.append(" rankdir=\"LR\"") lines.append("") lines.append(" # apply theme") lines.append(" bgcolor=\"#222222\"") lines.append("") lines.append(" // defaults for edges and nodes can be specified") lines.append(" node [ color=\"#ffffff\" fontcolor=\"#ffffff\" ]") lines.append(" edge [ color=\"#ffffff\" ]") lines.append("") lines.append(" # module declarations") # define projects # create nodes like this # A [ label="First Node" shape="circle" ] for project in projects: id = project.get_friendly_id() styling = "" if project.highlight or project.highlighted_dependents: styling = " fillcolor=\"#30c2c2\" style=filled color=\"#000000\" fontcolor=\"#000000\"" elif project.is_missing_module: styling = " fillcolor=\"#f22430\" style=filled color=\"#000000\" fontcolor=\"#000000\"" elif project.has_missing_modules: styling = " fillcolor=\"#616118\" style=filled color=\"#000000\" fontcolor=\"#000000\"" elif project.has_circular_dependencies: styling = " fillcolor=\"#ff0000\" style=filled color=\"#000000\" fontcolor=\"#cccc00\"" lines.append(" {0} [ label=\"{1}\" {2} ]".format(id, project.name, styling)) # apply dependencies lines.append("") lines.append(" # project dependencies") for project in projects: proj1_id = project.get_friendly_id() for proj2 in project.dependant_modules: if proj2 is None: print("WARNING: Unable to resolve dependency with ID {0} for project {1}".format(id, project.name)) else: proj2_id = proj2.get_friendly_id() styling = "" if proj2.highlight or ((project.highlight or project.highlighted_dependents) and proj2.highlighted_dependents) or proj2.has_declared_highlighted_dependencies() or (highlight_all and proj2.has_highlighted_dependencies()): styling = " [color=\"#30c2c2\"]" elif proj2.is_missing_module or (project.has_missing_modules and proj2.has_missing_modules): styling = " [color=\"#f22430\"]" elif project.has_circular_dependencies and proj2.has_circular_dependencies: styling = " [color=\"#ff0000\"]" lines.append(" {0} -> {1}{2}".format(proj1_id, proj2_id, styling)) lines.append("") lines.append("}") return "\n".join(lines) def process(root_dir, dot_file, exclude, highlight, highlight_all, highlight_children, keep_deps): set_working_basedir(root_dir) module_files = get_tsfiles_in_dir(root_dir) modules = get_modules(module_files) if exclude: debug("Excluding projects...") excluder = re.compile(str.lower(exclude)) modules = filter_modules(excluder, modules) # pull in dependencies declared in TS-files. # requires real files, so cannot be used in test! 
for module in modules: module.apply_declared_module_dependencies() process_modules(modules) if not keep_deps: debug("Removing redundant dependencies...") remove_transitive_dependencies(modules) if highlight: debug("Highlighting projects...") highlighter = re.compile(str.lower(highlight)) highlight_modules(highlighter, modules) txt = render_dot_file(modules, highlight_all, highlight_children) with open(dot_file, 'w') as f: f.write(txt) print("Wrote output-file '{0}'.".format(dot_file)) def main(): global debug_output, allow_loose_module_match p = ArgumentParser() p.add_argument("--input", "-i", help="The root directory to analyze.") p.add_argument("--output", "-o", help="The file to write to.") p.add_argument("--loose", "-l", action="store_true", help="Allow loose matching of modules (may be required with path-aliases!)") p.add_argument("--keep-declared-deps", "-k", action="store_true", help="Don't remove redundant, transisitive dependencies in post-processing.") p.add_argument("--verbose", "-v", action="store_true", help="Enable verbose output") p.add_argument("--exclude", "-e", help="Filter modules matching this expression from the graph") p.add_argument("--highlight", help="Highlights modules matching this expression in the graph") p.add_argument("--highlight-all", action="store_true", help="Highlight all paths leading to a highlighted project") p.add_argument("--highlight-children", action="store_true", help="Highlight all child-dependencies of highlighted project") args = p.parse_args() debug_output = args.verbose allow_loose_module_match = args.loose process(args.input, args.output, args.exclude, args.highlight, args.highlight_all, args.highlight_children, args.keep_declared_deps) # don't run from unit-tests if __name__ == "__main__": main()
dep.highlighted_dependents = True
conditional_block
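The conditional block masked in this record (dep.highlighted_dependents = True) is the heart of highlight propagation: every module reachable from a highlighted project gets flagged so the renderer can colour the whole path. A stripped-down sketch of that walk on an invented three-node graph; the class mirrors the relevant Module fields but is not the tool itself.

class Node:
    def __init__(self, name):
        self.name = name
        self.dependant_modules = []
        self.highlighted_dependents = False

    def get_nested_dependencies(self):
        deps = []
        self._collect(deps)
        return deps

    def _collect(self, all_deps):
        for dep in self.dependant_modules:
            if dep not in all_deps:  # the visited check also guards against cycles
                all_deps.append(dep)
                dep._collect(all_deps)

a, b, c = Node("a"), Node("b"), Node("c")
a.dependant_modules = [b]
b.dependant_modules = [c]

# Mark everything reachable from the highlighted project 'a'.
for dep in a.get_nested_dependencies():
    dep.highlighted_dependents = True

print([d.name for d in a.get_nested_dependencies() if d.highlighted_dependents])  # ['b', 'c']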
tsviz.py
#!/usr/bin/python3 # # tsviz # # a command-line utility to help visualize TypeScript class-dependencies and # graphs. # from argparse import ArgumentParser import re import os debug_output = False solution_path = "." allow_loose_module_match = False module_import_declaration = re.compile("import .* from [\"'](.*)[\"'];.*") module_require_declaration = re.compile(".*require\([\"'](.*)[\"']\).*") extension = ".ts" def debug(txt): global debug_output if debug_output: print(txt) def get_unix_path(file): return file.replace("\\", "/") def get_directory(file): unix_file = get_unix_path(file) return os.path.split(unix_file)[0] def
(root_dir): global solution_path solution_path = get_directory(get_unix_path(root_dir)) debug("Base-solution dir set to {0}".format(solution_path)) class Module(object): def __init__(self, filename): self.name = self.get_name_from_filename(filename) self.filename = os.path.abspath(filename) self.dependant_module_names = [] # dependant modules, as declared in file. # not subject to transitive dependency-elimination. self.declared_dependant_modules = [] # dependant modules as visualized in the graph, based on self.declared_dependant_modules. # subject to transitive dependency-elimination. self.dependant_modules = [] self.missing_module_names = [] self.has_missing_modules = False self.is_missing_module = False self.highlight = False self.highlighted_dependents = False self.has_circular_dependencies = False self.circular_dependencies = [] def get_name_from_filename(self, filename): if filename.find("/") == -1: return filename elif len(solution_path) == 0: return filename elif solution_path == ".": return filename else: return filename[len(solution_path)+1::] def get_friendly_id(self): return self.name.replace(".", "_").replace("-", "_").replace("/", "_") def add_dependency(self, module_name): global extension if module_name.find("/") == -1 or module_name.endswith(".json"): # node module. no need to adjust debug("Info: resolved npm-module or JSON data-file {0}.".format(module_name)) elif not module_name.endswith(extension): module_name += extension filename = module_name if filename not in self.dependant_module_names: # print("{0}: Adding to dependency: {1}".format(self.name, filename)) self.dependant_module_names.append(filename) def get_module_references(self, lines): imports = [] for line in lines: if line.startswith("import "): imports.append(line) if line.find("require("): imports.append(line) return imports def get_module_imports(self, imports): result = [] for item in imports: match = module_import_declaration.match(item) if match: module = match.groups()[0] full_module_path = self.get_module_path(module) result.append(full_module_path) match = module_require_declaration.match(item) if match: module = match.groups()[0] full_module_path = self.get_module_path(module) result.append(full_module_path) return result def get_module_path(self, module): if module.find("/") != -1: return os.path.abspath(os.path.join(os.path.dirname(self.filename), module)) else: return module def get_declared_module_dependencies(self): lines = get_lines_from_file(self.filename) import_lines = self.get_module_references(lines) imports = self.get_module_imports(import_lines) return imports def apply_declared_module_dependencies(self): imports = self.get_declared_module_dependencies() for item in imports: self.add_dependency(item) def resolve_modules_from_names(self, modules): global allow_loose_module_match for name in self.dependant_module_names: module = get_module_by_filename(name, modules) if module is None and allow_loose_module_match: module = get_module_by_loose_name(name, modules) # check if we still haven't matched up! if module is None: print("ERROR! 
Failed to resolve dependency {0} in module {1}!".format(name, self.name)) # track missing deps consistently missing_module_id = name.replace("-", "") module = Module(missing_module_id) module.is_missing_module = True modules.append(module) if module.is_missing_module: self.has_missing_modules = True self.missing_module_names.append(module.name) self.dependant_modules.append(module) self.declared_dependant_modules = self.dependant_modules def remove_transitive_dependencies(self): # if A depends on B & C, and # B also depends on C, then # A has a transitive dependency on C through B. # This is a dependency which can be eliminated to clean up the graph. # clone list to have separate object to work on project_deps = self.dependant_modules[:] # investigate each direct sub-dependency as its own tree for dep in self.dependant_modules: # calculate all dependencies for this one tree nested_deps = dep.get_nested_dependencies() # check if any of those are direct dependencues for nested_dep in nested_deps: # if so, remove them if nested_dep in project_deps: debug("--Project {0}-- Removed transitive dependency: {1} (via {2})".format(self.name, nested_dep.name, dep.name)) project_deps.remove(nested_dep) eliminated_deps = len(self.dependant_modules) - len(project_deps) if eliminated_deps != 0: debug("--Project {0}-- Eliminated {1} transitive dependencies. Was {2}. Reduced to {3}".format(self.name, eliminated_deps, len(self.dependant_modules), len(project_deps))) self.dependant_modules = project_deps def get_nested_dependencies(self): total_deps = [] self.add_nested_dependencies_to(total_deps) return total_deps def add_nested_dependencies_to(self, all_deps): for dep in self.dependant_modules: if dep not in all_deps: all_deps.append(dep) dep.add_nested_dependencies_to(all_deps) def has_highlighted_dependencies(self): allDeps = self.get_nested_dependencies() for dep in allDeps: if dep.highlight: return True return False def has_declared_highlighted_dependencies(self): declaredDeps = self.declared_dependant_modules for dep in declaredDeps: if dep.highlight: return True return False def detect_circular_dependencies(self): all_nested_deps = self.get_nested_dependencies() for dep in all_nested_deps: for subdep in dep.declared_dependant_modules: if subdep == self: print("WARNING: Circular dependency detected! Module {0} and {1} depends on each other!".format(self.name, dep.name)) self.has_circular_dependencies = True self.circular_dependencies.append(dep) def get_module_by_filename(filename, modules): for module in modules: if module.filename == filename: return module return None def get_module_by_loose_name(name, modules): basename = os.path.basename(name).lower() for module in modules: if os.path.basename(module.filename).lower() == basename: return module return None def get_lines_from_file(file): with open(file, 'r', encoding="utf-8") as f: contents = f.read() # detect byte order marker. messes up first line in file. # this first line is often an import! 
bytes = contents.encode('utf-8') #print(bytes[0:3]) if bytes[0:2] == b'\xef\xff': print("BOM detected!") contents = contents[2:] if bytes[0:2] == b'\xef\xbb': #print("BOM (3-byte) detected!") contents = contents[1:] lines = contents.split("\n") # print(lines[0]) return lines def sort_modules(modules): modules.sort(key=lambda x: x.name) def get_tsfiles_in_dir(root_dir): global extension from fnmatch import fnmatch results = [] for path, subdirs, files in os.walk(root_dir): for name in files: if fnmatch(name, "*" + extension): results.append(os.path.join(path, name)) # fallback to JS if no typescript if results == []: extension = ".js" for path, subdirs, files in os.walk(root_dir): for name in files: if fnmatch(name, "*" + extension): results.append(os.path.join(path, name)) return results def get_modules(tsfiles): modules = [] for tsfile in tsfiles: modules.append(Module(tsfile)) return modules def process_modules(modules): # all projects & dependencies should now be known. lets analyze them for module in modules: module.resolve_modules_from_names(modules) # once all modules have resolved their dependencies, we can try to # detect ciruclar dependencies! for module in modules: module.detect_circular_dependencies() # format results in a alphabetical order sort_modules(modules) for module in modules: sort_modules(module.dependant_modules) def remove_transitive_dependencies(projects): for project in projects: project.remove_transitive_dependencies() def filter_modules(rx, projects): result = [] for project in projects: if not rx.match(str.lower(project.filename)): result.append(project) else: debug("Info: Excluding project {0}.".format(project.name)) return result def highlight_modules(rx, projects): for project in projects: if rx.match(str.lower(project.name)): debug("Highlighting project {0}".format(project.name)) project.highlight = True for project in projects: if project.highlight: deps = project.get_nested_dependencies() for dep in deps: dep.highlighted_dependents = True def render_dot_file(projects, highlight_all=False, highlight_children=False): lines = [] lines.append("digraph {") lines.append(" rankdir=\"LR\"") lines.append("") lines.append(" # apply theme") lines.append(" bgcolor=\"#222222\"") lines.append("") lines.append(" // defaults for edges and nodes can be specified") lines.append(" node [ color=\"#ffffff\" fontcolor=\"#ffffff\" ]") lines.append(" edge [ color=\"#ffffff\" ]") lines.append("") lines.append(" # module declarations") # define projects # create nodes like this # A [ label="First Node" shape="circle" ] for project in projects: id = project.get_friendly_id() styling = "" if project.highlight or project.highlighted_dependents: styling = " fillcolor=\"#30c2c2\" style=filled color=\"#000000\" fontcolor=\"#000000\"" elif project.is_missing_module: styling = " fillcolor=\"#f22430\" style=filled color=\"#000000\" fontcolor=\"#000000\"" elif project.has_missing_modules: styling = " fillcolor=\"#616118\" style=filled color=\"#000000\" fontcolor=\"#000000\"" elif project.has_circular_dependencies: styling = " fillcolor=\"#ff0000\" style=filled color=\"#000000\" fontcolor=\"#cccc00\"" lines.append(" {0} [ label=\"{1}\" {2} ]".format(id, project.name, styling)) # apply dependencies lines.append("") lines.append(" # project dependencies") for project in projects: proj1_id = project.get_friendly_id() for proj2 in project.dependant_modules: if proj2 is None: print("WARNING: Unable to resolve dependency with ID {0} for project {1}".format(id, project.name)) else: proj2_id = 
proj2.get_friendly_id() styling = "" if proj2.highlight or ((project.highlight or project.highlighted_dependents) and proj2.highlighted_dependents) or proj2.has_declared_highlighted_dependencies() or (highlight_all and proj2.has_highlighted_dependencies()): styling = " [color=\"#30c2c2\"]" elif proj2.is_missing_module or (project.has_missing_modules and proj2.has_missing_modules): styling = " [color=\"#f22430\"]" elif project.has_circular_dependencies and proj2.has_circular_dependencies: styling = " [color=\"#ff0000\"]" lines.append(" {0} -> {1}{2}".format(proj1_id, proj2_id, styling)) lines.append("") lines.append("}") return "\n".join(lines) def process(root_dir, dot_file, exclude, highlight, highlight_all, highlight_children, keep_deps): set_working_basedir(root_dir) module_files = get_tsfiles_in_dir(root_dir) modules = get_modules(module_files) if exclude: debug("Excluding projects...") excluder = re.compile(str.lower(exclude)) modules = filter_modules(excluder, modules) # pull in dependencies declared in TS-files. # requires real files, so cannot be used in test! for module in modules: module.apply_declared_module_dependencies() process_modules(modules) if not keep_deps: debug("Removing redundant dependencies...") remove_transitive_dependencies(modules) if highlight: debug("Highlighting projects...") highlighter = re.compile(str.lower(highlight)) highlight_modules(highlighter, modules) txt = render_dot_file(modules, highlight_all, highlight_children) with open(dot_file, 'w') as f: f.write(txt) print("Wrote output-file '{0}'.".format(dot_file)) def main(): global debug_output, allow_loose_module_match p = ArgumentParser() p.add_argument("--input", "-i", help="The root directory to analyze.") p.add_argument("--output", "-o", help="The file to write to.") p.add_argument("--loose", "-l", action="store_true", help="Allow loose matching of modules (may be required with path-aliases!)") p.add_argument("--keep-declared-deps", "-k", action="store_true", help="Don't remove redundant, transisitive dependencies in post-processing.") p.add_argument("--verbose", "-v", action="store_true", help="Enable verbose output") p.add_argument("--exclude", "-e", help="Filter modules matching this expression from the graph") p.add_argument("--highlight", help="Highlights modules matching this expression in the graph") p.add_argument("--highlight-all", action="store_true", help="Highlight all paths leading to a highlighted project") p.add_argument("--highlight-children", action="store_true", help="Highlight all child-dependencies of highlighted project") args = p.parse_args() debug_output = args.verbose allow_loose_module_match = args.loose process(args.input, args.output, args.exclude, args.highlight, args.highlight_all, args.highlight_children, args.keep_declared_deps) # don't run from unit-tests if __name__ == "__main__": main()
set_working_basedir
identifier_name
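One detail worth noting in the listing above: get_lines_from_file strips the UTF-8 byte order mark by hand so that the first line (often an import) parses cleanly. If relying on the standard library's codec machinery is acceptable, the built-in utf-8-sig encoding does the same job transparently. A minimal alternative sketch, not the tool's actual implementation:

def get_lines_from_file(file):
    # 'utf-8-sig' skips a leading UTF-8 BOM automatically, so no manual
    # prefix checks are needed.
    with open(file, 'r', encoding="utf-8-sig") as f:
        return f.read().split("\n")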
tsviz.py
#!/usr/bin/python3 # # tsviz # # a command-line utility to help visualize TypeScript class-dependencies and # graphs. # from argparse import ArgumentParser import re import os debug_output = False solution_path = "." allow_loose_module_match = False module_import_declaration = re.compile("import .* from [\"'](.*)[\"'];.*") module_require_declaration = re.compile(".*require\([\"'](.*)[\"']\).*") extension = ".ts" def debug(txt): global debug_output if debug_output: print(txt) def get_unix_path(file): return file.replace("\\", "/") def get_directory(file): unix_file = get_unix_path(file) return os.path.split(unix_file)[0] def set_working_basedir(root_dir): global solution_path solution_path = get_directory(get_unix_path(root_dir)) debug("Base-solution dir set to {0}".format(solution_path)) class Module(object):
def get_module_by_filename(filename, modules): for module in modules: if module.filename == filename: return module return None def get_module_by_loose_name(name, modules): basename = os.path.basename(name).lower() for module in modules: if os.path.basename(module.filename).lower() == basename: return module return None def get_lines_from_file(file): with open(file, 'r', encoding="utf-8") as f: contents = f.read() # detect byte order marker. messes up first line in file. # this first line is often an import! bytes = contents.encode('utf-8') #print(bytes[0:3]) if bytes[0:2] == b'\xef\xff': print("BOM detected!") contents = contents[2:] if bytes[0:2] == b'\xef\xbb': #print("BOM (3-byte) detected!") contents = contents[1:] lines = contents.split("\n") # print(lines[0]) return lines def sort_modules(modules): modules.sort(key=lambda x: x.name) def get_tsfiles_in_dir(root_dir): global extension from fnmatch import fnmatch results = [] for path, subdirs, files in os.walk(root_dir): for name in files: if fnmatch(name, "*" + extension): results.append(os.path.join(path, name)) # fallback to JS if no typescript if results == []: extension = ".js" for path, subdirs, files in os.walk(root_dir): for name in files: if fnmatch(name, "*" + extension): results.append(os.path.join(path, name)) return results def get_modules(tsfiles): modules = [] for tsfile in tsfiles: modules.append(Module(tsfile)) return modules def process_modules(modules): # all projects & dependencies should now be known. lets analyze them for module in modules: module.resolve_modules_from_names(modules) # once all modules have resolved their dependencies, we can try to # detect ciruclar dependencies! for module in modules: module.detect_circular_dependencies() # format results in a alphabetical order sort_modules(modules) for module in modules: sort_modules(module.dependant_modules) def remove_transitive_dependencies(projects): for project in projects: project.remove_transitive_dependencies() def filter_modules(rx, projects): result = [] for project in projects: if not rx.match(str.lower(project.filename)): result.append(project) else: debug("Info: Excluding project {0}.".format(project.name)) return result def highlight_modules(rx, projects): for project in projects: if rx.match(str.lower(project.name)): debug("Highlighting project {0}".format(project.name)) project.highlight = True for project in projects: if project.highlight: deps = project.get_nested_dependencies() for dep in deps: dep.highlighted_dependents = True def render_dot_file(projects, highlight_all=False, highlight_children=False): lines = [] lines.append("digraph {") lines.append(" rankdir=\"LR\"") lines.append("") lines.append(" # apply theme") lines.append(" bgcolor=\"#222222\"") lines.append("") lines.append(" // defaults for edges and nodes can be specified") lines.append(" node [ color=\"#ffffff\" fontcolor=\"#ffffff\" ]") lines.append(" edge [ color=\"#ffffff\" ]") lines.append("") lines.append(" # module declarations") # define projects # create nodes like this # A [ label="First Node" shape="circle" ] for project in projects: id = project.get_friendly_id() styling = "" if project.highlight or project.highlighted_dependents: styling = " fillcolor=\"#30c2c2\" style=filled color=\"#000000\" fontcolor=\"#000000\"" elif project.is_missing_module: styling = " fillcolor=\"#f22430\" style=filled color=\"#000000\" fontcolor=\"#000000\"" elif project.has_missing_modules: styling = " fillcolor=\"#616118\" style=filled color=\"#000000\" fontcolor=\"#000000\"" elif 
project.has_circular_dependencies: styling = " fillcolor=\"#ff0000\" style=filled color=\"#000000\" fontcolor=\"#cccc00\"" lines.append(" {0} [ label=\"{1}\" {2} ]".format(id, project.name, styling)) # apply dependencies lines.append("") lines.append(" # project dependencies") for project in projects: proj1_id = project.get_friendly_id() for proj2 in project.dependant_modules: if proj2 is None: print("WARNING: Unable to resolve dependency with ID {0} for project {1}".format(id, project.name)) else: proj2_id = proj2.get_friendly_id() styling = "" if proj2.highlight or ((project.highlight or project.highlighted_dependents) and proj2.highlighted_dependents) or proj2.has_declared_highlighted_dependencies() or (highlight_all and proj2.has_highlighted_dependencies()): styling = " [color=\"#30c2c2\"]" elif proj2.is_missing_module or (project.has_missing_modules and proj2.has_missing_modules): styling = " [color=\"#f22430\"]" elif project.has_circular_dependencies and proj2.has_circular_dependencies: styling = " [color=\"#ff0000\"]" lines.append(" {0} -> {1}{2}".format(proj1_id, proj2_id, styling)) lines.append("") lines.append("}") return "\n".join(lines) def process(root_dir, dot_file, exclude, highlight, highlight_all, highlight_children, keep_deps): set_working_basedir(root_dir) module_files = get_tsfiles_in_dir(root_dir) modules = get_modules(module_files) if exclude: debug("Excluding projects...") excluder = re.compile(str.lower(exclude)) modules = filter_modules(excluder, modules) # pull in dependencies declared in TS-files. # requires real files, so cannot be used in test! for module in modules: module.apply_declared_module_dependencies() process_modules(modules) if not keep_deps: debug("Removing redundant dependencies...") remove_transitive_dependencies(modules) if highlight: debug("Highlighting projects...") highlighter = re.compile(str.lower(highlight)) highlight_modules(highlighter, modules) txt = render_dot_file(modules, highlight_all, highlight_children) with open(dot_file, 'w') as f: f.write(txt) print("Wrote output-file '{0}'.".format(dot_file)) def main(): global debug_output, allow_loose_module_match p = ArgumentParser() p.add_argument("--input", "-i", help="The root directory to analyze.") p.add_argument("--output", "-o", help="The file to write to.") p.add_argument("--loose", "-l", action="store_true", help="Allow loose matching of modules (may be required with path-aliases!)") p.add_argument("--keep-declared-deps", "-k", action="store_true", help="Don't remove redundant, transisitive dependencies in post-processing.") p.add_argument("--verbose", "-v", action="store_true", help="Enable verbose output") p.add_argument("--exclude", "-e", help="Filter modules matching this expression from the graph") p.add_argument("--highlight", help="Highlights modules matching this expression in the graph") p.add_argument("--highlight-all", action="store_true", help="Highlight all paths leading to a highlighted project") p.add_argument("--highlight-children", action="store_true", help="Highlight all child-dependencies of highlighted project") args = p.parse_args() debug_output = args.verbose allow_loose_module_match = args.loose process(args.input, args.output, args.exclude, args.highlight, args.highlight_all, args.highlight_children, args.keep_declared_deps) # don't run from unit-tests if __name__ == "__main__": main()
def __init__(self, filename): self.name = self.get_name_from_filename(filename) self.filename = os.path.abspath(filename) self.dependant_module_names = [] # dependant modules, as declared in file. # not subject to transitive dependency-elimination. self.declared_dependant_modules = [] # dependant modules as visualized in the graph, based on self.declared_dependant_modules. # subject to transitive dependency-elimination. self.dependant_modules = [] self.missing_module_names = [] self.has_missing_modules = False self.is_missing_module = False self.highlight = False self.highlighted_dependents = False self.has_circular_dependencies = False self.circular_dependencies = [] def get_name_from_filename(self, filename): if filename.find("/") == -1: return filename elif len(solution_path) == 0: return filename elif solution_path == ".": return filename else: return filename[len(solution_path)+1::] def get_friendly_id(self): return self.name.replace(".", "_").replace("-", "_").replace("/", "_") def add_dependency(self, module_name): global extension if module_name.find("/") == -1 or module_name.endswith(".json"): # node module. no need to adjust debug("Info: resolved npm-module or JSON data-file {0}.".format(module_name)) elif not module_name.endswith(extension): module_name += extension filename = module_name if filename not in self.dependant_module_names: # print("{0}: Adding to dependency: {1}".format(self.name, filename)) self.dependant_module_names.append(filename) def get_module_references(self, lines): imports = [] for line in lines: if line.startswith("import "): imports.append(line) if line.find("require("): imports.append(line) return imports def get_module_imports(self, imports): result = [] for item in imports: match = module_import_declaration.match(item) if match: module = match.groups()[0] full_module_path = self.get_module_path(module) result.append(full_module_path) match = module_require_declaration.match(item) if match: module = match.groups()[0] full_module_path = self.get_module_path(module) result.append(full_module_path) return result def get_module_path(self, module): if module.find("/") != -1: return os.path.abspath(os.path.join(os.path.dirname(self.filename), module)) else: return module def get_declared_module_dependencies(self): lines = get_lines_from_file(self.filename) import_lines = self.get_module_references(lines) imports = self.get_module_imports(import_lines) return imports def apply_declared_module_dependencies(self): imports = self.get_declared_module_dependencies() for item in imports: self.add_dependency(item) def resolve_modules_from_names(self, modules): global allow_loose_module_match for name in self.dependant_module_names: module = get_module_by_filename(name, modules) if module is None and allow_loose_module_match: module = get_module_by_loose_name(name, modules) # check if we still haven't matched up! if module is None: print("ERROR! Failed to resolve dependency {0} in module {1}!".format(name, self.name)) # track missing deps consistently missing_module_id = name.replace("-", "") module = Module(missing_module_id) module.is_missing_module = True modules.append(module) if module.is_missing_module: self.has_missing_modules = True self.missing_module_names.append(module.name) self.dependant_modules.append(module) self.declared_dependant_modules = self.dependant_modules def remove_transitive_dependencies(self): # if A depends on B & C, and # B also depends on C, then # A has a transitive dependency on C through B. 
# This is a dependency which can be eliminated to clean up the graph. # clone list to have separate object to work on project_deps = self.dependant_modules[:] # investigate each direct sub-dependency as its own tree for dep in self.dependant_modules: # calculate all dependencies for this one tree nested_deps = dep.get_nested_dependencies() # check if any of those are direct dependencues for nested_dep in nested_deps: # if so, remove them if nested_dep in project_deps: debug("--Project {0}-- Removed transitive dependency: {1} (via {2})".format(self.name, nested_dep.name, dep.name)) project_deps.remove(nested_dep) eliminated_deps = len(self.dependant_modules) - len(project_deps) if eliminated_deps != 0: debug("--Project {0}-- Eliminated {1} transitive dependencies. Was {2}. Reduced to {3}".format(self.name, eliminated_deps, len(self.dependant_modules), len(project_deps))) self.dependant_modules = project_deps def get_nested_dependencies(self): total_deps = [] self.add_nested_dependencies_to(total_deps) return total_deps def add_nested_dependencies_to(self, all_deps): for dep in self.dependant_modules: if dep not in all_deps: all_deps.append(dep) dep.add_nested_dependencies_to(all_deps) def has_highlighted_dependencies(self): allDeps = self.get_nested_dependencies() for dep in allDeps: if dep.highlight: return True return False def has_declared_highlighted_dependencies(self): declaredDeps = self.declared_dependant_modules for dep in declaredDeps: if dep.highlight: return True return False def detect_circular_dependencies(self): all_nested_deps = self.get_nested_dependencies() for dep in all_nested_deps: for subdep in dep.declared_dependant_modules: if subdep == self: print("WARNING: Circular dependency detected! Module {0} and {1} depends on each other!".format(self.name, dep.name)) self.has_circular_dependencies = True self.circular_dependencies.append(dep)
identifier_body
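The remove_transitive_dependencies body above implements a transitive reduction: a direct edge A -> C is dropped whenever C is already reachable through another direct dependency. Here is the same idea on a plain dict-based graph, with toy data invented for illustration:

def remove_transitive_edges(graph):
    # graph maps node -> set of direct dependencies.
    def reachable(start, seen=None):
        seen = set() if seen is None else seen
        for dep in graph[start]:
            if dep not in seen:  # guards against cycles, like the tool's visited list
                seen.add(dep)
                reachable(dep, seen)
        return seen

    reduced = {}
    for node, deps in graph.items():
        indirect = set()
        for dep in deps:
            indirect |= reachable(dep)
        reduced[node] = deps - indirect
    return reduced

# A depends on B and C, B depends on C, so A -> C is redundant.
print(remove_transitive_edges({"A": {"B", "C"}, "B": {"C"}, "C": set()}))
# {'A': {'B'}, 'B': {'C'}, 'C': set()}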
main.rs
use std::ops::Deref; use std::path::PathBuf; use serde::Deserialize; use structopt::StructOpt; use tmux_interface::{AttachSession, NewSession, NewWindow, SelectWindow, SendKeys, SplitWindow, TmuxInterface}; const ORIGINAL_WINDOW_NAME: &str = "__DEFAULT__"; #[derive(Debug, Deserialize)] struct Setup { file: Option<String>, socket_name: Option<String>, session: Option<Session>, #[serde(rename = "window")] windows: Vec<Window>, rebuild: Option<bool>, } #[derive(Debug, Deserialize, Default)] struct Session { name: Option<String>, select: Option<String>, } #[derive(Debug, Deserialize)] struct Window { name: Option<String>, layout: String, #[serde(rename = "pane")] panes: Option<Vec<Pane>>, } #[derive(Debug, Deserialize)] struct Pane { name: Option<String>, command: Option<String>, } #[derive(Debug, StructOpt)] #[structopt(name = "txl", about = "A tmux layout manager.")] struct Args { /// TOML file that contains a txl layout. #[structopt(parse(from_os_str))] file: PathBuf, /// If true, txl will destroy the previous session if it exists, and rebuild everything. #[structopt(long)] rebuild: bool, } macro_rules! handle { ($expr:expr, |$err:ident| $err_handler:expr) => {{ match $expr { Ok(v) => v, Err($err) => { $err_handler } } }}; } fn main() { let Args { file: path, rebuild, } = <_>::from_args(); let (path, input) = { let mut path = path; macro_rules! not { ($ext:literal) => {{ path.set_extension($ext); !path.exists() }}; } if !path.exists() && not!("txl") && not!("toml") { path.set_extension(""); eprintln!("Unable to locate file: {}[.txl|.toml]", path.display()); return; } match std::fs::read_to_string(&path) { Ok(value) => (path, value), Err(err) => { eprintln!("Unable to read file: {}\n{}", path.display(), err); return; } } }; let Setup { file, socket_name, session, windows, rebuild: rebuilding, } = handle!(toml::from_str(&input), |err| { eprintln!("Input file (\"{}\") contains invalid toml: {}", path.display(), err); return; }); let rebuild = rebuild || rebuilding.unwrap_or(false); let file = file.as_ref().map(Deref::deref); let socket_name = socket_name.as_ref().map(Deref::deref); let Session { name: session_name, select, } = session.unwrap_or_default(); let session_name = session_name.as_ref().map(Deref::deref); let select = select.as_ref().map(Deref::deref); // println!("{:#?}", windows); let tmux = TmuxInterface { file, socket_name, ..Default::default() }; { // Setting up the session and whatnot. let has_session = handle!(tmux.has_session(session_name), |err| { eprintln!("Unable to check if session already exists: {}", err); return; }); if has_session { if !rebuild { // Well, we're not allowed to rebuild, so attach ourselves, and we're done. attach_to(&tmux, session_name); } println!("Found session... Destroying.."); handle!(tmux.kill_session(Some(false), None, session_name), |err| { eprintln!("Unable to kill session with the same name: {}", err); return; }); } let has_session = handle!(tmux.has_session(session_name), |err| { eprintln!("Unable to check if session already exists: {}", err); return; }); if has_session { // I've had some weird sessions where they just keep on sticking around. // Stupidest solution I've found is to just kill the server... 
:| handle!(tmux.kill_server(), |err| { eprintln!("Unable to kill server: {}", err); return; }); } let (width, height) = if let Some((w, h)) = term_size::dimensions() { (Some(w), Some(h)) } else { (None, None) }; let new_session = NewSession { session_name, detached: Some(true), width, height, ..Default::default() }; match tmux.new_session(&new_session) { Ok(v) => if !v.is_empty() { eprintln!("Unable to create new session: {}", v); return;
} Err(err) => { eprintln!("Unable to create new session: {}", err); return; } } } // We rename the first window, so we can locate and remove it later. match tmux.rename_window(None, ORIGINAL_WINDOW_NAME) { Ok(v) => if !v.status.success() { eprintln!("Unable to rename default window: {:?}", v); return; } Err(err) => { eprintln!("Unable to rename default window: {}", err); return; } } // This is where we need to build the actual layout... for Window { name: window_name, layout, panes, } in windows { let window_name = window_name.as_ref().map(Deref::deref); let panes = panes.unwrap_or_default(); { // Tell tmux to create the window let new_window = NewWindow { detached: Some(false), window_name, ..Default::default() }; match tmux.new_window(new_window) { Ok(v) => if !v.is_empty() { eprintln!("Unable to create new window: {}", v); return; } Err(err) => { eprintln!("Unable to create new window: {}", err); return; } } } for Action(direction, target, percentage) in parse_layout(&layout) { let selected = format!("{}", target + 1); let percentage = (percentage * 100f32) as usize; let split_window = SplitWindow { target_pane: Some(&selected), vertical: Some(direction == Direction::Vertical), horizontal: Some(direction == Direction::Horizontal), percentage: Some(percentage), ..Default::default() }; match tmux.split_window(&split_window) { Ok(v) => if !v.is_empty() { eprintln!("Unable to split window: {}", v); return; } Err(err) => { eprintln!("Unable to split window: {}", err); return; } } } let mut target = 1; for pane in panes { let target = { let old = target; target += 1; old }; let command = if let Some(ref value) = pane.command { value.deref() } else { continue; }; let selected = format!("{}", target); let keys = vec![ command, "C-m" ]; let send_keys = SendKeys { target_pane: Some(&selected), key: keys, ..Default::default() }; match tmux.send_keys(&send_keys) { Ok(v) => if !v.status.success() { eprintln!("Unable to send command ({}) to pane {}: {:?}", command, target, v); } Err(err) => { eprintln!("Unable to send command ({}) to pane {}: {}", command, target, err); } } } } // Kill the first window, as tmux just adds it, but we don't want or need it. match tmux.kill_window(None, Some(ORIGINAL_WINDOW_NAME)) { Ok(v) => if !v.status.success() { eprintln!("Unable to kill default window: {:?}", v); return; } Err(err) => { eprintln!("Unable to kill default window: {}", err); return; } } if let Some(value) = select { let select_window = SelectWindow { target_window: Some(value), ..Default::default() }; match tmux.select_window(&select_window) { Ok(v) => if !v.status.success() { eprintln!("Unable to select window: {:?}", v); return; } Err(err) => { eprintln!("Unable to select window: {}", err); return; } } } // We're done here, so we can attach to the session, and promptly fuck off. attach_to(&tmux, session_name); } fn attach_to(tmux: &TmuxInterface, session_name: Option<&str>) -> ! 
{ let attach_session = AttachSession { target_session: session_name, detach_other: Some(true), ..Default::default() }; tmux.attach_session_with(&attach_session, |args| { println!("{:?}", args); #[cfg(any(unix, macos))] { let program = tmux.tmux.unwrap_or("tmux"); use exec::Command; let mut command = Command::new(program); command.args(&args); let error = command.exec(); panic!("{}", error); } #[cfg(not(any(unix, macos)))] { compile_error!("Windows doesn't support 'execvp'"); } }); panic!("Failed to attach to tmux session: {:?}", session_name); } #[derive(Debug, PartialEq)] enum Direction { Vertical, Horizontal, } #[derive(Debug)] struct Action(Direction, u8, f32); fn parse_layout(layout: &str) -> Vec<Action> { let mut rects = determine_rectangles(layout); let mut actions = vec![]; while let Some((parent_index, child_index, ordinal)) = find_first_pair(&rects) { let child = rects.remove(child_index); let parent = &mut rects[parent_index]; // println!("{:?} <= {:?}", parent.c, child.c); match ordinal { Ordinal::South => { let old_height = child.height; let new_height = parent.height + child.height; let percentage = old_height as f32 / new_height as f32; parent.height = new_height; actions.push(Action(Direction::Vertical, parent_index as u8, percentage)); } Ordinal::East => { let old_width = child.width; let new_width = parent.width + child.width; let percentage = old_width as f32 / new_width as f32; parent.width = new_width; actions.push(Action(Direction::Horizontal, parent_index as u8, percentage)); } _ => panic!("Someone changed the ORDINALS constant..."), } } actions.reverse(); actions } fn find_first_pair(rects: &Vec<Rect>) -> Option<(usize, usize, Ordinal)> { const ORDINALS: &[Ordinal] = &[ Ordinal::South, Ordinal::East, ]; for (left_index, rect) in rects.iter().enumerate() { for ordinal in ORDINALS { let left_edge = rect.edge(*ordinal); if let Some(right_index) = rects.iter().position(|r| r != rect && r.edge(ordinal.opposite()) == left_edge) { return Some((left_index, right_index, *ordinal)); } } } None } #[derive(Debug, PartialEq, Copy, Clone)] enum Ordinal { North, South, East, West, } impl Ordinal { fn opposite(&self) -> Ordinal { match self { Ordinal::North => Ordinal::South, Ordinal::South => Ordinal::North, Ordinal::East => Ordinal::West, Ordinal::West => Ordinal::East, } } } #[derive(Debug, PartialEq)] struct Rect { c: char, x: u8, y: u8, width: u8, height: u8, } impl Rect { fn edge(&self, direction: Ordinal) -> Edge { match direction { Ordinal::North => { Edge { x: self.x, y: self.y, length: self.width, } } Ordinal::East => { Edge { x: self.x + self.width, y: self.y, length: self.height, } } Ordinal::South => { Edge { x: self.x, y: self.y + self.height, length: self.width, } } Ordinal::West => { Edge { x: self.x, y: self.y, length: self.height, } } } } } #[derive(Debug, PartialEq)] struct Edge { x: u8, y: u8, length: u8, } fn determine_rectangles(layout: &str) -> Vec<Rect> { let (width, height, chars) = sanitise_input(layout); macro_rules! 
point { ($index:ident) => {{ let x = $index % width; let y = $index / width; (x, y) }}; } let mut index = 0usize; let mut rects = vec![]; let mut bit_mask = vec![false; chars.len()]; while index < chars.len() { if bit_mask[index] { index += 1; continue; } let c = chars[index]; let rect_width = { let mut rect_width = width; for offset in 1..width { let right = index + offset; if right >= chars.len() || chars[right] != c { rect_width = offset; break; } }; rect_width }; let rect_height = { let mut rect_height = height; for offset in 1..height { let below = index + (offset * width); if below >= chars.len() || chars[below] != c { rect_height = offset; break; } } rect_height }; for y_offset in 0..rect_height { for x_offset in 0..rect_width { let bit_index = index + x_offset + (y_offset * width); if chars[bit_index] != c { panic!("Invalid character at {:?}. [expected: {:?}, found: {:?}]", point!(bit_index), c, chars[bit_index]); } bit_mask[bit_index] = true; } } let (x, y) = point!(index); rects.push(Rect { c, x: x as u8, y: y as u8, width: rect_width as u8, height: rect_height as u8, }); index += 1; } rects } fn sanitise_input(layout: &str) -> (usize, usize, Vec<char>) { #[derive(PartialEq)] enum Mode { SkipWhitespace, WidthCounting, HeightCounting, } use Mode::*; // It basically treats any whitespace as newlines... // If you give it something like "000\n000 000" // It'll think you've given it a 3x3 of '0' let mut mode = SkipWhitespace; let mut width = 0usize; let mut running_width = 0usize; let mut height = 0usize; let mut chars = vec![]; for c in layout.chars() { if c.is_ascii_whitespace() { if mode == WidthCounting { if width == 0 { width = running_width; running_width = 0; } else { if width != running_width { panic!("Width's do not match! (pos:{})", chars.len()); } running_width = 0; } mode = HeightCounting; } if mode == HeightCounting { height += 1; mode = SkipWhitespace; } continue; } if mode == SkipWhitespace { mode = WidthCounting; } if mode == WidthCounting { running_width += 1; } chars.push(c); } let expected = (width * height) as usize; if expected != chars.len() { panic!("Unexpected character count. [expected: {}, got: {}]", expected, chars.len()); } (width, height, chars) }
random_line_split
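determine_rectangles in main.rs scans the sanitised character grid left-to-right, top-to-bottom, growing each unvisited cell into its maximal same-character rectangle. Here is the same greedy scan sketched in Python for illustration; the layout is invented, and this sketch omits the original's panic on non-rectangular regions.

def find_rects(rows):
    # rows: equal-length strings; returns (char, x, y, width, height) tuples.
    height, width = len(rows), len(rows[0])
    used = [[False] * width for _ in range(height)]
    rects = []
    for y in range(height):
        for x in range(width):
            if used[y][x]:
                continue
            c = rows[y][x]
            w = 1
            while x + w < width and rows[y][x + w] == c and not used[y][x + w]:
                w += 1
            h = 1
            while y + h < height and rows[y + h][x] == c:
                h += 1
            # Mark the whole rectangle as consumed before moving on.
            for yy in range(y, y + h):
                for xx in range(x, x + w):
                    used[yy][xx] = True
            rects.append((c, x, y, w, h))
    return rects

print(find_rects(["0011", "0011", "2222"]))
# [('0', 0, 0, 2, 2), ('1', 2, 0, 2, 2), ('2', 0, 2, 4, 1)]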
main.rs
use std::ops::Deref; use std::path::PathBuf; use serde::Deserialize; use structopt::StructOpt; use tmux_interface::{AttachSession, NewSession, NewWindow, SelectWindow, SendKeys, SplitWindow, TmuxInterface}; const ORIGINAL_WINDOW_NAME: &str = "__DEFAULT__"; #[derive(Debug, Deserialize)] struct Setup { file: Option<String>, socket_name: Option<String>, session: Option<Session>, #[serde(rename = "window")] windows: Vec<Window>, rebuild: Option<bool>, } #[derive(Debug, Deserialize, Default)] struct Session { name: Option<String>, select: Option<String>, } #[derive(Debug, Deserialize)] struct Window { name: Option<String>, layout: String, #[serde(rename = "pane")] panes: Option<Vec<Pane>>, } #[derive(Debug, Deserialize)] struct Pane { name: Option<String>, command: Option<String>, } #[derive(Debug, StructOpt)] #[structopt(name = "txl", about = "A tmux layout manager.")] struct Args { /// TOML file that contains a txl layout. #[structopt(parse(from_os_str))] file: PathBuf, /// If true, txl will destroy the previous session if it exists, and rebuild everything. #[structopt(long)] rebuild: bool, } macro_rules! handle { ($expr:expr, |$err:ident| $err_handler:expr) => {{ match $expr { Ok(v) => v, Err($err) => { $err_handler } } }}; } fn main() { let Args { file: path, rebuild, } = <_>::from_args(); let (path, input) = { let mut path = path; macro_rules! not { ($ext:literal) => {{ path.set_extension($ext); !path.exists() }}; } if !path.exists() && not!("txl") && not!("toml") { path.set_extension(""); eprintln!("Unable to locate file: {}[.txl|.toml]", path.display()); return; } match std::fs::read_to_string(&path) { Ok(value) => (path, value), Err(err) => { eprintln!("Unable to read file: {}\n{}", path.display(), err); return; } } }; let Setup { file, socket_name, session, windows, rebuild: rebuilding, } = handle!(toml::from_str(&input), |err| { eprintln!("Input file (\"{}\") contains invalid toml: {}", path.display(), err); return; }); let rebuild = rebuild || rebuilding.unwrap_or(false); let file = file.as_ref().map(Deref::deref); let socket_name = socket_name.as_ref().map(Deref::deref); let Session { name: session_name, select, } = session.unwrap_or_default(); let session_name = session_name.as_ref().map(Deref::deref); let select = select.as_ref().map(Deref::deref); // println!("{:#?}", windows); let tmux = TmuxInterface { file, socket_name, ..Default::default() }; { // Setting up the session and whatnot. let has_session = handle!(tmux.has_session(session_name), |err| { eprintln!("Unable to check if session already exists: {}", err); return; }); if has_session { if !rebuild { // Well, we're not allowed to rebuild, so attach ourselves, and we're done. attach_to(&tmux, session_name); } println!("Found session... Destroying.."); handle!(tmux.kill_session(Some(false), None, session_name), |err| { eprintln!("Unable to kill session with the same name: {}", err); return; }); } let has_session = handle!(tmux.has_session(session_name), |err| { eprintln!("Unable to check if session already exists: {}", err); return; }); if has_session { // I've had some weird sessions where they just keep on sticking around. // Stupidest solution I've found is to just kill the server... 
:| handle!(tmux.kill_server(), |err| { eprintln!("Unable to kill server: {}", err); return; }); } let (width, height) = if let Some((w, h)) = term_size::dimensions() { (Some(w), Some(h)) } else { (None, None) }; let new_session = NewSession { session_name, detached: Some(true), width, height, ..Default::default() }; match tmux.new_session(&new_session) { Ok(v) => if !v.is_empty() { eprintln!("Unable to create new session: {}", v); return; } Err(err) => { eprintln!("Unable to create new session: {}", err); return; } } } // We rename the first window, so we can locate and remove it later. match tmux.rename_window(None, ORIGINAL_WINDOW_NAME) { Ok(v) => if !v.status.success() { eprintln!("Unable to rename default window: {:?}", v); return; } Err(err) => { eprintln!("Unable to rename default window: {}", err); return; } } // This is where we need to build the actual layout... for Window { name: window_name, layout, panes, } in windows { let window_name = window_name.as_ref().map(Deref::deref); let panes = panes.unwrap_or_default(); { // Tell tmux to create the window let new_window = NewWindow { detached: Some(false), window_name, ..Default::default() }; match tmux.new_window(new_window) { Ok(v) => if !v.is_empty() { eprintln!("Unable to create new window: {}", v); return; } Err(err) => { eprintln!("Unable to create new window: {}", err); return; } } } for Action(direction, target, percentage) in parse_layout(&layout) { let selected = format!("{}", target + 1); let percentage = (percentage * 100f32) as usize; let split_window = SplitWindow { target_pane: Some(&selected), vertical: Some(direction == Direction::Vertical), horizontal: Some(direction == Direction::Horizontal), percentage: Some(percentage), ..Default::default() }; match tmux.split_window(&split_window) { Ok(v) => if !v.is_empty() { eprintln!("Unable to split window: {}", v); return; } Err(err) => { eprintln!("Unable to split window: {}", err); return; } } } let mut target = 1; for pane in panes { let target = { let old = target; target += 1; old }; let command = if let Some(ref value) = pane.command { value.deref() } else { continue; }; let selected = format!("{}", target); let keys = vec![ command, "C-m" ]; let send_keys = SendKeys { target_pane: Some(&selected), key: keys, ..Default::default() }; match tmux.send_keys(&send_keys) { Ok(v) => if !v.status.success() { eprintln!("Unable to send command ({}) to pane {}: {:?}", command, target, v); } Err(err) => { eprintln!("Unable to send command ({}) to pane {}: {}", command, target, err); } } } } // Kill the first window, as tmux just adds it, but we don't want or need it. match tmux.kill_window(None, Some(ORIGINAL_WINDOW_NAME)) { Ok(v) => if !v.status.success() { eprintln!("Unable to kill default window: {:?}", v); return; } Err(err) => { eprintln!("Unable to kill default window: {}", err); return; } } if let Some(value) = select { let select_window = SelectWindow { target_window: Some(value), ..Default::default() }; match tmux.select_window(&select_window) { Ok(v) => if !v.status.success() { eprintln!("Unable to select window: {:?}", v); return; } Err(err) => { eprintln!("Unable to select window: {}", err); return; } } } // We're done here, so we can attach to the session, and promptly fuck off. attach_to(&tmux, session_name); } fn attach_to(tmux: &TmuxInterface, session_name: Option<&str>) -> ! 
{
    let attach_session = AttachSession {
        target_session: session_name,
        detach_other: Some(true),
        ..Default::default()
    };
    tmux.attach_session_with(&attach_session, |args| {
        println!("{:?}", args);
        // `macos` is not a built-in cfg predicate (and `unix` already covers
        // macOS), so spell the intent out as a target_os check.
        #[cfg(any(unix, target_os = "macos"))]
        {
            let program = tmux.tmux.unwrap_or("tmux");
            use exec::Command;
            let mut command = Command::new(program);
            command.args(&args);
            let error = command.exec();
            panic!("{}", error);
        }
        #[cfg(not(any(unix, target_os = "macos")))]
        {
            compile_error!("Windows doesn't support 'execvp'");
        }
    });
    panic!("Failed to attach to tmux session: {:?}", session_name);
}

#[derive(Debug, PartialEq)]
enum Direction {
    Vertical,
    Horizontal,
}

#[derive(Debug)]
struct Action(Direction, u8, f32);

fn parse_layout(layout: &str) -> Vec<Action> {
    let mut rects = determine_rectangles(layout);
    let mut actions = vec![];
    // Repeatedly merge an adjacent pair of rectangles into one, recording the
    // split that would undo the merge; reversing at the end yields the splits
    // in the order tmux has to perform them.
    while let Some((parent_index, child_index, ordinal)) = find_first_pair(&rects) {
        let child = rects.remove(child_index);
        let parent = &mut rects[parent_index];
        // println!("{:?} <= {:?}", parent.c, child.c);
        match ordinal {
            Ordinal::South => {
                let old_height = child.height;
                let new_height = parent.height + child.height;
                let percentage = old_height as f32 / new_height as f32;
                parent.height = new_height;
                actions.push(Action(Direction::Vertical, parent_index as u8, percentage));
            }
            Ordinal::East => {
                let old_width = child.width;
                let new_width = parent.width + child.width;
                let percentage = old_width as f32 / new_width as f32;
                parent.width = new_width;
                actions.push(Action(Direction::Horizontal, parent_index as u8, percentage));
            }
            _ => panic!("Someone changed the ORDINALS constant..."),
        }
    }
    actions.reverse();
    actions
}

fn find_first_pair(rects: &[Rect]) -> Option<(usize, usize, Ordinal)> {
    const ORDINALS: &[Ordinal] = &[Ordinal::South, Ordinal::East];
    for (left_index, rect) in rects.iter().enumerate() {
        for ordinal in ORDINALS {
            let left_edge = rect.edge(*ordinal);
            if let Some(right_index) = rects
                .iter()
                .position(|r| r != rect && r.edge(ordinal.opposite()) == left_edge)
            }
        }
    }
    None
}

#[derive(Debug, PartialEq, Copy, Clone)]
enum Ordinal {
    North,
    South,
    East,
    West,
}

impl Ordinal {
    fn opposite(&self) -> Ordinal {
        match self {
            Ordinal::North => Ordinal::South,
            Ordinal::South => Ordinal::North,
            Ordinal::East => Ordinal::West,
            Ordinal::West => Ordinal::East,
        }
    }
}

#[derive(Debug, PartialEq)]
struct Rect {
    c: char,
    x: u8,
    y: u8,
    width: u8,
    height: u8,
}

impl Rect {
    fn edge(&self, direction: Ordinal) -> Edge {
        match direction {
            Ordinal::North => Edge { x: self.x, y: self.y, length: self.width },
            Ordinal::East => Edge { x: self.x + self.width, y: self.y, length: self.height },
            Ordinal::South => Edge { x: self.x, y: self.y + self.height, length: self.width },
            Ordinal::West => Edge { x: self.x, y: self.y, length: self.height },
        }
    }
}

#[derive(Debug, PartialEq)]
struct Edge {
    x: u8,
    y: u8,
    length: u8,
}

fn determine_rectangles(layout: &str) -> Vec<Rect> {
    let (width, height, chars) = sanitise_input(layout);

    macro_rules! point {
        ($index:ident) => {{
            let x = $index % width;
            let y = $index / width;
            (x, y)
        }};
    }

    let mut index = 0usize;
    let mut rects = vec![];
    let mut bit_mask = vec![false; chars.len()];
    while index < chars.len() {
        if bit_mask[index] {
            index += 1;
            continue;
        }
        let c = chars[index];
        // Walk right until the character changes to find the rectangle's width...
        let rect_width = {
            let mut rect_width = width;
            for offset in 1..width {
                let right = index + offset;
                if right >= chars.len() || chars[right] != c {
                    rect_width = offset;
                    break;
                }
            }
            rect_width
        };
        // ...and walk down for its height.
        let rect_height = {
            let mut rect_height = height;
            for offset in 1..height {
                let below = index + (offset * width);
                if below >= chars.len() || chars[below] != c {
                    rect_height = offset;
                    break;
                }
            }
            rect_height
        };
        for y_offset in 0..rect_height {
            for x_offset in 0..rect_width {
                let bit_index = index + x_offset + (y_offset * width);
                if chars[bit_index] != c {
                    panic!("Invalid character at {:?}. [expected: {:?}, found: {:?}]", point!(bit_index), c, chars[bit_index]);
                }
                bit_mask[bit_index] = true;
            }
        }
        let (x, y) = point!(index);
        rects.push(Rect {
            c,
            x: x as u8,
            y: y as u8,
            width: rect_width as u8,
            height: rect_height as u8,
        });
        index += 1;
    }
    rects
}

fn sanitise_input(layout: &str) -> (usize, usize, Vec<char>) {
    #[derive(PartialEq)]
    enum Mode {
        SkipWhitespace,
        WidthCounting,
        HeightCounting,
    }
    use Mode::*;

    // It basically treats any whitespace as newlines...
    // If you give it something like "000\n000 000"
    // it'll think you've given it a 3x3 of '0'.
    let mut mode = SkipWhitespace;
    let mut width = 0usize;
    let mut running_width = 0usize;
    let mut height = 0usize;
    let mut chars = vec![];
    for c in layout.chars() {
        if c.is_ascii_whitespace() {
            if mode == WidthCounting {
                if width == 0 {
                    width = running_width;
                } else if width != running_width {
                    panic!("Widths do not match! (pos:{})", chars.len());
                }
                running_width = 0;
                mode = HeightCounting;
            }
            if mode == HeightCounting {
                height += 1;
                mode = SkipWhitespace;
            }
            continue;
        }
        if mode == SkipWhitespace {
            mode = WidthCounting;
        }
        if mode == WidthCounting {
            running_width += 1;
        }
        chars.push(c);
    }
    // A layout that doesn't end in whitespace still has one open row to close;
    // without this the character-count check below would always panic on it.
    if mode == WidthCounting {
        if width == 0 {
            width = running_width;
        } else if width != running_width {
            panic!("Widths do not match! (pos:{})", chars.len());
        }
        height += 1;
    }
    let expected = width * height;
    if expected != chars.len() {
        panic!("Unexpected character count. [expected: {}, got: {}]", expected, chars.len());
    }
    (width, height, chars)
}
{ return Some((left_index, right_index, *ordinal)); }
conditional_block
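The `Setup`/`Session`/`Window`/`Pane` derives in the `main.rs` record above fix the shape of a layout file without ever showing one. Below is a minimal sketch of a `.txl` file fed through `toml::from_str`, written as a test that assumes it lives in the same crate as those structs; the session name, window name, layout grid, and `htop` command are invented for illustration. Note how `[[window.pane]]` maps onto the `panes` field through `#[serde(rename = "pane")]`.

#[test]
fn parses_a_minimal_txl_file() {
    // Hypothetical input: one window with a 3x3 ASCII layout and one pane command.
    let input = r#"
rebuild = false

[session]
name = "dev"
select = "main"

[[window]]
name = "main"
layout = "aab\naab\nccb\n"

[[window.pane]]
command = "htop"
"#;
    let setup: Setup = toml::from_str(input).expect("valid txl toml");
    assert_eq!(setup.session.unwrap().name.as_deref(), Some("dev"));
    assert_eq!(setup.windows.len(), 1);
    assert_eq!(setup.windows[0].panes.as_ref().unwrap().len(), 1);
}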
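The same "aab\naab\nccb" grid exercises the layout pipeline end to end: `determine_rectangles` finds a 2x2 block `a`, a full-height right column `b`, and a bottom strip `c`, and `parse_layout` collapses them into two splits, emitted in reverse merge order so tmux can replay them. A test-style sketch, again assuming it sits next to `parse_layout`; the trailing newline closes the last row for `sanitise_input`.

#[test]
fn layout_reduces_to_two_splits() {
    // c merges into a (South), then b merges into a (East); after the final
    // reverse, the Horizontal split comes out first. Both splits hand over a
    // third of the merged size, and `main` adds 1 to the target pane index.
    let actions = parse_layout("aab\naab\nccb\n");
    assert_eq!(actions.len(), 2);

    let Action(dir, target, pct) = &actions[0];
    assert_eq!(*dir, Direction::Horizontal);
    assert_eq!(*target, 0);
    assert!((pct - 1.0 / 3.0).abs() < 1e-6);

    let Action(dir, _, pct) = &actions[1];
    assert_eq!(*dir, Direction::Vertical);
    assert!((pct - 1.0 / 3.0).abs() < 1e-6);
}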
rezepte.ts
import { Component } from '@angular/core';
import { AlertController } from 'ionic-angular';
import { NavController, NavParams } from 'ionic-angular';
import { Rezeptansicht } from '../rezeptansicht/rezeptansicht';
export class Rezepte {
  recipes = [];
  foundRecipes = [];
  ingredient_CsC = [];
  ingredients_HOC = [];
  ingredients_Pizza = [];
  ingredients_Pommes = [];
  ingredients_birne = [];
  score: number;
  level2: number;
  level3: number;
  level4: number;
  level5: number;
  missingPoints = 0;

  constructor(public navCtrl: NavController, public navParams: NavParams, public alertCtrl: AlertController) {
    this.level2 = parseInt(window.localStorage.getItem("level2"));
    this.level3 = parseInt(window.localStorage.getItem("level3"));
    this.level4 = parseInt(window.localStorage.getItem("level4"));
    this.level5 = parseInt(window.localStorage.getItem("level5"));
    this.score = parseInt(window.localStorage.getItem("score"));

    this.ingredient_CsC = [
      {id: 9, title: 'Karotten', amount: 70, co2value: 305},
      {id: 23, title: 'Peperoni', amount: 150, co2value: 7008},
      {id: 10, title: 'Zwiebeln', amount: 80, co2value: 365},
      {id: 11, title: 'Knoblauch', amount: 5, co2value: 1170},
      {id: 12, title: 'Champignons', amount: 60, co2value: 2790},
      {id: 14, title: 'Räuchertofu', amount: 175, co2value: 783},
      {id: 24, title: 'Tofu', amount: 175, co2value: 1008},
      {id: 13, title: 'Kidneybohnen', amount: 250, co2value: 1302},
      {id: 25, title: 'Süssmais', amount: 150, co2value: 839},
      {id: 15, title: 'Tomaten', amount: 500, co2value: 547},
      {id: 26, title: 'Soja Sauce', amount: 10, co2value: 1633},
      {id: 27, title: 'Zitronensaft', amount: 10, co2value: 1953},
      {id: 28, title: 'Essig', amount: 5, co2value: 3203},
      {id: 29, title: 'Senf', amount: 5, co2value: 1003},
      {id: 30, title: 'Chili', amount: 5, co2value: 8243},
      {id: 31, title: 'Salz', amount: 2, co2value: 603},
      {id: 32, title: 'Pfeffer', amount: 2, co2value: 603},
      {id: 33, title: 'Oregano', amount: 2, co2value: 8016},
      {id: 34, title: 'Basilikum', amount: 2, co2value: 8016}
    ];
    this.ingredients_HOC = [
      {id: 16, title: 'Hokkaido', amount: 750, co2value: 354},
      {id: 17, title: 'Kartoffeln', amount: 750, co2value: 170},
      {id: 18, title: 'Orangen', amount: 400, co2value: 486},
      {id: 11, title: 'Knoblauch', amount: 16, co2value: 1170},
      {id: 35, title: 'Ingwer', amount: 20, co2value: 603},
      {id: 36, title: 'Kurkuma (getrocknet)', amount: 4, co2value: 3723},
      {id: 37, title: 'Koriander (getrocknet)', amount: 8, co2value: 3733},
      {id: 38, title: 'Zucker', amount: 15, co2value: 335},
      {id: 39, title: 'Salz', amount: 2, co2value: 603},
      {id: 40, title: 'Kokosnussmilch', amount: 400, co2value: 1527},
      {id: 41, title: 'Wasser', amount: 100, co2value: 16}
    ];
    this.ingredients_Pizza = [
      {id: 0, title: 'Wasser', amount: 320, co2value: 16},
      {id: 19, title: 'Zucker', amount: 15, co2value: 335},
      {id: 20, title: 'Hefe', amount: 20, co2value: 1037},
      {id: 21, title: 'Weissmehl', amount: 600, co2value: 517},
      {id: 22, title: 'Salz', amount: 10, co2value: 603},
      {id: 42, title: 'Zwiebeln', amount: 70, co2value: 365},
      {id: 43, title: 'Karotten', amount: 100, co2value: 305},
      {id: 44, title: 'Knoblauch', amount: 3, co2value: 1170},
      {id: 45, title: 'Feta', amount: 200, co2value: 3546},
      {id: 46, title: 'Feigen', amount: 100, co2value: 8236},
      {id: 47, title: 'Petersilie (frisch)', amount: 45, co2value: 8096},
      {id: 48, title: 'Zimt (getrocknet)', amount: 2, co2value: 8106},
      {id: 49, title: 'Koriander (getrocknet)', amount: 2, co2value: 3733},
      {id: 50, title: 'Kreuzkümmel (getrocknet)', amount: 2, co2value: 603},
      {id: 51, title: 'Curry (getrocknet)', amount: 5, co2value: 603},
      {id: 52, title: 'Chili (getrocknet)', amount: 2, co2value: 8246},
      {id: 53, title: 'Olivenöl', amount: 30, co2value: 3712},
      {id: 54, title: 'Wein', amount: 20, co2value: 1431},
      {id: 55, title: 'Tomatenkonzentrat (dreifach konzentriert)', amount: 40, co2value: 1743},
      {id: 56, title: 'Tomatenmark', amount: 400, co2value: 1003}
    ];
    this.ingredients_Pommes = [
      {id: 57, title: 'Süsskartoffeln', amount: 200, co2value: 653},
      {id: 58, title: 'Rapsöl', amount: 30, co2value: 1606},
      {id: 59, title: 'Italienische Kräuter', amount: 10, co2value: 603},
      {id: 60, title: 'Salz', amount: 2, co2value: 603},
      {id: 61, title: 'Pfeffer (getrocknet)', amount: 2, co2value: 603},
      {id: 62, title: 'Chili (getrocknet)', amount: 2, co2value: 8246},
      {id: 63, title: 'Paprika (edelsüss)', amount: 1, co2value: 603}
    ];
    this.ingredients_birne = [
      {id: 64, title: 'Birne', amount: 350, co2value: 286},
      {id: 65, title: 'Zucker', amount: 200, co2value: 335},
      {id: 66, title: 'Wasser', amount: 500, co2value: 16},
      {id: 67, title: 'Zitrone', amount: 85, co2value: 486},
      {id: 68, title: 'Zimt (getrocknet)', amount: 15, co2value: 8106},
      {id: 69, title: 'Nelken (getrocknet)', amount: 5, co2value: 1214},
      {id: 70, title: 'Schokolade (dunkel)', amount: 60, co2value: 2403},
      {id: 71, title: 'Spirituosen', amount: 10, co2value: 4103},
      {id: 72, title: 'Orangensaft', amount: 10, co2value: 1203}
    ];

    // Categories: schnell (quick), einfach (easy), schick (fancy), gemerkt (saved)
    this.recipes = [
      //http://app.eaternity.org/#!menu:7c5bd3c2-66a5-41f3-bd66-c35d40e8f4ae&customer=Eaternity&scope=PUBLIC
      { id: 0, title: 'Chili sin Carne', imageUrl: 'file:///android_asset/www/assets/img/0.jpg', category: 'einfach', co2: 497, recipeIngredients: this.ingredient_CsC, method: 'Die Zwiebel und Karotte in einer großen Pfanne anbraten. Den Tofu zerbröseln und hinzugeben und knusprig anbraten. Mit Sojasauce, Chili und Pfeffer würzen. Das restliche Gemüse dazugeben und für wenige Minuten anbraten lassen. Nun die Tomaten und Kidneybohnen dazugeben und ein paar Minuten köcheln lassen. Mit 1 TL Senf, etwas Essig und Zitronensaft, Salz und Kräutern abschmecken. Umrühren, einen Moment köcheln lassen und das Chili sin Carne heiß servieren. Als Garnitur passen Basilikumblätter und Kürbiskerne.'},
      //http://app.eaternity.org/#!menu:4bd47d15-89eb-4e0f-b142-4aed8299d40c&customer=Eaternity&scope=PUBLIC
      { id: 1, title: 'Hokkaidō-Orangen Curry', imageUrl: 'file:///android_asset/www/assets/img/1.jpg', category: 'schick', co2: 233, recipeIngredients: this.ingredients_HOC, method: 'Die Kartoffeln waschen, schälen und in 2 cm Würfel schneiden. Den Kürbis waschen, halbieren, entkernen und gegebenenfalls schälen. In 2 cm Würfel schneiden. Die Orangen schälen und in 1 cm Würfel schneiden. Knoblauch und Ingwer hacken. Gewürze und Flüssigkeiten abmessen und bereitstellen. Den Ofen auf 180° Grad vorheizen. Kokosnussmilch mit Wasser, Ingwer, Knoblauch und den Gewürzen vermengen. Den Kürbis, Kartoffeln und Orangen in eine Auflaufform füllen. Die Gewürz-Kokosmilch darüber verteilen. Gut mischen und 30-40 Minuten im Ofen backen. Tipp: Dazu passt ein blumiger Basmatireis.'},
      //http://app.eaternity.org/#!menu:ba34fba3-7135-4701-b8c4-ee43842cc9ad&customer=Eaternity&scope=PUBLIC
      { id: 2, title: 'Orientalische Pizza mit Feigen, Feta und Zwiebeln', imageUrl: 'file:///android_asset/www/assets/img/2.jpg', category: 'schick', co2: 408, recipeIngredients: this.ingredients_Pizza, method: 'Wasser, Zucker und Hefe zusammen mischen. Weissmehl und Salz abwägen und in eine Schüssel geben. Wasser-Hefemischung mit der Mehlmischung zu einem Pizzateig kneten. Den Teig gehen lassen bis die anderen Zutaten bereit sind. Eine Zwiebel und die Karotten mit Olivenöl andünsten. Die Gewürze dazugeben und mitdünsten. Tomatenpüree beigeben und mitanziehen. Mit Rotwein ablöschen und einreduzieren lassen. Tomatenmark beigeben und 20 min kochen lassen. Tomatensauce vom Herd nehmen und etwas auskühlen lassen. Den Teig auswallen und auf das Backpapier legen. Die ausgekühlte Sauce auf dem Teig verteilen. Den Feta und die Zwiebelringe über der Pizza verteilen. Bei 220° Grad 12 min backen. Mit Petersilie bestreuen.'},
      //http://app.eaternity.org/#!menu:42f27cd4-d35f-462a-bc03-fab8b3a6f1ae&customer=Eaternity&scope=PUBLIC
      { id: 3, title: 'Süßkartoffel-Pommes', imageUrl: 'file:///android_asset/www/assets/img/3.jpg', category: 'einfach', co2: 238, recipeIngredients: this.ingredients_Pommes, method: 'Die Süßkartoffeln schälen und in 1 cm dicke Spalten oder Scheiben schneiden. Den Ofen auf 200 Grad vorheizen. In einer Schüssel die Kräuter und Gewürze mit dem Öl vermischen. Die Kartoffelspalten mit der Gewürz-Ölmischung mischen, sodass jede Kartoffel gleichmäßig mit der Marinade benetzt ist. Auf einem Backblech mit Backpapier verteilen und etwa 25 bis 30 Min. knusprig backen.'},
      //http://app.eaternity.org/#!menu:75a6bd49-3fb6-497c-b539-0250d0663c7c&customer=Eaternity&scope=PUBLIC
      { id: 4, title: 'Weihnachtsbirne mit Schokoladesauce', imageUrl: 'file:///android_asset/www/assets/img/4.jpg', category: 'schick', co2: 138, recipeIngredients: this.ingredients_birne, method: 'Das Wasser aufkochen und die Zitrone, Zucker, Zimtstange und Nelken zugeben. Fünf Minuten köcheln. Die Birnen im Sud 15 Minuten pochieren. Die Schokolade „au-bain-marie“ schmelzen. Mit etwas Rum oder Orangensaft vermischen. Die Birnen kurz abtropfen lassen und danach mit der Schokolade anrichten.'}
      //http://app.eaternity.org/#!menu:9db6366d-f56b-4924-a7b9-69ec33ed17f2&customer=Eaternity&scope=PUBLIC
      /*
      { id: 5, title: 'Erbsensuppe mit Minze', imageUrl: '../assets/img/5.jpg', category: 'schnell', co2: 165},
      //http://app.eaternity.org/#!menu:2d9e98bc-025b-44e8-910d-7beabdfbdcd3&customer=Eaternity&scope=PUBLIC
      { id: 6, title: 'Frischer zitroniger Kartoffelsalat ', imageUrl: '../assets/img/6.jpg', category: 'schnell', co2: 261},
      //http://app.eaternity.org/#!menu:75aa52f1-d9aa-4d45-9702-4663a74c0c89&customer=Eaternity&scope=PUBLIC
      { id: 7, title: 'Kürbis-Apfel Gulasch', imageUrl: '../assets/img/7.jpg', category: 'schnell', co2: 413}
      */
    ];
    this.foundRecipes = this.recipes;
    window.localStorage.setItem("recipe_list", JSON.stringify(this.recipes));
  }

  itemTapped(event, item) {
    this.navCtrl.push(Rezepte, { item: item });
  }

  showScore() {
    let alert = this.alertCtrl.create({
      title: 'Deine Punktzahl ist ' + this.score,
      message: 'Du benötigst ' + this.calculateMissingPoints() + ' Punkte für das nächste Level.',
      buttons: ['OK']
    });
    alert.present();
  }

  calculateMissingPoints() {
    if (this.score < this.level2) {
      this.missingPoints = this.level2 - this.score;
    } else if (this.score >= this.level2 && this.score < this.level3) {
      this.missingPoints = this.level3 - this.score;
    } else if (this.score >= this.level3 && this.score < this.level4) {
      this.missingPoints = this.level4 - this.score;
    } else if (this.score >= this.level4 && this.score < this.level5) {
      this.missingPoints = this.level5 - this.score;
    } else {
      // Past the last threshold: nothing left to earn.
      this.missingPoints = 0;
    }
    return this.missingPoints;
  }

  search(searchEvent) {
    let term = searchEvent.target.value.toLowerCase();
    this.foundRecipes = [];
    console.log(this.recipes);
    if (term != "") {
      this.foundRecipes = this.recipes.filter((i) => i.title.toLowerCase().indexOf(term) == 0);
    }
  }

  openRecipe(recipe) {
    this.navCtrl.push(Rezeptansicht, {recipe});
  }
}
@Component({ selector: 'page-rezepte', templateUrl: 'rezepte.html' })
random_line_split
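The `calculateMissingPoints` method in the record above walks four explicit score ranges against the ascending level thresholds (with a final else now catching scores past `level5`). The same lookup collapses to a single scan over a threshold slice; here is a sketch of that idea, kept in Rust to match the other examples in this section, with hypothetical names and threshold values:

/// Points still needed to reach the next threshold, or None when the
/// score is already past the last level. Thresholds must be ascending.
fn missing_points(score: u32, thresholds: &[u32]) -> Option<u32> {
    thresholds.iter().find(|&&t| score < t).map(|&t| t - score)
}

fn main() {
    // Hypothetical stand-ins for the level2..level5 values in localStorage.
    let levels = [100, 250, 500, 1000];
    assert_eq!(missing_points(180, &levels), Some(70)); // next stop: 250
    assert_eq!(missing_points(1200, &levels), None);    // past the last level
}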
rezepte.ts
import { Component } from '@angular/core'; import { AlertController } from 'ionic-angular'; import { NavController, NavParams } from 'ionic-angular'; import { Rezeptansicht } from '../rezeptansicht/rezeptansicht'; @Component({ selector: 'page-rezepte', templateUrl: 'rezepte.html' }) export class Rezepte { recipes = []; foundRecipes = []; ingredient_CsC = []; ingredients_HOC = []; ingredients_Pizza = []; ingredients_Pommes = []; ingredients_birne = []; score: number; level2: number; level3: number; level4: number; level5: number; missingPoints = 0; constructor(public navCtrl: NavController, public navParams: NavParams, public alertCtrl: AlertController) { this.level2 = parseInt(window.localStorage.getItem("level2")); this.level3 = parseInt(window.localStorage.getItem("level3")); this.level4 = parseInt(window.localStorage.getItem("level4")); this.level5 = parseInt(window.localStorage.getItem("level5")); this.score = parseInt(window.localStorage.getItem("score")); this.ingredient_CsC = [ {id: 9, title: 'Karotten', amount: 70, co2value: 305}, {id: 23, title: 'Peperoni', amount: 150, co2value: 7008}, {id: 10, title: 'Zwiebeln', amount: 80, co2value: 365}, {id: 11, title: 'Knoblauch', amount: 5, co2value: 1170}, {id: 12, title: 'Champignons', amount: 60, co2value: 2790}, {id: 14, title: 'Räuchertofu', amount: 175, co2value: 783}, {id: 24, title: 'Tofu', amount: 175, co2value: 1008}, {id: 13, title: 'Kidneybohnen', amount: 250, co2value: 1302}, {id: 25, title: 'Süssmais', amount: 150, co2value: 839}, {id: 15, title: 'Tomaten', amount: 500, co2value: 547}, {id: 26, title: 'Soja Sauce', amount: 10, co2value: 1633}, {id: 27, title: 'Zitronensaft', amount: 10, co2value: 1953}, {id: 28, title: 'Essig', amount: 5, co2value: 3203}, {id: 29, title: 'Senf', amount: 5, co2value: 1003}, {id: 30, title: 'Chili', amount: 5, co2value: 8243}, {id: 31, title: 'Salz', amount: 2, co2value: 603}, {id: 32, title: 'Pfeffer', amount: 2, co2value: 603}, {id: 33, title: 'Oregano', amount: 2, co2value: 8016}, {id: 34, title: 'Basilikum', amount: 2, co2value: 8016} ]; this.ingredients_HOC = [ {id: 16, title: 'Hokkaido', amount: 750, co2value: 354}, {id: 17, title: 'Kartoffeln', amount: 750, co2value: 170}, {id: 18, title: 'Orangen', amount: 400, co2value: 486}, {id: 11, title: 'Knoblauch', amount: 16, co2value: 1170}, {id: 35, title: 'Ingwer', amount: 20, co2value: 603}, {id: 36, title: 'Kurkuma (getrocknet)', amount: 4, co2value: 3723}, {id: 37, title: 'Koriander (getrocknet)', amount: 8, co2value: 3733}, {id: 38, title: 'Zucker', amount: 15, co2value: 335}, {id: 39, title: 'Salz', amount: 2, co2value: 603}, {id: 40, title: 'Kokosnussmilch', amount: 400, co2value: 1527}, {id: 41, title: 'Wasser', amount: 100, co2value: 16} ]; this.ingredients_Pizza = [ {id: 0, title: 'Wasser', amount: 320, co2value: 16}, {id: 19, title: 'Zucker', amount: 15, co2value: 335}, {id: 20, title: 'Hefe', amount: 20, co2value: 1037}, {id: 21, title: 'Weissmehl', amount: 600, co2value: 517}, {id: 22, title: 'Salz', amount: 10, co2value: 603}, {id: 42, title: 'Zwiebeln', amount: 70, co2value: 365}, {id: 43, title: 'Karotten', amount: 100, co2value: 305}, {id: 44, title: 'Knoblauch', amount: 3, co2value:1170}, {id: 45, title: 'Feta', amount: 200, co2value: 3546}, {id: 46, title: 'Feigen', amount: 100, co2value: 8236}, {id: 47, title: 'Petersilie (frisch)', amount: 45, co2value: 8096}, {id: 48, title: 'Zimt (getrocknet)', amount: 2, co2value: 8106}, {id: 49, title: 'Koriander (getrocknet)', amount: 2, co2value: 3733}, {id: 50, title: 'Kreuzkümmel 
(getrocknet)', amount: 2, co2value: 603}, {id: 51, title: 'Curry (getrocknet)', amount: 5, co2value: 603}, {id: 52, title: 'Chili (getrocknet)', amount: 2, co2value: 8246}, {id: 53, title: 'Olivenöl', amount: 30, co2value: 3712}, {id: 54, title: 'Wein', amount: 20, co2value: 1431}, {id: 55, title: 'Tomatenkonzentrat (dreifach konzentriert)', amount: 40, co2value: 1743}, {id: 56, title: 'Tomatenmark', amount: 400, co2value: 1003} ]; this.ingredients_Pommes = [ {id: 57, title: 'Süsskartoffeln', amount: 200, co2value: 653}, {id: 58, title: 'Rapsöl', amount: 30, co2value: 1606}, {id: 59, title: 'Italienische Kräuter', amount: 10, co2value: 603}, {id: 60, title: 'Salz', amount: 2, co2value: 603}, {id: 61, title: 'Pfeffer (getrocknet)', amount: 2, co2value: 603}, {id: 62, title: 'Chili (getrocknet)', amount: 2, co2value: 8246}, {id: 63, title: 'Paprika (edelsüss)', amount: 1, co2value: 603} ]; this.ingredients_birne = [ {id: 64, title: 'Birne', amount: 350, co2value: 286}, {id: 65, title: 'Zucker', amount: 200, co2value: 335}, {id: 66, title: 'Wasser', amount: 500, co2value: 16}, {id: 67, title: 'Zitrone', amount: 85, co2value: 486}, {id: 68, title: 'Zimt (getrocknet)', amount: 15, co2value: 8106}, {id: 69, title: 'Nelken (getrocknet)', amount: 5, co2value: 1214}, {id: 70, title: 'Schokolade (dunkel)', amount: 60, co2value: 2403}, {id: 71, title: 'Spirituosen', amount: 10, co2value: 4103}, {id: 72, title: 'Orangensaft', amount: 10, co2value: 1203} ]; //Kategorien: schnell, einfach, schick, gemerkt this.recipes = [ //http://app.eaternity.org/#!menu:7c5bd3c2-66a5-41f3-bd66-c35d40e8f4ae&customer=Eaternity&scope=PUBLIC { id: 0, title: 'Chili sin Carne', imageUrl: 'file:///android_asset/www/assets/img/0.jpg', category: 'einfach', co2: 497, recipeIngredients: this.ingredient_CsC, method: 'Die Zwiebel und Karotte in einer großen Pfanne anbraten. Den Tofu zerbröseln und hinzugeben und knusprig anbraten. Mit Sojasauce, Chili und Pfeffer würzen. Das restliche Gemüse dazugeben und für wenige Minuten anbraten lassen. Nun die Tomaten und Kidneybohnen dazugeben und ein paar Minuten köcheln lassen. Mit 1 TL Senf, etwas Essig und Zitronensaft, Salz und Kräutern abschmecken. Umrühren, einen Moment köcheln lassen und das Chili sin Carne heiß servieren. Als Garnitur passen Basilikumblätter und Kürbiskerne.'}, //http://app.eaternity.org/#!menu:4bd47d15-89eb-4e0f-b142-4aed8299d40c&customer=Eaternity&scope=PUBLIC { id: 1, title: 'Hokkaidō-Orangen Curry', imageUrl: 'file:///android_asset/www/assets/img/1.jpg', category: 'schick', co2: 233, recipeIngredients: this.ingredients_HOC, method: 'Die Kartoffeln waschen, schälen und in 2 cm Würfel schneiden. Den Kürbis waschen, halbieren, entkernen und gegebenenfalls schälen. In 2 cm Würfel schneiden. Die Orangen schälen und in 1 cm Würfel schneiden. Knoblauch und Ingwer hacken. Gewürze und Flüssigkeiten abmessen und bereitstellen.Den Ofen auf 180° Grad vorheizen. Kokosnussmilch mit Wasser, Ingwer, Knoblauch und den Gewürzen vermengen. Den Kürbis, Kartoffeln und Orangen in eine Auflaufform füllen. Die Gewürz-Kokosmilch darüber verteilen. Gut mischen und 30-40 Minuten im Ofen backen. Tipp: Dazu passt ein blumiger Basmatireis.'}, //http://app.eaternity.org/#!menu:ba34fba3-7135-4701-b8c4-ee43842cc9ad&customer=Eaternity&scope=PUBLIC { id: 2, title: 'Orientalische Pizza mit Feigen, Feta und Zwiebeln', imageUrl: 'file:///android_asset/www/assets/img/2.jpg', category: 'schick', co2: 408, recipeIngredients: this.ingredients_Pizza, method: 'Wasser, Zucker und Hefe zusammen mischen. 
Weissmehl und Salz abwägen und in eine Schüssel geben. Wasser-Hefemischung mit der Mehlmischung zu einem Pizzateig kneten. Den Teig gehen lassen bis die anderen Zutaten bereit sind. Eine Zwiebeln und die Karotten mit Olivenöl andünsten. Die Gewürze dazugeben und mitdünsten. Tomatenpüree beigeben und mitanziehen. Mit Rotwein ablöschen und einreduzieren lassen. Tomatenmark beigeben und 20 min kochen lassen. Tomatensauce vom Herd nehmen und etwas auskühlen lassen. Den Teig auswallen und auf das Backpapier legen. Die ausgekühlte Sauce auf dem Teig verteilen. Den Feta und die Zwiebelringe über der Pizza verteilen. Bei 220° Grad 12 min backen. Mit Petersilie bestreuen.'}, //http://app.eaternity.org/#!menu:42f27cd4-d35f-462a-bc03-fab8b3a6f1ae&customer=Eaternity&scope=PUBLIC { id: 3, title: 'Süßkartoffel-Pommes', imageUrl: 'file:///android_asset/www/assets/img/3.jpg', category: 'einfach', co2: 238, recipeIngredients: this.ingredients_Pommes, method: 'Die Süßkartoffeln schälen und in 1 cm dicke Spalten oder Scheiben schneiden. Den Ofen auf 200 Grad vorheizen. In einer Schüssel die Kräuter und Gewüze mit dem Öl vermischen. Die Kartoffelspalten mit der Gewürz-Ölmischung mischen, sodass jede Kartoffel gleichmäßig mit der Marinade benetzt ist. Auf einem Backblech mit Backpapier verteilen und etwa 25 bis 30 Min. knusprig backen.'}, //http://app.eaternity.org/#!menu:75a6bd49-3fb6-497c-b539-0250d0663c7c&customer=Eaternity&scope=PUBLIC { id: 4, title: 'Weihnachtsbirne mit Schokoladesauce', imageUrl: 'file:///android_asset/www/assets/img/4.jpg', category: 'schick', co2: 138, recipeIngredients: this.ingredients_birne, method: 'Das Wasser aufkochen und die Zitrone, Zucker, Zimtstange und Nelken zugeben. Fünf Minuten köcheln. Die Birnen im Sud 15 Minuten pochieren. Die Schokolade „au-bain-marie“ schmelzen. Mit etwas Rum oder Orangensaft vermischen. Die Birnen kurz abtropfen lassen und danach mit der Schokolade anrichten.'} //http://app.eaternity.org/#!menu:9db6366d-f56b-4924-a7b9-69ec33ed17f2&customer=Eaternity&scope=PUBLIC /* { id: 5, title: 'Erbsensuppe mit Minze', imageUrl: '../assets/img/5.jpg', category: 'schnell', co2: 165}, //http://app.eaternity.org/#!menu:2d9e98bc-025b-44e8-910d-7beabdfbdcd3&customer=Eaternity&scope=PUBLIC { id: 6, title: 'Frischer zitroniger Kartoffelsalat ', imageUrl: '../assets/img/6.jpg', category: 'schnell', co2: 261}, //http://app.eaternity.org/#!menu:75aa52f1-d9aa-4d45-9702-4663a74c0c89&customer=Eaternity&scope=PUBLIC { id: 7, title: 'Kürbis-Apfel Gulasch', imageUrl: '../assets/img/7.jpg', category: 'schnell', co2: 413} */ ]; this.foundRecipes = this.recipes; window.localStorage.setItem("recipe_list", JSON.stringify(this.recipes)); } itemTapped(event, item) { this.navCtrl.push(Rezepte, { item: item }); } s
: 'Deine Punktzahl ist ' + this.score, message: 'Du benötigst ' + this.calculateMissingPoints() + ' Punkte für das nächste Level.', buttons: ['OK'] }); alert.present(); } calculateMissingPoints(){ if (this.score < this.level2){ this.missingPoints = this.level2 - this.score; } else if (this.score >= this.level2 && this.score < this.level3) { this.missingPoints = this.level3 - this.score; } else if (this.score >= this.level3 && this.score < this.level4) { this.missingPoints = this.level4 - this.score; } else if (this.score >= this.level4 && this.score < this.level5) { this.missingPoints = this.level5 - this.score; } return this.missingPoints; } search(searchEvent) { let term = searchEvent.target.value.toLowerCase(); this.foundRecipes = []; console.log(this.recipes); if (term != "") { this.foundRecipes = this.recipes.filter((i) => { if (i.title.toLowerCase().indexOf(term) == 0) { return true; } else { return false; } }); } } openRecipe(recipe) { this.navCtrl.push(Rezeptansicht, {recipe}); } }
howScore(){ let alert = this.alertCtrl.create({ title
identifier_body
rezepte.ts
import { Component } from '@angular/core'; import { AlertController } from 'ionic-angular'; import { NavController, NavParams } from 'ionic-angular'; import { Rezeptansicht } from '../rezeptansicht/rezeptansicht'; @Component({ selector: 'page-rezepte', templateUrl: 'rezepte.html' }) export class Rezepte { recipes = []; foundRecipes = []; ingredient_CsC = []; ingredients_HOC = []; ingredients_Pizza = []; ingredients_Pommes = []; ingredients_birne = []; score: number; level2: number; level3: number; level4: number; level5: number; missingPoints = 0; constructor(public navCtrl: NavController, public navParams: NavParams, public alertCtrl: AlertController) { this.level2 = parseInt(window.localStorage.getItem("level2")); this.level3 = parseInt(window.localStorage.getItem("level3")); this.level4 = parseInt(window.localStorage.getItem("level4")); this.level5 = parseInt(window.localStorage.getItem("level5")); this.score = parseInt(window.localStorage.getItem("score")); this.ingredient_CsC = [ {id: 9, title: 'Karotten', amount: 70, co2value: 305}, {id: 23, title: 'Peperoni', amount: 150, co2value: 7008}, {id: 10, title: 'Zwiebeln', amount: 80, co2value: 365}, {id: 11, title: 'Knoblauch', amount: 5, co2value: 1170}, {id: 12, title: 'Champignons', amount: 60, co2value: 2790}, {id: 14, title: 'Räuchertofu', amount: 175, co2value: 783}, {id: 24, title: 'Tofu', amount: 175, co2value: 1008}, {id: 13, title: 'Kidneybohnen', amount: 250, co2value: 1302}, {id: 25, title: 'Süssmais', amount: 150, co2value: 839}, {id: 15, title: 'Tomaten', amount: 500, co2value: 547}, {id: 26, title: 'Soja Sauce', amount: 10, co2value: 1633}, {id: 27, title: 'Zitronensaft', amount: 10, co2value: 1953}, {id: 28, title: 'Essig', amount: 5, co2value: 3203}, {id: 29, title: 'Senf', amount: 5, co2value: 1003}, {id: 30, title: 'Chili', amount: 5, co2value: 8243}, {id: 31, title: 'Salz', amount: 2, co2value: 603}, {id: 32, title: 'Pfeffer', amount: 2, co2value: 603}, {id: 33, title: 'Oregano', amount: 2, co2value: 8016}, {id: 34, title: 'Basilikum', amount: 2, co2value: 8016} ]; this.ingredients_HOC = [ {id: 16, title: 'Hokkaido', amount: 750, co2value: 354}, {id: 17, title: 'Kartoffeln', amount: 750, co2value: 170}, {id: 18, title: 'Orangen', amount: 400, co2value: 486}, {id: 11, title: 'Knoblauch', amount: 16, co2value: 1170}, {id: 35, title: 'Ingwer', amount: 20, co2value: 603}, {id: 36, title: 'Kurkuma (getrocknet)', amount: 4, co2value: 3723}, {id: 37, title: 'Koriander (getrocknet)', amount: 8, co2value: 3733}, {id: 38, title: 'Zucker', amount: 15, co2value: 335}, {id: 39, title: 'Salz', amount: 2, co2value: 603}, {id: 40, title: 'Kokosnussmilch', amount: 400, co2value: 1527}, {id: 41, title: 'Wasser', amount: 100, co2value: 16} ]; this.ingredients_Pizza = [ {id: 0, title: 'Wasser', amount: 320, co2value: 16}, {id: 19, title: 'Zucker', amount: 15, co2value: 335}, {id: 20, title: 'Hefe', amount: 20, co2value: 1037}, {id: 21, title: 'Weissmehl', amount: 600, co2value: 517}, {id: 22, title: 'Salz', amount: 10, co2value: 603}, {id: 42, title: 'Zwiebeln', amount: 70, co2value: 365}, {id: 43, title: 'Karotten', amount: 100, co2value: 305}, {id: 44, title: 'Knoblauch', amount: 3, co2value:1170}, {id: 45, title: 'Feta', amount: 200, co2value: 3546}, {id: 46, title: 'Feigen', amount: 100, co2value: 8236}, {id: 47, title: 'Petersilie (frisch)', amount: 45, co2value: 8096}, {id: 48, title: 'Zimt (getrocknet)', amount: 2, co2value: 8106}, {id: 49, title: 'Koriander (getrocknet)', amount: 2, co2value: 3733}, {id: 50, title: 'Kreuzkümmel 
(getrocknet)', amount: 2, co2value: 603}, {id: 51, title: 'Curry (getrocknet)', amount: 5, co2value: 603}, {id: 52, title: 'Chili (getrocknet)', amount: 2, co2value: 8246}, {id: 53, title: 'Olivenöl', amount: 30, co2value: 3712}, {id: 54, title: 'Wein', amount: 20, co2value: 1431}, {id: 55, title: 'Tomatenkonzentrat (dreifach konzentriert)', amount: 40, co2value: 1743}, {id: 56, title: 'Tomatenmark', amount: 400, co2value: 1003} ]; this.ingredients_Pommes = [ {id: 57, title: 'Süsskartoffeln', amount: 200, co2value: 653}, {id: 58, title: 'Rapsöl', amount: 30, co2value: 1606}, {id: 59, title: 'Italienische Kräuter', amount: 10, co2value: 603}, {id: 60, title: 'Salz', amount: 2, co2value: 603}, {id: 61, title: 'Pfeffer (getrocknet)', amount: 2, co2value: 603}, {id: 62, title: 'Chili (getrocknet)', amount: 2, co2value: 8246}, {id: 63, title: 'Paprika (edelsüss)', amount: 1, co2value: 603} ]; this.ingredients_birne = [ {id: 64, title: 'Birne', amount: 350, co2value: 286}, {id: 65, title: 'Zucker', amount: 200, co2value: 335}, {id: 66, title: 'Wasser', amount: 500, co2value: 16}, {id: 67, title: 'Zitrone', amount: 85, co2value: 486}, {id: 68, title: 'Zimt (getrocknet)', amount: 15, co2value: 8106}, {id: 69, title: 'Nelken (getrocknet)', amount: 5, co2value: 1214}, {id: 70, title: 'Schokolade (dunkel)', amount: 60, co2value: 2403}, {id: 71, title: 'Spirituosen', amount: 10, co2value: 4103}, {id: 72, title: 'Orangensaft', amount: 10, co2value: 1203} ]; //Kategorien: schnell, einfach, schick, gemerkt this.recipes = [ //http://app.eaternity.org/#!menu:7c5bd3c2-66a5-41f3-bd66-c35d40e8f4ae&customer=Eaternity&scope=PUBLIC { id: 0, title: 'Chili sin Carne', imageUrl: 'file:///android_asset/www/assets/img/0.jpg', category: 'einfach', co2: 497, recipeIngredients: this.ingredient_CsC, method: 'Die Zwiebel und Karotte in einer großen Pfanne anbraten. Den Tofu zerbröseln und hinzugeben und knusprig anbraten. Mit Sojasauce, Chili und Pfeffer würzen. Das restliche Gemüse dazugeben und für wenige Minuten anbraten lassen. Nun die Tomaten und Kidneybohnen dazugeben und ein paar Minuten köcheln lassen. Mit 1 TL Senf, etwas Essig und Zitronensaft, Salz und Kräutern abschmecken. Umrühren, einen Moment köcheln lassen und das Chili sin Carne heiß servieren. Als Garnitur passen Basilikumblätter und Kürbiskerne.'}, //http://app.eaternity.org/#!menu:4bd47d15-89eb-4e0f-b142-4aed8299d40c&customer=Eaternity&scope=PUBLIC { id: 1, title: 'Hokkaidō-Orangen Curry', imageUrl: 'file:///android_asset/www/assets/img/1.jpg', category: 'schick', co2: 233, recipeIngredients: this.ingredients_HOC, method: 'Die Kartoffeln waschen, schälen und in 2 cm Würfel schneiden. Den Kürbis waschen, halbieren, entkernen und gegebenenfalls schälen. In 2 cm Würfel schneiden. Die Orangen schälen und in 1 cm Würfel schneiden. Knoblauch und Ingwer hacken. Gewürze und Flüssigkeiten abmessen und bereitstellen.Den Ofen auf 180° Grad vorheizen. Kokosnussmilch mit Wasser, Ingwer, Knoblauch und den Gewürzen vermengen. Den Kürbis, Kartoffeln und Orangen in eine Auflaufform füllen. Die Gewürz-Kokosmilch darüber verteilen. Gut mischen und 30-40 Minuten im Ofen backen. Tipp: Dazu passt ein blumiger Basmatireis.'}, //http://app.eaternity.org/#!menu:ba34fba3-7135-4701-b8c4-ee43842cc9ad&customer=Eaternity&scope=PUBLIC { id: 2, title: 'Orientalische Pizza mit Feigen, Feta und Zwiebeln', imageUrl: 'file:///android_asset/www/assets/img/2.jpg', category: 'schick', co2: 408, recipeIngredients: this.ingredients_Pizza, method: 'Wasser, Zucker und Hefe zusammen mischen. 
Weissmehl und Salz abwägen und in eine Schüssel geben. Wasser-Hefemischung mit der Mehlmischung zu einem Pizzateig kneten. Den Teig gehen lassen bis die anderen Zutaten bereit sind. Eine Zwiebeln und die Karotten mit Olivenöl andünsten. Die Gewürze dazugeben und mitdünsten. Tomatenpüree beigeben und mitanziehen. Mit Rotwein ablöschen und einreduzieren lassen. Tomatenmark beigeben und 20 min kochen lassen. Tomatensauce vom Herd nehmen und etwas auskühlen lassen. Den Teig auswallen und auf das Backpapier legen. Die ausgekühlte Sauce auf dem Teig verteilen. Den Feta und die Zwiebelringe über der Pizza verteilen. Bei 220° Grad 12 min backen. Mit Petersilie bestreuen.'}, //http://app.eaternity.org/#!menu:42f27cd4-d35f-462a-bc03-fab8b3a6f1ae&customer=Eaternity&scope=PUBLIC { id: 3, title: 'Süßkartoffel-Pommes', imageUrl: 'file:///android_asset/www/assets/img/3.jpg', category: 'einfach', co2: 238, recipeIngredients: this.ingredients_Pommes, method: 'Die Süßkartoffeln schälen und in 1 cm dicke Spalten oder Scheiben schneiden. Den Ofen auf 200 Grad vorheizen. In einer Schüssel die Kräuter und Gewüze mit dem Öl vermischen. Die Kartoffelspalten mit der Gewürz-Ölmischung mischen, sodass jede Kartoffel gleichmäßig mit der Marinade benetzt ist. Auf einem Backblech mit Backpapier verteilen und etwa 25 bis 30 Min. knusprig backen.'}, //http://app.eaternity.org/#!menu:75a6bd49-3fb6-497c-b539-0250d0663c7c&customer=Eaternity&scope=PUBLIC { id: 4, title: 'Weihnachtsbirne mit Schokoladesauce', imageUrl: 'file:///android_asset/www/assets/img/4.jpg', category: 'schick', co2: 138, recipeIngredients: this.ingredients_birne, method: 'Das Wasser aufkochen und die Zitrone, Zucker, Zimtstange und Nelken zugeben. Fünf Minuten köcheln. Die Birnen im Sud 15 Minuten pochieren. Die Schokolade „au-bain-marie“ schmelzen. Mit etwas Rum oder Orangensaft vermischen. Die Birnen kurz abtropfen lassen und danach mit der Schokolade anrichten.'} //http://app.eaternity.org/#!menu:9db6366d-f56b-4924-a7b9-69ec33ed17f2&customer=Eaternity&scope=PUBLIC /* { id: 5, title: 'Erbsensuppe mit Minze', imageUrl: '../assets/img/5.jpg', category: 'schnell', co2: 165}, //http://app.eaternity.org/#!menu:2d9e98bc-025b-44e8-910d-7beabdfbdcd3&customer=Eaternity&scope=PUBLIC { id: 6, title: 'Frischer zitroniger Kartoffelsalat ', imageUrl: '../assets/img/6.jpg', category: 'schnell', co2: 261}, //http://app.eaternity.org/#!menu:75aa52f1-d9aa-4d45-9702-4663a74c0c89&customer=Eaternity&scope=PUBLIC { id: 7, title: 'Kürbis-Apfel Gulasch', imageUrl: '../assets/img/7.jpg', category: 'schnell', co2: 413} */ ]; this.foundRecipes = this.recipes; window.localStorage.setItem("recipe_list", JSON.stringify(this.recipes)); } itemTapped(event, item) { this.navCtrl.push(Rezepte, { item: item }); } showScore(){ let alert = this.alertCtrl.create({ title: 'D
tzahl ist ' + this.score, message: 'Du benötigst ' + this.calculateMissingPoints() + ' Punkte für das nächste Level.', buttons: ['OK'] }); alert.present(); } calculateMissingPoints(){ if (this.score < this.level2){ this.missingPoints = this.level2 - this.score; } else if (this.score >= this.level2 && this.score < this.level3) { this.missingPoints = this.level3 - this.score; } else if (this.score >= this.level3 && this.score < this.level4) { this.missingPoints = this.level4 - this.score; } else if (this.score >= this.level4 && this.score < this.level5) { this.missingPoints = this.level5 - this.score; } return this.missingPoints; } search(searchEvent) { let term = searchEvent.target.value.toLowerCase(); this.foundRecipes = []; console.log(this.recipes); if (term != "") { this.foundRecipes = this.recipes.filter((i) => { if (i.title.toLowerCase().indexOf(term) == 0) { return true; } else { return false; } }); } } openRecipe(recipe) { this.navCtrl.push(Rezeptansicht, {recipe}); } }
eine Punk
identifier_name
rezepte.ts
import { Component } from '@angular/core'; import { AlertController } from 'ionic-angular'; import { NavController, NavParams } from 'ionic-angular'; import { Rezeptansicht } from '../rezeptansicht/rezeptansicht'; @Component({ selector: 'page-rezepte', templateUrl: 'rezepte.html' }) export class Rezepte { recipes = []; foundRecipes = []; ingredient_CsC = []; ingredients_HOC = []; ingredients_Pizza = []; ingredients_Pommes = []; ingredients_birne = []; score: number; level2: number; level3: number; level4: number; level5: number; missingPoints = 0; constructor(public navCtrl: NavController, public navParams: NavParams, public alertCtrl: AlertController) { this.level2 = parseInt(window.localStorage.getItem("level2")); this.level3 = parseInt(window.localStorage.getItem("level3")); this.level4 = parseInt(window.localStorage.getItem("level4")); this.level5 = parseInt(window.localStorage.getItem("level5")); this.score = parseInt(window.localStorage.getItem("score")); this.ingredient_CsC = [ {id: 9, title: 'Karotten', amount: 70, co2value: 305}, {id: 23, title: 'Peperoni', amount: 150, co2value: 7008}, {id: 10, title: 'Zwiebeln', amount: 80, co2value: 365}, {id: 11, title: 'Knoblauch', amount: 5, co2value: 1170}, {id: 12, title: 'Champignons', amount: 60, co2value: 2790}, {id: 14, title: 'Räuchertofu', amount: 175, co2value: 783}, {id: 24, title: 'Tofu', amount: 175, co2value: 1008}, {id: 13, title: 'Kidneybohnen', amount: 250, co2value: 1302}, {id: 25, title: 'Süssmais', amount: 150, co2value: 839}, {id: 15, title: 'Tomaten', amount: 500, co2value: 547}, {id: 26, title: 'Soja Sauce', amount: 10, co2value: 1633}, {id: 27, title: 'Zitronensaft', amount: 10, co2value: 1953}, {id: 28, title: 'Essig', amount: 5, co2value: 3203}, {id: 29, title: 'Senf', amount: 5, co2value: 1003}, {id: 30, title: 'Chili', amount: 5, co2value: 8243}, {id: 31, title: 'Salz', amount: 2, co2value: 603}, {id: 32, title: 'Pfeffer', amount: 2, co2value: 603}, {id: 33, title: 'Oregano', amount: 2, co2value: 8016}, {id: 34, title: 'Basilikum', amount: 2, co2value: 8016} ]; this.ingredients_HOC = [ {id: 16, title: 'Hokkaido', amount: 750, co2value: 354}, {id: 17, title: 'Kartoffeln', amount: 750, co2value: 170}, {id: 18, title: 'Orangen', amount: 400, co2value: 486}, {id: 11, title: 'Knoblauch', amount: 16, co2value: 1170}, {id: 35, title: 'Ingwer', amount: 20, co2value: 603}, {id: 36, title: 'Kurkuma (getrocknet)', amount: 4, co2value: 3723}, {id: 37, title: 'Koriander (getrocknet)', amount: 8, co2value: 3733}, {id: 38, title: 'Zucker', amount: 15, co2value: 335}, {id: 39, title: 'Salz', amount: 2, co2value: 603}, {id: 40, title: 'Kokosnussmilch', amount: 400, co2value: 1527}, {id: 41, title: 'Wasser', amount: 100, co2value: 16} ]; this.ingredients_Pizza = [ {id: 0, title: 'Wasser', amount: 320, co2value: 16}, {id: 19, title: 'Zucker', amount: 15, co2value: 335}, {id: 20, title: 'Hefe', amount: 20, co2value: 1037}, {id: 21, title: 'Weissmehl', amount: 600, co2value: 517}, {id: 22, title: 'Salz', amount: 10, co2value: 603}, {id: 42, title: 'Zwiebeln', amount: 70, co2value: 365}, {id: 43, title: 'Karotten', amount: 100, co2value: 305}, {id: 44, title: 'Knoblauch', amount: 3, co2value:1170}, {id: 45, title: 'Feta', amount: 200, co2value: 3546}, {id: 46, title: 'Feigen', amount: 100, co2value: 8236}, {id: 47, title: 'Petersilie (frisch)', amount: 45, co2value: 8096}, {id: 48, title: 'Zimt (getrocknet)', amount: 2, co2value: 8106}, {id: 49, title: 'Koriander (getrocknet)', amount: 2, co2value: 3733}, {id: 50, title: 'Kreuzkümmel 
(getrocknet)', amount: 2, co2value: 603}, {id: 51, title: 'Curry (getrocknet)', amount: 5, co2value: 603}, {id: 52, title: 'Chili (getrocknet)', amount: 2, co2value: 8246}, {id: 53, title: 'Olivenöl', amount: 30, co2value: 3712}, {id: 54, title: 'Wein', amount: 20, co2value: 1431}, {id: 55, title: 'Tomatenkonzentrat (dreifach konzentriert)', amount: 40, co2value: 1743}, {id: 56, title: 'Tomatenmark', amount: 400, co2value: 1003} ]; this.ingredients_Pommes = [ {id: 57, title: 'Süsskartoffeln', amount: 200, co2value: 653}, {id: 58, title: 'Rapsöl', amount: 30, co2value: 1606}, {id: 59, title: 'Italienische Kräuter', amount: 10, co2value: 603}, {id: 60, title: 'Salz', amount: 2, co2value: 603}, {id: 61, title: 'Pfeffer (getrocknet)', amount: 2, co2value: 603}, {id: 62, title: 'Chili (getrocknet)', amount: 2, co2value: 8246}, {id: 63, title: 'Paprika (edelsüss)', amount: 1, co2value: 603} ]; this.ingredients_birne = [ {id: 64, title: 'Birne', amount: 350, co2value: 286}, {id: 65, title: 'Zucker', amount: 200, co2value: 335}, {id: 66, title: 'Wasser', amount: 500, co2value: 16}, {id: 67, title: 'Zitrone', amount: 85, co2value: 486}, {id: 68, title: 'Zimt (getrocknet)', amount: 15, co2value: 8106}, {id: 69, title: 'Nelken (getrocknet)', amount: 5, co2value: 1214}, {id: 70, title: 'Schokolade (dunkel)', amount: 60, co2value: 2403}, {id: 71, title: 'Spirituosen', amount: 10, co2value: 4103}, {id: 72, title: 'Orangensaft', amount: 10, co2value: 1203} ]; //Kategorien: schnell, einfach, schick, gemerkt this.recipes = [ //http://app.eaternity.org/#!menu:7c5bd3c2-66a5-41f3-bd66-c35d40e8f4ae&customer=Eaternity&scope=PUBLIC { id: 0, title: 'Chili sin Carne', imageUrl: 'file:///android_asset/www/assets/img/0.jpg', category: 'einfach', co2: 497, recipeIngredients: this.ingredient_CsC, method: 'Die Zwiebel und Karotte in einer großen Pfanne anbraten. Den Tofu zerbröseln und hinzugeben und knusprig anbraten. Mit Sojasauce, Chili und Pfeffer würzen. Das restliche Gemüse dazugeben und für wenige Minuten anbraten lassen. Nun die Tomaten und Kidneybohnen dazugeben und ein paar Minuten köcheln lassen. Mit 1 TL Senf, etwas Essig und Zitronensaft, Salz und Kräutern abschmecken. Umrühren, einen Moment köcheln lassen und das Chili sin Carne heiß servieren. Als Garnitur passen Basilikumblätter und Kürbiskerne.'}, //http://app.eaternity.org/#!menu:4bd47d15-89eb-4e0f-b142-4aed8299d40c&customer=Eaternity&scope=PUBLIC { id: 1, title: 'Hokkaidō-Orangen Curry', imageUrl: 'file:///android_asset/www/assets/img/1.jpg', category: 'schick', co2: 233, recipeIngredients: this.ingredients_HOC, method: 'Die Kartoffeln waschen, schälen und in 2 cm Würfel schneiden. Den Kürbis waschen, halbieren, entkernen und gegebenenfalls schälen. In 2 cm Würfel schneiden. Die Orangen schälen und in 1 cm Würfel schneiden. Knoblauch und Ingwer hacken. Gewürze und Flüssigkeiten abmessen und bereitstellen.Den Ofen auf 180° Grad vorheizen. Kokosnussmilch mit Wasser, Ingwer, Knoblauch und den Gewürzen vermengen. Den Kürbis, Kartoffeln und Orangen in eine Auflaufform füllen. Die Gewürz-Kokosmilch darüber verteilen. Gut mischen und 30-40 Minuten im Ofen backen. Tipp: Dazu passt ein blumiger Basmatireis.'}, //http://app.eaternity.org/#!menu:ba34fba3-7135-4701-b8c4-ee43842cc9ad&customer=Eaternity&scope=PUBLIC { id: 2, title: 'Orientalische Pizza mit Feigen, Feta und Zwiebeln', imageUrl: 'file:///android_asset/www/assets/img/2.jpg', category: 'schick', co2: 408, recipeIngredients: this.ingredients_Pizza, method: 'Wasser, Zucker und Hefe zusammen mischen. 
Weissmehl und Salz abwägen und in eine Schüssel geben. Wasser-Hefemischung mit der Mehlmischung zu einem Pizzateig kneten. Den Teig gehen lassen bis die anderen Zutaten bereit sind. Eine Zwiebel und die Karotten mit Olivenöl andünsten. Die Gewürze dazugeben und mitdünsten. Tomatenpüree beigeben und mitanziehen. Mit Rotwein ablöschen und einreduzieren lassen. Tomatenmark beigeben und 20 min kochen lassen. Tomatensauce vom Herd nehmen und etwas auskühlen lassen. Den Teig auswallen und auf das Backpapier legen. Die ausgekühlte Sauce auf dem Teig verteilen. Den Feta und die Zwiebelringe über der Pizza verteilen. Bei 220° Grad 12 min backen. Mit Petersilie bestreuen.'}, //http://app.eaternity.org/#!menu:42f27cd4-d35f-462a-bc03-fab8b3a6f1ae&customer=Eaternity&scope=PUBLIC { id: 3, title: 'Süßkartoffel-Pommes', imageUrl: 'file:///android_asset/www/assets/img/3.jpg', category: 'einfach', co2: 238, recipeIngredients: this.ingredients_Pommes, method: 'Die Süßkartoffeln schälen und in 1 cm dicke Spalten oder Scheiben schneiden. Den Ofen auf 200 Grad vorheizen. In einer Schüssel die Kräuter und Gewürze mit dem Öl vermischen. Die Kartoffelspalten mit der Gewürz-Ölmischung mischen, sodass jede Kartoffel gleichmäßig mit der Marinade benetzt ist. Auf einem Backblech mit Backpapier verteilen und etwa 25 bis 30 Min. knusprig backen.'}, //http://app.eaternity.org/#!menu:75a6bd49-3fb6-497c-b539-0250d0663c7c&customer=Eaternity&scope=PUBLIC { id: 4, title: 'Weihnachtsbirne mit Schokoladesauce', imageUrl: 'file:///android_asset/www/assets/img/4.jpg', category: 'schick', co2: 138, recipeIngredients: this.ingredients_birne, method: 'Das Wasser aufkochen und die Zitrone, Zucker, Zimtstange und Nelken zugeben. Fünf Minuten köcheln. Die Birnen im Sud 15 Minuten pochieren. Die Schokolade „au-bain-marie“ schmelzen. Mit etwas Rum oder Orangensaft vermischen.
Die Birnen kurz abtropfen lassen und danach mit der Schokolade anrichten.'} //http://app.eaternity.org/#!menu:9db6366d-f56b-4924-a7b9-69ec33ed17f2&customer=Eaternity&scope=PUBLIC /* { id: 5, title: 'Erbsensuppe mit Minze', imageUrl: '../assets/img/5.jpg', category: 'schnell', co2: 165}, //http://app.eaternity.org/#!menu:2d9e98bc-025b-44e8-910d-7beabdfbdcd3&customer=Eaternity&scope=PUBLIC { id: 6, title: 'Frischer zitroniger Kartoffelsalat ', imageUrl: '../assets/img/6.jpg', category: 'schnell', co2: 261}, //http://app.eaternity.org/#!menu:75aa52f1-d9aa-4d45-9702-4663a74c0c89&customer=Eaternity&scope=PUBLIC { id: 7, title: 'Kürbis-Apfel Gulasch', imageUrl: '../assets/img/7.jpg', category: 'schnell', co2: 413} */ ]; this.foundRecipes = this.recipes; window.localStorage.setItem("recipe_list", JSON.stringify(this.recipes)); } itemTapped(event, item) { this.navCtrl.push(Rezepte, { item: item }); } showScore(){ let alert = this.alertCtrl.create({ title: 'Deine Punktzahl ist ' + this.score, message: 'Du benötigst ' + this.calculateMissingPoints() + ' Punkte für das nächste Level.', buttons: ['OK'] }); alert.present(); } calculateMissingPoints(){ if (this.score < this.level2){ this.missingPoints = this.level2 - this.score; } else if (this.score >= this.level2 && this.score < this.level3) { this.missingPoints = this.level3 - this.score; } else if (this.score >= this.level3 && this.score < this.level4) { this.missingPoints = this.level4 - this.score; } else if (this.score >= this.level4 && this.score < this.level5) { this.missingPoints = this.level5 - this.score; } return this.missingPoints; } search(searchEvent) { let term = searchEvent.target.value.toLowerCase(); this.foundRecipes = []; console.log(this.recipes); if (term != "") { this.foundRecipes = this.recipes.filter((i) => { if (i.title.toLowerCase().indexOf(term) == 0) { return true; } else { return false; } }); } } openRecipe(r
ptansicht, {recipe}); } }
ecipe) { this.navCtrl.push(Reze
conditional_block
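calculateMissingPoints in the sample above walks four hard-coded threshold comparisons and, once score passes level5, falls through and returns whatever missingPoints held before. The same lookup generalizes to an ordered threshold array; a minimal TypeScript sketch (the missingPointsFor helper is illustrative, not part of the app):

// Sketch under the assumption that thresholds are sorted ascending,
// e.g. [level2, level3, level4, level5] as read from localStorage.
function missingPointsFor(score: number, thresholds: number[]): number {
  for (const t of thresholds) {
    if (score < t) {
      return t - score; // points still needed for the next level
    }
  }
  return 0; // past the highest threshold: nothing left to earn
}

// Example: missingPointsFor(120, [100, 250, 500, 1000]) returns 130.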
RecipeDetail.js
import React, { Component } from 'react'; import { Carousel } from 'react-responsive-carousel'; import 'react-responsive-carousel/lib/styles/carousel.min.css'; import Comments from '../Comments'; import Countdown from 'react-countdown-now'; import { Link } from 'react-router/lib'; import LocalizedStrings from 'react-localization'; // Completed-state component const Completionist = () => <span>00:00</span>; // Renderer callback with condition const renderer = ({ hours, minutes, seconds, completed }) => { if (completed) { // Render a complete state return <Completionist />; } else { // Render a countdown return <span> {minutes} : {seconds}</span>; } }; class RecipeDetail extends Component { constructor(props) { super(props); let strings = new LocalizedStrings({ en:{ title:'View recipes', lang:'Languages', pumkin:'Homemade pumpkin cookies', material:'Material information', video:'recipe video', cook:'Cooking order', relation: 'Relationship products', label_sub_preparetime: 'Ready Time', label_sub_cookingtime: 'Cook Time', btn_edit: 'Edit' }, it: { title:'레시피 보기', lang:'언어 선택', pumkin:'수제 호박 쿠키', material:'재료안내', video:'레시피 영상', cook:'조리순서', relation:'이 레시피와 비슷해요', label_sub_preparetime: '준비시간', label_sub_cookingtime: '조리시간', btn_edit: '수정' }, cn: { title:'食谱视图', lang:'选择语言', pumkin:'自制南瓜饼干', material:'材料指南', video:'食谱视频', cook:'烹饪顺序', relation: '这与食谱相似', label_sub_preparetime: '准备时间', label_sub_cookingtime: '烹饪时间', btn_edit: '编辑' } }); this.state = { detail:{ user: { avatar:'' }, cate_detail:{ title:'' }, op_function:{ title:'' }, tags:'', }, optionsCount: 0, options: {}, time:0, mins : [1,2,3,4,5,6,7,8,9,10], secs : [0,1,2,3,4,5,6,7,8,9,10], saving:false, recipeId: props.routeParams.id, relations:[], languages:strings, type_lang:'it' }; this.getRecipe() //this.pushView() this.handleAddWish = this.handleAddWish.bind(this) this.handleTime = this.handleTime.bind(this) this.handeReset = this.handeReset.bind(this) } pushView(){ const { detail } = this.state const { recipeId } = this.state let user_id = 0 if(PROFILES) user_id = PROFILES.id let data = { object_id: recipeId, type: 'View', object_type:'Post', author_id: user_id, user_id: user_id, action: 1 } let header = ('') if(user_id != 0) header = {headers:{ 'Authorization': `Bearer ${TOKENS}` }} //refreshToken() axios.put(`/api/v3/reactions/view`, data, header) .then(response => { if (response.status === 200) { this.getRecipe() } this.setState({saving: true}); }) .catch(error => { if(error.response.data.message == '토큰이 블랙리스트에 올랐습니다.'){ console.log('black list') // localStorage.removeItem('tokens') } if(error.response.data.status_code == 401 || error.response.data.status_code == 500){ alert('다시 로그인해야합니다.') window.location.href = '/logout?redirect_url=recipe/'+recipeId+'/detail' } }); } componentDidMount() { this.pushView(); this.getRelationship(); axios.get('/api/v1/options') .then((res) => { this.setState({ optionsCount: res.data.meta.total_count, options: res.data.result }); }) .catch((err) => { console.log(err) }); window.scrollTo(0, 0); } getRelationship(){ const { recipeId } = this.state const { detail } = this.state console.log(detail) axios.get(`/api/v3/recipes/relation/`+recipeId) .then(response => { this.setRelation(response.data); }) .catch(error => { console.log(error); }); } componentWillReceiveProps(nextProps) { const {recipeId} = this.state; if(recipeId != nextProps.params.id) { this.setState({recipeId: nextProps.params.id}); this.pushView(); this.getRelationship(); $('html, body').animate({ scrollTop: 0 }, 0); } } setRelation(relations) {
this.setState({ relations:relations['result'] }); } async getRecipe(){ const { recipeId } = this.state; await axios.get(`/api/v1/post?embeds=user%2Ccate_detail%2Cop_function%2Cop_difficulty&filters=status=1,id=${recipeId}`) .then(response => { this.setDetail(response.data); }) .catch(error => { console.log(error); }); } setDetail(detail) { this.setState({ detail:detail['result'][0] }); } renderSlideTop(banners){ let bannerss = []; banners.map((banner, index) => { if(banners[index].images){ if(bannerss[index] == undefined){ bannerss[index] = {} } bannerss[index]['images'] = banners[index].images; } }) return <Carousel showArrows={true} showIndicators={false} showStatus={true} autoPlay={true} showThumbs={false}> {bannerss.map((banner, index) => { if(banners[index].images){ return <div key={`banner-${index}`} className=""> <img src={CDN_URL+`${banner.images}`} alt="img" /> </div> } })} </Carousel> } renderSlideStep(steps){ return <Carousel showArrows={false} showIndicators={false} showStatus={true} autoPlay={true} showThumbs={false}> {Object.keys(steps).map((step, index) => { let img if(!steps[step].video) img = (<img style={{height:'80vw'}} src={CDN_URL+`${steps[step].images}`} alt="img" />) return <div key={`step-${index}`} className="item"> {img} </div> })} </Carousel> } handleBack(){ window.history.go(-1); } handleAddWish(){ if(!PROFILES) { alert('아직 로그인하지 않았습니다.') return false } const con = confirm('레시피를 찜 하시겠습니까?') if(con !== true) return false const { detail } = this.state const { saving } = this.state let action = 1 if(saving === true){ //unlike action = 0 } //if(saving === true) action = 0 let data = { object_id: detail.id, type: 'Like', object_type:'Post', action: action, author_id: detail.user.id } const { recipeId } = this.state //request new token refreshToken() axios.put(`/api/v3/reactions/react`, data, {headers:{ 'Authorization': `Bearer ${TOKENS}` }}) .then(response => { if (response.status === 200) { //alert(response.data.message) this.getRecipe() } if(saving == false) this.setState({saving: true}); else this.setState({saving: false}); }) .catch(error => { if(error.response.data.status_code == 401){ alert('다시 로그인해야합니다.') window.location.href = '/logout?redirect_url=recipe/'+recipeId+'/detail' } }); } resetErrors() { this.setState({ errors: {}, hasError: false, }) } resetMessage() { this.setState({ message: null }) } renderErrors() { const { errors, hasError } = this.state if (!hasError) return null return ( <div className="alert alert-danger alert-dismissible" role="alert"> <button type="button" className="close" data-dismiss="alert" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> <p>{this.state.errors.message}</p> </div> ) } renderMessage() { const { message } = this.state if (message) { return ( <div className="alert alert-success alert-dismissible" role="alert"> <button type="button" className="close" data-dismiss="alert" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> <p><strong>Success!</strong> { message }</p> </div> ) } } handleTime(){ this.setState({ time: 300000 }) } handeReset(){ this.setState({ time: 0 }) this.handleTime() } changeLang({name,value}){ console.log(value) this.setState({type_lang:value}); localStorage.setItem('lang', value); } getOptionTitle(_id) { const {optionsCount} = this.state; const {
lues(options); const {languages} = this.state; let {type_lang} = this.state; let lang = localStorage.getItem('lang'); if(lang) type_lang = lang; languages.setLanguage(type_lang); for(var i = 0; i < optionsCount; i++) { if(_optionsArray[i].id == _id) { var _text = _optionsArray[i].title; if (type_lang == "en") { _text = (_optionsArray[i].title_en != null) ? _optionsArray[i].title_en : _optionsArray[i].title; } else if (type_lang == "cn") { _text = (_optionsArray[i].title_cn != null) ? _optionsArray[i].title_cn : _optionsArray[i].title; } return _text; } } return ""; } getCateTitle() { const {detail} = this.state; const {languages} = this.state; let {type_lang} = this.state; let lang = localStorage.getItem('lang'); if(lang) type_lang = lang; languages.setLanguage(type_lang); var _text = detail.cate_detail.title; if (type_lang == "en") { _text = (detail.cate_detail.title_en != null) ? detail.cate_detail.title_en : detail.cate_detail.title; } else if (type_lang == "cn") { _text = (detail.cate_detail.title_cn != null) ? detail.cate_detail.title_cn : detail.cate_detail.title; } return _text; } render() { const { detail } = this.state const { time } = this.state const { mins } = this.state const { secs } = this.state const { recipeId } = this.state const { relations } = this.state const { options } = this.state; if(detail == undefined) window.location.href = "/404" var _cateLink = "/cats"; // cat id = 20 if(detail.cate_detail.id == 3) { // dog id = 3 _cateLink = "/dogs"; } let materials if(detail.materials != undefined){ materials = JSON.parse(detail.materials) }else{ materials = {} } let steps if(detail.steps != undefined){ steps = JSON.parse(detail.steps) }else{ steps = {} } let bannerTop = [] var thumbVideo = ""; if(detail.cooking_representation){ bannerTop = JSON.parse(detail.cooking_representation) if(bannerTop[0].images != null || bannerTop[0].images != undefined) { thumbVideo = "/uploads/"+bannerTop[0].images; } } let avatar = (<img src="/html/images/media/img2.jpg" alt="img" />) if(detail.user){ if(detail.user.avatar) if (detail.user.avatar.indexOf('http') > -1) avatar = (<img src={detail.user.avatar} alt="img" />) else avatar = (<img src={CDN_URL+detail.user.avatar} alt="img" />) }else{ window.location.href = "/404" } //tags let tags = detail.tags.split(',') || []; //language const {languages} = this.state; let {type_lang} = this.state; let lang = localStorage.getItem('lang'); if(lang) type_lang = lang; languages.setLanguage(type_lang); //video let video = ""; var styleVideoBlock = {display:"block"}; if(detail.videos != undefined && detail.videos != "[]") { video = (<a data-fancybox href={detail['videos']}> <div style={{position:"relative",backgroundImage: "url("+thumbVideo+")",backgroundPosition: "center", backgroundRepeat: "no-repeat", backgroundSize: "cover", display:"block",width:"600px",height:"350px",margin:"auto"}}> <div style={{backgroundImage: "url('/default/images/icon/playbutton.jpg')",backgroundPosition: "center", backgroundRepeat: "no-repeat",width:"100%",height:"100%"}}></div> </div> </a>); } else { styleVideoBlock = {display: "none"}; } let _editButton = ""; if(PROFILES != undefined && detail.user_id == PROFILES.id) { _editButton = (<Link to={`/recipe/${detail.id}/edit`} className="pull-right btn btn-default btn-comment" >{languages.btn_edit}</Link>); } return (<main className="site-main"> <div className="container"> <div className="block-gird-item"> <div className="toobar"> {this.renderErrors()} {this.renderMessage()} <div className="title pull-left"> {languages.title} 
</div> <select className="form-control pull-right" value={type_lang} name="lang" onChange={(e) => this.changeLang({ name: e.target.name, value: e.target.value })} style={{marginLeft: 12}}> <option value="it">한국어</option> <option value="en">English</option> <option value="cn">Chinese</option> </select> <label className="pull-right">{languages.lang}</label> {_editButton} </div> <div className="items-talk-detail block-view-detail clearfix"> <br /><br /> <div className="item-talk col-sm-8 col-sm-offset-2"> <div className="photo"> {this.renderSlideTop(bannerTop)} </div> <div className="item-top"> <div className="img"> <div className="photo"> {avatar} </div> </div> <div className="detail"> <strong className="title">{ detail.user.name || ''}</strong> <span>{ detail.desc || ''}</span> </div> </div> <div className="item-description"> <strong className="title">{ detail.title }</strong> <div className="des"> { detail.content } </div> <div style={{display:"flex"}}> <div className="item-top info"> <span className="view">{ detail.total_view}</span> <span className="wishlist" onClick={this.handleAddWish}>{ detail.total_like }</span> <span className="comment">{detail.total_cmt}</span> </div> </div> <div className="link"> <Link to={_cateLink}>{this.getCateTitle()}</Link> <Link to={`/searchs?function=${detail.op_function.id}`}>{this.getOptionTitle(detail.function)}</Link> <Link to={`/searchs?difficulty=${detail.difficulty}`}><img src="/default/images/icon/icon1.png" style={{width: 20, marginRight: 4}} /> {this.getOptionTitle(detail.difficulty)}</Link> <Link to={`/searchs?prepare_time=${detail.prepare_time}`}>{languages.label_sub_preparetime} <br /> {this.getOptionTitle(detail.prepare_time)}</Link> <Link to={`/searchs?cooking_time=${detail.cooking_time}`}>{languages.label_sub_cookingtime} <br />{this.getOptionTitle(detail.cooking_time)}</Link> </div> </div> </div> </div> <hr /> <br /> <div className="block-view-related"> <div className="block-view-title">{languages.material}</div> <div className="items row"> {Object.keys(materials).map((material,index) => { let img = ""; if(materials[material].images) img = (<img src={CDN_URL+`${materials[material].images}`} alt="img" />); var _link = `/searchs?material=${materials[material].name}`; // if(_matName.charCodeAt(0) > 255) { // var _encodedKeyword = ""; // for(var _index = 0; _index < _matName.length; _index++) { // _encodedKeyword += '\\\\u' + _matName.charCodeAt(_index).toString(16); // } // _encodedKeyword = _encodedKeyword.substr(2, _encodedKeyword.length); // _link = _link+_encodedKeyword; // } else { // _link = _link+_matName; // } return <div className="col-sm-3 item " key={`material-${index}`}> <div className="item-fearture-product"> <div className="photo"> <Link to={`${_link}`} >{img}</Link> </div> <div className="detail"> <h3 className="title"><Link to={`${_link}`}>{materials[material].name} </Link> </h3> <span className="subtitle">{materials[material].quantity} {this.getOptionTitle(materials[material].unit)}</span> </div> </div> </div> })} </div> </div> <hr /> <br /> <div className="block-view-video" style={styleVideoBlock}> <div className="block-view-title">{languages.video}</div> <div className="block-video-content"> {video} </div> </div> <hr /> <br /> <div className="block-view-order"> <div className="block-view-title">{languages.cook}</div> <div className="block-content"> {Object.keys(steps).map((step,index) => { let img if(steps[step].images) img = (<img src={CDN_URL+`${steps[step].images}`} alt="img" />) return <div className="row" 
key={`step-${index}`}> <div className="col-sm-6"> <span className="stt"> {parseInt(index)+1} </span> <div className="des"> {steps[step].content} </div> </div> <div className="col-sm-6"> {img} </div> </div> })} </div> </div> <div className="block-tag"> <label>Tag</label> {tags.map((tag,index) =>{ return <Link to={`/searchs?tag=${tag}`} key={`tag-${index}`} className="tag">#{tag}</Link> })} </div> <div className="block-view-related"> <div className="block-view-title">{languages.relation}</div> <div className="items row"> {relations.map((relate,index) => { let name = '' if(relate.user) name = relate.user.name return <div className="col-sm-4 item " key={`relate-${index}`}> <div className="item-fearture-product"> <div className="photo"> <Link to={`/recipe/${relate.id}/detail`}> <img src={CDN_URL+`${$.parseJSON(relate.cooking_representation)[0]['images']}`} alt="img" /> </Link> </div> <div className="detail"> <h3 className="title"><Link to={`/recipe/${relate.id}/detail`}>{relate.title} </Link> </h3> <span className="subtitle"> {name} </span> </div> </div> </div> })} </div> </div> <hr /> <br /> <Comments object_type="Post" object_id={recipeId} total={detail.total_cmt} sort="id" redirect_url={`detail/${recipeId}/detail`} /> </div> </div> </main> ); } } export default RecipeDetail;
options} = this.state; const _optionsArray = Object.va
identifier_body
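getOptionTitle and getCateTitle in the sample above duplicate one rule: prefer the title_en or title_cn column when it is non-null, otherwise fall back to the base title. A hedged sketch of that shared fallback (the LocalizedRecord shape is assumed from the fields the component reads):

// Assumed row shape: base title plus optional per-language columns.
interface LocalizedRecord {
  title: string;
  title_en?: string | null;
  title_cn?: string | null;
}

// Mirrors the component's rule: translation when present, base title otherwise.
function localizedTitle(rec: LocalizedRecord, lang: string): string {
  if (lang === 'en' && rec.title_en != null) return rec.title_en;
  if (lang === 'cn' && rec.title_cn != null) return rec.title_cn;
  return rec.title;
}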
RecipeDetail.js
import React, { Component } from 'react'; import { Carousel } from 'react-responsive-carousel'; import 'react-responsive-carousel/lib/styles/carousel.min.css'; import Comments from '../Comments'; import Countdown from 'react-countdown-now'; import { Link } from 'react-router/lib'; import LocalizedStrings from 'react-localization'; // Completed-state component const Completionist = () => <span>00:00</span>; // Renderer callback with condition const renderer = ({ hours, minutes, seconds, completed }) => { if (completed) { // Render a complete state return <Completionist />; } else { // Render a countdown return <span> {minutes} : {seconds}</span>; } }; class RecipeDetail extends Component { constructor(props) { super(props); let strings = new LocalizedStrings({ en:{ title:'View recipes', lang:'Languages', pumkin:'Homemade pumpkin cookies', material:'Material information', video:'recipe video', cook:'Cooking order', relation: 'Relationship products', label_sub_preparetime: 'Ready Time', label_sub_cookingtime: 'Cook Time', btn_edit: 'Edit' }, it: { title:'레시피 보기', lang:'언어 선택', pumkin:'수제 호박 쿠키', material:'재료안내', video:'레시피 영상', cook:'조리순서', relation:'이 레시피와 비슷해요', label_sub_preparetime: '준비시간', label_sub_cookingtime: '조리시간', btn_edit: '수정' }, cn: { title:'食谱视图', lang:'选择语言', pumkin:'自制南瓜饼干', material:'材料指南', video:'食谱视频', cook:'烹饪顺序', relation: '这与食谱相似', label_sub_preparetime: '准备时间', label_sub_cookingtime: '烹饪时间', btn_edit: '编辑' } }); this.state = { detail:{ user: { avatar:'' }, cate_detail:{ title:'' }, op_function:{ title:'' }, tags:'', }, optionsCount: 0, options: {}, time:0, mins : [1,2,3,4,5,6,7,8,9,10], secs : [0,1,2,3,4,5,6,7,8,9,10], saving:false, recipeId: props.routeParams.id, relations:[], languages:strings, type_lang:'it' }; this.getRecipe() //this.pushView() this.handleAddWish = this.handleAddWish.bind(this) this.handleTime = this.handleTime.bind(this) this.handeReset = this.handeReset.bind(this) } pushView(){ const { detail } = this.state const { recipeId } = this.state let user_id = 0 if(PROFILES) user_id = PROFILES.id let data = { object_id: recipeId, type: 'View', object_type:'Post', author_id: user_id, user_id: user_id, action: 1 } let header = ('') if(user_id != 0) header = {headers:{ 'Authorization': `Bearer ${TOKENS}` }} //refreshToken() axios.put(`/api/v3/reactions/view`, data, header) .then(response => { if (response.status === 200) { this.getRecipe() } this.setState({saving: true}); }) .catch(error => { if(error.response.data.message == '토큰이 블랙리스트에 올랐습니다.'){ console.log('black list') // localStorage.removeItem('tokens') } if(error.response.data.status_code == 401 || error.response.data.status_code == 500){ alert('다시 로그인해야합니다.') window.location.href = '/logout?redirect_url=recipe/'+recipeId+'/detail' } }); } componentDidMount() { this.pushView(); this.getRelationship(); axios.get('/api/v1/options') .then((res) => { this.setState({ optionsCount: res.data.meta.total_count, options: res.data.result }); }) .catch((err) => { console.log(err) }); window.scrollTo(0, 0); } getRelationship(){ const { recipeId } = this.state const { detail } = this.state console.log(detail) axios.get(`/api/v3/recipes/relation/`+recipeId) .then(response => { this.setRelation(response.data); }) .catch(error => { console.log(error); }); } componentWillReceiveProps(nextProps) { const {recipeId} = this.state; if(recipeId != nextProps.params.id) { this.setState({recipeId: nextProps.params.id}); this.pushView(); this.getRelationship(); $('html, body').animate({ scrollTop: 0 }, 0); } } setRelation(relations) {
this.setState({ relations:relations['result'] }); } async getRecipe(){ const { recipeId } = this.state; await axios.get(`/api/v1/post?embeds=user%2Ccate_detail%2Cop_function%2Cop_difficulty&filters=status=1,id=${recipeId}`) .then(response => { this.setDetail(response.data); }) .catch(error => { console.log(error); }); } setDetail(detail) { this.setState({ detail:detail['result'][0] }); } renderSlideTop(banners){ let bannerss = []; banners.map((banner, index) => { if(banners[index].images){ if(bannerss[index] == undefined){ bannerss[index] = {} } bannerss[index]['images'] = banners[index].images; } }) return <Carousel showArrows={true} showIndicators={false} showStatus={true} autoPlay={true} showThumbs={false}> {bannerss.map((banner, index) => { if(banners[index].images){ return <div key={`banner-${index}`} className=""> <img src={CDN_URL+`${banner.images}`} alt="img" /> </div> } })} </Carousel> } renderSlideStep(steps){ return <Carousel showArrows={false} showIndicators={false} showStatus={true} autoPlay={true} showThumbs={false}> {Object.keys(steps).map((step, index) => { let img if(!steps[step].video) img = (<img style={{height:'80vw'}} src={CDN_URL+`${steps[step].images}`} alt="img" />) return <div key={`step-${index}`} className="item"> {img} </div> })} </Carousel> } handleBack(){ window.history.go(-1); } handleAddWish(){ if(!PROFILES) { alert('아직 로그인하지 않았습니다.') return false } const con = confirm('레시피를 찜 하시겠습니까?') if(con !== true) return false const { detail } = this.state const { saving } = this.state let action = 1 if(saving === true){ //unlike action = 0 } //if(saving === true) action = 0 let data = { object_id: detail.id, type: 'Like', object_type:'Post', action: action, author_id: detail.user.id } const { recipeId } = this.state //request new token refreshToken() axios.put(`/api/v3/reactions/react`, data, {headers:{ 'Authorization': `Bearer ${TOKENS}` }}) .then(response => { if (response.status === 200) { //alert(response.data.message) this.getRecipe() }
}) .catch(error => { if(error.response.data.status_code == 401){ alert('다시 로그인해야합니다.') window.location.href = '/logout?redirect_url=recipe/'+recipeId+'/detail' } }); } resetErrors() { this.setState({ errors: {}, hasError: false, }) } resetMessage() { this.setState({ message: null }) } renderErrors() { const { errors, hasError } = this.state if (!hasError) return null return ( <div className="alert alert-danger alert-dismissible" role="alert"> <button type="button" className="close" data-dismiss="alert" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> <p>{this.state.errors.message}</p> </div> ) } renderMessage() { const { message } = this.state if (message) { return ( <div className="alert alert-success alert-dismissible" role="alert"> <button type="button" className="close" data-dismiss="alert" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> <p><strong>Success!</strong> { message }</p> </div> ) } } handleTime(){ this.setState({ time: 300000 }) } handeReset(){ this.setState({ time: 0 }) this.handleTime() } changeLang({name,value}){ console.log(value) this.setState({type_lang:value}); localStorage.setItem('lang', value); } getOptionTitle(_id) { const {optionsCount} = this.state; const {options} = this.state; const _optionsArray = Object.values(options); const {languages} = this.state; let {type_lang} = this.state; let lang = localStorage.getItem('lang'); if(lang) type_lang = lang; languages.setLanguage(type_lang); for(var i = 0; i < optionsCount; i++) { if(_optionsArray[i].id == _id) { var _text = _optionsArray[i].title; if (type_lang == "en") { _text = (_optionsArray[i].title_en != null) ? _optionsArray[i].title_en : _optionsArray[i].title; } else if (type_lang == "cn") { _text = (_optionsArray[i].title_cn != null) ? _optionsArray[i].title_cn : _optionsArray[i].title; } return _text; } } return ""; } getCateTitle() { const {detail} = this.state; const {languages} = this.state; let {type_lang} = this.state; let lang = localStorage.getItem('lang'); if(lang) type_lang = lang; languages.setLanguage(type_lang); var _text = detail.cate_detail.title; if (type_lang == "en") { _text = (detail.cate_detail.title_en != null) ? detail.cate_detail.title_en : detail.cate_detail.title; } else if (type_lang == "cn") { _text = (detail.cate_detail.title_cn != null) ? 
detail.cate_detail.title_cn : detail.cate_detail.title; } return _text; } render() { const { detail } = this.state const { time } = this.state const { mins } = this.state const { secs } = this.state const { recipeId } = this.state const { relations } = this.state const { options } = this.state; if(detail == undefined) window.location.href = "/404" var _cateLink = "/cats"; // cat id = 20 if(detail.cate_detail.id == 3) { // dog id = 3 _cateLink = "/dogs"; } let materials if(detail.materials != undefined){ materials = JSON.parse(detail.materials) }else{ materials = {} } let steps if(detail.steps != undefined){ steps = JSON.parse(detail.steps) }else{ steps = {} } let bannerTop = [] var thumbVideo = ""; if(detail.cooking_representation){ bannerTop = JSON.parse(detail.cooking_representation) if(bannerTop[0].images != null || bannerTop[0].images != undefined) { thumbVideo = "/uploads/"+bannerTop[0].images; } } let avatar = (<img src="/html/images/media/img2.jpg" alt="img" />) if(detail.user){ if(detail.user.avatar) if (detail.user.avatar.indexOf('http') > -1) avatar = (<img src={detail.user.avatar} alt="img" />) else avatar = (<img src={CDN_URL+detail.user.avatar} alt="img" />) }else{ window.location.href = "/404" } //tags let tags = detail.tags.split(',') || []; //language const {languages} = this.state; let {type_lang} = this.state; let lang = localStorage.getItem('lang'); if(lang) type_lang = lang; languages.setLanguage(type_lang); //video let video = ""; var styleVideoBlock = {display:"block"}; if(detail.videos != undefined && detail.videos != "[]") { video = (<a data-fancybox href={detail['videos']}> <div style={{position:"relative",backgroundImage: "url("+thumbVideo+")",backgroundPosition: "center", backgroundRepeat: "no-repeat", backgroundSize: "cover", display:"block",width:"600px",height:"350px",margin:"auto"}}> <div style={{backgroundImage: "url('/default/images/icon/playbutton.jpg')",backgroundPosition: "center", backgroundRepeat: "no-repeat",width:"100%",height:"100%"}}></div> </div> </a>); } else { styleVideoBlock = {display: "none"}; } let _editButton = ""; if(PROFILES != undefined && detail.user_id == PROFILES.id) { _editButton = (<Link to={`/recipe/${detail.id}/edit`} className="pull-right btn btn-default btn-comment" >{languages.btn_edit}</Link>); } return (<main className="site-main"> <div className="container"> <div className="block-gird-item"> <div className="toobar"> {this.renderErrors()} {this.renderMessage()} <div className="title pull-left"> {languages.title} </div> <select className="form-control pull-right" value={type_lang} name="lang" onChange={(e) => this.changeLang({ name: e.target.name, value: e.target.value })} style={{marginLeft: 12}}> <option value="it">한국어</option> <option value="en">English</option> <option value="cn">Chinese</option> </select> <label className="pull-right">{languages.lang}</label> {_editButton} </div> <div className="items-talk-detail block-view-detail clearfix"> <br /><br /> <div className="item-talk col-sm-8 col-sm-offset-2"> <div className="photo"> {this.renderSlideTop(bannerTop)} </div> <div className="item-top"> <div className="img"> <div className="photo"> {avatar} </div> </div> <div className="detail"> <strong className="title">{ detail.user.name || ''}</strong> <span>{ detail.desc || ''}</span> </div> </div> <div className="item-description"> <strong className="title">{ detail.title }</strong> <div className="des"> { detail.content } </div> <div style={{display:"flex"}}> <div className="item-top info"> <span className="view">{ 
detail.total_view}</span> <span className="wishlist" onClick={this.handleAddWish}>{ detail.total_like }</span> <span className="comment">{detail.total_cmt}</span> </div> </div> <div className="link"> <Link to={_cateLink}>{this.getCateTitle()}</Link> <Link to={`/searchs?function=${detail.op_function.id}`}>{this.getOptionTitle(detail.function)}</Link> <Link to={`/searchs?difficulty=${detail.difficulty}`}><img src="/default/images/icon/icon1.png" style={{width: 20, marginRight: 4}} /> {this.getOptionTitle(detail.difficulty)}</Link> <Link to={`/searchs?prepare_time=${detail.prepare_time}`}>{languages.label_sub_preparetime} <br /> {this.getOptionTitle(detail.prepare_time)}</Link> <Link to={`/searchs?cooking_time=${detail.cooking_time}`}>{languages.label_sub_cookingtime} <br />{this.getOptionTitle(detail.cooking_time)}</Link> </div> </div> </div> </div> <hr /> <br /> <div className="block-view-related"> <div className="block-view-title">{languages.material}</div> <div className="items row"> {Object.keys(materials).map((material,index) => { let img = ""; if(materials[material].images) img = (<img src={CDN_URL+`${materials[material].images}`} alt="img" />); var _link = `/searchs?material=${materials[material].name}`; // if(_matName.charCodeAt(0) > 255) { // var _encodedKeyword = ""; // for(var _index = 0; _index < _matName.length; _index++) { // _encodedKeyword += '\\\\u' + _matName.charCodeAt(_index).toString(16); // } // _encodedKeyword = _encodedKeyword.substr(2, _encodedKeyword.length); // _link = _link+_encodedKeyword; // } else { // _link = _link+_matName; // } return <div className="col-sm-3 item " key={`material-${index}`}> <div className="item-fearture-product"> <div className="photo"> <Link to={`${_link}`} >{img}</Link> </div> <div className="detail"> <h3 className="title"><Link to={`${_link}`}>{materials[material].name} </Link> </h3> <span className="subtitle">{materials[material].quantity} {this.getOptionTitle(materials[material].unit)}</span> </div> </div> </div> })} </div> </div> <hr /> <br /> <div className="block-view-video" style={styleVideoBlock}> <div className="block-view-title">{languages.video}</div> <div className="block-video-content"> {video} </div> </div> <hr /> <br /> <div className="block-view-order"> <div className="block-view-title">{languages.cook}</div> <div className="block-content"> {Object.keys(steps).map((step,index) => { let img if(steps[step].images) img = (<img src={CDN_URL+`${steps[step].images}`} alt="img" />) return <div className="row" key={`step-${index}`}> <div className="col-sm-6"> <span className="stt"> {parseInt(index)+1} </span> <div className="des"> {steps[step].content} </div> </div> <div className="col-sm-6"> {img} </div> </div> })} </div> </div> <div className="block-tag"> <label>Tag</label> {tags.map((tag,index) =>{ return <Link to={`/searchs?tag=${tag}`} key={`tag-${index}`} className="tag">#{tag}</Link> })} </div> <div className="block-view-related"> <div className="block-view-title">{languages.relation}</div> <div className="items row"> {relations.map((relate,index) => { let name = '' if(relate.user) name = relate.user.name return <div className="col-sm-4 item " key={`relate-${index}`}> <div className="item-fearture-product"> <div className="photo"> <Link to={`/recipe/${relate.id}/detail`}> <img src={CDN_URL+`${$.parseJSON(relate.cooking_representation)[0]['images']}`} alt="img" /> </Link> </div> <div className="detail"> <h3 className="title"><Link to={`/recipe/${relate.id}/detail`}>{relate.title} </Link> </h3> <span className="subtitle"> 
{name} </span> </div> </div> </div> })} </div> </div> <hr /> <br /> <Comments object_type="Post" object_id={recipeId} total={detail.total_cmt} sort="id" redirect_url={`detail/${recipeId}/detail`} /> </div> </div> </main> ); } } export default RecipeDetail;
if(saving == false) this.setState({saving: true}); else this.setState({saving: false});
random_line_split
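The middle filled in above toggles saving by branching on its current value inside an async .then callback, where the read can be stale by the time the response arrives. React's functional setState form expresses the same flip against the state as it is when the update actually applies; a self-contained sketch (the Toggle class is illustrative only):

// Sketch: a generic boolean flip for a React class component.
import { Component } from 'react';

class Toggle extends Component<{}, { saving: boolean }> {
  state = { saving: false };
  // prev is the state at apply time, so the flip cannot act on a stale read.
  flip = () => this.setState(prev => ({ saving: !prev.saving }));
}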
RecipeDetail.js
import React, { Component } from 'react'; import { Carousel } from 'react-responsive-carousel'; import 'react-responsive-carousel/lib/styles/carousel.min.css'; import Comments from '../Comments'; import Countdown from 'react-countdown-now'; import { Link } from 'react-router/lib'; import LocalizedStrings from 'react-localization'; // Completed-state component const Completionist = () => <span>00:00</span>; // Renderer callback with condition const renderer = ({ hours, minutes, seconds, completed }) => { if (completed) { // Render a complete state return <Completionist />; } else { // Render a countdown return <span> {minutes} : {seconds}</span>; } }; class RecipeDetail extends Component { constructor(props) { super(props); let strings = new LocalizedStrings({ en:{ title:'View recipes', lang:'Languages', pumkin:'Homemade pumpkin cookies', material:'Material information', video:'recipe video', cook:'Cooking order', relation: 'Relationship products', label_sub_preparetime: 'Ready Time', label_sub_cookingtime: 'Cook Time', btn_edit: 'Edit' }, it: { title:'레시피 보기', lang:'언어 선택', pumkin:'수제 호박 쿠키', material:'재료안내', video:'레시피 영상', cook:'조리순서', relation:'이 레시피와 비슷해요', label_sub_preparetime: '준비시간', label_sub_cookingtime: '조리시간', btn_edit: '수정' }, cn: { title:'食谱视图', lang:'选择语言', pumkin:'自制南瓜饼干', material:'材料指南', video:'食谱视频', cook:'烹饪顺序', relation: '这与食谱相似', label_sub_preparetime: '准备时间', label_sub_cookingtime: '烹饪时间', btn_edit: '编辑' } }); this.state = { detail:{ user: { avatar:'' }, cate_detail:{ title:'' }, op_function:{ title:'' }, tags:'', }, optionsCount: 0, options: {}, time:0, mins : [1,2,3,4,5,6,7,8,9,10], secs : [0,1,2,3,4,5,6,7,8,9,10], saving:false, recipeId: props.routeParams.id, relations:[], languages:strings, type_lang:'it' }; this.getRecipe() //this.pushView() this.handleAddWish = this.handleAddWish.bind(this) this.handleTime = this.handleTime.bind(this) this.handeReset = this.handeReset.bind(this) } pushView(){ const { detail } = this.state const { recipeId } = this.state let user_id = 0 if(PROFILES) user_id = PROFILES.id let data = { object_id: recipeId, type: 'View', object_type:'Post', author_id: user_id, user_id: user_id, action: 1 } let header = ('') if(user_id != 0) header = {headers:{ 'Authorization': `Bearer ${TOKENS}` }} //refreshToken() axios.put(`/api/v3/reactions/view`, data, header) .then(response => { if (response.status === 200) { this.getRecipe() } this.setState({saving: true}); }) .catch(error => { if(error.response.data.message == '토큰이 블랙리스트에 올랐습니다.'){ console.log('black list') // localStorage.removeItem('tokens') } if(error.response.data.status_code == 401 || error.response.data.status_code == 500){ alert('다시 로그인해야합니다.') window.location.href = '/logout?redirect_url=recipe/'+recipeId+'/detail' } }); } componentDidMount() { this.pushView(); this.getRelationship(); axios.get('/api/v1/options') .then((res) => { this.setState({ optionsCount: res.data.meta.total_count, options: res.data.result }); }) .catch((err) => { console.log(err) }); window.scrollTo(0, 0); } getRelationship(){ const { recipeId } = this.state const { detail } = this.state console.log(detail) axios.get(`/api/v3/recipes/relation/`+recipeId) .then(response => { this.setRelation(response.data); }) .catch(error => { console.log(error); }); } componentWillReceiveProps(nextProps) { const {recipeId} = this.state; if(recipeId != nextProps.params.id) { this.setState({recipeId: nextProps.params.id}); this.pushView(); this.getRelationship(); $('html, body').animate({ scrollTop: 0 }, 0); } } setRelation(relations) {
this.setState({ relations:relations['result'] }); } async getRecipe(){ const { recipeId } = this.state; await axios.get(`/api/v1/post?embeds=user%2Ccate_detail%2Cop_function%2Cop_difficulty&filters=status=1,id=${recipeId}`) .then(response => { this.setDetail(response.data); }) .catch(error => { console.log(error); }); } setDetail(detail) { this.setState({ detail:detail['result'][0] }); } renderSlideTop(banners){ let bannerss = []; banners.map((banner, index) => { if(banners[index].images){ if(bannerss[index] == undefined){ bannerss[index] = {} } bannerss[index]['images'] = banners[index].images; } }) return <Carousel showArrows={true} showIndicators={false} showStatus={true} autoPlay={true} showThumbs={false}> {bannerss.map((banner, index) => { if(banners[index].images){ return <div key={`banner-${index}`} className=""> <img src={CDN_URL+`${banner.images}`} alt="img" /> </div> } })} </Carousel> } renderSlideStep(steps){ return <Carousel showArrows={false} showIndicators={false} showStatus={true} autoPlay={true} showThumbs={false}> {Object.keys(steps).map((step, index) => { let img if(!steps[step].video) img = (<img style={{height:'80vw'}} src={CDN_URL+`${steps[step].images}`} alt="img" />) return <div key={`step-${index}`} className="item"> {img} </div> })} </Carousel> } handleBack(){ window.history.go(-1); } handleAddWish(){ if(!PROFILES) { alert('아직 로그인하지 않았습니다.') return false } const con = confirm('레시피를 찜 하시겠습니까?') if(con !== true) return false const { detail } = this.state const { saving } = this.state let action = 1 if(saving === true){ //unlike action = 0 } //if(saving === true) action = 0 let data = { object_id: detail.id, type: 'Like', object_type:'Post', action: action, author_id: detail.user.id } const { recipeId } = this.state //request new token refreshToken() axios.put(`/api/v3/reactions/react`, data, {headers:{ 'Authorization': `Bearer ${TOKENS}` }}) .then(response => { if (response.status === 200) { //alert(response.data.message) this.getRecipe() } if(saving == false) this.setState({saving: true}); else this.setState({saving: false}); }) .catch(error => { if(error.response.data.status_code == 401){ alert('다시 로그인해야합니다.') window.location.href = '/logout?redirect_url=recipe/'+recipeId+'/detail' } }); } resetErrors() { this.setState({ errors: {}, hasError: false, }) } resetMessage() { this.setState({ message: null }) } renderErrors() { const { errors, hasError } = this.state if (!hasError) return null return ( <div className="alert alert-danger alert-dismissible" role="alert"> <button type="button" className="close" data-dismiss="alert" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> <p>{this.state.errors.message}</p> </div> ) } renderMessage() { const { message } = this.state if (message) { return ( <div className="alert alert-success alert-dismissible" role="alert"> <button type="button" className="close" data-dismiss="alert" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> <p><strong>Success!</strong> { message }</p> </div> ) } } handleTime(){ this.setState({ time: 300000 }) } handeReset(){ this.setState({ time: 0 }) this.handleTime() } changeLang({name,value}){ console.log(value) this.setState({type_lang:value}); localStorage.setItem('lang', value); } getOptionTitle(_id) { const {optionsCount} = this.state; const {options} = this.state; const _optionsArray = Object.values(options); const {languages} = this.state; let {type_lang} = this.state; let lang = localStorage.getItem('lang'); if(lang) type_lang = lang; 
languages.setLanguage(type_lang); for(var i = 0; i < optionsCount; i++) { if(_optionsArray[i].id == _id) { var _text = _optionsArray[i].title; if (type_lang == "en") { _text = (_optionsArray[i].title_en != null) ? _optionsArray[i].title_en : _optionsArray[i].title; } else if (type_lang == "cn") { _text = (_optionsArray[i].title_cn != null) ? _optionsArray[i].title_cn : _optionsArray[i].title; } return _text; } } return ""; } getCateTitle() { const {detail} = this.state; const {languages} = this.state; let {type_lang} = this.state; let lang = localStorage.getItem('lang'); if(lang) type_lang = lang; languages.setLanguage(type_lang); var _text = detail.cate_detail.titl
(type_lang == "en") { _text = (detail.cate_detail.title_en != null) ? detail.cate_detail.title_en : detail.cate_detail.title; } else if (type_lang == "cn") { _text = (detail.cate_detail.title_cn != null) ? detail.cate_detail.title_cn : detail.cate_detail.title; } return _text; } render() { const { detail } = this.state const { time } = this.state const { mins } = this.state const { secs } = this.state const { recipeId } = this.state const { relations } = this.state const { options } = this.state; if(detail == undefined) window.location.href = "/404" var _cateLink = "/cats"; // cat id = 20 if(detail.cate_detail.id == 3) { // dog id = 3 _cateLink = "/dogs"; } let materials if(detail.materials != undefined){ materials = JSON.parse(detail.materials) }else{ materials = {} } let steps if(detail.steps != undefined){ steps = JSON.parse(detail.steps) }else{ steps = {} } let bannerTop = [] var thumbVideo = ""; if(detail.cooking_representation){ bannerTop = JSON.parse(detail.cooking_representation) if(bannerTop[0].images != null || bannerTop[0].images != undefined) { thumbVideo = "/uploads/"+bannerTop[0].images; } } let avatar = (<img src="/html/images/media/img2.jpg" alt="img" />) if(detail.user){ if(detail.user.avatar) if (detail.user.avatar.indexOf('http') > -1) avatar = (<img src={detail.user.avatar} alt="img" />) else avatar = (<img src={CDN_URL+detail.user.avatar} alt="img" />) }else{ window.location.href = "/404" } //tags let tags = detail.tags.split(',') || []; //language const {languages} = this.state; let {type_lang} = this.state; let lang = localStorage.getItem('lang'); if(lang) type_lang = lang; languages.setLanguage(type_lang); //video let video = ""; var styleVideoBlock = {display:"block"}; if(detail.videos != undefined && detail.videos != "[]") { video = (<a data-fancybox href={detail['videos']}> <div style={{position:"relative",backgroundImage: "url("+thumbVideo+")",backgroundPosition: "center", backgroundRepeat: "no-repeat", backgroundSize: "cover", display:"block",width:"600px",height:"350px",margin:"auto"}}> <div style={{backgroundImage: "url('/default/images/icon/playbutton.jpg')",backgroundPosition: "center", backgroundRepeat: "no-repeat",width:"100%",height:"100%"}}></div> </div> </a>); } else { styleVideoBlock = {display: "none"}; } let _editButton = ""; if(PROFILES != undefined && detail.user_id == PROFILES.id) { _editButton = (<Link to={`/recipe/${detail.id}/edit`} className="pull-right btn btn-default btn-comment" >{languages.btn_edit}</Link>); } return (<main className="site-main"> <div className="container"> <div className="block-gird-item"> <div className="toobar"> {this.renderErrors()} {this.renderMessage()} <div className="title pull-left"> {languages.title} </div> <select className="form-control pull-right" value={type_lang} name="lang" onChange={(e) => this.changeLang({ name: e.target.name, value: e.target.value })} style={{marginLeft: 12}}> <option value="it">한국어</option> <option value="en">English</option> <option value="cn">Chinese</option> </select> <label className="pull-right">{languages.lang}</label> {_editButton} </div> <div className="items-talk-detail block-view-detail clearfix"> <br /><br /> <div className="item-talk col-sm-8 col-sm-offset-2"> <div className="photo"> {this.renderSlideTop(bannerTop)} </div> <div className="item-top"> <div className="img"> <div className="photo"> {avatar} </div> </div> <div className="detail"> <strong className="title">{ detail.user.name || ''}</strong> <span>{ detail.desc || ''}</span> </div> </div> <div 
className="item-description"> <strong className="title">{ detail.title }</strong> <div className="des"> { detail.content } </div> <div style={{display:"flex"}}> <div className="item-top info"> <span className="view">{ detail.total_view}</span> <span className="wishlist" onClick={this.handleAddWish}>{ detail.total_like }</span> <span className="comment">{detail.total_cmt}</span> </div> </div> <div className="link"> <Link to={_cateLink}>{this.getCateTitle()}</Link> <Link to={`/searchs?function=${detail.op_function.id}`}>{this.getOptionTitle(detail.function)}</Link> <Link to={`/searchs?difficulty=${detail.difficulty}`}><img src="/default/images/icon/icon1.png" style={{width: 20, marginRight: 4}} /> {this.getOptionTitle(detail.difficulty)}</Link> <Link to={`/searchs?prepare_time=${detail.prepare_time}`}>{languages.label_sub_preparetime} <br /> {this.getOptionTitle(detail.prepare_time)}</Link> <Link to={`/searchs?cooking_time=${detail.cooking_time}`}>{languages.label_sub_cookingtime} <br />{this.getOptionTitle(detail.cooking_time)}</Link> </div> </div> </div> </div> <hr /> <br /> <div className="block-view-related"> <div className="block-view-title">{languages.material}</div> <div className="items row"> {Object.keys(materials).map((material,index) => { let img = ""; if(materials[material].images) img = (<img src={CDN_URL+`${materials[material].images}`} alt="img" />); var _link = `/searchs?material=${materials[material].name}`; // if(_matName.charCodeAt(0) > 255) { // var _encodedKeyword = ""; // for(var _index = 0; _index < _matName.length; _index++) { // _encodedKeyword += '\\\\u' + _matName.charCodeAt(_index).toString(16); // } // _encodedKeyword = _encodedKeyword.substr(2, _encodedKeyword.length); // _link = _link+_encodedKeyword; // } else { // _link = _link+_matName; // } return <div className="col-sm-3 item " key={`material-${index}`}> <div className="item-fearture-product"> <div className="photo"> <Link to={`${_link}`} >{img}</Link> </div> <div className="detail"> <h3 className="title"><Link to={`${_link}`}>{materials[material].name} </Link> </h3> <span className="subtitle">{materials[material].quantity} {this.getOptionTitle(materials[material].unit)}</span> </div> </div> </div> })} </div> </div> <hr /> <br /> <div className="block-view-video" style={styleVideoBlock}> <div className="block-view-title">{languages.video}</div> <div className="block-video-content"> {video} </div> </div> <hr /> <br /> <div className="block-view-order"> <div className="block-view-title">{languages.cook}</div> <div className="block-content"> {Object.keys(steps).map((step,index) => { let img if(steps[step].images) img = (<img src={CDN_URL+`${steps[step].images}`} alt="img" />) return <div className="row" key={`step-${index}`}> <div className="col-sm-6"> <span className="stt"> {parseInt(index)+1} </span> <div className="des"> {steps[step].content} </div> </div> <div className="col-sm-6"> {img} </div> </div> })} </div> </div> <div className="block-tag"> <label>Tag</label> {tags.map((tag,index) =>{ return <Link to={`/searchs?tag=${tag}`} key={`tag-${index}`} className="tag">#{tag}</Link> })} </div> <div className="block-view-related"> <div className="block-view-title">{languages.relation}</div> <div className="items row"> {relations.map((relate,index) => { let name = '' if(relate.user) name = relate.user.name return <div className="col-sm-4 item " key={`relate-${index}`}> <div className="item-fearture-product"> <div className="photo"> <Link to={`/recipe/${relate.id}/detail`}> <img 
src={CDN_URL+`${$.parseJSON(relate.cooking_representation)[0]['images']}`} alt="img" /> </Link> </div> <div className="detail"> <h3 className="title"><Link to={`/recipe/${relate.id}/detail`}>{relate.title} </Link> </h3> <span className="subtitle"> {name} </span> </div> </div> </div> })} </div> </div> <hr /> <br /> <Comments object_type="Post" object_id={recipeId} total={detail.total_cmt} sort="id" redirect_url={`detail/${recipeId}/detail`} /> </div> </div> </main> ); } } export default RecipeDetail;
e; if
identifier_name
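render in the sample above guards JSON.parse(detail.materials) and JSON.parse(detail.steps) only against undefined; a malformed payload would still throw. A small helper makes the fallback explicit and also absorbs bad JSON. This is a sketch only, and the safeParseObject name is illustrative:

// Parse a JSON-encoded object field, falling back to {} when absent or malformed.
function safeParseObject(raw?: string): Record<string, unknown> {
  if (raw == null) return {};
  try {
    const parsed = JSON.parse(raw);
    return typeof parsed === 'object' && parsed !== null ? parsed : {};
  } catch {
    return {};
  }
}

// Usage in the same spirit as render: const materials = safeParseObject(detail.materials);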
shoot_ball.js
var gl; var targetVertices=[]; var targetUVs = []; //Ball Arrays var ballVertices=[]; var ballUVs = []; var velocities = []; //Ball constants var maxBalls = 500; var radius = 0.2; var ballIndex = 0; var maxZ = 20; var minY = -50; var initVel = 1.1; var gravity = vec3(0, -0.01, 0); var powerScale = 0.01; var pointsAroundCircle = 100; var maxNumTargets = 5; var currentTargets = maxNumTargets; var index = 4 * maxNumTargets; var targetRadius = 0.3; //Buffers var vBuffer; var texBuffer; var centerList = []; var check = 0; var scoreCounter; var actualScore = 0; var missCounter; var actualMissCount = 5; var highScore; var actualhighScore = 0;; window.onload = function init() { var canvas = document.getElementById( "gl-canvas" ); gl = WebGLUtils.setupWebGL( canvas ); if ( !gl ) { alert( "WebGL isn't available" ); } // // Configure WebGL // gl.enable(gl.DEPTH_TEST); gl.depthMask(true); gl.depthFunc(gl.LEQUAL); gl.depthRange(0.0, 1.0); //Init score stuff scoreCounter = document.getElementById("scoreCounter"); scoreCounter.innerHTML = "Score: " + actualScore; missCounter = document.getElementById("missCounter"); missCounter.innerHTML = "Misses left: " + actualMissCount; highScore = document.getElementById("highScore"); highScore.innerHTML = "High Score this session: " + actualhighScore; //Spawns a ball with the initial conditions given by the sliders document.getElementById("fireButton").onclick = function(){ var power = document.getElementById("power").value; var angleY = document.getElementById("angleY").value; var angleX = document.getElementById("angleX").value; var velMagnitude = initVel * power; var xVel = velMagnitude * Math.sin(radians(angleY)) * Math.sin(radians(angleX)); var yVel = velMagnitude * Math.cos(radians(angleX)); var zVel = velMagnitude * Math.cos(radians(angleY)) * Math.sin(radians(angleX)); velocities.push(vec3(xVel, yVel, zVel)); ballVertices.push(vec3(0, 0, 0)); ballUVs.push(vec2(-1, -1)); for(var i = 0.0; i <= pointsAroundCircle; i += 1.0){ ballVertices.push(vec3(radius * Math.cos(i * (2.0 * Math.PI)/100.0), radius * Math.sin(i * (2.0 * Math.PI)/100.0), 0)); ballUVs.push(vec2(-1, -1)); } ballIndex++; }; gl.viewport( 0, 0, canvas.width, canvas.height ); gl.clearColor( 0.0, 0.0, 0.0, 1.0 ); // Load shaders and initialize attribute buffers var program = initShaders( gl, "vertex-shader", "fragment-shader" ); gl.useProgram( program ); // Load the data into the GPU vBuffer = gl.createBuffer(); gl.bindBuffer( gl.ARRAY_BUFFER, vBuffer ); gl.bufferData( gl.ARRAY_BUFFER, maxNumTargets * maxBalls * (pointsAroundCircle + 2) * 12, gl.STREAM_DRAW ); var vPosition = gl.getAttribLocation( program, "vPosition" ); gl.vertexAttribPointer( vPosition, 3, gl.FLOAT, false, 0, 0 ); gl.enableVertexAttribArray( vPosition ); texBuffer = gl.createBuffer(); gl.bindBuffer(gl.ARRAY_BUFFER, texBuffer); gl.bufferData(gl.ARRAY_BUFFER, maxNumTargets * maxBalls * (pointsAroundCircle + 2) * 8, gl.STATIC_DRAW); var vTextureCoord = gl.getAttribLocation(program, "vTextureCoord"); gl.vertexAttribPointer(vTextureCoord, 2, gl.FLOAT, false, 0, 0); gl.enableVertexAttribArray(vTextureCoord); //Load texture and stuff gl.activeTexture(gl.TEXTURE0); var tex = loadTexture(gl, "target_texture.png"); gl.bindTexture(gl.TEXTURE_2D, tex); const samplerLoc = gl.getUniformLocation(program, "smplr"); gl.uniform1i(samplerLoc, 0); var maxZLoc = gl.getUniformLocation( program, "maxZ" ); gl.uniform1f(maxZLoc, maxZ); alert("Use the sliders to aim a ball and hit the targets. 
Miss too many and you have to restart, but each target you hit gives you more chances. Try to score as many points as you can!"); initTargets(); runPhysics(); }; //Initialize the targets and reset the balls function initTargets(){ centerList = []; targetVertices = []; ballVertices=[]; ballUVs = []; velocities = []; ballIndex = 0; for(var x = 0; x < maxNumTargets; x++){ //Set to random position let randomX = Math.random() - Math.random(); let randomY = Math.random() - Math.random(); let randomZ = Math.random() * maxZ/2 + 1; var center = vec3(randomX*randomZ, randomY*randomZ, randomZ); //keep track of the centers for collision purposes centerList.push(center); //Generate the four corners of the target (a square texture with transparent non-circle parts) targetVertices.push(vec3(center[0] - targetRadius, center[1] + targetRadius, center[2])); targetVertices.push(vec3(center[0] + targetRadius, center[1] + targetRadius, center[2])); targetVertices.push(vec3(center[0] - targetRadius, center[1] - targetRadius, center[2])); targetVertices.push(vec3(center[0] + targetRadius, center[1] - targetRadius, center[2])); //Generate the texture coordinates for each vertex in order targetUVs.push(vec2(0,1)); targetUVs.push(vec2(1,1)); targetUVs.push(vec2(0,0)); targetUVs.push(vec2(1,0)); } }; //The code to run on each update function runPhysics()
; function render() { gl.clear( gl.COLOR_BUFFER_BIT ); //Draw each ball for(var i = 0; i < ballIndex; i++){ gl.drawArrays( gl.TRIANGLE_FAN, i * (pointsAroundCircle + 2), pointsAroundCircle + 2); } //Draw each target for(var i = 0; i < maxNumTargets; i++){ gl.drawArrays(gl.TRIANGLE_STRIP, ballIndex * (pointsAroundCircle + 2) + i * 4, 4); } window.requestAnimFrame(runPhysics); }; //Function from developer.mozilla.org webgl tutorial function loadTexture(gl, url) { const texture = gl.createTexture(); gl.bindTexture(gl.TEXTURE_2D, texture); //Initialize texture with one blue pixel const level = 0; const internalFormat = gl.RGBA; const width = 1; const height = 1; const border = 0; const srcFormat = gl.RGBA; const srcType = gl.UNSIGNED_BYTE; const pixel = new Uint8Array([0, 0, 255, 255]); // opaque blue gl.texImage2D(gl.TEXTURE_2D, level, internalFormat, width, height, border, srcFormat, srcType, pixel); const image = new Image(); image.onload = function() { gl.bindTexture(gl.TEXTURE_2D, texture); gl.texImage2D(gl.TEXTURE_2D, level, internalFormat, srcFormat, srcType, image); // WebGL1 has different requirements for power of 2 images // vs non power of 2 images so check if the image is a // power of 2 in both dimensions. if (isPowerOf2(image.width) && isPowerOf2(image.height)) { // Yes, it's a power of 2. Generate mips. gl.generateMipmap(gl.TEXTURE_2D); } else { // No, it's not a power of 2. Turn off mips and set // wrapping to clamp to edge gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR); } }; image.src = url; return texture; }; function isPowerOf2(value) { return (value & (value - 1)) == 0; };
{ for(var i = 0; i < velocities.length; i++){ //Move the ball by its velocity for(var j = 0; j < (pointsAroundCircle + 2); j++){ ballVertices[(pointsAroundCircle + 2) * i + j] = add(ballVertices[(pointsAroundCircle + 2) * i + j], velocities[i]); } //Add acceleration to the velocity velocities[i] = add(velocities[i], gravity); //Check if the ball is out of bounds if(ballVertices[(pointsAroundCircle + 2) * i][2] > maxZ || ballVertices[(pointsAroundCircle + 2) * i][1] < minY){ velocities.splice(i, 1); ballVertices.splice((pointsAroundCircle + 2) * i, pointsAroundCircle + 2); ballUVs.splice((pointsAroundCircle + 2) * i, pointsAroundCircle + 2); i--; ballIndex--; actualMissCount--; missCounter.innerHTML = "Misses left: " + actualMissCount; //If out of misses, reset game state while saving the new high score if necessary if(actualMissCount == 0) { if(actualScore > actualhighScore) { actualhighScore = actualScore; } alert("Out of misses! You got " + actualScore + " points this round."); actualScore = 0; scoreCounter.innerHTML = "Score: " + actualScore; actualMissCount = 5; missCounter.innerHTML = "Misses left: " + actualMissCount; highScore.innerHTML = "High Score this session: " + actualhighScore; initTargets(); } } //Check for collisions else{ let ballCenter = ballVertices[(pointsAroundCircle + 2) * i]; for(let x = 0; x < centerList.length; x++) { let a = centerList[x][0] - ballCenter[0]; let b = centerList[x][1] - ballCenter[1]; let c = centerList[x][2] - ballCenter[2]; //Check if the center of the ball is within range of the center of the target if(Math.sqrt(a * a + b * b + c * c) < (radius + targetRadius)/* && Math.abs(ballCenter[2] - centerList[x][2]) <= .5*/) { centerList[x] = vec3 (25, 25, 1000); targetVertices[x*4] = vec3(25,25,1000); targetVertices[x*4 + 1] = vec3(25,25,1000); targetVertices[x*4 + 2] = vec3(25,25,1000); targetVertices[x*4 + 3] = vec3(25,25,1000);//basically just sets this target to a place off screen currentTargets--; console.log("Hit! " + currentTargets + " remaining"); actualScore++; scoreCounter.innerHTML = "Score: " + actualScore; actualMissCount = actualMissCount + 2; missCounter.innerHTML = "Misses left: " + actualMissCount; //If there are no remaining targets, spawn more if(currentTargets === 0) { currentTargets = maxNumTargets; initTargets(); } } } } } //Add the vertices to the buffers gl.bindBuffer(gl.ARRAY_BUFFER, vBuffer ); gl.bufferSubData(gl.ARRAY_BUFFER, 0, flatten(ballVertices)); gl.bufferSubData(gl.ARRAY_BUFFER, ballVertices.length * 12, flatten(targetVertices)); gl.bindBuffer(gl.ARRAY_BUFFER, texBuffer); gl.bufferSubData(gl.ARRAY_BUFFER, 0, flatten(ballUVs)); gl.bufferSubData(gl.ARRAY_BUFFER, ballUVs.length * 8, flatten(targetUVs)); render(); }
identifier_body
shoot_ball.js
var gl; var targetVertices=[]; var targetUVs = []; //Ball Arrays var ballVertices=[]; var ballUVs = []; var velocities = []; //Ball constants var maxBalls = 500; var radius = 0.2; var ballIndex = 0; var maxZ = 20; var minY = -50; var initVel = 1.1; var gravity = vec3(0, -0.01, 0); var powerScale = 0.01; var pointsAroundCircle = 100; var maxNumTargets = 5; var currentTargets = maxNumTargets; var index = 4 * maxNumTargets; var targetRadius = 0.3; //Buffers var vBuffer; var texBuffer; var centerList = []; var check = 0; var scoreCounter; var actualScore = 0; var missCounter; var actualMissCount = 5; var highScore; var actualhighScore = 0;; window.onload = function init() { var canvas = document.getElementById( "gl-canvas" ); gl = WebGLUtils.setupWebGL( canvas ); if ( !gl ) { alert( "WebGL isn't available" ); } // // Configure WebGL // gl.enable(gl.DEPTH_TEST); gl.depthMask(true); gl.depthFunc(gl.LEQUAL); gl.depthRange(0.0, 1.0); //Init score stuff scoreCounter = document.getElementById("scoreCounter"); scoreCounter.innerHTML = "Score: " + actualScore; missCounter = document.getElementById("missCounter"); missCounter.innerHTML = "Misses left: " + actualMissCount; highScore = document.getElementById("highScore"); highScore.innerHTML = "High Score this session: " + actualhighScore; //Spawns a ball with the initial conditions given by the sliders document.getElementById("fireButton").onclick = function(){ var power = document.getElementById("power").value; var angleY = document.getElementById("angleY").value; var angleX = document.getElementById("angleX").value; var velMagnitude = initVel * power; var xVel = velMagnitude * Math.sin(radians(angleY)) * Math.sin(radians(angleX)); var yVel = velMagnitude * Math.cos(radians(angleX)); var zVel = velMagnitude * Math.cos(radians(angleY)) * Math.sin(radians(angleX)); velocities.push(vec3(xVel, yVel, zVel)); ballVertices.push(vec3(0, 0, 0)); ballUVs.push(vec2(-1, -1)); for(var i = 0.0; i <= pointsAroundCircle; i += 1.0){ ballVertices.push(vec3(radius * Math.cos(i * (2.0 * Math.PI)/100.0), radius * Math.sin(i * (2.0 * Math.PI)/100.0), 0)); ballUVs.push(vec2(-1, -1)); } ballIndex++; }; gl.viewport( 0, 0, canvas.width, canvas.height ); gl.clearColor( 0.0, 0.0, 0.0, 1.0 ); // Load shaders and initialize attribute buffers var program = initShaders( gl, "vertex-shader", "fragment-shader" ); gl.useProgram( program ); // Load the data into the GPU vBuffer = gl.createBuffer(); gl.bindBuffer( gl.ARRAY_BUFFER, vBuffer ); gl.bufferData( gl.ARRAY_BUFFER, maxNumTargets * maxBalls * (pointsAroundCircle + 2) * 12, gl.STREAM_DRAW ); var vPosition = gl.getAttribLocation( program, "vPosition" ); gl.vertexAttribPointer( vPosition, 3, gl.FLOAT, false, 0, 0 ); gl.enableVertexAttribArray( vPosition ); texBuffer = gl.createBuffer(); gl.bindBuffer(gl.ARRAY_BUFFER, texBuffer); gl.bufferData(gl.ARRAY_BUFFER, maxNumTargets * maxBalls * (pointsAroundCircle + 2) * 8, gl.STATIC_DRAW); var vTextureCoord = gl.getAttribLocation(program, "vTextureCoord"); gl.vertexAttribPointer(vTextureCoord, 2, gl.FLOAT, false, 0, 0); gl.enableVertexAttribArray(vTextureCoord); //Load texture and stuff gl.activeTexture(gl.TEXTURE0); var tex = loadTexture(gl, "target_texture.png"); gl.bindTexture(gl.TEXTURE_2D, tex); const samplerLoc = gl.getUniformLocation(program, "smplr"); gl.uniform1i(samplerLoc, 0); var maxZLoc = gl.getUniformLocation( program, "maxZ" ); gl.uniform1f(maxZLoc, maxZ); alert("Use the sliders to aim a ball and hit the targets. 
Miss too many and you have to restart, but each target you hit gives you more chances. Try to score as many points as you can!"); initTargets(); runPhysics(); }; //Initialize the targets and reset the balls function initTargets(){ centerList = []; targetVertices = []; ballVertices=[]; ballUVs = []; velocities = []; ballIndex = 0; for(var x = 0; x < maxNumTargets; x++){ //Set to random position let randomX = Math.random() - Math.random(); let randomY = Math.random() - Math.random(); let randomZ = Math.random() * maxZ/2 + 1; var center = vec3(randomX*randomZ, randomY*randomZ, randomZ); //keep track of the centers for collision purposes centerList.push(center); //Generate the four corners of the target (a square texture with transparent non-circle parts) targetVertices.push(vec3(center[0] - targetRadius, center[1] + targetRadius, center[2])); targetVertices.push(vec3(center[0] + targetRadius, center[1] + targetRadius, center[2])); targetVertices.push(vec3(center[0] - targetRadius, center[1] - targetRadius, center[2])); targetVertices.push(vec3(center[0] + targetRadius, center[1] - targetRadius, center[2])); //Generate the texture coordinates for each vertex in order targetUVs.push(vec2(0,1)); targetUVs.push(vec2(1,1)); targetUVs.push(vec2(0,0)); targetUVs.push(vec2(1,0)); } }; //The code to run in each update function runPhysics(){ for(var i = 0; i < velocities.length; i++){ //Move the ball by its velocity for(var j = 0; j < (pointsAroundCircle + 2); j++){ ballVertices[(pointsAroundCircle + 2) * i + j] = add(ballVertices[(pointsAroundCircle + 2) * i + j], velocities[i]); } //Add acceleration to the velocity velocities[i] = add(velocities[i], gravity); //Check if the ball is out of bounds if(ballVertices[(pointsAroundCircle + 2) * i][2] > maxZ || ballVertices[(pointsAroundCircle + 2) * i][1] < minY){ velocities.splice(i, 1); ballVertices.splice((pointsAroundCircle + 2) * i, pointsAroundCircle + 2); ballUVs.splice((pointsAroundCircle + 2) * i, pointsAroundCircle + 2); i--; ballIndex--; actualMissCount--; missCounter.innerHTML = "Misses left: " + actualMissCount; //If out of misses, reset game state while saving the new high score if necessary if(actualMissCount == 0) { if(actualScore > actualhighScore) { actualhighScore = actualScore; } alert("Out of misses! You got " + actualScore + " points this round."); actualScore = 0; scoreCounter.innerHTML = "Score: " + actualScore; actualMissCount = 5; missCounter.innerHTML = "Misses left: " + actualMissCount; highScore.innerHTML = "High Score this session: " + actualhighScore; initTargets(); } } //Check for collisions else{ let ballCenter = ballVertices[(pointsAroundCircle + 2) * i]; for(let x = 0; x < centerList.length; x++) { let a = centerList[x][0] - ballCenter[0]; let b = centerList[x][1] - ballCenter[1]; let c = centerList[x][2] - ballCenter[2]; //Check if the center of the ball is within range of the center of the target if(Math.sqrt(a * a + b * b + c * c) < (radius + targetRadius)/* && Math.abs(ballCenter[2] - centerList[x][2]) <= .5*/) { centerList[x] = vec3 (25, 25, 1000); targetVertices[x*4] = vec3(25,25,1000); targetVertices[x*4 + 1] = vec3(25,25,1000); targetVertices[x*4 + 2] = vec3(25,25,1000); targetVertices[x*4 + 3] = vec3(25,25,1000);//basically just sets this target to a place off screen currentTargets--; console.log("Hit! 
" + currentTargets + " remaining"); actualScore++; scoreCounter.innerHTML = "Score: " + actualScore; actualMissCount = actualMissCount + 2; missCounter.innerHTML = "Misses left: " + actualMissCount; //If there are no remaining targets, spawn more if(currentTargets === 0) { currentTargets = maxNumTargets; initTargets(); } } } } } //Add the vertices to the buffers gl.bindBuffer(gl.ARRAY_BUFFER, vBuffer ); gl.bufferSubData(gl.ARRAY_BUFFER, 0, flatten(ballVertices)); gl.bufferSubData(gl.ARRAY_BUFFER, ballVertices.length * 12, flatten(targetVertices)); gl.bindBuffer(gl.ARRAY_BUFFER, texBuffer); gl.bufferSubData(gl.ARRAY_BUFFER, 0, flatten(ballUVs)); gl.bufferSubData(gl.ARRAY_BUFFER, ballUVs.length * 8, flatten(targetUVs)); render(); }; function render() { gl.clear( gl.COLOR_BUFFER_BIT ); //Draw each ball for(var i = 0; i < ballIndex; i++){ gl.drawArrays( gl.TRIANGLE_FAN, i * (pointsAroundCircle + 2), pointsAroundCircle + 2); } //Draw each target for(var i = 0; i < maxNumTargets; i++){ gl.drawArrays(gl.TRIANGLE_STRIP, ballIndex * (pointsAroundCircle + 2) + i * 4, 4); } window.requestAnimFrame(runPhysics); }; //Function from developer.mozilla.org webgl tutorial function
(gl, url) { const texture = gl.createTexture(); gl.bindTexture(gl.TEXTURE_2D, texture); //Initialize texture with one blue pixel const level = 0; const internalFormat = gl.RGBA; const width = 1; const height = 1; const border = 0; const srcFormat = gl.RGBA; const srcType = gl.UNSIGNED_BYTE; const pixel = new Uint8Array([0, 0, 255, 255]); // opaque blue gl.texImage2D(gl.TEXTURE_2D, level, internalFormat, width, height, border, srcFormat, srcType, pixel); const image = new Image(); image.onload = function() { gl.bindTexture(gl.TEXTURE_2D, texture); gl.texImage2D(gl.TEXTURE_2D, level, internalFormat, srcFormat, srcType, image); // WebGL1 has different requirements for power of 2 images // vs non power of 2 images so check if the image is a // power of 2 in both dimensions. if (isPowerOf2(image.width) && isPowerOf2(image.height)) { // Yes, it's a power of 2. Generate mips. gl.generateMipmap(gl.TEXTURE_2D); } else { // No, it's not a power of 2. Turn off mips and set // wrapping to clamp to edge gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR); } }; image.src = url; return texture; }; function isPowerOf2(value) { return (value & (value - 1)) == 0; };
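For reference, the scoring check buried in the runPhysics code of these rows is a plain sphere-vs-sphere test: a ball registers a hit when the Euclidean distance between its center and a target's center drops below the sum of the two radii. A sketch of that test in isolation; hits is a hypothetical helper mirroring the inline check, and the radii mirror the radius/targetRadius constants:

// Sphere-vs-sphere: hit when |target - ball| < r_ball + r_target.
function hits(ballCenter, targetCenter, rBall, rTarget) {
  var a = targetCenter[0] - ballCenter[0];
  var b = targetCenter[1] - ballCenter[1];
  var c = targetCenter[2] - ballCenter[2];
  return Math.sqrt(a * a + b * b + c * c) < rBall + rTarget;
}
console.log(hits([0, 0, 0], [0.3, 0, 0], 0.2, 0.3)); // true  (0.3 < 0.5)
console.log(hits([0, 0, 0], [1.0, 0, 0], 0.2, 0.3)); // false (1.0 > 0.5)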
loadTexture
identifier_name
shoot_ball.js
var gl; var targetVertices=[]; var targetUVs = []; //Ball Arrays var ballVertices=[]; var ballUVs = []; var velocities = []; //Ball constants var maxBalls = 500; var radius = 0.2; var ballIndex = 0; var maxZ = 20; var minY = -50; var initVel = 1.1; var gravity = vec3(0, -0.01, 0); var powerScale = 0.01; var pointsAroundCircle = 100; var maxNumTargets = 5; var currentTargets = maxNumTargets; var index = 4 * maxNumTargets; var targetRadius = 0.3; //Buffers var vBuffer; var texBuffer; var centerList = []; var check = 0; var scoreCounter; var actualScore = 0; var missCounter; var actualMissCount = 5; var highScore; var actualhighScore = 0;; window.onload = function init() { var canvas = document.getElementById( "gl-canvas" ); gl = WebGLUtils.setupWebGL( canvas ); if ( !gl ) { alert( "WebGL isn't available" ); } // // Configure WebGL // gl.enable(gl.DEPTH_TEST); gl.depthMask(true); gl.depthFunc(gl.LEQUAL); gl.depthRange(0.0, 1.0); //Init score stuff scoreCounter = document.getElementById("scoreCounter"); scoreCounter.innerHTML = "Score: " + actualScore; missCounter = document.getElementById("missCounter"); missCounter.innerHTML = "Misses left: " + actualMissCount; highScore = document.getElementById("highScore"); highScore.innerHTML = "High Score this session: " + actualhighScore; //Spawns a ball with the initial conditions given by the sliders document.getElementById("fireButton").onclick = function(){ var power = document.getElementById("power").value; var angleY = document.getElementById("angleY").value; var angleX = document.getElementById("angleX").value; var velMagnitude = initVel * power; var xVel = velMagnitude * Math.sin(radians(angleY)) * Math.sin(radians(angleX)); var yVel = velMagnitude * Math.cos(radians(angleX)); var zVel = velMagnitude * Math.cos(radians(angleY)) * Math.sin(radians(angleX)); velocities.push(vec3(xVel, yVel, zVel)); ballVertices.push(vec3(0, 0, 0)); ballUVs.push(vec2(-1, -1)); for(var i = 0.0; i <= pointsAroundCircle; i += 1.0){ ballVertices.push(vec3(radius * Math.cos(i * (2.0 * Math.PI)/100.0), radius * Math.sin(i * (2.0 * Math.PI)/100.0), 0)); ballUVs.push(vec2(-1, -1)); } ballIndex++; }; gl.viewport( 0, 0, canvas.width, canvas.height ); gl.clearColor( 0.0, 0.0, 0.0, 1.0 ); // Load shaders and initialize attribute buffers var program = initShaders( gl, "vertex-shader", "fragment-shader" ); gl.useProgram( program ); // Load the data into the GPU vBuffer = gl.createBuffer(); gl.bindBuffer( gl.ARRAY_BUFFER, vBuffer ); gl.bufferData( gl.ARRAY_BUFFER, maxNumTargets * maxBalls * (pointsAroundCircle + 2) * 12, gl.STREAM_DRAW ); var vPosition = gl.getAttribLocation( program, "vPosition" ); gl.vertexAttribPointer( vPosition, 3, gl.FLOAT, false, 0, 0 ); gl.enableVertexAttribArray( vPosition ); texBuffer = gl.createBuffer(); gl.bindBuffer(gl.ARRAY_BUFFER, texBuffer); gl.bufferData(gl.ARRAY_BUFFER, maxNumTargets * maxBalls * (pointsAroundCircle + 2) * 8, gl.STATIC_DRAW); var vTextureCoord = gl.getAttribLocation(program, "vTextureCoord"); gl.vertexAttribPointer(vTextureCoord, 2, gl.FLOAT, false, 0, 0); gl.enableVertexAttribArray(vTextureCoord); //Load texture and stuff gl.activeTexture(gl.TEXTURE0); var tex = loadTexture(gl, "target_texture.png"); gl.bindTexture(gl.TEXTURE_2D, tex); const samplerLoc = gl.getUniformLocation(program, "smplr"); gl.uniform1i(samplerLoc, 0); var maxZLoc = gl.getUniformLocation( program, "maxZ" ); gl.uniform1f(maxZLoc, maxZ); alert("Use the sliders to aim a ball and hit the targets. 
Miss too many and you have to restart, but each target you hit gives you more chances. Try to score as many points as you can!"); initTargets(); runPhysics(); }; //Initialize the targets and reset the balls function initTargets(){ centerList = []; targetVertices = []; ballVertices=[]; ballUVs = []; velocities = []; ballIndex = 0; for(var x = 0; x < maxNumTargets; x++){ //Set to random position let randomX = Math.random() - Math.random(); let randomY = Math.random() - Math.random(); let randomZ = Math.random() * maxZ/2 + 1; var center = vec3(randomX*randomZ, randomY*randomZ, randomZ); //keep track of the centers for collision purposes centerList.push(center); //Generate the four corners of the target (a square texture with transparent non-circle parts) targetVertices.push(vec3(center[0] - targetRadius, center[1] + targetRadius, center[2])); targetVertices.push(vec3(center[0] + targetRadius, center[1] + targetRadius, center[2])); targetVertices.push(vec3(center[0] - targetRadius, center[1] - targetRadius, center[2])); targetVertices.push(vec3(center[0] + targetRadius, center[1] - targetRadius, center[2])); //Generate the texture coordinates for each vertice in order targetUVs.push(vec2(0,1)); targetUVs.push(vec2(1,1)); targetUVs.push(vec2(0,0)); targetUVs.push(vec2(1,0)); } }; //The the code to run in each update function runPhysics(){ for(var i = 0; i < velocities.length; i++){ //Move the ball by its velocity for(var j = 0; j < (pointsAroundCircle + 2); j++){ ballVertices[(pointsAroundCircle + 2) * i + j] = add(ballVertices[(pointsAroundCircle + 2) * i + j], velocities[i]); } //Add acceleration to the velocity velocities[i] = add(velocities[i], gravity); //Check if the ball is out of bounds if(ballVertices[(pointsAroundCircle + 2) * i][2] > maxZ || ballVertices[(pointsAroundCircle + 2) * i][1] < minY){ velocities.splice(i, 1); ballVertices.splice((pointsAroundCircle + 2) * i, pointsAroundCircle + 2); ballUVs.splice((pointsAroundCircle + 2) * i, pointsAroundCircle + 2); i--; ballIndex--; actualMissCount--; missCounter.innerHTML = "Misses left: " + actualMissCount; //If out of misses, reset game state while saving the new high score if necessary if(actualMissCount == 0)
} //Check for collisions else{ let ballCenter = ballVertices[(pointsAroundCircle + 2) * i]; for(let x = 0; x < centerList.length; x++) { let a = centerList[x][0] - ballCenter[0]; let b = centerList[x][1] - ballCenter[1]; let c = centerList[x][2] - ballCenter[2]; //Check if the center of the ball is within range of the center of the target if(Math.sqrt(a * a + b * b + c * c) < (radius + targetRadius)/* && Math.abs(ballCenter[2] - centerList[x][2]) <= .5*/) { centerList[x] = vec3 (25, 25, 1000); targetVertices[x*4] = vec3(25,25,1000); targetVertices[x*4 + 1] = vec3(25,25,1000); targetVertices[x*4 + 2] = vec3(25,25,1000); targetVertices[x*4 + 3] = vec3(25,25,1000);//basically just sets this target to a place off screen currentTargets--; console.log("Hit! " + currentTargets + " remaining"); actualScore++; scoreCounter.innerHTML = "Score: " + actualScore; actualMissCount = actualMissCount + 2; missCounter.innerHTML = "Misses left: " + actualMissCount; //If there are no remaining targets, spawn more if(currentTargets === 0) { currentTargets = maxNumTargets; initTargets(); } } } } } //Add the vertices to the buffers gl.bindBuffer(gl.ARRAY_BUFFER, vBuffer ); gl.bufferSubData(gl.ARRAY_BUFFER, 0, flatten(ballVertices)); gl.bufferSubData(gl.ARRAY_BUFFER, ballVertices.length * 12, flatten(targetVertices)); gl.bindBuffer(gl.ARRAY_BUFFER, texBuffer); gl.bufferSubData(gl.ARRAY_BUFFER, 0, flatten(ballUVs)); gl.bufferSubData(gl.ARRAY_BUFFER, ballUVs.length * 8, flatten(targetUVs)); render(); }; function render() { gl.clear( gl.COLOR_BUFFER_BIT ); //Draw each ball for(var i = 0; i < ballIndex; i++){ gl.drawArrays( gl.TRIANGLE_FAN, i * (pointsAroundCircle + 2), pointsAroundCircle + 2); } //Draw each target for(var i = 0; i < maxNumTargets; i++){ gl.drawArrays(gl.TRIANGLE_STRIP, ballIndex * (pointsAroundCircle + 2) + i * 4, 4); } window.requestAnimFrame(runPhysics); }; //Function from developer.mozilla.org webgl tutorial function loadTexture(gl, url) { const texture = gl.createTexture(); gl.bindTexture(gl.TEXTURE_2D, texture); //Initialize texture with one blue pixel const level = 0; const internalFormat = gl.RGBA; const width = 1; const height = 1; const border = 0; const srcFormat = gl.RGBA; const srcType = gl.UNSIGNED_BYTE; const pixel = new Uint8Array([0, 0, 255, 255]); // opaque blue gl.texImage2D(gl.TEXTURE_2D, level, internalFormat, width, height, border, srcFormat, srcType, pixel); const image = new Image(); image.onload = function() { gl.bindTexture(gl.TEXTURE_2D, texture); gl.texImage2D(gl.TEXTURE_2D, level, internalFormat, srcFormat, srcType, image); // WebGL1 has different requirements for power of 2 images // vs non power of 2 images so check if the image is a // power of 2 in both dimensions. if (isPowerOf2(image.width) && isPowerOf2(image.height)) { // Yes, it's a power of 2. Generate mips. gl.generateMipmap(gl.TEXTURE_2D); } else { // No, it's not a power of 2. Turn of mips and set // wrapping to clamp to edge gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR); } }; image.src = url; return texture; }; function isPowerOf2(value) { return (value & (value - 1)) == 0; };
{ if(actualScore > actualhighScore) { actualhighScore = actualScore; } alert("Out of misses! You got " + actualScore + " points this round."); actualScore = 0; scoreCounter.innerHTML = "Score: " + actualScore; actualMissCount = 5; missCounter.innerHTML = "Misses left: " + actualMissCount; highScore.innerHTML = "High Score this session: " + actualhighScore; initTargets(); }
conditional_block
shoot_ball.js
var gl; var targetVertices=[]; var targetUVs = []; //Ball Arrays var ballVertices=[]; var ballUVs = []; var velocities = []; //Ball constants var maxBalls = 500; var radius = 0.2; var ballIndex = 0; var maxZ = 20; var minY = -50; var initVel = 1.1; var gravity = vec3(0, -0.01, 0); var powerScale = 0.01; var pointsAroundCircle = 100; var maxNumTargets = 5; var currentTargets = maxNumTargets; var index = 4 * maxNumTargets; var targetRadius = 0.3; //Buffers var vBuffer; var texBuffer; var centerList = []; var check = 0; var scoreCounter; var actualScore = 0; var missCounter; var actualMissCount = 5; var highScore; var actualhighScore = 0;; window.onload = function init() { var canvas = document.getElementById( "gl-canvas" ); gl = WebGLUtils.setupWebGL( canvas ); if ( !gl ) { alert( "WebGL isn't available" ); } // // Configure WebGL // gl.enable(gl.DEPTH_TEST); gl.depthMask(true); gl.depthFunc(gl.LEQUAL); gl.depthRange(0.0, 1.0); //Init score stuff scoreCounter = document.getElementById("scoreCounter"); scoreCounter.innerHTML = "Score: " + actualScore; missCounter = document.getElementById("missCounter"); missCounter.innerHTML = "Misses left: " + actualMissCount; highScore = document.getElementById("highScore"); highScore.innerHTML = "High Score this session: " + actualhighScore; //Spawns a ball with the initial conditions given by the sliders document.getElementById("fireButton").onclick = function(){ var power = document.getElementById("power").value; var angleY = document.getElementById("angleY").value; var angleX = document.getElementById("angleX").value; var velMagnitude = initVel * power; var xVel = velMagnitude * Math.sin(radians(angleY)) * Math.sin(radians(angleX)); var yVel = velMagnitude * Math.cos(radians(angleX)); var zVel = velMagnitude * Math.cos(radians(angleY)) * Math.sin(radians(angleX)); velocities.push(vec3(xVel, yVel, zVel)); ballVertices.push(vec3(0, 0, 0)); ballUVs.push(vec2(-1, -1)); for(var i = 0.0; i <= pointsAroundCircle; i += 1.0){ ballVertices.push(vec3(radius * Math.cos(i * (2.0 * Math.PI)/100.0), radius * Math.sin(i * (2.0 * Math.PI)/100.0), 0)); ballUVs.push(vec2(-1, -1)); } ballIndex++; }; gl.viewport( 0, 0, canvas.width, canvas.height ); gl.clearColor( 0.0, 0.0, 0.0, 1.0 ); // Load shaders and initialize attribute buffers var program = initShaders( gl, "vertex-shader", "fragment-shader" ); gl.useProgram( program ); // Load the data into the GPU vBuffer = gl.createBuffer(); gl.bindBuffer( gl.ARRAY_BUFFER, vBuffer ); gl.bufferData( gl.ARRAY_BUFFER, maxNumTargets * maxBalls * (pointsAroundCircle + 2) * 12, gl.STREAM_DRAW ); var vPosition = gl.getAttribLocation( program, "vPosition" ); gl.vertexAttribPointer( vPosition, 3, gl.FLOAT, false, 0, 0 ); gl.enableVertexAttribArray( vPosition ); texBuffer = gl.createBuffer(); gl.bindBuffer(gl.ARRAY_BUFFER, texBuffer); gl.bufferData(gl.ARRAY_BUFFER, maxNumTargets * maxBalls * (pointsAroundCircle + 2) * 8, gl.STATIC_DRAW); var vTextureCoord = gl.getAttribLocation(program, "vTextureCoord"); gl.vertexAttribPointer(vTextureCoord, 2, gl.FLOAT, false, 0, 0); gl.enableVertexAttribArray(vTextureCoord); //Load texture and stuff gl.activeTexture(gl.TEXTURE0); var tex = loadTexture(gl, "target_texture.png"); gl.bindTexture(gl.TEXTURE_2D, tex); const samplerLoc = gl.getUniformLocation(program, "smplr"); gl.uniform1i(samplerLoc, 0); var maxZLoc = gl.getUniformLocation( program, "maxZ" ); gl.uniform1f(maxZLoc, maxZ); alert("Use the sliders to aim a ball and hit the targets. 
Miss too many and you have to restart, but each target you hit gives you more chances. Try to score as many points as you can!"); initTargets(); runPhysics(); }; //Initialize the targets and reset the balls function initTargets(){ centerList = []; targetVertices = []; ballVertices=[]; ballUVs = []; velocities = []; ballIndex = 0; for(var x = 0; x < maxNumTargets; x++){ //Set to random position let randomX = Math.random() - Math.random(); let randomY = Math.random() - Math.random(); let randomZ = Math.random() * maxZ/2 + 1; var center = vec3(randomX*randomZ, randomY*randomZ, randomZ); //keep track of the centers for collision purposes centerList.push(center); //Generate the four corners of the target (a square texture with transparent non-circle parts) targetVertices.push(vec3(center[0] - targetRadius, center[1] + targetRadius, center[2])); targetVertices.push(vec3(center[0] + targetRadius, center[1] + targetRadius, center[2])); targetVertices.push(vec3(center[0] - targetRadius, center[1] - targetRadius, center[2])); targetVertices.push(vec3(center[0] + targetRadius, center[1] - targetRadius, center[2])); //Generate the texture coordinates for each vertice in order targetUVs.push(vec2(0,1)); targetUVs.push(vec2(1,1)); targetUVs.push(vec2(0,0)); targetUVs.push(vec2(1,0)); } }; //The the code to run in each update function runPhysics(){ for(var i = 0; i < velocities.length; i++){ //Move the ball by its velocity for(var j = 0; j < (pointsAroundCircle + 2); j++){ ballVertices[(pointsAroundCircle + 2) * i + j] = add(ballVertices[(pointsAroundCircle + 2) * i + j], velocities[i]); } //Add acceleration to the velocity velocities[i] = add(velocities[i], gravity); //Check if the ball is out of bounds if(ballVertices[(pointsAroundCircle + 2) * i][2] > maxZ || ballVertices[(pointsAroundCircle + 2) * i][1] < minY){ velocities.splice(i, 1); ballVertices.splice((pointsAroundCircle + 2) * i, pointsAroundCircle + 2); ballUVs.splice((pointsAroundCircle + 2) * i, pointsAroundCircle + 2);
actualMissCount--; missCounter.innerHTML = "Misses left: " + actualMissCount; //If out of misses, reset game state while saving the new high score if necessary if(actualMissCount == 0) { if(actualScore > actualhighScore) { actualhighScore = actualScore; } alert("Out of misses! You got " + actualScore + " points this round."); actualScore = 0; scoreCounter.innerHTML = "Score: " + actualScore; actualMissCount = 5; missCounter.innerHTML = "Misses left: " + actualMissCount; highScore.innerHTML = "High Score this session: " + actualhighScore; initTargets(); } } //Check for collisions else{ let ballCenter = ballVertices[(pointsAroundCircle + 2) * i]; for(let x = 0; x < centerList.length; x++) { let a = centerList[x][0] - ballCenter[0]; let b = centerList[x][1] - ballCenter[1]; let c = centerList[x][2] - ballCenter[2]; //Check if the center of the ball is within range of the center of the target if(Math.sqrt(a * a + b * b + c * c) < (radius + targetRadius)/* && Math.abs(ballCenter[2] - centerList[x][2]) <= .5*/) { centerList[x] = vec3 (25, 25, 1000); targetVertices[x*4] = vec3(25,25,1000); targetVertices[x*4 + 1] = vec3(25,25,1000); targetVertices[x*4 + 2] = vec3(25,25,1000); targetVertices[x*4 + 3] = vec3(25,25,1000);//basically just sets this target to a place off screen currentTargets--; console.log("Hit! " + currentTargets + " remaining"); actualScore++; scoreCounter.innerHTML = "Score: " + actualScore; actualMissCount = actualMissCount + 2; missCounter.innerHTML = "Misses left: " + actualMissCount; //If there are no remaining targets, spawn more if(currentTargets === 0) { currentTargets = maxNumTargets; initTargets(); } } } } } //Add the vertices to the buffers gl.bindBuffer(gl.ARRAY_BUFFER, vBuffer ); gl.bufferSubData(gl.ARRAY_BUFFER, 0, flatten(ballVertices)); gl.bufferSubData(gl.ARRAY_BUFFER, ballVertices.length * 12, flatten(targetVertices)); gl.bindBuffer(gl.ARRAY_BUFFER, texBuffer); gl.bufferSubData(gl.ARRAY_BUFFER, 0, flatten(ballUVs)); gl.bufferSubData(gl.ARRAY_BUFFER, ballUVs.length * 8, flatten(targetUVs)); render(); }; function render() { gl.clear( gl.COLOR_BUFFER_BIT ); //Draw each ball for(var i = 0; i < ballIndex; i++){ gl.drawArrays( gl.TRIANGLE_FAN, i * (pointsAroundCircle + 2), pointsAroundCircle + 2); } //Draw each target for(var i = 0; i < maxNumTargets; i++){ gl.drawArrays(gl.TRIANGLE_STRIP, ballIndex * (pointsAroundCircle + 2) + i * 4, 4); } window.requestAnimFrame(runPhysics); }; //Function from developer.mozilla.org webgl tutorial function loadTexture(gl, url) { const texture = gl.createTexture(); gl.bindTexture(gl.TEXTURE_2D, texture); //Initialize texture with one blue pixel const level = 0; const internalFormat = gl.RGBA; const width = 1; const height = 1; const border = 0; const srcFormat = gl.RGBA; const srcType = gl.UNSIGNED_BYTE; const pixel = new Uint8Array([0, 0, 255, 255]); // opaque blue gl.texImage2D(gl.TEXTURE_2D, level, internalFormat, width, height, border, srcFormat, srcType, pixel); const image = new Image(); image.onload = function() { gl.bindTexture(gl.TEXTURE_2D, texture); gl.texImage2D(gl.TEXTURE_2D, level, internalFormat, srcFormat, srcType, image); // WebGL1 has different requirements for power of 2 images // vs non power of 2 images so check if the image is a // power of 2 in both dimensions. if (isPowerOf2(image.width) && isPowerOf2(image.height)) { // Yes, it's a power of 2. Generate mips. gl.generateMipmap(gl.TEXTURE_2D); } else { // No, it's not a power of 2. 
Turn off mips and set // wrapping to clamp to edge gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR); } }; image.src = url; return texture; }; function isPowerOf2(value) { return (value & (value - 1)) == 0; };
i--; ballIndex--;
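This two-token middle is the classic fix for mutating an array while iterating it forward: after splice removes the current block, stepping `i` back keeps the loop from skipping the element that slid into the vacated slot. The same pattern on a plain array, as a sketch (removeWhere is a hypothetical name):

// Remove-while-iterating: splice shifts later elements left,
// so decrement i after a removal to re-examine the current slot.
function removeWhere(items, pred) {
  for (var i = 0; i < items.length; i++) {
    if (pred(items[i])) {
      items.splice(i, 1);
      i--; // the next element now occupies slot i
    }
  }
}
var ys = [5, -60, 10, -80];
removeWhere(ys, function(y) { return y < -50; });
console.log(ys); // [5, 10]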
random_line_split
t_usefulness.rs
#![allow(clippy::excessive_precision)] use wide::*; use bytemuck::*; #[test] fn unpack_modify_and_repack_rgba_values() { let mask = u32x4::from(0xFF); // let input = u32x4::from([0xFF0000FF, 0x00FF00FF, 0x0000FFFF, 0x000000FF]); // unpack let r_actual = cast::<_, i32x4>(input >> 24).round_float(); let g_actual = cast::<_, i32x4>((input >> 16) & mask).round_float(); let b_actual = cast::<_, i32x4>((input >> 8) & mask).round_float(); let a_actual = cast::<_, i32x4>(input & mask).round_float(); let r_expected = f32x4::from([255.0, 0.0, 0.0, 0.0]); let g_expected = f32x4::from([0.0, 255.0, 0.0, 0.0]); let b_expected = f32x4::from([0.0, 0.0, 255.0, 0.0]); let a_expected = f32x4::from([255.0, 255.0, 255.0, 255.0]); assert_eq!(r_expected, r_actual); assert_eq!(g_expected, g_actual); assert_eq!(b_expected, b_actual); assert_eq!(a_expected, a_actual); // modify some of the data let r_new = (r_actual - f32x4::from(1.0)).max(f32x4::from(0.0)); let g_new = (g_actual - f32x4::from(1.0)).max(f32x4::from(0.0)); let b_new = (b_actual - f32x4::from(1.0)).max(f32x4::from(0.0)); let a_new = a_actual; // repack let r_u = cast::<i32x4, u32x4>(r_new.round_int()); let g_u = cast::<i32x4, u32x4>(g_new.round_int()); let b_u = cast::<i32x4, u32x4>(b_new.round_int()); let a_u = cast::<i32x4, u32x4>(a_new.round_int()); let output_actual = (r_u << 24) | (g_u << 16) | (b_u << 8) | (a_u); let output_expected = u32x4::from([0xFE0000FF, 0x00FE00FF, 0x0000FEFF, 0x000000FF]); assert_eq!(output_expected, output_actual); } /// Implement JPEG IDCT using i16x8. This has slightly different behavior than /// the normal 32 bit scalar implementation in libjpeg. It's a bit more accurate /// in some ways (since the constants are encoded in 15 bits instead of 12) but /// is more subject to hitting saturation during intermediate calculations, /// although that should normally not be a problem for photographic JPEGs. /// /// The main downside of this approach is that it is very slow to do saturating /// math on scalar types on some CPUs, so if you need bit-exact behavior on /// different architectures this is not the algorithm for you. #[test] fn test_dequantize_and_idct_i16() { fn to_fixed(x: f32) -> i16
fn kernel_i16(data: [i16x8; 8]) -> [i16x8; 8] { // kernel x let a2 = data[2]; let a6 = data[6]; let b0 = a2.saturating_add(a6).mul_scale_round_n(to_fixed(0.5411961)); let c0 = b0 .saturating_sub(a6) .saturating_sub(a6.mul_scale_round_n(to_fixed(0.847759065))); let c1 = b0.saturating_add(a2.mul_scale_round_n(to_fixed(0.765366865))); let a0 = data[0]; let a4 = data[4]; let b1 = a0.saturating_add(a4); let b2 = a0.saturating_sub(a4); let x0 = b1.saturating_add(c1); let x1 = b2.saturating_add(c0); let x2 = b2.saturating_sub(c0); let x3 = b1.saturating_sub(c1); // kernel t let t0 = data[7]; let t1 = data[5]; let t2 = data[3]; let t3 = data[1]; let p1 = t0.saturating_add(t3); let p2 = t1.saturating_add(t2); let p3 = t0.saturating_add(t2); let p4 = t1.saturating_add(t3); let p5t = p3.saturating_add(p4); let p5 = p5t.saturating_add(p5t.mul_scale_round_n(to_fixed(0.175875602))); let e0 = t0.mul_scale_round_n(to_fixed(0.298631336)); let e1 = t1 .saturating_add(t1) .saturating_add(t1.mul_scale_round_n(to_fixed(0.053119869))); let e2 = t2 .saturating_add(t2) .saturating_add(t2) .saturating_add(t2.mul_scale_round_n(to_fixed(0.072711026))); let e3 = t3.saturating_add(t3.mul_scale_round_n(to_fixed(0.501321110))); let f0 = p5.saturating_sub(p1.mul_scale_round_n(to_fixed(0.899976223))); let f1 = p5 .saturating_sub(p2) .saturating_sub(p2) .saturating_sub(p2.mul_scale_round_n(to_fixed(0.562915447))); let f2 = p3.mul_scale_round_n(to_fixed(-0.961570560)).saturating_sub(p3); let f3 = p4.mul_scale_round_n(to_fixed(-0.390180644)); let t3 = f0.saturating_add(f3).saturating_add(e3); let t2 = f1.saturating_add(f2).saturating_add(e2); let t1 = f1.saturating_add(f3).saturating_add(e1); let t0 = f0.saturating_add(f2).saturating_add(e0); [ x0.saturating_add(t3), x1.saturating_add(t2), x2.saturating_add(t1), x3.saturating_add(t0), x3.saturating_sub(t0), x2.saturating_sub(t1), x1.saturating_sub(t2), x0.saturating_sub(t3), ] } #[rustfmt::skip] let coefficients: [i16; 8 * 8] = [ -14, -39, 58, -2, 3, 3, 0, 1, 11, 27, 4, -3, 3, 0, 1, 0, -6, -13, -9, -1, -2, -1, 0, 0, -4, 0, -1, -2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, -3, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]; #[rustfmt::skip] let quantization_table: [i16; 8 * 8] = [ 8, 6, 5, 8, 12, 20, 26, 31, 6, 6, 7, 10, 13, 29, 30, 28, 7, 7, 8, 12, 20, 29, 35, 28, 7, 9, 11, 15, 26, 44, 40, 31, 9, 11, 19, 28, 34, 55, 52, 39, 12, 18, 28, 32, 41, 52, 57, 46, 25, 32, 39, 44, 52, 61, 60, 51, 36, 46, 48, 49, 56, 50, 52, 50 ]; let c: [i16x8; 8] = cast(coefficients); let q: [i16x8; 8] = cast(quantization_table); // coefficients normally go up to 1024, shift up by 3 to get extra precision const SHIFT: i16 = 3; let data = [ (c[0] * q[0]) << SHIFT, (c[1] * q[1]) << SHIFT, (c[2] * q[2]) << SHIFT, (c[3] * q[3]) << SHIFT, (c[4] * q[4]) << SHIFT, (c[5] * q[5]) << SHIFT, (c[6] * q[6]) << SHIFT, (c[7] * q[7]) << SHIFT, ]; let pass1 = kernel_i16(data); let transpose1 = i16x8::transpose(pass1); let pass2 = kernel_i16(transpose1); let result = i16x8::transpose(pass2); // offset to recenter to 0..256 and round properly const ROUND_FACTOR: i16 = 0x2020; let round_factor = i16x8::splat(ROUND_FACTOR); let result_adj = [ result[0].saturating_add(round_factor) >> (2 * SHIFT), result[1].saturating_add(round_factor) >> (2 * SHIFT), result[2].saturating_add(round_factor) >> (2 * SHIFT), result[3].saturating_add(round_factor) >> (2 * SHIFT), result[4].saturating_add(round_factor) >> (2 * SHIFT), result[5].saturating_add(round_factor) >> (2 * SHIFT), 
result[6].saturating_add(round_factor) >> (2 * SHIFT), result[7].saturating_add(round_factor) >> (2 * SHIFT), ]; let output: [i16; 64] = cast(result_adj); #[rustfmt::skip] let expected_output = [ 118, 92, 110, 83, 77, 93, 144, 198, 172, 116, 114, 87, 78, 93, 146, 191, 194, 107, 91, 76, 71, 93, 160, 198, 196, 100, 80, 74, 67, 92, 174, 209, 182, 104, 88, 81, 68, 89, 178, 206, 105, 64, 59, 59, 63, 94, 183, 201, 35, 27, 28, 37, 72, 121, 203, 204, 38, 45, 41, 47, 99, 154, 223, 208 ]; assert_eq!(expected_output, output); } /// Implement JPEG IDCT using i32x8. This is most similar to the scalar /// libjpeg version which has slightly different rounding properties than the 16 /// bit version. Some decoders are forced to use this if they want bit-by-bit /// compatibility across all architectures. #[test] fn test_dequantize_and_idct_i32() { fn to_fixed(x: f32) -> i32 { (x * 4096.0 + 0.5) as i32 } fn kernel_i32( [s0, s1, s2, s3, s4, s5, s6, s7]: [i32x8; 8], rounding_factor: i32, shift_right: i32, ) -> [i32x8; 8] { // kernel x let at = (s2 + s6) * to_fixed(0.5411961); let a0 = (s0 + s4) << 12; // multiply by 1, i.e. 4096 in fixed point let a1 = (s0 - s4) << 12; // multiply by 1, i.e. 4096 in fixed point let a2 = at + s6 * to_fixed(-1.847759065); let a3 = at + s2 * to_fixed(0.765366865); let x0 = a0 + a3 + rounding_factor; // add rounding factor here to avoid extra addition let x1 = a1 + a2 + rounding_factor; let x2 = a1 - a2 + rounding_factor; let x3 = a0 - a3 + rounding_factor; // kernel t let b0 = s7 + s1; let b1 = s5 + s3; let b2 = s7 + s3; let b3 = s5 + s1; let ct = (b2 + b3) * to_fixed(1.175875602); let c0 = ct + b0 * to_fixed(-0.899976223); let c1 = ct + b1 * to_fixed(-2.562915447); let c2 = b2 * to_fixed(-1.961570560); let c3 = b3 * to_fixed(-0.390180644); let t0 = s7 * to_fixed(0.298631336) + c0 + c2; let t1 = s5 * to_fixed(2.053119869) + c1 + c3; let t2 = s3 * to_fixed(3.072711026) + c1 + c2; let t3 = s1 * to_fixed(1.501321110) + c0 + c3; [ (x0 + t3) >> shift_right, (x1 + t2) >> shift_right, (x2 + t1) >> shift_right, (x3 + t0) >> shift_right, (x3 - t0) >> shift_right, (x2 - t1) >> shift_right, (x1 - t2) >> shift_right, (x0 - t3) >> shift_right, ] } #[rustfmt::skip] let coefficients: [i32; 8 * 8] = [ -14, -39, 58, -2, 3, 3, 0, 1, 11, 27, 4, -3, 3, 0, 1, 0, -6, -13, -9, -1, -2, -1, 0, 0, -4, 0, -1, -2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, -3, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]; #[rustfmt::skip] let quantization_table: [i32; 8 * 8] = [ 8, 6, 5, 8, 12, 20, 26, 31, 6, 6, 7, 10, 13, 29, 30, 28, 7, 7, 8, 12, 20, 29, 35, 28, 7, 9, 11, 15, 26, 44, 40, 31, 9, 11, 19, 28, 34, 55, 52, 39, 12, 18, 28, 32, 41, 52, 57, 46, 25, 32, 39, 44, 52, 61, 60, 51, 36, 46, 48, 49, 56, 50, 52, 50 ]; let c: [i32x8; 8] = cast(coefficients); let q: [i32x8; 8] = cast(quantization_table); let scaled = [ c[0] * q[0], c[1] * q[1], c[2] * q[2], c[3] * q[3], c[4] * q[4], c[5] * q[5], c[6] * q[6], c[7] * q[7], ]; // add rounding factor before shifting right let pass1 = kernel_i32(scaled, 1 << 9, 10); let transpose1 = i32x8::transpose(pass1); // add rounding factor before shifting right (include rebasing from -128..128 // to 0..256) let pass2 = kernel_i32(transpose1, 65536 + (128 << 17), 17); let result = i32x8::transpose(pass2); let output: [i32; 64] = cast(result); // same as other DCT test with some minor rounding differences #[rustfmt::skip] let expected_output = [ 118, 92, 110, 83, 77, 93, 144, 198, 172, 116, 114, 87, 78, 93, 146, 191, 194, 107, 91, 76, 71, 93, 160, 198, 196, 100, 80, 74, 67, 92, 
174, 209, 182, 104, 88, 81, 68, 89, 178, 206, 105, 64, 59, 59, 63, 94, 183, 201, 35, 27, 28, 37, 72, 121, 203, 204, 37, 45, 41, 47, 98, 154, 223, 208]; assert_eq!(expected_output, output); }
{ (x * 32767.0 + 0.5) as i16 }
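This middle is the Q15 conversion: a float constant is scaled by roughly 2^15 so it fits a signed 16-bit lane with 15 fractional bits. A scalar JavaScript sketch of the arithmetic the i16 test leans on; toFixedQ15 and mulQ15 are illustrative stand-ins, and treating mul_scale_round_n as "multiply, add 2^14, shift right 15" is an assumption about its rounding:

// Q15 fixed point: value ~= raw / 2^15.
function toFixedQ15(x) {
  return Math.trunc(x * 32767.0 + 0.5); // mirrors `(x * 32767.0 + 0.5) as i16`
}
// Rounding fixed-point multiply (assumed semantics of mul_scale_round_n).
function mulQ15(a, b) {
  return (a * b + (1 << 14)) >> 15;
}
var k = toFixedQ15(0.5411961); // ~17733
console.log(mulQ15(1000, k));  // 541, i.e. about 1000 * 0.5411961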
identifier_body
t_usefulness.rs
#![allow(clippy::excessive_precision)] use wide::*; use bytemuck::*; #[test] fn unpack_modify_and_repack_rgba_values() { let mask = u32x4::from(0xFF); // let input = u32x4::from([0xFF0000FF, 0x00FF00FF, 0x0000FFFF, 0x000000FF]); // unpack let r_actual = cast::<_, i32x4>(input >> 24).round_float(); let g_actual = cast::<_, i32x4>((input >> 16) & mask).round_float(); let b_actual = cast::<_, i32x4>((input >> 8) & mask).round_float(); let a_actual = cast::<_, i32x4>(input & mask).round_float();
let a_expected = f32x4::from([255.0, 255.0, 255.0, 255.0]); assert_eq!(r_expected, r_actual); assert_eq!(g_expected, g_actual); assert_eq!(b_expected, b_actual); assert_eq!(a_expected, a_actual); // modify some of the data let r_new = (r_actual - f32x4::from(1.0)).max(f32x4::from(0.0)); let g_new = (g_actual - f32x4::from(1.0)).max(f32x4::from(0.0)); let b_new = (b_actual - f32x4::from(1.0)).max(f32x4::from(0.0)); let a_new = a_actual; // repack let r_u = cast::<i32x4, u32x4>(r_new.round_int()); let g_u = cast::<i32x4, u32x4>(g_new.round_int()); let b_u = cast::<i32x4, u32x4>(b_new.round_int()); let a_u = cast::<i32x4, u32x4>(a_new.round_int()); let output_actual = (r_u << 24) | (g_u << 16) | (b_u << 8) | (a_u); let output_expected = u32x4::from([0xFE0000FF, 0x00FE00FF, 0x0000FEFF, 0x000000FF]); assert_eq!(output_expected, output_actual); } /// Implement JPEG IDCT using i16x8. This has slightly different behavior than /// the normal 32 bit scalar implementation in libjpeg. It's a bit more accurate /// in some ways (since the constants are encoded in 15 bits instead of 12) but /// is more subject to hitting saturation during intermediate calculations, /// although that should normally not be a problem for photographic JPEGs. /// /// The main downside of this approach is that it is very slow to do saturating /// math on scalar types on some CPUs, so if you need bit-exact behavior on /// different architectures this is not the algorithm for you. #[test] fn test_dequantize_and_idct_i16() { fn to_fixed(x: f32) -> i16 { (x * 32767.0 + 0.5) as i16 } fn kernel_i16(data: [i16x8; 8]) -> [i16x8; 8] { // kernel x let a2 = data[2]; let a6 = data[6]; let b0 = a2.saturating_add(a6).mul_scale_round_n(to_fixed(0.5411961)); let c0 = b0 .saturating_sub(a6) .saturating_sub(a6.mul_scale_round_n(to_fixed(0.847759065))); let c1 = b0.saturating_add(a2.mul_scale_round_n(to_fixed(0.765366865))); let a0 = data[0]; let a4 = data[4]; let b1 = a0.saturating_add(a4); let b2 = a0.saturating_sub(a4); let x0 = b1.saturating_add(c1); let x1 = b2.saturating_add(c0); let x2 = b2.saturating_sub(c0); let x3 = b1.saturating_sub(c1); // kernel t let t0 = data[7]; let t1 = data[5]; let t2 = data[3]; let t3 = data[1]; let p1 = t0.saturating_add(t3); let p2 = t1.saturating_add(t2); let p3 = t0.saturating_add(t2); let p4 = t1.saturating_add(t3); let p5t = p3.saturating_add(p4); let p5 = p5t.saturating_add(p5t.mul_scale_round_n(to_fixed(0.175875602))); let e0 = t0.mul_scale_round_n(to_fixed(0.298631336)); let e1 = t1 .saturating_add(t1) .saturating_add(t1.mul_scale_round_n(to_fixed(0.053119869))); let e2 = t2 .saturating_add(t2) .saturating_add(t2) .saturating_add(t2.mul_scale_round_n(to_fixed(0.072711026))); let e3 = t3.saturating_add(t3.mul_scale_round_n(to_fixed(0.501321110))); let f0 = p5.saturating_sub(p1.mul_scale_round_n(to_fixed(0.899976223))); let f1 = p5 .saturating_sub(p2) .saturating_sub(p2) .saturating_sub(p2.mul_scale_round_n(to_fixed(0.562915447))); let f2 = p3.mul_scale_round_n(to_fixed(-0.961570560)).saturating_sub(p3); let f3 = p4.mul_scale_round_n(to_fixed(-0.390180644)); let t3 = f0.saturating_add(f3).saturating_add(e3); let t2 = f1.saturating_add(f2).saturating_add(e2); let t1 = f1.saturating_add(f3).saturating_add(e1); let t0 = f0.saturating_add(f2).saturating_add(e0); [ x0.saturating_add(t3), x1.saturating_add(t2), x2.saturating_add(t1), x3.saturating_add(t0), x3.saturating_sub(t0), x2.saturating_sub(t1), x1.saturating_sub(t2), x0.saturating_sub(t3), ] } #[rustfmt::skip] let coefficients: [i16; 8 * 8] = [ -14, 
-39, 58, -2, 3, 3, 0, 1, 11, 27, 4, -3, 3, 0, 1, 0, -6, -13, -9, -1, -2, -1, 0, 0, -4, 0, -1, -2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, -3, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]; #[rustfmt::skip] let quantization_table: [i16; 8 * 8] = [ 8, 6, 5, 8, 12, 20, 26, 31, 6, 6, 7, 10, 13, 29, 30, 28, 7, 7, 8, 12, 20, 29, 35, 28, 7, 9, 11, 15, 26, 44, 40, 31, 9, 11, 19, 28, 34, 55, 52, 39, 12, 18, 28, 32, 41, 52, 57, 46, 25, 32, 39, 44, 52, 61, 60, 51, 36, 46, 48, 49, 56, 50, 52, 50 ]; let c: [i16x8; 8] = cast(coefficients); let q: [i16x8; 8] = cast(quantization_table); // coefficients normally go up to 1024, shift up by 3 to get extra precision const SHIFT: i16 = 3; let data = [ (c[0] * q[0]) << SHIFT, (c[1] * q[1]) << SHIFT, (c[2] * q[2]) << SHIFT, (c[3] * q[3]) << SHIFT, (c[4] * q[4]) << SHIFT, (c[5] * q[5]) << SHIFT, (c[6] * q[6]) << SHIFT, (c[7] * q[7]) << SHIFT, ]; let pass1 = kernel_i16(data); let transpose1 = i16x8::transpose(pass1); let pass2 = kernel_i16(transpose1); let result = i16x8::transpose(pass2); // offset to recenter to 0..256 and round properly const ROUND_FACTOR: i16 = 0x2020; let round_factor = i16x8::splat(ROUND_FACTOR); let result_adj = [ result[0].saturating_add(round_factor) >> (2 * SHIFT), result[1].saturating_add(round_factor) >> (2 * SHIFT), result[2].saturating_add(round_factor) >> (2 * SHIFT), result[3].saturating_add(round_factor) >> (2 * SHIFT), result[4].saturating_add(round_factor) >> (2 * SHIFT), result[5].saturating_add(round_factor) >> (2 * SHIFT), result[6].saturating_add(round_factor) >> (2 * SHIFT), result[7].saturating_add(round_factor) >> (2 * SHIFT), ]; let output: [i16; 64] = cast(result_adj); #[rustfmt::skip] let expected_output = [ 118, 92, 110, 83, 77, 93, 144, 198, 172, 116, 114, 87, 78, 93, 146, 191, 194, 107, 91, 76, 71, 93, 160, 198, 196, 100, 80, 74, 67, 92, 174, 209, 182, 104, 88, 81, 68, 89, 178, 206, 105, 64, 59, 59, 63, 94, 183, 201, 35, 27, 28, 37, 72, 121, 203, 204, 38, 45, 41, 47, 99, 154, 223, 208 ]; assert_eq!(expected_output, output); } /// Implement JPEG IDCT using i32x8. This is most similar to the scalar /// libjpeg version which has slightly different rounding propertis than the 16 /// bit version. Some decoders are forced to use this if they want bit-by-bit /// compability across all architectures. 
#[test] fn test_dequantize_and_idct_i32() { fn to_fixed(x: f32) -> i32 { (x * 4096.0 + 0.5) as i32 } fn kernel_i32( [s0, s1, s2, s3, s4, s5, s6, s7]: [i32x8; 8], rounding_factor: i32, shift_right: i32, ) -> [i32x8; 8] { // kernel x let at = (s2 + s6) * to_fixed(0.5411961); let a0 = (s0 + s4) << 12; // multiply by 1, ie 4096 in fixed point) let a1 = (s0 - s4) << 12; // multiply by 1, ie 4096 in fixed point) let a2 = at + s6 * to_fixed(-1.847759065); let a3 = at + s2 * to_fixed(0.765366865); let x0 = a0 + a3 + rounding_factor; // add rounding factor here to avoid extra addition let x1 = a1 + a2 + rounding_factor; let x2 = a1 - a2 + rounding_factor; let x3 = a0 - a3 + rounding_factor; // kernel t let b0 = s7 + s1; let b1 = s5 + s3; let b2 = s7 + s3; let b3 = s5 + s1; let ct = (b2 + b3) * to_fixed(1.175875602); let c0 = ct + b0 * to_fixed(-0.899976223); let c1 = ct + b1 * to_fixed(-2.562915447); let c2 = b2 * to_fixed(-1.961570560); let c3 = b3 * to_fixed(-0.390180644); let t0 = s7 * to_fixed(0.298631336) + c0 + c2; let t1 = s5 * to_fixed(2.053119869) + c1 + c3; let t2 = s3 * to_fixed(3.072711026) + c1 + c2; let t3 = s1 * to_fixed(1.501321110) + c0 + c3; [ (x0 + t3) >> shift_right, (x1 + t2) >> shift_right, (x2 + t1) >> shift_right, (x3 + t0) >> shift_right, (x3 - t0) >> shift_right, (x2 - t1) >> shift_right, (x1 - t2) >> shift_right, (x0 - t3) >> shift_right, ] } #[rustfmt::skip] let coefficients: [i32; 8 * 8] = [ -14, -39, 58, -2, 3, 3, 0, 1, 11, 27, 4, -3, 3, 0, 1, 0, -6, -13, -9, -1, -2, -1, 0, 0, -4, 0, -1, -2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, -3, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]; #[rustfmt::skip] let quantization_table: [i32; 8 * 8] = [ 8, 6, 5, 8, 12, 20, 26, 31, 6, 6, 7, 10, 13, 29, 30, 28, 7, 7, 8, 12, 20, 29, 35, 28, 7, 9, 11, 15, 26, 44, 40, 31, 9, 11, 19, 28, 34, 55, 52, 39, 12, 18, 28, 32, 41, 52, 57, 46, 25, 32, 39, 44, 52, 61, 60, 51, 36, 46, 48, 49, 56, 50, 52, 50 ]; let c: [i32x8; 8] = cast(coefficients); let q: [i32x8; 8] = cast(quantization_table); let scaled = [ c[0] * q[0], c[1] * q[1], c[2] * q[2], c[3] * q[3], c[4] * q[4], c[5] * q[5], c[6] * q[6], c[7] * q[7], ]; // add rounding factor before shifting right let pass1 = kernel_i32(scaled, 1 << 9, 10); let transpose1 = i32x8::transpose(pass1); // add rounding factor before shifting right (include rebasing from -128..128 // to 0..256) let pass2 = kernel_i32(transpose1, 65536 + (128 << 17), 17); let result = i32x8::transpose(pass2); let output: [i32; 64] = cast(result); // same as other DCT test with some minor rounding differences #[rustfmt::skip] let expected_output = [ 118, 92, 110, 83, 77, 93, 144, 198, 172, 116, 114, 87, 78, 93, 146, 191, 194, 107, 91, 76, 71, 93, 160, 198, 196, 100, 80, 74, 67, 92, 174, 209, 182, 104, 88, 81, 68, 89, 178, 206, 105, 64, 59, 59, 63, 94, 183, 201, 35, 27, 28, 37, 72, 121, 203, 204, 37, 45, 41, 47, 98, 154, 223, 208]; assert_eq!(expected_output, output); }
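One detail worth pulling out of kernel_i32: the rounding term is added into x0..x3 once, so every final `>> shift_right` rounds to nearest instead of truncating, at the cost of zero extra additions per output. The trick in isolation, sketched in scalar JavaScript:

// Round-to-nearest right shift: add half the divisor before shifting.
function roundShift(x, s) {
  return (x + (1 << (s - 1))) >> s;
}
console.log(roundShift(1535, 10)); // 1 (1535/1024 = 1.499..., rounds down)
console.log(roundShift(1536, 10)); // 2 (1536/1024 = 1.5, rounds up)
console.log(1536 >> 10);           // 1 (a bare shift truncates)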
let r_expected = f32x4::from([255.0, 0.0, 0.0, 0.0]); let g_expected = f32x4::from([0.0, 255.0, 0.0, 0.0]); let b_expected = f32x4::from([0.0, 0.0, 255.0, 0.0]);
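This middle spells out the per-channel expectations for the RGBA round-trip test: each channel of a packed 0xRRGGBBAA word is isolated by a shift plus an 0xFF mask, and packing is the mirror image. The same unpack/repack in scalar JavaScript, as a sketch (the >>> operators keep the 32-bit values unsigned):

// Unpack a packed 0xRRGGBBAA pixel into channels and repack it.
function unpack(p) {
  return { r: (p >>> 24) & 0xFF, g: (p >>> 16) & 0xFF, b: (p >>> 8) & 0xFF, a: p & 0xFF };
}
function pack(c) {
  return ((c.r << 24) | (c.g << 16) | (c.b << 8) | c.a) >>> 0;
}
var c = unpack(0xFF0000FF);        // { r: 255, g: 0, b: 0, a: 255 }
c.r = Math.max(0, c.r - 1);        // same "subtract one, clamp at zero" as the test
console.log(pack(c).toString(16)); // "fe0000ff"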
random_line_split
t_usefulness.rs
#![allow(clippy::excessive_precision)] use wide::*; use bytemuck::*; #[test] fn unpack_modify_and_repack_rgba_values() { let mask = u32x4::from(0xFF); // let input = u32x4::from([0xFF0000FF, 0x00FF00FF, 0x0000FFFF, 0x000000FF]); // unpack let r_actual = cast::<_, i32x4>(input >> 24).round_float(); let g_actual = cast::<_, i32x4>((input >> 16) & mask).round_float(); let b_actual = cast::<_, i32x4>((input >> 8) & mask).round_float(); let a_actual = cast::<_, i32x4>(input & mask).round_float(); let r_expected = f32x4::from([255.0, 0.0, 0.0, 0.0]); let g_expected = f32x4::from([0.0, 255.0, 0.0, 0.0]); let b_expected = f32x4::from([0.0, 0.0, 255.0, 0.0]); let a_expected = f32x4::from([255.0, 255.0, 255.0, 255.0]); assert_eq!(r_expected, r_actual); assert_eq!(g_expected, g_actual); assert_eq!(b_expected, b_actual); assert_eq!(a_expected, a_actual); // modify some of the data let r_new = (r_actual - f32x4::from(1.0)).max(f32x4::from(0.0)); let g_new = (g_actual - f32x4::from(1.0)).max(f32x4::from(0.0)); let b_new = (b_actual - f32x4::from(1.0)).max(f32x4::from(0.0)); let a_new = a_actual; // repack let r_u = cast::<i32x4, u32x4>(r_new.round_int()); let g_u = cast::<i32x4, u32x4>(g_new.round_int()); let b_u = cast::<i32x4, u32x4>(b_new.round_int()); let a_u = cast::<i32x4, u32x4>(a_new.round_int()); let output_actual = (r_u << 24) | (g_u << 16) | (b_u << 8) | (a_u); let output_expected = u32x4::from([0xFE0000FF, 0x00FE00FF, 0x0000FEFF, 0x000000FF]); assert_eq!(output_expected, output_actual); } /// Implement JPEG IDCT using i16x8. This has slightly different behavior than /// the normal 32 bit scalar implementation in libjpeg. It's a bit more accurate /// in some ways (since the constants are encoded in 15 bits instead of 12) but /// is more subject to hitting saturation during intermediate calculations, /// although that should normally not be a problem for photographic JPEGs. /// /// The main downside of this approach is that it is very slow to do saturating /// math on scalar types on some CPUs, so if you need bit-exact behavior on /// different architectures this is not the algorithm for you. #[test] fn test_dequantize_and_idct_i16() { fn to_fixed(x: f32) -> i16 { (x * 32767.0 + 0.5) as i16 } fn
(data: [i16x8; 8]) -> [i16x8; 8] { // kernel x let a2 = data[2]; let a6 = data[6]; let b0 = a2.saturating_add(a6).mul_scale_round_n(to_fixed(0.5411961)); let c0 = b0 .saturating_sub(a6) .saturating_sub(a6.mul_scale_round_n(to_fixed(0.847759065))); let c1 = b0.saturating_add(a2.mul_scale_round_n(to_fixed(0.765366865))); let a0 = data[0]; let a4 = data[4]; let b1 = a0.saturating_add(a4); let b2 = a0.saturating_sub(a4); let x0 = b1.saturating_add(c1); let x1 = b2.saturating_add(c0); let x2 = b2.saturating_sub(c0); let x3 = b1.saturating_sub(c1); // kernel t let t0 = data[7]; let t1 = data[5]; let t2 = data[3]; let t3 = data[1]; let p1 = t0.saturating_add(t3); let p2 = t1.saturating_add(t2); let p3 = t0.saturating_add(t2); let p4 = t1.saturating_add(t3); let p5t = p3.saturating_add(p4); let p5 = p5t.saturating_add(p5t.mul_scale_round_n(to_fixed(0.175875602))); let e0 = t0.mul_scale_round_n(to_fixed(0.298631336)); let e1 = t1 .saturating_add(t1) .saturating_add(t1.mul_scale_round_n(to_fixed(0.053119869))); let e2 = t2 .saturating_add(t2) .saturating_add(t2) .saturating_add(t2.mul_scale_round_n(to_fixed(0.072711026))); let e3 = t3.saturating_add(t3.mul_scale_round_n(to_fixed(0.501321110))); let f0 = p5.saturating_sub(p1.mul_scale_round_n(to_fixed(0.899976223))); let f1 = p5 .saturating_sub(p2) .saturating_sub(p2) .saturating_sub(p2.mul_scale_round_n(to_fixed(0.562915447))); let f2 = p3.mul_scale_round_n(to_fixed(-0.961570560)).saturating_sub(p3); let f3 = p4.mul_scale_round_n(to_fixed(-0.390180644)); let t3 = f0.saturating_add(f3).saturating_add(e3); let t2 = f1.saturating_add(f2).saturating_add(e2); let t1 = f1.saturating_add(f3).saturating_add(e1); let t0 = f0.saturating_add(f2).saturating_add(e0); [ x0.saturating_add(t3), x1.saturating_add(t2), x2.saturating_add(t1), x3.saturating_add(t0), x3.saturating_sub(t0), x2.saturating_sub(t1), x1.saturating_sub(t2), x0.saturating_sub(t3), ] } #[rustfmt::skip] let coefficients: [i16; 8 * 8] = [ -14, -39, 58, -2, 3, 3, 0, 1, 11, 27, 4, -3, 3, 0, 1, 0, -6, -13, -9, -1, -2, -1, 0, 0, -4, 0, -1, -2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, -3, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]; #[rustfmt::skip] let quantization_table: [i16; 8 * 8] = [ 8, 6, 5, 8, 12, 20, 26, 31, 6, 6, 7, 10, 13, 29, 30, 28, 7, 7, 8, 12, 20, 29, 35, 28, 7, 9, 11, 15, 26, 44, 40, 31, 9, 11, 19, 28, 34, 55, 52, 39, 12, 18, 28, 32, 41, 52, 57, 46, 25, 32, 39, 44, 52, 61, 60, 51, 36, 46, 48, 49, 56, 50, 52, 50 ]; let c: [i16x8; 8] = cast(coefficients); let q: [i16x8; 8] = cast(quantization_table); // coefficients normally go up to 1024, shift up by 3 to get extra precision const SHIFT: i16 = 3; let data = [ (c[0] * q[0]) << SHIFT, (c[1] * q[1]) << SHIFT, (c[2] * q[2]) << SHIFT, (c[3] * q[3]) << SHIFT, (c[4] * q[4]) << SHIFT, (c[5] * q[5]) << SHIFT, (c[6] * q[6]) << SHIFT, (c[7] * q[7]) << SHIFT, ]; let pass1 = kernel_i16(data); let transpose1 = i16x8::transpose(pass1); let pass2 = kernel_i16(transpose1); let result = i16x8::transpose(pass2); // offset to recenter to 0..256 and round properly const ROUND_FACTOR: i16 = 0x2020; let round_factor = i16x8::splat(ROUND_FACTOR); let result_adj = [ result[0].saturating_add(round_factor) >> (2 * SHIFT), result[1].saturating_add(round_factor) >> (2 * SHIFT), result[2].saturating_add(round_factor) >> (2 * SHIFT), result[3].saturating_add(round_factor) >> (2 * SHIFT), result[4].saturating_add(round_factor) >> (2 * SHIFT), result[5].saturating_add(round_factor) >> (2 * SHIFT), result[6].saturating_add(round_factor) >> (2 * 
SHIFT), result[7].saturating_add(round_factor) >> (2 * SHIFT), ]; let output: [i16; 64] = cast(result_adj); #[rustfmt::skip] let expected_output = [ 118, 92, 110, 83, 77, 93, 144, 198, 172, 116, 114, 87, 78, 93, 146, 191, 194, 107, 91, 76, 71, 93, 160, 198, 196, 100, 80, 74, 67, 92, 174, 209, 182, 104, 88, 81, 68, 89, 178, 206, 105, 64, 59, 59, 63, 94, 183, 201, 35, 27, 28, 37, 72, 121, 203, 204, 38, 45, 41, 47, 99, 154, 223, 208 ]; assert_eq!(expected_output, output); } /// Implement JPEG IDCT using i32x8. This is most similar to the scalar /// libjpeg version which has slightly different rounding propertis than the 16 /// bit version. Some decoders are forced to use this if they want bit-by-bit /// compability across all architectures. #[test] fn test_dequantize_and_idct_i32() { fn to_fixed(x: f32) -> i32 { (x * 4096.0 + 0.5) as i32 } fn kernel_i32( [s0, s1, s2, s3, s4, s5, s6, s7]: [i32x8; 8], rounding_factor: i32, shift_right: i32, ) -> [i32x8; 8] { // kernel x let at = (s2 + s6) * to_fixed(0.5411961); let a0 = (s0 + s4) << 12; // multiply by 1, ie 4096 in fixed point) let a1 = (s0 - s4) << 12; // multiply by 1, ie 4096 in fixed point) let a2 = at + s6 * to_fixed(-1.847759065); let a3 = at + s2 * to_fixed(0.765366865); let x0 = a0 + a3 + rounding_factor; // add rounding factor here to avoid extra addition let x1 = a1 + a2 + rounding_factor; let x2 = a1 - a2 + rounding_factor; let x3 = a0 - a3 + rounding_factor; // kernel t let b0 = s7 + s1; let b1 = s5 + s3; let b2 = s7 + s3; let b3 = s5 + s1; let ct = (b2 + b3) * to_fixed(1.175875602); let c0 = ct + b0 * to_fixed(-0.899976223); let c1 = ct + b1 * to_fixed(-2.562915447); let c2 = b2 * to_fixed(-1.961570560); let c3 = b3 * to_fixed(-0.390180644); let t0 = s7 * to_fixed(0.298631336) + c0 + c2; let t1 = s5 * to_fixed(2.053119869) + c1 + c3; let t2 = s3 * to_fixed(3.072711026) + c1 + c2; let t3 = s1 * to_fixed(1.501321110) + c0 + c3; [ (x0 + t3) >> shift_right, (x1 + t2) >> shift_right, (x2 + t1) >> shift_right, (x3 + t0) >> shift_right, (x3 - t0) >> shift_right, (x2 - t1) >> shift_right, (x1 - t2) >> shift_right, (x0 - t3) >> shift_right, ] } #[rustfmt::skip] let coefficients: [i32; 8 * 8] = [ -14, -39, 58, -2, 3, 3, 0, 1, 11, 27, 4, -3, 3, 0, 1, 0, -6, -13, -9, -1, -2, -1, 0, 0, -4, 0, -1, -2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, -3, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]; #[rustfmt::skip] let quantization_table: [i32; 8 * 8] = [ 8, 6, 5, 8, 12, 20, 26, 31, 6, 6, 7, 10, 13, 29, 30, 28, 7, 7, 8, 12, 20, 29, 35, 28, 7, 9, 11, 15, 26, 44, 40, 31, 9, 11, 19, 28, 34, 55, 52, 39, 12, 18, 28, 32, 41, 52, 57, 46, 25, 32, 39, 44, 52, 61, 60, 51, 36, 46, 48, 49, 56, 50, 52, 50 ]; let c: [i32x8; 8] = cast(coefficients); let q: [i32x8; 8] = cast(quantization_table); let scaled = [ c[0] * q[0], c[1] * q[1], c[2] * q[2], c[3] * q[3], c[4] * q[4], c[5] * q[5], c[6] * q[6], c[7] * q[7], ]; // add rounding factor before shifting right let pass1 = kernel_i32(scaled, 1 << 9, 10); let transpose1 = i32x8::transpose(pass1); // add rounding factor before shifting right (include rebasing from -128..128 // to 0..256) let pass2 = kernel_i32(transpose1, 65536 + (128 << 17), 17); let result = i32x8::transpose(pass2); let output: [i32; 64] = cast(result); // same as other DCT test with some minor rounding differences #[rustfmt::skip] let expected_output = [ 118, 92, 110, 83, 77, 93, 144, 198, 172, 116, 114, 87, 78, 93, 146, 191, 194, 107, 91, 76, 71, 93, 160, 198, 196, 100, 80, 74, 67, 92, 174, 209, 182, 104, 88, 81, 68, 89, 178, 206, 
105, 64, 59, 59, 63, 94, 183, 201, 35, 27, 28, 37, 72, 121, 203, 204, 37, 45, 41, 47, 98, 154, 223, 208]; assert_eq!(expected_output, output); }
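Both IDCT tests share the same separable structure: run the 1-D kernel on all rows, transpose, run it again, and transpose back, which is equivalent to applying the 2-D transform. A compact sketch of that skeleton with a plain 8x8 transpose standing in for i16x8::transpose / i32x8::transpose (transpose8 and idct2d are hypothetical names; kernel1d is whatever 1-D pass you plug in):

// Separable 2-D transform: rows, transpose, rows, transpose.
function transpose8(m) {
  var t = [];
  for (var r = 0; r < 8; r++) {
    t.push([]);
    for (var c = 0; c < 8; c++) t[r].push(m[c][r]);
  }
  return t;
}
function idct2d(block, kernel1d) {
  var pass1 = transpose8(kernel1d(block)); // first pass + transpose
  return transpose8(kernel1d(pass1));      // second pass + transpose back
}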
kernel_i16
identifier_name
page.js
module.exports = (function(win, doc) { var $$ = win.mvue, _CACHE = $$._CACHE_; /** * Given a page name, return the page component's alias */ function _makeViewName(name, tag) { var ret = "ma-"+name; ret = ret.replace(/[\-|\\|\/]/g, "-"); return tag ? "<"+ret+"></"+ret+">" : ret; } /** * Normalize parameter values: convert "000" to an empty string */ function _transParams(params) { for (var key in params) { if (params[key] == "000") { params[key] = ""; } } return params; } /** * Initialization used when the page has no resolve method */ function pageDefaultInit(_params) { var that = this; // Register a data-receiving event, used to initialize data manually that.$on("__updateData", function(initData) { if (typeof initData == "object") { for(var key in initData) { that.$set(key, initData[key]); } } }); that.$update = function(initData) { that.$emit("__updateData", initData); } that.$set('params', _params || {}); that.$update(_params); // this.$emit("mgViewShow"); // this.$broadcast("mgViewShow"); } /** * Initialization used when the page has a resolve method */ function pageResolveInit(init, _params) { var that = this, _defer = $.defer(); // Register a data-update event, used to trigger a refresh manually that.$on("__refreshData", function(params, defer) { var initDefer = _defer; // Create the follow-up data-refresh callback if (initDefer.status == "resolved") { initDefer = $.defer(); initDefer.then(function(initData) { that.$emit("__updateData", initData); if (defer && defer.resolve) { defer.resolve(initData); } }) } init.call(that, params || _params, initDefer); }) // Push the resolved data onto the instance via the event registered above _defer.then(function(initData) { that.$emit("__updateData", initData); }); pageDefaultInit.call(that, _params); that.$emit("__refreshData"); // trigger one update manually // Bind a shortcut method for refreshing data that.$refresh = function(params, defer) { that.$emit("__refreshData", params, defer); } } /** * Wrap the page object: add the shared low-level methods and run the related setup. * * ============================================================ * Notes on the most important operations: * * compiled: in this hook the page tries to insert the page instance into its * container; the container object gets an MG_CHILDREN reference pointing at the * page instance, and the page's own wrapper gets an MG_PAGE object holding the * page's creation info, such as the creation parameters and the hosting container. * * ready: in this hook the page fires an mgViewShow event, telling the components * inside the page to render, or to restart their UI events and listeners. * * beforeDestroy: in this hook the page fires an mgViewDestroy event, telling the * page's UI components to release resources and perform similar cleanup. */ function commonView(page) { var old = page.data, mixins; // Under the new convention, a component's data must be a function if (!$.isFun(old)) { page.data = function() { return $.extend(true, {params: {}}, old); } } page.replace = true; page.inherit = true; // Shared low-level page initialization methods mixins = { compiled: function() { var $parent = this.$parent, mgpage, $wrap; // If rendered by insertion into a container, run the related initialization if ($parent && $parent.$options.name == "mgRender") { mgpage = $parent.MG_PAGE; $wrap = mgpage.wrapper; if (this.$el.nodeType === 3) { this.$el = this.$el.nextElementSibling; this._isFragment = false; }
this.$parent; } else { $wrap = $(this.$el); mgpage = {params: null, wrapper: null}; mgpage.wrapper = $wrap; mgpage.params = {}; } // Add the page's private style class to the containing element if (page && page.style && page.style.page) { mgpage.wrapper.addClass(page.style.page); } // Bind object handles onto the DOM this.MG_PAGE = mgpage; this.$el.MG_VUE_CTRL = this; }, ready: function() { var mgpage = this.MG_PAGE, params, $wrap = mgpage.wrapper, inpara = $(this.$el).attr("params"); /* Repair the $parent chain of the current instance */ var $par = this.$el.parentNode, fscope; do { if ($par.MG_VUE_CTRL) { fscope = $par.MG_VUE_CTRL; break; } $par = $par.parentNode; } while ($par && $par != doc.body); fscope = fscope ? fscope : undefined; this.$parent = fscope; if (inpara != null) { if (fscope && fscope[inpara] !== undefined) { inpara = fscope[inpara] } else if (typeof inpara == "string") { inpara = $.parseJSON(inpara); } if (typeof inpara == "object") { mgpage.params = inpara; } } params = _transParams(mgpage.params); if ($.isFun(page.resolve)) { pageResolveInit.call(this, page.resolve, params); } else { pageDefaultInit.call(this, params); } // Notify the parent render container that this element has finished rendering mgpage.mgwrap && mgpage.mgwrap.$emit("mgViewRender", this); $$.emit("viewReady", $wrap, this); }, beforeDestroy: function() { var $wrap = this.MG_PAGE.wrapper; $$.emit("viewDestroy", $wrap, this); this.$emit("mgViewHide"); this.$emit("mgViewDestroy"); }, } // Add the low-level methods onto each concrete page object if (Array.isArray(page.mixins)) { page.mixins.push(mixins); } else { page.mixins = [mixins]; } return page; // return the modified page object } /*========================================================== * Methods exposed on the global object, chiefly initView and loadView *==========================================================*/ $$.renderView = function(view, $wrap, paras) { if (!view.match(/^ma-/)) view = _makeViewName(view); var tmp = '<'+view+'></'+view+'>', ret; ret = new Vue({ template: tmp, name: "mgRender" }); $wrap[0].MG_PAGE = ret.MG_PAGE = { params : paras, wrapper: $wrap }; $wrap[0].MG_WRAP = ret; return ret.$mount(); } /** * Async component loader that combines the requirements of Vue and * Webpack; the underlying mechanism is described at: * * http://cn.vuejs.org/guide/components.html#异步组件 */ $$.initView = function(resolve) { return function(page) { // Initialize the page component object resolve(commonView(page)); } } /** * Route wrapper that makes sync and async component loading behave the * same, and inserts the page to render directly into MG-VIEW */ $$.loadView = function(name, initFix) { var cname = _makeViewName(name); /** * If given an object the page is loaded synchronously, so convert it * first so that sync and async loading follow the same logic */ if (typeof initFix == "object") { initFix = commonView(initFix); } $$.component(cname, initFix); /** * Return a new handler that also fires the routerOn event */ return function(params, lastMatch, nowMatch, $route) { var $wrapper, $cache = $$._CACHE_, find = -1, $show, $wshow, $hide, $whide, last = -1, mitem, offset, eve = $route.evetype; if (lastMatch.match) { lastMatch = lastMatch.match; lastMatch = lastMatch[lastMatch.length-1].item; } // Look up the cached page object that is about to be inserted mitem = nowMatch[nowMatch.length-1].item; for(var i=0; i<$cache.length; i++) { if ($cache[i].item == mitem) { find = i; } if ($cache[i].item == lastMatch) { last = i; } } if (find !== -1) { $wshow = $cache[find].$wrap; $show = $wshow[0].MG_CHILDREN; } else { $wrapper = $('<div class="wrapper"></div>'); $wrapper.appendTo($("mg-view")); $wshow = $wrapper; /* Fresh insert: no instance and no cache entry yet, so update the cache */ var addCache = { item: mitem, $wrap: $wshow }; if (eve == "pushstate") { var end = $cache.push(addCache), del; if (last !== -1) { for(var i=last+1; i<end-1; i++) { var _item = $cache[i], _wrap = _item.$wrap[0]; _wrap.MG_WRAP.$destroy(true); _item.$wrap.remove(); } $cache.splice(last+1, end-last-2); } // Trim head/tail to cap the cache length if ($cache.length > 2) { del = $cache.shift().$wrap; del[0].MG_WRAP.$destroy(true); del.remove(); last -= 1; // fix up the old element's index }; } else { $cache.unshift(addCache); $cache.splice(1, last); // Trim head/tail to cap the cache length if ($cache.length > 2) { del = $cache.pop().$wrap; del[0].MG_WRAP.$destroy(true); del.remove(); last += 1; // fix up the old element's index }; } } // Look up the cached page object that is about to be hidden $hide = $cache[last]; $whide = $hide ? $hide.$wrap : null; // If an inserted instance already exists it came from the cache, so invoke the hooks manually if ($show && $show.$emit) { if ($$.refreshView) { var fixpara = _transParams(params); $show.$set("params", fixpara); $show.$refresh && $show.$refresh(fixpara); $show.$emit("hook:ready"); } $show.$emit("mgViewShow"); $show.$broadcast("mgViewShow"); } // The routeOn callback must run first so the loader gets inserted into the page $$.emit("routeOn", $wshow, $whide, nowMatch, $route); // Create the new object to insert and render it immediately if (!$show) $$.renderView(cname, $wshow, params); } } $$.on("routeOn.page", function($wshow, $whide) { // Toggle the visibility of the entering and leaving pages $.rafCall(function() { $whide && $whide.addClass("hide"); $wshow.removeClass("hide"); }); }); })(window, document);
this.$appendTo($wrap[0]); $wrap[0].MG_CHILDREN = this; mgpage.mgwrap =
conditional_block
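The trickiest part of page.js above is the cache juggling in loadView: at most two page wrappers stay alive, with eviction from the head on a pushstate (forward) navigation and from the tail on a back navigation, per the "trim head/tail" comments. A rough sketch of that bounded two-slot history, transcribed into Go for illustration only; the names are hypothetical, and the original operates on jQuery-wrapped DOM nodes and destroys the evicted Vue instances:

package main

import "fmt"

// pageCache mimics the idea behind loadView's $cache: keep at most limit
// live page wrappers and evict from the end opposite the travel direction.
// Everything here is illustrative; it is not the page.js data structure.
type pageCache struct {
	items []string
	limit int
}

// pushForward models a pushstate (forward) navigation: append the new page
// and evict the oldest entry at the head once over the limit.
func (c *pageCache) pushForward(item string) {
	c.items = append(c.items, item)
	for len(c.items) > c.limit {
		c.items = c.items[1:]
	}
}

// pushBack models a back navigation: prepend the page and evict at the tail.
func (c *pageCache) pushBack(item string) {
	c.items = append([]string{item}, c.items...)
	for len(c.items) > c.limit {
		c.items = c.items[:len(c.items)-1]
	}
}

func main() {
	c := &pageCache{limit: 2}
	c.pushForward("home")
	c.pushForward("list")
	c.pushForward("detail") // "home" is evicted at the head
	fmt.Println(c.items)    // [list detail]
	c.pushBack("home")      // navigating back; "detail" is evicted at the tail
	fmt.Println(c.items)    // [home list]
}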
page.js
module.exports = (function(win, doc) { var $$ = win.mvue, _CACHE = $$._CACHE_; /** * Returns the page component alias for the given page name */ function _makeViewName(name, tag) { var ret = "ma-"+name; ret = ret.replace(/[\-|\\|\/]/g, "-"); return tag ? "<"+ret+"></"+ret+">" : ret; } /** * Parameter value fix-up: converts "000" to empty */ function _transParams(params) { for (var key in params) { if (params[key] == "000") { params[key] = ""; } } return params; } /** * Initialization when the page has no resolve method */ function pageDefaultInit(_params) { var that = this; // Register the data-receive event, used to initialize data manually that.$on("__updateData", function(initData) { if (typeof initData == "object") { for(var key in initData) { that.$set(key, initData[key]); } } }); that.$update = function(initData) { that.$emit("__updateData", initData); } that.$set('params', _params || {}); that.$update(_params); // this.$emit("mgViewShow"); // this.$broadcast("mgViewShow"); } /** * Initialization when the page has a resolve method */ function pageResolveInit(init, _params) { var that = this, _defer = $.defer(); // Register the data-refresh event, used to trigger a refresh manually that.$on("__refreshData", function(params, defer) { var initDefer = _defer; // Create the follow-up data-refresh callback if (initDefer.status == "resolved") { initDefer = $.defer(); initDefer.then(function(initData) { that.$emit("__updateData", initData); if (defer && defer.resolve) { defer.resolve(initData); } }) } init.call(that, params || _params, initDefer); }) // Push the data onto the instance via the event registered above _defer.then(function(initData) { that.$emit("__updateData", initData); }); pageDefaultInit.call(that, _params); that.$emit("__refreshData"); // trigger an update manually // Bind the data-refresh shortcut method that.$refresh = function(params, defer) { that.$emit("__refreshData", params, defer); } } /** * Wraps a page object, adding the shared low-level methods and running * the related setup. * * ============================================================ * Notes on the more important steps: * * compiled: in this hook the page instance is inserted into the container * object; the container gains an MG_CHILDREN reference pointing at the * page instance, and the page's own container gains an MG_PAGE object * holding creation info such as the creation params and host container. * * ready: in this hook an mgViewShow event is fired to tell the components * on the page to render, or to restart their UI events and listeners. * * beforeDestroy: in this hook an mgViewDestroy event is fired to tell the * page's UI components to release resources. */ function commonView(page) { var old = page.data, mixins; // With the new approach a component's data must be a function if (!$.isFun(old)) { page.data = function() { return $.extend(true, {params: {}}, old); } } page.replace = true; page.inherit = true; // Low-level page initialization and related methods mixins = { compiled: function() { var $parent = this.$parent, mgpage, $wrap; // When rendering via container insertion, run the related initialization
if (this.$el.nodeType === 3) { this.$el = this.$el.nextElementSibling; this._isFragment = false; } this.$appendTo($wrap[0]); $wrap[0].MG_CHILDREN = this; mgpage.mgwrap = this.$parent; } else { $wrap = $(this.$el); mgpage = {params: null, wrapper: null}; mgpage.wrapper = $wrap; mgpage.params = {}; } // Add the page's private style class to the containing element if (page && page.style && page.style.page) { mgpage.wrapper.addClass(page.style.page); } // Bind object handles onto the DOM this.MG_PAGE = mgpage; this.$el.MG_VUE_CTRL = this; }, ready: function() { var mgpage = this.MG_PAGE, params, $wrap = mgpage.wrapper, inpara = $(this.$el).attr("params"); /* Repair the $parent chain of the current instance */ var $par = this.$el.parentNode, fscope; do { if ($par.MG_VUE_CTRL) { fscope = $par.MG_VUE_CTRL; break; } $par = $par.parentNode; } while ($par && $par != doc.body); fscope = fscope ? fscope : undefined; this.$parent = fscope; if (inpara != null) { if (fscope && fscope[inpara] !== undefined) { inpara = fscope[inpara] } else if (typeof inpara == "string") { inpara = $.parseJSON(inpara); } if (typeof inpara == "object") { mgpage.params = inpara; } } params = _transParams(mgpage.params); if ($.isFun(page.resolve)) { pageResolveInit.call(this, page.resolve, params); } else { pageDefaultInit.call(this, params); } // Notify the parent render container that this element has finished rendering mgpage.mgwrap && mgpage.mgwrap.$emit("mgViewRender", this); $$.emit("viewReady", $wrap, this); }, beforeDestroy: function() { var $wrap = this.MG_PAGE.wrapper; $$.emit("viewDestroy", $wrap, this); this.$emit("mgViewHide"); this.$emit("mgViewDestroy"); }, } // Add the low-level methods onto each concrete page object if (Array.isArray(page.mixins)) { page.mixins.push(mixins); } else { page.mixins = [mixins]; } return page; // return the modified page object } /*========================================================== * Methods exposed on the global object, chiefly initView and loadView *==========================================================*/ $$.renderView = function(view, $wrap, paras) { if (!view.match(/^ma-/)) view = _makeViewName(view); var tmp = '<'+view+'></'+view+'>', ret; ret = new Vue({ template: tmp, name: "mgRender" }); $wrap[0].MG_PAGE = ret.MG_PAGE = { params : paras, wrapper: $wrap }; $wrap[0].MG_WRAP = ret; return ret.$mount(); } /** * Async component loader that combines the requirements of Vue and * Webpack; the underlying mechanism is described at: * * http://cn.vuejs.org/guide/components.html#异步组件 */ $$.initView = function(resolve) { return function(page) { // Initialize the page component object resolve(commonView(page)); } } /** * Route wrapper that makes sync and async component loading behave the * same, and inserts the page to render directly into MG-VIEW */ $$.loadView = function(name, initFix) { var cname = _makeViewName(name); /** * If given an object the page is loaded synchronously, so convert it * first so that sync and async loading follow the same logic */ if (typeof initFix == "object") { initFix = commonView(initFix); } $$.component(cname, initFix); /** * Return a new handler that also fires the routerOn event */ return function(params, lastMatch, nowMatch, $route) { var $wrapper, $cache = $$._CACHE_, find = -1, $show, $wshow, $hide, $whide, last = -1, mitem, offset, eve = $route.evetype; if (lastMatch.match) { lastMatch = lastMatch.match; lastMatch = lastMatch[lastMatch.length-1].item; } // Look up the cached page object that is about to be inserted mitem = nowMatch[nowMatch.length-1].item; for(var i=0; i<$cache.length; i++) { if ($cache[i].item == mitem) { find = i; } if ($cache[i].item == lastMatch) { last = i; } } if (find !== -1) { $wshow = $cache[find].$wrap; $show = $wshow[0].MG_CHILDREN; } else { $wrapper = $('<div class="wrapper"></div>'); $wrapper.appendTo($("mg-view")); $wshow = $wrapper; /* Fresh insert: no instance and no cache entry yet, so update the cache */ var addCache = { item: mitem, $wrap: $wshow }; if (eve == "pushstate") { var end = $cache.push(addCache), del; if (last !== -1) { for(var i=last+1; i<end-1; i++) { var _item = $cache[i], _wrap = _item.$wrap[0]; _wrap.MG_WRAP.$destroy(true); _item.$wrap.remove(); } $cache.splice(last+1, end-last-2); } // Trim head/tail to cap the cache length if ($cache.length > 2) { del = $cache.shift().$wrap; del[0].MG_WRAP.$destroy(true); del.remove(); last -= 1; // fix up the old element's index }; } else { $cache.unshift(addCache); $cache.splice(1, last); // Trim head/tail to cap the cache length if ($cache.length > 2) { del = $cache.pop().$wrap; del[0].MG_WRAP.$destroy(true); del.remove(); last += 1; // fix up the old element's index }; } } // Look up the cached page object that is about to be hidden $hide = $cache[last]; $whide = $hide ? $hide.$wrap : null; // If an inserted instance already exists it came from the cache, so invoke the hooks manually if ($show && $show.$emit) { if ($$.refreshView) { var fixpara = _transParams(params); $show.$set("params", fixpara); $show.$refresh && $show.$refresh(fixpara); $show.$emit("hook:ready"); } $show.$emit("mgViewShow"); $show.$broadcast("mgViewShow"); } // The routeOn callback must run first so the loader gets inserted into the page $$.emit("routeOn", $wshow, $whide, nowMatch, $route); // Create the new object to insert and render it immediately if (!$show) $$.renderView(cname, $wshow, params); } } $$.on("routeOn.page", function($wshow, $whide) { // Toggle the visibility of the entering and leaving pages $.rafCall(function() { $whide && $whide.addClass("hide"); $wshow.removeClass("hide"); }); }); })(window, document);
if ($parent && $parent.$options.name == "mgRender") { mgpage = $parent.MG_PAGE; $wrap = mgpage.wrapper;
random_line_split
page.js
module.exports = (function(win, doc) { var $$ = win.mvue, _CACHE = $$._CACHE_; /** * Returns the page component alias for the given page name */ function _makeViewName(name, tag) { v
"+name; ret = ret.replace(/[\-|\\|\/]/g, "-"); return tag ? "<"+ret+"></"+ret+">" : ret; } /** * 参数值修正,转换000为空 */ function _transParams(params) { for (var key in params) { if (params[key] == "000") { params[key] = ""; } } return params; } /** * 如果页面没有 resolve 方法,初始化操作 */ function pageDefaultInit(_params) { var that = this; // 注册 数据接受事件,用于手动初始化数据 that.$on("__updateData", function(initData) { if (typeof initData == "object") { for(var key in initData) { that.$set(key, initData[key]); } } }); that.$update = function(initData) { that.$emit("__updateData", initData); } that.$set('params', _params || {}); that.$update(_params); // this.$emit("mgViewShow"); // this.$broadcast("mgViewShow"); } /** * 如果页面有 resolve 方法,初始化操作 */ function pageResolveInit(init, _params) { var that = this, _defer = $.defer(); // 注册 数据更新事件,用于手动触发刷新动作 that.$on("__refreshData", function(params, defer) { var initDefer = _defer; // 创建后续的数据刷新回调动作 if (initDefer.status == "resolved") { initDefer = $.defer(); initDefer.then(function(initData) { that.$emit("__updateData", initData); if (defer && defer.resolve) { defer.resolve(initData); } }) } init.call(that, params || _params, initDefer); }) // 通过前面注册的事件,将数据更新到对象实例上 _defer.then(function(initData) { that.$emit("__updateData", initData); }); pageDefaultInit.call(that, _params); that.$emit("__refreshData"); // 手动触发一下更新 // 绑定数据更新快捷方法 that.$refresh = function(params, defer) { that.$emit("__refreshData", params, defer); } } /** * 包装页面对象,添加底层共用的方法,同时执行相关的操作 * * ============================================================ * 比较重要的几个操作说明: * * compiled: 页面在此事件中,会尝试插入 页面实例 到容器对像中去,同 * 时会给 容器对象 添加 MG_CHILDREN 对象,指向的是 页面实例。页面自 * 身的容器上会添加 MG_PAGE 对象,保存的是页面创建的信息,比如创建时 * 的 参数、所在容器 等。 * * ready: 页面在此事件中,会触发一个 mgViewShow 的事件,通知页面中 * 的组件进行渲染,或者重新启动相关UI事件和监听。 * * beforeDestroy: 页面在此事件中,会触发一个 mgViewDestroy 事件, * 通知页面的UI组件进行资源释放等操作。 */ function commonView(page) { var old = page.data, mixins; // 采用新的方式,组件的 data 必须为函数 if (!$.isFun(old)) { page.data = function() { return $.extend(true, {params: {}}, old); } } page.replace = true; page.inherit = true; // 底层页面初始化等相关的方法 mixins = { compiled: function() { var $parent = this.$parent, mgpage, $wrap; // 如果是采用容器插入渲染,执行相关的初始化操作 if ($parent && $parent.$options.name == "mgRender") { mgpage = $parent.MG_PAGE; $wrap = mgpage.wrapper; if (this.$el.nodeType === 3) { this.$el = this.$el.nextElementSibling; this._isFragment = false; } this.$appendTo($wrap[0]); $wrap[0].MG_CHILDREN = this; mgpage.mgwrap = this.$parent; } else { $wrap = $(this.$el); mgpage = {params: null, wrapper: null}; mgpage.wrapper = $wrap; mgpage.params = {}; } // 给包含容器添加私有的样式类 if (page && page.style && page.style.page) { mgpage.wrapper.addClass(page.style.page); } // 绑定对象句柄到 DOM 上 this.MG_PAGE = mgpage; this.$el.MG_VUE_CTRL = this; }, ready: function() { var mgpage = this.MG_PAGE, params, $wrap = mgpage.wrapper, inpara = $(this.$el).attr("params"); /* 修复当前实例的 $parent 实例链 */ var $par = this.$el.parentNode, fscope; do { if ($par.MG_VUE_CTRL) { fscope = $par.MG_VUE_CTRL; break; } $par = $par.parentNode; } while ($par && $par != doc.body); fscope = fscope ? 
fscope : undefined; this.$parent = fscope; if (inpara != null) { if (fscope && fscope[inpara] !== undefined) { inpara = fscope[inpara] } else if (typeof inpara == "string") { inpara = $.parseJSON(inpara); } if (typeof inpara == "object") { mgpage.params = inpara; } } params = _transParams(mgpage.params); if ($.isFun(page.resolve)) { pageResolveInit.call(this, page.resolve, params); } else { pageDefaultInit.call(this, params); } // 尝试通知父渲染容器,当前元素已经渲染完成 mgpage.mgwrap && mgpage.mgwrap.$emit("mgViewRender", this); $$.emit("viewReady", $wrap, this); }, beforeDestroy: function() { var $wrap = this.MG_PAGE.wrapper; $$.emit("viewDestroy", $wrap, this); this.$emit("mgViewHide"); this.$emit("mgViewDestroy"); }, } // 添加底层方法到每个具体的page对象上 if (typeof page.mixins == "array") { page.mixins.push(mixins); } else { page.mixins = [mixins]; } return page; // 返回修改后的page对象 } /*========================================================== * 全局对象暴漏方法,主要是 initView 和 loadView 两个方法 *==========================================================*/ $$.renderView = function(view, $wrap, paras) { if (!view.match(/^ma-/)) view = _makeViewName(view); var tmp = '<'+view+'></'+view+'>', ret; ret = new Vue({ template: tmp, name: "mgRender" }); $wrap[0].MG_PAGE = ret.MG_PAGE = { params : paras, wrapper: $wrap }; $wrap[0].MG_WRAP = ret; return ret.$mount(); } /** * 异步组件加载方法,根据 Vue 和 Webpack 的要求 * 结合两者,具体原理链接如下: * * http://cn.vuejs.org/guide/components.html#异步组件 */ $$.initView = function(resolve) { return function(page) { // 实例初始化页面组件对象 resolve(commonView(page)); } } /** * 页面路由包装方法,转换同步和异步组件逻辑一致,同时 * 将要渲染的页面直接插入到 MG-VIEW 中 */ $$.loadView = function(name, initFix) { var cname = _makeViewName(name); /** * 如果为一个对象,说明为同步加载页面,则先 * 转换为函数,保证同步和异步加载逻辑一致 */ if (typeof initFix == "object") { initFix = commonView(initFix); } $$.component(cname, initFix); /** * 返回新的一个方法,同时触发 routerOn 事件 */ return function(params, lastMatch, nowMatch, $route) { var $wrapper, $cache = $$._CACHE_, find = -1, $show, $wshow, $hide, $whide, last = -1, mitem, offset, eve = $route.evetype; if (lastMatch.match) { lastMatch = lastMatch.match; lastMatch = lastMatch[lastMatch.length-1].item; } // 尝试获取缓存的 要插入 的页面对象 mitem = nowMatch[nowMatch.length-1].item; for(var i=0; i<$cache.length; i++) { if ($cache[i].item == mitem) { find = i; } if ($cache[i].item == lastMatch) { last = i; } } if (find !== -1) { $wshow = $cache[find].$wrap; $show = $wshow[0].MG_CHILDREN; } else { $wrapper = $('<div class="wrapper"></div>'); $wrapper.appendTo($("mg-view")); $wshow = $wrapper; /* 新插入的无实例,无缓存,需修改缓存 */ var addCache = { item: mitem, $wrap: $wshow }; if (eve == "pushstate") { var end = $cache.push(addCache), del; if (last !== -1) { for(var i=last+1; i<end-1; i++) { var _item = $cache[i], _wrap = _item.$wrap[0]; _wrap.MG_WRAP.$destroy(true); _item.$wrap.remove(); } $cache.splice(last+1, end-last-2); } // 剪头剪尾操作,保证数组长度 if ($cache.length > 2) { del = $cache.shift().$wrap; del[0].MG_WRAP.$destroy(true); del.remove(); last -= 1; // 修正旧元素坐标 }; } else { $cache.unshift(addCache); $cache.splice(1, last); // 剪头剪尾操作,保证数组长度 if ($cache.length > 2) { del = $cache.pop().$wrap; del[0].MG_WRAP.$destroy(true); del.remove(); last += 1; // 修正旧元素坐标 }; } } // 尝试获取缓存的 要隐藏 的页面对象 $hide = $cache[last]; $whide = $hide ? 
$hide.$wrap : null; // 如果已经有插入的实例对象,则为缓存,手动调用相关方法 if ($show && $show.$emit) { if ($$.refreshView) { var fixpara = _transParams(params); $show.$set("params", fixpara); $show.$refresh && $show.$refresh(fixpara); $show.$emit("hook:ready"); } $show.$emit("mgViewShow"); $show.$broadcast("mgViewShow"); } // 必选先执行 routeOn 回调,保证 loader 插入到页面中 $$.emit("routeOn", $wshow, $whide, nowMatch, $route); // 创建要插入的新对象,并直接渲染 if (!$show) $$.renderView(cname, $wshow, params); } } $$.on("routeOn.page", function($wshow, $whide) { // 切换进入和退出页面的显示状态 $.rafCall(function() { $whide && $whide.addClass("hide"); $wshow.removeClass("hide"); }); }); })(window, document);
ar ret = "ma-
identifier_name
page.js
module.exports = (function(win, doc) { var $$ = win.mvue, _CACHE = $$._CACHE_; /** * Returns the page component alias for the given page name */ function _makeViewName(name, tag) { var ret = "ma-"+name;
*/ function _transParams(params) { for (var key in params) { if (params[key] == "000") { params[key] = ""; } } return params; } /** * Initialization when the page has no resolve method */ function pageDefaultInit(_params) { var that = this; // Register the data-receive event, used to initialize data manually that.$on("__updateData", function(initData) { if (typeof initData == "object") { for(var key in initData) { that.$set(key, initData[key]); } } }); that.$update = function(initData) { that.$emit("__updateData", initData); } that.$set('params', _params || {}); that.$update(_params); // this.$emit("mgViewShow"); // this.$broadcast("mgViewShow"); } /** * Initialization when the page has a resolve method */ function pageResolveInit(init, _params) { var that = this, _defer = $.defer(); // Register the data-refresh event, used to trigger a refresh manually that.$on("__refreshData", function(params, defer) { var initDefer = _defer; // Create the follow-up data-refresh callback if (initDefer.status == "resolved") { initDefer = $.defer(); initDefer.then(function(initData) { that.$emit("__updateData", initData); if (defer && defer.resolve) { defer.resolve(initData); } }) } init.call(that, params || _params, initDefer); }) // Push the data onto the instance via the event registered above _defer.then(function(initData) { that.$emit("__updateData", initData); }); pageDefaultInit.call(that, _params); that.$emit("__refreshData"); // trigger an update manually // Bind the data-refresh shortcut method that.$refresh = function(params, defer) { that.$emit("__refreshData", params, defer); } } /** * Wraps a page object, adding the shared low-level methods and running * the related setup. * * ============================================================ * Notes on the more important steps: * * compiled: in this hook the page instance is inserted into the container * object; the container gains an MG_CHILDREN reference pointing at the * page instance, and the page's own container gains an MG_PAGE object * holding creation info such as the creation params and host container. * * ready: in this hook an mgViewShow event is fired to tell the components * on the page to render, or to restart their UI events and listeners. * * beforeDestroy: in this hook an mgViewDestroy event is fired to tell the * page's UI components to release resources. */ function commonView(page) { var old = page.data, mixins; // With the new approach a component's data must be a function if (!$.isFun(old)) { page.data = function() { return $.extend(true, {params: {}}, old); } } page.replace = true; page.inherit = true; // Low-level page initialization and related methods mixins = { compiled: function() { var $parent = this.$parent, mgpage, $wrap; // When rendering via container insertion, run the related initialization if ($parent && $parent.$options.name == "mgRender") { mgpage = $parent.MG_PAGE; $wrap = mgpage.wrapper; if (this.$el.nodeType === 3) { this.$el = this.$el.nextElementSibling; this._isFragment = false; } this.$appendTo($wrap[0]); $wrap[0].MG_CHILDREN = this; mgpage.mgwrap = this.$parent; } else { $wrap = $(this.$el); mgpage = {params: null, wrapper: null}; mgpage.wrapper = $wrap; mgpage.params = {}; } // Add the page's private style class to the containing element if (page && page.style && page.style.page) { mgpage.wrapper.addClass(page.style.page); } // Bind object handles onto the DOM this.MG_PAGE = mgpage; this.$el.MG_VUE_CTRL = this; }, ready: function() { var mgpage = this.MG_PAGE, params, $wrap = mgpage.wrapper, inpara = $(this.$el).attr("params"); /* Repair the $parent chain of the current instance */ var $par = this.$el.parentNode, fscope; do { if ($par.MG_VUE_CTRL) { fscope = $par.MG_VUE_CTRL; break; } $par = $par.parentNode; } while ($par && $par != doc.body); fscope = fscope ? fscope : undefined; this.$parent = fscope; if (inpara != null) { if (fscope && fscope[inpara] !== undefined) { inpara = fscope[inpara] } else if (typeof inpara == "string") { inpara = $.parseJSON(inpara); } if (typeof inpara == "object") { mgpage.params = inpara; } } params = _transParams(mgpage.params); if ($.isFun(page.resolve)) { pageResolveInit.call(this, page.resolve, params); } else { pageDefaultInit.call(this, params); } // Notify the parent render container that this element has finished rendering mgpage.mgwrap && mgpage.mgwrap.$emit("mgViewRender", this); $$.emit("viewReady", $wrap, this); }, beforeDestroy: function() { var $wrap = this.MG_PAGE.wrapper; $$.emit("viewDestroy", $wrap, this); this.$emit("mgViewHide"); this.$emit("mgViewDestroy"); }, } // Add the low-level methods onto each concrete page object if (Array.isArray(page.mixins)) { page.mixins.push(mixins); } else { page.mixins = [mixins]; } return page; // return the modified page object } /*========================================================== * Methods exposed on the global object, chiefly initView and loadView *==========================================================*/ $$.renderView = function(view, $wrap, paras) { if (!view.match(/^ma-/)) view = _makeViewName(view); var tmp = '<'+view+'></'+view+'>', ret; ret = new Vue({ template: tmp, name: "mgRender" }); $wrap[0].MG_PAGE = ret.MG_PAGE = { params : paras, wrapper: $wrap }; $wrap[0].MG_WRAP = ret; return ret.$mount(); } /** * Async component loader that combines the requirements of Vue and * Webpack; the underlying mechanism is described at: * * http://cn.vuejs.org/guide/components.html#异步组件 */ $$.initView = function(resolve) { return function(page) { // Initialize the page component object resolve(commonView(page)); } } /** * Route wrapper that makes sync and async component loading behave the * same, and inserts the page to render directly into MG-VIEW */ $$.loadView = function(name, initFix) { var cname = _makeViewName(name); /** * If given an object the page is loaded synchronously, so convert it * first so that sync and async loading follow the same logic */ if (typeof initFix == "object") { initFix = commonView(initFix); } $$.component(cname, initFix); /** * Return a new handler that also fires the routerOn event */ return function(params, lastMatch, nowMatch, $route) { var $wrapper, $cache = $$._CACHE_, find = -1, $show, $wshow, $hide, $whide, last = -1, mitem, offset, eve = $route.evetype; if (lastMatch.match) { lastMatch = lastMatch.match; lastMatch = lastMatch[lastMatch.length-1].item; } // Look up the cached page object that is about to be inserted mitem = nowMatch[nowMatch.length-1].item; for(var i=0; i<$cache.length; i++) { if ($cache[i].item == mitem) { find = i; } if ($cache[i].item == lastMatch) { last = i; } } if (find !== -1) { $wshow = $cache[find].$wrap; $show = $wshow[0].MG_CHILDREN; } else { $wrapper = $('<div class="wrapper"></div>'); $wrapper.appendTo($("mg-view")); $wshow = $wrapper; /* Fresh insert: no instance and no cache entry yet, so update the cache */ var addCache = { item: mitem, $wrap: $wshow }; if (eve == "pushstate") { var end = $cache.push(addCache), del; if (last !== -1) { for(var i=last+1; i<end-1; i++) { var _item = $cache[i], _wrap = _item.$wrap[0]; _wrap.MG_WRAP.$destroy(true); _item.$wrap.remove(); } $cache.splice(last+1, end-last-2); } // Trim head/tail to cap the cache length if ($cache.length > 2) { del = $cache.shift().$wrap; del[0].MG_WRAP.$destroy(true); del.remove(); last -= 1; // fix up the old element's index }; } else { $cache.unshift(addCache); $cache.splice(1, last); // Trim head/tail to cap the cache length if ($cache.length > 2) { del = $cache.pop().$wrap; del[0].MG_WRAP.$destroy(true); del.remove(); last += 1; // fix up the old element's index }; } } // Look up the cached page object that is about to be hidden $hide = $cache[last]; $whide = $hide ? $hide.$wrap : null; // If an inserted instance already exists it came from the cache, so invoke the hooks manually if ($show && $show.$emit) { if ($$.refreshView) { var fixpara = _transParams(params); $show.$set("params", fixpara); $show.$refresh && $show.$refresh(fixpara); $show.$emit("hook:ready"); } $show.$emit("mgViewShow"); $show.$broadcast("mgViewShow"); } // The routeOn callback must run first so the loader gets inserted into the page $$.emit("routeOn", $wshow, $whide, nowMatch, $route); // Create the new object to insert and render it immediately if (!$show) $$.renderView(cname, $wshow, params); } } $$.on("routeOn.page", function($wshow, $whide) { // Toggle the visibility of the entering and leaving pages $.rafCall(function() { $whide && $whide.addClass("hide"); $wshow.removeClass("hide"); }); }); })(window, document);
ret = ret.replace(/[\-|\\|\/]/g, "-"); return tag ? "<"+ret+"></"+ret+">" : ret; } /** * Parameter value fix-up: converts "000" to empty
identifier_body
api_op_PostText.go
// Code generated by smithy-go-codegen DO NOT EDIT. package lexruntimeservice import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/lexruntimeservice/types" "github.com/awslabs/smithy-go/middleware" smithyhttp "github.com/awslabs/smithy-go/transport/http" ) // Sends user input to Amazon Lex. Client applications can use this API to send // requests to Amazon Lex at runtime. Amazon Lex then interprets the user input // using the machine learning model it built for the bot. In response, Amazon Lex // returns the next message to convey to the user and an optional responseCard to // display. Consider the following example messages: // // * For a user input "I // would like a pizza", Amazon Lex might return a response with a message eliciting // slot data (for example, PizzaSize): "What size pizza would you like?" // // * // After the user provides all of the pizza order information, Amazon Lex might // return a response with a message to obtain user confirmation "Proceed with the // pizza order?". // // * After the user replies to a confirmation prompt with a // "yes", Amazon Lex might return a conclusion statement: "Thank you, your cheese // pizza has been ordered.". // // Not all Amazon Lex messages require a user response. // For example, a conclusion statement does not require a response. Some messages // require only a "yes" or "no" user response. In addition to the message, Amazon // Lex provides additional context about the message in the response that you might // use to enhance client behavior, for example, to display the appropriate client // user interface. These are the slotToElicit, dialogState, intentName, and slots // fields in the response. Consider the following examples: // // * If the message // is to elicit slot data, Amazon Lex returns the following context information: // // // * dialogState set to ElicitSlot // // * intentName set to the intent name in // the current context // // * slotToElicit set to the slot name for which the // message is eliciting information // // * slots set to a map of slots, // configured for the intent, with currently known values // // * If the message is // a confirmation prompt, the dialogState is set to ConfirmIntent and SlotToElicit // is set to null. // // * If the message is a clarification prompt (configured for // the intent) that indicates that user intent is not understood, the dialogState // is set to ElicitIntent and slotToElicit is set to null. // // In addition, Amazon Lex // also returns your application-specific sessionAttributes. For more information, // see Managing Conversation Context // (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html). func (c *Client) PostText(ctx context.Context, params *PostTextInput, optFns ...func(*Options)) (*PostTextOutput, error) { if params == nil { params = &PostTextInput{} } result, metadata, err := c.invokeOperation(ctx, "PostText", params, optFns, addOperationPostTextMiddlewares) if err != nil { return nil, err } out := result.(*PostTextOutput) out.ResultMetadata = metadata return out, nil } type PostTextInput struct { // The alias of the Amazon Lex bot. // // This member is required. BotAlias *string // The name of the Amazon Lex bot. // // This member is required. BotName *string // The text that the user entered (Amazon Lex interprets this text). // // This member is required. InputText *string // The ID of the client application user. 
Amazon Lex uses this to identify a user's // conversation with your bot. At runtime, each request must contain the userID // field. To decide the user ID to use for your application, consider the following // factors. // // * The userID field must not contain any personally identifiable // information of the user, for example, name, personal identification numbers, or // other end user personal information. // // * If you want a user to start a // conversation on one device and continue on another device, use a user-specific // identifier. // // * If you want the same user to be able to have two independent // conversations on two different devices, choose a device-specific identifier. // // // * A user can't have two independent conversations with two different versions of // the same bot. For example, a user can't have a conversation with the PROD and // BETA versions of the same bot. If you anticipate that a user will need to have // conversation with two different versions, for example, while testing, include // the bot alias in the user ID to separate the two conversations. // // This member is required. UserId *string // Request-specific information passed between Amazon Lex and a client application. // The namespace x-amz-lex: is reserved for special attributes. Don't create any // request attributes with the prefix x-amz-lex:. For more information, see Setting // Request Attributes // (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-request-attribs). RequestAttributes map[string]*string // Application-specific information passed between Amazon Lex and a client // application. For more information, see Setting Session Attributes // (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-session-attribs). SessionAttributes map[string]*string } type PostTextOutput struct { // One to four alternative intents that may be applicable to the user's intent. // Each alternative includes a score that indicates how confident Amazon Lex is // that the intent matches the user's intent. The intents are sorted by the // confidence score. AlternativeIntents []*types.PredictedIntent // The version of the bot that responded to the conversation. You can use this // information to help determine if one version of a bot is performing better than // another version. If you have enabled the new natural language understanding // (NLU) model, you can use this to determine if the improvement is due to changes // to the bot or changes to the NLU. For more information about enabling the new // NLU, see the enableModelImprovements // (https://docs.aws.amazon.com/lex/latest/dg/API_PutBot.html#lex-PutBot-request-enableModelImprovements) // parameter of the PutBot operation. BotVersion *string // Identifies the current state of the user interaction. Amazon Lex returns one of // the following values as dialogState. The client can optionally use this // information to customize the user interface. // // * ElicitIntent - Amazon Lex // wants to elicit user intent. For example, a user might utter an intent ("I want // to order a pizza"). If Amazon Lex cannot infer the user intent from this // utterance, it will return this dialogState. // // * ConfirmIntent - Amazon Lex is // expecting a "yes" or "no" response. For example, Amazon Lex wants user // confirmation before fulfilling an intent. Instead of a simple "yes" or "no," a // user might respond with additional information. For example, "yes, but make it // thick crust pizza" or "no, I want to order a drink". 
Amazon Lex can process such // additional information (in these examples, update the crust type slot value, or // change intent from OrderPizza to OrderDrink). // // * ElicitSlot - Amazon Lex is // expecting a slot value for the current intent. For example, suppose that in the // response Amazon Lex sends this message: "What size pizza would you like?". A // user might reply with the slot value (e.g., "medium"). The user might also // provide additional information in the response (e.g., "medium thick crust // pizza"). Amazon Lex can process such additional information appropriately. // // // * Fulfilled - Conveys that the Lambda function configured for the intent has // successfully fulfilled the intent. // // * ReadyForFulfillment - Conveys that the // client has to fulfill the intent. // // * Failed - Conveys that the conversation // with the user failed. This can happen for various reasons including that the // user did not provide an appropriate response to prompts from the service (you // can configure how many times Amazon Lex can prompt a user for specific // information), or the Lambda function failed to fulfill the intent. DialogState types.DialogState // The current user intent that Amazon Lex is aware of. IntentName *string // The message to convey to the user. The message can come from the bot's // configuration or from a Lambda function. If the intent is not configured with a // Lambda function, or if the Lambda function returned Delegate as the // dialogAction.type in its response, Amazon Lex decides on the next course of action // and selects an appropriate message from the bot's configuration based on the // current interaction context. For example, if Amazon Lex isn't able to understand // user input, it uses a clarification prompt message. When you create an intent // you can assign messages to groups. When messages are assigned to groups Amazon // Lex returns one message from each group in the response. The message field is an // escaped JSON string containing the messages. For more information about the // structure of the JSON string returned, see msg-prompts-formats. If the Lambda // function returns a message, Amazon Lex passes it to the client in its response. Message *string // The format of the response message. One of the following values: // // * // PlainText - The message contains plain UTF-8 text. // // * CustomPayload - The // message is a custom format defined by the Lambda function. // // * SSML - The // message contains text formatted for voice output. // // * Composite - The message // contains an escaped JSON object containing one or more messages from the groups // that messages were assigned to when the intent was created. MessageFormat types.MessageFormatType // Provides a score that indicates how confident Amazon Lex is that the returned // intent is the one that matches the user's intent. The score is between 0.0 and // 1.0. For more information, see Confidence Scores // (https://docs.aws.amazon.com/lex/latest/dg/confidence-scores.html). The score is // a relative score, not an absolute score. The score may change based on // improvements to the Amazon Lex natural language understanding (NLU) model. NluIntentConfidence *types.IntentConfidence // Represents the options that the user has to respond to the current prompt. // Response Card can come from the bot configuration (in the Amazon Lex console, // choose the settings button next to a slot) or from a code hook (Lambda // function). 
ResponseCard *types.ResponseCard // The sentiment expressed in an utterance. When the bot is configured to send // utterances to Amazon Comprehend for sentiment analysis, this field contains the // result of the analysis. SentimentResponse *types.SentimentResponse // A map of key-value pairs representing the session-specific context information. SessionAttributes map[string]*string // A unique identifier for the session. SessionId *string // If the dialogState value is ElicitSlot, returns the name of the slot for which // Amazon Lex is eliciting a value. SlotToElicit *string // The intent slots that Amazon Lex detected from the user input in the // conversation. Amazon Lex creates a resolution list containing likely values for // a slot. The value that it returns is determined by the valueSelectionStrategy // selected when the slot type was created or updated. If valueSelectionStrategy is // set to ORIGINAL_VALUE, the value provided by the user is returned, if the user // value is similar to the slot values. If valueSelectionStrategy is set to // TOP_RESOLUTION Amazon Lex returns the first value in the resolution list or, if // there is no resolution list, null. If you don't specify a // valueSelectionStrategy, the default is ORIGINAL_VALUE. Slots map[string]*string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata } func addOperationPostTextMiddlewares(stack *middleware.Stack, options Options) (err error)
func newServiceMetadataMiddleware_opPostText(region string) awsmiddleware.RegisterServiceMetadata { return awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "lex", OperationName: "PostText", } }
{ err = stack.Serialize.Add(&awsRestjson1_serializeOpPostText{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestjson1_deserializeOpPostText{}, middleware.After) if err != nil { return err } awsmiddleware.AddRequestInvocationIDMiddleware(stack) smithyhttp.AddContentLengthMiddleware(stack) addResolveEndpointMiddleware(stack, options) v4.AddComputePayloadSHA256Middleware(stack) addRetryMiddlewares(stack, options) addHTTPSignerV4Middleware(stack, options) awsmiddleware.AddAttemptClockSkewMiddleware(stack) addClientUserAgent(stack) smithyhttp.AddErrorCloseResponseBodyMiddleware(stack) smithyhttp.AddCloseResponseBodyMiddleware(stack) addOpPostTextValidationMiddleware(stack) stack.Initialize.Add(newServiceMetadataMiddleware_opPostText(options.Region), middleware.Before) addRequestIDRetrieverMiddleware(stack) addResponseErrorMiddleware(stack) return nil }
identifier_body
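PostText, PostTextInput, and PostTextOutput above are the whole client-facing surface of this operation, so a short usage sketch may help. It assumes the smithy-era aws-sdk-go-v2 layout in which the generated client exposes NewFromConfig and credentials come from config.LoadDefaultConfig (exact helper signatures shifted between pre-GA releases); the bot name, alias, and user ID are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/lexruntimeservice"
)

func main() {
	// Load shared AWS configuration (region, credentials).
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := lexruntimeservice.NewFromConfig(cfg)

	// BotName, BotAlias, InputText, and UserId are the required members of
	// PostTextInput; the values below are placeholders.
	out, err := client.PostText(context.TODO(), &lexruntimeservice.PostTextInput{
		BotName:   aws.String("OrderPizzaBot"),
		BotAlias:  aws.String("PROD"),
		UserId:    aws.String("user-1234"),
		InputText: aws.String("I would like a pizza"),
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("dialogState:", out.DialogState)
	if out.Message != nil {
		fmt.Println("message:", *out.Message)
	}
}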
api_op_PostText.go
// Code generated by smithy-go-codegen DO NOT EDIT. package lexruntimeservice import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/lexruntimeservice/types" "github.com/awslabs/smithy-go/middleware" smithyhttp "github.com/awslabs/smithy-go/transport/http" ) // Sends user input to Amazon Lex. Client applications can use this API to send // requests to Amazon Lex at runtime. Amazon Lex then interprets the user input // using the machine learning model it built for the bot. In response, Amazon Lex // returns the next message to convey to the user and an optional responseCard to // display. Consider the following example messages: // // * For a user input "I // would like a pizza", Amazon Lex might return a response with a message eliciting // slot data (for example, PizzaSize): "What size pizza would you like?" // // * // After the user provides all of the pizza order information, Amazon Lex might // return a response with a message to obtain user confirmation "Proceed with the // pizza order?". // // * After the user replies to a confirmation prompt with a // "yes", Amazon Lex might return a conclusion statement: "Thank you, your cheese // pizza has been ordered.". // // Not all Amazon Lex messages require a user response. // For example, a conclusion statement does not require a response. Some messages // require only a "yes" or "no" user response. In addition to the message, Amazon // Lex provides additional context about the message in the response that you might // use to enhance client behavior, for example, to display the appropriate client // user interface. These are the slotToElicit, dialogState, intentName, and slots // fields in the response. Consider the following examples: // // * If the message // is to elicit slot data, Amazon Lex returns the following context information: // // // * dialogState set to ElicitSlot // // * intentName set to the intent name in // the current context // // * slotToElicit set to the slot name for which the // message is eliciting information // // * slots set to a map of slots, // configured for the intent, with currently known values // // * If the message is // a confirmation prompt, the dialogState is set to ConfirmIntent and SlotToElicit // is set to null. // // * If the message is a clarification prompt (configured for // the intent) that indicates that user intent is not understood, the dialogState // is set to ElicitIntent and slotToElicit is set to null. // // In addition, Amazon Lex // also returns your application-specific sessionAttributes. For more information, // see Managing Conversation Context // (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html). func (c *Client) PostText(ctx context.Context, params *PostTextInput, optFns ...func(*Options)) (*PostTextOutput, error) { if params == nil { params = &PostTextInput{} } result, metadata, err := c.invokeOperation(ctx, "PostText", params, optFns, addOperationPostTextMiddlewares) if err != nil { return nil, err } out := result.(*PostTextOutput) out.ResultMetadata = metadata return out, nil } type PostTextInput struct { // The alias of the Amazon Lex bot. // // This member is required. BotAlias *string // The name of the Amazon Lex bot. // // This member is required. BotName *string // The text that the user entered (Amazon Lex interprets this text). // // This member is required. InputText *string // The ID of the client application user. 
Amazon Lex uses this to identify a user's // conversation with your bot. At runtime, each request must contain the userID // field. To decide the user ID to use for your application, consider the following // factors. // // * The userID field must not contain any personally identifiable // information of the user, for example, name, personal identification numbers, or // other end user personal information. // // * If you want a user to start a // conversation on one device and continue on another device, use a user-specific // identifier. // // * If you want the same user to be able to have two independent // conversations on two different devices, choose a device-specific identifier. // // // * A user can't have two independent conversations with two different versions of // the same bot. For example, a user can't have a conversation with the PROD and // BETA versions of the same bot. If you anticipate that a user will need to have // conversation with two different versions, for example, while testing, include // the bot alias in the user ID to separate the two conversations. // // This member is required. UserId *string // Request-specific information passed between Amazon Lex and a client application. // The namespace x-amz-lex: is reserved for special attributes. Don't create any // request attributes with the prefix x-amz-lex:. For more information, see Setting // Request Attributes // (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-request-attribs). RequestAttributes map[string]*string // Application-specific information passed between Amazon Lex and a client // application. For more information, see Setting Session Attributes // (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-session-attribs). SessionAttributes map[string]*string } type PostTextOutput struct { // One to four alternative intents that may be applicable to the user's intent. // Each alternative includes a score that indicates how confident Amazon Lex is // that the intent matches the user's intent. The intents are sorted by the // confidence score. AlternativeIntents []*types.PredictedIntent // The version of the bot that responded to the conversation. You can use this // information to help determine if one version of a bot is performing better than // another version. If you have enabled the new natural language understanding // (NLU) model, you can use this to determine if the improvement is due to changes // to the bot or changes to the NLU. For more information about enabling the new // NLU, see the enableModelImprovements // (https://docs.aws.amazon.com/lex/latest/dg/API_PutBot.html#lex-PutBot-request-enableModelImprovements) // parameter of the PutBot operation. BotVersion *string // Identifies the current state of the user interaction. Amazon Lex returns one of // the following values as dialogState. The client can optionally use this // information to customize the user interface. // // * ElicitIntent - Amazon Lex
// expecting a "yes" or "no" response. For example, Amazon Lex wants user // confirmation before fulfilling an intent. Instead of a simple "yes" or "no," a // user might respond with additional information. For example, "yes, but make it // thick crust pizza" or "no, I want to order a drink". Amazon Lex can process such // additional information (in these examples, update the crust type slot value, or // change intent from OrderPizza to OrderDrink). // // * ElicitSlot - Amazon Lex is // expecting a slot value for the current intent. For example, suppose that in the // response Amazon Lex sends this message: "What size pizza would you like?". A // user might reply with the slot value (e.g., "medium"). The user might also // provide additional information in the response (e.g., "medium thick crust // pizza"). Amazon Lex can process such additional information appropriately. // // // * Fulfilled - Conveys that the Lambda function configured for the intent has // successfully fulfilled the intent. // // * ReadyForFulfillment - Conveys that the // client has to fulfill the intent. // // * Failed - Conveys that the conversation // with the user failed. This can happen for various reasons including that the // user did not provide an appropriate response to prompts from the service (you // can configure how many times Amazon Lex can prompt a user for specific // information), or the Lambda function failed to fulfill the intent. DialogState types.DialogState // The current user intent that Amazon Lex is aware of. IntentName *string // The message to convey to the user. The message can come from the bot's // configuration or from a Lambda function. If the intent is not configured with a // Lambda function, or if the Lambda function returned Delegate as the // dialogAction.type its response, Amazon Lex decides on the next course of action // and selects an appropriate message from the bot's configuration based on the // current interaction context. For example, if Amazon Lex isn't able to understand // user input, it uses a clarification prompt message. When you create an intent // you can assign messages to groups. When messages are assigned to groups Amazon // Lex returns one message from each group in the response. The message field is an // escaped JSON string containing the messages. For more information about the // structure of the JSON string returned, see msg-prompts-formats. If the Lambda // function returns a message, Amazon Lex passes it to the client in its response. Message *string // The format of the response message. One of the following values: // // * // PlainText - The message contains plain UTF-8 text. // // * CustomPayload - The // message is a custom format defined by the Lambda function. // // * SSML - The // message contains text formatted for voice output. // // * Composite - The message // contains an escaped JSON object containing one or more messages from the groups // that messages were assigned to when the intent was created. MessageFormat types.MessageFormatType // Provides a score that indicates how confident Amazon Lex is that the returned // intent is the one that matches the user's intent. The score is between 0.0 and // 1.0. For more information, see Confidence Scores // (https://docs.aws.amazon.com/lex/latest/dg/confidence-scores.html). The score is // a relative score, not an absolute score. The score may change based on // improvements to the Amazon Lex natural language understanding (NLU) model. 
NluIntentConfidence *types.IntentConfidence // Represents the options that the user has to respond to the current prompt. // Response Card can come from the bot configuration (in the Amazon Lex console, // choose the settings button next to a slot) or from a code hook (Lambda // function). ResponseCard *types.ResponseCard // The sentiment expressed in an utterance. When the bot is configured to send // utterances to Amazon Comprehend for sentiment analysis, this field contains the // result of the analysis. SentimentResponse *types.SentimentResponse // A map of key-value pairs representing the session-specific context information. SessionAttributes map[string]*string // A unique identifier for the session. SessionId *string // If the dialogState value is ElicitSlot, returns the name of the slot for which // Amazon Lex is eliciting a value. SlotToElicit *string // The intent slots that Amazon Lex detected from the user input in the // conversation. Amazon Lex creates a resolution list containing likely values for // a slot. The value that it returns is determined by the valueSelectionStrategy // selected when the slot type was created or updated. If valueSelectionStrategy is // set to ORIGINAL_VALUE, the value provided by the user is returned, if the user // value is similar to the slot values. If valueSelectionStrategy is set to // TOP_RESOLUTION Amazon Lex returns the first value in the resolution list or, if // there is no resolution list, null. If you don't specify a // valueSelectionStrategy, the default is ORIGINAL_VALUE. Slots map[string]*string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata } func addOperationPostTextMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsRestjson1_serializeOpPostText{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestjson1_deserializeOpPostText{}, middleware.After) if err != nil { return err } awsmiddleware.AddRequestInvocationIDMiddleware(stack) smithyhttp.AddContentLengthMiddleware(stack) addResolveEndpointMiddleware(stack, options) v4.AddComputePayloadSHA256Middleware(stack) addRetryMiddlewares(stack, options) addHTTPSignerV4Middleware(stack, options) awsmiddleware.AddAttemptClockSkewMiddleware(stack) addClientUserAgent(stack) smithyhttp.AddErrorCloseResponseBodyMiddleware(stack) smithyhttp.AddCloseResponseBodyMiddleware(stack) addOpPostTextValidationMiddleware(stack) stack.Initialize.Add(newServiceMetadataMiddleware_opPostText(options.Region), middleware.Before) addRequestIDRetrieverMiddleware(stack) addResponseErrorMiddleware(stack) return nil } func newServiceMetadataMiddleware_opPostText(region string) awsmiddleware.RegisterServiceMetadata { return awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "lex", OperationName: "PostText", } }
// wants to elicit user intent. For example, a user might utter an intent ("I want // to order a pizza"). If Amazon Lex cannot infer the user intent from this // utterance, it will return this dialogState. // // * ConfirmIntent - Amazon Lex is
random_line_split
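The dialogState values documented above form a small client-side state machine. A hedged sketch of dispatching on them, assuming the DialogState constants follow standard smithy-go enum naming in the types package (DialogStateElicitSlot and so on), which is not confirmed by this file:

package lexclient

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/lexruntimeservice"
	"github.com/aws/aws-sdk-go-v2/service/lexruntimeservice/types"
)

// react dispatches on the dialogState values described in the PostText
// documentation above. A sketch only; the constant names assume standard
// smithy-go enum generation.
func react(out *lexruntimeservice.PostTextOutput) {
	switch out.DialogState {
	case types.DialogStateElicitIntent:
		fmt.Println("bot could not infer an intent; re-prompt the user")
	case types.DialogStateConfirmIntent:
		fmt.Println("bot expects a yes or no confirmation")
	case types.DialogStateElicitSlot:
		if out.SlotToElicit != nil {
			fmt.Println("bot needs a value for slot:", *out.SlotToElicit)
		}
	case types.DialogStateReadyForFulfillment:
		fmt.Println("client code should fulfill the intent now")
	case types.DialogStateFailed:
		fmt.Println("conversation failed; start over")
	}
}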
api_op_PostText.go
// Code generated by smithy-go-codegen DO NOT EDIT. package lexruntimeservice import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/lexruntimeservice/types" "github.com/awslabs/smithy-go/middleware" smithyhttp "github.com/awslabs/smithy-go/transport/http" ) // Sends user input to Amazon Lex. Client applications can use this API to send // requests to Amazon Lex at runtime. Amazon Lex then interprets the user input // using the machine learning model it built for the bot. In response, Amazon Lex // returns the next message to convey to the user and an optional responseCard to // display. Consider the following example messages: // // * For a user input "I // would like a pizza", Amazon Lex might return a response with a message eliciting // slot data (for example, PizzaSize): "What size pizza would you like?" // // * // After the user provides all of the pizza order information, Amazon Lex might // return a response with a message to obtain user confirmation "Proceed with the // pizza order?". // // * After the user replies to a confirmation prompt with a // "yes", Amazon Lex might return a conclusion statement: "Thank you, your cheese // pizza has been ordered.". // // Not all Amazon Lex messages require a user response. // For example, a conclusion statement does not require a response. Some messages // require only a "yes" or "no" user response. In addition to the message, Amazon // Lex provides additional context about the message in the response that you might // use to enhance client behavior, for example, to display the appropriate client // user interface. These are the slotToElicit, dialogState, intentName, and slots // fields in the response. Consider the following examples: // // * If the message // is to elicit slot data, Amazon Lex returns the following context information: // // // * dialogState set to ElicitSlot // // * intentName set to the intent name in // the current context // // * slotToElicit set to the slot name for which the // message is eliciting information // // * slots set to a map of slots, // configured for the intent, with currently known values // // * If the message is // a confirmation prompt, the dialogState is set to ConfirmIntent and SlotToElicit // is set to null. // // * If the message is a clarification prompt (configured for // the intent) that indicates that user intent is not understood, the dialogState // is set to ElicitIntent and slotToElicit is set to null. // // In addition, Amazon Lex // also returns your application-specific sessionAttributes. For more information, // see Managing Conversation Context // (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html). func (c *Client) PostText(ctx context.Context, params *PostTextInput, optFns ...func(*Options)) (*PostTextOutput, error) { if params == nil { params = &PostTextInput{} } result, metadata, err := c.invokeOperation(ctx, "PostText", params, optFns, addOperationPostTextMiddlewares) if err != nil { return nil, err } out := result.(*PostTextOutput) out.ResultMetadata = metadata return out, nil } type PostTextInput struct { // The alias of the Amazon Lex bot. // // This member is required. BotAlias *string // The name of the Amazon Lex bot. // // This member is required. BotName *string // The text that the user entered (Amazon Lex interprets this text). // // This member is required. InputText *string // The ID of the client application user. 
Amazon Lex uses this to identify a user's // conversation with your bot. At runtime, each request must contain the userID // field. To decide the user ID to use for your application, consider the following // factors. // // * The userID field must not contain any personally identifiable // information of the user, for example, name, personal identification numbers, or // other end user personal information. // // * If you want a user to start a // conversation on one device and continue on another device, use a user-specific // identifier. // // * If you want the same user to be able to have two independent // conversations on two different devices, choose a device-specific identifier. // // // * A user can't have two independent conversations with two different versions of // the same bot. For example, a user can't have a conversation with the PROD and // BETA versions of the same bot. If you anticipate that a user will need to have // conversation with two different versions, for example, while testing, include // the bot alias in the user ID to separate the two conversations. // // This member is required. UserId *string // Request-specific information passed between Amazon Lex and a client application. // The namespace x-amz-lex: is reserved for special attributes. Don't create any // request attributes with the prefix x-amz-lex:. For more information, see Setting // Request Attributes // (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-request-attribs). RequestAttributes map[string]*string // Application-specific information passed between Amazon Lex and a client // application. For more information, see Setting Session Attributes // (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-session-attribs). SessionAttributes map[string]*string } type PostTextOutput struct { // One to four alternative intents that may be applicable to the user's intent. // Each alternative includes a score that indicates how confident Amazon Lex is // that the intent matches the user's intent. The intents are sorted by the // confidence score. AlternativeIntents []*types.PredictedIntent // The version of the bot that responded to the conversation. You can use this // information to help determine if one version of a bot is performing better than // another version. If you have enabled the new natural language understanding // (NLU) model, you can use this to determine if the improvement is due to changes // to the bot or changes to the NLU. For more information about enabling the new // NLU, see the enableModelImprovements // (https://docs.aws.amazon.com/lex/latest/dg/API_PutBot.html#lex-PutBot-request-enableModelImprovements) // parameter of the PutBot operation. BotVersion *string // Identifies the current state of the user interaction. Amazon Lex returns one of // the following values as dialogState. The client can optionally use this // information to customize the user interface. // // * ElicitIntent - Amazon Lex // wants to elicit user intent. For example, a user might utter an intent ("I want // to order a pizza"). If Amazon Lex cannot infer the user intent from this // utterance, it will return this dialogState. // // * ConfirmIntent - Amazon Lex is // expecting a "yes" or "no" response. For example, Amazon Lex wants user // confirmation before fulfilling an intent. Instead of a simple "yes" or "no," a // user might respond with additional information. For example, "yes, but make it // thick crust pizza" or "no, I want to order a drink". 
Amazon Lex can process such // additional information (in these examples, update the crust type slot value, or // change intent from OrderPizza to OrderDrink). // // * ElicitSlot - Amazon Lex is // expecting a slot value for the current intent. For example, suppose that in the // response Amazon Lex sends this message: "What size pizza would you like?". A // user might reply with the slot value (e.g., "medium"). The user might also // provide additional information in the response (e.g., "medium thick crust // pizza"). Amazon Lex can process such additional information appropriately. // // // * Fulfilled - Conveys that the Lambda function configured for the intent has // successfully fulfilled the intent. // // * ReadyForFulfillment - Conveys that the // client has to fulfill the intent. // // * Failed - Conveys that the conversation // with the user failed. This can happen for various reasons including that the // user did not provide an appropriate response to prompts from the service (you // can configure how many times Amazon Lex can prompt a user for specific // information), or the Lambda function failed to fulfill the intent. DialogState types.DialogState // The current user intent that Amazon Lex is aware of. IntentName *string // The message to convey to the user. The message can come from the bot's // configuration or from a Lambda function. If the intent is not configured with a // Lambda function, or if the Lambda function returned Delegate as the // dialogAction.type its response, Amazon Lex decides on the next course of action // and selects an appropriate message from the bot's configuration based on the // current interaction context. For example, if Amazon Lex isn't able to understand // user input, it uses a clarification prompt message. When you create an intent // you can assign messages to groups. When messages are assigned to groups Amazon // Lex returns one message from each group in the response. The message field is an // escaped JSON string containing the messages. For more information about the // structure of the JSON string returned, see msg-prompts-formats. If the Lambda // function returns a message, Amazon Lex passes it to the client in its response. Message *string // The format of the response message. One of the following values: // // * // PlainText - The message contains plain UTF-8 text. // // * CustomPayload - The // message is a custom format defined by the Lambda function. // // * SSML - The // message contains text formatted for voice output. // // * Composite - The message // contains an escaped JSON object containing one or more messages from the groups // that messages were assigned to when the intent was created. MessageFormat types.MessageFormatType // Provides a score that indicates how confident Amazon Lex is that the returned // intent is the one that matches the user's intent. The score is between 0.0 and // 1.0. For more information, see Confidence Scores // (https://docs.aws.amazon.com/lex/latest/dg/confidence-scores.html). The score is // a relative score, not an absolute score. The score may change based on // improvements to the Amazon Lex natural language understanding (NLU) model. NluIntentConfidence *types.IntentConfidence // Represents the options that the user has to respond to the current prompt. // Response Card can come from the bot configuration (in the Amazon Lex console, // choose the settings button next to a slot) or from a code hook (Lambda // function). 
ResponseCard *types.ResponseCard // The sentiment expressed in and utterance. When the bot is configured to send // utterances to Amazon Comprehend for sentiment analysis, this field contains the // result of the analysis. SentimentResponse *types.SentimentResponse // A map of key-value pairs representing the session-specific context information. SessionAttributes map[string]*string // A unique identifier for the session. SessionId *string // If the dialogState value is ElicitSlot, returns the name of the slot for which // Amazon Lex is eliciting a value. SlotToElicit *string // The intent slots that Amazon Lex detected from the user input in the // conversation. Amazon Lex creates a resolution list containing likely values for // a slot. The value that it returns is determined by the valueSelectionStrategy // selected when the slot type was created or updated. If valueSelectionStrategy is // set to ORIGINAL_VALUE, the value provided by the user is returned, if the user // value is similar to the slot values. If valueSelectionStrategy is set to // TOP_RESOLUTION Amazon Lex returns the first value in the resolution list or, if // there is no resolution list, null. If you don't specify a // valueSelectionStrategy, the default is ORIGINAL_VALUE. Slots map[string]*string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata } func addOperationPostTextMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsRestjson1_serializeOpPostText{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestjson1_deserializeOpPostText{}, middleware.After) if err != nil { return err } awsmiddleware.AddRequestInvocationIDMiddleware(stack) smithyhttp.AddContentLengthMiddleware(stack) addResolveEndpointMiddleware(stack, options) v4.AddComputePayloadSHA256Middleware(stack) addRetryMiddlewares(stack, options) addHTTPSignerV4Middleware(stack, options) awsmiddleware.AddAttemptClockSkewMiddleware(stack) addClientUserAgent(stack) smithyhttp.AddErrorCloseResponseBodyMiddleware(stack) smithyhttp.AddCloseResponseBodyMiddleware(stack) addOpPostTextValidationMiddleware(stack) stack.Initialize.Add(newServiceMetadataMiddleware_opPostText(options.Region), middleware.Before) addRequestIDRetrieverMiddleware(stack) addResponseErrorMiddleware(stack) return nil } func
(region string) awsmiddleware.RegisterServiceMetadata { return awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "lex", OperationName: "PostText", } }
newServiceMetadataMiddleware_opPostText
identifier_name
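A minimal sketch of how application code might drive the generated PostText client above. The bot name, alias, and user ID are hypothetical placeholders, and the config-loading call matches current aws-sdk-go-v2 releases, which may differ slightly from the pre-GA snapshot this file was generated against:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/lexruntimeservice"
)

func main() {
	// Load region and credentials from the environment / shared config.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := lexruntimeservice.NewFromConfig(cfg)

	// All three identifiers plus InputText are required members of PostTextInput.
	out, err := client.PostText(context.TODO(), &lexruntimeservice.PostTextInput{
		BotName:   aws.String("OrderPizzaBot"), // hypothetical bot
		BotAlias:  aws.String("prod"),
		UserId:    aws.String("user-1234"),
		InputText: aws.String("I would like a pizza"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("dialogState:", out.DialogState)
	if out.Message != nil {
		fmt.Println("message:", *out.Message)
	}
}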
api_op_PostText.go
// Code generated by smithy-go-codegen DO NOT EDIT.

package lexruntimeservice

import (
	"context"
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/service/lexruntimeservice/types"
	"github.com/awslabs/smithy-go/middleware"
	smithyhttp "github.com/awslabs/smithy-go/transport/http"
)

// Sends user input to Amazon Lex. Client applications can use this API to send
// requests to Amazon Lex at runtime. Amazon Lex then interprets the user input
// using the machine learning model it built for the bot. In response, Amazon
// Lex returns the next message to convey to the user and an optional
// responseCard to display. Consider the following example messages:
//
// * For a user input "I would like a pizza", Amazon Lex might return a
// response with a message eliciting slot data (for example, PizzaSize): "What
// size pizza would you like?"
//
// * After the user provides all of the pizza order information, Amazon Lex
// might return a response with a message to obtain user confirmation: "Proceed
// with the pizza order?"
//
// * After the user replies to a confirmation prompt with a "yes", Amazon Lex
// might return a conclusion statement: "Thank you, your cheese pizza has been
// ordered."
//
// Not all Amazon Lex messages require a user response. For example, a
// conclusion statement does not require a response. Some messages require only
// a "yes" or "no" user response. In addition to the message, Amazon Lex
// provides additional context about the message in the response that you might
// use to enhance client behavior, for example, to display the appropriate
// client user interface. These are the slotToElicit, dialogState, intentName,
// and slots fields in the response. Consider the following examples:
//
// * If the message is to elicit slot data, Amazon Lex returns the following
// context information:
//
// * dialogState set to ElicitSlot
//
// * intentName set to the intent name in the current context
//
// * slotToElicit set to the slot name for which the message is eliciting
// information
//
// * slots set to a map of slots, configured for the intent, with currently
// known values
//
// * If the message is a confirmation prompt, the dialogState is set to
// ConfirmIntent and SlotToElicit is set to null.
//
// * If the message is a clarification prompt (configured for the intent) that
// indicates that user intent is not understood, the dialogState is set to
// ElicitIntent and slotToElicit is set to null.
//
// In addition, Amazon Lex also returns your application-specific
// sessionAttributes. For more information, see Managing Conversation Context
// (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html).
func (c *Client) PostText(ctx context.Context, params *PostTextInput, optFns ...func(*Options)) (*PostTextOutput, error) {
	if params == nil {
		params = &PostTextInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "PostText", params, optFns, addOperationPostTextMiddlewares)
	if err != nil

	out := result.(*PostTextOutput)
	out.ResultMetadata = metadata
	return out, nil
}

type PostTextInput struct {

	// The alias of the Amazon Lex bot.
	//
	// This member is required.
	BotAlias *string

	// The name of the Amazon Lex bot.
	//
	// This member is required.
	BotName *string

	// The text that the user entered (Amazon Lex interprets this text).
	//
	// This member is required.
	InputText *string

	// The ID of the client application user. Amazon Lex uses this to identify a
	// user's conversation with your bot. At runtime, each request must contain
	// the userID field. To decide the user ID to use for your application,
	// consider the following factors.
	//
	// * The userID field must not contain any personally identifiable
	// information of the user, for example, name, personal identification
	// numbers, or other end user personal information.
	//
	// * If you want a user to start a conversation on one device and continue on
	// another device, use a user-specific identifier.
	//
	// * If you want the same user to be able to have two independent
	// conversations on two different devices, choose a device-specific
	// identifier.
	//
	// * A user can't have two independent conversations with two different
	// versions of the same bot. For example, a user can't have a conversation
	// with the PROD and BETA versions of the same bot. If you anticipate that a
	// user will need to have conversations with two different versions, for
	// example, while testing, include the bot alias in the user ID to separate
	// the two conversations.
	//
	// This member is required.
	UserId *string

	// Request-specific information passed between Amazon Lex and a client
	// application. The namespace x-amz-lex: is reserved for special attributes.
	// Don't create any request attributes with the prefix x-amz-lex:. For more
	// information, see Setting Request Attributes
	// (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-request-attribs).
	RequestAttributes map[string]*string

	// Application-specific information passed between Amazon Lex and a client
	// application. For more information, see Setting Session Attributes
	// (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-session-attribs).
	SessionAttributes map[string]*string
}

type PostTextOutput struct {

	// One to four alternative intents that may be applicable to the user's
	// intent. Each alternative includes a score that indicates how confident
	// Amazon Lex is that the intent matches the user's intent. The intents are
	// sorted by the confidence score.
	AlternativeIntents []*types.PredictedIntent

	// The version of the bot that responded to the conversation. You can use
	// this information to help determine if one version of a bot is performing
	// better than another version. If you have enabled the new natural language
	// understanding (NLU) model, you can use this to determine if the
	// improvement is due to changes to the bot or changes to the NLU. For more
	// information about enabling the new NLU, see the enableModelImprovements
	// (https://docs.aws.amazon.com/lex/latest/dg/API_PutBot.html#lex-PutBot-request-enableModelImprovements)
	// parameter of the PutBot operation.
	BotVersion *string

	// Identifies the current state of the user interaction. Amazon Lex returns
	// one of the following values as dialogState. The client can optionally use
	// this information to customize the user interface.
	//
	// * ElicitIntent - Amazon Lex wants to elicit user intent. For example, a
	// user might utter an intent ("I want to order a pizza"). If Amazon Lex
	// cannot infer the user intent from this utterance, it will return this
	// dialogState.
	//
	// * ConfirmIntent - Amazon Lex is expecting a "yes" or "no" response. For
	// example, Amazon Lex wants user confirmation before fulfilling an intent.
	// Instead of a simple "yes" or "no," a user might respond with additional
	// information. For example, "yes, but make it thick crust pizza" or "no, I
	// want to order a drink". Amazon Lex can process such additional information
	// (in these examples, update the crust type slot value, or change intent
	// from OrderPizza to OrderDrink).
	//
	// * ElicitSlot - Amazon Lex is expecting a slot value for the current
	// intent. For example, suppose that in the response Amazon Lex sends this
	// message: "What size pizza would you like?". A user might reply with the
	// slot value (e.g., "medium"). The user might also provide additional
	// information in the response (e.g., "medium thick crust pizza"). Amazon Lex
	// can process such additional information appropriately.
	//
	// * Fulfilled - Conveys that the Lambda function configured for the intent
	// has successfully fulfilled the intent.
	//
	// * ReadyForFulfillment - Conveys that the client has to fulfill the intent.
	//
	// * Failed - Conveys that the conversation with the user failed. This can
	// happen for various reasons, including that the user did not provide an
	// appropriate response to prompts from the service (you can configure how
	// many times Amazon Lex can prompt a user for specific information), or that
	// the Lambda function failed to fulfill the intent.
	DialogState types.DialogState

	// The current user intent that Amazon Lex is aware of.
	IntentName *string

	// The message to convey to the user. The message can come from the bot's
	// configuration or from a Lambda function. If the intent is not configured
	// with a Lambda function, or if the Lambda function returned Delegate as the
	// dialogAction.type in its response, Amazon Lex decides on the next course
	// of action and selects an appropriate message from the bot's configuration
	// based on the current interaction context. For example, if Amazon Lex isn't
	// able to understand user input, it uses a clarification prompt message.
	// When you create an intent, you can assign messages to groups. When
	// messages are assigned to groups, Amazon Lex returns one message from each
	// group in the response. The message field is an escaped JSON string
	// containing the messages. For more information about the structure of the
	// JSON string returned, see msg-prompts-formats. If the Lambda function
	// returns a message, Amazon Lex passes it to the client in its response.
	Message *string

	// The format of the response message. One of the following values:
	//
	// * PlainText - The message contains plain UTF-8 text.
	//
	// * CustomPayload - The message is a custom format defined by the Lambda
	// function.
	//
	// * SSML - The message contains text formatted for voice output.
	//
	// * Composite - The message contains an escaped JSON object containing one
	// or more messages from the groups that messages were assigned to when the
	// intent was created.
	MessageFormat types.MessageFormatType

	// Provides a score that indicates how confident Amazon Lex is that the
	// returned intent is the one that matches the user's intent. The score is
	// between 0.0 and 1.0. For more information, see Confidence Scores
	// (https://docs.aws.amazon.com/lex/latest/dg/confidence-scores.html). The
	// score is a relative score, not an absolute score. The score may change
	// based on improvements to the Amazon Lex natural language understanding
	// (NLU) model.
	NluIntentConfidence *types.IntentConfidence

	// Represents the options that the user has to respond to the current
	// prompt. Response Card can come from the bot configuration (in the Amazon
	// Lex console, choose the settings button next to a slot) or from a code
	// hook (Lambda function).
	ResponseCard *types.ResponseCard

	// The sentiment expressed in an utterance. When the bot is configured to
	// send utterances to Amazon Comprehend for sentiment analysis, this field
	// contains the result of the analysis.
	SentimentResponse *types.SentimentResponse

	// A map of key-value pairs representing the session-specific context
	// information.
	SessionAttributes map[string]*string

	// A unique identifier for the session.
	SessionId *string

	// If the dialogState value is ElicitSlot, returns the name of the slot for
	// which Amazon Lex is eliciting a value.
	SlotToElicit *string

	// The intent slots that Amazon Lex detected from the user input in the
	// conversation. Amazon Lex creates a resolution list containing likely
	// values for a slot. The value that it returns is determined by the
	// valueSelectionStrategy selected when the slot type was created or updated.
	// If valueSelectionStrategy is set to ORIGINAL_VALUE, the value provided by
	// the user is returned, if the user value is similar to the slot values. If
	// valueSelectionStrategy is set to TOP_RESOLUTION, Amazon Lex returns the
	// first value in the resolution list or, if there is no resolution list,
	// null. If you don't specify a valueSelectionStrategy, the default is
	// ORIGINAL_VALUE.
	Slots map[string]*string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

func addOperationPostTextMiddlewares(stack *middleware.Stack, options Options) (err error) {
	err = stack.Serialize.Add(&awsRestjson1_serializeOpPostText{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpPostText{}, middleware.After)
	if err != nil {
		return err
	}
	awsmiddleware.AddRequestInvocationIDMiddleware(stack)
	smithyhttp.AddContentLengthMiddleware(stack)
	addResolveEndpointMiddleware(stack, options)
	v4.AddComputePayloadSHA256Middleware(stack)
	addRetryMiddlewares(stack, options)
	addHTTPSignerV4Middleware(stack, options)
	awsmiddleware.AddAttemptClockSkewMiddleware(stack)
	addClientUserAgent(stack)
	smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
	smithyhttp.AddCloseResponseBodyMiddleware(stack)
	addOpPostTextValidationMiddleware(stack)
	stack.Initialize.Add(newServiceMetadataMiddleware_opPostText(options.Region), middleware.Before)
	addRequestIDRetrieverMiddleware(stack)
	addResponseErrorMiddleware(stack)
	return nil
}

func newServiceMetadataMiddleware_opPostText(region string) awsmiddleware.RegisterServiceMetadata {
	return awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		SigningName:   "lex",
		OperationName: "PostText",
	}
}
{ return nil, err }
conditional_block
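Client code usually branches on the returned dialogState. A sketch of that branching; the types.DialogState* constant names follow smithy-go's usual enum codegen pattern and should be checked against the generated types package:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/lexruntimeservice"
	"github.com/aws/aws-sdk-go-v2/service/lexruntimeservice/types"
)

// handleTurn decides what the client should do after one PostText round trip.
func handleTurn(out *lexruntimeservice.PostTextOutput) {
	msg := ""
	if out.Message != nil {
		msg = *out.Message
	}
	switch out.DialogState {
	case types.DialogStateElicitIntent, types.DialogStateElicitSlot, types.DialogStateConfirmIntent:
		// The conversation continues: show the message (and ResponseCard,
		// if any) and send the user's reply in the next PostText call.
		fmt.Println(msg)
	case types.DialogStateReadyForFulfillment:
		// The client is responsible for fulfilling the intent itself,
		// using out.IntentName and out.Slots.
		fmt.Println("fulfill intent with slots:", out.Slots)
	case types.DialogStateFulfilled:
		fmt.Println("done:", msg)
	case types.DialogStateFailed:
		fmt.Println("conversation failed")
	}
}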
controller.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
__author__  = "Kevin Bedin, Clement Bichat, Aurelien Grenier"
__version__ = "1.0.1"
__date__    = "2020-01-20"
__status__  = "Development"
"""

"""
Context:
    - Ulysse Unmanned Surface Vehicle
    - Used with ROS and the Mavros package
Goal:
    - When a warning is received on the /warning topic, a waypoint command is
      sent to the /mavros/mission/set_current service so that the survey line
      is run again.
    - Diagnostics
Documentation:
    - http://wiki.ros.org/mavros_msgs
"""

import rospy
import rospkg
from std_msgs.msg import Int16
from sensor_msgs.msg import BatteryState
from mavros_msgs.msg import WaypointList, State
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
import mavros_msgs.srv
import threading


class Controller(object):
    def __init__(self, battery_min=20):
        self.diag_pub = rospy.Publisher("/diagnostics", DiagnosticArray, queue_size=1)
        self.nav_pub = rospy.Publisher("/navigation/line_status", KeyValue, queue_size=1)
        self.battery_sub = rospy.Subscriber("/mavros/battery", BatteryState, self.battery_callback)
        self.state_sub = rospy.Subscriber("/mavros/state", State, self.state_callback)
        self.waypoints_sub = rospy.Subscriber("/mavros/mission/waypoints", WaypointList, self.waypoints_callback)
        # Keep the Subscriber handle separate from the warning value: storing
        # the Subscriber in self.warning would let warning_callback clobber it.
        self.warning_sub = rospy.Subscriber("/warning", Int16, self.warning_callback)
        self.warning = 0

        self.waypoints_list = []
        self.last_waypoint_list = []
        self.warning_list = []
        self.last_waypoint_dictionnary = {}
        self.nbr_reg = 0   # Number of regular lines
        self.nbr_trav = 0  # Number of transverse lines
        self.battery_min = battery_min
        self.wp_enregistre = 0
        self.wp_number = 0
        self.current_wp = 0
        self.last_waypoint = 0
        self.waypoint_warning = 0
        self.arming = "Disarmed"  # Armed or Disarmed
        self.mode = "HOLD"        # HOLD, LOITER, AUTO, MANUAL

    def battery_callback(self, data):
        """ Reads the battery voltage.
        Input:
            data : sensor_msgs BatteryState message
        """
        diagnostics = DiagnosticArray()
        diagnostics.status.append(DiagnosticStatus(level=int(data.voltage < self.battery_min),
                                                   name="controller/battery_level/Level",
                                                   message="{0:.2f}".format(data.voltage)))
        diagnostics.header.stamp = rospy.Time.now()
        self.diag_pub.publish(diagnostics)
        # voltage/current/charge/capacity/design_capacity/percentage...
        # more information: http://docs.ros.org/melodic/api/sensor_msgs/html/msg/BatteryState.html

    def state_callback(self, data):
        """ Reads the vehicle state:
            - Armed or Disarmed, and mode (Hold, Manual, Loiter...)
            - Publishes diagnostics
        Input:
            data : mavros_msgs State message
        """
        self.arming = "Armed" if data.armed else "Disarmed"
        self.mode = data.mode
        diagnostics = DiagnosticArray()
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Arming",
                                                   message=self.arming))
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Mode",
                                                   message=self.mode))
        diagnostics.header.stamp = rospy.Time.now()
        self.diag_pub.publish(diagnostics)

    def waypoints_callback(self, data):
        """ Called on every waypoint change:
            - Detects line starts and ends, publishes on /navigation/line_status
            - Records the end-of-line waypoints
            - For every new line, records the line type and the number of its
              first waypoint in a dictionary
            - Publishes diagnostics
        Input:
            data : mavros_msgs WaypointList message
                - data.waypoints   : list of the mission waypoints
                - data.current_seq : current waypoint
        """
        rospy.loginfo("New waypoint: " + str(data.current_seq))
        rospy.loginfo("Recorded end-of-line waypoints: " + str(self.last_waypoint_list))
        self.wp_number = len(data.waypoints)
        self.waypoints_list = data.waypoints
        # ------------------ End of line ------------------
        if data.waypoints[data.current_seq].param1 == 1:
            if data.waypoints[self.current_wp].param1 == 0:
                # Record the last waypoint of the line
                self.last_waypoint = self.current_wp
                rospy.loginfo("End of line")
                rospy.loginfo("Recorded end-of-line waypoint " + str(self.last_waypoint))
                self.last_waypoint_list.append(self.last_waypoint)
                nav_msg = KeyValue()
                nav_msg.key = "End"
                if data.waypoints[data.current_seq].param2 == 0:
                    nav_msg.value = "Reg"
                else:
                    nav_msg.value = "Trav"
                self.nav_pub.publish(nav_msg)
        # ------------------ Start of line ------------------
        elif data.waypoints[data.current_seq].param1 == 0:
            if data.waypoints[self.current_wp].param1 == 1:
                rospy.loginfo("Start of line")
                nav_msg = KeyValue()
                nav_msg.key = "Start"
                if data.waypoints[data.current_seq].param2 == 0:
                    nav_msg.value = "Reg"
                    self.nbr_reg += 1
                    ligne = "Reg" + str(self.nbr_reg)
                else:
                    nav_msg.value = "Trav"
                    self.nbr_trav += 1
                    ligne = "Trav" + str(self.nbr_trav)
                self.last_waypoint_dictionnary[ligne] = data.current_seq
                self.nav_pub.publish(nav_msg)
        self.current_wp = data.current_seq
        # --------------- Publish diagnostics ---------------
        diagnostics = DiagnosticArray()
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Number",
                                                   message=str(self.wp_number)))
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Current",
                                                   message=str(self.current_wp)))
        diagnostics.header.stamp = rospy.Time.now()
        self.diag_pub.publish(diagnostics)

    def warning_callback(self
        """ Callback called when a message is received on /warning
            - Calls wpt_chang() or stop()
        Input:
            data : Int16
        """
        self.warning = data.data
        rospy.logwarn("Warning, type " + str(self.warning))
        if self.warning != -1:
            self.waypoint_warning = self.current_wp
            rospy.logwarn("Warning: going back, new waypoint: " + str(self.last_waypoint))
            self.wpt_chang()
        else:
            rospy.logwarn("Switching to HOLD mode")
            self.stop()

    def wpt_chang(self):
        """ When a warning is received, sends the correction waypoint to the
        /mavros/mission/set_current service. The correction waypoint is the
        last one of the previous line. Resets the warning variable.
        """
        if self.waypoints_list[self.waypoint_warning].param1 == 0:
            if len(self.last_waypoint_list) == 0:
                # Warning on the first straight line: restart the mission from 0
                self.wpt_correction = 0
            else:
                # Warning on a straight line: go back to the start of the previous turn
                self.wpt_correction = self.last_waypoint_list[-1]
        else:
            if len(self.last_waypoint_list) == 1:
                # Warning in the first turn: restart the mission from 0
                self.wpt_correction = 0
            else:
                # Warning in a turn: go back to the start of the previous turn
                self.wpt_correction = self.last_waypoint_list[-2]
        rospy.wait_for_service('/mavros/mission/set_current')
        try:
            current_wpt = rospy.ServiceProxy('/mavros/mission/set_current',
                                             mavros_msgs.srv.WaypointSetCurrent)
            wpt = current_wpt(wp_seq=self.wpt_correction)
            rospy.loginfo("New correction waypoint: " + str(self.wpt_correction))
            self.warning = 0
        except rospy.ServiceException as e:
            rospy.logwarn("Service call failed: %s" % e)

    def stop(self):
        """ When a warning is received, asks the /mavros/set_mode service to
        switch to station keeping (HOLD) mode.
        """
        rospy.wait_for_service('/mavros/set_mode')  # timeout?
        try:
            mode = rospy.ServiceProxy('/mavros/set_mode', mavros_msgs.srv.SetMode)
            mode_ = mode(custom_mode="HOLD")
            rospy.loginfo("HOLD mode enabled")
            self.warning = 0
        except rospy.ServiceException as e:
            rospy.logwarn("Service call failed: %s" % e)
        # rosservice call /mavros/set_mode "base_mode: 0
        # custom_mode: 'HOLD'"

    def diagnostic_publisher(self):
        """ Publishes diagnostics:
            - current waypoint
            - state
            - ...
        Run in a thread to get a regular publication rate.
        """
        while not rospy.is_shutdown():
            # Build a fresh array each cycle; appending to a shared one would
            # make the message grow without bound.
            diagnostics = DiagnosticArray()
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Number",
                                                       message=str(self.wp_number)))
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Current",
                                                       message=str(self.current_wp)))
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Mode",
                                                       message=self.mode))
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Arming",
                                                       message=self.arming))
            diagnostics.header.stamp = rospy.Time.now()
            self.diag_pub.publish(diagnostics)
            rospy.sleep(0.5)


if __name__ == '__main__':
    try:
        rospy.init_node("Controller", anonymous=False)
        controller = Controller(battery_min=12)
        # Publish the diagnostic messages at a regular rate
        diag_thread = threading.Thread(target=controller.diagnostic_publisher)
        diag_thread.start()
        # rospy.spin() blocks until shutdown; no extra loop is needed.
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
, data):
identifier_name
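The controller above is driven by Int16 messages on /warning. A throwaway publisher is enough to exercise both branches of warning_callback; a minimal sketch (the node name is arbitrary):

#!/usr/bin/env python
import rospy
from std_msgs.msg import Int16

if __name__ == '__main__':
    rospy.init_node('warning_test', anonymous=True)
    pub = rospy.Publisher('/warning', Int16, queue_size=1)
    rospy.sleep(1.0)                 # let the subscriber connect
    pub.publish(Int16(data=1))       # any value != -1 triggers wpt_chang()
    rospy.sleep(2.0)
    pub.publish(Int16(data=-1))      # -1 triggers stop() -> HOLD mode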
controller.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
__author__  = "Kevin Bedin, Clement Bichat, Aurelien Grenier"
__version__ = "1.0.1"
__date__    = "2020-01-20"
__status__  = "Development"
"""

"""
Context:
    - Ulysse Unmanned Surface Vehicle
    - Used with ROS and the Mavros package
Goal:
    - When a warning is received on the /warning topic, a waypoint command is
      sent to the /mavros/mission/set_current service so that the survey line
      is run again.
    - Diagnostics
Documentation:
    - http://wiki.ros.org/mavros_msgs
"""

import rospy
import rospkg
from std_msgs.msg import Int16
from sensor_msgs.msg import BatteryState
from mavros_msgs.msg import WaypointList, State
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
import mavros_msgs.srv
import threading


class Controller(object):
    def __init__(self, battery_min=20):
        self.diag_pub = rospy.Publisher("/diagnostics", DiagnosticArray, queue_size=1)
        self.nav_pub = rospy.Publisher("/navigation/line_status", KeyValue, queue_size=1)
        self.battery_sub = rospy.Subscriber("/mavros/battery", BatteryState, self.battery_callback)
        self.state_sub = rospy.Subscriber("/mavros/state", State, self.state_callback)
        self.waypoints_sub = rospy.Subscriber("/mavros/mission/waypoints", WaypointList, self.waypoints_callback)
        # Keep the Subscriber handle separate from the warning value: storing
        # the Subscriber in self.warning would let warning_callback clobber it.
        self.warning_sub = rospy.Subscriber("/warning", Int16, self.warning_callback)
        self.warning = 0

        self.waypoints_list = []
        self.last_waypoint_list = []
        self.warning_list = []
        self.last_waypoint_dictionnary = {}
        self.nbr_reg = 0   # Number of regular lines
        self.nbr_trav = 0  # Number of transverse lines
        self.battery_min = battery_min
        self.wp_enregistre = 0
        self.wp_number = 0
        self.current_wp = 0
        self.last_waypoint = 0
        self.waypoint_warning = 0
        self.arming = "Disarmed"  # Armed or Disarmed
        self.mode = "HOLD"        # HOLD, LOITER, AUTO, MANUAL

    def battery_callback(self, data):
        """ Reads the battery voltage.
        Input:
            data : sensor_msgs BatteryState message
        """
        diagnostics = DiagnosticArray()
        diagnostics.status.append(DiagnosticStatus(level=int(data.voltage < self.battery_min),
                                                   name="controller/battery_level/Level",
                                                   message="{0:.2f}".format(data.voltage)))
        diagnostics.header.stamp = rospy.Time.now()
        self.diag_pub.publish(diagnostics)
        # voltage/current/charge/capacity/design_capacity/percentage...
        # more information: http://docs.ros.org/melodic/api/sensor_msgs/html/msg/BatteryState.html

    def state_callback(self, data):
        """ Reads the vehicle state:
            - Armed or Disarmed, and mode (Hold, Manual, Loiter...)
            - Publishes diagnostics
        Input:
            data : mavros_msgs State message
        """
        self.arming = "Armed" if data.armed else "Disarmed"
        self.mode = data.mode
        diagnostics = DiagnosticArray()
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Arming",
                                                   message=self.arming))
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Mode",
                                                   message=self.mode))
        diagnostics.header.stamp = rospy.Time.now()
        self.diag_pub.publish(diagnostics)

    def waypoints_callback(self, data):
        """ Called on every waypoint change:
            - Detects line starts and ends, publishes on /navigation/line_status
            - Records the end-of-line waypoints
            - For every new line, records the line type and the number of its
              first waypoint in a dictionary
            - Publishes diagnostics
        Input:
            data : mavros_msgs WaypointList message
                - data.waypoints   : list of the mission waypoints
                - data.current_seq : current waypoint
        """
        rospy.loginfo("New waypoint: " + str(data.current_seq))
        rospy.loginfo("Recorded end-of-line waypoints: " + str(self.last_waypoint_list))
        self.wp_number = len(data.waypoints)
        self.waypoints_list = data.waypoints
        # ------------------ End of line ------------------
        if data.waypoints[data.current_seq].param1 == 1:
            if data.waypoints[self.current_wp].param1 == 0:
                # Record the last waypoint of the line
                self.last_waypoint = self.current_wp
                rospy.loginfo("End of line")
                rospy.loginfo("Recorded end-of-line waypoint " + str(self.last_waypoint))
                self.last_waypoint_list.append(self.last_waypoint)
                nav_msg = KeyValue()
                nav_msg.key = "End"
                if data.waypoints[data.current_seq].param2 == 0:
                    nav_msg.value = "Reg"
:
                    nav_msg.value = "Trav"
                self.nav_pub.publish(nav_msg)
        # ------------------ Start of line ------------------
        elif data.waypoints[data.current_seq].param1 == 0:
            if data.waypoints[self.current_wp].param1 == 1:
                rospy.loginfo("Start of line")
                nav_msg = KeyValue()
                nav_msg.key = "Start"
                if data.waypoints[data.current_seq].param2 == 0:
                    nav_msg.value = "Reg"
                    self.nbr_reg += 1
                    ligne = "Reg" + str(self.nbr_reg)
                else:
                    nav_msg.value = "Trav"
                    self.nbr_trav += 1
                    ligne = "Trav" + str(self.nbr_trav)
                self.last_waypoint_dictionnary[ligne] = data.current_seq
                self.nav_pub.publish(nav_msg)
        self.current_wp = data.current_seq
        # --------------- Publish diagnostics ---------------
        diagnostics = DiagnosticArray()
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Number",
                                                   message=str(self.wp_number)))
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Current",
                                                   message=str(self.current_wp)))
        diagnostics.header.stamp = rospy.Time.now()
        self.diag_pub.publish(diagnostics)

    def warning_callback(self, data):
        """ Callback called when a message is received on /warning
            - Calls wpt_chang() or stop()
        Input:
            data : Int16
        """
        self.warning = data.data
        rospy.logwarn("Warning, type " + str(self.warning))
        if self.warning != -1:
            self.waypoint_warning = self.current_wp
            rospy.logwarn("Warning: going back, new waypoint: " + str(self.last_waypoint))
            self.wpt_chang()
        else:
            rospy.logwarn("Switching to HOLD mode")
            self.stop()

    def wpt_chang(self):
        """ When a warning is received, sends the correction waypoint to the
        /mavros/mission/set_current service. The correction waypoint is the
        last one of the previous line. Resets the warning variable.
        """
        if self.waypoints_list[self.waypoint_warning].param1 == 0:
            if len(self.last_waypoint_list) == 0:
                # Warning on the first straight line: restart the mission from 0
                self.wpt_correction = 0
            else:
                # Warning on a straight line: go back to the start of the previous turn
                self.wpt_correction = self.last_waypoint_list[-1]
        else:
            if len(self.last_waypoint_list) == 1:
                # Warning in the first turn: restart the mission from 0
                self.wpt_correction = 0
            else:
                # Warning in a turn: go back to the start of the previous turn
                self.wpt_correction = self.last_waypoint_list[-2]
        rospy.wait_for_service('/mavros/mission/set_current')
        try:
            current_wpt = rospy.ServiceProxy('/mavros/mission/set_current',
                                             mavros_msgs.srv.WaypointSetCurrent)
            wpt = current_wpt(wp_seq=self.wpt_correction)
            rospy.loginfo("New correction waypoint: " + str(self.wpt_correction))
            self.warning = 0
        except rospy.ServiceException as e:
            rospy.logwarn("Service call failed: %s" % e)

    def stop(self):
        """ When a warning is received, asks the /mavros/set_mode service to
        switch to station keeping (HOLD) mode.
        """
        rospy.wait_for_service('/mavros/set_mode')  # timeout?
        try:
            mode = rospy.ServiceProxy('/mavros/set_mode', mavros_msgs.srv.SetMode)
            mode_ = mode(custom_mode="HOLD")
            rospy.loginfo("HOLD mode enabled")
            self.warning = 0
        except rospy.ServiceException as e:
            rospy.logwarn("Service call failed: %s" % e)
        # rosservice call /mavros/set_mode "base_mode: 0
        # custom_mode: 'HOLD'"

    def diagnostic_publisher(self):
        """ Publishes diagnostics:
            - current waypoint
            - state
            - ...
        Run in a thread to get a regular publication rate.
        """
        while not rospy.is_shutdown():
            # Build a fresh array each cycle; appending to a shared one would
            # make the message grow without bound.
            diagnostics = DiagnosticArray()
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Number",
                                                       message=str(self.wp_number)))
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Current",
                                                       message=str(self.current_wp)))
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Mode",
                                                       message=self.mode))
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Arming",
                                                       message=self.arming))
            diagnostics.header.stamp = rospy.Time.now()
            self.diag_pub.publish(diagnostics)
            rospy.sleep(0.5)


if __name__ == '__main__':
    try:
        rospy.init_node("Controller", anonymous=False)
        controller = Controller(battery_min=12)
        # Publish the diagnostic messages at a regular rate
        diag_thread = threading.Thread(target=controller.diagnostic_publisher)
        diag_thread.start()
        # rospy.spin() blocks until shutdown; no extra loop is needed.
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
else
conditional_block
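wpt_chang() reduces to a single call on /mavros/mission/set_current. Calling that service by hand is handy when debugging the mission logic; a minimal sketch using the same service type as the controller:

#!/usr/bin/env python
import rospy
import mavros_msgs.srv

def set_current_waypoint(seq):
    """Ask the autopilot to jump to mission item `seq`."""
    rospy.wait_for_service('/mavros/mission/set_current')
    proxy = rospy.ServiceProxy('/mavros/mission/set_current',
                               mavros_msgs.srv.WaypointSetCurrent)
    return proxy(wp_seq=seq).success

if __name__ == '__main__':
    rospy.init_node('set_current_test')
    print(set_current_waypoint(0))  # restart the mission from waypoint 0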
controller.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
__author__  = "Kevin Bedin, Clement Bichat, Aurelien Grenier"
__version__ = "1.0.1"
__date__    = "2020-01-20"
__status__  = "Development"
"""

"""
Context:
    - Ulysse Unmanned Surface Vehicle
    - Used with ROS and the Mavros package
Goal:
    - When a warning is received on the /warning topic, a waypoint command is
      sent to the /mavros/mission/set_current service so that the survey line
      is run again.
    - Diagnostics
Documentation:
    - http://wiki.ros.org/mavros_msgs
"""

import rospy
import rospkg
from std_msgs.msg import Int16
from sensor_msgs.msg import BatteryState
from mavros_msgs.msg import WaypointList, State
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
import mavros_msgs.srv
import threading


class Controller(object):
    def __init__(self, battery_min=20):
        self.diag_pub = rospy.Publisher("/diagnostics", DiagnosticArray, queue_size=1)
        self.nav_pub = rospy.Publisher("/navigation/line_status", KeyValue, queue_size=1)
        self.battery_sub = rospy.Subscriber("/mavros/battery", BatteryState, self.battery_callback)
        self.state_sub = rospy.Subscriber("/mavros/state", State, self.state_callback)
        self.waypoints_sub = rospy.Subscriber("/mavros/mission/waypoints", WaypointList, self.waypoints_callback)
        # Keep the Subscriber handle separate from the warning value: storing
        # the Subscriber in self.warning would let warning_callback clobber it.
        self.warning_sub = rospy.Subscriber("/warning", Int16, self.warning_callback)
        self.warning = 0

        self.waypoints_list = []
        self.last_waypoint_list = []
        self.warning_list = []
        self.last_waypoint_dictionnary = {}
        self.nbr_reg = 0   # Number of regular lines
        self.nbr_trav = 0  # Number of transverse lines
self.battery_min = battery_min
        self.wp_enregistre = 0
        self.wp_number = 0
        self.current_wp = 0
        self.last_waypoint = 0
        self.waypoint_warning = 0
        self.arming = "Disarmed"  # Armed or Disarmed
        self.mode = "HOLD"        # HOLD, LOITER, AUTO, MANUAL

    def battery_callback(self, data):
        """ Reads the battery voltage.
        Input:
            data : sensor_msgs BatteryState message
        """
        diagnostics = DiagnosticArray()
        diagnostics.status.append(DiagnosticStatus(level=int(data.voltage < self.battery_min),
                                                   name="controller/battery_level/Level",
                                                   message="{0:.2f}".format(data.voltage)))
        diagnostics.header.stamp = rospy.Time.now()
        self.diag_pub.publish(diagnostics)
        # voltage/current/charge/capacity/design_capacity/percentage...
        # more information: http://docs.ros.org/melodic/api/sensor_msgs/html/msg/BatteryState.html

    def state_callback(self, data):
        """ Reads the vehicle state:
            - Armed or Disarmed, and mode (Hold, Manual, Loiter...)
            - Publishes diagnostics
        Input:
            data : mavros_msgs State message
        """
        self.arming = "Armed" if data.armed else "Disarmed"
        self.mode = data.mode
        diagnostics = DiagnosticArray()
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Arming",
                                                   message=self.arming))
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Mode",
                                                   message=self.mode))
        diagnostics.header.stamp = rospy.Time.now()
        self.diag_pub.publish(diagnostics)

    def waypoints_callback(self, data):
        """ Called on every waypoint change:
            - Detects line starts and ends, publishes on /navigation/line_status
            - Records the end-of-line waypoints
            - For every new line, records the line type and the number of its
              first waypoint in a dictionary
            - Publishes diagnostics
        Input:
            data : mavros_msgs WaypointList message
                - data.waypoints   : list of the mission waypoints
                - data.current_seq : current waypoint
        """
        rospy.loginfo("New waypoint: " + str(data.current_seq))
        rospy.loginfo("Recorded end-of-line waypoints: " + str(self.last_waypoint_list))
        self.wp_number = len(data.waypoints)
        self.waypoints_list = data.waypoints
        # ------------------ End of line ------------------
        if data.waypoints[data.current_seq].param1 == 1:
            if data.waypoints[self.current_wp].param1 == 0:
                # Record the last waypoint of the line
                self.last_waypoint = self.current_wp
                rospy.loginfo("End of line")
                rospy.loginfo("Recorded end-of-line waypoint " + str(self.last_waypoint))
                self.last_waypoint_list.append(self.last_waypoint)
                nav_msg = KeyValue()
                nav_msg.key = "End"
                if data.waypoints[data.current_seq].param2 == 0:
                    nav_msg.value = "Reg"
                else:
                    nav_msg.value = "Trav"
                self.nav_pub.publish(nav_msg)
        # ------------------ Start of line ------------------
        elif data.waypoints[data.current_seq].param1 == 0:
            if data.waypoints[self.current_wp].param1 == 1:
                rospy.loginfo("Start of line")
                nav_msg = KeyValue()
                nav_msg.key = "Start"
                if data.waypoints[data.current_seq].param2 == 0:
                    nav_msg.value = "Reg"
                    self.nbr_reg += 1
                    ligne = "Reg" + str(self.nbr_reg)
                else:
                    nav_msg.value = "Trav"
                    self.nbr_trav += 1
                    ligne = "Trav" + str(self.nbr_trav)
                self.last_waypoint_dictionnary[ligne] = data.current_seq
                self.nav_pub.publish(nav_msg)
        self.current_wp = data.current_seq
        # --------------- Publish diagnostics ---------------
        diagnostics = DiagnosticArray()
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Number",
                                                   message=str(self.wp_number)))
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Current",
                                                   message=str(self.current_wp)))
        diagnostics.header.stamp = rospy.Time.now()
        self.diag_pub.publish(diagnostics)

    def warning_callback(self, data):
        """ Callback called when a message is received on /warning
            - Calls wpt_chang() or stop()
        Input:
            data : Int16
        """
        self.warning = data.data
        rospy.logwarn("Warning, type " + str(self.warning))
        if self.warning != -1:
            self.waypoint_warning = self.current_wp
            rospy.logwarn("Warning: going back, new waypoint: " + str(self.last_waypoint))
            self.wpt_chang()
        else:
            rospy.logwarn("Switching to HOLD mode")
            self.stop()

    def wpt_chang(self):
        """ When a warning is received, sends the correction waypoint to the
        /mavros/mission/set_current service. The correction waypoint is the
        last one of the previous line. Resets the warning variable.
        """
        if self.waypoints_list[self.waypoint_warning].param1 == 0:
            if len(self.last_waypoint_list) == 0:
                # Warning on the first straight line: restart the mission from 0
                self.wpt_correction = 0
            else:
                # Warning on a straight line: go back to the start of the previous turn
                self.wpt_correction = self.last_waypoint_list[-1]
        else:
            if len(self.last_waypoint_list) == 1:
                # Warning in the first turn: restart the mission from 0
                self.wpt_correction = 0
            else:
                # Warning in a turn: go back to the start of the previous turn
                self.wpt_correction = self.last_waypoint_list[-2]
        rospy.wait_for_service('/mavros/mission/set_current')
        try:
            current_wpt = rospy.ServiceProxy('/mavros/mission/set_current',
                                             mavros_msgs.srv.WaypointSetCurrent)
            wpt = current_wpt(wp_seq=self.wpt_correction)
            rospy.loginfo("New correction waypoint: " + str(self.wpt_correction))
            self.warning = 0
        except rospy.ServiceException as e:
            rospy.logwarn("Service call failed: %s" % e)

    def stop(self):
        """ When a warning is received, asks the /mavros/set_mode service to
        switch to station keeping (HOLD) mode.
        """
        rospy.wait_for_service('/mavros/set_mode')  # timeout?
        try:
            mode = rospy.ServiceProxy('/mavros/set_mode', mavros_msgs.srv.SetMode)
            mode_ = mode(custom_mode="HOLD")
            rospy.loginfo("HOLD mode enabled")
            self.warning = 0
        except rospy.ServiceException as e:
            rospy.logwarn("Service call failed: %s" % e)
        # rosservice call /mavros/set_mode "base_mode: 0
        # custom_mode: 'HOLD'"

    def diagnostic_publisher(self):
        """ Publishes diagnostics:
            - current waypoint
            - state
            - ...
        Run in a thread to get a regular publication rate.
        """
        while not rospy.is_shutdown():
            # Build a fresh array each cycle; appending to a shared one would
            # make the message grow without bound.
            diagnostics = DiagnosticArray()
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Number",
                                                       message=str(self.wp_number)))
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Current",
                                                       message=str(self.current_wp)))
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Mode",
                                                       message=self.mode))
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Arming",
                                                       message=self.arming))
            diagnostics.header.stamp = rospy.Time.now()
            self.diag_pub.publish(diagnostics)
            rospy.sleep(0.5)


if __name__ == '__main__':
    try:
        rospy.init_node("Controller", anonymous=False)
        controller = Controller(battery_min=12)
        # Publish the diagnostic messages at a regular rate
        diag_thread = threading.Thread(target=controller.diagnostic_publisher)
        diag_thread.start()
        # rospy.spin() blocks until shutdown; no extra loop is needed.
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
random_line_split
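diagnostic_publisher() is an instance of the standard /diagnostics pattern: build a DiagnosticArray, stamp it, publish, sleep. Stripped of the controller state, the pattern looks like this (the status name is illustrative):

#!/usr/bin/env python
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus

def heartbeat():
    pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size=1)
    while not rospy.is_shutdown():
        msg = DiagnosticArray()  # fresh array each cycle, or it grows forever
        msg.status.append(DiagnosticStatus(level=DiagnosticStatus.OK,
                                           name='example/heartbeat',
                                           message='alive'))
        msg.header.stamp = rospy.Time.now()
        pub.publish(msg)
        rospy.sleep(0.5)  # same 2 Hz rate as the controller

if __name__ == '__main__':
    rospy.init_node('diag_heartbeat')
    heartbeat()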
controller.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
__author__  = "Kevin Bedin, Clement Bichat, Aurelien Grenier"
__version__ = "1.0.1"
__date__    = "2020-01-20"
__status__  = "Development"
"""

"""
Context:
    - Ulysse Unmanned Surface Vehicle
    - Used with ROS and the Mavros package
Goal:
    - When a warning is received on the /warning topic, a waypoint command is
      sent to the /mavros/mission/set_current service so that the survey line
      is run again.
    - Diagnostics
Documentation:
    - http://wiki.ros.org/mavros_msgs
"""

import rospy
import rospkg
from std_msgs.msg import Int16
from sensor_msgs.msg import BatteryState
from mavros_msgs.msg import WaypointList, State
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
import mavros_msgs.srv
import threading


class Controller(object):
    def __init__(self, battery_min=20):
        self.diag_pub = rospy.Publisher("/diagnostics", DiagnosticArray, queue_size=1)
        self.nav_pub = rospy.Publisher("/navigation/line_status", KeyValue, queue_size=1)
        self.battery_sub = rospy.Subscriber("/mavros/battery", BatteryState, self.battery_callback)
        self.state_sub = rospy.Subscriber("/mavros/state", State, self.state_callback)
        self.waypoints_sub = rospy.Subscriber("/mavros/mission/waypoints", WaypointList, self.waypoints_callback)
        # Keep the Subscriber handle separate from the warning value: storing
        # the Subscriber in self.warning would let warning_callback clobber it.
        self.warning_sub = rospy.Subscriber("/warning", Int16, self.warning_callback)
        self.warning = 0

        self.waypoints_list = []
        self.last_waypoint_list = []
        self.warning_list = []
        self.last_waypoint_dictionnary = {}
        self.nbr_reg = 0   # Number of regular lines
        self.nbr_trav = 0  # Number of transverse lines
        self.battery_min = battery_min
        self.wp_enregistre = 0
        self.wp_number = 0
        self.current_wp = 0
        self.last_waypoint = 0
        self.waypoint_warning = 0
        self.arming = "Disarmed"  # Armed or Disarmed
        self.mode = "HOLD"        # HOLD, LOITER, AUTO, MANUAL

    def battery_callback(self, data):
        """ Reads the battery voltage.
        Input:
            data : sensor_msgs BatteryState message
        """
        diagnostics = DiagnosticArray()
        diagnostics.status.append(DiagnosticStatus(level=int(data.voltage < self.battery_min),
                                                   name="controller/battery_level/Level",
                                                   message="{0:.2f}".format(data.voltage)))
        diagnostics.header.stamp = rospy.Time.now()
        self.diag_pub.publish(diagnostics)
        # voltage/current/charge/capacity/design_capacity/percentage...
        # more information: http://docs.ros.org/melodic/api/sensor_msgs/html/msg/BatteryState.html

    def state_callback(self, data):
        """ Reads the vehicle state:
            - Armed or Disarmed, and mode (Hold, Manual, Loiter...)
            - Publishes diagnostics
        Input:
            data : mavros_msgs State message
        """
        self.arming = "Armed" if data.armed else "Disarmed"
        self.mode = data.mode
        diagnostics = DiagnosticArray()
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Arming",
                                                   message=self.arming))
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Mode",
                                                   message=self.mode))
        diagnostics.header.stamp = rospy.Time.now()
        self.diag_pub.publish(diagnostics)

    def waypoints_callback(self, data):
        """ Called on every waypoint change:
            - Detects line starts and ends, publishes on /navigation/line_status
            - Records the end-of-line waypoints
            - For every new line, records the line type and the number of its
              first waypoint in a dictionary
            - Publishes diagnostics
        Input:
            data : mavros_msgs WaypointList message
                - data.waypoints   : list of the mission waypoints
                - data.current_seq : current waypoint
        """
        rospy.loginfo("New waypoint: " + str(data.current_seq))
        rospy.loginfo("Recorded end-of-line waypoints: " + str(self.last_waypoint_list))
        self.wp_number = len(data.waypoints)
        self.waypoints_list = data.waypoints
        # ------------------ End of line ------------------
        if data.waypoints[data.current_seq].param1 == 1:
            if data.waypoints[self.current_wp].param1 == 0:
                # Record the last waypoint of the line
                self.last_waypoint = self.current_wp
                rospy.loginfo("End of line")
                rospy.loginfo("Recorded end-of-line waypoint " + str(self.last_waypoint))
                self.last_waypoint_list.append(self.last_waypoint)
                nav_msg = KeyValue()
                nav_msg.key = "End"
                if data.waypoints[data.current_seq].param2 == 0:
                    nav_msg.value = "Reg"
                else:
                    nav_msg.value = "Trav"
                self.nav_pub.publish(nav_msg)
        # ------------------ Start of line ------------------
        elif data.waypoints[data.current_seq].param1 == 0:
            if data.waypoints[self.current_wp].param1 == 1:
                rospy.loginfo("Start of line")
                nav_msg = KeyValue()
                nav_msg.key = "Start"
                if data.waypoints[data.current_seq].param2 == 0:
                    nav_msg.value = "Reg"
                    self.nbr_reg += 1
                    ligne = "Reg" + str(self.nbr_reg)
                else:
                    nav_msg.value = "Trav"
                    self.nbr_trav += 1
                    ligne = "Trav" + str(self.nbr_trav)
                self.last_waypoint_dictionnary[ligne] = data.current_seq
                self.nav_pub.publish(nav_msg)
        self.current_wp = data.current_seq
        # --------------- Publish diagnostics ---------------
        diagnostics = DiagnosticArray()
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Number",
                                                   message=str(self.wp_number)))
        diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Current",
                                                   message=str(self.current_wp)))
        diagnostics.header.stamp = rospy.Time.now()
        self.diag_pub.publish(diagnostics)

    def warning_callback(self, data):
        """ Callback c
:
        """ When a warning is received, sends the correction waypoint to the
        /mavros/mission/set_current service. The correction waypoint is the
        last one of the previous line. Resets the warning variable.
        """
        if self.waypoints_list[self.waypoint_warning].param1 == 0:
            if len(self.last_waypoint_list) == 0:
                # Warning on the first straight line: restart the mission from 0
                self.wpt_correction = 0
            else:
                # Warning on a straight line: go back to the start of the previous turn
                self.wpt_correction = self.last_waypoint_list[-1]
        else:
            if len(self.last_waypoint_list) == 1:
                # Warning in the first turn: restart the mission from 0
                self.wpt_correction = 0
            else:
                # Warning in a turn: go back to the start of the previous turn
                self.wpt_correction = self.last_waypoint_list[-2]
        rospy.wait_for_service('/mavros/mission/set_current')
        try:
            current_wpt = rospy.ServiceProxy('/mavros/mission/set_current',
                                             mavros_msgs.srv.WaypointSetCurrent)
            wpt = current_wpt(wp_seq=self.wpt_correction)
            rospy.loginfo("New correction waypoint: " + str(self.wpt_correction))
            self.warning = 0
        except rospy.ServiceException as e:
            rospy.logwarn("Service call failed: %s" % e)

    def stop(self):
        """ When a warning is received, asks the /mavros/set_mode service to
        switch to station keeping (HOLD) mode.
        """
        rospy.wait_for_service('/mavros/set_mode')  # timeout?
        try:
            mode = rospy.ServiceProxy('/mavros/set_mode', mavros_msgs.srv.SetMode)
            mode_ = mode(custom_mode="HOLD")
            rospy.loginfo("HOLD mode enabled")
            self.warning = 0
        except rospy.ServiceException as e:
            rospy.logwarn("Service call failed: %s" % e)
        # rosservice call /mavros/set_mode "base_mode: 0
        # custom_mode: 'HOLD'"

    def diagnostic_publisher(self):
        """ Publishes diagnostics:
            - current waypoint
            - state
            - ...
        Run in a thread to get a regular publication rate.
        """
        while not rospy.is_shutdown():
            # Build a fresh array each cycle; appending to a shared one would
            # make the message grow without bound.
            diagnostics = DiagnosticArray()
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Number",
                                                       message=str(self.wp_number)))
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/waypoints/Current",
                                                       message=str(self.current_wp)))
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Mode",
                                                       message=self.mode))
            diagnostics.status.append(DiagnosticStatus(level=0, name="controller/state/Arming",
                                                       message=self.arming))
            diagnostics.header.stamp = rospy.Time.now()
            self.diag_pub.publish(diagnostics)
            rospy.sleep(0.5)


if __name__ == '__main__':
    try:
        rospy.init_node("Controller", anonymous=False)
        controller = Controller(battery_min=12)
        # Publish the diagnostic messages at a regular rate
        diag_thread = threading.Thread(target=controller.diagnostic_publisher)
        diag_thread.start()
        # rospy.spin() blocks until shutdown; no extra loop is needed.
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
alled when a message is received on /warning
            - Calls wpt_chang() or stop()
        Input:
            data : Int16
        """
        self.warning = data.data
        rospy.logwarn("Warning, type " + str(self.warning))
        if self.warning != -1:
            self.waypoint_warning = self.current_wp
            rospy.logwarn("Warning: going back, new waypoint: " + str(self.last_waypoint))
            self.wpt_chang()
        else:
            rospy.logwarn("Switching to HOLD mode")
            self.stop()

    def wpt_chang(self)
identifier_body
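The waypoint-selection rule inside wpt_chang() is the part most worth testing, and it needs no ROS at all. A sketch of it pulled out as a pure function (the function name is ours; the rules mirror the code above):

def correction_waypoint(on_straight_line, line_end_waypoints):
    """Mission index to jump back to after a warning.

    on_straight_line   -- True when the warning waypoint has param1 == 0
    line_end_waypoints -- the controller's last_waypoint_list
    """
    if on_straight_line:
        # On a line: back to the end of the previous line, or restart.
        return line_end_waypoints[-1] if line_end_waypoints else 0
    # In a turn: back one full turn, or restart the mission.
    return line_end_waypoints[-2] if len(line_end_waypoints) > 1 else 0

assert correction_waypoint(True, []) == 0
assert correction_waypoint(True, [4, 9]) == 9
assert correction_waypoint(False, [4]) == 0
assert correction_waypoint(False, [4, 9]) == 4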
lfs.go
package lfs

import (
    "context"
    "io/ioutil"
    "log"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
    "time"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/network"
    "github.com/docker/docker/client"
    "github.com/docker/go-connections/nat"

    git "gopkg.in/libgit2/git2go.v26"

    "github.com/pkg/errors"
)

// Track runs `git lfs track` on the given filename.
// Since the git-lfs devs discourage using git-lfs as a library in Go
// projects, we just call the git-lfs CLI.
func Track(filename, repositoryLocation string) (string, error) {
    cmd := exec.Command("git-lfs", "track", filename)
    cmd.Dir = repositoryLocation
    out, err := cmd.Output()

    // Wait to ensure the .gitattributes file is up to date.
    // A monument to all my sins.
    time.Sleep(2 * time.Second)

    output := strings.TrimRight(string(out), "\n")
    return output, err
}

// AddAndCommitData adds and commits the data found at path.
func AddAndCommitData(path, msg string) (string, error) {
    changes, repositoryLocation, err := AddData(path)
    if err != nil {
        return "", err
    }
    var commitId string
    if changes {
        commitId, err = commit(repositoryLocation, msg)
        if err != nil {
            return "", err
        }
    }
    return commitId, nil
}

// Add adds a file to the index.
func Add(filename string) error {
    // Open the repository containing the file.
    repo, _, err := openContainingRepository(filename)
    if err != nil {
        return err
    }

    // Check if the file has been changed and stage it if it has.
    changed, err := fileChanged(repo, filename)
    if err != nil {
        return err
    }
    if changed {
        err := addToIndex(repo, filename)
        if err != nil {
            return err
        }
    }
    return nil
}

func openContainingRepository(filename string) (repo *git.Repository, repositoryLocation string, err error) {
    // Strip the filename from the path.
    path := filepath.Dir(filename)
    path, err = filepath.Abs(path)
    if err != nil {
        return nil, "", err
    }
    // Open the repository at path.
    return openRepository(path)
}

// AddAndCommit adds and commits a single file.
func AddAndCommit(filename, msg string) (string, error) {
    // Open the repository containing the file.
    repo, repositoryLocation, err := openContainingRepository(filename)
    if err != nil {
        return "", err
    }

    // Check if the file has been changed and commit it if it has.
    changed, err := fileChanged(repo, filename)
    if err != nil {
        return "", err
    }

    var commitId string
    if changed {
        err := addToIndex(repo, filename)
        if err != nil {
            return "", err
        }
        commitId, err = commit(repositoryLocation, msg)
        if err != nil {
            return "", err
        }
    } else {
        return "", errors.New("No changes. Nothing to commit")
    }
    return commitId, nil
}

// AddData adds the data at the path to the index. Returns true if there is
// anything to be committed.
func AddData(path string) (changes bool, repositoryLocation string, err error) {
    wd, err := os.Getwd()
    if err != nil {
        return false, "", err
    }
    defer os.Chdir(wd)

    repo, repositoryLocation, err := openRepository(path)
    if err != nil {
        return false, "", err
    }

    os.Chdir(repositoryLocation)

    dataPath, err := filepath.Rel(repositoryLocation, path)
    if err != nil {
        return false, "", err
    }

    // Ensure git-lfs tracks all files recursively by adding the ** pattern;
    // see the git PATTERN FORMAT description for more details.
    dataPattern := dataPath + "/**"
    gitAttr := ".gitattributes"

    // If the pattern already exists, don't rerun the track command.
    b, err := ioutil.ReadFile(gitAttr)
    if err != nil && !os.IsNotExist(err) {
        return false, "", err
    }
    if !strings.Contains(string(b), dataPattern) {
        output, err := Track(dataPattern, repositoryLocation)
        if err != nil {
            return false, "", errors.Wrap(err, "Could not track files using git-lfs: "+output)
        }
    }

    changed, err := fileChanged(repo, gitAttr)
    if err != nil {
        return false, "", err
    }
    if changed {
        err := addToIndex(repo, gitAttr)
        if err != nil {
            return false, "", err
        }
        changes = changed
    }

    changed = false
    err = filepath.Walk(dataPath, func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return errors.Wrap(err, "Could not add "+path+" to repository")
        }
        if info.IsDir() {
            return nil
        }
        changedFile, err := fileChanged(repo, path)
        if err != nil {
            return err
        }
        // Once one file has changed we know the data path must be re-added.
        if changedFile {
            changed = true
        }
        return nil
    })
    if err != nil {
        return false, "", err
    }

    if changed {
        output, err := add(dataPath, repositoryLocation)
        if err != nil {
            return false, "", errors.Wrap(err, "Could not add files "+output)
        }
    }

    // Don't clobber a .gitattributes change recorded above.
    changes = changes || changed
    return changes, repositoryLocation, nil
}

// addToIndex adds a path to the index.
func addToIndex(repo *git.Repository, path string) error {
    index, err := repo.Index()
    if err != nil {
        return err
    }
    err = index.AddByPath(path)
    if err != nil {
        return err
    }
    _, err = index.WriteTree()
    if err != nil {
        return err
    }
    return index.Write()
}

// popLastDirectory removes the last directory in a path and returns the result.
func popLastDirectory(path string) string {
    // Split the path into a list of dirs, /a/b/c --> [a,b,c], then remove
    // the last one and build a new path, --> /a/b.
    list := strings.Split(path, "/")
    list = list[0 : len(list)-1]
    return "/" + filepath.Join(list...)
}

// fileChanged returns true if the file is new, modified or deleted.
func fileChanged(repo *git.Repository, path string) (bool, error) {
    status, err := repo.StatusFile(path)
    if err != nil {
        return false, err
    }
    if status == git.StatusWtNew || status == git.StatusWtModified ||
        status == git.StatusWtDeleted {
        return true, nil
    }
    return false, nil
}

// commit commits staged changes.
func commit(path, msg string) (string, error) {
    repo, err := git.OpenRepository(path)
    if err != nil {
        return "", err
    }
    index, err := repo.Index()
    if err != nil {
        return "", err
    }
    treeId, err := index.WriteTree()
    if err != nil {
        return "", err
    }
    tree, err := repo.LookupTree(treeId)
    if err != nil {
        return "", err
    }
    err = index.Write()
    if err != nil {
        return "", err
    }

    sig := &git.Signature{
        Name:  "walrus",
        Email: "walrus@github.com/fjukstad/walrus",
        When:  time.Now(),
    }

    var commitId *git.Oid
    currentBranch, err := repo.Head()
    if err != nil {
        // No HEAD yet: this is the first commit in the repository.
        commitId, err = repo.CreateCommit("HEAD", sig, sig, msg, tree)
        if err != nil {
            return "", err
        }
    } else {
        currentTip, err := repo.LookupCommit(currentBranch.Target())
        if err != nil {
            return "", err
        }
        // Check the shadowed err here so a failed CreateCommit isn't swallowed.
        commitId, err = repo.CreateCommit("HEAD", sig, sig, msg, tree, currentTip)
        if err != nil {
            return "", err
        }
    }

    err = index.Write()
    if err != nil {
        return "", err
    }
    return commitId.String(), nil
}

// openRepository tries to open a git repository located at the given path. If
// none is found it traverses the directory tree upwards until it either finds
// a repository or hits the root. If no repository is found it initializes one
// in the current working directory.
func openRepository(path string) (repo *git.Repository, repositoryPath string, err error) {
    wd, err := os.Getwd()
    if err != nil {
        return nil, "", err
    }
    for {
        repo, err = git.OpenRepository(path)
        if err != nil {
            path = popLastDirectory(path)
            // Root hit.
            if path == "/" {
                path = wd
                log.Println("Output directory is not in a git repository. Creating one in " + path)
                repo, err = git.InitRepository(wd, false)
                if err != nil {
                    return nil, "", errors.Wrap(err, "Could not initialize git repository")
                }
                break
            }
        } else {
            break
        }
    }
    return repo, path, nil
}

// add runs `git add` on the given path.
// To speed up dev time for the prototype, use the exec pkg, not the git2go
// package, to add files. Future versions will get rid of this hacky way of
// doing things by creating the blobs, softlinks etc., but that's for later!
func add(path, repositoryLocation string) (string, error) {
    cmd := exec.Command("git", "add", path)
    cmd.Dir = repositoryLocation
    out, err := cmd.Output()
    output := strings.TrimRight(string(out), "\n")
    return output, err
}

// StartServer starts a git-lfs server in a Docker container.
func StartServer(mountDir string) error {
    c, err := client.NewEnvClient()
    if err != nil {
        return errors.Wrap(err, "Could not create Docker client")
    }

    image := "fjukstad/lfs-server"
    _, err = c.ImagePull(context.Background(), image, types.ImagePullOptions{})
    if err != nil {
        return errors.Wrap(err, "Could not pull image")
    }

    hostPath, err := filepath.Abs(mountDir)
    if err != nil {
        return errors.Wrap(err, "Could not create absolute git-lfs directory path")
    }
    bind := hostPath + ":/lfs"

    ps := make(nat.PortSet)
    ps["9999/tcp"] = struct{}{}

    pm := make(nat.PortMap)
    pm["9999/tcp"] = []nat.PortBinding{{HostIP: "0.0.0.0", HostPort: "9999"}}

    resp, err := c.ContainerCreate(context.Background(),
        &container.Config{Image: image, ExposedPorts: ps},
        &container.HostConfig{Binds: []string{bind}, PortBindings: pm},
        &network.NetworkingConfig{},
        "git-lfs-server")
    if err != nil || resp.ID == "" {
        return errors.Wrap(err, "Could not create git-lfs server container")
    }

    containerId := resp.ID
    return c.ContainerStart(context.Background(), containerId, types.ContainerStartOptions{})
}

// GetHead returns the head id of the repository found at hostpath.
func GetHead(hostpath string) (string, error) {
    repo, _, err := openRepository(hostpath)
    if err != nil {
        return "", errors.Wrap(err, "Could not open repository")
    }
    ref, err := repo.Head()
    if err != nil {
        return "", errors.Wrap(err, "Could not get head")
    }
    head := ref.Target()
    return head.String(), nil
}

// PrintDiff returns the diff stats between the given commit id and the index.
func PrintDiff(path, id string) (string, error) {
    path, err := filepath.Abs(path)
    if err != nil {
        return "", errors.Wrap(err, "Could not get absolute path of output directory")
    }
    repo, _, err := openRepository(path)
    if err != nil {
        return "", errors.Wrap(err, "Could not open repository")
    }
    index, err := repo.Index()
    if err != nil {
        return "", errors.Wrap(err, "Could not get index")
    }
    oid, err := git.NewOid(id)
    if err != nil {
        return "", errors.Wrap(err, "Could not create oid for id "+id)
    }
    commit, err := repo.LookupCommit(oid)
    if err != nil {
        return "", errors.Wrap(err, "Could not lookup commit id "+id)
    }
    oldTree, err := commit.Tree()
    if err != nil {
        return "", errors.Wrap(err, "Could not lookup tree")
    }
    diff, err := repo.DiffTreeToIndex(oldTree, index, &git.DiffOptions{})
    if err != nil {
        return "", errors.Wrap(err, "Could not diff tree to index")
    }
    stats, err := diff.Stats()
    if err != nil {
        return "", errors.Wrap(err, "Could not get diff stats")
    }
    return stats.String(git.DiffStatsFull, 80)
}

// Reset does a mixed reset of the repository at path to the given commit id.
func Reset(path, id string) error {
    path, err := filepath.Abs(path)
    if err != nil {
        return errors.Wrap(err, "Could not get absolute path of output directory")
    }
    repo, _, err := openRepository(path)
    if err != nil {
        return errors.Wrap(err, "Could not open repository")
    }
    oid, err := git.NewOid(id)
    if err != nil {
        return errors.Wrap(err, "Could not create oid for id "+id)
    }
    commit, err := repo.LookupCommit(oid)
    if err != nil {
        return errors.Wrap(err, "Could not lookup commit id "+id)
    }
    err = repo.ResetToCommit(commit, git.ResetMixed, &git.CheckoutOpts{})
    if err != nil {
        return errors.Wrap(err, "Could not reset to id "+id)
    }
    return nil
}
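Taken together, these exported helpers form the package's public surface. As a quick orientation, here is a minimal usage sketch; the import path is a guess inferred from the signature's email address (github.com/fjukstad/walrus), and the directory and commit message are made up for illustration:

package main

import (
    "fmt"
    "log"

    // Hypothetical import path; the real module path isn't shown in this file.
    "github.com/fjukstad/walrus/lfs"
)

func main() {
    // Stage and commit everything under ./output, then inspect the result.
    commitId, err := lfs.AddAndCommitData("output", "add pipeline output")
    if err != nil {
        log.Fatal(err)
    }
    head, err := lfs.GetHead("output")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("committed:", commitId, "HEAD:", head)

    // Show what has changed in the index since that commit.
    stats, err := lfs.PrintDiff("output", commitId)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(stats)
}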
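Since popLastDirectory does string surgery rather than calling filepath.Dir, its edge cases are worth pinning down. A small table-driven test sketch (my addition, not part of the original file) covering the "/" result that the openRepository loop depends on:

package lfs

import "testing"

// Table-driven check of popLastDirectory, including the root case: popping
// the last element of a single-component path must yield "/".
func TestPopLastDirectory(t *testing.T) {
    cases := []struct{ in, want string }{
        {"/a/b/c", "/a/b"},
        {"/a/b", "/a"},
        {"/a", "/"},
    }
    for _, c := range cases {
        if got := popLastDirectory(c.in); got != c.want {
            t.Errorf("popLastDirectory(%q) = %q, want %q", c.in, got, c.want)
        }
    }
}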
pip_test.go
package main

import (
    "encoding/json"
    "fmt"
    "io"
    "os"
    "os/exec"
    "path/filepath"
    "strconv"
    "testing"

    gofrogcmd "github.com/jfrog/gofrog/io"
    "github.com/jfrog/jfrog-cli-go/artifactory/commands/pip"
    piputils "github.com/jfrog/jfrog-cli-go/artifactory/utils/pip"
    "github.com/jfrog/jfrog-cli-go/inttestutils"
    "github.com/jfrog/jfrog-cli-go/utils/cliutils"
    "github.com/jfrog/jfrog-cli-go/utils/tests"
    "github.com/jfrog/jfrog-client-go/utils/io/fileutils"
)

type PipCmd struct {
    Command string
    Options []string
}

func TestPipInstall(t *testing.T) {
    // Init pip.
    initPipTest(t)

    // Init CLI without credential flags.
    artifactoryCli = tests.NewJfrogCli(execMain, "jfrog rt", "")

    // Add the virtual-environment path to 'PATH' so that all pip and python
    // commands execute inside the virtual-environment.
    pathValue := setPathEnvForPipInstall(t)
    if t.Failed() {
        t.FailNow()
    }
    defer os.Setenv("PATH", pathValue)

    // Check that the pip env is clean.
    validateEmptyPipEnv(t)

    // Populate cli config with the 'default' server.
    oldHomeDir, newHomeDir := prepareHomeDir(t)
    defer os.Setenv(cliutils.HomeDir, oldHomeDir)
    defer os.RemoveAll(newHomeDir)

    // Create test cases.
    allTests := []struct {
        name                 string
        project              string
        outputFolder         string
        moduleId             string
        args                 []string
        expectedDependencies int
        cleanAfterExecution  bool
    }{
        {"setuppy", "setuppyproject", "setuppy", "jfrog-python-example", []string{"pip-install", ".", "--no-cache-dir", "--force-reinstall", "--build-name=" + tests.PipBuildName}, 3, true},
        {"setuppy-verbose", "setuppyproject", "setuppy-verbose", "jfrog-python-example", []string{"pip-install", ".", "--no-cache-dir", "--force-reinstall", "-v", "--build-name=" + tests.PipBuildName}, 3, true},
        {"setuppy-with-module", "setuppyproject", "setuppy-with-module", "setuppy-with-module", []string{"pip-install", ".", "--no-cache-dir", "--force-reinstall", "--build-name=" + tests.PipBuildName, "--module=setuppy-with-module"}, 3, true},
        {"requirements", "requirementsproject", "requirements", tests.PipBuildName, []string{"pip-install", "-r", "requirements.txt", "--no-cache-dir", "--force-reinstall", "--build-name=" + tests.PipBuildName}, 5, true},
        {"requirements-verbose", "requirementsproject", "requirements-verbose", tests.PipBuildName, []string{"pip-install", "-r", "requirements.txt", "--no-cache-dir", "--force-reinstall", "-v", "--build-name=" + tests.PipBuildName}, 5, false},
        {"requirements-use-cache", "requirementsproject", "requirements-verbose", "requirements-verbose-use-cache", []string{"pip-install", "-r", "requirements.txt", "--module=requirements-verbose-use-cache", "--build-name=" + tests.PipBuildName}, 5, true},
    }

    // Run test cases.
    for buildNumber, test := range allTests {
        t.Run(test.name, func(t *testing.T) {
            testPipCmd(t, test.name, createPipProject(t, test.outputFolder, test.project), strconv.Itoa(buildNumber), test.moduleId, test.expectedDependencies, test.args)
            if test.cleanAfterExecution {
                // Cleanup.
                inttestutils.DeleteBuild(artifactoryDetails.Url, tests.PipBuildName, artHttpDetails)
                cleanPipTest(t, test.name)
            }
        })
    }
    cleanPipTest(t, "cleanup")
    tests.CleanFileSystem()
}

func testPipCmd(t *testing.T, outputFolder, projectPath, buildNumber, module string, expectedDependencies int, args []string) {
    wd, err := os.Getwd()
    if err != nil {
        t.Error(err)
    }
    err = os.Chdir(projectPath)
    if err != nil {
        t.Error(err)
    }
    defer os.Chdir(wd)

    args = append(args, "--build-number="+buildNumber)

    err = artifactoryCli.Exec(args...)
    if err != nil {
        t.Errorf("Failed executing pip-install command: %s", err.Error())
        cleanPipTest(t, outputFolder)
        return
    }

    artifactoryCli.Exec("bp", tests.PipBuildName, buildNumber)

    buildInfo := inttestutils.GetBuildInfo(artifactoryDetails.Url, tests.PipBuildName, buildNumber, t, artHttpDetails)
    if len(buildInfo.Modules) == 0 {
        t.Error("Pip build info was not generated correctly, no modules were created.")
    }
    if expectedDependencies != len(buildInfo.Modules[0].Dependencies) {
        t.Error("Incorrect number of dependencies found in the build-info, expected:", expectedDependencies, " Found:", len(buildInfo.Modules[0].Dependencies))
    }
    if module != buildInfo.Modules[0].Id {
        t.Error(fmt.Errorf("Expected module name %s, got %s", module, buildInfo.Modules[0].Id))
    }
}

func cleanPipTest(t *testing.T, outFolder string) {
    // Clean installed packages out of the pip environment.
    pipFreezeCmd := &PipCmd{Command: "freeze", Options: []string{"--local"}}
    out, err := gofrogcmd.RunCmdOutput(pipFreezeCmd)
    if err != nil {
        t.Fatal(err)
    }

    // If there are no packages to uninstall, return.
    if out == "" {
        return
    }

    // Save the freeze output to a file.
    freezeTarget, err := fileutils.CreateFilePath(tests.Temp, outFolder+"-freeze.txt")
    if err != nil {
        t.Error(err)
    }
    file, err := os.Create(freezeTarget)
    if err != nil {
        t.Error(err)
    }
    defer file.Close()
    _, err = file.Write([]byte(out))
    if err != nil {
        t.Error(err)
    }

    // Uninstall the frozen packages.
    pipUninstallCmd := &PipCmd{Command: "uninstall", Options: []string{"-y", "-r", freezeTarget}}
    err = gofrogcmd.RunCmd(pipUninstallCmd)
    if err != nil {
        t.Fatal(err)
    }
}

func createPipProject(t *testing.T, outFolder, projectName string) string {
    projectSrc := filepath.Join(filepath.FromSlash(tests.GetTestResourcesPath()), "pip", projectName)
    projectTarget := filepath.Join(tests.Out, outFolder+"-"+projectName)
    err := fileutils.CreateDirIfNotExist(projectTarget)
    if err != nil {
        t.Error(err)
    }

    // Copy the pip project files.
    err = fileutils.CopyDir(projectSrc, projectTarget, true)
    if err != nil {
        t.Error(err)
    }

    // Copy the pip-config file.
    configSrc := filepath.Join(filepath.FromSlash(tests.GetTestResourcesPath()), "pip", "pip.yaml")
    configTarget := filepath.Join(projectTarget, ".jfrog", "projects")
    tests.ReplaceTemplateVariables(configSrc, configTarget)

    return projectTarget
}

func initPipTest(t *testing.T) {
    if !*tests.TestPip {
        t.Skip("Skipping Pip test. To run Pip test add the '-test.pip=true' option.")
    }
    if !isRepoExist(tests.PypiRemoteRepo) {
        t.Error("Pypi test remote repository doesn't exist.")
    }
    if !isRepoExist(tests.PypiVirtualRepo) {
        t.Error("Pypi test virtual repository doesn't exist.")
    }
    if t.Failed() {
        t.FailNow()
    }
}

func setPathEnvForPipInstall(t *testing.T) string {
    // Keep the original value of 'PATH'.
    pathValue, exists := os.LookupEnv("PATH")
    if !exists {
        t.Fatal("Couldn't find PATH variable, failing pip tests.")
    }

    // Prepend the virtual-environment path.
    virtualEnvPath := *tests.PipVirtualEnv
    if virtualEnvPath != "" {
        var newPathValue string
        if cliutils.IsWindows() {
            newPathValue = fmt.Sprintf("%s;%s", virtualEnvPath, pathValue)
        } else {
            newPathValue = fmt.Sprintf("%s:%s", virtualEnvPath, pathValue)
        }
        err := os.Setenv("PATH", newPathValue)
        if err != nil {
            t.Fatal(err)
        }
    }

    // Return the original PATH value.
    return pathValue
}

// Ensure that the provided pip virtual-environment has no installed packages.
func validateEmptyPipEnv(t *testing.T) {
    pipFreezeCmd := &PipCmd{Command: "freeze", Options: []string{"--local"}}
    out, err := gofrogcmd.RunCmdOutput(pipFreezeCmd)
    if err != nil {
        t.Fatal(err)
    }
    if out != "" {
        t.Fatalf("Provided pip virtual-environment contains installed packages: %s\n. Please provide a clean environment.", out)
    }
}

// PipCmd implements the command interface expected by gofrogcmd.
func (pfc *PipCmd) GetCmd() *exec.Cmd {
    cmd := []string{"pip", pfc.Command}
    cmd = append(cmd, pfc.Options...)
    return exec.Command(cmd[0], cmd[1:]...)
}

func (pfc *PipCmd) GetEnv() map[string]string {
    return map[string]string{}
}

func (pfc *PipCmd) GetStdWriter() io.WriteCloser {
    return nil
}

func (pfc *PipCmd) GetErrWriter() io.WriteCloser {
    return nil
}

func TestPipDepsTree(t *testing.T) {
    initPipTest(t)

    // Add the virtual-environment path to 'PATH' so that all pip and python
    // commands execute inside the virtual-environment.
    pathValue := setPathEnvForPipInstall(t)
    if t.Failed() {
        t.FailNow()
    }
    defer os.Setenv("PATH", pathValue)

    // Check that the pip env is clean.
    validateEmptyPipEnv(t)

    // Populate cli config with the 'default' server.
    oldHomeDir, newHomeDir := prepareHomeDir(t)
    defer os.Setenv(cliutils.HomeDir, oldHomeDir)
    defer os.RemoveAll(newHomeDir)

    // Create test cases.
    allTests := []struct {
        name                 string
        project              string
        outputFolder         string
        moduleId             string
        args                 []string
        expectedDependencies int
        cleanAfterExecution  bool
    }{
        {"setuppy", "setuppyproject", "setuppy", "jfrog-python-example", []string{".", "--no-cache-dir", "--force-reinstall"}, 3, true},
        {"setuppy-verbose", "setuppyproject", "setuppy-verbose", "jfrog-python-example", []string{".", "--no-cache-dir", "--force-reinstall", "-v"}, 3, true},
        {"setuppy-with-module", "setuppyproject", "setuppy-with-module", "setuppy-with-module", []string{".", "--no-cache-dir", "--force-reinstall"}, 3, true},
        {"requirements", "requirementsproject", "requirements", tests.PipBuildName, []string{"-r", "requirements.txt", "--no-cache-dir", "--force-reinstall"}, 5, true},
        {"requirements-verbose", "requirementsproject", "requirements-verbose", tests.PipBuildName, []string{"-r", "requirements.txt", "--no-cache-dir", "--force-reinstall", "-v"}, 5, false},
        {"requirements-use-cache", "requirementsproject", "requirements-verbose", "requirements-verbose-use-cache", []string{"-r", "requirements.txt"}, 5, true},
    }

    // Run test cases.
    for _, test := range allTests {
        t.Run(test.name, func(t *testing.T) {
            testPipDepsTreeCmd(t, createPipProject(t, test.outputFolder, test.project), test.expectedDependencies, test.args)
            if test.cleanAfterExecution {
                // Cleanup.
                cleanPipTest(t, test.name)
            }
        })
    }
    cleanPipTest(t, "cleanup")
    tests.CleanFileSystem()
}

func testPipDepsTreeCmd(t *testing.T, projectPath string, expectedElements int, args []string) {
    wd, err := os.Getwd()
    if err != nil {
        t.Fatal(err)
    }
    err = os.Chdir(projectPath)
    if err != nil {
        t.Fatal(err)
    }
    defer os.Chdir(wd)

    // Get the pip configuration.
    pipConfig, err := piputils.GetPipConfiguration()
    if err != nil {
        t.Fatalf("Error occurred while attempting to read pip-configuration file: %s\n"+
            "Please run 'jfrog rt pip-config' command prior to running 'jfrog rt pip-deps-tree'.", err.Error())
    }

    // Set arg values.
    rtDetails, err := pipConfig.RtDetails()
    if err != nil {
        t.Fatal(err)
    }

    // Create and run the command.
    pipDepsTreeCmd := pip.NewPipDepTreeCommand()
    pipDepsTreeCmd.SetRtDetails(rtDetails).SetRepo(pipConfig.TargetRepo()).SetArgs(args)
    err = pipDepsTreeCmd.Run()
    if err != nil {
        t.Fatalf("Failed while executing pip-deps-tree command: %s", err)
    }

    // Check the resulting tree.
    treeJsonData, err := pipDepsTreeCmd.DepsTreeRoot.MarshalJSON()
    if err != nil {
        t.Fatalf("Failed parsing tree json: %s", err)
    }

    // Count dependencies.
    var depsTreeTest []DependenciesTreeTest
    err = json.Unmarshal(treeJsonData, &depsTreeTest)
    if err != nil {
        t.Error(err)
    }
    depsCount := countDependencies(depsTreeTest)
    if expectedElements != depsCount {
        t.Errorf("Incorrect number of dependencies found, expected: %d, found: %d", expectedElements, depsCount)
    }
}

type DependenciesTreeTest struct {
    Id                 string                 `json:"id,omitempty"`
    DirectDependencies []DependenciesTreeTest `json:"dependencies,omitempty"`
}

func countDependencies(allDeps []DependenciesTreeTest) int {
    depsMap := make(map[string]int)
    // Iterate over the dependencies, discovering more as we go.
    index := -1
    var currentDep string
    for {
        index++
        // Stop when every queued dependency has been visited.
        if len(allDeps) < index+1 {
            break
        }
        currentDep = allDeps[index].Id
        // Skip dependencies that were already resolved. Recording each id in
        // depsMap is what makes this check work and prevents an endless loop
        // on cyclic trees.
        if _, ok := depsMap[currentDep]; ok {
            continue
        }
        depsMap[currentDep] = index
        // Queue currentDep's direct dependencies for the count.
        allDeps = append(allDeps, allDeps[index].DirectDependencies...)
    }
    return index
}
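The counting logic is easier to see on a concrete tree. Here is a self-contained sketch (the JSON payload is invented for illustration) that unmarshals a hand-written tree of the same shape and counts it breadth-first, mirroring what countDependencies does:

package main

import (
    "encoding/json"
    "fmt"
)

// depNode has the same shape as DependenciesTreeTest in pip_test.go.
type depNode struct {
    Id                 string    `json:"id,omitempty"`
    DirectDependencies []depNode `json:"dependencies,omitempty"`
}

func main() {
    // Hypothetical tree: macholib depends on altgraph.
    data := []byte(`[{"id": "macholib", "dependencies": [{"id": "altgraph"}]}]`)
    var tree []depNode
    if err := json.Unmarshal(data, &tree); err != nil {
        panic(err)
    }
    // Breadth-first walk: visit a node, then queue its direct dependencies.
    count := 0
    queue := append([]depNode{}, tree...)
    for len(queue) > 0 {
        n := queue[0]
        queue = queue[1:]
        count++
        queue = append(queue, n.DirectDependencies...)
    }
    fmt.Println("dependencies:", count) // prints: dependencies: 2
}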
pip_test.go
package main import ( "encoding/json" "fmt" "io" "os" "os/exec" "path/filepath" "strconv" "testing" gofrogcmd "github.com/jfrog/gofrog/io" "github.com/jfrog/jfrog-cli-go/artifactory/commands/pip" piputils "github.com/jfrog/jfrog-cli-go/artifactory/utils/pip" "github.com/jfrog/jfrog-cli-go/inttestutils" "github.com/jfrog/jfrog-cli-go/utils/cliutils" "github.com/jfrog/jfrog-cli-go/utils/tests" "github.com/jfrog/jfrog-client-go/utils/io/fileutils" ) type PipCmd struct { Command string Options []string } func TestPipInstall(t *testing.T) { // Init pip. initPipTest(t) // Init CLI without credential flags. artifactoryCli = tests.NewJfrogCli(execMain, "jfrog rt", "") // Add virtual-environment path to 'PATH' for executing all pip and python commands inside the virtual-environment. pathValue := setPathEnvForPipInstall(t) if t.Failed() { t.FailNow() } defer os.Setenv("PATH", pathValue) // Check pip env is clean. validateEmptyPipEnv(t) // Populate cli config with 'default' server. oldHomeDir, newHomeDir := prepareHomeDir(t) defer os.Setenv(cliutils.HomeDir, oldHomeDir) defer os.RemoveAll(newHomeDir) // Create test cases. allTests := []struct { name string project string outputFolder string moduleId string args []string expectedDependencies int cleanAfterExecution bool }{ {"setuppy", "setuppyproject", "setuppy", "jfrog-python-example", []string{"pip-install", ".", "--no-cache-dir", "--force-reinstall", "--build-name=" + tests.PipBuildName}, 3, true}, {"setuppy-verbose", "setuppyproject", "setuppy-verbose", "jfrog-python-example", []string{"pip-install", ".", "--no-cache-dir", "--force-reinstall", "-v", "--build-name=" + tests.PipBuildName}, 3, true}, {"setuppy-with-module", "setuppyproject", "setuppy-with-module", "setuppy-with-module", []string{"pip-install", ".", "--no-cache-dir", "--force-reinstall", "--build-name=" + tests.PipBuildName, "--module=setuppy-with-module"}, 3, true}, {"requirements", "requirementsproject", "requirements", tests.PipBuildName, []string{"pip-install", "-r", "requirements.txt", "--no-cache-dir", "--force-reinstall", "--build-name=" + tests.PipBuildName}, 5, true}, {"requirements-verbose", "requirementsproject", "requirements-verbose", tests.PipBuildName, []string{"pip-install", "-r", "requirements.txt", "--no-cache-dir", "--force-reinstall", "-v", "--build-name=" + tests.PipBuildName}, 5, false}, {"requirements-use-cache", "requirementsproject", "requirements-verbose", "requirements-verbose-use-cache", []string{"pip-install", "-r", "requirements.txt", "--module=requirements-verbose-use-cache", "--build-name=" + tests.PipBuildName}, 5, true}, } // Run test cases. for buildNumber, test := range allTests { t.Run(test.name, func(t *testing.T) { testPipCmd(t, test.name, createPipProject(t, test.outputFolder, test.project), strconv.Itoa(buildNumber), test.moduleId, test.expectedDependencies, test.args) if test.cleanAfterExecution { // cleanup inttestutils.DeleteBuild(artifactoryDetails.Url, tests.PipBuildName, artHttpDetails) cleanPipTest(t, test.name) } }) } cleanPipTest(t, "cleanup") tests.CleanFileSystem() } func testPipCmd(t *testing.T, outputFolder, projectPath, buildNumber, module string, expectedDependencies int, args []string) { wd, err := os.Getwd() if err != nil { t.Error(err) } err = os.Chdir(projectPath) if err != nil { t.Error(err) } defer os.Chdir(wd) args = append(args, "--build-number="+buildNumber) err = artifactoryCli.Exec(args...) 
if err != nil { t.Errorf("Failed executing pip-install command: %s", err.Error()) cleanPipTest(t, outputFolder) return } artifactoryCli.Exec("bp", tests.PipBuildName, buildNumber) buildInfo := inttestutils.GetBuildInfo(artifactoryDetails.Url, tests.PipBuildName, buildNumber, t, artHttpDetails) if buildInfo.Modules == nil || len(buildInfo.Modules) == 0 { t.Error("Pip build info was not generated correctly, no modules were created.") } if expectedDependencies != len(buildInfo.Modules[0].Dependencies) { t.Error("Incorrect number of artifacts found in the build-info, expected:", expectedDependencies, " Found:", len(buildInfo.Modules[0].Dependencies)) } if module != buildInfo.Modules[0].Id { t.Error(fmt.Errorf("Expected module name %s, got %s", module, buildInfo.Modules[0].Id)) } } func cleanPipTest(t *testing.T, outFolder string) { // Clean pip environment from installed packages. pipFreezeCmd := &PipCmd{Command: "freeze", Options: []string{"--local"}} out, err := gofrogcmd.RunCmdOutput(pipFreezeCmd) if err != nil { t.Fatal(err) } // If no packages to uninstall, return. if out == "" { return } // Save freeze output to file. freezeTarget, err := fileutils.CreateFilePath(tests.Temp, outFolder+"-freeze.txt") if err != nil { t.Error(err) } file, err := os.Create(freezeTarget) if err != nil { t.Error(err) } defer file.Close() _, err = file.Write([]byte(out)) if err != nil { t.Error(err) } // Delete freezed packages. pipUninstallCmd := &PipCmd{Command: "uninstall", Options: []string{"-y", "-r", freezeTarget}} err = gofrogcmd.RunCmd(pipUninstallCmd) if err != nil { t.Fatal(err) } } func createPipProject(t *testing.T, outFolder, projectName string) string { projectSrc := filepath.Join(filepath.FromSlash(tests.GetTestResourcesPath()), "pip", projectName) projectTarget := filepath.Join(tests.Out, outFolder+"-"+projectName) err := fileutils.CreateDirIfNotExist(projectTarget) if err != nil { t.Error(err) } // Copy pip-installation file. err = fileutils.CopyDir(projectSrc, projectTarget, true) if err != nil { t.Error(err) } // Copy pip-config file. configSrc := filepath.Join(filepath.FromSlash(tests.GetTestResourcesPath()), "pip", "pip.yaml") configTarget := filepath.Join(projectTarget, ".jfrog", "projects") tests.ReplaceTemplateVariables(configSrc, configTarget) return projectTarget } func initPipTest(t *testing.T) { if !*tests.TestPip { t.Skip("Skipping Pip test. To run Pip test add the '-test.pip=true' option.") } if !isRepoExist(tests.PypiRemoteRepo) { t.Error("Pypi test remote repository doesn't exist.") } if !isRepoExist(tests.PypiVirtualRepo) { t.Error("Pypi test virtual repository doesn't exist.") } if t.Failed() { t.FailNow() } } func setPathEnvForPipInstall(t *testing.T) string { // Keep original value of 'PATH'. pathValue, exists := os.LookupEnv("PATH") if !exists { t.Fatal("Couldn't find PATH variable, failing pip tests.") } // Append the path. virtualEnvPath := *tests.PipVirtualEnv if virtualEnvPath != "" { var newPathValue string if cliutils.IsWindows() { newPathValue = fmt.Sprintf("%s;%s", virtualEnvPath, pathValue) } else { newPathValue = fmt.Sprintf("%s:%s", virtualEnvPath, pathValue) } err := os.Setenv("PATH", newPathValue) if err != nil { t.Fatal(err) } } // Return original PATH value. return pathValue } // Ensure that the provided pip virtual-environment is empty from installed packages. 
func validateEmptyPipEnv(t *testing.T) { pipFreezeCmd := &PipCmd{Command: "freeze", Options: []string{"--local"}} out, err := gofrogcmd.RunCmdOutput(pipFreezeCmd) if err != nil { t.Fatal(err) } if out != "" { t.Fatalf("Provided pip virtual-environment contains installed packages: %s\nPlease provide a clean environment.", out) } } func (pfc *PipCmd) GetCmd() *exec.Cmd { var cmd []string cmd = append(cmd, "pip") cmd = append(cmd, pfc.Command) cmd = append(cmd, pfc.Options...) return exec.Command(cmd[0], cmd[1:]...) } func (pfc *PipCmd) GetEnv() map[string]string { return map[string]string{} } func (pfc *PipCmd) GetStdWriter() io.WriteCloser
func (pfc *PipCmd) GetErrWriter() io.WriteCloser { return nil } func TestPipDepsTree(t *testing.T) { initPipTest(t) // Add virtual-environment path to 'PATH' for executing all pip and python commands inside the virtual-environment. pathValue := setPathEnvForPipInstall(t) if t.Failed() { t.FailNow() } defer os.Setenv("PATH", pathValue) // Check pip env is clean. validateEmptyPipEnv(t) // Populate cli config with 'default' server. oldHomeDir, newHomeDir := prepareHomeDir(t) defer os.Setenv(cliutils.HomeDir, oldHomeDir) defer os.RemoveAll(newHomeDir) // Create test cases. allTests := []struct { name string project string outputFolder string moduleId string args []string expectedDependencies int cleanAfterExecution bool }{ {"setuppy", "setuppyproject", "setuppy", "jfrog-python-example", []string{".", "--no-cache-dir", "--force-reinstall"}, 3, true}, {"setuppy-verbose", "setuppyproject", "setuppy-verbose", "jfrog-python-example", []string{".", "--no-cache-dir", "--force-reinstall", "-v"}, 3, true}, {"setuppy-with-module", "setuppyproject", "setuppy-with-module", "setuppy-with-module", []string{".", "--no-cache-dir", "--force-reinstall"}, 3, true}, {"requirements", "requirementsproject", "requirements", tests.PipBuildName, []string{"-r", "requirements.txt", "--no-cache-dir", "--force-reinstall"}, 5, true}, {"requirements-verbose", "requirementsproject", "requirements-verbose", tests.PipBuildName, []string{"-r", "requirements.txt", "--no-cache-dir", "--force-reinstall", "-v"}, 5, false}, {"requirements-use-cache", "requirementsproject", "requirements-verbose", "requirements-verbose-use-cache", []string{"-r", "requirements.txt"}, 5, true}, } // Run test cases. for _, test := range allTests { t.Run(test.name, func(t *testing.T) { testPipDepsTreeCmd(t, createPipProject(t, test.outputFolder, test.project), test.expectedDependencies, test.args) if test.cleanAfterExecution { // cleanup cleanPipTest(t, test.name) } }) } cleanPipTest(t, "cleanup") tests.CleanFileSystem() } func testPipDepsTreeCmd(t *testing.T, projectPath string, expectedElements int, args []string) { wd, err := os.Getwd() if err != nil { t.Fatal(err) } err = os.Chdir(projectPath) if err != nil { t.Fatal(err) } defer os.Chdir(wd) // Get pip configuration. pipConfig, err := piputils.GetPipConfiguration() if err != nil { t.Fatalf("Error occurred while attempting to read pip-configuration file: %s\n"+ "Please run 'jfrog rt pip-config' command prior to running 'jfrog rt pip-deps-tree'.", err.Error()) } // Set arg values. rtDetails, err := pipConfig.RtDetails() if err != nil { t.Fatal(err) } // Create command. pipDepsTreeCmd := pip.NewPipDepTreeCommand() pipDepsTreeCmd.SetRtDetails(rtDetails).SetRepo(pipConfig.TargetRepo()).SetArgs(args) err = pipDepsTreeCmd.Run() if err != nil { t.Fatalf("Failed while executing pip-deps-tree command: %s", err) } // Check result elements. treeJsonData, err := pipDepsTreeCmd.DepsTreeRoot.MarshalJSON() if err != nil { t.Fatalf("Failed parsing tree json: %s", err) } // Count dependencies. 
var depsTreeTest []DependenciesTreeTest err = json.Unmarshal(treeJsonData, &depsTreeTest) if err != nil { t.Error(err) } depsCount := countDependencies(depsTreeTest) if expectedElements != depsCount { t.Errorf("Incorrect number of dependencies found, expected: %d, found: %d", expectedElements, depsCount) } } type DependenciesTreeTest struct { Id string `json:"id,omitempty"` DirectDependencies []DependenciesTreeTest `json:"dependencies,omitempty"` } func countDependencies(allDeps []DependenciesTreeTest) int { depsMap := make(map[string]int) // Iterate over the dependencies, resolving each one and discovering more dependencies along the way. index := -1 var currentDep string for { index++ // Stop once every discovered dependency has been visited. if len(allDeps) < index+1 { break } currentDep = allDeps[index].Id // Skip dependencies that were already resolved, so shared dependencies aren't counted or expanded twice. if _, ok := depsMap[currentDep]; ok { // Already resolved. continue } // Mark currentDep as resolved and queue its direct dependencies for counting. depsMap[currentDep] = 1 allDeps = append(allDeps, allDeps[index].DirectDependencies...) } return len(depsMap) }
{ return nil }
identifier_body
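The record above fills in the body of GetStdWriter: PipCmd satisfies the command interface that gofrog's runner consumes (GetCmd, GetEnv, GetStdWriter, GetErrWriter), and returning nil writers lets the runner capture the output itself. A minimal standalone sketch of the same shape, using only os/exec instead of gofrogcmd.RunCmdOutput (FreezeCmd is a hypothetical stand-in, and it assumes a pip executable on PATH):

package main

import (
	"fmt"
	"os/exec"
)

// FreezeCmd mirrors the PipCmd shape: a pip subcommand plus its options.
type FreezeCmd struct {
	Command string
	Options []string
}

// GetCmd assembles "pip <command> <options...>" into an exec.Cmd,
// the same way PipCmd.GetCmd does above.
func (fc *FreezeCmd) GetCmd() *exec.Cmd {
	args := append([]string{fc.Command}, fc.Options...)
	return exec.Command("pip", args...)
}

func main() {
	fc := &FreezeCmd{Command: "freeze", Options: []string{"--local"}}
	out, err := fc.GetCmd().Output() // plain os/exec in place of gofrog's runner
	if err != nil {
		fmt.Println("pip freeze failed:", err)
		return
	}
	fmt.Printf("installed packages:\n%s", out)
}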
pip_test.go
package main import ( "encoding/json" "fmt" "io" "os" "os/exec" "path/filepath" "strconv" "testing" gofrogcmd "github.com/jfrog/gofrog/io" "github.com/jfrog/jfrog-cli-go/artifactory/commands/pip" piputils "github.com/jfrog/jfrog-cli-go/artifactory/utils/pip" "github.com/jfrog/jfrog-cli-go/inttestutils" "github.com/jfrog/jfrog-cli-go/utils/cliutils" "github.com/jfrog/jfrog-cli-go/utils/tests" "github.com/jfrog/jfrog-client-go/utils/io/fileutils" ) type PipCmd struct { Command string Options []string } func TestPipInstall(t *testing.T) { // Init pip. initPipTest(t) // Init CLI without credential flags. artifactoryCli = tests.NewJfrogCli(execMain, "jfrog rt", "") // Add virtual-environment path to 'PATH' for executing all pip and python commands inside the virtual-environment. pathValue := setPathEnvForPipInstall(t) if t.Failed() { t.FailNow() } defer os.Setenv("PATH", pathValue) // Check pip env is clean. validateEmptyPipEnv(t) // Populate cli config with 'default' server. oldHomeDir, newHomeDir := prepareHomeDir(t) defer os.Setenv(cliutils.HomeDir, oldHomeDir) defer os.RemoveAll(newHomeDir) // Create test cases. allTests := []struct { name string project string outputFolder string moduleId string args []string expectedDependencies int cleanAfterExecution bool }{ {"setuppy", "setuppyproject", "setuppy", "jfrog-python-example", []string{"pip-install", ".", "--no-cache-dir", "--force-reinstall", "--build-name=" + tests.PipBuildName}, 3, true}, {"setuppy-verbose", "setuppyproject", "setuppy-verbose", "jfrog-python-example", []string{"pip-install", ".", "--no-cache-dir", "--force-reinstall", "-v", "--build-name=" + tests.PipBuildName}, 3, true}, {"setuppy-with-module", "setuppyproject", "setuppy-with-module", "setuppy-with-module", []string{"pip-install", ".", "--no-cache-dir", "--force-reinstall", "--build-name=" + tests.PipBuildName, "--module=setuppy-with-module"}, 3, true}, {"requirements", "requirementsproject", "requirements", tests.PipBuildName, []string{"pip-install", "-r", "requirements.txt", "--no-cache-dir", "--force-reinstall", "--build-name=" + tests.PipBuildName}, 5, true}, {"requirements-verbose", "requirementsproject", "requirements-verbose", tests.PipBuildName, []string{"pip-install", "-r", "requirements.txt", "--no-cache-dir", "--force-reinstall", "-v", "--build-name=" + tests.PipBuildName}, 5, false}, {"requirements-use-cache", "requirementsproject", "requirements-verbose", "requirements-verbose-use-cache", []string{"pip-install", "-r", "requirements.txt", "--module=requirements-verbose-use-cache", "--build-name=" + tests.PipBuildName}, 5, true}, } // Run test cases. for buildNumber, test := range allTests { t.Run(test.name, func(t *testing.T) { testPipCmd(t, test.name, createPipProject(t, test.outputFolder, test.project), strconv.Itoa(buildNumber), test.moduleId, test.expectedDependencies, test.args) if test.cleanAfterExecution { // cleanup inttestutils.DeleteBuild(artifactoryDetails.Url, tests.PipBuildName, artHttpDetails) cleanPipTest(t, test.name) } }) } cleanPipTest(t, "cleanup") tests.CleanFileSystem() } func testPipCmd(t *testing.T, outputFolder, projectPath, buildNumber, module string, expectedDependencies int, args []string) { wd, err := os.Getwd() if err != nil { t.Error(err) } err = os.Chdir(projectPath) if err != nil
defer os.Chdir(wd) args = append(args, "--build-number="+buildNumber) err = artifactoryCli.Exec(args...) if err != nil { t.Errorf("Failed executing pip-install command: %s", err.Error()) cleanPipTest(t, outputFolder) return } artifactoryCli.Exec("bp", tests.PipBuildName, buildNumber) buildInfo := inttestutils.GetBuildInfo(artifactoryDetails.Url, tests.PipBuildName, buildNumber, t, artHttpDetails) if buildInfo.Modules == nil || len(buildInfo.Modules) == 0 { t.Error("Pip build info was not generated correctly, no modules were created.") } if expectedDependencies != len(buildInfo.Modules[0].Dependencies) { t.Error("Incorrect number of dependencies found in the build-info, expected:", expectedDependencies, " Found:", len(buildInfo.Modules[0].Dependencies)) } if module != buildInfo.Modules[0].Id { t.Error(fmt.Errorf("Expected module name %s, got %s", module, buildInfo.Modules[0].Id)) } } func cleanPipTest(t *testing.T, outFolder string) { // Clean the pip environment of installed packages. pipFreezeCmd := &PipCmd{Command: "freeze", Options: []string{"--local"}} out, err := gofrogcmd.RunCmdOutput(pipFreezeCmd) if err != nil { t.Fatal(err) } // If no packages to uninstall, return. if out == "" { return } // Save freeze output to file. freezeTarget, err := fileutils.CreateFilePath(tests.Temp, outFolder+"-freeze.txt") if err != nil { t.Error(err) } file, err := os.Create(freezeTarget) if err != nil { t.Error(err) } defer file.Close() _, err = file.Write([]byte(out)) if err != nil { t.Error(err) } // Delete the frozen packages. pipUninstallCmd := &PipCmd{Command: "uninstall", Options: []string{"-y", "-r", freezeTarget}} err = gofrogcmd.RunCmd(pipUninstallCmd) if err != nil { t.Fatal(err) } } func createPipProject(t *testing.T, outFolder, projectName string) string { projectSrc := filepath.Join(filepath.FromSlash(tests.GetTestResourcesPath()), "pip", projectName) projectTarget := filepath.Join(tests.Out, outFolder+"-"+projectName) err := fileutils.CreateDirIfNotExist(projectTarget) if err != nil { t.Error(err) } // Copy the project files. err = fileutils.CopyDir(projectSrc, projectTarget, true) if err != nil { t.Error(err) } // Copy the pip-config file. configSrc := filepath.Join(filepath.FromSlash(tests.GetTestResourcesPath()), "pip", "pip.yaml") configTarget := filepath.Join(projectTarget, ".jfrog", "projects") tests.ReplaceTemplateVariables(configSrc, configTarget) return projectTarget } func initPipTest(t *testing.T) { if !*tests.TestPip { t.Skip("Skipping Pip test. To run the Pip tests, add the '-test.pip=true' option.") } if !isRepoExist(tests.PypiRemoteRepo) { t.Error("Pypi test remote repository doesn't exist.") } if !isRepoExist(tests.PypiVirtualRepo) { t.Error("Pypi test virtual repository doesn't exist.") } if t.Failed() { t.FailNow() } } func setPathEnvForPipInstall(t *testing.T) string { // Keep the original value of 'PATH'. pathValue, exists := os.LookupEnv("PATH") if !exists { t.Fatal("Couldn't find PATH variable, failing pip tests.") } // Prepend the virtual-environment path. virtualEnvPath := *tests.PipVirtualEnv if virtualEnvPath != "" { var newPathValue string if cliutils.IsWindows() { newPathValue = fmt.Sprintf("%s;%s", virtualEnvPath, pathValue) } else { newPathValue = fmt.Sprintf("%s:%s", virtualEnvPath, pathValue) } err := os.Setenv("PATH", newPathValue) if err != nil { t.Fatal(err) } } // Return the original PATH value. return pathValue } // Ensure that the provided pip virtual-environment is free of installed packages.
func validateEmptyPipEnv(t *testing.T) { pipFreezeCmd := &PipCmd{Command: "freeze", Options: []string{"--local"}} out, err := gofrogcmd.RunCmdOutput(pipFreezeCmd) if err != nil { t.Fatal(err) } if out != "" { t.Fatalf("Provided pip virtual-environment contains installed packages: %s\nPlease provide a clean environment.", out) } } func (pfc *PipCmd) GetCmd() *exec.Cmd { var cmd []string cmd = append(cmd, "pip") cmd = append(cmd, pfc.Command) cmd = append(cmd, pfc.Options...) return exec.Command(cmd[0], cmd[1:]...) } func (pfc *PipCmd) GetEnv() map[string]string { return map[string]string{} } func (pfc *PipCmd) GetStdWriter() io.WriteCloser { return nil } func (pfc *PipCmd) GetErrWriter() io.WriteCloser { return nil } func TestPipDepsTree(t *testing.T) { initPipTest(t) // Add virtual-environment path to 'PATH' for executing all pip and python commands inside the virtual-environment. pathValue := setPathEnvForPipInstall(t) if t.Failed() { t.FailNow() } defer os.Setenv("PATH", pathValue) // Check pip env is clean. validateEmptyPipEnv(t) // Populate cli config with 'default' server. oldHomeDir, newHomeDir := prepareHomeDir(t) defer os.Setenv(cliutils.HomeDir, oldHomeDir) defer os.RemoveAll(newHomeDir) // Create test cases. allTests := []struct { name string project string outputFolder string moduleId string args []string expectedDependencies int cleanAfterExecution bool }{ {"setuppy", "setuppyproject", "setuppy", "jfrog-python-example", []string{".", "--no-cache-dir", "--force-reinstall"}, 3, true}, {"setuppy-verbose", "setuppyproject", "setuppy-verbose", "jfrog-python-example", []string{".", "--no-cache-dir", "--force-reinstall", "-v"}, 3, true}, {"setuppy-with-module", "setuppyproject", "setuppy-with-module", "setuppy-with-module", []string{".", "--no-cache-dir", "--force-reinstall"}, 3, true}, {"requirements", "requirementsproject", "requirements", tests.PipBuildName, []string{"-r", "requirements.txt", "--no-cache-dir", "--force-reinstall"}, 5, true}, {"requirements-verbose", "requirementsproject", "requirements-verbose", tests.PipBuildName, []string{"-r", "requirements.txt", "--no-cache-dir", "--force-reinstall", "-v"}, 5, false}, {"requirements-use-cache", "requirementsproject", "requirements-verbose", "requirements-verbose-use-cache", []string{"-r", "requirements.txt"}, 5, true}, } // Run test cases. for _, test := range allTests { t.Run(test.name, func(t *testing.T) { testPipDepsTreeCmd(t, createPipProject(t, test.outputFolder, test.project), test.expectedDependencies, test.args) if test.cleanAfterExecution { // cleanup cleanPipTest(t, test.name) } }) } cleanPipTest(t, "cleanup") tests.CleanFileSystem() } func testPipDepsTreeCmd(t *testing.T, projectPath string, expectedElements int, args []string) { wd, err := os.Getwd() if err != nil { t.Fatal(err) } err = os.Chdir(projectPath) if err != nil { t.Fatal(err) } defer os.Chdir(wd) // Get pip configuration. pipConfig, err := piputils.GetPipConfiguration() if err != nil { t.Fatalf("Error occurred while attempting to read pip-configuration file: %s\n"+ "Please run 'jfrog rt pip-config' command prior to running 'jfrog rt pip-deps-tree'.", err.Error()) } // Set arg values. rtDetails, err := pipConfig.RtDetails() if err != nil { t.Fatal(err) } // Create command.
pipDepsTreeCmd := pip.NewPipDepTreeCommand() pipDepsTreeCmd.SetRtDetails(rtDetails).SetRepo(pipConfig.TargetRepo()).SetArgs(args) err = pipDepsTreeCmd.Run() if err != nil { t.Fatalf("Failed while executing pip-deps-tree command: %s", err) } // Check result elements. treeJsonData, err := pipDepsTreeCmd.DepsTreeRoot.MarshalJSON() if err != nil { t.Fatalf("Failed parsing tree json: %s", err) } // Count dependencies. var depsTreeTest []DependenciesTreeTest err = json.Unmarshal(treeJsonData, &depsTreeTest) if err != nil { t.Error(err) } depsCount := countDependencies(depsTreeTest) if expectedElements != depsCount { t.Errorf("Incorrect number of dependencies found, expected: %d, found: %d", expectedElements, depsCount) } } type DependenciesTreeTest struct { Id string `json:"id,omitempty"` DirectDependencies []DependenciesTreeTest `json:"dependencies,omitempty"` } func countDependencies(allDeps []DependenciesTreeTest) int { depsMap := make(map[string]int) // Iterate over the dependencies, resolving each one and discovering more dependencies along the way. index := -1 var currentDep string for { index++ // Stop once every discovered dependency has been visited. if len(allDeps) < index+1 { break } currentDep = allDeps[index].Id // Skip dependencies that were already resolved, so shared dependencies aren't counted or expanded twice. if _, ok := depsMap[currentDep]; ok { // Already resolved. continue } // Mark currentDep as resolved and queue its direct dependencies for counting. depsMap[currentDep] = 1 allDeps = append(allDeps, allDeps[index].DirectDependencies...) } return len(depsMap) }
{ t.Error(err) }
conditional_block
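This record's blank is an error-check conditional inside the table-driven TestPipInstall. The pattern is worth seeing in isolation: the cases live in an anonymous struct slice, t.Run gives each case its own subtest, and the loop index doubles as the build number. A stripped-down sketch under those assumptions (the case data here is illustrative, not the real test matrix):

package main

import (
	"strconv"
	"testing"
)

// TestTableDrivenSketch shows the table-driven subtest pattern used above.
func TestTableDrivenSketch(t *testing.T) {
	cases := []struct {
		name string
		args []string
	}{
		{"setuppy", []string{"pip-install", "."}},
		{"requirements", []string{"pip-install", "-r", "requirements.txt"}},
	}
	for buildNumber, tc := range cases {
		tc := tc // capture the range variable for the subtest closure
		t.Run(tc.name, func(t *testing.T) {
			// The loop index doubles as the build number, as in TestPipInstall.
			args := append(tc.args, "--build-number="+strconv.Itoa(buildNumber))
			if len(args) == 0 {
				t.Fatal("no arguments built")
			}
			t.Log("would execute:", args)
		})
	}
}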
pip_test.go
package main import ( "encoding/json" "fmt" "io" "os" "os/exec" "path/filepath" "strconv" "testing" gofrogcmd "github.com/jfrog/gofrog/io" "github.com/jfrog/jfrog-cli-go/artifactory/commands/pip" piputils "github.com/jfrog/jfrog-cli-go/artifactory/utils/pip" "github.com/jfrog/jfrog-cli-go/inttestutils" "github.com/jfrog/jfrog-cli-go/utils/cliutils" "github.com/jfrog/jfrog-cli-go/utils/tests" "github.com/jfrog/jfrog-client-go/utils/io/fileutils" ) type PipCmd struct { Command string Options []string } func TestPipInstall(t *testing.T) { // Init pip. initPipTest(t) // Init CLI without credential flags. artifactoryCli = tests.NewJfrogCli(execMain, "jfrog rt", "") // Add virtual-environment path to 'PATH' for executing all pip and python commands inside the virtual-environment. pathValue := setPathEnvForPipInstall(t) if t.Failed() { t.FailNow() } defer os.Setenv("PATH", pathValue) // Check pip env is clean. validateEmptyPipEnv(t) // Populate cli config with 'default' server. oldHomeDir, newHomeDir := prepareHomeDir(t) defer os.Setenv(cliutils.HomeDir, oldHomeDir) defer os.RemoveAll(newHomeDir) // Create test cases. allTests := []struct { name string project string outputFolder string moduleId string args []string expectedDependencies int cleanAfterExecution bool }{ {"setuppy", "setuppyproject", "setuppy", "jfrog-python-example", []string{"pip-install", ".", "--no-cache-dir", "--force-reinstall", "--build-name=" + tests.PipBuildName}, 3, true}, {"setuppy-verbose", "setuppyproject", "setuppy-verbose", "jfrog-python-example", []string{"pip-install", ".", "--no-cache-dir", "--force-reinstall", "-v", "--build-name=" + tests.PipBuildName}, 3, true}, {"setuppy-with-module", "setuppyproject", "setuppy-with-module", "setuppy-with-module", []string{"pip-install", ".", "--no-cache-dir", "--force-reinstall", "--build-name=" + tests.PipBuildName, "--module=setuppy-with-module"}, 3, true}, {"requirements", "requirementsproject", "requirements", tests.PipBuildName, []string{"pip-install", "-r", "requirements.txt", "--no-cache-dir", "--force-reinstall", "--build-name=" + tests.PipBuildName}, 5, true}, {"requirements-verbose", "requirementsproject", "requirements-verbose", tests.PipBuildName, []string{"pip-install", "-r", "requirements.txt", "--no-cache-dir", "--force-reinstall", "-v", "--build-name=" + tests.PipBuildName}, 5, false}, {"requirements-use-cache", "requirementsproject", "requirements-verbose", "requirements-verbose-use-cache", []string{"pip-install", "-r", "requirements.txt", "--module=requirements-verbose-use-cache", "--build-name=" + tests.PipBuildName}, 5, true}, } // Run test cases. for buildNumber, test := range allTests { t.Run(test.name, func(t *testing.T) { testPipCmd(t, test.name, createPipProject(t, test.outputFolder, test.project), strconv.Itoa(buildNumber), test.moduleId, test.expectedDependencies, test.args) if test.cleanAfterExecution { // cleanup inttestutils.DeleteBuild(artifactoryDetails.Url, tests.PipBuildName, artHttpDetails) cleanPipTest(t, test.name) } }) } cleanPipTest(t, "cleanup") tests.CleanFileSystem() } func testPipCmd(t *testing.T, outputFolder, projectPath, buildNumber, module string, expectedDependencies int, args []string) { wd, err := os.Getwd() if err != nil { t.Error(err) } err = os.Chdir(projectPath) if err != nil { t.Error(err) } defer os.Chdir(wd) args = append(args, "--build-number="+buildNumber) err = artifactoryCli.Exec(args...) 
if err != nil { t.Errorf("Failed executing pip-install command: %s", err.Error()) cleanPipTest(t, outputFolder) return } artifactoryCli.Exec("bp", tests.PipBuildName, buildNumber) buildInfo := inttestutils.GetBuildInfo(artifactoryDetails.Url, tests.PipBuildName, buildNumber, t, artHttpDetails) if buildInfo.Modules == nil || len(buildInfo.Modules) == 0 { t.Error("Pip build info was not generated correctly, no modules were created.") } if expectedDependencies != len(buildInfo.Modules[0].Dependencies) { t.Error("Incorrect number of artifacts found in the build-info, expected:", expectedDependencies, " Found:", len(buildInfo.Modules[0].Dependencies)) } if module != buildInfo.Modules[0].Id { t.Error(fmt.Errorf("Expected module name %s, got %s", module, buildInfo.Modules[0].Id)) } } func cleanPipTest(t *testing.T, outFolder string) { // Clean pip environment from installed packages. pipFreezeCmd := &PipCmd{Command: "freeze", Options: []string{"--local"}} out, err := gofrogcmd.RunCmdOutput(pipFreezeCmd) if err != nil { t.Fatal(err) } // If no packages to uninstall, return. if out == "" { return } // Save freeze output to file. freezeTarget, err := fileutils.CreateFilePath(tests.Temp, outFolder+"-freeze.txt") if err != nil { t.Error(err) } file, err := os.Create(freezeTarget) if err != nil { t.Error(err) } defer file.Close() _, err = file.Write([]byte(out)) if err != nil { t.Error(err) } // Delete freezed packages. pipUninstallCmd := &PipCmd{Command: "uninstall", Options: []string{"-y", "-r", freezeTarget}} err = gofrogcmd.RunCmd(pipUninstallCmd) if err != nil { t.Fatal(err) } } func createPipProject(t *testing.T, outFolder, projectName string) string { projectSrc := filepath.Join(filepath.FromSlash(tests.GetTestResourcesPath()), "pip", projectName) projectTarget := filepath.Join(tests.Out, outFolder+"-"+projectName) err := fileutils.CreateDirIfNotExist(projectTarget) if err != nil { t.Error(err) } // Copy pip-installation file. err = fileutils.CopyDir(projectSrc, projectTarget, true) if err != nil { t.Error(err) } // Copy pip-config file. configSrc := filepath.Join(filepath.FromSlash(tests.GetTestResourcesPath()), "pip", "pip.yaml") configTarget := filepath.Join(projectTarget, ".jfrog", "projects") tests.ReplaceTemplateVariables(configSrc, configTarget) return projectTarget } func initPipTest(t *testing.T) { if !*tests.TestPip { t.Skip("Skipping Pip test. To run Pip test add the '-test.pip=true' option.") } if !isRepoExist(tests.PypiRemoteRepo) { t.Error("Pypi test remote repository doesn't exist.") } if !isRepoExist(tests.PypiVirtualRepo) { t.Error("Pypi test virtual repository doesn't exist.") } if t.Failed() { t.FailNow() } } func setPathEnvForPipInstall(t *testing.T) string { // Keep original value of 'PATH'. pathValue, exists := os.LookupEnv("PATH") if !exists { t.Fatal("Couldn't find PATH variable, failing pip tests.") } // Append the path. virtualEnvPath := *tests.PipVirtualEnv if virtualEnvPath != "" { var newPathValue string if cliutils.IsWindows() { newPathValue = fmt.Sprintf("%s;%s", virtualEnvPath, pathValue) } else { newPathValue = fmt.Sprintf("%s:%s", virtualEnvPath, pathValue) } err := os.Setenv("PATH", newPathValue) if err != nil { t.Fatal(err) } } // Return original PATH value. return pathValue } // Ensure that the provided pip virtual-environment is empty from installed packages. 
func validateEmptyPipEnv(t *testing.T) { pipFreezeCmd := &PipCmd{Command: "freeze", Options: []string{"--local"}} out, err := gofrogcmd.RunCmdOutput(pipFreezeCmd) if err != nil { t.Fatal(err) } if out != "" { t.Fatalf("Provided pip virtual-environment contains installed packages: %s\nPlease provide a clean environment.", out) } } func (pfc *PipCmd) GetCmd() *exec.Cmd { var cmd []string cmd = append(cmd, "pip") cmd = append(cmd, pfc.Command) cmd = append(cmd, pfc.Options...) return exec.Command(cmd[0], cmd[1:]...) } func (pfc *PipCmd) GetEnv() map[string]string { return map[string]string{} } func (pfc *PipCmd)
() io.WriteCloser { return nil } func (pfc *PipCmd) GetErrWriter() io.WriteCloser { return nil } func TestPipDepsTree(t *testing.T) { initPipTest(t) // Add virtual-environment path to 'PATH' for executing all pip and python commands inside the virtual-environment. pathValue := setPathEnvForPipInstall(t) if t.Failed() { t.FailNow() } defer os.Setenv("PATH", pathValue) // Check pip env is clean. validateEmptyPipEnv(t) // Populate cli config with 'default' server. oldHomeDir, newHomeDir := prepareHomeDir(t) defer os.Setenv(cliutils.HomeDir, oldHomeDir) defer os.RemoveAll(newHomeDir) // Create test cases. allTests := []struct { name string project string outputFolder string moduleId string args []string expectedDependencies int cleanAfterExecution bool }{ {"setuppy", "setuppyproject", "setuppy", "jfrog-python-example", []string{".", "--no-cache-dir", "--force-reinstall"}, 3, true}, {"setuppy-verbose", "setuppyproject", "setuppy-verbose", "jfrog-python-example", []string{".", "--no-cache-dir", "--force-reinstall", "-v"}, 3, true}, {"setuppy-with-module", "setuppyproject", "setuppy-with-module", "setuppy-with-module", []string{".", "--no-cache-dir", "--force-reinstall"}, 3, true}, {"requirements", "requirementsproject", "requirements", tests.PipBuildName, []string{"-r", "requirements.txt", "--no-cache-dir", "--force-reinstall"}, 5, true}, {"requirements-verbose", "requirementsproject", "requirements-verbose", tests.PipBuildName, []string{"-r", "requirements.txt", "--no-cache-dir", "--force-reinstall", "-v"}, 5, false}, {"requirements-use-cache", "requirementsproject", "requirements-verbose", "requirements-verbose-use-cache", []string{"-r", "requirements.txt"}, 5, true}, } // Run test cases. for _, test := range allTests { t.Run(test.name, func(t *testing.T) { testPipDepsTreeCmd(t, createPipProject(t, test.outputFolder, test.project), test.expectedDependencies, test.args) if test.cleanAfterExecution { // cleanup cleanPipTest(t, test.name) } }) } cleanPipTest(t, "cleanup") tests.CleanFileSystem() } func testPipDepsTreeCmd(t *testing.T, projectPath string, expectedElements int, args []string) { wd, err := os.Getwd() if err != nil { t.Fatal(err) } err = os.Chdir(projectPath) if err != nil { t.Fatal(err) } defer os.Chdir(wd) // Get pip configuration. pipConfig, err := piputils.GetPipConfiguration() if err != nil { t.Fatalf("Error occurred while attempting to read pip-configuration file: %s\n"+ "Please run 'jfrog rt pip-config' command prior to running 'jfrog rt pip-deps-tree'.", err.Error()) } // Set arg values. rtDetails, err := pipConfig.RtDetails() if err != nil { t.Fatal(err) } // Create command. pipDepsTreeCmd := pip.NewPipDepTreeCommand() pipDepsTreeCmd.SetRtDetails(rtDetails).SetRepo(pipConfig.TargetRepo()).SetArgs(args) err = pipDepsTreeCmd.Run() if err != nil { t.Fatalf("Failed while executing pip-deps-tree command: %s", err) } // Check result elements. treeJsonData, err := pipDepsTreeCmd.DepsTreeRoot.MarshalJSON() if err != nil { t.Fatalf("Failed parsing tree json: %s", err) } // Count dependencies. 
var depsTreeTest []DependenciesTreeTest err = json.Unmarshal(treeJsonData, &depsTreeTest) if err != nil { t.Error(err) } depsCount := countDependencies(depsTreeTest) if expectedElements != depsCount { t.Errorf("Incorrect number of dependencies found, expected: %d, found: %d", expectedElements, depsCount) } } type DependenciesTreeTest struct { Id string `json:"id,omitempty"` DirectDependencies []DependenciesTreeTest `json:"dependencies,omitempty"` } func countDependencies(allDeps []DependenciesTreeTest) int { depsMap := make(map[string]int) // Iterate over the dependencies, resolving each one and discovering more dependencies along the way. index := -1 var currentDep string for { index++ // Stop once every discovered dependency has been visited. if len(allDeps) < index+1 { break } currentDep = allDeps[index].Id // Skip dependencies that were already resolved, so shared dependencies aren't counted or expanded twice. if _, ok := depsMap[currentDep]; ok { // Already resolved. continue } // Mark currentDep as resolved and queue its direct dependencies for counting. depsMap[currentDep] = 1 allDeps = append(allDeps, allDeps[index].DirectDependencies...) } return len(depsMap) }
GetStdWriter
identifier_name
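The identifier restored in this record is one of the PipCmd writer methods, but the more reusable idea in the file is setPathEnvForPipInstall: it prepends the virtual-environment directory to PATH, picking ';' on Windows and ':' elsewhere. The standard library already encodes that choice as os.PathListSeparator, so a sketch without the cliutils helper could look like this (prependToPath is a hypothetical name, not part of the CLI codebase):

package main

import (
	"fmt"
	"os"
)

// prependToPath puts dir in front of the current PATH and returns the
// original value so the caller can restore it with a deferred os.Setenv.
func prependToPath(dir string) (original string, err error) {
	original, exists := os.LookupEnv("PATH")
	if !exists {
		return "", fmt.Errorf("PATH is not set")
	}
	// os.PathListSeparator is ';' on Windows and ':' on Unix-like systems.
	newValue := dir + string(os.PathListSeparator) + original
	return original, os.Setenv("PATH", newValue)
}

func main() {
	original, err := prependToPath("/tmp/venv/bin")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.Setenv("PATH", original) // restore, mirroring the test's defer
	fmt.Println("PATH now starts with the virtual environment")
}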
porcelain.go
package lightning import ( "encoding/json" "fmt" "log" "time" "github.com/tidwall/gjson" ) var InvoiceListeningTimeout = time.Minute * 150 var WaitSendPayTimeout = time.Hour * 24 var WaitPaymentMaxAttempts = 60 type Client struct { Path string PaymentHandler func(gjson.Result) LastInvoiceIndex int } // ListenForInvoices starts a goroutine that will repeatedly call waitanyinvoice. // Each payment received will be fed into the client.PaymentHandler function. // You can change that function in the meantime. // Or you can set it to nil if you want to stop listening for invoices. func (ln *Client) ListenForInvoices() { go func() { for { if ln.PaymentHandler == nil { log.Print("won't listen for invoices: no PaymentHandler.") return } res, err := ln.CallWithCustomTimeout(InvoiceListeningTimeout, "waitanyinvoice", ln.LastInvoiceIndex) if err != nil { if _, ok := err.(ErrorTimeout); ok { time.Sleep(time.Minute) } else { log.Printf("error waiting for invoice %d: %s", ln.LastInvoiceIndex, err.Error()) time.Sleep(5 * time.Second) } continue } index := res.Get("pay_index").Int() ln.LastInvoiceIndex = int(index) ln.PaymentHandler(res) } }() } // PayAndWaitUntilResolution implements its 'pay' logic, querying and retrying routes. // It's like the default 'pay' plugin, but it blocks until a final success or failure is achieved. // After it returns you can be sure a failed payment will not succeed anymore. // Any value in params will be passed to 'getroute' or 'sendpay' or smart defaults will be used. // This includes values from the default 'pay' plugin. func (ln *Client) PayAndWaitUntilResolution( bolt11 string, params map[string]interface{}, ) (success bool, payment gjson.Result, tries []Try, err error) { decoded, err := ln.Call("decodepay", bolt11) if err != nil { return false, payment, tries, err } hash := decoded.Get("payment_hash").String() fakePayment := gjson.Parse(`{"payment_hash": "` + hash + `"}`) exclude := []string{} payee := decoded.Get("payee").String() delayFinalHop := decoded.Get("min_final_cltv_expiry").Int() var msatoshi float64 if imsatoshi, ok := params["msatoshi"]; ok { if converted, err := toFloat(imsatoshi); err == nil { msatoshi = converted } } else { msatoshi = decoded.Get("msatoshi").Float() } riskfactor, ok := params["riskfactor"] if !ok { riskfactor = 10 } label, ok := params["label"] if !ok { label = "" } maxfeepercent := 0.5 if imaxfeepercent, ok := params["maxfeepercent"]; ok { if converted, err := toFloat(imaxfeepercent); err == nil { maxfeepercent = converted } } exemptfee := 5000.0 if iexemptfee, ok := params["exemptfee"]; ok { if converted, err := toFloat(iexemptfee); err == nil { exemptfee = converted } } routehints := decoded.Get("routes").Array() if len(routehints) > 0 { for _, rh := range routehints { done, payment := tryPayment(ln, &tries, bolt11, payee, msatoshi, hash, label, &exclude, delayFinalHop, riskfactor, maxfeepercent, exemptfee, &rh) if done { return true, payment, tries, nil } } } else { done, payment := tryPayment(ln, &tries, bolt11, payee, msatoshi, hash, label, &exclude, delayFinalHop, riskfactor, maxfeepercent, exemptfee, nil) if done { return true, payment, tries, nil } } return false, fakePayment, tries, nil } func tryPayment( ln *Client, tries *[]Try, bolt11 string, payee string, msatoshi float64, hash string, label interface{}, exclude *[]string, delayFinalHop int64, riskfactor interface{}, maxfeepercent float64, exemptfee float64, hint *gjson.Result, ) (paid bool, payment gjson.Result) { for try := 0; try < 30; try++ { target := payee if hint != 
nil { target = hint.Get("0.pubkey").String() } res, err := ln.CallNamed("getroute", "id", target, "riskfactor", riskfactor, "cltv", delayFinalHop, "msatoshi", msatoshi, "fuzzpercent", 0, "exclude", *exclude, ) if err != nil { // no route or invalid parameters, call it a simple failure return } if !res.Get("route").Exists() { continue } route := res.Get("route") // if we're using a route hint, increment the queried route with the missing parts if hint != nil { route = addHintToRoute(route, *hint, payee, delayFinalHop) } // inspect route, it shouldn't be too expensive if route.Get("0.msatoshi").Float()/msatoshi > (1 + 1/maxfeepercent) { // too expensive, but we'll still accept it if the payment is small if msatoshi > exemptfee { // otherwise try the next route // we force that by excluding a channel *exclude = append(*exclude, getWorstChannel(route)) continue } } // ignore returned value here as we'll get it from waitsendpay below _, err = ln.CallNamed("sendpay", "route", route.Value(), "payment_hash", hash, "label", label, "bolt11", bolt11, ) if err != nil { // the command may return an error and we don't care if _, ok := err.(ErrorCommand); ok { // we don't care because we'll see this in the next call } else { // otherwise it's a different odd error, stop return } } // this should wait indefinitely, but 24h is enough payment, err = ln.CallWithCustomTimeout(WaitSendPayTimeout, "waitsendpay", hash) if err != nil { if cmderr, ok := err.(ErrorCommand); ok { *tries = append(*tries, Try{route.Value(), &cmderr, false}) switch cmderr.Code { case 200, 202: // try again continue case 204: // error in route, eliminate erring channel and try again data, ok0 := cmderr.Data.(map[string]interface{}) ichannel, ok1 := data["erring_channel"] channel, ok2 := ichannel.(string) if !ok0 || !ok1 || !ok2 { // should never happen return } // if erring channel is in the route hint just stop altogether if hint != nil { for _, hhop := range hint.Array() { if hhop.Get("short_channel_id").String() == channel { return } } } // get erring channel a direction by inspecting the route var direction int64 for _, hop := range route.Array() { if hop.Get("channel").String() == channel { direction = hop.Get("direction").Int() goto gotdirection } } // we should never get here return gotdirection: *exclude = append(*exclude, fmt.Sprintf("%s/%d", channel, direction)) continue } } // a different error, call it a complete failure return } // payment suceeded *tries = append(*tries, Try{route.Value(), nil, true}) return true, payment } // stop trying return } func getWorstChannel(route gjson.Result) (worstChannel string) { var worstFee int64 = 0 hops := route.Array() if len(hops) == 1 { return hops[0].Get("channel").String() + "/" + hops[0].Get("direction").String() } for i := 0; i+1 < len(hops); i++ { hop := hops[i] next := hops[i+1] fee := hop.Get("msatoshi").Int() - next.Get("msatoshi").Int() if fee > worstFee { worstFee = fee worstChannel = hop.Get("channel").String() + "/" + hop.Get("direction").String() } } return } func addHintToRoute( route gjson.Result, hint gjson.Result, finalPeer string, finalHopDelay int64, ) gjson.Result { var extrafees int64 = 0 // these extra fees will be added to the public part var extradelay int64 = 0 // this extra delay will be added to the public part // we know exactly the length of our new route npublichops := route.Get("#").Int() nhinthops := hint.Get("#").Int() newroute := make([]map[string]interface{}, npublichops+nhinthops) // so we can start adding the last hops (from the last and backwards) 
r := len(newroute) - 1 lastPublicHop := route.Array()[npublichops-1] hhops := hint.Array() for h := len(hhops) - 1; h >= 0; h-- { hhop := hhops[h] nextdelay, delaydelta, nextmsat, fees, nextpeer := grabParameters( hint, newroute, lastPublicHop, finalPeer, finalHopDelay, r, h, ) // the delay for this hop is whatever the next hop requires plus the delta delay := nextdelay + delaydelta // calculate this channel direction var direction int if hhop.Get("pubkey").String() < nextpeer { direction = 1 } else { direction = 0 } newroute[r] = map[string]interface{}{ "id": nextpeer, "channel": hhop.Get("short_channel_id").Value(), "direction": direction, "msatoshi": int64(nextmsat) + fees, "delay": delay, } // bump extra stuff for the public part extrafees += fees extradelay += delaydelta r-- } // since these parameters are always based on the 'next' part of the route, we need // to run a fake thing here with the hint channel at index -1 so we'll get the parameters // that actually apply to index 0 -- this is not to add them to the actual route, but only to // grab the 'extra' fees/delay we need to apply to the public part of the route _, delaydelta, _, fees, _ := grabParameters( hint, newroute, lastPublicHop, finalPeer, finalHopDelay, r, -1, ) extrafees += fees extradelay += delaydelta // ~ // now we start from the beginning with the public part of the route r = 0 route.ForEach(func(_, hop gjson.Result) bool { newroute[r] = map[string]interface{}{ "id": hop.Get("id").Value(), "channel": hop.Get("channel").Value(), "direction": hop.Get("direction").Value(), "delay": hop.Get("delay").Int() + extradelay, "msatoshi": hop.Get("msatoshi").Int() + extrafees, } r++ return true }) // turn it into a gjson.Result newroutejsonstr, _ := json.Marshal(newroute) newroutegjson := gjson.ParseBytes(newroutejsonstr) return newroutegjson } func grabParameters( fullHint gjson.Result, fullNewRoute []map[string]interface{}, lastPublicHop gjson.Result, finalPeer string, finalHopDelay int64, r int, // the full route hop index we're working on h int, // the hint part channel index we're working on ) ( nextdelay int64, // delay amount for the hop after this or the final node's cltv delaydelta int64, // delaydelta is given by the next hop hint or 0 nextmsat int64, // msatoshi amount for the hop after this (or the final amount) fees int64, // fees are zero in the last hop, or a crazy calculation otherwise nextpeer string, // next node id (or the final node) )
type Try struct { Route interface{} `json:"route"` Error *ErrorCommand `json:"error"` Success bool `json:"success"` }
{ if int64(h) == fullHint.Get("#").Int()-1 { // this is the first iteration, means it's the last hint channel/hop nextmsat = lastPublicHop.Get("msatoshi").Int() // this is the final amount, yes it is. nextdelay = finalHopDelay nextpeer = finalPeer delaydelta = 0 fees = 0 } else { // now we'll get the value of a hop we've just calculated/iterated over nextHintHop := fullNewRoute[r+1] nextmsat = nextHintHop["msatoshi"].(int64) nextdelay = nextHintHop["delay"].(int64) nextHintChannel := fullHint.Array()[h+1] nextpeer = nextHintChannel.Get("pubkey").String() delaydelta = nextHintChannel.Get("cltv_expiry_delta").Int() // fees for this hop are based on the next fees = nextHintChannel.Get("fee_base_msat").Int() + int64( (float64(nextmsat)/1000)*nextHintChannel.Get("fee_proportional_millionths").Float()/1000, ) } return }
identifier_body
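The filled-in grabParameters body ends with the channel fee expression: fee_base_msat plus (float64(nextmsat)/1000)*fee_proportional_millionths/1000, which is the standard BOLT 7 forwarding formula, fee = base + amount * proportional / 1,000,000, just written with the division split in two. Isolated for clarity (hopFee is my name for it, not the library's):

package main

import "fmt"

// hopFee computes the BOLT 7 forwarding fee in millisatoshi:
// fee = base + amount * proportional / 1_000_000.
// This is the same quantity grabParameters derives from
// fee_base_msat and fee_proportional_millionths.
func hopFee(amountMsat, baseMsat, proportionalMillionths int64) int64 {
	return baseMsat + amountMsat*proportionalMillionths/1_000_000
}

func main() {
	// Forwarding 250_000 msat over a channel charging 1000 msat base + 100 ppm:
	fee := hopFee(250_000, 1000, 100)
	fmt.Println(fee) // 1025
}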
porcelain.go
package lightning import ( "encoding/json" "fmt" "log" "time" "github.com/tidwall/gjson" ) var InvoiceListeningTimeout = time.Minute * 150 var WaitSendPayTimeout = time.Hour * 24 var WaitPaymentMaxAttempts = 60 type Client struct { Path string PaymentHandler func(gjson.Result) LastInvoiceIndex int } // ListenForInvoices starts a goroutine that will repeatedly call waitanyinvoice. // Each payment received will be fed into the client.PaymentHandler function. // You can change that function in the meantime. // Or you can set it to nil if you want to stop listening for invoices. func (ln *Client) ListenForInvoices() { go func() { for { if ln.PaymentHandler == nil { log.Print("won't listen for invoices: no PaymentHandler.") return } res, err := ln.CallWithCustomTimeout(InvoiceListeningTimeout, "waitanyinvoice", ln.LastInvoiceIndex) if err != nil { if _, ok := err.(ErrorTimeout); ok { time.Sleep(time.Minute) } else { log.Printf("error waiting for invoice %d: %s", ln.LastInvoiceIndex, err.Error()) time.Sleep(5 * time.Second) } continue } index := res.Get("pay_index").Int() ln.LastInvoiceIndex = int(index) ln.PaymentHandler(res) } }() } // PayAndWaitUntilResolution implements its 'pay' logic, querying and retrying routes. // It's like the default 'pay' plugin, but it blocks until a final success or failure is achieved. // After it returns you can be sure a failed payment will not succeed anymore. // Any value in params will be passed to 'getroute' or 'sendpay' or smart defaults will be used. // This includes values from the default 'pay' plugin. func (ln *Client) PayAndWaitUntilResolution( bolt11 string, params map[string]interface{}, ) (success bool, payment gjson.Result, tries []Try, err error) { decoded, err := ln.Call("decodepay", bolt11) if err != nil { return false, payment, tries, err } hash := decoded.Get("payment_hash").String() fakePayment := gjson.Parse(`{"payment_hash": "` + hash + `"}`) exclude := []string{} payee := decoded.Get("payee").String() delayFinalHop := decoded.Get("min_final_cltv_expiry").Int() var msatoshi float64 if imsatoshi, ok := params["msatoshi"]; ok { if converted, err := toFloat(imsatoshi); err == nil { msatoshi = converted } } else { msatoshi = decoded.Get("msatoshi").Float() } riskfactor, ok := params["riskfactor"] if !ok { riskfactor = 10 } label, ok := params["label"] if !ok { label = "" } maxfeepercent := 0.5 if imaxfeepercent, ok := params["maxfeepercent"]; ok { if converted, err := toFloat(imaxfeepercent); err == nil { maxfeepercent = converted } } exemptfee := 5000.0 if iexemptfee, ok := params["exemptfee"]; ok { if converted, err := toFloat(iexemptfee); err == nil { exemptfee = converted } } routehints := decoded.Get("routes").Array() if len(routehints) > 0 { for _, rh := range routehints { done, payment := tryPayment(ln, &tries, bolt11, payee, msatoshi, hash, label, &exclude, delayFinalHop, riskfactor, maxfeepercent, exemptfee, &rh) if done { return true, payment, tries, nil } } } else { done, payment := tryPayment(ln, &tries, bolt11, payee, msatoshi, hash, label, &exclude, delayFinalHop, riskfactor, maxfeepercent, exemptfee, nil) if done { return true, payment, tries, nil } } return false, fakePayment, tries, nil } func tryPayment( ln *Client, tries *[]Try, bolt11 string, payee string, msatoshi float64, hash string, label interface{}, exclude *[]string, delayFinalHop int64, riskfactor interface{}, maxfeepercent float64, exemptfee float64, hint *gjson.Result, ) (paid bool, payment gjson.Result) { for try := 0; try < 30; try++ { target := payee if hint != 
nil { target = hint.Get("0.pubkey").String() } res, err := ln.CallNamed("getroute", "id", target, "riskfactor", riskfactor, "cltv", delayFinalHop, "msatoshi", msatoshi, "fuzzpercent", 0, "exclude", *exclude, ) if err != nil { // no route or invalid parameters, call it a simple failure return } if !res.Get("route").Exists() { continue } route := res.Get("route") // if we're using a route hint, increment the queried route with the missing parts if hint != nil { route = addHintToRoute(route, *hint, payee, delayFinalHop) } // inspect route, it shouldn't be too expensive if route.Get("0.msatoshi").Float()/msatoshi > (1 + 1/maxfeepercent) { // too expensive, but we'll still accept it if the payment is small if msatoshi > exemptfee { // otherwise try the next route // we force that by excluding a channel *exclude = append(*exclude, getWorstChannel(route)) continue } } // ignore returned value here as we'll get it from waitsendpay below _, err = ln.CallNamed("sendpay", "route", route.Value(), "payment_hash", hash, "label", label, "bolt11", bolt11, ) if err != nil { // the command may return an error and we don't care if _, ok := err.(ErrorCommand); ok { // we don't care because we'll see this in the next call } else { // otherwise it's a different odd error, stop return } } // this should wait indefinitely, but 24h is enough payment, err = ln.CallWithCustomTimeout(WaitSendPayTimeout, "waitsendpay", hash) if err != nil { if cmderr, ok := err.(ErrorCommand); ok { *tries = append(*tries, Try{route.Value(), &cmderr, false}) switch cmderr.Code { case 200, 202: // try again continue case 204: // error in route, eliminate erring channel and try again data, ok0 := cmderr.Data.(map[string]interface{}) ichannel, ok1 := data["erring_channel"] channel, ok2 := ichannel.(string) if !ok0 || !ok1 || !ok2 { // should never happen return } // if erring channel is in the route hint just stop altogether if hint != nil { for _, hhop := range hint.Array() { if hhop.Get("short_channel_id").String() == channel { return } } } // get erring channel a direction by inspecting the route var direction int64 for _, hop := range route.Array() { if hop.Get("channel").String() == channel { direction = hop.Get("direction").Int() goto gotdirection } } // we should never get here return gotdirection: *exclude = append(*exclude, fmt.Sprintf("%s/%d", channel, direction)) continue } } // a different error, call it a complete failure return } // payment suceeded *tries = append(*tries, Try{route.Value(), nil, true}) return true, payment } // stop trying return } func getWorstChannel(route gjson.Result) (worstChannel string) { var worstFee int64 = 0 hops := route.Array() if len(hops) == 1 { return hops[0].Get("channel").String() + "/" + hops[0].Get("direction").String() } for i := 0; i+1 < len(hops); i++ { hop := hops[i] next := hops[i+1] fee := hop.Get("msatoshi").Int() - next.Get("msatoshi").Int() if fee > worstFee { worstFee = fee worstChannel = hop.Get("channel").String() + "/" + hop.Get("direction").String() } } return } func addHintToRoute( route gjson.Result, hint gjson.Result, finalPeer string, finalHopDelay int64, ) gjson.Result { var extrafees int64 = 0 // these extra fees will be added to the public part var extradelay int64 = 0 // this extra delay will be added to the public part // we know exactly the length of our new route npublichops := route.Get("#").Int() nhinthops := hint.Get("#").Int() newroute := make([]map[string]interface{}, npublichops+nhinthops) // so we can start adding the last hops (from the last and backwards) 
r := len(newroute) - 1 lastPublicHop := route.Array()[npublichops-1] hhops := hint.Array() for h := len(hhops) - 1; h >= 0; h-- { hhop := hhops[h] nextdelay, delaydelta, nextmsat, fees, nextpeer := grabParameters( hint, newroute, lastPublicHop, finalPeer, finalHopDelay, r, h, ) // the delay for this hop is whatever the next hop requires plus the delta delay := nextdelay + delaydelta // calculate this channel direction var direction int if hhop.Get("pubkey").String() < nextpeer { direction = 1 } else { direction = 0 } newroute[r] = map[string]interface{}{ "id": nextpeer, "channel": hhop.Get("short_channel_id").Value(), "direction": direction, "msatoshi": int64(nextmsat) + fees, "delay": delay, } // bump extra stuff for the public part extrafees += fees extradelay += delaydelta r-- } // since these parameters are always based on the 'next' part of the route, we need // to run a fake thing here with the hint channel at index -1 so we'll get the parameters // that actually apply to index 0 -- this is not to add them to the actual route, but only to // grab the 'extra' fees/delay we need to apply to the public part of the route _, delaydelta, _, fees, _ := grabParameters( hint, newroute, lastPublicHop, finalPeer, finalHopDelay, r, -1, ) extrafees += fees extradelay += delaydelta // ~ // now we start from the beginning with the public part of the route r = 0 route.ForEach(func(_, hop gjson.Result) bool { newroute[r] = map[string]interface{}{ "id": hop.Get("id").Value(), "channel": hop.Get("channel").Value(), "direction": hop.Get("direction").Value(), "delay": hop.Get("delay").Int() + extradelay, "msatoshi": hop.Get("msatoshi").Int() + extrafees, } r++ return true }) // turn it into a gjson.Result newroutejsonstr, _ := json.Marshal(newroute) newroutegjson := gjson.ParseBytes(newroutejsonstr) return newroutegjson } func grabParameters( fullHint gjson.Result, fullNewRoute []map[string]interface{}, lastPublicHop gjson.Result, finalPeer string, finalHopDelay int64, r int, // the full route hop index we're working on h int, // the hint part channel index we're working on ) ( nextdelay int64, // delay amount for the hop after this or the final node's cltv delaydelta int64, // delaydelta is given by the next hop hint or 0 nextmsat int64, // msatoshi amount for the hop after this (or the final amount) fees int64, // fees are zero in the last hop, or a crazy calculation otherwise nextpeer string, // next node id (or the final node) ) { if int64(h) == fullHint.Get("#").Int()-1
else { // now we'll get the value of a hop we've just calculated/iterated over nextHintHop := fullNewRoute[r+1] nextmsat = nextHintHop["msatoshi"].(int64) nextdelay = nextHintHop["delay"].(int64) nextHintChannel := fullHint.Array()[h+1] nextpeer = nextHintChannel.Get("pubkey").String() delaydelta = nextHintChannel.Get("cltv_expiry_delta").Int() // fees for this hop are based on the next fees = nextHintChannel.Get("fee_base_msat").Int() + int64( (float64(nextmsat)/1000)*nextHintChannel.Get("fee_proportional_millionths").Float()/1000, ) } return } type Try struct { Route interface{} `json:"route"` Error *ErrorCommand `json:"error"` Success bool `json:"success"` }
{ // this is the first iteration, means it's the last hint channel/hop nextmsat = lastPublicHop.Get("msatoshi").Int() // this is the final amount, yes it is. nextdelay = finalHopDelay nextpeer = finalPeer delaydelta = 0 fees = 0 }
conditional_block
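The conditional reconstructed in this record is the last-hop branch of grabParameters; its sibling computation in addHintToRoute is the direction bit, which comes purely from the lexicographic order of the two node ids on the channel. A sketch of just that comparison follows; which bit value means which direction is a convention of the C-Lightning route format, so treat the 0/1 assignment as mirroring the code above rather than as a spec statement:

package main

import "fmt"

// channelDirection reproduces the comparison used in addHintToRoute:
// the hop's direction bit is derived from lexicographically ordering the
// source node id against the destination node id.
func channelDirection(fromPubkey, toPubkey string) int {
	if fromPubkey < toPubkey {
		return 1
	}
	return 0
}

func main() {
	// "02aa..." sorts before "03bb...", so this prints 1.
	fmt.Println(channelDirection("02aa...", "03bb..."))
}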
porcelain.go
package lightning import ( "encoding/json" "fmt" "log" "time" "github.com/tidwall/gjson" ) var InvoiceListeningTimeout = time.Minute * 150 var WaitSendPayTimeout = time.Hour * 24 var WaitPaymentMaxAttempts = 60 type Client struct { Path string PaymentHandler func(gjson.Result) LastInvoiceIndex int } // ListenForInvoices starts a goroutine that will repeatedly call waitanyinvoice. // Each payment received will be fed into the client.PaymentHandler function. // You can change that function in the meantime. // Or you can set it to nil if you want to stop listening for invoices. func (ln *Client) ListenForInvoices() { go func() { for { if ln.PaymentHandler == nil { log.Print("won't listen for invoices: no PaymentHandler.") return } res, err := ln.CallWithCustomTimeout(InvoiceListeningTimeout, "waitanyinvoice", ln.LastInvoiceIndex) if err != nil { if _, ok := err.(ErrorTimeout); ok { time.Sleep(time.Minute) } else { log.Printf("error waiting for invoice %d: %s", ln.LastInvoiceIndex, err.Error()) time.Sleep(5 * time.Second) } continue } index := res.Get("pay_index").Int() ln.LastInvoiceIndex = int(index) ln.PaymentHandler(res) } }() }
// It's like the default 'pay' plugin, but it blocks until a final success or failure is achieved. // After it returns you can be sure a failed payment will not succeed anymore. // Any value in params will be passed to 'getroute' or 'sendpay' or smart defaults will be used. // This includes values from the default 'pay' plugin. func (ln *Client) PayAndWaitUntilResolution( bolt11 string, params map[string]interface{}, ) (success bool, payment gjson.Result, tries []Try, err error) { decoded, err := ln.Call("decodepay", bolt11) if err != nil { return false, payment, tries, err } hash := decoded.Get("payment_hash").String() fakePayment := gjson.Parse(`{"payment_hash": "` + hash + `"}`) exclude := []string{} payee := decoded.Get("payee").String() delayFinalHop := decoded.Get("min_final_cltv_expiry").Int() var msatoshi float64 if imsatoshi, ok := params["msatoshi"]; ok { if converted, err := toFloat(imsatoshi); err == nil { msatoshi = converted } } else { msatoshi = decoded.Get("msatoshi").Float() } riskfactor, ok := params["riskfactor"] if !ok { riskfactor = 10 } label, ok := params["label"] if !ok { label = "" } maxfeepercent := 0.5 if imaxfeepercent, ok := params["maxfeepercent"]; ok { if converted, err := toFloat(imaxfeepercent); err == nil { maxfeepercent = converted } } exemptfee := 5000.0 if iexemptfee, ok := params["exemptfee"]; ok { if converted, err := toFloat(iexemptfee); err == nil { exemptfee = converted } } routehints := decoded.Get("routes").Array() if len(routehints) > 0 { for _, rh := range routehints { done, payment := tryPayment(ln, &tries, bolt11, payee, msatoshi, hash, label, &exclude, delayFinalHop, riskfactor, maxfeepercent, exemptfee, &rh) if done { return true, payment, tries, nil } } } else { done, payment := tryPayment(ln, &tries, bolt11, payee, msatoshi, hash, label, &exclude, delayFinalHop, riskfactor, maxfeepercent, exemptfee, nil) if done { return true, payment, tries, nil } } return false, fakePayment, tries, nil } func tryPayment( ln *Client, tries *[]Try, bolt11 string, payee string, msatoshi float64, hash string, label interface{}, exclude *[]string, delayFinalHop int64, riskfactor interface{}, maxfeepercent float64, exemptfee float64, hint *gjson.Result, ) (paid bool, payment gjson.Result) { for try := 0; try < 30; try++ { target := payee if hint != nil { target = hint.Get("0.pubkey").String() } res, err := ln.CallNamed("getroute", "id", target, "riskfactor", riskfactor, "cltv", delayFinalHop, "msatoshi", msatoshi, "fuzzpercent", 0, "exclude", *exclude, ) if err != nil { // no route or invalid parameters, call it a simple failure return } if !res.Get("route").Exists() { continue } route := res.Get("route") // if we're using a route hint, augment the queried route with the missing parts if hint != nil { route = addHintToRoute(route, *hint, payee, delayFinalHop) } // inspect route, it shouldn't be too expensive if route.Get("0.msatoshi").Float()/msatoshi > (1 + maxfeepercent/100) { // too expensive, but we'll still accept it if the payment is small if msatoshi > exemptfee { // otherwise try the next route // we force that by excluding a channel *exclude = append(*exclude, getWorstChannel(route)) continue } } // ignore returned value here as we'll get it from waitsendpay below _, err = ln.CallNamed("sendpay", "route", route.Value(), "payment_hash", hash, "label", label, "bolt11", bolt11, ) if err != nil { // the command may return an error and we don't care if _, ok := err.(ErrorCommand); ok { // we don't care because we'll see this in the next call } else { //
otherwise it's a different odd error, stop return } } // this should wait indefinitely, but 24h is enough payment, err = ln.CallWithCustomTimeout(WaitSendPayTimeout, "waitsendpay", hash) if err != nil { if cmderr, ok := err.(ErrorCommand); ok { *tries = append(*tries, Try{route.Value(), &cmderr, false}) switch cmderr.Code { case 200, 202: // try again continue case 204: // error in route, eliminate the erring channel and try again data, ok0 := cmderr.Data.(map[string]interface{}) ichannel, ok1 := data["erring_channel"] channel, ok2 := ichannel.(string) if !ok0 || !ok1 || !ok2 { // should never happen return } // if the erring channel is in the route hint just stop altogether if hint != nil { for _, hhop := range hint.Array() { if hhop.Get("short_channel_id").String() == channel { return } } } // get the erring channel's direction by inspecting the route var direction int64 for _, hop := range route.Array() { if hop.Get("channel").String() == channel { direction = hop.Get("direction").Int() goto gotdirection } } // we should never get here return gotdirection: *exclude = append(*exclude, fmt.Sprintf("%s/%d", channel, direction)) continue } } // a different error, call it a complete failure return } // payment succeeded *tries = append(*tries, Try{route.Value(), nil, true}) return true, payment } // stop trying return } func getWorstChannel(route gjson.Result) (worstChannel string) { var worstFee int64 = 0 hops := route.Array() if len(hops) == 1 { return hops[0].Get("channel").String() + "/" + hops[0].Get("direction").String() } for i := 0; i+1 < len(hops); i++ { hop := hops[i] next := hops[i+1] fee := hop.Get("msatoshi").Int() - next.Get("msatoshi").Int() if fee > worstFee { worstFee = fee worstChannel = hop.Get("channel").String() + "/" + hop.Get("direction").String() } } return } func addHintToRoute( route gjson.Result, hint gjson.Result, finalPeer string, finalHopDelay int64, ) gjson.Result { var extrafees int64 = 0 // these extra fees will be added to the public part var extradelay int64 = 0 // this extra delay will be added to the public part // we know exactly the length of our new route npublichops := route.Get("#").Int() nhinthops := hint.Get("#").Int() newroute := make([]map[string]interface{}, npublichops+nhinthops) // so we can start adding the last hops (starting from the last and going backwards) r := len(newroute) - 1 lastPublicHop := route.Array()[npublichops-1] hhops := hint.Array() for h := len(hhops) - 1; h >= 0; h-- { hhop := hhops[h] nextdelay, delaydelta, nextmsat, fees, nextpeer := grabParameters( hint, newroute, lastPublicHop, finalPeer, finalHopDelay, r, h, ) // the delay for this hop is whatever the next hop requires plus the delta delay := nextdelay + delaydelta // calculate this channel direction var direction int if hhop.Get("pubkey").String() < nextpeer { direction = 1 } else { direction = 0 } newroute[r] = map[string]interface{}{ "id": nextpeer, "channel": hhop.Get("short_channel_id").Value(), "direction": direction, "msatoshi": int64(nextmsat) + fees, "delay": delay, } // bump extra stuff for the public part extrafees += fees extradelay += delaydelta r-- } // since these parameters are always based on the 'next' part of the route, we need // to run a fake thing here with the hint channel at index -1 so we'll get the parameters // that actually apply to index 0 -- this is not to add them to the actual route, but only to // grab the 'extra' fees/delay we need to apply to the public part of the route _, delaydelta, _, fees, _ := grabParameters( hint, newroute, lastPublicHop, finalPeer, finalHopDelay, r, -1, )
extrafees += fees extradelay += delaydelta // ~ // now we start from the beginning with the public part of the route r = 0 route.ForEach(func(_, hop gjson.Result) bool { newroute[r] = map[string]interface{}{ "id": hop.Get("id").Value(), "channel": hop.Get("channel").Value(), "direction": hop.Get("direction").Value(), "delay": hop.Get("delay").Int() + extradelay, "msatoshi": hop.Get("msatoshi").Int() + extrafees, } r++ return true }) // turn it into a gjson.Result newroutejsonstr, _ := json.Marshal(newroute) newroutegjson := gjson.ParseBytes(newroutejsonstr) return newroutegjson } func grabParameters( fullHint gjson.Result, fullNewRoute []map[string]interface{}, lastPublicHop gjson.Result, finalPeer string, finalHopDelay int64, r int, // the full route hop index we're working on h int, // the hint part channel index we're working on ) ( nextdelay int64, // delay amount for the hop after this or the final node's cltv delaydelta int64, // delaydelta is given by the next hop hint or 0 nextmsat int64, // msatoshi amount for the hop after this (or the final amount) fees int64, // fees are zero in the last hop, or a crazy calculation otherwise nextpeer string, // next node id (or the final node) ) { if int64(h) == fullHint.Get("#").Int()-1 { // this is the first iteration, meaning it's the last hint channel/hop nextmsat = lastPublicHop.Get("msatoshi").Int() // this is the final amount, yes it is. nextdelay = finalHopDelay nextpeer = finalPeer delaydelta = 0 fees = 0 } else { // now we'll get the value of a hop we've just calculated/iterated over nextHintHop := fullNewRoute[r+1] nextmsat = nextHintHop["msatoshi"].(int64) nextdelay = nextHintHop["delay"].(int64) nextHintChannel := fullHint.Array()[h+1] nextpeer = nextHintChannel.Get("pubkey").String() delaydelta = nextHintChannel.Get("cltv_expiry_delta").Int() // fees for this hop are based on the next fees = nextHintChannel.Get("fee_base_msat").Int() + int64( (float64(nextmsat)/1000)*nextHintChannel.Get("fee_proportional_millionths").Float()/1000, ) } return } type Try struct { Route interface{} `json:"route"` Error *ErrorCommand `json:"error"` Success bool `json:"success"` }
// PayAndWaitUntilResolution implements its 'pay' logic, querying and retrying routes.
random_line_split
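For context, the resolution loop in the record above is easiest to follow from the caller's side. The following is a minimal usage sketch, not part of the original file: it assumes it sits in the same lightning package, and the socket path and bolt11 invoice are placeholders.

func payExample() {
	ln := &Client{Path: "/home/user/.lightning/lightning-rpc"} // placeholder socket path
	// optional overrides; anything omitted falls back to the defaults in the loop
	params := map[string]interface{}{
		"maxfeepercent": 0.5,
		"exemptfee":     5000,
	}
	success, payment, tries, err := ln.PayAndWaitUntilResolution("lnbc1...", params) // placeholder invoice
	if err != nil {
		log.Printf("pay aborted before routing: %s", err)
		return
	}
	// at this point the payment is final: either settled or definitely failed
	log.Printf("success=%v attempts=%d result=%s", success, len(tries), payment.String())
}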
porcelain.go
package lightning import ( "encoding/json" "fmt" "log" "time" "github.com/tidwall/gjson" ) var InvoiceListeningTimeout = time.Minute * 150 var WaitSendPayTimeout = time.Hour * 24 var WaitPaymentMaxAttempts = 60 type Client struct { Path string PaymentHandler func(gjson.Result) LastInvoiceIndex int } // ListenForInvoices starts a goroutine that will repeatedly call waitanyinvoice. // Each payment received will be fed into the client.PaymentHandler function. // You can change that function in the meantime. // Or you can set it to nil if you want to stop listening for invoices. func (ln *Client) ListenForInvoices() { go func() { for { if ln.PaymentHandler == nil { log.Print("won't listen for invoices: no PaymentHandler.") return } res, err := ln.CallWithCustomTimeout(InvoiceListeningTimeout, "waitanyinvoice", ln.LastInvoiceIndex) if err != nil { if _, ok := err.(ErrorTimeout); ok { time.Sleep(time.Minute) } else { log.Printf("error waiting for invoice %d: %s", ln.LastInvoiceIndex, err.Error()) time.Sleep(5 * time.Second) } continue } index := res.Get("pay_index").Int() ln.LastInvoiceIndex = int(index) ln.PaymentHandler(res) } }() } // PayAndWaitUntilResolution implements its 'pay' logic, querying and retrying routes. // It's like the default 'pay' plugin, but it blocks until a final success or failure is achieved. // After it returns you can be sure a failed payment will not succeed anymore. // Any value in params will be passed to 'getroute' or 'sendpay' or smart defaults will be used. // This includes values from the default 'pay' plugin. func (ln *Client) PayAndWaitUntilResolution( bolt11 string, params map[string]interface{}, ) (success bool, payment gjson.Result, tries []Try, err error) { decoded, err := ln.Call("decodepay", bolt11) if err != nil { return false, payment, tries, err } hash := decoded.Get("payment_hash").String() fakePayment := gjson.Parse(`{"payment_hash": "` + hash + `"}`) exclude := []string{} payee := decoded.Get("payee").String() delayFinalHop := decoded.Get("min_final_cltv_expiry").Int() var msatoshi float64 if imsatoshi, ok := params["msatoshi"]; ok { if converted, err := toFloat(imsatoshi); err == nil { msatoshi = converted } } else { msatoshi = decoded.Get("msatoshi").Float() } riskfactor, ok := params["riskfactor"] if !ok { riskfactor = 10 } label, ok := params["label"] if !ok { label = "" } maxfeepercent := 0.5 if imaxfeepercent, ok := params["maxfeepercent"]; ok { if converted, err := toFloat(imaxfeepercent); err == nil { maxfeepercent = converted } } exemptfee := 5000.0 if iexemptfee, ok := params["exemptfee"]; ok { if converted, err := toFloat(iexemptfee); err == nil { exemptfee = converted } } routehints := decoded.Get("routes").Array() if len(routehints) > 0 { for _, rh := range routehints { done, payment := tryPayment(ln, &tries, bolt11, payee, msatoshi, hash, label, &exclude, delayFinalHop, riskfactor, maxfeepercent, exemptfee, &rh) if done { return true, payment, tries, nil } } } else { done, payment := tryPayment(ln, &tries, bolt11, payee, msatoshi, hash, label, &exclude, delayFinalHop, riskfactor, maxfeepercent, exemptfee, nil) if done { return true, payment, tries, nil } } return false, fakePayment, tries, nil } func tryPayment( ln *Client, tries *[]Try, bolt11 string, payee string, msatoshi float64, hash string, label interface{}, exclude *[]string, delayFinalHop int64, riskfactor interface{}, maxfeepercent float64, exemptfee float64, hint *gjson.Result, ) (paid bool, payment gjson.Result) { for try := 0; try < 30; try++ { target := payee if hint != 
nil { target = hint.Get("0.pubkey").String() } res, err := ln.CallNamed("getroute", "id", target, "riskfactor", riskfactor, "cltv", delayFinalHop, "msatoshi", msatoshi, "fuzzpercent", 0, "exclude", *exclude, ) if err != nil { // no route or invalid parameters, call it a simple failure return } if !res.Get("route").Exists() { continue } route := res.Get("route") // if we're using a route hint, increment the queried route with the missing parts if hint != nil { route = addHintToRoute(route, *hint, payee, delayFinalHop) } // inspect route, it shouldn't be too expensive if route.Get("0.msatoshi").Float()/msatoshi > (1 + 1/maxfeepercent) { // too expensive, but we'll still accept it if the payment is small if msatoshi > exemptfee { // otherwise try the next route // we force that by excluding a channel *exclude = append(*exclude, getWorstChannel(route)) continue } } // ignore returned value here as we'll get it from waitsendpay below _, err = ln.CallNamed("sendpay", "route", route.Value(), "payment_hash", hash, "label", label, "bolt11", bolt11, ) if err != nil { // the command may return an error and we don't care if _, ok := err.(ErrorCommand); ok { // we don't care because we'll see this in the next call } else { // otherwise it's a different odd error, stop return } } // this should wait indefinitely, but 24h is enough payment, err = ln.CallWithCustomTimeout(WaitSendPayTimeout, "waitsendpay", hash) if err != nil { if cmderr, ok := err.(ErrorCommand); ok { *tries = append(*tries, Try{route.Value(), &cmderr, false}) switch cmderr.Code { case 200, 202: // try again continue case 204: // error in route, eliminate erring channel and try again data, ok0 := cmderr.Data.(map[string]interface{}) ichannel, ok1 := data["erring_channel"] channel, ok2 := ichannel.(string) if !ok0 || !ok1 || !ok2 { // should never happen return } // if erring channel is in the route hint just stop altogether if hint != nil { for _, hhop := range hint.Array() { if hhop.Get("short_channel_id").String() == channel { return } } } // get the erring channel's direction by inspecting the route var direction int64 for _, hop := range route.Array() { if hop.Get("channel").String() == channel { direction = hop.Get("direction").Int() goto gotdirection } } // we should never get here return gotdirection: *exclude = append(*exclude, fmt.Sprintf("%s/%d", channel, direction)) continue } } // a different error, call it a complete failure return } // payment succeeded *tries = append(*tries, Try{route.Value(), nil, true}) return true, payment } // stop trying return } func getWorstChannel(route gjson.Result) (worstChannel string) { var worstFee int64 = 0 hops := route.Array() if len(hops) == 1 { return hops[0].Get("channel").String() + "/" + hops[0].Get("direction").String() } for i := 0; i+1 < len(hops); i++ { hop := hops[i] next := hops[i+1] fee := hop.Get("msatoshi").Int() - next.Get("msatoshi").Int() if fee > worstFee { worstFee = fee worstChannel = hop.Get("channel").String() + "/" + hop.Get("direction").String() } } return } func
( route gjson.Result, hint gjson.Result, finalPeer string, finalHopDelay int64, ) gjson.Result { var extrafees int64 = 0 // these extra fees will be added to the public part var extradelay int64 = 0 // this extra delay will be added to the public part // we know exactly the length of our new route npublichops := route.Get("#").Int() nhinthops := hint.Get("#").Int() newroute := make([]map[string]interface{}, npublichops+nhinthops) // so we can start adding the last hops (from the last and backwards) r := len(newroute) - 1 lastPublicHop := route.Array()[npublichops-1] hhops := hint.Array() for h := len(hhops) - 1; h >= 0; h-- { hhop := hhops[h] nextdelay, delaydelta, nextmsat, fees, nextpeer := grabParameters( hint, newroute, lastPublicHop, finalPeer, finalHopDelay, r, h, ) // delay for this hop is anything in the next hop plus the delta delay := nextdelay + delaydelta // calculate this channel direction var direction int if hhop.Get("pubkey").String() < nextpeer { direction = 1 } else { direction = 0 } newroute[r] = map[string]interface{}{ "id": nextpeer, "channel": hhop.Get("short_channel_id").Value(), "direction": direction, "msatoshi": int64(nextmsat) + fees, "delay": delay, } // bump extra stuff for the public part extrafees += fees extradelay += delaydelta r-- } // since these parameters are always based on the 'next' part of the route, we need // to run a fake thing here with the hint channel at index -1 so we'll get the parameters // for index 0 itself -- this is not to add them to the actual route, but only to // grab the 'extra' fees/delay we need to apply to the public part of the route _, delaydelta, _, fees, _ := grabParameters( hint, newroute, lastPublicHop, finalPeer, finalHopDelay, r, -1, ) extrafees += fees extradelay += delaydelta // ~ // now we start from the beginning with the public part of the route r = 0 route.ForEach(func(_, hop gjson.Result) bool { newroute[r] = map[string]interface{}{ "id": hop.Get("id").Value(), "channel": hop.Get("channel").Value(), "direction": hop.Get("direction").Value(), "delay": hop.Get("delay").Int() + extradelay, "msatoshi": hop.Get("msatoshi").Int() + extrafees, } r++ return true }) // turn it into a gjson.Result newroutejsonstr, _ := json.Marshal(newroute) newroutegjson := gjson.ParseBytes(newroutejsonstr) return newroutegjson } func grabParameters( fullHint gjson.Result, fullNewRoute []map[string]interface{}, lastPublicHop gjson.Result, finalPeer string, finalHopDelay int64, r int, // the full route hop index we're working on h int, // the hint part channel index we're working on ) ( nextdelay int64, // delay amount for the hop after this or the final node's cltv delaydelta int64, // delaydelta is given by the next hop hint or 0 nextmsat int64, // msatoshi amount for the hop after this (or the final amount) fees int64, // fees are zero in the last hop, or a crazy calculation otherwise nextpeer string, // next node id (or the final node) ) { if int64(h) == fullHint.Get("#").Int()-1 { // this is the first iteration, meaning it's the last hint channel/hop nextmsat = lastPublicHop.Get("msatoshi").Int() // this is the final amount, yes it is. 
nextdelay = finalHopDelay nextpeer = finalPeer delaydelta = 0 fees = 0 } else { // now we'll get the value of a hop we've just calculated/iterated over nextHintHop := fullNewRoute[r+1] nextmsat = nextHintHop["msatoshi"].(int64) nextdelay = nextHintHop["delay"].(int64) nextHintChannel := fullHint.Array()[h+1] nextpeer = nextHintChannel.Get("pubkey").String() delaydelta = nextHintChannel.Get("cltv_expiry_delta").Int() // fees for this hop are based on the next fees = nextHintChannel.Get("fee_base_msat").Int() + int64( (float64(nextmsat)/1000)*nextHintChannel.Get("fee_proportional_millionths").Float()/1000, ) } return } type Try struct { Route interface{} `json:"route"` Error *ErrorCommand `json:"error"` Success bool `json:"success"` }
addHintToRoute
identifier_name
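The fee arithmetic buried in grabParameters is easier to check in isolation. A standalone restatement (a sketch, not part of the original file): each hint hop charges its base fee plus a proportional fee expressed in millionths of the amount it forwards.

// hopFee mirrors grabParameters' calculation: fee_base_msat plus
// fee_proportional_millionths of the forwarded amount.
func hopFee(amountMsat, baseMsat int64, proportionalMillionths float64) int64 {
	return baseMsat + int64(float64(amountMsat)*proportionalMillionths/1000000)
}

For example, forwarding 1000000 msat over a channel advertising a 1000 msat base fee and 100 millionths gives hopFee(1000000, 1000, 100) == 1100, the same result as the (msat/1000)*(millionths/1000) form used in the code.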
ProgramsTab.py
3 #!/usr/bin/env python # # File: ProgramsTab.py # by @BitK_ # import re import json from functools import partial from java.awt import ( Font, Color, GridBagLayout, GridBagConstraints, Dimension, Desktop, GridLayout, BorderLayout, FlowLayout, ) from java.net import URI from javax.swing import ( Box, BoxLayout, SpringLayout, JList, JTable, JPanel, JButton, JScrollPane, JLabel, JTextField, ListCellRenderer, ListSelectionModel, DefaultListModel, ) from BetterJava import ( ColumnPanel, make_constraints, RowPanel, FixedColumnPanel, FixedRowPanel, SplitPanel, make_title_border, HTMLRenderer, CallbackActionListener, ) from javax.swing.BorderFactory import createEmptyBorder from helpers import async_call, same_size import context def guess_scope(s): domain_pattern = re.compile( ( r"^" r"(?:(?P<protocol>https?)://)?" r"(?P<host>" r"(?:\*\.)?" # allow wildcard at the start r"[a-zA-Z0-9]+(?:\-[a-zA-Z0-9-]+)*" r"(?:\.[a-zA-Z0-9]+(?:\-[a-zA-Z0-9-]+)*)+" r")" r"(?P<port>:[0-9]+)?" # potential port r"(?:/(?P<file>.*))?" # potential path r"$" ) ) match = domain_pattern.match(s) if match: url = {"enabled": True} url["protocol"] = match.group("protocol") or "any" host = re.escape(match.group("host")) host_with_stars = host.replace("\\*", ".*") url["host"] = "^{}$".format(host_with_stars) if match.group("port"): url["port"] = match.group("port") if match.group("file"): url["file"] = match.group("file") return url else: return None class ScopesBox(ColumnPanel): def __init__(self, scopes): ColumnPanel.__init__(self) scope_list = JList(tuple(entry.scope for entry in scopes)) scope_list.setVisibleRowCount(10) btn_list = RowPanel() select_all = JButton("Select all") select_all.setMaximumSize(select_all.getPreferredSize()) select_all.addActionListener( CallbackActionListener(partial(self.do_selection, scope_list, scopes)) ) btn_list.add(select_all) add_scope = JButton("Add to scope") add_scope.setMaximumSize(add_scope.getPreferredSize()) add_scope.addActionListener( CallbackActionListener(partial(self.add_to_scope, scope_list)) ) btn_list.add(add_scope) self.add(JScrollPane(scope_list)) self.add(btn_list) self.setBorder(make_title_border("Scopes")) self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height)) def add_to_scope(self, scope_list, event): config = json.loads(context.callbacks.saveConfigAsJson("target.scope")) config["target"]["scope"]["advanced_mode"] = True for maybe_url in scope_list.getSelectedValues(): url = guess_scope(maybe_url) if url: config["target"]["scope"]["include"].append(url) context.callbacks.loadConfigFromJson(json.dumps(config)) def do_selection(self, scope_list, scopes, event): scope_list.setSelectionInterval(0, len(scopes) - 1) class OutOfScopeBox(ColumnPanel): def __init__(self, out_of_scope): ColumnPanel.__init__(self) out_of_scope_list = JList(tuple(out_of_scope)) self.add(JScrollPane(out_of_scope_list)) self.setBorder(make_title_border("Out of scope")) self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height)) class RewardBox(JPanel): def __init__(self, program): self.setLayout(GridLayout()) self.setBorder(make_title_border("Rewards")) rewards = [ ["minimum", program.bounty_reward_min], ["low", program.bounty_reward_low], ["medium", program.bounty_reward_medium], ["high", program.bounty_reward_high], ["critical", program.bounty_reward_critical], ] table = JTable(rewards, ["level", "reward"]) table.setMaximumSize(table.getPreferredSize()) self.add(table) class StatsBox(JPanel): def __init__(self, program): self.setLayout(GridLayout()) 
self.setBorder(make_title_border("Stats")) stats = [ ["Average response time", program.stats.average_first_time_response], ["Reports - total", program.stats.total_reports], ["Reports - last month", program.stats.total_reports_current_month], ["Reports - last week", program.stats.total_reports_last7_days], ["Reports - last 24h", program.stats.total_reports_last24_hours], ["Hunter thanked", program.stats.total_hunter_thanked], ] table = JTable(stats, ["", ""]) self.add(table) class RulesBox(JScrollPane): def __init__(self, html_rules): html = u"<html><body>{}</body></html>".format(html_rules)
class TitleBtnBox(FixedColumnPanel): def __init__(self, program): url = "https://yeswehack.com/programs/{}".format(program.slug) btn = JButton("Open in browser") btn.addActionListener( CallbackActionListener(lambda _: Desktop.getDesktop().browse(URI(url))) ) self.add(btn) class UABox(JPanel): def __init__(self, program): self.setLayout(GridBagLayout()) self.setBorder(make_title_border("User-Agent", padding=5)) btn = JButton("Add to settings") ua_text = JTextField(program.user_agent) self.add( ua_text, make_constraints(weightx=4, fill=GridBagConstraints.HORIZONTAL) ) self.add(btn, make_constraints(weightx=1)) self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height + 10)) def add_to_options(event): prefix = "Generated by YWH-addon" config = json.loads( context.callbacks.saveConfigAsJson("proxy.match_replace_rules") ) # remove other YWH addon rules match_replace_rules = filter( lambda rule: not rule["comment"].startswith(prefix), config["proxy"]["match_replace_rules"], ) new_rule = { "is_simple_match": False, "enabled": True, "rule_type": "request_header", "string_match": "^User-Agent: (.*)$", "string_replace": "User-Agent: $1 {}".format(program.user_agent), "comment": "{} for {}".format(prefix, program.slug), } match_replace_rules.append(new_rule) config["proxy"]["match_replace_rules"] = match_replace_rules context.callbacks.loadConfigFromJson(json.dumps(config)) btn.addActionListener(CallbackActionListener(add_to_options)) class TitleBox(JPanel): def __init__(self, program): self.setLayout(BorderLayout()) title = JLabel(program.title) title.setFont(Font("Arial", Font.BOLD, 28)) title.setHorizontalAlignment(JLabel.CENTER) title.setVerticalAlignment(JLabel.CENTER) title.setBorder(createEmptyBorder(15, 5, 15, 5)) if not program.public: lbl = JLabel("Private") lbl.setFont(Font("Arial", Font.BOLD, 20)) lbl.setForeground(Color(0xFF2424)) lbl.setBorder(createEmptyBorder(15, 15, 15, 15)) leftbox = lbl else: leftbox = Box.createHorizontalGlue() btnbox = TitleBtnBox(program) btnbox.setBorder(createEmptyBorder(5, 5, 5, 5)) self.add(leftbox, BorderLayout.LINE_START) self.add(title, BorderLayout.CENTER) self.add(btnbox, BorderLayout.LINE_END) same_size(leftbox, btnbox) self.setMaximumSize(Dimension(99999, self.getPreferredSize().height)) class ProgramPane(JPanel): def __init__(self, program): self.setLayout(BorderLayout()) left_col = RulesBox(program.rules_html) right_col = ColumnPanel() scopes = ScopesBox(program.scopes) right_col.add(scopes) if program.out_of_scope: out_of_scopes = OutOfScopeBox(program.out_of_scope) right_col.add(out_of_scopes) if program.user_agent: right_col.add(UABox(program)) reward_stat = FixedRowPanel() reward_stat.add(RewardBox(program)) reward_stat.add(StatsBox(program)) reward_stat.setMaximumSize( Dimension(99999, reward_stat.getPreferredSize().height) ) right_col.add(reward_stat) right_col.add(Box.createVerticalGlue()) cols = FixedRowPanel() cols.add(left_col) cols.add(right_col) self.add(TitleBox(program), BorderLayout.PAGE_START) self.add(cols, BorderLayout.CENTER) class ProgramRenderer(ListCellRenderer, JLabel): def getListCellRendererComponent( self, jlist, program, index, isSelected, cellHashFocus ): if isSelected: self.setBackground(Color(0xFF2424)) self.setForeground(Color.white) else: if program.public: self.setBackground(Color.white) else: self.setBackground(Color(0xFFDDDD)) self.setForeground(Color.black) self.setText(program.title) self.setOpaque(1) self.setBorder(createEmptyBorder(5, 10, 5, 10)) return self class ProgramsTab(JPanel): def 
__init__(self): self.programs = [] self.setLayout(BoxLayout(self, BoxLayout.PAGE_AXIS)) self.JprogramList = JList() self.JprogramList.setSelectionMode(ListSelectionModel.SINGLE_SELECTION) self.JprogramList.addListSelectionListener(self.handle_select) scrollPane = JScrollPane(self.JprogramList) scrollPane.setMinimumSize(Dimension(300, 0)) self.splitPane = SplitPanel(scrollPane, JPanel()) self.add(self.splitPane) self.load_program_list() def load_program_list(self): # fetch and display programs asynchronously async_call( context.api.get_programs, self.display_program_list, self.display_error ) def display_program_list(self, programs): self.programs = programs # titles = tuple(program.title for program in self.programs) model = DefaultListModel() for program in programs: model.addElement(program) self.JprogramList.setModel(model) self.JprogramList.setCellRenderer(ProgramRenderer()) if self.programs: async_call( lambda: context.api.get_program_details(self.programs[0].slug), self.load_program_details, ) else: self.splitPane.setRightComponent(JPanel()) def display_error(self, error): self.JprogramList.setListData(tuple()) self.splitPane.setRightComponent(JLabel("Error : {}".format(error))) def load_program_details(self, pgm_details): pane = ProgramPane(pgm_details) loc = self.splitPane.getDividerLocation() self.splitPane.setRightComponent(pane) self.splitPane.setDividerLocation(loc) def handle_select(self, event): jlist = event.source if event.valueIsAdjusting: return None selected_idx = jlist.getSelectedIndex() if selected_idx < 0 or selected_idx >= len(self.programs): return None slug = self.programs[selected_idx].slug async_call( lambda: context.api.get_program_details(slug), self.load_program_details )
html_renderer = HTMLRenderer(html) html_renderer.add_css_file("style.css") JScrollPane.__init__(self, html_renderer) self.setBorder(make_title_border("Rules"))
random_line_split
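The core of guess_scope above is the host transformation: regex-escape the host, then turn a leading literal wildcard back into a pattern that matches any subdomain. Since the new examples in this document are in Go, here is a self-contained restatement of just that step (a sketch, not part of the add-on):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// hostPattern mirrors guess_scope's host handling: quote regex
// metacharacters, then turn the escaped "*" wildcard back into ".*".
func hostPattern(host string) string {
	quoted := regexp.QuoteMeta(host) // "*.example.com" -> `\*\.example\.com`
	return "^" + strings.ReplaceAll(quoted, `\*`, ".*") + "$"
}

func main() {
	fmt.Println(hostPattern("*.example.com")) // ^.*\.example\.com$
}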
ProgramsTab.py
3 #!/usr/bin/env python # # File: ProgramsTab.py # by @BitK_ # import re import json from functools import partial from java.awt import ( Font, Color, GridBagLayout, GridBagConstraints, Dimension, Desktop, GridLayout, BorderLayout, FlowLayout, ) from java.net import URI from javax.swing import ( Box, BoxLayout, SpringLayout, JList, JTable, JPanel, JButton, JScrollPane, JLabel, JTextField, ListCellRenderer, ListSelectionModel, DefaultListModel, ) from BetterJava import ( ColumnPanel, make_constraints, RowPanel, FixedColumnPanel, FixedRowPanel, SplitPanel, make_title_border, HTMLRenderer, CallbackActionListener, ) from javax.swing.BorderFactory import createEmptyBorder from helpers import async_call, same_size import context def guess_scope(s): domain_pattern = re.compile( ( r"^" r"(?:(?P<protocol>https?)://)?" r"(?P<host>" r"(?:\*\.)?" # allow wildcard at the start r"[a-zA-Z0-9]+(?:\-[a-zA-Z0-9-]+)*" r"(?:\.[a-zA-Z0-9]+(?:\-[a-zA-Z0-9-]+)*)+" r")" r"(?P<port>:[0-9]+)?" # potential port r"(?:/(?P<file>.*))?" # potential path r"$" ) ) match = domain_pattern.match(s) if match: url = {"enabled": True} url["protocol"] = match.group("protocol") or "any" host = re.escape(match.group("host")) host_with_stars = host.replace("\\*", ".*") url["host"] = "^{}$".format(host_with_stars) if match.group("port"): url["port"] = match.group("port") if match.group("file"): url["file"] = match.group("file") return url else: return None class ScopesBox(ColumnPanel): def __init__(self, scopes): ColumnPanel.__init__(self) scope_list = JList(tuple(entry.scope for entry in scopes)) scope_list.setVisibleRowCount(10) btn_list = RowPanel() select_all = JButton("Select all") select_all.setMaximumSize(select_all.getPreferredSize()) select_all.addActionListener( CallbackActionListener(partial(self.do_selection, scope_list, scopes)) ) btn_list.add(select_all) add_scope = JButton("Add to scope") add_scope.setMaximumSize(add_scope.getPreferredSize()) add_scope.addActionListener( CallbackActionListener(partial(self.add_to_scope, scope_list)) ) btn_list.add(add_scope) self.add(JScrollPane(scope_list)) self.add(btn_list) self.setBorder(make_title_border("Scopes")) self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height)) def add_to_scope(self, scope_list, event): config = json.loads(context.callbacks.saveConfigAsJson("target.scope")) config["target"]["scope"]["advanced_mode"] = True for maybe_url in scope_list.getSelectedValues(): url = guess_scope(maybe_url) if url: config["target"]["scope"]["include"].append(url) context.callbacks.loadConfigFromJson(json.dumps(config)) def do_selection(self, scope_list, scopes, event): scope_list.setSelectionInterval(0, len(scopes) - 1) class OutOfScopeBox(ColumnPanel): def __init__(self, out_of_scope): ColumnPanel.__init__(self) out_of_scope_list = JList(tuple(out_of_scope)) self.add(JScrollPane(out_of_scope_list)) self.setBorder(make_title_border("Out of scope")) self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height)) class RewardBox(JPanel): def __init__(self, program): self.setLayout(GridLayout()) self.setBorder(make_title_border("Rewards")) rewards = [ ["minimum", program.bounty_reward_min], ["low", program.bounty_reward_low], ["medium", program.bounty_reward_medium], ["high", program.bounty_reward_high], ["critical", program.bounty_reward_critical], ] table = JTable(rewards, ["level", "reward"]) table.setMaximumSize(table.getPreferredSize()) self.add(table) class StatsBox(JPanel): def __init__(self, program): self.setLayout(GridLayout()) 
self.setBorder(make_title_border("Stats")) stats = [ ["Average response time", program.stats.average_first_time_response], ["Reports - total", program.stats.total_reports], ["Reports - last month", program.stats.total_reports_current_month], ["Reports - last week", program.stats.total_reports_last7_days], ["Reports - last 24h", program.stats.total_reports_last24_hours], ["Hunter thanked", program.stats.total_hunter_thanked], ] table = JTable(stats, ["", ""]) self.add(table) class RulesBox(JScrollPane): def __init__(self, html_rules): html = u"<html><body>{}</body></html>".format(html_rules) html_renderer = HTMLRenderer(html) html_renderer.add_css_file("style.css") JScrollPane.__init__(self, html_renderer) self.setBorder(make_title_border("Rules")) class TitleBtnBox(FixedColumnPanel): def __init__(self, program): url = "https://yeswehack.com/programs/{}".format(program.slug) btn = JButton("Open in browser") btn.addActionListener( CallbackActionListener(lambda _: Desktop.getDesktop().browse(URI(url))) ) self.add(btn) class UABox(JPanel): def __init__(self, program): self.setLayout(GridBagLayout()) self.setBorder(make_title_border("User-Agent", padding=5)) btn = JButton("Add to settings") ua_text = JTextField(program.user_agent) self.add( ua_text, make_constraints(weightx=4, fill=GridBagConstraints.HORIZONTAL) ) self.add(btn, make_constraints(weightx=1)) self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height + 10)) def add_to_options(event): prefix = "Generated by YWH-addon" config = json.loads( context.callbacks.saveConfigAsJson("proxy.match_replace_rules") ) # remove other YWH addon rules match_replace_rules = filter( lambda rule: not rule["comment"].startswith(prefix), config["proxy"]["match_replace_rules"], ) new_rule = { "is_simple_match": False, "enabled": True, "rule_type": "request_header", "string_match": "^User-Agent: (.*)$", "string_replace": "User-Agent: $1 {}".format(program.user_agent), "comment": "{} for {}".format(prefix, program.slug), } match_replace_rules.append(new_rule) config["proxy"]["match_replace_rules"] = match_replace_rules context.callbacks.loadConfigFromJson(json.dumps(config)) btn.addActionListener(CallbackActionListener(add_to_options)) class TitleBox(JPanel): def __init__(self, program): self.setLayout(BorderLayout()) title = JLabel(program.title) title.setFont(Font("Arial", Font.BOLD, 28)) title.setHorizontalAlignment(JLabel.CENTER) title.setVerticalAlignment(JLabel.CENTER) title.setBorder(createEmptyBorder(15, 5, 15, 5)) if not program.public: lbl = JLabel("Private") lbl.setFont(Font("Arial", Font.BOLD, 20)) lbl.setForeground(Color(0xFF2424)) lbl.setBorder(createEmptyBorder(15, 15, 15, 15)) leftbox = lbl else: leftbox = Box.createHorizontalGlue() btnbox = TitleBtnBox(program) btnbox.setBorder(createEmptyBorder(5, 5, 5, 5)) self.add(leftbox, BorderLayout.LINE_START) self.add(title, BorderLayout.CENTER) self.add(btnbox, BorderLayout.LINE_END) same_size(leftbox, btnbox) self.setMaximumSize(Dimension(99999, self.getPreferredSize().height)) class ProgramPane(JPanel): def __init__(self, program): self.setLayout(BorderLayout()) left_col = RulesBox(program.rules_html) right_col = ColumnPanel() scopes = ScopesBox(program.scopes) right_col.add(scopes) if program.out_of_scope: out_of_scopes = OutOfScopeBox(program.out_of_scope) right_col.add(out_of_scopes) if program.user_agent: right_col.add(UABox(program)) reward_stat = FixedRowPanel() reward_stat.add(RewardBox(program)) reward_stat.add(StatsBox(program)) reward_stat.setMaximumSize( Dimension(99999, 
reward_stat.getPreferredSize().height) ) right_col.add(reward_stat) right_col.add(Box.createVerticalGlue()) cols = FixedRowPanel() cols.add(left_col) cols.add(right_col) self.add(TitleBox(program), BorderLayout.PAGE_START) self.add(cols, BorderLayout.CENTER) class ProgramRenderer(ListCellRenderer, JLabel): def getListCellRendererComponent( self, jlist, program, index, isSelected, cellHashFocus ): if isSelected: self.setBackground(Color(0xFF2424)) self.setForeground(Color.white) else: if program.public: self.setBackground(Color.white) else:
self.setForeground(Color.black) self.setText(program.title) self.setOpaque(1) self.setBorder(createEmptyBorder(5, 10, 5, 10)) return self class ProgramsTab(JPanel): def __init__(self): self.programs = [] self.setLayout(BoxLayout(self, BoxLayout.PAGE_AXIS)) self.JprogramList = JList() self.JprogramList.setSelectionMode(ListSelectionModel.SINGLE_SELECTION) self.JprogramList.addListSelectionListener(self.handle_select) scrollPane = JScrollPane(self.JprogramList) scrollPane.setMinimumSize(Dimension(300, 0)) self.splitPane = SplitPanel(scrollPane, JPanel()) self.add(self.splitPane) self.load_program_list() def load_program_list(self): # fetch and display programs asynchronously async_call( context.api.get_programs, self.display_program_list, self.display_error ) def display_program_list(self, programs): self.programs = programs # titles = tuple(program.title for program in self.programs) model = DefaultListModel() for program in programs: model.addElement(program) self.JprogramList.setModel(model) self.JprogramList.setCellRenderer(ProgramRenderer()) if self.programs: async_call( lambda: context.api.get_program_details(self.programs[0].slug), self.load_program_details, ) else: self.splitPane.setRightComponent(JPanel()) def display_error(self, error): self.JprogramList.setListData(tuple()) self.splitPane.setRightComponent(JLabel("Error : {}".format(error))) def load_program_details(self, pgm_details): pane = ProgramPane(pgm_details) loc = self.splitPane.getDividerLocation() self.splitPane.setRightComponent(pane) self.splitPane.setDividerLocation(loc) def handle_select(self, event): jlist = event.source if event.valueIsAdjusting: return None selected_idx = jlist.getSelectedIndex() if selected_idx < 0 or selected_idx >= len(self.programs): return None slug = self.programs[selected_idx].slug async_call( lambda: context.api.get_program_details(slug), self.load_program_details )
self.setBackground(Color(0xFFDDDD))
conditional_block
ProgramsTab.py
3 #!/usr/bin/env python # # File: ProgramsTab.py # by @BitK_ # import re import json from functools import partial from java.awt import ( Font, Color, GridBagLayout, GridBagConstraints, Dimension, Desktop, GridLayout, BorderLayout, FlowLayout, ) from java.net import URI from javax.swing import ( Box, BoxLayout, SpringLayout, JList, JTable, JPanel, JButton, JScrollPane, JLabel, JTextField, ListCellRenderer, ListSelectionModel, DefaultListModel, ) from BetterJava import ( ColumnPanel, make_constraints, RowPanel, FixedColumnPanel, FixedRowPanel, SplitPanel, make_title_border, HTMLRenderer, CallbackActionListener, ) from javax.swing.BorderFactory import createEmptyBorder from helpers import async_call, same_size import context def guess_scope(s): domain_pattern = re.compile( ( r"^" r"(?:(?P<protocol>https?)://)?" r"(?P<host>" r"(?:\*\.)?" # allow wildcard at the start r"[a-zA-Z0-9]+(?:\-[a-zA-Z0-9-]+)*" r"(?:\.[a-zA-Z0-9]+(?:\-[a-zA-Z0-9-]+)*)+" r")" r"(?P<port>:[0-9]+)?" # potential port r"(?:/(?P<file>.*))?" # potential path r"$" ) ) match = domain_pattern.match(s) if match: url = {"enabled": True} url["protocol"] = match.group("protocol") or "any" host = re.escape(match.group("host")) host_with_stars = host.replace("\\*", ".*") url["host"] = "^{}$".format(host_with_stars) if match.group("port"): url["port"] = match.group("port") if match.group("file"): url["file"] = match.group("file") return url else: return None class ScopesBox(ColumnPanel): def __init__(self, scopes): ColumnPanel.__init__(self) scope_list = JList(tuple(entry.scope for entry in scopes)) scope_list.setVisibleRowCount(10) btn_list = RowPanel() select_all = JButton("Select all") select_all.setMaximumSize(select_all.getPreferredSize()) select_all.addActionListener( CallbackActionListener(partial(self.do_selection, scope_list, scopes)) ) btn_list.add(select_all) add_scope = JButton("Add to scope") add_scope.setMaximumSize(add_scope.getPreferredSize()) add_scope.addActionListener( CallbackActionListener(partial(self.add_to_scope, scope_list)) ) btn_list.add(add_scope) self.add(JScrollPane(scope_list)) self.add(btn_list) self.setBorder(make_title_border("Scopes")) self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height)) def
(self, scope_list, event): config = json.loads(context.callbacks.saveConfigAsJson("target.scope")) config["target"]["scope"]["advanced_mode"] = True for maybe_url in scope_list.getSelectedValues(): url = guess_scope(maybe_url) if url: config["target"]["scope"]["include"].append(url) context.callbacks.loadConfigFromJson(json.dumps(config)) def do_selection(self, scope_list, scopes, event): scope_list.setSelectionInterval(0, len(scopes) - 1) class OutOfScopeBox(ColumnPanel): def __init__(self, out_of_scope): ColumnPanel.__init__(self) out_of_scope_list = JList(tuple(out_of_scope)) self.add(JScrollPane(out_of_scope_list)) self.setBorder(make_title_border("Out of scope")) self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height)) class RewardBox(JPanel): def __init__(self, program): self.setLayout(GridLayout()) self.setBorder(make_title_border("Rewards")) rewards = [ ["minimum", program.bounty_reward_min], ["low", program.bounty_reward_low], ["medium", program.bounty_reward_medium], ["high", program.bounty_reward_high], ["critical", program.bounty_reward_critical], ] table = JTable(rewards, ["level", "reward"]) table.setMaximumSize(table.getPreferredSize()) self.add(table) class StatsBox(JPanel): def __init__(self, program): self.setLayout(GridLayout()) self.setBorder(make_title_border("Stats")) stats = [ ["Average response time", program.stats.average_first_time_response], ["Reports - total", program.stats.total_reports], ["Reports - last month", program.stats.total_reports_current_month], ["Reports - last week", program.stats.total_reports_last7_days], ["Reports - last 24h", program.stats.total_reports_last24_hours], ["Hunter thanked", program.stats.total_hunter_thanked], ] table = JTable(stats, ["", ""]) self.add(table) class RulesBox(JScrollPane): def __init__(self, html_rules): html = u"<html><body>{}</body></html>".format(html_rules) html_renderer = HTMLRenderer(html) html_renderer.add_css_file("style.css") JScrollPane.__init__(self, html_renderer) self.setBorder(make_title_border("Rules")) class TitleBtnBox(FixedColumnPanel): def __init__(self, program): url = "https://yeswehack.com/programs/{}".format(program.slug) btn = JButton("Open in browser") btn.addActionListener( CallbackActionListener(lambda _: Desktop.getDesktop().browse(URI(url))) ) self.add(btn) class UABox(JPanel): def __init__(self, program): self.setLayout(GridBagLayout()) self.setBorder(make_title_border("User-Agent", padding=5)) btn = JButton("Add to settings") ua_text = JTextField(program.user_agent) self.add( ua_text, make_constraints(weightx=4, fill=GridBagConstraints.HORIZONTAL) ) self.add(btn, make_constraints(weightx=1)) self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height + 10)) def add_to_options(event): prefix = "Generated by YWH-addon" config = json.loads( context.callbacks.saveConfigAsJson("proxy.match_replace_rules") ) # remove other YWH addon rules match_replace_rules = filter( lambda rule: not rule["comment"].startswith(prefix), config["proxy"]["match_replace_rules"], ) new_rule = { "is_simple_match": False, "enabled": True, "rule_type": "request_header", "string_match": "^User-Agent: (.*)$", "string_replace": "User-Agent: $1 {}".format(program.user_agent), "comment": "{} for {}".format(prefix, program.slug), } match_replace_rules.append(new_rule) config["proxy"]["match_replace_rules"] = match_replace_rules context.callbacks.loadConfigFromJson(json.dumps(config)) btn.addActionListener(CallbackActionListener(add_to_options)) class TitleBox(JPanel): def __init__(self, program): 
self.setLayout(BorderLayout()) title = JLabel(program.title) title.setFont(Font("Arial", Font.BOLD, 28)) title.setHorizontalAlignment(JLabel.CENTER) title.setVerticalAlignment(JLabel.CENTER) title.setBorder(createEmptyBorder(15, 5, 15, 5)) if not program.public: lbl = JLabel("Private") lbl.setFont(Font("Arial", Font.BOLD, 20)) lbl.setForeground(Color(0xFF2424)) lbl.setBorder(createEmptyBorder(15, 15, 15, 15)) leftbox = lbl else: leftbox = Box.createHorizontalGlue() btnbox = TitleBtnBox(program) btnbox.setBorder(createEmptyBorder(5, 5, 5, 5)) self.add(leftbox, BorderLayout.LINE_START) self.add(title, BorderLayout.CENTER) self.add(btnbox, BorderLayout.LINE_END) same_size(leftbox, btnbox) self.setMaximumSize(Dimension(99999, self.getPreferredSize().height)) class ProgramPane(JPanel): def __init__(self, program): self.setLayout(BorderLayout()) left_col = RulesBox(program.rules_html) right_col = ColumnPanel() scopes = ScopesBox(program.scopes) right_col.add(scopes) if program.out_of_scope: out_of_scopes = OutOfScopeBox(program.out_of_scope) right_col.add(out_of_scopes) if program.user_agent: right_col.add(UABox(program)) reward_stat = FixedRowPanel() reward_stat.add(RewardBox(program)) reward_stat.add(StatsBox(program)) reward_stat.setMaximumSize( Dimension(99999, reward_stat.getPreferredSize().height) ) right_col.add(reward_stat) right_col.add(Box.createVerticalGlue()) cols = FixedRowPanel() cols.add(left_col) cols.add(right_col) self.add(TitleBox(program), BorderLayout.PAGE_START) self.add(cols, BorderLayout.CENTER) class ProgramRenderer(ListCellRenderer, JLabel): def getListCellRendererComponent( self, jlist, program, index, isSelected, cellHashFocus ): if isSelected: self.setBackground(Color(0xFF2424)) self.setForeground(Color.white) else: if program.public: self.setBackground(Color.white) else: self.setBackground(Color(0xFFDDDD)) self.setForeground(Color.black) self.setText(program.title) self.setOpaque(1) self.setBorder(createEmptyBorder(5, 10, 5, 10)) return self class ProgramsTab(JPanel): def __init__(self): self.programs = [] self.setLayout(BoxLayout(self, BoxLayout.PAGE_AXIS)) self.JprogramList = JList() self.JprogramList.setSelectionMode(ListSelectionModel.SINGLE_SELECTION) self.JprogramList.addListSelectionListener(self.handle_select) scrollPane = JScrollPane(self.JprogramList) scrollPane.setMinimumSize(Dimension(300, 0)) self.splitPane = SplitPanel(scrollPane, JPanel()) self.add(self.splitPane) self.load_program_list() def load_program_list(self): # fetch and display programs asynchronously async_call( context.api.get_programs, self.display_program_list, self.display_error ) def display_program_list(self, programs): self.programs = programs # titles = tuple(program.title for program in self.programs) model = DefaultListModel() for program in programs: model.addElement(program) self.JprogramList.setModel(model) self.JprogramList.setCellRenderer(ProgramRenderer()) if self.programs: async_call( lambda: context.api.get_program_details(self.programs[0].slug), self.load_program_details, ) else: self.splitPane.setRightComponent(JPanel()) def display_error(self, error): self.JprogramList.setListData(tuple()) self.splitPane.setRightComponent(JLabel("Error : {}".format(error))) def load_program_details(self, pgm_details): pane = ProgramPane(pgm_details) loc = self.splitPane.getDividerLocation() self.splitPane.setRightComponent(pane) self.splitPane.setDividerLocation(loc) def handle_select(self, event): jlist = event.source if event.valueIsAdjusting: return None selected_idx = jlist.getSelectedIndex() 
if selected_idx < 0 or selected_idx >= len(self.programs): return None slug = self.programs[selected_idx].slug async_call( lambda: context.api.get_program_details(slug), self.load_program_details )
add_to_scope
identifier_name
ProgramsTab.py
3 #!/usr/bin/env python # # File: ProgramsTab.py # by @BitK_ # import re import json from functools import partial from java.awt import ( Font, Color, GridBagLayout, GridBagConstraints, Dimension, Desktop, GridLayout, BorderLayout, FlowLayout, ) from java.net import URI from javax.swing import ( Box, BoxLayout, SpringLayout, JList, JTable, JPanel, JButton, JScrollPane, JLabel, JTextField, ListCellRenderer, ListSelectionModel, DefaultListModel, ) from BetterJava import ( ColumnPanel, make_constraints, RowPanel, FixedColumnPanel, FixedRowPanel, SplitPanel, make_title_border, HTMLRenderer, CallbackActionListener, ) from javax.swing.BorderFactory import createEmptyBorder from helpers import async_call, same_size import context def guess_scope(s): domain_pattern = re.compile( ( r"^" r"(?:(?P<protocol>https?)://)?" r"(?P<host>" r"(?:\*\.)?" # allow wildcard at the start r"[a-zA-Z0-9]+(?:\-[a-zA-Z0-9-]+)*" r"(?:\.[a-zA-Z0-9]+(?:\-[a-zA-Z0-9-]+)*)+" r")" r"(?P<port>:[0-9]+)?" # potential port r"(?:/(?P<file>.*))?" # potential path r"$" ) ) match = domain_pattern.match(s) if match: url = {"enabled": True} url["protocol"] = match.group("protocol") or "any" host = re.escape(match.group("host")) host_with_stars = host.replace("\\*", ".*") url["host"] = "^{}$".format(host_with_stars) if match.group("port"): url["port"] = match.group("port") if match.group("file"): url["file"] = match.group("file") return url else: return None class ScopesBox(ColumnPanel): def __init__(self, scopes): ColumnPanel.__init__(self) scope_list = JList(tuple(entry.scope for entry in scopes)) scope_list.setVisibleRowCount(10) btn_list = RowPanel() select_all = JButton("Select all") select_all.setMaximumSize(select_all.getPreferredSize()) select_all.addActionListener( CallbackActionListener(partial(self.do_selection, scope_list, scopes)) ) btn_list.add(select_all) add_scope = JButton("Add to scope") add_scope.setMaximumSize(add_scope.getPreferredSize()) add_scope.addActionListener( CallbackActionListener(partial(self.add_to_scope, scope_list)) ) btn_list.add(add_scope) self.add(JScrollPane(scope_list)) self.add(btn_list) self.setBorder(make_title_border("Scopes")) self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height)) def add_to_scope(self, scope_list, event): config = json.loads(context.callbacks.saveConfigAsJson("target.scope")) config["target"]["scope"]["advanced_mode"] = True for maybe_url in scope_list.getSelectedValues(): url = guess_scope(maybe_url) if url: config["target"]["scope"]["include"].append(url) context.callbacks.loadConfigFromJson(json.dumps(config)) def do_selection(self, scope_list, scopes, event): scope_list.setSelectionInterval(0, len(scopes) - 1) class OutOfScopeBox(ColumnPanel): def __init__(self, out_of_scope): ColumnPanel.__init__(self) out_of_scope_list = JList(tuple(out_of_scope)) self.add(JScrollPane(out_of_scope_list)) self.setBorder(make_title_border("Out of scope")) self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height)) class RewardBox(JPanel): def __init__(self, program): self.setLayout(GridLayout()) self.setBorder(make_title_border("Rewards")) rewards = [ ["minimum", program.bounty_reward_min], ["low", program.bounty_reward_low], ["medium", program.bounty_reward_medium], ["high", program.bounty_reward_high], ["critical", program.bounty_reward_critical], ] table = JTable(rewards, ["level", "reward"]) table.setMaximumSize(table.getPreferredSize()) self.add(table) class StatsBox(JPanel): def __init__(self, program): self.setLayout(GridLayout()) 
self.setBorder(make_title_border("Stats")) stats = [ ["Average response time", program.stats.average_first_time_response], ["Reports - total", program.stats.total_reports], ["Reports - last month", program.stats.total_reports_current_month], ["Reports - last week", program.stats.total_reports_last7_days], ["Reports - last 24h", program.stats.total_reports_last24_hours], ["Hunter thanked", program.stats.total_hunter_thanked], ] table = JTable(stats, ["", ""]) self.add(table) class RulesBox(JScrollPane): def __init__(self, html_rules): html = u"<html><body>{}</body></html>".format(html_rules) html_renderer = HTMLRenderer(html) html_renderer.add_css_file("style.css") JScrollPane.__init__(self, html_renderer) self.setBorder(make_title_border("Rules")) class TitleBtnBox(FixedColumnPanel): def __init__(self, program): url = "https://yeswehack.com/programs/{}".format(program.slug) btn = JButton("Open in browser") btn.addActionListener( CallbackActionListener(lambda _: Desktop.getDesktop().browse(URI(url))) ) self.add(btn) class UABox(JPanel):
class TitleBox(JPanel): def __init__(self, program): self.setLayout(BorderLayout()) title = JLabel(program.title) title.setFont(Font("Arial", Font.BOLD, 28)) title.setHorizontalAlignment(JLabel.CENTER) title.setVerticalAlignment(JLabel.CENTER) title.setBorder(createEmptyBorder(15, 5, 15, 5)) if not program.public: lbl = JLabel("Private") lbl.setFont(Font("Arial", Font.BOLD, 20)) lbl.setForeground(Color(0xFF2424)) lbl.setBorder(createEmptyBorder(15, 15, 15, 15)) leftbox = lbl else: leftbox = Box.createHorizontalGlue() btnbox = TitleBtnBox(program) btnbox.setBorder(createEmptyBorder(5, 5, 5, 5)) self.add(leftbox, BorderLayout.LINE_START) self.add(title, BorderLayout.CENTER) self.add(btnbox, BorderLayout.LINE_END) same_size(leftbox, btnbox) self.setMaximumSize(Dimension(99999, self.getPreferredSize().height)) class ProgramPane(JPanel): def __init__(self, program): self.setLayout(BorderLayout()) left_col = RulesBox(program.rules_html) right_col = ColumnPanel() scopes = ScopesBox(program.scopes) right_col.add(scopes) if program.out_of_scope: out_of_scopes = OutOfScopeBox(program.out_of_scope) right_col.add(out_of_scopes) if program.user_agent: right_col.add(UABox(program)) reward_stat = FixedRowPanel() reward_stat.add(RewardBox(program)) reward_stat.add(StatsBox(program)) reward_stat.setMaximumSize( Dimension(99999, reward_stat.getPreferredSize().height) ) right_col.add(reward_stat) right_col.add(Box.createVerticalGlue()) cols = FixedRowPanel() cols.add(left_col) cols.add(right_col) self.add(TitleBox(program), BorderLayout.PAGE_START) self.add(cols, BorderLayout.CENTER) class ProgramRenderer(ListCellRenderer, JLabel): def getListCellRendererComponent( self, jlist, program, index, isSelected, cellHashFocus ): if isSelected: self.setBackground(Color(0xFF2424)) self.setForeground(Color.white) else: if program.public: self.setBackground(Color.white) else: self.setBackground(Color(0xFFDDDD)) self.setForeground(Color.black) self.setText(program.title) self.setOpaque(1) self.setBorder(createEmptyBorder(5, 10, 5, 10)) return self class ProgramsTab(JPanel): def __init__(self): self.programs = [] self.setLayout(BoxLayout(self, BoxLayout.PAGE_AXIS)) self.JprogramList = JList() self.JprogramList.setSelectionMode(ListSelectionModel.SINGLE_SELECTION) self.JprogramList.addListSelectionListener(self.handle_select) scrollPane = JScrollPane(self.JprogramList) scrollPane.setMinimumSize(Dimension(300, 0)) self.splitPane = SplitPanel(scrollPane, JPanel()) self.add(self.splitPane) self.load_program_list() def load_program_list(self): # fetch and display programs asynchronously async_call( context.api.get_programs, self.display_program_list, self.display_error ) def display_program_list(self, programs): self.programs = programs # titles = tuple(program.title for program in self.programs) model = DefaultListModel() for program in programs: model.addElement(program) self.JprogramList.setModel(model) self.JprogramList.setCellRenderer(ProgramRenderer()) if self.programs: async_call( lambda: context.api.get_program_details(self.programs[0].slug), self.load_program_details, ) else: self.splitPane.setRightComponent(JPanel()) def display_error(self, error): self.JprogramList.setListData(tuple()) self.splitPane.setRightComponent(JLabel("Error : {}".format(error))) def load_program_details(self, pgm_details): pane = ProgramPane(pgm_details) loc = self.splitPane.getDividerLocation() self.splitPane.setRightComponent(pane) self.splitPane.setDividerLocation(loc) def handle_select(self, event): jlist = event.source if 
event.valueIsAdjusting: return None selected_idx = jlist.getSelectedIndex() if selected_idx < 0 or selected_idx >= len(self.programs): return None slug = self.programs[selected_idx].slug async_call( lambda: context.api.get_program_details(slug), self.load_program_details )
def __init__(self, program): self.setLayout(GridBagLayout()) self.setBorder(make_title_border("User-Agent", padding=5)) btn = JButton("Add to settings") ua_text = JTextField(program.user_agent) self.add( ua_text, make_constraints(weightx=4, fill=GridBagConstraints.HORIZONTAL) ) self.add(btn, make_constraints(weightx=1)) self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height + 10)) def add_to_options(event): prefix = "Generated by YWH-addon" config = json.loads( context.callbacks.saveConfigAsJson("proxy.match_replace_rules") ) # remove other YWH addon rules match_replace_rules = filter( lambda rule: not rule["comment"].startswith(prefix), config["proxy"]["match_replace_rules"], ) new_rule = { "is_simple_match": False, "enabled": True, "rule_type": "request_header", "string_match": "^User-Agent: (.*)$", "string_replace": "User-Agent: $1 {}".format(program.user_agent), "comment": "{} for {}".format(prefix, program.slug), } match_replace_rules.append(new_rule) config["proxy"]["match_replace_rules"] = match_replace_rules context.callbacks.loadConfigFromJson(json.dumps(config)) btn.addActionListener(CallbackActionListener(add_to_options))
identifier_body
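UABox's add_to_options handler, shown in the record above, injects one rule into Burp's proxy.match_replace_rules. The shape of that rule is easier to see flattened out; below is a sketch in Go that marshals an equivalent rule (the slug and user agent values are illustrative, not from the original):

package main

import (
	"encoding/json"
	"fmt"
)

// matchReplaceRule mirrors the dict built in UABox.add_to_options.
type matchReplaceRule struct {
	IsSimpleMatch bool   `json:"is_simple_match"`
	Enabled       bool   `json:"enabled"`
	RuleType      string `json:"rule_type"`
	StringMatch   string `json:"string_match"`
	StringReplace string `json:"string_replace"`
	Comment       string `json:"comment"`
}

func main() {
	rule := matchReplaceRule{
		Enabled:       true,
		RuleType:      "request_header",
		StringMatch:   "^User-Agent: (.*)$",
		StringReplace: "User-Agent: $1 ywh-program-ua",           // illustrative user agent
		Comment:       "Generated by YWH-addon for some-program", // illustrative slug
	}
	out, _ := json.MarshalIndent(rule, "", "  ")
	fmt.Println(string(out))
}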
cluster.go
// Copyright (c) 2018 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package m3 import ( "errors" "fmt" "io" "sync" "time" "github.com/m3db/m3/src/dbnode/client" "github.com/m3db/m3/src/query/storage/m3/storagemetadata" xerrors "github.com/m3db/m3/src/x/errors" "github.com/m3db/m3/src/x/ident" ) var ( errNamespaceIDNotSet = errors.New("namespace ID not set") errSessionNotSet = errors.New("session not set") errRetentionNotSet = errors.New("retention not set") errResolutionNotSet = errors.New("resolution not set") errNegativeDataLatency = errors.New("negative dataLatency") // DefaultClusterNamespaceDownsampleOptions holds the default set of options. // NB(antanas): this was made public to access it in promremote storage. // Ideally downsampling could be decoupled from m3 storage. DefaultClusterNamespaceDownsampleOptions = ClusterNamespaceDownsampleOptions{ All: true, } ) // ClusterConfigType is an enum representing the configuration used // to create a Clusters interface type ClusterConfigType int const ( // ClusterConfigTypeStatic is for static configuration. ClusterConfigTypeStatic = iota // ClusterConfigTypeDynamic is for dynamic configuration. ClusterConfigTypeDynamic ) // Clusters is a flattened collection of local storage clusters and namespaces. type Clusters interface { io.Closer // ClusterNamespaces returns all known and ready cluster namespaces. ClusterNamespaces() ClusterNamespaces // NonReadyClusterNamespaces returns all cluster namespaces not in the ready state. NonReadyClusterNamespaces() ClusterNamespaces // UnaggregatedClusterNamespace returns the valid unaggregated // cluster namespace. If the namespace is not yet initialized, returns false. UnaggregatedClusterNamespace() (ClusterNamespace, bool) // AggregatedClusterNamespace returns an aggregated cluster namespace // at a specific retention and resolution. AggregatedClusterNamespace(attrs RetentionResolution) (ClusterNamespace, bool) // ConfigType returns the type of configuration used to create this Clusters // object. ConfigType() ClusterConfigType } // RetentionResolution is a tuple of retention and resolution that describes // an aggregated metrics policy. type RetentionResolution struct { Retention time.Duration Resolution time.Duration } // ClusterNamespace is a local storage cluster namespace. 
type ClusterNamespace interface { NamespaceID() ident.ID Options() ClusterNamespaceOptions Session() client.Session } // ClusterNamespaceOptions is a set of options type ClusterNamespaceOptions struct { // Note: Don't allow direct access, as we want to provide defaults // and/or error if call to access a field is not relevant/correct. attributes storagemetadata.Attributes downsample *ClusterNamespaceDownsampleOptions dataLatency time.Duration readOnly bool } // NewClusterNamespaceOptions creates new cluster namespace options. func NewClusterNamespaceOptions( attributes storagemetadata.Attributes, downsample *ClusterNamespaceDownsampleOptions, ) ClusterNamespaceOptions { return ClusterNamespaceOptions{ attributes: attributes, downsample: downsample, } } // Attributes returns the storage attributes of the cluster namespace. func (o ClusterNamespaceOptions) Attributes() storagemetadata.Attributes { return o.attributes } // DataLatency returns the duration after which the data is available in this cluster namespace. func (o ClusterNamespaceOptions) DataLatency() time.Duration { return o.dataLatency } // ReadOnly returns the value of ReadOnly option for a cluster namespace. func (o ClusterNamespaceOptions) ReadOnly() bool { return o.readOnly } // DownsampleOptions returns the downsample options for a cluster namespace, // which is only valid if the namespace is an aggregated cluster namespace. func (o ClusterNamespaceOptions) DownsampleOptions() ( ClusterNamespaceDownsampleOptions, error, ) { if o.attributes.MetricsType != storagemetadata.AggregatedMetricsType { return ClusterNamespaceDownsampleOptions{}, errNotAggregatedClusterNamespace } if o.downsample == nil
return *o.downsample, nil } // ClusterNamespaceDownsampleOptions is the downsample options for // a cluster namespace. type ClusterNamespaceDownsampleOptions struct { All bool } // ClusterNamespaces is a slice of ClusterNamespace instances. type ClusterNamespaces []ClusterNamespace // NumAggregatedClusterNamespaces returns the number of aggregated // cluster namespaces. func (n ClusterNamespaces) NumAggregatedClusterNamespaces() int { count := 0 for _, namespace := range n { if namespace.Options().Attributes().MetricsType == storagemetadata.AggregatedMetricsType { count++ } } return count } // UnaggregatedClusterNamespaceDefinition is the definition for the // cluster namespace that holds unaggregated metrics data. type UnaggregatedClusterNamespaceDefinition struct { NamespaceID ident.ID Session client.Session Retention time.Duration } // Validate will validate the cluster namespace definition. func (def UnaggregatedClusterNamespaceDefinition) Validate() error { if def.NamespaceID == nil || len(def.NamespaceID.String()) == 0 { return errNamespaceIDNotSet } if def.Session == nil { return errSessionNotSet } if def.Retention <= 0 { return errRetentionNotSet } return nil } // AggregatedClusterNamespaceDefinition is a definition for a // cluster namespace that holds aggregated metrics data at a // specific retention and resolution. type AggregatedClusterNamespaceDefinition struct { NamespaceID ident.ID Session client.Session Retention time.Duration Resolution time.Duration Downsample *ClusterNamespaceDownsampleOptions DataLatency time.Duration ReadOnly bool } // Validate validates the cluster namespace definition. func (def AggregatedClusterNamespaceDefinition) Validate() error { if def.NamespaceID == nil || len(def.NamespaceID.String()) == 0 { return errNamespaceIDNotSet } if def.Session == nil { return errSessionNotSet } if def.Retention <= 0 { return errRetentionNotSet } if def.Resolution <= 0 { return errResolutionNotSet } if def.DataLatency < 0 { return errNegativeDataLatency } return nil } type clusters struct { namespaces []ClusterNamespace unaggregatedNamespace ClusterNamespace aggregatedNamespaces map[RetentionResolution]ClusterNamespace } // NewClusters instantiates a new Clusters instance. 
func NewClusters( unaggregatedClusterNamespace UnaggregatedClusterNamespaceDefinition, aggregatedClusterNamespaces ...AggregatedClusterNamespaceDefinition, ) (Clusters, error) { expectedAggregated := len(aggregatedClusterNamespaces) expectedAll := 1 + expectedAggregated namespaces := make(ClusterNamespaces, 0, expectedAll) aggregatedNamespaces := make(map[RetentionResolution]ClusterNamespace, expectedAggregated) def := unaggregatedClusterNamespace unaggregatedNamespace, err := newUnaggregatedClusterNamespace(def) if err != nil { return nil, err } namespaces = append(namespaces, unaggregatedNamespace) for _, def := range aggregatedClusterNamespaces { namespace, err := newAggregatedClusterNamespace(def) if err != nil { return nil, err } namespaces = append(namespaces, namespace) key := RetentionResolution{ Retention: namespace.Options().Attributes().Retention, Resolution: namespace.Options().Attributes().Resolution, } _, exists := aggregatedNamespaces[key] if exists { return nil, fmt.Errorf("duplicate aggregated namespace exists for: "+ "retention=%s, resolution=%s", key.Retention.String(), key.Resolution.String()) } aggregatedNamespaces[key] = namespace } return &clusters{ namespaces: namespaces, unaggregatedNamespace: unaggregatedNamespace, aggregatedNamespaces: aggregatedNamespaces, }, nil } func (c *clusters) ClusterNamespaces() ClusterNamespaces { return c.namespaces } func (c *clusters) NonReadyClusterNamespaces() ClusterNamespaces { // statically configured cluster namespaces are always considered ready. return nil } func (c *clusters) UnaggregatedClusterNamespace() (ClusterNamespace, bool) { return c.unaggregatedNamespace, true } func (c *clusters) AggregatedClusterNamespace( attrs RetentionResolution, ) (ClusterNamespace, bool) { namespace, ok := c.aggregatedNamespaces[attrs] return namespace, ok } func (c *clusters) ConfigType() ClusterConfigType { return ClusterConfigTypeStatic } func (c *clusters) Close() error { var ( wg sync.WaitGroup syncMultiErrs syncMultiErrs uniqueSessions []client.Session ) // Collect unique sessions, some namespaces may share same // client session (same cluster) uniqueSessions = append(uniqueSessions, c.unaggregatedNamespace.Session()) for _, namespace := range c.aggregatedNamespaces { unique := true for _, session := range uniqueSessions { if namespace.Session() == session { unique = false break } } if unique { uniqueSessions = append(uniqueSessions, namespace.Session()) } } for _, session := range uniqueSessions { session := session // Capture for lambda wg.Add(1) go func() { defer wg.Done() err := session.Close() syncMultiErrs.add(err) }() } wg.Wait() return syncMultiErrs.lastError() } type clusterNamespace struct { namespaceID ident.ID options ClusterNamespaceOptions session client.Session } func newUnaggregatedClusterNamespace( def UnaggregatedClusterNamespaceDefinition, ) (ClusterNamespace, error) { if err := def.Validate(); err != nil { return nil, err } ns := def.NamespaceID // Set namespace to NoFinalize to avoid cloning it in write operations ns.NoFinalize() return &clusterNamespace{ namespaceID: ns, options: ClusterNamespaceOptions{ attributes: storagemetadata.Attributes{ MetricsType: storagemetadata.UnaggregatedMetricsType, Retention: def.Retention, }, }, session: def.Session, }, nil } func newAggregatedClusterNamespace( def AggregatedClusterNamespaceDefinition, ) (ClusterNamespace, error) { if err := def.Validate(); err != nil { return nil, err } ns := def.NamespaceID // Set namespace to NoFinalize to avoid cloning it in write operations 
ns.NoFinalize() return &clusterNamespace{ namespaceID: ns, options: ClusterNamespaceOptions{ attributes: storagemetadata.Attributes{ MetricsType: storagemetadata.AggregatedMetricsType, Retention: def.Retention, Resolution: def.Resolution, }, downsample: def.Downsample, dataLatency: def.DataLatency, readOnly: def.ReadOnly, }, session: def.Session, }, nil } func (n *clusterNamespace) NamespaceID() ident.ID { return n.namespaceID } func (n *clusterNamespace) Options() ClusterNamespaceOptions { return n.options } func (n *clusterNamespace) Session() client.Session { return n.session } type syncMultiErrs struct { sync.Mutex multiErr xerrors.MultiError } func (errs *syncMultiErrs) add(err error) { errs.Lock() errs.multiErr = errs.multiErr.Add(err) errs.Unlock() } func (errs *syncMultiErrs) lastError() error { errs.Lock() defer errs.Unlock() // TODO: consider taking a debug param when building a syncMultiErrs // which would determine whether to return only the last error message // or the consolidated list of errors. return errs.multiErr.LastError() }
{ return DefaultClusterNamespaceDownsampleOptions, nil }
conditional_block
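The conditional block filled in above guards a nil downsample pointer. A minimal sketch of the behaviour it produces, assuming the import path shown in the source and a standard Go module setup:

package main

import (
	"fmt"
	"time"

	m3 "github.com/m3db/m3/src/query/storage/m3"
	"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
)

func main() {
	attrs := storagemetadata.Attributes{
		MetricsType: storagemetadata.AggregatedMetricsType,
		Retention:   720 * time.Hour,
		Resolution:  time.Minute,
	}
	// Passing nil downsample options exercises the nil branch:
	// DownsampleOptions() falls back to the package default (All: true).
	opts := m3.NewClusterNamespaceOptions(attrs, nil)
	ds, err := opts.DownsampleOptions()
	fmt.Println(ds.All, err) // expected output: true <nil>
}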
cluster.go
// Copyright (c) 2018 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package m3 import ( "errors" "fmt" "io" "sync" "time" "github.com/m3db/m3/src/dbnode/client" "github.com/m3db/m3/src/query/storage/m3/storagemetadata" xerrors "github.com/m3db/m3/src/x/errors" "github.com/m3db/m3/src/x/ident" ) var ( errNamespaceIDNotSet = errors.New("namespace ID not set") errSessionNotSet = errors.New("session not set") errRetentionNotSet = errors.New("retention not set") errResolutionNotSet = errors.New("resolution not set") errNegativeDataLatency = errors.New("negative dataLatency") // errNotAggregatedClusterNamespace is returned by DownsampleOptions below. errNotAggregatedClusterNamespace = errors.New("not an aggregated cluster namespace") // DefaultClusterNamespaceDownsampleOptions is the default set of downsample options. // NB(antanas): this was made public to access it in promremote storage. // Ideally downsampling could be decoupled from m3 storage. DefaultClusterNamespaceDownsampleOptions = ClusterNamespaceDownsampleOptions{ All: true, } ) // ClusterConfigType is an enum representing the configuration used // to create a Clusters interface. type ClusterConfigType int const ( // ClusterConfigTypeStatic is for static configuration. ClusterConfigTypeStatic = iota // ClusterConfigTypeDynamic is for dynamic configuration. ClusterConfigTypeDynamic ) // Clusters is a flattened collection of local storage clusters and namespaces. type Clusters interface { io.Closer // ClusterNamespaces returns all known and ready cluster namespaces. ClusterNamespaces() ClusterNamespaces // NonReadyClusterNamespaces returns all cluster namespaces not in the ready state. NonReadyClusterNamespaces() ClusterNamespaces // UnaggregatedClusterNamespace returns the valid unaggregated // cluster namespace. If the namespace is not yet initialized, returns false. UnaggregatedClusterNamespace() (ClusterNamespace, bool) // AggregatedClusterNamespace returns an aggregated cluster namespace // at a specific retention and resolution. AggregatedClusterNamespace(attrs RetentionResolution) (ClusterNamespace, bool) // ConfigType returns the type of configuration used to create this Clusters // object. ConfigType() ClusterConfigType } // RetentionResolution is a tuple of retention and resolution that describes // an aggregated metrics policy. type RetentionResolution struct { Retention time.Duration Resolution time.Duration } // ClusterNamespace is a local storage cluster namespace. 
type ClusterNamespace interface { NamespaceID() ident.ID Options() ClusterNamespaceOptions Session() client.Session } // ClusterNamespaceOptions is a set of options type ClusterNamespaceOptions struct { // Note: Don't allow direct access, as we want to provide defaults // and/or error if call to access a field is not relevant/correct. attributes storagemetadata.Attributes downsample *ClusterNamespaceDownsampleOptions dataLatency time.Duration readOnly bool } // NewClusterNamespaceOptions creates new cluster namespace options. func NewClusterNamespaceOptions( attributes storagemetadata.Attributes, downsample *ClusterNamespaceDownsampleOptions, ) ClusterNamespaceOptions { return ClusterNamespaceOptions{ attributes: attributes, downsample: downsample, } } // Attributes returns the storage attributes of the cluster namespace. func (o ClusterNamespaceOptions) Attributes() storagemetadata.Attributes { return o.attributes } // DataLatency returns the duration after which the data is available in this cluster namespace. func (o ClusterNamespaceOptions) DataLatency() time.Duration
// ReadOnly returns the value of ReadOnly option for a cluster namespace. func (o ClusterNamespaceOptions) ReadOnly() bool { return o.readOnly } // DownsampleOptions returns the downsample options for a cluster namespace, // which is only valid if the namespace is an aggregated cluster namespace. func (o ClusterNamespaceOptions) DownsampleOptions() ( ClusterNamespaceDownsampleOptions, error, ) { if o.attributes.MetricsType != storagemetadata.AggregatedMetricsType { return ClusterNamespaceDownsampleOptions{}, errNotAggregatedClusterNamespace } if o.downsample == nil { return DefaultClusterNamespaceDownsampleOptions, nil } return *o.downsample, nil } // ClusterNamespaceDownsampleOptions is the downsample options for // a cluster namespace. type ClusterNamespaceDownsampleOptions struct { All bool } // ClusterNamespaces is a slice of ClusterNamespace instances. type ClusterNamespaces []ClusterNamespace // NumAggregatedClusterNamespaces returns the number of aggregated // cluster namespaces. func (n ClusterNamespaces) NumAggregatedClusterNamespaces() int { count := 0 for _, namespace := range n { if namespace.Options().Attributes().MetricsType == storagemetadata.AggregatedMetricsType { count++ } } return count } // UnaggregatedClusterNamespaceDefinition is the definition for the // cluster namespace that holds unaggregated metrics data. type UnaggregatedClusterNamespaceDefinition struct { NamespaceID ident.ID Session client.Session Retention time.Duration } // Validate will validate the cluster namespace definition. func (def UnaggregatedClusterNamespaceDefinition) Validate() error { if def.NamespaceID == nil || len(def.NamespaceID.String()) == 0 { return errNamespaceIDNotSet } if def.Session == nil { return errSessionNotSet } if def.Retention <= 0 { return errRetentionNotSet } return nil } // AggregatedClusterNamespaceDefinition is a definition for a // cluster namespace that holds aggregated metrics data at a // specific retention and resolution. type AggregatedClusterNamespaceDefinition struct { NamespaceID ident.ID Session client.Session Retention time.Duration Resolution time.Duration Downsample *ClusterNamespaceDownsampleOptions DataLatency time.Duration ReadOnly bool } // Validate validates the cluster namespace definition. func (def AggregatedClusterNamespaceDefinition) Validate() error { if def.NamespaceID == nil || len(def.NamespaceID.String()) == 0 { return errNamespaceIDNotSet } if def.Session == nil { return errSessionNotSet } if def.Retention <= 0 { return errRetentionNotSet } if def.Resolution <= 0 { return errResolutionNotSet } if def.DataLatency < 0 { return errNegativeDataLatency } return nil } type clusters struct { namespaces []ClusterNamespace unaggregatedNamespace ClusterNamespace aggregatedNamespaces map[RetentionResolution]ClusterNamespace } // NewClusters instantiates a new Clusters instance. 
func NewClusters( unaggregatedClusterNamespace UnaggregatedClusterNamespaceDefinition, aggregatedClusterNamespaces ...AggregatedClusterNamespaceDefinition, ) (Clusters, error) { expectedAggregated := len(aggregatedClusterNamespaces) expectedAll := 1 + expectedAggregated namespaces := make(ClusterNamespaces, 0, expectedAll) aggregatedNamespaces := make(map[RetentionResolution]ClusterNamespace, expectedAggregated) def := unaggregatedClusterNamespace unaggregatedNamespace, err := newUnaggregatedClusterNamespace(def) if err != nil { return nil, err } namespaces = append(namespaces, unaggregatedNamespace) for _, def := range aggregatedClusterNamespaces { namespace, err := newAggregatedClusterNamespace(def) if err != nil { return nil, err } namespaces = append(namespaces, namespace) key := RetentionResolution{ Retention: namespace.Options().Attributes().Retention, Resolution: namespace.Options().Attributes().Resolution, } _, exists := aggregatedNamespaces[key] if exists { return nil, fmt.Errorf("duplicate aggregated namespace exists for: "+ "retention=%s, resolution=%s", key.Retention.String(), key.Resolution.String()) } aggregatedNamespaces[key] = namespace } return &clusters{ namespaces: namespaces, unaggregatedNamespace: unaggregatedNamespace, aggregatedNamespaces: aggregatedNamespaces, }, nil } func (c *clusters) ClusterNamespaces() ClusterNamespaces { return c.namespaces } func (c *clusters) NonReadyClusterNamespaces() ClusterNamespaces { // statically configured cluster namespaces are always considered ready. return nil } func (c *clusters) UnaggregatedClusterNamespace() (ClusterNamespace, bool) { return c.unaggregatedNamespace, true } func (c *clusters) AggregatedClusterNamespace( attrs RetentionResolution, ) (ClusterNamespace, bool) { namespace, ok := c.aggregatedNamespaces[attrs] return namespace, ok } func (c *clusters) ConfigType() ClusterConfigType { return ClusterConfigTypeStatic } func (c *clusters) Close() error { var ( wg sync.WaitGroup syncMultiErrs syncMultiErrs uniqueSessions []client.Session ) // Collect unique sessions, some namespaces may share same // client session (same cluster) uniqueSessions = append(uniqueSessions, c.unaggregatedNamespace.Session()) for _, namespace := range c.aggregatedNamespaces { unique := true for _, session := range uniqueSessions { if namespace.Session() == session { unique = false break } } if unique { uniqueSessions = append(uniqueSessions, namespace.Session()) } } for _, session := range uniqueSessions { session := session // Capture for lambda wg.Add(1) go func() { defer wg.Done() err := session.Close() syncMultiErrs.add(err) }() } wg.Wait() return syncMultiErrs.lastError() } type clusterNamespace struct { namespaceID ident.ID options ClusterNamespaceOptions session client.Session } func newUnaggregatedClusterNamespace( def UnaggregatedClusterNamespaceDefinition, ) (ClusterNamespace, error) { if err := def.Validate(); err != nil { return nil, err } ns := def.NamespaceID // Set namespace to NoFinalize to avoid cloning it in write operations ns.NoFinalize() return &clusterNamespace{ namespaceID: ns, options: ClusterNamespaceOptions{ attributes: storagemetadata.Attributes{ MetricsType: storagemetadata.UnaggregatedMetricsType, Retention: def.Retention, }, }, session: def.Session, }, nil } func newAggregatedClusterNamespace( def AggregatedClusterNamespaceDefinition, ) (ClusterNamespace, error) { if err := def.Validate(); err != nil { return nil, err } ns := def.NamespaceID // Set namespace to NoFinalize to avoid cloning it in write operations 
ns.NoFinalize() return &clusterNamespace{ namespaceID: ns, options: ClusterNamespaceOptions{ attributes: storagemetadata.Attributes{ MetricsType: storagemetadata.AggregatedMetricsType, Retention: def.Retention, Resolution: def.Resolution, }, downsample: def.Downsample, dataLatency: def.DataLatency, readOnly: def.ReadOnly, }, session: def.Session, }, nil } func (n *clusterNamespace) NamespaceID() ident.ID { return n.namespaceID } func (n *clusterNamespace) Options() ClusterNamespaceOptions { return n.options } func (n *clusterNamespace) Session() client.Session { return n.session } type syncMultiErrs struct { sync.Mutex multiErr xerrors.MultiError } func (errs *syncMultiErrs) add(err error) { errs.Lock() errs.multiErr = errs.multiErr.Add(err) errs.Unlock() } func (errs *syncMultiErrs) lastError() error { errs.Lock() defer errs.Unlock() // TODO: consider taking a debug param when building a syncMultiErrs // which would determine whether to return only the last error message // or the consolidated list of errors. return errs.multiErr.LastError() }
{ return o.dataLatency }
identifier_body
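Since NewClusterNamespaceOptions takes no latency argument, dataLatency is only non-zero for namespaces built from an AggregatedClusterNamespaceDefinition. A hedged sketch of how a caller might use the accessor; the cutoff logic is illustrative, not part of the m3 API:

package m3example

import (
	"time"

	m3 "github.com/m3db/m3/src/query/storage/m3"
)

// queryEnd clamps a query's end time so it does not ask a namespace for
// samples that, per its configured DataLatency, may not have arrived yet.
func queryEnd(ns m3.ClusterNamespace, now time.Time) time.Time {
	if latency := ns.Options().DataLatency(); latency > 0 {
		return now.Add(-latency)
	}
	return now
}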
cluster.go
// Copyright (c) 2018 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package m3 import ( "errors" "fmt" "io" "sync" "time" "github.com/m3db/m3/src/dbnode/client" "github.com/m3db/m3/src/query/storage/m3/storagemetadata" xerrors "github.com/m3db/m3/src/x/errors" "github.com/m3db/m3/src/x/ident" ) var ( errNamespaceIDNotSet = errors.New("namespace ID not set") errSessionNotSet = errors.New("session not set") errRetentionNotSet = errors.New("retention not set") errResolutionNotSet = errors.New("resolution not set") errNegativeDataLatency = errors.New("negative dataLatency") // errNotAggregatedClusterNamespace is returned by DownsampleOptions below. errNotAggregatedClusterNamespace = errors.New("not an aggregated cluster namespace") // DefaultClusterNamespaceDownsampleOptions is the default set of downsample options. // NB(antanas): this was made public to access it in promremote storage. // Ideally downsampling could be decoupled from m3 storage. DefaultClusterNamespaceDownsampleOptions = ClusterNamespaceDownsampleOptions{ All: true, } ) // ClusterConfigType is an enum representing the configuration used // to create a Clusters interface. type ClusterConfigType int const ( // ClusterConfigTypeStatic is for static configuration. ClusterConfigTypeStatic = iota // ClusterConfigTypeDynamic is for dynamic configuration. ClusterConfigTypeDynamic ) // Clusters is a flattened collection of local storage clusters and namespaces. type Clusters interface { io.Closer // ClusterNamespaces returns all known and ready cluster namespaces. ClusterNamespaces() ClusterNamespaces // NonReadyClusterNamespaces returns all cluster namespaces not in the ready state. NonReadyClusterNamespaces() ClusterNamespaces // UnaggregatedClusterNamespace returns the valid unaggregated // cluster namespace. If the namespace is not yet initialized, returns false. UnaggregatedClusterNamespace() (ClusterNamespace, bool) // AggregatedClusterNamespace returns an aggregated cluster namespace // at a specific retention and resolution. AggregatedClusterNamespace(attrs RetentionResolution) (ClusterNamespace, bool) // ConfigType returns the type of configuration used to create this Clusters // object. ConfigType() ClusterConfigType } // RetentionResolution is a tuple of retention and resolution that describes // an aggregated metrics policy. type RetentionResolution struct { Retention time.Duration Resolution time.Duration } // ClusterNamespace is a local storage cluster namespace. 
type ClusterNamespace interface { NamespaceID() ident.ID Options() ClusterNamespaceOptions Session() client.Session } // ClusterNamespaceOptions is a set of options type ClusterNamespaceOptions struct { // Note: Don't allow direct access, as we want to provide defaults // and/or error if call to access a field is not relevant/correct. attributes storagemetadata.Attributes downsample *ClusterNamespaceDownsampleOptions dataLatency time.Duration readOnly bool } // NewClusterNamespaceOptions creates new cluster namespace options. func NewClusterNamespaceOptions( attributes storagemetadata.Attributes, downsample *ClusterNamespaceDownsampleOptions, ) ClusterNamespaceOptions { return ClusterNamespaceOptions{ attributes: attributes, downsample: downsample, } } // Attributes returns the storage attributes of the cluster namespace. func (o ClusterNamespaceOptions) Attributes() storagemetadata.Attributes { return o.attributes } // DataLatency returns the duration after which the data is available in this cluster namespace. func (o ClusterNamespaceOptions) DataLatency() time.Duration { return o.dataLatency } // ReadOnly returns the value of ReadOnly option for a cluster namespace. func (o ClusterNamespaceOptions) ReadOnly() bool { return o.readOnly } // DownsampleOptions returns the downsample options for a cluster namespace, // which is only valid if the namespace is an aggregated cluster namespace. func (o ClusterNamespaceOptions) DownsampleOptions() ( ClusterNamespaceDownsampleOptions, error, ) { if o.attributes.MetricsType != storagemetadata.AggregatedMetricsType { return ClusterNamespaceDownsampleOptions{}, errNotAggregatedClusterNamespace } if o.downsample == nil { return DefaultClusterNamespaceDownsampleOptions, nil } return *o.downsample, nil } // ClusterNamespaceDownsampleOptions is the downsample options for // a cluster namespace. type ClusterNamespaceDownsampleOptions struct { All bool } // ClusterNamespaces is a slice of ClusterNamespace instances. type ClusterNamespaces []ClusterNamespace // NumAggregatedClusterNamespaces returns the number of aggregated // cluster namespaces. func (n ClusterNamespaces) NumAggregatedClusterNamespaces() int { count := 0 for _, namespace := range n { if namespace.Options().Attributes().MetricsType == storagemetadata.AggregatedMetricsType { count++ } } return count } // UnaggregatedClusterNamespaceDefinition is the definition for the // cluster namespace that holds unaggregated metrics data. type UnaggregatedClusterNamespaceDefinition struct { NamespaceID ident.ID Session client.Session Retention time.Duration } // Validate will validate the cluster namespace definition. func (def UnaggregatedClusterNamespaceDefinition) Validate() error { if def.NamespaceID == nil || len(def.NamespaceID.String()) == 0 { return errNamespaceIDNotSet } if def.Session == nil { return errSessionNotSet } if def.Retention <= 0 { return errRetentionNotSet } return nil } // AggregatedClusterNamespaceDefinition is a definition for a // cluster namespace that holds aggregated metrics data at a // specific retention and resolution. type AggregatedClusterNamespaceDefinition struct { NamespaceID ident.ID Session client.Session Retention time.Duration Resolution time.Duration Downsample *ClusterNamespaceDownsampleOptions DataLatency time.Duration ReadOnly bool } // Validate validates the cluster namespace definition. 
func (def AggregatedClusterNamespaceDefinition) Validate() error { if def.NamespaceID == nil || len(def.NamespaceID.String()) == 0 { return errNamespaceIDNotSet } if def.Session == nil { return errSessionNotSet } if def.Retention <= 0 { return errRetentionNotSet } if def.Resolution <= 0 { return errResolutionNotSet } if def.DataLatency < 0 { return errNegativeDataLatency } return nil } type clusters struct { namespaces []ClusterNamespace unaggregatedNamespace ClusterNamespace aggregatedNamespaces map[RetentionResolution]ClusterNamespace } // NewClusters instantiates a new Clusters instance. func NewClusters( unaggregatedClusterNamespace UnaggregatedClusterNamespaceDefinition, aggregatedClusterNamespaces ...AggregatedClusterNamespaceDefinition, ) (Clusters, error) { expectedAggregated := len(aggregatedClusterNamespaces) expectedAll := 1 + expectedAggregated namespaces := make(ClusterNamespaces, 0, expectedAll) aggregatedNamespaces := make(map[RetentionResolution]ClusterNamespace, expectedAggregated) def := unaggregatedClusterNamespace unaggregatedNamespace, err := newUnaggregatedClusterNamespace(def) if err != nil { return nil, err } namespaces = append(namespaces, unaggregatedNamespace) for _, def := range aggregatedClusterNamespaces { namespace, err := newAggregatedClusterNamespace(def) if err != nil { return nil, err } namespaces = append(namespaces, namespace) key := RetentionResolution{ Retention: namespace.Options().Attributes().Retention, Resolution: namespace.Options().Attributes().Resolution, } _, exists := aggregatedNamespaces[key] if exists { return nil, fmt.Errorf("duplicate aggregated namespace exists for: "+ "retention=%s, resolution=%s", key.Retention.String(), key.Resolution.String()) } aggregatedNamespaces[key] = namespace } return &clusters{ namespaces: namespaces, unaggregatedNamespace: unaggregatedNamespace, aggregatedNamespaces: aggregatedNamespaces, }, nil } func (c *clusters) ClusterNamespaces() ClusterNamespaces { return c.namespaces } func (c *clusters) NonReadyClusterNamespaces() ClusterNamespaces { // statically configured cluster namespaces are always considered ready. return nil } func (c *clusters) UnaggregatedClusterNamespace() (ClusterNamespace, bool) { return c.unaggregatedNamespace, true } func (c *clusters) AggregatedClusterNamespace( attrs RetentionResolution, ) (ClusterNamespace, bool) { namespace, ok := c.aggregatedNamespaces[attrs] return namespace, ok } func (c *clusters) ConfigType() ClusterConfigType { return ClusterConfigTypeStatic } func (c *clusters) Close() error { var ( wg sync.WaitGroup syncMultiErrs syncMultiErrs uniqueSessions []client.Session ) // Collect unique sessions, some namespaces may share same // client session (same cluster) uniqueSessions = append(uniqueSessions, c.unaggregatedNamespace.Session()) for _, namespace := range c.aggregatedNamespaces { unique := true for _, session := range uniqueSessions { if namespace.Session() == session { unique = false break } } if unique { uniqueSessions = append(uniqueSessions, namespace.Session()) } } for _, session := range uniqueSessions { session := session // Capture for lambda wg.Add(1) go func() { defer wg.Done() err := session.Close() syncMultiErrs.add(err) }() } wg.Wait() return syncMultiErrs.lastError() } type clusterNamespace struct { namespaceID ident.ID options ClusterNamespaceOptions session client.Session } func
( def UnaggregatedClusterNamespaceDefinition, ) (ClusterNamespace, error) { if err := def.Validate(); err != nil { return nil, err } ns := def.NamespaceID // Set namespace to NoFinalize to avoid cloning it in write operations ns.NoFinalize() return &clusterNamespace{ namespaceID: ns, options: ClusterNamespaceOptions{ attributes: storagemetadata.Attributes{ MetricsType: storagemetadata.UnaggregatedMetricsType, Retention: def.Retention, }, }, session: def.Session, }, nil } func newAggregatedClusterNamespace( def AggregatedClusterNamespaceDefinition, ) (ClusterNamespace, error) { if err := def.Validate(); err != nil { return nil, err } ns := def.NamespaceID // Set namespace to NoFinalize to avoid cloning it in write operations ns.NoFinalize() return &clusterNamespace{ namespaceID: ns, options: ClusterNamespaceOptions{ attributes: storagemetadata.Attributes{ MetricsType: storagemetadata.AggregatedMetricsType, Retention: def.Retention, Resolution: def.Resolution, }, downsample: def.Downsample, dataLatency: def.DataLatency, readOnly: def.ReadOnly, }, session: def.Session, }, nil } func (n *clusterNamespace) NamespaceID() ident.ID { return n.namespaceID } func (n *clusterNamespace) Options() ClusterNamespaceOptions { return n.options } func (n *clusterNamespace) Session() client.Session { return n.session } type syncMultiErrs struct { sync.Mutex multiErr xerrors.MultiError } func (errs *syncMultiErrs) add(err error) { errs.Lock() errs.multiErr = errs.multiErr.Add(err) errs.Unlock() } func (errs *syncMultiErrs) lastError() error { errs.Lock() defer errs.Unlock() // TODO: consider taking a debug param when building a syncMultiErrs // which would determine whether to return only the last error message // or the consolidated list of errors. return errs.multiErr.LastError() }
newUnaggregatedClusterNamespace
identifier_name
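newUnaggregatedClusterNamespace (the identifier filled in above) validates its definition before touching the namespace ID. A small sketch of the failure path, assuming ident.StringID from the imports shown; the namespace name is hypothetical and the session is deliberately omitted:

package main

import (
	"fmt"
	"time"

	m3 "github.com/m3db/m3/src/query/storage/m3"
	"github.com/m3db/m3/src/x/ident"
)

func main() {
	def := m3.UnaggregatedClusterNamespaceDefinition{
		NamespaceID: ident.StringID("default_unagg"), // hypothetical namespace name
		Retention:   48 * time.Hour,
		// Session left nil: Validate fails fast with "session not set"
		// before newUnaggregatedClusterNamespace ever touches the ID.
	}
	fmt.Println(def.Validate())
}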
cluster.go
// Copyright (c) 2018 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package m3 import ( "errors" "fmt" "io" "sync" "time" "github.com/m3db/m3/src/dbnode/client" "github.com/m3db/m3/src/query/storage/m3/storagemetadata" xerrors "github.com/m3db/m3/src/x/errors" "github.com/m3db/m3/src/x/ident" ) var ( errNamespaceIDNotSet = errors.New("namespace ID not set") errSessionNotSet = errors.New("session not set") errRetentionNotSet = errors.New("retention not set") errResolutionNotSet = errors.New("resolution not set") errNegativeDataLatency = errors.New("negative dataLatency") // errNotAggregatedClusterNamespace is returned by DownsampleOptions below. errNotAggregatedClusterNamespace = errors.New("not an aggregated cluster namespace") // DefaultClusterNamespaceDownsampleOptions is the default set of downsample options. // NB(antanas): this was made public to access it in promremote storage. // Ideally downsampling could be decoupled from m3 storage. DefaultClusterNamespaceDownsampleOptions = ClusterNamespaceDownsampleOptions{ All: true, } ) // ClusterConfigType is an enum representing the configuration used // to create a Clusters interface. type ClusterConfigType int const ( // ClusterConfigTypeStatic is for static configuration. ClusterConfigTypeStatic = iota // ClusterConfigTypeDynamic is for dynamic configuration. ClusterConfigTypeDynamic ) // Clusters is a flattened collection of local storage clusters and namespaces. type Clusters interface { io.Closer // ClusterNamespaces returns all known and ready cluster namespaces. ClusterNamespaces() ClusterNamespaces // NonReadyClusterNamespaces returns all cluster namespaces not in the ready state. NonReadyClusterNamespaces() ClusterNamespaces // UnaggregatedClusterNamespace returns the valid unaggregated // cluster namespace. If the namespace is not yet initialized, returns false. UnaggregatedClusterNamespace() (ClusterNamespace, bool) // AggregatedClusterNamespace returns an aggregated cluster namespace // at a specific retention and resolution. AggregatedClusterNamespace(attrs RetentionResolution) (ClusterNamespace, bool) // ConfigType returns the type of configuration used to create this Clusters // object. ConfigType() ClusterConfigType } // RetentionResolution is a tuple of retention and resolution that describes // an aggregated metrics policy. type RetentionResolution struct { Retention time.Duration Resolution time.Duration } // ClusterNamespace is a local storage cluster namespace. 
type ClusterNamespace interface { NamespaceID() ident.ID Options() ClusterNamespaceOptions Session() client.Session } // ClusterNamespaceOptions is a set of options type ClusterNamespaceOptions struct { // Note: Don't allow direct access, as we want to provide defaults // and/or error if call to access a field is not relevant/correct. attributes storagemetadata.Attributes downsample *ClusterNamespaceDownsampleOptions dataLatency time.Duration readOnly bool } // NewClusterNamespaceOptions creates new cluster namespace options. func NewClusterNamespaceOptions( attributes storagemetadata.Attributes, downsample *ClusterNamespaceDownsampleOptions, ) ClusterNamespaceOptions { return ClusterNamespaceOptions{ attributes: attributes, downsample: downsample, } } // Attributes returns the storage attributes of the cluster namespace. func (o ClusterNamespaceOptions) Attributes() storagemetadata.Attributes { return o.attributes } // DataLatency returns the duration after which the data is available in this cluster namespace. func (o ClusterNamespaceOptions) DataLatency() time.Duration { return o.dataLatency } // ReadOnly returns the value of ReadOnly option for a cluster namespace. func (o ClusterNamespaceOptions) ReadOnly() bool { return o.readOnly } // DownsampleOptions returns the downsample options for a cluster namespace, // which is only valid if the namespace is an aggregated cluster namespace. func (o ClusterNamespaceOptions) DownsampleOptions() ( ClusterNamespaceDownsampleOptions, error, ) { if o.attributes.MetricsType != storagemetadata.AggregatedMetricsType { return ClusterNamespaceDownsampleOptions{}, errNotAggregatedClusterNamespace } if o.downsample == nil { return DefaultClusterNamespaceDownsampleOptions, nil } return *o.downsample, nil } // ClusterNamespaceDownsampleOptions is the downsample options for // a cluster namespace. type ClusterNamespaceDownsampleOptions struct { All bool } // ClusterNamespaces is a slice of ClusterNamespace instances. type ClusterNamespaces []ClusterNamespace // NumAggregatedClusterNamespaces returns the number of aggregated // cluster namespaces. func (n ClusterNamespaces) NumAggregatedClusterNamespaces() int { count := 0 for _, namespace := range n { if namespace.Options().Attributes().MetricsType == storagemetadata.AggregatedMetricsType { count++ } } return count } // UnaggregatedClusterNamespaceDefinition is the definition for the // cluster namespace that holds unaggregated metrics data. type UnaggregatedClusterNamespaceDefinition struct { NamespaceID ident.ID Session client.Session Retention time.Duration } // Validate will validate the cluster namespace definition. func (def UnaggregatedClusterNamespaceDefinition) Validate() error { if def.NamespaceID == nil || len(def.NamespaceID.String()) == 0 { return errNamespaceIDNotSet } if def.Session == nil { return errSessionNotSet } if def.Retention <= 0 { return errRetentionNotSet } return nil } // AggregatedClusterNamespaceDefinition is a definition for a // cluster namespace that holds aggregated metrics data at a // specific retention and resolution. type AggregatedClusterNamespaceDefinition struct { NamespaceID ident.ID Session client.Session Retention time.Duration Resolution time.Duration Downsample *ClusterNamespaceDownsampleOptions DataLatency time.Duration ReadOnly bool } // Validate validates the cluster namespace definition. 
func (def AggregatedClusterNamespaceDefinition) Validate() error { if def.NamespaceID == nil || len(def.NamespaceID.String()) == 0 { return errNamespaceIDNotSet } if def.Session == nil { return errSessionNotSet } if def.Retention <= 0 { return errRetentionNotSet } if def.Resolution <= 0 { return errResolutionNotSet } if def.DataLatency < 0 { return errNegativeDataLatency } return nil } type clusters struct { namespaces []ClusterNamespace unaggregatedNamespace ClusterNamespace aggregatedNamespaces map[RetentionResolution]ClusterNamespace } // NewClusters instantiates a new Clusters instance. func NewClusters( unaggregatedClusterNamespace UnaggregatedClusterNamespaceDefinition, aggregatedClusterNamespaces ...AggregatedClusterNamespaceDefinition, ) (Clusters, error) { expectedAggregated := len(aggregatedClusterNamespaces) expectedAll := 1 + expectedAggregated namespaces := make(ClusterNamespaces, 0, expectedAll) aggregatedNamespaces := make(map[RetentionResolution]ClusterNamespace,
expectedAggregated) def := unaggregatedClusterNamespace unaggregatedNamespace, err := newUnaggregatedClusterNamespace(def) if err != nil { return nil, err } namespaces = append(namespaces, unaggregatedNamespace) for _, def := range aggregatedClusterNamespaces { namespace, err := newAggregatedClusterNamespace(def) if err != nil { return nil, err } namespaces = append(namespaces, namespace) key := RetentionResolution{ Retention: namespace.Options().Attributes().Retention, Resolution: namespace.Options().Attributes().Resolution, } _, exists := aggregatedNamespaces[key] if exists { return nil, fmt.Errorf("duplicate aggregated namespace exists for: "+ "retention=%s, resolution=%s", key.Retention.String(), key.Resolution.String()) } aggregatedNamespaces[key] = namespace } return &clusters{ namespaces: namespaces, unaggregatedNamespace: unaggregatedNamespace, aggregatedNamespaces: aggregatedNamespaces, }, nil } func (c *clusters) ClusterNamespaces() ClusterNamespaces { return c.namespaces } func (c *clusters) NonReadyClusterNamespaces() ClusterNamespaces { // statically configured cluster namespaces are always considered ready. return nil } func (c *clusters) UnaggregatedClusterNamespace() (ClusterNamespace, bool) { return c.unaggregatedNamespace, true } func (c *clusters) AggregatedClusterNamespace( attrs RetentionResolution, ) (ClusterNamespace, bool) { namespace, ok := c.aggregatedNamespaces[attrs] return namespace, ok } func (c *clusters) ConfigType() ClusterConfigType { return ClusterConfigTypeStatic } func (c *clusters) Close() error { var ( wg sync.WaitGroup syncMultiErrs syncMultiErrs uniqueSessions []client.Session ) // Collect unique sessions, some namespaces may share same // client session (same cluster) uniqueSessions = append(uniqueSessions, c.unaggregatedNamespace.Session()) for _, namespace := range c.aggregatedNamespaces { unique := true for _, session := range uniqueSessions { if namespace.Session() == session { unique = false break } } if unique { uniqueSessions = append(uniqueSessions, namespace.Session()) } } for _, session := range uniqueSessions { session := session // Capture for lambda wg.Add(1) go func() { defer wg.Done() err := session.Close() syncMultiErrs.add(err) }() } wg.Wait() return syncMultiErrs.lastError() } type clusterNamespace struct { namespaceID ident.ID options ClusterNamespaceOptions session client.Session } func newUnaggregatedClusterNamespace( def UnaggregatedClusterNamespaceDefinition, ) (ClusterNamespace, error) { if err := def.Validate(); err != nil { return nil, err } ns := def.NamespaceID // Set namespace to NoFinalize to avoid cloning it in write operations ns.NoFinalize() return &clusterNamespace{ namespaceID: ns, options: ClusterNamespaceOptions{ attributes: storagemetadata.Attributes{ MetricsType: storagemetadata.UnaggregatedMetricsType, Retention: def.Retention, }, }, session: def.Session, }, nil } func newAggregatedClusterNamespace( def AggregatedClusterNamespaceDefinition, ) (ClusterNamespace, error) { if err := def.Validate(); err != nil { return nil, err } ns := def.NamespaceID // Set namespace to NoFinalize to avoid cloning it in write operations ns.NoFinalize() return &clusterNamespace{ namespaceID: ns, options: ClusterNamespaceOptions{ attributes: storagemetadata.Attributes{ MetricsType: storagemetadata.AggregatedMetricsType, Retention: def.Retention, Resolution: def.Resolution, }, downsample: def.Downsample, dataLatency: def.DataLatency, readOnly: def.ReadOnly, }, session: def.Session, }, nil } func (n *clusterNamespace) NamespaceID() 
ident.ID { return n.namespaceID } func (n *clusterNamespace) Options() ClusterNamespaceOptions { return n.options } func (n *clusterNamespace) Session() client.Session { return n.session } type syncMultiErrs struct { sync.Mutex multiErr xerrors.MultiError } func (errs *syncMultiErrs) add(err error) { errs.Lock() errs.multiErr = errs.multiErr.Add(err) errs.Unlock() } func (errs *syncMultiErrs) lastError() error { errs.Lock() defer errs.Unlock() // TODO: consider taking a debug param when building a syncMultiErrs // which would determine whether to return only the last error message // or the consolidated list of errors. return errs.multiErr.LastError() }
random_line_split
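NewClusters, split across the row above, wires one unaggregated definition plus any number of aggregated ones and rejects duplicate retention/resolution pairs. A sketch of typical construction; the session parameter and namespace names are assumptions, e.g. a session obtained from an already-configured dbnode client:

package m3example

import (
	"time"

	"github.com/m3db/m3/src/dbnode/client"
	m3 "github.com/m3db/m3/src/query/storage/m3"
	"github.com/m3db/m3/src/x/ident"
)

// newStaticClusters builds a static Clusters from one shared session. Sharing
// the session across namespaces is fine; Close() deduplicates sessions.
func newStaticClusters(sess client.Session) (m3.Clusters, error) {
	return m3.NewClusters(
		m3.UnaggregatedClusterNamespaceDefinition{
			NamespaceID: ident.StringID("default_unagg"), // hypothetical names
			Session:     sess,
			Retention:   48 * time.Hour,
		},
		m3.AggregatedClusterNamespaceDefinition{
			NamespaceID: ident.StringID("metrics_30d_1m"),
			Session:     sess,
			Retention:   720 * time.Hour,
			Resolution:  time.Minute, // each retention/resolution pair must be unique
		},
	)
}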
switch.go
// vi: sw=4 ts=4: /* --------------------------------------------------------------------------- Copyright (c) 2013-2015 AT&T Intellectual Property Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------------------------------- */ /* Mnemonic: switch Abstract: Functions associated with the switch data structure. This module also contains the functions that implement path-finding. Dijkstra's algorithm is implemented (see Path_to) to determine a path between two hosts which we assume are connected to one or two switches. The path finding algorithm allows for disjoint networks, which occur when one or more switches are not managed by the controller(s) used to create the network graph. Date: 24 November 2013 Author: E. Scott Daniels Mods: 10 Mar 2014 - We allow a target to be either a switch or host when looking for a path. 13 May 2014 - Corrected bug in debug string. 11 Jun 2014 - Changes to support finding all paths between two VMs rather than just the shortest one. 29 Jun 2014 - Changes to support user link limits. 29 Jul 2014 - Mlag support 23 Oct 2014 - Find path functions return an indication that no path might have been caused by a capacity issue rather than no path. 17 Jun 2015 - Added checking for nil pointer on some functions. General cleanup of comments and switch to stringer interface instead of To_str(). Added support for oneway bandwidth reservations with a function that checks outbound capacity on all switch links. 10 Sep 2015 - Allow finding attached 'hosts' based on uuid. */ package gizmos import ( "fmt" "strings" "github.com/att/tegu" ) /* Defines a switch. */ type Switch struct { id *string // reference id for the switch links []*Link // links to other switches lidx int // next open index in links hosts map[string] bool // hosts that are attached to this switch hvmid map[string]*string // vmids of attached hosts hport map[string] int // the port that the host (string) attaches to // these are for path finding and are needed externally Prev *Switch // previous low cost switch Plink int // index of link on Prev used to reach this node Cost int // cost to reach this node through Prev/Plink Flags int // visited and maybe others } /* Constructor. Generates a switch object with the given id. */ func Mk_switch( id *string ) ( s *Switch ) { if id == nil { // guard before dereferencing the pointer below dup_str := "no_id_given" id = &dup_str } tokens := strings.SplitN( *id, "@", 2 ) // in q-lite world we get host@interface and we need only host portion id = &tokens[0] s = &Switch { id: id, lidx: 0, } s.links = make( []*Link, 32 ) s.hosts = make( map[string]bool, 64 ) s.hport = make( map[string]int, 64 ) s.hvmid = make( map[string]*string, 64 ) return } /* Destruction */ func (s *Switch) Nuke() { for i := 0; i < s.lidx; i++ { s.links[i] = nil } s.links = nil s.hosts = nil s.hport = nil } /* Add a link to the switch. 
*/ func (s *Switch) Add_link( link *Link ) { var ( new_links []*Link i int ) if s == nil { return } if s.lidx >= len( s.links ) { new_links = make( []*Link, s.lidx + 32 ) for i = 0; i < len( s.links ); i++ { new_links[i] = s.links[i] } s.links = new_links } s.links[s.lidx] = link s.lidx++ } /* Track an attached host (by name only) */ func (s *Switch) Add_host( host *string, vmid *string, port int ) { if s == nil { return } s.hosts[*host] = true s.hport[*host] = port s.hvmid[*host] = vmid } /* Returns true if the named host is attached to the switch. The host may be a pointer to either a host name or uuid string. */ func (s *Switch) Has_host( host *string ) (bool) { if s == nil { return false } if s.hvmid[*host] != nil { // allow searches based on the uuid return true } return s.hosts[*host] } /* Return the ID that has been associated with this switch. Likely this is the DPID. */ func (s *Switch) Get_id( ) ( *string ) { if s == nil { return nil } return s.id } /* Return the ith link in our index or nil if i is out of range. Allows the user programme to loop through the list if needed. Yes, this _could_ have been implemented to drive a callback for each list element, but that just makes the user code more complicated requiring an extra function or closure and IMHO adds unneeded maintenance and/or learning curve issues. */ func (s *Switch) Get_link( i int ) ( l *Link ) { if s == nil { return nil } l = nil if i >= 0 && i < s.lidx { l = s.links[i] } return } // -------------- shortest, single, path finding ------------------------------------------------------------- /* Probe all of the neighbours of the switch to see if they are attached to the target host. If a neighbour has the target, we set the reverse path in the neighbour and return it indicating success. If a neighbour does not have the target, we update the neighbour's cost and reverse path _ONLY_ if the cost through the current switch is lower than the cost recorded at the neighbour. If no neighbour links to the target, we return nil. The usr max value is a percentage which defines the max percentage of a link that the user (tenant in openstack terms) is allowed to reserve on any given link. We will not probe a neighbour if the link to it cannot accept the additional capacity. The target may be the name of the host we're looking for, or the ID of the endpoint switch to support finding a path to a "gateway". */ func (s *Switch)
( target *string, commence, conclude, inc_cap int64, usr *string, usr_max int64 ) ( found *Switch, cap_trip bool ) { var ( fsw *Switch // next neighbour switch (through link) ) found = nil cap_trip = false //fmt.Printf( "\n\nsearching neighbours of (%s) for %s\n", s.To_str(), *target ) for i := 0; i < s.lidx; i++ { if s != fsw { has_room, err := s.links[i].Has_capacity( commence, conclude, inc_cap, usr, usr_max ) if has_room { fsw = s.links[i].forward // at the switch on the other side of the link if (fsw.Flags & tegu.SWFL_VISITED) == 0 { obj_sheep.Baa( 3, "switch:probe_neighbour: following link %d -- has capacity to (%s) and NOT visited", i, fsw.To_str() ) if s.Cost + s.links[i].Cost < fsw.Cost { //fmt.Printf( "\tsetting cost: %d\n", s.Cost + s.links[i].Cost ) fsw.Cost = s.Cost + s.links[i].Cost fsw.Prev = s // shortest path to this node is through s fsw.Plink = i // using its ith link } obj_sheep.Baa( 3, "compare: (%s) (%s)", *target, *(fsw.Get_id()) ) if fsw.Has_host( target ) || *(fsw.Get_id()) == *target { // target is attached to this switch, or the target is a switch that is the forward switch fsw.Prev = s fsw.Plink = i found = fsw return } } } else { obj_sheep.Baa( 2, "no capacity on link: %s", err ) cap_trip = true } } } return } /* Implements Dijkstra's algorithm for finding the shortest path in the network starting from the switch given and stopping when it finds a switch that has the target host attached. At the moment, link costs are all the same, so there is no ordering of queued nodes such that the lowest cost is always searched next. A path may exist, but not be available if the usage on a link cannot support the additional capacity that is requested via inc_cap. The usr_max value is a percentage (1-100) which indicates the max percentage of a link that the user may reserve. The cap_trip return value is set to true if one or more links could not be followed because of capacity. If the returned switch is nil, and cap-trip is true then the most likely cause of failure is capacity, though it _is_ possible that there really is no path between the switch and the target, but we stumbled onto a link at capacity before discovering that there is no real path. The only way to know for sure is to run two searches, first with inc_cap of 0, but that seems silly. 
*/ func (s *Switch) Path_to( target *string, commence, conclude, inc_cap int64, usr *string, usr_max int64 ) ( found *Switch, cap_trip bool ) { var ( sw *Switch fifo []*Switch push int = 0 pop int = 0 pidx int = 0 lcap_trip bool = false // local detection of capacity exceeded on one or more links ) if s == nil { return } cap_trip = false found = nil fifo = make( []*Switch, 4096 ) obj_sheep.Baa( 2, "switch:Path_to: looking for path to %s", *target ) s.Prev = nil fifo[push] = s push++ for ; push != pop; { // if we run out of things in the fifo we're done and found no path sw = fifo[pop] pop++ if pop >= len( fifo ) { pop = 0; } // wrap before the index can run past the end found, cap_trip = sw.probe_neighbours( target, commence, conclude, inc_cap, usr, usr_max ) if found != nil { return } if cap_trip { lcap_trip = true // must preserve this } if sw.Flags & tegu.SWFL_VISITED == 0 { // possible that it was pushed multiple times and already had its neighbours queued for i := 0; i < sw.lidx; i++ { has_room, err := sw.links[i].Has_capacity( commence, conclude, inc_cap, usr, usr_max ) if has_room { if sw.links[i].forward.Flags & tegu.SWFL_VISITED == 0 { fifo[push] = sw.links[i].forward push++ if push >= len( fifo ) { push = 0; } } } else { obj_sheep.Baa( 2, "no capacity on link: %s", err ) lcap_trip = true } } } sw.Flags |= tegu.SWFL_VISITED if pidx > 1 { pidx-- } } cap_trip = lcap_trip // indication that we tripped on capacity at least once if lcap was set return } // -------------------- find all paths ------------------------------------------------ /* A list of links each of which represents a unique path between two switches. */ type trail_list struct { links [][]*Link lidx int // next entry to populate ep *Switch // far end switch } /* Examine all neighbours of the switch 's' for possible connectivity to target host. If s houses the target host, then we push the current path to this host into the trail list and return. */ func (s *Switch) ap_search_neighbours( target *string, clinks []*Link, clidx int, tl *trail_list ) { if s.Has_host( target ) { tl.ep = s // mark the end switch obj_sheep.Baa( 3, "search_neighbours: target found on switch: %s\n", *s.id ) c := make( []*Link, clidx ) copy( c, clinks[0:clidx+1] ) // copy and push into the trail list tl.links[tl.lidx] = c tl.lidx++ } else { // not the end, keep searching forward // TODO: check to see that we aren't beyond limit s.Flags |= tegu.SWFL_VISITED obj_sheep.Baa( 3, "search_neighbours: testing switch: %s has %d links", *s.id, s.lidx ) for i := 0; i < s.lidx; i++ { // for each link to a neighbour sn := s.links[i].Get_forward_sw() if (sn.Flags & tegu.SWFL_VISITED) == 0 { obj_sheep.Baa( 3, "search_neighbours: advancing over link %d switch: %s", i, *sn.id ) clinks[clidx] = s.links[i] // push the link onto the trail and check out the switch at the other end sn.ap_search_neighbours( target, clinks, clidx+1, tl ) obj_sheep.Baa( 3, "search_neighbours: back to switch: %s", *s.id ) } } } s.Flags &= ^tegu.SWFL_VISITED // as we back out we allow paths to come back through } /* Starting at switch s, this function finds all possible paths to the switch that houses the target host, and then returns the list of unique links that are traversed by one or more paths provided that each link can support the increased amount of capacity (inc_amt). The endpoint switch is also returned. If any of the links cannot support the capacity, the list will be nil or empty; this is also the case if no paths are found. The error message will indicate the exact reason if that is important to the caller. 
Usr_max is a percentage value (1-100) that defines the maximum percentage of any link that the user may reserve. */ func (s *Switch) All_paths_to( target *string, commence int64, conclude int64, inc_amt int64, usr *string, usr_max int64 ) ( links []*Link, ep *Switch, err error ) { var ( ulinks map[string]*Link // unique list of links involved in all trails ) links = nil ep = nil err = nil tl := &trail_list{ lidx: 0 } tl.links = make( [][]*Link, 4096 ) clinks := make( []*Link, 4096 ) // working set of links s.ap_search_neighbours( target, clinks, 0, tl ) if tl.lidx > 0 { // found at least one trail ulinks = make( map[string]*Link ) ep = tl.ep obj_sheep.Baa( 2, "switch/all-paths: %d trails found to target", tl.lidx ) for i := 0; i < tl.lidx; i++ { // for each trail between the two endpoints obj_sheep.Baa( 3, "Trail %d follows:", i ) for j := range tl.links[i] { lid := tl.links[i][j].Get_id() // add if not already found in another trail if ulinks[*lid] == nil { ulinks[*lid] = tl.links[i][j] } obj_sheep.Baa( 3, "link %d: %s", j, tl.links[i][j].To_str( ) ) } } obj_sheep.Baa( 2, "found %d unique links across %d trails", len( ulinks ), tl.lidx ) links = make( []*Link, len( ulinks ) ) i := 0 for _, v := range ulinks { // TODO: Add tenant based check _, cerr := v.Has_capacity( commence, conclude, inc_amt, usr, usr_max ) // distinct name so the named err return is actually set if cerr != nil { err = fmt.Errorf( "no capacity found between switch (%s) and target (%s)", *s.id, *target ) obj_sheep.Baa( 2, "all_paths: no capacity on link: %s", err ) links = nil break } // TODO: Add warning if the capacity for the link is above threshold (here, or when the usage is actually bumped up?) links[i] = v i++ } } else { err = fmt.Errorf( "no paths found between switch (%s) and target (%s)", *s.id, *target ) } return } /* Checks all links to determine if they _all_ have the capacity to support additional outbound traffic (inc_cap). Used to check for gating when a path isn't built, but rate limiting at ingress is needed. */ func (s *Switch) Has_capacity_out( commence, conclude, inc_cap int64, usr *string, usr_max int64 ) ( bool ) { if s == nil { return false } for i := 0; i < s.lidx; i++ { has_room, err := s.links[i].Has_capacity( commence, conclude, inc_cap, usr, usr_max ) if ! has_room { obj_sheep.Baa( 2, "switch/cap_out: no capacity on link from %s: %s", s.id, err ) return false } } obj_sheep.Baa( 2, "switch/cap_out: %s has capacity", s.id ) return true } // -------------------- formatting ---------------------------------------------------- /* Generate some usable representation for debugging. Deprecated -- use Stringer interface (String()) */ func (s *Switch) To_str( ) ( string ) { return s.String() } /* Generate some usable representation for debugging */ func (s *Switch) String( ) ( string ) { if s != nil { return fmt.Sprintf( "%s %d links cost=%d fl=0x%02x", *s.id, s.lidx, s.Cost, s.Flags ) } return "null-switch" } /* Generate a string containing json representation of the switch. 
*/ func (s *Switch) To_json( ) ( jstr string ) { var sep = "" if s == nil { jstr = `{ id: "null_switch" }` return } if s.lidx > 0 { jstr = fmt.Sprintf( `{ "id": %q, "links": [ `, *s.id ) for i := 0; i < s.lidx; i++ { jstr += fmt.Sprintf( "%s%s", sep, s.links[i].To_json() ) sep = "," } jstr += " ]" } else { jstr = fmt.Sprintf( `{ "id": %q }`, *s.id ) } if len( s.hosts ) > 0 { jstr += fmt.Sprintf( `, "conn_hosts": [ ` ) sep = "" for k := range s.hosts { if s.hosts[k] == true { vmid := "unknown" if s.hvmid[k] != nil { vmid = *s.hvmid[k] } jstr += fmt.Sprintf( `%s { "host": %q, "port": %d, "vmid": %q }`, sep, k, s.hport[k], vmid ) sep = "," } } jstr += " ]" } jstr += " }" return }
probe_neighbours
identifier_name
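probe_neighbours (the identifier filled in above) performs the relaxation step of Dijkstra's algorithm over switch links. A self-contained toy version of the same relax-and-settle idea, independent of the gizmos types; unlike Path_to's plain FIFO, this sketch settles the cheapest unvisited node first:

package main

import "fmt"

type edge struct{ to, cost int }

// shortest returns the least total cost from src to dst over adj, relaxing
// each neighbour the way probe_neighbours updates Cost/Prev/Plink.
func shortest(adj [][]edge, src, dst int) int {
	const inf = int(^uint(0) >> 1)
	dist := make([]int, len(adj))
	visited := make([]bool, len(adj))
	for i := range dist {
		dist[i] = inf
	}
	dist[src] = 0
	for {
		u, best := -1, inf
		for i, d := range dist { // pick the cheapest unsettled node
			if !visited[i] && d < best {
				u, best = i, d
			}
		}
		if u == -1 || u == dst {
			break
		}
		visited[u] = true
		for _, e := range adj[u] { // relax: keep the cheaper route
			if nd := dist[u] + e.cost; nd < dist[e.to] {
				dist[e.to] = nd
			}
		}
	}
	return dist[dst]
}

func main() {
	adj := [][]edge{0: {{1, 1}, {2, 4}}, 1: {{2, 1}}, 2: {}}
	fmt.Println(shortest(adj, 0, 2)) // 2: the 0->1->2 route beats direct 0->2
}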
switch.go
// vi: sw=4 ts=4: /* --------------------------------------------------------------------------- Copyright (c) 2013-2015 AT&T Intellectual Property Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------------------------------- */ /* Mnemonic: switch Abstract: Functions associated with the switch data structure. This module also contains the functions that implement path-finding. Dijkstra's algorithm is implemented (see Path_to) to determine a path between two hosts which we assume are connected to one or two switches. The path finding algorithm allows for disjoint networks, which occur when one or more switches are not managed by the controller(s) used to create the network graph. Date: 24 November 2013 Author: E. Scott Daniels Mods: 10 Mar 2014 - We allow a target to be either a switch or host when looking for a path. 13 May 2014 - Corrected bug in debug string. 11 Jun 2014 - Changes to support finding all paths between two VMs rather than just the shortest one. 29 Jun 2014 - Changes to support user link limits. 29 Jul 2014 - Mlag support 23 Oct 2014 - Find path functions return an indication that no path might have been caused by a capacity issue rather than no path. 17 Jun 2015 - Added checking for nil pointer on some functions. General cleanup of comments and switch to stringer interface instead of To_str(). Added support for oneway bandwidth reservations with a function that checks outbound capacity on all switch links. 10 Sep 2015 - Allow finding attached 'hosts' based on uuid. */ package gizmos import ( "fmt" "strings" "github.com/att/tegu" ) /* Defines a switch. */ type Switch struct { id *string // reference id for the switch links []*Link // links to other switches lidx int // next open index in links hosts map[string] bool // hosts that are attached to this switch hvmid map[string]*string // vmids of attached hosts hport map[string] int // the port that the host (string) attaches to // these are for path finding and are needed externally Prev *Switch // previous low cost switch Plink int // index of link on Prev used to reach this node Cost int // cost to reach this node through Prev/Plink Flags int // visited and maybe others } /* Constructor. Generates a switch object with the given id. */ func Mk_switch( id *string ) ( s *Switch ) { if id == nil { // guard before dereferencing the pointer below dup_str := "no_id_given" id = &dup_str } tokens := strings.SplitN( *id, "@", 2 ) // in q-lite world we get host@interface and we need only host portion id = &tokens[0] s = &Switch { id: id, lidx: 0, } s.links = make( []*Link, 32 ) s.hosts = make( map[string]bool, 64 ) s.hport = make( map[string]int, 64 ) s.hvmid = make( map[string]*string, 64 ) return } /* Destruction */ func (s *Switch) Nuke() { for i := 0; i < s.lidx; i++ { s.links[i] = nil } s.links = nil s.hosts = nil s.hport = nil } /* Add a link to the switch. 
*/
func (s *Switch) Add_link( link *Link ) {
	var (
		new_links	[]*Link
		i			int
	)

	if s == nil {
		return
	}

	if s.lidx >= len( s.links ) {				// grow the links slice when it is full
		new_links = make( []*Link, s.lidx + 32 )
		for i = 0; i < len( s.links ); i++ {
			new_links[i] = s.links[i]
		}
		s.links = new_links
	}

	s.links[s.lidx] = link
	s.lidx++
}

/*
	Track an attached host (by name, vmid and port).
*/
func (s *Switch) Add_host( host *string, vmid *string, port int ) {
	if s == nil {
		return
	}

	s.hosts[*host] = true
	s.hport[*host] = port
	s.hvmid[*host] = vmid
}

/*
	Returns true if the named host is attached to the switch. The host may be a pointer
	to either a host name or a uuid string.
*/
func (s *Switch) Has_host( host *string ) ( bool ) {
	if s == nil {
		return false
	}

	if s.hvmid[*host] != nil {		// allow searches based on the uuid
		return true
	}

	return s.hosts[*host]
}

/*
	Return the ID that has been associated with this switch. Likely this is the DPID.
*/
func (s *Switch) Get_id( ) ( *string ) {
	if s == nil {
		return nil
	}
	return s.id
}

/*
	Return the ith link in our index or nil if i is out of range. Allows the user programme
	to loop through the list if needed. Yes, this _could_ have been implemented to drive a
	callback for each list element, but that just makes the user code more complicated,
	requiring an extra function or closure, and IMHO adds unneeded maintenance and/or
	learning curve issues.
*/
func (s *Switch) Get_link( i int ) ( l *Link ) {
	if s == nil {
		return nil
	}

	l = nil
	if i >= 0 && i < s.lidx {
		l = s.links[i]
	}

	return
}

// -------------- shortest, single, path finding -------------------------------------------------------------

/*
	Probe all of the neighbours of the switch to see if they are attached to the target
	host. If a neighbour has the target, we set the reverse path in the neighbour and
	return it indicating success. If a neighbour does not have the target, we update the
	neighbour's cost and reverse path _ONLY_ if the cost through the current switch is
	lower than the cost recorded at the neighbour. If no neighbour links to the target,
	we return nil.

	The usr max value is a percentage which defines the max percentage of a link that the
	user (tenant in openstack terms) is allowed to reserve on any given link.

	We will not probe a neighbour if the link to it cannot accept the additional capacity.

	The target may be the name of the host we're looking for, or the ID of the endpoint
	switch to support finding a path to a "gateway".
*/
func (s *Switch) probe_neighbours( target *string, commence, conclude, inc_cap int64, usr *string, usr_max int64 ) ( found *Switch, cap_trip bool ) {
	var (
		fsw *Switch			// next neighbour switch (through link)
	)

	found = nil
	cap_trip = false

	//fmt.Printf( "\n\nsearching neighbours of (%s) for %s\n", s.To_str(), *target )
	for i := 0; i < s.lidx; i++ {
		if s != fsw {
			has_room, err := s.links[i].Has_capacity( commence, conclude, inc_cap, usr, usr_max )
			if has_room {
				fsw = s.links[i].forward			// at the switch on the other side of the link
				if (fsw.Flags & tegu.SWFL_VISITED) == 0 {
					obj_sheep.Baa( 3, "switch:probe_neighbours: following link %d -- has capacity to (%s) and NOT visited", i, fsw.To_str() )
					if s.Cost + s.links[i].Cost < fsw.Cost {
						//fmt.Printf( "\tsetting cost: %d\n", s.Cost + s.links[i].Cost )
						fsw.Cost = s.Cost + s.links[i].Cost
						fsw.Prev = s				// shortest path to this node is through s
						fsw.Plink = i				// using its ith link
					}

					obj_sheep.Baa( 3, "compare: (%s) (%s)", *target, *(fsw.Get_id()) )
					if fsw.Has_host( target ) || *(fsw.Get_id()) == *target {	// target is attached to this switch, or the target is a switch that is the forward switch
						fsw.Prev = s
						fsw.Plink = i
						found = fsw
						return
					}
				}
			} else {
				obj_sheep.Baa( 2, "no capacity on link: %s", err )
				cap_trip = true
			}
		}
	}

	return
}

/*
	Implements Dijkstra's algorithm for finding the shortest path in the network starting
	from the switch given and stopping when it finds a switch that has the target host
	attached. At the moment, link costs are all the same, so there is no ordering of
	queued nodes such that the lowest cost is always searched next. A path may exist, but
	not be available if the usage on a link cannot support the additional capacity that
	is requested via inc_cap.

	The usr_max value is a percentage (1-100) which indicates the max percentage of a
	link that the user may reserve.

	The cap_trip return value is set to true if one or more links could not be followed
	because of capacity. If the returned switch is nil and cap_trip is true, then the most
	likely cause of failure is capacity, though it _is_ possible that there really is no
	path between the switch and the target, but we stumbled onto a link at capacity before
	discovering that there is no real path. The only way to know for sure is to run two
	searches, the first with an inc_cap of 0, but that seems silly.
*/
func (s *Switch) Path_to( target *string, commence, conclude, inc_cap int64, usr *string, usr_max int64 ) ( found *Switch, cap_trip bool ) {
	var (
		sw			*Switch
		fifo		[]*Switch
		push		int = 0
		pop			int = 0
		pidx		int = 0
		lcap_trip	bool = false	// local detection of capacity exceeded on one or more links
	)

	if s == nil {
		return
	}

	cap_trip = false
	found = nil
	fifo = make( []*Switch, 4096 )

	obj_sheep.Baa( 2, "switch:Path_to: looking for path to %s", *target )

	s.Prev = nil
	fifo[push] = s
	push++

	for push != pop {				// if we run out of things in the fifo we're done and found no path
		sw = fifo[pop]
		pop++
		if pop >= len( fifo ) {		// wrap the ring index; >= prevents indexing one past the end
			pop = 0
		}

		found, cap_trip = sw.probe_neighbours( target, commence, conclude, inc_cap, usr, usr_max )
		if found != nil {
			return
		}
		if cap_trip {
			lcap_trip = true		// must preserve this
		}

		if sw.Flags & tegu.SWFL_VISITED == 0 {		// possible that it was pushed multiple times and already had its neighbours queued
			for i := 0; i < sw.lidx; i++ {
				has_room, err := sw.links[i].Has_capacity( commence, conclude, inc_cap, usr, usr_max )
				if has_room {
					if sw.links[i].forward.Flags & tegu.SWFL_VISITED == 0 {
						fifo[push] = sw.links[i].forward
						push++
						if push >= len( fifo ) {	// wrap the ring index here as well
							push = 0
						}
					}
				} else {
					obj_sheep.Baa( 2, "no capacity on link: %s", err )
					lcap_trip = true
				}
			}
		}

		sw.Flags |= tegu.SWFL_VISITED

		if pidx > 1 {
			pidx--
		}
	}

	cap_trip = lcap_trip			// indication that we tripped on capacity at least once if lcap was set
	return
}

// -------------------- find all paths ------------------------------------------------

/*
	A list of links, each of which represents a unique path between two switches.
*/
type trail_list struct {
	links	[][]*Link
	lidx	int			// next entry to populate
	ep		*Switch		// far end switch
}

/*
	Examine all neighbours of the switch 's' for possible connectivity to the target
	host. If s houses the target host, then we push the current path to this host into
	the trail list and return.
*/
func (s *Switch) ap_search_neighbours( target *string, clinks []*Link, clidx int, tl *trail_list ) {
	if s.Has_host( target ) {
		tl.ep = s								// mark the end switch
		obj_sheep.Baa( 3, "search_neighbours: target found on switch: %s\n", *s.id )
		c := make( []*Link, clidx )
		copy( c, clinks[0:clidx] )				// copy the links walked so far and push into the trail list
		tl.links[tl.lidx] = c
		tl.lidx++
	} else {									// not the end, keep searching forward
		// TODO: check to see that we aren't beyond limit
		s.Flags |= tegu.SWFL_VISITED
		obj_sheep.Baa( 3, "search_neighbours: testing switch: %s has %d links", *s.id, s.lidx )

		for i := 0; i < s.lidx; i++ {			// for each link to a neighbour
			sn := s.links[i].Get_forward_sw()
			if (sn.Flags & tegu.SWFL_VISITED) == 0 {
				obj_sheep.Baa( 3, "search_neighbours: advancing over link %d switch: %s", i, *sn.id )
				clinks[clidx] = s.links[i]		// push the link onto the trail and check out the switch at the other end
				sn.ap_search_neighbours( target, clinks, clidx+1, tl )
				obj_sheep.Baa( 3, "search_neighbours: back to switch: %s", *s.id )
			}
		}
	}

	s.Flags &= ^tegu.SWFL_VISITED				// as we back out we allow paths to come back through
}

/*
	Starting at switch s, this function finds all possible paths to the switch that houses
	the target host, and then returns the list of unique links that are traversed by one
	or more paths, provided that each link can support the increased amount of capacity
	(inc_amt). The endpoint switch is also returned. If any of the links cannot support
	the capacity, the list will be nil or empty; this is also the case if no paths are
	found. The error message will indicate the exact reason if that is important to the
	caller.
	Usr_max is a percentage value (1-100) that defines the maximum percentage of any link
	that the user may reserve.
*/
func (s *Switch) All_paths_to( target *string, commence int64, conclude int64, inc_amt int64, usr *string, usr_max int64 ) ( links []*Link, ep *Switch, err error ) {
	var (
		ulinks map[string]*Link			// unique list of links involved in all trails
	)

	links = nil
	ep = nil
	err = nil

	tl := &trail_list{ lidx: 0 }
	tl.links = make( [][]*Link, 4096 )

	clinks := make( []*Link, 4096 )		// working set of links

	s.ap_search_neighbours( target, clinks, 0, tl )

	if tl.lidx > 0 {					// found at least one trail
		ulinks = make( map[string]*Link )
		ep = tl.ep

		obj_sheep.Baa( 2, "switch/all-paths: %d trails found to target", tl.lidx )
		for i := 0; i < tl.lidx; i++ {				// for each trail between the two endpoints
			obj_sheep.Baa( 3, "Trail %d follows:", i )
			for j := range tl.links[i] {
				lid := tl.links[i][j].Get_id()		// add if not already found in another trail
				if ulinks[*lid] == nil {
					ulinks[*lid] = tl.links[i][j]
				}
				obj_sheep.Baa( 3, "link %d: %s", j, tl.links[i][j].To_str( ) )
			}
		}

		obj_sheep.Baa( 2, "found %d unique links across %d trails", len( ulinks ), tl.lidx )

		links = make( []*Link, len( ulinks ) )
		i := 0
		for _, v := range ulinks {					// TODO: Add tenant based check
			_, lerr := v.Has_capacity( commence, conclude, inc_amt, usr, usr_max )	// local error; := on err here would shadow the named return value
			if lerr != nil {
				err = fmt.Errorf( "no capacity found between switch (%s) and target (%s): %s", *s.id, *target, lerr )
				obj_sheep.Baa( 2, "all_paths: no capacity on link: %s", lerr )
				links = nil
				break
			}

			// TODO: Add warning if the capacity for the link is above threshold (here, or when the usage is actually bumped up?)
			links[i] = v
			i++
		}
	} else {
		err = fmt.Errorf( "no paths found between switch (%s) and target (%s)", *s.id, *target )
	}

	return
}

/*
	Checks all links to determine if they _all_ have the capacity to support additional
	outbound traffic (inc_cap). Used to check for gating when a path isn't built, but
	rate limiting at ingress is needed.
*/
func (s *Switch) Has_capacity_out( commence, conclude, inc_cap int64, usr *string, usr_max int64 ) ( bool ) {
	if s == nil {
		return false
	}

	for i := 0; i < s.lidx; i++ {
		has_room, err := s.links[i].Has_capacity( commence, conclude, inc_cap, usr, usr_max )
		if ! has_room {
			obj_sheep.Baa( 2, "switch/cap_out: no capacity on link from %s: %s", *s.id, err )
			return false
		}
	}

	obj_sheep.Baa( 2, "switch/cap_out: %s has capacity", *s.id )
	return true
}

// -------------------- formatting ----------------------------------------------------

/*
	Generate some usable representation for debugging.
	Deprecated -- use the Stringer interface (String()).
*/
func (s *Switch) To_str( ) ( string ) {
	return s.String()
}

/*
	Generate some usable representation for debugging.
*/
func (s *Switch) String( ) ( string ) {
	if s != nil {
		return fmt.Sprintf( "%s %d links cost=%d fl=0x%02x", *s.id, s.lidx, s.Cost, s.Flags )
	}

	return "null-switch"
}

/*
	Generate a string containing a json representation of the switch.
*/
func (s *Switch) To_json( ) ( jstr string ) {
	var sep = ""

	if s == nil {
		jstr = `{ "id": "null_switch" }`			// key must be quoted for the output to be valid json
		return
	}

	if s.lidx > 0 {
		jstr = fmt.Sprintf( `{ "id": %q, "links": [ `, *s.id )

		for i := 0; i < s.lidx; i++ {
			jstr += fmt.Sprintf( "%s%s", sep, s.links[i].To_json() )
		}
		jstr += " ]"
	} else {
		jstr = fmt.Sprintf( `{ "id": %q }`, *s.id )
	}

	if len( s.hosts ) > 0 {
		jstr += fmt.Sprintf( `, "conn_hosts": [ ` )
		sep = ""
		for k := range s.hosts {
			if s.hosts[k] {
				vmid := "unknown"
				if s.hvmid[k] != nil {
					vmid = *s.hvmid[k]
				}
				jstr += fmt.Sprintf( `%s { "host": %q, "port": %d, "vmid": %q }`, sep, k, s.hport[k], vmid )
				sep = ","
			}
		}
		jstr += " ]"
	}

	jstr += " }"

	return
}
sep = ","
random_line_split
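The record above splits To_json around a sep = "," assignment; the function builds its JSON by hand with fmt.Sprintf. For comparison, a hedged sketch of the encoding/json alternative, using hypothetical mirror types (jhost, jswitch) rather than the real gizmos structs; struct tags guarantee quoted keys and proper escaping without manual separator bookkeeping.

package main

import (
	"encoding/json"
	"fmt"
)

// jhost mirrors one entry of the hand-built "conn_hosts" array.
type jhost struct {
	Host string `json:"host"`
	Port int    `json:"port"`
	Vmid string `json:"vmid"`
}

// jswitch mirrors the top level object that To_json emits.
type jswitch struct {
	Id    string  `json:"id"`
	Hosts []jhost `json:"conn_hosts,omitempty"` // omitted entirely when empty
}

func main() {
	s := jswitch{
		Id:    "br-int",
		Hosts: []jhost{{Host: "vm1", Port: 2, Vmid: "abc-123"}},
	}
	b, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"id":"br-int","conn_hosts":[{"host":"vm1","port":2,"vmid":"abc-123"}]}
}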
switch.go
// vi: sw=4 ts=4:
/*
---------------------------------------------------------------------------
Copyright (c) 2013-2015 AT&T Intellectual Property

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---------------------------------------------------------------------------
*/

/*
	Mnemonic:	switch
	Abstract:	Functions associated with the switch data structure. This module also contains
				the functions that implement path-finding. Dijkstra's algorithm is implemented
				(see Path_to) to determine a path between two hosts which we assume are connected
				to one or two switches. The path-finding algorithm allows for disjoint networks,
				which occur when one or more switches are not managed by the controller(s) used
				to create the network graph.

	Date:		24 November 2013
	Author:		E. Scott Daniels

	Mods:		10 Mar 2014 - We allow a target to be either a switch or host when looking for a path.
				13 May 2014 - Corrected bug in debug string.
				11 Jun 2014 - Changes to support finding all paths between two VMs rather than just
					the shortest one.
				29 Jun 2014 - Changes to support user link limits.
				29 Jul 2014 - Mlag support.
				23 Oct 2014 - Find path functions return an indication that no path might have been
					caused by a capacity issue rather than no path.
				17 Jun 2015 - Added checking for nil pointer on some functions. General cleanup of
					comments and switch to the stringer interface instead of To_str(). Added support
					for one-way bandwidth reservations with a function that checks outbound capacity
					on all switch links.
				10 Sep 2015 - Allow finding attached 'hosts' based on uuid.
*/

package gizmos

import (
	"fmt"
	"strings"

	"github.com/att/tegu"
)

/*
	Defines a switch.
*/
type Switch struct {
	id		*string				// reference id for the switch
	links	[]*Link				// links to other switches
	lidx	int					// next open index in links
	hosts	map[string]bool		// hosts that are attached to this switch
	hvmid	map[string]*string	// vmids of attached hosts
	hport	map[string]int		// the port that the host (string) attaches to

	// these are for path finding and are needed externally
	Prev	*Switch				// previous low cost switch
	Plink	int					// index of link on Prev used to reach this node
	Cost	int					// cost to reach this node through Prev/Plink
	Flags	int					// visited and maybe others
}

/*
	Constructor. Generates a switch object with the given id.
*/
func Mk_switch( id *string ) ( s *Switch ) {
	if id == nil {							// must guard before the dereference below
		dup_str := "no_id_given"
		id = &dup_str
	}

	tokens := strings.SplitN( *id, "@", 2 )	// in q-lite world we get host@interface and we need only the host portion
	id = &tokens[0]
	s = &Switch {
		id: id,
		lidx: 0,
	}

	s.links = make( []*Link, 32 )
	s.hosts = make( map[string]bool, 64 )
	s.hport = make( map[string]int, 64 )
	s.hvmid = make( map[string]*string, 64 )
	return
}

/*
	Destruction.
*/
func (s *Switch) Nuke() {
	for i := 0; i < s.lidx; i++ {
		s.links[i] = nil
	}
	s.links = nil
	s.hosts = nil
	s.hport = nil
	s.hvmid = nil				// release the vmid map along with the others
}

/*
	Add a link to the switch.
*/
func (s *Switch) Add_link( link *Link ) {
	var (
		new_links	[]*Link
		i			int
	)

	if s == nil {
		return
	}

	if s.lidx >= len( s.links ) {				// grow the links slice when it is full
		new_links = make( []*Link, s.lidx + 32 )
		for i = 0; i < len( s.links ); i++ {
			new_links[i] = s.links[i]
		}
		s.links = new_links
	}

	s.links[s.lidx] = link
	s.lidx++
}

/*
	Track an attached host (by name, vmid and port).
*/
func (s *Switch) Add_host( host *string, vmid *string, port int ) {
	if s == nil {
		return
	}

	s.hosts[*host] = true
	s.hport[*host] = port
	s.hvmid[*host] = vmid
}

/*
	Returns true if the named host is attached to the switch. The host may be a pointer
	to either a host name or a uuid string.
*/
func (s *Switch) Has_host( host *string ) ( bool ) {
	if s == nil {
		return false
	}

	if s.hvmid[*host] != nil {		// allow searches based on the uuid
		return true
	}

	return s.hosts[*host]
}

/*
	Return the ID that has been associated with this switch. Likely this is the DPID.
*/
func (s *Switch) Get_id( ) ( *string ) {
	if s == nil {
		return nil
	}
	return s.id
}

/*
	Return the ith link in our index or nil if i is out of range. Allows the user programme
	to loop through the list if needed. Yes, this _could_ have been implemented to drive a
	callback for each list element, but that just makes the user code more complicated,
	requiring an extra function or closure, and IMHO adds unneeded maintenance and/or
	learning curve issues.
*/
func (s *Switch) Get_link( i int ) ( l *Link ) {
	if s == nil {
		return nil
	}

	l = nil
	if i >= 0 && i < s.lidx {
		l = s.links[i]
	}

	return
}

// -------------- shortest, single, path finding -------------------------------------------------------------

/*
	Probe all of the neighbours of the switch to see if they are attached to the target
	host. If a neighbour has the target, we set the reverse path in the neighbour and
	return it indicating success. If a neighbour does not have the target, we update the
	neighbour's cost and reverse path _ONLY_ if the cost through the current switch is
	lower than the cost recorded at the neighbour. If no neighbour links to the target,
	we return nil.

	The usr max value is a percentage which defines the max percentage of a link that the
	user (tenant in openstack terms) is allowed to reserve on any given link.

	We will not probe a neighbour if the link to it cannot accept the additional capacity.

	The target may be the name of the host we're looking for, or the ID of the endpoint
	switch to support finding a path to a "gateway".
*/
func (s *Switch) probe_neighbours( target *string, commence, conclude, inc_cap int64, usr *string, usr_max int64 ) ( found *Switch, cap_trip bool ) {
	var (
		fsw *Switch			// next neighbour switch (through link)
	)

	found = nil
	cap_trip = false

	//fmt.Printf( "\n\nsearching neighbours of (%s) for %s\n", s.To_str(), *target )
	for i := 0; i < s.lidx; i++ {
		if s != fsw {
			has_room, err := s.links[i].Has_capacity( commence, conclude, inc_cap, usr, usr_max )
			if has_room {
				fsw = s.links[i].forward			// at the switch on the other side of the link
				if (fsw.Flags & tegu.SWFL_VISITED) == 0 {
					obj_sheep.Baa( 3, "switch:probe_neighbours: following link %d -- has capacity to (%s) and NOT visited", i, fsw.To_str() )
					if s.Cost + s.links[i].Cost < fsw.Cost {
						//fmt.Printf( "\tsetting cost: %d\n", s.Cost + s.links[i].Cost )
						fsw.Cost = s.Cost + s.links[i].Cost
						fsw.Prev = s				// shortest path to this node is through s
						fsw.Plink = i				// using its ith link
					}

					obj_sheep.Baa( 3, "compare: (%s) (%s)", *target, *(fsw.Get_id()) )
					if fsw.Has_host( target ) || *(fsw.Get_id()) == *target {	// target is attached to this switch, or the target is a switch that is the forward switch
						fsw.Prev = s
						fsw.Plink = i
						found = fsw
						return
					}
				}
			} else {
				obj_sheep.Baa( 2, "no capacity on link: %s", err )
				cap_trip = true
			}
		}
	}

	return
}

/*
	Implements Dijkstra's algorithm for finding the shortest path in the network starting
	from the switch given and stopping when it finds a switch that has the target host
	attached. At the moment, link costs are all the same, so there is no ordering of
	queued nodes such that the lowest cost is always searched next. A path may exist, but
	not be available if the usage on a link cannot support the additional capacity that
	is requested via inc_cap.

	The usr_max value is a percentage (1-100) which indicates the max percentage of a
	link that the user may reserve.

	The cap_trip return value is set to true if one or more links could not be followed
	because of capacity. If the returned switch is nil and cap_trip is true, then the most
	likely cause of failure is capacity, though it _is_ possible that there really is no
	path between the switch and the target, but we stumbled onto a link at capacity before
	discovering that there is no real path. The only way to know for sure is to run two
	searches, the first with an inc_cap of 0, but that seems silly.
*/
func (s *Switch) Path_to( target *string, commence, conclude, inc_cap int64, usr *string, usr_max int64 ) ( found *Switch, cap_trip bool ) {
	var (
		sw			*Switch
		fifo		[]*Switch
		push		int = 0
		pop			int = 0
		pidx		int = 0
		lcap_trip	bool = false	// local detection of capacity exceeded on one or more links
	)

	if s == nil {
		return
	}

	cap_trip = false
	found = nil
	fifo = make( []*Switch, 4096 )

	obj_sheep.Baa( 2, "switch:Path_to: looking for path to %s", *target )

	s.Prev = nil
	fifo[push] = s
	push++

	for push != pop {				// if we run out of things in the fifo we're done and found no path
		sw = fifo[pop]
		pop++
		if pop >= len( fifo ) {		// wrap the ring index; >= prevents indexing one past the end
			pop = 0
		}

		found, cap_trip = sw.probe_neighbours( target, commence, conclude, inc_cap, usr, usr_max )
		if found != nil {
			return
		}
		if cap_trip {
			lcap_trip = true		// must preserve this
		}

		if sw.Flags & tegu.SWFL_VISITED == 0 {		// possible that it was pushed multiple times and already had its neighbours queued
			for i := 0; i < sw.lidx; i++ {
				has_room, err := sw.links[i].Has_capacity( commence, conclude, inc_cap, usr, usr_max )
				if has_room {
					if sw.links[i].forward.Flags & tegu.SWFL_VISITED == 0 {
						fifo[push] = sw.links[i].forward
						push++
						if push >= len( fifo ) {	// wrap the ring index here as well
							push = 0
						}
					}
				} else {
					obj_sheep.Baa( 2, "no capacity on link: %s", err )
					lcap_trip = true
				}
			}
		}

		sw.Flags |= tegu.SWFL_VISITED

		if pidx > 1 {
			pidx--
		}
	}

	cap_trip = lcap_trip			// indication that we tripped on capacity at least once if lcap was set
	return
}

// -------------------- find all paths ------------------------------------------------

/*
	A list of links, each of which represents a unique path between two switches.
*/
type trail_list struct {
	links	[][]*Link
	lidx	int			// next entry to populate
	ep		*Switch		// far end switch
}

/*
	Examine all neighbours of the switch 's' for possible connectivity to the target
	host. If s houses the target host, then we push the current path to this host into
	the trail list and return.
*/
func (s *Switch) ap_search_neighbours( target *string, clinks []*Link, clidx int, tl *trail_list ) {
	if s.Has_host( target ) {
		tl.ep = s								// mark the end switch
		obj_sheep.Baa( 3, "search_neighbours: target found on switch: %s\n", *s.id )
		c := make( []*Link, clidx )
		copy( c, clinks[0:clidx] )				// copy the links walked so far and push into the trail list
		tl.links[tl.lidx] = c
		tl.lidx++
	} else {									// not the end, keep searching forward
		// TODO: check to see that we aren't beyond limit
		s.Flags |= tegu.SWFL_VISITED
		obj_sheep.Baa( 3, "search_neighbours: testing switch: %s has %d links", *s.id, s.lidx )

		for i := 0; i < s.lidx; i++ {			// for each link to a neighbour
			sn := s.links[i].Get_forward_sw()
			if (sn.Flags & tegu.SWFL_VISITED) == 0 {
				obj_sheep.Baa( 3, "search_neighbours: advancing over link %d switch: %s", i, *sn.id )
				clinks[clidx] = s.links[i]		// push the link onto the trail and check out the switch at the other end
				sn.ap_search_neighbours( target, clinks, clidx+1, tl )
				obj_sheep.Baa( 3, "search_neighbours: back to switch: %s", *s.id )
			}
		}
	}

	s.Flags &= ^tegu.SWFL_VISITED				// as we back out we allow paths to come back through
}

/*
	Starting at switch s, this function finds all possible paths to the switch that houses
	the target host, and then returns the list of unique links that are traversed by one
	or more paths, provided that each link can support the increased amount of capacity
	(inc_amt). The endpoint switch is also returned. If any of the links cannot support
	the capacity, the list will be nil or empty; this is also the case if no paths are
	found. The error message will indicate the exact reason if that is important to the
	caller.
	Usr_max is a percentage value (1-100) that defines the maximum percentage of any link
	that the user may reserve.
*/
func (s *Switch) All_paths_to( target *string, commence int64, conclude int64, inc_amt int64, usr *string, usr_max int64 ) ( links []*Link, ep *Switch, err error ) {
	var (
		ulinks map[string]*Link			// unique list of links involved in all trails
	)

	links = nil
	ep = nil
	err = nil

	tl := &trail_list{ lidx: 0 }
	tl.links = make( [][]*Link, 4096 )

	clinks := make( []*Link, 4096 )		// working set of links

	s.ap_search_neighbours( target, clinks, 0, tl )

	if tl.lidx > 0 {					// found at least one trail
		ulinks = make( map[string]*Link )
		ep = tl.ep

		obj_sheep.Baa( 2, "switch/all-paths: %d trails found to target", tl.lidx )
		for i := 0; i < tl.lidx; i++ {				// for each trail between the two endpoints
			obj_sheep.Baa( 3, "Trail %d follows:", i )
			for j := range tl.links[i] {
				lid := tl.links[i][j].Get_id()		// add if not already found in another trail
				if ulinks[*lid] == nil {
					ulinks[*lid] = tl.links[i][j]
				}
				obj_sheep.Baa( 3, "link %d: %s", j, tl.links[i][j].To_str( ) )
			}
		}

		obj_sheep.Baa( 2, "found %d unique links across %d trails", len( ulinks ), tl.lidx )

		links = make( []*Link, len( ulinks ) )
		i := 0
		for _, v := range ulinks {					// TODO: Add tenant based check
			_, lerr := v.Has_capacity( commence, conclude, inc_amt, usr, usr_max )	// local error; := on err here would shadow the named return value
			if lerr != nil {
				err = fmt.Errorf( "no capacity found between switch (%s) and target (%s): %s", *s.id, *target, lerr )
				obj_sheep.Baa( 2, "all_paths: no capacity on link: %s", lerr )
				links = nil
				break
			}

			// TODO: Add warning if the capacity for the link is above threshold (here, or when the usage is actually bumped up?)
			links[i] = v
			i++
		}
	} else {
		err = fmt.Errorf( "no paths found between switch (%s) and target (%s)", *s.id, *target )
	}

	return
}

/*
	Checks all links to determine if they _all_ have the capacity to support additional
	outbound traffic (inc_cap). Used to check for gating when a path isn't built, but
	rate limiting at ingress is needed.
*/
func (s *Switch) Has_capacity_out( commence, conclude, inc_cap int64, usr *string, usr_max int64 ) ( bool )
// -------------------- formatting ----------------------------------------------------

/*
	Generate some usable representation for debugging.
	Deprecated -- use the Stringer interface (String()).
*/
func (s *Switch) To_str( ) ( string ) {
	return s.String()
}

/*
	Generate some usable representation for debugging.
*/
func (s *Switch) String( ) ( string ) {
	if s != nil {
		return fmt.Sprintf( "%s %d links cost=%d fl=0x%02x", *s.id, s.lidx, s.Cost, s.Flags )
	}

	return "null-switch"
}

/*
	Generate a string containing a json representation of the switch.
*/
func (s *Switch) To_json( ) ( jstr string ) {
	var sep = ""

	if s == nil {
		jstr = `{ "id": "null_switch" }`			// key must be quoted for the output to be valid json
		return
	}

	if s.lidx > 0 {
		jstr = fmt.Sprintf( `{ "id": %q, "links": [ `, *s.id )

		for i := 0; i < s.lidx; i++ {
			jstr += fmt.Sprintf( "%s%s", sep, s.links[i].To_json() )
			sep = ","
		}
		jstr += " ]"
	} else {
		jstr = fmt.Sprintf( `{ "id": %q }`, *s.id )
	}

	if len( s.hosts ) > 0 {
		jstr += fmt.Sprintf( `, "conn_hosts": [ ` )
		sep = ""
		for k := range s.hosts {
			if s.hosts[k] {
				vmid := "unknown"
				if s.hvmid[k] != nil {
					vmid = *s.hvmid[k]
				}
				jstr += fmt.Sprintf( `%s { "host": %q, "port": %d, "vmid": %q }`, sep, k, s.hport[k], vmid )
				sep = ","
			}
		}
		jstr += " ]"
	}

	jstr += " }"

	return
}
{
	if s == nil {
		return false
	}

	for i := 0; i < s.lidx; i++ {
		has_room, err := s.links[i].Has_capacity( commence, conclude, inc_cap, usr, usr_max )
		if ! has_room {
			obj_sheep.Baa( 2, "switch/cap_out: no capacity on link from %s: %s", *s.id, err )
			return false
		}
	}

	obj_sheep.Baa( 2, "switch/cap_out: %s has capacity", *s.id )
	return true
}
identifier_body
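The middle of the record above is the body of Has_capacity_out, an all-of check: every link on the switch must be able to absorb the extra outbound capacity or the whole test fails. A standalone sketch of the same pattern with stand-in types (link and hasCapacityOut are illustrative, not the gizmos API):

package main

import "fmt"

type link struct {
	capacity int64 // total capacity of the link
	used     int64 // capacity already reserved
}

// hasCapacity reports whether the link can absorb inc more units.
func (l *link) hasCapacity(inc int64) bool {
	return l.used+inc <= l.capacity
}

// hasCapacityOut mirrors the switch-level check: fail fast on the first
// link that lacks room, succeed only when every link has room.
func hasCapacityOut(links []*link, inc int64) bool {
	for _, l := range links {
		if !l.hasCapacity(inc) {
			return false
		}
	}
	return true
}

func main() {
	links := []*link{
		{capacity: 100, used: 10},
		{capacity: 100, used: 95}, // nearly full
	}
	fmt.Println(hasCapacityOut(links, 20)) // false: the second link blocks it
	fmt.Println(hasCapacityOut(links, 5))  // true: both links have room
}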
switch.go
// vi: sw=4 ts=4:
/*
---------------------------------------------------------------------------
Copyright (c) 2013-2015 AT&T Intellectual Property

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---------------------------------------------------------------------------
*/

/*
	Mnemonic:	switch
	Abstract:	Functions associated with the switch data structure. This module also contains
				the functions that implement path-finding. Dijkstra's algorithm is implemented
				(see Path_to) to determine a path between two hosts which we assume are connected
				to one or two switches. The path-finding algorithm allows for disjoint networks,
				which occur when one or more switches are not managed by the controller(s) used
				to create the network graph.

	Date:		24 November 2013
	Author:		E. Scott Daniels

	Mods:		10 Mar 2014 - We allow a target to be either a switch or host when looking for a path.
				13 May 2014 - Corrected bug in debug string.
				11 Jun 2014 - Changes to support finding all paths between two VMs rather than just
					the shortest one.
				29 Jun 2014 - Changes to support user link limits.
				29 Jul 2014 - Mlag support.
				23 Oct 2014 - Find path functions return an indication that no path might have been
					caused by a capacity issue rather than no path.
				17 Jun 2015 - Added checking for nil pointer on some functions. General cleanup of
					comments and switch to the stringer interface instead of To_str(). Added support
					for one-way bandwidth reservations with a function that checks outbound capacity
					on all switch links.
				10 Sep 2015 - Allow finding attached 'hosts' based on uuid.
*/

package gizmos

import (
	"fmt"
	"strings"

	"github.com/att/tegu"
)

/*
	Defines a switch.
*/
type Switch struct {
	id		*string				// reference id for the switch
	links	[]*Link				// links to other switches
	lidx	int					// next open index in links
	hosts	map[string]bool		// hosts that are attached to this switch
	hvmid	map[string]*string	// vmids of attached hosts
	hport	map[string]int		// the port that the host (string) attaches to

	// these are for path finding and are needed externally
	Prev	*Switch				// previous low cost switch
	Plink	int					// index of link on Prev used to reach this node
	Cost	int					// cost to reach this node through Prev/Plink
	Flags	int					// visited and maybe others
}

/*
	Constructor. Generates a switch object with the given id.
*/
func Mk_switch( id *string ) ( s *Switch ) {
	if id == nil {							// must guard before the dereference below
		dup_str := "no_id_given"
		id = &dup_str
	}

	tokens := strings.SplitN( *id, "@", 2 )	// in q-lite world we get host@interface and we need only the host portion
	id = &tokens[0]
	s = &Switch {
		id: id,
		lidx: 0,
	}

	s.links = make( []*Link, 32 )
	s.hosts = make( map[string]bool, 64 )
	s.hport = make( map[string]int, 64 )
	s.hvmid = make( map[string]*string, 64 )
	return
}

/*
	Destruction.
*/
func (s *Switch) Nuke() {
	for i := 0; i < s.lidx; i++ {
		s.links[i] = nil
	}
	s.links = nil
	s.hosts = nil
	s.hport = nil
	s.hvmid = nil				// release the vmid map along with the others
}

/*
	Add a link to the switch.
*/
func (s *Switch) Add_link( link *Link ) {
	var (
		new_links	[]*Link
		i			int
	)

	if s == nil {
		return
	}

	if s.lidx >= len( s.links ) {				// grow the links slice when it is full
		new_links = make( []*Link, s.lidx + 32 )
		for i = 0; i < len( s.links ); i++ {
			new_links[i] = s.links[i]
		}
		s.links = new_links
	}

	s.links[s.lidx] = link
	s.lidx++
}

/*
	Track an attached host (by name, vmid and port).
*/
func (s *Switch) Add_host( host *string, vmid *string, port int ) {
	if s == nil {
		return
	}

	s.hosts[*host] = true
	s.hport[*host] = port
	s.hvmid[*host] = vmid
}

/*
	Returns true if the named host is attached to the switch. The host may be a pointer
	to either a host name or a uuid string.
*/
func (s *Switch) Has_host( host *string ) ( bool ) {
	if s == nil {
		return false
	}

	if s.hvmid[*host] != nil {		// allow searches based on the uuid
		return true
	}

	return s.hosts[*host]
}

/*
	Return the ID that has been associated with this switch. Likely this is the DPID.
*/
func (s *Switch) Get_id( ) ( *string ) {
	if s == nil
	return s.id
}

/*
	Return the ith link in our index or nil if i is out of range. Allows the user programme
	to loop through the list if needed. Yes, this _could_ have been implemented to drive a
	callback for each list element, but that just makes the user code more complicated,
	requiring an extra function or closure, and IMHO adds unneeded maintenance and/or
	learning curve issues.
*/
func (s *Switch) Get_link( i int ) ( l *Link ) {
	if s == nil {
		return nil
	}

	l = nil
	if i >= 0 && i < s.lidx {
		l = s.links[i]
	}

	return
}

// -------------- shortest, single, path finding -------------------------------------------------------------

/*
	Probe all of the neighbours of the switch to see if they are attached to the target
	host. If a neighbour has the target, we set the reverse path in the neighbour and
	return it indicating success. If a neighbour does not have the target, we update the
	neighbour's cost and reverse path _ONLY_ if the cost through the current switch is
	lower than the cost recorded at the neighbour. If no neighbour links to the target,
	we return nil.

	The usr max value is a percentage which defines the max percentage of a link that the
	user (tenant in openstack terms) is allowed to reserve on any given link.

	We will not probe a neighbour if the link to it cannot accept the additional capacity.

	The target may be the name of the host we're looking for, or the ID of the endpoint
	switch to support finding a path to a "gateway".
*/
func (s *Switch) probe_neighbours( target *string, commence, conclude, inc_cap int64, usr *string, usr_max int64 ) ( found *Switch, cap_trip bool ) {
	var (
		fsw *Switch			// next neighbour switch (through link)
	)

	found = nil
	cap_trip = false

	//fmt.Printf( "\n\nsearching neighbours of (%s) for %s\n", s.To_str(), *target )
	for i := 0; i < s.lidx; i++ {
		if s != fsw {
			has_room, err := s.links[i].Has_capacity( commence, conclude, inc_cap, usr, usr_max )
			if has_room {
				fsw = s.links[i].forward			// at the switch on the other side of the link
				if (fsw.Flags & tegu.SWFL_VISITED) == 0 {
					obj_sheep.Baa( 3, "switch:probe_neighbours: following link %d -- has capacity to (%s) and NOT visited", i, fsw.To_str() )
					if s.Cost + s.links[i].Cost < fsw.Cost {
						//fmt.Printf( "\tsetting cost: %d\n", s.Cost + s.links[i].Cost )
						fsw.Cost = s.Cost + s.links[i].Cost
						fsw.Prev = s				// shortest path to this node is through s
						fsw.Plink = i				// using its ith link
					}

					obj_sheep.Baa( 3, "compare: (%s) (%s)", *target, *(fsw.Get_id()) )
					if fsw.Has_host( target ) || *(fsw.Get_id()) == *target {	// target is attached to this switch, or the target is a switch that is the forward switch
						fsw.Prev = s
						fsw.Plink = i
						found = fsw
						return
					}
				}
			} else {
				obj_sheep.Baa( 2, "no capacity on link: %s", err )
				cap_trip = true
			}
		}
	}

	return
}

/*
	Implements Dijkstra's algorithm for finding the shortest path in the network starting
	from the switch given and stopping when it finds a switch that has the target host
	attached. At the moment, link costs are all the same, so there is no ordering of
	queued nodes such that the lowest cost is always searched next. A path may exist, but
	not be available if the usage on a link cannot support the additional capacity that
	is requested via inc_cap.

	The usr_max value is a percentage (1-100) which indicates the max percentage of a
	link that the user may reserve.

	The cap_trip return value is set to true if one or more links could not be followed
	because of capacity.
	If the returned switch is nil and cap_trip is true, then the most likely cause of
	failure is capacity, though it _is_ possible that there really is no path between the
	switch and the target, but we stumbled onto a link at capacity before discovering that
	there is no real path. The only way to know for sure is to run two searches, the first
	with an inc_cap of 0, but that seems silly.
*/
func (s *Switch) Path_to( target *string, commence, conclude, inc_cap int64, usr *string, usr_max int64 ) ( found *Switch, cap_trip bool ) {
	var (
		sw			*Switch
		fifo		[]*Switch
		push		int = 0
		pop			int = 0
		pidx		int = 0
		lcap_trip	bool = false	// local detection of capacity exceeded on one or more links
	)

	if s == nil {
		return
	}

	cap_trip = false
	found = nil
	fifo = make( []*Switch, 4096 )

	obj_sheep.Baa( 2, "switch:Path_to: looking for path to %s", *target )

	s.Prev = nil
	fifo[push] = s
	push++

	for push != pop {				// if we run out of things in the fifo we're done and found no path
		sw = fifo[pop]
		pop++
		if pop >= len( fifo ) {		// wrap the ring index; >= prevents indexing one past the end
			pop = 0
		}

		found, cap_trip = sw.probe_neighbours( target, commence, conclude, inc_cap, usr, usr_max )
		if found != nil {
			return
		}
		if cap_trip {
			lcap_trip = true		// must preserve this
		}

		if sw.Flags & tegu.SWFL_VISITED == 0 {		// possible that it was pushed multiple times and already had its neighbours queued
			for i := 0; i < sw.lidx; i++ {
				has_room, err := sw.links[i].Has_capacity( commence, conclude, inc_cap, usr, usr_max )
				if has_room {
					if sw.links[i].forward.Flags & tegu.SWFL_VISITED == 0 {
						fifo[push] = sw.links[i].forward
						push++
						if push >= len( fifo ) {	// wrap the ring index here as well
							push = 0
						}
					}
				} else {
					obj_sheep.Baa( 2, "no capacity on link: %s", err )
					lcap_trip = true
				}
			}
		}

		sw.Flags |= tegu.SWFL_VISITED

		if pidx > 1 {
			pidx--
		}
	}

	cap_trip = lcap_trip			// indication that we tripped on capacity at least once if lcap was set
	return
}

// -------------------- find all paths ------------------------------------------------

/*
	A list of links, each of which represents a unique path between two switches.
*/
type trail_list struct {
	links	[][]*Link
	lidx	int			// next entry to populate
	ep		*Switch		// far end switch
}

/*
	Examine all neighbours of the switch 's' for possible connectivity to the target
	host. If s houses the target host, then we push the current path to this host into
	the trail list and return.
*/
func (s *Switch) ap_search_neighbours( target *string, clinks []*Link, clidx int, tl *trail_list ) {
	if s.Has_host( target ) {
		tl.ep = s								// mark the end switch
		obj_sheep.Baa( 3, "search_neighbours: target found on switch: %s\n", *s.id )
		c := make( []*Link, clidx )
		copy( c, clinks[0:clidx] )				// copy the links walked so far and push into the trail list
		tl.links[tl.lidx] = c
		tl.lidx++
	} else {									// not the end, keep searching forward
		// TODO: check to see that we aren't beyond limit
		s.Flags |= tegu.SWFL_VISITED
		obj_sheep.Baa( 3, "search_neighbours: testing switch: %s has %d links", *s.id, s.lidx )

		for i := 0; i < s.lidx; i++ {			// for each link to a neighbour
			sn := s.links[i].Get_forward_sw()
			if (sn.Flags & tegu.SWFL_VISITED) == 0 {
				obj_sheep.Baa( 3, "search_neighbours: advancing over link %d switch: %s", i, *sn.id )
				clinks[clidx] = s.links[i]		// push the link onto the trail and check out the switch at the other end
				sn.ap_search_neighbours( target, clinks, clidx+1, tl )
				obj_sheep.Baa( 3, "search_neighbours: back to switch: %s", *s.id )
			}
		}
	}

	s.Flags &= ^tegu.SWFL_VISITED				// as we back out we allow paths to come back through
}

/*
	Starting at switch s, this function finds all possible paths to the switch that houses
	the target host, and then returns the list of unique links that are traversed by one
	or more paths, provided that each link can support the increased amount of capacity
	(inc_amt). The endpoint switch is also returned. If any of the links cannot support
	the capacity, the list will be nil or empty; this is also the case if no paths are
	found. The error message will indicate the exact reason if that is important to the
	caller.

	Usr_max is a percentage value (1-100) that defines the maximum percentage of any link
	that the user may reserve.
*/
func (s *Switch) All_paths_to( target *string, commence int64, conclude int64, inc_amt int64, usr *string, usr_max int64 ) ( links []*Link, ep *Switch, err error ) {
	var (
		ulinks map[string]*Link			// unique list of links involved in all trails
	)

	links = nil
	ep = nil
	err = nil

	tl := &trail_list{ lidx: 0 }
	tl.links = make( [][]*Link, 4096 )

	clinks := make( []*Link, 4096 )		// working set of links

	s.ap_search_neighbours( target, clinks, 0, tl )

	if tl.lidx > 0 {					// found at least one trail
		ulinks = make( map[string]*Link )
		ep = tl.ep

		obj_sheep.Baa( 2, "switch/all-paths: %d trails found to target", tl.lidx )
		for i := 0; i < tl.lidx; i++ {				// for each trail between the two endpoints
			obj_sheep.Baa( 3, "Trail %d follows:", i )
			for j := range tl.links[i] {
				lid := tl.links[i][j].Get_id()		// add if not already found in another trail
				if ulinks[*lid] == nil {
					ulinks[*lid] = tl.links[i][j]
				}
				obj_sheep.Baa( 3, "link %d: %s", j, tl.links[i][j].To_str( ) )
			}
		}

		obj_sheep.Baa( 2, "found %d unique links across %d trails", len( ulinks ), tl.lidx )

		links = make( []*Link, len( ulinks ) )
		i := 0
		for _, v := range ulinks {					// TODO: Add tenant based check
			_, lerr := v.Has_capacity( commence, conclude, inc_amt, usr, usr_max )	// local error; := on err here would shadow the named return value
			if lerr != nil {
				err = fmt.Errorf( "no capacity found between switch (%s) and target (%s): %s", *s.id, *target, lerr )
				obj_sheep.Baa( 2, "all_paths: no capacity on link: %s", lerr )
				links = nil
				break
			}

			// TODO: Add warning if the capacity for the link is above threshold (here, or when the usage is actually bumped up?)
			links[i] = v
			i++
		}
	} else {
		err = fmt.Errorf( "no paths found between switch (%s) and target (%s)", *s.id, *target )
	}

	return
}

/*
	Checks all links to determine if they _all_ have the capacity to support additional
	outbound traffic (inc_cap).
	Used to check for gating when a path isn't built, but rate limiting at ingress is
	needed.
*/
func (s *Switch) Has_capacity_out( commence, conclude, inc_cap int64, usr *string, usr_max int64 ) ( bool ) {
	if s == nil {
		return false
	}

	for i := 0; i < s.lidx; i++ {
		has_room, err := s.links[i].Has_capacity( commence, conclude, inc_cap, usr, usr_max )
		if ! has_room {
			obj_sheep.Baa( 2, "switch/cap_out: no capacity on link from %s: %s", *s.id, err )
			return false
		}
	}

	obj_sheep.Baa( 2, "switch/cap_out: %s has capacity", *s.id )
	return true
}

// -------------------- formatting ----------------------------------------------------

/*
	Generate some usable representation for debugging.
	Deprecated -- use the Stringer interface (String()).
*/
func (s *Switch) To_str( ) ( string ) {
	return s.String()
}

/*
	Generate some usable representation for debugging.
*/
func (s *Switch) String( ) ( string ) {
	if s != nil {
		return fmt.Sprintf( "%s %d links cost=%d fl=0x%02x", *s.id, s.lidx, s.Cost, s.Flags )
	}

	return "null-switch"
}

/*
	Generate a string containing a json representation of the switch.
*/
func (s *Switch) To_json( ) ( jstr string ) {
	var sep = ""

	if s == nil {
		jstr = `{ "id": "null_switch" }`			// key must be quoted for the output to be valid json
		return
	}

	if s.lidx > 0 {
		jstr = fmt.Sprintf( `{ "id": %q, "links": [ `, *s.id )

		for i := 0; i < s.lidx; i++ {
			jstr += fmt.Sprintf( "%s%s", sep, s.links[i].To_json() )
			sep = ","
		}
		jstr += " ]"
	} else {
		jstr = fmt.Sprintf( `{ "id": %q }`, *s.id )
	}

	if len( s.hosts ) > 0 {
		jstr += fmt.Sprintf( `, "conn_hosts": [ ` )
		sep = ""
		for k := range s.hosts {
			if s.hosts[k] {
				vmid := "unknown"
				if s.hvmid[k] != nil {
					vmid = *s.hvmid[k]
				}
				jstr += fmt.Sprintf( `%s { "host": %q, "port": %d, "vmid": %q }`, sep, k, s.hport[k], vmid )
				sep = ","
			}
		}
		jstr += " ]"
	}

	jstr += " }"

	return
}
{ return nil }
conditional_block
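The extracted conditional block above ({ return nil }) is the nil-receiver guard used throughout switch.go. Go permits calling a method on a nil pointer receiver, so the guard returns early instead of letting a field dereference panic. A tiny self-contained demonstration (the thing type is illustrative only):

package main

import "fmt"

type thing struct {
	id string
}

// Get_id is safe to call on a nil *thing because the receiver is checked
// before any field access; without the guard, t.id would panic.
func (t *thing) Get_id() *string {
	if t == nil {
		return nil
	}
	return &t.id
}

func main() {
	var t *thing                   // nil pointer, never allocated
	fmt.Println(t.Get_id() == nil) // true, and no panic
}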
message.go
package message

import (
	"fmt"
	"bytes"
	"errors"
	"time"
	"unsafe"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"dad-go/common"
	"dad-go/node"
)

const (
	MSGCMDLEN	= 12
	CMDOFFSET	= 4
	CHECKSUMLEN	= 4
	HASHLEN		= 32	// hash length in bytes
	MSGHDRLEN	= 24
)

// The inventory type
const (
	TXN       = 0x01	// Transaction
	BLOCK     = 0x02
	CONSENSUS = 0xe0
)

type messager interface {
	verify([]byte) error
	serialization() ([]byte, error)
	deserialization([]byte) error
	handle(*node) error
}

// The network communication message header
type msgHdr struct {
	Magic    uint32
	CMD      [MSGCMDLEN]byte	// The message type
	Length   uint32
	Checksum [CHECKSUMLEN]byte
}

// The message body and header
type msgCont struct {
	hdr msgHdr
	p   interface{}
}

type varStr struct {
	len uint
	buf []byte
}

type verACK struct {
	msgHdr
	// No payload
}

type version struct {
	Hdr msgHdr
	P   struct {
		Version   uint32
		Services  uint64
		TimeStamp uint32
		Port      uint16
		// TODO: remove; temporary field kept so that serialization passes
		Nonce     uint32
		UserAgent uint8
		StartHeight uint32
		// FIXME: check the spec for the relay type length
		Relay     uint8
	}
}

type headersReq struct {
	hdr msgHdr
	p   struct {
		len       uint8
		hashStart [HASHLEN]byte
		hashEnd   [HASHLEN]byte
	}
}

type addrReq struct {
	Hdr msgHdr
	// No payload
}

type blkHeader struct {
	hdr    msgHdr
	blkHdr []byte
}

type addr struct {
	msgHdr
	// TBD
}

type invPayload struct {
	invType uint8
	blk     []byte
}

type inv struct {
	hdr msgHdr
	p   invPayload
}

type dataReq struct {
	msgHdr
	// TBD
}

type block struct {
	msgHdr
	// TBD
}

// Transaction message
type trn struct {
	msgHdr
	// TBD
}

// Allocate a different message structure based on the message type.
// @t      the message name or type
// @length the message length; only valid for variable length structures
//
// Return:
// @messager the messager structure
// @error    error code
// FixMe: fix the ugly multiple return.
func allocMsg(t string, length int) (messager, error) {
	switch t {
	case "msgheader":
		var msg msgHdr
		return &msg, nil
	case "version":
		var msg version
		return &msg, nil
	case "verack":
		var msg verACK
		return &msg, nil
	case "getheaders":
		var msg headersReq
		return &msg, nil
	case "headers":
		var msg blkHeader
		return &msg, nil
	case "getaddr":
		var msg addrReq
		return &msg, nil
	case "addr":
		var msg addr
		return &msg, nil
	case "inv":
		var msg inv
		// the 1 accounts for the inv type length
		msg.p.blk = make([]byte, length-MSGHDRLEN-1)
		return &msg, nil
	case "getdata":
		var msg dataReq
		return &msg, nil
	case "block":
		var msg block
		return &msg, nil
	case "tx":
		var msg trn
		return &msg, nil
	default:
		return nil, errors.New("Unknown message type")
	}
}

// TODO: combine all of the message allocation in one function via an interface
func newMsg(t string) ([]byte, error) {
	switch t {
	case "version":
		return newVersion()
	case "verack":
		return newVerack()
	case "getheaders":
		return newHeadersReq()
	case "getaddr":
		return newGetAddr()
	default:
		return nil, errors.New("Unknown message type")
	}
}

func (hdr *msgHdr) init(cmd string, checksum []byte, length uint32) {
	hdr.Magic = NETMAGIC
	copy(hdr.CMD[0:uint32(len(cmd))], cmd)
	copy(hdr.Checksum[:], checksum[:CHECKSUMLEN])
	hdr.Length = length

	fmt.Printf("The message payload length is %d\n", hdr.Length)
	fmt.Printf("The message header length is %d\n", uint32(unsafe.Sizeof(*hdr)))
}

func (msg *version) init(n node) {
	// Do the init
}

func newVersion() ([]byte, error) {
	common.Trace()
	var msg version

	// TODO: needs a node read lock or channel
	msg.P.Version = nodes.node.version
	msg.P.Services = nodes.node.services
	// FIXME: time overflow
	msg.P.TimeStamp = uint32(time.Now().UTC().UnixNano())
	msg.P.Port = nodes.node.port
	msg.P.Nonce = nodes.node.nonce
	fmt.Printf("The nonce is 0x%x\n", msg.P.Nonce)
	msg.P.UserAgent = 0x00
	// FIXME: get the block height from the ledger
	msg.P.StartHeight = 1
	if nodes.node.relay {
		msg.P.Relay = 1
	} else {
		msg.P.Relay = 0
	}

	msg.Hdr.Magic = NETMAGIC
	ver := "version"
	copy(msg.Hdr.CMD[0:7], ver)
	p := new(bytes.Buffer)
	err := binary.Write(p, binary.LittleEndian, &(msg.P))
	if err != nil {
		fmt.Println("Binary Write failed at new Msg")
		return nil, err
	}

	s := sha256.Sum256(p.Bytes())
	s2 := s[:]
	s = sha256.Sum256(s2)
	buf := bytes.NewBuffer(s[:4])
	binary.Read(buf, binary.LittleEndian, &(msg.Hdr.Checksum))
	msg.Hdr.Length = uint32(len(p.Bytes()))
	fmt.Printf("The message payload length is %d\n", msg.Hdr.Length)

	m, err := msg.serialization()
	if err != nil {
		fmt.Println("Error Convert net message ", err.Error())
		return nil, err
	}

	str := hex.EncodeToString(m)
	fmt.Printf("The message length is %d, %s\n", len(m), str)
	return m, nil
}

func newVerack() ([]byte, error) {
	var msg verACK
	// FIXME: hard coded checksum of an empty payload; it should be computed
	var sum []byte
	sum = []byte{0x5d, 0xf6, 0xe0, 0xe2}
	msg.msgHdr.init("verack", sum, 0)

	buf, err := msg.serialization()
	if err != nil {
		return nil, err
	}

	str := hex.EncodeToString(buf)
	fmt.Printf("The message tx verack length is %d, %s", len(buf), str)
	return buf, err
}

func newGetAddr() ([]byte, error) {
	var msg addrReq
	// FIXME: hard coded checksum of an empty payload; it should be computed
	var sum []byte
	sum = []byte{0x5d, 0xf6, 0xe0, 0xe2}
	msg.Hdr.init("getaddr", sum, 0)

	buf, err := msg.serialization()
	if err != nil {
		return nil, err
	}

	str := hex.EncodeToString(buf)
	fmt.Printf("The message get addr length is %d, %s", len(buf), str)
	return buf, err
}

func magicVerify(magic uint32) bool {
	return magic == NETMAGIC
}

func payloadLen(buf []byte) int {
	var h msgHdr
	h.deserialization(buf)
	return int(h.Length)
}

func msgType(buf []byte) (string, error) {
	cmd := buf[CMDOFFSET : CMDOFFSET+MSGCMDLEN]
	n := bytes.IndexByte(cmd, 0)
	if n < 0 || n >= MSGCMDLEN {
		return "", errors.New("Unexpected length of CMD command")
	}
	s := string(cmd[:n])
	return s, nil
}

func checkSum(p []byte) []byte {
	t := sha256.Sum256(p)
	s := sha256.Sum256(t[:])

	// Currently we only need the front 4 bytes as checksum
	return s[:CHECKSUMLEN]
}

func reverse(input []byte) []byte {
	if len(input) == 0 {
		return input
	}
	return append(reverse(input[1:]), input[0])
}

func newHeadersReq() ([]byte, error) {
	var h headersReq

	// FIXME: set the exact request length
	h.p.len = 1
	buf, err := LedgerGetHeader()
	if err != nil {
		return nil, err
	}
	copy(h.p.hashStart[:], reverse(buf))

	p := new(bytes.Buffer)
	err = binary.Write(p, binary.LittleEndian, &(h.p))
	if err != nil {
		fmt.Println("Binary Write failed at new headersReq")
		return nil, err
	}
	s := checkSum(p.Bytes())
	h.hdr.init("getheaders", s, uint32(len(p.Bytes())))

	m, err := h.serialization()
	str := hex.EncodeToString(m)
	fmt.Printf("The message length is %d, %s\n", len(m), str)
	return m, err
}

// Verify the message header information.
// @p payload of the message
func (hdr msgHdr) verify(buf []byte) error {
	if hdr.Magic != NETMAGIC {
		fmt.Printf("Unmatched magic number 0x%d\n", hdr.Magic)
		return errors.New("Unmatched magic number")
	}

	checkSum := checkSum(buf)
	if !bytes.Equal(hdr.Checksum[:], checkSum[:]) {
		str1 := hex.EncodeToString(hdr.Checksum[:])
		str2 := hex.EncodeToString(checkSum[:])
		fmt.Printf("Message Checksum error, Received checksum %s Wanted checksum: %s\n", str1, str2)
		return errors.New("Message Checksum error")
	}

	return nil
}

func (msg version) verify(buf []byte) error {
	err := msg.Hdr.verify(buf)
	// TODO: verify the message content
	return err
}

func (msg headersReq) verify(buf []byte) error {
	// TODO: verify the message content
	err := msg.hdr.verify(buf)
	return err
}

func (msg blkHeader) verify(buf []byte) error {
	// TODO: verify the message content
	err := msg.hdr.verify(buf)
	return err
}

func (msg addrReq) verify(buf []byte) error {
	// TODO: verify the message content
	err := msg.Hdr.verify(buf)
	return err
}

func (msg inv) verify(buf []byte) error {
	// TODO: verify the message content
	err := msg.hdr.verify(buf)
	return err
}

// FIXME: how to avoid duplicate serial/deserial functions, as
// most of them are the same
func (hdr msgHdr) serialization() ([]byte, error) {
	var buf bytes.Buffer
	err := binary.Write(&buf, binary.LittleEndian, hdr)
	if err != nil {
		return nil, err
	}

	return buf.Bytes(), err
}

func (msg *msgHdr) deserialization(p []byte) error {
	buf := bytes.NewBuffer(p[0:MSGHDRLEN])
	err := binary.Read(buf, binary.LittleEndian, msg)
	return err
}

func (msg version) serialization() ([]byte, error) {
	var buf bytes.Buffer
	fmt.Printf("The size of message is %d in serialization\n", uint32(unsafe.Sizeof(msg)))
	err := binary.Write(&buf, binary.LittleEndian, msg)
	if err != nil {
		return nil, err
	}

	return buf.Bytes(), err
}

func (msg *version) deserialization(p []byte) error {
	fmt.Printf("The size of message is %d in deserialization\n", uint32(unsafe.Sizeof(*msg)))
	buf := bytes.NewBuffer(p)
	err := binary.Read(buf, binary.LittleEndian, msg)
	return err
}

func (msg headersReq)
() ([]byte, error) {
	var buf bytes.Buffer
	fmt.Printf("The size of message is %d in serialization\n", uint32(unsafe.Sizeof(msg)))
	err := binary.Write(&buf, binary.LittleEndian, msg)
	if err != nil {
		return nil, err
	}

	return buf.Bytes(), err
}

func (msg *headersReq) deserialization(p []byte) error {
	fmt.Printf("The size of message is %d in deserialization\n", uint32(unsafe.Sizeof(*msg)))
	buf := bytes.NewBuffer(p)
	err := binary.Read(buf, binary.LittleEndian, msg)
	return err
}

func (msg blkHeader) serialization() ([]byte, error) {
	var buf bytes.Buffer
	fmt.Printf("The size of message is %d in serialization\n", uint32(unsafe.Sizeof(msg)))
	err := binary.Write(&buf, binary.LittleEndian, msg)
	if err != nil {
		return nil, err
	}

	// TODO: serialize the header, then the payload
	return buf.Bytes(), err
}

func (msg *blkHeader) deserialization(p []byte) error {
	fmt.Printf("The size of message is %d in deserialization\n", uint32(unsafe.Sizeof(*msg)))
	err := msg.hdr.deserialization(p)
	msg.blkHdr = p[MSGHDRLEN:]
	return err
}

func (msg addrReq) serialization() ([]byte, error) {
	var buf bytes.Buffer
	fmt.Printf("The size of message is %d in serialization\n", uint32(unsafe.Sizeof(msg)))
	err := binary.Write(&buf, binary.LittleEndian, msg)
	if err != nil {
		return nil, err
	}

	return buf.Bytes(), err
}

func (msg *addrReq) deserialization(p []byte) error {
	fmt.Printf("The size of message is %d in deserialization\n", uint32(unsafe.Sizeof(*msg)))
	buf := bytes.NewBuffer(p)
	err := binary.Read(buf, binary.LittleEndian, msg)
	return err
}

func (msg inv) serialization() ([]byte, error) {
	var buf bytes.Buffer
	fmt.Printf("The size of message is %d in serialization\n", uint32(unsafe.Sizeof(msg)))
	err := binary.Write(&buf, binary.LittleEndian, msg)
	if err != nil {
		return nil, err
	}

	return buf.Bytes(), err
}

func (msg *inv) deserialization(p []byte) error {
	fmt.Printf("The size of message is %d in deserialization\n", uint32(unsafe.Sizeof(*msg)))
	err := msg.hdr.deserialization(p)

	msg.p.invType = p[MSGHDRLEN]
	msg.p.blk = p[MSGHDRLEN+1:]
	return err
}

func (msg inv) invType() byte {
	return msg.p.invType
}

func (msg inv) invLen() (uint64, uint8) {
	var val uint64
	var size uint8

	// Bitcoin style var-int: the first byte either holds the value itself or
	// marks the width of the little endian integer that follows (0xfd: 2
	// bytes, 0xfe: 4 bytes, 0xff: 8 bytes). Read the marker byte directly and
	// use the decoder that matches the width; calling Uint64 on a slice
	// shorter than 8 bytes panics.
	first := msg.p.blk[0]
	if first < 0xfd {
		val = uint64(first)
		size = 1
	} else if first == 0xfd {
		val = uint64(binary.LittleEndian.Uint16(msg.p.blk[1:3]))
		size = 3
	} else if first == 0xfe {
		val = uint64(binary.LittleEndian.Uint32(msg.p.blk[1:5]))
		size = 5
	} else { // 0xff
		val = binary.LittleEndian.Uint64(msg.p.blk[1:9])
		size = 9
	}

	return val, size
}
serialization
identifier_name
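The record above splits message.go around the serialization identifier. Two helpers in that file are compact enough to sanity-check in isolation: the double SHA-256 checksum and the Bitcoin-style variable-length integer that invLen decodes (0xfd marks a 2-byte value, 0xfe a 4-byte value, 0xff an 8-byte value). A self-contained sketch, independent of the message package:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// checkSum returns the first 4 bytes of sha256(sha256(p)), matching the
// checksum convention used by the message header.
func checkSum(p []byte) []byte {
	t := sha256.Sum256(p)
	s := sha256.Sum256(t[:])
	return s[:4]
}

// varInt decodes a little endian variable length integer, returning the
// value and the number of bytes consumed.
func varInt(b []byte) (val uint64, size uint8) {
	switch first := b[0]; {
	case first < 0xfd:
		return uint64(first), 1
	case first == 0xfd:
		return uint64(binary.LittleEndian.Uint16(b[1:3])), 3
	case first == 0xfe:
		return uint64(binary.LittleEndian.Uint32(b[1:5])), 5
	default: // 0xff
		return binary.LittleEndian.Uint64(b[1:9]), 9
	}
}

func main() {
	fmt.Printf("%x\n", checkSum([]byte{})) // 5df6e0e2: checksum of an empty payload
	v, n := varInt([]byte{0xfd, 0x34, 0x12})
	fmt.Println(v, n) // 4660 3
}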