| file_name | prefix | suffix | middle | fim_type |
|---|---|---|---|---|
| large_string (lengths 4–140) | large_string (lengths 0–12.1k) | large_string (lengths 0–12k) | large_string (lengths 0–7.51k) | large_string (4 classes) |
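Each row below is one fill-in-the-middle (FIM) sample: a source file is cut into a `prefix`, a held-out `middle`, and a `suffix`, and `fim_type` records how the hole was made (the four classes appearing below are `random_line_split`, `conditional_block`, `identifier_name`, and `identifier_body`). A minimal sketch of reading a row back into contiguous source text, assuming the data is loadable with the `datasets` library under a placeholder repository name:

```python
# Sketch only: the repository id is a placeholder, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("your-org/your-fim-dataset", split="train")  # hypothetical id
row = ds[0]
original = row["prefix"] + row["middle"] + row["suffix"]  # reassemble the file
print(row["file_name"], row["fim_type"])
print(original[:200])
```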
_typecheck.py
|
an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Minimal runtime type checking library.
This module should not be considered public API.
"""
# TODO(ericmc,shoyer): Delete this in favor of using pytype or mypy
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
from tensorflow.python.util import tf_inspect
# used for register_type_abbreviation and _type_repr below.
_TYPE_ABBREVIATIONS = {}
class Type(object):
"""Base class for type checker types.
The custom types defined in this module are based on types in the standard
library's typing module (in Python 3.5):
https://docs.python.org/3/library/typing.html
The only difference should be that we use actual instances of Type classes to
represent custom types rather than the metaclass magic typing uses to create
new class objects. In practice, all this should mean is that we use
`List(int)` rather than `List[int]`.
Custom types should implement __instancecheck__ and inherit from Type. Every
argument in the constructor must be a type or Type instance, and these
arguments must be stored as a tuple on the `_types` attribute.
"""
def __init__(self, *types):
self._types = types
def __repr__(self):
args_repr = ", ".join(repr(t) for t in self._types)
return "typecheck.%s(%s)" % (type(self).__name__, args_repr)
class _SingleArgumentType(Type):
"""Use this subclass for parametric types that accept only one argument."""
def __init__(self, tpe):
super(_SingleArgumentType, self).__init__(tpe)
@property
def _type(self):
tpe, = self._types # pylint: disable=unbalanced-tuple-unpacking
return tpe
class _TwoArgumentType(Type):
"""Use this subclass for parametric types that accept two arguments."""
def __init__(self, first_type, second_type):
super(_TwoArgumentType, self).__init__(first_type, second_type)
class Union(Type):
"""A sum type.
A correct type is any of the types provided.
"""
def __instancecheck__(self, instance):
return isinstance(instance, self._types)
class Optional(_SingleArgumentType):
"""An optional type.
A correct type is either the provided type or NoneType.
"""
def __instancecheck__(self, instance):
# types.NoneType does not exist in Python 3
return isinstance(instance, (self._type, type(None)))
class List(_SingleArgumentType):
"""A typed list.
A correct type is a list where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, list)
and all(isinstance(x, self._type) for x in instance))
class Sequence(_SingleArgumentType):
"""A typed sequence.
A correct type is a sequence where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Sequence)
and all(isinstance(x, self._type) for x in instance))
class Collection(_SingleArgumentType):
"""A sized, iterable container.
A correct type is an iterable and container with known size where each element
has the single provided type.
We use this in preference to Iterable because we check each instance of the
iterable at runtime, and hence need to avoid iterables that could be
exhausted.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Iterable)
and isinstance(instance, collections.Sized)
and isinstance(instance, collections.Container)
and all(isinstance(x, self._type) for x in instance))
class Tuple(Type):
"""A typed tuple.
A correct type is a tuple with the correct length where each element has
the correct type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, tuple)
and len(instance) == len(self._types)
and all(isinstance(x, t) for x, t in zip(instance, self._types)))
class Mapping(_TwoArgumentType):
"""A typed mapping.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
key_type, value_type = self._types # pylint: disable=unbalanced-tuple-unpacking
return (isinstance(instance, collections.Mapping)
and all(isinstance(k, key_type) for k in instance.keys())
and all(isinstance(k, value_type) for k in instance.values()))
class Dict(Mapping):
"""A typed dict.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, dict)
and super(Dict, self).__instancecheck__(instance))
def _replace_forward_references(t, context):
"""Replace forward references in the given type."""
if isinstance(t, str):
return context[t]
elif isinstance(t, Type):
return type(t)(*[_replace_forward_references(t, context) for t in t._types]) # pylint: disable=protected-access
else:
return t
def register_type_abbreviation(name, alias):
"""Register an abbreviation for a type in typecheck tracebacks.
This makes otherwise very long typecheck errors much more readable.
Example:
typecheck.register_type_abbreviation(tf.Dimension, 'tf.Dimension')
Args:
name: type or class to abbreviate.
alias: string alias to substitute.
"""
_TYPE_ABBREVIATIONS[name] = alias
def _type_repr(t):
"""A more succinct repr for typecheck tracebacks."""
string = repr(t)
for type_, alias in _TYPE_ABBREVIATIONS.items():
string = string.replace(repr(type_), alias)
string = re.sub(r"<(class|type) '([\w.]+)'>", r"\2", string)
string = re.sub(r"typecheck\.(\w+)", r"\1", string)
return string
class Error(TypeError):
"""Exception for typecheck failures."""
def accepts(*types):
"""A decorator which checks the input types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
Returns:
A function to use as a decorator.
"""
def check_accepts(f):
"""Check the types."""
spec = tf_inspect.getargspec(f)
num_function_arguments = len(spec.args)
if len(types) != num_function_arguments:
raise Error(
"Function %r has %d arguments but only %d types were provided in the "
"annotation." % (f, num_function_arguments, len(types)))
if spec.defaults:
num_defaults = len(spec.defaults)
for (name, a, t) in zip(spec.args[-num_defaults:],
spec.defaults,
types[-num_defaults:]):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("default argument value %r of type %r is not an instance "
"of the allowed type %s for the %s argument to %r"
% (a, type(a), _type_repr(allowed_type), name, f))
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
for (a, t) in zip(args, types):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("%r of type %r is not an instance of the allowed type %s "
"for %r" % (a, type(a), _type_repr(allowed_type), f))
return f(*args, **kwds)
return new_f
return check_accepts
def returns(*types):
"""A decorator which checks the return types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
A list of one element corresponds to a single return value.
A list of several elements corresponds to several return values.
Note that a function with no explicit return value has an implicit
NoneType return and should be annotated correspondingly.
Returns:
A function to use as a decorator.
|
"""
def check_returns(f):
"""Check the types."""
if not types:
raise TypeError("A return type annotation must contain at least one type")
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
return_value = f(*args, **kwds)
if len(types) == 1:
|
random_line_split
|
|
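The `_typecheck` sample above hooks its parametric types into plain `isinstance` through `__instancecheck__`, so a quick sanity check reads naturally; this sketch assumes the `List`, `Optional`, and `Union` classes from the row above are in scope:

```python
# Sketch: List, Optional and Union are the classes defined in the sample above.
# Their __instancecheck__ overrides make ordinary isinstance() calls work on
# Type *instances*, e.g. List(int) rather than List[int].
assert isinstance([1, 2, 3], List(int))
assert isinstance(None, Optional(str))
assert isinstance("x", Union(int, str))
assert not isinstance([1, "a"], List(int))
```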
_typecheck.py
|
"AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Minimal runtime type checking library.
This module should not be considered public API.
"""
# TODO(ericmc,shoyer): Delete this in favor of using pytype or mypy
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
from tensorflow.python.util import tf_inspect
# used for register_type_abbreviation and _type_repr below.
_TYPE_ABBREVIATIONS = {}
class Type(object):
"""Base class for type checker types.
The custom types defined in this module are based on types in the standard
library's typing module (in Python 3.5):
https://docs.python.org/3/library/typing.html
The only difference should be that we use actual instances of Type classes to
represent custom types rather than the metaclass magic typing uses to create
new class objects. In practice, all this should mean is that we use
`List(int)` rather than `List[int]`.
Custom types should implement __instancecheck__ and inherit from Type. Every
argument in the constructor must be a type or Type instance, and these
arguments must be stored as a tuple on the `_types` attribute.
"""
def __init__(self, *types):
self._types = types
def __repr__(self):
args_repr = ", ".join(repr(t) for t in self._types)
return "typecheck.%s(%s)" % (type(self).__name__, args_repr)
class _SingleArgumentType(Type):
"""Use this subclass for parametric types that accept only one argument."""
def __init__(self, tpe):
super(_SingleArgumentType, self).__init__(tpe)
@property
def _type(self):
tpe, = self._types # pylint: disable=unbalanced-tuple-unpacking
return tpe
class _TwoArgumentType(Type):
"""Use this subclass for parametric types that accept two arguments."""
def __init__(self, first_type, second_type):
super(_TwoArgumentType, self).__init__(first_type, second_type)
class Union(Type):
"""A sum type.
A correct type is any of the types provided.
"""
def __instancecheck__(self, instance):
return isinstance(instance, self._types)
class Optional(_SingleArgumentType):
"""An optional type.
A correct type is either the provided type or NoneType.
"""
def __instancecheck__(self, instance):
# types.NoneType does not exist in Python 3
return isinstance(instance, (self._type, type(None)))
class List(_SingleArgumentType):
"""A typed list.
A correct type is a list where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, list)
and all(isinstance(x, self._type) for x in instance))
class Sequence(_SingleArgumentType):
"""A typed sequence.
A correct type is a sequence where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Sequence)
and all(isinstance(x, self._type) for x in instance))
class Collection(_SingleArgumentType):
"""A sized, iterable container.
A correct type is an iterable and container with known size where each element
has the single provided type.
We use this in preference to Iterable because we check each instance of the
iterable at runtime, and hence need to avoid iterables that could be
exhausted.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Iterable)
and isinstance(instance, collections.Sized)
and isinstance(instance, collections.Container)
and all(isinstance(x, self._type) for x in instance))
class Tuple(Type):
"""A typed tuple.
A correct type is a tuple with the correct length where each element has
the correct type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, tuple)
and len(instance) == len(self._types)
and all(isinstance(x, t) for x, t in zip(instance, self._types)))
class Mapping(_TwoArgumentType):
"""A typed mapping.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
key_type, value_type = self._types # pylint: disable=unbalanced-tuple-unpacking
return (isinstance(instance, collections.Mapping)
and all(isinstance(k, key_type) for k in instance.keys())
and all(isinstance(k, value_type) for k in instance.values()))
class Dict(Mapping):
"""A typed dict.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, dict)
and super(Dict, self).__instancecheck__(instance))
def _replace_forward_references(t, context):
"""Replace forward references in the given type."""
if isinstance(t, str):
return context[t]
elif isinstance(t, Type):
return type(t)(*[_replace_forward_references(t, context) for t in t._types]) # pylint: disable=protected-access
else:
return t
def register_type_abbreviation(name, alias):
"""Register an abbreviation for a type in typecheck tracebacks.
This makes otherwise very long typecheck errors much more readable.
Example:
typecheck.register_type_abbreviation(tf.Dimension, 'tf.Dimension')
Args:
name: type or class to abbreviate.
alias: string alias to substitute.
"""
_TYPE_ABBREVIATIONS[name] = alias
def _type_repr(t):
"""A more succinct repr for typecheck tracebacks."""
string = repr(t)
for type_, alias in _TYPE_ABBREVIATIONS.items():
string = string.replace(repr(type_), alias)
string = re.sub(r"<(class|type) '([\w.]+)'>", r"\2", string)
string = re.sub(r"typecheck\.(\w+)", r"\1", string)
return string
class Error(TypeError):
"""Exception for typecheck failures."""
def accepts(*types):
"""A decorator which checks the input types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
Returns:
A function to use as a decorator.
"""
def check_accepts(f):
"""Check the types."""
spec = tf_inspect.getargspec(f)
num_function_arguments = len(spec.args)
if len(types) != num_function_arguments:
|
if spec.defaults:
num_defaults = len(spec.defaults)
for (name, a, t) in zip(spec.args[-num_defaults:],
spec.defaults,
types[-num_defaults:]):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("default argument value %r of type %r is not an instance "
"of the allowed type %s for the %s argument to %r"
% (a, type(a), _type_repr(allowed_type), name, f))
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
for (a, t) in zip(args, types):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("%r of type %r is not an instance of the allowed type %s "
"for %r" % (a, type(a), _type_repr(allowed_type), f))
return f(*args, **kwds)
return new_f
return check_accepts
def returns(*types):
"""A decorator which checks the return types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
A list of one element corresponds to a single return value.
A list of several elements corresponds to several return values.
Note that a function with no explicit return value has an implicit
NoneType return and should be annotated correspondingly.
Returns:
A function to use as a decorator.
"""
def check_returns(f):
"""Check the types."""
if not types:
raise TypeError("A return type annotation must contain at least one type")
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
return_value = f(*args, **kwds)
if len(types) == 1:
|
raise Error(
"Function %r has %d arguments but only %d types were provided in the "
"annotation." % (f, num_function_arguments, len(types)))
|
conditional_block
|
_typecheck.py
|
"AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Minimal runtime type checking library.
This module should not be considered public API.
"""
# TODO(ericmc,shoyer): Delete this in favor of using pytype or mypy
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
from tensorflow.python.util import tf_inspect
# used for register_type_abbreviation and _type_repr below.
_TYPE_ABBREVIATIONS = {}
class Type(object):
"""Base class for type checker types.
The custom types defined in this module are based on types in the standard
library's typing module (in Python 3.5):
https://docs.python.org/3/library/typing.html
The only difference should be that we use actual instances of Type classes to
represent custom types rather than the metaclass magic typing uses to create
new class objects. In practice, all this should mean is that we use
`List(int)` rather than `List[int]`.
Custom types should implement __instancecheck__ and inherit from Type. Every
argument in the constructor must be a type or Type instance, and these
arguments must be stored as a tuple on the `_types` attribute.
"""
def __init__(self, *types):
self._types = types
def __repr__(self):
args_repr = ", ".join(repr(t) for t in self._types)
return "typecheck.%s(%s)" % (type(self).__name__, args_repr)
class _SingleArgumentType(Type):
"""Use this subclass for parametric types that accept only one argument."""
def __init__(self, tpe):
super(_SingleArgumentType, self).__init__(tpe)
@property
def
|
(self):
tpe, = self._types # pylint: disable=unbalanced-tuple-unpacking
return tpe
class _TwoArgumentType(Type):
"""Use this subclass for parametric types that accept two arguments."""
def __init__(self, first_type, second_type):
super(_TwoArgumentType, self).__init__(first_type, second_type)
class Union(Type):
"""A sum type.
A correct type is any of the types provided.
"""
def __instancecheck__(self, instance):
return isinstance(instance, self._types)
class Optional(_SingleArgumentType):
"""An optional type.
A correct type is either the provided type or NoneType.
"""
def __instancecheck__(self, instance):
# types.NoneType does not exist in Python 3
return isinstance(instance, (self._type, type(None)))
class List(_SingleArgumentType):
"""A typed list.
A correct type is a list where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, list)
and all(isinstance(x, self._type) for x in instance))
class Sequence(_SingleArgumentType):
"""A typed sequence.
A correct type is a sequence where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Sequence)
and all(isinstance(x, self._type) for x in instance))
class Collection(_SingleArgumentType):
"""A sized, iterable container.
A correct type is an iterable and container with known size where each element
has the single provided type.
We use this in preference to Iterable because we check each instance of the
iterable at runtime, and hence need to avoid iterables that could be
exhausted.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Iterable)
and isinstance(instance, collections.Sized)
and isinstance(instance, collections.Container)
and all(isinstance(x, self._type) for x in instance))
class Tuple(Type):
"""A typed tuple.
A correct type is a tuple with the correct length where each element has
the correct type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, tuple)
and len(instance) == len(self._types)
and all(isinstance(x, t) for x, t in zip(instance, self._types)))
class Mapping(_TwoArgumentType):
"""A typed mapping.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
key_type, value_type = self._types # pylint: disable=unbalanced-tuple-unpacking
return (isinstance(instance, collections.Mapping)
and all(isinstance(k, key_type) for k in instance.keys())
and all(isinstance(k, value_type) for k in instance.values()))
class Dict(Mapping):
"""A typed dict.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, dict)
and super(Dict, self).__instancecheck__(instance))
def _replace_forward_references(t, context):
"""Replace forward references in the given type."""
if isinstance(t, str):
return context[t]
elif isinstance(t, Type):
return type(t)(*[_replace_forward_references(t, context) for t in t._types]) # pylint: disable=protected-access
else:
return t
def register_type_abbreviation(name, alias):
"""Register an abbreviation for a type in typecheck tracebacks.
This makes otherwise very long typecheck errors much more readable.
Example:
typecheck.register_type_abbreviation(tf.Dimension, 'tf.Dimension')
Args:
name: type or class to abbreviate.
alias: string alias to substitute.
"""
_TYPE_ABBREVIATIONS[name] = alias
def _type_repr(t):
"""A more succinct repr for typecheck tracebacks."""
string = repr(t)
for type_, alias in _TYPE_ABBREVIATIONS.items():
string = string.replace(repr(type_), alias)
string = re.sub(r"<(class|type) '([\w.]+)'>", r"\2", string)
string = re.sub(r"typecheck\.(\w+)", r"\1", string)
return string
class Error(TypeError):
"""Exception for typecheck failures."""
def accepts(*types):
"""A decorator which checks the input types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
Returns:
A function to use as a decorator.
"""
def check_accepts(f):
"""Check the types."""
spec = tf_inspect.getargspec(f)
num_function_arguments = len(spec.args)
if len(types) != num_function_arguments:
raise Error(
"Function %r has %d arguments but only %d types were provided in the "
"annotation." % (f, num_function_arguments, len(types)))
if spec.defaults:
num_defaults = len(spec.defaults)
for (name, a, t) in zip(spec.args[-num_defaults:],
spec.defaults,
types[-num_defaults:]):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("default argument value %r of type %r is not an instance "
"of the allowed type %s for the %s argument to %r"
% (a, type(a), _type_repr(allowed_type), name, f))
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
for (a, t) in zip(args, types):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("%r of type %r is not an instance of the allowed type %s "
"for %r" % (a, type(a), _type_repr(allowed_type), f))
return f(*args, **kwds)
return new_f
return check_accepts
def returns(*types):
"""A decorator which checks the return types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
A list of one element corresponds to a single return value.
A list of several elements corresponds to several return values.
Note that a function with no explicit return value has an implicit
NoneType return and should be annotated correspondingly.
Returns:
A function to use as a decorator.
"""
def check_returns(f):
"""Check the types."""
if not types:
raise TypeError("A return type annotation must contain at least one type")
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
return_value = f(*args, **kwds)
if len(types) == 1:
|
_type
|
identifier_name
|
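The row above is an `identifier_name` hole: the prefix stops immediately after `def ` and the held-out middle is just the property name `_type`. Reassembled (whitespace approximated), the target region looks like this:

```python
# Reassembly of the identifier_name row above; indentation is approximate.
prefix = "  @property\n  def "
middle = "_type"  # the held-out identifier
suffix = "(self):\n    tpe, = self._types\n    return tpe\n"
print(prefix + middle + suffix)
```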
game.go
|
(xInt int, zInt int, dir bool, offset mgl32.Vec2) []float32 {
x := float32(xInt)
z := float32(zInt)
var xAxis, zAxis float32
if dir {
xAxis = 1
zAxis = 0
} else {
xAxis = 0
zAxis = 1
}
return []float32{
-0.5*xAxis + x + offset.X(), 0, -0.5*zAxis + z + offset.Y(), 1, 0,
0.5*xAxis + x + offset.X(), 0, 0.5*zAxis + z + offset.Y(), 0, 0,
-0.5*xAxis + x + offset.X(), 1, -0.5*zAxis + z + offset.Y(), 1, 1,
0.5*xAxis + x + offset.X(), 0, 0.5*zAxis + z + offset.Y(), 0, 0,
0.5*xAxis + x + offset.X(), 1, 0.5*zAxis + z + offset.Y(), 0, 1,
-0.5*xAxis + x + offset.X(), 1, -0.5*zAxis + z + offset.Y(), 1, 1,
}
}
func check(e error) {
if e != nil {
panic(e)
}
}
var screenWidth = 800
var screenHeight = 600
var vertexArray []float32
var numFloorTiles, numWallTiles int
var fov float64
var projection mgl32.Mat4
const (
FOG_DISTANCE float32 = 8.0
)
func init() {
runtime.LockOSThread()
}
func main() {
keys = map[glfw.Key]bool{}
rand.Seed(time.Now().Unix())
player := &Player{
Yaw: 0,
Pitch: 0,
X: 0,
Y: 0,
Speed: 2,
Size: 0.7,
Health: 1,
}
fov = 90.0
fmt.Println("Generating Dungeon...")
dungeon := dungeon.NewDungeon(50, 200)
fmt.Println("Generated!")
room := dungeon.Rooms[0]
player.X = float64(room.Y + room.Height/2)
player.Y = float64(room.X + room.Width/2)
dungeon.Print()
enemies := []*Enemy{}
bloods := []*Blood{}
vertexArray = []float32{
// Enemy Sprite
-0.3, 0.6, 0, 1, 0,
0.3, 0.6, 0, 0, 0,
-0.3, 0.0, 0, 1, 1,
0.3, 0.6, 0, 0, 0,
0.3, 0.0, 0, 0, 1,
-0.3, 0.0, 0, 1, 1,
// Blood Sprite
-0.5, 0, -0.5, 0, 0,
0.5, 0, -0.5, 1, 0,
-0.5, 0, 0.5, 0, 1,
0.5, 0, -0.5, 1, 0,
0.5, 0, 0.5, 1, 1,
-0.5, 0, 0.5, 0, 1,
}
numFloorTiles = 0
numWallTiles = 0
for y, row := range dungeon.Grid {
for x, col := range row {
if col == 1 {
vertexArray = append(vertexArray, FloorTile(x, y)...)
numFloorTiles++
if rand.Int()%10 == 0 {
enemies = append(enemies, &Enemy{
X: float64(x),
Y: float64(y),
Size: 0.5,
DPS: 0.1,
})
}
}
}
}
for y, row := range dungeon.Grid {
for x, col := range row {
if col == 1 {
if y > 0 && dungeon.Grid[y-1][x] == 0 || y == 0 {
vertexArray = append(vertexArray, WallTile(x, y, true, mgl32.Vec2{0, -0.5})...)
numWallTiles++
}
if y < len(dungeon.Grid)-1 && dungeon.Grid[y+1][x] == 0 || y == len(dungeon.Grid)-1 {
vertexArray = append(vertexArray, WallTile(x, y, true, mgl32.Vec2{0, 0.5})...)
numWallTiles++
}
if x > 0 && dungeon.Grid[y][x-1] == 0 || x == 0 {
vertexArray = append(vertexArray, WallTile(x, y, false, mgl32.Vec2{-0.5, 0})...)
numWallTiles++
}
if x < len(row)-1 && dungeon.Grid[y][x+1] == 0 || x == len(row)-1 {
vertexArray = append(vertexArray, WallTile(x, y, false, mgl32.Vec2{0.5, 0})...)
numWallTiles++
}
}
}
}
err := glfw.Init()
check(err)
defer glfw.Terminate()
glfw.WindowHint(glfw.Resizable, glfw.True)
platform.WHint() //platform-specific window hinting
window, err := glfw.CreateWindow(screenWidth, screenHeight, "Dungeon", nil, nil)
check(err)
window.MakeContextCurrent()
window.SetInputMode(glfw.CursorMode, glfw.CursorDisabled)
window.SetKeyCallback(glfw.KeyCallback(KeyCallback))
window.SetSizeCallback(glfw.SizeCallback(SizeCallback))
window.SetMouseButtonCallback(glfw.MouseButtonCallback(MouseButtonCallback))
fmt.Println("Initializing GL")
err = gl.Init()
check(err)
fmt.Println("Loading Shaders")
vertexShader, err := ioutil.ReadFile("shaders/shader.vert")
check(err)
fragmentShader, err := ioutil.ReadFile("shaders/shader.frag")
check(err)
program, err := newProgram(string(vertexShader)+"\x00", string(fragmentShader)+"\x00")
check(err)
gl.UseProgram(program)
projection = mgl32.Perspective(mgl32.DegToRad(float32(fov)), float32(screenWidth)/float32(screenHeight), 0.001, 20.0)
//camera := mgl32.LookAtV(mgl32.Vec3{3, 1, 0}, mgl32.Vec3{2, 1, 0}, mgl32.Vec3{0, 1, 0})
camera := mgl32.LookAtV(mgl32.Vec3{0, 1, 0}, mgl32.Vec3{1, 1, 0}, mgl32.Vec3{0, 1, 0})
viewProjUniform := gl.GetUniformLocation(program, gl.Str("viewProj\x00"))
viewProj := camera.Mul4(projection)
gl.UniformMatrix4fv(viewProjUniform, 1, false, &viewProj[0])
model := mgl32.Ident4()
modelUniform := gl.GetUniformLocation(program, gl.Str("model\x00"))
gl.UniformMatrix4fv(modelUniform, 1, false, &model[0])
textureUniform := gl.GetUniformLocation(program, gl.Str("tex\x00"))
gl.Uniform1i(textureUniform, 0)
fogDistUniform := gl.GetUniformLocation(program, gl.Str("fogDist\x00"))
gl.Uniform1f(fogDistUniform, FOG_DISTANCE)
fmt.Println("Loading Textures")
floorTexture, err := newTexture("textures/floor.png")
check(err)
wallTexture, err := newTexture("textures/wall.png")
check(err)
enemyTexture, err := newTexture("textures/monster.png")
check(err)
bloodTexture, err := newTexture("textures/blood.png")
check(err)
var vao uint32
gl.GenVertexArrays(1, &vao)
gl.BindVertexArray(vao)
var vbo uint32
gl.GenBuffers(1, &vbo)
gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
gl.BufferData(gl.ARRAY_BUFFER, len(vertexArray)*4, gl.Ptr(vertexArray), gl.STATIC_DRAW)
vertAttrib := uint32(gl.GetAttribLocation(program, gl.Str("vert\x00")))
gl.EnableVertexAttribArray(vertAttrib)
gl.VertexAttribPointer(vertAttrib, 3, gl.FLOAT, false, 5*4, gl.PtrOffset(0))
texCoordAttrib := uint32(gl.GetAttribLocation(program, gl.Str("vertTexCoord\x00")))
gl.EnableVertexAttribArray(texCoordAttrib)
gl.VertexAttribPointer(texCoordAttrib, 2, gl.FLOAT, false, 5*4
|
WallTile
|
identifier_name
|
|
game.go
|
, 1,
-0.5*xAxis + x + offset.X(), 1, -0.5*zAxis + z + offset.Y(), 1, 1,
}
}
func check(e error) {
if e != nil {
panic(e)
}
}
var screenWidth = 800
var screenHeight = 600
var vertexArray []float32
var numFloorTiles, numWallTiles int
var fov float64
var projection mgl32.Mat4
const (
FOG_DISTANCE float32 = 8.0
)
func init() {
runtime.LockOSThread()
}
func main() {
keys = map[glfw.Key]bool{}
rand.Seed(time.Now().Unix())
player := &Player{
Yaw: 0,
Pitch: 0,
X: 0,
Y: 0,
Speed: 2,
Size: 0.7,
Health: 1,
}
fov = 90.0
fmt.Println("Generating Dungeon...")
dungeon := dungeon.NewDungeon(50, 200)
fmt.Println("Generated!")
room := dungeon.Rooms[0]
player.X = float64(room.Y + room.Height/2)
player.Y = float64(room.X + room.Width/2)
dungeon.Print()
enemies := []*Enemy{}
bloods := []*Blood{}
vertexArray = []float32{
// Enemy Sprite
-0.3, 0.6, 0, 1, 0,
0.3, 0.6, 0, 0, 0,
-0.3, 0.0, 0, 1, 1,
0.3, 0.6, 0, 0, 0,
0.3, 0.0, 0, 0, 1,
-0.3, 0.0, 0, 1, 1,
// Blood Sprite
-0.5, 0, -0.5, 0, 0,
0.5, 0, -0.5, 1, 0,
-0.5, 0, 0.5, 0, 1,
0.5, 0, -0.5, 1, 0,
0.5, 0, 0.5, 1, 1,
-0.5, 0, 0.5, 0, 1,
}
numFloorTiles = 0
numWallTiles = 0
for y, row := range dungeon.Grid {
for x, col := range row {
if col == 1 {
vertexArray = append(vertexArray, FloorTile(x, y)...)
numFloorTiles++
if rand.Int()%10 == 0 {
enemies = append(enemies, &Enemy{
X: float64(x),
Y: float64(y),
Size: 0.5,
DPS: 0.1,
})
}
}
}
}
for y, row := range dungeon.Grid {
for x, col := range row {
if col == 1 {
if y > 0 && dungeon.Grid[y-1][x] == 0 || y == 0 {
vertexArray = append(vertexArray, WallTile(x, y, true, mgl32.Vec2{0, -0.5})...)
numWallTiles++
}
if y < len(dungeon.Grid)-1 && dungeon.Grid[y+1][x] == 0 || y == len(dungeon.Grid)-1 {
vertexArray = append(vertexArray, WallTile(x, y, true, mgl32.Vec2{0, 0.5})...)
numWallTiles++
}
if x > 0 && dungeon.Grid[y][x-1] == 0 || x == 0 {
vertexArray = append(vertexArray, WallTile(x, y, false, mgl32.Vec2{-0.5, 0})...)
numWallTiles++
}
if x < len(row)-1 && dungeon.Grid[y][x+1] == 0 || x == len(row)-1 {
vertexArray = append(vertexArray, WallTile(x, y, false, mgl32.Vec2{0.5, 0})...)
numWallTiles++
}
}
}
}
err := glfw.Init()
check(err)
defer glfw.Terminate()
glfw.WindowHint(glfw.Resizable, glfw.True)
platform.WHint() //platform-specific window hinting
window, err := glfw.CreateWindow(screenWidth, screenHeight, "Dungeon", nil, nil)
check(err)
window.MakeContextCurrent()
window.SetInputMode(glfw.CursorMode, glfw.CursorDisabled)
window.SetKeyCallback(glfw.KeyCallback(KeyCallback))
window.SetSizeCallback(glfw.SizeCallback(SizeCallback))
window.SetMouseButtonCallback(glfw.MouseButtonCallback(MouseButtonCallback))
fmt.Println("Initializing GL")
err = gl.Init()
check(err)
fmt.Println("Loading Shaders")
vertexShader, err := ioutil.ReadFile("shaders/shader.vert")
check(err)
fragmentShader, err := ioutil.ReadFile("shaders/shader.frag")
check(err)
program, err := newProgram(string(vertexShader)+"\x00", string(fragmentShader)+"\x00")
check(err)
gl.UseProgram(program)
projection = mgl32.Perspective(mgl32.DegToRad(float32(fov)), float32(screenWidth)/float32(screenHeight), 0.001, 20.0)
//camera := mgl32.LookAtV(mgl32.Vec3{3, 1, 0}, mgl32.Vec3{2, 1, 0}, mgl32.Vec3{0, 1, 0})
camera := mgl32.LookAtV(mgl32.Vec3{0, 1, 0}, mgl32.Vec3{1, 1, 0}, mgl32.Vec3{0, 1, 0})
viewProjUniform := gl.GetUniformLocation(program, gl.Str("viewProj\x00"))
viewProj := camera.Mul4(projection)
gl.UniformMatrix4fv(viewProjUniform, 1, false, &viewProj[0])
model := mgl32.Ident4()
modelUniform := gl.GetUniformLocation(program, gl.Str("model\x00"))
gl.UniformMatrix4fv(modelUniform, 1, false, &model[0])
textureUniform := gl.GetUniformLocation(program, gl.Str("tex\x00"))
gl.Uniform1i(textureUniform, 0)
fogDistUniform := gl.GetUniformLocation(program, gl.Str("fogDist\x00"))
gl.Uniform1f(fogDistUniform, FOG_DISTANCE)
fmt.Println("Loading Textures")
floorTexture, err := newTexture("textures/floor.png")
check(err)
wallTexture, err := newTexture("textures/wall.png")
check(err)
enemyTexture, err := newTexture("textures/monster.png")
check(err)
bloodTexture, err := newTexture("textures/blood.png")
check(err)
var vao uint32
gl.GenVertexArrays(1, &vao)
gl.BindVertexArray(vao)
var vbo uint32
gl.GenBuffers(1, &vbo)
gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
gl.BufferData(gl.ARRAY_BUFFER, len(vertexArray)*4, gl.Ptr(vertexArray), gl.STATIC_DRAW)
vertAttrib := uint32(gl.GetAttribLocation(program, gl.Str("vert\x00")))
gl.EnableVertexAttribArray(vertAttrib)
gl.VertexAttribPointer(vertAttrib, 3, gl.FLOAT, false, 5*4, gl.PtrOffset(0))
texCoordAttrib := uint32(gl.GetAttribLocation(program, gl.Str("vertTexCoord\x00")))
gl.EnableVertexAttribArray(texCoordAttrib)
gl.VertexAttribPointer(texCoordAttrib, 2, gl.FLOAT, false, 5*4, gl.PtrOffset(3*4))
gl.Enable(gl.DEPTH_TEST)
gl.DepthFunc(gl.LESS)
gl.Enable(gl.BLEND)
gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
gl.ClearColor(0, 0, 0, 1)
previousTime := glfw.GetTime()
lastFPS := previousTime
fps := 0
for !window.ShouldClose() {
gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)
time := glfw.GetTime()
delta := time - previousTime
previousTime = time
gl.UseProgram(program)
fps++
if time-lastFPS > 1
|
mouseSensitivity := 0.75
mouseX, mouseY := window.GetCursorPos()
window.SetCursorPos(float64(screenWidth/2), float64(screenHeight/2))
ratio := float64(screenWidth) / float64(screenHeight)
mouseDeltaX := float64(screenWidth/2) - mouseX
mouseDeltaY := float64(screenHeight/2) - mouseY
player
|
{
fmt.Println("FPS is ", fps)
lastFPS = time
fps = 0
}
|
conditional_block
|
game.go
|
, 1,
-0.5*xAxis + x + offset.X(), 1, -0.5*zAxis + z + offset.Y(), 1, 1,
}
}
func check(e error)
|
var screenWidth = 800
var screenHeight = 600
var vertexArray []float32
var numFloorTiles, numWallTiles int
var fov float64
var projection mgl32.Mat4
const (
FOG_DISTANCE float32 = 8.0
)
func init() {
runtime.LockOSThread()
}
func main() {
keys = map[glfw.Key]bool{}
rand.Seed(time.Now().Unix())
player := &Player{
Yaw: 0,
Pitch: 0,
X: 0,
Y: 0,
Speed: 2,
Size: 0.7,
Health: 1,
}
fov = 90.0
fmt.Println("Generating Dungeon...")
dungeon := dungeon.NewDungeon(50, 200)
fmt.Println("Generated!")
room := dungeon.Rooms[0]
player.X = float64(room.Y + room.Height/2)
player.Y = float64(room.X + room.Width/2)
dungeon.Print()
enemies := []*Enemy{}
bloods := []*Blood{}
vertexArray = []float32{
// Enemy Sprite
-0.3, 0.6, 0, 1, 0,
0.3, 0.6, 0, 0, 0,
-0.3, 0.0, 0, 1, 1,
0.3, 0.6, 0, 0, 0,
0.3, 0.0, 0, 0, 1,
-0.3, 0.0, 0, 1, 1,
// Blood Sprite
-0.5, 0, -0.5, 0, 0,
0.5, 0, -0.5, 1, 0,
-0.5, 0, 0.5, 0, 1,
0.5, 0, -0.5, 1, 0,
0.5, 0, 0.5, 1, 1,
-0.5, 0, 0.5, 0, 1,
}
numFloorTiles = 0
numWallTiles = 0
for y, row := range dungeon.Grid {
for x, col := range row {
if col == 1 {
vertexArray = append(vertexArray, FloorTile(x, y)...)
numFloorTiles++
if rand.Int()%10 == 0 {
enemies = append(enemies, &Enemy{
X: float64(x),
Y: float64(y),
Size: 0.5,
DPS: 0.1,
})
}
}
}
}
for y, row := range dungeon.Grid {
for x, col := range row {
if col == 1 {
if y > 0 && dungeon.Grid[y-1][x] == 0 || y == 0 {
vertexArray = append(vertexArray, WallTile(x, y, true, mgl32.Vec2{0, -0.5})...)
numWallTiles++
}
if y < len(dungeon.Grid)-1 && dungeon.Grid[y+1][x] == 0 || y == len(dungeon.Grid)-1 {
vertexArray = append(vertexArray, WallTile(x, y, true, mgl32.Vec2{0, 0.5})...)
numWallTiles++
}
if x > 0 && dungeon.Grid[y][x-1] == 0 || x == 0 {
vertexArray = append(vertexArray, WallTile(x, y, false, mgl32.Vec2{-0.5, 0})...)
numWallTiles++
}
if x < len(row)-1 && dungeon.Grid[y][x+1] == 0 || x == len(row)-1 {
vertexArray = append(vertexArray, WallTile(x, y, false, mgl32.Vec2{0.5, 0})...)
numWallTiles++
}
}
}
}
err := glfw.Init()
check(err)
defer glfw.Terminate()
glfw.WindowHint(glfw.Resizable, glfw.True)
platform.WHint() //platform-specific window hinting
window, err := glfw.CreateWindow(screenWidth, screenHeight, "Dungeon", nil, nil)
check(err)
window.MakeContextCurrent()
window.SetInputMode(glfw.CursorMode, glfw.CursorDisabled)
window.SetKeyCallback(glfw.KeyCallback(KeyCallback))
window.SetSizeCallback(glfw.SizeCallback(SizeCallback))
window.SetMouseButtonCallback(glfw.MouseButtonCallback(MouseButtonCallback))
fmt.Println("Initializing GL")
err = gl.Init()
check(err)
fmt.Println("Loading Shaders")
vertexShader, err := ioutil.ReadFile("shaders/shader.vert")
check(err)
fragmentShader, err := ioutil.ReadFile("shaders/shader.frag")
check(err)
program, err := newProgram(string(vertexShader)+"\x00", string(fragmentShader)+"\x00")
check(err)
gl.UseProgram(program)
projection = mgl32.Perspective(mgl32.DegToRad(float32(fov)), float32(screenWidth)/float32(screenHeight), 0.001, 20.0)
//camera := mgl32.LookAtV(mgl32.Vec3{3, 1, 0}, mgl32.Vec3{2, 1, 0}, mgl32.Vec3{0, 1, 0})
camera := mgl32.LookAtV(mgl32.Vec3{0, 1, 0}, mgl32.Vec3{1, 1, 0}, mgl32.Vec3{0, 1, 0})
viewProjUniform := gl.GetUniformLocation(program, gl.Str("viewProj\x00"))
viewProj := camera.Mul4(projection)
gl.UniformMatrix4fv(viewProjUniform, 1, false, &viewProj[0])
model := mgl32.Ident4()
modelUniform := gl.GetUniformLocation(program, gl.Str("model\x00"))
gl.UniformMatrix4fv(modelUniform, 1, false, &model[0])
textureUniform := gl.GetUniformLocation(program, gl.Str("tex\x00"))
gl.Uniform1i(textureUniform, 0)
fogDistUniform := gl.GetUniformLocation(program, gl.Str("fogDist\x00"))
gl.Uniform1f(fogDistUniform, FOG_DISTANCE)
fmt.Println("Loading Textures")
floorTexture, err := newTexture("textures/floor.png")
check(err)
wallTexture, err := newTexture("textures/wall.png")
check(err)
enemyTexture, err := newTexture("textures/monster.png")
check(err)
bloodTexture, err := newTexture("textures/blood.png")
check(err)
var vao uint32
gl.GenVertexArrays(1, &vao)
gl.BindVertexArray(vao)
var vbo uint32
gl.GenBuffers(1, &vbo)
gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
gl.BufferData(gl.ARRAY_BUFFER, len(vertexArray)*4, gl.Ptr(vertexArray), gl.STATIC_DRAW)
vertAttrib := uint32(gl.GetAttribLocation(program, gl.Str("vert\x00")))
gl.EnableVertexAttribArray(vertAttrib)
gl.VertexAttribPointer(vertAttrib, 3, gl.FLOAT, false, 5*4, gl.PtrOffset(0))
texCoordAttrib := uint32(gl.GetAttribLocation(program, gl.Str("vertTexCoord\x00")))
gl.EnableVertexAttribArray(texCoordAttrib)
gl.VertexAttribPointer(texCoordAttrib, 2, gl.FLOAT, false, 5*4, gl.PtrOffset(3*4))
gl.Enable(gl.DEPTH_TEST)
gl.DepthFunc(gl.LESS)
gl.Enable(gl.BLEND)
gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
gl.ClearColor(0, 0, 0, 1)
previousTime := glfw.GetTime()
lastFPS := previousTime
fps := 0
for !window.ShouldClose() {
gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)
time := glfw.GetTime()
delta := time - previousTime
previousTime = time
gl.UseProgram(program)
fps++
if time-lastFPS > 1 {
fmt.Println("FPS is ", fps)
lastFPS = time
fps = 0
}
mouseSensitivity := 0.75
mouseX, mouseY := window.GetCursorPos()
window.SetCursorPos(float64(screenWidth/2), float64(screenHeight/2))
ratio := float64(screenWidth) / float64(screenHeight)
mouseDeltaX := float64(screenWidth/2) - mouseX
mouseDeltaY := float64(screenHeight/2) - mouseY
player
|
{
if e != nil {
panic(e)
}
}
|
identifier_body
|
game.go
|
0.6, 0, 1, 0,
0.3, 0.6, 0, 0, 0,
-0.3, 0.0, 0, 1, 1,
0.3, 0.6, 0, 0, 0,
0.3, 0.0, 0, 0, 1,
-0.3, 0.0, 0, 1, 1,
// Blood Sprite
-0.5, 0, -0.5, 0, 0,
0.5, 0, -0.5, 1, 0,
-0.5, 0, 0.5, 0, 1,
0.5, 0, -0.5, 1, 0,
0.5, 0, 0.5, 1, 1,
-0.5, 0, 0.5, 0, 1,
}
numFloorTiles = 0
numWallTiles = 0
for y, row := range dungeon.Grid {
for x, col := range row {
if col == 1 {
vertexArray = append(vertexArray, FloorTile(x, y)...)
numFloorTiles++
if rand.Int()%10 == 0 {
enemies = append(enemies, &Enemy{
X: float64(x),
Y: float64(y),
Size: 0.5,
DPS: 0.1,
})
}
}
}
}
for y, row := range dungeon.Grid {
for x, col := range row {
if col == 1 {
if y > 0 && dungeon.Grid[y-1][x] == 0 || y == 0 {
vertexArray = append(vertexArray, WallTile(x, y, true, mgl32.Vec2{0, -0.5})...)
numWallTiles++
}
if y < len(dungeon.Grid)-1 && dungeon.Grid[y+1][x] == 0 || y == len(dungeon.Grid)-1 {
vertexArray = append(vertexArray, WallTile(x, y, true, mgl32.Vec2{0, 0.5})...)
numWallTiles++
}
if x > 0 && dungeon.Grid[y][x-1] == 0 || x == 0 {
vertexArray = append(vertexArray, WallTile(x, y, false, mgl32.Vec2{-0.5, 0})...)
numWallTiles++
}
if x < len(row)-1 && dungeon.Grid[y][x+1] == 0 || x == len(row)-1 {
vertexArray = append(vertexArray, WallTile(x, y, false, mgl32.Vec2{0.5, 0})...)
numWallTiles++
}
}
}
}
err := glfw.Init()
check(err)
defer glfw.Terminate()
glfw.WindowHint(glfw.Resizable, glfw.True)
platform.WHint() //platform-specific window hinting
window, err := glfw.CreateWindow(screenWidth, screenHeight, "Dungeon", nil, nil)
check(err)
window.MakeContextCurrent()
window.SetInputMode(glfw.CursorMode, glfw.CursorDisabled)
window.SetKeyCallback(glfw.KeyCallback(KeyCallback))
window.SetSizeCallback(glfw.SizeCallback(SizeCallback))
window.SetMouseButtonCallback(glfw.MouseButtonCallback(MouseButtonCallback))
fmt.Println("Initializing GL")
err = gl.Init()
check(err)
fmt.Println("Loading Shaders")
vertexShader, err := ioutil.ReadFile("shaders/shader.vert")
check(err)
fragmentShader, err := ioutil.ReadFile("shaders/shader.frag")
check(err)
program, err := newProgram(string(vertexShader)+"\x00", string(fragmentShader)+"\x00")
check(err)
gl.UseProgram(program)
projection = mgl32.Perspective(mgl32.DegToRad(float32(fov)), float32(screenWidth)/float32(screenHeight), 0.001, 20.0)
//camera := mgl32.LookAtV(mgl32.Vec3{3, 1, 0}, mgl32.Vec3{2, 1, 0}, mgl32.Vec3{0, 1, 0})
camera := mgl32.LookAtV(mgl32.Vec3{0, 1, 0}, mgl32.Vec3{1, 1, 0}, mgl32.Vec3{0, 1, 0})
viewProjUniform := gl.GetUniformLocation(program, gl.Str("viewProj\x00"))
viewProj := camera.Mul4(projection)
gl.UniformMatrix4fv(viewProjUniform, 1, false, &viewProj[0])
model := mgl32.Ident4()
modelUniform := gl.GetUniformLocation(program, gl.Str("model\x00"))
gl.UniformMatrix4fv(modelUniform, 1, false, &model[0])
textureUniform := gl.GetUniformLocation(program, gl.Str("tex\x00"))
gl.Uniform1i(textureUniform, 0)
fogDistUniform := gl.GetUniformLocation(program, gl.Str("fogDist\x00"))
gl.Uniform1f(fogDistUniform, FOG_DISTANCE)
fmt.Println("Loading Textures")
floorTexture, err := newTexture("textures/floor.png")
check(err)
wallTexture, err := newTexture("textures/wall.png")
check(err)
enemyTexture, err := newTexture("textures/monster.png")
check(err)
bloodTexture, err := newTexture("textures/blood.png")
check(err)
var vao uint32
gl.GenVertexArrays(1, &vao)
gl.BindVertexArray(vao)
var vbo uint32
gl.GenBuffers(1, &vbo)
gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
gl.BufferData(gl.ARRAY_BUFFER, len(vertexArray)*4, gl.Ptr(vertexArray), gl.STATIC_DRAW)
vertAttrib := uint32(gl.GetAttribLocation(program, gl.Str("vert\x00")))
gl.EnableVertexAttribArray(vertAttrib)
gl.VertexAttribPointer(vertAttrib, 3, gl.FLOAT, false, 5*4, gl.PtrOffset(0))
texCoordAttrib := uint32(gl.GetAttribLocation(program, gl.Str("vertTexCoord\x00")))
gl.EnableVertexAttribArray(texCoordAttrib)
gl.VertexAttribPointer(texCoordAttrib, 2, gl.FLOAT, false, 5*4, gl.PtrOffset(3*4))
gl.Enable(gl.DEPTH_TEST)
gl.DepthFunc(gl.LESS)
gl.Enable(gl.BLEND)
gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
gl.ClearColor(0, 0, 0, 1)
previousTime := glfw.GetTime()
lastFPS := previousTime
fps := 0
for !window.ShouldClose() {
gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)
time := glfw.GetTime()
delta := time - previousTime
previousTime = time
gl.UseProgram(program)
fps++
if time-lastFPS > 1 {
fmt.Println("FPS is ", fps)
lastFPS = time
fps = 0
}
mouseSensitivity := 0.75
mouseX, mouseY := window.GetCursorPos()
window.SetCursorPos(float64(screenWidth/2), float64(screenHeight/2))
ratio := float64(screenWidth) / float64(screenHeight)
mouseDeltaX := float64(screenWidth/2) - mouseX
mouseDeltaY := float64(screenHeight/2) - mouseY
player.Yaw -= mouseSensitivity * delta * mouseDeltaX
player.Pitch += mouseSensitivity * delta * mouseDeltaY * ratio
//fmt.Println(yaw/math.Pi*360)
if player.Pitch > math.Pi/2 {
player.Pitch = math.Pi / 2
}
if player.Pitch < -math.Pi/2 {
player.Pitch = -math.Pi / 2
}
direction := mgl32.Vec2{0, 0}
if keys[glfw.KeyW] {
direction = direction.Add(mgl32.Vec2{0, 1})
}
if keys[glfw.KeyS] {
direction = direction.Add(mgl32.Vec2{0, -1})
}
if keys[glfw.KeyA] {
direction = direction.Add(mgl32.Vec2{1, 0})
}
if keys[glfw.KeyD] {
direction = direction.Add(mgl32.Vec2{-1, 0})
}
direction = direction.Normalize()
if direction.Len() > 0 {
boost := 1.0
if keys[glfw.KeyLeftShift] {
boost = 2.0
}
cos := float32(math.Cos(player.Yaw - math.Pi/2))
|
sin := float32(math.Sin(player.Yaw - math.Pi/2))
rotated := mgl32.Vec2{
|
random_line_split
|
|
service_map.py
|
-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Maps service instances to locations. See class.__doc__ """
from collections import defaultdict
from datetime import datetime
from sys import maxsize
from sqlalchemy import (Column, Integer, Sequence, DateTime, ForeignKey,
UniqueConstraint, CheckConstraint)
from sqlalchemy.orm import (relation, deferred, backref, defer, undefer,
lazyload, contains_eager, object_session)
from sqlalchemy.sql import and_, or_, null, case
from sqlalchemy.sql.functions import coalesce
from aquilon.exceptions_ import InternalError, AquilonError
from aquilon.aqdb.model import (Base, Location, Desk, Rack, Room, Bunker,
Building, City, Campus, Country, Continent, Hub,
Organization, ServiceInstance, Network, Personality,
PersonalityServiceListItem, HostEnvironment)
_TN = 'service_map'
# TODO: We could calculate this map by building a graph of Location subclasses
# using Location.valid_parents as edges, and then doing a topological sort
# NOTE: The actual values here are unimportant, what matters is their order
_LOCATION_PRIORITY = {
# Rack and Desk are at the same level
Rack: 1000,
Desk: 1000,
Room: 1100,
Bunker: 1200,
Building: 1300,
City: 1400,
Campus: 1500,
Country: 1600,
Continent: 1700,
Hub: 1800,
Organization: 1900,
}
# NOTE: The actual value here is unimportant, what matters is the order wrt.
# location-based priorities
_NETWORK_PRIORITY = 100
# NOTE: The actual values here are unimportant, only their order matters
_TARGET_PERSONALITY = 10
_TARGET_ENVIRONMENT = 100
_TARGET_GLOBAL = 1000
class ServiceMap(Base):
""" Service Map: mapping a service_instance to a location.
The rows in this table assert that an instance is a valid useable
default that clients can choose as their provider during service
autoconfiguration.
The contained information is actually a triplet:
- The service instance to use,
- Rules for the scope where the map is valid,
- Rules for which objects does the map apply.
"""
__tablename__ = _TN
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
service_instance_id = Column(ForeignKey(ServiceInstance.id,
ondelete='CASCADE'),
nullable=False)
personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),
nullable=True, index=True)
host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)
location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),
nullable=True, index=True)
network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),
nullable=True, index=True)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
service_instance = relation(ServiceInstance, innerjoin=True,
backref=backref('service_map',
cascade="all, delete-orphan",
passive_deletes=True))
personality = relation(Personality)
host_environment = relation(HostEnvironment)
location = relation(Location)
network = relation(Network)
__table_args__ = (UniqueConstraint(service_instance_id,
personality_id, host_environment_id,
location_id, network_id,
name='%s_uk' % _TN),
# At most one of personality_id and host_environment_id
# can be not NULL
CheckConstraint(case([(personality_id != null(), 1)], else_=0) +
case([(host_environment_id != null(), 1)], else_=0) <= 1,
name='%s_target_ck' % _TN))
@property
def service(self):
return self.service_instance.service
@property
def scope_priority(self):
|
@property
def object_priority(self):
if self.personality:
return _TARGET_PERSONALITY
elif self.host_environment:
return _TARGET_ENVIRONMENT
else:
return _TARGET_GLOBAL
@property
def priority(self):
return (self.object_priority, self.scope_priority)
@property
def scope(self):
if self.location:
return self.location
else:
return self.network
def __init__(self, service_instance, network=None, location=None, personality=None,
host_environment=None):
if network and location: # pragma: no cover
raise AquilonError("A service can't be mapped to a Network and a "
"Location at the same time")
if network is None and location is None: # pragma: no cover
raise AquilonError("A service should by mapped to a Network or a "
"Location")
if personality and host_environment: # pragma: no cover
raise AquilonError("A service can't be mapped to a Personality and "
"a HostEnvironment at the same time")
super(ServiceMap, self).__init__(service_instance=service_instance,
network=network, location=location,
personality=personality,
host_environment=host_environment)
@staticmethod
def get_location_mapped_instances(dbservice, dblocation):
# Simplified service map lookup - single service, location-based maps
# only, no client bindings
session = object_session(dbservice)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
q = session.query(ServiceMap)
q = q.filter(and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()))
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.join(ServiceInstance)
q = q.filter_by(service=dbservice)
q = q.options(contains_eager('service_instance'),
defer('service_instance.comments'),
lazyload('service_instance.service'))
instances = []
min_seen_priority = (maxsize,)
# We want the instance(s) with the lowest priority
for map in q:
si = map.service_instance
if min_seen_priority > map.priority:
instances = [si]
min_seen_priority = map.priority
elif min_seen_priority == map.priority:
instances.append(si)
return instances
@staticmethod
def get_mapped_instance_cache(dbservices, dbstage, dblocation,
dbnetwork=None):
"""Returns dict of requested services to closest mapped instances."""
session = object_session(dblocation)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
PSLI = PersonalityServiceListItem
q = session.query(ServiceMap)
q = q.join(ServiceInstance)
q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in dbservices))
q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,
PSLI.service_id == ServiceInstance.service_id))
# Rules for filtering by target object
q = q.filter(or_(
and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()),
ServiceMap.personality == dbstage.personality,
ServiceMap.host_environment_id == coalesce(
PSLI.host_environment_id,
dbstage.personality.host_environment.id)))
# Rules for filtering by location/scope
if dbnetwork:
q = q.filter(or_(ServiceMap.location_id.in_(location_ids),
ServiceMap.network_id == dbnetwork.id))
else:
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.options(contains_eager('service_instance'),
defer('service_instance.comments'),
undefer('service_instance._client_count'),
lazyload('service_instance.service'))
instance_cache = {}
instance_priority = defaultdict(lambda: (maxsize,))
# For every service, we want the instance(s) with the lowest priority
for map in q:
si = map.service_instance
service = si.service
if instance_priority[service] > map.priority:
instance_cache[service] = [si]
instance_priority[service] = map.priority
elif instance_priority[service] == map.priority:
|
if self.network:
return _NETWORK_PRIORITY
else:
try:
return _LOCATION_PRIORITY[type(self.location)]
except KeyError: # pragma: no cover
raise InternalError("The service map is not prepared to handle "
"location class %r" % type(self.location))
|
identifier_body
|
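The `identifier_body` held out in the row above is the body of the `scope_priority` property, which feeds the `(object_priority, scope_priority)` tuple that `get_location_mapped_instances` and `get_mapped_instance_cache` minimise. A small illustration of that ordering, using constants copied from the sample:

```python
# Sketch of ServiceMap.priority ordering: tuples compare lexicographically and
# the lowest wins, so a personality-scoped map on a Rack beats a global map on
# a Building. Values are copied from the sample above.
_TARGET_PERSONALITY, _TARGET_GLOBAL = 10, 1000
_RACK, _BUILDING = 1000, 1300  # from _LOCATION_PRIORITY
personality_rack = (_TARGET_PERSONALITY, _RACK)
global_building = (_TARGET_GLOBAL, _BUILDING)
assert personality_rack < global_building
```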
service_map.py
|
-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Maps service instances to locations. See class.__doc__ """
from collections import defaultdict
from datetime import datetime
from sys import maxsize
from sqlalchemy import (Column, Integer, Sequence, DateTime, ForeignKey,
UniqueConstraint, CheckConstraint)
from sqlalchemy.orm import (relation, deferred, backref, defer, undefer,
lazyload, contains_eager, object_session)
from sqlalchemy.sql import and_, or_, null, case
from sqlalchemy.sql.functions import coalesce
from aquilon.exceptions_ import InternalError, AquilonError
from aquilon.aqdb.model import (Base, Location, Desk, Rack, Room, Bunker,
Building, City, Campus, Country, Continent, Hub,
Organization, ServiceInstance, Network, Personality,
PersonalityServiceListItem, HostEnvironment)
_TN = 'service_map'
# TODO: We could calculate this map by building a graph of Location subclasses
# using Location.valid_parents as edges, and then doing a topological sort
# NOTE: The actual values here are unimportant, what matters is their order
_LOCATION_PRIORITY = {
# Rack and Desk are at the same level
Rack: 1000,
Desk: 1000,
Room: 1100,
Bunker: 1200,
Building: 1300,
City: 1400,
Campus: 1500,
Country: 1600,
Continent: 1700,
Hub: 1800,
Organization: 1900,
}
# NOTE: The actual value here is unimportant, what matters is the order wrt.
# location-based priorities
_NETWORK_PRIORITY = 100
# NOTE: The actual values here are unimportant, only their order matters
_TARGET_PERSONALITY = 10
_TARGET_ENVIRONMENT = 100
_TARGET_GLOBAL = 1000
class ServiceMap(Base):
""" Service Map: mapping a service_instance to a location.
The rows in this table assert that an instance is a valid useable
default that clients can choose as their provider during service
autoconfiguration.
The contained information is actually a triplet:
- The service instance to use,
- Rules for the scope where the map is valid,
- Rules for which objects does the map apply.
"""
__tablename__ = _TN
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
service_instance_id = Column(ForeignKey(ServiceInstance.id,
ondelete='CASCADE'),
nullable=False)
personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),
nullable=True, index=True)
host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)
location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),
nullable=True, index=True)
network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),
nullable=True, index=True)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
service_instance = relation(ServiceInstance, innerjoin=True,
backref=backref('service_map',
cascade="all, delete-orphan",
passive_deletes=True))
personality = relation(Personality)
host_environment = relation(HostEnvironment)
location = relation(Location)
network = relation(Network)
__table_args__ = (UniqueConstraint(service_instance_id,
personality_id, host_environment_id,
location_id, network_id,
name='%s_uk' % _TN),
# At most one of personality_id and host_environment_id
# can be not NULL
CheckConstraint(case([(personality_id != null(), 1)], else_=0) +
case([(host_environment_id != null(), 1)], else_=0) <= 1,
name='%s_target_ck' % _TN))
@property
def service(self):
return self.service_instance.service
@property
def scope_priority(self):
if self.network:
return _NETWORK_PRIORITY
else:
try:
return _LOCATION_PRIORITY[type(self.location)]
except KeyError: # pragma: no cover
raise InternalError("The service map is not prepared to handle "
"location class %r" % type(self.location))
@property
def object_priority(self):
if self.personality:
return _TARGET_PERSONALITY
elif self.host_environment:
return _TARGET_ENVIRONMENT
else:
return _TARGET_GLOBAL
@property
def priority(self):
return (self.object_priority, self.scope_priority)
@property
def scope(self):
if self.location:
return self.location
else:
return self.network
def
|
(self, service_instance, network=None, location=None, personality=None,
host_environment=None):
if network and location: # pragma: no cover
raise AquilonError("A service can't be mapped to a Network and a "
"Location at the same time")
if network is None and location is None: # pragma: no cover
raise AquilonError("A service should by mapped to a Network or a "
"Location")
if personality and host_environment: # pragma: no cover
raise AquilonError("A service can't be mapped to a Personality and "
"a HostEnvironment at the same time")
super(ServiceMap, self).__init__(service_instance=service_instance,
network=network, location=location,
personality=personality,
host_environment=host_environment)
@staticmethod
def get_location_mapped_instances(dbservice, dblocation):
# Simplified service map lookup - single service, location-based maps
# only, no client bindings
session = object_session(dbservice)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
q = session.query(ServiceMap)
q = q.filter(and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()))
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.join(ServiceInstance)
q = q.filter_by(service=dbservice)
q = q.options(contains_eager('service_instance'),
defer('service_instance.comments'),
lazyload('service_instance.service'))
instances = []
min_seen_priority = (maxsize,)
# We want the instance(s) with the lowest priority
for map in q:
si = map.service_instance
if min_seen_priority > map.priority:
instances = [si]
min_seen_priority = map.priority
elif min_seen_priority == map.priority:
instances.append(si)
return instances
@staticmethod
def get_mapped_instance_cache(dbservices, dbstage, dblocation,
dbnetwork=None):
"""Returns dict of requested services to closest mapped instances."""
session = object_session(dblocation)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
PSLI = PersonalityServiceListItem
q = session.query(ServiceMap)
q = q.join(ServiceInstance)
q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in dbservices))
q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,
PSLI.service_id == ServiceInstance.service_id))
# Rules for filtering by target object
q = q.filter(or_(
and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()),
ServiceMap.personality == dbstage.personality,
ServiceMap.host_environment_id == coalesce(
PSLI.host_environment_id,
dbstage.personality.host_environment.id)))
# Rules for filtering by location/scope
if dbnetwork:
q = q.filter(or_(ServiceMap.location_id.in_(location_ids),
ServiceMap.network_id == dbnetwork.id))
else:
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.options(contains_eager('service_instance'),
defer('service_instance.comments'),
undefer('service_instance._client_count'),
lazyload('service_instance.service'))
instance_cache = {}
instance_priority = defaultdict(lambda: (maxsize,))
# For every service, we want the instance(s) with the lowest priority
for map in q:
si = map.service_instance
service = si.service
if instance_priority[service] > map.priority:
instance_cache[service] = [si]
instance_priority[service] = map.priority
elif instance_priority[service] == map.priority:
|
__init__
|
identifier_name
|
service_map.py
|
-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Maps service instances to locations. See class.__doc__ """
from collections import defaultdict
from datetime import datetime
from sys import maxsize
from sqlalchemy import (Column, Integer, Sequence, DateTime, ForeignKey,
UniqueConstraint, CheckConstraint)
from sqlalchemy.orm import (relation, deferred, backref, defer, undefer,
lazyload, contains_eager, object_session)
from sqlalchemy.sql import and_, or_, null, case
from sqlalchemy.sql.functions import coalesce
from aquilon.exceptions_ import InternalError, AquilonError
from aquilon.aqdb.model import (Base, Location, Desk, Rack, Room, Bunker,
Building, City, Campus, Country, Continent, Hub,
Organization, ServiceInstance, Network, Personality,
PersonalityServiceListItem, HostEnvironment)
_TN = 'service_map'
# TODO: We could calculate this map by building a graph of Location subclasses
# using Location.valid_parents as edges, and then doing a topological sort
# NOTE: The actual values here are unimportant, what matters is their order
_LOCATION_PRIORITY = {
# Rack and Desk are at the same level
Rack: 1000,
Desk: 1000,
Room: 1100,
Bunker: 1200,
Building: 1300,
City: 1400,
Campus: 1500,
Country: 1600,
Continent: 1700,
Hub: 1800,
Organization: 1900,
}
# NOTE: The actual value here is unimportant, what matters is the order wrt.
# location-based priorities
_NETWORK_PRIORITY = 100
# NOTE: The actual values here are unimportant, only their order matters
_TARGET_PERSONALITY = 10
_TARGET_ENVIRONMENT = 100
_TARGET_GLOBAL = 1000
class ServiceMap(Base):
""" Service Map: mapping a service_instance to a location.
    The rows in this table assert that an instance is a valid usable
default that clients can choose as their provider during service
autoconfiguration.
The contained information is actually a triplet:
- The service instance to use,
- Rules for the scope where the map is valid,
    - Rules for which objects the map applies to.
"""
__tablename__ = _TN
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
service_instance_id = Column(ForeignKey(ServiceInstance.id,
ondelete='CASCADE'),
nullable=False)
personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),
nullable=True, index=True)
host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)
location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),
nullable=True, index=True)
network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),
nullable=True, index=True)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
service_instance = relation(ServiceInstance, innerjoin=True,
backref=backref('service_map',
cascade="all, delete-orphan",
passive_deletes=True))
personality = relation(Personality)
host_environment = relation(HostEnvironment)
location = relation(Location)
network = relation(Network)
__table_args__ = (UniqueConstraint(service_instance_id,
personality_id, host_environment_id,
location_id, network_id,
name='%s_uk' % _TN),
# At most one of personality_id and host_environment_id
# can be not NULL
CheckConstraint(case([(personality_id != null(), 1)], else_=0) +
case([(host_environment_id != null(), 1)], else_=0) <= 1,
name='%s_target_ck' % _TN))
@property
def service(self):
return self.service_instance.service
@property
def scope_priority(self):
if self.network:
return _NETWORK_PRIORITY
else:
try:
return _LOCATION_PRIORITY[type(self.location)]
except KeyError: # pragma: no cover
raise InternalError("The service map is not prepared to handle "
"location class %r" % type(self.location))
@property
def object_priority(self):
if self.personality:
return _TARGET_PERSONALITY
elif self.host_environment:
return _TARGET_ENVIRONMENT
else:
return _TARGET_GLOBAL
@property
def priority(self):
return (self.object_priority, self.scope_priority)
@property
def scope(self):
if self.location:
return self.location
else:
return self.network
def __init__(self, service_instance, network=None, location=None, personality=None,
host_environment=None):
if network and location: # pragma: no cover
raise AquilonError("A service can't be mapped to a Network and a "
"Location at the same time")
if network is None and location is None: # pragma: no cover
raise AquilonError("A service should by mapped to a Network or a "
"Location")
if personality and host_environment: # pragma: no cover
raise AquilonError("A service can't be mapped to a Personality and "
"a HostEnvironment at the same time")
super(ServiceMap, self).__init__(service_instance=service_instance,
network=network, location=location,
personality=personality,
host_environment=host_environment)
@staticmethod
def get_location_mapped_instances(dbservice, dblocation):
# Simplified service map lookup - single service, location-based maps
# only, no client bindings
session = object_session(dbservice)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
q = session.query(ServiceMap)
q = q.filter(and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()))
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.join(ServiceInstance)
q = q.filter_by(service=dbservice)
q = q.options(contains_eager('service_instance'),
defer('service_instance.comments'),
lazyload('service_instance.service'))
instances = []
min_seen_priority = (maxsize,)
# We want the instance(s) with the lowest priority
for map in q:
|
return instances
@staticmethod
def get_mapped_instance_cache(dbservices, dbstage, dblocation,
dbnetwork=None):
"""Returns dict of requested services to closest mapped instances."""
session = object_session(dblocation)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
PSLI = PersonalityServiceListItem
q = session.query(ServiceMap)
q = q.join(ServiceInstance)
q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in dbservices))
q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,
PSLI.service_id == ServiceInstance.service_id))
# Rules for filtering by target object
q = q.filter(or_(
and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()),
ServiceMap.personality == dbstage.personality,
ServiceMap.host_environment_id == coalesce(
PSLI.host_environment_id,
dbstage.personality.host_environment.id)))
# Rules for filtering by location/scope
if dbnetwork:
q = q.filter(or_(ServiceMap.location_id.in_(location_ids),
ServiceMap.network_id == dbnetwork.id))
else:
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.options(contains_eager('service_instance'),
defer('service_instance.comments'),
undefer('service_instance._client_count'),
lazyload('service_instance.service'))
instance_cache = {}
instance_priority = defaultdict(lambda: (maxsize,))
# For every service, we want the instance(s) with the lowest priority
for map in q:
si = map.service_instance
service = si.service
if instance_priority[service] > map.priority:
instance_cache[service] = [si]
instance_priority[service] = map.priority
elif instance_priority[service] == map.priority:
|
si = map.service_instance
if min_seen_priority > map.priority:
instances = [si]
min_seen_priority = map.priority
elif min_seen_priority == map.priority:
instances.append(si)
|
conditional_block
|
service_map.py
|
-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Maps service instances to locations. See class.__doc__ """
from collections import defaultdict
from datetime import datetime
from sys import maxsize
from sqlalchemy import (Column, Integer, Sequence, DateTime, ForeignKey,
UniqueConstraint, CheckConstraint)
from sqlalchemy.orm import (relation, deferred, backref, defer, undefer,
lazyload, contains_eager, object_session)
from sqlalchemy.sql import and_, or_, null, case
from sqlalchemy.sql.functions import coalesce
from aquilon.exceptions_ import InternalError, AquilonError
from aquilon.aqdb.model import (Base, Location, Desk, Rack, Room, Bunker,
Building, City, Campus, Country, Continent, Hub,
Organization, ServiceInstance, Network, Personality,
PersonalityServiceListItem, HostEnvironment)
_TN = 'service_map'
# TODO: We could calculate this map by building a graph of Location subclasses
# using Location.valid_parents as edges, and then doing a topological sort
# NOTE: The actual values here are unimportant, what matters is their order
_LOCATION_PRIORITY = {
# Rack and Desk are at the same level
Rack: 1000,
Desk: 1000,
Room: 1100,
Bunker: 1200,
Building: 1300,
City: 1400,
Campus: 1500,
Country: 1600,
Continent: 1700,
Hub: 1800,
Organization: 1900,
}
# NOTE: The actual value here is unimportant, what matters is the order wrt.
# location-based priorities
_NETWORK_PRIORITY = 100
# NOTE: The actual values here are unimportant, only their order matters
_TARGET_PERSONALITY = 10
_TARGET_ENVIRONMENT = 100
_TARGET_GLOBAL = 1000
class ServiceMap(Base):
""" Service Map: mapping a service_instance to a location.
    The rows in this table assert that an instance is a valid usable
default that clients can choose as their provider during service
autoconfiguration.
The contained information is actually a triplet:
- The service instance to use,
- Rules for the scope where the map is valid,
    - Rules for which objects the map applies to.
"""
__tablename__ = _TN
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
service_instance_id = Column(ForeignKey(ServiceInstance.id,
ondelete='CASCADE'),
nullable=False)
personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),
nullable=True, index=True)
host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)
location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),
nullable=True, index=True)
network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),
nullable=True, index=True)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
service_instance = relation(ServiceInstance, innerjoin=True,
backref=backref('service_map',
cascade="all, delete-orphan",
passive_deletes=True))
personality = relation(Personality)
host_environment = relation(HostEnvironment)
location = relation(Location)
network = relation(Network)
__table_args__ = (UniqueConstraint(service_instance_id,
personality_id, host_environment_id,
location_id, network_id,
name='%s_uk' % _TN),
# At most one of personality_id and host_environment_id
# can be not NULL
CheckConstraint(case([(personality_id != null(), 1)], else_=0) +
case([(host_environment_id != null(), 1)], else_=0) <= 1,
name='%s_target_ck' % _TN))
@property
def service(self):
return self.service_instance.service
@property
def scope_priority(self):
if self.network:
return _NETWORK_PRIORITY
else:
try:
return _LOCATION_PRIORITY[type(self.location)]
except KeyError: # pragma: no cover
raise InternalError("The service map is not prepared to handle "
"location class %r" % type(self.location))
@property
def object_priority(self):
if self.personality:
|
elif self.host_environment:
return _TARGET_ENVIRONMENT
else:
return _TARGET_GLOBAL
@property
def priority(self):
return (self.object_priority, self.scope_priority)
@property
def scope(self):
if self.location:
return self.location
else:
return self.network
def __init__(self, service_instance, network=None, location=None, personality=None,
host_environment=None):
if network and location: # pragma: no cover
raise AquilonError("A service can't be mapped to a Network and a "
"Location at the same time")
if network is None and location is None: # pragma: no cover
raise AquilonError("A service should by mapped to a Network or a "
"Location")
if personality and host_environment: # pragma: no cover
raise AquilonError("A service can't be mapped to a Personality and "
"a HostEnvironment at the same time")
super(ServiceMap, self).__init__(service_instance=service_instance,
network=network, location=location,
personality=personality,
host_environment=host_environment)
@staticmethod
def get_location_mapped_instances(dbservice, dblocation):
# Simplified service map lookup - single service, location-based maps
# only, no client bindings
session = object_session(dbservice)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
q = session.query(ServiceMap)
q = q.filter(and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()))
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.join(ServiceInstance)
q = q.filter_by(service=dbservice)
q = q.options(contains_eager('service_instance'),
defer('service_instance.comments'),
lazyload('service_instance.service'))
instances = []
min_seen_priority = (maxsize,)
# We want the instance(s) with the lowest priority
for map in q:
si = map.service_instance
if min_seen_priority > map.priority:
instances = [si]
min_seen_priority = map.priority
elif min_seen_priority == map.priority:
instances.append(si)
return instances
@staticmethod
def get_mapped_instance_cache(dbservices, dbstage, dblocation,
dbnetwork=None):
"""Returns dict of requested services to closest mapped instances."""
session = object_session(dblocation)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
PSLI = PersonalityServiceListItem
q = session.query(ServiceMap)
q = q.join(ServiceInstance)
q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in dbservices))
q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,
PSLI.service_id == ServiceInstance.service_id))
# Rules for filtering by target object
q = q.filter(or_(
and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()),
ServiceMap.personality == dbstage.personality,
ServiceMap.host_environment_id == coalesce(
PSLI.host_environment_id,
dbstage.personality.host_environment.id)))
# Rules for filtering by location/scope
if dbnetwork:
q = q.filter(or_(ServiceMap.location_id.in_(location_ids),
ServiceMap.network_id == dbnetwork.id))
else:
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.options(contains_eager('service_instance'),
defer('service_instance.comments'),
undefer('service_instance._client_count'),
lazyload('service_instance.service'))
instance_cache = {}
instance_priority = defaultdict(lambda: (maxsize,))
# For every service, we want the instance(s) with the lowest priority
for map in q:
si = map.service_instance
service = si.service
if instance_priority[service] > map.priority:
instance_cache[service] = [si]
instance_priority[service] = map.priority
elif instance_priority[service] == map.priority:
instance
|
return _TARGET_PERSONALITY
|
random_line_split
|
app.js
|
var stud2 =parseInt(prompt("Enter second student marks"));
var stud3 =parseInt(prompt("Enter third student marks"));
var obtMarks = stud1+stud2+stud3;
var total= 300;
function Mainfunction(){
console.log("student"+" "+stud1+" " +"student"+" "+ stud2+" " +"student"+" "+ stud3)
console.log(obtMarks)
}
Mainfunction()
function percentage(){
var per= obtMarks*100/total;
console.log (per)
}
percentage()
function average(){
var averageMarks = obtMarks / 3 ;
console.log(averageMarks)
}
average()
/* 5. You have learned the function indexOf. Code your own custom
/* function that will perform the same functionality. You can code
/* for single character as of now.*/
function indexOf(){
var str = "hello world,Live as if you were to die tomorrow";
var n = str.indexOf("were");
console.log(n)
}
indexOf()
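/* A possible hand-rolled version for a single character (an illustrative
/* sketch, not part of the original answer): walk the string and return the
/* first matching position, or -1 if the character never occurs.*/
function myIndexOf(text, ch) {
    // Compare each character against the one we are looking for.
    for (var i = 0; i < text.length; i++) {
        if (text[i] === ch) {
            return i;
        }
    }
    return -1;
}
console.log(myIndexOf("hello world", "w"));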
/* 6. Write a function to delete all vowels from a sentence. Assume
/* that the sentence is not more than 25 characters long.*/
function delVowel(){
var strings = ["That which does not kill us makes us stronger.” "];
strings = strings.map(function (string) {
return string.replace(/[aeiou]/g, '');
});
console.log(strings);
}
delVowel()
/* 7. Write a function with switch statement to count the number of
/* occurrences of any two vowels in succession in a line of text.
/* For example, in the sentence*/
function findOccu
|
var str = "Pleases read this application and give me gratuity";
var count = 0;
switch (str) {
case 'a':
count++;
case 'A':
count++
case 'e':
case 'E':
case 'i':
case 'I':
case 'o':
case 'O':
case 'u':
case 'U':
console.log (1);
default:
console.log(0);
}
}
findOccurrences();
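/* Note: the switch above compares the whole sentence against single
/* characters, so it always falls through to the default case. A corrected
/* sketch (illustrative only, not part of the original answer) that counts
/* two vowels in succession:*/
function countSuccessiveVowels(text) {
    var vowels = "aeiouAEIOU";
    var count = 0;
    for (var i = 0; i < text.length - 1; i++) {
        // Count positions where this character and the next are both vowels.
        if (vowels.indexOf(text[i]) !== -1 && vowels.indexOf(text[i + 1]) !== -1) {
            count++;
        }
    }
    return count;
}
console.log(countSuccessiveVowels("Pleases read this application and give me gratuity"));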
/* 8. The distance between two cities (in km.) is input through the
/* keyboard. Write four functions to convert and print this
/* distance in meters, feet, inches and centimeters.*/
var dist=prompt("Enter distance between two cities(in km)");
function meter(){
var meter= dist*1000;
console.log(meter)
}
meter()
function feet(){
var feet = dist*3280.84;
console.log(feet)
}
feet()
function inches(){
var inch= dist*39370.1;
console.log(inch)
}
inches()
function centimeter(){
var centimeter= dist*100000
console.log(centimeter)
}
centimeter();
/* . A cashier has currency notes of denominations 10, 50 and
/* 100. If the amount to be withdrawn is input through the
/* keyboard in hundreds, find the total number of currency notes
/* of each denomination the cashier will have to give to the
/* withdrawer.*/
function currencyDenomination() {
var cash = +prompt("Enter cash (in hundreds): ");
var hundred = cash / 100;
var fifty = cash / 50;
var ten = cash / 10;
console.log(hundred)
console.log(fifty)
console.log(ten)
}
currencyDenomination()
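/* Note: the three divisions above count every denomination against the full
/* amount. A sketch (illustrative only, and assuming the amount is entered as
/* a plain value rather than "in hundreds") that uses Math.floor and the
/* remainder to count notes of each denomination:*/
function currencyBreakdown(amount) {
    var hundreds = Math.floor(amount / 100);
    var afterHundreds = amount % 100;
    var fifties = Math.floor(afterHundreds / 50);
    var tens = Math.floor((afterHundreds % 50) / 10);
    console.log(hundreds);
    console.log(fifties);
    console.log(tens);
}
currencyBreakdown(+prompt("Enter amount to withdraw: "));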
/* CHAPTER# 43-48 */
/* 1. Show an alert box on click on a link.*/
function display(){
alert("hello world")
}
display();
/* 2. Display some Mobile images in browser. On click on an
/* image Show the message in alert to user.*/
/* kindly see answer in html section*/
/* 3. Display 10 student records in table and each row should contain a delete
/* button. If you click on a button to delete a record, entire row should be
/* deleted. */
function del(){
var table = document.getElementById("tables");
table.deleteRow(-1);
}
del()
/* 4. Display an image in browser. Change the picture on mouseover and set the
/* first picture on mouseout.*/
/* kindly see answer in html section*/
/* 5. Show a counter in browser. Counter should increase on click on increase
/* button and decrease on click on decrease button. And show updated counter
/* value in browser.*/
var i = 0;
function buttonClick() {
document.getElementById('inc').value = ++i;
}
var o = 0;
function decreaseClick(){
document.getElementById('dec').value= --o;
}
/* CHAPTER# 49-52*/
/* 1. Create a signup form and display form data in your web
/* page on submission.*/
function getValue(){
var txt= document.getElementById("user_input").value
var txt2= document.getElementById("pass").value
var txt3 = document.getElementById("rep").value
document.getElementById('display').innerHTML= txt+txt2+txt3
}
/* 2. Suppose in your webpage there is content area in which
/* you have entered your item details, but user can only see
/* some details on first look. When user clicks on “Read
/* more” button, full detail of that particular item will be
/* displayed. */
function morePara() {
var expParagraph = "Web Desk .July 21, 2020 HomeLatestPakistan One killed, multiple injured in Turbat blast Play Video .At least one person was killed and seven others were injured in a blast in Turbat Bazaar, said the police on Tuesday.According to the police, at least seven were injured when the blast took place in the bazaar, with two in a critical condition. The injured have been moved to a nearby hospital.The blast took place near a car after which the vehicle caught on fire, confirmed police. A motorcycle near the site of the blast also caught on fire.The windows of the nearby buildings were also shattered due to the intensity of the blast."
document.getElementById("para").innerHTML = expParagraph;
}
/* 3. In previous assignment you have created a tabular data
/* using javascript. Let’s modify that. Create a form which
/* takes student’s details and show each student detail in
/* table. Each row of table must contain a delete button and
/* an edit button. On click on delete button entire row should
/* be deleted. On click on edit button, a hidden form will
/* appear with the values of that row.*/
function edit_row(no)
{
document.getElementById("edit_button"+no).style.display="none";
document.getElementById("save_button"+no).style.display="block";
var name=document.getElementById("name_row"+no);
var country=document.getElementById("country_row"+no);
var age=document.getElementById("age_row"+no);
var name_data=name.innerHTML;
var country_data=country.innerHTML;
var age_data=age.innerHTML;
name.innerHTML="<input type='text' id='name_text"+no+"' value='"+name_data+"'>";
country.innerHTML="<input type='text' id='country_text"+no+"' value='"+country_data+"'>";
age.innerHTML="<input type='text' id='age_text"+no+"' value='"+age_data+"'>";
}
function save_row(no)
{
var name_val=document.getElementById("name_text"+no).value;
var country_val=document.getElementById("country_text"+no).value;
var age_val=document.getElementById("age_text"+no).value;
document.getElementById("name_row"+no).innerHTML=name_val;
document.getElementById("country_row"+no).innerHTML=country_val;
document.getElementById("age_row"+no).innerHTML=age_val;
document.getElementById("edit_button"+no).style.display="block";
document.getElementById("save_button"+no).style.display="none";
}
function delete_row(no)
{
document.getElementById("row"+no+"").outerHTML="";
}
function add_row()
{
var new_name=document.getElementById("new_name").value;
var new_country=document.getElementById("new_country").value;
var new_age=document.getElementById("new_age").value;
var table=document.getElementById("data_table");
var table_len=(table.rows.length)-1;
var row = table.insertRow(table_len).outerHTML="<tr id='row"+table_len+"'><td id='name_row"+table_len+"'>"+new_name+"</td><td id='country_row"+table_len+"'>"+new_country+"</td><td id='age_row"+table_len+"'>"+new_age+"</td><td><input type='button' id='edit_button"+table_len+"' value='Edit' class='edit' onclick='edit_row("+table_len+")'> <
|
rrences() {
|
identifier_name
|
app.js
|
var stud2 =parseInt(prompt("Enter second student marks"));
var stud3 =parseInt(prompt("Enter third student marks"));
var obtMarks = stud1+stud2+stud3;
var total= 300;
function Mainfunction(){
console.log("student"+" "+stud1+" " +"student"+" "+ stud2+" " +"student"+" "+ stud3)
console.log(obtMarks)
}
Mainfunction()
function percentage(){
var per= obtMarks*100/total;
console.log (per)
}
percentage()
function average(){
var averageMarks = obtMarks / 3 ;
console.log(averageMarks)
}
average()
/* 5. You have learned the function indexOf. Code your own custom
/* function that will perform the same functionality. You can code
/* for single character as of now.*/
function indexOf(){
var str = "hello world,Live as if you were to die tomorrow";
var n = str.indexOf("were");
console.log(n)
}
indexOf()
/* 6. Write a function to delete all vowels from a sentence. Assume
/* that the sentence is not more than 25 characters long.*/
function delVowel(){
var strings = ["That which does not kill us makes us stronger.” "];
strings = strings.map(function (string) {
return string.replace(/[aeiou]/g, '');
});
console.log(strings);
}
delVowel()
/* 7. Write a function with switch statement to count the number of
/* occurrences of any two vowels in succession in a line of text.
/* For example, in the sentence*/
function findOccurrences() {
var str = "Pleases read this application and give me gratuity";
var count = 0;
switch (str) {
case 'a':
count++;
case 'A':
count++
case 'e':
case 'E':
case 'i':
case 'I':
case 'o':
case 'O':
case 'u':
case 'U':
console.log (1);
default:
console.log(0);
}
}
findOccurrences();
/* 8. The distance between two cities (in km.) is input through the
/* keyboard. Write four functions to convert and print this
/* distance in meters, feet, inches and centimeters.*/
var dist=prompt("Enter distance between two cities(in km)");
function meter(){
var meter= dist*1000;
console.log(meter)
}
meter()
function feet(){
var feet = dist*3280.84;
console.log(feet)
}
feet()
function inches(){
var inch= dist*39370.1;
console.log(inch)
}
|
var centimeter= dist*100000
console.log(centimeter)
}
centimeter();
/* . A cashier has currency notes of denominations 10, 50 and
/* 100. If the amount to be withdrawn is input through the
/* keyboard in hundreds, find the total number of currency notes
/* of each denomination the cashier will have to give to the
/* withdrawer.*/
function currencyDenomination() {
var cash = +prompt("Enter cash (in hundreds): ");
var hundred = cash / 100;
var fifty = cash / 50;
var ten = cash / 10;
console.log(hundred)
console.log(fifty)
console.log(ten)
}
currencyDenomination()
/* CHAPTER# 43-48 */
/* 1. Show an alert box on click on a link.*/
function display(){
alert("hello world")
}
display();
/* 2. Display some Mobile images in browser. On click on an
/* image Show the message in alert to user.*/
/* kindly see answer in html section*/
/* 3. Display 10 student records in table and each row should contain a delete
/* button. If you click on a button to delete a record, entire row should be
/* deleted. */
function del(){
var table = document.getElementById("tables");
table.deleteRow(-1);
}
del()
/* 4. Display an image in browser. Change the picture on mouseover and set the
/* first picture on mouseout.*/
/* kindly see answer in html section*/
/* 5. Show a counter in browser. Counter should increase on click on increase
/* button and decrease on click on decrease button. And show updated counter
/* value in browser.*/
var i = 0;
function buttonClick() {
document.getElementById('inc').value = ++i;
}
var o = 0;
function decreaseClick(){
document.getElementById('dec').value= --o;
}
/* CHAPTER# 49-52*/
/* 1. Create a signup form and display form data in your web
/* page on submission.*/
function getValue(){
var txt= document.getElementById("user_input").value
var txt2= document.getElementById("pass").value
var txt3 = document.getElementById("rep").value
document.getElementById('display').innerHTML= txt+txt2+txt3
}
/* 2. Suppose in your webpage there is content area in which
/* you have entered your item details, but user can only see
/* some details on first look. When user clicks on “Read
/* more” button, full detail of that particular item will be
/* displayed. */
function morePara() {
var expParagraph = "Web Desk .July 21, 2020 HomeLatestPakistan One killed, multiple injured in Turbat blast Play Video .At least one person was killed and seven others were injured in a blast in Turbat Bazaar, said the police on Tuesday.According to the police, at least seven were injured when the blast took place in the bazaar, with two in a critical condition. The injured have been moved to a nearby hospital.The blast took place near a car after which the vehicle caught on fire, confirmed police. A motorcycle near the site of the blast also caught on fire.The windows of the nearby buildings were also shattered due to the intensity of the blast."
document.getElementById("para").innerHTML = expParagraph;
}
/* 3. In previous assignment you have created a tabular data
/* using javascript. Let’s modify that. Create a form which
/* takes student’s details and show each student detail in
/* table. Each row of table must contain a delete button and
/* an edit button. On click on delete button entire row should
/* be deleted. On click on edit button, a hidden form will
/* appear with the values of that row.*/
function edit_row(no)
{
document.getElementById("edit_button"+no).style.display="none";
document.getElementById("save_button"+no).style.display="block";
var name=document.getElementById("name_row"+no);
var country=document.getElementById("country_row"+no);
var age=document.getElementById("age_row"+no);
var name_data=name.innerHTML;
var country_data=country.innerHTML;
var age_data=age.innerHTML;
name.innerHTML="<input type='text' id='name_text"+no+"' value='"+name_data+"'>";
country.innerHTML="<input type='text' id='country_text"+no+"' value='"+country_data+"'>";
age.innerHTML="<input type='text' id='age_text"+no+"' value='"+age_data+"'>";
}
function save_row(no)
{
var name_val=document.getElementById("name_text"+no).value;
var country_val=document.getElementById("country_text"+no).value;
var age_val=document.getElementById("age_text"+no).value;
document.getElementById("name_row"+no).innerHTML=name_val;
document.getElementById("country_row"+no).innerHTML=country_val;
document.getElementById("age_row"+no).innerHTML=age_val;
document.getElementById("edit_button"+no).style.display="block";
document.getElementById("save_button"+no).style.display="none";
}
function delete_row(no)
{
document.getElementById("row"+no+"").outerHTML="";
}
function add_row()
{
var new_name=document.getElementById("new_name").value;
var new_country=document.getElementById("new_country").value;
var new_age=document.getElementById("new_age").value;
var table=document.getElementById("data_table");
var table_len=(table.rows.length)-1;
var row = table.insertRow(table_len).outerHTML="<tr id='row"+table_len+"'><td id='name_row"+table_len+"'>"+new_name+"</td><td id='country_row"+table_len+"'>"+new_country+"</td><td id='age_row"+table_len+"'>"+new_age+"</td><td><input type='button' id='edit_button"+table_len+"' value='Edit' class='edit' onclick='edit_row("+table_len+")'> <input type='
|
inches()
function centimeter(){
|
random_line_split
|
app.js
|
var stud2 =parseInt(prompt("Enter second student marks"));
var stud3 =parseInt(prompt("Enter third student marks"));
var obtMarks = stud1+stud2+stud3;
var total= 300;
function Mainfunction(){
console.log("student"+" "+stud1+" " +"student"+" "+ stud2+" " +"student"+" "+ stud3)
console.log(obtMarks)
}
Mainfunction()
function percentage(){
var per= obtMarks*100/total;
console.log (per)
}
percentage()
function average(){
var averageMarks = obtMarks / 3 ;
console.log(averageMarks)
}
average()
/* 5. You have learned the function indexOf. Code your own custom
/* function that will perform the same functionality. You can code
/* for single character as of now.*/
function indexOf(){
var str = "hello world,Live as if you were to die tomorrow";
var n = str.indexOf("were");
console.log(n)
}
indexOf()
/* 6. Write a function to delete all vowels from a sentence. Assume
/* that the sentence is not more than 25 characters long.*/
function delVowel(){
|
delVowel()
/* 7. Write a function with switch statement to count the number of
/* occurrences of any two vowels in succession in a line of text.
/* For example, in the sentence*/
function findOccurrences() {
var str = "Pleases read this application and give me gratuity";
var count = 0;
switch (str) {
case 'a':
count++;
case 'A':
count++
case 'e':
case 'E':
case 'i':
case 'I':
case 'o':
case 'O':
case 'u':
case 'U':
console.log (1);
default:
console.log(0);
}
}
findOccurrences();
/* 8. The distance between two cities (in km.) is input through the
/* keyboard. Write four functions to convert and print this
/* distance in meters, feet, inches and centimeters.*/
var dist=prompt("Enter distance between two cities(in km)");
function meter(){
var meter= dist*1000;
console.log(meter)
}
meter()
function feet(){
var feet = dist*3280.84;
console.log(feet)
}
feet()
function inches(){
var inch= dist*39370.1;
console.log(inch)
}
inches()
function centimeter(){
var centimeter= dist*100000
console.log(centimeter)
}
centimeter();
/* . A cashier has currency notes of denominations 10, 50 and
/* 100. If the amount to be withdrawn is input through the
/* keyboard in hundreds, find the total number of currency notes
/* of each denomination the cashier will have to give to the
/* withdrawer.*/
function currencyDenomination() {
var cash = +prompt("Enter cash (in hundreds): ");
var hundred = cash / 100;
var fifty = cash / 50;
var ten = cash / 10;
console.log(hundred)
console.log(fifty)
console.log(ten)
}
currencyDenomination()
/* CHAPTER# 43-48 */
/* 1. Show an alert box on click on a link.*/
function display(){
alert("hello world")
}
display();
/* 2. Display some Mobile images in browser. On click on an
/* image Show the message in alert to user.*/
/* kindly see answer in html section*/
/* 3. Display 10 student records in table and each row should contain a delete
/* button. If you click on a button to delete a record, entire row should be
/* deleted. */
function del(){
var table = document.getElementById("tables");
table.deleteRow(-1);
}
del()
/* 4. Display an image in browser. Change the picture on mouseover and set the
/* first picture on mouseout.*/
/* kindly see answer in html section*/
/* 5. Show a counter in browser. Counter should increase on click on increase
/* button and decrease on click on decrease button. And show updated counter
/* value in browser.*/
var i = 0;
function buttonClick() {
document.getElementById('inc').value = ++i;
}
var o = 0;
function decreaseClick(){
document.getElementById('dec').value= --o;
}
/* CHAPTER# 49-52*/
/* 1. Create a signup form and display form data in your web
/* page on submission.*/
function getValue(){
var txt= document.getElementById("user_input").value
var txt2= document.getElementById("pass").value
var txt3 = document.getElementById("rep").value
document.getElementById('display').innerHTML= txt+txt2+txt3
}
/* 2. Suppose in your webpage there is content area in which
/* you have entered your item details, but user can only see
/* some details on first look. When user clicks on “Read
/* more” button, full detail of that particular item will be
/* displayed. */
function morePara() {
var expParagraph = "Web Desk .July 21, 2020 HomeLatestPakistan One killed, multiple injured in Turbat blast Play Video .At least one person was killed and seven others were injured in a blast in Turbat Bazaar, said the police on Tuesday.According to the police, at least seven were injured when the blast took place in the bazaar, with two in a critical condition. The injured have been moved to a nearby hospital.The blast took place near a car after which the vehicle caught on fire, confirmed police. A motorcycle near the site of the blast also caught on fire.The windows of the nearby buildings were also shattered due to the intensity of the blast."
document.getElementById("para").innerHTML = expParagraph;
}
/* 3. In previous assignment you have created a tabular data
/* using javascript. Let’s modify that. Create a form which
/* takes student’s details and show each student detail in
/* table. Each row of table must contain a delete button and
/* an edit button. On click on delete button entire row should
/* be deleted. On click on edit button, a hidden form will
/* appear with the values of that row.*/
function edit_row(no)
{
document.getElementById("edit_button"+no).style.display="none";
document.getElementById("save_button"+no).style.display="block";
var name=document.getElementById("name_row"+no);
var country=document.getElementById("country_row"+no);
var age=document.getElementById("age_row"+no);
var name_data=name.innerHTML;
var country_data=country.innerHTML;
var age_data=age.innerHTML;
name.innerHTML="<input type='text' id='name_text"+no+"' value='"+name_data+"'>";
country.innerHTML="<input type='text' id='country_text"+no+"' value='"+country_data+"'>";
age.innerHTML="<input type='text' id='age_text"+no+"' value='"+age_data+"'>";
}
function save_row(no)
{
var name_val=document.getElementById("name_text"+no).value;
var country_val=document.getElementById("country_text"+no).value;
var age_val=document.getElementById("age_text"+no).value;
document.getElementById("name_row"+no).innerHTML=name_val;
document.getElementById("country_row"+no).innerHTML=country_val;
document.getElementById("age_row"+no).innerHTML=age_val;
document.getElementById("edit_button"+no).style.display="block";
document.getElementById("save_button"+no).style.display="none";
}
function delete_row(no)
{
document.getElementById("row"+no+"").outerHTML="";
}
function add_row()
{
var new_name=document.getElementById("new_name").value;
var new_country=document.getElementById("new_country").value;
var new_age=document.getElementById("new_age").value;
var table=document.getElementById("data_table");
var table_len=(table.rows.length)-1;
var row = table.insertRow(table_len).outerHTML="<tr id='row"+table_len+"'><td id='name_row"+table_len+"'>"+new_name+"</td><td id='country_row"+table_len+"'>"+new_country+"</td><td id='age_row"+table_len+"'>"+new_age+"</td><td><input type='button' id='edit_button"+table_len+"' value='Edit' class='edit' onclick='edit_row("+table_len+")'> <input type='
|
var strings = ["That which does not kill us makes us stronger.” "];
strings = strings.map(function (string) {
return string.replace(/[aeiou]/g, '');
});
console.log(strings);
}
|
identifier_body
|
app.js
|
console.log(answer);
}
leapYear();
/* 3. If the lengths of the sides of a triangle are denoted by a, b, and
/* c, then area of triangle is given by
/* area = sqrt( S(S − a)(S − b)(S − c) )
/* where, S = ( a + b + c ) / 2
/* Calculate area of triangle using 2 functions*/
function input(){
var a = parseFloat(prompt("enter value of a "))
var b = parseFloat(prompt("enter value of b"))
var c = parseFloat(prompt("enter value of c "))
var s = (a+b+c) / 2;
console.log(s)
var area = Math.sqrt(s*((s-a)*(s-b)*(s-c)));
console.log(area)
}
input();
/* 4. Write a function that receives marks received by a student in 3
/* subjects and returns the average and percentage of these
/* marks. there should be 3 functions one is the mainFunction
/* and other are for average and percentage. Call those functions
/* from mainFunction and display result in mainFunction.*/
var stud1 =parseInt(prompt("Enter first student marks"));
var stud2 =parseInt(prompt("Enter second student marks"));
var stud3 =parseInt(prompt("Enter third student marks"));
var obtMarks = stud1+stud2+stud3;
var total= 300;
function Mainfunction(){
console.log("student"+" "+stud1+" " +"student"+" "+ stud2+" " +"student"+" "+ stud3)
console.log(obtMarks)
}
Mainfunction()
function percentage(){
var per= obtMarks*100/total;
console.log (per)
}
percentage()
function average(){
var averageMarks = obtMarks / 3 ;
console.log(averageMarks)
}
average()
/* 5. You have learned the function indexOf. Code your own custom
/* function that will perform the same functionality. You can code
/* for single character as of now.*/
function indexOf(){
var str = "hello world,Live as if you were to die tomorrow";
var n = str.indexOf("were");
console.log(n)
}
indexOf()
/* 6. Write a function to delete all vowels from a sentence. Assume
/* that the sentence is not more than 25 characters long.*/
function delVowel(){
var strings = ["That which does not kill us makes us stronger.” "];
strings = strings.map(function (string) {
return string.replace(/[aeiou]/g, '');
});
console.log(strings);
}
delVowel()
/* 7. Write a function with switch statement to count the number of
/* occurrences of any two vowels in succession in a line of text.
/* For example, in the sentence*/
function findOccurrences() {
var str = "Pleases read this application and give me gratuity";
var count = 0;
switch (str) {
case 'a':
count++;
case 'A':
count++
case 'e':
case 'E':
case 'i':
case 'I':
case 'o':
case 'O':
case 'u':
case 'U':
console.log (1);
default:
console.log(0);
}
}
findOccurrences();
/* 8. The distance between two cities (in km.) is input through the
/* keyboard. Write four functions to convert and print this
/* distance in meters, feet, inches and centimeters.*/
var dist=prompt("Enter distance between two cities(in km)");
function meter(){
var meter= dist*1000;
console.log(meter)
}
meter()
function feet(){
var feet = dist*3280.84;
console.log(feet)
}
feet()
function inches(){
var inch= dist*39370.1;
console.log(inch)
}
inches()
function centimeter(){
var centimeter= dist*100000
console.log(centimeter)
}
centimeter();
/* . A cashier has currency notes of denominations 10, 50 and
/* 100. If the amount to be withdrawn is input through the
/* keyboard in hundreds, find the total number of currency notes
/* of each denomination the cashier will have to give to the
/* withdrawer.*/
function currencyDenomination() {
var cash = +prompt("Enter cash (in hundreds): ");
var hundred = cash / 100;
var fifty = cash / 50;
var ten = cash / 10;
console.log(hundred)
console.log(fifty)
console.log(ten)
}
currencyDenomination()
/* CHAPTER# 43-48 */
/* 1. Show an alert box on click on a link.*/
function display(){
alert("hello world")
}
display();
/* 2. Display some Mobile images in browser. On click on an
/* image Show the message in alert to user.*/
/* kindly see answer in html section*/
/* 3. Display 10 student records in table and each row should contain a delete
/* button. If you click on a button to delete a record, entire row should be
/* deleted. */
function del(){
var table = document.getElementById("tables");
table.deleteRow(-1);
}
del()
/* 4. Display an image in browser. Change the picture on mouseover and set the
/* first picture on mouseout.*/
/* kindly see answer in html section*/
/* 5. Show a counter in browser. Counter should increase on click on increase
/* button and decrease on click on decrease button. And show updated counter
/* value in browser.*/
var i = 0;
function buttonClick() {
document.getElementById('inc').value = ++i;
}
var o = 0;
function decreaseClick(){
document.getElementById('dec').value= --o;
}
/* CHAPTER# 49-52*/
/* 1. Create a signup form and display form data in your web
/* page on submission.*/
function getValue(){
var txt= document.getElementById("user_input").value
var txt2= document.getElementById("pass").value
var txt3 = document.getElementById("rep").value
document.getElementById('display').innerHTML= txt+txt2+txt3
}
/* 2. Suppose in your webpage there is content area in which
/* you have entered your item details, but user can only see
/* some details on first look. When user clicks on “Read
/* more” button, full detail of that particular item will be
/* displayed. */
function morePara() {
var expParagraph = "Web Desk .July 21, 2020 HomeLatestPakistan One killed, multiple injured in Turbat blast Play Video .At least one person was killed and seven others were injured in a blast in Turbat Bazaar, said the police on Tuesday.According to the police, at least seven were injured when the blast took place in the bazaar, with two in a critical condition. The injured have been moved to a nearby hospital.The blast took place near a car after which the vehicle caught on fire, confirmed police. A motorcycle near the site of the blast also caught on fire.The windows of the nearby buildings were also shattered due to the intensity of the blast."
document.getElementById("para").innerHTML = expParagraph;
}
/* 3. In previous assignment you have created a tabular data
/* using javascript. Let’s modify that. Create a form which
/* takes student’s details and show each student detail in
/* table. Each row of table must contain a delete button and
/* an edit button. On click on delete button entire row should
/* be deleted. On click on edit button, a hidden form will
/* appear with the values of that row.*/
function edit_row(no)
{
document.getElementById("edit_button"+no).style.display="none";
document.getElementById("save_button"+no).style.display="block";
var name=document.getElementById("name_row"+no);
var country=document.getElementById("country_row"+no);
var age=document.getElementById("age_row"+no);
var name_data=name.innerHTML;
var country_data=country.innerHTML;
var age_data=age.innerHTML;
name.innerHTML="<input type='text' id='name_text"+no+"' value='"+name_data+"'>";
country.innerHTML="<input type='text' id='country_text"+no+"' value='"+country_data+"'>";
age.innerHTML="<input type='text' id='age_text"+no+"' value='"+age_data+"'>";
}
function save_row(no)
{
var name_val=document.getElementById("name_text"+no).value;
var country_val=document.getElementById("country_text"+no
|
{
answer = "not a leap year";
}
|
conditional_block
|
|
lib.rs
|
as the error type, which means that it
//! doesn't store any information about the error.
//! This can be changed by using `#[logos(error = T)]` attribute on the enum.
//! The type `T` can be any type that implements `Clone`, `PartialEq`,
//! `Default` and `From<E>` for each callback's error type.
//!
//! ## Token disambiguation
//!
//! Rule of thumb is:
//!
//! + Longer beats shorter.
//! + Specific beats generic.
//!
//! If any two definitions could match the same input, like `fast` and `[a-zA-Z]+`
//! in the example above, it's the longer and more specific definition of `Token::Fast`
//! that will be the result.
//!
//! This is done by comparing numeric priority attached to each definition. Every consecutive,
//! non-repeating single byte adds 2 to the priority, while every range or regex class adds 1.
//! Loops or optional blocks are ignored, while alternations count the shortest alternative:
//!
//! + `[a-zA-Z]+` has a priority of 1 (lowest possible), because at minimum it can match a single byte to a class.
//! + `foobar` has a priority of 12.
//! + `(foo|hello)(bar)?` has a priority of 6, `foo` being its shortest possible match.
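//!
//! A minimal illustrative sketch of that rule (a hypothetical example, not one
//! of the crate's own): the literal `fast` has a priority of 8 while the
//! identifier class has a priority of 1, so `fast` lexes as `Fast` rather than
//! `Text`:
//!
//! ```rust
//! use logos::Logos;
//!
//! #[derive(Logos, Debug, PartialEq)]
//! enum Token<'a> {
//!     #[regex(" +", logos::skip)]
//!     Ignored,
//!
//!     #[token("fast")]
//!     Fast,
//!
//!     #[regex("[a-zA-Z]+")]
//!     Text(&'a str),
//! }
//!
//! let mut lex = Token::lexer("fast car");
//! assert_eq!(lex.next(), Some(Ok(Token::Fast)));
//! assert_eq!(lex.next(), Some(Ok(Token::Text("car"))));
//! ```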
#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://maciej.codes/kosz/logos.png")]
#[cfg(not(feature = "std"))]
extern crate core as std;
#[cfg(feature = "export_derive")]
pub use logos_derive::Logos;
use std::fmt::Debug;
mod lexer;
pub mod source;
#[doc(hidden)]
pub mod internal;
pub use crate::lexer::{Lexer, Span, SpannedIter};
pub use crate::source::Source;
/// Trait implemented for an enum representing all tokens. You should never have
/// to implement it manually, use the `#[derive(Logos)]` attribute on your enum.
pub trait Logos<'source>: Sized {
/// Associated type `Extras` for the particular lexer. This can be set using
/// `#[logos(extras = MyExtras)]` and accessed inside callbacks.
type Extras;
/// Source type this token can be lexed from. This will default to `str`,
/// unless one of the defined patterns explicitly uses non-unicode byte values
/// or byte slices, in which case that implementation will use `[u8]`.
type Source: Source + ?Sized + 'source;
/// Error type returned by the lexer. This can be set using
/// `#[logos(error = MyError)]`. Defaults to `()` if not set.
type Error: Default + Clone + PartialEq + Debug + 'source;
/// The heart of Logos. Called by the `Lexer`. The implementation for this function
/// is generated by the `logos-derive` crate.
fn lex(lexer: &mut Lexer<'source, Self>);
/// Create a new instance of a `Lexer` that will produce tokens implementing
/// this `Logos`.
fn lexer(source: &'source Self::Source) -> Lexer<'source, Self>
where
Self::Extras: Default,
{
Lexer::new(source)
}
/// Create a new instance of a `Lexer` with the provided `Extras` that will
/// produce tokens implementing this `Logos`.
fn lexer_with_extras(
source: &'source Self::Source,
extras: Self::Extras,
) -> Lexer<'source, Self> {
Lexer::with_extras(source, extras)
}
}
/// Type that can be returned from a callback, informing the `Lexer`, to skip
/// current token match. See also [`logos::skip`](./fn.skip.html).
///
/// # Example
///
/// ```rust
/// use logos::{Logos, Skip};
///
/// #[derive(Logos, Debug, PartialEq)]
/// enum Token<'a> {
/// // We will treat "abc" as if it was whitespace.
/// // This is identical to using `logos::skip`.
/// #[regex(" |abc", |_| Skip)]
/// Ignored,
///
/// #[regex("[a-zA-Z]+")]
/// Text(&'a str),
/// }
///
/// let tokens: Vec<_> = Token::lexer("Hello abc world").collect();
///
/// assert_eq!(
/// tokens,
/// &[
/// Ok(Token::Text("Hello")),
/// Ok(Token::Text("world")),
/// ],
/// );
/// ```
pub struct Skip;
/// Type that can be returned from a callback, either producing a field
/// for a token, or skipping it.
///
/// # Example
///
/// ```rust
/// use logos::{Logos, Filter};
///
/// #[derive(Logos, Debug, PartialEq)]
/// enum Token {
/// #[regex(r"[ \n\f\t]+", logos::skip)]
/// Ignored,
///
/// #[regex("[0-9]+", |lex| {
/// let n: u64 = lex.slice().parse().unwrap();
///
/// // Only emit a token if `n` is an even number
/// match n % 2 {
/// 0 => Filter::Emit(n),
/// _ => Filter::Skip,
/// }
/// })]
/// EvenNumber(u64)
/// }
///
/// let tokens: Vec<_> = Token::lexer("20 11 42 23 100 8002").collect();
///
/// assert_eq!(
/// tokens,
/// &[
/// Ok(Token::EvenNumber(20)),
/// // skipping 11
/// Ok(Token::EvenNumber(42)),
/// // skipping 23
/// Ok(Token::EvenNumber(100)),
/// Ok(Token::EvenNumber(8002))
/// ]
/// );
/// ```
pub enum Filter<T> {
/// Emit a token with a given value `T`. Use `()` for unit variants without fields.
Emit(T),
    /// Skip current match, analogous to [`Skip`](./struct.Skip.html).
Skip,
}
/// Type that can be returned from a callback, either producing a field
/// for a token, skipping it, or emitting an error.
///
/// # Example
///
/// ```rust
/// use logos::{Logos, FilterResult};
///
/// #[derive(Debug, PartialEq, Clone, Default)]
/// enum LexingError {
/// NumberParseError,
/// NumberIsTen,
/// #[default]
/// Other,
/// }
///
/// impl From<std::num::ParseIntError> for LexingError {
/// fn from(_: std::num::ParseIntError) -> Self {
/// LexingError::NumberParseError
/// }
/// }
///
/// #[derive(Logos, Debug, PartialEq)]
/// #[logos(error = LexingError)]
/// enum Token {
/// #[regex(r"[ \n\f\t]+", logos::skip)]
/// Ignored,
///
/// #[regex("[0-9]+", |lex| {
/// let n: u64 = lex.slice().parse().unwrap();
///
/// // Only emit a token if `n` is an even number.
/// if n % 2 == 0 {
/// // Emit an error if `n` is 10.
/// if n == 10 {
/// FilterResult::Error(LexingError::NumberIsTen)
/// } else {
/// FilterResult::Emit(n)
/// }
/// } else {
/// FilterResult::Skip
/// }
/// })]
/// NiceEvenNumber(u64)
/// }
///
/// let tokens: Vec<_> = Token::lexer("20 11 42 23 100 10").collect();
///
/// assert_eq!(
/// tokens,
/// &[
/// Ok(Token::NiceEvenNumber(20)),
/// // skipping 11
/// Ok(Token::NiceEvenNumber(42)),
/// // skipping 23
/// Ok(Token::NiceEvenNumber(100)),
/// // error at 10
/// Err(LexingError::NumberIsTen),
/// ]
/// );
/// ```
pub enum FilterResult<T, E> {
/// Emit a token with a given value `T`. Use `()` for unit variants without fields.
Emit(T),
    /// Skip current match, analogous to [`Skip`](./struct.Skip.html).
Skip,
/// Emit a `<Token as Logos>::ERROR` token.
Error(E),
}
/// Predefined callback that will inform the `Lexer` to skip a definition.
///
/// # Example
///
/// ```rust
/// use logos::Logos;
///
/// #[derive(Logos, Debug, PartialEq)]
/// enum Token<'a> {
/// // We will treat "abc" as if it was whitespace
/// #[regex(" |abc", logos::skip)]
/// Ignored,
///
/// #[regex("[a-zA-Z]+")]
/// Text(&'a str),
/// }
///
/// let tokens: Vec<_> = Token::lexer("Hello abc world").collect();
///
/// assert_eq!(
/// tokens,
/// &[
/// Ok(Token::Text("Hello")),
/// Ok(Token::Text("world")),
/// ],
/// );
/// ```
#[inline]
pub fn skip<'source, Token: Logos<'source>>(_: &mut Lexer<'source, Token>) -> Skip
|
{
Skip
}
|
identifier_body
|
|
lib.rs
|
| `Option<T>` | `Ok(Token::Value(T))` **or** `Err(<Token as Logos>::Error::default())` |
//! | `Result<T, E>` | `Ok(Token::Value(T))` **or** `Err(<Token as Logos>::Error::from(err))` |
//! | [`Skip`](./struct.Skip.html) | _skips matched input_ |
//! | [`Filter<T>`](./enum.Filter.html) | `Ok(Token::Value(T))` **or** _skips matched input_ |
//! | [`FilterResult<T, E>`](./enum.FilterResult.html) | `Ok(Token::Value(T))` **or** `Err(<Token as Logos>::Error::from(err))` **or** _skips matched input_ |
//!
//! Callbacks can also be used to perform more specialized lexing in places
//! where regular expressions are too limiting. For specifics, look at
//! [`Lexer::remainder`](./struct.Lexer.html#method.remainder) and
//! [`Lexer::bump`](./struct.Lexer.html#method.bump).
//!
//! ## Errors
//!
//! By default, **Logos** uses `()` as the error type, which means that it
//! doesn't store any information about the error.
//! This can be changed by using `#[logos(error = T)]` attribute on the enum.
//! The type `T` can be any type that implements `Clone`, `PartialEq`,
//! `Default` and `From<E>` for each callback's error type.
//!
//! ## Token disambiguation
//!
//! The rule of thumb is:
//!
//! + Longer beats shorter.
//! + Specific beats generic.
//!
//! If any two definitions could match the same input, like `fast` and `[a-zA-Z]+`
//! in the example above, it's the longer and more specific definition of `Token::Fast`
//! that will be the result.
//!
//! This is done by comparing the numeric priority attached to each definition. Every consecutive,
//! non-repeating single byte adds 2 to the priority, while every range or regex class adds 1.
//! Loops or optional blocks are ignored, while alternations count the shortest alternative
//! (a short sketch follows this list):
//!
//! + `[a-zA-Z]+` has a priority of 1 (lowest possible), because at minimum it can match a single byte to a class.
//! + `foobar` has a priority of 12.
//! + `(foo|hello)(bar)?` has a priority of 6, `foo` being its shortest possible match.
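//!
//! To make the priority rules concrete, here is a minimal sketch (not part of the
//! original documentation; the `Fast`/`Text` variant names are chosen for
//! illustration) of the `fast` vs `[a-zA-Z]+` case described above:
//!
//! ```rust
//! use logos::Logos;
//!
//! #[derive(Logos, Debug, PartialEq)]
//! #[logos(skip r"[ \t\n\f]+")]
//! enum Token<'a> {
//!     // Priority 8: four consecutive bytes, 2 points each.
//!     #[token("fast")]
//!     Fast,
//!
//!     // Priority 1: a single class, with the loop ignored.
//!     #[regex("[a-zA-Z]+")]
//!     Text(&'a str),
//! }
//!
//! let mut lex = Token::lexer("fast faster");
//!
//! // Both definitions match "fast" with the same length, so the
//! // higher-priority `Fast` wins: specific beats generic.
//! assert_eq!(lex.next(), Some(Ok(Token::Fast)));
//!
//! // "faster" is a longer match for `[a-zA-Z]+`: longer beats shorter.
//! assert_eq!(lex.next(), Some(Ok(Token::Text("faster"))));
//! ```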
#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://maciej.codes/kosz/logos.png")]
#[cfg(not(feature = "std"))]
extern crate core as std;
#[cfg(feature = "export_derive")]
pub use logos_derive::Logos;
use std::fmt::Debug;
mod lexer;
pub mod source;
#[doc(hidden)]
pub mod internal;
pub use crate::lexer::{Lexer, Span, SpannedIter};
pub use crate::source::Source;
/// Trait implemented for an enum representing all tokens. You should never have
/// to implement it manually, use the `#[derive(Logos)]` attribute on your enum.
pub trait Logos<'source>: Sized {
/// Associated type `Extras` for the particular lexer. This can be set using
/// `#[logos(extras = MyExtras)]` and accessed inside callbacks.
type Extras;
/// Source type this token can be lexed from. This will default to `str`,
/// unless one of the defined patterns explicitly uses non-unicode byte values
/// or byte slices, in which case that implementation will use `[u8]`.
type Source: Source + ?Sized + 'source;
/// Error type returned by the lexer. This can be set using
/// `#[logos(error = MyError)]`. Defaults to `()` if not set.
type Error: Default + Clone + PartialEq + Debug + 'source;
/// The heart of Logos. Called by the `Lexer`. The implementation for this function
/// is generated by the `logos-derive` crate.
fn lex(lexer: &mut Lexer<'source, Self>);
/// Create a new instance of a `Lexer` that will produce tokens implementing
/// this `Logos`.
fn lexer(source: &'source Self::Source) -> Lexer<'source, Self>
where
Self::Extras: Default,
{
Lexer::new(source)
}
/// Create a new instance of a `Lexer` with the provided `Extras` that will
/// produce tokens implementing this `Logos`.
fn lexer_with_extras(
source: &'source Self::Source,
extras: Self::Extras,
) -> Lexer<'source, Self> {
Lexer::with_extras(source, extras)
}
}
/// Type that can be returned from a callback, informing the `Lexer` to skip
/// the current token match. See also [`logos::skip`](./fn.skip.html).
///
/// # Example
///
/// ```rust
/// use logos::{Logos, Skip};
///
/// #[derive(Logos, Debug, PartialEq)]
/// enum Token<'a> {
/// // We will treat "abc" as if it was whitespace.
/// // This is identical to using `logos::skip`.
/// #[regex(" |abc", |_| Skip)]
/// Ignored,
///
/// #[regex("[a-zA-Z]+")]
/// Text(&'a str),
/// }
///
/// let tokens: Vec<_> = Token::lexer("Hello abc world").collect();
///
/// assert_eq!(
/// tokens,
/// &[
/// Ok(Token::Text("Hello")),
/// Ok(Token::Text("world")),
/// ],
/// );
/// ```
pub struct Skip;
/// Type that can be returned from a callback, either producing a field
/// for a token, or skipping it.
///
/// # Example
///
/// ```rust
/// use logos::{Logos, Filter};
///
/// #[derive(Logos, Debug, PartialEq)]
/// enum Token {
/// #[regex(r"[ \n\f\t]+", logos::skip)]
/// Ignored,
///
/// #[regex("[0-9]+", |lex| {
/// let n: u64 = lex.slice().parse().unwrap();
///
/// // Only emit a token if `n` is an even number
/// match n % 2 {
/// 0 => Filter::Emit(n),
/// _ => Filter::Skip,
/// }
/// })]
/// EvenNumber(u64)
/// }
///
/// let tokens: Vec<_> = Token::lexer("20 11 42 23 100 8002").collect();
///
/// assert_eq!(
/// tokens,
/// &[
/// Ok(Token::EvenNumber(20)),
/// // skipping 11
/// Ok(Token::EvenNumber(42)),
/// // skipping 23
/// Ok(Token::EvenNumber(100)),
/// Ok(Token::EvenNumber(8002))
/// ]
/// );
/// ```
pub enum Filter<T> {
/// Emit a token with a given value `T`. Use `()` for unit variants without fields.
Emit(T),
/// Skip the current match, analogous to [`Skip`](./struct.Skip.html).
Skip,
}
/// Type that can be returned from a callback, either producing a field
/// for a token, skipping it, or emitting an error.
///
/// # Example
///
/// ```rust
/// use logos::{Logos, FilterResult};
///
/// #[derive(Debug, PartialEq, Clone, Default)]
/// enum LexingError {
/// NumberParseError,
/// NumberIsTen,
/// #[default]
/// Other,
/// }
///
/// impl From<std::num::ParseIntError> for LexingError {
/// fn from(_: std::num::ParseIntError) -> Self {
/// LexingError::NumberParseError
/// }
/// }
///
/// #[derive(Logos, Debug, PartialEq)]
/// #[logos(error = LexingError)]
/// enum Token {
/// #[regex(r"[ \n\f\t]+", logos::skip)]
/// Ignored,
///
/// #[regex("[0-9]+", |lex| {
/// let n: u64 = lex.slice().parse().unwrap();
///
/// // Only emit a token if `n` is an even number.
/// if n % 2 == 0 {
/// // Emit an error if `n` is 10.
/// if n == 10 {
/// FilterResult::Error(LexingError::NumberIsTen)
/// } else {
/// FilterResult::Emit(n)
/// }
/// } else {
/// FilterResult::Skip
/// }
/// })]
/// NiceEvenNumber(u64)
/// }
///
/// let tokens: Vec<_> = Token::lexer("20 11 42 23 100 10").collect();
///
/// assert_eq!(
/// tokens,
/// &[
/// Ok(Token::NiceEvenNumber(20)),
/// // skipping 11
/// Ok(Token::NiceEvenNumber(42)),
/// // skipping 23
/// Ok(Token::NiceEvenNumber(100)),
/// // error at 10
/// Err(LexingError::NumberIsTen),
/// ]
/// );
/// ```
pub enum
|
FilterResult
|
identifier_name
|
|
lib.rs
|
&mut Lexer<Token>) -> Option<u64> {
//! let slice = lex.slice();
//! let n: u64 = slice[..slice.len() - 1].parse().ok()?; // skip 'm'
//! Some(n * 1_000_000)
//! }
//!
//! #[derive(Logos, Debug, PartialEq)]
//! #[logos(skip r"[ \t\n\f]+")]
//! enum Token {
//! // Callbacks can use closure syntax, or refer
//! // to a function defined elsewhere.
//! //
//! // Each pattern can have its own callback.
//! #[regex("[0-9]+", |lex| lex.slice().parse().ok())]
//! #[regex("[0-9]+k", kilo)]
//! #[regex("[0-9]+m", mega)]
//! Number(u64),
//! }
//!
//! fn main() {
//! let mut lex = Token::lexer("5 42k 75m");
//!
//! assert_eq!(lex.next(), Some(Ok(Token::Number(5))));
//! assert_eq!(lex.slice(), "5");
//!
//! assert_eq!(lex.next(), Some(Ok(Token::Number(42_000))));
//! assert_eq!(lex.slice(), "42k");
//!
//! assert_eq!(lex.next(), Some(Ok(Token::Number(75_000_000))));
//! assert_eq!(lex.slice(), "75m");
//!
//! assert_eq!(lex.next(), None);
//! }
//! ```
//!
//! Logos can handle callbacks with the following return types (a short sketch follows the table):
//!
//! | Return type | Produces |
//! |--------------------------------------------------|-----------------------------------------------------------------------------------------------------|
//! | `()` | `Ok(Token::Unit)` |
//! | `bool` | `Ok(Token::Unit)` **or** `Err(<Token as Logos>::Error::default())` |
//! | `Result<(), E>` | `Ok(Token::Unit)` **or** `Err(<Token as Logos>::Error::from(err))` |
//! | `T` | `Ok(Token::Value(T))` |
//! | `Option<T>` | `Ok(Token::Value(T))` **or** `Err(<Token as Logos>::Error::default())` |
//! | `Result<T, E>` | `Ok(Token::Value(T))` **or** `Err(<Token as Logos>::Error::from(err))` |
//! | [`Skip`](./struct.Skip.html) | _skips matched input_ |
//! | [`Filter<T>`](./enum.Filter.html) | `Ok(Token::Value(T))` **or** _skips matched input_ |
//! | [`FilterResult<T, E>`](./enum.FilterResult.html) | `Ok(Token::Value(T))` **or** `Err(<Token as Logos>::Error::from(err))` **or** _skips matched input_ |
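//!
//! As a rough sketch of the `Result<T, E>` row (the `LexingError` type below is
//! assumed for illustration, mirroring the one used in the
//! [`FilterResult`](./enum.FilterResult.html) example), a callback can return the
//! `Result` from `parse` directly and let the error be converted through `From`:
//!
//! ```rust
//! use logos::Logos;
//!
//! #[derive(Debug, Clone, PartialEq, Default)]
//! enum LexingError {
//!     InvalidNumber,
//!     #[default]
//!     Other,
//! }
//!
//! impl From<std::num::ParseIntError> for LexingError {
//!     fn from(_: std::num::ParseIntError) -> Self {
//!         LexingError::InvalidNumber
//!     }
//! }
//!
//! #[derive(Logos, Debug, PartialEq)]
//! #[logos(skip r"[ \t\n\f]+")]
//! #[logos(error = LexingError)]
//! enum Token {
//!     // `parse` yields `Result<u64, ParseIntError>`; on failure the error
//!     // is converted into `LexingError` through the `From` impl above.
//!     #[regex("[0-9]+", |lex| lex.slice().parse::<u64>())]
//!     Number(u64),
//! }
//!
//! let mut lex = Token::lexer("42 99999999999999999999999999");
//!
//! assert_eq!(lex.next(), Some(Ok(Token::Number(42))));
//! // The second number overflows `u64`, so parsing fails and the callback's
//! // error surfaces as `Err`.
//! assert_eq!(lex.next(), Some(Err(LexingError::InvalidNumber)));
//! ```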
//!
//! Callbacks can also be used to perform more specialized lexing in places
//! where regular expressions are too limiting. For specifics, look at
//! [`Lexer::remainder`](./struct.Lexer.html#method.remainder) and
//! [`Lexer::bump`](./struct.Lexer.html#method.bump).
//!
//! ## Errors
//!
//! By default, **Logos** uses `()` as the error type, which means that it
//! doesn't store any information about the error.
//! This can be changed by using `#[logos(error = T)]` attribute on the enum.
//! The type `T` can be any type that implements `Clone`, `PartialEq`,
//! `Default` and `From<E>` for each callback's error type.
//!
//! ## Token disambiguation
//!
//! The rule of thumb is:
//!
//! + Longer beats shorter.
//! + Specific beats generic.
//!
//! If any two definitions could match the same input, like `fast` and `[a-zA-Z]+`
//! in the example above, it's the longer and more specific definition of `Token::Fast`
//! that will be the result.
//!
//! This is done by comparing the numeric priority attached to each definition. Every consecutive,
//! non-repeating single byte adds 2 to the priority, while every range or regex class adds 1.
//! Loops or optional blocks are ignored, while alternations count the shortest alternative:
//!
//! + `[a-zA-Z]+` has a priority of 1 (lowest possible), because at minimum it can match a single byte to a class.
//! + `foobar` has a priority of 12.
//! + `(foo|hello)(bar)?` has a priority of 6, `foo` being its shortest possible match.
#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://maciej.codes/kosz/logos.png")]
#[cfg(not(feature = "std"))]
extern crate core as std;
#[cfg(feature = "export_derive")]
pub use logos_derive::Logos;
use std::fmt::Debug;
mod lexer;
pub mod source;
#[doc(hidden)]
pub mod internal;
pub use crate::lexer::{Lexer, Span, SpannedIter};
pub use crate::source::Source;
/// Trait implemented for an enum representing all tokens. You should never have
/// to implement it manually, use the `#[derive(Logos)]` attribute on your enum.
pub trait Logos<'source>: Sized {
/// Associated type `Extras` for the particular lexer. This can be set using
/// `#[logos(extras = MyExtras)]` and accessed inside callbacks.
type Extras;
/// Source type this token can be lexed from. This will default to `str`,
/// unless one of the defined patterns explicitly uses non-unicode byte values
/// or byte slices, in which case that implementation will use `[u8]`.
type Source: Source + ?Sized + 'source;
/// Error type returned by the lexer. This can be set using
/// `#[logos(error = MyError)]`. Defaults to `()` if not set.
type Error: Default + Clone + PartialEq + Debug + 'source;
/// The heart of Logos. Called by the `Lexer`. The implementation for this function
/// is generated by the `logos-derive` crate.
fn lex(lexer: &mut Lexer<'source, Self>);
/// Create a new instance of a `Lexer` that will produce tokens implementing
/// this `Logos`.
fn lexer(source: &'source Self::Source) -> Lexer<'source, Self>
where
Self::Extras: Default,
{
Lexer::new(source)
}
/// Create a new instance of a `Lexer` with the provided `Extras` that will
/// produce tokens implementing this `Logos`.
fn lexer_with_extras(
source: &'source Self::Source,
extras: Self::Extras,
) -> Lexer<'source, Self> {
Lexer::with_extras(source, extras)
}
}
/// Type that can be returned from a callback, informing the `Lexer` to skip
/// the current token match. See also [`logos::skip`](./fn.skip.html).
///
/// # Example
///
/// ```rust
/// use logos::{Logos, Skip};
///
/// #[derive(Logos, Debug, PartialEq)]
/// enum Token<'a> {
/// // We will treat "abc" as if it was whitespace.
/// // This is identical to using `logos::skip`.
/// #[regex(" |abc", |_| Skip)]
/// Ignored,
///
/// #[regex("[a-zA-Z]+")]
/// Text(&'a str),
/// }
///
/// let tokens: Vec<_> = Token::lexer("Hello abc world").collect();
///
/// assert_eq!(
/// tokens,
/// &[
/// Ok(Token::Text("Hello")),
/// Ok(Token::Text("world")),
/// ],
/// );
/// ```
pub struct Skip;
/// Type that can be returned from a callback, either producing a field
/// for a token, or skipping it.
///
/// # Example
///
/// ```rust
/// use logos::{Logos, Filter};
///
/// #[derive(Logos, Debug, PartialEq)]
/// enum Token {
/// #[regex(r"[ \n\f\t]+", logos::skip)]
/// Ignored,
///
/// #[regex("[0-9]+", |lex| {
/// let n: u64 = lex.slice().parse().unwrap();
///
/// // Only emit a token if `n` is an even number
/// match n % 2 {
/// 0 => Filter::Emit(n),
/// _ => Filter::Skip,
/// }
/// })]
/// EvenNumber(u64)
/// }
///
/// let tokens: Vec<_> = Token::lexer("20 11 42 23 100 8002").collect();
///
/// assert_eq!(
/// tokens,
/// &[
/// Ok(Token::EvenNumber(20)),
/// // skipping 11
/// Ok(Token::EvenNumber(42)),
/// // skipping 23
/// Ok(Token::EvenNumber(100)),
/// Ok(Token::EvenNumber(8002))
/// ]
/// );
/// ```
pub enum Filter<T> {
/// Emit a token with a given value `T`. Use `()` for unit variants without fields.
Emit(T),
    /// Skip the current match, analogous to [`Skip`](./struct.Skip.html).
Skip,
|
random_line_split
|
||
environment.py
|
.ps:
p.daemon = (
True
) # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(("get_spaces", None))
self.observation_space, self.action_space, self.spec = self.remotes[0].recv()
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def reset(self, specific_env=None):
if specific_env is not None:
self.remotes[specific_env].send(("reset", None))
return self.remotes[specific_env].recv()
for remote in self.remotes:
remote.send(("reset", None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(("reset_task", None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="human"):
for pipe in self.remotes:
pipe.send(("render", None))
imgs = np.stack([pipe.recv() for pipe in self.remotes])
bigimg = tile_images(imgs)
if mode == "human":
import cv2
cv2.imshow("vecenv", bigimg[:, :, ::-1])
cv2.waitKey(1)
elif mode == "rgb_array":
return bigimg
else:
raise NotImplementedError
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return float(np.sign(reward))
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(self.height, self.width, 1), dtype=np.uint8
)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self.width, self.height), interpolation=cv2.INTER_AREA
)
return frame[:, :, None]
def unwrap(env):
|
def make_single_env(env_name, **kwargs):
env = gym.make(env_name)
env = AddEpisodeStats(env)
if "NoFrameskip" in env_name:
env = wrap_deepmind(make_atari(env, env_name), **kwargs)
return env
def make_atari(env, env_id, max_episode_steps=4500):
env._max_episode_steps = max_episode_steps * 4
assert "NoFrameskip" in env.spec.id
env = StickyActionEnv(env)
env = MaxAndSkipEnv(env, skip=4)
if "Montezuma" in env_id or "Pitfall" in env_id:
env = MontezumaInfoWrapper(env, room_address=3 if "Montezuma" in env_id else 1)
else:
env = DummyMontezumaInfoWrapper(env)
env = AddRandomStateToInfo(env)
return env
def wrap_deepmind(env, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
# env = NormalizeObservation(env)
return env
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
rl_common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=np.uint8
)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
class MontezumaInfoWrapper(gym.Wrapper):
def __init__(self, env, room_address):
super(MontezumaInfoWrapper, self).__init__(env)
self.room_address = room_address
self.visited_rooms = set()
def get_current_room(self):
ram = unwrap(self.env).ale.getRAM()
assert len(ram) == 128
return int(ram[self.room_address])
def step(self, action):
obs, rew, done, info = self.env.step(action)
self.visited_rooms.add(self.get_current_room())
if done:
if "episode" not in info:
info["episode"] = {}
info["episode"].update(visited_rooms=copy(self.visited_rooms))
self.visited_rooms.clear()
return obs, rew, done, info
def reset(self):
return self.env.reset()
class DummyMontezumaInfoWrapper(gym.Wrapper):
def __init__(self, env):
super(DummyMontezumaInfoWrapper, self).__init__(env)
def step(self, action):
obs, rew, done, info = self.env.step(action)
if done:
if "episode" not in info:
info["episode"] = {}
info["episode"].update(pos_count=0, visited_rooms=set([0]))
return obs, rew, done, info
def reset(self):
return self.env.reset()
class AddEpisodeStats(gym.Wrapper):
def __init__(self, env):
"""Adds the random state to the info field on the first step after reset
"""
gym.Wrapper.__init__(self, env)
def step(self, action):
ob, r, d, info = self.env.step(action)
self.reward += r
self.length += 1
if d:
if "episode" not in info:
info["episode"] = {}
info["episode"]["reward"] = self.reward
info["episode"]["length"] = self.length
return ob, r, d, info
def reset(self, **kwargs):
self.reward = 0
self.length = 0
return self.env.reset(**kwargs)
class AddRandomStateToInfo(gym.Wrapper):
def __init__(self, env):
"""Adds the random state to the info field on the first step after reset
"""
gym.Wrapper.__init__(self, env)
def step(self, action):
ob, r, d, info = self.env.step(action)
if d:
if "episode" not in info:
|
if hasattr(env, "unwrapped"):
return env.unwrapped
elif hasattr(env, "env"):
return unwrap(env.env)
elif hasattr(env, "leg_env"):
return unwrap(env.leg_env)
else:
return env
|
identifier_body
|
environment.py
|
env = env_fn()
while True:
cmd, data = remote.recv()
if cmd == "step":
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == "reset":
ob = env.reset()
remote.send(ob)
elif cmd == "render":
remote.send(env.render(mode="rgb_array"))
elif cmd == "close":
remote.close()
break
elif cmd == "get_spaces":
remote.send((env.observation_space, env.action_space, env.spec))
else:
raise NotImplementedError
class SubprocVecEnv:
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
self.num_envs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(self.num_envs)])
self.ps = [
Process(target=worker, args=(work_remote, remote, env_fn))
for (work_remote, remote, env_fn) in zip(
self.work_remotes, self.remotes, env_fns
)
]
for p in self.ps:
p.daemon = (
True
) # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(("get_spaces", None))
self.observation_space, self.action_space, self.spec = self.remotes[0].recv()
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def reset(self, specific_env=None):
if specific_env is not None:
self.remotes[specific_env].send(("reset", None))
return self.remotes[specific_env].recv()
for remote in self.remotes:
remote.send(("reset", None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(("reset_task", None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="human"):
for pipe in self.remotes:
pipe.send(("render", None))
imgs = np.stack([pipe.recv() for pipe in self.remotes])
bigimg = tile_images(imgs)
if mode == "human":
import cv2
cv2.imshow("vecenv", bigimg[:, :, ::-1])
cv2.waitKey(1)
elif mode == "rgb_array":
return bigimg
else:
raise NotImplementedError
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return float(np.sign(reward))
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(self.height, self.width, 1), dtype=np.uint8
)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self.width, self.height), interpolation=cv2.INTER_AREA
)
return frame[:, :, None]
def unwrap(env):
if hasattr(env, "unwrapped"):
return env.unwrapped
elif hasattr(env, "env"):
return unwrap(env.env)
elif hasattr(env, "leg_env"):
return unwrap(env.leg_env)
else:
return env
def make_single_env(env_name, **kwargs):
env = gym.make(env_name)
env = AddEpisodeStats(env)
if "NoFrameskip" in env_name:
env = wrap_deepmind(make_atari(env, env_name), **kwargs)
return env
def make_atari(env, env_id, max_episode_steps=4500):
env._max_episode_steps = max_episode_steps * 4
assert "NoFrameskip" in env.spec.id
env = StickyActionEnv(env)
env = MaxAndSkipEnv(env, skip=4)
if "Montezuma" in env_id or "Pitfall" in env_id:
env = MontezumaInfoWrapper(env, room_address=3 if "Montezuma" in env_id else 1)
else:
env = DummyMontezumaInfoWrapper(env)
env = AddRandomStateToInfo(env)
return env
def wrap_deepmind(env, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
# env = NormalizeObservation(env)
return env
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
rl_common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=np.uint8
)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
class MontezumaInfoWrapper(gym.Wrapper):
def __init__(self, env, room_address):
super(MontezumaInfoWrapper, self).__init__(env)
self.room_address = room_address
self.visited_rooms = set()
def get_current_room(self):
ram = unwrap(self.env).ale.getRAM()
assert len(ram) == 128
return int(ram[self.room_address])
def step(self, action):
obs, rew, done, info = self.env.step(action)
self.visited_rooms.add(self.get_current_room())
if done:
if "episode" not in info:
info["episode"] = {}
info["episode"].update(visited_rooms=copy(self.visited_rooms))
self.visited_rooms.clear()
return obs, rew, done, info
def reset(self):
return self.env.reset()
class DummyMontezumaInfoWrapper(gym.Wrapper):
def __init__(self, env):
super(DummyMontezumaInfoWrapper, self).__init__(env)
def step(self, action):
obs, rew, done, info = self.env
|
cv2.ocl.setUseOpenCL(False)
def worker(remote, parent_remote, env_fn):
parent_remote.close()
|
random_line_split
|
|
environment.py
|
.ps:
p.daemon = (
True
) # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(("get_spaces", None))
self.observation_space, self.action_space, self.spec = self.remotes[0].recv()
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def reset(self, specific_env=None):
if specific_env is not None:
self.remotes[specific_env].send(("reset", None))
return self.remotes[specific_env].recv()
for remote in self.remotes:
remote.send(("reset", None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(("reset_task", None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="human"):
for pipe in self.remotes:
pipe.send(("render", None))
imgs = np.stack([pipe.recv() for pipe in self.remotes])
bigimg = tile_images(imgs)
if mode == "human":
import cv2
cv2.imshow("vecenv", bigimg[:, :, ::-1])
cv2.waitKey(1)
elif mode == "rgb_array":
|
else:
raise NotImplementedError
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return float(np.sign(reward))
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(self.height, self.width, 1), dtype=np.uint8
)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self.width, self.height), interpolation=cv2.INTER_AREA
)
return frame[:, :, None]
def unwrap(env):
if hasattr(env, "unwrapped"):
return env.unwrapped
elif hasattr(env, "env"):
return unwrap(env.env)
elif hasattr(env, "leg_env"):
return unwrap(env.leg_env)
else:
return env
def make_single_env(env_name, **kwargs):
env = gym.make(env_name)
env = AddEpisodeStats(env)
if "NoFrameskip" in env_name:
env = wrap_deepmind(make_atari(env, env_name), **kwargs)
return env
def make_atari(env, env_id, max_episode_steps=4500):
env._max_episode_steps = max_episode_steps * 4
assert "NoFrameskip" in env.spec.id
env = StickyActionEnv(env)
env = MaxAndSkipEnv(env, skip=4)
if "Montezuma" in env_id or "Pitfall" in env_id:
env = MontezumaInfoWrapper(env, room_address=3 if "Montezuma" in env_id else 1)
else:
env = DummyMontezumaInfoWrapper(env)
env = AddRandomStateToInfo(env)
return env
def wrap_deepmind(env, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
# env = NormalizeObservation(env)
return env
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
rl_common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=np.uint8
)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
class MontezumaInfoWrapper(gym.Wrapper):
def __init__(self, env, room_address):
super(MontezumaInfoWrapper, self).__init__(env)
self.room_address = room_address
self.visited_rooms = set()
def get_current_room(self):
ram = unwrap(self.env).ale.getRAM()
assert len(ram) == 128
return int(ram[self.room_address])
def step(self, action):
obs, rew, done, info = self.env.step(action)
self.visited_rooms.add(self.get_current_room())
if done:
if "episode" not in info:
info["episode"] = {}
info["episode"].update(visited_rooms=copy(self.visited_rooms))
self.visited_rooms.clear()
return obs, rew, done, info
def reset(self):
return self.env.reset()
class DummyMontezumaInfoWrapper(gym.Wrapper):
def __init__(self, env):
super(DummyMontezumaInfoWrapper, self).__init__(env)
def step(self, action):
obs, rew, done, info = self.env.step(action)
if done:
if "episode" not in info:
info["episode"] = {}
info["episode"].update(pos_count=0, visited_rooms=set([0]))
return obs, rew, done, info
def reset(self):
return self.env.reset()
class AddEpisodeStats(gym.Wrapper):
def __init__(self, env):
"""Adds the random state to the info field on the first step after reset
"""
gym.Wrapper.__init__(self, env)
def step(self, action):
ob, r, d, info = self.env.step(action)
self.reward += r
self.length += 1
if d:
if "episode" not in info:
info["episode"] = {}
info["episode"]["reward"] = self.reward
info["episode"]["length"] = self.length
return ob, r, d, info
def reset(self, **kwargs):
self.reward = 0
self.length = 0
return self.env.reset(**kwargs)
class AddRandomStateToInfo(gym.Wrapper):
def __init__(self, env):
"""Adds the random state to the info field on the first step after reset
"""
gym.Wrapper.__init__(self, env)
def step(self, action):
ob, r, d, info = self.env.step(action)
if d:
if "episode" not in info:
|
return bigimg
|
conditional_block
|
environment.py
|
.ps:
p.daemon = (
True
) # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(("get_spaces", None))
self.observation_space, self.action_space, self.spec = self.remotes[0].recv()
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def
|
(self, specific_env=None):
if specific_env is not None:
self.remotes[specific_env].send(("reset", None))
return self.remotes[specific_env].recv()
for remote in self.remotes:
remote.send(("reset", None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(("reset_task", None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="human"):
for pipe in self.remotes:
pipe.send(("render", None))
imgs = np.stack([pipe.recv() for pipe in self.remotes])
bigimg = tile_images(imgs)
if mode == "human":
import cv2
cv2.imshow("vecenv", bigimg[:, :, ::-1])
cv2.waitKey(1)
elif mode == "rgb_array":
return bigimg
else:
raise NotImplementedError
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return float(np.sign(reward))
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(self.height, self.width, 1), dtype=np.uint8
)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self.width, self.height), interpolation=cv2.INTER_AREA
)
return frame[:, :, None]
def unwrap(env):
if hasattr(env, "unwrapped"):
return env.unwrapped
elif hasattr(env, "env"):
return unwrap(env.env)
elif hasattr(env, "leg_env"):
return unwrap(env.leg_env)
else:
return env
def make_single_env(env_name, **kwargs):
env = gym.make(env_name)
env = AddEpisodeStats(env)
if "NoFrameskip" in env_name:
env = wrap_deepmind(make_atari(env, env_name), **kwargs)
return env
def make_atari(env, env_id, max_episode_steps=4500):
env._max_episode_steps = max_episode_steps * 4
assert "NoFrameskip" in env.spec.id
env = StickyActionEnv(env)
env = MaxAndSkipEnv(env, skip=4)
if "Montezuma" in env_id or "Pitfall" in env_id:
env = MontezumaInfoWrapper(env, room_address=3 if "Montezuma" in env_id else 1)
else:
env = DummyMontezumaInfoWrapper(env)
env = AddRandomStateToInfo(env)
return env
def wrap_deepmind(env, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
# env = NormalizeObservation(env)
return env
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
rl_common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=np.uint8
)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
class MontezumaInfoWrapper(gym.Wrapper):
def __init__(self, env, room_address):
super(MontezumaInfoWrapper, self).__init__(env)
self.room_address = room_address
self.visited_rooms = set()
def get_current_room(self):
ram = unwrap(self.env).ale.getRAM()
assert len(ram) == 128
return int(ram[self.room_address])
def step(self, action):
obs, rew, done, info = self.env.step(action)
self.visited_rooms.add(self.get_current_room())
if done:
if "episode" not in info:
info["episode"] = {}
info["episode"].update(visited_rooms=copy(self.visited_rooms))
self.visited_rooms.clear()
return obs, rew, done, info
def reset(self):
return self.env.reset()
class DummyMontezumaInfoWrapper(gym.Wrapper):
def __init__(self, env):
super(DummyMontezumaInfoWrapper, self).__init__(env)
def step(self, action):
obs, rew, done, info = self.env.step(action)
if done:
if "episode" not in info:
info["episode"] = {}
info["episode"].update(pos_count=0, visited_rooms=set([0]))
return obs, rew, done, info
def reset(self):
return self.env.reset()
class AddEpisodeStats(gym.Wrapper):
def __init__(self, env):
"""Adds the random state to the info field on the first step after reset
"""
gym.Wrapper.__init__(self, env)
def step(self, action):
ob, r, d, info = self.env.step(action)
self.reward += r
self.length += 1
if d:
if "episode" not in info:
info["episode"] = {}
info["episode"]["reward"] = self.reward
info["episode"]["length"] = self.length
return ob, r, d, info
def reset(self, **kwargs):
self.reward = 0
self.length = 0
return self.env.reset(**kwargs)
class AddRandomStateToInfo(gym.Wrapper):
def __init__(self, env):
"""Adds the random state to the info field on the first step after reset
"""
gym.Wrapper.__init__(self, env)
def step(self, action):
ob, r, d, info = self.env.step(action)
if d:
if "episode" not in info
|
reset
|
identifier_name
|
messages.d.ts
|
init({ entity, offsetId, minId, maxId, fromUser, offsetDate, addOffset, filter, search, replyTo, }: MessageIterParams): Promise<false | undefined>;
_loadNextChunk(): Promise<true | undefined>;
_messageInRange(message: Message): boolean;
[Symbol.asyncIterator](): AsyncIterator<Message, any, undefined>;
_updateOffset(lastMessage: Message, response: any): void;
}
interface IDsIterInterface {
entity: EntityLike;
ids: Api.TypeInputMessage[];
}
export declare class
|
extends RequestIter {
_ids?: Api.TypeInputMessage[];
_offset?: number;
_ty: number | undefined;
private _entity;
_init({ entity, ids }: IDsIterInterface): Promise<void>;
[Symbol.asyncIterator](): AsyncIterator<Message, any, undefined>;
_loadNextChunk(): Promise<false | undefined>;
}
/**
* Interface for iterating over messages. used in both {@link iterMessages} and {@link getMessages}.
*/
export interface IterMessagesParams {
/** Number of messages to be retrieved.<br/>
     * Due to limitations of the API, retrieving more than 3000 messages will take longer than half a minute (it might even take longer).<br/>
     * If undefined is passed instead of a number, the library will try to retrieve all the messages.*/
limit?: number;
/** Offset date (messages previous to this date will be retrieved). Exclusive. */
offsetDate?: DateLike;
/** Offset message ID (only messages previous to the given ID will be retrieved). Exclusive. */
offsetId?: number;
/** All the messages with a higher (newer) ID or equal to this will be excluded. */
maxId?: number;
/** All the messages with a lower (older) ID or equal to this will be excluded. */
minId?: number;
/** Additional message offset (all of the specified offsets + this offset = older messages). */
addOffset?: number;
/** The string to be used as a search query. */
search?: string;
/** The filter to use when returning messages.<br/>
* For instance, InputMessagesFilterPhotos would yield only messages containing photos.
*/
filter?: Api.TypeMessagesFilter | Api.TypeMessagesFilter[];
/** Only messages from this user will be returned. */
fromUser?: EntityLike;
/** Wait time (in seconds) between different GetHistory requests.<br/>
* Use this parameter to avoid hitting the FloodWaitError as needed.<br/>
* If left to undefined, it will default to 1 second only if the number of messages is higher than 3000.
* If the ids parameter is used, this time will default to 10 seconds only if the amount of IDs is higher than 300.
*/
waitTime?: number;
/** A single integer ID (or several IDs) for the message that should be returned.<br/>
* This parameter takes precedence over the rest (which will be ignored if this is set).<br/>
* This can for instance be used to get the message with ID 123 from a channel.<br/>
     * **Note** that if the message doesn't exist, undefined will appear in its place.
*/
ids?: number | number[] | Api.TypeInputMessage | Api.TypeInputMessage[];
/** If set to `true`, the messages will be returned in reverse order (from oldest to newest, instead of the default newest to oldest).<br/>
* This also means that the meaning of offsetId and offsetDate parameters is reversed, although they will still be exclusive.<br/>
* `minId` becomes equivalent to `offsetId` instead of being `maxId` as well since messages are returned in ascending order.<br/>
* You cannot use this if both entity and ids are undefined.
*/
reverse?: boolean;
/** If set to a message ID, the messages that reply to this ID will be returned.<br/>
* This feature is also known as comments in posts of broadcast channels, or viewing threads in groups.<br/>
* This feature can only be used in broadcast channels and their linked supergroups. Using it in a chat or private conversation will result in PEER_ID_INVALID error.<br/>
* When using this parameter, the filter and search parameters have no effect, since Telegram's API doesn't support searching messages in replies.
*/
replyTo?: number;
}
/**
* Interface for sending a message. only message is required
*/
export interface SendMessageParams {
/** The message to be sent, or another message object to resend as a copy.<br/>
* The maximum length for a message is 35,000 bytes or 4,096 characters.<br/>
* Longer messages will not be sliced automatically, and you should slice them manually if the text to send is longer than said length. */
message: MessageLike;
/** Whether to reply to a message or not. If an integer is provided, it should be the ID of the message that it should reply to. */
replyTo?: number | Api.Message;
/** Optional attributes that override the inferred ones, like DocumentAttributeFilename and so on. */
attributes?: Api.TypeDocumentAttribute[];
/** See the {@link parseMode} property for allowed values. Markdown parsing will be used by default. */
parseMode?: any;
/** A list of message formatting entities. When provided, the parseMode is ignored. */
formattingEntities?: Api.TypeMessageEntity[];
/** Should the link preview be shown? */
linkPreview?: boolean;
/** Sends a message with a file attached (e.g. a photo, video, audio or document). The message may be empty. */
file?: FileLike;
/** Optional JPEG thumbnail (for documents). Telegram will ignore this parameter unless you pass a .jpg file!<br/>
* The file must also be small in dimensions and in disk size. Successful thumbnails were files below 20kB and 320x320px.<br/>
* Width/height and dimensions/size ratios may be important.
* For Telegram to accept a thumbnail, you must provide the dimensions of the underlying media through `attributes:` with DocumentAttributesVideo.
*/
thumb?: FileLike;
/** Whether to send the given file as a document or not. */
forceDocument?: false;
/** Whether the existing draft should be cleared or not. */
clearDraft?: false;
/** The matrix (list of lists), row list or button to be shown after sending the message.<br/>
* This parameter will only work if you have signed in as a bot. You can also pass your own ReplyMarkup here.<br/>
* <br/>
* All the following limits apply together:
* - There can be 100 buttons at most (any more are ignored).
* - There can be 8 buttons per row at most (more are ignored).
* - The maximum callback data per button is 64 bytes.
* - The maximum data that can be embedded in total is just over 4KB, shared between inline callback data and text.
*/
buttons?: MarkupLike;
/** Whether the message should notify people in a broadcast channel or not. Defaults to false, which means it will notify them. Set it to True to alter this behaviour. */
silent?: boolean;
/** Whether the sent video supports streaming or not.<br/>
* Note that Telegram only recognizes as streamable some formats like MP4, and others like AVI or MKV will not work.<br/>
* You should convert these to MP4 before sending if you want them to be streamable. Unsupported formats will result in VideoContentTypeError. */
supportStreaming?: boolean;
/** If set, the message won't send immediately, and instead it will be scheduled to be automatically sent at a later time. */
schedule?: DateLike;
}
/** interface used for forwarding messages */
export interface ForwardMessagesParams {
/** The message(s) to forward, or their integer IDs. */
messages: MessageIDLike[];
/** If the given messages are integer IDs and not instances of the Message class, this must be specified in order for the forward to work.<br/> */
fromPeer: EntityLike;
/** Whether the message should notify people with sound or not.<br/>
* Defaults to false (send with a notification sound unless the person has the chat muted). Set it to true to alter this behaviour. */
silent?: boolean;
/** If set, the message(s) won't forward immediately, and instead they will be scheduled to be automatically sent at a later time. */
schedule?: DateLike;
}
/** Interface for editing messages */
export interface EditMessageParams {
/** The ID of the message (or Message itself) to be edited. If the entity was a Message, then this message will be treated as the new text. */
message: Api.Message | number;
/** The new text of the message. Does nothing if the entity was a Message. */
text: string;
/** See the {@link TelegramClient.parseMode} property for allowed values. Markdown parsing will be used by default. */
parseMode?: any;
/** A list of message formatting entities. When provided, the parseMode is ignored. */
formattingEntities?: Api.TypeMessageEntity[];
/** Should the link preview be shown? */
linkPreview?: boolean;
/** The file object that should replace the existing media in the message. // not supported yet. */
file?: FileLike | FileLike[];
/** thumbnail to be edited. // not supported yet */
force
|
_IDsIter
|
identifier_name
|
messages.d.ts
|
_init({ entity, offsetId, minId, maxId, fromUser, offsetDate, addOffset, filter, search, replyTo, }: MessageIterParams): Promise<false | undefined>;
_loadNextChunk(): Promise<true | undefined>;
_messageInRange(message: Message): boolean;
[Symbol.asyncIterator](): AsyncIterator<Message, any, undefined>;
_updateOffset(lastMessage: Message, response: any): void;
}
interface IDsIterInterface {
entity: EntityLike;
ids: Api.TypeInputMessage[];
}
export declare class _IDsIter extends RequestIter {
_ids?: Api.TypeInputMessage[];
_offset?: number;
_ty: number | undefined;
private _entity;
_init({ entity, ids }: IDsIterInterface): Promise<void>;
[Symbol.asyncIterator](): AsyncIterator<Message, any, undefined>;
_loadNextChunk(): Promise<false | undefined>;
}
/**
* Interface for iterating over messages. used in both {@link iterMessages} and {@link getMessages}.
*/
export interface IterMessagesParams {
/** Number of messages to be retrieved.<br/>
     * Due to limitations of the API, retrieving more than 3000 messages will take longer than half a minute (it might even take longer).<br/>
     * If undefined is passed instead of a number, the library will try to retrieve all the messages.*/
limit?: number;
/** Offset date (messages previous to this date will be retrieved). Exclusive. */
offsetDate?: DateLike;
/** Offset message ID (only messages previous to the given ID will be retrieved). Exclusive. */
offsetId?: number;
/** All the messages with a higher (newer) ID or equal to this will be excluded. */
maxId?: number;
/** All the messages with a lower (older) ID or equal to this will be excluded. */
minId?: number;
/** Additional message offset (all of the specified offsets + this offset = older messages). */
addOffset?: number;
/** The string to be used as a search query. */
search?: string;
/** The filter to use when returning messages.<br/>
* For instance, InputMessagesFilterPhotos would yield only messages containing photos.
*/
filter?: Api.TypeMessagesFilter | Api.TypeMessagesFilter[];
/** Only messages from this user will be returned. */
fromUser?: EntityLike;
/** Wait time (in seconds) between different GetHistory requests.<br/>
* Use this parameter to avoid hitting the FloodWaitError as needed.<br/>
* If left to undefined, it will default to 1 second only if the number of messages is higher than 3000.
* If the ids parameter is used, this time will default to 10 seconds only if the amount of IDs is higher than 300.
*/
waitTime?: number;
/** A single integer ID (or several IDs) for the message that should be returned.<br/>
* This parameter takes precedence over the rest (which will be ignored if this is set).<br/>
* This can for instance be used to get the message with ID 123 from a channel.<br/>
     * **Note** that if the message doesn't exist, undefined will appear in its place.
*/
ids?: number | number[] | Api.TypeInputMessage | Api.TypeInputMessage[];
/** If set to `true`, the messages will be returned in reverse order (from oldest to newest, instead of the default newest to oldest).<br/>
* This also means that the meaning of offsetId and offsetDate parameters is reversed, although they will still be exclusive.<br/>
* `minId` becomes equivalent to `offsetId` instead of being `maxId` as well since messages are returned in ascending order.<br/>
* You cannot use this if both entity and ids are undefined.
*/
reverse?: boolean;
/** If set to a message ID, the messages that reply to this ID will be returned.<br/>
* This feature is also known as comments in posts of broadcast channels, or viewing threads in groups.<br/>
* This feature can only be used in broadcast channels and their linked supergroups. Using it in a chat or private conversation will result in PEER_ID_INVALID error.<br/>
* When using this parameter, the filter and search parameters have no effect, since Telegram's API doesn't support searching messages in replies.
*/
replyTo?: number;
}
/**
* Interface for sending a message. only message is required
*/
export interface SendMessageParams {
/** The message to be sent, or another message object to resend as a copy.<br/>
* The maximum length for a message is 35,000 bytes or 4,096 characters.<br/>
* Longer messages will not be sliced automatically, and you should slice them manually if the text to send is longer than said length. */
message: MessageLike;
/** Whether to reply to a message or not. If an integer is provided, it should be the ID of the message that it should reply to. */
replyTo?: number | Api.Message;
/** Optional attributes that override the inferred ones, like DocumentAttributeFilename and so on. */
attributes?: Api.TypeDocumentAttribute[];
/** See the {@link parseMode} property for allowed values. Markdown parsing will be used by default. */
parseMode?: any;
/** A list of message formatting entities. When provided, the parseMode is ignored. */
formattingEntities?: Api.TypeMessageEntity[];
/** Should the link preview be shown? */
linkPreview?: boolean;
/** Sends a message with a file attached (e.g. a photo, video, audio or document). The message may be empty. */
file?: FileLike;
/** Optional JPEG thumbnail (for documents). Telegram will ignore this parameter unless you pass a .jpg file!<br/>
* The file must also be small in dimensions and in disk size. Successful thumbnails were files below 20kB and 320x320px.<br/>
* Width/height and dimensions/size ratios may be important.
* For Telegram to accept a thumbnail, you must provide the dimensions of the underlying media through `attributes:` with DocumentAttributesVideo.
*/
thumb?: FileLike;
/** Whether to send the given file as a document or not. */
forceDocument?: false;
/** Whether the existing draft should be cleared or not. */
clearDraft?: false;
/** The matrix (list of lists), row list or button to be shown after sending the message.<br/>
* This parameter will only work if you have signed in as a bot. You can also pass your own ReplyMarkup here.<br/>
* <br/>
* All the following limits apply together:
* - There can be 100 buttons at most (any more are ignored).
* - There can be 8 buttons per row at most (more are ignored).
* - The maximum callback data per button is 64 bytes.
* - The maximum data that can be embedded in total is just over 4KB, shared between inline callback data and text.
*/
buttons?: MarkupLike;
/** Whether the message should notify people in a broadcast channel or not. Defaults to false, which means it will notify them. Set it to True to alter this behaviour. */
silent?: boolean;
/** Whether the sent video supports streaming or not.<br/>
* Note that Telegram only recognizes as streamable some formats like MP4, and others like AVI or MKV will not work.<br/>
* You should convert these to MP4 before sending if you want them to be streamable. Unsupported formats will result in VideoContentTypeError. */
supportStreaming?: boolean;
/** If set, the message won't send immediately, and instead it will be scheduled to be automatically sent at a later time. */
schedule?: DateLike;
}
/** interface used for forwarding messages */
export interface ForwardMessagesParams {
/** The message(s) to forward, or their integer IDs. */
messages: MessageIDLike[];
/** If the given messages are integer IDs and not instances of the Message class, this must be specified in order for the forward to work.<br/> */
|
schedule?: DateLike;
}
/** Interface for editing messages */
export interface EditMessageParams {
/** The ID of the message (or Message itself) to be edited. If the entity was a Message, then this message will be treated as the new text. */
message: Api.Message | number;
/** The new text of the message. Does nothing if the entity was a Message. */
text: string;
/** See the {@link TelegramClient.parseMode} property for allowed values. Markdown parsing will be used by default. */
parseMode?: any;
/** A list of message formatting entities. When provided, the parseMode is ignored. */
formattingEntities?: Api.TypeMessageEntity[];
/** Should the link preview be shown? */
linkPreview?: boolean;
/** The file object that should replace the existing media in the message. // not supported yet. */
file?: FileLike | FileLike[];
/** thumbnail to be edited. // not supported yet */
force
|
fromPeer: EntityLike;
/** Whether the message should notify people with sound or not.<br/>
* Defaults to false (send with a notification sound unless the person has the chat muted). Set it to true to alter this behaviour. */
silent?: boolean;
 /** If set, the message(s) won't be forwarded immediately, and will instead be scheduled to be sent automatically at a later time. */
|
random_line_split
|
inferred_modules.go
|
the default value.
// > 0 -> set the value.
MinimumModuleSize int
// The number of items in a longer prefix needed to break out into its own prefix.
//
// For example, with the tokens `pkg_mod_sub1_a`, `pkg_mod_sub2_b`, `pkg_mod_sub2_c`,
// `pkg_mod_sub3_d`:
//
// MinimumSubmoduleSize = 3 will result in:
//
// pkg:mod:Sub1A, pkg:mod:Sub2B, pkg:mod:Sub2C, pkg:mod:Sub3D
//
// MinimumSubmoduleSize = 2 will result in:
//
// pkg:mod:Sub1A, pkg:modSub2:B, pkg:modSub2:C, pkg:mod:Sub3D
//
// < 0 -> don't bin into submodules. Only the most common prefix will be used.
// = 0 -> apply the default value.
// > 0 -> set the value.
MimimumSubmoduleSize int
}
// A strategy to infer module placement from global analysis of all items (Resources & DataSources).
func InferredModules(
info *b.ProviderInfo, finalize Make, opts *InferredModulesOpts,
) (b.Strategy, error) {
if opts == nil {
opts = &InferredModulesOpts{}
}
err := opts.ensurePrefix(info)
if err != nil {
return b.Strategy{}, fmt.Errorf("inferring pkg prefix: %w", err)
}
contract.Assertf(opts.MinimumModuleSize >= 0, "Cannot have a minimum module size less than zero")
if opts.MinimumModuleSize == 0 {
opts.MinimumModuleSize = defaultMinimumModuleSize
}
if opts.MimimumSubmoduleSize == 0 {
opts.MimimumSubmoduleSize = defaultMinimumSubmoduleSize
}
if opts.MainModule == "" {
opts.MainModule = "index"
}
tokenMap := opts.computeTokens(info)
rIsEmpty := func(r *b.ResourceInfo) bool { return r.Tok == "" }
dIsEmpty := func(r *b.DataSourceInfo) bool { return r.Tok == "" }
return b.Strategy{
Resource: tokenFromMap(tokenMap, rIsEmpty, finalize, func(tk string, resource *b.ResourceInfo) {
checkedApply(&resource.Tok, tokens.Type(tk))
}),
DataSource: tokenFromMap(tokenMap, dIsEmpty, finalize, func(tk string, datasource *b.DataSourceInfo) {
checkedApply(&datasource.Tok, tokens.ModuleMember(tk))
}),
}, nil
}
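// Illustrative call site (an assumption, not taken from this file): a provider author would
// typically pass the bridged provider info plus thresholds controlling when a common prefix
// becomes its own module or submodule. The field values here are made up for the example.
//
//	strategy, err := InferredModules(info, finalize, &InferredModulesOpts{
//		MainModule:           "index",
//		MinimumModuleSize:    5, // prefixes with fewer than 5 items stay in the main module
//		MimimumSubmoduleSize: 3, // a longer prefix needs at least 3 items to become a submodule
//	})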
func (opts *InferredModulesOpts) ensurePrefix(info *b.ProviderInfo) error {
prefix := opts.TfPkgPrefix
var noCommonality bool
findPrefix := func(key string, _ shim.Resource) bool {
if noCommonality {
return false
}
if prefix == "" {
prefix = key
return true
}
prefix = sharedPrefix(key, prefix)
if prefix == "" {
noCommonality = true
}
return true
}
mapProviderItems(info, findPrefix)
if noCommonality {
return fmt.Errorf("no common prefix detected")
}
if prefix == "" {
return fmt.Errorf("no items found")
}
opts.TfPkgPrefix = prefix
return nil
}
type node struct {
segment string
children map[string]*node
// tfToken is only non-empty if the node represents a literal tf token
tfToken string
}
func (n *node) child(segment string) *node {
if n.children == nil {
n.children = map[string]*node{}
}
v, ok := n.children[segment]
if ok {
return v
}
child := &node{segment: segment}
n.children[segment] = child
return child
}
func (n *node) insert(child *node) {
if n.children == nil {
n.children = map[string]*node{}
}
_, ok := n.children[child.segment]
contract.Assertf(!ok, "duplicate segment in child: %q", child.segment)
n.children[child.segment] = child
}
func (n *node) len() int {
i := 0
if n.tfToken != "" {
i++
}
for _, child := range n.children {
i += child.len()
}
return i
}
// A depth first search of child nodes.
//
// parent is a function that returns parent nodes, with the immediate parent starting at 0
// and each increment increasing the indirection. 1 yields the grandparent, 2 the
// great-grandparent, etc. parent panics when no node is available.
//
// dfs will pick up nodes inserted up the hierarchy during traversal, but only if they
// were inserted with unique names.
func (n *node) dfs(iter func(parent func(int) *node, node *node)) {
parentStack := []*node{n}
fullIter(n.children, func(_ string, child *node) {
child.dfsInner(&parentStack, iter)
})
}
// Iterate over a map in any order, ensuring that all keys in the map are iterated over,
// even if they were added during the iteration.
//
// There is no guarantee of the order of the iteration.
func fullIter[K comparable, V any](m map[K]V, f func(K, V)) {
seen := map[K]bool{}
for done := false; !done; {
done = true
for k, v := range m {
if seen[k] {
continue
}
seen[k] = true
done = false
f(k, v)
}
}
}
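// Illustrative sketch, not part of the original file: unlike a plain range loop, fullIter
// also visits keys that are added to the map while the iteration is still running.
func exampleFullIter() {
	m := map[string]int{"a": 1}
	var visited []string
	fullIter(m, func(k string, _ int) {
		visited = append(visited, k)
		if k == "a" {
			m["b"] = 2 // added mid-iteration; fullIter still visits it
		}
	})
	// visited now contains both "a" and "b" (iteration order is not guaranteed).
	_ = visited
}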
func (n *node) dfsInner(parentStack *[]*node, iter func(parent func(int) *node, node *node)) {
// Push this node onto the parent stack so children can access it
*parentStack = append(*parentStack, n)
// Iterate over children by key, making sure that newly added keys are iterated over
fullIter(n.children, func(k string, v *node) {
v.dfsInner(parentStack, iter)
})
// Pop the node off afterwards
*parentStack = (*parentStack)[:len(*parentStack)-1]
iter(func(i int) *node { return (*parentStack)[len(*parentStack)-1-i] }, n)
}
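// Illustrative sketch, not part of the original file: dfs visits nodes bottom-up (children
// before their parent), and parent(0) resolves to the immediate parent of the visited node.
func exampleDfs() {
	root := &node{segment: "pkg"}
	root.child("mod").child("sub")
	var order []string
	root.dfs(func(parent func(int) *node, n *node) {
		order = append(order, parent(0).segment+"/"+n.segment)
	})
	// order is ["mod/sub", "pkg/mod"]: the leaf is visited first, then its parent.
	_ = order
}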
// Precompute the mapping from tf tokens to pulumi modules.
//
// The resulting map is complete for all TF resources and datasources in info.P.
func (opts *InferredModulesOpts) computeTokens(info *b.ProviderInfo) map[string]tokenInfo
|
contract.Assertf(tree.tfToken == "", "We don't expect a resource called '%s'", opts.TfPkgPrefix)
output := map[string]tokenInfo{}
// Collapse the segment tree via a depth first traversal.
tree.dfs(func(parent func(int) *node, n *node) {
if parent(0) == tree {
// Inject each path as a node
if n.len() < opts.MinimumModuleSize {
// Node segment is not big enough for its own module, so inject each token
// into the main module
for _, child := range n.children {
output[child.tfToken] = tokenInfo{
mod: opts.MainModule,
name: n.segment + "_" + child.segment,
}
}
if n.tfToken != "" {
output[n.tfToken] = tokenInfo{
mod: opts.MainModule,
name: n.segment,
}
}
} else {
// Node segment will form its own module, so inject each token as a
// module member of `n.segment`.
for _, child := range n.children {
contract.Assertf(child.tfToken != "", "child of %q: %#v", n.segment, child)
output[child.tfToken] = tokenInfo{
mod: n.segment,
name: child.segment,
}
}
// If the node is both a module and an item, put the item in the module
if n.tfToken != "" {
output[n.tfToken] = tokenInfo{
mod: n.segment,
name: n.segment,
}
}
}
} else {
// flatten the tree by injecting children into the parent node.
if n.len() < opts.MimimumSubmoduleSize {
|
{
contract.Assertf(opts.TfPkgPrefix != "", "TF package prefix not provided or computed")
tree := &node{segment: opts.TfPkgPrefix}
// Build segment tree:
//
// Expand each item (resource | datasource) into its segments (divided by "_"), then
// insert each token into the tree structure. The tree is defined by segments, where
// each node represents a segment and each path a token.
mapProviderItems(info, func(s string, _ shim.Resource) bool {
segments := strings.Split(strings.TrimPrefix(s, opts.TfPkgPrefix), "_")
contract.Assertf(len(segments) > 0, "No segments found")
contract.Assertf(segments[0] != "", "Empty segment from splitting %q with prefix %q", s, opts.TfPkgPrefix)
node := tree
for _, segment := range segments {
node = node.child(segment)
}
node.tfToken = s
return true
})
|
identifier_body
|
inferred_modules.go
|
the default value.
// > 0 -> set the value.
MinimumModuleSize int
// The number of items in a longer prefix needed to break out into its own prefix.
//
// For example, with the tokens `pkg_mod_sub1_a`, `pkg_mod_sub2_b`, `pkg_mod_sub2_c`,
// `pkg_mod_sub3_d`:
//
// MinimumSubmoduleSize = 3 will result in:
//
// pkg:mod:Sub1A, pkg:mod:Sub2B, pkg:mod:Sub2C, pkg:mod:Sub3D
//
// MinimumSubmoduleSize = 2 will result in:
//
// pkg:mod:Sub1A, pkg:modSub2:B, pkg:modSub2:C, pkg:mod:Sub3D
//
// < 0 -> don't bin into submodules. Only the most common prefix will be used.
// = 0 -> apply the default value.
// > 0 -> set the value.
MimimumSubmoduleSize int
}
// A strategy to infer module placement from global analysis of all items (Resources & DataSources).
func InferredModules(
info *b.ProviderInfo, finalize Make, opts *InferredModulesOpts,
) (b.Strategy, error) {
if opts == nil {
opts = &InferredModulesOpts{}
}
err := opts.ensurePrefix(info)
if err != nil {
return b.Strategy{}, fmt.Errorf("inferring pkg prefix: %w", err)
}
contract.Assertf(opts.MinimumModuleSize >= 0, "Cannot have a minimum module size less than zero")
if opts.MinimumModuleSize == 0 {
opts.MinimumModuleSize = defaultMinimumModuleSize
}
if opts.MimimumSubmoduleSize == 0 {
opts.MimimumSubmoduleSize = defaultMinimumSubmoduleSize
}
if opts.MainModule == "" {
opts.MainModule = "index"
}
tokenMap := opts.computeTokens(info)
rIsEmpty := func(r *b.ResourceInfo) bool { return r.Tok == "" }
dIsEmpty := func(r *b.DataSourceInfo) bool { return r.Tok == "" }
return b.Strategy{
Resource: tokenFromMap(tokenMap, rIsEmpty, finalize, func(tk string, resource *b.ResourceInfo) {
checkedApply(&resource.Tok, tokens.Type(tk))
}),
DataSource: tokenFromMap(tokenMap, dIsEmpty, finalize, func(tk string, datasource *b.DataSourceInfo) {
checkedApply(&datasource.Tok, tokens.ModuleMember(tk))
}),
}, nil
}
func (opts *InferredModulesOpts) ensurePrefix(info *b.ProviderInfo) error {
prefix := opts.TfPkgPrefix
var noCommonality bool
findPrefix := func(key string, _ shim.Resource) bool {
if noCommonality {
return false
}
if prefix == "" {
prefix = key
return true
}
prefix = sharedPrefix(key, prefix)
if prefix == "" {
noCommonality = true
}
return true
}
mapProviderItems(info, findPrefix)
if noCommonality {
return fmt.Errorf("no common prefix detected")
}
if prefix == "" {
return fmt.Errorf("no items found")
}
opts.TfPkgPrefix = prefix
return nil
}
type node struct {
segment string
children map[string]*node
// tfToken is only non-empty if the node represents a literal tf token
tfToken string
}
func (n *node) child(segment string) *node {
if n.children == nil {
n.children = map[string]*node{}
}
v, ok := n.children[segment]
if ok {
return v
}
child := &node{segment: segment}
n.children[segment] = child
return child
}
func (n *node) insert(child *node) {
if n.children == nil {
n.children = map[string]*node{}
}
_, ok := n.children[child.segment]
contract.Assertf(!ok, "duplicate segment in child: %q", child.segment)
n.children[child.segment] = child
}
func (n *node) len() int {
i := 0
if n.tfToken != ""
|
for _, child := range n.children {
i += child.len()
}
return i
}
// A depth first search of child nodes.
//
// parent is a function that returns parent nodes, with the immediate parent starting at 0
// and each increment increasing the indirection. 1 yields the grandparent, 2 the
// great-grandparent, etc. parent panics when no node is available.
//
// dfs will pick up nodes inserted up the hierarchy during traversal, but only if they
// were inserted with unique names.
func (n *node) dfs(iter func(parent func(int) *node, node *node)) {
parentStack := []*node{n}
fullIter(n.children, func(_ string, child *node) {
child.dfsInner(&parentStack, iter)
})
}
// Iterate over a map in any order, ensuring that all keys in the map are iterated over,
// even if they were added during the iteration.
//
// There is no guarantee of the order of the iteration.
func fullIter[K comparable, V any](m map[K]V, f func(K, V)) {
seen := map[K]bool{}
for done := false; !done; {
done = true
for k, v := range m {
if seen[k] {
continue
}
seen[k] = true
done = false
f(k, v)
}
}
}
func (n *node) dfsInner(parentStack *[]*node, iter func(parent func(int) *node, node *node)) {
// Push this node onto the parent stack so children can access it
*parentStack = append(*parentStack, n)
// Iterate over children by key, making sure that newly added keys are iterated over
fullIter(n.children, func(k string, v *node) {
v.dfsInner(parentStack, iter)
})
// Pop the node off afterwards
*parentStack = (*parentStack)[:len(*parentStack)-1]
iter(func(i int) *node { return (*parentStack)[len(*parentStack)-1-i] }, n)
}
// Precompute the mapping from tf tokens to pulumi modules.
//
// The resulting map is complete for all TF resources and datasources in info.P.
func (opts *InferredModulesOpts) computeTokens(info *b.ProviderInfo) map[string]tokenInfo {
contract.Assertf(opts.TfPkgPrefix != "", "TF package prefix not provided or computed")
tree := &node{segment: opts.TfPkgPrefix}
// Build segment tree:
//
// Expand each item (resource | datasource) into its segments (divided by "_"), then
// insert each token into the tree structure. The tree is defined by segments, where
// each node represents a segment and each path a token.
mapProviderItems(info, func(s string, _ shim.Resource) bool {
segments := strings.Split(strings.TrimPrefix(s, opts.TfPkgPrefix), "_")
contract.Assertf(len(segments) > 0, "No segments found")
contract.Assertf(segments[0] != "", "Empty segment from splitting %q with prefix %q", s, opts.TfPkgPrefix)
node := tree
for _, segment := range segments {
node = node.child(segment)
}
node.tfToken = s
return true
})
contract.Assertf(tree.tfToken == "", "We don't expect a resource called '%s'", opts.TfPkgPrefix)
output := map[string]tokenInfo{}
// Collapse the segment tree via a depth first traversal.
tree.dfs(func(parent func(int) *node, n *node) {
if parent(0) == tree {
// Inject each path as a node
if n.len() < opts.MinimumModuleSize {
// Node segment is not big enough for its own module, so inject each token
// into the main module
for _, child := range n.children {
output[child.tfToken] = tokenInfo{
mod: opts.MainModule,
name: n.segment + "_" + child.segment,
}
}
if n.tfToken != "" {
output[n.tfToken] = tokenInfo{
mod: opts.MainModule,
name: n.segment,
}
}
} else {
// Node segment will form its own module, so inject each token as a
// module member of `n.segment`.
for _, child := range n.children {
contract.Assertf(child.tfToken != "", "child of %q: %#v", n.segment, child)
output[child.tfToken] = tokenInfo{
mod: n.segment,
name: child.segment,
}
}
// If the node is both a module and an item, put the item in the module
if n.tfToken != "" {
output[n.tfToken] = tokenInfo{
mod: n.segment,
name: n.segment,
}
}
}
} else {
// flatten the tree by injecting children into the parent node.
if n.len() < opts.MimimumSubmoduleSize {
|
{
i++
}
|
conditional_block
|
inferred_modules.go
|
the default value.
// > 0 -> set the value.
MinimumModuleSize int
// The number of items in a longer prefix needed to break out into its own prefix.
//
// For example, with the tokens `pkg_mod_sub1_a`, `pkg_mod_sub2_b`, `pkg_mod_sub2_c`,
// `pkg_mod_sub3_d`:
//
// MinimumSubmoduleSize = 3 will result in:
//
// pkg:mod:Sub1A, pkg:mod:Sub2B, pkg:mod:Sub2C, pkg:mod:Sub3D
//
// MinimumSubmoduleSize = 2 will result in:
//
// pkg:mod:Sub1A, pkg:modSub2:B, pkg:modSub2:C, pkg:mod:Sub3D
//
// < 0 -> don't bin into submodules. Only the most common prefix will be used.
// = 0 -> apply the default value.
// > 0 -> set the value.
MimimumSubmoduleSize int
}
// A strategy to infer module placement from global analysis of all items (Resources & DataSources).
func InferredModules(
info *b.ProviderInfo, finalize Make, opts *InferredModulesOpts,
) (b.Strategy, error) {
if opts == nil {
opts = &InferredModulesOpts{}
}
err := opts.ensurePrefix(info)
if err != nil {
return b.Strategy{}, fmt.Errorf("inferring pkg prefix: %w", err)
}
contract.Assertf(opts.MinimumModuleSize >= 0, "Cannot have a minimum module size less than zero")
if opts.MinimumModuleSize == 0 {
opts.MinimumModuleSize = defaultMinimumModuleSize
}
if opts.MimimumSubmoduleSize == 0 {
opts.MimimumSubmoduleSize = defaultMinimumSubmoduleSize
}
if opts.MainModule == "" {
opts.MainModule = "index"
}
tokenMap := opts.computeTokens(info)
rIsEmpty := func(r *b.ResourceInfo) bool { return r.Tok == "" }
dIsEmpty := func(r *b.DataSourceInfo) bool { return r.Tok == "" }
return b.Strategy{
Resource: tokenFromMap(tokenMap, rIsEmpty, finalize, func(tk string, resource *b.ResourceInfo) {
checkedApply(&resource.Tok, tokens.Type(tk))
}),
DataSource: tokenFromMap(tokenMap, dIsEmpty, finalize, func(tk string, datasource *b.DataSourceInfo) {
checkedApply(&datasource.Tok, tokens.ModuleMember(tk))
}),
}, nil
}
func (opts *InferredModulesOpts) ensurePrefix(info *b.ProviderInfo) error {
prefix := opts.TfPkgPrefix
var noCommonality bool
findPrefix := func(key string, _ shim.Resource) bool {
if noCommonality {
return false
}
if prefix == "" {
prefix = key
return true
}
prefix = sharedPrefix(key, prefix)
if prefix == "" {
noCommonality = true
}
return true
}
mapProviderItems(info, findPrefix)
if noCommonality {
return fmt.Errorf("no common prefix detected")
}
if prefix == "" {
return fmt.Errorf("no items found")
}
opts.TfPkgPrefix = prefix
return nil
}
type node struct {
segment string
children map[string]*node
// tfToken is only non-empty if the node represents a literal tf token
tfToken string
}
func (n *node) child(segment string) *node {
if n.children == nil {
n.children = map[string]*node{}
}
v, ok := n.children[segment]
if ok {
return v
}
child := &node{segment: segment}
n.children[segment] = child
return child
}
func (n *node) insert(child *node) {
if n.children == nil {
n.children = map[string]*node{}
}
_, ok := n.children[child.segment]
contract.Assertf(!ok, "duplicate segment in child: %q", child.segment)
n.children[child.segment] = child
}
func (n *node) len() int {
i := 0
if n.tfToken != "" {
i++
}
for _, child := range n.children {
i += child.len()
}
return i
}
// A depth first search of child nodes.
//
// parent is a function that returns parent nodes, with the immediate parent starting at 0
// and each increment increasing the indirection. 1 yields the grandparent, 2 the
// great-grandparent, etc. parent panics when no node is available.
//
// dfs will pick up nodes inserted up the hierarchy during traversal, but only if they
// were inserted with unique names.
func (n *node) dfs(iter func(parent func(int) *node, node *node)) {
parentStack := []*node{n}
fullIter(n.children, func(_ string, child *node) {
child.dfsInner(&parentStack, iter)
})
}
// Iterate over a map in any order, ensuring that all keys in the map are iterated over,
// even if they were added during the iteration.
//
// There is no guarantee of the order of the iteration.
func fullIter[K comparable, V any](m map[K]V, f func(K, V)) {
seen := map[K]bool{}
for done := false; !done; {
done = true
for k, v := range m {
if seen[k] {
continue
}
seen[k] = true
done = false
f(k, v)
}
}
}
func (n *node)
|
(parentStack *[]*node, iter func(parent func(int) *node, node *node)) {
// Push this node onto the parent stack so children can access it
*parentStack = append(*parentStack, n)
// Iterate over children by key, making sure that newly added keys are iterated over
fullIter(n.children, func(k string, v *node) {
v.dfsInner(parentStack, iter)
})
// Pop the node off afterwards
*parentStack = (*parentStack)[:len(*parentStack)-1]
iter(func(i int) *node { return (*parentStack)[len(*parentStack)-1-i] }, n)
}
// Precompute the mapping from tf tokens to pulumi modules.
//
// The resulting map is complete for all TF resources and datasources in info.P.
func (opts *InferredModulesOpts) computeTokens(info *b.ProviderInfo) map[string]tokenInfo {
contract.Assertf(opts.TfPkgPrefix != "", "TF package prefix not provided or computed")
tree := &node{segment: opts.TfPkgPrefix}
// Build segment tree:
//
// Expand each item (resource | datasource) into its segments (divided by "_"), then
// insert each token into the tree structure. The tree is defined by segments, where
// each node represents a segment and each path a token.
mapProviderItems(info, func(s string, _ shim.Resource) bool {
segments := strings.Split(strings.TrimPrefix(s, opts.TfPkgPrefix), "_")
contract.Assertf(len(segments) > 0, "No segments found")
contract.Assertf(segments[0] != "", "Empty segment from splitting %q with prefix %q", s, opts.TfPkgPrefix)
node := tree
for _, segment := range segments {
node = node.child(segment)
}
node.tfToken = s
return true
})
contract.Assertf(tree.tfToken == "", "We don't expect a resource called '%s'", opts.TfPkgPrefix)
output := map[string]tokenInfo{}
// Collapse the segment tree via a depth first traversal.
tree.dfs(func(parent func(int) *node, n *node) {
if parent(0) == tree {
// Inject each path as a node
if n.len() < opts.MinimumModuleSize {
// Node segment is not big enough for its own module, so inject each token
// into the main module
for _, child := range n.children {
output[child.tfToken] = tokenInfo{
mod: opts.MainModule,
name: n.segment + "_" + child.segment,
}
}
if n.tfToken != "" {
output[n.tfToken] = tokenInfo{
mod: opts.MainModule,
name: n.segment,
}
}
} else {
// Node segment will form its own module, so inject each token as a
// module member of `n.segment`.
for _, child := range n.children {
contract.Assertf(child.tfToken != "", "child of %q: %#v", n.segment, child)
output[child.tfToken] = tokenInfo{
mod: n.segment,
name: child.segment,
}
}
// If the node is both a module and an item, put the item in the module
if n.tfToken != "" {
output[n.tfToken] = tokenInfo{
mod: n.segment,
name: n.segment,
}
}
}
} else {
// flatten the tree by injecting children into the parent node.
if n.len() < opts.MimimumSubmoduleSize {
|
dfsInner
|
identifier_name
|
inferred_modules.go
|
apply the default value.
// > 0 -> set the value.
MinimumModuleSize int
// The number of items in a longer prefix needed to break out into its own prefix.
//
// For example, with the tokens `pkg_mod_sub1_a`, `pkg_mod_sub2_b`, `pkg_mod_sub2_c`,
// `pkg_mod_sub3_d`:
//
// MinimumSubmoduleSize = 3 will result in:
//
// pkg:mod:Sub1A, pkg:mod:Sub2B, pkg:mod:Sub2C, pkg:mod:Sub3D
//
// MinimumSubmoduleSize = 2 will result in:
//
// pkg:mod:Sub1A, pkg:modSub2:B, pkg:modSub2:C, pkg:mod:Sub3D
//
// < 0 -> don't bin into submodules. Only the most common prefix will be used.
// = 0 -> apply the default value.
// > 0 -> set the value.
MimimumSubmoduleSize int
}
// A strategy to infer module placement from global analysis of all items (Resources & DataSources).
func InferredModules(
info *b.ProviderInfo, finalize Make, opts *InferredModulesOpts,
) (b.Strategy, error) {
if opts == nil {
opts = &InferredModulesOpts{}
}
err := opts.ensurePrefix(info)
if err != nil {
return b.Strategy{}, fmt.Errorf("inferring pkg prefix: %w", err)
}
contract.Assertf(opts.MinimumModuleSize >= 0, "Cannot have a minimum module size less than zero")
if opts.MinimumModuleSize == 0 {
opts.MinimumModuleSize = defaultMinimumModuleSize
}
if opts.MimimumSubmoduleSize == 0 {
opts.MimimumSubmoduleSize = defaultMinimumSubmoduleSize
}
if opts.MainModule == "" {
opts.MainModule = "index"
}
tokenMap := opts.computeTokens(info)
rIsEmpty := func(r *b.ResourceInfo) bool { return r.Tok == "" }
dIsEmpty := func(r *b.DataSourceInfo) bool { return r.Tok == "" }
return b.Strategy{
Resource: tokenFromMap(tokenMap, rIsEmpty, finalize, func(tk string, resource *b.ResourceInfo) {
checkedApply(&resource.Tok, tokens.Type(tk))
}),
DataSource: tokenFromMap(tokenMap, dIsEmpty, finalize, func(tk string, datasource *b.DataSourceInfo) {
checkedApply(&datasource.Tok, tokens.ModuleMember(tk))
}),
}, nil
}
func (opts *InferredModulesOpts) ensurePrefix(info *b.ProviderInfo) error {
prefix := opts.TfPkgPrefix
var noCommonality bool
findPrefix := func(key string, _ shim.Resource) bool {
if noCommonality {
return false
}
if prefix == "" {
prefix = key
return true
}
prefix = sharedPrefix(key, prefix)
if prefix == "" {
noCommonality = true
}
return true
}
mapProviderItems(info, findPrefix)
if noCommonality {
return fmt.Errorf("no common prefix detected")
}
if prefix == "" {
return fmt.Errorf("no items found")
}
opts.TfPkgPrefix = prefix
return nil
}
type node struct {
segment string
children map[string]*node
// tfToken is only non-empty if the node represents a literal tf token
tfToken string
}
func (n *node) child(segment string) *node {
if n.children == nil {
n.children = map[string]*node{}
}
v, ok := n.children[segment]
if ok {
return v
}
child := &node{segment: segment}
n.children[segment] = child
return child
}
func (n *node) insert(child *node) {
if n.children == nil {
n.children = map[string]*node{}
}
_, ok := n.children[child.segment]
contract.Assertf(!ok, "duplicate segment in child: %q", child.segment)
n.children[child.segment] = child
}
func (n *node) len() int {
i := 0
if n.tfToken != "" {
i++
}
for _, child := range n.children {
i += child.len()
}
return i
}
// A depth first search of child nodes.
//
// parent is a function that returns parent nodes, with the immediate parent starting at 0
// and each increment increasing the indirection. 1 yields the grandparent, 2 the
// great-grandparent, etc. parent panics when no node is available.
//
// dfs will pick up nodes inserted up the hierarchy during traversal, but only if they
// were inserted with unique names.
func (n *node) dfs(iter func(parent func(int) *node, node *node)) {
parentStack := []*node{n}
fullIter(n.children, func(_ string, child *node) {
child.dfsInner(&parentStack, iter)
})
}
// Iterate over a map in any order, ensuring that all keys in the map are iterated over,
// even if they were added during the iteration.
//
// There is no guarantee of the order of the iteration.
func fullIter[K comparable, V any](m map[K]V, f func(K, V)) {
seen := map[K]bool{}
for done := false; !done; {
done = true
for k, v := range m {
if seen[k] {
continue
}
seen[k] = true
done = false
f(k, v)
}
}
}
func (n *node) dfsInner(parentStack *[]*node, iter func(parent func(int) *node, node *node)) {
// Push this node onto the parent stack so children can access it
*parentStack = append(*parentStack, n)
// Iterate over children by key, making sure that newly added keys are iterated over
fullIter(n.children, func(k string, v *node) {
v.dfsInner(parentStack, iter)
})
// Pop the node off afterwards
*parentStack = (*parentStack)[:len(*parentStack)-1]
iter(func(i int) *node { return (*parentStack)[len(*parentStack)-1-i] }, n)
}
// Precompute the mapping from tf tokens to pulumi modules.
//
// The resulting map is complete for all TF resources and datasources in info.P.
func (opts *InferredModulesOpts) computeTokens(info *b.ProviderInfo) map[string]tokenInfo {
contract.Assertf(opts.TfPkgPrefix != "", "TF package prefix not provided or computed")
tree := &node{segment: opts.TfPkgPrefix}
// Build segment tree:
|
//
// Expand each item (resource | datasource) into its segments (divided by "_"), then
// insert each token into the tree structure. The tree is defined by segments, where
// each node represents a segment and each path a token.
mapProviderItems(info, func(s string, _ shim.Resource) bool {
segments := strings.Split(strings.TrimPrefix(s, opts.TfPkgPrefix), "_")
contract.Assertf(len(segments) > 0, "No segments found")
contract.Assertf(segments[0] != "", "Empty segment from splitting %q with prefix %q", s, opts.TfPkgPrefix)
node := tree
for _, segment := range segments {
node = node.child(segment)
}
node.tfToken = s
return true
})
contract.Assertf(tree.tfToken == "", "We don't expect a resource called '%s'", opts.TfPkgPrefix)
output := map[string]tokenInfo{}
// Collapse the segment tree via a depth first traversal.
tree.dfs(func(parent func(int) *node, n *node) {
if parent(0) == tree {
// Inject each path as a node
if n.len() < opts.MinimumModuleSize {
// Node segment is not big enough for its own module, so inject each token
// into the main module
for _, child := range n.children {
output[child.tfToken] = tokenInfo{
mod: opts.MainModule,
name: n.segment + "_" + child.segment,
}
}
if n.tfToken != "" {
output[n.tfToken] = tokenInfo{
mod: opts.MainModule,
name: n.segment,
}
}
} else {
// Node segment will form its own module, so inject each token as a
// module member of `n.segment`.
for _, child := range n.children {
contract.Assertf(child.tfToken != "", "child of %q: %#v", n.segment, child)
output[child.tfToken] = tokenInfo{
mod: n.segment,
name: child.segment,
}
}
// If the node is both a module and an item, put the item in the module
if n.tfToken != "" {
output[n.tfToken] = tokenInfo{
mod: n.segment,
name: n.segment,
}
}
}
} else {
// flatten the tree by injecting children into the parent node.
if n.len() < opts.MimimumSubmoduleSize {
|
random_line_split
|
|
worker.go
|
:"error"`
}
func (r RuleEval) Work() {
promql := strings.TrimSpace(r.rule.PromQl)
if promql == "" {
logger.Errorf("rule_eval:%d promql is blank", r.RuleID())
return
}
var value model.Value
var err error
if r.rule.Algorithm == "" {
var warnings prom.Warnings
value, warnings, err = reader.Client.Query(context.Background(), promql, time.Now())
if err != nil {
logger.Errorf("rule_eval:%d promql:%s, error:%v", r.RuleID(), promql, err)
notifyToMaintainer(err, "failed to query prometheus")
return
}
if len(warnings) > 0 {
logger.Errorf("rule_eval:%d promql:%s, warnings:%v", r.RuleID(), promql, warnings)
return
}
} else {
var res AnomalyPoint
count := len(config.C.AnomalyDataApi)
for _, i := range rand.Perm(count) {
url := fmt.Sprintf("%s?rid=%d", config.C.AnomalyDataApi[i], r.rule.Id)
err = httplib.Get(url).SetTimeout(time.Duration(3000) * time.Millisecond).ToJSON(&res)
if err != nil {
logger.Errorf("curl %s fail: %v", url, err)
continue
}
if res.Err != "" {
logger.Errorf("curl %s fail: %s", url, res.Err)
continue
}
value = res.Data
logger.Debugf("curl %s get: %+v", url, res.Data)
}
}
r.judge(conv.ConvertVectors(value))
}
type WorkersType struct {
rules map[string]RuleEval
recordRules map[string]RecordingRuleEval
}
var Workers = &WorkersType{rules: make(map[string]RuleEval), recordRules: make(map[string]RecordingRuleEval)}
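// Build reconciles the running alert-rule workers with the given rule IDs. Each rule is keyed
// by the MD5 of (id, eval interval, promql): workers whose key has disappeared are stopped, and
// every new key gets a RuleEval seeded with the rule's current events loaded from the database.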
func (ws *WorkersType) Build(rids []int64) {
rules := make(map[string]*models.AlertRule)
for i := 0; i < len(rids); i++ {
rule := memsto.AlertRuleCache.Get(rids[i])
if rule == nil {
continue
}
hash := str.MD5(fmt.Sprintf("%d_%d_%s",
rule.Id,
rule.PromEvalInterval,
rule.PromQl,
))
rules[hash] = rule
}
// stop old
for hash := range Workers.rules {
if _, has := rules[hash]; !has {
Workers.rules[hash].Stop()
delete(Workers.rules, hash)
}
}
// start new
for hash := range rules {
if _, has := Workers.rules[hash]; has {
// already exists
continue
}
elst, err := models.AlertCurEventGetByRule(rules[hash].Id)
if err != nil {
logger.Errorf("worker_build: AlertCurEventGetByRule failed: %v", err)
continue
}
firemap := make(map[string]*models.AlertCurEvent)
for i := 0; i < len(elst); i++ {
elst[i].DB2Mem()
firemap[elst[i].Hash] = elst[i]
}
re := RuleEval{
rule: rules[hash],
quit: make(chan struct{}),
fires: firemap,
pendings: make(map[string]*models.AlertCurEvent),
}
go re.Start()
Workers.rules[hash] = re
}
}
func (ws *WorkersType) BuildRe(rids []int64) {
rules := make(map[string]*models.RecordingRule)
for i := 0; i < len(rids); i++ {
rule := memsto.RecordingRuleCache.Get(rids[i])
if rule == nil {
continue
}
if rule.Disabled == 1 {
continue
}
hash := str.MD5(fmt.Sprintf("%d_%d_%s_%s",
rule.Id,
rule.PromEvalInterval,
rule.PromQl,
rule.AppendTags,
))
rules[hash] = rule
}
// stop old
for hash := range Workers.recordRules {
if _, has := rules[hash]; !has {
Workers.recordRules[hash].Stop()
delete(Workers.recordRules, hash)
}
}
// start new
for hash := range rules {
|
func (r RuleEval) judge(vectors []conv.Vector) {
// Some of the rule's settings may have changed, e.g. alert receivers, callbacks, etc.
// Those changes do not restart the worker, but they do affect how alerts are handled,
// so fetch the latest rule from memsto.AlertRuleCache and overwrite the local copy
curRule := memsto.AlertRuleCache.Get(r.rule.Id)
if curRule == nil {
return
}
r.rule = curRule
count := len(vectors)
alertingKeys := make(map[string]struct{})
now := time.Now().Unix()
for i := 0; i < count; i++ {
// compute hash
hash := str.MD5(fmt.Sprintf("%d_%s", r.rule.Id, vectors[i].Key))
alertingKeys[hash] = struct{}{}
// rule disabled in this time span?
if isNoneffective(vectors[i].Timestamp, r.rule) {
continue
}
// handle series tags
tagsMap := make(map[string]string)
for label, value := range vectors[i].Labels {
tagsMap[string(label)] = string(value)
}
// handle rule tags
for _, tag := range r.rule.AppendTagsJSON {
arr := strings.SplitN(tag, "=", 2)
tagsMap[arr[0]] = arr[1]
}
tagsMap["rulename"] = r.rule.Name
// handle target note
targetIdent, has := vectors[i].Labels["ident"]
targetNote := ""
if has {
target, exists := memsto.TargetCache.Get(string(targetIdent))
if exists {
targetNote = target.Note
// For alert events that carry an ident, check whether the ident's business group matches the rule's business group.
// If the rule is configured to only take effect in its own BG, machines from other BGs must not trigger alerts for this rule
if r.rule.EnableInBG == 1 && target.GroupId != r.rule.GroupId {
continue
}
}
}
event := &models.AlertCurEvent{
TriggerTime: vectors[i].Timestamp,
TagsMap: tagsMap,
GroupId: r.rule.GroupId,
RuleName: r.rule.Name,
}
bg := memsto.BusiGroupCache.GetByBusiGroupId(r.rule.GroupId)
if bg != nil {
event.GroupName = bg.Name
}
// isMuted only needs TriggerTime, RuleName and TagsMap
if isMuted(event) {
logger.Infof("event_muted: rule_id=%d %s", r.rule.Id, vectors[i].Key)
continue
}
tagsArr := labelMapToArr(tagsMap)
sort.Strings(tagsArr)
event.Cluster = r.rule.Cluster
event.Hash = hash
event.RuleId = r.rule.Id
event.RuleName = r.rule.Name
event.RuleNote = r.rule.Note
event.RuleProd = r.rule.Prod
event.RuleAlgo = r.rule.Algorithm
event.Severity = r.rule.Severity
event.PromForDuration = r.rule.PromForDuration
event.PromQl = r.rule.PromQl
event.PromEvalInterval = r.rule.PromEvalInterval
event.Callbacks = r.rule.Callbacks
event.CallbacksJSON = r.rule.CallbacksJSON
event.RunbookUrl = r.rule.RunbookUrl
event.NotifyRecovered = r.rule.NotifyRecovered
event.NotifyChannels = r.rule.NotifyChannels
event.NotifyChannelsJSON = r.rule.NotifyChannelsJSON
event.NotifyGroups = r.rule.NotifyGroups
event.NotifyGroupsJSON = r.rule.NotifyGroupsJSON
event.TargetIdent = string(targetIdent)
event.TargetNote = targetNote
event.TriggerValue = readableValue(vectors[i].Value)
event.TagsJSON = tagsArr
event.Tags = strings.Join(tagsArr, ",,")
event.IsRecovered = false
event.LastEvalTime = now
r.handleNewEvent(event)
}
// handle recovered events
r.recoverRule(alertingKeys, now)
}
func readableValue(value float64) string {
ret := fmt.Sprintf("%.5f", value)
ret = strings.TrimRight(ret, "0")
return strings.TrimRight(ret, ".")
}
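// Illustrative behaviour, not part of the original file: readableValue renders the value with
// five decimals and then trims trailing zeros and a dangling decimal point.
//
//	readableValue(1.5)        // "1.5"      ("1.50000" with the zeros trimmed)
//	readableValue(2.0)        // "2"        ("2.00000" -> "2." -> "2")
//	readableValue(0.00012345) // "0.00012"  (precision is capped at five decimals)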
func labelMapToArr(m map[string]string) []string {
numLabels := len(m)
labelStrings := make([]string, 0, numLabels)
for label, value := range
|
if _, has := Workers.recordRules[hash]; has {
// already exists
continue
}
re := RecordingRuleEval{
rule: rules[hash],
quit: make(chan struct{}),
}
go re.Start()
Workers.recordRules[hash] = re
}
}
|
conditional_block
|
worker.go
|
= target.Note
// For alert events that carry an ident, check whether the ident's business group matches the rule's business group.
// If the rule is configured to only take effect in its own BG, machines from other BGs must not trigger alerts for this rule
if r.rule.EnableInBG == 1 && target.GroupId != r.rule.GroupId {
continue
}
}
}
event := &models.AlertCurEvent{
TriggerTime: vectors[i].Timestamp,
TagsMap: tagsMap,
GroupId: r.rule.GroupId,
RuleName: r.rule.Name,
}
bg := memsto.BusiGroupCache.GetByBusiGroupId(r.rule.GroupId)
if bg != nil {
event.GroupName = bg.Name
}
// isMuted only needs TriggerTime, RuleName and TagsMap
if isMuted(event) {
logger.Infof("event_muted: rule_id=%d %s", r.rule.Id, vectors[i].Key)
continue
}
tagsArr := labelMapToArr(tagsMap)
sort.Strings(tagsArr)
event.Cluster = r.rule.Cluster
event.Hash = hash
event.RuleId = r.rule.Id
event.RuleName = r.rule.Name
event.RuleNote = r.rule.Note
event.RuleProd = r.rule.Prod
event.RuleAlgo = r.rule.Algorithm
event.Severity = r.rule.Severity
event.PromForDuration = r.rule.PromForDuration
event.PromQl = r.rule.PromQl
event.PromEvalInterval = r.rule.PromEvalInterval
event.Callbacks = r.rule.Callbacks
event.CallbacksJSON = r.rule.CallbacksJSON
event.RunbookUrl = r.rule.RunbookUrl
event.NotifyRecovered = r.rule.NotifyRecovered
event.NotifyChannels = r.rule.NotifyChannels
event.NotifyChannelsJSON = r.rule.NotifyChannelsJSON
event.NotifyGroups = r.rule.NotifyGroups
event.NotifyGroupsJSON = r.rule.NotifyGroupsJSON
event.TargetIdent = string(targetIdent)
event.TargetNote = targetNote
event.TriggerValue = readableValue(vectors[i].Value)
event.TagsJSON = tagsArr
event.Tags = strings.Join(tagsArr, ",,")
event.IsRecovered = false
event.LastEvalTime = now
r.handleNewEvent(event)
}
// handle recovered events
r.recoverRule(alertingKeys, now)
}
func readableValue(value float64) string {
ret := fmt.Sprintf("%.5f", value)
ret = strings.TrimRight(ret, "0")
return strings.TrimRight(ret, ".")
}
func labelMapToArr(m map[string]string) []string {
numLabels := len(m)
labelStrings := make([]string, 0, numLabels)
for label, value := range m {
labelStrings = append(labelStrings, fmt.Sprintf("%s=%s", label, value))
}
if numLabels > 1 {
sort.Strings(labelStrings)
}
return labelStrings
}
func (r RuleEval) handleNewEvent(event *models.AlertCurEvent) {
if event.PromForDuration == 0 {
r.fireEvent(event)
return
}
_, has := r.pendings[event.Hash]
if has {
r.pendings[event.Hash].LastEvalTime = event.LastEvalTime
} else {
r.pendings[event.Hash] = event
}
if r.pendings[event.Hash].LastEvalTime-r.pendings[event.Hash].TriggerTime+int64(event.PromEvalInterval) >= int64(event.PromForDuration) {
r.fireEvent(event)
}
}
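// Illustrative timeline, not part of the original file, assuming PromForDuration=60s,
// PromEvalInterval=15s, and the series first crossing the threshold at t=0:
//
//	t=0s   pending  (0s elapsed  + 15s interval < 60s)
//	t=15s  pending  (15s elapsed + 15s interval < 60s)
//	t=30s  pending  (30s elapsed + 15s interval < 60s)
//	t=45s  fired    (45s elapsed + 15s interval >= 60s)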
func (r RuleEval) fireEvent(event *models.AlertCurEvent) {
if fired, has := r.fires[event.Hash]; has {
r.fires[event.Hash].LastEvalTime = event.LastEvalTime
if r.rule.NotifyRepeatStep == 0 {
// Repeat notifications are disabled, so just return; nothing to do
return
}
// An alert was already sent; whether to send again depends on whether the repeat interval has elapsed
if event.LastEvalTime > fired.LastSentTime+int64(r.rule.NotifyRepeatStep)*60 {
if r.rule.NotifyMaxNumber == 0 {
// A maximum send count of 0 means no limit on how many notifications are sent; keep sending
event.NotifyCurNumber = fired.NotifyCurNumber + 1
r.pushEventToQueue(event)
} else {
// A maximum send count is configured, so check how many notifications have been sent and whether the limit has been reached
if fired.NotifyCurNumber >= r.rule.NotifyMaxNumber {
return
} else {
event.NotifyCurNumber = fired.NotifyCurNumber + 1
r.pushEventToQueue(event)
}
}
}
} else {
event.NotifyCurNumber = 1
r.pushEventToQueue(event)
}
}
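// Illustrative timeline, not part of the original file, assuming NotifyRepeatStep=10 (minutes)
// and NotifyMaxNumber=3 for an event that keeps firing:
//
//	t=0m   first firing -> sent, NotifyCurNumber=1
//	t=5m   still firing -> suppressed (repeat interval not yet elapsed)
//	t=12m  still firing -> sent, NotifyCurNumber=2
//	t=25m  still firing -> sent, NotifyCurNumber=3
//	t=40m  still firing -> suppressed (NotifyMaxNumber reached)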
func (r RuleEval) recoverRule(alertingKeys map[string]struct{}, now int64) {
for hash := range r.pendings {
if _, has := alertingKeys[hash]; has {
continue
}
delete(r.pendings, hash)
}
for hash, event := range r.fires {
if _, has := alertingKeys[hash]; has {
continue
}
// If a recovery observation duration is configured, do not recover immediately
if r.rule.RecoverDuration > 0 && now-event.LastEvalTime < r.rule.RecoverDuration {
continue
}
// No vector crossing the threshold was returned, so assume this series has recovered.
// There is no way to tell whether the value still exists in Prometheus but no longer matches the threshold, or whether data points are simply missing
delete(r.fires, hash)
delete(r.pendings, hash)
event.IsRecovered = true
event.LastEvalTime = now
// The recovery may have been caused by an edited promql, so the event should carry the latest promql; otherwise users get confused.
// In fact any field of the rule may have changed, so refresh them all
event.RuleName = r.rule.Name
event.RuleNote = r.rule.Note
event.RuleProd = r.rule.Prod
event.RuleAlgo = r.rule.Algorithm
event.Severity = r.rule.Severity
event.PromForDuration = r.rule.PromForDuration
event.PromQl = r.rule.PromQl
event.PromEvalInterval = r.rule.PromEvalInterval
event.Callbacks = r.rule.Callbacks
event.CallbacksJSON = r.rule.CallbacksJSON
event.RunbookUrl = r.rule.RunbookUrl
event.NotifyRecovered = r.rule.NotifyRecovered
event.NotifyChannels = r.rule.NotifyChannels
event.NotifyChannelsJSON = r.rule.NotifyChannelsJSON
event.NotifyGroups = r.rule.NotifyGroups
event.NotifyGroupsJSON = r.rule.NotifyGroupsJSON
r.pushEventToQueue(event)
}
}
func (r RuleEval) pushEventToQueue(event *models.AlertCurEvent) {
if !event.IsRecovered {
event.LastSentTime = event.LastEvalTime
r.fires[event.Hash] = event
}
promstat.CounterAlertsTotal.WithLabelValues(config.C.ClusterName).Inc()
LogEvent(event, "push_queue")
if !EventQueue.PushFront(event) {
logger.Warningf("event_push_queue: queue is full")
}
}
func filterRecordingRules() {
ids := memsto.RecordingRuleCache.GetRuleIds()
count := len(ids)
mines := make([]int64, 0, count)
for i := 0; i < count; i++ {
node, err := naming.HashRing.GetNode(fmt.Sprint(ids[i]))
if err != nil {
logger.Warning("failed to get node from hashring:", err)
continue
}
if node == config.C.Heartbeat.Endpoint {
mines = append(mines, ids[i])
}
}
Workers.BuildRe(mines)
}
type RecordingRuleEval struct {
rule *models.RecordingRule
quit chan struct{}
}
func (r RecordingRuleEval) Stop() {
logger.Infof("recording_rule_eval:%d stopping", r.RuleID())
close(r.quit)
}
func (r RecordingRuleEval) RuleID() int64 {
return r.rule.Id
}
func (r RecordingRuleEval) Start() {
logger.Infof("recording_rule_eval:%d started", r.RuleID())
for {
select {
case <-r.quit:
// logger.Infof("rule_eval:%d stopped", r.RuleID())
return
default:
r.Work()
interval := r.rule.PromEvalInterval
if interval <= 0 {
interval = 10
}
time.Sleep(time.Duration(interval) * time.Second)
}
}
}
func (r RecordingRuleEval) Work() {
promql := strings.TrimSpace(r.rule.PromQl)
if promql == "" {
logger.Errorf("recording_rule_eval:%d promql is blank", r.RuleID())
return
}
value, warnings, err := reader.Client.Query(context.Backgrou
|
nd(), promql, time.Now())
if err != nil {
logger.Errorf("recording_rule_eval
|
identifier_body
|
|
worker.go
|
{
logger.Infof("rule_eval:%d started", r.RuleID())
for {
select {
case <-r.quit:
// logger.Infof("rule_eval:%d stopped", r.RuleID())
return
default:
r.Work()
logger.Debugf("rule executed, rule_id=%d", r.RuleID())
interval := r.rule.PromEvalInterval
if interval <= 0 {
interval = 10
}
time.Sleep(time.Duration(interval) * time.Second)
}
}
}
type AnomalyPoint struct {
Data model.Matrix `json:"data"`
Err string `json:"error"`
}
func (r RuleEval) Work() {
promql := strings.TrimSpace(r.rule.PromQl)
if promql == "" {
logger.Errorf("rule_eval:%d promql is blank", r.RuleID())
return
}
var value model.Value
var err error
if r.rule.Algorithm == "" {
var warnings prom.Warnings
value, warnings, err = reader.Client.Query(context.Background(), promql, time.Now())
if err != nil {
logger.Errorf("rule_eval:%d promql:%s, error:%v", r.RuleID(), promql, err)
notifyToMaintainer(err, "failed to query prometheus")
return
}
if len(warnings) > 0 {
logger.Errorf("rule_eval:%d promql:%s, warnings:%v", r.RuleID(), promql, warnings)
return
}
} else {
var res AnomalyPoint
count := len(config.C.AnomalyDataApi)
for _, i := range rand.Perm(count) {
url := fmt.Sprintf("%s?rid=%d", config.C.AnomalyDataApi[i], r.rule.Id)
err = httplib.Get(url).SetTimeout(time.Duration(3000) * time.Millisecond).ToJSON(&res)
if err != nil {
logger.Errorf("curl %s fail: %v", url, err)
continue
}
if res.Err != "" {
logger.Errorf("curl %s fail: %s", url, res.Err)
continue
}
value = res.Data
logger.Debugf("curl %s get: %+v", url, res.Data)
}
}
r.judge(conv.ConvertVectors(value))
}
type WorkersType struct {
rules map[string]RuleEval
recordRules map[string]RecordingRuleEval
}
var Workers = &WorkersType{rules: make(map[string]RuleEval), recordRules: make(map[string]RecordingRuleEval)}
func (ws *WorkersType) Build(rids []int64) {
rules := make(map[string]*models.AlertRule)
for i := 0; i < len(rids); i++ {
rule := memsto.AlertRuleCache.Get(rids[i])
if rule == nil {
continue
}
hash := str.MD5(fmt.Sprintf("%d_%d_%s",
rule.Id,
rule.PromEvalInterval,
rule.PromQl,
))
rules[hash] = rule
}
// stop old
for hash := range Workers.rules {
if _, has := rules[hash]; !has {
Workers.rules[hash].Stop()
delete(Workers.rules, hash)
}
}
// start new
for hash := range rules {
if _, has := Workers.rules[hash]; has {
// already exists
continue
}
elst, err := models.AlertCurEventGetByRule(rules[hash].Id)
if err != nil {
logger.Errorf("worker_build: AlertCurEventGetByRule failed: %v", err)
continue
}
firemap := make(map[string]*models.AlertCurEvent)
for i := 0; i < len(elst); i++ {
elst[i].DB2Mem()
firemap[elst[i].Hash] = elst[i]
}
re := RuleEval{
rule: rules[hash],
quit: make(chan struct{}),
fires: firemap,
pendings: make(map[string]*models.AlertCurEvent),
}
go re.Start()
Workers.rules[hash] = re
}
}
func (ws *WorkersType) BuildRe(rids []int64) {
rules := make(map[string]*models.RecordingRule)
for i := 0; i < len(rids); i++ {
rule := memsto.RecordingRuleCache.Get(rids[i])
if rule == nil {
continue
}
if rule.Disabled == 1 {
continue
}
hash := str.MD5(fmt.Sprintf("%d_%d_%s_%s",
rule.Id,
rule.PromEvalInterval,
rule.PromQl,
rule.AppendTags,
))
rules[hash] = rule
}
// stop old
for hash := range Workers.recordRules {
if _, has := rules[hash]; !has {
Workers.recordRules[hash].Stop()
delete(Workers.recordRules, hash)
}
}
// start new
for hash := range rules {
if _, has := Workers.recordRules[hash]; has {
// already exists
continue
}
re := RecordingRuleEval{
rule: rules[hash],
quit: make(chan struct{}),
}
go re.Start()
Workers.recordRules[hash] = re
}
}
func (r RuleEval) judge(vectors []conv.Vector) {
// Some of the rule's settings may have changed, e.g. alert receivers, callbacks, etc.
// Those changes do not restart the worker, but they do affect how alerts are handled,
// so fetch the latest rule from memsto.AlertRuleCache and overwrite the local copy
curRule := memsto.AlertRuleCache.Get(r.rule.Id)
if curRule == nil {
return
}
r.rule = curRule
count := len(vectors)
alertingKeys := make(map[string]struct{})
now := time.Now().Unix()
for i := 0; i < count; i++ {
// compute hash
hash := str.MD5(fmt.Sprintf("%d_%s", r.rule.Id, vectors[i].Key))
alertingKeys[hash] = struct{}{}
// rule disabled in this time span?
if isNoneffective(vectors[i].Timestamp, r.rule) {
continue
}
// handle series tags
tagsMap := make(map[string]string)
for label, value := range vectors[i].Labels {
tagsMap[string(label)] = string(value)
}
// handle rule tags
for _, tag := range r.rule.AppendTagsJSON {
arr := strings.SplitN(tag, "=", 2)
tagsMap[arr[0]] = arr[1]
}
tagsMap["rulename"] = r.rule.Name
// handle target note
targetIdent, has := vectors[i].Labels["ident"]
targetNote := ""
if has {
target, exists := memsto.TargetCache.Get(string(targetIdent))
if exists {
targetNote = target.Note
// For alert events that carry an ident, check whether the ident's business group matches the rule's business group.
// If the rule is configured to only take effect in its own BG, machines from other BGs must not trigger alerts for this rule
if r.rule.EnableInBG == 1 && target.GroupId != r.rule.GroupId {
continue
}
}
}
event := &models.AlertCurEvent{
TriggerTime: vectors[i].Timestamp,
TagsMap: tagsMap,
GroupId: r.rule.GroupId,
RuleName: r.rule.Name,
}
bg := memsto.BusiGroupCache.GetByBusiGroupId(r.rule.GroupId)
if bg != nil {
event.GroupName = bg.Name
}
// isMuted only needs TriggerTime, RuleName and TagsMap
if isMuted(event) {
logger.Infof("event_muted: rule_id=%d %s", r.rule.Id, vectors[i].Key)
continue
}
tagsArr := labelMapToArr(tagsMap)
sort.Strings(tagsArr)
event.Cluster = r.rule.Cluster
event.Hash = hash
event.RuleId = r.rule.Id
event.RuleName = r.rule.Name
event.RuleNote = r.rule.Note
event.RuleProd = r.rule.Prod
event.RuleAlgo = r.rule.Algorithm
event.Severity = r.rule.Severity
event.PromForDuration = r.rule.PromForDuration
event.PromQl = r.rule.PromQl
event.PromEvalInterval = r.rule.PromEvalInterval
event.Callbacks = r.rule.Callbacks
event.CallbacksJSON = r.rule.CallbacksJSON
event.RunbookUrl = r.rule.RunbookUrl
event.NotifyRecovered = r.rule.NotifyRecovered
event.NotifyChannels = r.rule.NotifyChannels
event.NotifyChannelsJSON = r.rule.NotifyChannelsJSON
event.NotifyGroups = r.rule.NotifyGroups
event.NotifyGroupsJSON = r.rule.NotifyGroupsJSON
event.TargetIdent = string(targetIdent)
event.TargetNote = targetNote
event.TriggerValue = readableValue(vectors[i].Value)
event
|
art()
|
identifier_name
|
|
worker.go
|
:"error"`
}
func (r RuleEval) Work() {
promql := strings.TrimSpace(r.rule.PromQl)
if promql == "" {
logger.Errorf("rule_eval:%d promql is blank", r.RuleID())
return
}
var value model.Value
var err error
if r.rule.Algorithm == "" {
var warnings prom.Warnings
value, warnings, err = reader.Client.Query(context.Background(), promql, time.Now())
if err != nil {
logger.Errorf("rule_eval:%d promql:%s, error:%v", r.RuleID(), promql, err)
notifyToMaintainer(err, "failed to query prometheus")
return
}
if len(warnings) > 0 {
logger.Errorf("rule_eval:%d promql:%s, warnings:%v", r.RuleID(), promql, warnings)
return
}
} else {
var res AnomalyPoint
count := len(config.C.AnomalyDataApi)
for _, i := range rand.Perm(count) {
url := fmt.Sprintf("%s?rid=%d", config.C.AnomalyDataApi[i], r.rule.Id)
err = httplib.Get(url).SetTimeout(time.Duration(3000) * time.Millisecond).ToJSON(&res)
if err != nil {
logger.Errorf("curl %s fail: %v", url, err)
continue
}
if res.Err != "" {
logger.Errorf("curl %s fail: %s", url, res.Err)
continue
}
value = res.Data
logger.Debugf("curl %s get: %+v", url, res.Data)
}
}
r.judge(conv.ConvertVectors(value))
}
type WorkersType struct {
rules map[string]RuleEval
recordRules map[string]RecordingRuleEval
}
var Workers = &WorkersType{rules: make(map[string]RuleEval), recordRules: make(map[string]RecordingRuleEval)}
func (ws *WorkersType) Build(rids []int64) {
rules := make(map[string]*models.AlertRule)
for i := 0; i < len(rids); i++ {
rule := memsto.AlertRuleCache.Get(rids[i])
if rule == nil {
continue
}
hash := str.MD5(fmt.Sprintf("%d_%d_%s",
rule.Id,
rule.PromEvalInterval,
rule.PromQl,
))
rules[hash] = rule
}
// stop old
for hash := range Workers.rules {
if _, has := rules[hash]; !has {
Workers.rules[hash].Stop()
delete(Workers.rules, hash)
}
}
// start new
for hash := range rules {
if _, has := Workers.rules[hash]; has {
// already exists
continue
}
elst, err := models.AlertCurEventGetByRule(rules[hash].Id)
if err != nil {
logger.Errorf("worker_build: AlertCurEventGetByRule failed: %v", err)
continue
}
firemap := make(map[string]*models.AlertCurEvent)
for i := 0; i < len(elst); i++ {
elst[i].DB2Mem()
firemap[elst[i].Hash] = elst[i]
}
re := RuleEval{
rule: rules[hash],
quit: make(chan struct{}),
fires: firemap,
pendings: make(map[string]*models.AlertCurEvent),
}
go re.Start()
Workers.rules[hash] = re
}
}
func (ws *WorkersType) BuildRe(rids []int64) {
rules := make(map[string]*models.RecordingRule)
for i := 0; i < len(rids); i++ {
rule := memsto.RecordingRuleCache.Get(rids[i])
if rule == nil {
continue
}
if rule.Disabled == 1 {
continue
}
hash := str.MD5(fmt.Sprintf("%d_%d_%s_%s",
rule.Id,
rule.PromEvalInterval,
rule.PromQl,
rule.AppendTags,
))
rules[hash] = rule
}
// stop old
for hash := range Workers.recordRules {
if _, has := rules[hash]; !has {
Workers.recordRules[hash].Stop()
delete(Workers.recordRules, hash)
}
}
// start new
for hash := range rules {
if _, has := Workers.recordRules[hash]; has {
// already exists
continue
}
re := RecordingRuleEval{
rule: rules[hash],
quit: make(chan struct{}),
}
go re.Start()
Workers.recordRules[hash] = re
}
}
func (r RuleEval) judge(vectors []conv.Vector) {
// Some of the rule's settings may have changed, e.g. alert receivers, callbacks, etc.
// Those changes do not restart the worker, but they do affect how alerts are handled,
// so fetch the latest rule from memsto.AlertRuleCache and overwrite the local copy
curRule := memsto.AlertRuleCache.Get(r.rule.Id)
if curRule == nil {
return
}
r.rule = curRule
count := len(vectors)
alertingKeys := make(map[string]struct{})
now := time.Now().Unix()
for i := 0; i < count; i++ {
// compute hash
hash := str.MD5(fmt.Sprintf("%d_%s", r.rule.Id, vectors[i].Key))
alertingKeys[hash] = struct{}{}
// rule disabled in this time span?
if isNoneffective(vectors[i].Timestamp, r.rule) {
continue
}
// handle series tags
tagsMap := make(map[string]string)
for label, value := range vectors[i].Labels {
tagsMap[string(label)] = string(value)
}
// handle rule tags
for _, tag := range r.rule.AppendTagsJSON {
arr := strings.SplitN(tag, "=", 2)
tagsMap[arr[0]] = arr[1]
}
tagsMap["rulename"] = r.rule.Name
// handle target note
targetIdent, has := vectors[i].Labels["ident"]
targetNote := ""
if has {
target, exists := memsto.TargetCache.Get(string(targetIdent))
if exists {
targetNote = target.Note
// For alert events that carry an ident, check whether the ident's business group matches the rule's business group.
// If the rule is configured to only take effect in its own BG, machines from other BGs must not trigger alerts for this rule
if r.rule.EnableInBG == 1 && target.GroupId != r.rule.GroupId {
continue
}
}
}
event := &models.AlertCurEvent{
TriggerTime: vectors[i].Timestamp,
TagsMap: tagsMap,
GroupId: r.rule.GroupId,
RuleName: r.rule.Name,
}
bg := memsto.BusiGroupCache.GetByBusiGroupId(r.rule.GroupId)
if bg != nil {
event.GroupName = bg.Name
}
// isMuted only needs TriggerTime, RuleName and TagsMap
if isMuted(event) {
logger.Infof("event_muted: rule_id=%d %s", r.rule.Id, vectors[i].Key)
continue
}
tagsArr := labelMapToArr(tagsMap)
sort.Strings(tagsArr)
event.Cluster = r.rule.Cluster
event.Hash = hash
event.RuleId = r.rule.Id
event.RuleName = r.rule.Name
event.RuleNote = r.rule.Note
event.RuleProd = r.rule.Prod
event.RuleAlgo = r.rule.Algorithm
event.Severity = r.rule.Severity
event.PromForDuration = r.rule.PromForDuration
event.PromQl = r.rule.PromQl
event.PromEvalInterval = r.rule.PromEvalInterval
event.Callbacks = r.rule.Callbacks
event.CallbacksJSON = r.rule.CallbacksJSON
event.RunbookUrl = r.rule.RunbookUrl
event.NotifyRecovered = r.rule.NotifyRecovered
event.NotifyChannels = r.rule.NotifyChannels
event.NotifyChannelsJSON = r.rule.NotifyChannelsJSON
event.NotifyGroups = r.rule.NotifyGroups
event.NotifyGroupsJSON = r.rule.NotifyGroupsJSON
event.TargetIdent = string(targetIdent)
event.TargetNote = targetNote
event.TriggerValue = readableValue(vectors[i].Value)
event.TagsJSON = tagsArr
event.Tags = strings.Join(tagsArr, ",,")
event.IsRecovered = false
event.LastEvalTime = now
r.handleNewEvent(event)
}
// handle recovered events
r.recoverRule(alertingKeys, now)
}
func readableValue(value float64) string {
ret := fmt.Sprintf("%.5f", value)
ret = strings.TrimRight(ret, "0")
return strings.TrimRight(ret, ".")
}
|
numLabels := len(m)
labelStrings := make([]string, 0, numLabels)
for label, value :=
|
func labelMapToArr(m map[string]string) []string {
|
random_line_split
|
event_test.go
|
false, "name": "Value", "type": "uint256"
}],
"name": "MixedCase",
"type": "event"
}`)
// 1000000
var transferData1 = "00000000000000000000000000000000000000000000000000000000000f4240"
// "0x00Ce0d46d924CC8437c806721496599FC3FFA268", 2218516807680, "usd"
var pledgeData1 = "00000000000000000000000000ce0d46d924cc8437c806721496599fc3ffa2680000000000000000000000000000000000000000000000000000020489e800007573640000000000000000000000000000000000000000000000000000000000"
// 1000000,2218516807680,1000001
var mixedCaseData1 = "00000000000000000000000000000000000000000000000000000000000f42400000000000000000000000000000000000000000000000000000020489e8000000000000000000000000000000000000000000000000000000000000000f4241"
func TestEventId(t *testing.T) {
var table = []struct {
definition string
expectations map[string]common.Hash
}{
{
definition: `[
{ "type" : "event", "name" : "balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
{ "type" : "event", "name" : "check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
]`,
expectations: map[string]common.Hash{
"balance": crypto.Keccak256Hash([]byte("balance(uint256)")),
"check": crypto.Keccak256Hash([]byte("check(address,uint256)")),
},
},
}
for _, test := range table {
abi, err := JSON(strings.NewReader(test.definition))
if err != nil {
t.Fatal(err)
}
|
for name, event := range abi.Events {
if event.Id() != test.expectations[name] {
t.Errorf("expected id to be %x, got %x", test.expectations[name], event.Id())
}
}
}
}
// TestEventMultiValueWithArrayUnpack verifies that array fields are counted correctly after the array is parsed.
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
type testStruct struct {
Value1 [2]uint8
Value2 uint8
}
abi, err := JSON(strings.NewReader(definition))
require.NoError(t, err)
var b bytes.Buffer
var i uint8 = 1
for ; i <= 3; i++ {
b.Write(packNum(reflect.ValueOf(i)))
}
var rst testStruct
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
require.Equal(t, [2]uint8{1, 2}, rst.Value1)
require.Equal(t, uint8(3), rst.Value2)
}
func TestEventTupleUnpack(t *testing.T) {
type EventTransfer struct {
Value *big.Int
}
type EventTransferWithTag struct {
// this is valid because `value` is not exported,
// so the value is only unmarshalled into `Value1`.
value *big.Int
Value1 *big.Int `abi:"value"`
}
type BadEventTransferWithSameFieldAndTag struct {
Value *big.Int
Value1 *big.Int `abi:"value"`
}
type BadEventTransferWithDuplicatedTag struct {
Value1 *big.Int `abi:"value"`
Value2 *big.Int `abi:"value"`
}
type BadEventTransferWithEmptyTag struct {
Value *big.Int `abi:""`
}
type EventPledge struct {
Who common.Address
Wad *big.Int
Currency [3]byte
}
type BadEventPledge struct {
Who string
Wad int
Currency [3]byte
}
type EventMixedCase struct {
Value1 *big.Int `abi:"value"`
Value2 *big.Int `abi:"_value"`
Value3 *big.Int `abi:"Value"`
}
bigint := new(big.Int)
bigintExpected := big.NewInt(1000000)
bigintExpected2 := big.NewInt(2218516807680)
bigintExpected3 := big.NewInt(1000001)
addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
var testCases = []struct {
data string
dest interface{}
expected interface{}
jsonLog []byte
error string
name string
}{{
transferData1,
&EventTransfer{},
&EventTransfer{Value: bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into structure",
}, {
transferData1,
&[]interface{}{&bigint},
&[]interface{}{&bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into slice",
}, {
transferData1,
&EventTransferWithTag{},
&EventTransferWithTag{Value1: bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into structure with abi: tag",
}, {
transferData1,
&BadEventTransferWithDuplicatedTag{},
&BadEventTransferWithDuplicatedTag{},
jsonEventTransfer,
"struct: abi tag in 'Value2' already mapped",
"Can not unpack ERC20 Transfer event with duplicated abi tag",
}, {
transferData1,
&BadEventTransferWithSameFieldAndTag{},
&BadEventTransferWithSameFieldAndTag{},
jsonEventTransfer,
"abi: multiple variables maps to the same abi field 'value'",
"Can not unpack ERC20 Transfer event with a field and a tag mapping to the same abi variable",
}, {
transferData1,
&BadEventTransferWithEmptyTag{},
&BadEventTransferWithEmptyTag{},
jsonEventTransfer,
"struct: abi tag in 'Value' is empty",
"Can not unpack ERC20 Transfer event with an empty tag",
}, {
pledgeData1,
&EventPledge{},
&EventPledge{
addr,
bigintExpected2,
[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into structure",
}, {
pledgeData1,
&[]interface{}{&common.Address{}, &bigint, &[3]byte{}},
&[]interface{}{
&addr,
&bigintExpected2,
&[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into slice",
}, {
|
random_line_split
|
|
event_test.go
|
test := range table {
abi, err := JSON(strings.NewReader(test.definition))
if err != nil {
t.Fatal(err)
}
for name, event := range abi.Events {
if event.Id() != test.expectations[name] {
t.Errorf("expected id to be %x, got %x", test.expectations[name], event.Id())
}
}
}
}
// TestEventMultiValueWithArrayUnpack verifies that array fields are counted correctly after parsing the array.
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
type testStruct struct {
Value1 [2]uint8
Value2 uint8
}
abi, err := JSON(strings.NewReader(definition))
require.NoError(t, err)
var b bytes.Buffer
var i uint8 = 1
for ; i <= 3; i++ {
b.Write(packNum(reflect.ValueOf(i)))
}
var rst testStruct
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
require.Equal(t, [2]uint8{1, 2}, rst.Value1)
require.Equal(t, uint8(3), rst.Value2)
}
func TestEventTupleUnpack(t *testing.T) {
type EventTransfer struct {
Value *big.Int
}
type EventTransferWithTag struct {
// this is valid because `value` is not exportable,
// so value is only unmarshalled into `Value1`.
value *big.Int
Value1 *big.Int `abi:"value"`
}
type BadEventTransferWithSameFieldAndTag struct {
Value *big.Int
Value1 *big.Int `abi:"value"`
}
type BadEventTransferWithDuplicatedTag struct {
Value1 *big.Int `abi:"value"`
Value2 *big.Int `abi:"value"`
}
type BadEventTransferWithEmptyTag struct {
Value *big.Int `abi:""`
}
type EventPledge struct {
Who common.Address
Wad *big.Int
Currency [3]byte
}
type BadEventPledge struct {
Who string
Wad int
Currency [3]byte
}
type EventMixedCase struct {
Value1 *big.Int `abi:"value"`
Value2 *big.Int `abi:"_value"`
Value3 *big.Int `abi:"Value"`
}
bigint := new(big.Int)
bigintExpected := big.NewInt(1000000)
bigintExpected2 := big.NewInt(2218516807680)
bigintExpected3 := big.NewInt(1000001)
addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
var testCases = []struct {
data string
dest interface{}
expected interface{}
jsonLog []byte
error string
name string
}{{
transferData1,
&EventTransfer{},
&EventTransfer{Value: bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into structure",
}, {
transferData1,
&[]interface{}{&bigint},
&[]interface{}{&bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into slice",
}, {
transferData1,
&EventTransferWithTag{},
&EventTransferWithTag{Value1: bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into structure with abi: tag",
}, {
transferData1,
&BadEventTransferWithDuplicatedTag{},
&BadEventTransferWithDuplicatedTag{},
jsonEventTransfer,
"struct: abi tag in 'Value2' already mapped",
"Can not unpack ERC20 Transfer event with duplicated abi tag",
}, {
transferData1,
&BadEventTransferWithSameFieldAndTag{},
&BadEventTransferWithSameFieldAndTag{},
jsonEventTransfer,
"abi: multiple variables maps to the same abi field 'value'",
"Can not unpack ERC20 Transfer event with a field and a tag mapping to the same abi variable",
}, {
transferData1,
&BadEventTransferWithEmptyTag{},
&BadEventTransferWithEmptyTag{},
jsonEventTransfer,
"struct: abi tag in 'Value' is empty",
"Can not unpack ERC20 Transfer event with an empty tag",
}, {
pledgeData1,
&EventPledge{},
&EventPledge{
addr,
bigintExpected2,
[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into structure",
}, {
pledgeData1,
&[]interface{}{&common.Address{}, &bigint, &[3]byte{}},
&[]interface{}{
&addr,
&bigintExpected2,
&[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into slice",
}, {
pledgeData1,
&[3]interface{}{&common.Address{}, &bigint, &[3]byte{}},
&[3]interface{}{
&addr,
&bigintExpected2,
&[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into an array",
}, {
pledgeData1,
&[]interface{}{new(int), 0, 0},
&[]interface{}{},
jsonEventPledge,
"abi: cannot unmarshal common.Address in to int",
"Can not unpack Pledge event into slice with wrong types",
}, {
pledgeData1,
&BadEventPledge{},
&BadEventPledge{},
jsonEventPledge,
"abi: cannot unmarshal common.Address in to string",
"Can not unpack Pledge event into struct with wrong filed types",
}, {
pledgeData1,
&[]interface{}{common.Address{}, new(big.Int)},
&[]interface{}{},
jsonEventPledge,
"abi: insufficient number of elements in the list/array for unpack, want 3, got 2",
"Can not unpack Pledge event into too short slice",
}, {
pledgeData1,
new(map[string]interface{}),
&[]interface{}{},
jsonEventPledge,
"abi: cannot unmarshal tuple into map[string]interface {}",
"Can not unpack Pledge event into map",
}, {
mixedCaseData1,
&EventMixedCase{},
&EventMixedCase{Value1: bigintExpected, Value2: bigintExpected2, Value3: bigintExpected3},
jsonEventMixedCase,
"",
"Can unpack abi variables with mixed case",
}}
for _, tc := range testCases {
assert := assert.New(t)
tc := tc
t.Run(tc.name, func(t *testing.T) {
err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert)
if tc.error == "" {
assert.Nil(err, "Should be able to unpack event data.")
assert.Equal(tc.expected, tc.dest, tc.name)
} else {
assert.EqualError(err, tc.error, tc.name)
}
})
}
}
func unpackTestEventData(dest interface{}, hexData string, jsonEvent []byte, assert *assert.Assertions) error {
data, err := hex.DecodeString(hexData)
assert.NoError(err, "Hex data should be a correct hex-string")
var e Event
assert.NoError(json.Unmarshal(jsonEvent, &e), "Should be able to unmarshal event ABI")
a := ABI{Events: map[string]Event{"e": e}}
return a.Unpack(dest, "e", data)
}
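For reference, the same unpack pattern can be used outside the test harness. The sketch below is hypothetical (the function name and the inline event definition are illustrative only) and relies solely on the JSON parser and ABI.Unpack used above; fed with transferData1 it would yield 1000000.
// Hypothetical helper assumed to live in the same package as the tests,
// so JSON, strings, hex and big are already in scope.
func exampleUnpackTransfer() (*big.Int, error) {
	parsed, err := JSON(strings.NewReader(`[{ "type": "event", "name": "e", "inputs": [{ "indexed": false, "name": "value", "type": "uint256" }] }]`))
	if err != nil {
		return nil, err
	}
	data, err := hex.DecodeString(transferData1)
	if err != nil {
		return nil, err
	}
	var out struct{ Value *big.Int }
	if err := parsed.Unpack(&out, "e", data); err != nil {
		return nil, err
	}
	return out.Value, nil // expected: 1000000
}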
/*
Taken from
https://github.com/ethereum/go-ethereum/pull/15568
*/
type testResult struct {
Values [2]*big.Int
Value1 *big.Int
Value2 *big.Int
}
type testCase struct {
definition string
want testResult
}
func (tc testCase) encoded(intType, arrayType Type) []byte {
var b bytes.Buffer
if tc.want.Value1 != nil {
val, _ := intType.pack(reflect.ValueOf(tc.want.Value1))
b.Write(val)
}
if !reflect.DeepEqual(tc.want.Values, [2]*big.Int{nil, nil}) {
val, _ := arrayType.pack(reflect.ValueOf(tc.want.Values))
b.Write(val)
}
if tc.want.Value2 != nil
|
{
val, _ := intType.pack(reflect.ValueOf(tc.want.Value2))
b.Write(val)
}
|
conditional_block
|
|
event_test.go
|
, "name": "Value", "type": "uint256"
}],
"name": "MixedCase",
"type": "event"
}`)
// 1000000
var transferData1 = "00000000000000000000000000000000000000000000000000000000000f4240"
// "0x00Ce0d46d924CC8437c806721496599FC3FFA268", 2218516807680, "usd"
var pledgeData1 = "00000000000000000000000000ce0d46d924cc8437c806721496599fc3ffa2680000000000000000000000000000000000000000000000000000020489e800007573640000000000000000000000000000000000000000000000000000000000"
// 1000000,2218516807680,1000001
var mixedCaseData1 = "00000000000000000000000000000000000000000000000000000000000f42400000000000000000000000000000000000000000000000000000020489e8000000000000000000000000000000000000000000000000000000000000000f4241"
func TestEventId(t *testing.T) {
var table = []struct {
definition string
expectations map[string]common.Hash
}{
{
definition: `[
{ "type" : "event", "name" : "balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
{ "type" : "event", "name" : "check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
]`,
expectations: map[string]common.Hash{
"balance": crypto.Keccak256Hash([]byte("balance(uint256)")),
"check": crypto.Keccak256Hash([]byte("check(address,uint256)")),
},
},
}
for _, test := range table {
abi, err := JSON(strings.NewReader(test.definition))
if err != nil {
t.Fatal(err)
}
for name, event := range abi.Events {
if event.Id() != test.expectations[name] {
t.Errorf("expected id to be %x, got %x", test.expectations[name], event.Id())
}
}
}
}
// TestEventMultiValueWithArrayUnpack verifies that array fields are counted correctly after parsing the array.
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
type testStruct struct {
Value1 [2]uint8
Value2 uint8
}
abi, err := JSON(strings.NewReader(definition))
require.NoError(t, err)
var b bytes.Buffer
var i uint8 = 1
for ; i <= 3; i++ {
b.Write(packNum(reflect.ValueOf(i)))
}
var rst testStruct
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
require.Equal(t, [2]uint8{1, 2}, rst.Value1)
require.Equal(t, uint8(3), rst.Value2)
}
func TestEventTupleUnpack(t *testing.T)
|
Value2 *big.Int `abi:"value"`
}
type BadEventTransferWithEmptyTag struct {
Value *big.Int `abi:""`
}
type EventPledge struct {
Who common.Address
Wad *big.Int
Currency [3]byte
}
type BadEventPledge struct {
Who string
Wad int
Currency [3]byte
}
type EventMixedCase struct {
Value1 *big.Int `abi:"value"`
Value2 *big.Int `abi:"_value"`
Value3 *big.Int `abi:"Value"`
}
bigint := new(big.Int)
bigintExpected := big.NewInt(1000000)
bigintExpected2 := big.NewInt(2218516807680)
bigintExpected3 := big.NewInt(1000001)
addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
var testCases = []struct {
data string
dest interface{}
expected interface{}
jsonLog []byte
error string
name string
}{{
transferData1,
&EventTransfer{},
&EventTransfer{Value: bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into structure",
}, {
transferData1,
&[]interface{}{&bigint},
&[]interface{}{&bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into slice",
}, {
transferData1,
&EventTransferWithTag{},
&EventTransferWithTag{Value1: bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into structure with abi: tag",
}, {
transferData1,
&BadEventTransferWithDuplicatedTag{},
&BadEventTransferWithDuplicatedTag{},
jsonEventTransfer,
"struct: abi tag in 'Value2' already mapped",
"Can not unpack ERC20 Transfer event with duplicated abi tag",
}, {
transferData1,
&BadEventTransferWithSameFieldAndTag{},
&BadEventTransferWithSameFieldAndTag{},
jsonEventTransfer,
"abi: multiple variables maps to the same abi field 'value'",
"Can not unpack ERC20 Transfer event with a field and a tag mapping to the same abi variable",
}, {
transferData1,
&BadEventTransferWithEmptyTag{},
&BadEventTransferWithEmptyTag{},
jsonEventTransfer,
"struct: abi tag in 'Value' is empty",
"Can not unpack ERC20 Transfer event with an empty tag",
}, {
pledgeData1,
&EventPledge{},
&EventPledge{
addr,
bigintExpected2,
[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into structure",
}, {
pledgeData1,
&[]interface{}{&common.Address{}, &bigint, &[3]byte{}},
&[]interface{}{
&addr,
&bigintExpected2,
&[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into slice",
}, {
|
{
type EventTransfer struct {
Value *big.Int
}
type EventTransferWithTag struct {
// this is valid because `value` is not exportable,
// so value is only unmarshalled into `Value1`.
value *big.Int
Value1 *big.Int `abi:"value"`
}
type BadEventTransferWithSameFieldAndTag struct {
Value *big.Int
Value1 *big.Int `abi:"value"`
}
type BadEventTransferWithDuplicatedTag struct {
Value1 *big.Int `abi:"value"`
|
identifier_body
|
event_test.go
|
type" : "event", "name" : "check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
]`,
expectations: map[string]common.Hash{
"balance": crypto.Keccak256Hash([]byte("balance(uint256)")),
"check": crypto.Keccak256Hash([]byte("check(address,uint256)")),
},
},
}
for _, test := range table {
abi, err := JSON(strings.NewReader(test.definition))
if err != nil {
t.Fatal(err)
}
for name, event := range abi.Events {
if event.Id() != test.expectations[name] {
t.Errorf("expected id to be %x, got %x", test.expectations[name], event.Id())
}
}
}
}
// TestEventMultiValueWithArrayUnpack verifies that array fields are counted correctly after parsing the array.
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
type testStruct struct {
Value1 [2]uint8
Value2 uint8
}
abi, err := JSON(strings.NewReader(definition))
require.NoError(t, err)
var b bytes.Buffer
var i uint8 = 1
for ; i <= 3; i++ {
b.Write(packNum(reflect.ValueOf(i)))
}
var rst testStruct
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
require.Equal(t, [2]uint8{1, 2}, rst.Value1)
require.Equal(t, uint8(3), rst.Value2)
}
func TestEventTupleUnpack(t *testing.T) {
type EventTransfer struct {
Value *big.Int
}
type EventTransferWithTag struct {
// this is valid because `value` is not exportable,
// so value is only unmarshalled into `Value1`.
value *big.Int
Value1 *big.Int `abi:"value"`
}
type BadEventTransferWithSameFieldAndTag struct {
Value *big.Int
Value1 *big.Int `abi:"value"`
}
type BadEventTransferWithDuplicatedTag struct {
Value1 *big.Int `abi:"value"`
Value2 *big.Int `abi:"value"`
}
type BadEventTransferWithEmptyTag struct {
Value *big.Int `abi:""`
}
type EventPledge struct {
Who common.Address
Wad *big.Int
Currency [3]byte
}
type BadEventPledge struct {
Who string
Wad int
Currency [3]byte
}
type EventMixedCase struct {
Value1 *big.Int `abi:"value"`
Value2 *big.Int `abi:"_value"`
Value3 *big.Int `abi:"Value"`
}
bigint := new(big.Int)
bigintExpected := big.NewInt(1000000)
bigintExpected2 := big.NewInt(2218516807680)
bigintExpected3 := big.NewInt(1000001)
addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
var testCases = []struct {
data string
dest interface{}
expected interface{}
jsonLog []byte
error string
name string
}{{
transferData1,
&EventTransfer{},
&EventTransfer{Value: bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into structure",
}, {
transferData1,
&[]interface{}{&bigint},
&[]interface{}{&bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into slice",
}, {
transferData1,
&EventTransferWithTag{},
&EventTransferWithTag{Value1: bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into structure with abi: tag",
}, {
transferData1,
&BadEventTransferWithDuplicatedTag{},
&BadEventTransferWithDuplicatedTag{},
jsonEventTransfer,
"struct: abi tag in 'Value2' already mapped",
"Can not unpack ERC20 Transfer event with duplicated abi tag",
}, {
transferData1,
&BadEventTransferWithSameFieldAndTag{},
&BadEventTransferWithSameFieldAndTag{},
jsonEventTransfer,
"abi: multiple variables maps to the same abi field 'value'",
"Can not unpack ERC20 Transfer event with a field and a tag mapping to the same abi variable",
}, {
transferData1,
&BadEventTransferWithEmptyTag{},
&BadEventTransferWithEmptyTag{},
jsonEventTransfer,
"struct: abi tag in 'Value' is empty",
"Can not unpack ERC20 Transfer event with an empty tag",
}, {
pledgeData1,
&EventPledge{},
&EventPledge{
addr,
bigintExpected2,
[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into structure",
}, {
pledgeData1,
&[]interface{}{&common.Address{}, &bigint, &[3]byte{}},
&[]interface{}{
&addr,
&bigintExpected2,
&[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into slice",
}, {
pledgeData1,
&[3]interface{}{&common.Address{}, &bigint, &[3]byte{}},
&[3]interface{}{
&addr,
&bigintExpected2,
&[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into an array",
}, {
pledgeData1,
&[]interface{}{new(int), 0, 0},
&[]interface{}{},
jsonEventPledge,
"abi: cannot unmarshal common.Address in to int",
"Can not unpack Pledge event into slice with wrong types",
}, {
pledgeData1,
&BadEventPledge{},
&BadEventPledge{},
jsonEventPledge,
"abi: cannot unmarshal common.Address in to string",
"Can not unpack Pledge event into struct with wrong filed types",
}, {
pledgeData1,
&[]interface{}{common.Address{}, new(big.Int)},
&[]interface{}{},
jsonEventPledge,
"abi: insufficient number of elements in the list/array for unpack, want 3, got 2",
"Can not unpack Pledge event into too short slice",
}, {
pledgeData1,
new(map[string]interface{}),
&[]interface{}{},
jsonEventPledge,
"abi: cannot unmarshal tuple into map[string]interface {}",
"Can not unpack Pledge event into map",
}, {
mixedCaseData1,
&EventMixedCase{},
&EventMixedCase{Value1: bigintExpected, Value2: bigintExpected2, Value3: bigintExpected3},
jsonEventMixedCase,
"",
"Can unpack abi variables with mixed case",
}}
for _, tc := range testCases {
assert := assert.New(t)
tc := tc
t.Run(tc.name, func(t *testing.T) {
err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert)
if tc.error == "" {
assert.Nil(err, "Should be able to unpack event data.")
assert.Equal(tc.expected, tc.dest, tc.name)
} else {
assert.EqualError(err, tc.error, tc.name)
}
})
}
}
func unpackTestEventData(dest interface{}, hexData string, jsonEvent []byte, assert *assert.Assertions) error {
data, err := hex.DecodeString(hexData)
assert.NoError(err, "Hex data should be a correct hex-string")
var e Event
assert.NoError(json.Unmarshal(jsonEvent, &e), "Should be able to unmarshal event ABI")
a := ABI{Events: map[string]Event{"e": e}}
return a.Unpack(dest, "e", data)
}
/*
Taken from
https://github.com/ethereum/go-ethereum/pull/15568
*/
type testResult struct {
Values [2]*big.Int
Value1 *big.Int
Value2 *big.Int
}
type testCase struct {
definition string
want testResult
}
func (tc testCase)
|
encoded
|
identifier_name
|
|
PolicyEditForm.js
|
= forwardRef((props, ref) => {
useImperativeHandle(ref, () => ({
form,
handleSubmit
}));
// Check whether the policy name already exists
const asyncValidatePolicyNameRepeat = (rule, value, callback) => {
debouncedCheckPolicyName(() => {
props.actions.existPolicyName({ username: value }).then(({ data }) => {
if (data.isExist) {
callback("该政策名称已存在,请重新输入")
} else {
callback()
}
}).catch(() => {
callback("请求错误, 校验失败")
})
})
}
// Collect every account attached to the policy
const getExcludeIds = (list = []) => {
const {
specialAccountRules,
whiteList
} = props.form.getFieldsValue([ 'specialAccountRules', 'whiteList.accountList' ])
let result = specialAccountRules.reduce((result, item) => {
return result.concat(item.accountList)
}, [])
result = result.concat(whiteList.accountList, list)
return {
ids: result.map(item => item.accountId),
items: result
}
}
// Check whether the policy contains any rebate rule
const hasRebate = (values) => {
const {
specialAccountRules,
globalAccountRule
} = values
let index = [ ...specialAccountRules, globalAccountRule ].findIndex((rule) => {
return rule.rebateRule?.rebateType
})
return index > -1
}
// Normalize the form values before submission
const handleValues = (values) => {
let newValue = { ...values }
const { globalAccountRule, specialAccountRules } = values
// Handle the ladder settings of the global rebate rule
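// e.g. rebateNumbers [0, 1000, 5000] with percentage [1, 2] becomes
// [{amountLowLimit: 0, amountHighLimit: 1000, rebateRatio: 1},
//  {amountLowLimit: 1000, amountHighLimit: 5000, rebateRatio: 2}]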
if (globalAccountRule.rebateRule?.rebateType === RULE_REBATE_LADDER) {
const { rebateNumbers = [], percentage = [] } = globalAccountRule.rebateRule.rebateStepRules;
const _rebateStepRules = [];
for (let index = 0; index < rebateNumbers.length - 1; index++) {
_rebateStepRules.push({
amountLowLimit: rebateNumbers[index],
amountHighLimit: rebateNumbers[index + 1],
rebateRatio: percentage[index]
})
}
if (_rebateStepRules.length > 0) {
newValue.globalAccountRule.rebateRule.rebateStepRules = _rebateStepRules;
}
}
// Convert accountList -> accountIds
newValue.specialAccountRules = specialAccountRules.map((rule, index) => {
let newRule = { ...rule }
newRule.ruleId = index + 1
newRule.accountIds = newRule.accountList.map(item => item.accountId)
delete newRule.uuid
delete newRule.accountList
return newRule
})
newValue.whiteList.accountIds = newValue.whiteList.accountList.map(item => item.accountId)
delete newValue.whiteList.accountList
// Handle the validity period
newValue.validStartTime = values.policyTime[0].format('YYYY-MM-DD 00:00:00');
newValue.validEndTime = values.policyTime[1].format('YYYY-MM-DD 23:59:59');
delete newValue.policyTime
newValue.isGuaranteed = newValue.isGuaranteed ? 1 : 2
newValue.mcnId = mcnId
newValue.id = data.id
return newValue
}
// Pre-submit validation and processing
const handleSubmit = (submit) => {
props.form.validateFields((err, values) => {
if (err) return;
if (!values.globalAccountRule.discountRule && !values.globalAccountRule.rebateRule) {
message.warn('折扣与返点至少填一项')
return
}
/*
if (hasRebate(values) && !values.rebateSettlementCycle) {
message.warn('当前全局或特殊规则中设置了返点, 请添加返点规则')
return
}
*/
const body = handleValues(values)
submit(body)
})
}
// Removing a platform also removes its associated accounts
const onDeselectPlatform = (key, clear, confirm) => {
function hasDeleted() {
const list = getExcludeIds().items
if (list.length === 0) return
return clear || list.find(account => account.platformId === key)
}
// No associated accounts
if (!hasDeleted()) {
confirm()
return
}
Modal.confirm
|
specialAccountRules,
whiteList
} = props.form.getFieldsValue([ 'specialAccountRules', 'whiteList.accountList' ])
let r1 = specialAccountRules.map(rule => {
let newRule = { ...rule }
newRule.accountList = clear ? [] : newRule.accountList.filter(item => item.platformId !== key)
return newRule
})
let r2 = clear ? [] : whiteList.accountList.filter(item => item.platformId !== key)
setFieldsValue({
'specialAccountRules': r1,
'whiteList.accountList': r2
})
}
})
}
const { form, data } = props;
const { globalAccountRule = {}, mcnId, specialAccountRules = [], whiteList = {} } = data
const { getFieldDecorator, getFieldValue, setFieldsValue } = form;
return (
<Form {...formItemLayout} className="policy-manage-details-container-scroll" id="scroll-box">
<ConfigProvider getPopupContainer={() => document.getElementById('scroll-box')}>
<FormItem label='主账号名称'>
{data.identityName || '-'}
</FormItem>
<FormItem label='主账号ID'>
{data.mcnId}
</FormItem>
<FormItem label='政策名称'>
{getFieldDecorator('policyName', {
initialValue: data.policyName,
rules: [
{ required: true, message: '请填写政策名称' },
{
pattern: /^.{1,30}$/,
message: '政策名称最多可输入30个字'
}
// { validator: asyncValidatePolicyNameRepeat }
]
})(
<Input placeholder='请输入' style={{ width: 330 }} />
)}
</FormItem>
<FormItem label='是否有合同'>
{getFieldDecorator('hasContract', {
initialValue: data.hasContract,
rules: [
{ required: true, message: '请选择是否有合同' },
]
})(
<Radio.Group>
<Radio value={1}>是</Radio>
<Radio value={2}>否</Radio>
</Radio.Group>
)}
</FormItem>
<FormItem label="政策有效期">
{getFieldDecorator('policyTime', {
initialValue: data.validStartTime ? [ moment(data.validStartTime), moment(data.validEndTime) ] : [],
rules: [
{ type: 'array', required: true, message: '请添加政策有效期' },
{
validator: (rule, value, callback) => {
const [ start, end ] = value
const min = moment(start).add(30, 'd')
if (end < min) {
return callback(`结束日期与开始日期的间隔至少30天`)
}
callback()
}
}
]
})(
<RangePicker />
)}
</FormItem>
<FormItem label="政策级别" {...formItemLayout}>
{getFieldDecorator('policyLevel', {
initialValue: data.policyLevel,
rules: [ { required: true, message: '该项为必填项,请选择!' } ]
})(
<RadioGroup>
{
Object.entries(POLICY_LEVEL).map(([ key, item ]) =>
<Radio key={key} value={parseInt(key)}>
<IconFont type={item.icon} /> {item.text}
</Radio>)
}
</RadioGroup>
)}
</FormItem>
<FormItem label='平台'>
{getFieldDecorator(`globalAccountRule.platformIds`, {
initialValue: (globalAccountRule.platformList || []).map(item => item.platformId),
rules: [
{ required: true, message: '请选择平台' }
]
})(
<SelectAllCheck
action={props.actions.getGlobalRulePlatforms}
options={props.allPlatforms}
onDeselect={onDeselectPlatform}
/>
)}
</FormItem>
<FormItem label='设置全局规则' required>
<Icon style={{ color: "#faad14" }} type="exclamation-circle" theme="filled" /> 折扣与返点至少填一项
<DiscountEdit form={props.form} rule={globalAccountRule.discountRule} fieldKeyPrefix="globalAccountRule." />
<RebateEdit form={props.form} rule={globalAccountRule.rebateRule} fieldKeyPrefix="globalAccountRule." />
</FormItem>
<FormItem label='特殊账号'>
{getFieldDecorator(`specialAccountRules`, {
initialValue: specialAccountRules
})(
<SpecialRuleEdit
actions={props.actions}
getExcludeIds={getExcludeIds}
platforms={getFieldValue('globalAccountRule.platformIds')}
params={{
mcn
|
({
title: "特殊账号或白名单中设置了该平台的账号,若删除该平台,账号将一起删除,是否确认此操作?",
onOk: () => {
// Remove the platform
confirm()
// Remove the accounts
const {
|
identifier_body
|
PolicyEditForm.js
|
forwardRef((props, ref) => {
useImperativeHandle(ref, () => ({
form,
handleSubmit
}));
// Check whether the policy name already exists
const asyncValidatePolicyNameRepeat = (rule, value, callback) => {
debouncedCheckPolicyName(() => {
props.actions.existPolicyName({ username: value }).then(({ data }) => {
if (data.isExist) {
callback("该政策名称已存在,请重新输入")
} else {
callback()
}
}).catch(() => {
callback("请求错误, 校验失败")
})
})
}
// Collect every account attached to the policy
const getExcludeIds = (list = []) => {
const {
specialAccountRules,
whiteList
} = props.form.getFieldsValue([ 'specialAccountRules', 'whiteList.accountList' ])
let result = specialAccountRules.reduce((result, item) => {
return result.concat(item.accountList)
}, [])
result = result.concat(whiteList.accountList, list)
return {
ids: result.map(item => item.accountId),
items: result
}
}
// Check whether the policy contains any rebate rule
const hasRebate = (values) => {
const {
specialAccountRules,
globalAccountRule
} = values
let index = [ ...specialAccountRules, globalAccountRule ].findIndex((rule) => {
return rule.rebateRule?.rebateType
})
return index > -1
}
// Normalize the form values before submission
const handleValues = (values) => {
let newValue = { ...values }
const { globalAccountRule, specialAccountRules } = values
// Handle the ladder settings of the global rebate rule
if (globalAccountRule.rebateRule?.rebateType === RULE_REBATE_LADDER) {
const { rebateNumbers = [], percentage = [] } = globalAccountRule.rebateRule.rebateStepRules;
const _rebateStepRules = [];
for (let index = 0; index < rebateNumbers.length - 1; index++) {
_rebateStepRules.push({
amountLowLimit: rebateNumbers[index],
amountHighLimit: rebateNumbers[index + 1],
|
// Convert accountList -> accountIds
newValue.specialAccountRules = specialAccountRules.map((rule, index) => {
let newRule = { ...rule }
newRule.ruleId = index + 1
newRule.accountIds = newRule.accountList.map(item => item.accountId)
delete newRule.uuid
delete newRule.accountList
return newRule
})
newValue.whiteList.accountIds = newValue.whiteList.accountList.map(item => item.accountId)
delete newValue.whiteList.accountList
// Handle the validity period
newValue.validStartTime = values.policyTime[0].format('YYYY-MM-DD 00:00:00');
newValue.validEndTime = values.policyTime[1].format('YYYY-MM-DD 23:59:59');
delete newValue.policyTime
newValue.isGuaranteed = newValue.isGuaranteed ? 1 : 2
newValue.mcnId = mcnId
newValue.id = data.id
return newValue
}
// Pre-submit validation and processing
const handleSubmit = (submit) => {
props.form.validateFields((err, values) => {
if (err) return;
if (!values.globalAccountRule.discountRule && !values.globalAccountRule.rebateRule) {
message.warn('折扣与返点至少填一项')
return
}
/*
if (hasRebate(values) && !values.rebateSettlementCycle) {
message.warn('当前全局或特殊规则中设置了返点, 请添加返点规则')
return
}
*/
const body = handleValues(values)
submit(body)
})
}
// Removing a platform also removes its associated accounts
const onDeselectPlatform = (key, clear, confirm) => {
function hasDeleted() {
const list = getExcludeIds().items
if (list.length === 0) return
return clear || list.find(account => account.platformId === key)
}
// No associated accounts
if (!hasDeleted()) {
confirm()
return
}
Modal.confirm({
title: "特殊账号或白名单中设置了该平台的账号,若删除该平台,账号将一起删除,是否确认此操作?",
onOk: () => {
// Remove the platform
confirm()
// Remove the accounts
const {
specialAccountRules,
whiteList
} = props.form.getFieldsValue([ 'specialAccountRules', 'whiteList.accountList' ])
let r1 = specialAccountRules.map(rule => {
let newRule = { ...rule }
newRule.accountList = clear ? [] : newRule.accountList.filter(item => item.platformId !== key)
return newRule
})
let r2 = clear ? [] : whiteList.accountList.filter(item => item.platformId !== key)
setFieldsValue({
'specialAccountRules': r1,
'whiteList.accountList': r2
})
}
})
}
const { form, data } = props;
const { globalAccountRule = {}, mcnId, specialAccountRules = [], whiteList = {} } = data
const { getFieldDecorator, getFieldValue, setFieldsValue } = form;
return (
<Form {...formItemLayout} className="policy-manage-details-container-scroll" id="scroll-box">
<ConfigProvider getPopupContainer={() => document.getElementById('scroll-box')}>
<FormItem label='主账号名称'>
{data.identityName || '-'}
</FormItem>
<FormItem label='主账号ID'>
{data.mcnId}
</FormItem>
<FormItem label='政策名称'>
{getFieldDecorator('policyName', {
initialValue: data.policyName,
rules: [
{ required: true, message: '请填写政策名称' },
{
pattern: /^.{1,30}$/,
message: '政策名称最多可输入30个字'
}
// { validator: asyncValidatePolicyNameRepeat }
]
})(
<Input placeholder='请输入' style={{ width: 330 }} />
)}
</FormItem>
<FormItem label='是否有合同'>
{getFieldDecorator('hasContract', {
initialValue: data.hasContract,
rules: [
{ required: true, message: '请选择是否有合同' },
]
})(
<Radio.Group>
<Radio value={1}>是</Radio>
<Radio value={2}>否</Radio>
</Radio.Group>
)}
</FormItem>
<FormItem label="政策有效期">
{getFieldDecorator('policyTime', {
initialValue: data.validStartTime ? [ moment(data.validStartTime), moment(data.validEndTime) ] : [],
rules: [
{ type: 'array', required: true, message: '请添加政策有效期' },
{
validator: (rule, value, callback) => {
const [ start, end ] = value
const min = moment(start).add(30, 'd')
if (end < min) {
return callback(`结束日期与开始日期的间隔至少30天`)
}
callback()
}
}
]
})(
<RangePicker />
)}
</FormItem>
<FormItem label="政策级别" {...formItemLayout}>
{getFieldDecorator('policyLevel', {
initialValue: data.policyLevel,
rules: [ { required: true, message: '该项为必填项,请选择!' } ]
})(
<RadioGroup>
{
Object.entries(POLICY_LEVEL).map(([ key, item ]) =>
<Radio key={key} value={parseInt(key)}>
<IconFont type={item.icon} /> {item.text}
</Radio>)
}
</RadioGroup>
)}
</FormItem>
<FormItem label='平台'>
{getFieldDecorator(`globalAccountRule.platformIds`, {
initialValue: (globalAccountRule.platformList || []).map(item => item.platformId),
rules: [
{ required: true, message: '请选择平台' }
]
})(
<SelectAllCheck
action={props.actions.getGlobalRulePlatforms}
options={props.allPlatforms}
onDeselect={onDeselectPlatform}
/>
)}
</FormItem>
<FormItem label='设置全局规则' required>
<Icon style={{ color: "#faad14" }} type="exclamation-circle" theme="filled" /> 折扣与返点至少填一项
<DiscountEdit form={props.form} rule={globalAccountRule.discountRule} fieldKeyPrefix="globalAccountRule." />
<RebateEdit form={props.form} rule={globalAccountRule.rebateRule} fieldKeyPrefix="globalAccountRule." />
</FormItem>
<FormItem label='特殊账号'>
{getFieldDecorator(`specialAccountRules`, {
initialValue: specialAccountRules
})(
<SpecialRuleEdit
actions={props.actions}
getExcludeIds={getExcludeIds}
platforms={getFieldValue('globalAccountRule.platformIds')}
params={{
mcn
|
rebateRatio: percentage[index]
})
}
if (_rebateStepRules.length > 0) {
newValue.globalAccountRule.rebateRule.rebateStepRules = _rebateStepRules;
}
}
|
conditional_block
|
PolicyEditForm.js
|
= forwardRef((props, ref) => {
useImperativeHandle(ref, () => ({
form,
handleSubmit
}));
// Check whether the policy name already exists
const asyncValidatePolicyNameRepeat = (rule, value, callback) => {
debouncedCheckPolicyName(() => {
props.actions.existPolicyName({ username: value }).then(({ data }) => {
if (data.isExist) {
callback("该政策名称已存在,请重新输入")
} else {
callback()
}
}).catch(() => {
callback("请求错误, 校验失败")
})
})
}
// Collect every account attached to the policy
const getExcludeIds = (list = []) => {
const {
specialAccountRules,
whiteList
} = props.form.getFieldsValue([ 'specialAccountRules', 'whiteList.accountList' ])
let result = specialAccountRules.reduce((result, item) => {
return result.concat(item.accountList)
}, [])
result = result.concat(whiteList.accountList, list)
return {
ids: result.map(item => item.accountId),
items: result
}
}
// Check whether the policy contains any rebate rule
const hasRebate = (values) => {
const {
specialAccountRules,
globalAccountRule
} = values
let index = [ ...specialAccountRules, globalAccountRule ].findIndex((rule) => {
return rule.rebateRule?.rebateType
})
return index > -1
}
// Normalize the form values before submission
const handleValues = (values) => {
let newValue = { ...values }
const { globalAccountRule, specialAccountRules } = values
// Handle the ladder settings of the global rebate rule
if (globalAccountRule.rebateRule?.rebateType === RULE_REBATE_LADDER) {
const { rebateNumbers = [], percentage = [] } = globalAccountRule.rebateRule.rebateStepRules;
const _rebateStepRules = [];
for (let index = 0; index < rebateNumbers.length - 1; index++) {
_rebateStepRules.push({
amountLowLimit: rebateNumbers[index],
amountHighLimit: rebateNumbers[index + 1],
rebateRatio: percentage[index]
})
}
if (_rebateStepRules.length > 0) {
newValue.globalAccountRule.rebateRule.rebateStepRules = _rebateStepRules;
}
}
// Convert accountList -> accountIds
newValue.specialAccountRules = specialAccountRules.map((rule, index) => {
let newRule = { ...rule }
newRule.ruleId = index + 1
newRule.accountIds = newRule.accountList.map(item => item.accountId)
delete newRule.uuid
delete newRule.accountList
return newRule
})
newValue.whiteList.accountIds = newValue.whiteList.accountList.map(item => item.accountId)
delete newValue.whiteList.accountList
// Handle the validity period
newValue.validStartTime = values.policyTime[0].format('YYYY-MM-DD 00:00:00');
newValue.validEndTime = values.policyTime[1].format('YYYY-MM-DD 23:59:59');
delete newValue.policyTime
newValue.isGuaranteed = newValue.isGuaranteed ? 1 : 2
newValue.mcnId = mcnId
newValue.id = data.id
return newValue
}
// Pre-submit validation and processing
const handleSubmit = (submit) => {
props.form.validateFields((err, values) => {
if (err) return;
if (!values.globalAccountRule.discountRule && !values.globalAccountRule.rebateRule) {
message.warn('折扣与返点至少填一项')
return
}
/*
if (hasRebate(values) && !values.rebateSettlementCycle) {
message.warn('当前全局或特殊规则中设置了返点, 请添加返点规则')
return
}
*/
const body = handleValues(values)
submit(body)
})
}
// Removing a platform also removes its associated accounts
const onDeselectPlatform = (key, clear, confirm) => {
function hasDeleted() {
const list = getExcludeIds().items
if (list.length === 0) return
return clear || list.find(account => account.platformId === key)
}
// No associated accounts
if (!hasDeleted()) {
confirm()
return
}
|
irm({
title: "特殊账号或白名单中设置了该平台的账号,若删除该平台,账号将一起删除,是否确认此操作?",
onOk: () => {
// Remove the platform
confirm()
// Remove the accounts
const {
specialAccountRules,
whiteList
} = props.form.getFieldsValue([ 'specialAccountRules', 'whiteList.accountList' ])
let r1 = specialAccountRules.map(rule => {
let newRule = { ...rule }
newRule.accountList = clear ? [] : newRule.accountList.filter(item => item.platformId !== key)
return newRule
})
let r2 = clear ? [] : whiteList.accountList.filter(item => item.platformId !== key)
setFieldsValue({
'specialAccountRules': r1,
'whiteList.accountList': r2
})
}
})
}
const { form, data } = props;
const { globalAccountRule = {}, mcnId, specialAccountRules = [], whiteList = {} } = data
const { getFieldDecorator, getFieldValue, setFieldsValue } = form;
return (
<Form {...formItemLayout} className="policy-manage-details-container-scroll" id="scroll-box">
<ConfigProvider getPopupContainer={() => document.getElementById('scroll-box')}>
<FormItem label='主账号名称'>
{data.identityName || '-'}
</FormItem>
<FormItem label='主账号ID'>
{data.mcnId}
</FormItem>
<FormItem label='政策名称'>
{getFieldDecorator('policyName', {
initialValue: data.policyName,
rules: [
{ required: true, message: '请填写政策名称' },
{
pattern: /^.{1,30}$/,
message: '政策名称最多可输入30个字'
}
// { validator: asyncValidatePolicyNameRepeat }
]
})(
<Input placeholder='请输入' style={{ width: 330 }} />
)}
</FormItem>
<FormItem label='是否有合同'>
{getFieldDecorator('hasContract', {
initialValue: data.hasContract,
rules: [
{ required: true, message: '请选择是否有合同' },
]
})(
<Radio.Group>
<Radio value={1}>是</Radio>
<Radio value={2}>否</Radio>
</Radio.Group>
)}
</FormItem>
<FormItem label="政策有效期">
{getFieldDecorator('policyTime', {
initialValue: data.validStartTime ? [ moment(data.validStartTime), moment(data.validEndTime) ] : [],
rules: [
{ type: 'array', required: true, message: '请添加政策有效期' },
{
validator: (rule, value, callback) => {
const [ start, end ] = value
const min = moment(start).add(30, 'd')
if (end < min) {
return callback(`结束日期与开始日期的间隔至少30天`)
}
callback()
}
}
]
})(
<RangePicker />
)}
</FormItem>
<FormItem label="政策级别" {...formItemLayout}>
{getFieldDecorator('policyLevel', {
initialValue: data.policyLevel,
rules: [ { required: true, message: '该项为必填项,请选择!' } ]
})(
<RadioGroup>
{
Object.entries(POLICY_LEVEL).map(([ key, item ]) =>
<Radio key={key} value={parseInt(key)}>
<IconFont type={item.icon} /> {item.text}
</Radio>)
}
</RadioGroup>
)}
</FormItem>
<FormItem label='平台'>
{getFieldDecorator(`globalAccountRule.platformIds`, {
initialValue: (globalAccountRule.platformList || []).map(item => item.platformId),
rules: [
{ required: true, message: '请选择平台' }
]
})(
<SelectAllCheck
action={props.actions.getGlobalRulePlatforms}
options={props.allPlatforms}
onDeselect={onDeselectPlatform}
/>
)}
</FormItem>
<FormItem label='设置全局规则' required>
<Icon style={{ color: "#faad14" }} type="exclamation-circle" theme="filled" /> 折扣与返点至少填一项
<DiscountEdit form={props.form} rule={globalAccountRule.discountRule} fieldKeyPrefix="globalAccountRule." />
<RebateEdit form={props.form} rule={globalAccountRule.rebateRule} fieldKeyPrefix="globalAccountRule." />
</FormItem>
<FormItem label='特殊账号'>
{getFieldDecorator(`specialAccountRules`, {
initialValue: specialAccountRules
})(
<SpecialRuleEdit
actions={props.actions}
getExcludeIds={getExcludeIds}
platforms={getFieldValue('globalAccountRule.platformIds')}
params={{
mcn
|
Modal.conf
|
identifier_name
|
PolicyEditForm.js
|
AccountRules.reduce((result, item) => {
return result.concat(item.accountList)
}, [])
result = result.concat(whiteList.accountList, list)
return {
ids: result.map(item => item.accountId),
items: result
}
}
// Check whether the policy contains any rebate rule
const hasRebate = (values) => {
const {
specialAccountRules,
globalAccountRule
} = values
let index = [ ...specialAccountRules, globalAccountRule ].findIndex((rule) => {
return rule.rebateRule?.rebateType
})
return index > -1
}
// Normalize the form values before submission
const handleValues = (values) => {
let newValue = { ...values }
const { globalAccountRule, specialAccountRules } = values
// Handle the ladder settings of the global rebate rule
if (globalAccountRule.rebateRule?.rebateType === RULE_REBATE_LADDER) {
const { rebateNumbers = [], percentage = [] } = globalAccountRule.rebateRule.rebateStepRules;
const _rebateStepRules = [];
for (let index = 0; index < rebateNumbers.length - 1; index++) {
_rebateStepRules.push({
amountLowLimit: rebateNumbers[index],
amountHighLimit: rebateNumbers[index + 1],
rebateRatio: percentage[index]
})
}
if (_rebateStepRules.length > 0) {
newValue.globalAccountRule.rebateRule.rebateStepRules = _rebateStepRules;
}
}
// Convert accountList -> accountIds
newValue.specialAccountRules = specialAccountRules.map((rule, index) => {
let newRule = { ...rule }
newRule.ruleId = index + 1
newRule.accountIds = newRule.accountList.map(item => item.accountId)
delete newRule.uuid
delete newRule.accountList
return newRule
})
newValue.whiteList.accountIds = newValue.whiteList.accountList.map(item => item.accountId)
delete newValue.whiteList.accountList
// Handle the validity period
newValue.validStartTime = values.policyTime[0].format('YYYY-MM-DD 00:00:00');
newValue.validEndTime = values.policyTime[1].format('YYYY-MM-DD 23:59:59');
delete newValue.policyTime
newValue.isGuaranteed = newValue.isGuaranteed ? 1 : 2
newValue.mcnId = mcnId
newValue.id = data.id
return newValue
}
// Pre-submit validation and processing
const handleSubmit = (submit) => {
props.form.validateFields((err, values) => {
if (err) return;
if (!values.globalAccountRule.discountRule && !values.globalAccountRule.rebateRule) {
message.warn('折扣与返点至少填一项')
return
}
/*
if (hasRebate(values) && !values.rebateSettlementCycle) {
message.warn('当前全局或特殊规则中设置了返点, 请添加返点规则')
return
}
*/
const body = handleValues(values)
submit(body)
})
}
// Removing a platform also removes its associated accounts
const onDeselectPlatform = (key, clear, confirm) => {
function hasDeleted() {
const list = getExcludeIds().items
if (list.length === 0) return
return clear || list.find(account => account.platformId === key)
}
// No associated accounts
if (!hasDeleted()) {
confirm()
return
}
Modal.confirm({
title: "特殊账号或白名单中设置了该平台的账号,若删除该平台,账号将一起删除,是否确认此操作?",
onOk: () => {
// Remove the platform
confirm()
// Remove the accounts
const {
specialAccountRules,
whiteList
} = props.form.getFieldsValue([ 'specialAccountRules', 'whiteList.accountList' ])
let r1 = specialAccountRules.map(rule => {
let newRule = { ...rule }
newRule.accountList = clear ? [] : newRule.accountList.filter(item => item.platformId !== key)
return newRule
})
let r2 = clear ? [] : whiteList.accountList.filter(item => item.platformId !== key)
setFieldsValue({
'specialAccountRules': r1,
'whiteList.accountList': r2
})
}
})
}
const { form, data } = props;
const { globalAccountRule = {}, mcnId, specialAccountRules = [], whiteList = {} } = data
const { getFieldDecorator, getFieldValue, setFieldsValue } = form;
return (
<Form {...formItemLayout} className="policy-manage-details-container-scroll" id="scroll-box">
<ConfigProvider getPopupContainer={() => document.getElementById('scroll-box')}>
<FormItem label='主账号名称'>
{data.identityName || '-'}
</FormItem>
<FormItem label='主账号ID'>
{data.mcnId}
</FormItem>
<FormItem label='政策名称'>
{getFieldDecorator('policyName', {
initialValue: data.policyName,
rules: [
{ required: true, message: '请填写政策名称' },
{
pattern: /^.{1,30}$/,
message: '政策名称最多可输入30个字'
}
// { validator: asyncValidatePolicyNameRepeat }
]
})(
<Input placeholder='请输入' style={{ width: 330 }} />
)}
</FormItem>
<FormItem label='是否有合同'>
{getFieldDecorator('hasContract', {
initialValue: data.hasContract,
rules: [
{ required: true, message: '请选择是否有合同' },
]
})(
<Radio.Group>
<Radio value={1}>是</Radio>
<Radio value={2}>否</Radio>
</Radio.Group>
)}
</FormItem>
<FormItem label="政策有效期">
{getFieldDecorator('policyTime', {
initialValue: data.validStartTime ? [ moment(data.validStartTime), moment(data.validEndTime) ] : [],
rules: [
{ type: 'array', required: true, message: '请添加政策有效期' },
{
validator: (rule, value, callback) => {
const [ start, end ] = value
const min = moment(start).add(30, 'd')
if (end < min) {
return callback(`结束日期与开始日期的间隔至少30天`)
}
callback()
}
}
]
})(
<RangePicker />
)}
</FormItem>
<FormItem label="政策级别" {...formItemLayout}>
{getFieldDecorator('policyLevel', {
initialValue: data.policyLevel,
rules: [ { required: true, message: '该项为必填项,请选择!' } ]
})(
<RadioGroup>
{
Object.entries(POLICY_LEVEL).map(([ key, item ]) =>
<Radio key={key} value={parseInt(key)}>
<IconFont type={item.icon} /> {item.text}
</Radio>)
}
</RadioGroup>
)}
</FormItem>
<FormItem label='平台'>
{getFieldDecorator(`globalAccountRule.platformIds`, {
initialValue: (globalAccountRule.platformList || []).map(item => item.platformId),
rules: [
{ required: true, message: '请选择平台' }
]
})(
<SelectAllCheck
action={props.actions.getGlobalRulePlatforms}
options={props.allPlatforms}
onDeselect={onDeselectPlatform}
/>
)}
</FormItem>
<FormItem label='设置全局规则' required>
<Icon style={{ color: "#faad14" }} type="exclamation-circle" theme="filled" /> 折扣与返点至少填一项
<DiscountEdit form={props.form} rule={globalAccountRule.discountRule} fieldKeyPrefix="globalAccountRule." />
<RebateEdit form={props.form} rule={globalAccountRule.rebateRule} fieldKeyPrefix="globalAccountRule." />
</FormItem>
<FormItem label='特殊账号'>
{getFieldDecorator(`specialAccountRules`, {
initialValue: specialAccountRules
})(
<SpecialRuleEdit
actions={props.actions}
getExcludeIds={getExcludeIds}
platforms={getFieldValue('globalAccountRule.platformIds')}
params={{
mcnId,
platformIds: getFieldValue('globalAccountRule.platformIds').join(','),
type: 1
}}
/>
)}
</FormItem>
<FormItem label='白名单账号'>
{getFieldDecorator(`whiteList.accountList`, {
initialValue: whiteList.accountList || [],
rules: [
{
type: "array",
max: WHITE_LIST_ACCOUNTS_LIMIT,
message: '最多可添加' + WHITE_LIST_ACCOUNTS_LIMIT + '个账号'
}
]
})(
<AccountListEdit
getAccountInfoByIds={props.actions.getAccountInfoByIds}
getExcludeIds={getExcludeIds}
params={{
mcnId,
platformIds: getFieldValue('globalAccountRule.platformIds').join(','),
type: 2
}}
style={{ marginRight: 20 }}
limit={WHITE_LIST_ACCOUNTS_LIMIT}
|
>
|
random_line_split
|
|
grid.rs
|
}
if child.0.row_end - child.0.row > 1 {
dim.row_spans += 1;
}
}
self.dim = dim;
}
/// Construct via a builder
pub fn build<F: FnOnce(GridBuilder<W>)>(f: F) -> Self {
let mut grid = Self::default();
let _ = grid.edit(f);
grid
}
/// Edit an existing grid via a builder
///
/// This may be used to edit children before window construction. It may
/// also be used from a running UI, but in this case a full reconfigure
/// of the window's widgets is required (triggered by the return
/// value, [`Action::RECONFIGURE`]).
pub fn edit<F: FnOnce(GridBuilder<W>)>(&mut self, f: F) -> Action {
f(GridBuilder(&mut self.widgets));
self.calc_dim();
Action::RECONFIGURE
}
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.widgets.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.widgets.len()
}
/// Returns a reference to the child, if any
pub fn get(&self, index: usize) -> Option<&W> {
self.widgets.get(index).map(|t| &t.1)
}
/// Returns a mutable reference to the child, if any
pub fn get_mut(&mut self, index: usize) -> Option<&mut W> {
self.widgets.get_mut(index).map(|t| &mut t.1)
}
/// Iterate over children
pub fn iter(&self) -> impl Iterator<Item = &(GridChildInfo, W)> {
ListIter {
list: &self.widgets,
}
}
/// Mutably iterate over children
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut (GridChildInfo, W)> {
ListIterMut {
list: &mut self.widgets,
}
}
}
pub struct GridBuilder<'a, W: Widget>(&'a mut Vec<(GridChildInfo, W)>);
impl<'a, W: Widget> GridBuilder<'a, W> {
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns the number of elements the vector can hold without reallocating.
pub fn capacity(&self) -> usize {
self.0.capacity()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// into the list. See documentation of [`Vec::reserve`].
pub fn reserve(&mut self, additional: usize) {
self.0.reserve(additional);
}
/// Remove all child widgets
pub fn clear(&mut self) {
self.0.clear();
}
/// Add a child widget
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
pub fn push(&mut self, info: GridChildInfo, widget: W) {
self.0.push((info, widget));
}
/// Add a child widget to the given cell
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
pub fn push_cell(&mut self, col: u32, row: u32, widget: W) {
let info = GridChildInfo::new(col, row);
self.push(info, widget);
}
/// Add a child widget to the given cell, builder style
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
#[must_use]
pub fn with_cell(self, col: u32, row: u32, widget: W) -> Self {
self.with_cell_span(col, row, 1, 1, widget)
}
/// Add a child widget to the given cell, with spans
///
/// Parameters `col_span` and `row_span` are the number of columns/rows
/// spanned and should each be at least 1.
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
pub fn push_cell_span(&mut self, col: u32, row: u32, col_span: u32, row_span: u32, widget: W) {
let info = GridChildInfo {
col,
col_end: col + col_span,
row,
row_end: row + row_span,
};
self.push(info, widget);
}
/// Add a child widget to the given cell, with spans, builder style
///
/// Parameters `col_span` and `row_span` are the number of columns/rows
/// spanned and should each be at least 1.
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
#[must_use]
pub fn with_cell_span(
mut self,
col: u32,
row: u32,
col_span: u32,
row_span: u32,
widget: W,
) -> Self {
self.push_cell_span(col, row, col_span, row_span, widget);
self
}
/// Remove the last child widget
///
/// Returns `None` if there are no children. Otherwise, this
/// triggers a reconfigure before the next draw operation.
pub fn pop(&mut self) -> Option<(GridChildInfo, W)> {
self.0.pop()
}
/// Inserts a child widget at position `index`
///
/// Panics if `index > len`.
pub fn insert(&mut self, index: usize, info: GridChildInfo, widget: W) {
self.0.insert(index, (info, widget));
}
/// Removes the child widget at position `index`
///
/// Panics if `index` is out of bounds.
pub fn remove(&mut self, index: usize) -> (GridChildInfo, W) {
self.0.remove(index)
}
/// Replace the child at `index`
///
/// Panics if `index` is out of bounds.
pub fn replace(&mut self, index: usize, info: GridChildInfo, widget: W) -> (GridChildInfo, W) {
let mut item = (info, widget);
std::mem::swap(&mut item, &mut self.0[index]);
item
}
/// Append child widgets from an iterator
pub fn extend<T: IntoIterator<Item = (GridChildInfo, W)>>(&mut self, iter: T) {
self.0.extend(iter);
}
/// Resize, using the given closure to construct new widgets
pub fn resize_with<F: Fn(usize) -> (GridChildInfo, W)>(&mut self, len: usize, f: F) {
let l0 = self.0.len();
if l0 > len {
self.0.truncate(len);
} else if l0 < len {
self.0.reserve(len);
for i in l0..len {
self.0.push(f(i));
}
}
}
/// Retain only widgets satisfying predicate `f`
///
/// See documentation of [`Vec::retain`].
pub fn retain<F: FnMut(&(GridChildInfo, W)) -> bool>(&mut self, f: F) {
self.0.retain(f);
}
/// Get the first index of a child occupying the given cell, if any
pub fn find_child_cell(&self, col: u32, row: u32) -> Option<usize> {
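// Cells use half-open ranges: a child occupies columns [col, col_end) and rows [row, row_end).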
for (i, (info, _)) in self.0.iter().enumerate() {
if info.col <= col && col < info.col_end && info.row <= row && row < info.row_end {
return Some(i);
}
}
None
}
/// Iterate over children
pub fn iter(&self) -> impl Iterator<Item = &(GridChildInfo, W)> {
ListIter { list: self.0 }
}
/// Mutably iterate over children
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut (GridChildInfo, W)> {
ListIterMut { list: self.0 }
}
}
impl<W: Widget> FromIterator<(GridChildInfo, W)> for Grid<W> {
#[inline]
fn from_iter<T>(iter: T) -> Self
where
T: IntoIterator<Item = (GridChildInfo, W)>,
{
Self::new_vec(iter.into_iter().collect())
}
}
impl<W: Widget> Index<usize> for Grid<W> {
type Output = (GridChildInfo, W);
fn index(&self, index: usize) -> &Self::Output {
&self.widgets[index]
}
}
impl<W: Widget> IndexMut<usize> for Grid<W> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.widgets[index]
}
}
struct
|
ListIter
|
identifier_name
|
|
grid.rs
|
-> Option<&W> {
self.widgets.get(index).map(|t| &t.1)
}
/// Returns a mutable reference to the child, if any
pub fn get_mut(&mut self, index: usize) -> Option<&mut W> {
self.widgets.get_mut(index).map(|t| &mut t.1)
}
/// Iterate over children
pub fn iter(&self) -> impl Iterator<Item = &(GridChildInfo, W)> {
ListIter {
list: &self.widgets,
}
}
/// Mutably iterate over children
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut (GridChildInfo, W)> {
ListIterMut {
list: &mut self.widgets,
}
}
}
pub struct GridBuilder<'a, W: Widget>(&'a mut Vec<(GridChildInfo, W)>);
impl<'a, W: Widget> GridBuilder<'a, W> {
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns the number of elements the vector can hold without reallocating.
pub fn capacity(&self) -> usize {
self.0.capacity()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// into the list. See documentation of [`Vec::reserve`].
pub fn reserve(&mut self, additional: usize) {
self.0.reserve(additional);
}
/// Remove all child widgets
pub fn clear(&mut self) {
self.0.clear();
}
/// Add a child widget
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
pub fn push(&mut self, info: GridChildInfo, widget: W) {
self.0.push((info, widget));
}
/// Add a child widget to the given cell
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
pub fn push_cell(&mut self, col: u32, row: u32, widget: W) {
let info = GridChildInfo::new(col, row);
self.push(info, widget);
}
/// Add a child widget to the given cell, builder style
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
#[must_use]
pub fn with_cell(self, col: u32, row: u32, widget: W) -> Self {
self.with_cell_span(col, row, 1, 1, widget)
}
/// Add a child widget to the given cell, with spans
///
/// Parameters `col_span` and `row_span` are the number of columns/rows
/// spanned and should each be at least 1.
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
pub fn push_cell_span(&mut self, col: u32, row: u32, col_span: u32, row_span: u32, widget: W) {
let info = GridChildInfo {
col,
col_end: col + col_span,
row,
row_end: row + row_span,
};
self.push(info, widget);
}
/// Add a child widget to the given cell, with spans, builder style
///
/// Parameters `col_span` and `row_span` are the number of columns/rows
/// spanned and should each be at least 1.
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
#[must_use]
pub fn with_cell_span(
mut self,
col: u32,
row: u32,
col_span: u32,
row_span: u32,
widget: W,
) -> Self {
self.push_cell_span(col, row, col_span, row_span, widget);
self
}
/// Remove the last child widget
///
/// Returns `None` if there are no children. Otherwise, this
/// triggers a reconfigure before the next draw operation.
pub fn pop(&mut self) -> Option<(GridChildInfo, W)> {
self.0.pop()
}
/// Inserts a child widget at position `index`
///
/// Panics if `index > len`.
pub fn insert(&mut self, index: usize, info: GridChildInfo, widget: W) {
self.0.insert(index, (info, widget));
}
/// Removes the child widget at position `index`
///
/// Panics if `index` is out of bounds.
pub fn remove(&mut self, index: usize) -> (GridChildInfo, W) {
self.0.remove(index)
}
/// Replace the child at `index`
///
/// Panics if `index` is out of bounds.
pub fn replace(&mut self, index: usize, info: GridChildInfo, widget: W) -> (GridChildInfo, W) {
let mut item = (info, widget);
std::mem::swap(&mut item, &mut self.0[index]);
item
}
/// Append child widgets from an iterator
pub fn extend<T: IntoIterator<Item = (GridChildInfo, W)>>(&mut self, iter: T) {
self.0.extend(iter);
}
/// Resize, using the given closure to construct new widgets
pub fn resize_with<F: Fn(usize) -> (GridChildInfo, W)>(&mut self, len: usize, f: F) {
let l0 = self.0.len();
if l0 > len {
self.0.truncate(len);
} else if l0 < len {
self.0.reserve(len);
for i in l0..len {
self.0.push(f(i));
}
}
}
/// Retain only widgets satisfying predicate `f`
///
/// See documentation of [`Vec::retain`].
pub fn retain<F: FnMut(&(GridChildInfo, W)) -> bool>(&mut self, f: F) {
self.0.retain(f);
}
/// Get the first index of a child occupying the given cell, if any
pub fn find_child_cell(&self, col: u32, row: u32) -> Option<usize> {
for (i, (info, _)) in self.0.iter().enumerate() {
if info.col <= col && col < info.col_end && info.row <= row && row < info.row_end {
return Some(i);
}
}
None
}
/// Iterate over children
pub fn iter(&self) -> impl Iterator<Item = &(GridChildInfo, W)> {
ListIter { list: self.0 }
}
/// Mutably iterate over children
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut (GridChildInfo, W)> {
ListIterMut { list: self.0 }
}
}
impl<W: Widget> FromIterator<(GridChildInfo, W)> for Grid<W> {
#[inline]
fn from_iter<T>(iter: T) -> Self
where
T: IntoIterator<Item = (GridChildInfo, W)>,
{
Self::new_vec(iter.into_iter().collect())
}
}
impl<W: Widget> Index<usize> for Grid<W> {
type Output = (GridChildInfo, W);
fn index(&self, index: usize) -> &Self::Output {
&self.widgets[index]
}
}
impl<W: Widget> IndexMut<usize> for Grid<W> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.widgets[index]
}
}
struct ListIter<'a, W: Widget> {
list: &'a [(GridChildInfo, W)],
}
impl<'a, W: Widget> Iterator for ListIter<'a, W> {
type Item = &'a (GridChildInfo, W);
fn next(&mut self) -> Option<Self::Item> {
if let Some((first, rest)) = self.list.split_first() {
self.list = rest;
Some(first)
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
impl<'a, W: Widget> ExactSizeIterator for ListIter<'a, W> {
fn len(&self) -> usize {
self.list.len()
}
}
struct ListIterMut<'a, W: Widget> {
list: &'a mut [(GridChildInfo, W)],
}
impl<'a, W: Widget> Iterator for ListIterMut<'a, W> {
type Item = &'a mut (GridChildInfo, W);
fn next(&mut self) -> Option<Self::Item> {
let list = std::mem::take(&mut self.list);
if let Some((first, rest)) = list.split_first_mut()
|
{
self.list = rest;
Some(first)
}
|
conditional_block
|
|
grid.rs
|
data: DynGridStorage,
dim: GridDimensions,
on_messages: Option<Box<dyn Fn(&mut EventCx, usize)>>,
}
impl Widget for Self {
type Data = W::Data;
fn for_child_node(
&mut self,
data: &W::Data,
index: usize,
closure: Box<dyn FnOnce(Node<'_>) + '_>,
) {
if let Some(w) = self.widgets.get_mut(index) {
closure(w.1.as_node(data));
}
}
}
impl Layout for Self {
#[inline]
fn num_children(&self) -> usize {
self.widgets.len()
}
fn get_child(&self, index: usize) -> Option<&dyn Layout> {
self.widgets.get(index).map(|w| w.1.as_layout())
|
for (info, child) in &mut self.widgets {
solver.for_child(&mut self.data, *info, |axis| {
child.size_rules(sizer.re(), axis)
});
}
solver.finish(&mut self.data)
}
fn set_rect(&mut self, cx: &mut ConfigCx, rect: Rect) {
self.core.rect = rect;
let mut setter = GridSetter::<Vec<_>, Vec<_>, _>::new(rect, self.dim, &mut self.data);
for (info, child) in &mut self.widgets {
child.set_rect(cx, setter.child_rect(&mut self.data, *info));
}
}
fn find_id(&mut self, coord: Coord) -> Option<WidgetId> {
if !self.rect().contains(coord) {
return None;
}
self.widgets
.iter_mut()
.find_map(|(_, child)| child.find_id(coord))
.or_else(|| Some(self.id()))
}
fn draw(&mut self, mut draw: DrawCx) {
for (_, child) in &mut self.widgets {
draw.recurse(child);
}
}
}
impl Events for Self {
fn handle_messages(&mut self, cx: &mut EventCx, _: &Self::Data) {
if let Some(ref f) = self.on_messages {
let index = cx.last_child().expect("message not sent from self");
f(cx, index);
}
}
}
}
impl<W: Widget> Grid<W> {
/// Construct a new instance
#[inline]
pub fn new() -> Self {
Self::new_vec(vec![])
}
/// Construct a new instance
#[inline]
pub fn new_vec(widgets: Vec<(GridChildInfo, W)>) -> Self {
let mut grid = Grid {
widgets,
..Default::default()
};
grid.calc_dim();
grid
}
/// Assign a child message handler (inline style)
///
/// This handler is called when a child pushes a message:
/// `f(cx, index)`, where `index` is the child's index.
#[inline]
pub fn on_messages(mut self, f: impl Fn(&mut EventCx, usize) + 'static) -> Self {
self.on_messages = Some(Box::new(f));
self
}
/// Get grid dimensions
///
/// The numbers of rows, columns and spans are determined automatically.
#[inline]
pub fn dimensions(&self) -> GridDimensions {
self.dim
}
/// Access layout storage
///
/// Use [`Self::dimensions`] to get expected dimensions.
#[inline]
pub fn layout_storage(&mut self) -> &mut impl layout::GridStorage {
&mut self.data
}
fn calc_dim(&mut self) {
let mut dim = GridDimensions::default();
for child in &self.widgets {
dim.cols = dim.cols.max(child.0.col_end);
dim.rows = dim.rows.max(child.0.row_end);
if child.0.col_end - child.0.col > 1 {
dim.col_spans += 1;
}
if child.0.row_end - child.0.row > 1 {
dim.row_spans += 1;
}
}
self.dim = dim;
}
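To make the bookkeeping above concrete, a hedged sketch of what `dimensions` reports for a single spanning child (it assumes `GridChildInfo`'s fields are visible to the caller and again uses an assumed example widget `Label`):

let grid = Grid::new_vec(vec![(
    GridChildInfo { col: 0, col_end: 2, row: 0, row_end: 1 },
    Label::new("wide"), // assumed example widget
)]);
let dim = grid.dimensions();
// dim.cols == 2, dim.rows == 1, dim.col_spans == 1, dim.row_spans == 0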
/// Construct via a builder
pub fn build<F: FnOnce(GridBuilder<W>)>(f: F) -> Self {
let mut grid = Self::default();
let _ = grid.edit(f);
grid
}
/// Edit an existing grid via a builder
///
/// This may be used to edit children before window construction. It may
/// also be used from a running UI, but in this case a full reconfigure
/// of the window's widgets is required (triggered by the return
/// value, [`Action::RECONFIGURE`]).
pub fn edit<F: FnOnce(GridBuilder<W>)>(&mut self, f: F) -> Action {
f(GridBuilder(&mut self.widgets));
self.calc_dim();
Action::RECONFIGURE
}
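A hedged sketch of editing a running grid; the caller is responsible for feeding the returned `Action::RECONFIGURE` back to the toolkit (`Label` is the same assumed example widget as above):

let action = grid.edit(|mut cells| {
    cells.push_cell(0, 2, Label::new("appended row"));
});
// `action` is Action::RECONFIGURE and must be handled by the caller.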
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.widgets.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.widgets.len()
}
/// Returns a reference to the child, if any
pub fn get(&self, index: usize) -> Option<&W> {
self.widgets.get(index).map(|t| &t.1)
}
/// Returns a mutable reference to the child, if any
pub fn get_mut(&mut self, index: usize) -> Option<&mut W> {
self.widgets.get_mut(index).map(|t| &mut t.1)
}
/// Iterate over children
pub fn iter(&self) -> impl Iterator<Item = &(GridChildInfo, W)> {
ListIter {
list: &self.widgets,
}
}
/// Mutably iterate over children
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut (GridChildInfo, W)> {
ListIterMut {
list: &mut self.widgets,
}
}
}
pub struct GridBuilder<'a, W: Widget>(&'a mut Vec<(GridChildInfo, W)>);
impl<'a, W: Widget> GridBuilder<'a, W> {
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns the number of elements the vector can hold without reallocating.
pub fn capacity(&self) -> usize {
self.0.capacity()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// into the list. See documentation of [`Vec::reserve`].
pub fn reserve(&mut self, additional: usize) {
self.0.reserve(additional);
}
/// Remove all child widgets
pub fn clear(&mut self) {
self.0.clear();
}
/// Add a child widget
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
pub fn push(&mut self, info: GridChildInfo, widget: W) {
self.0.push((info, widget));
}
/// Add a child widget to the given cell
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
pub fn push_cell(&mut self, col: u32, row: u32, widget: W) {
let info = GridChildInfo::new(col, row);
self.push(info, widget);
}
/// Add a child widget to the given cell, builder style
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
#[must_use]
pub fn with_cell(self, col: u32, row: u32, widget: W) -> Self {
self.with_cell_span(col, row, 1, 1, widget)
}
/// Add a child widget to the given cell, with spans
///
/// Parameters `col_span` and `row_span` are the number of columns/rows
/// spanned and should each be at least 1.
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
pub fn push_cell_span(&mut self, col: u32, row: u32, col_span: u32, row_span: u32, widget: W) {
let info = GridChildInfo {
col,
col_end: col + col_span,
row,
row_end: row + row_span,
};
self.push(info, widget);
}
/// Add a child widget to the given cell, with spans, builder style
///
/// Parameters `col_span` and `row_span` are the number of columns/rows
/// spanned and should each be at least 1.
///
/// The child is added to the end of the "list", thus appears last in
/// navigation order.
#[must_use
|
}
fn size_rules(&mut self, sizer: SizeCx, axis: AxisInfo) -> SizeRules {
let mut solver = GridSolver::<Vec<_>, Vec<_>, _>::new(axis, self.dim, &mut self.data);
|
random_line_split
|
hypermodel.js
|
iator {
constructor(options) {
super();
// get dom ref
// resource url
// create ajax object
// essentially the constructor binds to a resource
// can return a JSON model, another reason for calling it 'Hyper'
// check all events on itemprop, itemscope etc when in dom
// @_ref
options = options || {};
this._options = {}; //$.extend(true, {}, $.ajaxSettings, defaults, options)}
this._headers = {};
let redirect = options.redirectOnError || false;
this._hash = parseURL(options.url).hash;
// @_options.data = {} unless options.data
// @_options.data._pjax = "true"
//this._timeoutTimer;
this._successCb = function () { };
// callbacks
this._options.beforeSend = (xhr, settings) => {
// No timeout for non-GET requests
// It's not safe to request the resource again with a fallback method.
if (settings.type !== "GET") { settings.timeout = 0; }
xhr.setRequestHeader("X-PJAX", "true");
if (options.isMobile) {
xhr.setRequestHeader("X-Mobile", "true");
}
for (let field in this._headers) {
let value = this._headers[field];
xhr.setRequestHeader(field, value);
}
// xhr.setRequestHeader "X-PJAX-Container", @_context.selector
if (!this._fire("pjax:beforeSend", [xhr, settings])) { return false; }
if (settings.timeout > 0) {
this._timeoutTimer = setTimeout(() => {
if (this._fire("pjax:timeout", [xhr, options])) { return xhr.abort("timeout"); }
}, settings.timeout);
// Clear timeout setting so jQuery's internal timeout isn't invoked
settings.timeout = 0;
}
return this._requestUrl = parseURL(settings.url).href;
};
this._options.complete = (xhr, textStatus) => {
if (this._timeoutTimer) { clearTimeout(this._timeoutTimer); }
this._fire("pjax:complete", [xhr, textStatus, this._options]);
return this._fire("pjax:end", [xhr, this._options]);
};
this._options.success = (data, status, xhr) => {
if (xhr.status !== 204 && (data != null)) {
var container = this._extractContainer(data, xhr, options);
// TODO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// If the new response is missing a body, hard load the page
// unless container.contents
// @_locationReplace container.url
// return
// @_state =
// id: options.id or core.uniqueId()
// url: container.url
// title: container.title
// container: @_context.selector
// fragment: @_options.fragment
// timeout: @_options.timeout
// window.history.replaceState @_state, container.title, container.url if options.push or options.replace
// @navigate container.url, { replace: true }, @_state if @_options.push or @_options.replace
// Clear out any focused controls before inserting new page contents.
// document.activeElement.blur()
if (container.title) { document.title = container.title; }
}
// @_context.html container.contents
// FF bug: Won't autofocus fields that are inserted via JS.
// This behavior is incorrect. So if there's no current focus, autofocus
// the last field.
//
// http://www.w3.org/html/wg/drafts/html/master/forms.html
// autofocusEl = @_context.find("input[autofocus], textarea[autofocus]").last()[0]
// autofocusEl.focus() if autofocusEl and document.activeElement isnt autofocusEl
// Scroll to top by default
// $(window).scrollTop options.scrollTo if typeof options.scrollTo is "number"
// If the URL has a hash in it, make sure the browser
// knows to navigate to the hash.
// if @_hash isnt ""
// Avoid using simple hash set here. Will add another history
// entry. Replace the url with replaceState and scroll to target
// by hand.
//
// window.location.hash = hash
// url = core.parseURL(container.url)
// url.hash = @_hash
// @_state.url = url.href
// window.history.replaceState @_state, container.title, url.href
// @navigate url.href, { replace: true }, @_state
// target = $(url.hash)
// $(window).scrollTop target.offset().top if target.length
// console.log data
this._fire("pjax:success", [container, status, xhr, this._options]);
return this._successCb(container, status, xhr, this._options);
};
this._options.error = (xhr, textStatus, errorThrown) => {
let container = this._extractContainer("", xhr, this._options);
let { status } = xhr;
this._errorCb(xhr, textStatus, errorThrown, this._options);
this._fire("pjax:error", [xhr, textStatus, errorThrown, this._options]);
// reload page
// handle this better
if (status === 401) {
return location.href = xhr.getResponseHeader("X-LOGIN-URL");
} else if (redirect && (status === 0 || (this._options.type === "GET" && textStatus !== "abort"))) {
return this._locationReplace(container.url);
}
};
}
_pjax(options) {
options = $.extend(true, {}, options, this._options);
let xhr = this._xhr;
// stop current call if any
if (xhr && xhr.readyState < 4) {
xhr.onreadystatechange = function () { };
xhr.abort();
}
xhr = this._xhr = $.ajax(options);
if (xhr.readyState > 0) {
this._fire("pjax:start", [xhr, options]);
this._fire("pjax:send", [xhr, options]);
}
return this._xhr;
}
_setCallbacks(options) {
this._successCb = options.success || function () { };
this._completeCb = options.complete || function () { };
this._errorCb = options.error || function () { };
// remove callbacks before we merge with global options
if (options.success != null) { delete options.success; }
if (options.error != null) { delete options.error; }
if (options.complete != null) { return delete options.complete; }
}
get(options = {}, verb = 'GET') {
if (options.url == null) { return; }
this._setCallbacks(options);
options.type = verb;
return this._pjax($.extend({}, defaults, options));
}
put(options) {
if (options.url == null) { return; }
this._setCallbacks(options);
options.type = "PUT";
return this._pjax($.extend({}, defaults, options));
}
post(options) {
if (options.url == null) { return; }
this._setCallbacks(options);
options.type = "POST";
return this._pjax($.extend({}, defaults, options));
}
detele(form, options) { }
patch(form, options) {
return this._handleSubmit(form, "PATCH", options);
}
head(options = {}) {
return this.get(options, "HEAD");
}
submit(form, options) {
return this._handleSubmit(form, undefined, options);
}
stop(form, options) {
return this._xhr.abort();
}
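A hedged usage sketch of this mediator; the exported class name (`Hypermodel`) and the module-level jQuery/`defaults`/`parseURL` globals are assumptions inferred from the file, not shown here:

// Sketch only: `Hypermodel` is the assumed export of this file.
const model = new Hypermodel({ url: "/articles/42", redirectOnError: true });
model.setHeader("Accept", "text/html");
model.get({
  url: "/articles/42",
  success: (container, status, xhr) => console.log("loaded", container.url),
  error: (xhr, textStatus) => console.warn("request failed:", textStatus),
});
// A pending request can be aborted via the underlying xhr:
model.stop();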
toJSON() {
return {};
}
setHeader(field, value) {
return this._headers[field] = value;
}
removeHeader(field) {
return delete this._headers[field];
}
_handleSubmit(form, method, options = {}) {
form = form.tagName.toUpperCase() === "FORM" ? form : $(form).find("form").get(0);
if (typeof form !== 'object') { throw "$.pjax.submit requires a form element"; }
let settings = {
type: method || form.method.toUpperCase(),
url: form.action,
data: options.data || $(form).serializeArray(),
// container: $(form).attr("data-pjax")
target: form
};
this._setCallbacks(options);
return this._pjax($.extend({}, defaults, settings, options));
}
// event.preventDefault()
_fire(type, args) {
return this.emit(type, args);
}
_locationReplace(url) {
window.history.replaceState(null, "", "#");
return window.location.replace(url);
}
_stripPjaxParam(url) {
return url.replace(/\?_pjax=[^&]+&?/, "?").replace(/_pjax=[^&]+&?/, "").replace(/[\?&]$/, "");
}
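As a quick illustration of the chained replacements in `_stripPjaxParam` (made-up URLs, `model` being the instance from the earlier sketch):

model._stripPjaxParam("/list?_pjax=true&page=2"); // => "/list?page=2"
model._stripPjaxParam("/list?page=2&_pjax=true"); // => "/list?page=2"
model._stripPjaxParam("/list?_pjax=true");        // => "/list"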
findAll(elems, selector) {
return elems.filter(selector).add(elems.find(selector));
}
pa
|
tml) {
return $.parseHTML(html, document, true);
}
$html(html) {
return $(this.parseHTML(html));
}
_extractContainer(data, xhr, options) {
let obj = {};
let isPjaxSnippet = false;
// Prefer the X-PJAX-URL header if it was set, otherwise fall back to
// using the originally requested url.
obj.url = this
|
rseHTML(h
|
identifier_name
|
hypermodel.js
|
Mediator {
constructor(options) {
super();
// get dom ref
// resource url
// create ajax object
// essentially the constructor binds to a resource
// can return a JSON model, another reason for calling it 'Hyper'
// check all events on itemprop, itemscope etc when in dom
// @_ref
options = options || {};
this._options = {}; //$.extend(true, {}, $.ajaxSettings, defaults, options)}
this._headers = {};
let redirect = options.redirectOnError || false;
this._hash = parseURL(options.url).hash;
// @_options.data = {} unless options.data
// @_options.data._pjax = "true"
//this._timeoutTimer;
this._successCb = function () { };
// callbacks
this._options.beforeSend = (xhr, settings) => {
// No timeout for non-GET requests
// It's not safe to request the resource again with a fallback method.
if (settings.type !== "GET") { settings.timeout = 0; }
xhr.setRequestHeader("X-PJAX", "true");
if (options.isMobile) {
xhr.setRequestHeader("X-Mobile", "true");
}
for (let field in this._headers) {
let value = this._headers[field];
xhr.setRequestHeader(field, value);
}
// xhr.setRequestHeader "X-PJAX-Container", @_context.selector
if (!this._fire("pjax:beforeSend", [xhr, settings])) { return false; }
if (settings.timeout > 0) {
this._timeoutTimer = setTimeout(() => {
if (this._fire("pjax:timeout", [xhr, options])) { return xhr.abort("timeout"); }
}, settings.timeout);
// Clear timeout setting so jQuery's internal timeout isn't invoked
settings.timeout = 0;
}
return this._requestUrl = parseURL(settings.url).href;
};
this._options.complete = (xhr, textStatus) => {
if (this._timeoutTimer) { clearTimeout(this._timeoutTimer); }
this._fire("pjax:complete", [xhr, textStatus, this._options]);
return this._fire("pjax:end", [xhr, this._options]);
};
this._options.success = (data, status, xhr) => {
if (xhr.status !== 204 && (data != null)) {
var container = this._extractContainer(data, xhr, options);
// TODO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// If the new response is missing a body, hard load the page
// unless container.contents
// @_locationReplace container.url
// return
// @_state =
// id: options.id or core.uniqueId()
// url: container.url
// title: container.title
// container: @_context.selector
// fragment: @_options.fragment
// timeout: @_options.timeout
// window.history.replaceState @_state, container.title, container.url if options.push or options.replace
// @navigate container.url, { replace: true }, @_state if @_options.push or @_options.replace
// Clear out any focused controls before inserting new page contents.
// document.activeElement.blur()
if (container.title) { document.title = container.title; }
}
// @_context.html container.contents
// FF bug: Won't autofocus fields that are inserted via JS.
// This behavior is incorrect. So if there's no current focus, autofocus
// the last field.
//
// http://www.w3.org/html/wg/drafts/html/master/forms.html
// autofocusEl = @_context.find("input[autofocus], textarea[autofocus]").last()[0]
// autofocusEl.focus() if autofocusEl and document.activeElement isnt autofocusEl
|
// Scroll to top by default
// $(window).scrollTop options.scrollTo if typeof options.scrollTo is "number"
// If the URL has a hash in it, make sure the browser
// knows to navigate to the hash.
// if @_hash isnt ""
// Avoid using simple hash set here. Will add another history
// entry. Replace the url with replaceState and scroll to target
// by hand.
//
// window.location.hash = hash
// url = core.parseURL(container.url)
// url.hash = @_hash
// @_state.url = url.href
// window.history.replaceState @_state, container.title, url.href
// @navigate url.href, { replace: true }, @_state
// target = $(url.hash)
// $(window).scrollTop target.offset().top if target.length
// console.log data
this._fire("pjax:success", [container, status, xhr, this._options]);
return this._successCb(container, status, xhr, this._options);
};
this._options.error = (xhr, textStatus, errorThrown) => {
let container = this._extractContainer("", xhr, this._options);
let { status } = xhr;
this._errorCb(xhr, textStatus, errorThrown, this._options);
this._fire("pjax:error", [xhr, textStatus, errorThrown, this._options]);
// reload page
// handle this better
if (status === 401) {
return location.href = xhr.getResponseHeader("X-LOGIN-URL");
} else if (redirect && (status === 0 || (this._options.type === "GET" && textStatus !== "abort"))) {
return this._locationReplace(container.url);
}
};
}
_pjax(options) {
options = $.extend(true, {}, options, this._options);
let xhr = this._xhr;
// stop current call if any
if (xhr && xhr.readyState < 4) {
xhr.onreadystatechange = function () { };
xhr.abort();
}
xhr = this._xhr = $.ajax(options);
if (xhr.readyState > 0) {
this._fire("pjax:start", [xhr, options]);
this._fire("pjax:send", [xhr, options]);
}
return this._xhr;
}
_setCallbacks(options) {
this._successCb = options.success || function () { };
this._completeCb = options.complete || function () { };
this._errorCb = options.error || function () { };
// remove callbacks before we merge with global options
if (options.success != null) { delete options.success; }
if (options.error != null) { delete options.error; }
if (options.complete != null) { return delete options.complete; }
}
get(options = {}, verb = 'GET') {
if (options.url == null) { return; }
this._setCallbacks(options);
options.type = verb;
return this._pjax($.extend({}, defaults, options));
}
put(options) {
if (options.url == null) { return; }
this._setCallbacks(options);
options.type = "PUT";
return this._pjax($.extend({}, defaults, options));
}
post(options) {
if (options.url == null) { return; }
this._setCallbacks(options);
options.type = "POST";
return this._pjax($.extend({}, defaults, options));
}
detele(form, options) { }
patch(form, options) {
return this._handleSubmit(form, "PATCH", options);
}
head(options = {}) {
return this.get(options, "HEAD");
}
submit(form, options) {
return this._handleSubmit(form, undefined, options);
}
stop(form, options) {
return this._xhr.abort();
}
toJSON() {
return {};
}
setHeader(field, value) {
return this._headers[field] = value;
}
removeHeader(field) {
return delete this._headers[field];
}
_handleSubmit(form, method, options = {}) {
form = form.tagName.toUpperCase() === "FORM" ? form : $(form).find("form").get(0);
if (typeof form !== 'object') { throw "$.pjax.submit requires a form element"; }
let settings = {
type: method || form.method.toUpperCase(),
url: form.action,
data: options.data || $(form).serializeArray(),
// container: $(form).attr("data-pjax")
target: form
};
this._setCallbacks(options);
return this._pjax($.extend({}, defaults, settings, options));
}
// event.preventDefault()
_fire(type, args) {
return this.emit(type, args);
}
_locationReplace(url) {
window.history.replaceState(null, "", "#");
return window.location.replace(url);
}
_stripPjaxParam(url) {
return url.replace(/\?_pjax=[^&]+&?/, "?").replace(/_pjax=[^&]+&?/, "").replace(/[\?&]$/, "");
}
findAll(elems, selector) {
return elems.filter(selector).add(elems.find(selector));
}
parseHTML(html) {
return $.parseHTML(html, document, true);
}
$html(html) {
return $(this.parseHTML(html));
}
_extractContainer(data, xhr, options) {
let obj = {};
let isPjaxSnippet = false;
// Prefer the X-PJAX-URL header if it was set, otherwise fall back to
// using the originally requested url.
obj.url = this._stripP
|
random_line_split
|
|
hypermodel.js
|
Mediator {
constructor(options) {
super();
// get dom ref
// resource url
// create ajax object
// essentially the constructor binds to a resource
// can return a JSON model, another reason for calling it 'Hyper'
// check all events on itemprop, itemscope etc when in dom
// @_ref
options = options || {};
this._options = {}; //$.extend(true, {}, $.ajaxSettings, defaults, options)}
this._headers = {};
let redirect = options.redirectOnError || false;
this._hash = parseURL(options.url).hash;
// @_options.data = {} unless options.data
// @_options.data._pjax = "true"
//this._timeoutTimer;
this._successCb = function () { };
// callbacks
this._options.beforeSend = (xhr, settings) => {
// No timeout for non-GET requests
// It's not safe to request the resource again with a fallback method.
if (settings.type !== "GET") { settings.timeout = 0; }
xhr.setRequestHeader("X-PJAX", "true");
if (options.isMobile) {
xhr.setRequestHeader("X-Mobile", "true");
}
for (let field in this._headers) {
let value = this._headers[field];
xhr.setRequestHeader(field, value);
}
// xhr.setRequestHeader "X-PJAX-Container", @_context.selector
if (!this._fire("pjax:beforeSend", [xhr, settings])) { return false; }
if (settings.timeout > 0) {
this._timeoutTimer = setTimeout(() => {
if (this._fire("pjax:timeout", [xhr, options])) { return xhr.abort("timeout"); }
}, settings.timeout);
// Clear timeout setting so jQuery's internal timeout isn't invoked
settings.timeout = 0;
}
return this._requestUrl = parseURL(settings.url).href;
};
this._options.complete = (xhr, textStatus) => {
if (this._timeoutTimer) { clearTimeout(this._timeoutTimer); }
this._fire("pjax:complete", [xhr, textStatus, this._options]);
return this._fire("pjax:end", [xhr, this._options]);
};
this._options.success = (data, status, xhr) => {
if (xhr.status !== 204 && (data != null)) {
var container = this._extractContainer(data, xhr, options);
// TODO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// If the new response is missing a body, hard load the page
// unless container.contents
// @_locationReplace container.url
// return
// @_state =
// id: options.id or core.uniqueId()
// url: container.url
// title: container.title
// container: @_context.selector
// fragment: @_options.fragment
// timeout: @_options.timeout
// window.history.replaceState @_state, container.title, container.url if options.push or options.replace
// @navigate container.url, { replace: true }, @_state if @_options.push or @_options.replace
// Clear out any focused controls before inserting new page contents.
// document.activeElement.blur()
if (container.title) { document.title = container.title; }
}
// @_context.html container.contents
// FF bug: Won't autofocus fields that are inserted via JS.
// This behavior is incorrect. So if there's no current focus, autofocus
// the last field.
//
// http://www.w3.org/html/wg/drafts/html/master/forms.html
// autofocusEl = @_context.find("input[autofocus], textarea[autofocus]").last()[0]
// autofocusEl.focus() if autofocusEl and document.activeElement isnt autofocusEl
// Scroll to top by default
// $(window).scrollTop options.scrollTo if typeof options.scrollTo is "number"
// If the URL has a hash in it, make sure the browser
// knows to navigate to the hash.
// if @_hash isnt ""
// Avoid using simple hash set here. Will add another history
// entry. Replace the url with replaceState and scroll to target
// by hand.
//
// window.location.hash = hash
// url = core.parseURL(container.url)
// url.hash = @_hash
// @_state.url = url.href
// window.history.replaceState @_state, container.title, url.href
// @navigate url.href, { replace: true }, @_state
// target = $(url.hash)
// $(window).scrollTop target.offset().top if target.length
// console.log data
this._fire("pjax:success", [container, status, xhr, this._options]);
return this._successCb(container, status, xhr, this._options);
};
this._options.error = (xhr, textStatus, errorThrown) => {
let container = this._extractContainer("", xhr, this._options);
let { status } = xhr;
this._errorCb(xhr, textStatus, errorThrown, this._options);
this._fire("pjax:error", [xhr, textStatus, errorThrown, this._options]);
// reload page
// handle this better
if (status === 401) {
return location.href = xhr.getResponseHeader("X-LOGIN-URL");
} else if (redirect && (status === 0 || (this._options.type === "GET" && textStatus !== "abort"))) {
return this._locationReplace(container.url);
}
};
}
_pjax(options) {
options = $.extend(true, {}, options, this._options);
let xhr = this._xhr;
// stop current call if any
if (xhr && xhr.readyState < 4) {
xhr.onreadystatechange = function () { };
xhr.abort();
}
xhr = this._xhr = $.ajax(options);
if (xhr.readyState > 0) {
this._fire("pjax:start", [xhr, options]);
this._fire("pjax:send", [xhr, options]);
}
return this._xhr;
}
_setCallbacks(options) {
this._successCb = options.success || function () { };
this._completeCb = options.complete || function () { };
this._errorCb = options.error || function () { };
// remove callbacks before we merge with global options
if (options.success != null) { delete options.success; }
if (options.error != null) { delete options.error; }
if (options.complete != null) { return delete options.complete; }
}
get(options = {}, verb = 'GET') {
if (options.url == null) { return; }
this._setCallbacks(options);
options.type = verb;
return this._pjax($.extend({}, defaults, options));
}
put(options) {
if (options.url == null) { return; }
this._setCallbacks(options);
options.type = "PUT";
return this._pjax($.extend({}, defaults, options));
}
post(options) {
if (options.url == null) { return; }
this._setCallbacks(options);
options.type = "POST";
return this._pjax($.extend({}, defaults, options));
}
detele(form, options) { }
patch(form, options) {
return this._handleSubmit(form, "PATCH", options);
}
head(options = {}) {
return this.get(options, "HEAD");
}
submit(form, options) {
return this._handleSubmit(form, undefined, options);
}
stop(form, options) {
return this._xhr.abort();
}
toJSON() {
return {};
}
setHeader(field, value) {
return this._headers[field] = value;
}
removeHeader(field) {
return delete this._headers[field];
}
_handleSubmit(form, method, options = {}) {
form = form.tagName.toUpperCase() === "FORM" ? form : $(form).find("form").get(0);
if (typeof form !== 'object') { throw "$.pjax.submit requires a form element"; }
let settings = {
type: method || form.method.toUpperCase(),
url: form.action,
data: options.data || $(form).serializeArray(),
// container: $(form).attr("data-pjax")
target: form
};
this._setCallbacks(options);
return this._pjax($.extend({}, defaults, settings, options));
}
// event.preventDefault()
_fire(type, args) {
return this.emit(type, args);
}
_locationReplace(url) {
window.history.replaceState(null, "", "#");
return window.location.replace(url);
}
_stripPjaxParam(url) {
return url.replace(/\?_pjax=[^&]+&?/, "?").replace(/_pjax=[^&]+&?/, "").replace(/[\?&]$/, "");
}
findAll(elems, selector) {
return elems.filter(selector).add(elems.find(selector));
}
parseHTML(html) {
return $.parseHTML(html, document, true);
}
$html(html) {
|
_extractContainer(data, xhr, options) {
let obj = {};
let isPjaxSnippet = false;
// Prefer the X-PJAX-URL header if it was set, otherwise fall back to
// using the originally requested url.
obj.url = this._strip
|
return $(this.parseHTML(html));
}
|
identifier_body
|
hypermodel.js
|
Mediator {
constructor(options) {
super();
// get dom ref
// resource url
// create ajax object
// essentially the constructor binds to a resource
// can return a JSON model, another reason for calling it 'Hyper'
// check all events on itemprop, itemscope etc when in dom
// @_ref
options = options || {};
this._options = {}; //$.extend(true, {}, $.ajaxSettings, defaults, options)}
this._headers = {};
let redirect = options.redirectOnError || false;
this._hash = parseURL(options.url).hash;
// @_options.data = {} unless options.data
// @_options.data._pjax = "true"
//this._timeoutTimer;
this._successCb = function () { };
// callbacks
this._options.beforeSend = (xhr, settings) => {
// No timeout for non-GET requests
// It's not safe to request the resource again with a fallback method.
if (settings.type !== "GET") { settings.timeout = 0; }
xhr.setRequestHeader("X-PJAX", "true");
if (options.isMobile) {
xhr.setRequestHeader("X-Mobile", "true");
}
for (let field in this._headers) {
let value = this._headers[field];
xhr.setRequestHeader(field, value);
}
// xhr.setRequestHeader "X-PJAX-Container", @_context.selector
if (!this._fire("pjax:beforeSend", [xhr, settings])) { return false; }
if (settings.timeout > 0) {
this._timeoutTimer = setTimeout(() => {
if (this._fire("pjax:timeout", [xhr, options])) { return xhr.abort("timeout"); }
}, settings.timeout);
// Clear timeout setting so jQuery's internal timeout isn't invoked
settings.timeout = 0;
}
return this._requestUrl = parseURL(settings.url).href;
};
this._options.complete = (xhr, textStatus) => {
if (this._timeoutTimer) { clearTimeout(this._timeoutTimer); }
this._fire("pjax:complete", [xhr, textStatus, this._options]);
return this._fire("pjax:end", [xhr, this._options]);
};
this._options.success = (data, status, xhr) => {
if (xhr.status !== 204 && (data != null)) {
var container = this._extractContainer(data, xhr, options);
// TODO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// If the new response is missing a body, hard load the page
// unless container.contents
// @_locationReplace container.url
// return
// @_state =
// id: options.id or core.uniqueId()
// url: container.url
// title: container.title
// container: @_context.selector
// fragment: @_options.fragment
// timeout: @_options.timeout
// window.history.replaceState @_state, container.title, container.url if options.push or options.replace
// @navigate container.url, { replace: true }, @_state if @_options.push or @_options.replace
// Clear out any focused controls before inserting new page contents.
// document.activeElement.blur()
if (container.title) { document.title = container.title; }
}
// @_context.html container.contents
// FF bug: Won't autofocus fields that are inserted via JS.
// This behavior is incorrect. So if there's no current focus, autofocus
// the last field.
//
// http://www.w3.org/html/wg/drafts/html/master/forms.html
// autofocusEl = @_context.find("input[autofocus], textarea[autofocus]").last()[0]
// autofocusEl.focus() if autofocusEl and document.activeElement isnt autofocusEl
// Scroll to top by default
// $(window).scrollTop options.scrollTo if typeof options.scrollTo is "number"
// If the URL has a hash in it, make sure the browser
// knows to navigate to the hash.
// if @_hash isnt ""
// Avoid using simple hash set here. Will add another history
// entry. Replace the url with replaceState and scroll to target
// by hand.
//
// window.location.hash = hash
// url = core.parseURL(container.url)
// url.hash = @_hash
// @_state.url = url.href
// window.history.replaceState @_state, container.title, url.href
// @navigate url.href, { replace: true }, @_state
// target = $(url.hash)
// $(window).scrollTop target.offset().top if target.length
// console.log data
this._fire("pjax:success", [container, status, xhr, this._options]);
return this._successCb(container, status, xhr, this._options);
};
this._options.error = (xhr, textStatus, errorThrown) => {
let container = this._extractContainer("", xhr, this._options);
let { status } = xhr;
this._errorCb(xhr, textStatus, errorThrown, this._options);
this._fire("pjax:error", [xhr, textStatus, errorThrown, this._options]);
// reload page
// handle this better
if (status === 401) {
return location.href = xhr.getResponseHeader("X-LOGIN-URL");
} else if (redirect && (status === 0 || (this._options.type === "GET" && textStatus !== "abort"))) {
return this._locationReplace(container.url);
}
};
}
_pjax(options) {
options = $.extend(true, {}, options, this._options);
let xhr = this._xhr;
// stop current call if any
if (xhr && xhr.readyState < 4) {
|
xhr = this._xhr = $.ajax(options);
if (xhr.readyState > 0) {
this._fire("pjax:start", [xhr, options]);
this._fire("pjax:send", [xhr, options]);
}
return this._xhr;
}
_setCallbacks(options) {
this._successCb = options.success || function () { };
this._completeCb = options.complete || function () { };
this._errorCb = options.error || function () { };
// remove callbacks before we merge with global options
if (options.success != null) { delete options.success; }
if (options.error != null) { delete options.error; }
if (options.complete != null) { return delete options.complete; }
}
get(options = {}, verb = 'GET') {
if (options.url == null) { return; }
this._setCallbacks(options);
options.type = verb;
return this._pjax($.extend({}, defaults, options));
}
put(options) {
if (options.url == null) { return; }
this._setCallbacks(options);
options.type = "PUT";
return this._pjax($.extend({}, defaults, options));
}
post(options) {
if (options.url == null) { return; }
this._setCallbacks(options);
options.type = "POST";
return this._pjax($.extend({}, defaults, options));
}
detele(form, options) { }
patch(form, options) {
return this._handleSubmit(form, "PATCH", options);
}
head(options = {}) {
return this.get(options, "HEAD");
}
submit(form, options) {
return this._handleSubmit(form, undefined, options);
}
stop(form, options) {
return this._xhr.abort();
}
toJSON() {
return {};
}
setHeader(field, value) {
return this._headers[field] = value;
}
removeHeader(field) {
return delete this._headers[field];
}
_handleSubmit(form, method, options = {}) {
form = form.tagName.toUpperCase() === "FORM" ? form : $(form).find("form").get(0);
if (typeof form !== 'object') { throw "$.pjax.submit requires a form element"; }
let settings = {
type: method || form.method.toUpperCase(),
url: form.action,
data: options.data || $(form).serializeArray(),
// container: $(form).attr("data-pjax")
target: form
};
this._setCallbacks(options);
return this._pjax($.extend({}, defaults, settings, options));
}
// event.preventDefault()
_fire(type, args) {
return this.emit(type, args);
}
_locationReplace(url) {
window.history.replaceState(null, "", "#");
return window.location.replace(url);
}
_stripPjaxParam(url) {
return url.replace(/\?_pjax=[^&]+&?/, "?").replace(/_pjax=[^&]+&?/, "").replace(/[\?&]$/, "");
}
findAll(elems, selector) {
return elems.filter(selector).add(elems.find(selector));
}
parseHTML(html) {
return $.parseHTML(html, document, true);
}
$html(html) {
return $(this.parseHTML(html));
}
_extractContainer(data, xhr, options) {
let obj = {};
let isPjaxSnippet = false;
// Prefer the X-PJAX-URL header if it was set, otherwise fall back to
// using the originally requested url.
obj.url = this._strip
|
xhr.onreadystatechange = function () { };
xhr.abort();
}
|
conditional_block
|
gosmonaut.go
|
()
} else {
nProcs = conf.NumProcessors
}
// Create decoder
dec, header, err := newDecoder(file, nProcs, conf.Decoder)
if err != nil {
return nil, err
}
return &Gosmonaut{
dec: dec,
header: header,
debugMode: conf.DebugMode,
printWarnings: conf.PrintWarnings,
}, nil
}
// Header returns the meta information of the PBF file.
func (g *Gosmonaut) Header() Header {
return g.header
}
// Typical PrimitiveBlock contains 8k OSM entities
const entitiesPerPrimitiveBlock = 8000
// Start starts the decoding process. The function call will block until the
// previous run has finished.
// Only types that are enabled in `types` will be sent to the caller.
// funcEntityNeeded will be called to determine if the caller needs a specific
// OSM entity.
// Found entities and encountered errors can be received by polling the Next()
// method.
func (g *Gosmonaut) Start(
types OSMTypeSet,
funcEntityNeeded func(OSMType, OSMTags) bool,
) {
// Block until previous run finished
g.lock.Lock()
g.stream = make(chan osmPair, entitiesPerPrimitiveBlock)
// Init vars
g.funcEntityNeeded = funcEntityNeeded
g.types = types
go func() {
// Decode
g.decode()
// Finish
close(g.stream)
g.lock.Unlock()
}()
}
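A hedged end-to-end sketch of the Start/Next contract described above. It is assumed to live in this package (e.g. an example test with "fmt", "io", "log" and "os" imported); the constructor name and the OSMTypeSet setter are assumptions, only the polling loop mirrors this file:

func ExampleGosmonaut_usage() {
	file, err := os.Open("region.osm.pbf") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	g, err := NewGosmonaut(file, Config{}) // constructor name assumed
	if err != nil {
		log.Fatal(err)
	}
	var types OSMTypeSet
	types.Set(WayType, true) // setter assumed, mirroring types.Get used above
	g.Start(types, func(t OSMType, tags OSMTags) bool {
		return true // a real filter would inspect tags here
	})
	for {
		e, err := g.Next()
		if err == io.EOF {
			break // whole file decoded successfully
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(e)
	}
}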
func (g *Gosmonaut) decode() {
// Init debug vars
{
timeNow := time.Now()
g.timeStarted = timeNow
g.timeLast = timeNow
}
g.printDebugInfo("Decoding started")
// Scan relation dependencies
g.nodeIDTracker = newBitsetIDTracker()
g.wayIDTracker = newBitsetIDTracker()
if g.types.Get(RelationType) {
if err := g.scanRelationDependencies(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo(fmt.Sprintf("Scanned relation dependencies [length: %d]", g.wayIDTracker.len()))
}
// Scan way dependencies
if g.types.Get(WayType) || g.wayIDTracker.len() != 0 {
if err := g.scanWayDependencies(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo(fmt.Sprintf("Scanned way dependencies [length: %d]", g.nodeIDTracker.len()))
}
g.nodeCache = newBinaryNodeEntityMap(g.nodeIDTracker.len())
g.nodeTags = make(map[int64]OSMTags)
g.printDebugInfo("Created node cache")
// Scan nodes
if g.types.Get(NodeType) || g.nodeIDTracker.len() != 0 {
if err := g.scanNodes(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo("Scanned nodes")
}
g.nodeIDTracker = nil
g.printDebugInfo("Deleted node ID tracker")
g.nodeCache.prepare()
g.printDebugInfo("Prepared node cache")
g.wayCache = newBinaryWayEntityMap(g.wayIDTracker.len())
g.wayTags = make(map[int64]OSMTags)
g.printDebugInfo("Created way cache")
// Scan ways
if g.types.Get(WayType) || g.wayIDTracker.len() != 0 {
if err := g.scanWays(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo("Scanned ways")
}
g.wayIDTracker = nil
g.printDebugInfo("Deleted way ID tracker")
g.wayCache.prepare()
g.printDebugInfo("Prepared way cache")
// Scan relations
if g.types.Get(RelationType) {
if err := g.scanRelations(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo("Scanned relations")
}
g.wayCache = nil
g.nodeCache = nil
g.wayTags = nil
g.nodeTags = nil
g.printDebugInfo("Deleted entity caches")
if g.debugMode {
fmt.Println("Elapsed time:", time.Since(g.timeStarted))
}
}
func (g *Gosmonaut) streamError(err error) {
g.stream <- osmPair{nil, err}
}
func (g *Gosmonaut) streamEntity(i OSMEntity) {
g.stream <- osmPair{i, nil}
}
// Next returns the next decoded entity (x)or an error.
// If the error is io.EOF the file has successfully been decoded.
// If the error is not EOF decoding has been stopped due to another error.
func (g *Gosmonaut) Next() (OSMEntity, error) {
p, ok := <-g.stream
if !ok
|
return p.i, p.e
}
func (g *Gosmonaut) entityNeeded(t OSMType, tags OSMTags) bool {
if !g.types.Get(t) {
return false
}
return g.funcEntityNeeded(t, tags)
}
func (g *Gosmonaut) scanRelationDependencies() error {
return g.scan(RelationType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache?
if !g.entityNeeded(RelationType, tags) {
return nil
}
// Get parser
d, ok := v.(relationParser)
if !ok {
return fmt.Errorf("got invalid relation parser (%T)", v)
}
// Add members to ID caches
ids, err := d.ids()
if err != nil {
return err
}
types, err := d.types()
if err != nil {
return err
}
if len(ids) != len(types) {
return errors.New("length of relation ids and types differs")
}
for i, id := range ids {
switch types[i] {
case WayType:
g.wayIDTracker.set(id)
case NodeType:
g.nodeIDTracker.set(id)
// We don't support sub-relations yet
}
}
return nil
})
}
func (g *Gosmonaut) scanWayDependencies() error {
return g.scan(WayType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache or stream?
if !g.wayIDTracker.get(id) && !g.entityNeeded(WayType, tags) {
return nil
}
// Get parser
d, ok := v.(wayParser)
if !ok {
return fmt.Errorf("got invalid way parser (%T)", v)
}
// Add nodes to ID cache
refs, err := d.refs()
if err != nil {
return err
}
for _, id := range refs {
g.nodeIDTracker.set(id)
}
return nil
})
}
func (g *Gosmonaut) scanNodes() error {
return g.scan(NodeType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache or stream?
rc, rs := g.nodeIDTracker.get(id), g.entityNeeded(NodeType, tags)
if !rc && !rs {
return nil
}
// Get parser
d, ok := v.(nodeParser)
if !ok {
return fmt.Errorf("got invalid node parser (%T)", v)
}
// Get properties
lat, lon, err := d.coords()
if err != nil {
return err
}
// Add to cache
if rc {
g.nodeCache.add(rawNode{
id: id,
lat: newCoordinate(lat),
lon: newCoordinate(lon),
})
// Add tags
if tags.Len() != 0 {
g.nodeTags[id] = tags
}
}
// Send to output stream
if rs {
g.streamEntity(Node{
ID: id,
Lat: lat,
Lon: lon,
Tags: tags,
})
}
return nil
})
}
func (g *Gosmonaut) scanWays() error {
return g.scan(WayType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache or stream?
rc, rs := g.wayIDTracker.get(id), g.entityNeeded(WayType, tags)
if !rc && !rs {
return nil
}
// Get parser
d, ok := v.(wayParser)
if !ok {
return fmt.Errorf("got invalid way parser (%T)", v)
}
// Get properties
refs, err := d.refs()
if err != nil {
return err
}
// Add to cache
if rc {
g.wayCache.add(rawWay{
id: id,
refs: refs,
})
// Add tags
if tags.Len() != 0 {
g.wayTags[id] = tags
}
}
// Send to output stream
if rs {
nodes, err := g
|
{
return nil, io.EOF
}
|
conditional_block
|
gosmonaut.go
|
g.wayTags = nil
g.nodeTags = nil
g.printDebugInfo("Deleted entity caches")
if g.debugMode {
fmt.Println("Elapsed time:", time.Since(g.timeStarted))
}
}
func (g *Gosmonaut) streamError(err error) {
g.stream <- osmPair{nil, err}
}
func (g *Gosmonaut) streamEntity(i OSMEntity) {
g.stream <- osmPair{i, nil}
}
// Next returns the next decoded entity (x)or an error.
// If the error is io.EOF the file has successfully been decoded.
// If the error is not EOF decoding has been stopped due to another error.
func (g *Gosmonaut) Next() (OSMEntity, error) {
p, ok := <-g.stream
if !ok {
return nil, io.EOF
}
return p.i, p.e
}
func (g *Gosmonaut) entityNeeded(t OSMType, tags OSMTags) bool {
if !g.types.Get(t) {
return false
}
return g.funcEntityNeeded(t, tags)
}
func (g *Gosmonaut) scanRelationDependencies() error {
return g.scan(RelationType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache?
if !g.entityNeeded(RelationType, tags) {
return nil
}
// Get parser
d, ok := v.(relationParser)
if !ok {
return fmt.Errorf("got invalid relation parser (%T)", v)
}
// Add members to ID caches
ids, err := d.ids()
if err != nil {
return err
}
types, err := d.types()
if err != nil {
return err
}
if len(ids) != len(types) {
return errors.New("length of relation ids and types differs")
}
for i, id := range ids {
switch types[i] {
case WayType:
g.wayIDTracker.set(id)
case NodeType:
g.nodeIDTracker.set(id)
// We don't support sub-relations yet
}
}
return nil
})
}
func (g *Gosmonaut) scanWayDependencies() error {
return g.scan(WayType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache or stream?
if !g.wayIDTracker.get(id) && !g.entityNeeded(WayType, tags) {
return nil
}
// Get parser
d, ok := v.(wayParser)
if !ok {
return fmt.Errorf("got invalid way parser (%T)", v)
}
// Add nodes to ID cache
refs, err := d.refs()
if err != nil {
return err
}
for _, id := range refs {
g.nodeIDTracker.set(id)
}
return nil
})
}
func (g *Gosmonaut) scanNodes() error {
return g.scan(NodeType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache or stream?
rc, rs := g.nodeIDTracker.get(id), g.entityNeeded(NodeType, tags)
if !rc && !rs {
return nil
}
// Get parser
d, ok := v.(nodeParser)
if !ok {
return fmt.Errorf("got invalid node parser (%T)", v)
}
// Get properties
lat, lon, err := d.coords()
if err != nil {
return err
}
// Add to cache
if rc {
g.nodeCache.add(rawNode{
id: id,
lat: newCoordinate(lat),
lon: newCoordinate(lon),
})
// Add tags
if tags.Len() != 0 {
g.nodeTags[id] = tags
}
}
// Send to output stream
if rs {
g.streamEntity(Node{
ID: id,
Lat: lat,
Lon: lon,
Tags: tags,
})
}
return nil
})
}
func (g *Gosmonaut) scanWays() error {
return g.scan(WayType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache or stream?
rc, rs := g.wayIDTracker.get(id), g.entityNeeded(WayType, tags)
if !rc && !rs {
return nil
}
// Get parser
d, ok := v.(wayParser)
if !ok {
return fmt.Errorf("got invalid way parser (%T)", v)
}
// Get properties
refs, err := d.refs()
if err != nil {
return err
}
// Add to cache
if rc {
g.wayCache.add(rawWay{
id: id,
refs: refs,
})
// Add tags
if tags.Len() != 0 {
g.wayTags[id] = tags
}
}
// Send to output stream
if rs {
nodes, err := g.uncacheNodes(refs)
if err != nil {
return err
}
g.streamEntity(Way{
ID: id,
Tags: tags,
Nodes: nodes,
})
}
return nil
})
}
func (g *Gosmonaut) scanRelations() error {
return g.scan(RelationType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by stream?
if !g.entityNeeded(RelationType, tags) {
return nil
}
// Get parser
d, ok := v.(relationParser)
if !ok {
return fmt.Errorf("got invalid relation parser (%T)", v)
}
// Get properties
ids, err := d.ids()
if err != nil {
return err
}
types, err := d.types()
if err != nil {
return err
}
roles, err := d.roles()
if err != nil {
return err
}
n := len(ids)
if len(types) != n || len(roles) != n {
return errors.New("length of relation ids, roles and types differs")
}
// Build relation
members := make([]Member, 0, n)
for i, mid := range ids {
var e OSMEntity
switch types[i] {
case WayType:
if rw, ok := g.wayCache.get(mid); ok {
nodes, err := g.uncacheNodes(rw.refs)
if err != nil {
return err
}
e = Way{
ID: rw.id,
Tags: g.wayTags[rw.id],
Nodes: nodes,
}
} else {
g.printWarning(fmt.Sprintf("Way #%d in not in file for relation #%d", mid, id))
continue
}
case NodeType:
if n, err := g.uncacheNode(mid); err == nil {
e = n
} else {
g.printWarning(err.Error())
continue
}
default:
// We don't support sub-relations yet
g.printWarning(fmt.Sprintf("Skipping sub-relation #%d in relation #%d (not supported)", mid, id))
continue
}
members = append(members, Member{roles[i], e})
}
// Send to output stream
g.streamEntity(Relation{
ID: id,
Tags: tags,
Members: members,
})
return nil
})
}
func (g *Gosmonaut) scan(t OSMType,
receiver func(int64, OSMTags, entityParser) error,
) error {
if err := g.dec.Start(t); err != nil {
return err
}
// Decode file
for {
parsers, err := g.dec.nextPair()
if err == io.EOF {
return nil
} else if err != nil {
return err
}
// Iterate parsers
for _, v := range parsers {
// Iterate entities
for {
id, tags, err := v.next()
if err == io.EOF {
break
} else if err != nil {
return err
}
// Send to receiver
if err := receiver(id, tags, v); err != nil {
return err
}
}
}
}
}
/* Cache */
func (g *Gosmonaut) uncacheNode(id int64) (Node, error)
|
{
rn, ok := g.nodeCache.get(id)
if !ok {
return Node{}, fmt.Errorf("Node #%d is not in file", id)
}
return Node{
ID: rn.id,
Lat: rn.lat.float(),
Lon: rn.lon.float(),
Tags: g.nodeTags[id],
}, nil
}
|
identifier_body
|
|
gosmonaut.go
|
()
} else {
nProcs = conf.NumProcessors
}
// Create decoder
dec, header, err := newDecoder(file, nProcs, conf.Decoder)
if err != nil {
return nil, err
}
return &Gosmonaut{
dec: dec,
header: header,
debugMode: conf.DebugMode,
printWarnings: conf.PrintWarnings,
}, nil
}
// Header returns the meta information of the PBF file.
func (g *Gosmonaut) Header() Header {
return g.header
}
// Typical PrimitiveBlock contains 8k OSM entities
const entitiesPerPrimitiveBlock = 8000
// Start starts the decoding process. The function call will block until the
// previous run has finished.
// Only types that are enabled in `types` will be sent to the caller.
// funcEntityNeeded will be called to determine if the caller needs a specific
// OSM entity.
// Found entities and encountered errors can be received by polling the Next()
// method.
func (g *Gosmonaut) Start(
types OSMTypeSet,
funcEntityNeeded func(OSMType, OSMTags) bool,
) {
// Block until previous run finished
g.lock.Lock()
g.stream = make(chan osmPair, entitiesPerPrimitiveBlock)
// Init vars
g.funcEntityNeeded = funcEntityNeeded
g.types = types
go func() {
// Decode
g.decode()
// Finish
close(g.stream)
g.lock.Unlock()
}()
}
func (g *Gosmonaut) decode() {
// Init debug vars
{
timeNow := time.Now()
g.timeStarted = timeNow
g.timeLast = timeNow
}
g.printDebugInfo("Decoding started")
// Scan relation dependencies
g.nodeIDTracker = newBitsetIDTracker()
g.wayIDTracker = newBitsetIDTracker()
if g.types.Get(RelationType) {
if err := g.scanRelationDependencies(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo(fmt.Sprintf("Scanned relation dependencies [length: %d]", g.wayIDTracker.len()))
}
// Scan way dependencies
if g.types.Get(WayType) || g.wayIDTracker.len() != 0 {
if err := g.scanWayDependencies(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo(fmt.Sprintf("Scanned way dependencies [length: %d]", g.nodeIDTracker.len()))
}
g.nodeCache = newBinaryNodeEntityMap(g.nodeIDTracker.len())
g.nodeTags = make(map[int64]OSMTags)
g.printDebugInfo("Created node cache")
// Scan nodes
if g.types.Get(NodeType) || g.nodeIDTracker.len() != 0 {
if err := g.scanNodes(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo("Scanned nodes")
}
g.nodeIDTracker = nil
g.printDebugInfo("Deleted node ID tracker")
g.nodeCache.prepare()
g.printDebugInfo("Prepared node cache")
g.wayCache = newBinaryWayEntityMap(g.wayIDTracker.len())
g.wayTags = make(map[int64]OSMTags)
g.printDebugInfo("Created way cache")
// Scan ways
if g.types.Get(WayType) || g.wayIDTracker.len() != 0 {
if err := g.scanWays(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo("Scanned ways")
}
g.wayIDTracker = nil
g.printDebugInfo("Deleted way ID tracker")
g.wayCache.prepare()
g.printDebugInfo("Prepared way cache")
// Scan relations
if g.types.Get(RelationType) {
if err := g.scanRelations(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo("Scanned relations")
}
g.wayCache = nil
g.nodeCache = nil
g.wayTags = nil
g.nodeTags = nil
g.printDebugInfo("Deleted entity caches")
if g.debugMode {
fmt.Println("Elapsed time:", time.Since(g.timeStarted))
}
}
func (g *Gosmonaut) streamError(err error) {
g.stream <- osmPair{nil, err}
}
func (g *Gosmonaut) streamEntity(i OSMEntity) {
g.stream <- osmPair{i, nil}
}
// Next returns the next decoded entity (x)or an error.
// If the error is io.EOF the file has successfully been decoded.
// If the error is not EOF decoding has been stopped due to another error.
func (g *Gosmonaut) Next() (OSMEntity, error) {
p, ok := <-g.stream
if !ok {
return nil, io.EOF
}
return p.i, p.e
}
func (g *Gosmonaut) entityNeeded(t OSMType, tags OSMTags) bool {
if !g.types.Get(t) {
return false
}
return g.funcEntityNeeded(t, tags)
}
func (g *Gosmonaut) scanRelationDependencies() error {
return g.scan(RelationType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache?
if !g.entityNeeded(RelationType, tags) {
return nil
}
// Get parser
d, ok := v.(relationParser)
if !ok {
return fmt.Errorf("got invalid relation parser (%T)", v)
}
// Add members to ID caches
ids, err := d.ids()
if err != nil {
return err
}
types, err := d.types()
if err != nil {
return err
}
if len(ids) != len(types) {
return errors.New("length of relation ids and types differs")
}
for i, id := range ids {
switch types[i] {
case WayType:
g.wayIDTracker.set(id)
case NodeType:
g.nodeIDTracker.set(id)
// We don't support sub-relations yet
}
}
return nil
})
}
func (g *Gosmonaut) scanWayDependencies() error {
return g.scan(WayType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache or stream?
if !g.wayIDTracker.get(id) && !g.entityNeeded(WayType, tags) {
return nil
}
// Get parser
d, ok := v.(wayParser)
if !ok {
return fmt.Errorf("got invalid way parser (%T)", v)
}
// Add nodes to ID cache
refs, err := d.refs()
if err != nil {
return err
}
for _, id := range refs {
g.nodeIDTracker.set(id)
}
return nil
})
}
func (g *Gosmonaut) scanNodes() error {
return g.scan(NodeType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache or stream?
rc, rs := g.nodeIDTracker.get(id), g.entityNeeded(NodeType, tags)
if !rc && !rs {
return nil
}
// Get parser
d, ok := v.(nodeParser)
if !ok {
|
lat, lon, err := d.coords()
if err != nil {
return err
}
// Add to cache
if rc {
g.nodeCache.add(rawNode{
id: id,
lat: newCoordinate(lat),
lon: newCoordinate(lon),
})
// Add tags
if tags.Len() != 0 {
g.nodeTags[id] = tags
}
}
// Send to output stream
if rs {
g.streamEntity(Node{
ID: id,
Lat: lat,
Lon: lon,
Tags: tags,
})
}
return nil
})
}
func (g *Gosmonaut) scanWays() error {
return g.scan(WayType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache or stream?
rc, rs := g.wayIDTracker.get(id), g.entityNeeded(WayType, tags)
if !rc && !rs {
return nil
}
// Get parser
d, ok := v.(wayParser)
if !ok {
return fmt.Errorf("got invalid way parser (%T)", v)
}
// Get properties
refs, err := d.refs()
if err != nil {
return err
}
// Add to cache
if rc {
g.wayCache.add(rawWay{
id: id,
refs: refs,
})
// Add tags
if tags.Len() != 0 {
g.wayTags[id] = tags
}
}
// Send to output stream
if rs {
nodes, err := g.uncache
|
return fmt.Errorf("got invalid node parser (%T)", v)
}
// Get properties
|
random_line_split
|
gosmonaut.go
|
()
} else {
nProcs = conf.NumProcessors
}
// Create decoder
dec, header, err := newDecoder(file, nProcs, conf.Decoder)
if err != nil {
return nil, err
}
return &Gosmonaut{
dec: dec,
header: header,
debugMode: conf.DebugMode,
printWarnings: conf.PrintWarnings,
}, nil
}
// Header returns the meta information of the PBF file.
func (g *Gosmonaut) Header() Header {
return g.header
}
// Typical PrimitiveBlock contains 8k OSM entities
const entitiesPerPrimitiveBlock = 8000
// Start starts the decoding process. The function call will block until the
// previous run has finished.
// Only types that are enabled in `types` will be sent to the caller.
// funcEntityNeeded will be called to determine if the caller needs a specific
// OSM entity.
// Found entities and encountered errors can be received by polling the Next()
// method.
func (g *Gosmonaut) Start(
types OSMTypeSet,
funcEntityNeeded func(OSMType, OSMTags) bool,
) {
// Block until previous run finished
g.lock.Lock()
g.stream = make(chan osmPair, entitiesPerPrimitiveBlock)
// Init vars
g.funcEntityNeeded = funcEntityNeeded
g.types = types
go func() {
// Decode
g.decode()
// Finish
close(g.stream)
g.lock.Unlock()
}()
}
func (g *Gosmonaut) decode() {
// Init debug vars
{
timeNow := time.Now()
g.timeStarted = timeNow
g.timeLast = timeNow
}
g.printDebugInfo("Decoding started")
// Scan relation dependencies
g.nodeIDTracker = newBitsetIDTracker()
g.wayIDTracker = newBitsetIDTracker()
if g.types.Get(RelationType) {
if err := g.scanRelationDependencies(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo(fmt.Sprintf("Scanned relation dependencies [length: %d]", g.wayIDTracker.len()))
}
// Scan way dependencies
if g.types.Get(WayType) || g.wayIDTracker.len() != 0 {
if err := g.scanWayDependencies(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo(fmt.Sprintf("Scanned way dependencies [length: %d]", g.nodeIDTracker.len()))
}
g.nodeCache = newBinaryNodeEntityMap(g.nodeIDTracker.len())
g.nodeTags = make(map[int64]OSMTags)
g.printDebugInfo("Created node cache")
// Scan nodes
if g.types.Get(NodeType) || g.nodeIDTracker.len() != 0 {
if err := g.scanNodes(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo("Scanned nodes")
}
g.nodeIDTracker = nil
g.printDebugInfo("Deleted node ID tracker")
g.nodeCache.prepare()
g.printDebugInfo("Prepared node cache")
g.wayCache = newBinaryWayEntityMap(g.wayIDTracker.len())
g.wayTags = make(map[int64]OSMTags)
g.printDebugInfo("Created way cache")
// Scan ways
if g.types.Get(WayType) || g.wayIDTracker.len() != 0 {
if err := g.scanWays(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo("Scanned ways")
}
g.wayIDTracker = nil
g.printDebugInfo("Deleted way ID tracker")
g.wayCache.prepare()
g.printDebugInfo("Prepared way cache")
// Scan relations
if g.types.Get(RelationType) {
if err := g.scanRelations(); err != nil {
g.streamError(err)
return
}
g.printDebugInfo("Scanned relations")
}
g.wayCache = nil
g.nodeCache = nil
g.wayTags = nil
g.nodeTags = nil
g.printDebugInfo("Deleted entity caches")
if g.debugMode {
fmt.Println("Elapsed time:", time.Since(g.timeStarted))
}
}
func (g *Gosmonaut) streamError(err error) {
g.stream <- osmPair{nil, err}
}
func (g *Gosmonaut)
|
(i OSMEntity) {
g.stream <- osmPair{i, nil}
}
// Next returns the next decoded entity (x)or an error.
// If the error is io.EOF the file has successfully been decoded.
// If the error is not EOF decoding has been stopped due to another error.
func (g *Gosmonaut) Next() (OSMEntity, error) {
p, ok := <-g.stream
if !ok {
return nil, io.EOF
}
return p.i, p.e
}
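// Illustrative usage sketch (added; not part of the original source). Only
// Start, Next and the funcEntityNeeded signature are taken from the code
// above; how OSMTypeSet is populated is an assumption.
//
//	var types OSMTypeSet // assume ways are enabled in this set
//	g.Start(types, func(t OSMType, tags OSMTags) bool {
//		return t == WayType // stream every way
//	})
//	for {
//		e, err := g.Next()
//		if err == io.EOF {
//			break // file fully decoded
//		} else if err != nil {
//			panic(err) // or handle the error
//		}
//		fmt.Println(e)
//	}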
func (g *Gosmonaut) entityNeeded(t OSMType, tags OSMTags) bool {
if !g.types.Get(t) {
return false
}
return g.funcEntityNeeded(t, tags)
}
func (g *Gosmonaut) scanRelationDependencies() error {
return g.scan(RelationType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache?
if !g.entityNeeded(RelationType, tags) {
return nil
}
// Get parser
d, ok := v.(relationParser)
if !ok {
return fmt.Errorf("got invalid relation parser (%T)", v)
}
// Add members to ID caches
ids, err := d.ids()
if err != nil {
return err
}
types, err := d.types()
if err != nil {
return err
}
if len(ids) != len(types) {
return errors.New("length of relation ids and types differs")
}
for i, id := range ids {
switch types[i] {
case WayType:
g.wayIDTracker.set(id)
case NodeType:
g.nodeIDTracker.set(id)
// We don't support sub-relations yet
}
}
return nil
})
}
func (g *Gosmonaut) scanWayDependencies() error {
return g.scan(WayType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache or stream?
if !g.wayIDTracker.get(id) && !g.entityNeeded(WayType, tags) {
return nil
}
// Get parser
d, ok := v.(wayParser)
if !ok {
return fmt.Errorf("got invalid way parser (%T)", v)
}
// Add nodes to ID cache
refs, err := d.refs()
if err != nil {
return err
}
for _, id := range refs {
g.nodeIDTracker.set(id)
}
return nil
})
}
func (g *Gosmonaut) scanNodes() error {
return g.scan(NodeType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache or stream?
rc, rs := g.nodeIDTracker.get(id), g.entityNeeded(NodeType, tags)
if !rc && !rs {
return nil
}
// Get parser
d, ok := v.(nodeParser)
if !ok {
return fmt.Errorf("got invalid node parser (%T)", v)
}
// Get properties
lat, lon, err := d.coords()
if err != nil {
return err
}
// Add to cache
if rc {
g.nodeCache.add(rawNode{
id: id,
lat: newCoordinate(lat),
lon: newCoordinate(lon),
})
// Add tags
if tags.Len() != 0 {
g.nodeTags[id] = tags
}
}
// Send to output stream
if rs {
g.streamEntity(Node{
ID: id,
Lat: lat,
Lon: lon,
Tags: tags,
})
}
return nil
})
}
func (g *Gosmonaut) scanWays() error {
return g.scan(WayType, func(id int64, tags OSMTags, v entityParser) error {
// Needed by cache or stream?
rc, rs := g.wayIDTracker.get(id), g.entityNeeded(WayType, tags)
if !rc && !rs {
return nil
}
// Get parser
d, ok := v.(wayParser)
if !ok {
return fmt.Errorf("got invalid way parser (%T)", v)
}
// Get properties
refs, err := d.refs()
if err != nil {
return err
}
// Add to cache
if rc {
g.wayCache.add(rawWay{
id: id,
refs: refs,
})
// Add tags
if tags.Len() != 0 {
g.wayTags[id] = tags
}
}
// Send to output stream
if rs {
nodes, err := g.
|
streamEntity
|
identifier_name
|
initializer.ts
|
*/
import * as chalk from 'chalk';
import { readFileSync } from 'fs';
import { Browser, Page } from 'puppeteer';
import { deleteFiles, checkingCloses } from '../api/helpers';
import { Whatsapp } from '../api/whatsapp';
import { CreateConfig, defaultOptions } from '../config/create-config';
import { tokenSession } from '../config/tokenSession.config';
import { checkFileJson } from '../api/helpers/check-token-file';
import { SocketState, SocketStream } from '../api/model/enum';
import { SessionTokenCkeck, saveToken, isBeta } from './auth';
import {
initWhatsapp,
initBrowser,
injectApi,
getWhatsappPage
} from './browser';
import { welcomeScreen } from './welcome';
const path = require('path');
/**
* A callback will be received, reporting the status of the QR code
*/
export type CatchQR = (
qrCode: string,
asciiQR: string,
attempt: number,
urlCode?: string
) => void;
/**
* A callback will be received, reporting the customer's status
*/
export type StatusFind = (statusGet: string, session: string) => void;
/**
* A callback will be received, informing the user about the browser and page instances
*/
export type BrowserInstance = (
browser: string | Browser,
waPage: false | Page
) => void;
export interface CreateOptions extends CreateConfig {
/**
* You must pass a string type parameter; this parameter will be the name of the client's session. If the parameter is not passed, the session name will be "session".
*/
session: string;
/**
* A callback will be received, reporting the status of the QR code
*/
catchQR?: CatchQR;
/**
* A callback will be received, reporting the customer's status
*/
statusFind?: StatusFind;
/**
* Pass the session token information. You can obtain this token with the await client.getSessionTokenBrowser() function.
*/
browserSessionToken?: tokenSession;
/**
* A callback will be received, informing the user about the browser and page instances
*/
browserInstance?: BrowserInstance;
}
/**
* Start the bot
* @returns Whatsapp page, with this parameter you will be able to access the bot functions
*/
export async function create(createOption: CreateOptions): Promise<Whatsapp>;
/**
* Start the bot
* You must pass a string type parameter; this parameter will be the name of the client's session. If the parameter is not passed, the session name will be "session".
* @returns Whatsapp page, with this parameter you will be able to access the bot functions
*/
export async function create(
sessionName: string,
catchQR?: CatchQR,
statusFind?: StatusFind,
options?: CreateConfig,
browserSessionToken?: tokenSession,
browserInstance?: BrowserInstance
): Promise<Whatsapp>;
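/*
 * Illustrative usage sketch (added; not part of the original source). It only
 * uses the names declared above (create, CreateOptions, CatchQR, StatusFind);
 * the session name and logging are arbitrary examples.
 *
 *   create({
 *     session: 'my-session',
 *     catchQR: (qrCode, asciiQR, attempt) => console.log(asciiQR, attempt),
 *     statusFind: (status, session) => console.log(status, session),
 *   }).then((client) => {
 *     // client is a Whatsapp instance; the bot functions live on it
 *   });
 */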
export async function create(
sessionOrOption: string | CreateOptions,
catchQR?: CatchQR,
statusFind?: StatusFind,
options?: CreateConfig,
browserSessionToken?: tokenSession,
browserInstance?: BrowserInstance
): Promise<Whatsapp> {
let session = 'session';
if (
typeof sessionOrOption === 'string' &&
sessionOrOption.replace(/\s/g, '').length
) {
session = sessionOrOption.replace(/\s/g, '');
} else if (typeof sessionOrOption === 'object') {
session = sessionOrOption.session || session;
catchQR = sessionOrOption.catchQR || catchQR;
statusFind = sessionOrOption.statusFind || statusFind;
browserSessionToken =
sessionOrOption.browserSessionToken || browserSessionToken;
browserInstance = sessionOrOption.browserInstance || browserInstance;
options = sessionOrOption;
}
let browserToken: any;
const mergedOptions = { ...defaultOptions, ...options };
const logger = mergedOptions.logger;
if (!mergedOptions.disableWelcome) {
welcomeScreen();
}
logger.info(`${chalk.underline('https://orkestral.io - official site!')}\n`);
statusFind && statusFind('initBrowser', session);
// Initialize whatsapp
if (mergedOptions.browserWS) {
logger.info(`Waiting... checking the wss server...`, { session });
} else {
logger.info('Waiting... checking the browser...', { session });
}
const browser = await initBrowser(session, mergedOptions, logger);
// Error connecting to the wss server
if (typeof browser === 'string' && browser === 'connect') {
logger.info('Error when trying to connect ' + mergedOptions.browserWS, {
session
});
statusFind && statusFind('serverWssNotConnected', session);
throw `Error when trying to connect ${mergedOptions.browserWS}`;
}
// Error opening the browser
if (typeof browser === 'string' && browser === 'launch') {
logger.info('Error opening the browser... ', {
session
});
statusFind && statusFind('noOpenBrowser', session);
throw `Error opening the browser...`;
}
if (mergedOptions.browserWS) {
logger.info('Has been properly connected to the wss server', {
session
});
statusFind && statusFind('connectBrowserWs', session);
} else {
statusFind && statusFind('openBrowser', session);
logger.info('Browser successfully opened', {
session
});
}
if (!mergedOptions.browserWS) {
logger.info('checking headless...', {
session
});
if (mergedOptions.headless) {
logger.info('headless option is active, browser hidden', {
session
});
} else {
logger.info('headless option is disabled, browser visible', {
session
});
}
}
if (typeof browser === 'object') {
if (!mergedOptions.browserWS && browser['_process']) {
browser['_process'].once('close', () => {
browser['isClose'] = true;
});
}
checkingCloses(browser, mergedOptions, (result) => {
statusFind && statusFind(result, session);
}).catch(() => {
throw 'The client has been closed';
});
if (SessionTokenCkeck(browserSessionToken)) {
browserToken = browserSessionToken;
}
logger.info('Checking page...', {
session
});
statusFind && statusFind('initWhatsapp', session);
const newPage: Page = await getWhatsappPage(browser);
const client = new Whatsapp(newPage, session, mergedOptions);
const page: false | Page = await initWhatsapp(
session,
mergedOptions,
browser,
browserToken
);
if (browserInstance) {
browserInstance(browser, page);
}
if (page === false) {
logger.info('Error accessing the page: "https://web.whatsapp.com"', {
session
});
statusFind && statusFind('erroPageWhatsapp', session);
throw 'Error when trying to access the page: "https://web.whatsapp.com"';
}
statusFind && statusFind('successPageWhatsapp', session);
logger.info(`${chalk.green('Page successfully accessed')}`, {
session
});
client.onStreamChange(async (stateStream) => {
if (stateStream === SocketStream.CONNECTED) {
statusFind && statusFind('chatsAvailable', session);
}
if (stateStream === SocketStream.DISCONNECTED) {
await page.waitForFunction(
() => {
if (
document.querySelector('canvas') &&
document.querySelectorAll('._2Nr6U').length == 0
) {
return true;
}
},
{
timeout: 0,
polling: 100
}
);
if (checkFileJson(mergedOptions, session)) {
if (statusFind) {
statusFind('desconnectedMobile', session);
}
deleteFiles(mergedOptions, session, logger);
}
}
});
client.onStateChange(async (state) => {
if (state === SocketState.PAIRING) {
const device: Boolean = await page
.evaluate(() => {
if (
document.querySelector('[tabindex="-1"]') &&
window?.Store?.Stream?.mode == 'SYNCING' &&
window?.Store?.Stream?.obscurity == 'SHOW'
)
|
{
return true;
}
|
conditional_block
|
|
initializer.ts
|
*/
import * as chalk from 'chalk';
import { readFileSync } from 'fs';
import { Browser, Page } from 'puppeteer';
import { deleteFiles, checkingCloses } from '../api/helpers';
import { Whatsapp } from '../api/whatsapp';
import { CreateConfig, defaultOptions } from '../config/create-config';
import { tokenSession } from '../config/tokenSession.config';
import { checkFileJson } from '../api/helpers/check-token-file';
import { SocketState, SocketStream } from '../api/model/enum';
import { SessionTokenCkeck, saveToken, isBeta } from './auth';
import {
initWhatsapp,
initBrowser,
injectApi,
getWhatsappPage
} from './browser';
import { welcomeScreen } from './welcome';
const path = require('path');
/**
* A callback will be received, reporting the status of the QR code
*/
export type CatchQR = (
qrCode: string,
asciiQR: string,
attempt: number,
urlCode?: string
) => void;
/**
* A callback will be received, reporting the customer's status
*/
export type StatusFind = (statusGet: string, session: string) => void;
/**
* A callback will be received, informing the user about the browser and page instances
*/
export type BrowserInstance = (
browser: string | Browser,
waPage: false | Page
) => void;
export interface CreateOptions extends CreateConfig {
/**
* You must pass a string type parameter; this parameter will be the name of the client's session. If the parameter is not passed, the session name will be "session".
*/
session: string;
/**
* A callback will be received, reporting the status of the QR code
*/
catchQR?: CatchQR;
/**
* A callback will be received, reporting the customer's status
*/
statusFind?: StatusFind;
/**
* Pass the session token information. You can obtain this token with the await client.getSessionTokenBrowser() function.
*/
browserSessionToken?: tokenSession;
/**
* A callback will be received, informing the user about the browser and page instances
*/
browserInstance?: BrowserInstance;
}
/**
* Start the bot
* @returns Whatsapp page, with this parameter you will be able to access the bot functions
*/
export async function create(createOption: CreateOptions): Promise<Whatsapp>;
/**
* Start the bot
* You must pass a string type parameter; this parameter will be the name of the client's session. If the parameter is not passed, the session name will be "session".
* @returns Whatsapp page, with this parameter you will be able to access the bot functions
*/
export async function create(
sessionName: string,
catchQR?: CatchQR,
statusFind?: StatusFind,
options?: CreateConfig,
browserSessionToken?: tokenSession,
browserInstance?: BrowserInstance
): Promise<Whatsapp>;
export async function create(
sessionOrOption: string | CreateOptions,
catchQR?: CatchQR,
statusFind?: StatusFind,
options?: CreateConfig,
browserSessionToken?: tokenSession,
browserInstance?: BrowserInstance
): Promise<Whatsapp>
|
{
let session = 'session';
if (
typeof sessionOrOption === 'string' &&
sessionOrOption.replace(/\s/g, '').length
) {
session = sessionOrOption.replace(/\s/g, '');
} else if (typeof sessionOrOption === 'object') {
session = sessionOrOption.session || session;
catchQR = sessionOrOption.catchQR || catchQR;
statusFind = sessionOrOption.statusFind || statusFind;
browserSessionToken =
sessionOrOption.browserSessionToken || browserSessionToken;
browserInstance = sessionOrOption.browserInstance || browserInstance;
options = sessionOrOption;
}
let browserToken: any;
const mergedOptions = { ...defaultOptions, ...options };
|
identifier_body
|
|
initializer.ts
|
*/
import * as chalk from 'chalk';
import { readFileSync } from 'fs';
import { Browser, Page } from 'puppeteer';
import { deleteFiles, checkingCloses } from '../api/helpers';
import { Whatsapp } from '../api/whatsapp';
import { CreateConfig, defaultOptions } from '../config/create-config';
import { tokenSession } from '../config/tokenSession.config';
import { checkFileJson } from '../api/helpers/check-token-file';
import { SocketState, SocketStream } from '../api/model/enum';
import { SessionTokenCkeck, saveToken, isBeta } from './auth';
import {
initWhatsapp,
initBrowser,
injectApi,
getWhatsappPage
} from './browser';
import { welcomeScreen } from './welcome';
const path = require('path');
/**
* A callback will be received, reporting the status of the QR code
*/
export type CatchQR = (
qrCode: string,
asciiQR: string,
attempt: number,
urlCode?: string
) => void;
/**
* A callback will be received, reporting the customer's status
*/
export type StatusFind = (statusGet: string, session: string) => void;
/**
* A callback will be received, informing the user about the browser and page instances
*/
export type BrowserInstance = (
browser: string | Browser,
waPage: false | Page
) => void;
export interface CreateOptions extends CreateConfig {
/**
* You must pass a string type parameter; this parameter will be the name of the client's session. If the parameter is not passed, the session name will be "session".
*/
session: string;
/**
* A callback will be received, reporting the status of the QR code
*/
catchQR?: CatchQR;
/**
* A callback will be received, reporting the customer's status
*/
statusFind?: StatusFind;
/**
* Pass the session token information. You can obtain this token with the await client.getSessionTokenBrowser() function.
*/
browserSessionToken?: tokenSession;
/**
* A callback will be received, informing the user about the browser and page instances
*/
browserInstance?: BrowserInstance;
}
/**
* Start the bot
* @returns Whatsapp page, with this parameter you will be able to access the bot functions
*/
export async function create(createOption: CreateOptions): Promise<Whatsapp>;
/**
* Start the bot
* You must pass a string type parameter; this parameter will be the name of the client's session. If the parameter is not passed, the session name will be "session".
* @returns Whatsapp page, with this parameter you will be able to access the bot functions
*/
export async function create(
sessionName: string,
catchQR?: CatchQR,
statusFind?: StatusFind,
options?: CreateConfig,
browserSessionToken?: tokenSession,
browserInstance?: BrowserInstance
): Promise<Whatsapp>;
export async function
|
(
sessionOrOption: string | CreateOptions,
catchQR?: CatchQR,
statusFind?: StatusFind,
options?: CreateConfig,
browserSessionToken?: tokenSession,
browserInstance?: BrowserInstance
): Promise<Whatsapp> {
let session = 'session';
if (
typeof sessionOrOption === 'string' &&
sessionOrOption.replace(/\s/g, '').length
) {
session = sessionOrOption.replace(/\s/g, '');
} else if (typeof sessionOrOption === 'object') {
session = sessionOrOption.session || session;
catchQR = sessionOrOption.catchQR || catchQR;
statusFind = sessionOrOption.statusFind || statusFind;
browserSessionToken =
sessionOrOption
|
create
|
identifier_name
|
initializer.ts
|
*/
import * as chalk from 'chalk';
import { readFileSync } from 'fs';
import { Browser, Page } from 'puppeteer';
import { deleteFiles, checkingCloses } from '../api/helpers';
import { Whatsapp } from '../api/whatsapp';
import { CreateConfig, defaultOptions } from
|
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
|
random_line_split
|
|
main.rs
|
.push(name_string);
}
target_names
}
pub fn u8_to_string(u: &[u8]) -> String {
String::from_utf8(u.to_vec()).unwrap()
}
pub fn dna_vec(u: &[u8]) -> (Vec<char>) {
let mut v: Vec<char> = Vec::with_capacity(u.len());
for cu in u.to_ascii_uppercase() {
let c = cu as char;
//assert!(c == 'A' || c == 'C' || c == 'G' || c == 'T' || c == 'N');
if c == 'A' || c == 'C' || c == 'G' || c == 'T' || c == 'N' {
v.push(c);
} else {
eprintln!("Warning: Unexpected base \"{}\" encountered. Replaced with \"N\".",
c);
v.push('N');
}
}
v
}
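// Illustrative check (added; not part of the original source): dna_vec
// upper-cases its input and replaces anything outside A/C/G/T/N with 'N'.
#[cfg(test)]
mod dna_vec_example {
use super::dna_vec;
#[test]
fn replaces_unexpected_bases() {
// 'x' is not a recognised base, so it is reported and replaced with 'N'
assert_eq!(dna_vec(b"acgtx"), vec!['A', 'C', 'G', 'T', 'N']);
}
}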
pub fn
|
(bam_file: &String) -> Vec<GenomicInterval> {
let bam = bam::Reader::from_path(bam_file).unwrap();
let header_view = bam.header();
let target_names_dec: Vec<&[u8]> = header_view.target_names();
let mut intervals: Vec<GenomicInterval> = vec![];
for (tid, t_name_dec) in target_names_dec.iter().enumerate() {
let mut name_vec: Vec<char> = vec![];
for decr in t_name_dec.iter() {
let dec: u8 = *decr;
name_vec.push(dec as char);
}
let name_string: String = name_vec.into_iter().collect();
intervals.push(GenomicInterval{
tid: tid as u32,
chrom: name_string,
start_pos: 0,
end_pos: header_view.target_len(tid as u32).unwrap()-1
});
}
intervals
}
// given a bam file name and a possible genomic interval,
// if the interval exists then just return a vector holding that lone interval
// otherwise, if the interval is None,
// return a vector holding GenomicIntervals representing the whole genome.
pub fn get_interval_lst(bam_file: &String, interval: &Option<GenomicInterval>) -> Vec<GenomicInterval> {
match interval {
&Some(ref iv) => {
vec![iv.clone()]
}
&None => {
get_whole_genome_intervals(bam_file)
}
}
}
// this is really ugly. TODO a less verbose implementation
pub fn parse_region_string(region_string: Option<&str>,
bamfile_name: &String)
-> Option<GenomicInterval> {
let bam = bam::Reader::from_path(bamfile_name).unwrap();
match region_string {
Some(r) if r.contains(":") && r.contains("-") => {
let split1: Vec<&str> = r.split(":").collect();
if split1.len() != 2 {
panic!("Invalid format for region. Please use <chrom> or <chrom:start-stop>");
}
let split2: Vec<&str> = split1[1].split("-").collect();
if split2.len() != 2 {
panic!("Invalid format for region. Please use <chrom> or <chrom:start-stop>");
}
let iv_chrom = split1[0].to_string();
let iv_start = split2[0].parse::<u32>().expect("Invalid position value specified in region string.");
let iv_end = split2[1].parse::<u32>().expect("Invalid position value specified in region string.");
let mut tid: u32 = 0;
for name in bam.header().target_names() {
if u8_to_string(name) == iv_chrom {
break;
}
tid += 1;
}
if tid as usize == bam.header().target_names().len() {
panic!("Chromosome name for region is not in BAM file.");
}
Some(GenomicInterval {
tid: tid,
chrom: iv_chrom,
start_pos: iv_start - 1,
end_pos: iv_end - 1,
})
}
Some(r) => {
let r_str = r.to_string();
let mut tid: u32 = 0;
for name in bam.header().target_names() {
if u8_to_string(name) == r_str {
break;
}
tid += 1;
}
if tid as usize == bam.header().target_names().len() {
panic!("Chromosome name for region is not in BAM file.");
}
let tlen = bam.header().target_len(tid).unwrap();
Some(GenomicInterval {
tid: tid,
chrom: r_str,
start_pos: 0,
end_pos: tlen - 1,
})
}
None => None,
}
}
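// Illustrative behaviour (added; the chromosome name and length here are
// hypothetical, not taken from a real BAM header):
// parse_region_string(Some("chr1:11-20"), &bam_path) yields an interval with
// start_pos 10 and end_pos 19 (positions in the region string are 1-based
// inclusive and stored 0-based); parse_region_string(Some("chr1"), &bam_path)
// yields the whole chromosome; parse_region_string(None, &bam_path) yields None.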
pub fn count_mapped_reads(bam_file: &String,
fasta_file: &String,
interval: &Option<GenomicInterval>,
min_coverage: u32,
min_mapq: u8,
min_map_frac: f64,
mapped_count_mode: bool) {
let target_names = parse_target_names(&bam_file);
let mut fasta = fasta::IndexedReader::from_file(&fasta_file).unwrap();
// pileup over all covered sites
let mut ref_seq: Vec<char> = vec![];
let mut prev_tid = 4294967295; // u32::MAX sentinel: no chromosome loaded yet
let a_str = "A".to_string();
let c_str = "C".to_string();
let g_str = "G".to_string();
let t_str = "T".to_string();
let interval_lst: Vec<GenomicInterval> = get_interval_lst(bam_file, interval);
let mut bam_ix = bam::IndexedReader::from_path(bam_file).unwrap();
let mut count = 0;
for iv in interval_lst {
bam_ix.fetch(iv.tid as u32, iv.start_pos as u32, iv.end_pos as u32 + 1).ok().expect("Error seeking BAM file while extracting fragments.");
let bam_pileup = bam_ix.pileup();
for p in bam_pileup {
let pileup = p.unwrap();
let tid: usize = pileup.tid() as usize;
let chrom: String = target_names[tid].clone();
let pos0: usize = pileup.pos() as usize;
if chrom != iv.chrom ||
pos0 < iv.start_pos as usize ||
pos0 > iv.end_pos as usize {
continue;
}
if tid != prev_tid {
let mut ref_seq_u8: Vec<u8> = vec![];
fasta.read_all(&chrom, &mut ref_seq_u8).expect("Failed to read fasta sequence record.");
ref_seq = dna_vec(&ref_seq_u8);
}
let ref_base_str = (ref_seq[pileup.pos() as usize]).to_string();
if ref_base_str.contains("N") {
continue;
}
assert!(ref_base_str == a_str || ref_base_str == c_str || ref_base_str == g_str || ref_base_str == t_str);
let mut depth: usize = 0;
let mut well_mapped: usize = 0;
// pileup the bases for a single position and count number of each base
for alignment in pileup.alignments() {
let record = alignment.record();
// may be faster to implement this as bitwise operation on raw flag in the future?
if record.is_secondary() || record.is_quality_check_failed() ||
record.is_duplicate() || record.is_supplementary() {
continue;
}
depth += 1;
if record.is_unmapped() || record.mapq() < min_mapq {
continue;
}
well_mapped += 1;
}
let well_mapped_frac = well_mapped as f64 / depth as f64;
if mapped_count_mode {
if well_mapped >= min_coverage as usize {
count += 1;
}
} else {
if depth >= min_coverage as usize && well_mapped_frac >= min_map_frac {
count += 1;
}
}
prev_tid = tid;
}
}
println!("{}",count);
}
fn main() {
let input_args = App::new("Map Counter")
.version("0.1")
.author("Peter Edge <edge.peterj@gmail.com>")
.about("Given a bam, count the number of positions exceeding a given min coverage and \"well-mapped\" fraction.")
.arg(Arg::with_name("Input BAM")
.short("b")
.long("bam")
.value_name("BAM")
.help("sorted, indexed BAM file.")
.display_order(10)
.required(true)
.takes_value(true))
.arg(Arg::with_name("Input FASTA")
.short("r")
.long("ref")
.value_name("FASTA")
.help("indexed fasta reference that BAM file is aligned to")
.display_order(20)
.required(true)
.takes_value(true))
.arg(Arg::with_name("Chrom")
.short("C")
.long("chrom")
.value_name("string")
.help("Chromosome to limit analysis to.")
.display_order(30)
.takes_value(true))
.arg(Arg::with_name("Min coverage")
.short
|
get_whole_genome_intervals
|
identifier_name
|
main.rs
|
.push(name_string);
}
target_names
}
pub fn u8_to_string(u: &[u8]) -> String {
String::from_utf8(u.to_vec()).unwrap()
}
pub fn dna_vec(u: &[u8]) -> (Vec<char>) {
let mut v: Vec<char> = Vec::with_capacity(u.len());
for cu in u.to_ascii_uppercase() {
let c = cu as char;
//assert!(c == 'A' || c == 'C' || c == 'G' || c == 'T' || c == 'N');
if c == 'A' || c == 'C' || c == 'G' || c == 'T' || c == 'N' {
v.push(c);
} else {
eprintln!("Warning: Unexpected base \"{}\" encountered. Replaced with \"N\".",
c);
v.push('N');
}
}
v
}
pub fn get_whole_genome_intervals(bam_file: &String) -> Vec<GenomicInterval> {
let bam = bam::Reader::from_path(bam_file).unwrap();
let header_view = bam.header();
let target_names_dec: Vec<&[u8]> = header_view.target_names();
let mut intervals: Vec<GenomicInterval> = vec![];
for (tid, t_name_dec) in target_names_dec.iter().enumerate() {
let mut name_vec: Vec<char> = vec![];
for decr in t_name_dec.iter() {
let dec: u8 = *decr;
name_vec.push(dec as char);
}
let name_string: String = name_vec.into_iter().collect();
intervals.push(GenomicInterval{
tid: tid as u32,
chrom: name_string,
start_pos: 0,
end_pos: header_view.target_len(tid as u32).unwrap()-1
});
}
intervals
}
// given a bam file name and a possible genomic interval,
// if the interval exists then just return a vector holding that lone interval
// otherwise, if the interval is None,
// return a vector holding GenomicIntervals representing the whole genome.
pub fn get_interval_lst(bam_file: &String, interval: &Option<GenomicInterval>) -> Vec<GenomicInterval> {
match interval {
&Some(ref iv) => {
vec![iv.clone()]
}
&None => {
get_whole_genome_intervals(bam_file)
}
}
}
// this is really ugly. TODO a less verbose implementation
pub fn parse_region_string(region_string: Option<&str>,
bamfile_name: &String)
-> Option<GenomicInterval> {
let bam = bam::Reader::from_path(bamfile_name).unwrap();
match region_string {
Some(r) if r.contains(":") && r.contains("-") => {
let split1: Vec<&str> = r.split(":").collect();
if split1.len() != 2 {
panic!("Invalid format for region. Please use <chrom> or <chrom:start-stop>");
}
let split2: Vec<&str> = split1[1].split("-").collect();
if split2.len() != 2 {
panic!("Invalid format for region. Please use <chrom> or <chrom:start-stop>");
}
let iv_chrom = split1[0].to_string();
let iv_start = split2[0].parse::<u32>().expect("Invalid position value specified in region string.");
let iv_end = split2[1].parse::<u32>().expect("Invalid position value specified in region string.");
let mut tid: u32 = 0;
for name in bam.header().target_names() {
if u8_to_string(name) == iv_chrom {
break;
}
tid += 1;
}
if tid as usize == bam.header().target_names().len() {
panic!("Chromosome name for region is not in BAM file.");
}
Some(GenomicInterval {
tid: tid,
chrom: iv_chrom,
start_pos: iv_start - 1,
end_pos: iv_end - 1,
})
}
Some(r) => {
let r_str = r.to_string();
let mut tid: u32 = 0;
for name in bam.header().target_names() {
if u8_to_string(name) == r_str {
break;
}
tid += 1;
}
if tid as usize == bam.header().target_names().len() {
panic!("Chromosome name for region is not in BAM file.");
}
let tlen = bam.header().target_len(tid).unwrap();
Some(GenomicInterval {
tid: tid,
chrom: r_str,
start_pos: 0,
end_pos: tlen - 1,
})
}
None => None,
}
}
pub fn count_mapped_reads(bam_file: &String,
fasta_file: &String,
interval: &Option<GenomicInterval>,
min_coverage: u32,
min_mapq: u8,
min_map_frac: f64,
mapped_count_mode: bool) {
let target_names = parse_target_names(&bam_file);
let mut fasta = fasta::IndexedReader::from_file(&fasta_file).unwrap();
// pileup over all covered sites
let mut ref_seq: Vec<char> = vec![];
let mut prev_tid = 4294967295; // u32::MAX sentinel: no chromosome loaded yet
let a_str = "A".to_string();
let c_str = "C".to_string();
let g_str = "G".to_string();
let t_str = "T".to_string();
let interval_lst: Vec<GenomicInterval> = get_interval_lst(bam_file, interval);
let mut bam_ix = bam::IndexedReader::from_path(bam_file).unwrap();
let mut count = 0;
for iv in interval_lst {
bam_ix.fetch(iv.tid as u32, iv.start_pos as u32, iv.end_pos as u32 + 1).ok().expect("Error seeking BAM file while extracting fragments.");
let bam_pileup = bam_ix.pileup();
for p in bam_pileup {
let pileup = p.unwrap();
let tid: usize = pileup.tid() as usize;
let chrom: String = target_names[tid].clone();
let pos0: usize = pileup.pos() as usize;
if chrom != iv.chrom ||
pos0 < iv.start_pos as usize ||
pos0 > iv.end_pos as usize {
continue;
}
if tid != prev_tid {
let mut ref_seq_u8: Vec<u8> = vec![];
fasta.read_all(&chrom, &mut ref_seq_u8).expect("Failed to read fasta sequence record.");
ref_seq = dna_vec(&ref_seq_u8);
}
let ref_base_str = (ref_seq[pileup.pos() as usize]).to_string();
if ref_base_str.contains("N") {
continue;
}
assert!(ref_base_str == a_str || ref_base_str == c_str || ref_base_str == g_str || ref_base_str == t_str);
let mut depth: usize = 0;
let mut well_mapped: usize = 0;
// pileup the bases for a single position and count number of each base
for alignment in pileup.alignments() {
let record = alignment.record();
// may be faster to implement this as bitwise operation on raw flag in the future?
if record.is_secondary() || record.is_quality_check_failed() ||
record.is_duplicate() || record.is_supplementary() {
continue;
}
depth += 1;
if record.is_unmapped() || record.mapq() < min_mapq {
continue;
}
well_mapped += 1;
}
let well_mapped_frac = well_mapped as f64 / depth as f64;
if mapped_count_mode {
if well_mapped >= min_coverage as usize {
count += 1;
}
} else {
if depth >= min_coverage as usize && well_mapped_frac >= min_map_frac {
count += 1;
}
}
prev_tid = tid;
}
}
println!("{}",count);
}
fn main()
|
.takes_value(true))
.arg(Arg::with_name("Chrom")
.short("C")
.long("chrom")
.value_name("string")
.help("Chromosome to limit analysis to.")
.display_order(30)
.takes_value(true))
.arg(Arg::with_name("Min coverage")
.short
|
{
let input_args = App::new("Map Counter")
.version("0.1")
.author("Peter Edge <edge.peterj@gmail.com>")
.about("Given a bam, count the number of positions exceeding a given min coverage and \"well-mapped\" fraction.")
.arg(Arg::with_name("Input BAM")
.short("b")
.long("bam")
.value_name("BAM")
.help("sorted, indexed BAM file.")
.display_order(10)
.required(true)
.takes_value(true))
.arg(Arg::with_name("Input FASTA")
.short("r")
.long("ref")
.value_name("FASTA")
.help("indexed fasta reference that BAM file is aligned to")
.display_order(20)
.required(true)
|
identifier_body
|
main.rs
|
_names.push(name_string);
}
target_names
}
pub fn u8_to_string(u: &[u8]) -> String {
String::from_utf8(u.to_vec()).unwrap()
}
pub fn dna_vec(u: &[u8]) -> (Vec<char>) {
let mut v: Vec<char> = Vec::with_capacity(u.len());
for cu in u.to_ascii_uppercase() {
let c = cu as char;
//assert!(c == 'A' || c == 'C' || c == 'G' || c == 'T' || c == 'N');
if c == 'A' || c == 'C' || c == 'G' || c == 'T' || c == 'N' {
v.push(c);
} else {
eprintln!("Warning: Unexpected base \"{}\" encountered. Replaced with \"N\".",
c);
v.push('N');
}
}
v
}
pub fn get_whole_genome_intervals(bam_file: &String) -> Vec<GenomicInterval> {
let bam = bam::Reader::from_path(bam_file).unwrap();
let header_view = bam.header();
let target_names_dec: Vec<&[u8]> = header_view.target_names();
let mut intervals: Vec<GenomicInterval> = vec![];
for (tid, t_name_dec) in target_names_dec.iter().enumerate() {
let mut name_vec: Vec<char> = vec![];
for decr in t_name_dec.iter() {
let dec: u8 = *decr;
name_vec.push(dec as char);
}
let name_string: String = name_vec.into_iter().collect();
intervals.push(GenomicInterval{
tid: tid as u32,
chrom: name_string,
start_pos: 0,
|
}
intervals
}
// given a bam file name and a possible genomic interval,
// if the interval exists then just return a vector holding that lone interval
// otherwise, if the interval is None,
// return a vector holding GenomicIntervals representing the whole genome.
pub fn get_interval_lst(bam_file: &String, interval: &Option<GenomicInterval>) -> Vec<GenomicInterval> {
match interval {
&Some(ref iv) => {
vec![iv.clone()]
}
&None => {
get_whole_genome_intervals(bam_file)
}
}
}
// this is really ugly. TODO a less verbose implementation
pub fn parse_region_string(region_string: Option<&str>,
bamfile_name: &String)
-> Option<GenomicInterval> {
let bam = bam::Reader::from_path(bamfile_name).unwrap();
match region_string {
Some(r) if r.contains(":") && r.contains("-") => {
let split1: Vec<&str> = r.split(":").collect();
if split1.len() != 2 {
panic!("Invalid format for region. Please use <chrom> or <chrom:start-stop>");
}
let split2: Vec<&str> = split1[1].split("-").collect();
if split2.len() != 2 {
panic!("Invalid format for region. Please use <chrom> or <chrom:start-stop>");
}
let iv_chrom = split1[0].to_string();
let iv_start = split2[0].parse::<u32>().expect("Invalid position value specified in region string.");
let iv_end = split2[1].parse::<u32>().expect("Invalid position value specified in region string.");
let mut tid: u32 = 0;
for name in bam.header().target_names() {
if u8_to_string(name) == iv_chrom {
break;
}
tid += 1;
}
if tid as usize == bam.header().target_names().len() {
panic!("Chromosome name for region is not in BAM file.");
}
Some(GenomicInterval {
tid: tid,
chrom: iv_chrom,
start_pos: iv_start - 1,
end_pos: iv_end - 1,
})
}
Some(r) => {
let r_str = r.to_string();
let mut tid: u32 = 0;
for name in bam.header().target_names() {
if u8_to_string(name) == r_str {
break;
}
tid += 1;
}
if tid as usize == bam.header().target_names().len() {
panic!("Chromosome name for region is not in BAM file.");
}
let tlen = bam.header().target_len(tid).unwrap();
Some(GenomicInterval {
tid: tid,
chrom: r_str,
start_pos: 0,
end_pos: tlen - 1,
})
}
None => None,
}
}
pub fn count_mapped_reads(bam_file: &String,
fasta_file: &String,
interval: &Option<GenomicInterval>,
min_coverage: u32,
min_mapq: u8,
min_map_frac: f64,
mapped_count_mode: bool) {
let target_names = parse_target_names(&bam_file);
let mut fasta = fasta::IndexedReader::from_file(&fasta_file).unwrap();
// pileup over all covered sites
let mut ref_seq: Vec<char> = vec![];
let mut prev_tid = 4294967295; // u32::MAX sentinel: no chromosome loaded yet
let a_str = "A".to_string();
let c_str = "C".to_string();
let g_str = "G".to_string();
let t_str = "T".to_string();
let interval_lst: Vec<GenomicInterval> = get_interval_lst(bam_file, interval);
let mut bam_ix = bam::IndexedReader::from_path(bam_file).unwrap();
let mut count = 0;
for iv in interval_lst {
bam_ix.fetch(iv.tid as u32, iv.start_pos as u32, iv.end_pos as u32 + 1).ok().expect("Error seeking BAM file while extracting fragments.");
let bam_pileup = bam_ix.pileup();
for p in bam_pileup {
let pileup = p.unwrap();
let tid: usize = pileup.tid() as usize;
let chrom: String = target_names[tid].clone();
let pos0: usize = pileup.pos() as usize;
if chrom != iv.chrom ||
pos0 < iv.start_pos as usize ||
pos0 > iv.end_pos as usize {
continue;
}
if tid != prev_tid {
let mut ref_seq_u8: Vec<u8> = vec![];
fasta.read_all(&chrom, &mut ref_seq_u8).expect("Failed to read fasta sequence record.");
ref_seq = dna_vec(&ref_seq_u8);
}
let ref_base_str = (ref_seq[pileup.pos() as usize]).to_string();
if ref_base_str.contains("N") {
continue;
}
assert!(ref_base_str == a_str || ref_base_str == c_str || ref_base_str == g_str || ref_base_str == t_str);
let mut depth: usize = 0;
let mut well_mapped: usize = 0;
// pileup the bases for a single position and count number of each base
for alignment in pileup.alignments() {
let record = alignment.record();
// may be faster to implement this as bitwise operation on raw flag in the future?
if record.is_secondary() || record.is_quality_check_failed() ||
record.is_duplicate() || record.is_supplementary() {
continue;
}
depth += 1;
if record.is_unmapped() || record.mapq() < min_mapq {
continue;
}
well_mapped += 1;
}
let well_mapped_frac = well_mapped as f64 / depth as f64;
if mapped_count_mode {
if well_mapped >= min_coverage as usize {
count += 1;
}
} else {
if depth >= min_coverage as usize && well_mapped_frac >= min_map_frac {
count += 1;
}
}
prev_tid = tid;
}
}
println!("{}",count);
}
fn main() {
let input_args = App::new("Map Counter")
.version("0.1")
.author("Peter Edge <edge.peterj@gmail.com>")
.about("Given a bam, count the number of positions exceeding a given min coverage and \"well-mapped\" fraction.")
.arg(Arg::with_name("Input BAM")
.short("b")
.long("bam")
.value_name("BAM")
.help("sorted, indexed BAM file.")
.display_order(10)
.required(true)
.takes_value(true))
.arg(Arg::with_name("Input FASTA")
.short("r")
.long("ref")
.value_name("FASTA")
.help("indexed fasta reference that BAM file is aligned to")
.display_order(20)
.required(true)
.takes_value(true))
.arg(Arg::with_name("Chrom")
.short("C")
.long("chrom")
.value_name("string")
.help("Chromosome to limit analysis to.")
.display_order(30)
.takes_value(true))
.arg(Arg::with_name("Min coverage")
.short
|
end_pos: header_view.target_len(tid as u32).unwrap()-1
});
|
random_line_split
|
configureDCR.py
|
p.name]
if len(ifxs2s) == 0:
ifxsSetting2 = None
else:
ifxsSetting2 = ifxs2s[0].port
if ifxsSetting2 == ifxsSetting1:
|
# are we done yet?
if feed2path is not None:
break
# return feed1path, feed2path
return IFPaths([feed1path, feed2path])
def setFreqWithVelocity(config, paths):
"TBF: explain this"
vlow = config['vlow']
vhigh = config['vhigh']
vdef = config['vdef']
# for no doppler effect, I think we do this:
config['freq'] = config['restfreq']
config['dfreq'] = 0
user_freqs = [config["freq"]]
user_dfreqs = [config["dfreq"]]
freqs = user_freqs
dfreqs = user_dfreqs
vd = Vdef()
minMaxFreqs = MinMaxFreqs()
freqList = []
freqAvgVels = []
for freq, dfreq in zip(freqs, dfreqs):
vd.compute_local_frame_with_vdef(vdef, vhigh, freq, vlow)
minMaxFreqs.setMin(vd.cur_vlow + dfreq)
minMaxFreqs.setMax(vd.cur_vhigh + dfreq)
vel_freq = vd.get_vave() + dfreq
freqAvgVels.append(vel_freq)
freqList.append(vel_freq)
return freqList, dfreqs, freqAvgVels, minMaxFreqs
def compute_Flocal(config):
"""Compute the frequency compensated for the velociy of the source"""
vlow = config['vlow']
vhigh = config['vhigh']
vdef = config['vdef']
lo_restfreq = config["DOPPLERTRACKFREQ"]
velocity = (vlow + vhigh) * 0.5
vd = Vdef()
vd.compute_local_frame_with_vdef(vdef, velocity,
lo_restfreq, velocity)
# this better be the same as vlow since i sent in the avg
cur_vhigh = vd.get_vhigh()
cur_vlow = vd.get_vlow()
if cur_vhigh != cur_vlow:
"PANIC: How can the avg velocities differ!!!!!"
return cur_vhigh
def setRxFilters(receiver, tuning_freq, bw_total):
"""Set receiver filters:
use the first filter in the list for this receiver that
encompasses the bandwidth defined by the given
tuning frequency and bandwidth.
"""
found = 0
# bw_total = None #self.freq_calc.get_bw_total()
# if_center = self.freq_calc.get_center_freq()
# self.if_freq_low = if_center - (0.5 * bw_total)
# self.if_freq_high = if_center + (0.5 * bw_total)
# fake the bandwidth? Nope, too wide, no filter will get selected.
# TBF: we need to figure out the freq stuff better - start with compute_bw algo.
# lo, hi = RCVR_FREQS[receiver]
# bw_total = hi - lo
filter_setting = freqLow = freqHigh = None
if receiver in FILTERS:
param_names, filters, rcvr_type = FILTERS[receiver]
bw_low = tuning_freq - (0.5 * bw_total)
bw_high = tuning_freq + (0.5 * bw_total)
# if rcvr_type == 0: # Mixer before filter. Convert filter freq
# bw_low = self.if_freq_low
# bw_high = self.if_freq_high
filter_setting = filter_bandwidth = None
# Choose the first one that encompasses our bw_low and bw_high!
for freqLow, freqHigh, freqSetting in filters:
if bw_low >= freqLow and bw_high <= freqHigh:
filter_setting = freqSetting
filter_bandwidth = freqHigh - freqLow
found = 1
break
return filter_setting, freqLow, freqHigh
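# Illustrative example (added; the FILTERS entry below is hypothetical):
# with FILTERS["RcvrX"] = (param_names, [(1000.0, 2000.0, "F1"), (500.0, 4000.0, "F2")], 0),
# tuning_freq=1500.0 and bw_total=800.0 give a band of 1100.0-1900.0, which the
# first filter already encompasses, so setRxFilters returns ("F1", 1000.0, 2000.0).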
def setIFFilters(total_bw_low, total_bw_high, ifpath):
"Returns IFRack parameters and updates paths bandpass info"
# print("setIFFilters", total_bw_low, total_bw_high)
params = []
for path in ifpath:
filter_value = "pass_all"
param = None
for fv, fLow, fHigh in IFfilters:
# fLow = f[1]
# fHigh = f[2]
# if f[1] <= total_bw_low and f[2] >= total_bw_high:
# filter_value = f[0]
if fLow <= total_bw_low and fHigh >= total_bw_high:
filter_value = fv
# print("print found good filter at", filter_value, fLow, fHigh)
break
opticalDriver = path.getFirstLikeDeviceNode("OpticalDriver")
# if "opticalDriver" in path:
if opticalDriver is not None:
# update the IFRack parameters
filterNum = opticalDriver.deviceId # path["opticalDriver"]
param = "IFRack,filter_select,{}".format(filterNum)
params.append((param, filter_value))
if filter_value != "pass_all":
# set this nodes ifInfo filter info
opticalDriver.ifInfo = IFInfo()
opticalDriver.ifInfo.filters = []
opticalDriver.ifInfo.filters.append((param, filter_value))
# then use filter range to update the bandpass at this node
bp = path.getBandpassUpToNode(opticalDriver)
bpFilter = copy(bp)
bpFilter.filter(fLow, fHigh)
bpFilter.changes = "%s, (%f, %f)" % (param, fLow, fHigh)
opticalDriver.setBandpasses([bpFilter])
# self.seq.add_param(self.mng, fs, self.filter_value)
return params
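# Illustrative example (added; the IFfilters entry and deviceId are hypothetical):
# if IFfilters contains ("filter_1450_1550", 1450.0, 1550.0) and the total band is
# 1460.0-1540.0, that entry is selected, and for a path whose OpticalDriver node has
# deviceId 3 the returned parameter is ("IFRack,filter_select,3", "filter_1450_1550").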
def getFilterBandpasses(bp1, rxNode):
"Return bandpasses representing filters in receiver"
bps = []
bp = copy(bp1)
for fName, fLow, fHigh in rxNode.ifInfo.filters:
bp = copy(bp)
bp.filter(fLow, fHigh)
bp.changes = "Filter %s (%f, %f)" % (fName, fLow, fHigh)
bps.append(bp)
return bps
def getLO1Bandpass(bp1, rxNode, lowerSideband=None):
"Return bandpass representing receivers LO1 mixing"
if lowerSideband is None:
s = RCVR_SIDEBAND[rxNode.device]
lowerSideband = s == -1
bpLO1 = copy(bp1)
loMixFreq = rxNode.ifInfo.lo['freq']
bpLO1.mix(loMixFreq, lowerSideband=lowerSideband)
sideband = "lower" if lowerSideband else "upper"
bpLO1.changes = "LO %s sideband at %f" % (sideband, loMixFreq)
return bpLO1
def calcFreqs(config, ifPaths, debug=False):
"Make the bandpass decisions to get our signal to the backend"
# we'll return what manager parameters are set here
params = []
# At minimum:
# set filters in receiver
# set LO1 freq
# set optical driver freqs
receiver = config['receiver']
backend = config['backend']
# first see if there's a doppler shift
if config['vframe'] is not None and config['vframe'] != 'topo':
# can't handle this
assert False, "can't handle non-topocentric vframe"
# Doppler handling here is crude
if 'DOPPLERTRACKFREQ' not in config:
config['DOPPLERTRACKFREQ'] = int(config['restfreq'])
if 'lo2freq' not in config:
config['lo2freq'] = [0]
# we need the tuning freq of our receiver
tuningFreq = config['restfreq']
bwTotal = config['bandwidth']
# now we can see how the band pass changes at this stage?
# 2) set the LO1 freq to match the IF1. That seems to be 3000; always? No.
freq, vfreq, _, minMaxFreqs = setFreqWithVelocity(config, ifPaths.paths)
span = minMaxFreqs.maxf - minMaxFreqs.minf
skyFreq = minMaxFreqs.avgFreqs()
ifNom = RCVR_IF_NOMINAL[receiver]
multiplier1 = RCVR_SIDEBAND[receiver]
freqLocal = compute_Flocal(config)
if debug:
print("IF1 computed from: ", multiplier1, freqLocal, skyFreq, ifNom)
# Compute the IF frequencies! What equation is this????
if1 = (multiplier1 * (freqLocal - skyFreq) + ifNom)
if receiver == "Rcvr26_40": # W-band too!
# mmconverter!
if_offset = 44000.
if0 = if_offset - freqLocal
else:
if0 = if1
# for path in paths:
#
|
feed2path = ifPath
break
|
conditional_block
|
configureDCR.py
|
p.name]
if len(ifxs2s) == 0:
ifxsSetting2 = None
else:
ifxsSetting2 = ifxs2s[0].port
if ifxsSetting2 == ifxsSetting1:
feed2path = ifPath
break
# are we done yet?
if feed2path is not None:
break
# return feed1path, feed2path
return IFPaths([feed1path, feed2path])
def setFreqWithVelocity(config, paths):
"TBF: explain this"
|
vhigh = config['vhigh']
vdef = config['vdef']
# for no Doppler effect, I think we do this:
config['freq'] = config['restfreq']
config['dfreq'] = 0
user_freqs = [config["freq"]]
user_dfreqs = [config["dfreq"]]
freqs = user_freqs
dfreqs = user_dfreqs
vd = Vdef()
minMaxFreqs = MinMaxFreqs()
freqList = []
freqAvgVels = []
for freq, dfreq in zip(freqs, dfreqs):
vd.compute_local_frame_with_vdef(vdef, vhigh, freq, vlow)
minMaxFreqs.setMin(vd.cur_vlow + dfreq)
minMaxFreqs.setMax(vd.cur_vhigh + dfreq)
vel_freq = vd.get_vave() + dfreq
freqAvgVels.append(vel_freq)
freqList.append(vel_freq)
return freqList, dfreqs, freqAvgVels, minMaxFreqs
def compute_Flocal(config):
"""Compute the frequency compensated for the velociy of the source"""
vlow = config['vlow']
vhigh = config['vhigh']
vdef = config['vdef']
lo_restfreq = config["DOPPLERTRACKFREQ"]
velocity = (vlow + vhigh) * 0.5
vd = Vdef()
vd.compute_local_frame_with_vdef(vdef, velocity,
lo_restfreq, velocity)
# this better be the same as vlow since i sent in the avg
cur_vhigh = vd.get_vhigh()
cur_vlow = vd.get_vlow()
if cur_vhigh != cur_vlow:
"PANIC: How can the avg velocities differ!!!!!"
return cur_vhigh
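# Note: velocity above is the midpoint of vlow and vhigh (e.g. vlow = 0, vhigh = 10 km/s
# gives velocity = 5 km/s), so the frame-converted cur_vhigh and cur_vlow should coincide;
# the check above is only a sanity guard on that assumption.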
def setRxFilters(receiver, tuning_freq, bw_total):
"""Set receiver filters:
use the first filter in the list for this receiver that
encompasses the bandwidth defined by the given
tuning frequency and bandwidth.
"""
found = 0
# bw_total = None #self.freq_calc.get_bw_total()
# if_center = self.freq_calc.get_center_freq()
# self.if_freq_low = if_center - (0.5 * bw_total)
# self.if_freq_high = if_center + (0.5 * bw_total)
# fake the bandwidth? Nope, too wide, no filter will get selected.
# TBF: we need to figure out the freq stuff better - start with compute_bw algo.
# lo, hi = RCVR_FREQS[receiver]
# bw_total = hi - lo
filter_setting = freqLow = freqHigh = None
if receiver in FILTERS:
param_names, filters, rcvr_type = FILTERS[receiver]
bw_low = tuning_freq - (0.5 * bw_total)
bw_high = tuning_freq + (0.5 * bw_total)
# if rcvr_type == 0: # Mixer before filter. Convert filter freq
# bw_low = self.if_freq_low
# bw_high = self.if_freq_high
filter_setting = filter_bandwidth = None
# Choose the first one that encompasses our bw_low and bw_high!
for freqLow, freqHigh, freqSetting in filters:
if bw_low >= freqLow and bw_high <= freqHigh:
filter_setting = freqSetting
filter_bandwidth = freqHigh - freqLow
found = 1
break
return filter_setting, freqLow, freqHigh
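# Illustrative sketch with a hypothetical FILTERS entry (not the real table): with
# filters = [(1000, 2000, "1000-2000"), (500, 4000, "500-4000")], tuning_freq = 1500 and
# bw_total = 800, bw_low = 1100 and bw_high = 1900; the first entry (1000, 2000) encompasses
# that band, so filter_setting = "1000-2000" and filter_bandwidth = 1000.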
def setIFFilters(total_bw_low, total_bw_high, ifpath):
"Returns IFRack parameters and updates paths bandpass info"
# print("setIFFilters", total_bw_low, total_bw_high)
params = []
for path in ifpath:
filter_value = "pass_all"
param = None
for fv, fLow, fHigh in IFfilters:
# fLow = f[1]
# fHigh = f[2]
# if f[1] <= total_bw_low and f[2] >= total_bw_high:
# filter_value = f[0]
if fLow <= total_bw_low and fHigh >= total_bw_high:
filter_value = fv
# print("print found good filter at", filter_value, fLow, fHigh)
break
opticalDriver = path.getFirstLikeDeviceNode("OpticalDriver")
# if "opticalDriver" in path:
if opticalDriver is not None:
# update the IFRack parameters
filterNum = opticalDriver.deviceId # path["opticalDriver"]
param = "IFRack,filter_select,{}".format(filterNum)
params.append((param, filter_value))
if filter_value != "pass_all":
# set this nodes ifInfo filter info
opticalDriver.ifInfo = IFInfo()
opticalDriver.ifInfo.filters = []
opticalDriver.ifInfo.filters.append((param, filter_value))
# then use filter range to update the bandpass at this node
bp = path.getBandpassUpToNode(opticalDriver)
bpFilter = copy(bp)
bpFilter.filter(fLow, fHigh)
bpFilter.changes = "%s, (%f, %f)" % (param, fLow, fHigh)
opticalDriver.setBandpasses([bpFilter])
# self.seq.add_param(self.mng, fs, self.filter_value)
return params
def getFilterBandpasses(bp1, rxNode):
"Return bandpasses representing filters in receiver"
bps = []
bp = copy(bp1)
for fName, fLow, fHigh in rxNode.ifInfo.filters:
bp = copy(bp)
bp.filter(fLow, fHigh)
bp.changes = "Filter %s (%f, %f)" % (fName, fLow, fHigh)
bps.append(bp)
return bps
def getLO1Bandpass(bp1, rxNode, lowerSideband=None):
"Return bandpass representing receivers LO1 mixing"
if lowerSideband is None:
s = RCVR_SIDEBAND[rxNode.device]
lowerSideband = s == -1
bpLO1 = copy(bp1)
loMixFreq = rxNode.ifInfo.lo['freq']
bpLO1.mix(loMixFreq, lowerSideband=lowerSideband)
sideband = "lower" if lowerSideband else "upper"
bpLO1.changes = "LO %s sideband at %f" % (sideband, loMixFreq)
return bpLO1
def calcFreqs(config, ifPaths, debug=False):
"Make the bandpass decisions to get our signal to the backend"
# we'll return what manager parameters are set here
params = []
# At minimum:
# set filters in receiver
# set LO1 freq
# set optical driver freqs
receiver = config['receiver']
backend = config['backend']
# first see if there's a doppler shift
if config['vframe'] is not None and config['vframe'] != 'topo':
# can't handle this
assert False
# Doppler tracking in non-topocentric frames is not supported here
if 'DOPPLERTRACKFREQ' not in config:
config['DOPPLERTRACKFREQ'] = int(config['restfreq'])
if 'lo2freq' not in config:
config['lo2freq'] = [0]
# we need the tuning freq of our receiver
tuningFreq = config['restfreq']
bwTotal = config['bandwidth']
# now we can see how the band pass changes at this stage?
# 2) set the LO1 freq to match the IF1. That seems to be 3000; always? No.
freq, vfreq, _, minMaxFreqs = setFreqWithVelocity(config, ifPaths.paths)
span = minMaxFreqs.maxf - minMaxFreqs.minf
skyFreq = minMaxFreqs.avgFreqs()
ifNom = RCVR_IF_NOMINAL[receiver]
multiplier1 = RCVR_SIDEBAND[receiver]
freqLocal = compute_Flocal(config)
if debug:
print("IF1 computed from: ", multiplier1, freqLocal, skyFreq, ifNom)
# Compute the IF frequencies! What equation is this????
if1 = (multiplier1 * (freqLocal - skyFreq) + ifNom)
if receiver == "Rcvr26_40": # W-band too!
# mmconverter!
if_offset = 44000.
if0 = if_offset - freqLocal
else:
if0 = if1
# for path in paths:
|
vlow = config['vlow']
|
random_line_split
|
configureDCR.py
|
p.name]
if len(ifxs2s) == 0:
ifxsSetting2 = None
else:
ifxsSetting2 = ifxs2s[0].port
if ifxsSetting2 == ifxsSetting1:
feed2path = ifPath
break
# are we done yet?
if feed2path is not None:
break
# return feed1path, feed2path
return IFPaths([feed1path, feed2path])
def setFreqWithVelocity(config, paths):
"TBF: explain this"
vlow = config['vlow']
vhigh = config['vhigh']
vdef = config['vdef']
# for no Doppler effect, I think we do this:
config['freq'] = config['restfreq']
config['dfreq'] = 0
user_freqs = [config["freq"]]
user_dfreqs = [config["dfreq"]]
freqs = user_freqs
dfreqs = user_dfreqs
vd = Vdef()
minMaxFreqs = MinMaxFreqs()
freqList = []
freqAvgVels = []
for freq, dfreq in zip(freqs, dfreqs):
vd.compute_local_frame_with_vdef(vdef, vhigh, freq, vlow)
minMaxFreqs.setMin(vd.cur_vlow + dfreq)
minMaxFreqs.setMax(vd.cur_vhigh + dfreq)
vel_freq = vd.get_vave() + dfreq
freqAvgVels.append(vel_freq)
freqList.append(vel_freq)
return freqList, dfreqs, freqAvgVels, minMaxFreqs
def compute_Flocal(config):
"""Compute the frequency compensated for the velociy of the source"""
vlow = config['vlow']
vhigh = config['vhigh']
vdef = config['vdef']
lo_restfreq = config["DOPPLERTRACKFREQ"]
velocity = (vlow + vhigh) * 0.5
vd = Vdef()
vd.compute_local_frame_with_vdef(vdef, velocity,
lo_restfreq, velocity)
# this better be the same as vlow since i sent in the avg
cur_vhigh = vd.get_vhigh()
cur_vlow = vd.get_vlow()
if cur_vhigh != cur_vlow:
"PANIC: How can the avg velocities differ!!!!!"
return cur_vhigh
def setRxFilters(receiver, tuning_freq, bw_total):
"""Set receiver filters:
use the first filter in the list for this receiver that
encompasses the bandwidth defined by the given
tuning frequency and bandwidth.
"""
found = 0
# bw_total = None #self.freq_calc.get_bw_total()
# if_center = self.freq_calc.get_center_freq()
# self.if_freq_low = if_center - (0.5 * bw_total)
# self.if_freq_high = if_center + (0.5 * bw_total)
# fake the bandwidth? Nope, too wide, no filter will get selected.
# TBF: we need to figure out the freq stuff better - start with compute_bw algo.
# lo, hi = RCVR_FREQS[receiver]
# bw_total = hi - lo
filter_setting = freqLow = freqHigh = None
if receiver in FILTERS:
param_names, filters, rcvr_type = FILTERS[receiver]
bw_low = tuning_freq - (0.5 * bw_total)
bw_high = tuning_freq + (0.5 * bw_total)
# if rcvr_type == 0: # Mixer before filter. Convert filter freq
# bw_low = self.if_freq_low
# bw_high = self.if_freq_high
filter_setting = filter_bandwidth = None
# Choose the first one that encompasses our bw_low and bw_high!
for freqLow, freqHigh, freqSetting in filters:
if bw_low >= freqLow and bw_high <= freqHigh:
filter_setting = freqSetting
filter_bandwidth = freqHigh - freqLow
found = 1
break
return filter_setting, freqLow, freqHigh
def setIFFilters(total_bw_low, total_bw_high, ifpath):
"Returns IFRack parameters and updates paths bandpass info"
# print("setIFFilters", total_bw_low, total_bw_high)
params = []
for path in ifpath:
filter_value = "pass_all"
param = None
for fv, fLow, fHigh in IFfilters:
# fLow = f[1]
# fHigh = f[2]
# if f[1] <= total_bw_low and f[2] >= total_bw_high:
# filter_value = f[0]
if fLow <= total_bw_low and fHigh >= total_bw_high:
filter_value = fv
# print("print found good filter at", filter_value, fLow, fHigh)
break
opticalDriver = path.getFirstLikeDeviceNode("OpticalDriver")
# if "opticalDriver" in path:
if opticalDriver is not None:
# update the IFRack parameters
filterNum = opticalDriver.deviceId # path["opticalDriver"]
param = "IFRack,filter_select,{}".format(filterNum)
params.append((param, filter_value))
if filter_value != "pass_all":
# set this nodes ifInfo filter info
opticalDriver.ifInfo = IFInfo()
opticalDriver.ifInfo.filters = []
opticalDriver.ifInfo.filters.append((param, filter_value))
# then use filter range to update the bandpass at this node
bp = path.getBandpassUpToNode(opticalDriver)
bpFilter = copy(bp)
bpFilter.filter(fLow, fHigh)
bpFilter.changes = "%s, (%f, %f)" % (param, fLow, fHigh)
opticalDriver.setBandpasses([bpFilter])
# self.seq.add_param(self.mng, fs, self.filter_value)
return params
def
|
(bp1, rxNode):
"Return bandpasses representing filters in receiver"
bps = []
bp = copy(bp1)
for fName, fLow, fHigh in rxNode.ifInfo.filters:
bp = copy(bp)
bp.filter(fLow, fHigh)
bp.changes = "Filter %s (%f, %f)" % (fName, fLow, fHigh)
bps.append(bp)
return bps
def getLO1Bandpass(bp1, rxNode, lowerSideband=None):
"Return bandpass representing receivers LO1 mixing"
if lowerSideband is None:
s = RCVR_SIDEBAND[rxNode.device]
lowerSideband = s == -1
bpLO1 = copy(bp1)
loMixFreq = rxNode.ifInfo.lo['freq']
bpLO1.mix(loMixFreq, lowerSideband=lowerSideband)
sideband = "lower" if lowerSideband else "upper"
bpLO1.changes = "LO %s sideband at %f" % (sideband, loMixFreq)
return bpLO1
def calcFreqs(config, ifPaths, debug=False):
"Make the bandpass decisions to get our signal to the backend"
# we'll return what manager parameters are set here
params = []
# At minimum:
# set filters in receiver
# set LO1 freq
# set optical driver freqs
receiver = config['receiver']
backend = config['backend']
# first see if there's a doppler shift
if config['vframe'] is not None and config['vframe'] != 'topo':
# can't handle this
assert False
# Doppler tracking in non-topocentric frames is not supported here
if 'DOPPLERTRACKFREQ' not in config:
config['DOPPLERTRACKFREQ'] = int(config['restfreq'])
if 'lo2freq' not in config:
config['lo2freq'] = [0]
# we need the tuning freq of our receiver
tuningFreq = config['restfreq']
bwTotal = config['bandwidth']
# now we can see how the band pass changes at this stage?
# 2) set the LO1 freq to match the IF1. That seems to be 3000; always? No.
freq, vfreq, _, minMaxFreqs = setFreqWithVelocity(config, ifPaths.paths)
span = minMaxFreqs.maxf - minMaxFreqs.minf
skyFreq = minMaxFreqs.avgFreqs()
ifNom = RCVR_IF_NOMINAL[receiver]
multiplier1 = RCVR_SIDEBAND[receiver]
freqLocal = compute_Flocal(config)
if debug:
print("IF1 computed from: ", multiplier1, freqLocal, skyFreq, ifNom)
# Compute the IF frequencies! What equation is this????
if1 = (multiplier1 * (freqLocal - skyFreq) + ifNom)
if receiver == "Rcvr26_40": # W-band too!
# mmconverter!
if_offset = 44000.
if0 = if_offset - freqLocal
else:
if0 = if1
# for path in paths:
|
getFilterBandpasses
|
identifier_name
|
configureDCR.py
|
.append((paramName, filterSetting))
# start calculating band pass info
for path in ifPaths.paths:
frontendNode = path.path[0]
frontendNode.ifInfo = IFInfo()
if filterSetting is not None:
frontendNode.ifInfo.filters = [(filterSetting, filterLo, filterHi)]
else:
frontendNode.ifInfo.filters = []
# now we can see how the band pass changes by mixing in the LO1
# LO1 params set:
velocity = (config["vlow"] + config["vhigh"])/2.0
velframe = vframe[config["vframe"]]
centerFreq = if0 # If no adjustments needed!
# print("LO1,restFrequency", config["DOPPLERTRACKFREQ"])
# print("LO1,velocityDefinition", config["vdef"])
# print("LO1,sourceVelocity,position", velocity)
# print("LO1,restFrame", velframe)
# print("LO1,ifCenterFreq", centerFreq)
# print("LO1 derived mixing freq", centerFreq + config["DOPPLERTRACKFREQ"])
# record LO details for band pass info
loMixFreq = centerFreq + config['restfreq']
for path in ifPaths.paths:
# path[0].ifInfo = IFInfo()
frontendNode = path.path[0]
frontendNode.ifInfo.lo = {'freq': loMixFreq}
# calculate LO1 parameters
# params.append(("LO1,restFrequency", int(config["DOPPLERTRACKFREQ"])))
# params.append(("LO1,velocityDefinition", config["vdef"]))
params.append(("LO1,sourceVelocity,position", velocity))
# params.append(("LO1,restFrame", velframe))
# params.append(("LO1,ifCenterFreq", centerFreq))
# TBF: for now use config dct to pass this along
config['center_freq'] = centerFreq
# we're now ready to setup the bandpasses in the receiver
for path in ifPaths.paths:
# setup the receiver bandpasses
# path[0].ifInfo = IFInfo()
# initial bandpass for this receiver
low, high = RCVR_FREQS[receiver]
bpFeed = Bandpass(lo=low, hi=high, target=config['restfreq'])
bpFeed.changes = 'feed'
bps = [bpFeed]
rxNode = path.path[0]
# TBF: check the receiver type for filter-lo1 order
if receiver in LO1_FIRST_RXS: #["Rcvr8_10"]:
bps.append(getLO1Bandpass(bpFeed, rxNode))
bps.extend(getFilterBandpasses(bps[-1], rxNode))
else:
bps.extend(getFilterBandpasses(bpFeed, rxNode))
bps.append(getLO1Bandpass(bps[-1], rxNode))
rxNode.setBandpasses(bps)
# What about the IF2, LO2 settings?
# Does not matter for DCR.
if3s = copy(IF3[config["backend"]])
# print("IFs:", if0, if1, if3s)
# Calculate the filters in the IF Rack
# lo1aFreq = (self.flocal + self.if1) / 2.0
# RCVR_IF_NOMINAL[receiver]
centerFreq = RCVR_IF_NOMINAL[receiver]
low = centerFreq - (bwTotal*.5)
high = centerFreq + (bwTotal*.5)
# we will record the bandpass info in this function
ifRackFilters = setIFFilters(low, high, ifPaths.paths)
# print("ifRackFilters: ", ifRackFilters)
for f in ifRackFilters:
params.append(f)
# now that we're done, print the bandpasses to make sure
# that we got it right
if debug:
print("paths", ifPaths)
ifPaths.printFreqs()
vdef = config["vdef"]
ifSystem = IFSystem(
ifPaths,
receiver,
if1,
centerFreq,
tuningFreq,
bwTotal,
velocity,
vdef,
vframe
)
# return the values added paths
return ifSystem
# return params
def getDCRContinuumParams(config, ifSys):
"Set obvious manager parameters for these types of observations"
ifPaths = ifSys.ifPaths
# simple enough!
motorRack = ('MotorRack,receiver', config['receiver'])
dcr = getDCRParams(config, ifPaths)
scSubsystem = getScanCoordinatorDCRContinuumSysParams(config)
# TBF: are these correct?
tuningFreq = ifSys.tuningFreq #config['restfreq']
centerFreq = str(config['center_freq'])
velocity = ifSys.velocity # 0.
vdef = ifSys.vdef #config['vdef']
rxMgr = Receiver(config, ifSys) #tuning_freq=tuningFreq, bw_total=ifSys.bwTotal, if_center=tuningFreq)
rxMgr.setParams()
# s12 = 4
s12 = None
lo1 = LO1(config, tuningFreq, centerFreq, velocity, vdef, s12_value=s12)
ifRack = IFRack(config, ifSys, rxMgr)
# TBF: why is this singled out to be called last in config tool?
ifRack.set_laser_power()
# TBF: what else?
# put them together
params = [
motorRack,
]
params.extend(dcr)
params.extend(scSubsystem)
params.extend(rxMgr.getParams())
params.extend(lo1.getParams())
params.extend(ifRack.getParams())
return params
def getScanCoordinatorDCRContinuumSysParams(config):
"Use the config info to set the managers ScanCoordinator will use"
sc = ScanCoordinator(config)
sc.findParameterValues()
return sc.getParams()
def getDCRParams(config, paths):
"Use the paths information to determine some DCR params"
dcr = DCR(config, paths)
dcr.findDCRParams()
return dcr.getParams()
def addMissingKeywords(config):
"Config tool expands all configs to more then 50 values."
# Here we'll just add what we need as we go
ks = [
# needed by Receiver.setReceiverDefaults
'polarization',
'notchfilter',
'polswitch',
'beamswitch',
'xfer',
# LO1
'phasecal',
# IFRack
'iftarget',
]
# add them
for k in ks:
if k not in config:
config[k] = None
def configureDCR(config, pathsFile=None, debug=False, firstBackendNode=None):
"""
The Main Event.
Here we are given a standard config tool dictionary. We return
the resultant paths (lists of IFPathNodes) and the manager params.
"""
# Step 1: expand user's configuration
# first let's add any necessary missing keywords
addMissingKeywords(config)
# let's expand the configuration with defaults
rxMgr = Receiver(config)
rxMgr.setReceiverDefaultsForConfig(config)
# Step 2: find IF paths
# now find how we're getting from our rx to the DCR
paths = getDCRPaths(config, pathsFile=pathsFile, debug=debug, firstBackendNode=firstBackendNode)
# Step 3: define bandpasses
# now add frequency info to these paths, and return some
# related manager parameters
ifSys = calcFreqs(config, paths, debug=debug)
# Last Step: translate everything into manager parameters
params = []
# get more parameters: First ones from the DB
params.extend(getDBParamsFromConfig(config, dct=False, tuples=True))
# then some really simple ones from what we've done so far
params.extend(getDCRContinuumParams(config, ifSys))
return ifSys, params
def testKFPA():
|
"Mimics Configure('Continuum with RcvrArray18_26')"
# configure from DB
config = {
'receiver' : 'RcvrArray18_26', # changes from other 'Continuum with *' scripts
'beam' : '4,6',
'obstype' : 'Continuum',
'backend' : 'DCR', # 'DCR_AF' used by config tool to enforce Analog Filter rack routing
'nwin' : 1,
'restfreq' : 2500, # changes
'deltafreq' : 0,
'bandwidth' : 800, # changed from 80!
'swmode' : "tp",
'swtype' : "none",
'swper' : 0.1,
# 'swfreq' : 0,0,
'tint' : 0.1,
'vlow' : 0.0,
'vhigh' : 0.0,
|
identifier_body
|
|
datautils.py
|
# class names are subdirectory names in Preproc/ directory
def get_class_names(path="Preproc/Train/", sort=True):
if (sort):
class_names = sorted(list(listdir_nohidden(path, subdirs_only=True))) # sorted alphabetically for consistency with "ls" command
else:
class_names = listdir_nohidden(path) # not in the same order as "ls", because Python's listdir order is arbitrary
return class_names
def get_total_files(class_names, path="Preproc/Train/"):
sum_total = 0
for subdir in class_names:
files = os.listdir(path+subdir)
n_files = len(files)
sum_total += n_files
return sum_total
def scale_to_uint8(float_img):
#out_img = 255*(float_img - np.min(float_img))/np.ptp(float_img).astype(np.uint8)
out_img = img_as_ubyte( (float_img-np.min(float_img))/np.ptp(float_img) )
return out_img
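# Example (assuming skimage's img_as_ubyte semantics): an input of [-1.0, 0.0, 1.0] is
# normalized to [0.0, 0.5, 1.0] and converted to uint8 as roughly [0, 128, 255].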
def save_melgram(outfile, melgram, out_format='npz'):
channels = melgram.shape[3]
melgram = melgram.astype(np.float16)
if (('jpeg' == out_format) or ('png' == out_format)) and (channels <=4):
melgram = np.squeeze(melgram) # squeeze gets rid of dimensions of batch_size 1
#melgram = np.moveaxis(melgram, 1, 3).squeeze() # we use the 'channels_first' in tensorflow, but images have channels_first. squeeze removes unit-size axes
melgram = np.flip(melgram, 0) # flip spectrogram image right-side-up before saving, for viewing
if (2 == channels): # special case: image formats handle 1 (greyscale), 3 (RGB) or 4 (RGBA) channels, but not 2
# pad a third channel of zeros (for blue), so channels becomes 3
# TODO: this is SLOWWW
b = np.zeros((melgram.shape[0], melgram.shape[1], 3)) # 3-channel array of zeros
b[:,:,:-1] = melgram # fill the zeros on the 1st 2 channels
imwrite(outfile, scale_to_uint8(b), format=out_format)
else:
imwrite(outfile, scale_to_uint8(melgram), format=out_format)
elif ('npy' == out_format):
np.save(outfile, melgram)
else:
np.savez_compressed(outfile,melgram=melgram) # default is compressed npz file
return
def load_audio(audio_path, mono=None, sr=None, convertOSXaliases=True): # wrapper for librosa.load
try:
signal, sr = librosa.load(audio_path, mono=mono, sr=sr)
except NoBackendError as e:
if ('Darwin' == platform.system()): # handle OS X alias files gracefully
source = resolve_osx_alias(audio_path, convert=convertOSXaliases, already_checked_os=True) # convert to symlinks for next time
try:
signal, sr = librosa.load(source, mono=mono, sr=sr)
except NoBackendError as e:
print("\n*** ERROR: Could not open audio file {}".format(audio_path),"\n",flush=True)
raise e
else:
print("\n*** ERROR: Could not open audio file {}".format(audio_path),"\n",flush=True)
raise e
return signal, sr
def load_melgram(file_path):
#auto-detect load method based on filename extension
name, extension = os.path.splitext(file_path)
if ('.npy' == extension):
melgram = np.load(file_path)
elif ('.npz' == extension): # compressed npz file (preferred)
with np.load(file_path) as data:
melgram = data['melgram']
elif ('.png' == extension) or ('.jpeg' == extension):
arr = imread(file_path)
melgram = np.reshape(arr, (1,arr.shape[0],arr.shape[1],1)) # convert 2-d image
melgram = np.flip(melgram, 0) # we save images 'rightside up' but librosa internally presents them 'upside down'
else:
print("load_melgram: Error: unrecognized file extension '",extension,"' for file ",file_path,sep="")
#print("melgram.shape = ",melgram.shape)
return melgram
def get_sample_dimensions(class_names, path='Preproc/Train/'):
classname = class_names[0]
audio_path = path + classname + '/'
infilename = os.listdir(audio_path)[0]
melgram = load_melgram(audio_path+infilename)
print(" get_sample_dimensions: "+infilename+": melgram.shape = ",melgram.shape)
return melgram.shape
def encode_class(class_name, class_names, label_smoothing=0.005):
# makes a "one-hot" vector for each class name called
# label_smoothing is a parameter to make the training more robust to mislabeled data
try:
idx = class_names.index(class_name)
num_classes = len(class_names)
vec = np.zeros(num_classes)
vec[idx] = 1
if label_smoothing > 0:
vec = vec * (1 - label_smoothing) + label_smoothing / num_classes
return vec
except ValueError:
return None
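# Example: with class_names = ["dog", "cat", "bird", "fish"] and label_smoothing = 0.005,
# encode_class("dog", class_names) returns
# [0.99625, 0.00125, 0.00125, 0.00125]  (i.e. 0.995 * one_hot + 0.005 / 4).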
def decode_class(vec, class_names): # generates a number from the one-hot vector
return int(np.argmax(vec))
def shuffle_XY_paths(X,Y,paths): # generates a randomized order, keeping X&Y(&paths) together
assert (X.shape[0] == Y.shape[0] )
#print("shuffle_XY_paths: Y.shape[0], len(paths) = ",Y.shape[0], len(paths))
idx = np.array(range(Y.shape[0]))
np.random.shuffle(idx)
newX = np.copy(X)
newY = np.copy(Y)
newpaths = paths[:]
for i in range(len(idx)):
newX[i] = X[idx[i],:,:]
newY[i] = Y[idx[i],:]
newpaths[i] = paths[idx[i]]
return newX, newY, newpaths
def make_melgram(mono_sig, sr, n_mels=128): # @keunwoochoi upgraded from 96 to 128 mel bins in kapre
#melgram = librosa.logamplitude(librosa.feature.melspectrogram(mono_sig, # latest librosa deprecated logamplitude in favor of amplitude_to_db
# sr=sr, n_mels=96),ref_power=1.0)[np.newaxis,np.newaxis,:,:]
melgram = librosa.amplitude_to_db(librosa.feature.melspectrogram(mono_sig,
sr=sr, n_mels=n_mels))[np.newaxis,:,:,np.newaxis] # last newaxis is b/c tensorflow wants 'channels_last' order
'''
# librosa docs also include a perceptual CQT example:
CQT = librosa.cqt(mono_sig, sr=sr, fmin=librosa.note_to_hz('A1'))
freqs = librosa.cqt_frequencies(CQT.shape[0], fmin=librosa.note_to_hz('A1'))
perceptual_CQT = librosa.perceptual_weighting(CQT**2, freqs, ref=np.max)
melgram = perceptual_CQT[np.newaxis,np.newaxis,:,:]
'''
return melgram
def make_phase_gram(mono_sig, sr, n_bins=128):
stft = librosa.stft(mono_sig)#, n_fft = (2*n_bins)-1)
magnitude, phase = librosa.magphase(stft) # we don't need magnitude
# resample the phase array to match n_bins
phase = np.resize(phase, (n_bins, phase.shape[1]))[np.newaxis,:,:,np.newaxis]
return phase
# turn multichannel audio into multiple melgram layers
def make_layered_melgram(signal, sr, mels=128, phase=False):
if (signal.ndim == 1): # given the way the preprocessing code is now, this may not get called
signal = np.reshape( signal, (1,signal.shape[0]))
# get mel-spectrogram for each channel, and layer them into multi-dim array
for channel in range(signal.shape[0]):
melgram = make_melgram(signal[channel],sr, n_mels=mels)
if (0 == channel):
layers = melgram
else:
layers = np.append(layers,melgram,axis=3) # we keep axis=0 free for keras batches, axis=3 means 'channels_last'
if (phase):
phasegram = make_phase_gram(signal[channel],sr, n_bins=mels)
layers = np.append(layers,phasegram,axis=3)
return layers
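# Shape note: each channel contributes a (1, mels, n_frames, 1) melgram stacked on the last
# axis, so a 2-channel signal yields layers of shape (1, mels, n_frames, 2), or
# (1, mels, n_frames, 4) when phase=True (one melgram plus one phase-gram per channel).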
def nearest_multiple( a, b ): # returns the largest multiple of b that does not exceed a
return int(a/b) * b
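# Examples: nearest_multiple(10, 4) == 8, nearest_multiple(12, 4) == 12; build_dataset()
# below uses this to trim the number of loaded samples to a multiple of batch_size.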
# can be used for test dataset as well
def
|
random_line_split
|
||
datautils.py
|
melgram, out_format='npz'):
channels = melgram.shape[3]
melgram = melgram.astype(np.float16)
if (('jpeg' == out_format) or ('png' == out_format)) and (channels <=4):
melgram = np.squeeze(melgram) # squeeze gets rid of dimensions of batch_size 1
#melgram = np.moveaxis(melgram, 1, 3).squeeze() # we use the 'channels_first' in tensorflow, but images have channels_first. squeeze removes unit-size axes
melgram = np.flip(melgram, 0) # flip spectrogram image right-side-up before saving, for viewing
if (2 == channels): # special case: image formats handle 1 (greyscale), 3 (RGB) or 4 (RGBA) channels, but not 2
# pad a third channel of zeros (for blue), so channels becomes 3
# TODO: this is SLOWWW
b = np.zeros((melgram.shape[0], melgram.shape[1], 3)) # 3-channel array of zeros
b[:,:,:-1] = melgram # fill the zeros on the 1st 2 channels
imwrite(outfile, scale_to_uint8(b), format=out_format)
else:
imwrite(outfile, scale_to_uint8(melgram), format=out_format)
elif ('npy' == out_format):
np.save(outfile, melgram)
else:
np.savez_compressed(outfile,melgram=melgram) # default is compressed npz file
return
def load_audio(audio_path, mono=None, sr=None, convertOSXaliases=True): # wrapper for librosa.load
try:
signal, sr = librosa.load(audio_path, mono=mono, sr=sr)
except NoBackendError as e:
if ('Darwin' == platform.system()): # handle OS X alias files gracefully
source = resolve_osx_alias(audio_path, convert=convertOSXaliases, already_checked_os=True) # convert to symlinks for next time
try:
signal, sr = librosa.load(source, mono=mono, sr=sr)
except NoBackendError as e:
print("\n*** ERROR: Could not open audio file {}".format(audio_path),"\n",flush=True)
raise e
else:
print("\n*** ERROR: Could not open audio file {}".format(audio_path),"\n",flush=True)
raise e
return signal, sr
def load_melgram(file_path):
#auto-detect load method based on filename extension
name, extension = os.path.splitext(file_path)
if ('.npy' == extension):
melgram = np.load(file_path)
elif ('.npz' == extension): # compressed npz file (preferred)
with np.load(file_path) as data:
melgram = data['melgram']
elif ('.png' == extension) or ('.jpeg' == extension):
arr = imread(file_path)
melgram = np.reshape(arr, (1,arr.shape[0],arr.shape[1],1)) # convert 2-d image
melgram = np.flip(melgram, 0) # we save images 'rightside up' but librosa internally presents them 'upside down'
else:
print("load_melgram: Error: unrecognized file extension '",extension,"' for file ",file_path,sep="")
#print("melgram.shape = ",melgram.shape)
return melgram
def get_sample_dimensions(class_names, path='Preproc/Train/'):
classname = class_names[0]
audio_path = path + classname + '/'
infilename = os.listdir(audio_path)[0]
melgram = load_melgram(audio_path+infilename)
print(" get_sample_dimensions: "+infilename+": melgram.shape = ",melgram.shape)
return melgram.shape
def encode_class(class_name, class_names, label_smoothing=0.005):
# makes a "one-hot" vector for each class name called
# label_smoothing is a parameter to make the training more robust to mislabeled data
try:
idx = class_names.index(class_name)
num_classes = len(class_names)
vec = np.zeros(num_classes)
vec[idx] = 1
if label_smoothing > 0:
vec = vec * (1 - label_smoothing) + label_smoothing / num_classes
return vec
except ValueError:
return None
def decode_class(vec, class_names): # generates a number from the one-hot vector
return int(np.argmax(vec))
def shuffle_XY_paths(X,Y,paths): # generates a randomized order, keeping X&Y(&paths) together
assert (X.shape[0] == Y.shape[0] )
#print("shuffle_XY_paths: Y.shape[0], len(paths) = ",Y.shape[0], len(paths))
idx = np.array(range(Y.shape[0]))
np.random.shuffle(idx)
newX = np.copy(X)
newY = np.copy(Y)
newpaths = paths[:]
for i in range(len(idx)):
newX[i] = X[idx[i],:,:]
newY[i] = Y[idx[i],:]
newpaths[i] = paths[idx[i]]
return newX, newY, newpaths
def make_melgram(mono_sig, sr, n_mels=128): # @keunwoochoi upgraded from 96 to 128 mel bins in kapre
#melgram = librosa.logamplitude(librosa.feature.melspectrogram(mono_sig, # latest librosa deprecated logamplitude in favor of amplitude_to_db
# sr=sr, n_mels=96),ref_power=1.0)[np.newaxis,np.newaxis,:,:]
melgram = librosa.amplitude_to_db(librosa.feature.melspectrogram(mono_sig,
sr=sr, n_mels=n_mels))[np.newaxis,:,:,np.newaxis] # last newaxis is b/c tensorflow wants 'channels_last' order
'''
# librosa docs also include a perceptual CQT example:
CQT = librosa.cqt(mono_sig, sr=sr, fmin=librosa.note_to_hz('A1'))
freqs = librosa.cqt_frequencies(CQT.shape[0], fmin=librosa.note_to_hz('A1'))
perceptual_CQT = librosa.perceptual_weighting(CQT**2, freqs, ref=np.max)
melgram = perceptual_CQT[np.newaxis,np.newaxis,:,:]
'''
return melgram
def make_phase_gram(mono_sig, sr, n_bins=128):
stft = librosa.stft(mono_sig)#, n_fft = (2*n_bins)-1)
magnitude, phase = librosa.magphase(stft) # we don't need magnitude
# resample the phase array to match n_bins
phase = np.resize(phase, (n_bins, phase.shape[1]))[np.newaxis,:,:,np.newaxis]
return phase
# turn multichannel audio into multiple melgram layers
def make_layered_melgram(signal, sr, mels=128, phase=False):
if (signal.ndim == 1): # given the way the preprocessing code is now, this may not get called
signal = np.reshape( signal, (1,signal.shape[0]))
# get mel-spectrogram for each channel, and layer them into multi-dim array
for channel in range(signal.shape[0]):
melgram = make_melgram(signal[channel],sr, n_mels=mels)
if (0 == channel):
layers = melgram
else:
layers = np.append(layers,melgram,axis=3) # we keep axis=0 free for keras batches, axis=3 means 'channels_last'
if (phase):
phasegram = make_phase_gram(signal[channel],sr, n_bins=mels)
layers = np.append(layers,phasegram,axis=3)
return layers
def nearest_multiple( a, b ): # returns the largest multiple of b that does not exceed a
return int(a/b) * b
# can be used for test dataset as well
def build_dataset(path="Preproc/Train/", load_frac=1.0, batch_size=None, tile=False, max_per_class=0):
|
class_names = get_class_names(path=path)
print("class_names = ",class_names)
nb_classes = len(class_names)
total_files = get_total_files(class_names, path=path)
total_load = int(total_files * load_frac)
if max_per_class > 0:
total_load = min( total_load, max_per_class * nb_classes)
if (batch_size is not None): # keras gets particular: dataset size must be mult. of batch_size
total_load = nearest_multiple( total_load, batch_size)
print(" total files = ",total_files,", going to load total_load = ",total_load)
print("total files = ",total_files,", going to load total_load = ",total_load)
# pre-allocate memory for speed (old method used np.concatenate, slow)
mel_dims = get_sample_dimensions(class_names,path=path) # get dims of sample data file
if (tile):
|
identifier_body
|
|
datautils.py
|
= len(files)
sum_total += n_files
return sum_total
def
|
(float_img):
#out_img = 255*(float_img - np.min(float_img))/np.ptp(float_img).astype(np.uint8)
out_img = img_as_ubyte( (float_img-np.min(float_img))/np.ptp(float_img) )
return out_img
def save_melgram(outfile, melgram, out_format='npz'):
channels = melgram.shape[3]
melgram = melgram.astype(np.float16)
if (('jpeg' == out_format) or ('png' == out_format)) and (channels <=4):
melgram = np.squeeze(melgram) # squeeze gets rid of dimensions of batch_size 1
#melgram = np.moveaxis(melgram, 1, 3).squeeze() # we use the 'channels_first' in tensorflow, but images have channels_first. squeeze removes unit-size axes
melgram = np.flip(melgram, 0) # flip spectrogram image right-side-up before saving, for viewing
if (2 == channels): # special case: image formats handle 1 (greyscale), 3 (RGB) or 4 (RGBA) channels, but not 2
# pad a third channel of zeros (for blue), so channels becomes 3
# TODO: this is SLOWWW
b = np.zeros((melgram.shape[0], melgram.shape[1], 3)) # 3-channel array of zeros
b[:,:,:-1] = melgram # fill the zeros on the 1st 2 channels
imwrite(outfile, scale_to_uint8(b), format=out_format)
else:
imwrite(outfile, scale_to_uint8(melgram), format=out_format)
elif ('npy' == out_format):
np.save(outfile, melgram)
else:
np.savez_compressed(outfile,melgram=melgram) # default is compressed npz file
return
def load_audio(audio_path, mono=None, sr=None, convertOSXaliases=True): # wrapper for librosa.load
try:
signal, sr = librosa.load(audio_path, mono=mono, sr=sr)
except NoBackendError as e:
if ('Darwin' == platform.system()): # handle OS X alias files gracefully
source = resolve_osx_alias(audio_path, convert=convertOSXaliases, already_checked_os=True) # convert to symlinks for next time
try:
signal, sr = librosa.load(source, mono=mono, sr=sr)
except NoBackendError as e:
print("\n*** ERROR: Could not open audio file {}".format(audio_path),"\n",flush=True)
raise e
else:
print("\n*** ERROR: Could not open audio file {}".format(audio_path),"\n",flush=True)
raise e
return signal, sr
def load_melgram(file_path):
#auto-detect load method based on filename extension
name, extension = os.path.splitext(file_path)
if ('.npy' == extension):
melgram = np.load(file_path)
elif ('.npz' == extension): # compressed npz file (preferred)
with np.load(file_path) as data:
melgram = data['melgram']
elif ('.png' == extension) or ('.jpeg' == extension):
arr = imread(file_path)
melgram = np.reshape(arr, (1,arr.shape[0],arr.shape[1],1)) # convert 2-d image
melgram = np.flip(melgram, 0) # we save images 'rightside up' but librosa internally presents them 'upside down'
else:
print("load_melgram: Error: unrecognized file extension '",extension,"' for file ",file_path,sep="")
#print("melgram.shape = ",melgram.shape)
return melgram
def get_sample_dimensions(class_names, path='Preproc/Train/'):
classname = class_names[0]
audio_path = path + classname + '/'
infilename = os.listdir(audio_path)[0]
melgram = load_melgram(audio_path+infilename)
print(" get_sample_dimensions: "+infilename+": melgram.shape = ",melgram.shape)
return melgram.shape
def encode_class(class_name, class_names, label_smoothing=0.005):
# makes a "one-hot" vector for each class name called
# label_smoothing is a parameter to make the training more robust to mislabeled data
try:
idx = class_names.index(class_name)
num_classes = len(class_names)
vec = np.zeros(num_classes)
vec[idx] = 1
if label_smoothing > 0:
vec = vec * (1 - label_smoothing) + label_smoothing / num_classes
return vec
except ValueError:
return None
def decode_class(vec, class_names): # generates a number from the one-hot vector
return int(np.argmax(vec))
def shuffle_XY_paths(X,Y,paths): # generates a randomized order, keeping X&Y(&paths) together
assert (X.shape[0] == Y.shape[0] )
#print("shuffle_XY_paths: Y.shape[0], len(paths) = ",Y.shape[0], len(paths))
idx = np.array(range(Y.shape[0]))
np.random.shuffle(idx)
newX = np.copy(X)
newY = np.copy(Y)
newpaths = paths[:]
for i in range(len(idx)):
newX[i] = X[idx[i],:,:]
newY[i] = Y[idx[i],:]
newpaths[i] = paths[idx[i]]
return newX, newY, newpaths
def make_melgram(mono_sig, sr, n_mels=128): # @keunwoochoi upgraded from 96 to 128 mel bins in kapre
#melgram = librosa.logamplitude(librosa.feature.melspectrogram(mono_sig, # latest librosa deprecated logamplitude in favor of amplitude_to_db
# sr=sr, n_mels=96),ref_power=1.0)[np.newaxis,np.newaxis,:,:]
melgram = librosa.amplitude_to_db(librosa.feature.melspectrogram(mono_sig,
sr=sr, n_mels=n_mels))[np.newaxis,:,:,np.newaxis] # last newaxis is b/c tensorflow wants 'channels_last' order
'''
# librosa docs also include a perceptual CQT example:
CQT = librosa.cqt(mono_sig, sr=sr, fmin=librosa.note_to_hz('A1'))
freqs = librosa.cqt_frequencies(CQT.shape[0], fmin=librosa.note_to_hz('A1'))
perceptual_CQT = librosa.perceptual_weighting(CQT**2, freqs, ref=np.max)
melgram = perceptual_CQT[np.newaxis,np.newaxis,:,:]
'''
return melgram
def make_phase_gram(mono_sig, sr, n_bins=128):
stft = librosa.stft(mono_sig)#, n_fft = (2*n_bins)-1)
magnitude, phase = librosa.magphase(stft) # we don't need magnitude
# resample the phase array to match n_bins
phase = np.resize(phase, (n_bins, phase.shape[1]))[np.newaxis,:,:,np.newaxis]
return phase
# turn multichannel audio into multiple melgram layers
def make_layered_melgram(signal, sr, mels=128, phase=False):
if (signal.ndim == 1): # given the way the preprocessing code is now, this may not get called
signal = np.reshape( signal, (1,signal.shape[0]))
# get mel-spectrogram for each channel, and layer them into multi-dim array
for channel in range(signal.shape[0]):
melgram = make_melgram(signal[channel],sr, n_mels=mels)
if (0 == channel):
layers = melgram
else:
layers = np.append(layers,melgram,axis=3) # we keep axis=0 free for keras batches, axis=3 means 'channels_last'
if (phase):
phasegram = make_phase_gram(signal[channel],sr, n_bins=mels)
layers = np.append(layers,phasegram,axis=3)
return layers
def nearest_multiple( a, b ): # returns the largest multiple of b that does not exceed a
return int(a/b) * b
# can be used for test dataset as well
def build_dataset(path="Preproc/Train/", load_frac=1.0, batch_size=None, tile=False, max_per_class=0):
class_names = get_class_names(path=path)
print("class_names = ",class_names)
nb_classes = len(class_names)
total_files = get_total_files(class_names, path=path)
total_load = int(total_files * load_frac)
if max_per_class > 0:
total_load = min( total_load, max_per_class * nb_classes)
if (batch_size is not None): # keras gets particular: dataset size must be mult. of batch_size
total_load = nearest_multiple
|
scale_to_uint8
|
identifier_name
|
datautils.py
|
_total += n_files
return sum_total
def scale_to_uint8(float_img):
#out_img = 255*(float_img - np.min(float_img))/np.ptp(float_img).astype(np.uint8)
out_img = img_as_ubyte( (float_img-np.min(float_img))/np.ptp(float_img) )
return out_img
def save_melgram(outfile, melgram, out_format='npz'):
channels = melgram.shape[3]
melgram = melgram.astype(np.float16)
if (('jpeg' == out_format) or ('png' == out_format)) and (channels <=4):
melgram = np.squeeze(melgram) # squeeze gets rid of dimensions of batch_size 1
#melgram = np.moveaxis(melgram, 1, 3).squeeze() # we use the 'channels_first' in tensorflow, but images have channels_first. squeeze removes unit-size axes
melgram = np.flip(melgram, 0) # flip spectrogram image right-side-up before saving, for viewing
if (2 == channels): # special case: image formats handle 1 (greyscale), 3 (RGB) or 4 (RGBA) channels, but not 2
# pad a third channel of zeros (for blue), so channels becomes 3
# TODO: this is SLOWWW
b = np.zeros((melgram.shape[0], melgram.shape[1], 3)) # 3-channel array of zeros
b[:,:,:-1] = melgram # fill the zeros on the 1st 2 channels
imwrite(outfile, scale_to_uint8(b), format=out_format)
else:
imwrite(outfile, scale_to_uint8(melgram), format=out_format)
elif ('npy' == out_format):
np.save(outfile, melgram)
else:
np.savez_compressed(outfile,melgram=melgram) # default is compressed npz file
return
def load_audio(audio_path, mono=None, sr=None, convertOSXaliases=True): # wrapper for librosa.load
try:
signal, sr = librosa.load(audio_path, mono=mono, sr=sr)
except NoBackendError as e:
if ('Darwin' == platform.system()): # handle OS X alias files gracefully
source = resolve_osx_alias(audio_path, convert=convertOSXaliases, already_checked_os=True) # convert to symlinks for next time
try:
signal, sr = librosa.load(source, mono=mono, sr=sr)
except NoBackendError as e:
print("\n*** ERROR: Could not open audio file {}".format(audio_path),"\n",flush=True)
raise e
else:
print("\n*** ERROR: Could not open audio file {}".format(audio_path),"\n",flush=True)
raise e
return signal, sr
def load_melgram(file_path):
#auto-detect load method based on filename extension
name, extension = os.path.splitext(file_path)
if ('.npy' == extension):
melgram = np.load(file_path)
elif ('.npz' == extension): # compressed npz file (preferred)
with np.load(file_path) as data:
melgram = data['melgram']
elif ('.png' == extension) or ('.jpeg' == extension):
arr = imread(file_path)
melgram = np.reshape(arr, (1,arr.shape[0],arr.shape[1],1)) # convert 2-d image
melgram = np.flip(melgram, 0) # we save images 'rightside up' but librosa internally presents them 'upside down'
else:
print("load_melgram: Error: unrecognized file extension '",extension,"' for file ",file_path,sep="")
#print("melgram.shape = ",melgram.shape)
return melgram
def get_sample_dimensions(class_names, path='Preproc/Train/'):
classname = class_names[0]
audio_path = path + classname + '/'
infilename = os.listdir(audio_path)[0]
melgram = load_melgram(audio_path+infilename)
print(" get_sample_dimensions: "+infilename+": melgram.shape = ",melgram.shape)
return melgram.shape
def encode_class(class_name, class_names, label_smoothing=0.005):
# makes a "one-hot" vector for each class name called
# label_smoothing is a parameter to make the training more robust to mislabeled data
try:
idx = class_names.index(class_name)
num_classes = len(class_names)
vec = np.zeros(num_classes)
vec[idx] = 1
if label_smoothing > 0:
vec = vec * (1 - label_smoothing) + label_smoothing / num_classes
return vec
except ValueError:
return None
def decode_class(vec, class_names): # generates a number from the one-hot vector
return int(np.argmax(vec))
def shuffle_XY_paths(X,Y,paths): # generates a randomized order, keeping X&Y(&paths) together
assert (X.shape[0] == Y.shape[0] )
#print("shuffle_XY_paths: Y.shape[0], len(paths) = ",Y.shape[0], len(paths))
idx = np.array(range(Y.shape[0]))
np.random.shuffle(idx)
newX = np.copy(X)
newY = np.copy(Y)
newpaths = paths[:]
for i in range(len(idx)):
newX[i] = X[idx[i],:,:]
newY[i] = Y[idx[i],:]
newpaths[i] = paths[idx[i]]
return newX, newY, newpaths
def make_melgram(mono_sig, sr, n_mels=128): # @keunwoochoi upgraded from 96 to 128 mel bins in kapre
#melgram = librosa.logamplitude(librosa.feature.melspectrogram(mono_sig, # latest librosa deprecated logamplitude in favor of amplitude_to_db
# sr=sr, n_mels=96),ref_power=1.0)[np.newaxis,np.newaxis,:,:]
melgram = librosa.amplitude_to_db(librosa.feature.melspectrogram(mono_sig,
sr=sr, n_mels=n_mels))[np.newaxis,:,:,np.newaxis] # last newaxis is b/c tensorflow wants 'channels_last' order
'''
# librosa docs also include a perceptual CQT example:
CQT = librosa.cqt(mono_sig, sr=sr, fmin=librosa.note_to_hz('A1'))
freqs = librosa.cqt_frequencies(CQT.shape[0], fmin=librosa.note_to_hz('A1'))
perceptual_CQT = librosa.perceptual_weighting(CQT**2, freqs, ref=np.max)
melgram = perceptual_CQT[np.newaxis,np.newaxis,:,:]
'''
return melgram
def make_phase_gram(mono_sig, sr, n_bins=128):
stft = librosa.stft(mono_sig)#, n_fft = (2*n_bins)-1)
magnitude, phase = librosa.magphase(stft) # we don't need magnitude
# resample the phase array to match n_bins
phase = np.resize(phase, (n_bins, phase.shape[1]))[np.newaxis,:,:,np.newaxis]
return phase
# turn multichannel audio into multiple melgram layers
def make_layered_melgram(signal, sr, mels=128, phase=False):
if (signal.ndim == 1): # given the way the preprocessing code is now, this may not get called
signal = np.reshape( signal, (1,signal.shape[0]))
# get mel-spectrogram for each channel, and layer them into multi-dim array
for channel in range(signal.shape[0]):
melgram = make_melgram(signal[channel],sr, n_mels=mels)
if (0 == channel):
layers = melgram
else:
layers = np.append(layers,melgram,axis=3) # we keep axis=0 free for keras batches, axis=3 means 'channels_last'
if (phase):
phasegram = make_phase_gram(signal[channel],sr, n_bins=mels)
layers = np.append(layers,phasegram,axis=3)
return layers
def nearest_multiple( a, b ): # returns the largest multiple of b that does not exceed a
return int(a/b) * b
# can be used for test dataset as well
def build_dataset(path="Preproc/Train/", load_frac=1.0, batch_size=None, tile=False, max_per_class=0):
class_names = get_class_names(path=path)
print("class_names = ",class_names)
nb_classes = len(class_names)
total_files = get_total_files(class_names, path=path)
total_load = int(total_files * load_frac)
if max_per_class > 0:
total_load = min( total_load, max_per_class * nb_classes)
if (batch_size is not None): # keras gets particular: dataset size must be mult. of batch_size
|
total_load = nearest_multiple( total_load, batch_size)
|
conditional_block
|
|
lib.rs
|
Mut};
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "device")] {
mod slave_req_handler;
mod slave_proxy;
pub use self::slave_req_handler::{
Protocol, SlaveReqHandler, SlaveReqHelper, VhostUserSlaveReqHandler,
VhostUserSlaveReqHandlerMut,
};
pub use self::slave_proxy::Slave;
}
}
cfg_if::cfg_if! {
if #[cfg(all(feature = "device", unix))] {
mod slave;
pub use self::slave::SlaveListener;
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "vmm")] {
pub use self::master_req_handler::MasterReqHandler;
}
}
/// Errors for vhost-user operations
#[sorted]
#[derive(Debug, ThisError)]
pub enum
|
{
/// client exited properly.
#[error("client exited properly")]
ClientExit,
/// client disconnected.
/// If connection is closed properly, use `ClientExit` instead.
#[error("client closed the connection")]
Disconnect,
/// Virtio/protocol features mismatch.
#[error("virtio features mismatch")]
FeatureMismatch,
/// Fd array in question is too big or too small
#[error("wrong number of attached fds")]
IncorrectFds,
/// Invalid message format, flag or content.
#[error("invalid message")]
InvalidMessage,
/// Unsupported operation because the required protocol feature hasn't been negotiated.
#[error("invalid operation")]
InvalidOperation,
/// Invalid parameters.
#[error("invalid parameters")]
InvalidParam,
/// Failure from the master side.
#[error("master Internal error")]
MasterInternalError,
/// Message is too large
#[error("oversized message")]
OversizedMsg,
/// Only part of a message has been sent or received successfully
#[error("partial message")]
PartialMessage,
/// Provided recv buffer was too small, and data was dropped.
#[error("buffer for recv was too small, data was dropped: got size {got}, needed {want}")]
RecvBufferTooSmall {
/// The size of the buffer received.
got: usize,
/// The expected size of the buffer.
want: usize,
},
/// Error from request handler
#[error("handler failed to handle request: {0}")]
ReqHandlerError(IOError),
/// Failure from the slave side.
#[error("slave internal error")]
SlaveInternalError,
/// The socket is broken or has been closed.
#[error("socket is broken: {0}")]
SocketBroken(std::io::Error),
/// Can't connect to peer.
#[error("can't connect to peer: {0}")]
SocketConnect(std::io::Error),
/// Generic socket errors.
#[error("socket error: {0}")]
SocketError(std::io::Error),
/// Should retry the socket operation again.
#[error("temporary socket error: {0}")]
SocketRetry(std::io::Error),
/// Error from tx/rx on a Tube.
#[error("failed to read/write on Tube: {0}")]
TubeError(base::TubeError),
/// Error from VFIO device.
#[error("error occurred in VFIO device: {0}")]
VfioDeviceError(anyhow::Error),
}
impl From<base::TubeError> for Error {
fn from(err: base::TubeError) -> Self {
Error::TubeError(err)
}
}
impl From<std::io::Error> for Error {
fn from(err: std::io::Error) -> Self {
Error::SocketError(err)
}
}
impl From<base::Error> for Error {
/// Convert raw socket errors into meaningful vhost-user errors.
///
/// The base::Error is a simple wrapper over the raw errno, which doesn't mean
/// much to the vhost-user connection manager. So convert it into meaningful errors to simplify
/// the connection manager logic.
///
/// # Return:
/// * - Error::SocketRetry: temporary error caused by signals or short of resources.
/// * - Error::SocketBroken: the underlying socket is broken.
/// * - Error::SocketError: other socket related errors.
#[allow(unreachable_patterns)] // EWOULDBLOCK equals EAGAIN on Linux
fn from(err: base::Error) -> Self {
match err.errno() {
// Retry:
// * EAGAIN, EWOULDBLOCK: The socket is marked nonblocking and the requested operation
// would block.
// * EINTR: A signal occurred before any data was transmitted
// * ENOBUFS: The output queue for a network interface was full. This generally
// indicates that the interface has stopped sending, but may be caused by transient
// congestion.
// * ENOMEM: No memory available.
libc::EAGAIN | libc::EWOULDBLOCK | libc::EINTR | libc::ENOBUFS | libc::ENOMEM => {
Error::SocketRetry(err.into())
}
// Broken:
// * ECONNRESET: Connection reset by peer.
// * EPIPE: The local end has been shut down on a connection oriented socket. In this
// case the process will also receive a SIGPIPE unless MSG_NOSIGNAL is set.
libc::ECONNRESET | libc::EPIPE => Error::SocketBroken(err.into()),
// Write permission is denied on the destination socket file, or search permission is
// denied for one of the directories the path prefix.
libc::EACCES => Error::SocketConnect(IOError::from_raw_os_error(libc::EACCES)),
// Catch all other errors
e => Error::SocketError(IOError::from_raw_os_error(e)),
}
}
}
/// Result of vhost-user operations
pub type Result<T> = std::result::Result<T, Error>;
/// Result of request handler.
pub type HandlerResult<T> = std::result::Result<T, IOError>;
/// Utility function to take the first element from option of a vector of files.
/// Returns `None` if the vector contains no file or more than one file.
pub(crate) fn take_single_file(files: Option<Vec<File>>) -> Option<File> {
let mut files = files?;
if files.len() != 1 {
return None;
}
Some(files.swap_remove(0))
}
#[cfg(all(test, feature = "device"))]
mod dummy_slave;
#[cfg(all(test, feature = "vmm", feature = "device"))]
mod tests {
use std::sync::Arc;
use std::sync::Barrier;
use std::sync::Mutex;
use std::thread;
use base::AsRawDescriptor;
use tempfile::tempfile;
use super::*;
use crate::backend::VhostBackend;
use crate::connection::tests::*;
use crate::dummy_slave::DummySlaveReqHandler;
use crate::dummy_slave::VIRTIO_FEATURES;
use crate::message::*;
use crate::VhostUserMemoryRegionInfo;
use crate::VringConfigData;
#[test]
fn create_dummy_slave() {
let slave = Mutex::new(DummySlaveReqHandler::new());
slave.set_owner().unwrap();
assert!(slave.set_owner().is_err());
}
#[test]
fn test_set_owner() {
let slave_be = Mutex::new(DummySlaveReqHandler::new());
let (master, mut slave) = create_master_slave_pair(slave_be);
assert!(!slave.as_ref().lock().unwrap().owned);
master.set_owner().unwrap();
slave.handle_request().unwrap();
assert!(slave.as_ref().lock().unwrap().owned);
master.set_owner().unwrap();
assert!(slave.handle_request().is_err());
assert!(slave.as_ref().lock().unwrap().owned);
}
#[test]
fn test_set_features() {
let mbar = Arc::new(Barrier::new(2));
let sbar = mbar.clone();
let slave_be = Mutex::new(DummySlaveReqHandler::new());
let (mut master, mut slave) = create_master_slave_pair(slave_be);
thread::spawn(move || {
slave.handle_request().unwrap();
assert!(slave.as_ref().lock().unwrap().owned);
slave.handle_request().unwrap();
slave.handle_request().unwrap();
assert_eq!(
slave.as_ref().lock().unwrap().acked_features,
VIRTIO_FEATURES & !0x1
);
slave.handle_request().unwrap();
slave.handle_request().unwrap();
assert_eq!(
slave.as_ref().lock().unwrap().acked_protocol_features,
VhostUserProtocolFeatures::all().bits()
);
sbar.wait();
});
master.set_owner().unwrap();
// set virtio features
let features = master.get_features().unwrap();
assert_eq!(features, VIRTIO_FEATURES);
master.set_features(VIRTIO_FEATURES & !0x1).unwrap();
// set vhost protocol features
let features = master.get_protocol_features().unwrap();
assert_eq!(features.bits(), VhostUserProtocolFeatures::all().bits());
master.set_protocol_features(features).unwrap();
mbar.wait();
}
#[test]
fn test_master_slave_process() {
let mbar = Arc::new(Barrier::new(2));
let sbar = mbar.clone();
let slave_be = Mutex::new(DummySlaveReqHandler::new());
let (mut
|
Error
|
identifier_name
|
lib.rs
|
Mut};
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "device")] {
mod slave_req_handler;
mod slave_proxy;
pub use self::slave_req_handler::{
Protocol, SlaveReqHandler, SlaveReqHelper, VhostUserSlaveReqHandler,
VhostUserSlaveReqHandlerMut,
};
pub use self::slave_proxy::Slave;
}
}
cfg_if::cfg_if! {
if #[cfg(all(feature = "device", unix))] {
mod slave;
pub use self::slave::SlaveListener;
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "vmm")] {
pub use self::master_req_handler::MasterReqHandler;
}
}
/// Errors for vhost-user operations
#[sorted]
#[derive(Debug, ThisError)]
pub enum Error {
/// client exited properly.
#[error("client exited properly")]
ClientExit,
/// client disconnected.
/// If connection is closed properly, use `ClientExit` instead.
#[error("client closed the connection")]
Disconnect,
/// Virtio/protocol features mismatch.
#[error("virtio features mismatch")]
FeatureMismatch,
/// Fd array in question is too big or too small
#[error("wrong number of attached fds")]
IncorrectFds,
/// Invalid message format, flag or content.
#[error("invalid message")]
InvalidMessage,
/// Unsupported operation because the required protocol feature hasn't been negotiated.
#[error("invalid operation")]
InvalidOperation,
/// Invalid parameters.
#[error("invalid parameters")]
InvalidParam,
/// Failure from the master side.
#[error("master Internal error")]
MasterInternalError,
/// Message is too large
#[error("oversized message")]
OversizedMsg,
/// Only part of a message has been sent or received successfully
#[error("partial message")]
PartialMessage,
/// Provided recv buffer was too small, and data was dropped.
#[error("buffer for recv was too small, data was dropped: got size {got}, needed {want}")]
RecvBufferTooSmall {
/// The size of the buffer received.
got: usize,
/// The expected size of the buffer.
want: usize,
},
/// Error from request handler
#[error("handler failed to handle request: {0}")]
ReqHandlerError(IOError),
/// Failure from the slave side.
#[error("slave internal error")]
SlaveInternalError,
/// The socket is broken or has been closed.
#[error("socket is broken: {0}")]
SocketBroken(std::io::Error),
/// Can't connect to peer.
#[error("can't connect to peer: {0}")]
SocketConnect(std::io::Error),
/// Generic socket errors.
#[error("socket error: {0}")]
SocketError(std::io::Error),
/// Should retry the socket operation again.
#[error("temporary socket error: {0}")]
SocketRetry(std::io::Error),
/// Error from tx/rx on a Tube.
#[error("failed to read/write on Tube: {0}")]
TubeError(base::TubeError),
/// Error from VFIO device.
#[error("error occurred in VFIO device: {0}")]
VfioDeviceError(anyhow::Error),
}
impl From<base::TubeError> for Error {
fn from(err: base::TubeError) -> Self {
Error::TubeError(err)
}
}
impl From<std::io::Error> for Error {
fn from(err: std::io::Error) -> Self {
Error::SocketError(err)
}
}
impl From<base::Error> for Error {
/// Convert raw socket errors into meaningful vhost-user errors.
///
/// The base::Error is a simple wrapper over the raw errno, which doesn't mean
/// much to the vhost-user connection manager. So convert it into meaningful errors to simplify
/// the connection manager logic.
///
/// # Return:
/// * - Error::SocketRetry: temporary error caused by signals or short of resources.
/// * - Error::SocketBroken: the underlying socket is broken.
/// * - Error::SocketError: other socket related errors.
#[allow(unreachable_patterns)] // EWOULDBLOCK equals EAGAIN on Linux
fn from(err: base::Error) -> Self
|
libc::EACCES => Error::SocketConnect(IOError::from_raw_os_error(libc::EACCES)),
// Catch all other errors
e => Error::SocketError(IOError::from_raw_os_error(e)),
}
}
}
/// Result of vhost-user operations
pub type Result<T> = std::result::Result<T, Error>;
/// Result of request handler.
pub type HandlerResult<T> = std::result::Result<T, IOError>;
/// Utility function to take the first element from option of a vector of files.
/// Returns `None` if the vector contains no file or more than one file.
pub(crate) fn take_single_file(files: Option<Vec<File>>) -> Option<File> {
let mut files = files?;
if files.len() != 1 {
return None;
}
Some(files.swap_remove(0))
}
#[cfg(all(test, feature = "device"))]
mod dummy_slave;
#[cfg(all(test, feature = "vmm", feature = "device"))]
mod tests {
use std::sync::Arc;
use std::sync::Barrier;
use std::sync::Mutex;
use std::thread;
use base::AsRawDescriptor;
use tempfile::tempfile;
use super::*;
use crate::backend::VhostBackend;
use crate::connection::tests::*;
use crate::dummy_slave::DummySlaveReqHandler;
use crate::dummy_slave::VIRTIO_FEATURES;
use crate::message::*;
use crate::VhostUserMemoryRegionInfo;
use crate::VringConfigData;
#[test]
fn create_dummy_slave() {
let slave = Mutex::new(DummySlaveReqHandler::new());
slave.set_owner().unwrap();
assert!(slave.set_owner().is_err());
}
#[test]
fn test_set_owner() {
let slave_be = Mutex::new(DummySlaveReqHandler::new());
let (master, mut slave) = create_master_slave_pair(slave_be);
assert!(!slave.as_ref().lock().unwrap().owned);
master.set_owner().unwrap();
slave.handle_request().unwrap();
assert!(slave.as_ref().lock().unwrap().owned);
master.set_owner().unwrap();
assert!(slave.handle_request().is_err());
assert!(slave.as_ref().lock().unwrap().owned);
}
#[test]
fn test_set_features() {
let mbar = Arc::new(Barrier::new(2));
let sbar = mbar.clone();
let slave_be = Mutex::new(DummySlaveReqHandler::new());
let (mut master, mut slave) = create_master_slave_pair(slave_be);
thread::spawn(move || {
slave.handle_request().unwrap();
assert!(slave.as_ref().lock().unwrap().owned);
slave.handle_request().unwrap();
slave.handle_request().unwrap();
assert_eq!(
slave.as_ref().lock().unwrap().acked_features,
VIRTIO_FEATURES & !0x1
);
slave.handle_request().unwrap();
slave.handle_request().unwrap();
assert_eq!(
slave.as_ref().lock().unwrap().acked_protocol_features,
VhostUserProtocolFeatures::all().bits()
);
sbar.wait();
});
master.set_owner().unwrap();
// set virtio features
let features = master.get_features().unwrap();
assert_eq!(features, VIRTIO_FEATURES);
master.set_features(VIRTIO_FEATURES & !0x1).unwrap();
// set vhost protocol features
let features = master.get_protocol_features().unwrap();
assert_eq!(features.bits(), VhostUserProtocolFeatures::all().bits());
master.set_protocol_features(features).unwrap();
mbar.wait();
}
#[test]
fn test_master_slave_process() {
let mbar = Arc::new(Barrier::new(2));
let sbar = mbar.clone();
let slave_be = Mutex::new(DummySlaveReqHandler::new());
let (mut
|
{
match err.errno() {
// Retry:
// * EAGAIN, EWOULDBLOCK: The socket is marked nonblocking and the requested operation
// would block.
// * EINTR: A signal occurred before any data was transmitted
// * ENOBUFS: The output queue for a network interface was full. This generally
// indicates that the interface has stopped sending, but may be caused by transient
// congestion.
// * ENOMEM: No memory available.
libc::EAGAIN | libc::EWOULDBLOCK | libc::EINTR | libc::ENOBUFS | libc::ENOMEM => {
Error::SocketRetry(err.into())
}
// Broken:
// * ECONNRESET: Connection reset by peer.
// * EPIPE: The local end has been shut down on a connection oriented socket. In this
// case the process will also receive a SIGPIPE unless MSG_NOSIGNAL is set.
libc::ECONNRESET | libc::EPIPE => Error::SocketBroken(err.into()),
// Write permission is denied on the destination socket file, or search permission is
// denied for one of the directories the path prefix.
|
identifier_body
|
lib.rs
|
))
}
#[cfg(all(test, feature = "device"))]
mod dummy_slave;
#[cfg(all(test, feature = "vmm", feature = "device"))]
mod tests {
use std::sync::Arc;
use std::sync::Barrier;
use std::sync::Mutex;
use std::thread;
use base::AsRawDescriptor;
use tempfile::tempfile;
use super::*;
use crate::backend::VhostBackend;
use crate::connection::tests::*;
use crate::dummy_slave::DummySlaveReqHandler;
use crate::dummy_slave::VIRTIO_FEATURES;
use crate::message::*;
use crate::VhostUserMemoryRegionInfo;
use crate::VringConfigData;
#[test]
fn create_dummy_slave() {
let slave = Mutex::new(DummySlaveReqHandler::new());
slave.set_owner().unwrap();
assert!(slave.set_owner().is_err());
}
#[test]
fn test_set_owner() {
let slave_be = Mutex::new(DummySlaveReqHandler::new());
let (master, mut slave) = create_master_slave_pair(slave_be);
assert!(!slave.as_ref().lock().unwrap().owned);
master.set_owner().unwrap();
slave.handle_request().unwrap();
assert!(slave.as_ref().lock().unwrap().owned);
master.set_owner().unwrap();
assert!(slave.handle_request().is_err());
assert!(slave.as_ref().lock().unwrap().owned);
}
#[test]
fn test_set_features() {
let mbar = Arc::new(Barrier::new(2));
let sbar = mbar.clone();
let slave_be = Mutex::new(DummySlaveReqHandler::new());
let (mut master, mut slave) = create_master_slave_pair(slave_be);
thread::spawn(move || {
slave.handle_request().unwrap();
assert!(slave.as_ref().lock().unwrap().owned);
slave.handle_request().unwrap();
slave.handle_request().unwrap();
assert_eq!(
slave.as_ref().lock().unwrap().acked_features,
VIRTIO_FEATURES & !0x1
);
slave.handle_request().unwrap();
slave.handle_request().unwrap();
assert_eq!(
slave.as_ref().lock().unwrap().acked_protocol_features,
VhostUserProtocolFeatures::all().bits()
);
sbar.wait();
});
master.set_owner().unwrap();
// set virtio features
let features = master.get_features().unwrap();
assert_eq!(features, VIRTIO_FEATURES);
master.set_features(VIRTIO_FEATURES & !0x1).unwrap();
// set vhost protocol features
let features = master.get_protocol_features().unwrap();
assert_eq!(features.bits(), VhostUserProtocolFeatures::all().bits());
master.set_protocol_features(features).unwrap();
mbar.wait();
}
#[test]
fn test_master_slave_process() {
let mbar = Arc::new(Barrier::new(2));
let sbar = mbar.clone();
let slave_be = Mutex::new(DummySlaveReqHandler::new());
let (mut master, mut slave) = create_master_slave_pair(slave_be);
thread::spawn(move || {
// set_own()
slave.handle_request().unwrap();
assert!(slave.as_ref().lock().unwrap().owned);
// get/set_features()
slave.handle_request().unwrap();
slave.handle_request().unwrap();
assert_eq!(
slave.as_ref().lock().unwrap().acked_features,
VIRTIO_FEATURES & !0x1
);
slave.handle_request().unwrap();
slave.handle_request().unwrap();
assert_eq!(
slave.as_ref().lock().unwrap().acked_protocol_features,
VhostUserProtocolFeatures::all().bits()
);
// get_inflight_fd()
slave.handle_request().unwrap();
// set_inflight_fd()
slave.handle_request().unwrap();
// get_queue_num()
slave.handle_request().unwrap();
// set_mem_table()
slave.handle_request().unwrap();
// get/set_config()
slave.handle_request().unwrap();
slave.handle_request().unwrap();
// set_slave_request_fd
slave.handle_request().unwrap();
// set_vring_enable
slave.handle_request().unwrap();
// set_log_base,set_log_fd()
slave.handle_request().unwrap_err();
slave.handle_request().unwrap_err();
// set_vring_xxx
slave.handle_request().unwrap();
slave.handle_request().unwrap();
slave.handle_request().unwrap();
slave.handle_request().unwrap();
slave.handle_request().unwrap();
slave.handle_request().unwrap();
// get_max_mem_slots()
slave.handle_request().unwrap();
// add_mem_region()
slave.handle_request().unwrap();
// remove_mem_region()
slave.handle_request().unwrap();
sbar.wait();
});
master.set_owner().unwrap();
// set virtio features
let features = master.get_features().unwrap();
assert_eq!(features, VIRTIO_FEATURES);
master.set_features(VIRTIO_FEATURES & !0x1).unwrap();
// set vhost protocol features
let features = master.get_protocol_features().unwrap();
assert_eq!(features.bits(), VhostUserProtocolFeatures::all().bits());
master.set_protocol_features(features).unwrap();
// Retrieve inflight I/O tracking information
let (inflight_info, inflight_file) = master
.get_inflight_fd(&VhostUserInflight {
num_queues: 2,
queue_size: 256,
..Default::default()
})
.unwrap();
// Set the buffer back to the backend
master
.set_inflight_fd(&inflight_info, inflight_file.as_raw_descriptor())
.unwrap();
let num = master.get_queue_num().unwrap();
assert_eq!(num, 2);
let event = base::Event::new().unwrap();
let mem = [VhostUserMemoryRegionInfo {
guest_phys_addr: 0,
memory_size: 0x10_0000,
userspace_addr: 0,
mmap_offset: 0,
mmap_handle: event.as_raw_descriptor(),
}];
master.set_mem_table(&mem).unwrap();
master
.set_config(0x100, VhostUserConfigFlags::WRITABLE, &[0xa5u8])
.unwrap();
let buf = [0x0u8; 4];
let (reply_body, reply_payload) = master
.get_config(0x100, 4, VhostUserConfigFlags::empty(), &buf)
.unwrap();
let offset = reply_body.offset;
assert_eq!(offset, 0x100);
assert_eq!(reply_payload[0], 0xa5);
#[cfg(windows)]
let tubes = base::Tube::pair().unwrap();
#[cfg(windows)]
// Safe because we will be importing the Tube in the other thread.
let descriptor =
unsafe { tube_transporter::packed_tube::pack(tubes.0, std::process::id()).unwrap() };
#[cfg(unix)]
let descriptor = base::Event::new().unwrap();
master.set_slave_request_fd(&descriptor).unwrap();
master.set_vring_enable(0, true).unwrap();
// not implemented yet
master
.set_log_base(0, Some(event.as_raw_descriptor()))
.unwrap();
master.set_log_fd(event.as_raw_descriptor()).unwrap();
master.set_vring_num(0, 256).unwrap();
master.set_vring_base(0, 0).unwrap();
let config = VringConfigData {
queue_max_size: 256,
queue_size: 128,
flags: VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits(),
desc_table_addr: 0x1000,
used_ring_addr: 0x2000,
avail_ring_addr: 0x3000,
log_addr: Some(0x4000),
};
master.set_vring_addr(0, &config).unwrap();
master.set_vring_call(0, &event).unwrap();
master.set_vring_kick(0, &event).unwrap();
master.set_vring_err(0, &event).unwrap();
let max_mem_slots = master.get_max_mem_slots().unwrap();
assert_eq!(max_mem_slots, 32);
let region_file = tempfile().unwrap();
let region = VhostUserMemoryRegionInfo {
guest_phys_addr: 0x10_0000,
memory_size: 0x10_0000,
userspace_addr: 0,
mmap_offset: 0,
mmap_handle: region_file.as_raw_descriptor(),
};
master.add_mem_region(®ion).unwrap();
master.remove_mem_region(®ion).unwrap();
mbar.wait();
}
#[test]
fn test_error_display() {
assert_eq!(format!("{}", Error::InvalidParam), "invalid parameters");
assert_eq!(format!("{}", Error::InvalidOperation), "invalid operation");
}
#[test]
fn test_error_from_base_error() {
let e: Error = base::Error::new(libc::EAGAIN).into();
if let Error::SocketRetry(e1) = e {
assert_eq!(e1.raw_os_error().unwrap(), libc::EAGAIN);
} else
|
{
panic!("invalid error code conversion!");
}
|
conditional_block
|
|
lib.rs
|
//! virtqueues sharing with a user space process on the same host. It uses communication over a
//! Unix domain socket to share file descriptors in the ancillary data of the message.
//! The protocol defines 2 sides of the communication, master and slave. Master is the application
//! that shares its virtqueues. Slave is the consumer of the virtqueues. Master and slave can be
//! either a client (i.e. connecting) or server (listening) in the socket communication.
#![deny(missing_docs)]
use std::fs::File;
use std::io::Error as IOError;
use remain::sorted;
use thiserror::Error as ThisError;
mod backend;
pub use backend::*;
pub mod message;
pub mod connection;
mod sys;
pub use sys::SystemStream;
pub use sys::*;
cfg_if::cfg_if! {
if #[cfg(feature = "vmm")] {
pub(crate) mod master;
pub use self::master::{Master, VhostUserMaster};
mod master_req_handler;
pub use self::master_req_handler::{VhostUserMasterReqHandler,
VhostUserMasterReqHandlerMut};
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "device")] {
mod slave_req_handler;
mod slave_proxy;
pub use self::slave_req_handler::{
Protocol, SlaveReqHandler, SlaveReqHelper, VhostUserSlaveReqHandler,
VhostUserSlaveReqHandlerMut,
};
pub use self::slave_proxy::Slave;
}
}
cfg_if::cfg_if! {
if #[cfg(all(feature = "device", unix))] {
mod slave;
pub use self::slave::SlaveListener;
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "vmm")] {
pub use self::master_req_handler::MasterReqHandler;
}
}
/// Errors for vhost-user operations
#[sorted]
#[derive(Debug, ThisError)]
pub enum Error {
/// client exited properly.
#[error("client exited properly")]
ClientExit,
/// client disconnected.
/// If connection is closed properly, use `ClientExit` instead.
#[error("client closed the connection")]
Disconnect,
/// Virtio/protocol features mismatch.
#[error("virtio features mismatch")]
FeatureMismatch,
/// Fd array in question is too big or too small
#[error("wrong number of attached fds")]
IncorrectFds,
/// Invalid message format, flag or content.
#[error("invalid message")]
InvalidMessage,
/// Unsupported operation because the required protocol feature hasn't been negotiated.
#[error("invalid operation")]
InvalidOperation,
/// Invalid parameters.
#[error("invalid parameters")]
InvalidParam,
/// Failure from the master side.
#[error("master Internal error")]
MasterInternalError,
/// Message is too large
#[error("oversized message")]
OversizedMsg,
/// Only part of a message has been sent or received successfully
#[error("partial message")]
PartialMessage,
/// Provided recv buffer was too small, and data was dropped.
#[error("buffer for recv was too small, data was dropped: got size {got}, needed {want}")]
RecvBufferTooSmall {
/// The size of the buffer received.
got: usize,
/// The expected size of the buffer.
want: usize,
},
/// Error from request handler
#[error("handler failed to handle request: {0}")]
ReqHandlerError(IOError),
/// Failure from the slave side.
#[error("slave internal error")]
SlaveInternalError,
/// The socket is broken or has been closed.
#[error("socket is broken: {0}")]
SocketBroken(std::io::Error),
/// Can't connect to peer.
#[error("can't connect to peer: {0}")]
SocketConnect(std::io::Error),
/// Generic socket errors.
#[error("socket error: {0}")]
SocketError(std::io::Error),
/// Should retry the socket operation again.
#[error("temporary socket error: {0}")]
SocketRetry(std::io::Error),
/// Error from tx/rx on a Tube.
#[error("failed to read/write on Tube: {0}")]
TubeError(base::TubeError),
/// Error from VFIO device.
#[error("error occurred in VFIO device: {0}")]
VfioDeviceError(anyhow::Error),
}
impl From<base::TubeError> for Error {
fn from(err: base::TubeError) -> Self {
Error::TubeError(err)
}
}
impl From<std::io::Error> for Error {
fn from(err: std::io::Error) -> Self {
Error::SocketError(err)
}
}
impl From<base::Error> for Error {
/// Convert raw socket errors into meaningful vhost-user errors.
///
/// A `base::Error` is a simple wrapper over the raw errno, which doesn't mean
/// much to the vhost-user connection manager, so convert it into meaningful errors to simplify
/// the connection manager logic.
///
/// # Return:
/// * - Error::SocketRetry: temporary error caused by signals or a shortage of resources.
/// * - Error::SocketBroken: the underlying socket is broken.
/// * - Error::SocketError: other socket related errors.
#[allow(unreachable_patterns)] // EWOULDBLOCK equals EAGAIN on Linux
fn from(err: base::Error) -> Self {
match err.errno() {
// Retry:
// * EAGAIN, EWOULDBLOCK: The socket is marked nonblocking and the requested operation
// would block.
// * EINTR: A signal occurred before any data was transmitted
// * ENOBUFS: The output queue for a network interface was full. This generally
// indicates that the interface has stopped sending, but may be caused by transient
// congestion.
// * ENOMEM: No memory available.
libc::EAGAIN | libc::EWOULDBLOCK | libc::EINTR | libc::ENOBUFS | libc::ENOMEM => {
Error::SocketRetry(err.into())
}
// Broken:
// * ECONNRESET: Connection reset by peer.
// * EPIPE: The local end has been shut down on a connection oriented socket. In this
// case the process will also receive a SIGPIPE unless MSG_NOSIGNAL is set.
libc::ECONNRESET | libc::EPIPE => Error::SocketBroken(err.into()),
// Write permission is denied on the destination socket file, or search permission is
// denied for one of the directories the path prefix.
libc::EACCES => Error::SocketConnect(IOError::from_raw_os_error(libc::EACCES)),
// Catch all other errors
e => Error::SocketError(IOError::from_raw_os_error(e)),
}
}
}
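// Illustrative sketch (not part of the original source; helper names are hypothetical): the
// `From` impls above let callers propagate `base::Error` and `std::io::Error` with `?` and then
// classify the resulting `Error`, e.g. retrying only on `Error::SocketRetry`.
fn classify_errno(errno: i32) -> Error {
    Error::from(base::Error::new(errno))
}
fn is_retriable(e: &Error) -> bool {
    matches!(e, Error::SocketRetry(_))
}
// For example, `is_retriable(&classify_errno(libc::EAGAIN))` is true, while `libc::ECONNRESET`
// maps to `Error::SocketBroken`.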
/// Result of vhost-user operations
pub type Result<T> = std::result::Result<T, Error>;
/// Result of request handler.
pub type HandlerResult<T> = std::result::Result<T, IOError>;
/// Utility function to take the single file from an optional vector of files.
/// Returns `None` if the vector contains no files or more than one file.
pub(crate) fn take_single_file(files: Option<Vec<File>>) -> Option<File> {
let mut files = files?;
if files.len() != 1 {
return None;
}
Some(files.swap_remove(0))
}
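// Illustrative sketch (not part of the original source; the helper name is hypothetical): a
// caller that requires exactly one attached file can turn the `None` case into
// `Error::IncorrectFds`.
fn expect_single_file(files: Option<Vec<File>>) -> Result<File> {
    take_single_file(files).ok_or(Error::IncorrectFds)
}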
#[cfg(all(test, feature = "device"))]
mod dummy_slave;
#[cfg(all(test, feature = "vmm", feature = "device"))]
mod tests {
use std::sync::Arc;
use std::sync::Barrier;
use std::sync::Mutex;
use std::thread;
use base::AsRawDescriptor;
use tempfile::tempfile;
use super::*;
use crate::backend::VhostBackend;
use crate::connection::tests::*;
use crate::dummy_slave::DummySlaveReqHandler;
use crate::dummy_slave::VIRTIO_FEATURES;
use crate::message::*;
use crate::VhostUserMemoryRegionInfo;
use crate::VringConfigData;
#[test]
fn create_dummy_slave() {
let slave = Mutex::new(DummySlaveReqHandler::new());
slave.set_owner().unwrap();
assert!(slave.set_owner().is_err());
}
#[test]
fn test_set_owner() {
let slave_be = Mutex::new(DummySlaveReqHandler::new());
let (master, mut slave) = create_master_slave_pair(slave_be);
assert!(!slave.as_ref().lock().unwrap().owned);
master.set_owner().unwrap();
slave.handle_request().unwrap();
assert!(slave.as_ref().lock().unwrap().owned);
master.set_owner().unwrap();
assert!(slave.handle_request().is_err());
assert!(slave.as_ref().lock().unwrap().owned);
}
#[test]
fn test_set_features() {
let mbar = Arc::new(Barrier::new(2));
let sbar = mbar.clone();
let slave_be = Mutex::new(DummySlaveReqHandler::new());
let (mut master, mut slave) = create_master_slave_pair(slave_be);
thread::spawn(move || {
slave.handle_request().unwrap();
assert!(slave.as_ref().lock().unwrap().owned);
slave.handle_request().unwrap();
|
//! vhost implementation in the Linux kernel. It implements the control plane needed to establish
|
random_line_split
|
|
file.go
|
.ProcessorEvent)
f.logs = make(map[string]os.FileInfo)
if _, err := os.Stat(f.c.Storage); os.IsNotExist(err) {
if err = os.MkdirAll(f.c.Storage, 0755); err != nil {
return nil, err
}
}
if err = f.nextFile(); err != nil {
return nil, err
}
if err = f.watch(); err != nil {
return
}
if err = f.loadFiles(); err != nil {
return
}
go f.watchproc()
go f.writeProcess()
go f.readProcess()
return f, nil
}
func (f *FileCache) WriteToCache(e *event.ProcessorEvent) {
f.writeChan <- e
}
func (f *FileCache) ReadFromCache() (e *event.ProcessorEvent) {
e = <-f.readChan
return
}
// loadFiles loads existing cache files from the storage directory.
func (f *FileCache) loadFiles() (err error) {
var (
fi os.FileInfo
fis []os.FileInfo
)
if fis, err = ioutil.ReadDir(f.c.Storage); err != nil {
log.Error("ioutil.ReadDir(%s) error(%v)", f.c.Storage, err)
return
}
for _, fi = range fis {
name := path.Join(f.c.Storage, fi.Name())
if !fi.IsDir() && strings.HasSuffix(name, f.c.Suffix) {
f.eLock.Lock()
f.logs[name] = fi
f.eLock.Unlock()
log.Info("loadFile: %s, size: %d", name, fi.Size())
}
}
return
}
func (f *FileCache) writeProcess() {
var (
err error
n, total int
lengthbuf = make([]byte, 4)
cur *os.File
wr = bufio.NewWriterSize(nil, f.c.WriteBuffer)
tk = time.Tick(time.Duration(f.c.CacheFlushInterval))
timestamp = []byte(fmt.Sprintf("%d", time.Now().UnixNano()/1e6))
)
rand.Seed(time.Now().UnixNano())
for {
select {
case next := <-f.next:
if cur != nil && wr != nil {
wr.Flush()
cur.Close()
}
f, err := os.OpenFile(next, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", next, err)
continue
}
cur = f
wr.Reset(f)
total = 0
case <-tk:
if wr != nil && cur != nil {
wr.Flush()
}
f.checkStorageSize()
case e := <-f.writeChan:
if f.storageFull {
flowmonitor.Fm.AddEvent(e, "log-agent.output.lancer", "ERROR", "file cache storgefull")
event.PutEvent(e)
continue
}
if total > f.c.FileBytes && len(f.next) == 0 {
if err := f.nextFile(); err != nil {
log.Error("c.nextFile() error(%v)", err)
}
}
binary.BigEndian.PutUint32(lengthbuf, uint32(e.Length+_logLancerHeaderLen))
// write logMagic
if n, err = wr.Write(logMagic); err != nil {
goto HERE
}
total += n
// write length
if n, err = wr.Write(lengthbuf); err != nil {
goto HERE
}
total += n
// write log
if n, err = wr.Write([]byte(e.LogId)); err != nil {
goto HERE
}
if n, err = wr.Write(timestamp); err != nil {
goto HERE
}
if n, err = wr.Write(e.Bytes()); err != nil {
goto HERE
}
total += n
flowmonitor.Fm.AddEvent(e, "log-agent.output.lancer", "OK", "write file cache ok")
event.PutEvent(e)
continue
HERE: // write file cache error
flowmonitor.Fm.AddEvent(e, "log-agent.output.lancer", "ERROR", "write file cache failed")
event.PutEvent(e)
log.Error("wr.Write() error(%v)", err)
if cur != nil && wr != nil {
wr.Flush()
cur.Close()
}
name := f.nextFileName()
f, err := os.OpenFile(name, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", name, err)
continue
}
cur = f
wr.Reset(f)
total = 0
continue
}
}
}
// index reads the read-progress index file from disk.
func (f *FileCache) index() (idx *Index, err error) {
f.eLock.RLock()
length := len(f.logs)
f.eLock.RUnlock()
if length == 0 {
err = errLogNotFound
return
}
i, err := os.OpenFile(f.c.Index, os.O_RDONLY, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", f.c.Index, err)
return
}
defer i.Close()
b, err := ioutil.ReadAll(i)
if err != nil {
log.Error("ioutil.ReadAll(%s) error(%v)", f.c.Index, err)
return
}
idx = &Index{}
if err = json.Unmarshal(b, idx); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", b, err)
return
}
return
}
// nextReadFile returns the first filename among the observed logs, sorted by name.
func (f *FileCache) nextReadFile() (name string) {
var names []string
f.eLock.RLock()
for name = range f.logs {
names = append(names, name)
}
f.eLock.RUnlock()
if len(names) > 0 {
sort.Strings(names)
name = names[0]
}
return
}
// loadRemain resumes reading from the position recorded in the index, or from the first log file.
func (f *FileCache) loadRemain() (i *Index, w *os.File, err error) {
if i, err = f.index(); err != nil {
next := f.nextReadFile()
if next == "" {
err = errLogNotFound
return
}
i = &Index{
Name: next,
Updated: time.Now().Format(_formatUpdated),
}
}
if w, err = f.openLog(i); err != nil {
log.Warn("a.openLog(%v) error(%v)", i, err)
return
}
return
}
// openLog opens the log file and seeks to the recorded offset.
func (f *FileCache) openLog(idx *Index) (w *os.File, err error) {
if w, err = os.OpenFile(idx.Name, os.O_RDONLY, 0666); err != nil {
log.Error("os.OpenFile(%s) error(%v)", idx.Name, err)
return
}
if _, err = w.Seek(idx.Offset, os.SEEK_SET); err != nil {
log.Error("f.Seek(%d) error(%v)", idx.Offset, err)
return
}
return
}
// watch starts watching the storage directory for file changes.
func (f *FileCache) watch() (err error) {
if f.wh, err = fsnotify.NewWatcher(); err != nil {
log.Error("fsnotify.NewWatcher() error(%v)", err)
return
}
if err = f.wh.Add(f.c.Storage); err != nil {
log.Error("wh.Watch(%s) error(%v)", err)
}
return
}
// watchproc observes the directory for file changes.
func (f *FileCache) watchproc() {
var evt fsnotify.Event
for {
evt = <-f.wh.Events
if evt.Op&fsnotify.Create == fsnotify.Create
|
if evt.Op&fsnotify.Remove == fsnotify.Remove {
f.eLock.Lock()
delete(f.logs, evt.Name)
f.eLock.Unlock()
log.Info("remove file: %s", evt.Name)
}
}
}
// setIndex persists the read-progress index to disk.
func (f *FileCache) setIndex(idx *Index) (err error) {
w, err := os.OpenFile(f.c.Index, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", f.c.Index, err)
return
}
defer w.Close()
b, err := json.Marshal(idx)
if err != nil {
log.Error("json.Marshal(%v)", idx)
return
}
|
{
if !strings.HasSuffix(evt.Name, f.c.Suffix) {
log.Warn("create invalid file: %s", evt.Name)
continue
}
fi, err := os.Stat(evt.Name)
if err != nil {
log.Error("os.Stat(%s) error(%v)", evt.Name, err)
continue
}
f.eLock.Lock()
f.logs[evt.Name] = fi
f.eLock.Unlock()
log.Info("create file: %s", evt.Name)
}
|
conditional_block
|
file.go
|
err = os.MkdirAll(f.c.Storage, 0755); err != nil {
return nil, err
}
}
if err = f.nextFile(); err != nil {
return nil, err
}
if err = f.watch(); err != nil {
return
}
if err = f.loadFiles(); err != nil {
return
}
go f.watchproc()
go f.writeProcess()
go f.readProcess()
return f, nil
}
func (f *FileCache) WriteToCache(e *event.ProcessorEvent) {
f.writeChan <- e
}
func (f *FileCache) ReadFromCache() (e *event.ProcessorEvent) {
e = <-f.readChan
return
}
// loadFiles loads existing cache files from the storage directory.
func (f *FileCache) loadFiles() (err error) {
var (
fi os.FileInfo
fis []os.FileInfo
)
if fis, err = ioutil.ReadDir(f.c.Storage); err != nil {
log.Error("ioutil.ReadDir(%s) error(%v)", f.c.Storage, err)
return
}
for _, fi = range fis {
name := path.Join(f.c.Storage, fi.Name())
if !fi.IsDir() && strings.HasSuffix(name, f.c.Suffix) {
f.eLock.Lock()
f.logs[name] = fi
f.eLock.Unlock()
log.Info("loadFile: %s, size: %d", name, fi.Size())
}
}
return
}
func (f *FileCache) writeProcess() {
var (
err error
n, total int
lengthbuf = make([]byte, 4)
cur *os.File
wr = bufio.NewWriterSize(nil, f.c.WriteBuffer)
tk = time.Tick(time.Duration(f.c.CacheFlushInterval))
timestamp = []byte(fmt.Sprintf("%d", time.Now().UnixNano()/1e6))
)
rand.Seed(time.Now().UnixNano())
for {
select {
case next := <-f.next:
if cur != nil && wr != nil {
wr.Flush()
cur.Close()
}
f, err := os.OpenFile(next, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", next, err)
continue
}
cur = f
wr.Reset(f)
total = 0
case <-tk:
if wr != nil && cur != nil {
wr.Flush()
}
f.checkStorageSize()
case e := <-f.writeChan:
if f.storageFull {
flowmonitor.Fm.AddEvent(e, "log-agent.output.lancer", "ERROR", "file cache storgefull")
event.PutEvent(e)
continue
}
if total > f.c.FileBytes && len(f.next) == 0 {
if err := f.nextFile(); err != nil {
log.Error("c.nextFile() error(%v)", err)
}
}
binary.BigEndian.PutUint32(lengthbuf, uint32(e.Length+_logLancerHeaderLen))
// write logMagic
if n, err = wr.Write(logMagic); err != nil {
goto HERE
}
total += n
// write length
if n, err = wr.Write(lengthbuf); err != nil {
goto HERE
}
total += n
// write log
if n, err = wr.Write([]byte(e.LogId)); err != nil {
goto HERE
}
if n, err = wr.Write(timestamp); err != nil {
goto HERE
}
if n, err = wr.Write(e.Bytes()); err != nil {
goto HERE
}
total += n
flowmonitor.Fm.AddEvent(e, "log-agent.output.lancer", "OK", "write file cache ok")
event.PutEvent(e)
continue
HERE: // write file cache error
flowmonitor.Fm.AddEvent(e, "log-agent.output.lancer", "ERROR", "write file cache failed")
event.PutEvent(e)
log.Error("wr.Write() error(%v)", err)
if cur != nil && wr != nil {
wr.Flush()
cur.Close()
}
name := f.nextFileName()
f, err := os.OpenFile(name, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", name, err)
continue
}
cur = f
wr.Reset(f)
total = 0
continue
}
}
}
// index reads the read-progress index file from disk.
func (f *FileCache) index() (idx *Index, err error) {
f.eLock.RLock()
length := len(f.logs)
f.eLock.RUnlock()
if length == 0 {
err = errLogNotFound
return
}
i, err := os.OpenFile(f.c.Index, os.O_RDONLY, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", f.c.Index, err)
return
}
defer i.Close()
b, err := ioutil.ReadAll(i)
if err != nil {
log.Error("ioutil.ReadAll(%s) error(%v)", f.c.Index, err)
return
}
idx = &Index{}
if err = json.Unmarshal(b, idx); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", b, err)
return
}
return
}
// nextReadFile returns the first filename among the observed logs, sorted by name.
func (f *FileCache) nextReadFile() (name string) {
var names []string
f.eLock.RLock()
for name = range f.logs {
names = append(names, name)
}
f.eLock.RUnlock()
if len(names) > 0 {
sort.Strings(names)
name = names[0]
}
return
}
// loadRemain resumes reading from the position recorded in the index, or from the first log file.
func (f *FileCache) loadRemain() (i *Index, w *os.File, err error) {
if i, err = f.index(); err != nil {
next := f.nextReadFile()
if next == "" {
err = errLogNotFound
return
}
i = &Index{
Name: next,
Updated: time.Now().Format(_formatUpdated),
}
}
if w, err = f.openLog(i); err != nil {
log.Warn("a.openLog(%v) error(%v)", i, err)
return
}
return
}
// openLog opens the log file and seeks to the recorded offset.
func (f *FileCache) openLog(idx *Index) (w *os.File, err error) {
if w, err = os.OpenFile(idx.Name, os.O_RDONLY, 0666); err != nil {
log.Error("os.OpenFile(%s) error(%v)", idx.Name, err)
return
}
if _, err = w.Seek(idx.Offset, os.SEEK_SET); err != nil {
log.Error("f.Seek(%d) error(%v)", idx.Offset, err)
return
}
return
}
// watch starts watching the storage directory for file changes.
func (f *FileCache) watch() (err error) {
if f.wh, err = fsnotify.NewWatcher(); err != nil {
log.Error("fsnotify.NewWatcher() error(%v)", err)
return
}
if err = f.wh.Add(f.c.Storage); err != nil {
log.Error("wh.Watch(%s) error(%v)", err)
}
return
}
// watchproc observes the directory for file changes.
func (f *FileCache) watchproc() {
var evt fsnotify.Event
for {
evt = <-f.wh.Events
if evt.Op&fsnotify.Create == fsnotify.Create {
if !strings.HasSuffix(evt.Name, f.c.Suffix) {
log.Warn("create invalid file: %s", evt.Name)
continue
}
fi, err := os.Stat(evt.Name)
if err != nil {
log.Error("os.Stat(%s) error(%v)", evt.Name, err)
continue
}
f.eLock.Lock()
f.logs[evt.Name] = fi
f.eLock.Unlock()
log.Info("create file: %s", evt.Name)
}
if evt.Op&fsnotify.Remove == fsnotify.Remove {
f.eLock.Lock()
delete(f.logs, evt.Name)
f.eLock.Unlock()
log.Info("remove file: %s", evt.Name)
}
}
}
// setIndex persists the read-progress index to disk.
func (f *FileCache) setIndex(idx *Index) (err error)
|
{
w, err := os.OpenFile(f.c.Index, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", f.c.Index, err)
return
}
defer w.Close()
b, err := json.Marshal(idx)
if err != nil {
log.Error("json.Marshal(%v)", idx)
return
}
if _, err = w.Write(b); err != nil {
log.Error("f.Write(%s) error(%v)", b, err)
}
return
}
|
identifier_body
|
|
file.go
|
cur.Close()
}
name := f.nextFileName()
f, err := os.OpenFile(name, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", name, err)
continue
}
cur = f
wr.Reset(f)
total = 0
continue
}
}
}
// index reads the read-progress index file from disk.
func (f *FileCache) index() (idx *Index, err error) {
f.eLock.RLock()
length := len(f.logs)
f.eLock.RUnlock()
if length == 0 {
err = errLogNotFound
return
}
i, err := os.OpenFile(f.c.Index, os.O_RDONLY, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", f.c.Index, err)
return
}
defer i.Close()
b, err := ioutil.ReadAll(i)
if err != nil {
log.Error("ioutil.ReadAll(%s) error(%v)", f.c.Index, err)
return
}
idx = &Index{}
if err = json.Unmarshal(b, idx); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", b, err)
return
}
return
}
// nextReadFile returns the first filename among the observed logs, sorted by name.
func (f *FileCache) nextReadFile() (name string) {
var names []string
f.eLock.RLock()
for name = range f.logs {
names = append(names, name)
}
f.eLock.RUnlock()
if len(names) > 0 {
sort.Strings(names)
name = names[0]
}
return
}
// loadRemain resumes reading from the position recorded in the index, or from the first log file.
func (f *FileCache) loadRemain() (i *Index, w *os.File, err error) {
if i, err = f.index(); err != nil {
next := f.nextReadFile()
if next == "" {
err = errLogNotFound
return
}
i = &Index{
Name: next,
Updated: time.Now().Format(_formatUpdated),
}
}
if w, err = f.openLog(i); err != nil {
log.Warn("a.openLog(%v) error(%v)", i, err)
return
}
return
}
// openLog opens the log file and seeks to the recorded offset.
func (f *FileCache) openLog(idx *Index) (w *os.File, err error) {
if w, err = os.OpenFile(idx.Name, os.O_RDONLY, 0666); err != nil {
log.Error("os.OpenFile(%s) error(%v)", idx.Name, err)
return
}
if _, err = w.Seek(idx.Offset, os.SEEK_SET); err != nil {
log.Error("f.Seek(%d) error(%v)", idx.Offset, err)
return
}
return
}
// watch starts watching the storage directory for file changes.
func (f *FileCache) watch() (err error) {
if f.wh, err = fsnotify.NewWatcher(); err != nil {
log.Error("fsnotify.NewWatcher() error(%v)", err)
return
}
if err = f.wh.Add(f.c.Storage); err != nil {
log.Error("wh.Watch(%s) error(%v)", err)
}
return
}
// watchproc observes the directory for file changes.
func (f *FileCache) watchproc() {
var evt fsnotify.Event
for {
evt = <-f.wh.Events
if evt.Op&fsnotify.Create == fsnotify.Create {
if !strings.HasSuffix(evt.Name, f.c.Suffix) {
log.Warn("create invalid file: %s", evt.Name)
continue
}
fi, err := os.Stat(evt.Name)
if err != nil {
log.Error("os.Stat(%s) error(%v)", evt.Name, err)
continue
}
f.eLock.Lock()
f.logs[evt.Name] = fi
f.eLock.Unlock()
log.Info("create file: %s", evt.Name)
}
if evt.Op&fsnotify.Remove == fsnotify.Remove {
f.eLock.Lock()
delete(f.logs, evt.Name)
f.eLock.Unlock()
log.Info("remove file: %s", evt.Name)
}
}
}
// setIndex persists the read-progress index to disk.
func (f *FileCache) setIndex(idx *Index) (err error) {
w, err := os.OpenFile(f.c.Index, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", f.c.Index, err)
return
}
defer w.Close()
b, err := json.Marshal(idx)
if err != nil {
log.Error("json.Marshal(%v)", idx)
return
}
if _, err = w.Write(b); err != nil {
log.Error("f.Write(%s) error(%v)", b, err)
}
return
}
// tailLog checks the log framing and returns one log record from the reader.
func (f *FileCache) tailLog(rr *bufio.Reader) (b []byte, err error) {
var (
t []byte
)
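// Each record on disk is framed as: logMagic | 4-byte big-endian length | payload, where the
// payload begins with the logId and timestamp written by writeProcess, followed by the log body.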
// peek magic
for {
if b, err = rr.Peek(_logMagicSize); err != nil {
return
}
if bytes.Equal(b, logMagic) {
break
}
rr.Discard(1)
}
// peek length
if t, err = rr.Peek(_logHeadSize); err != nil {
if err != io.EOF {
log.Error("rr.Peek(len:%d) error(%v)", _logLenSize, err)
}
return
}
// peek body
l := int(binary.BigEndian.Uint32(t[_logMagicSize:_logHeadSize]))
if t, err = rr.Peek(_logHeadSize + l); err != nil {
if err != io.EOF {
log.Error("rr.Peek(%d) error(%v)", l, err)
}
return
}
b = t[_logHeadSize:]
rr.Discard(l + _logHeadSize)
return
}
// readProcess reads records from the file cache and decodes the framing protocol.
func (f *FileCache) readProcess() {
var (
err error
idx *Index
rr = bufio.NewReaderSize(nil, f.c.ReadBuffer)
lastTime int64
length int
cur *os.File
)
if idx, cur, err = f.loadRemain(); err == nil {
rr.Reset(cur)
}
for {
if time.Now().Unix()-lastTime > 5 {
if idx != nil {
f.setIndex(idx)
}
lastTime = time.Now().Unix()
}
f.eLock.RLock()
length = len(f.logs)
f.eLock.RUnlock()
// check whether any observed files are available
if length == 0 {
if cur != nil {
cur.Close()
cur = nil
}
time.Sleep(time.Second * 1)
continue
}
// open the first file among the observed logs
if cur == nil {
next := f.nextReadFile()
idx = &Index{
Name: next,
Updated: time.Now().Format(_formatUpdated),
}
if cur, err = f.openLog(idx); err != nil {
log.Error("a.openLog(%v) error(%v)", idx, err)
continue
}
rr.Reset(cur)
f.setIndex(idx)
}
// tail a log from the buffer
b, err := f.tailLog(rr)
if err != nil {
if err == io.EOF {
if length > 1 {
cur.Close()
cur = nil
os.Remove(idx.Name)
f.eLock.Lock()
delete(f.logs, idx.Name)
f.eLock.Unlock()
} else {
time.Sleep(time.Second * 1)
}
continue
}
log.Error("read log error(%v)", err)
rr.Discard(1)
continue
}
idx.Offset += int64(len(b)) + _logHeadSize
if len(b) <= _logLancerHeaderLen {
continue
}
e := event.GetEvent()
e.Write(b[_logLancerHeaderLen:])
e.LogId = string(b[:_logIdSize])
f.readChan <- e
}
}
// check storage size
func (f *FileCache) checkStorageSize() {
var size int64
if entries, err := ioutil.ReadDir(f.c.Storage); err == nil {
for _, entry := range entries {
if !entry.IsDir() {
size += entry.Size()
}
}
}
if size > int64(f.c.StorageMaxMB*1024*1024) {
log.Error("storage is full, discard log")
flowmonitor.Fm.Add("log-agent", "log-agent.output.file-cache", strconv.FormatInt(time.Now().Unix()/100*100, 10), "ERROR", "storage full")
f.storageFull = true
} else {
f.storageFull = false
}
}
func (f *FileCache)
|
nextFileName
|
identifier_name
|
|
file.go
|
event.ProcessorEvent)
f.logs = make(map[string]os.FileInfo)
if _, err := os.Stat(f.c.Storage); os.IsNotExist(err) {
if err = os.MkdirAll(f.c.Storage, 0755); err != nil {
return nil, err
}
}
if err = f.nextFile(); err != nil {
return nil, err
}
if err = f.watch(); err != nil {
return
}
if err = f.loadFiles(); err != nil {
return
}
go f.watchproc()
go f.writeProcess()
go f.readProcess()
return f, nil
}
func (f *FileCache) WriteToCache(e *event.ProcessorEvent) {
f.writeChan <- e
}
func (f *FileCache) ReadFromCache() (e *event.ProcessorEvent) {
e = <-f.readChan
return
}
// loadFiles loads existing cache files from the storage directory.
func (f *FileCache) loadFiles() (err error) {
var (
fi os.FileInfo
fis []os.FileInfo
)
if fis, err = ioutil.ReadDir(f.c.Storage); err != nil {
log.Error("ioutil.ReadDir(%s) error(%v)", f.c.Storage, err)
return
}
for _, fi = range fis {
name := path.Join(f.c.Storage, fi.Name())
if !fi.IsDir() && strings.HasSuffix(name, f.c.Suffix) {
f.eLock.Lock()
f.logs[name] = fi
f.eLock.Unlock()
log.Info("loadFile: %s, size: %d", name, fi.Size())
}
}
return
}
func (f *FileCache) writeProcess() {
var (
err error
n, total int
lengthbuf = make([]byte, 4)
cur *os.File
wr = bufio.NewWriterSize(nil, f.c.WriteBuffer)
tk = time.Tick(time.Duration(f.c.CacheFlushInterval))
timestamp = []byte(fmt.Sprintf("%d", time.Now().UnixNano()/1e6))
)
rand.Seed(time.Now().UnixNano())
for {
select {
case next := <-f.next:
if cur != nil && wr != nil {
wr.Flush()
cur.Close()
}
f, err := os.OpenFile(next, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", next, err)
continue
}
cur = f
wr.Reset(f)
total = 0
case <-tk:
if wr != nil && cur != nil {
wr.Flush()
}
f.checkStorageSize()
case e := <-f.writeChan:
if f.storageFull {
flowmonitor.Fm.AddEvent(e, "log-agent.output.lancer", "ERROR", "file cache storgefull")
event.PutEvent(e)
continue
}
if total > f.c.FileBytes && len(f.next) == 0 {
if err := f.nextFile(); err != nil {
log.Error("c.nextFile() error(%v)", err)
}
}
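// The encoded length covers the lancer header written below (logId + timestamp) plus the event
// payload, so readers can skip over a whole record at once.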
binary.BigEndian.PutUint32(lengthbuf, uint32(e.Length+_logLancerHeaderLen))
// write logMagic
if n, err = wr.Write(logMagic); err != nil {
goto HERE
}
total += n
// write length
if n, err = wr.Write(lengthbuf); err != nil {
goto HERE
}
total += n
// write log
if n, err = wr.Write([]byte(e.LogId)); err != nil {
goto HERE
}
if n, err = wr.Write(timestamp); err != nil {
goto HERE
}
if n, err = wr.Write(e.Bytes()); err != nil {
goto HERE
}
total += n
flowmonitor.Fm.AddEvent(e, "log-agent.output.lancer", "OK", "write file cache ok")
event.PutEvent(e)
continue
HERE: // write file cache error
flowmonitor.Fm.AddEvent(e, "log-agent.output.lancer", "ERROR", "write file cache failed")
event.PutEvent(e)
log.Error("wr.Write() error(%v)", err)
if cur != nil && wr != nil {
wr.Flush()
cur.Close()
}
name := f.nextFileName()
f, err := os.OpenFile(name, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", name, err)
continue
}
cur = f
wr.Reset(f)
total = 0
continue
}
}
}
// index reads the read-progress index file from disk.
func (f *FileCache) index() (idx *Index, err error) {
f.eLock.RLock()
length := len(f.logs)
f.eLock.RUnlock()
if length == 0 {
err = errLogNotFound
return
}
i, err := os.OpenFile(f.c.Index, os.O_RDONLY, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", f.c.Index, err)
return
}
defer i.Close()
b, err := ioutil.ReadAll(i)
if err != nil {
log.Error("ioutil.ReadAll(%s) error(%v)", f.c.Index, err)
return
}
idx = &Index{}
if err = json.Unmarshal(b, idx); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", b, err)
return
}
return
}
// nextReadFile returns the first filename among the observed logs, sorted by name.
func (f *FileCache) nextReadFile() (name string) {
var names []string
f.eLock.RLock()
for name = range f.logs {
names = append(names, name)
}
f.eLock.RUnlock()
if len(names) > 0 {
sort.Strings(names)
name = names[0]
}
return
}
// loadRemain resumes reading from the position recorded in the index, or from the first log file.
func (f *FileCache) loadRemain() (i *Index, w *os.File, err error) {
if i, err = f.index(); err != nil {
next := f.nextReadFile()
if next == "" {
err = errLogNotFound
return
}
i = &Index{
Name: next,
Updated: time.Now().Format(_formatUpdated),
}
}
if w, err = f.openLog(i); err != nil {
log.Warn("a.openLog(%v) error(%v)", i, err)
return
}
return
}
// openLog opens the log file and seeks to the recorded offset.
func (f *FileCache) openLog(idx *Index) (w *os.File, err error) {
if w, err = os.OpenFile(idx.Name, os.O_RDONLY, 0666); err != nil {
log.Error("os.OpenFile(%s) error(%v)", idx.Name, err)
return
}
if _, err = w.Seek(idx.Offset, os.SEEK_SET); err != nil {
log.Error("f.Seek(%d) error(%v)", idx.Offset, err)
return
}
return
}
// watch starts watching the storage directory for file changes.
func (f *FileCache) watch() (err error) {
if f.wh, err = fsnotify.NewWatcher(); err != nil {
log.Error("fsnotify.NewWatcher() error(%v)", err)
return
}
if err = f.wh.Add(f.c.Storage); err != nil {
log.Error("wh.Watch(%s) error(%v)", err)
}
return
}
// watchproc observes the directory for file changes.
func (f *FileCache) watchproc() {
var evt fsnotify.Event
for {
evt = <-f.wh.Events
if evt.Op&fsnotify.Create == fsnotify.Create {
if !strings.HasSuffix(evt.Name, f.c.Suffix) {
log.Warn("create invalid file: %s", evt.Name)
continue
}
fi, err := os.Stat(evt.Name)
if err != nil {
log.Error("os.Stat(%s) error(%v)", evt.Name, err)
continue
}
f.eLock.Lock()
f.logs[evt.Name] = fi
f.eLock.Unlock()
log.Info("create file: %s", evt.Name)
|
f.eLock.Unlock()
log.Info("remove file: %s", evt.Name)
}
}
}
// setIndex persists the read-progress index to disk.
func (f *FileCache) setIndex(idx *Index) (err error) {
w, err := os.OpenFile(f.c.Index, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
log.Error("os.OpenFile(%s) error(%v)", f.c.Index, err)
return
}
defer w.Close()
b, err := json.Marshal(idx)
if err != nil {
log.Error("json.Marshal(%v)", idx)
return
}
if
|
}
if evt.Op&fsnotify.Remove == fsnotify.Remove {
f.eLock.Lock()
delete(f.logs, evt.Name)
|
random_line_split
|
main.rs
|
process::{exit, Command};
use tempdir::TempDir;
use cretonne::settings::Configurable;
macro_rules! vprintln {
($x: expr, $($tts:tt)*) => {
if $x {
println!($($tts)*);
}
}
}
macro_rules! vprint {
($x: expr, $($tts:tt)*) => {
if $x {
print!($($tts)*);
}
}
}
const USAGE: &str = "
Wasm to Cretonne IL translation utility.
Takes a binary WebAssembly module and returns its functions in Cretonne IL format.
The translation is dependent on the runtime chosen.
The default is a dummy runtime that produces placeholder values.
Usage:
wasmstandalone [-vcop] <file>...
wasmstandalone -e [-mvcop] <file>...
wasmstandalone --help | --version
Options:
-v, --verbose displays info on the different steps
-p, --print displays the module and translated functions
-c, --check checks the correctness of the translated functions
-o, --optimize runs optimization passes on the translated functions
-e, --execute enables the standalone runtime and executes the start function of the module
-m, --memory interactive memory inspector after execution
-h, --help print this help message
--version print the Cretonne version
";
#[derive(Deserialize, Debug, Clone)]
struct Args {
arg_file: Vec<String>,
flag_verbose: bool,
flag_execute: bool,
flag_memory: bool,
flag_check: bool,
flag_optimize: bool,
flag_print: bool,
}
fn read_to_end(path: PathBuf) -> Result<Vec<u8>, io::Error> {
let mut buf: Vec<u8> = Vec::new();
let mut file = File::open(path)?;
file.read_to_end(&mut buf)?;
Ok(buf)
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| {
d.help(true)
.version(Some(String::from("0.0.0")))
.deserialize()
})
.unwrap_or_else(|e| e.exit());
let mut terminal = term::stdout().unwrap();
let (mut flag_builder, isa_builder) = cton_native::builders().unwrap_or_else(|_| {
panic!("host machine is not a supported target");
});
// Enable verifier passes in debug mode.
if cfg!(debug_assertions) {
flag_builder.enable("enable_verifier").unwrap();
}
let isa = isa_builder.finish(settings::Flags::new(&flag_builder));
for filename in &args.arg_file {
let path = Path::new(&filename);
let name = path.as_os_str().to_string_lossy();
match handle_module(&args, path.to_path_buf(), &name, &*isa) {
Ok(()) => {}
Err(message) => {
terminal.fg(term::color::RED).unwrap();
println!("error");
terminal.reset().unwrap();
println!("{}", message);
exit(1);
}
}
}
}
fn
|
(args: &Args, path: PathBuf, name: &str, isa: &TargetIsa) -> Result<(), String> {
let mut terminal = term::stdout().unwrap();
terminal.fg(term::color::YELLOW).unwrap();
vprint!(args.flag_verbose, "Handling: ");
terminal.reset().unwrap();
vprintln!(args.flag_verbose, "\"{}\"", name);
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Translating...");
terminal.reset().unwrap();
let mut data = read_to_end(path.clone()).map_err(|err| {
String::from(err.description())
})?;
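// Not already a binary module (missing the "\0asm" magic): treat the input as the textual
// format and convert it to binary with the external wat2wasm tool via a temporary file.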
if !data.starts_with(&[b'\0', b'a', b's', b'm']) {
let tmp_dir = TempDir::new("cretonne-wasm").unwrap();
let file_path = tmp_dir.path().join("module.wasm");
File::create(file_path.clone()).unwrap();
Command::new("wat2wasm")
.arg(path.clone())
.arg("-o")
.arg(file_path.to_str().unwrap())
.output()
.or_else(|e| if let io::ErrorKind::NotFound = e.kind() {
return Err(String::from("wat2wasm not found"));
} else {
return Err(String::from(e.description()));
})?;
data = read_to_end(file_path).map_err(
|err| String::from(err.description()),
)?;
}
let mut runtime = wasmstandalone_runtime::Runtime::with_flags(isa.flags().clone());
let translation = {
match translate_module(&data, &mut runtime) {
Ok(x) => x,
Err(string) => {
return Err(string);
}
}
};
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, " ok");
terminal.reset().unwrap();
if args.flag_check {
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Checking... ");
terminal.reset().unwrap();
for func in &translation.functions {
verifier::verify_function(func, isa).map_err(|err| {
pretty_verifier_error(func, Some(isa), &err)
})?;
}
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, " ok");
terminal.reset().unwrap();
}
if args.flag_print {
let mut writer1 = stdout();
let mut writer2 = stdout();
match pretty_print_translation(name, &data, &translation, &mut writer1, &mut writer2, isa) {
Err(error) => return Err(String::from(error.description())),
Ok(()) => (),
}
}
if args.flag_optimize {
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Optimizing... ");
terminal.reset().unwrap();
for func in &translation.functions {
let mut loop_analysis = LoopAnalysis::new();
let mut cfg = ControlFlowGraph::new();
cfg.compute(func);
let mut domtree = DominatorTree::new();
domtree.compute(func, &cfg);
loop_analysis.compute(func, &cfg, &domtree);
let mut context = Context::new();
context.func = func.clone(); // TODO: Avoid this clone.
context.cfg = cfg;
context.domtree = domtree;
context.loop_analysis = loop_analysis;
match verifier::verify_context(&context.func, &context.cfg, &context.domtree, isa) {
Ok(()) => (),
Err(ref err) => {
return Err(pretty_verifier_error(&context.func, Some(isa), err));
}
};
match context.licm(isa) {
Ok(())=> (),
Err(error) => {
match error {
CtonError::Verifier(ref err) => {
return Err(pretty_verifier_error(&context.func, Some(isa), err));
}
CtonError::InvalidInput |
CtonError::ImplLimitExceeded |
CtonError::CodeTooLarge => return Err(String::from(error.description())),
}
}
};
match verifier::verify_context(&context.func, &context.cfg, &context.domtree, isa) {
Ok(()) => (),
Err(ref err) => return Err(pretty_verifier_error(&context.func, Some(isa), err)),
}
}
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, " ok");
terminal.reset().unwrap();
}
if args.flag_execute {
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Compiling... ");
terminal.reset().unwrap();
match compile_module(&translation, isa, &runtime) {
Ok(ref exec) => {
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, "ok");
terminal.reset().unwrap();
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Executing... ");
terminal.reset().unwrap();
match execute(exec) {
Ok(()) => {
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, "ok");
terminal.reset().unwrap();
}
Err(s) => {
return Err(s);
}
}
}
Err(s) => {
return Err(s);
}
};
if args.flag_memory {
let mut input = String::new();
terminal.fg(term::color::YELLOW).unwrap();
println!("Inspecting memory");
terminal.fg(term::color::MAGENTA).unwrap();
println!("Type 'quit' to exit.");
terminal.reset().unwrap();
loop {
input.clear();
terminal.fg(term::color::YELLOW).unwrap();
print!("Memory index, offset, length (e.g. 0,0,4): ");
terminal.reset().unwrap();
let _ = stdout().flush();
match io::stdin().read_line(&mut input) {
Ok(_) => {
input.pop();
if input == "quit" {
break;
}
let split: Vec<&str> = input.split(',').collect
|
handle_module
|
identifier_name
|
main.rs
|
[b'\0', b'a', b's', b'm']) {
let tmp_dir = TempDir::new("cretonne-wasm").unwrap();
let file_path = tmp_dir.path().join("module.wasm");
File::create(file_path.clone()).unwrap();
Command::new("wat2wasm")
.arg(path.clone())
.arg("-o")
.arg(file_path.to_str().unwrap())
.output()
.or_else(|e| if let io::ErrorKind::NotFound = e.kind() {
return Err(String::from("wat2wasm not found"));
} else {
return Err(String::from(e.description()));
})?;
data = read_to_end(file_path).map_err(
|err| String::from(err.description()),
)?;
}
let mut runtime = wasmstandalone_runtime::Runtime::with_flags(isa.flags().clone());
let translation = {
match translate_module(&data, &mut runtime) {
Ok(x) => x,
Err(string) => {
return Err(string);
}
}
};
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, " ok");
terminal.reset().unwrap();
if args.flag_check {
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Checking... ");
terminal.reset().unwrap();
for func in &translation.functions {
verifier::verify_function(func, isa).map_err(|err| {
pretty_verifier_error(func, Some(isa), &err)
})?;
}
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, " ok");
terminal.reset().unwrap();
}
if args.flag_print {
let mut writer1 = stdout();
let mut writer2 = stdout();
match pretty_print_translation(name, &data, &translation, &mut writer1, &mut writer2, isa) {
Err(error) => return Err(String::from(error.description())),
Ok(()) => (),
}
}
if args.flag_optimize {
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Optimizing... ");
terminal.reset().unwrap();
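// For each translated function: build the CFG, dominator tree and loop analysis, verify the
// context, run LICM, then verify again before moving on.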
for func in &translation.functions {
let mut loop_analysis = LoopAnalysis::new();
let mut cfg = ControlFlowGraph::new();
cfg.compute(func);
let mut domtree = DominatorTree::new();
domtree.compute(func, &cfg);
loop_analysis.compute(func, &cfg, &domtree);
let mut context = Context::new();
context.func = func.clone(); // TODO: Avoid this clone.
context.cfg = cfg;
context.domtree = domtree;
context.loop_analysis = loop_analysis;
match verifier::verify_context(&context.func, &context.cfg, &context.domtree, isa) {
Ok(()) => (),
Err(ref err) => {
return Err(pretty_verifier_error(&context.func, Some(isa), err));
}
};
match context.licm(isa) {
Ok(())=> (),
Err(error) => {
match error {
CtonError::Verifier(ref err) => {
return Err(pretty_verifier_error(&context.func, Some(isa), err));
}
CtonError::InvalidInput |
CtonError::ImplLimitExceeded |
CtonError::CodeTooLarge => return Err(String::from(error.description())),
}
}
};
match verifier::verify_context(&context.func, &context.cfg, &context.domtree, isa) {
Ok(()) => (),
Err(ref err) => return Err(pretty_verifier_error(&context.func, Some(isa), err)),
}
}
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, " ok");
terminal.reset().unwrap();
}
if args.flag_execute {
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Compiling... ");
terminal.reset().unwrap();
match compile_module(&translation, isa, &runtime) {
Ok(ref exec) => {
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, "ok");
terminal.reset().unwrap();
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Executing... ");
terminal.reset().unwrap();
match execute(exec) {
Ok(()) => {
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, "ok");
terminal.reset().unwrap();
}
Err(s) => {
return Err(s);
}
}
}
Err(s) => {
return Err(s);
}
};
if args.flag_memory {
let mut input = String::new();
terminal.fg(term::color::YELLOW).unwrap();
println!("Inspecting memory");
terminal.fg(term::color::MAGENTA).unwrap();
println!("Type 'quit' to exit.");
terminal.reset().unwrap();
loop {
input.clear();
terminal.fg(term::color::YELLOW).unwrap();
print!("Memory index, offset, length (e.g. 0,0,4): ");
terminal.reset().unwrap();
let _ = stdout().flush();
match io::stdin().read_line(&mut input) {
Ok(_) => {
input.pop();
if input == "quit" {
break;
}
let split: Vec<&str> = input.split(',').collect();
if split.len() != 3 {
break;
}
let memory = runtime.inspect_memory(
str::parse(split[0]).unwrap(),
str::parse(split[1]).unwrap(),
str::parse(split[2]).unwrap(),
);
let mut s = memory.iter().fold(String::from("#"), |mut acc, byte| {
acc.push_str(format!("{:02x}_", byte).as_str());
acc
});
s.pop();
println!("{}", s);
}
Err(error) => return Err(String::from(error.description())),
}
}
}
}
Ok(())
}
// Prints out a Wasm module, and for each function the corresponding translation in Cretonne IL.
fn pretty_print_translation(
filename: &str,
data: &[u8],
translation: &TranslationResult,
writer_wat: &mut Write,
writer_cretonne: &mut Write,
isa: &TargetIsa,
) -> Result<(), io::Error> {
let mut terminal = term::stdout().unwrap();
let mut parser = Parser::new(data);
let mut parser_writer = Writer::new(writer_wat);
match parser.read() {
s @ &ParserState::BeginWasm { .. } => parser_writer.write(s)?,
_ => panic!("modules should begin properly"),
}
loop {
match parser.read() {
s @ &ParserState::BeginSection { code: SectionCode::Code, .. } => {
// The code section begins
parser_writer.write(s)?;
break;
}
&ParserState::EndWasm => return Ok(()),
s => parser_writer.write(s)?,
}
}
let mut function_index = 0;
loop {
match parser.read() {
s @ &ParserState::BeginFunctionBody { .. } => {
terminal.fg(term::color::BLUE).unwrap();
write!(
writer_cretonne,
"====== Function No. {} of module \"{}\" ======\n",
function_index,
filename
)?;
terminal.fg(term::color::CYAN).unwrap();
write!(writer_cretonne, "Wast ---------->\n")?;
terminal.reset().unwrap();
parser_writer.write(s)?;
}
s @ &ParserState::EndSection => {
parser_writer.write(s)?;
break;
}
_ => panic!("wrong content in code section"),
}
loop {
match parser.read() {
s @ &ParserState::EndFunctionBody => {
parser_writer.write(s)?;
break;
}
s => {
parser_writer.write(s)?;
}
};
}
let mut function_string =
format!(" {}", translation.functions[function_index].display(isa));
function_string.pop();
let function_str = str::replace(function_string.as_str(), "\n", "\n ");
terminal.fg(term::color::CYAN).unwrap();
write!(writer_cretonne, "Cretonne IL --->\n")?;
terminal.reset().unwrap();
write!(writer_cretonne, "{}\n", function_str)?;
function_index += 1;
}
loop {
match parser.read() {
&ParserState::EndWasm => return Ok(()),
s => parser_writer.write(s)?,
}
}
}
/// Pretty-print a verifier error.
pub fn pretty_verifier_error(
func: &ir::Function,
isa: Option<&TargetIsa>,
err: &verifier::Error,
) -> String
|
{
let msg = err.to_string();
let str1 = match err.location {
AnyEntity::Inst(inst) => {
format!(
"{}\n{}: {}\n\n",
msg,
inst,
func.dfg.display_inst(inst, isa)
)
}
_ => String::from(format!("{}\n", msg)),
};
format!("{}{}", str1, func.display(isa))
}
|
identifier_body
|
|
main.rs
|
use cretonne::ir;
use cretonne::ir::entities::AnyEntity;
use cretonne::isa::TargetIsa;
use cretonne::verifier;
use cretonne::settings;
use std::fs::File;
use std::error::Error;
use std::io;
use std::io::stdout;
use std::io::prelude::*;
use docopt::Docopt;
use std::path::Path;
use std::process::{exit, Command};
use tempdir::TempDir;
use cretonne::settings::Configurable;
macro_rules! vprintln {
($x: expr, $($tts:tt)*) => {
if $x {
println!($($tts)*);
}
}
}
macro_rules! vprint {
($x: expr, $($tts:tt)*) => {
if $x {
print!($($tts)*);
}
}
}
const USAGE: &str = "
Wasm to Cretonne IL translation utility.
Takes a binary WebAssembly module and returns its functions in Cretonne IL format.
The translation is dependent on the runtime chosen.
The default is a dummy runtime that produces placeholder values.
Usage:
wasmstandalone [-vcop] <file>...
wasmstandalone -e [-mvcop] <file>...
wasmstandalone --help | --version
Options:
-v, --verbose displays info on the different steps
-p, --print displays the module and translated functions
-c, --check checks the correctness of the translated functions
-o, --optimize runs optimization passes on the translated functions
-e, --execute enables the standalone runtime and executes the start function of the module
-m, --memory interactive memory inspector after execution
-h, --help print this help message
--version print the Cretonne version
";
#[derive(Deserialize, Debug, Clone)]
struct Args {
arg_file: Vec<String>,
flag_verbose: bool,
flag_execute: bool,
flag_memory: bool,
flag_check: bool,
flag_optimize: bool,
flag_print: bool,
}
fn read_to_end(path: PathBuf) -> Result<Vec<u8>, io::Error> {
let mut buf: Vec<u8> = Vec::new();
let mut file = File::open(path)?;
file.read_to_end(&mut buf)?;
Ok(buf)
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| {
d.help(true)
.version(Some(String::from("0.0.0")))
.deserialize()
})
.unwrap_or_else(|e| e.exit());
let mut terminal = term::stdout().unwrap();
let (mut flag_builder, isa_builder) = cton_native::builders().unwrap_or_else(|_| {
panic!("host machine is not a supported target");
});
// Enable verifier passes in debug mode.
if cfg!(debug_assertions) {
flag_builder.enable("enable_verifier").unwrap();
}
let isa = isa_builder.finish(settings::Flags::new(&flag_builder));
for filename in &args.arg_file {
let path = Path::new(&filename);
let name = path.as_os_str().to_string_lossy();
match handle_module(&args, path.to_path_buf(), &name, &*isa) {
Ok(()) => {}
Err(message) => {
terminal.fg(term::color::RED).unwrap();
println!("error");
terminal.reset().unwrap();
println!("{}", message);
exit(1);
}
}
}
}
fn handle_module(args: &Args, path: PathBuf, name: &str, isa: &TargetIsa) -> Result<(), String> {
let mut terminal = term::stdout().unwrap();
terminal.fg(term::color::YELLOW).unwrap();
vprint!(args.flag_verbose, "Handling: ");
terminal.reset().unwrap();
vprintln!(args.flag_verbose, "\"{}\"", name);
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Translating...");
terminal.reset().unwrap();
let mut data = read_to_end(path.clone()).map_err(|err| {
String::from(err.description())
})?;
if !data.starts_with(&[b'\0', b'a', b's', b'm']) {
let tmp_dir = TempDir::new("cretonne-wasm").unwrap();
let file_path = tmp_dir.path().join("module.wasm");
File::create(file_path.clone()).unwrap();
Command::new("wat2wasm")
.arg(path.clone())
.arg("-o")
.arg(file_path.to_str().unwrap())
.output()
.or_else(|e| if let io::ErrorKind::NotFound = e.kind() {
return Err(String::from("wat2wasm not found"));
} else {
return Err(String::from(e.description()));
})?;
data = read_to_end(file_path).map_err(
|err| String::from(err.description()),
)?;
}
let mut runtime = wasmstandalone_runtime::Runtime::with_flags(isa.flags().clone());
let translation = {
match translate_module(&data, &mut runtime) {
Ok(x) => x,
Err(string) => {
return Err(string);
}
}
};
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, " ok");
terminal.reset().unwrap();
if args.flag_check {
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Checking... ");
terminal.reset().unwrap();
for func in &translation.functions {
verifier::verify_function(func, isa).map_err(|err| {
pretty_verifier_error(func, Some(isa), &err)
})?;
}
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, " ok");
terminal.reset().unwrap();
}
if args.flag_print {
let mut writer1 = stdout();
let mut writer2 = stdout();
match pretty_print_translation(name, &data, &translation, &mut writer1, &mut writer2, isa) {
Err(error) => return Err(String::from(error.description())),
Ok(()) => (),
}
}
if args.flag_optimize {
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Optimizing... ");
terminal.reset().unwrap();
for func in &translation.functions {
let mut loop_analysis = LoopAnalysis::new();
let mut cfg = ControlFlowGraph::new();
cfg.compute(func);
let mut domtree = DominatorTree::new();
domtree.compute(func, &cfg);
loop_analysis.compute(func, &cfg, &domtree);
let mut context = Context::new();
context.func = func.clone(); // TODO: Avoid this clone.
context.cfg = cfg;
context.domtree = domtree;
context.loop_analysis = loop_analysis;
match verifier::verify_context(&context.func, &context.cfg, &context.domtree, isa) {
Ok(()) => (),
Err(ref err) => {
return Err(pretty_verifier_error(&context.func, Some(isa), err));
}
};
match context.licm(isa) {
Ok(()) => (),
Err(error) => {
match error {
CtonError::Verifier(ref err) => {
return Err(pretty_verifier_error(&context.func, Some(isa), err));
}
CtonError::InvalidInput |
CtonError::ImplLimitExceeded |
CtonError::CodeTooLarge => return Err(String::from(error.description())),
}
}
};
match verifier::verify_context(&context.func, &context.cfg, &context.domtree, isa) {
Ok(()) => (),
Err(ref err) => return Err(pretty_verifier_error(&context.func, Some(isa), err)),
}
}
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, " ok");
terminal.reset().unwrap();
}
if args.flag_execute {
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Compiling... ");
terminal.reset().unwrap();
match compile_module(&translation, isa, &runtime) {
Ok(ref exec) => {
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, "ok");
terminal.reset().unwrap();
terminal.fg(term::color::MAGENTA).unwrap();
vprint!(args.flag_verbose, "Executing... ");
terminal.reset().unwrap();
match execute(exec) {
Ok(()) => {
terminal.fg(term::color::GREEN).unwrap();
vprintln!(args.flag_verbose, "ok");
terminal.reset().unwrap();
}
Err(s) => {
return Err(s);
}
}
}
Err(s) => {
return Err(s);
}
};
if args.flag_memory {
let mut input = String::new();
terminal.fg(term::color::YELLOW).unwrap();
println
|
use cretonne::flowgraph::ControlFlowGraph;
use cretonne::dominator_tree::DominatorTree;
use cretonne::Context;
use cretonne::result::CtonError;
|
random_line_split
|
|
nodeserver.go
|
req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "" && ns.ephemeral // Kubernetes 1.15 doesn't have csi.storage.k8s.io/ephemeral
if req.GetVolumeCapability().GetBlock() != nil &&
req.GetVolumeCapability().GetMount() != nil {
return nil, status.Error(codes.InvalidArgument, "cannot have both block and mount access type")
}
// if ephemeral is specified, create the volume here to avoid errors
if ephemeralVolume {
volID := req.GetVolumeId()
volName := fmt.Sprintf("ephemeral=%s", volID)
vol, err := createHostpathVolume(req.GetVolumeId(), volName, maxStorageCapacity, mountAccess, ephemeralVolume)
if err != nil && !os.IsExist(err) {
glog.Error("ephemeral mode failed to create volume: ", err)
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("ephemeral mode: created volume: %s", vol.VolPath)
}
vol, err := getVolumeByID(req.GetVolumeId())
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
if req.GetVolumeCapability().GetBlock() != nil {
if vol.VolAccessType != blockAccess {
return nil, status.Error(codes.InvalidArgument, "cannot publish a non-block volume as block volume")
}
volPathHandler := volumepathhandler.VolumePathHandler{}
// Get loop device from the volume path
loopDevice, err := volPathHandler.GetLoopDevice(vol.VolPath)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to get the loop device: %v", err))
}
mounter := mount.New("")
// Check if the target path exists. Create if not present.
_, err = os.Lstat(targetPath)
if os.IsNotExist(err) {
// if err = mounter.MakeFile(targetPath); err != nil {
if err = makeFile(targetPath); err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to create target path: %s: %v", targetPath, err))
}
}
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to check if the target block file exists: %v", err)
}
// Check if the target path is already mounted. Prevent remounting
// notMount, err := mounter.IsNotMountPoint(targetPath)
notMount, err := mount.IsNotMountPoint(mounter, targetPath)
if err != nil {
if !os.IsNotExist(err) {
return nil, status.Errorf(codes.Internal, "error checking path %s for mount: %s", targetPath, err)
}
notMount = true
}
if !notMount {
// It's already mounted.
glog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
options := []string{"bind"}
if err := mount.New("").Mount(loopDevice, targetPath, "", options); err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to mount block device: %s at %s: %v", loopDevice, targetPath, err))
}
} else if req.GetVolumeCapability().GetMount() != nil {
if vol.VolAccessType != mountAccess {
return nil, status.Error(codes.InvalidArgument, "cannot publish a non-mount volume as mount volume")
}
mounter := mount.New("")
notMnt, err := mount.IsNotMountPoint(mounter, targetPath)
if err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(targetPath, 0750); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
notMnt = true
} else {
return nil, status.Error(codes.Internal, err.Error())
}
}
if !notMnt {
return &csi.NodePublishVolumeResponse{}, nil
}
fsType := req.GetVolumeCapability().GetMount().GetFsType()
deviceId := ""
if req.GetPublishContext() != nil {
deviceId = req.GetPublishContext()[deviceID]
}
readOnly := req.GetReadonly()
volumeId := req.GetVolumeId()
attrib := req.GetVolumeContext()
mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
glog.V(4).Infof("target %v", targetPath)
glog.V(4).Infof("fstype %v", fsType)
glog.V(4).Infof("device %v", deviceId)
glog.V(4).Infof("readonly %v", readOnly)
glog.V(4).Infof("volumeId %v", volumeId)
glog.V(4).Infof("attributes %v", attrib)
glog.V(4).Infof("mountflags %v", mountFlags)
options := []string{"bind"}
if readOnly {
options = append(options, "ro")
}
// mounter := mount.New("")
path := getVolumePath(volumeId)
if err := mounter.Mount(path, targetPath, "", options); err != nil {
var errList strings.Builder
errList.WriteString(err.Error())
if vol.Ephemeral {
if rmErr := os.RemoveAll(path); rmErr != nil && !os.IsNotExist(rmErr) {
errList.WriteString(fmt.Sprintf(" :%s", rmErr.Error()))
}
}
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to mount block device: %s at %s: %s", path, targetPath, errList.String()))
}
}
return &csi.NodePublishVolumeResponse{}, nil
}
// NodeUnpublishVolume is the reverse of NodePublishVolume. It unmounts the volume from the target path.
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(req.GetTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
targetPath := req.GetTargetPath()
volumeID := req.GetVolumeId()
vol, err := getVolumeByID(volumeID)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
// Unmount only if the target path is really a mount point.
if notMnt, err := mount.IsNotMountPoint(mount.New(""), targetPath); err != nil {
if !os.IsNotExist(err) {
return nil, status.Error(codes.Internal, err.Error())
}
} else if !notMnt {
// Unmounting the image or filesystem.
err = mount.New("").Unmount(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
// Delete the mount point.
// Does not return error for non-existent path, repeat calls OK for idempotency.
if err = os.RemoveAll(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("hostpath: volume %s has been unpublished.", targetPath)
if vol.Ephemeral {
glog.V(4).Infof("deleting volume %s", volumeID)
if err := deleteHostpathVolume(volumeID); err != nil && !os.IsNotExist(err) {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to delete volume: %s", err))
}
}
return &csi.NodeUnpublishVolumeResponse{}, nil
}
// NodeStageVolume is called by the Container Orchestrator to temporarily mount the volume to a staging path.
// Usually this staging path is a global directory on the node. In Kubernetes, after it's mounted to the
// global directory, you mount it into the pod directory (via NodePublishVolume). The reason that mounting
// is a two-step operation is that Kubernetes allows a single volume to be used by multiple pods. This is
// allowed when the storage system supports it (say, NFS) or if all pods run on the same node. One thing
// to note is that you also need to format the volume if it's not formatted already. Keep that in mind.
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(req.GetStagingTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
if req.GetVolumeCapability() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume Capability missing in request")
}
|
random_line_split
|
||
nodeserver.go
|
StorageCapacity, mountAccess, ephemeralVolume)
if err != nil && !os.IsExist(err) {
glog.Error("ephemeral mode failed to create volume: ", err)
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("ephemeral mode: created volume: %s", vol.VolPath)
}
vol, err := getVolumeByID(req.GetVolumeId())
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
if req.GetVolumeCapability().GetBlock() != nil {
if vol.VolAccessType != blockAccess {
return nil, status.Error(codes.InvalidArgument, "cannot publish a non-block volume as block volume")
}
volPathHandler := volumepathhandler.VolumePathHandler{}
// Get loop device from the volume path
loopDevice, err := volPathHandler.GetLoopDevice(vol.VolPath)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to get the loop device: %v", err))
}
mounter := mount.New("")
// Check if the target path exists. Create if not present.
_, err = os.Lstat(targetPath)
if os.IsNotExist(err) {
// if err = mounter.MakeFile(targetPath); err != nil {
if err = makeFile(targetPath); err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to create target path: %s: %v", targetPath, err))
}
}
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to check if the target block file exists: %v", err)
}
// Check if the target path is already mounted. Prevent remounting
// notMount, err := mounter.IsNotMountPoint(targetPath)
notMount, err := mount.IsNotMountPoint(mounter, targetPath)
if err != nil {
if !os.IsNotExist(err) {
return nil, status.Errorf(codes.Internal, "error checking path %s for mount: %s", targetPath, err)
}
notMount = true
}
if !notMount {
// It's already mounted.
glog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
options := []string{"bind"}
if err := mount.New("").Mount(loopDevice, targetPath, "", options); err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to mount block device: %s at %s: %v", loopDevice, targetPath, err))
}
} else if req.GetVolumeCapability().GetMount() != nil {
if vol.VolAccessType != mountAccess {
return nil, status.Error(codes.InvalidArgument, "cannot publish a non-mount volume as mount volume")
}
mounter := mount.New("")
notMnt, err := mount.IsNotMountPoint(mounter, targetPath)
if err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(targetPath, 0750); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
notMnt = true
} else {
return nil, status.Error(codes.Internal, err.Error())
}
}
if !notMnt {
return &csi.NodePublishVolumeResponse{}, nil
}
fsType := req.GetVolumeCapability().GetMount().GetFsType()
deviceId := ""
if req.GetPublishContext() != nil {
deviceId = req.GetPublishContext()[deviceID]
}
readOnly := req.GetReadonly()
volumeId := req.GetVolumeId()
attrib := req.GetVolumeContext()
mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
glog.V(4).Infof("target %v", targetPath)
glog.V(4).Infof("fstype %v", fsType)
glog.V(4).Infof("device %v", deviceId)
glog.V(4).Infof("readonly %v", readOnly)
glog.V(4).Infof("volumeId %v", volumeId)
glog.V(4).Infof("attributes %v", attrib)
glog.V(4).Infof("mountflags %v", mountFlags)
options := []string{"bind"}
if readOnly {
options = append(options, "ro")
}
// mounter := mount.New("")
path := getVolumePath(volumeId)
if err := mounter.Mount(path, targetPath, "", options); err != nil {
var errList strings.Builder
errList.WriteString(err.Error())
if vol.Ephemeral {
if rmErr := os.RemoveAll(path); rmErr != nil && !os.IsNotExist(rmErr) {
errList.WriteString(fmt.Sprintf(" :%s", rmErr.Error()))
}
}
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to mount block device: %s at %s: %s", path, targetPath, errList.String()))
}
}
return &csi.NodePublishVolumeResponse{}, nil
}
// NodeUnpublishVolume is the reverse of NodePublishVolume. It unmounts the volume from the target path.
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(req.GetTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
targetPath := req.GetTargetPath()
volumeID := req.GetVolumeId()
vol, err := getVolumeByID(volumeID)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
// Unmount only if the target path is really a mount point.
if notMnt, err := mount.IsNotMountPoint(mount.New(""), targetPath); err != nil {
if !os.IsNotExist(err) {
return nil, status.Error(codes.Internal, err.Error())
}
} else if !notMnt {
// Unmounting the image or filesystem.
err = mount.New("").Unmount(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
// Delete the mount point.
// Does not return error for non-existent path, repeat calls OK for idempotency.
if err = os.RemoveAll(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("hostpath: volume %s has been unpublished.", targetPath)
if vol.Ephemeral {
glog.V(4).Infof("deleting volume %s", volumeID)
if err := deleteHostpathVolume(volumeID); err != nil && !os.IsNotExist(err) {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to delete volume: %s", err))
}
}
return &csi.NodeUnpublishVolumeResponse{}, nil
}
// NodeStageVolume is called by the Container Orchestrator to temporarily mount the volume to a staging path.
// Usually this staging path is a global directory on the node. In Kubernetes, after it's mounted to the
// global directory, you mount it into the pod directory (via NodePublishVolume). The reason that mounting
// is a two-step operation is that Kubernetes allows a single volume to be used by multiple pods. This is
// allowed when the storage system supports it (say, NFS) or if all pods run on the same node. One thing
// to note is that you also need to format the volume if it's not formatted already. Keep that in mind.
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(req.GetStagingTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
if req.GetVolumeCapability() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume Capability missing in request")
}
return &csi.NodeStageVolumeResponse{}, nil
}
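// Illustration of the two-step flow described above (the paths are hypothetical
// examples of what a Container Orchestrator might pass in):
//   NodeStageVolume:   staging_target_path = /var/lib/kubelet/plugins/.../globalmount
//   NodePublishVolume: target_path         = /var/lib/kubelet/pods/<pod-uid>/volumes/.../mount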
// NodeUnstageVolume is the reverse of NodeStageVolume. Called by the Container Orchestrator to unmount the
// volume from the staging path.
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error)
|
{
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(req.GetStagingTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missin gin request")
}
return &csi.NodeUnstageVolumeResponse{}, nil
}
|
identifier_body
|
|
nodeserver.go
|
nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
targetPath := req.GetTargetPath()
ephemeralVolume := req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "true" ||
req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "" && ns.ephemeral // Kubernetes 1.15 doesn't have csi.storage.k8s.io/ephemeral
if req.GetVolumeCapability().GetBlock() != nil &&
req.GetVolumeCapability().GetMount() != nil {
return nil, status.Error(codes.InvalidArgument, "cannot have both block and mount access type")
}
// if ephemeral is specified, create the volume here to avoid errors
if ephemeralVolume {
volID := req.GetVolumeId()
volName := fmt.Sprintf("ephemeral=%s", volID)
vol, err := createHostpathVolume(req.GetVolumeId(), volName, maxStorageCapacity, mountAccess, ephemeralVolume)
if err != nil && !os.IsExist(err) {
glog.Error("ephemeral mode failed to create volume: ", err)
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("ephemeral mode: created volume: %s", vol.VolPath)
}
vol, err := getVolumeByID(req.GetVolumeId())
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
if req.GetVolumeCapability().GetBlock() != nil {
if vol.VolAccessType != blockAccess {
return nil, status.Error(codes.InvalidArgument, "cannot publish a non-block volume as block volume")
}
volPathHandler := volumepathhandler.VolumePathHandler{}
// Get loop device from the volume path
loopDevice, err := volPathHandler.GetLoopDevice(vol.VolPath)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to get the loop device: %v", err))
}
mounter := mount.New("")
// Check if the target path exists. Create if not present.
_, err = os.Lstat(targetPath)
if os.IsNotExist(err) {
// if err = mounter.MakeFile(targetPath); err != nil {
if err = makeFile(targetPath); err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to create target path: %s: %v", targetPath, err))
}
}
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to check if the target block file exists: %v", err)
}
// Check if the target path is already mounted. Prevent remounting
// notMount, err := mounter.IsNotMountPoint(targetPath)
notMount, err := mount.IsNotMountPoint(mounter, targetPath)
if err != nil {
if !os.IsNotExist(err) {
return nil, status.Errorf(codes.Internal, "error checking path %s for mount: %s", targetPath, err)
}
notMount = true
}
if !notMount {
// It's already mounted.
glog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
options := []string{"bind"}
if err := mount.New("").Mount(loopDevice, targetPath, "", options); err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to mount block device: %s at %s: %v", loopDevice, targetPath, err))
}
} else if req.GetVolumeCapability().GetMount() != nil {
if vol.VolAccessType != mountAccess {
return nil, status.Error(codes.InvalidArgument, "cannot publish a non-mount volume as mount volume")
}
mounter := mount.New("")
notMnt, err := mount.IsNotMountPoint(mounter, targetPath)
if err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(targetPath, 0750); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
notMnt = true
} else {
return nil, status.Error(codes.Internal, err.Error())
}
}
if !notMnt {
return &csi.NodePublishVolumeResponse{}, nil
}
fsType := req.GetVolumeCapability().GetMount().GetFsType()
deviceId := ""
if req.GetPublishContext() != nil {
deviceId = req.GetPublishContext()[deviceID]
}
readOnly := req.GetReadonly()
volumeId := req.GetVolumeId()
attrib := req.GetVolumeContext()
mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
glog.V(4).Infof("target %v", targetPath)
glog.V(4).Infof("fstype %v", fsType)
glog.V(4).Infof("device %v", deviceId)
glog.V(4).Infof("readonly %v", readOnly)
glog.V(4).Infof("volumeId %v", volumeId)
glog.V(4).Infof("attributes %v", attrib)
glog.V(4).Infof("mountflags %v", mountFlags)
options := []string{"bind"}
if readOnly {
options = append(options, "ro")
}
// mounter := mount.New("")
path := getVolumePath(volumeId)
if err := mounter.Mount(path, targetPath, "", options); err != nil {
var errList strings.Builder
errList.WriteString(err.Error())
if vol.Ephemeral {
if rmErr := os.RemoveAll(path); rmErr != nil && !os.IsNotExist(rmErr) {
errList.WriteString(fmt.Sprintf(" :%s", rmErr.Error()))
}
}
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to mount block device: %s at %s: %s", path, targetPath, errList.String()))
}
}
return &csi.NodePublishVolumeResponse{}, nil
}
// NodeUnpublishVolume is the reverse of NodePublishVolume. It unmounts the volume from the target path.
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(req.GetTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
targetPath := req.GetTargetPath()
volumeID := req.GetVolumeId()
vol, err := getVolumeByID(volumeID)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
// Unmount only if the target path is really a mount point.
if notMnt, err := mount.IsNotMountPoint(mount.New(""), targetPath); err != nil {
if !os.IsNotExist(err) {
return nil, status.Error(codes.Internal, err.Error())
}
} else if !notMnt {
// Unmounting the image or filesystem.
err = mount.New("").Unmount(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
// Delete the mount point.
// Does not return error for non-existent path, repeat calls OK for idempotency.
if err = os.RemoveAll(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("hostpath: volume %s has been unpublished.", targetPath)
if vol.Ephemeral {
glog.V(4).Infof("deleting volume %s", volumeID)
if err := deleteHostpathVolume(volumeID); err != nil && !os.IsNotExist(err) {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to delete volume: %s", err))
}
}
return &csi.NodeUnpublishVolumeResponse{}, nil
}
// NodeStageVolume is called by the Container Orchestrator to temporarily mount the volume to a staging path.
// Usually this staging path is a global directory on the node. In Kubernetes, after it's mounted to the
// global directory, you mount it into the pod directory (via NodePublishVolume). The reason that mounting
// is a two-step operation is that Kubernetes allows a single volume to be used by multiple pods. This is
// allowed when the storage system supports it (say, NFS) or if all pods run on the same node. One thing
// to note is that you also need to format the volume if it's not formatted already. Keep that in mind.
func (ns *nodeServer)
|
(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(req.GetStagingTargetPath
|
NodeStageVolume
|
identifier_name
|
nodeserver.go
|
nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
targetPath := req.GetTargetPath()
ephemeralVolume := req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "true" ||
req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "" && ns.ephemeral // Kubernetes 1.15 doesn't have csi.storage.k8s.io/ephemeral
if req.GetVolumeCapability().GetBlock() != nil &&
req.GetVolumeCapability().GetMount() != nil {
return nil, status.Error(codes.InvalidArgument, "cannot have both block and mount access type")
}
// if ephemeral is specified, create the volume here to avoid errors
if ephemeralVolume {
volID := req.GetVolumeId()
volName := fmt.Sprintf("ephemeral=%s", volID)
vol, err := createHostpathVolume(req.GetVolumeId(), volName, maxStorageCapacity, mountAccess, ephemeralVolume)
if err != nil && !os.IsExist(err) {
glog.Error("ephemeral mode failed to create volume: ", err)
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("ephemeral mode: created volume: %s", vol.VolPath)
}
vol, err := getVolumeByID(req.GetVolumeId())
if err != nil
|
if req.GetVolumeCapability().GetBlock() != nil {
if vol.VolAccessType != blockAccess {
return nil, status.Error(codes.InvalidArgument, "cannot publish a non-block volume as block volume")
}
volPathHandler := volumepathhandler.VolumePathHandler{}
// Get loop device from the volume path
loopDevice, err := volPathHandler.GetLoopDevice(vol.VolPath)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to get the loop device: %v", err))
}
mounter := mount.New("")
// Check if the target path exists. Create if not present.
_, err = os.Lstat(targetPath)
if os.IsNotExist(err) {
// if err = mounter.MakeFile(targetPath); err != nil {
if err = makeFile(targetPath); err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to create target path: %s: %v", targetPath, err))
}
}
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to check if the target block file exists: %v", err)
}
// Check if the target path is already mounted. Prevent remounting
// notMount, err := mounter.IsNotMountPoint(targetPath)
notMount, err := mount.IsNotMountPoint(mounter, targetPath)
if err != nil {
if !os.IsNotExist(err) {
return nil, status.Errorf(codes.Internal, "error checking path %s for mount: %s", targetPath, err)
}
notMount = true
}
if !notMount {
// It's already mounted.
glog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
options := []string{"bind"}
if err := mount.New("").Mount(loopDevice, targetPath, "", options); err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to mount block device: %s at %s: %v", loopDevice, targetPath, err))
}
} else if req.GetVolumeCapability().GetMount() != nil {
if vol.VolAccessType != mountAccess {
return nil, status.Error(codes.InvalidArgument, "cannot publish a non-mount volume as mount volume")
}
mounter := mount.New("")
notMnt, err := mount.IsNotMountPoint(mounter, targetPath)
if err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(targetPath, 0750); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
notMnt = true
} else {
return nil, status.Error(codes.Internal, err.Error())
}
}
if !notMnt {
return &csi.NodePublishVolumeResponse{}, nil
}
fsType := req.GetVolumeCapability().GetMount().GetFsType()
deviceId := ""
if req.GetPublishContext() != nil {
deviceId = req.GetPublishContext()[deviceID]
}
readOnly := req.GetReadonly()
volumeId := req.GetVolumeId()
attrib := req.GetVolumeContext()
mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
glog.V(4).Infof("target %v", targetPath)
glog.V(4).Infof("fstype %v", fsType)
glog.V(4).Infof("device %v", deviceId)
glog.V(4).Infof("readonly %v", readOnly)
glog.V(4).Infof("volumeId %v", volumeId)
glog.V(4).Infof("attributes %v", attrib)
glog.V(4).Infof("mountflags %v", mountFlags)
options := []string{"bind"}
if readOnly {
options = append(options, "ro")
}
// mounter := mount.New("")
path := getVolumePath(volumeId)
if err := mounter.Mount(path, targetPath, "", options); err != nil {
var errList strings.Builder
errList.WriteString(err.Error())
if vol.Ephemeral {
if rmErr := os.RemoveAll(path); rmErr != nil && !os.IsNotExist(rmErr) {
errList.WriteString(fmt.Sprintf(" :%s", rmErr.Error()))
}
}
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to mount block device: %s at %s: %s", path, targetPath, errList.String()))
}
}
return &csi.NodePublishVolumeResponse{}, nil
}
// NodeUnpublishVolume is the reverse of NodePublishVolume. It unmounts the volume from the target path.
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(req.GetTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
targetPath := req.GetTargetPath()
volumeID := req.GetVolumeId()
vol, err := getVolumeByID(volumeID)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
// Unmount only if the target path is really a mount point.
if notMnt, err := mount.IsNotMountPoint(mount.New(""), targetPath); err != nil {
if !os.IsNotExist(err) {
return nil, status.Error(codes.Internal, err.Error())
}
} else if !notMnt {
// Unmounting the image or filesystem.
err = mount.New("").Unmount(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
// Delete the mount point.
// Does not return error for non-existent path, repeat calls OK for idempotency.
if err = os.RemoveAll(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("hostpath: volume %s has been unpublished.", targetPath)
if vol.Ephemeral {
glog.V(4).Infof("deleting volume %s", volumeID)
if err := deleteHostpathVolume(volumeID); err != nil && !os.IsNotExist(err) {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed to delete volume: %s", err))
}
}
return &csi.NodeUnpublishVolumeResponse{}, nil
}
// NodeStageVolume is called by the Container Orchestrator to temporarily mount the volume to a staging path.
// Usually this staging path is a global directory on the node. In Kubernetes, after it's mounted to the
// global directory, you mount it into the pod directory (via NodePublishVolume). The reason that mounting
// is a two-step operation is that Kubernetes allows a single volume to be used by multiple pods. This is
// allowed when the storage system supports it (say, NFS) or if all pods run on the same node. One thing
// to note is that you also need to format the volume if it's not formatted already. Keep that in mind.
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(req.GetStagingTarget
|
{
return nil, status.Error(codes.NotFound, err.Error())
}
|
conditional_block
|
lib.rs
|
`] trait. During the
//! dispatching process (in [`Display::dispatch_clients()`]), all requests sent by clients are read from
//! their respective process and delivered to your processing logic, by invoking methods on the various
//! [`Dispatch`] implementations of your `State` struct. In this paradigm, your `State` needs to implement
//! `Dispatch<O, _>` for every Wayland object `O` it needs to process events for.
//!
//! However, implementing all those traits on your own is a lot of (often uninteresting) work. To make this
//! easier a composition mechanism is provided using the [`delegate_dispatch!`] macro. This way, another
//! library (such as Smithay) can provide generic [`Dispatch`] implementations that you can reuse on your
//! own app by delegating those objects to that provided implementation. See the documentation of those
//! traits and macro for details.
//!
//! ## Globals
//!
//! The entry point of the protocol for clients goes through the protocol globals. Each global represents a
//! capability of your compositor, a peripheral it has access to, or a protocol extension it supports.
//! Globals are created by you using [`DisplayHandle::create_global()`], and require your `State` to
//! implement the [`GlobalDispatch`] trait for the interface associated with that global.
//!
//! ## Logging
//!
//! This crate can generate some runtime error messages (notably when a protocol error occurs). By default
//! those messages are printed to stderr. If you activate the `log` cargo feature, they will instead be
//! piped through the `log` crate.
//!
//! ## Advanced use
//!
//! ### Bypassing [`Dispatch`]
//!
//! It may be that for some of your objects, handling them via the [`Dispatch`] trait is impractical. In
//! those contexts, this crate also provides some escape-hatches to directly interface with the low-level
//! APIs from `wayland-backend`, allowing you to register callbacks for those objects by directly providing
//! implementations of the backend [`ObjectData`](crate::backend::ObjectData) trait.
//! See [`Client::create_resource_from_objdata()`] and [`DataInit::custom_init()`].
//!
//! ### Interaction with FFI
//!
//! It can happen that you'll need to interact with Wayland states across FFI, for example when
//! interfacing with the graphics stack for enabling hardware acceleration for clients.
//!
//! In this case, you'll need to do it in two steps, by explicitly working with `wayland-backend`, adding
//! it to your dependencies and enabling its `server_system` feature.
//!
//! Then, you'll generally need:
//!
//! - The `*mut wl_display` pointer, that you can retrieve by first retrieving the
//! [`Backend`](crate::backend::Backend) using [`Display::backend()`], and then invoking
//! `Backend::display_ptr()`.
//! - The `*mut wl_resource` pointers for the objects you need to share, by first getting the
//! [`ObjectId`](crate::backend::ObjectId) using the [`Resource::id()`] method, and then
//! the `ObjectId::as_ptr()` method.
//!
//! If you need to receive pointers from FFI, you can make [`ObjectId`]s from the `*mut wl_resource` pointers
//! using `ObjectId::from_ptr()`, and then make the resources using [`Resource::from_id`].
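//!
//! A minimal sketch of the outgoing direction, using only the methods named above; the
//! `display` and `surface` bindings (and the exact backend setup) are assumptions:
//!
//! ```ignore
//! let wl_display_ptr = display.backend().display_ptr(); // *mut wl_display
//! let wl_resource_ptr = surface.id().as_ptr();          // *mut wl_resource
//! ```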
#![forbid(improper_ctypes, unsafe_op_in_unsafe_fn)]
// Doc feature labels can be tested locally by running RUSTDOCFLAGS="--cfg=docsrs" cargo +nightly doc -p <crate>
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
use std::{
fmt,
hash::{Hash, Hasher},
};
use wayland_backend::{
io_lifetimes::OwnedFd,
protocol::{Interface, Message},
server::{InvalidId, ObjectId, WeakHandle},
};
mod client;
mod dispatch;
mod display;
mod global;
mod socket;
pub use client::Client;
pub use dispatch::{DataInit, Dispatch, New, ResourceData};
pub use display::{Display, DisplayHandle};
pub use global::GlobalDispatch;
pub use socket::{BindError, ListeningSocket};
/// Backend reexports
pub mod backend {
pub use wayland_backend::io_lifetimes;
pub use wayland_backend::protocol;
pub use wayland_backend::server::{
Backend, ClientData, ClientId, Credentials, DisconnectReason, GlobalHandler, GlobalId,
Handle, InitError, InvalidId, ObjectData, ObjectId, WeakHandle,
};
pub use wayland_backend::smallvec;
}
pub use wayland_backend::protocol::WEnum;
/// Generated protocol definitions
///
/// This module is automatically generated from the `wayland.xml` protocol specification, and contains the
/// interface definitions for the core Wayland protocol.
#[allow(missing_docs)]
pub mod protocol {
use self::__interfaces::*;
use crate as wayland_server;
pub mod __interfaces {
wayland_scanner::generate_interfaces!("wayland.xml");
}
wayland_scanner::generate_server_code!("wayland.xml");
}
// internal imports for dispatching logging depending on the `log` feature
#[cfg(feature = "log")]
#[allow(unused_imports)]
use log::{debug as log_debug, error as log_error, info as log_info, warn as log_warn};
#[cfg(not(feature = "log"))]
#[allow(unused_imports)]
use std::{
eprintln as log_error, eprintln as log_warn, eprintln as log_info, eprintln as log_debug,
};
/// Trait representing a Wayland interface
pub trait Resource: Clone + std::fmt::Debug + Sized {
/// The event enum for this interface
type Event;
/// The request enum for this interface
type Request;
/// The interface description
fn interface() -> &'static Interface;
/// The ID of this object
fn id(&self) -> ObjectId;
/// The client owning this object
///
/// Returns [`None`] if the object is no longer alive.
fn client(&self) -> Option<Client> {
let handle = self.handle().upgrade()?;
let client_id = handle.get_client(self.id()).ok()?;
let dh = DisplayHandle::from(handle);
Client::from_id(&dh, client_id).ok()
}
/// The version of this object
fn version(&self) -> u32;
/// Checks if the Wayland object associated with this proxy is still alive
fn is_alive(&self) -> bool {
if let Some(handle) = self.handle().upgrade() {
handle.object_info(self.id()).is_ok()
} else {
false
}
}
/// Access the user-data associated with this object
fn data<U: 'static>(&self) -> Option<&U>;
/// Access the raw data associated with this object.
///
/// It is given to you as a `dyn Any`, and you are responsible for downcasting it.
///
/// For objects created using the scanner-generated methods, this will be an instance of the
/// [`ResourceData`] type.
fn object_data(&self) -> Option<&std::sync::Arc<dyn std::any::Any + Send + Sync>>;
/// Access the backend handle associated with this object
fn handle(&self) -> &backend::WeakHandle;
/// Create an object resource from its ID
///
/// Returns an error if the provided object ID does not correspond to the `Self` interface.
///
/// **Note:** This method is mostly meant as an implementation detail to be used by code generated by
/// wayland-scanner.
fn from_id(dh: &DisplayHandle, id: ObjectId) -> Result<Self, InvalidId>;
/// Send an event to this object
fn send_event(&self, evt: Self::Event) -> Result<(), InvalidId>;
/// Trigger a protocol error on this object
///
/// The `code` is intended to be from the `Error` enum declared alongside that object interface.
///
/// A protocol error is fatal to the Wayland connection, and the client will be disconnected.
#[inline]
fn post_error(&self, code: impl Into<u32>, error: impl Into<String>) {
if let Some(dh) = self.handle().upgrade().map(DisplayHandle::from) {
dh.post_error(self, code.into(), error.into());
}
}
/// Parse a request for this object
///
/// **Note:** This method is mostly meant as an implementation detail to be used by code generated by
/// wayland-scanner.
fn parse_request(
dh: &DisplayHandle,
msg: Message<ObjectId, OwnedFd>,
|
/// wayland-scanner.
fn write_event(
&self,
dh: &DisplayHandle,
req: Self::Event,
) -> Result<Message<ObjectId, std::os::unix::io::RawFd>, InvalidId>;
/// Creates a weak handle to this object
///
/// This weak handle will not keep the user-data associated with the object alive,
/// and can be converted back to a full resource using [`Weak::upgrade()`].
///
/// This can be of use if you need to store resources in the user data of other objects and want
/// to be sure to avoid reference cycles that would cause memory leaks.
fn downgrade(&self) -> Weak<Self> {
Weak { handle: self
|
) -> Result<(Self, Self::Request), DispatchError>;
/// Serialize an event for this object
///
/// **Note:** This method is mostly meant as an implementation detail to be used by code generated by
|
random_line_split
|
lib.rs
|
you using [`DisplayHandle::create_global()`], and require your `State` to
//! implement the [`GlobalDispatch`] trait for the interface associated with that global.
//!
//! ## Logging
//!
//! This crate can generate some runtime error messages (notably when a protocol error occurs). By default
//! those messages are printed to stderr. If you activate the `log` cargo feature, they will instead be
//! piped through the `log` crate.
//!
//! ## Advanced use
//!
//! ### Bypassing [`Dispatch`]
//!
//! It may be that for some of your objects, handling them via the [`Dispatch`] trait is impractical. In
//! those contexts, this crate also provides some escape-hatches to directly interface with the low-level
//! APIs from `wayland-backend`, allowing you to register callbacks for those objects by directly providing
//! implementations of the backend [`ObjectData`](crate::backend::ObjectData) trait.
//! See [`Client::create_resource_from_objdata()`] and [`DataInit::custom_init()`].
//!
//! ### Interaction with FFI
//!
//! It can happen that you'll need to interact with Wayland states across FFI, for example when
//! interfacing with the graphics stack for enabling hardware acceleration for clients.
//!
//! In this case, you'll need to do it in two steps, by explicitly working with `wayland-backend`, adding
//! it to your dependencies and enabling its `server_system` feature.
//!
//! Then, you'll generally need:
//!
//! - The `*mut wl_display` pointer, that you can retrieve by first retrieving the
//! [`Backend`](crate::backend::Backend) using [`Display::backend()`], and then invoking
//! `Backend::display_ptr()`.
//! - The `*mut wl_resource` pointers for the objects you need to share, by first getting the
//! [`ObjectId`](crate::backend::ObjectId) using the [`Resource::id()`] method, and then
//! the `ObjectId::as_ptr()` method.
//!
//! If you need to receive pointers from FFI, you can make [`ObjectId`]s from the `*mut wl_resource` pointers
//! using `ObjectId::from_ptr()`, and then make the resources using [`Resource::from_id`].
#![forbid(improper_ctypes, unsafe_op_in_unsafe_fn)]
// Doc feature labels can be tested locally by running RUSTDOCFLAGS="--cfg=docsrs" cargo +nightly doc -p <crate>
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
use std::{
fmt,
hash::{Hash, Hasher},
};
use wayland_backend::{
io_lifetimes::OwnedFd,
protocol::{Interface, Message},
server::{InvalidId, ObjectId, WeakHandle},
};
mod client;
mod dispatch;
mod display;
mod global;
mod socket;
pub use client::Client;
pub use dispatch::{DataInit, Dispatch, New, ResourceData};
pub use display::{Display, DisplayHandle};
pub use global::GlobalDispatch;
pub use socket::{BindError, ListeningSocket};
/// Backend reexports
pub mod backend {
pub use wayland_backend::io_lifetimes;
pub use wayland_backend::protocol;
pub use wayland_backend::server::{
Backend, ClientData, ClientId, Credentials, DisconnectReason, GlobalHandler, GlobalId,
Handle, InitError, InvalidId, ObjectData, ObjectId, WeakHandle,
};
pub use wayland_backend::smallvec;
}
pub use wayland_backend::protocol::WEnum;
/// Generated protocol definitions
///
/// This module is automatically generated from the `wayland.xml` protocol specification, and contains the
/// interface definitions for the core Wayland protocol.
#[allow(missing_docs)]
pub mod protocol {
use self::__interfaces::*;
use crate as wayland_server;
pub mod __interfaces {
wayland_scanner::generate_interfaces!("wayland.xml");
}
wayland_scanner::generate_server_code!("wayland.xml");
}
// internal imports for dispatching logging depending on the `log` feature
#[cfg(feature = "log")]
#[allow(unused_imports)]
use log::{debug as log_debug, error as log_error, info as log_info, warn as log_warn};
#[cfg(not(feature = "log"))]
#[allow(unused_imports)]
use std::{
eprintln as log_error, eprintln as log_warn, eprintln as log_info, eprintln as log_debug,
};
/// Trait representing a Wayland interface
pub trait Resource: Clone + std::fmt::Debug + Sized {
/// The event enum for this interface
type Event;
/// The request enum for this interface
type Request;
/// The interface description
fn interface() -> &'static Interface;
/// The ID of this object
fn id(&self) -> ObjectId;
/// The client owning this object
///
/// Returns [`None`] if the object is no longer alive.
fn client(&self) -> Option<Client> {
let handle = self.handle().upgrade()?;
let client_id = handle.get_client(self.id()).ok()?;
let dh = DisplayHandle::from(handle);
Client::from_id(&dh, client_id).ok()
}
/// The version of this object
fn version(&self) -> u32;
/// Checks if the Wayland object associated with this proxy is still alive
fn is_alive(&self) -> bool {
if let Some(handle) = self.handle().upgrade() {
handle.object_info(self.id()).is_ok()
} else {
false
}
}
/// Access the user-data associated with this object
fn data<U: 'static>(&self) -> Option<&U>;
/// Access the raw data associated with this object.
///
/// It is given to you as a `dyn Any`, and you are responsible for downcasting it.
///
/// For objects created using the scanner-generated methods, this will be an instance of the
/// [`ResourceData`] type.
fn object_data(&self) -> Option<&std::sync::Arc<dyn std::any::Any + Send + Sync>>;
/// Access the backend handle associated with this object
fn handle(&self) -> &backend::WeakHandle;
/// Create an object resource from its ID
///
/// Returns an error if the provided object ID does not correspond to the `Self` interface.
///
/// **Note:** This method is mostly meant as an implementation detail to be used by code generated by
/// wayland-scanner.
fn from_id(dh: &DisplayHandle, id: ObjectId) -> Result<Self, InvalidId>;
/// Send an event to this object
fn send_event(&self, evt: Self::Event) -> Result<(), InvalidId>;
/// Trigger a protocol error on this object
///
/// The `code` is intended to be from the `Error` enum declared alongside that object interface.
///
/// A protocol error is fatal to the Wayland connection, and the client will be disconnected.
#[inline]
fn post_error(&self, code: impl Into<u32>, error: impl Into<String>) {
if let Some(dh) = self.handle().upgrade().map(DisplayHandle::from) {
dh.post_error(self, code.into(), error.into());
}
}
/// Parse a request for this object
///
/// **Note:** This method is mostly meant as an implementation detail to be used by code generated by
/// wayland-scanner.
fn parse_request(
dh: &DisplayHandle,
msg: Message<ObjectId, OwnedFd>,
) -> Result<(Self, Self::Request), DispatchError>;
/// Serialize an event for this object
///
/// **Note:** This method is mostly meant as an implementation detail to be used by code generated by
/// wayland-scanner.
fn write_event(
&self,
dh: &DisplayHandle,
req: Self::Event,
) -> Result<Message<ObjectId, std::os::unix::io::RawFd>, InvalidId>;
/// Creates a weak handle to this object
///
/// This weak handle will not keep the user-data associated with the object alive,
/// and can be converted back to a full resource using [`Weak::upgrade()`].
///
/// This can be of use if you need to store resources in the user data of other objects and want
/// to be sure to avoid reference cycles that would cause memory leaks.
fn downgrade(&self) -> Weak<Self> {
Weak { handle: self.handle().clone(), id: self.id(), _iface: std::marker::PhantomData }
}
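// Usage sketch for the weak-handle pattern described above (bindings are
// illustrative): store `let weak = some_resource.downgrade();` inside another
// object's user data, then call `weak.upgrade()` later to recover the resource
// only while it is still alive, avoiding a reference cycle.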
#[doc(hidden)]
fn __set_object_data(
&mut self,
odata: std::sync::Arc<dyn std::any::Any + Send + Sync + 'static>,
);
}
/// An error generated if an illegal request was received from a client
#[derive(Debug)]
pub enum DispatchError {
/// The received message does not match the specification for the object's interface.
BadMessage {
/// The id of the target object
sender_id: ObjectId,
/// The interface of the target object
interface: &'static str,
/// The opcode number
opcode: u16,
},
}
impl std::error::Error for DispatchError {}
impl fmt::Display for DispatchError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
DispatchError::BadMessage { sender_id, interface, opcode } =>
|
{
write!(f, "Bad message for object {interface}@{sender_id} on opcode {opcode}",)
}
|
conditional_block
|
|
lib.rs
|
trait. During the
//! dispatching process (in [`Display::dispatch_clients()`]), all requests sent by clients are read from
//! their respective process and delivered to your processing logic, by invoking methods on the various
//! [`Dispatch`] implementations of your `State` struct. In this paradigm, your `State` needs to implement
//! `Dispatch<O, _>` for every Wayland object `O` it needs to process events for.
//!
//! However, implementing all those traits on your own is a lot of (often uninteresting) work. To make this
//! easier a composition mechanism is provided using the [`delegate_dispatch!`] macro. This way, another
//! library (such as Smithay) can provide generic [`Dispatch`] implementations that you can reuse on your
//! own app by delegating those objects to that provided implementation. See the documentation of those
//! traits and macro for details.
//!
//! ## Globals
//!
//! The entry point of the protocol for clients goes through the protocol globals. Each global represents a
//! capability of your compositor, a peripheral it has access to, or a protocol extension it supports.
//! Globals are created by you using [`DisplayHandle::create_global()`], and require your `State` to
//! implement the [`GlobalDispatch`] trait for the interface associated with that global.
//!
//! ## Logging
//!
//! This crate can generate some runtime error messages (notably when a protocol error occurs). By default
//! those messages are printed to stderr. If you activate the `log` cargo feature, they will instead be
//! piped through the `log` crate.
//!
//! ## Advanced use
//!
//! ### Bypassing [`Dispatch`]
//!
//! It may be that for some of your objects, handling them via the [`Dispatch`] trait is impractical. In
//! those contexts, this crate also provides some escape-hatches to directly interface with the low-level
//! APIs from `wayland-backend`, allowing you to register callbacks for those objects by directly providing
//! implementations of the backend [`ObjectData`](crate::backend::ObjectData) trait.
//! See [`Client::create_resource_from_objdata()`] and [`DataInit::custom_init()`].
//!
//! ### Interaction with FFI
//!
//! It can happen that you'll need to interact with Wayland states across FFI, for example when
//! interfacing with the graphics stack for enabling hardware acceleration for clients.
//!
//! In this case, you'll need to do it in two steps, by explicitly working with `wayland-backend`, adding
//! it to your dependencies and enabling its `server_system` feature.
//!
//! Then, you'll generally need:
//!
//! - The `*mut wl_display` pointer, that you can retrieve by first retrieving the
//! [`Backend`](crate::backend::Backend) using [`Display::backend()`], and then invoking
//! `Backend::display_ptr()`.
//! - The `*mut wl_resource` pointers for the objects you need to share, by first getting the
//! [`ObjectId`](crate::backend::ObjectId) using the [`Resource::id()`] method, and then
//! the `ObjectId::as_ptr()` method.
//!
//! If you need to receive pointers from FFI, you can make [`ObjectId`]s from the `*mut wl_resource` pointers
//! using `ObjectId::from_ptr()`, and then make the resources using [`Resource::from_id`].
#![forbid(improper_ctypes, unsafe_op_in_unsafe_fn)]
// Doc feature labels can be tested locally by running RUSTDOCFLAGS="--cfg=docsrs" cargo +nightly doc -p <crate>
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
use std::{
fmt,
hash::{Hash, Hasher},
};
use wayland_backend::{
io_lifetimes::OwnedFd,
protocol::{Interface, Message},
server::{InvalidId, ObjectId, WeakHandle},
};
mod client;
mod dispatch;
mod display;
mod global;
mod socket;
pub use client::Client;
pub use dispatch::{DataInit, Dispatch, New, ResourceData};
pub use display::{Display, DisplayHandle};
pub use global::GlobalDispatch;
pub use socket::{BindError, ListeningSocket};
/// Backend reexports
pub mod backend {
pub use wayland_backend::io_lifetimes;
pub use wayland_backend::protocol;
pub use wayland_backend::server::{
Backend, ClientData, ClientId, Credentials, DisconnectReason, GlobalHandler, GlobalId,
Handle, InitError, InvalidId, ObjectData, ObjectId, WeakHandle,
};
pub use wayland_backend::smallvec;
}
pub use wayland_backend::protocol::WEnum;
/// Generated protocol definitions
///
/// This module is automatically generated from the `wayland.xml` protocol specification, and contains the
/// interface definitions for the core Wayland protocol.
#[allow(missing_docs)]
pub mod protocol {
use self::__interfaces::*;
use crate as wayland_server;
pub mod __interfaces {
wayland_scanner::generate_interfaces!("wayland.xml");
}
wayland_scanner::generate_server_code!("wayland.xml");
}
// internal imports for dispatching logging depending on the `log` feature
#[cfg(feature = "log")]
#[allow(unused_imports)]
use log::{debug as log_debug, error as log_error, info as log_info, warn as log_warn};
#[cfg(not(feature = "log"))]
#[allow(unused_imports)]
use std::{
eprintln as log_error, eprintln as log_warn, eprintln as log_info, eprintln as log_debug,
};
/// Trait representing a Wayland interface
pub trait Resource: Clone + std::fmt::Debug + Sized {
/// The event enum for this interface
type Event;
/// The request enum for this interface
type Request;
/// The interface description
fn interface() -> &'static Interface;
/// The ID of this object
fn id(&self) -> ObjectId;
/// The client owning this object
///
/// Returns [`None`] if the object is no longer alive.
fn client(&self) -> Option<Client> {
let handle = self.handle().upgrade()?;
let client_id = handle.get_client(self.id()).ok()?;
let dh = DisplayHandle::from(handle);
Client::from_id(&dh, client_id).ok()
}
/// The version of this object
fn version(&self) -> u32;
/// Checks if the Wayland object associated with this proxy is still alive
fn
|
(&self) -> bool {
if let Some(handle) = self.handle().upgrade() {
handle.object_info(self.id()).is_ok()
} else {
false
}
}
/// Access the user-data associated with this object
fn data<U: 'static>(&self) -> Option<&U>;
/// Access the raw data associated with this object.
///
/// It is given to you as a `dyn Any`, and you are responsible for downcasting it.
///
/// For objects created using the scanner-generated methods, this will be an instance of the
/// [`ResourceData`] type.
fn object_data(&self) -> Option<&std::sync::Arc<dyn std::any::Any + Send + Sync>>;
/// Access the backend handle associated with this object
fn handle(&self) -> &backend::WeakHandle;
/// Create an object resource from its ID
///
    /// Returns an error if the provided object ID does not correspond to the `Self` interface.
///
/// **Note:** This method is mostly meant as an implementation detail to be used by code generated by
/// wayland-scanner.
fn from_id(dh: &DisplayHandle, id: ObjectId) -> Result<Self, InvalidId>;
/// Send an event to this object
fn send_event(&self, evt: Self::Event) -> Result<(), InvalidId>;
/// Trigger a protocol error on this object
///
/// The `code` is intended to be from the `Error` enum declared alongside that object interface.
///
/// A protocol error is fatal to the Wayland connection, and the client will be disconnected.
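    ///
    /// A minimal usage sketch (the numeric code is illustrative; real code would pass a variant
    /// of the interface's generated `Error` enum, which converts into `u32`):
    ///
    /// ```ignore
    /// // `surface` is any value implementing `Resource`.
    /// surface.post_error(1u32, "invalid state");
    /// ```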
#[inline]
fn post_error(&self, code: impl Into<u32>, error: impl Into<String>) {
if let Some(dh) = self.handle().upgrade().map(DisplayHandle::from) {
dh.post_error(self, code.into(), error.into());
}
}
    /// Parse an incoming request for this object
///
/// **Note:** This method is mostly meant as an implementation detail to be used by code generated by
/// wayland-scanner.
fn parse_request(
dh: &DisplayHandle,
msg: Message<ObjectId, OwnedFd>,
) -> Result<(Self, Self::Request), DispatchError>;
/// Serialize an event for this object
///
/// **Note:** This method is mostly meant as an implementation detail to be used by code generated by
/// wayland-scanner.
fn write_event(
&self,
dh: &DisplayHandle,
req: Self::Event,
) -> Result<Message<ObjectId, std::os::unix::io::RawFd>, InvalidId>;
/// Creates a weak handle to this object
///
/// This weak handle will not keep the user-data associated with the object alive,
/// and can be converted back to a full resource using [`Weak::upgrade()`].
///
    /// This can be of use if you need to store resources in the user data of other objects and want
/// to be sure to avoid reference cycles that would cause memory leaks.
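    ///
    /// A small usage sketch (`surface` stands in for any resource; `upgrade()` failing once the
    /// object is dead is assumed here):
    ///
    /// ```ignore
    /// let weak = surface.downgrade();
    /// // Later: try to get the full resource back.
    /// let surface = weak.upgrade()?;
    /// ```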
fn downgrade(&self) -> Weak<Self> {
Weak { handle:
|
is_alive
|
identifier_name
|
player_vlc.rs
|
{}", url);
let mut resp = self
.0
.get(url.as_ref())
.basic_auth("", Some(VLC_HTTP_PASSWORD))
.send()?;
if !resp.status().is_success() {
return Err(VlcError::BadResponse(format_err!(
"Bad HTTP status {} : {}",
resp.status(),
resp.text().unwrap_or_else(|_| "N/A".to_string())
)));
}
Ok(resp.text()?)
}
}
pub struct VlcPlayer<C: HttpClient = ReqwestClient> {
vlc_config: VlcConfig,
config: Option<SlideshowConfig>,
process: Option<Child>,
client: C,
pausing: bool,
sleeping: bool,
muting: bool,
}
impl VlcPlayer {
pub fn new(config: VlcConfig) -> Self {
Self::new_with_client(
config,
ReqwestClient(
reqwest::Client::builder()
.timeout(Some(Duration::from_secs(VLC_REQUEST_TIMEOUT)))
.build()
.expect("reqwest client"),
),
)
}
}
impl<C: HttpClient> VlcPlayer<C> {
fn new_with_client(config: VlcConfig, client: C) -> Self {
Self {
vlc_config: config,
config: None,
process: None,
client,
pausing: false,
sleeping: false,
muting: false,
}
}
fn config(&self) -> std::result::Result<&SlideshowConfig, VlcError> {
self.config.as_ref().ok_or(VlcError::NotStarted)
}
/// Convert `audio_volume` set in config into the value
/// range used in VLC player
fn audio_volume(&self) -> std::result::Result<u32, VlcError> {
Ok((VLC_VOLUME_MAX as f32 * self.config()?.audio_volume).round() as u32)
}
fn http_port(&self) -> u32 {
self.vlc_config.http_port.unwrap_or(VLC_DEFAULT_HTTP_PORT)
}
fn send_get(
&self,
path: &str,
params: &[(&str, &str)],
) -> std::result::Result<String, VlcError> {
self.client.send_get(self.http_port(), path, params)
}
fn send_status_cmd(
&self,
cmd: &str,
args: &[(&str, &str)],
) -> std::result::Result<String, VlcError> {
let mut params = Vec::with_capacity(args.len() + 1);
if !cmd.is_empty() {
params.push(("command", cmd));
params.extend(args);
}
self.send_get("requests/status.xml", ¶ms)
}
fn get_playlist(&self) -> std::result::Result<Element, VlcError> {
let xml = self.send_get("requests/playlist.xml", &[])?;
debug!("Playlist XML from VLC: {}", xml);
let element = Element::from_reader(xml.into_bytes().as_slice())?;
Ok(element)
}
fn wait_on_http_interface(&self) -> std::result::Result<(), VlcError> {
let start_time = Instant::now();
while Instant::now() - start_time < VLC_STARTUP_TIMEOUT {
if self.is_ok() {
return Ok(());
}
std::thread::sleep(VLC_STARTUP_CHECK_BACKOFF);
}
Err(VlcError::StartTimeout)
}
fn set_volume(&self, volume: u32) -> std::result::Result<(), VlcError> {
info!("Setting audio volume to {}", volume);
self.send_status_cmd("volume", &[("val", &volume.to_string())])?;
Ok(())
}
fn playlist_ids(element: Element) -> std::result::Result<Vec<u64>, VlcError> {
for node in element.find_all("node") {
if node
.get_attr("name")
.map(|name| name == "Playlist")
.unwrap_or(false)
{
let mut ids = Vec::new();
for leaf in node.find_all("leaf") {
let id_s = leaf.get_attr("id").ok_or_else(|| {
VlcError::BadResponse(format_err!("missing id attribute"))
})?;
let id: u64 = id_s.parse().map_err(|_| {
VlcError::BadResponse(format_err!("cannot parse id: {}", id_s))
})?;
ids.push(id);
}
return Ok(ids);
}
}
Err(VlcError::BadResponse(format_err!(
"no playlist found in XML"
)))
}
fn maybe_restore_pause(&self) -> std::result::Result<(), VlcError> {
// Moving resets the pausing state
if self.locked() {
// Pausing before play starts causes blackscreen
std::thread::sleep(Duration::from_secs(1));
self.send_status_cmd("pl_pause", &[])?;
}
Ok(())
}
fn maybe_pause(&self) -> std::result::Result<(), VlcError> {
if !self.pausing && !self.sleeping {
self.send_status_cmd("pl_pause", &[])?;
}
Ok(())
}
fn maybe_resume(&mut self, resume: bool) -> std::result::Result<(), VlcError> {
if (self.pausing && resume) || (self.sleeping && !self.pausing) {
self.send_status_cmd("pl_play", &[])?;
self.pausing = false;
self.sleeping = false;
}
Ok(())
}
}
impl<C: HttpClient> Player for VlcPlayer<C> {
fn start(&mut self, config: SlideshowConfig) -> Result<()> {
let vlc_bin = self
.vlc_config
.vlc_bin
.as_ref()
.map(|s| s.as_ref())
.unwrap_or(VLC_DEFAULT_BIN);
let mut cmd = Command::new(vlc_bin);
cmd.arg("--loop")
.arg("--no-video-title-show")
            // Don't show the popup asking whether to fetch media metadata over the network
.arg("--no-qt-privacy-ask")
.arg("--no-qt-video-autoresize")
// https://wiki.videolan.org/index.php/VLC_command-line_help
.args(&[
"--image-duration",
&config.show_duration.as_secs().to_string(),
])
.args(&["--extraintf", "http"])
.args(&["--http-password", VLC_HTTP_PASSWORD])
.args(&["--http-host", VLC_HTTP_HOST])
.args(&["--http-port", &self.http_port().to_string()]);
if config.fullscreen {
cmd.arg("--fullscreen");
}
self.process = Some(cmd.spawn()?);
self.wait_on_http_interface()?;
self.config = Some(config);
self.set_volume(self.audio_volume()?)?;
Ok(())
}
fn play_next(&mut self) -> Result<()> {
self.send_status_cmd("pl_next", &[])?;
self.maybe_restore_pause()?;
Ok(())
}
fn play_back(&mut self) -> Result<()> {
self.send_status_cmd("pl_previous", &[])?;
self.maybe_restore_pause()?;
Ok(())
}
fn sleep(&mut self) -> Result<()> {
self.maybe_pause()?;
self.sleeping = true;
Ok(())
}
fn wakeup(&mut self) -> Result<()> {
self.maybe_resume(false)?;
Ok(())
}
fn pause(&mut self) -> Result<()> {
self.maybe_pause()?;
self.pausing = true;
Ok(())
}
fn resume(&mut self) -> Result<()> {
self.maybe_resume(true)?;
Ok(())
}
fn mute(&mut self) -> Result<()> {
if !self.muting {
self.set_volume(0)?;
}
self.muting = true;
Ok(())
}
fn unmute(&mut self) -> Result<()> {
if self.muting {
self.set_volume(self.audio_volume()?)?;
}
self.muting = false;
Ok(())
}
fn update_playlist(&mut self, playlist: Vec<PathBuf>) -> Result<()> {
debug!("Start updating playlist");
// 1. get current playlist
let old_ids = Self::playlist_ids(self.get_playlist()?)?;
// 2. enqueue all new items
for path in playlist {
debug!("Adding new item to playlist: {}", path.display());
self.send_status_cmd("in_enqueue", &[("input", path.to_str().unwrap())])?;
}
// 3. move to the head of new items
let cur_ids = Self::playlist_ids(self.get_playlist()?)?;
let head_id = cur_ids[old_ids.len()];
debug!("Jumping to playlist ID: {}", head_id);
self.send_status_cmd("pl_play", &[("id", &head_id.to_string())])?;
std::thread::sleep(Duration::from_secs(1));
// 4. Remove old items from playlist (assuming current media won't come up so soon)
for id in old_ids {
debug!("Removing old item from playlist: {}", id);
self.send_status_cmd("pl_delete", &[("id", &id.to_string())])?;
}
debug!("Update playlist complete");
Ok(())
}
fn locked(&self) -> bool {
self.pausing || self.sleeping
}
fn
|
is_ok
|
identifier_name
|
|
player_vlc.rs
|
::BadResponse(format_err!(
"Bad HTTP status {} : {}",
resp.status(),
resp.text().unwrap_or_else(|_| "N/A".to_string())
)));
}
Ok(resp.text()?)
}
}
pub struct VlcPlayer<C: HttpClient = ReqwestClient> {
vlc_config: VlcConfig,
config: Option<SlideshowConfig>,
process: Option<Child>,
client: C,
pausing: bool,
sleeping: bool,
muting: bool,
}
impl VlcPlayer {
pub fn new(config: VlcConfig) -> Self {
Self::new_with_client(
config,
ReqwestClient(
reqwest::Client::builder()
.timeout(Some(Duration::from_secs(VLC_REQUEST_TIMEOUT)))
.build()
.expect("reqwest client"),
),
)
}
}
impl<C: HttpClient> VlcPlayer<C> {
fn new_with_client(config: VlcConfig, client: C) -> Self {
Self {
vlc_config: config,
config: None,
process: None,
client,
pausing: false,
sleeping: false,
muting: false,
}
}
fn config(&self) -> std::result::Result<&SlideshowConfig, VlcError> {
self.config.as_ref().ok_or(VlcError::NotStarted)
}
/// Convert `audio_volume` set in config into the value
/// range used in VLC player
fn audio_volume(&self) -> std::result::Result<u32, VlcError> {
Ok((VLC_VOLUME_MAX as f32 * self.config()?.audio_volume).round() as u32)
}
fn http_port(&self) -> u32 {
self.vlc_config.http_port.unwrap_or(VLC_DEFAULT_HTTP_PORT)
}
fn send_get(
&self,
path: &str,
params: &[(&str, &str)],
) -> std::result::Result<String, VlcError> {
self.client.send_get(self.http_port(), path, params)
}
fn send_status_cmd(
&self,
cmd: &str,
args: &[(&str, &str)],
) -> std::result::Result<String, VlcError> {
let mut params = Vec::with_capacity(args.len() + 1);
if !cmd.is_empty() {
params.push(("command", cmd));
params.extend(args);
}
self.send_get("requests/status.xml", ¶ms)
}
fn get_playlist(&self) -> std::result::Result<Element, VlcError> {
let xml = self.send_get("requests/playlist.xml", &[])?;
debug!("Playlist XML from VLC: {}", xml);
let element = Element::from_reader(xml.into_bytes().as_slice())?;
Ok(element)
}
fn wait_on_http_interface(&self) -> std::result::Result<(), VlcError> {
let start_time = Instant::now();
while Instant::now() - start_time < VLC_STARTUP_TIMEOUT {
if self.is_ok() {
return Ok(());
}
std::thread::sleep(VLC_STARTUP_CHECK_BACKOFF);
}
Err(VlcError::StartTimeout)
}
fn set_volume(&self, volume: u32) -> std::result::Result<(), VlcError> {
info!("Setting audio volume to {}", volume);
self.send_status_cmd("volume", &[("val", &volume.to_string())])?;
Ok(())
}
fn playlist_ids(element: Element) -> std::result::Result<Vec<u64>, VlcError> {
for node in element.find_all("node") {
if node
.get_attr("name")
.map(|name| name == "Playlist")
.unwrap_or(false)
{
let mut ids = Vec::new();
for leaf in node.find_all("leaf") {
let id_s = leaf.get_attr("id").ok_or_else(|| {
VlcError::BadResponse(format_err!("missing id attribute"))
})?;
let id: u64 = id_s.parse().map_err(|_| {
VlcError::BadResponse(format_err!("cannot parse id: {}", id_s))
})?;
ids.push(id);
}
return Ok(ids);
}
}
Err(VlcError::BadResponse(format_err!(
"no playlist found in XML"
)))
}
fn maybe_restore_pause(&self) -> std::result::Result<(), VlcError> {
// Moving resets the pausing state
if self.locked() {
// Pausing before play starts causes blackscreen
std::thread::sleep(Duration::from_secs(1));
self.send_status_cmd("pl_pause", &[])?;
}
Ok(())
}
fn maybe_pause(&self) -> std::result::Result<(), VlcError> {
if !self.pausing && !self.sleeping {
self.send_status_cmd("pl_pause", &[])?;
}
Ok(())
}
fn maybe_resume(&mut self, resume: bool) -> std::result::Result<(), VlcError> {
if (self.pausing && resume) || (self.sleeping && !self.pausing) {
self.send_status_cmd("pl_play", &[])?;
self.pausing = false;
self.sleeping = false;
}
Ok(())
}
}
impl<C: HttpClient> Player for VlcPlayer<C> {
fn start(&mut self, config: SlideshowConfig) -> Result<()> {
let vlc_bin = self
.vlc_config
.vlc_bin
.as_ref()
.map(|s| s.as_ref())
.unwrap_or(VLC_DEFAULT_BIN);
let mut cmd = Command::new(vlc_bin);
cmd.arg("--loop")
.arg("--no-video-title-show")
            // Don't show the popup asking whether to fetch media metadata over the network
.arg("--no-qt-privacy-ask")
.arg("--no-qt-video-autoresize")
// https://wiki.videolan.org/index.php/VLC_command-line_help
.args(&[
"--image-duration",
&config.show_duration.as_secs().to_string(),
])
.args(&["--extraintf", "http"])
.args(&["--http-password", VLC_HTTP_PASSWORD])
.args(&["--http-host", VLC_HTTP_HOST])
.args(&["--http-port", &self.http_port().to_string()]);
if config.fullscreen {
cmd.arg("--fullscreen");
}
self.process = Some(cmd.spawn()?);
self.wait_on_http_interface()?;
self.config = Some(config);
self.set_volume(self.audio_volume()?)?;
Ok(())
}
fn play_next(&mut self) -> Result<()> {
self.send_status_cmd("pl_next", &[])?;
self.maybe_restore_pause()?;
Ok(())
}
fn play_back(&mut self) -> Result<()> {
self.send_status_cmd("pl_previous", &[])?;
self.maybe_restore_pause()?;
Ok(())
}
fn sleep(&mut self) -> Result<()> {
self.maybe_pause()?;
self.sleeping = true;
Ok(())
}
fn wakeup(&mut self) -> Result<()> {
self.maybe_resume(false)?;
Ok(())
}
fn pause(&mut self) -> Result<()> {
self.maybe_pause()?;
self.pausing = true;
Ok(())
}
fn resume(&mut self) -> Result<()> {
self.maybe_resume(true)?;
Ok(())
}
fn mute(&mut self) -> Result<()> {
if !self.muting {
self.set_volume(0)?;
}
self.muting = true;
Ok(())
}
fn unmute(&mut self) -> Result<()> {
if self.muting {
self.set_volume(self.audio_volume()?)?;
}
self.muting = false;
Ok(())
}
fn update_playlist(&mut self, playlist: Vec<PathBuf>) -> Result<()> {
debug!("Start updating playlist");
// 1. get current playlist
let old_ids = Self::playlist_ids(self.get_playlist()?)?;
// 2. enqueue all new items
for path in playlist {
debug!("Adding new item to playlist: {}", path.display());
self.send_status_cmd("in_enqueue", &[("input", path.to_str().unwrap())])?;
}
// 3. move to the head of new items
let cur_ids = Self::playlist_ids(self.get_playlist()?)?;
let head_id = cur_ids[old_ids.len()];
debug!("Jumping to playlist ID: {}", head_id);
self.send_status_cmd("pl_play", &[("id", &head_id.to_string())])?;
std::thread::sleep(Duration::from_secs(1));
// 4. Remove old items from playlist (assuming current media won't come up so soon)
for id in old_ids {
debug!("Removing old item from playlist: {}", id);
self.send_status_cmd("pl_delete", &[("id", &id.to_string())])?;
}
debug!("Update playlist complete");
Ok(())
}
fn locked(&self) -> bool {
self.pausing || self.sleeping
}
fn is_ok(&self) -> bool
|
{
match self.send_status_cmd("", &[]) {
Ok(_) => true,
Err(e) => {
debug!("Got error response while checking health of VLC: {}", e);
false
}
}
}
|
identifier_body
|
|
player_vlc.rs
|
(VlcError::NotStarted)
}
/// Convert `audio_volume` set in config into the value
/// range used in VLC player
fn audio_volume(&self) -> std::result::Result<u32, VlcError> {
Ok((VLC_VOLUME_MAX as f32 * self.config()?.audio_volume).round() as u32)
}
fn http_port(&self) -> u32 {
self.vlc_config.http_port.unwrap_or(VLC_DEFAULT_HTTP_PORT)
}
fn send_get(
&self,
path: &str,
params: &[(&str, &str)],
) -> std::result::Result<String, VlcError> {
self.client.send_get(self.http_port(), path, params)
}
fn send_status_cmd(
&self,
cmd: &str,
args: &[(&str, &str)],
) -> std::result::Result<String, VlcError> {
let mut params = Vec::with_capacity(args.len() + 1);
if !cmd.is_empty() {
params.push(("command", cmd));
params.extend(args);
}
self.send_get("requests/status.xml", ¶ms)
}
fn get_playlist(&self) -> std::result::Result<Element, VlcError> {
let xml = self.send_get("requests/playlist.xml", &[])?;
debug!("Playlist XML from VLC: {}", xml);
let element = Element::from_reader(xml.into_bytes().as_slice())?;
Ok(element)
}
fn wait_on_http_interface(&self) -> std::result::Result<(), VlcError> {
let start_time = Instant::now();
while Instant::now() - start_time < VLC_STARTUP_TIMEOUT {
if self.is_ok() {
return Ok(());
}
std::thread::sleep(VLC_STARTUP_CHECK_BACKOFF);
}
Err(VlcError::StartTimeout)
}
fn set_volume(&self, volume: u32) -> std::result::Result<(), VlcError> {
info!("Setting audio volume to {}", volume);
self.send_status_cmd("volume", &[("val", &volume.to_string())])?;
Ok(())
}
fn playlist_ids(element: Element) -> std::result::Result<Vec<u64>, VlcError> {
for node in element.find_all("node") {
if node
.get_attr("name")
.map(|name| name == "Playlist")
.unwrap_or(false)
{
let mut ids = Vec::new();
for leaf in node.find_all("leaf") {
let id_s = leaf.get_attr("id").ok_or_else(|| {
VlcError::BadResponse(format_err!("missing id attribute"))
})?;
let id: u64 = id_s.parse().map_err(|_| {
VlcError::BadResponse(format_err!("cannot parse id: {}", id_s))
})?;
ids.push(id);
}
return Ok(ids);
}
}
Err(VlcError::BadResponse(format_err!(
"no playlist found in XML"
)))
}
fn maybe_restore_pause(&self) -> std::result::Result<(), VlcError> {
// Moving resets the pausing state
if self.locked() {
// Pausing before play starts causes blackscreen
std::thread::sleep(Duration::from_secs(1));
self.send_status_cmd("pl_pause", &[])?;
}
Ok(())
}
fn maybe_pause(&self) -> std::result::Result<(), VlcError> {
if !self.pausing && !self.sleeping {
self.send_status_cmd("pl_pause", &[])?;
}
Ok(())
}
fn maybe_resume(&mut self, resume: bool) -> std::result::Result<(), VlcError> {
if (self.pausing && resume) || (self.sleeping && !self.pausing) {
self.send_status_cmd("pl_play", &[])?;
self.pausing = false;
self.sleeping = false;
}
Ok(())
}
}
impl<C: HttpClient> Player for VlcPlayer<C> {
fn start(&mut self, config: SlideshowConfig) -> Result<()> {
let vlc_bin = self
.vlc_config
.vlc_bin
.as_ref()
.map(|s| s.as_ref())
.unwrap_or(VLC_DEFAULT_BIN);
let mut cmd = Command::new(vlc_bin);
cmd.arg("--loop")
.arg("--no-video-title-show")
            // Don't show the popup asking whether to fetch media metadata over the network
.arg("--no-qt-privacy-ask")
.arg("--no-qt-video-autoresize")
// https://wiki.videolan.org/index.php/VLC_command-line_help
.args(&[
"--image-duration",
&config.show_duration.as_secs().to_string(),
])
.args(&["--extraintf", "http"])
.args(&["--http-password", VLC_HTTP_PASSWORD])
.args(&["--http-host", VLC_HTTP_HOST])
.args(&["--http-port", &self.http_port().to_string()]);
if config.fullscreen {
cmd.arg("--fullscreen");
}
self.process = Some(cmd.spawn()?);
self.wait_on_http_interface()?;
self.config = Some(config);
self.set_volume(self.audio_volume()?)?;
Ok(())
}
fn play_next(&mut self) -> Result<()> {
self.send_status_cmd("pl_next", &[])?;
self.maybe_restore_pause()?;
Ok(())
}
fn play_back(&mut self) -> Result<()> {
self.send_status_cmd("pl_previous", &[])?;
self.maybe_restore_pause()?;
Ok(())
}
fn sleep(&mut self) -> Result<()> {
self.maybe_pause()?;
self.sleeping = true;
Ok(())
}
fn wakeup(&mut self) -> Result<()> {
self.maybe_resume(false)?;
Ok(())
}
fn pause(&mut self) -> Result<()> {
self.maybe_pause()?;
self.pausing = true;
Ok(())
}
fn resume(&mut self) -> Result<()> {
self.maybe_resume(true)?;
Ok(())
}
fn mute(&mut self) -> Result<()> {
if !self.muting {
self.set_volume(0)?;
}
self.muting = true;
Ok(())
}
fn unmute(&mut self) -> Result<()> {
if self.muting {
self.set_volume(self.audio_volume()?)?;
}
self.muting = false;
Ok(())
}
fn update_playlist(&mut self, playlist: Vec<PathBuf>) -> Result<()> {
debug!("Start updating playlist");
// 1. get current playlist
let old_ids = Self::playlist_ids(self.get_playlist()?)?;
// 2. enqueue all new items
for path in playlist {
debug!("Adding new item to playlist: {}", path.display());
self.send_status_cmd("in_enqueue", &[("input", path.to_str().unwrap())])?;
}
// 3. move to the head of new items
let cur_ids = Self::playlist_ids(self.get_playlist()?)?;
let head_id = cur_ids[old_ids.len()];
debug!("Jumping to playlist ID: {}", head_id);
self.send_status_cmd("pl_play", &[("id", &head_id.to_string())])?;
std::thread::sleep(Duration::from_secs(1));
// 4. Remove old items from playlist (assuming current media won't come up so soon)
for id in old_ids {
debug!("Removing old item from playlist: {}", id);
self.send_status_cmd("pl_delete", &[("id", &id.to_string())])?;
}
debug!("Update playlist complete");
Ok(())
}
fn locked(&self) -> bool {
self.pausing || self.sleeping
}
fn is_ok(&self) -> bool {
match self.send_status_cmd("", &[]) {
Ok(_) => true,
Err(e) => {
debug!("Got error response while checking health of VLC: {}", e);
false
}
}
}
}
impl<C: HttpClient> Drop for VlcPlayer<C> {
fn drop(&mut self) {
if let Some(mut proc) = self.process.take() {
            // Rust's Command doesn't support sending signals other than SIGKILL through its portable interface
unsafe {
libc::kill(proc.id() as i32, libc::SIGTERM);
}
match proc.wait() {
                Ok(status) => debug!("VLC process exited with {}", status.code().unwrap_or(-1)),
Err(e) => warn!("Failed to stop VLC process gracefully: {}", e),
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::fs;
use std::io::Write;
use std::os::unix::fs::PermissionsExt;
use tempfile;
impl<F: Fn(&str, &HashMap<&str, &str>) -> std::result::Result<String, VlcError>> HttpClient for F {
fn send_get(
&self,
_port: u32,
path: &str,
params: &[(&str, &str)],
) -> std::result::Result<String, VlcError> {
self(path, ¶ms.into_iter().map(|v| *v).collect())
}
}
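    // A minimal sketch of driving the player through the closure-based mock above.
    // `VlcConfig::default()` is assumed purely for brevity here; the assertions only check that
    // the health check hits the status endpoint without any command parameter.
    #[test]
    fn health_check_queries_status_endpoint() {
        let mock = |path: &str, params: &HashMap<&str, &str>| -> std::result::Result<String, VlcError> {
            assert_eq!(path, "requests/status.xml");
            assert!(params.is_empty());
            Ok("<root></root>".to_string())
        };
        let player = VlcPlayer::new_with_client(VlcConfig::default(), mock);
        assert!(player.is_ok());
    }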
|
random_line_split
|
||
player_vlc.rs
|
VlcConfig) -> Self {
Self::new_with_client(
config,
ReqwestClient(
reqwest::Client::builder()
.timeout(Some(Duration::from_secs(VLC_REQUEST_TIMEOUT)))
.build()
.expect("reqwest client"),
),
)
}
}
impl<C: HttpClient> VlcPlayer<C> {
fn new_with_client(config: VlcConfig, client: C) -> Self {
Self {
vlc_config: config,
config: None,
process: None,
client,
pausing: false,
sleeping: false,
muting: false,
}
}
fn config(&self) -> std::result::Result<&SlideshowConfig, VlcError> {
self.config.as_ref().ok_or(VlcError::NotStarted)
}
/// Convert `audio_volume` set in config into the value
/// range used in VLC player
fn audio_volume(&self) -> std::result::Result<u32, VlcError> {
Ok((VLC_VOLUME_MAX as f32 * self.config()?.audio_volume).round() as u32)
}
fn http_port(&self) -> u32 {
self.vlc_config.http_port.unwrap_or(VLC_DEFAULT_HTTP_PORT)
}
fn send_get(
&self,
path: &str,
params: &[(&str, &str)],
) -> std::result::Result<String, VlcError> {
self.client.send_get(self.http_port(), path, params)
}
fn send_status_cmd(
&self,
cmd: &str,
args: &[(&str, &str)],
) -> std::result::Result<String, VlcError> {
let mut params = Vec::with_capacity(args.len() + 1);
if !cmd.is_empty() {
params.push(("command", cmd));
params.extend(args);
}
self.send_get("requests/status.xml", ¶ms)
}
fn get_playlist(&self) -> std::result::Result<Element, VlcError> {
let xml = self.send_get("requests/playlist.xml", &[])?;
debug!("Playlist XML from VLC: {}", xml);
let element = Element::from_reader(xml.into_bytes().as_slice())?;
Ok(element)
}
fn wait_on_http_interface(&self) -> std::result::Result<(), VlcError> {
let start_time = Instant::now();
while Instant::now() - start_time < VLC_STARTUP_TIMEOUT {
if self.is_ok() {
return Ok(());
}
std::thread::sleep(VLC_STARTUP_CHECK_BACKOFF);
}
Err(VlcError::StartTimeout)
}
fn set_volume(&self, volume: u32) -> std::result::Result<(), VlcError> {
info!("Setting audio volume to {}", volume);
self.send_status_cmd("volume", &[("val", &volume.to_string())])?;
Ok(())
}
fn playlist_ids(element: Element) -> std::result::Result<Vec<u64>, VlcError> {
for node in element.find_all("node") {
if node
.get_attr("name")
.map(|name| name == "Playlist")
.unwrap_or(false)
{
let mut ids = Vec::new();
for leaf in node.find_all("leaf") {
let id_s = leaf.get_attr("id").ok_or_else(|| {
VlcError::BadResponse(format_err!("missing id attribute"))
})?;
let id: u64 = id_s.parse().map_err(|_| {
VlcError::BadResponse(format_err!("cannot parse id: {}", id_s))
})?;
ids.push(id);
}
return Ok(ids);
}
}
Err(VlcError::BadResponse(format_err!(
"no playlist found in XML"
)))
}
fn maybe_restore_pause(&self) -> std::result::Result<(), VlcError> {
// Moving resets the pausing state
if self.locked() {
// Pausing before play starts causes blackscreen
std::thread::sleep(Duration::from_secs(1));
self.send_status_cmd("pl_pause", &[])?;
}
Ok(())
}
fn maybe_pause(&self) -> std::result::Result<(), VlcError> {
if !self.pausing && !self.sleeping {
self.send_status_cmd("pl_pause", &[])?;
}
Ok(())
}
fn maybe_resume(&mut self, resume: bool) -> std::result::Result<(), VlcError> {
if (self.pausing && resume) || (self.sleeping && !self.pausing) {
self.send_status_cmd("pl_play", &[])?;
self.pausing = false;
self.sleeping = false;
}
Ok(())
}
}
impl<C: HttpClient> Player for VlcPlayer<C> {
fn start(&mut self, config: SlideshowConfig) -> Result<()> {
let vlc_bin = self
.vlc_config
.vlc_bin
.as_ref()
.map(|s| s.as_ref())
.unwrap_or(VLC_DEFAULT_BIN);
let mut cmd = Command::new(vlc_bin);
cmd.arg("--loop")
.arg("--no-video-title-show")
            // Don't show the popup asking whether to fetch media metadata over the network
.arg("--no-qt-privacy-ask")
.arg("--no-qt-video-autoresize")
// https://wiki.videolan.org/index.php/VLC_command-line_help
.args(&[
"--image-duration",
&config.show_duration.as_secs().to_string(),
])
.args(&["--extraintf", "http"])
.args(&["--http-password", VLC_HTTP_PASSWORD])
.args(&["--http-host", VLC_HTTP_HOST])
.args(&["--http-port", &self.http_port().to_string()]);
if config.fullscreen {
cmd.arg("--fullscreen");
}
self.process = Some(cmd.spawn()?);
self.wait_on_http_interface()?;
self.config = Some(config);
self.set_volume(self.audio_volume()?)?;
Ok(())
}
fn play_next(&mut self) -> Result<()> {
self.send_status_cmd("pl_next", &[])?;
self.maybe_restore_pause()?;
Ok(())
}
fn play_back(&mut self) -> Result<()> {
self.send_status_cmd("pl_previous", &[])?;
self.maybe_restore_pause()?;
Ok(())
}
fn sleep(&mut self) -> Result<()> {
self.maybe_pause()?;
self.sleeping = true;
Ok(())
}
fn wakeup(&mut self) -> Result<()> {
self.maybe_resume(false)?;
Ok(())
}
fn pause(&mut self) -> Result<()> {
self.maybe_pause()?;
self.pausing = true;
Ok(())
}
fn resume(&mut self) -> Result<()> {
self.maybe_resume(true)?;
Ok(())
}
fn mute(&mut self) -> Result<()> {
if !self.muting {
self.set_volume(0)?;
}
self.muting = true;
Ok(())
}
fn unmute(&mut self) -> Result<()> {
if self.muting {
self.set_volume(self.audio_volume()?)?;
}
self.muting = false;
Ok(())
}
fn update_playlist(&mut self, playlist: Vec<PathBuf>) -> Result<()> {
debug!("Start updating playlist");
// 1. get current playlist
let old_ids = Self::playlist_ids(self.get_playlist()?)?;
// 2. enqueue all new items
for path in playlist {
debug!("Adding new item to playlist: {}", path.display());
self.send_status_cmd("in_enqueue", &[("input", path.to_str().unwrap())])?;
}
// 3. move to the head of new items
let cur_ids = Self::playlist_ids(self.get_playlist()?)?;
let head_id = cur_ids[old_ids.len()];
debug!("Jumping to playlist ID: {}", head_id);
self.send_status_cmd("pl_play", &[("id", &head_id.to_string())])?;
std::thread::sleep(Duration::from_secs(1));
// 4. Remove old items from playlist (assuming current media won't come up so soon)
for id in old_ids {
debug!("Removing old item from playlist: {}", id);
self.send_status_cmd("pl_delete", &[("id", &id.to_string())])?;
}
debug!("Update playlist complete");
Ok(())
}
fn locked(&self) -> bool {
self.pausing || self.sleeping
}
fn is_ok(&self) -> bool {
match self.send_status_cmd("", &[]) {
Ok(_) => true,
Err(e) => {
debug!("Got error response while checking health of VLC: {}", e);
false
}
}
}
}
impl<C: HttpClient> Drop for VlcPlayer<C> {
fn drop(&mut self) {
if let Some(mut proc) = self.process.take()
|
{
            // Rust's Command doesn't support sending signals other than SIGKILL through its portable interface
unsafe {
libc::kill(proc.id() as i32, libc::SIGTERM);
}
match proc.wait() {
                Ok(status) => debug!("VLC process exited with {}", status.code().unwrap_or(-1)),
Err(e) => warn!("Failed to stop VLC process gracefully: {}", e),
}
}
|
conditional_block
|
|
render.ts
|
Layout) {
// Run through our data, calculate normals and such.
const t = vec3.create();
const posData = new Float32Array(chunk.indexData.length * 3);
const nrmData = new Float32Array(chunk.indexData.length * 3);
for (let i = 0; i < chunk.indexData.length; i += 3) {
const i0 = chunk.indexData[i + 0];
const i1 = chunk.indexData[i + 1];
const i2 = chunk.indexData[i + 2];
const t0x = chunk.positionData[i0 * 3 + 0];
const t0y = chunk.positionData[i0 * 3 + 1];
const t0z = chunk.positionData[i0 * 3 + 2];
const t1x = chunk.positionData[i1 * 3 + 0];
const t1y = chunk.positionData[i1 * 3 + 1];
const t1z = chunk.positionData[i1 * 3 + 2];
const t2x = chunk.positionData[i2 * 3 + 0];
const t2y = chunk.positionData[i2 * 3 + 1];
const t2z = chunk.positionData[i2 * 3 + 2];
vec3.cross(t, [t0x - t1x, t0y - t1y, t0z - t1z], [t0x - t2x, t0y - t2y, t0z - t2z]);
vec3.normalize(t, t);
posData[(i + 0) * 3 + 0] = t0x;
posData[(i + 0) * 3 + 1] = t0y;
posData[(i + 0) * 3 + 2] = t0z;
posData[(i + 1) * 3 + 0] = t1x;
posData[(i + 1) * 3 + 1] = t1y;
posData[(i + 1) * 3 + 2] = t1z;
posData[(i + 2) * 3 + 0] = t2x;
posData[(i + 2) * 3 + 1] = t2y;
posData[(i + 2) * 3 + 2] = t2z;
nrmData[(i + 0) * 3 + 0] = t[0];
nrmData[(i + 0) * 3 + 1] = t[1];
nrmData[(i + 0) * 3 + 2] = t[2];
nrmData[(i + 1) * 3 + 0] = t[0];
nrmData[(i + 1) * 3 + 1] = t[1];
nrmData[(i + 1) * 3 + 2] = t[2];
nrmData[(i + 2) * 3 + 0] = t[0];
nrmData[(i + 2) * 3 + 1] = t[1];
nrmData[(i + 2) * 3 + 2] = t[2];
}
this.posBuffer = makeStaticDataBuffer(device, GfxBufferUsage.Vertex, posData.buffer);
this.nrmBuffer = makeStaticDataBuffer(device, GfxBufferUsage.Vertex, nrmData.buffer);
this.vertexBufferDescriptors = [
{ buffer: this.posBuffer, byteOffset: 0, },
{ buffer: this.nrmBuffer, byteOffset: 0, },
];
this.numVertices = chunk.indexData.length;
}
public prepareToRender(renderInstManager: GfxRenderInstManager): void {
const renderInst = renderInstManager.newRenderInst();
renderInst.setVertexInput(this.inputLayout, this.vertexBufferDescriptors, null);
renderInst.drawPrimitives(this.numVertices);
renderInstManager.submitRenderInst(renderInst);
}
public destroy(device: GfxDevice): void {
device.destroyBuffer(this.posBuffer);
device.destroyBuffer(this.nrmBuffer);
}
}
export class IVRenderer {
public visible: boolean = true;
public name: string;
private chunks: Chunk[];
constructor(device: GfxDevice, public iv: IV.IV, inputLayout: GfxInputLayout) {
// TODO(jstpierre): Coalesce chunks?
this.name = iv.name;
this.chunks = this.iv.chunks.map((chunk) => new Chunk(device, chunk, inputLayout));
}
public setVisible(v: boolean) {
this.visible = v;
}
public prepareToRender(renderInstManager: GfxRenderInstManager): void {
if (!this.visible)
return;
const templateRenderInst = renderInstManager.pushTemplateRenderInst();
let offs = templateRenderInst.allocateUniformBuffer(IVProgram.ub_ObjectParams, 4);
const d = templateRenderInst.mapUniformBufferF32(IVProgram.ub_ObjectParams);
offs += fillColor(d, offs, this.iv.color);
for (let i = 0; i < this.chunks.length; i++)
this.chunks[i].prepareToRender(renderInstManager);
renderInstManager.popTemplateRenderInst();
}
public destroy(device: GfxDevice): void {
this.chunks.forEach((chunk) => chunk.destroy(device));
}
}
const bindingLayouts: GfxBindingLayoutDescriptor[] = [
{ numUniformBuffers: 2, numSamplers: 0 }, // ub_SceneParams
];
export class Scene implements Viewer.SceneGfx {
private inputLayout: GfxInputLayout;
private program: GfxProgram;
private ivRenderers: IVRenderer[] = [];
private renderHelper: GfxRenderHelper;
constructor(device: GfxDevice, public ivs: IV.IV[]) {
this.renderHelper = new GfxRenderHelper(device);
this.program = this.renderHelper.renderCache.createProgram(new IVProgram());
const vertexAttributeDescriptors: GfxVertexAttributeDescriptor[] = [
{ location: IVProgram.a_Position, bufferIndex: 0, bufferByteOffset: 0, format: GfxFormat.F32_RGB, },
{ location: IVProgram.a_Normal, bufferIndex: 1, bufferByteOffset: 0, format: GfxFormat.F32_RGB, },
];
const vertexBufferDescriptors: GfxInputLayoutBufferDescriptor[] = [
{ byteStride: 3*0x04, frequency: GfxVertexBufferFrequency.PerVertex, },
{ byteStride: 3*0x04, frequency: GfxVertexBufferFrequency.PerVertex, },
];
const indexBufferFormat: GfxFormat | null = null;
const cache = this.renderHelper.renderCache;
this.inputLayout = cache.createInputLayout({ vertexAttributeDescriptors, vertexBufferDescriptors, indexBufferFormat });
this.ivRenderers = this.ivs.map((iv) => {
return new IVRenderer(device, iv, this.inputLayout);
});
}
public adjustCameraController(c: CameraController) {
c.setSceneMoveSpeedMult(16/60);
}
private prepareToRender(device: GfxDevice, viewerInput: Viewer.ViewerRenderInput): void {
const template = this.renderHelper.pushTemplateRenderInst();
template.setBindingLayouts(bindingLayouts);
template.setGfxProgram(this.program);
template.setMegaStateFlags({ cullMode: GfxCullMode.Back });
let offs = template.allocateUniformBuffer(IVProgram.ub_SceneParams, 32);
const mapped = template.mapUniformBufferF32(IVProgram.ub_SceneParams);
offs += fillMatrix4x4(mapped, offs, viewerInput.camera.projectionMatrix);
offs += fillMatrix4x4(mapped, offs, viewerInput.camera.viewMatrix);
for (let i = 0; i < this.ivRenderers.length; i++)
this.ivRenderers[i].prepareToRender(this.renderHelper.renderInstManager);
this.renderHelper.renderInstManager.popTemplateRenderInst();
this.renderHelper.prepareToRender();
}
public render(device: GfxDevice, viewerInput: Viewer.ViewerRenderInput)
|
{
const renderInstManager = this.renderHelper.renderInstManager;
const mainColorDesc = makeBackbufferDescSimple(GfxrAttachmentSlot.Color0, viewerInput, standardFullClearRenderPassDescriptor);
const mainDepthDesc = makeBackbufferDescSimple(GfxrAttachmentSlot.DepthStencil, viewerInput, standardFullClearRenderPassDescriptor);
const builder = this.renderHelper.renderGraph.newGraphBuilder();
const mainColorTargetID = builder.createRenderTargetID(mainColorDesc, 'Main Color');
const mainDepthTargetID = builder.createRenderTargetID(mainDepthDesc, 'Main Depth');
builder.pushPass((pass) => {
pass.setDebugName('Main');
pass.attachRenderTargetID(GfxrAttachmentSlot.Color0, mainColorTargetID);
pass.attachRenderTargetID(GfxrAttachmentSlot.DepthStencil, mainDepthTargetID);
pass.exec((passRenderer) => {
renderInstManager.drawOnPassRenderer(passRenderer);
});
});
pushAntialiasingPostProcessPass(builder, this.renderHelper, viewerInput, mainColorTargetID);
builder.resolveRenderTargetToExternalTexture(mainColorTargetID, viewerInput.onscreenTexture);
|
identifier_body
|
|
render.ts
|
'../Camera.js';
import { GfxrAttachmentSlot } from '../gfx/render/GfxRenderGraph.js';
class IVProgram extends DeviceProgram {
public static a_Position = 0;
public static a_Normal = 1;
public static ub_SceneParams = 0;
public static ub_ObjectParams = 1;
public override both = `
precision mediump float;
layout(std140) uniform ub_SceneParams {
Mat4x4 u_Projection;
Mat4x4 u_ModelView;
};
layout(std140) uniform ub_ObjectParams {
vec4 u_Color;
};
varying vec2 v_LightIntensity;
#ifdef VERT
layout(location = ${IVProgram.a_Position}) attribute vec3 a_Position;
layout(location = ${IVProgram.a_Normal}) attribute vec3 a_Normal;
void mainVS() {
const float t_ModelScale = 20.0;
gl_Position = Mul(u_Projection, Mul(u_ModelView, vec4(a_Position * t_ModelScale, 1.0)));
vec3 t_LightDirection = normalize(vec3(.2, -1, .5));
float t_LightIntensityF = dot(-a_Normal, t_LightDirection);
float t_LightIntensityB = dot( a_Normal, t_LightDirection);
v_LightIntensity = vec2(t_LightIntensityF, t_LightIntensityB);
}
#endif
#ifdef FRAG
void mainPS() {
float t_LightIntensity = gl_FrontFacing ? v_LightIntensity.x : v_LightIntensity.y;
float t_LightTint = 0.3 * t_LightIntensity;
gl_FragColor = u_Color + vec4(t_LightTint, t_LightTint, t_LightTint, 0.0);
}
#endif
`;
}
class Chunk {
public numVertices: number;
public posBuffer: GfxBuffer;
public nrmBuffer: GfxBuffer;
public vertexBufferDescriptors: GfxVertexBufferDescriptor[];
constructor(device: GfxDevice, public chunk: IV.Chunk, private inputLayout: GfxInputLayout) {
// Run through our data, calculate normals and such.
const t = vec3.create();
const posData = new Float32Array(chunk.indexData.length * 3);
const nrmData = new Float32Array(chunk.indexData.length * 3);
for (let i = 0; i < chunk.indexData.length; i += 3) {
const i0 = chunk.indexData[i + 0];
const i1 = chunk.indexData[i + 1];
const i2 = chunk.indexData[i + 2];
const t0x = chunk.positionData[i0 * 3 + 0];
const t0y = chunk.positionData[i0 * 3 + 1];
const t0z = chunk.positionData[i0 * 3 + 2];
const t1x = chunk.positionData[i1 * 3 + 0];
const t1y = chunk.positionData[i1 * 3 + 1];
const t1z = chunk.positionData[i1 * 3 + 2];
const t2x = chunk.positionData[i2 * 3 + 0];
const t2y = chunk.positionData[i2 * 3 + 1];
const t2z = chunk.positionData[i2 * 3 + 2];
vec3.cross(t, [t0x - t1x, t0y - t1y, t0z - t1z], [t0x - t2x, t0y - t2y, t0z - t2z]);
vec3.normalize(t, t);
posData[(i + 0) * 3 + 0] = t0x;
posData[(i + 0) * 3 + 1] = t0y;
posData[(i + 0) * 3 + 2] = t0z;
posData[(i + 1) * 3 + 0] = t1x;
posData[(i + 1) * 3 + 1] = t1y;
posData[(i + 1) * 3 + 2] = t1z;
posData[(i + 2) * 3 + 0] = t2x;
posData[(i + 2) * 3 + 1] = t2y;
posData[(i + 2) * 3 + 2] = t2z;
nrmData[(i + 0) * 3 + 0] = t[0];
nrmData[(i + 0) * 3 + 1] = t[1];
nrmData[(i + 0) * 3 + 2] = t[2];
nrmData[(i + 1) * 3 + 0] = t[0];
nrmData[(i + 1) * 3 + 1] = t[1];
nrmData[(i + 1) * 3 + 2] = t[2];
nrmData[(i + 2) * 3 + 0] = t[0];
nrmData[(i + 2) * 3 + 1] = t[1];
nrmData[(i + 2) * 3 + 2] = t[2];
}
this.posBuffer = makeStaticDataBuffer(device, GfxBufferUsage.Vertex, posData.buffer);
this.nrmBuffer = makeStaticDataBuffer(device, GfxBufferUsage.Vertex, nrmData.buffer);
this.vertexBufferDescriptors = [
{ buffer: this.posBuffer, byteOffset: 0, },
{ buffer: this.nrmBuffer, byteOffset: 0, },
];
this.numVertices = chunk.indexData.length;
}
public prepareToRender(renderInstManager: GfxRenderInstManager): void {
const renderInst = renderInstManager.newRenderInst();
renderInst.setVertexInput(this.inputLayout, this.vertexBufferDescriptors, null);
renderInst.drawPrimitives(this.numVertices);
renderInstManager.submitRenderInst(renderInst);
}
public destroy(device: GfxDevice): void {
device.destroyBuffer(this.posBuffer);
device.destroyBuffer(this.nrmBuffer);
}
}
export class IVRenderer {
public visible: boolean = true;
public name: string;
private chunks: Chunk[];
constructor(device: GfxDevice, public iv: IV.IV, inputLayout: GfxInputLayout) {
// TODO(jstpierre): Coalesce chunks?
this.name = iv.name;
this.chunks = this.iv.chunks.map((chunk) => new Chunk(device, chunk, inputLayout));
}
public setVisible(v: boolean) {
this.visible = v;
}
public prepareToRender(renderInstManager: GfxRenderInstManager): void {
if (!this.visible)
return;
|
const d = templateRenderInst.mapUniformBufferF32(IVProgram.ub_ObjectParams);
offs += fillColor(d, offs, this.iv.color);
for (let i = 0; i < this.chunks.length; i++)
this.chunks[i].prepareToRender(renderInstManager);
renderInstManager.popTemplateRenderInst();
}
public destroy(device: GfxDevice): void {
this.chunks.forEach((chunk) => chunk.destroy(device));
}
}
const bindingLayouts: GfxBindingLayoutDescriptor[] = [
{ numUniformBuffers: 2, numSamplers: 0 }, // ub_SceneParams
];
export class Scene implements Viewer.SceneGfx {
private inputLayout: GfxInputLayout;
private program: GfxProgram;
private ivRenderers: IVRenderer[] = [];
private renderHelper: GfxRenderHelper;
constructor(device: GfxDevice, public ivs: IV.IV[]) {
this.renderHelper = new GfxRenderHelper(device);
this.program = this.renderHelper.renderCache.createProgram(new IVProgram());
const vertexAttributeDescriptors: GfxVertexAttributeDescriptor[] = [
{ location: IVProgram.a_Position, bufferIndex: 0, bufferByteOffset: 0, format: GfxFormat.F32_RGB, },
{ location: IVProgram.a_Normal, bufferIndex: 1, bufferByteOffset: 0, format: GfxFormat.F32_RGB, },
];
const vertexBufferDescriptors: GfxInputLayoutBufferDescriptor[] = [
{ byteStride: 3*0x04, frequency: GfxVertexBufferFrequency.PerVertex, },
{ byteStride: 3*0x04, frequency: GfxVertexBufferFrequency.PerVertex, },
];
const indexBufferFormat: GfxFormat | null = null;
const cache = this.renderHelper.renderCache;
this.inputLayout = cache.createInputLayout({ vertexAttributeDescriptors, vertexBufferDescriptors, indexBufferFormat });
this.ivRenderers = this.ivs.map((iv) => {
return new IVRenderer(device, iv, this.inputLayout);
});
}
public adjustCameraController(c: CameraController) {
c.setSceneMoveSpeedMult(16/60);
}
private prepareToRender(device: GfxDevice, viewerInput: Viewer.ViewerRenderInput):
|
const templateRenderInst = renderInstManager.pushTemplateRenderInst();
let offs = templateRenderInst.allocateUniformBuffer(IVProgram.ub_ObjectParams, 4);
|
random_line_split
|
render.ts
|
'../Camera.js';
import { GfxrAttachmentSlot } from '../gfx/render/GfxRenderGraph.js';
class IVProgram extends DeviceProgram {
public static a_Position = 0;
public static a_Normal = 1;
public static ub_SceneParams = 0;
public static ub_ObjectParams = 1;
public override both = `
precision mediump float;
layout(std140) uniform ub_SceneParams {
Mat4x4 u_Projection;
Mat4x4 u_ModelView;
};
layout(std140) uniform ub_ObjectParams {
vec4 u_Color;
};
varying vec2 v_LightIntensity;
#ifdef VERT
layout(location = ${IVProgram.a_Position}) attribute vec3 a_Position;
layout(location = ${IVProgram.a_Normal}) attribute vec3 a_Normal;
void mainVS() {
const float t_ModelScale = 20.0;
gl_Position = Mul(u_Projection, Mul(u_ModelView, vec4(a_Position * t_ModelScale, 1.0)));
vec3 t_LightDirection = normalize(vec3(.2, -1, .5));
float t_LightIntensityF = dot(-a_Normal, t_LightDirection);
float t_LightIntensityB = dot( a_Normal, t_LightDirection);
v_LightIntensity = vec2(t_LightIntensityF, t_LightIntensityB);
}
#endif
#ifdef FRAG
void mainPS() {
float t_LightIntensity = gl_FrontFacing ? v_LightIntensity.x : v_LightIntensity.y;
float t_LightTint = 0.3 * t_LightIntensity;
gl_FragColor = u_Color + vec4(t_LightTint, t_LightTint, t_LightTint, 0.0);
}
#endif
`;
}
class Chunk {
public numVertices: number;
public posBuffer: GfxBuffer;
public nrmBuffer: GfxBuffer;
public vertexBufferDescriptors: GfxVertexBufferDescriptor[];
constructor(device: GfxDevice, public chunk: IV.Chunk, private inputLayout: GfxInputLayout) {
// Run through our data, calculate normals and such.
const t = vec3.create();
const posData = new Float32Array(chunk.indexData.length * 3);
const nrmData = new Float32Array(chunk.indexData.length * 3);
for (let i = 0; i < chunk.indexData.length; i += 3) {
const i0 = chunk.indexData[i + 0];
const i1 = chunk.indexData[i + 1];
const i2 = chunk.indexData[i + 2];
const t0x = chunk.positionData[i0 * 3 + 0];
const t0y = chunk.positionData[i0 * 3 + 1];
const t0z = chunk.positionData[i0 * 3 + 2];
const t1x = chunk.positionData[i1 * 3 + 0];
const t1y = chunk.positionData[i1 * 3 + 1];
const t1z = chunk.positionData[i1 * 3 + 2];
const t2x = chunk.positionData[i2 * 3 + 0];
const t2y = chunk.positionData[i2 * 3 + 1];
const t2z = chunk.positionData[i2 * 3 + 2];
vec3.cross(t, [t0x - t1x, t0y - t1y, t0z - t1z], [t0x - t2x, t0y - t2y, t0z - t2z]);
vec3.normalize(t, t);
posData[(i + 0) * 3 + 0] = t0x;
posData[(i + 0) * 3 + 1] = t0y;
posData[(i + 0) * 3 + 2] = t0z;
posData[(i + 1) * 3 + 0] = t1x;
posData[(i + 1) * 3 + 1] = t1y;
posData[(i + 1) * 3 + 2] = t1z;
posData[(i + 2) * 3 + 0] = t2x;
posData[(i + 2) * 3 + 1] = t2y;
posData[(i + 2) * 3 + 2] = t2z;
nrmData[(i + 0) * 3 + 0] = t[0];
nrmData[(i + 0) * 3 + 1] = t[1];
nrmData[(i + 0) * 3 + 2] = t[2];
nrmData[(i + 1) * 3 + 0] = t[0];
nrmData[(i + 1) * 3 + 1] = t[1];
nrmData[(i + 1) * 3 + 2] = t[2];
nrmData[(i + 2) * 3 + 0] = t[0];
nrmData[(i + 2) * 3 + 1] = t[1];
nrmData[(i + 2) * 3 + 2] = t[2];
}
this.posBuffer = makeStaticDataBuffer(device, GfxBufferUsage.Vertex, posData.buffer);
this.nrmBuffer = makeStaticDataBuffer(device, GfxBufferUsage.Vertex, nrmData.buffer);
this.vertexBufferDescriptors = [
{ buffer: this.posBuffer, byteOffset: 0, },
{ buffer: this.nrmBuffer, byteOffset: 0, },
];
this.numVertices = chunk.indexData.length;
}
public prepareToRender(renderInstManager: GfxRenderInstManager): void {
const renderInst = renderInstManager.newRenderInst();
renderInst.setVertexInput(this.inputLayout, this.vertexBufferDescriptors, null);
renderInst.drawPrimitives(this.numVertices);
renderInstManager.submitRenderInst(renderInst);
}
public destroy(device: GfxDevice): void {
device.destroyBuffer(this.posBuffer);
device.destroyBuffer(this.nrmBuffer);
}
}
export class IVRenderer {
public visible: boolean = true;
public name: string;
private chunks: Chunk[];
constructor(device: GfxDevice, public iv: IV.IV, inputLayout: GfxInputLayout) {
// TODO(jstpierre): Coalesce chunks?
this.name = iv.name;
this.chunks = this.iv.chunks.map((chunk) => new Chunk(device, chunk, inputLayout));
}
public setVisible(v: boolean) {
this.visible = v;
}
public
|
(renderInstManager: GfxRenderInstManager): void {
if (!this.visible)
return;
const templateRenderInst = renderInstManager.pushTemplateRenderInst();
let offs = templateRenderInst.allocateUniformBuffer(IVProgram.ub_ObjectParams, 4);
const d = templateRenderInst.mapUniformBufferF32(IVProgram.ub_ObjectParams);
offs += fillColor(d, offs, this.iv.color);
for (let i = 0; i < this.chunks.length; i++)
this.chunks[i].prepareToRender(renderInstManager);
renderInstManager.popTemplateRenderInst();
}
public destroy(device: GfxDevice): void {
this.chunks.forEach((chunk) => chunk.destroy(device));
}
}
const bindingLayouts: GfxBindingLayoutDescriptor[] = [
{ numUniformBuffers: 2, numSamplers: 0 }, // ub_SceneParams
];
export class Scene implements Viewer.SceneGfx {
private inputLayout: GfxInputLayout;
private program: GfxProgram;
private ivRenderers: IVRenderer[] = [];
private renderHelper: GfxRenderHelper;
constructor(device: GfxDevice, public ivs: IV.IV[]) {
this.renderHelper = new GfxRenderHelper(device);
this.program = this.renderHelper.renderCache.createProgram(new IVProgram());
const vertexAttributeDescriptors: GfxVertexAttributeDescriptor[] = [
{ location: IVProgram.a_Position, bufferIndex: 0, bufferByteOffset: 0, format: GfxFormat.F32_RGB, },
{ location: IVProgram.a_Normal, bufferIndex: 1, bufferByteOffset: 0, format: GfxFormat.F32_RGB, },
];
const vertexBufferDescriptors: GfxInputLayoutBufferDescriptor[] = [
{ byteStride: 3*0x04, frequency: GfxVertexBufferFrequency.PerVertex, },
{ byteStride: 3*0x04, frequency: GfxVertexBufferFrequency.PerVertex, },
];
const indexBufferFormat: GfxFormat | null = null;
const cache = this.renderHelper.renderCache;
this.inputLayout = cache.createInputLayout({ vertexAttributeDescriptors, vertexBufferDescriptors, indexBufferFormat });
this.ivRenderers = this.ivs.map((iv) => {
return new IVRenderer(device, iv, this.inputLayout);
});
}
public adjustCameraController(c: CameraController) {
c.setSceneMoveSpeedMult(16/60);
}
private prepareToRender(device: GfxDevice, viewerInput: Viewer.ViewerRenderInput
|
prepareToRender
|
identifier_name
|
pdf.js
|
ff8746',
fontSize: 24,
offset: [-15, 0]
},
data: [{
value: value,
name: name,
areaStyle: {
normal: {
color: 'rgba( 255, 100, 15, 0.5 )'
}
},
itemStyle: {
normal: {
color: '#FF640F',
borderWidth: 4,
borderColor: '#FF640F'
}
},
lineStyle: {
normal: {
color: '#FF640F',
type: 'solid'
// width: 1
}
}
}]
}]
};
myChart.setOption(option);
return myChart;
};
pdf.renderRadarForLoanPdf = function (element, indicator, value, name) {
var myChart = echarts.init(element);
var option = {
tooltip: {
trigger: 'item',
enterable: true,
confine: true,
formatter: function (params) {
var str = params.name + '<br/>';
indicator.forEach(function (item, index) {
var val = value[index];
val = val === '' ? '暂无数据' : val;
str += item.name + ':' + val + '<br/>';
});
return str;
}
},
radar: {
splitNumber: 4,
radius: '58%',
name: {
textStyle: {
color: 'rgb(51,51,51)',
fontSize: 22
// padding: [3, 5]
}
},
splitArea: {
show: false,
areaStyle: {
// chart background color
color: '#fff'
}
},
axisLine: {
show: false
},
splitLine: {
show: true,
lineStyle: {
width: 1,
// grid line color
color: 'rgb(102, 102, 102)',
}
},
indicator: indicator
},
series: [{
name: name,
type: 'radar',
symbolSize: 6,
label: {
show: true,
position: 'top',
color: 'rgb(51,51,51)',
fontSize: 18,
offset: [0, 0]
},
data: [{
value: value,
name: name,
areaStyle: {
normal: {
color: 'rgba( 255, 100, 15, 0.0 )'
}
},
itemStyle: {
normal: {
borderWidth: 6,
borderColor: 'rgb(64, 125, 221)'
}
},
lineStyle: {
normal: {
color: 'rgba(255, 100, 15,0.3)',
type: 'solid',
width: 3
}
}
}]
}]
};
myChart.setOption(option);
return myChart;
};
/**
* Bar chart
*/
pdf.renderBarForPdf = function(element,xData,section) {
var colorList = [
['#37BBF8','#ff00ff'],
['#BCEE68','#B2DFEE'],
['#8B8B00','#8B7765'],
['#7B68EE','#7FFF00'],
['#0000EE','#FFA500'],
];
var sectionList = ['较差','中等','良好','优秀','极好'];
var myChart = echarts.init(element);
var option = {
xAxis: {
type: 'category',
show: true,
data: xData,
axisTick: {
show: false
},
axisLine: {
show: false,
},
boundaryGap: true,
axisLabel: {
color: '#666',
interval: 0,
fontSize: 18,
padding:[0,0,0,0],
},
},
yAxis: {
type: 'value',
max: 40,
show: false,
},
series: [
{
data: [20,20,20,20,20],
type: 'bar',
barWidth: '100%',
itemStyle: {
normal: {
color: function (params) {
return new echarts.graphic.LinearGradient(
0, 0, 1, 0,
[
{offset: 0, color: colorList[params.dataIndex][0]},
{offset: 1, color: colorList[params.dataIndex][1]}
]
)
},
}
},
label: {
show: true,
position: 'top',
distance: 10,
color: '#666666',
fontSize: 16,
formatter: function (params) {
if (params.name == section) {
return sectionList[params.dataIndex]+'ok';
} else {
return sectionList[params.dataIndex]
}
},
rich: {
}
}
}
]
};
myChart.setOption(option);
return myChart;
};
/**
* Get a parameter from the URL query string
* @param name the parameter name
* @returns {*}
*/
pdf.getQueryString = function (name) {
var reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)", "i"); // define the regular expression
var r = window.location.search.substr(1).match(reg);
if (r != null) return decodeURI(r[2]);
return null;
}
/**
* [Text comparison plugin]
* Takes two arguments, dom1 and dom2, and compares them using dom1 as the baseline.
* 0) dom1 and dom2 must not both be empty;
* 1) if dom1 is missing, dom2 is styled as an addition
* 2) if dom2 is missing, dom1 is styled as a deletion
* 3) if both dom1 and dom2 exist, a text diff is performed
*
*/
pdf.MyCompare = function (dom1, dom2) {
if (!dom1 && !dom2) {
// console.log('Invalid arguments: dom1 and dom2 must not both be empty.');
return;
}
else if (!dom1) {
// dom1 missing: style dom2 as an addition
dom2.style.color = '#90EE90';
} else if (!dom2) {
// dom2 missing: style dom1 as a deletion
dom1.style.color = '#FF6347';
dom1.style.textDecoration = 'line-through';
} else {
// both present: run the text diff
var result = _eq({value1: dom1.innerText || dom1.innerHTML, value2: dom2.innerText || dom2.innerHTML});
dom1.innerHTML = result.value1;
dom2.innerHTML = result.value2;
}
}
function _eq(op) {
if (!op) {
return op;
}
if (!op.value1_style) {
op.value1_style = "color:#6bd5fc;";
}
if (!op.value2_style) {
op.value2_style = "color:#6bd5fc;";
}
if (!op.eq_min) {
op.eq_min = 3;
}
if (!op.eq_index) {
op.eq_index = 5;
}
if (!op.value1 || !op.value2) {
return op;
}
var ps = {
v1_i: 0,
v1_new_value: "",
v2_i: 0,
v2_new_value: ""
};
while (ps.v1_i < op.value1.length && ps.v2_i < op.va
|
lue2.length) {
if (op.value1[ps.v1_i] == op.value2[ps.v2_i]) {
ps.v1_new_value += op.value1[ps.v1_i].replace(/</g, "&lt;").replace(">", "&gt;");
ps.v2_new_value += op.value2[ps.v2_i].replace(/</g, "&lt;").replace(">", "&gt;");
ps.v1_i += 1;
ps.v2_i += 1;
if (ps.v1_i >= op.value1.length) {
ps.v2_new_value += "<span style='" + op.value2_style + "'>" + op.value2.substr(ps.v2_i).replace(/</g, "&lt;").replace(">", "&gt;") + "</span>";
break;
}
if (ps.v2_i >= op.value2.length) {
ps.v1_new_value += "<span style='" + op.value1_style + "'>" + op.value1.substr(ps.v1_i).replace(/</g, "&lt;").replace(">", "&gt;") + "</span>";
break;
}
} else {
ps.v1_index = ps.v1_i + 1;
ps.v1_eq_length = 0;
ps.v1_eq_max = 0;
ps.v1_start = ps.v1_i + 1;
while (ps.v1_index < op.value1.length) {
|
identifier_body
|
|
pdf.js
|
的颜色
color: '#fff'
}
},
axisLine: {
show: false
},
splitLine: {
show: true,
lineStyle: {
width: 1,
// grid line color
color: '#878787'
}
},
indicator: indicator
},
series: [{
name: name,
type: 'radar',
symbolSize: 6,
label: {
show: true,
position: 'top',
color: '#ff8746',
fontSize: 24,
offset: [-15, 0]
},
data: [{
value: value,
name: name,
areaStyle: {
normal: {
color: 'rgba( 255, 100, 15, 0.5 )'
}
},
itemStyle: {
normal: {
color: '#FF640F',
borderWidth: 4,
borderColor: '#FF640F'
}
},
lineStyle: {
normal: {
color: '#FF640F',
type: 'solid'
// width: 1
}
}
}]
}]
};
myChart.setOption(option);
return myChart;
};
pdf.renderRadarForLoanPdf = function (element, indicator, value, name) {
var myChart = echarts.init(element);
var option = {
tooltip: {
trigger: 'item',
enterable: true,
confine: true,
formatter: function (params) {
var str = params.name + '<br/>';
indicator.forEach(function (item, index) {
var val = value[index];
val = val === '' ? '暂无数据' : val;
str += item.name + ':' + val + '<br/>';
});
return str;
}
},
radar: {
splitNumber: 4,
radius: '58%',
name: {
textStyle: {
color: 'rgb(51,51,51)',
fontSize: 22
// padding: [3, 5]
}
},
splitArea: {
show: false,
areaStyle: {
// chart background color
color: '#fff'
}
},
axisLine: {
show: false
},
splitLine: {
show: true,
lineStyle: {
width: 1,
// grid line color
color: 'rgb(102, 102, 102)',
}
},
indicator: indicator
},
series: [{
name: name,
type: 'radar',
symbolSize: 6,
label: {
show: true,
position: 'top',
color: 'rgb(51,51,51)',
fontSize: 18,
offset: [0, 0]
},
data: [{
value: value,
name: name,
areaStyle: {
normal: {
color: 'rgba( 255, 100, 15, 0.0 )'
}
},
itemStyle: {
normal: {
borderWidth: 6,
borderColor: 'rgb(64, 125, 221)'
}
},
lineStyle: {
normal: {
color: 'rgba(255, 100, 15,0.3)',
type: 'solid',
width: 3
}
}
}]
}]
};
myChart.setOption(option);
return myChart;
};
/**
* Bar chart
*/
pdf.renderBarForPdf = function(element,xData,section) {
var colorList = [
['#37BBF8','#ff00ff'],
['#BCEE68','#B2DFEE'],
['#8B8B00','#8B7765'],
['#7B68EE','#7FFF00'],
['#0000EE','#FFA500'],
];
var sectionList = ['较差','中等','良好','优秀','极好'];
var myChart = echarts.init(element);
var option = {
xAxis: {
type: 'category',
show: true,
data: xData,
axisTick: {
show: false
},
axisLine: {
show: false,
},
boundaryGap: true,
axisLabel: {
color: '#666',
interval: 0,
fontSize: 18,
padding:[0,0,0,0],
},
},
yAxis: {
type: 'value',
max: 40,
show: false,
},
series: [
{
data: [20,20,20,20,20],
type: 'bar',
barWidth: '100%',
itemStyle: {
normal: {
color: function (params) {
return new echarts.graphic.LinearGradient(
0, 0, 1, 0,
[
{offset: 0, color: colorList[params.dataIndex][0]},
{offset: 1, color: colorList[params.dataIndex][1]}
]
)
},
}
},
label: {
show: true,
position: 'top',
distance: 10,
color: '#666666',
fontSize: 16,
formatter: function (params) {
if (params.name == section) {
return sectionList[params.dataIndex]+'ok';
} else {
return sectionList[params.dataIndex]
}
},
rich: {
}
}
}
]
};
myChart.setOption(option);
return myChart;
};
/**
* Get a parameter from the URL query string
* @param name the parameter name
* @returns {*}
*/
pdf.getQueryString = function (name) {
var reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)", "i"); // define the regular expression
var r = window.location.search.substr(1).match(reg);
if (r != null) return decodeURI(r[2]);
return null;
}
/**
* [Text comparison plugin]
* Takes two arguments, dom1 and dom2, and compares them using dom1 as the baseline.
* 0) dom1 and dom2 must not both be empty;
* 1) if dom1 is missing, dom2 is styled as an addition
* 2) if dom2 is missing, dom1 is styled as a deletion
* 3) if both dom1 and dom2 exist, a text diff is performed
*
*/
pdf.MyCompare = function (dom1, dom2) {
if (!dom1 && !dom2) {
// console.log('Invalid arguments: dom1 and dom2 must not both be empty.');
return;
}
else if (!dom1) {
// dom1 missing: style dom2 as an addition
dom2.style.color = '#90EE90';
} else if (!dom2) {
// dom2 missing: style dom1 as a deletion
dom1.style.color = '#FF6347';
dom1.style.textDecoration = 'line-through';
} else {
// both present: run the text diff
var result = _eq({value1: dom1.innerText || dom1.innerHTML, value2: dom2.innerText || dom2.innerHTML});
dom1.innerHTML = result.value1;
dom2.innerHTML = result.value2;
}
}
function _eq(op) {
if (!op) {
return op;
}
if (!op.value1_style) {
op.value1_style = "color:#6bd5fc;";
}
if (!op.value2_style) {
op.value2_style = "color:#6bd5fc;";
}
if (!op.eq_min) {
op.eq_min = 3;
}
if (!op.eq_index) {
op.eq_index = 5;
}
if (!op.value1 || !op.value2) {
return op;
}
var ps = {
v1_i: 0,
v1_new_value: "",
v2_i: 0,
v2_new_value: ""
};
while (ps.v1_i < op.value1.length && ps.v2_i
|
op.value2.length) {
if (op.value1[ps.v1_i] == op.value2[ps.v2_i]) {
ps.v1_new_value += op.value1[ps.v1_i].replace(/</g, "&lt;").replace(">", "&gt;");
ps.v2_new_value += op.value2[ps.v2_i].replace(/</g, "&lt;").replace(">", "&gt;");
ps.v1_i += 1;
ps.v2_i += 1;
if (ps.v1_i >= op.value1.length) {
ps.v2_new_value += "<span style='" + op.value2_style + "'>" + op.value2.substr(ps.v2_i).replace(/</g, "&lt;").replace(">", "&gt;") + "</span>";
break;
}
if (ps.v2_i >= op.value2.length) {
ps
|
<
|
identifier_name
|
pdf.js
|
color: '#fff'
}
},
axisLine: {
show: false
},
splitLine: {
show: true,
lineStyle: {
width: 1,
// grid line color
color: '#878787'
}
},
indicator: indicator
},
series: [{
name: name,
type: 'radar',
symbolSize: 6,
label: {
show: true,
position: 'top',
color: '#ff8746',
fontSize: 24,
offset: [-15, 0]
},
data: [{
value: value,
name: name,
areaStyle: {
normal: {
color: 'rgba( 255, 100, 15, 0.5 )'
}
},
itemStyle: {
normal: {
color: '#FF640F',
borderWidth: 4,
borderColor: '#FF640F'
}
},
lineStyle: {
normal: {
color: '#FF640F',
type: 'solid'
// width: 1
}
}
}]
}]
};
myChart.setOption(option);
return myChart;
};
pdf.renderRadarForLoanPdf = function (element, indicator, value, name) {
var myChart = echarts.init(element);
var option = {
tooltip: {
trigger: 'item',
enterable: true,
confine: true,
formatter: function (params) {
var str = params.name + '<br/>';
indicator.forEach(function (item, index) {
var val = value[index];
val = val === '' ? '暂无数据' : val;
str += item.name + ':' + val + '<br/>';
});
return str;
}
},
radar: {
splitNumber: 4,
radius: '58%',
name: {
textStyle: {
color: 'rgb(51,51,51)',
fontSize: 22
// padding: [3, 5]
}
},
splitArea: {
show: false,
areaStyle: {
                    // chart background color
color: '#fff'
}
},
axisLine: {
show: false
},
splitLine: {
show: true,
lineStyle: {
width: 1,
                        // grid line color
color: 'rgb(102, 102, 102)',
}
},
indicator: indicator
},
series: [{
name: name,
type: 'radar',
symbolSize: 6,
label: {
show: true,
position: 'top',
color: 'rgb(51,51,51)',
fontSize: 18,
offset: [0, 0]
},
data: [{
value: value,
name: name,
areaStyle: {
normal: {
color: 'rgba( 255, 100, 15, 0.0 )'
}
},
itemStyle: {
normal: {
borderWidth: 6,
borderColor: 'rgb(64, 125, 221)'
}
},
lineStyle: {
normal: {
color: 'rgba(255, 100, 15,0.3)',
type: 'solid',
width: 3
}
}
}]
}]
};
myChart.setOption(option);
return myChart;
};
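    // Usage sketch added for illustration (not part of the original file). `indicator` is a
    // standard ECharts radar indicator array and `value` must be a numeric array of the same
    // length, because the tooltip formatter above reads value[index] for every indicator entry.
    // All names and numbers below are hypothetical.
    pdf.demoRenderLoanRadar = function () {
        var indicator = [
            { name: 'A', max: 100 },
            { name: 'B', max: 100 },
            { name: 'C', max: 100 }
        ];
        var value = [72, 58, 90];
        return pdf.renderRadarForLoanPdf(document.getElementById('loan-radar'), indicator, value, 'demo');
    };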
/**
     * Bar chart
*/
pdf.renderBarForPdf = function(element,xData,section) {
var colorList = [
['#37BBF8','#ff00ff'],
['#BCEE68','#B2DFEE'],
['#8B8B00','#8B7765'],
['#7B68EE','#7FFF00'],
['#0000EE','#FFA500'],
];
var sectionList = ['较差','中等','良好','优秀','极好'];
var myChart = echarts.init(element);
var option = {
xAxis: {
type: 'category',
show: true,
data: xData,
axisTick: {
show: false
},
axisLine: {
show: false,
},
boundaryGap: true,
axisLabel: {
color: '#666',
interval: 0,
fontSize: 18,
padding:[0,0,0,0],
},
},
yAxis: {
type: 'value',
max: 40,
show: false,
},
series: [
{
data: [20,20,20,20,20],
type: 'bar',
barWidth: '100%',
itemStyle: {
normal: {
color: function (params) {
return new echarts.graphic.LinearGradient(
0, 0, 1, 0,
[
{offset: 0, color: colorList[params.dataIndex][0]},
{offset: 1, color: colorList[params.dataIndex][1]}
]
)
},
}
},
label: {
show: true,
position: 'top',
distance: 10,
color: '#666666',
fontSize: 16,
formatter: function (params) {
if (params.name == section) {
return sectionList[params.dataIndex]+'ok';
} else {
return sectionList[params.dataIndex]
}
},
rich: {
}
}
}
]
};
myChart.setOption(option);
return myChart;
};
/**
     * Get a parameter from the URL
     * @param name the parameter name
* @returns {*}
*/
pdf.getQueryString = function (name) {
        var reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)", "i"); // build the matching regular expression
var r = window.location.search.substr(1).match(reg);
if (r != null) return decodeURI(r[2]);
return null;
}
/**
     * [Text comparison plugin]
     * Takes two arguments, dom1 and dom2, and compares them with dom1 as the baseline.
     * 0) dom1 and dom2 must not both be empty;
     * 1) if dom1 does not exist, dom2 is styled as an addition
     * 2) if dom2 does not exist, dom1 is styled as a deletion
     * 3) if both dom1 and dom2 exist, a text diff is performed
*
*/
pdf.MyCompare = function (dom1, dom2) {
if (!dom1 && !dom2) {
                // console.log('Invalid arguments: dom1 and dom2 cannot both be empty.');
return;
}
else if (!dom1) {
                // dom1 is missing: style dom2 as an addition
dom2.style.color = '#90EE90';
} else if (!dom2) {
                // dom2 is missing: style dom1 as a deletion
dom1.style.color = '#FF6347';
dom1.style.textDecoration = 'line-through';
} else {
                // run the text diff
var result = _eq({value1: dom1.innerText || dom1.innerHTML, value2: dom2.innerText || dom2.innerHTML});
dom1.innerHTML = result.value1;
dom2.innerHTML = result.value2;
}
}
function _eq(op) {
if (!op) {
return op;
}
if (!op.value1_style) {
op.value1_style = "color:#6bd5fc;";
}
if (!op.value2_style) {
op.value2_style = "color:#6bd5fc;";
}
if (!op.eq_min) {
op.eq_min = 3;
}
if (!op.eq_index)
|
length && ps.v2_i < op.value2.length) {
if (op.value1[ps.v1_i] == op.value2[ps.v2_i]) {
                ps.v1_new_value += op.value1[ps.v1_i].replace(/</g, "&lt;").replace(">", "&gt;");
                ps.v2_new_value += op.value2[ps.v2_i].replace(/</g, "&lt;").replace(">", "&gt;");
ps.v1_i += 1;
ps.v2_i += 1;
if (ps.v1_i >= op.value1.length) {
ps.v2_new_value += "<span style='" + op.value2_style + "'>" + op.value2.substr(ps.v2_i).replace(/</g, "<").replace(">", ">") + "</span>";
break;
}
if (ps.v2_i >= op.value2.length) {
ps
|
{
op.eq_index = 5;
}
if (!op.value1 || !op.value2) {
return op;
}
var ps = {
v1_i: 0,
v1_new_value: "",
v2_i: 0,
v2_new_value: ""
};
while (ps.v1_i < op.value1.
|
conditional_block
|
pdf.js
|
的颜色
color: '#fff'
}
},
axisLine: {
show: false
},
splitLine: {
show: true,
lineStyle: {
width: 1,
                        // grid line color
color: '#878787'
}
},
indicator: indicator
},
series: [{
name: name,
type: 'radar',
symbolSize: 6,
label: {
show: true,
position: 'top',
color: '#ff8746',
fontSize: 24,
offset: [-15, 0]
},
data: [{
value: value,
name: name,
areaStyle: {
normal: {
color: 'rgba( 255, 100, 15, 0.5 )'
}
},
itemStyle: {
normal: {
color: '#FF640F',
borderWidth: 4,
borderColor: '#FF640F'
}
},
lineStyle: {
normal: {
color: '#FF640F',
type: 'solid'
// width: 1
}
}
}]
}]
};
myChart.setOption(option);
return myChart;
};
pdf.renderRadarForLoanPdf = function (element, indicator, value, name) {
var myChart = echarts.init(element);
var option = {
tooltip: {
trigger: 'item',
enterable: true,
confine: true,
formatter: function (params) {
var str = params.name + '<br/>';
indicator.forEach(function (item, index) {
var val = value[index];
val = val === '' ? '暂无数据' : val;
str += item.name + ':' + val + '<br/>';
});
return str;
}
},
radar: {
splitNumber: 4,
radius: '58%',
name: {
textStyle: {
color: 'rgb(51,51,51)',
fontSize: 22
// padding: [3, 5]
}
},
splitArea: {
show: false,
areaStyle: {
                    // chart background color
color: '#fff'
}
},
axisLine: {
show: false
},
splitLine: {
show: true,
lineStyle: {
width: 1,
                        // grid line color
|
}
},
indicator: indicator
},
series: [{
name: name,
type: 'radar',
symbolSize: 6,
label: {
show: true,
position: 'top',
color: 'rgb(51,51,51)',
fontSize: 18,
offset: [0, 0]
},
data: [{
value: value,
name: name,
areaStyle: {
normal: {
color: 'rgba( 255, 100, 15, 0.0 )'
}
},
itemStyle: {
normal: {
borderWidth: 6,
borderColor: 'rgb(64, 125, 221)'
}
},
lineStyle: {
normal: {
color: 'rgba(255, 100, 15,0.3)',
type: 'solid',
width: 3
}
}
}]
}]
};
myChart.setOption(option);
return myChart;
};
/**
     * Bar chart
*/
pdf.renderBarForPdf = function(element,xData,section) {
var colorList = [
['#37BBF8','#ff00ff'],
['#BCEE68','#B2DFEE'],
['#8B8B00','#8B7765'],
['#7B68EE','#7FFF00'],
['#0000EE','#FFA500'],
];
var sectionList = ['较差','中等','良好','优秀','极好'];
var myChart = echarts.init(element);
var option = {
xAxis: {
type: 'category',
show: true,
data: xData,
axisTick: {
show: false
},
axisLine: {
show: false,
},
boundaryGap: true,
axisLabel: {
color: '#666',
interval: 0,
fontSize: 18,
padding:[0,0,0,0],
},
},
yAxis: {
type: 'value',
max: 40,
show: false,
},
series: [
{
data: [20,20,20,20,20],
type: 'bar',
barWidth: '100%',
itemStyle: {
normal: {
color: function (params) {
return new echarts.graphic.LinearGradient(
0, 0, 1, 0,
[
{offset: 0, color: colorList[params.dataIndex][0]},
{offset: 1, color: colorList[params.dataIndex][1]}
]
)
},
}
},
label: {
show: true,
position: 'top',
distance: 10,
color: '#666666',
fontSize: 16,
formatter: function (params) {
if (params.name == section) {
return sectionList[params.dataIndex]+'ok';
} else {
return sectionList[params.dataIndex]
}
},
rich: {
}
}
}
]
};
myChart.setOption(option);
return myChart;
};
/**
     * Get a parameter from the URL
     * @param name the parameter name
* @returns {*}
*/
pdf.getQueryString = function (name) {
        var reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)", "i"); // build the matching regular expression
var r = window.location.search.substr(1).match(reg);
if (r != null) return decodeURI(r[2]);
return null;
}
/**
     * [Text comparison plugin]
     * Takes two arguments, dom1 and dom2, and compares them with dom1 as the baseline.
     * 0) dom1 and dom2 must not both be empty;
     * 1) if dom1 does not exist, dom2 is styled as an addition
     * 2) if dom2 does not exist, dom1 is styled as a deletion
     * 3) if both dom1 and dom2 exist, a text diff is performed
*
*/
pdf.MyCompare = function (dom1, dom2) {
if (!dom1 && !dom2) {
                // console.log('Invalid arguments: dom1 and dom2 cannot both be empty.');
return;
}
else if (!dom1) {
                // dom1 is missing: style dom2 as an addition
dom2.style.color = '#90EE90';
} else if (!dom2) {
                // dom2 is missing: style dom1 as a deletion
dom1.style.color = '#FF6347';
dom1.style.textDecoration = 'line-through';
} else {
                // run the text diff
var result = _eq({value1: dom1.innerText || dom1.innerHTML, value2: dom2.innerText || dom2.innerHTML});
dom1.innerHTML = result.value1;
dom2.innerHTML = result.value2;
}
}
function _eq(op) {
if (!op) {
return op;
}
if (!op.value1_style) {
op.value1_style = "color:#6bd5fc;";
}
if (!op.value2_style) {
op.value2_style = "color:#6bd5fc;";
}
if (!op.eq_min) {
op.eq_min = 3;
}
if (!op.eq_index) {
op.eq_index = 5;
}
if (!op.value1 || !op.value2) {
return op;
}
var ps = {
v1_i: 0,
v1_new_value: "",
v2_i: 0,
v2_new_value: ""
};
while (ps.v1_i < op.value1.length && ps.v2_i < op.value2.length) {
if (op.value1[ps.v1_i] == op.value2[ps.v2_i]) {
                ps.v1_new_value += op.value1[ps.v1_i].replace(/</g, "&lt;").replace(">", "&gt;");
                ps.v2_new_value += op.value2[ps.v2_i].replace(/</g, "&lt;").replace(">", "&gt;");
ps.v1_i += 1;
ps.v2_i += 1;
if (ps.v1_i >= op.value1.length) {
ps.v2_new_value += "<span style='" + op.value2_style + "'>" + op.value2.substr(ps.v2_i).replace(/</g, "<").replace(">", ">") + "</span>";
break;
}
if (ps.v2_i >= op.value2.length) {
ps.v1
|
color: 'rgb(102, 102, 102)',
|
random_line_split
|
EMPIRIC_analysis_EXP.py
|
(dat, dataset):
"""This function filters low-qual and other controls data from the
original expanded version of the EMPIRIC dataset."""
#
dat = dat[dat['organism'].isin(dataset)]
no_mmei_index = dat['mmei']=='no'
nonstop_index = dat['mutstop']=='no'
zerofit_index = dat['fitness'].abs()>1e-4
mutwt_index = dat['mutwt']=='no'
dat = dat[no_mmei_index & nonstop_index & zerofit_index & mutwt_index]
#print "Filtered data"
return dat
#add features to table
def pivotAddFeatures(dat, dat_features):
"""Takes raw EMPIRIC data (unrolled and filtered), and features of positions in library:
Returns a merged table, where each library position is characterized by features and 20-EMPIRIC fitness readings.
BEWARE:
    Unique labels are 'organism-pos' and their order as in features table, which is crucial for compatibility."""
#pivot EMPIRIC dataset to have mutaa(fitness readings) as columns and wt pos as rows
table = dat.pivot(index='organism-pos',columns='mutaa',values='fitness')
# add wtaa residues to table based on org-pos
columns_to_merge = dat[['organism-pos','wtaa']].drop_duplicates()
merged_table = table.merge(columns_to_merge, how='inner',left_index=True,right_on='organism-pos').reset_index(drop=True)
# add other features of interest to the table
# MUST merge the table to features to keep org-pos order as in EMPIRIC_features_fname ...
merged_table = dat_features.merge(merged_table,how='inner',on='organism-pos',suffixes=('', '_copy')).reset_index(drop=True)
#add kd of wildtype
KD = ProtParamData.kd
merged_table['wtKD'] = merged_table['wtaa'].map(KD)
# we can add more features later on here ...
# ...
# ...
#print merged_table
return merged_table
#main function for running PCA, calls on subfunctions
def runPCA(dat):
""" run PCA, notes:
    matrix has to be pandas df, and can contain NAs, they are omitted during covariation calculation
and NAs are filled with 0.0 during the projection onto eigenvectors. """
#######################################################
def calculateVarianceExplainedSort(eig_vals, eig_vecs):
"""function zips eig val and vecs and sort them simultaneously,
calculates fractions of explained variance and returns fracs and eigvecs
sorted by eigvalues in descending order."""
#####################################
# check if there are any negative eigvals, there should not be any presumably ...
if (eig_vals < 0.0).any():
print "BEWARE: There are some negative eigvals in the PCA analysis!"
print "script proceeds, but that's something to check!"
###############################################
#sort from largest to smallest eigenvalues (both eigvals and eigvectors)
sorted_eigs = sorted(zip(np.abs(eig_vals),np.transpose(eig_vecs)),reverse=True)
# extracted sorted vectors ...
sorted_eigvecs = [eigvec for eigval,eigvec in sorted_eigs]
# calculate var fractions (ordered the same way as vectors: descending)
total_eigval = sum([eigval for eigval,eigvec in sorted_eigs])
fracs_var_explained = [eigval/total_eigval*100.0 for eigval,eigvec in sorted_eigs]
return (fracs_var_explained, sorted_eigvecs)
################################
def getProjectionMatrix(sorted_eigvecs):
"""takes eigen vectors in a sorted order and stacks them to create a projection matrix W"""
matrix_w = np.vstack(sorted_eigvecs).transpose()
return matrix_w
############################################
def getDotProduct(origina_matrix, matrix_w):
Y = np.asarray(origina_matrix.fillna(0.0)).dot(matrix_w)
return Y
#
# get matrix from data ...
raw_matrix = dat.reset_index(drop=True)
# normalize the matrix ...
normal_matrix = (raw_matrix - raw_matrix.mean())/raw_matrix.std()
# get covariation matrix ...
cov_matrix = normal_matrix.cov()
# get matrix's eigen-vectors and values ...
# BEWARE: use np.linalg.eigh, that assumes the symmetry of the matrix, to avoid imaginary numbers.
eig_vals, eig_vecs = np.linalg.eigh(cov_matrix)
    # get variation explained and sort everything by it ...
fracs_var_explained, sorted_eigvecs = calculateVarianceExplainedSort(eig_vals, eig_vecs)
# get projection matrix ...
matrix_w = getProjectionMatrix(sorted_eigvecs)
# Project data to the axes of highest variation (eig vectors)
# dot product of PCA table and eigvecs ...
Y = getDotProduct(normal_matrix, matrix_w)
    # in 'Y' - columns are Principal Components and rows correspond to samples ...
# Y.shape -(num_rows,num_cols) ...
Y_num_rows, Y_num_cols = Y.shape
PC_dict = dict( ('PC%d'%(idx+1), Y[:,idx]) for idx in range(Y_num_cols) )
var_dict = dict( ('PC%d'%(idx+1), frac) for idx,frac in enumerate(fracs_var_explained) )
return (var_dict,PC_dict,matrix_w)
def EMPIRIC_pipeline_separate(EMPIRIC_features_fname, EMPIRIC_raw_data_fname, dataset=['Ss','Tm','Tt']):
# read raw data ...
dat = pd.read_csv(EMPIRIC_raw_data_fname)
# filter and extract dataset of interest ...
dat = filterDataset(dat, dataset=dataset)
# extract positions we care about from 'EMPIRIC_features_fname' ...
lib_dat = pd.read_csv(EMPIRIC_features_fname)
# example of '' organism-pos content: 'Ss-55' ...
lib_dat['organism'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[0])
lib_dat['pos'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[1])
# wt amino acid column MUST be in lib_dat!
assert 'wtaa' in lib_dat.columns
#
# merge pivoted raw data with the features ...
merged_dat = pivotAddFeatures(dat, lib_dat)
#
# separate lib_dat by organism ...
tmp_dat_grouped = lib_dat.groupby('organism')
# extract alignments corresponding to dataset ...
# empty storage ...
lib_pos_dict = {}
matrix_list = []
matrix_w_dict = {}
aln_info_dict = {}
PC_dict_of_dict = {}
for tmpid in dataset:
print
print "Preparing separate PCAs for different organisms: %s"%tmpid
matrix = merged_dat[merged_dat['organism']==tmpid][aacids]
# print matrix
var_dict, PC_dict, matrix_w = runPCA(matrix)
# print fraction of variability the PCs explain ...
print
print "variation explained by components for dataset: ",tmpid
for pc in sorted( var_dict, key=lambda x: int(x.strip('PC')) ):
print pc,'%.1f%%'%var_dict[pc]
# store PCs and projection matrices ...
PC_dict_of_dict[tmpid] = PC_dict
matrix_w_dict[tmpid] = matrix_w
# # PCA ...
#
# now merge components to lib_dat as well ...
# merge PC table to lib_dat as well ...
merged_dat = merged_dat.merge(
pd.concat(pd.DataFrame( PC_dict_of_dict[tmpid],index=merged_dat[merged_dat['organism']==tmpid]['organism-pos']) for tmpid in dataset),
left_on='organism-pos', right_index=True )
# add more columns to the merged_dat ...
# get average fitness per position, along with min/max fitness ...
merged_dat['fitness'] = merged_dat[aacids].mean(axis=1)
merged_dat['min_fitness'] = merged_dat[aacids].min(axis=1)
merged_dat['max_fitness'] = merged_dat[aacids].max(axis=1)
# return our output the large table ...
return merged_dat, matrix_w_dict
def EMPIRIC_pipeline_joint(EMPIRIC_features_fname, EMPIRIC_raw_data_fname, dataset=['Ss','Tm','Tt']):
# read raw data ...
dat = pd.read_csv(EMPIRIC_raw_data_fname)
# filter and extract dataset of interest ...
dat = filterDataset(dat, dataset=dataset)
# extract positions we care about from 'EMPIRIC_features_fname' ...
lib_dat = pd.read_csv(EMPIRIC_features_fname)
# example of '' organism-pos content: 'Ss-55' ...
lib_dat['organism'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[0])
lib_dat['pos'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[1])
# wt amino acid column MUST be in lib_dat!
assert 'wtaa
|
filterDataset
|
identifier_name
|
|
EMPIRIC_analysis_EXP.py
|
This function filters low-qual and other controls data from the
original expanded version of the EMPIRIC dataset."""
#
dat = dat[dat['organism'].isin(dataset)]
no_mmei_index = dat['mmei']=='no'
nonstop_index = dat['mutstop']=='no'
zerofit_index = dat['fitness'].abs()>1e-4
mutwt_index = dat['mutwt']=='no'
dat = dat[no_mmei_index & nonstop_index & zerofit_index & mutwt_index]
#print "Filtered data"
return dat
#add features to table
def pivotAddFeatures(dat, dat_features):
|
#main function for running PCA, calls on subfunctions
def runPCA(dat):
""" run PCA, notes:
    matrix has to be pandas df, and can contain NAs, they are omitted during covariation calculation
and NAs are filled with 0.0 during the projection onto eigenvectors. """
#######################################################
def calculateVarianceExplainedSort(eig_vals, eig_vecs):
"""function zips eig val and vecs and sort them simultaneously,
calculates fractions of explained variance and returns fracs and eigvecs
sorted by eigvalues in descending order."""
#####################################
# check if there are any negative eigvals, there should not be any presumably ...
if (eig_vals < 0.0).any():
print "BEWARE: There are some negative eigvals in the PCA analysis!"
print "script proceeds, but that's something to check!"
###############################################
#sort from largest to smallest eigenvalues (both eigvals and eigvectors)
sorted_eigs = sorted(zip(np.abs(eig_vals),np.transpose(eig_vecs)),reverse=True)
# extracted sorted vectors ...
sorted_eigvecs = [eigvec for eigval,eigvec in sorted_eigs]
# calculate var fractions (ordered the same way as vectors: descending)
total_eigval = sum([eigval for eigval,eigvec in sorted_eigs])
fracs_var_explained = [eigval/total_eigval*100.0 for eigval,eigvec in sorted_eigs]
return (fracs_var_explained, sorted_eigvecs)
################################
def getProjectionMatrix(sorted_eigvecs):
"""takes eigen vectors in a sorted order and stacks them to create a projection matrix W"""
matrix_w = np.vstack(sorted_eigvecs).transpose()
return matrix_w
############################################
def getDotProduct(origina_matrix, matrix_w):
Y = np.asarray(origina_matrix.fillna(0.0)).dot(matrix_w)
return Y
#
# get matrix from data ...
raw_matrix = dat.reset_index(drop=True)
# normalize the matrix ...
normal_matrix = (raw_matrix - raw_matrix.mean())/raw_matrix.std()
# get covariation matrix ...
cov_matrix = normal_matrix.cov()
# get matrix's eigen-vectors and values ...
# BEWARE: use np.linalg.eigh, that assumes the symmetry of the matrix, to avoid imaginary numbers.
eig_vals, eig_vecs = np.linalg.eigh(cov_matrix)
    # get variation explained and sort everything by it ...
fracs_var_explained, sorted_eigvecs = calculateVarianceExplainedSort(eig_vals, eig_vecs)
# get projection matrix ...
matrix_w = getProjectionMatrix(sorted_eigvecs)
# Project data to the axes of highest variation (eig vectors)
# dot product of PCA table and eigvecs ...
Y = getDotProduct(normal_matrix, matrix_w)
    # in 'Y' - columns are Principal Components and rows correspond to samples ...
# Y.shape -(num_rows,num_cols) ...
Y_num_rows, Y_num_cols = Y.shape
PC_dict = dict( ('PC%d'%(idx+1), Y[:,idx]) for idx in range(Y_num_cols) )
var_dict = dict( ('PC%d'%(idx+1), frac) for idx,frac in enumerate(fracs_var_explained) )
return (var_dict,PC_dict,matrix_w)
def EMPIRIC_pipeline_separate(EMPIRIC_features_fname, EMPIRIC_raw_data_fname, dataset=['Ss','Tm','Tt']):
# read raw data ...
dat = pd.read_csv(EMPIRIC_raw_data_fname)
# filter and extract dataset of interest ...
dat = filterDataset(dat, dataset=dataset)
# extract positions we care about from 'EMPIRIC_features_fname' ...
lib_dat = pd.read_csv(EMPIRIC_features_fname)
# example of '' organism-pos content: 'Ss-55' ...
lib_dat['organism'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[0])
lib_dat['pos'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[1])
# wt amino acid column MUST be in lib_dat!
assert 'wtaa' in lib_dat.columns
#
# merge pivoted raw data with the features ...
merged_dat = pivotAddFeatures(dat, lib_dat)
#
# separate lib_dat by organism ...
tmp_dat_grouped = lib_dat.groupby('organism')
# extract alignments corresponding to dataset ...
# empty storage ...
lib_pos_dict = {}
matrix_list = []
matrix_w_dict = {}
aln_info_dict = {}
PC_dict_of_dict = {}
for tmpid in dataset:
print
print "Preparing separate PCAs for different organisms: %s"%tmpid
matrix = merged_dat[merged_dat['organism']==tmpid][aacids]
# print matrix
var_dict, PC_dict, matrix_w = runPCA(matrix)
# print fraction of variability the PCs explain ...
print
print "variation explained by components for dataset: ",tmpid
for pc in sorted( var_dict, key=lambda x: int(x.strip('PC')) ):
print pc,'%.1f%%'%var_dict[pc]
# store PCs and projection matrices ...
PC_dict_of_dict[tmpid] = PC_dict
matrix_w_dict[tmpid] = matrix_w
# # PCA ...
#
# now merge components to lib_dat as well ...
# merge PC table to lib_dat as well ...
merged_dat = merged_dat.merge(
pd.concat(pd.DataFrame( PC_dict_of_dict[tmpid],index=merged_dat[merged_dat['organism']==tmpid]['organism-pos']) for tmpid in dataset),
left_on='organism-pos', right_index=True )
# add more columns to the merged_dat ...
# get average fitness per position, along with min/max fitness ...
merged_dat['fitness'] = merged_dat[aacids].mean(axis=1)
merged_dat['min_fitness'] = merged_dat[aacids].min(axis=1)
merged_dat['max_fitness'] = merged_dat[aacids].max(axis=1)
# return our output the large table ...
return merged_dat, matrix_w_dict
def EMPIRIC_pipeline_joint(EMPIRIC_features_fname, EMPIRIC_raw_data_fname, dataset=['Ss','Tm','Tt']):
# read raw data ...
dat = pd.read_csv(EMPIRIC_raw_data_fname)
# filter and extract dataset of interest ...
dat = filterDataset(dat, dataset=dataset)
# extract positions we care about from 'EMPIRIC_features_fname' ...
lib_dat = pd.read_csv(EMPIRIC_features_fname)
# example of '' organism-pos content: 'Ss-55' ...
lib_dat['organism'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[0])
lib_dat['pos'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[1])
# wt amino acid column MUST be in lib_dat!
assert 'wtaa' in lib_dat.columns
#
|
"""Takes raw EMPIRIC data (unrolled and filtered), and features of positions in library:
Returns a merged table, where each library position is characterized by features and 20-EMPIRIC fitness readings.
BEWARE:
    Unique labels are 'organism-pos' and their order as in features table, which is crucial for compatibility."""
#pivot EMPIRIC dataset to have mutaa(fitness readings) as columns and wt pos as rows
table = dat.pivot(index='organism-pos',columns='mutaa',values='fitness')
# add wtaa residues to table based on org-pos
columns_to_merge = dat[['organism-pos','wtaa']].drop_duplicates()
merged_table = table.merge(columns_to_merge, how='inner',left_index=True,right_on='organism-pos').reset_index(drop=True)
# add other features of interest to the table
# MUST merge the table to features to keep org-pos order as in EMPIRIC_features_fname ...
merged_table = dat_features.merge(merged_table,how='inner',on='organism-pos',suffixes=('', '_copy')).reset_index(drop=True)
#add kd of wildtype
KD = ProtParamData.kd
merged_table['wtKD'] = merged_table['wtaa'].map(KD)
# we can add more features later on here ...
# ...
# ...
#print merged_table
return merged_table
|
identifier_body
|
EMPIRIC_analysis_EXP.py
|
"""Takes raw EMPIRIC data (unrolled and filtered), and features of positions in library:
Returns a merged table, where each library position is characterized by features and 20-EMPIRIC fitness readings.
BEWARE:
    Unique labels are 'organism-pos' and their order as in features table, which is crucial for compatibility."""
#pivot EMPIRIC dataset to have mutaa(fitness readings) as columns and wt pos as rows
table = dat.pivot(index='organism-pos',columns='mutaa',values='fitness')
# add wtaa residues to table based on org-pos
columns_to_merge = dat[['organism-pos','wtaa']].drop_duplicates()
merged_table = table.merge(columns_to_merge, how='inner',left_index=True,right_on='organism-pos').reset_index(drop=True)
# add other features of interest to the table
# MUST merge the table to features to keep org-pos order as in EMPIRIC_features_fname ...
merged_table = dat_features.merge(merged_table,how='inner',on='organism-pos',suffixes=('', '_copy')).reset_index(drop=True)
#add kd of wildtype
KD = ProtParamData.kd
merged_table['wtKD'] = merged_table['wtaa'].map(KD)
# we can add more features later on here ...
# ...
# ...
#print merged_table
return merged_table
#main function for running PCA, calls on subfunctions
def runPCA(dat):
""" run PCA, notes:
    matrix has to be pandas df, and can contain NAs, they are omitted during covariation calculation
and NAs are filled with 0.0 during the projection onto eigenvectors. """
#######################################################
def calculateVarianceExplainedSort(eig_vals, eig_vecs):
"""function zips eig val and vecs and sort them simultaneously,
calculates fractions of explained variance and returns fracs and eigvecs
sorted by eigvalues in descending order."""
#####################################
# check if there are any negative eigvals, there should not be any presumably ...
if (eig_vals < 0.0).any():
print "BEWARE: There are some negative eigvals in the PCA analysis!"
print "script proceeds, but that's something to check!"
###############################################
#sort from largest to smallest eigenvalues (both eigvals and eigvectors)
sorted_eigs = sorted(zip(np.abs(eig_vals),np.transpose(eig_vecs)),reverse=True)
# extracted sorted vectors ...
sorted_eigvecs = [eigvec for eigval,eigvec in sorted_eigs]
# calculate var fractions (ordered the same way as vectors: descending)
total_eigval = sum([eigval for eigval,eigvec in sorted_eigs])
fracs_var_explained = [eigval/total_eigval*100.0 for eigval,eigvec in sorted_eigs]
return (fracs_var_explained, sorted_eigvecs)
################################
def getProjectionMatrix(sorted_eigvecs):
"""takes eigen vectors in a sorted order and stacks them to create a projection matrix W"""
matrix_w = np.vstack(sorted_eigvecs).transpose()
return matrix_w
############################################
def getDotProduct(origina_matrix, matrix_w):
Y = np.asarray(origina_matrix.fillna(0.0)).dot(matrix_w)
return Y
#
# get matrix from data ...
raw_matrix = dat.reset_index(drop=True)
# normalize the matrix ...
normal_matrix = (raw_matrix - raw_matrix.mean())/raw_matrix.std()
# get covariation matrix ...
cov_matrix = normal_matrix.cov()
# get matrix's eigen-vectors and values ...
# BEWARE: use np.linalg.eigh, that assumes the symmetry of the matrix, to avoid imaginary numbers.
eig_vals, eig_vecs = np.linalg.eigh(cov_matrix)
    # get variation explained and sort everything by it ...
fracs_var_explained, sorted_eigvecs = calculateVarianceExplainedSort(eig_vals, eig_vecs)
# get projection matrix ...
matrix_w = getProjectionMatrix(sorted_eigvecs)
# Project data to the axes of highest variation (eig vectors)
# dot product of PCA table and eigvecs ...
Y = getDotProduct(normal_matrix, matrix_w)
    # in 'Y' - columns are Principal Components and rows correspond to samples ...
# Y.shape -(num_rows,num_cols) ...
Y_num_rows, Y_num_cols = Y.shape
PC_dict = dict( ('PC%d'%(idx+1), Y[:,idx]) for idx in range(Y_num_cols) )
var_dict = dict( ('PC%d'%(idx+1), frac) for idx,frac in enumerate(fracs_var_explained) )
return (var_dict,PC_dict,matrix_w)
def EMPIRIC_pipeline_separate(EMPIRIC_features_fname, EMPIRIC_raw_data_fname, dataset=['Ss','Tm','Tt']):
# read raw data ...
dat = pd.read_csv(EMPIRIC_raw_data_fname)
# filter and extract dataset of interest ...
dat = filterDataset(dat, dataset=dataset)
# extract positions we care about from 'EMPIRIC_features_fname' ...
lib_dat = pd.read_csv(EMPIRIC_features_fname)
# example of '' organism-pos content: 'Ss-55' ...
lib_dat['organism'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[0])
lib_dat['pos'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[1])
# wt amino acid column MUST be in lib_dat!
assert 'wtaa' in lib_dat.columns
#
# merge pivoted raw data with the features ...
merged_dat = pivotAddFeatures(dat, lib_dat)
#
# separate lib_dat by organism ...
tmp_dat_grouped = lib_dat.groupby('organism')
# extract alignments corresponding to dataset ...
# empty storage ...
lib_pos_dict = {}
matrix_list = []
matrix_w_dict = {}
aln_info_dict = {}
PC_dict_of_dict = {}
for tmpid in dataset:
print
print "Preparing separate PCAs for different organisms: %s"%tmpid
matrix = merged_dat[merged_dat['organism']==tmpid][aacids]
# print matrix
var_dict, PC_dict, matrix_w = runPCA(matrix)
# print fraction of variability the PCs explain ...
print
print "variation explained by components for dataset: ",tmpid
for pc in sorted( var_dict, key=lambda x: int(x.strip('PC')) ):
print pc,'%.1f%%'%var_dict[pc]
# store PCs and projection matrices ...
PC_dict_of_dict[tmpid] = PC_dict
matrix_w_dict[tmpid] = matrix_w
# # PCA ...
#
# now merge components to lib_dat as well ...
# merge PC table to lib_dat as well ...
merged_dat = merged_dat.merge(
pd.concat(pd.DataFrame( PC_dict_of_dict[tmpid],index=merged_dat[merged_dat['organism']==tmpid]['organism-pos']) for tmpid in dataset),
left_on='organism-pos', right_index=True )
# add more columns to the merged_dat ...
# get average fitness per position, along with min/max fitness ...
merged_dat['fitness'] = merged_dat[aacids].mean(axis=1)
merged_dat['min_fitness'] = merged_dat[aacids].min(axis=1)
merged_dat['max_fitness'] = merged_dat[aacids].max(axis=1)
# return our output the large table ...
return merged_dat, matrix_w_dict
def EMPIRIC_pipeline_joint(EMPIRIC_features_fname, EMPIRIC_raw_data_fname, dataset=['Ss','Tm','Tt']):
# read raw data ...
dat = pd.read_csv(EMPIRIC_raw_data_fname)
# filter and extract dataset of interest ...
dat = filterDataset(dat, dataset=dataset)
# extract positions we care about from 'EMPIRIC_features_fname' ...
lib_dat = pd.read_csv(EMPIRIC_features_fname)
# example of '' organism-pos content: 'Ss-55' ...
lib_dat['organism'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[0])
lib_dat['pos'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[1])
# wt amino acid column MUST be in lib_dat!
assert 'wtaa' in lib_dat.columns
#
# merge pivoted raw data with the features ...
merged_dat = pivotAddFeatures(dat, lib_dat)
#
# separate lib_dat by organism ...
tmp_dat_grouped = lib_dat.groupby('organism')
# extract alignments corresponding to dataset ...
# PCA ...
total_matrix = merged_dat[aacids]
var_dict, PC_dict, matrix_w = runPCA(total_matrix)
# print fraction of variability the PCs explain ...
print
print "variation explained by components for dataset: ",dataset
for pc in sorted( var_dict, key=lambda x: int(x.strip('PC')) ):
|
print pc,'%.1f%%'%var_dict[pc]
|
conditional_block
|
|
EMPIRIC_analysis_EXP.py
|
import collections
# EMPIRIC_raw_data_fname = "db-fitness.csv"
# # most important reference file with all the libraries information ...
# EMPIRIC_features_fname = "features-original.csv"
# 20 amino acids ...
aacids = sorted(list(SeqUtils.IUPAC.protein.letters))
#filter dataset
def filterDataset(dat, dataset):
"""This function filters low-qual and other controls data from the
original expanded version of the EMPIRIC dataset."""
#
dat = dat[dat['organism'].isin(dataset)]
no_mmei_index = dat['mmei']=='no'
nonstop_index = dat['mutstop']=='no'
zerofit_index = dat['fitness'].abs()>1e-4
mutwt_index = dat['mutwt']=='no'
dat = dat[no_mmei_index & nonstop_index & zerofit_index & mutwt_index]
#print "Filtered data"
return dat
#add features to table
def pivotAddFeatures(dat, dat_features):
"""Takes raw EMPIRIC data (unrolled and filtered), and features of positions in library:
Returns a merged table, where each library position is characterized by features and 20-EMPIRIC fitness readings.
BEWARE:
    Unique labels are 'organism-pos' and their order as in features table, which is crucial for compatibility."""
#pivot EMPIRIC dataset to have mutaa(fitness readings) as columns and wt pos as rows
table = dat.pivot(index='organism-pos',columns='mutaa',values='fitness')
# add wtaa residues to table based on org-pos
columns_to_merge = dat[['organism-pos','wtaa']].drop_duplicates()
merged_table = table.merge(columns_to_merge, how='inner',left_index=True,right_on='organism-pos').reset_index(drop=True)
# add other features of interest to the table
# MUST merge the table to features to keep org-pos order as in EMPIRIC_features_fname ...
merged_table = dat_features.merge(merged_table,how='inner',on='organism-pos',suffixes=('', '_copy')).reset_index(drop=True)
#add kd of wildtype
KD = ProtParamData.kd
merged_table['wtKD'] = merged_table['wtaa'].map(KD)
# we can add more features later on here ...
# ...
# ...
#print merged_table
return merged_table
#main function for running PCA, calls on subfunctions
def runPCA(dat):
""" run PCA, notes:
    matrix has to be pandas df, and can contain NAs, they are omitted during covariation calculation
and NAs are filled with 0.0 during the projection onto eigenvectors. """
#######################################################
def calculateVarianceExplainedSort(eig_vals, eig_vecs):
"""function zips eig val and vecs and sort them simultaneously,
calculates fractions of explained variance and returns fracs and eigvecs
sorted by eigvalues in descending order."""
#####################################
# check if there are any negative eigvals, there should not be any presumably ...
if (eig_vals < 0.0).any():
print "BEWARE: There are some negative eigvals in the PCA analysis!"
print "script proceeds, but that's something to check!"
###############################################
#sort from largest to smallest eigenvalues (both eigvals and eigvectors)
sorted_eigs = sorted(zip(np.abs(eig_vals),np.transpose(eig_vecs)),reverse=True)
# extracted sorted vectors ...
sorted_eigvecs = [eigvec for eigval,eigvec in sorted_eigs]
# calculate var fractions (ordered the same way as vectors: descending)
total_eigval = sum([eigval for eigval,eigvec in sorted_eigs])
fracs_var_explained = [eigval/total_eigval*100.0 for eigval,eigvec in sorted_eigs]
return (fracs_var_explained, sorted_eigvecs)
################################
def getProjectionMatrix(sorted_eigvecs):
"""takes eigen vectors in a sorted order and stacks them to create a projection matrix W"""
matrix_w = np.vstack(sorted_eigvecs).transpose()
return matrix_w
############################################
def getDotProduct(origina_matrix, matrix_w):
Y = np.asarray(origina_matrix.fillna(0.0)).dot(matrix_w)
return Y
#
# get matrix from data ...
raw_matrix = dat.reset_index(drop=True)
# normalize the matrix ...
normal_matrix = (raw_matrix - raw_matrix.mean())/raw_matrix.std()
# get covariation matrix ...
cov_matrix = normal_matrix.cov()
# get matrix's eigen-vectors and values ...
# BEWARE: use np.linalg.eigh, that assumes the symmetry of the matrix, to avoid imaginary numbers.
eig_vals, eig_vecs = np.linalg.eigh(cov_matrix)
    # get variation explained and sort everything by it ...
fracs_var_explained, sorted_eigvecs = calculateVarianceExplainedSort(eig_vals, eig_vecs)
# get projection matrix ...
matrix_w = getProjectionMatrix(sorted_eigvecs)
# Project data to the axes of highest variation (eig vectors)
# dot product of PCA table and eigvecs ...
Y = getDotProduct(normal_matrix, matrix_w)
    # in 'Y' - columns are Principal Components and rows correspond to samples ...
# Y.shape -(num_rows,num_cols) ...
Y_num_rows, Y_num_cols = Y.shape
PC_dict = dict( ('PC%d'%(idx+1), Y[:,idx]) for idx in range(Y_num_cols) )
var_dict = dict( ('PC%d'%(idx+1), frac) for idx,frac in enumerate(fracs_var_explained) )
return (var_dict,PC_dict,matrix_w)
def EMPIRIC_pipeline_separate(EMPIRIC_features_fname, EMPIRIC_raw_data_fname, dataset=['Ss','Tm','Tt']):
# read raw data ...
dat = pd.read_csv(EMPIRIC_raw_data_fname)
# filter and extract dataset of interest ...
dat = filterDataset(dat, dataset=dataset)
# extract positions we care about from 'EMPIRIC_features_fname' ...
lib_dat = pd.read_csv(EMPIRIC_features_fname)
# example of '' organism-pos content: 'Ss-55' ...
lib_dat['organism'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[0])
lib_dat['pos'] = lib_dat['organism-pos'].str.split('-').apply(lambda x: x[1])
# wt amino acid column MUST be in lib_dat!
assert 'wtaa' in lib_dat.columns
#
# merge pivoted raw data with the features ...
merged_dat = pivotAddFeatures(dat, lib_dat)
#
# separate lib_dat by organism ...
tmp_dat_grouped = lib_dat.groupby('organism')
# extract alignments corresponding to dataset ...
# empty storage ...
lib_pos_dict = {}
matrix_list = []
matrix_w_dict = {}
aln_info_dict = {}
PC_dict_of_dict = {}
for tmpid in dataset:
print
print "Preparing separate PCAs for different organisms: %s"%tmpid
matrix = merged_dat[merged_dat['organism']==tmpid][aacids]
# print matrix
var_dict, PC_dict, matrix_w = runPCA(matrix)
# print fraction of variability the PCs explain ...
print
print "variation explained by components for dataset: ",tmpid
for pc in sorted( var_dict, key=lambda x: int(x.strip('PC')) ):
print pc,'%.1f%%'%var_dict[pc]
# store PCs and projection matrices ...
PC_dict_of_dict[tmpid] = PC_dict
matrix_w_dict[tmpid] = matrix_w
# # PCA ...
#
# now merge components to lib_dat as well ...
# merge PC table to lib_dat as well ...
merged_dat = merged_dat.merge(
pd.concat(pd.DataFrame( PC_dict_of_dict[tmpid],index=merged_dat[merged_dat['organism']==tmpid]['organism-pos']) for tmpid in dataset),
left_on='organism-pos', right_index=True )
# add more columns to the merged_dat ...
# get average fitness per position, along with min/max fitness ...
merged_dat['fitness'] = merged_dat[aacids].mean(axis=1)
merged_dat['min_fitness'] = merged_dat[aacids].min(axis=1)
merged_dat['max_fitness'] = merged_dat[aacids].max(axis=1)
# return our output the large table ...
return merged_dat, matrix_w_dict
def EMPIRIC_pipeline_joint(EMPIRIC_features_fname, EMPIRIC_raw_data_fname, dataset=['Ss','Tm','Tt']):
# read raw data ...
dat = pd.read_csv(EMPIRIC_raw_data_fname)
# filter and extract dataset of interest ...
dat = filterDataset(dat, dataset=dataset)
# extract positions we care about from 'EMPIRIC_features_fname' ...
lib_dat = pd.read_csv(EMPIRIC_features_fname)
# example of '' organism-pos content: 'Ss-55' ...
lib_dat['organ
|
random_line_split
|
||
ExportGltf.ts
|
Globals.binBytesWritten,
byteLength: indices.byteLength,
});
GltfGlobals.binBytesWritten += indices.byteLength;
fs.writeSync(GltfGlobals.binFile, indices);
}
function addMeshPointsAndNormals(points: Float64Array, normals: Float32Array) {
// GLTF is RHS with Y-up, iModel.js is RHS with Z-up
const convertPoint = (outArray: Float32Array, outIndex: number, x: number, y: number, z: number) => {
outArray[outIndex] = x;
outArray[outIndex + 1] = z;
outArray[outIndex + 2] = -y;
};
const outPoints = new Float32Array(points.length);
for (let i = 0; i < points.length; i += 3)
convertPoint(outPoints, i, points[i], points[i + 1], points[i + 2]);
const outNormals = new Float32Array(normals.length);
for (let i = 0; i < normals.length; i += 3)
convertPoint(outNormals, i, normals[i], normals[i + 1], normals[i + 2]);
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outPoints.byteLength + outNormals.byteLength,
byteStride: 12,
});
fs.writeSync(GltfGlobals.binFile, outPoints);
fs.writeSync(GltfGlobals.binFile, outNormals);
GltfGlobals.binBytesWritten += outPoints.byteLength + outNormals.byteLength;
const minPos = [outPoints[0], outPoints[1], outPoints[2]];
const maxPos = Array.from(minPos);
for (let i = 0; i < outPoints.length; i += 3) {
for (let j = 0; j < 3; ++j) {
minPos[j] = Math.min(minPos[j], outPoints[i + j]);
maxPos[j] = Math.max(maxPos[j], outPoints[i + j]);
}
}
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outPoints.length / 3,
type: "VEC3",
max: maxPos,
min: minPos,
});
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: outPoints.byteLength,
componentType: AccessorComponentType.Float,
count: outNormals.length / 3,
type: "VEC3",
});
}
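// Added note (illustration only, not in the original exporter): convertPoint above maps an
// iModel.js right-handed Z-up point (x, y, z) to glTF's right-handed Y-up frame as (x, z, -y).
// For example, the "up" vector (0, 0, 1) in iModel.js becomes (0, 1, 0) in glTF, and (0, 1, 0)
// becomes (0, 0, -1), which keeps the coordinate system right-handed.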
function addMeshParams(params: Float32Array) {
const outParams = new Float32Array(params.length);
for (let i = 0; i < params.length; i += 2) {
outParams[i] = params[i];
outParams[i + 1] = 1 - params[i + 1]; // Flip to match GLTF spec
}
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outParams.byteLength,
byteStride: 8,
});
fs.writeSync(GltfGlobals.binFile, outParams);
GltfGlobals.binBytesWritten += outParams.byteLength;
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outParams.length / 2,
type: "VEC2",
});
}
function addMesh(mesh: ExportGraphicsMesh, color: number, textureId?: Id64String) {
const material = textureId !== undefined ? findOrAddMaterialIndexForTexture(textureId) :
findOrAddMaterialIndexForColor(color);
const primitive: GltfMeshPrimitive = {
mode: MeshPrimitiveMode.GlTriangles,
material,
indices: GltfGlobals.gltf.accessors.length,
attributes: {
// eslint-disable-next-line @typescript-eslint/naming-convention
POSITION: GltfGlobals.gltf.accessors.length + 1,
// eslint-disable-next-line @typescript-eslint/naming-convention
NORMAL: GltfGlobals.gltf.accessors.length + 2,
},
};
if (textureId !== undefined)
primitive.attributes.TEXCOORD_0 = GltfGlobals.gltf.accessors.length + 3;
GltfGlobals.gltf.meshes.push({ primitives: [primitive] });
addMeshIndices(mesh.indices);
addMeshPointsAndNormals(mesh.points, mesh.normals);
if (textureId !== undefined) addMeshParams(mesh.params);
}
function addMeshNode(name: string) {
GltfGlobals.gltf.scenes[0].nodes.push(GltfGlobals.gltf.nodes.length);
GltfGlobals.gltf.nodes.push({ name, mesh: GltfGlobals.gltf.meshes.length });
}
function addLines(lines: ExportGraphicsLines, color: number) {
const primitive: GltfMeshPrimitive = {
mode: MeshPrimitiveMode.GlLines,
material: findOrAddMaterialIndexForColor(color),
indices: GltfGlobals.gltf.accessors.length,
attributes: {
// eslint-disable-next-line @typescript-eslint/naming-convention
POSITION: GltfGlobals.gltf.accessors.length + 1,
},
};
GltfGlobals.gltf.meshes.push({ primitives: [primitive] });
addMeshIndices(lines.indices);
// GLTF is RHS with Y-up, iModel.js is RHS with Z-up
const convertPoint = (outArray: Float32Array, outIndex: number, x: number, y: number, z: number) => {
outArray[outIndex] = x;
outArray[outIndex + 1] = z;
outArray[outIndex + 2] = -y;
};
const outPoints = new Float32Array(lines.points.length);
for (let i = 0; i < outPoints.length; i += 3)
convertPoint(outPoints, i, lines.points[i], lines.points[i + 1], lines.points[i + 2]);
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outPoints.byteLength,
byteStride: 12,
});
fs.writeSync(GltfGlobals.binFile, outPoints);
GltfGlobals.binBytesWritten += outPoints.byteLength;
const minPos = [outPoints[0], outPoints[1], outPoints[2]];
const maxPos = Array.from(minPos);
for (let i = 0; i < outPoints.length; i += 3) {
for (let j = 0; j < 3; ++j) {
minPos[j] = Math.min(minPos[j], outPoints[i + j]);
maxPos[j] = Math.max(maxPos[j], outPoints[i + j]);
}
}
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outPoints.length / 3,
type: "VEC3",
max: maxPos,
min: minPos,
});
}
function exportElements(elementIdArray: Id64Array, partInstanceArray: ExportPartInstanceInfo[]) {
const onGraphics = (info: ExportGraphicsInfo) => {
addMeshNode(info.elementId);
addMesh(info.mesh, info.color, info.textureId);
};
const onLineGraphics = (info: ExportLinesInfo) => {
addMeshNode(info.elementId);
addLines(info.lines, info.color);
};
GltfGlobals.iModel.exportGraphics({
chordTol: CHORD_TOL,
angleTol: ANGLE_TOL,
minBRepFeatureSize: MIN_BREP_SIZE,
onGraphics,
onLineGraphics,
elementIdArray,
partInstanceArray,
});
}
function getInstancesByPart(instances: ExportPartInstanceInfo[]): Map<Id64String, ExportPartInstanceInfo[]> {
const partMap = new Map<Id64String, ExportPartInstanceInfo[]>();
for (const instance of instances) {
const instancesForThisPart = partMap.get(instance.partId);
if (instancesForThisPart !== undefined) instancesForThisPart.push(instance);
else partMap.set(instance.partId, [instance]);
}
return partMap;
}
function almostEqual(testValue: number, ...arrayValues: number[]): boolean {
for (const val of arrayValues) {
if (!Geometry.isAlmostEqualNumber(testValue, val)) return false;
}
return true;
}
// translation, rotation, scale only defined if different from GLTF default transforms
class
|
TranslationRotationScale
|
identifier_name
|